-rw-r--r--  Documentation/networking/README.ipw2100  |   246
-rw-r--r--  Documentation/networking/README.ipw2200  |   300
-rw-r--r--  drivers/net/skge.c                       |  1710
-rw-r--r--  drivers/net/skge.h                       |   586
-rw-r--r--  drivers/net/wireless/Kconfig             |   104
-rw-r--r--  drivers/net/wireless/Makefile            |     4
-rw-r--r--  drivers/net/wireless/atmel.c             |    62
-rw-r--r--  drivers/net/wireless/ieee802_11.h        |    78
-rw-r--r--  drivers/net/wireless/ipw2100.c           |  8641
-rw-r--r--  drivers/net/wireless/ipw2100.h           |  1195
-rw-r--r--  drivers/net/wireless/ipw2200.c           |  7361
-rw-r--r--  drivers/net/wireless/ipw2200.h           |  1770
-rw-r--r--  drivers/net/wireless/orinoco.c           |    11
-rw-r--r--  drivers/net/wireless/wl3501.h            |     4
-rw-r--r--  drivers/usb/net/Makefile                 |     2
-rw-r--r--  drivers/usb/net/zd1201.c                 |    16
-rw-r--r--  include/linux/etherdevice.h              |     2
-rw-r--r--  include/net/ieee80211.h                  |    50
-rw-r--r--  include/net/ieee80211_crypt.h            |    86
-rw-r--r--  net/Kconfig                              |     2
-rw-r--r--  net/Makefile                             |     1
-rw-r--r--  net/ieee80211/Kconfig                    |    69
-rw-r--r--  net/ieee80211/Makefile                   |    11
-rw-r--r--  net/ieee80211/ieee80211_crypt.c          |   259
-rw-r--r--  net/ieee80211/ieee80211_crypt_ccmp.c     |   470
-rw-r--r--  net/ieee80211/ieee80211_crypt_tkip.c     |   708
-rw-r--r--  net/ieee80211/ieee80211_crypt_wep.c      |   272
-rw-r--r--  net/ieee80211/ieee80211_module.c         |   273
-rw-r--r--  net/ieee80211/ieee80211_rx.c             |  1205
-rw-r--r--  net/ieee80211/ieee80211_tx.c             |   447
-rw-r--r--  net/ieee80211/ieee80211_wx.c             |   471
31 files changed, 24886 insertions, 1530 deletions
diff --git a/Documentation/networking/README.ipw2100 b/Documentation/networking/README.ipw2100
new file mode 100644
index 000000000000..2046948b020d
--- /dev/null
+++ b/Documentation/networking/README.ipw2100
@@ -0,0 +1,246 @@
1
2===========================
3Intel(R) PRO/Wireless 2100 Network Connection Driver for Linux
4README.ipw2100
5
6March 14, 2005
7
8===========================
9Index
10---------------------------
110. Introduction
121. Release 1.1.0 Current Features
132. Command Line Parameters
143. Sysfs Helper Files
154. Radio Kill Switch
165. Dynamic Firmware
176. Power Management
187. Support
198. License
20
21
22===========================
230. Introduction
24------------ ----- ----- ---- --- -- -
25
26This document provides a brief overview of the features supported by the
27IPW2100 driver project. The main project website, where the latest
28development version of the driver can be found, is:
29
30 http://ipw2100.sourceforge.net
31
32There you can find not only the latest releases, but also information about
33potential fixes and patches, as well as links to the development mailing list
34for the driver project.
35
36
37===========================
381. Release 1.1.0 Current Supported Features
39---------------------------
40- Managed (BSS) and Ad-Hoc (IBSS)
41- WEP (shared key and open)
42- Wireless Tools support
43- 802.1x (tested with XSupplicant 1.0.1)
44
45Enabled (but not supported) features:
46- Monitor/RFMon mode
47- WPA/WPA2
48
49The distinction between officially supported and enabled features reflects
50the amount of validation and interoperability testing that has been
51performed on a given feature.
52
53
54===========================
552. Command Line Parameters
56---------------------------
57
58If the driver is built as a module, the following optional parameters can be
59set by passing them on the command line to the modprobe command using this
60syntax:
61
62 modprobe ipw2100 [<option>=<VAL1><,VAL2>...]
63
64For example, to disable the radio on driver loading, enter:
65
66 modprobe ipw2100 disable=1
67
68The ipw2100 driver supports the following module parameters:
69
70Name Value Example:
71debug 0x0-0xffffffff debug=1024
72mode 0,1,2 mode=1 /* AdHoc */
73channel int channel=3 /* Only valid in AdHoc or Monitor */
74associate boolean associate=0 /* Do NOT auto associate */
75disable boolean disable=1 /* Do not power the HW */
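
As an illustrative combination of the parameters above, the following would
load the driver in Ad-Hoc mode on channel 3 without auto-associating:

	modprobe ipw2100 mode=1 channel=3 associate=0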
76
77
78===========================
793. Sysfs Helper Files
80---------------------------
81
82There are several ways to control the behavior of the driver. Many of the
83general capabilities are exposed through the Wireless Tools (iwconfig). There
84are a few capabilities that are exposed through entries in the Linux Sysfs.
85
86
87----- Driver Level ------
88For the driver level files, look in /sys/bus/pci/drivers/ipw2100/
89
90 debug_level
91
92 This controls the same global as the 'debug' module parameter. For
93 information on the various debugging levels available, run the 'dvals'
94 script found in the driver source directory.
95
96	NOTE: 'debug_level' is only enabled if CONFIG_IPW2100_DEBUG is turned
97 on.
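
	For example, the debug level can be queried and changed with commands
	like the following (the value shown is illustrative):

	    cat /sys/bus/pci/drivers/ipw2100/debug_level
	    echo 1024 > /sys/bus/pci/drivers/ipw2100/debug_level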
98
99----- Device Level ------
100For the device level files look in
101
102 /sys/bus/pci/drivers/ipw2100/{PCI-ID}/
103
104For example:
105 /sys/bus/pci/drivers/ipw2100/0000:02:01.0
106
107For the device level files, see /sys/bus/pci/drivers/ipw2100/{PCI-ID}/:
108
109 rf_kill
110 read -
111 0 = RF kill not enabled (radio on)
112 1 = SW based RF kill active (radio off)
113 2 = HW based RF kill active (radio off)
114 3 = Both HW and SW RF kill active (radio off)
115 write -
116 0 = If SW based RF kill active, turn the radio back on
117 1 = If radio is on, activate SW based RF kill
118
119 NOTE: If you enable the SW based RF kill and then toggle the HW
120 based RF kill from ON -> OFF -> ON, the radio will NOT come back on
121
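As an illustration (using the example device path shown earlier), the current
RF kill state can be read, and the software kill switch engaged, with:

	cat /sys/bus/pci/drivers/ipw2100/0000:02:01.0/rf_kill
	echo 1 > /sys/bus/pci/drivers/ipw2100/0000:02:01.0/rf_kill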
122
123===========================
1244. Radio Kill Switch
125---------------------------
126Most laptops provide the ability for the user to physically disable the radio.
127Some vendors have implemented this as a physical switch that requires no
128software to turn the radio off and on. On other laptops, however, the switch
129is controlled through a button being pressed and a software driver then making
130calls to turn the radio off and on. This is referred to as a "software based
131RF kill switch".
132
133See the Sysfs helper file 'rf_kill' for determining the state of the RF switch
134on your system.
135
136
137===========================
1385. Dynamic Firmware
139---------------------------
140As the firmware is licensed under a restricted use license, it cannot be
141included within the kernel sources. To enable the IPW2100 you will need a
142firmware image to load into the wireless NIC's processors.
143
144You can obtain these images from <http://ipw2100.sf.net/firmware.php>.
145
146See INSTALL for instructions on installing the firmware.
147
148
149===========================
1506. Power Management
151---------------------------
152The IPW2100 supports the configuration of the Power Save Protocol
153through a private wireless extension interface. The IPW2100 supports
154the following different modes:
155
156 off No power management. Radio is always on.
157 on Automatic power management
158 1-5 Different levels of power management. The higher the
159 number the greater the power savings, but with an impact to
160 packet latencies.
161
162Power management works by powering down the radio after a certain
163interval of time during which no packets pass through the
164radio. Once powered down, the radio remains in that state for a given
165period of time. For higher power savings, the interval between last
166packet processed to sleep is shorter and the sleep period is longer.
167
168When the radio is asleep, the access point sending data to the station
169must buffer packets at the AP until the station wakes up and requests
170any buffered packets. If you have an AP that does not correctly support
171the PSP protocol you may experience packet loss or very poor performance
172while power management is enabled. If this is the case, you will need
173to try to find a firmware update for your AP, or disable power
174management (via `iwconfig eth1 power off`).
175
176To configure the power level on the IPW2100 you use a combination of
177iwconfig and iwpriv. iwconfig is used to turn power management on, off,
178and set it to auto.
179
180 iwconfig eth1 power off Disables radio power down
181 iwconfig eth1 power on Enables radio power management to
182 last set level (defaults to AUTO)
183 iwpriv eth1 set_power 0 Sets power level to AUTO and enables
184 power management if not previously
185 enabled.
186 iwpriv eth1 set_power 1-5 Set the power level as specified,
187 enabling power management if not
188 previously enabled.
189
190You can view the current power level setting via:
191
192 iwpriv eth1 get_power
193
194It will return the current period or timeout that is configured as a string
195in the form of xxxx/yyyy (z) where xxxx is the timeout interval (amount of
196time after packet processing), yyyy is the period to sleep (amount of time to
197wait before powering the radio and querying the access point for buffered
198packets), and z is the 'power level'. If power management is turned off the
199xxxx/yyyy will be replaced with 'off' -- the level reported will be the active
200level if `iwconfig eth1 power on` is invoked.
201
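For example, with power level 3 active the output might look like the
following (the numbers here are purely illustrative):

	iwpriv eth1 get_power
	eth1      get_power:500/1000 (3)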
202
203===========================
2047. Support
205---------------------------
206
207For general development information and support,
208go to:
209
210 http://ipw2100.sf.net/
211
212The ipw2100 1.1.0 driver and firmware can be downloaded from:
213
214 http://support.intel.com
215
216For installation support on the ipw2100 1.1.0 driver on Linux kernels
2172.6.8 or greater, email support is available from:
218
219 http://supportmail.intel.com
220
221===========================
2228. License
223---------------------------
224
225 Copyright(c) 2003 - 2005 Intel Corporation. All rights reserved.
226
227 This program is free software; you can redistribute it and/or modify it
228 under the terms of the GNU General Public License (version 2) as
229 published by the Free Software Foundation.
230
231 This program is distributed in the hope that it will be useful, but WITHOUT
232 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
233 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
234 more details.
235
236 You should have received a copy of the GNU General Public License along with
237 this program; if not, write to the Free Software Foundation, Inc., 59
238 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
239
240 The full GNU General Public License is included in this distribution in the
241 file called LICENSE.
242
243 License Contact Information:
244 James P. Ketrenos <ipw2100-admin@linux.intel.com>
245 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
246
diff --git a/Documentation/networking/README.ipw2200 b/Documentation/networking/README.ipw2200
new file mode 100644
index 000000000000..6916080c5f03
--- /dev/null
+++ b/Documentation/networking/README.ipw2200
@@ -0,0 +1,300 @@
1
2Intel(R) PRO/Wireless 2915ABG Driver for Linux in support of:
3
4Intel(R) PRO/Wireless 2200BG Network Connection
5Intel(R) PRO/Wireless 2915ABG Network Connection
6
7Note: The Intel(R) PRO/Wireless 2915ABG Driver for Linux and the Intel(R)
8PRO/Wireless 2200BG Driver for Linux are a single, unified driver that works on
9both hardware adapters listed above. In this document, the Intel(R)
10PRO/Wireless 2915ABG Driver for Linux is used to refer to the
11unified driver.
12
13Copyright (C) 2004-2005, Intel Corporation
14
15README.ipw2200
16
17Version: 1.0.0
18Date : January 31, 2005
19
20
21Index
22-----------------------------------------------
231. Introduction
241.1. Overview of features
251.2. Module parameters
261.3. Wireless Extension Private Methods
271.4. Sysfs Helper Files
282. About the Version Numbers
293. Support
304. License
31
32
331. Introduction
34-----------------------------------------------
35The following sections attempt to provide a brief introduction to using
36the Intel(R) PRO/Wireless 2915ABG Driver for Linux.
37
38This document is not meant to be a comprehensive manual on
39understanding or using wireless technologies, but should be sufficient
40to get you moving without wires on Linux.
41
42For information on building and installing the driver, see the INSTALL
43file.
44
45
461.1. Overview of Features
47-----------------------------------------------
48The current release (1.0.0) supports the following features:
49
50+ BSS mode (Infrastructure, Managed)
51+ IBSS mode (Ad-Hoc)
52+ WEP (OPEN and SHARED KEY mode)
53+ 802.1x EAP via wpa_supplicant and xsupplicant
54+ Wireless Extension support
55+ Full B and G rate support (2200 and 2915)
56+ Full A rate support (2915 only)
57+ Transmit power control
58+ S state support (ACPI suspend/resume)
59+ long/short preamble support
60
61
62
631.2. Command Line Parameters
64-----------------------------------------------
65
66Like many modules used in the Linux kernel, the Intel(R) PRO/Wireless
672915ABG Driver for Linux allows certain configuration options to be
68provided as module parameters. The most common way to specify a module
69parameter is via the command line.
70
71The general form is:
72
73% modprobe ipw2200 parameter=value
74
75Where the supported parameters are:
76
77 associate
78 Set to 0 to disable the auto scan-and-associate functionality of the
79 driver. If disabled, the driver will not attempt to scan
80 for and associate to a network until it has been configured with
81 one or more properties for the target network, for example configuring
82 the network SSID. Default is 1 (auto-associate)
83
84 Example: % modprobe ipw2200 associate=0
85
86 auto_create
87 Set to 0 to disable the auto creation of an Ad-Hoc network
88 matching the channel and network name parameters provided.
89 Default is 1.
90
91 channel
92 channel number for association. The normal method for setting
93 the channel would be to use the standard wireless tools
94 (i.e. `iwconfig eth1 channel 10`), but it is useful sometimes
95 to set this while debugging. Channel 0 means 'ANY'
96
97 debug
98	If using a debug build, this is used to control the amount of debug
99	info that is logged. See the 'dval' and 'load' scripts for more info on
100 how to use this (the dval and load scripts are provided as part
101 of the ipw2200 development snapshot releases available from the
102 SourceForge project at http://ipw2200.sf.net)
103
104 mode
105 Can be used to set the default mode of the adapter.
106 0 = Managed, 1 = Ad-Hoc
107
108
1091.3. Wireless Extension Private Methods
110-----------------------------------------------
111
112Because the Wireless Extension interface is designed to handle generic
113hardware, certain capabilities are not exposed through the normal Wireless
114Tool interface. To cover these, a driver may declare custom, or
115private, methods. The Intel(R) PRO/Wireless 2915ABG Driver for Linux
116defines several of these to configure various settings.
117
118The general form of using the private wireless methods is:
119
120 % iwpriv $IFNAME method parameters
121
122Where $IFNAME is the interface name the device is registered with
123(typically eth1, customized via one of the various network interface
124name managers, such as ifrename)
125
126The supported private methods are:
127
128 get_mode
129	Can be used to report which IEEE mode the driver is
130 configured to support. Example:
131
132 % iwpriv eth1 get_mode
133 eth1 get_mode:802.11bg (6)
134
135 set_mode
136 Can be used to configure which IEEE mode the driver will
137 support.
138
139 Usage:
140 % iwpriv eth1 set_mode {mode}
141 Where {mode} is a number in the range 1-7:
142 1 802.11a (2915 only)
143 2 802.11b
144 3 802.11ab (2915 only)
145 4 802.11g
146 5 802.11ag (2915 only)
147 6 802.11bg
148 7 802.11abg (2915 only)
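
	For example, to restrict the adapter to 802.11g only operation:

	% iwpriv eth1 set_mode 4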
149
150 get_preamble
151 Can be used to report configuration of preamble length.
152
153 set_preamble
154 Can be used to set the configuration of preamble length:
155
156 Usage:
157 % iwpriv eth1 set_preamble {mode}
158 Where {mode} is one of:
159 1 Long preamble only
160 0 Auto (long or short based on connection)
161
162
1631.4. Sysfs Helper Files:
164-----------------------------------------------
165
166The Linux kernel provides a pseudo file system that can be used to
167access various components of the operating system. The Intel(R)
168PRO/Wireless 2915ABG Driver for Linux exposes several configuration
169parameters through this mechanism.
170
171An entry in the sysfs can support reading and/or writing. You can
172typically query the contents of a sysfs entry through the use of cat,
173and can set the contents via echo. For example:
174
175% cat /sys/bus/pci/drivers/ipw2200/debug_level
176
177Will report the current debug level of the driver's logging subsystem
178(only available if CONFIG_IPW_DEBUG was configured when the driver was
179built).
180
181You can set the debug level via:
182
183% echo $VALUE > /sys/bus/pci/drivers/ipw2200/debug_level
184
185Where $VALUE would be a number in the case of this sysfs entry. The
186input to sysfs files does not have to be a number. For example, the
187firmware loader used by hotplug utilizes sysfs entries for transferring
188the firmware image from user space into the driver.
189
190The Intel(R) PRO/Wireless 2915ABG Driver for Linux exposes sysfs entries
191at two levels -- driver level, which applies to all instances of the
192driver (in the event that there is more than one device installed) and
193device level, which applies only to the single specific instance.
194
195
1961.4.1 Driver Level Sysfs Helper Files
197-----------------------------------------------
198
199For the driver level files, look in /sys/bus/pci/drivers/ipw2200/
200
201 debug_level
202
203 This controls the same global as the 'debug' module parameter
204
205
2061.4.2 Device Level Sysfs Helper Files
207-----------------------------------------------
208
209For the device level files, look in
210
211 /sys/bus/pci/drivers/ipw2200/{PCI-ID}/
212
213For example:
214 /sys/bus/pci/drivers/ipw2200/0000:02:01.0
215
216For the device level files, see /sys/bus/pci/drivers/ipw2200/{PCI-ID}/:
217
218 rf_kill
219 read -
220 0 = RF kill not enabled (radio on)
221 1 = SW based RF kill active (radio off)
222 2 = HW based RF kill active (radio off)
223 3 = Both HW and SW RF kill active (radio off)
224 write -
225 0 = If SW based RF kill active, turn the radio back on
226 1 = If radio is on, activate SW based RF kill
227
228 NOTE: If you enable the SW based RF kill and then toggle the HW
229 based RF kill from ON -> OFF -> ON, the radio will NOT come back on
230
231 ucode
232 read-only access to the ucode version number
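
	For example, the ucode version can be read with (using the illustrative
	PCI ID from the path above):

	% cat /sys/bus/pci/drivers/ipw2200/0000:02:01.0/ucode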
233
234
2352. About the Version Numbers
236-----------------------------------------------
237
238Due to the nature of open source development projects, changes are
239frequently incorporated before they have gone through
240a complete validation process. These changes are incorporated into
241development snapshot releases.
242
243Releases are numbered with a three level scheme:
244
245 major.minor.development
246
247Any version where the 'development' portion is 0 (for example
2481.0.0, 1.1.0, etc.) indicates a stable version that will be made
249available for kernel inclusion.
250
251Any version where the 'development' portion is not a 0 (for
252example 1.0.1, 1.1.5, etc.) indicates a development version that is
253being made available for testing and cutting edge users. The stability
254and functionality of the development releases are not known. We try
255to keep all snapshots reasonably stable, but due to the
256frequency of their release, and the desire to get those releases
257available as quickly as possible, unknown anomalies should be expected.
258
259The major version number will be incremented when significant changes
260are made to the driver. Currently, there are no major changes planned.
261
262
2633. Support
264-----------------------------------------------
265
266For installation support of the 1.0.0 version, you can contact
267http://supportmail.intel.com, or you can use the open source project
268support.
269
270For general information and support, go to:
271
272 http://ipw2200.sf.net/
273
274
2754. License
276-----------------------------------------------
277
278 Copyright(c) 2003 - 2005 Intel Corporation. All rights reserved.
279
280 This program is free software; you can redistribute it and/or modify it
281 under the terms of the GNU General Public License version 2 as
282 published by the Free Software Foundation.
283
284 This program is distributed in the hope that it will be useful, but WITHOUT
285 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
286 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
287 more details.
288
289 You should have received a copy of the GNU General Public License along with
290 this program; if not, write to the Free Software Foundation, Inc., 59
291 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
292
293 The full GNU General Public License is included in this distribution in the
294 file called LICENSE.
295
296 Contact Information:
297 James P. Ketrenos <ipw2100-admin@linux.intel.com>
298 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
299
300
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index 30e8d589d167..3dbb1cb09ed8 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -7,7 +7,7 @@
7 * of the original driver such as link fail-over and link management because 7 * of the original driver such as link fail-over and link management because
8 * those should be done at higher levels. 8 * those should be done at higher levels.
9 * 9 *
10 * Copyright (C) 2004, Stephen Hemminger <shemminger@osdl.org> 10 * Copyright (C) 2004, 2005 Stephen Hemminger <shemminger@osdl.org>
11 * 11 *
12 * This program is free software; you can redistribute it and/or modify 12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by 13 * it under the terms of the GNU General Public License as published by
@@ -42,19 +42,20 @@
42#include "skge.h" 42#include "skge.h"
43 43
44#define DRV_NAME "skge" 44#define DRV_NAME "skge"
45#define DRV_VERSION "0.6" 45#define DRV_VERSION "0.7"
46#define PFX DRV_NAME " " 46#define PFX DRV_NAME " "
47 47
48#define DEFAULT_TX_RING_SIZE 128 48#define DEFAULT_TX_RING_SIZE 128
49#define DEFAULT_RX_RING_SIZE 512 49#define DEFAULT_RX_RING_SIZE 512
50#define MAX_TX_RING_SIZE 1024 50#define MAX_TX_RING_SIZE 1024
51#define MAX_RX_RING_SIZE 4096 51#define MAX_RX_RING_SIZE 4096
52#define RX_COPY_THRESHOLD 128
53#define RX_BUF_SIZE 1536
52#define PHY_RETRIES 1000 54#define PHY_RETRIES 1000
53#define ETH_JUMBO_MTU 9000 55#define ETH_JUMBO_MTU 9000
54#define TX_WATCHDOG (5 * HZ) 56#define TX_WATCHDOG (5 * HZ)
55#define NAPI_WEIGHT 64 57#define NAPI_WEIGHT 64
56#define BLINK_HZ (HZ/4) 58#define BLINK_HZ (HZ/4)
57#define LINK_POLL_HZ (HZ/10)
58 59
59MODULE_DESCRIPTION("SysKonnect Gigabit Ethernet driver"); 60MODULE_DESCRIPTION("SysKonnect Gigabit Ethernet driver");
60MODULE_AUTHOR("Stephen Hemminger <shemminger@osdl.org>"); 61MODULE_AUTHOR("Stephen Hemminger <shemminger@osdl.org>");
@@ -70,28 +71,17 @@ module_param(debug, int, 0);
70MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); 71MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
71 72
72static const struct pci_device_id skge_id_table[] = { 73static const struct pci_device_id skge_id_table[] = {
73 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3C940, 74 { PCI_DEVICE(PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3C940) },
74 PCI_ANY_ID, PCI_ANY_ID }, 75 { PCI_DEVICE(PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3C940B) },
75 { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3C940B, 76 { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_GE) },
76 PCI_ANY_ID, PCI_ANY_ID }, 77 { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_YU) },
77 { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_GE, 78 { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E00) }, /* SK-9Exx */
78 PCI_ANY_ID, PCI_ANY_ID }, 79 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, PCI_DEVICE_ID_DLINK_DGE510T), },
79 { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_YU, 80 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4320) },
80 PCI_ANY_ID, PCI_ANY_ID }, 81 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5005) }, /* Belkin */
81 { PCI_VENDOR_ID_SYSKONNECT, 0x9E00, /* SK-9Exx */ 82 { PCI_DEVICE(PCI_VENDOR_ID_CNET, PCI_DEVICE_ID_CNET_GIGACARD) },
82 PCI_ANY_ID, PCI_ANY_ID }, 83 { PCI_DEVICE(PCI_VENDOR_ID_LINKSYS, PCI_DEVICE_ID_LINKSYS_EG1032) },
83 { PCI_VENDOR_ID_DLINK, PCI_DEVICE_ID_DLINK_DGE510T, 84 { PCI_DEVICE(PCI_VENDOR_ID_LINKSYS, PCI_DEVICE_ID_LINKSYS_EG1064) },
84 PCI_ANY_ID, PCI_ANY_ID },
85 { PCI_VENDOR_ID_MARVELL, 0x4320, /* Gigabit Ethernet Controller */
86 PCI_ANY_ID, PCI_ANY_ID },
87 { PCI_VENDOR_ID_MARVELL, 0x5005, /* Marvell (11ab), Belkin */
88 PCI_ANY_ID, PCI_ANY_ID },
89 { PCI_VENDOR_ID_CNET, PCI_DEVICE_ID_CNET_GIGACARD,
90 PCI_ANY_ID, PCI_ANY_ID },
91 { PCI_VENDOR_ID_LINKSYS, PCI_DEVICE_ID_LINKSYS_EG1032,
92 PCI_ANY_ID, PCI_ANY_ID },
93 { PCI_VENDOR_ID_LINKSYS, PCI_DEVICE_ID_LINKSYS_EG1064,
94 PCI_ANY_ID, PCI_ANY_ID },
95 { 0 } 85 { 0 }
96}; 86};
97MODULE_DEVICE_TABLE(pci, skge_id_table); 87MODULE_DEVICE_TABLE(pci, skge_id_table);
@@ -99,19 +89,22 @@ MODULE_DEVICE_TABLE(pci, skge_id_table);
99static int skge_up(struct net_device *dev); 89static int skge_up(struct net_device *dev);
100static int skge_down(struct net_device *dev); 90static int skge_down(struct net_device *dev);
101static void skge_tx_clean(struct skge_port *skge); 91static void skge_tx_clean(struct skge_port *skge);
102static void skge_xm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val); 92static void xm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val);
103static void skge_gm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val); 93static void gm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val);
104static void genesis_get_stats(struct skge_port *skge, u64 *data); 94static void genesis_get_stats(struct skge_port *skge, u64 *data);
105static void yukon_get_stats(struct skge_port *skge, u64 *data); 95static void yukon_get_stats(struct skge_port *skge, u64 *data);
106static void yukon_init(struct skge_hw *hw, int port); 96static void yukon_init(struct skge_hw *hw, int port);
107static void yukon_reset(struct skge_hw *hw, int port); 97static void yukon_reset(struct skge_hw *hw, int port);
108static void genesis_mac_init(struct skge_hw *hw, int port); 98static void genesis_mac_init(struct skge_hw *hw, int port);
109static void genesis_reset(struct skge_hw *hw, int port); 99static void genesis_reset(struct skge_hw *hw, int port);
100static void genesis_link_up(struct skge_port *skge);
110 101
102/* Avoid conditionals by using array */
111static const int txqaddr[] = { Q_XA1, Q_XA2 }; 103static const int txqaddr[] = { Q_XA1, Q_XA2 };
112static const int rxqaddr[] = { Q_R1, Q_R2 }; 104static const int rxqaddr[] = { Q_R1, Q_R2 };
113static const u32 rxirqmask[] = { IS_R1_F, IS_R2_F }; 105static const u32 rxirqmask[] = { IS_R1_F, IS_R2_F };
114static const u32 txirqmask[] = { IS_XA1_F, IS_XA2_F }; 106static const u32 txirqmask[] = { IS_XA1_F, IS_XA2_F };
107static const u32 portirqmask[] = { IS_PORT_1, IS_PORT_2 };
115 108
116/* Don't need to look at whole 16K. 109/* Don't need to look at whole 16K.
117 * last interesting register is descriptor poll timer. 110 * last interesting register is descriptor poll timer.
@@ -154,7 +147,7 @@ static void skge_get_regs(struct net_device *dev, struct ethtool_regs *regs,
154static int wol_supported(const struct skge_hw *hw) 147static int wol_supported(const struct skge_hw *hw)
155{ 148{
156 return !((hw->chip_id == CHIP_ID_GENESIS || 149 return !((hw->chip_id == CHIP_ID_GENESIS ||
157 (hw->chip_id == CHIP_ID_YUKON && chip_rev(hw) == 0))); 150 (hw->chip_id == CHIP_ID_YUKON && hw->chip_rev == 0)));
158} 151}
159 152
160static void skge_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) 153static void skge_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
@@ -170,7 +163,7 @@ static int skge_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
170 struct skge_port *skge = netdev_priv(dev); 163 struct skge_port *skge = netdev_priv(dev);
171 struct skge_hw *hw = skge->hw; 164 struct skge_hw *hw = skge->hw;
172 165
173 if(wol->wolopts != WAKE_MAGIC && wol->wolopts != 0) 166 if (wol->wolopts != WAKE_MAGIC && wol->wolopts != 0)
174 return -EOPNOTSUPP; 167 return -EOPNOTSUPP;
175 168
176 if (wol->wolopts == WAKE_MAGIC && !wol_supported(hw)) 169 if (wol->wolopts == WAKE_MAGIC && !wol_supported(hw))
@@ -190,6 +183,36 @@ static int skge_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
190 return 0; 183 return 0;
191} 184}
192 185
186/* Determine supported/advertised modes based on hardware.
187 * Note: ethtool ADVERTISED_xxx == SUPPORTED_xxx
188 */
189static u32 skge_supported_modes(const struct skge_hw *hw)
190{
191 u32 supported;
192
193 if (iscopper(hw)) {
194 supported = SUPPORTED_10baseT_Half
195 | SUPPORTED_10baseT_Full
196 | SUPPORTED_100baseT_Half
197 | SUPPORTED_100baseT_Full
198 | SUPPORTED_1000baseT_Half
199 | SUPPORTED_1000baseT_Full
200 | SUPPORTED_Autoneg| SUPPORTED_TP;
201
202 if (hw->chip_id == CHIP_ID_GENESIS)
203 supported &= ~(SUPPORTED_10baseT_Half
204 | SUPPORTED_10baseT_Full
205 | SUPPORTED_100baseT_Half
206 | SUPPORTED_100baseT_Full);
207
208 else if (hw->chip_id == CHIP_ID_YUKON)
209 supported &= ~SUPPORTED_1000baseT_Half;
210 } else
211 supported = SUPPORTED_1000baseT_Full | SUPPORTED_FIBRE
212 | SUPPORTED_Autoneg;
213
214 return supported;
215}
193 216
194static int skge_get_settings(struct net_device *dev, 217static int skge_get_settings(struct net_device *dev,
195 struct ethtool_cmd *ecmd) 218 struct ethtool_cmd *ecmd)
@@ -198,38 +221,13 @@ static int skge_get_settings(struct net_device *dev,
198 struct skge_hw *hw = skge->hw; 221 struct skge_hw *hw = skge->hw;
199 222
200 ecmd->transceiver = XCVR_INTERNAL; 223 ecmd->transceiver = XCVR_INTERNAL;
224 ecmd->supported = skge_supported_modes(hw);
201 225
202 if (iscopper(hw)) { 226 if (iscopper(hw)) {
203 if (hw->chip_id == CHIP_ID_GENESIS)
204 ecmd->supported = SUPPORTED_1000baseT_Full
205 | SUPPORTED_1000baseT_Half
206 | SUPPORTED_Autoneg | SUPPORTED_TP;
207 else {
208 ecmd->supported = SUPPORTED_10baseT_Half
209 | SUPPORTED_10baseT_Full
210 | SUPPORTED_100baseT_Half
211 | SUPPORTED_100baseT_Full
212 | SUPPORTED_1000baseT_Half
213 | SUPPORTED_1000baseT_Full
214 | SUPPORTED_Autoneg| SUPPORTED_TP;
215
216 if (hw->chip_id == CHIP_ID_YUKON)
217 ecmd->supported &= ~SUPPORTED_1000baseT_Half;
218
219 else if (hw->chip_id == CHIP_ID_YUKON_FE)
220 ecmd->supported &= ~(SUPPORTED_1000baseT_Half
221 | SUPPORTED_1000baseT_Full);
222 }
223
224 ecmd->port = PORT_TP; 227 ecmd->port = PORT_TP;
225 ecmd->phy_address = hw->phy_addr; 228 ecmd->phy_address = hw->phy_addr;
226 } else { 229 } else
227 ecmd->supported = SUPPORTED_1000baseT_Full
228 | SUPPORTED_FIBRE
229 | SUPPORTED_Autoneg;
230
231 ecmd->port = PORT_FIBRE; 230 ecmd->port = PORT_FIBRE;
232 }
233 231
234 ecmd->advertising = skge->advertising; 232 ecmd->advertising = skge->advertising;
235 ecmd->autoneg = skge->autoneg; 233 ecmd->autoneg = skge->autoneg;
@@ -238,65 +236,57 @@ static int skge_get_settings(struct net_device *dev,
238 return 0; 236 return 0;
239} 237}
240 238
241static u32 skge_modes(const struct skge_hw *hw)
242{
243 u32 modes = ADVERTISED_Autoneg
244 | ADVERTISED_1000baseT_Full | ADVERTISED_1000baseT_Half
245 | ADVERTISED_100baseT_Full | ADVERTISED_100baseT_Half
246 | ADVERTISED_10baseT_Full | ADVERTISED_10baseT_Half;
247
248 if (iscopper(hw)) {
249 modes |= ADVERTISED_TP;
250 switch(hw->chip_id) {
251 case CHIP_ID_GENESIS:
252 modes &= ~(ADVERTISED_100baseT_Full
253 | ADVERTISED_100baseT_Half
254 | ADVERTISED_10baseT_Full
255 | ADVERTISED_10baseT_Half);
256 break;
257
258 case CHIP_ID_YUKON:
259 modes &= ~ADVERTISED_1000baseT_Half;
260 break;
261
262 case CHIP_ID_YUKON_FE:
263 modes &= ~(ADVERTISED_1000baseT_Half|ADVERTISED_1000baseT_Full);
264 break;
265 }
266 } else {
267 modes |= ADVERTISED_FIBRE;
268 modes &= ~ADVERTISED_1000baseT_Half;
269 }
270 return modes;
271}
272
273static int skge_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) 239static int skge_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
274{ 240{
275 struct skge_port *skge = netdev_priv(dev); 241 struct skge_port *skge = netdev_priv(dev);
276 const struct skge_hw *hw = skge->hw; 242 const struct skge_hw *hw = skge->hw;
243 u32 supported = skge_supported_modes(hw);
277 244
278 if (ecmd->autoneg == AUTONEG_ENABLE) { 245 if (ecmd->autoneg == AUTONEG_ENABLE) {
279 if (ecmd->advertising & skge_modes(hw)) 246 ecmd->advertising = supported;
280 return -EINVAL; 247 skge->duplex = -1;
248 skge->speed = -1;
281 } else { 249 } else {
250 u32 setting;
251
282 switch(ecmd->speed) { 252 switch(ecmd->speed) {
283 case SPEED_1000: 253 case SPEED_1000:
284 if (hw->chip_id == CHIP_ID_YUKON_FE) 254 if (ecmd->duplex == DUPLEX_FULL)
255 setting = SUPPORTED_1000baseT_Full;
256 else if (ecmd->duplex == DUPLEX_HALF)
257 setting = SUPPORTED_1000baseT_Half;
258 else
285 return -EINVAL; 259 return -EINVAL;
286 break; 260 break;
287 case SPEED_100: 261 case SPEED_100:
262 if (ecmd->duplex == DUPLEX_FULL)
263 setting = SUPPORTED_100baseT_Full;
264 else if (ecmd->duplex == DUPLEX_HALF)
265 setting = SUPPORTED_100baseT_Half;
266 else
267 return -EINVAL;
268 break;
269
288 case SPEED_10: 270 case SPEED_10:
289 if (iscopper(hw) || hw->chip_id == CHIP_ID_GENESIS) 271 if (ecmd->duplex == DUPLEX_FULL)
272 setting = SUPPORTED_10baseT_Full;
273 else if (ecmd->duplex == DUPLEX_HALF)
274 setting = SUPPORTED_10baseT_Half;
275 else
290 return -EINVAL; 276 return -EINVAL;
291 break; 277 break;
292 default: 278 default:
293 return -EINVAL; 279 return -EINVAL;
294 } 280 }
281
282 if ((setting & supported) == 0)
283 return -EINVAL;
284
285 skge->speed = ecmd->speed;
286 skge->duplex = ecmd->duplex;
295 } 287 }
296 288
297 skge->autoneg = ecmd->autoneg; 289 skge->autoneg = ecmd->autoneg;
298 skge->speed = ecmd->speed;
299 skge->duplex = ecmd->duplex;
300 skge->advertising = ecmd->advertising; 290 skge->advertising = ecmd->advertising;
301 291
302 if (netif_running(dev)) { 292 if (netif_running(dev)) {
@@ -393,7 +383,7 @@ static void skge_get_strings(struct net_device *dev, u32 stringset, u8 *data)
393{ 383{
394 int i; 384 int i;
395 385
396 switch(stringset) { 386 switch (stringset) {
397 case ETH_SS_STATS: 387 case ETH_SS_STATS:
398 for (i = 0; i < ARRAY_SIZE(skge_stats); i++) 388 for (i = 0; i < ARRAY_SIZE(skge_stats); i++)
399 memcpy(data + i * ETH_GSTRING_LEN, 389 memcpy(data + i * ETH_GSTRING_LEN,
@@ -511,14 +501,6 @@ static int skge_set_rx_csum(struct net_device *dev, u32 data)
511 return 0; 501 return 0;
512} 502}
513 503
514/* Only Yukon II supports TSO (not implemented yet) */
515static int skge_set_tso(struct net_device *dev, u32 data)
516{
517 if (data)
518 return -EOPNOTSUPP;
519 return 0;
520}
521
522static void skge_get_pauseparam(struct net_device *dev, 504static void skge_get_pauseparam(struct net_device *dev,
523 struct ethtool_pauseparam *ecmd) 505 struct ethtool_pauseparam *ecmd)
524{ 506{
@@ -540,9 +522,9 @@ static int skge_set_pauseparam(struct net_device *dev,
540 skge->autoneg = ecmd->autoneg; 522 skge->autoneg = ecmd->autoneg;
541 if (ecmd->rx_pause && ecmd->tx_pause) 523 if (ecmd->rx_pause && ecmd->tx_pause)
542 skge->flow_control = FLOW_MODE_SYMMETRIC; 524 skge->flow_control = FLOW_MODE_SYMMETRIC;
543 else if(ecmd->rx_pause && !ecmd->tx_pause) 525 else if (ecmd->rx_pause && !ecmd->tx_pause)
544 skge->flow_control = FLOW_MODE_REM_SEND; 526 skge->flow_control = FLOW_MODE_REM_SEND;
545 else if(!ecmd->rx_pause && ecmd->tx_pause) 527 else if (!ecmd->rx_pause && ecmd->tx_pause)
546 skge->flow_control = FLOW_MODE_LOC_SEND; 528 skge->flow_control = FLOW_MODE_LOC_SEND;
547 else 529 else
548 skge->flow_control = FLOW_MODE_NONE; 530 skge->flow_control = FLOW_MODE_NONE;
@@ -559,8 +541,6 @@ static inline u32 hwkhz(const struct skge_hw *hw)
559{ 541{
560 if (hw->chip_id == CHIP_ID_GENESIS) 542 if (hw->chip_id == CHIP_ID_GENESIS)
561 return 53215; /* or: 53.125 MHz */ 543 return 53215; /* or: 53.125 MHz */
562 else if (hw->chip_id == CHIP_ID_YUKON_EC)
563 return 125000; /* or: 125.000 MHz */
564 else 544 else
565 return 78215; /* or: 78.125 MHz */ 545 return 78215; /* or: 78.125 MHz */
566} 546}
@@ -643,30 +623,18 @@ static int skge_set_coalesce(struct net_device *dev,
643static void skge_led_on(struct skge_hw *hw, int port) 623static void skge_led_on(struct skge_hw *hw, int port)
644{ 624{
645 if (hw->chip_id == CHIP_ID_GENESIS) { 625 if (hw->chip_id == CHIP_ID_GENESIS) {
646 skge_write8(hw, SKGEMAC_REG(port, LNK_LED_REG), LINKLED_ON); 626 skge_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_ON);
647 skge_write8(hw, B0_LED, LED_STAT_ON); 627 skge_write8(hw, B0_LED, LED_STAT_ON);
648 628
649 skge_write8(hw, SKGEMAC_REG(port, RX_LED_TST), LED_T_ON); 629 skge_write8(hw, SK_REG(port, RX_LED_TST), LED_T_ON);
650 skge_write32(hw, SKGEMAC_REG(port, RX_LED_VAL), 100); 630 skge_write32(hw, SK_REG(port, RX_LED_VAL), 100);
651 skge_write8(hw, SKGEMAC_REG(port, RX_LED_CTRL), LED_START); 631 skge_write8(hw, SK_REG(port, RX_LED_CTRL), LED_START);
652 632
653 switch (hw->phy_type) { 633 /* For Broadcom Phy only */
654 case SK_PHY_BCOM: 634 xm_phy_write(hw, port, PHY_BCOM_P_EXT_CTRL, PHY_B_PEC_LED_ON);
655 skge_xm_phy_write(hw, port, PHY_BCOM_P_EXT_CTRL,
656 PHY_B_PEC_LED_ON);
657 break;
658 case SK_PHY_LONE:
659 skge_xm_phy_write(hw, port, PHY_LONE_LED_CFG,
660 0x0800);
661 break;
662 default:
663 skge_write8(hw, SKGEMAC_REG(port, TX_LED_TST), LED_T_ON);
664 skge_write32(hw, SKGEMAC_REG(port, TX_LED_VAL), 100);
665 skge_write8(hw, SKGEMAC_REG(port, TX_LED_CTRL), LED_START);
666 }
667 } else { 635 } else {
668 skge_gm_phy_write(hw, port, PHY_MARV_LED_CTRL, 0); 636 gm_phy_write(hw, port, PHY_MARV_LED_CTRL, 0);
669 skge_gm_phy_write(hw, port, PHY_MARV_LED_OVER, 637 gm_phy_write(hw, port, PHY_MARV_LED_OVER,
670 PHY_M_LED_MO_DUP(MO_LED_ON) | 638 PHY_M_LED_MO_DUP(MO_LED_ON) |
671 PHY_M_LED_MO_10(MO_LED_ON) | 639 PHY_M_LED_MO_10(MO_LED_ON) |
672 PHY_M_LED_MO_100(MO_LED_ON) | 640 PHY_M_LED_MO_100(MO_LED_ON) |
@@ -678,28 +646,17 @@ static void skge_led_on(struct skge_hw *hw, int port)
678static void skge_led_off(struct skge_hw *hw, int port) 646static void skge_led_off(struct skge_hw *hw, int port)
679{ 647{
680 if (hw->chip_id == CHIP_ID_GENESIS) { 648 if (hw->chip_id == CHIP_ID_GENESIS) {
681 skge_write8(hw, SKGEMAC_REG(port, LNK_LED_REG), LINKLED_OFF); 649 skge_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_OFF);
682 skge_write8(hw, B0_LED, LED_STAT_OFF); 650 skge_write8(hw, B0_LED, LED_STAT_OFF);
683 651
684 skge_write32(hw, SKGEMAC_REG(port, RX_LED_VAL), 0); 652 skge_write32(hw, SK_REG(port, RX_LED_VAL), 0);
685 skge_write8(hw, SKGEMAC_REG(port, RX_LED_CTRL), LED_T_OFF); 653 skge_write8(hw, SK_REG(port, RX_LED_CTRL), LED_T_OFF);
686 654
687 switch (hw->phy_type) { 655 /* Broadcom only */
688 case SK_PHY_BCOM: 656 xm_phy_write(hw, port, PHY_BCOM_P_EXT_CTRL, PHY_B_PEC_LED_OFF);
689 skge_xm_phy_write(hw, port, PHY_BCOM_P_EXT_CTRL,
690 PHY_B_PEC_LED_OFF);
691 break;
692 case SK_PHY_LONE:
693 skge_xm_phy_write(hw, port, PHY_LONE_LED_CFG,
694 PHY_L_LC_LEDT);
695 break;
696 default:
697 skge_write32(hw, SKGEMAC_REG(port, TX_LED_VAL), 0);
698 skge_write8(hw, SKGEMAC_REG(port, TX_LED_CTRL), LED_T_OFF);
699 }
700 } else { 657 } else {
701 skge_gm_phy_write(hw, port, PHY_MARV_LED_CTRL, 0); 658 gm_phy_write(hw, port, PHY_MARV_LED_CTRL, 0);
702 skge_gm_phy_write(hw, port, PHY_MARV_LED_OVER, 659 gm_phy_write(hw, port, PHY_MARV_LED_OVER,
703 PHY_M_LED_MO_DUP(MO_LED_OFF) | 660 PHY_M_LED_MO_DUP(MO_LED_OFF) |
704 PHY_M_LED_MO_10(MO_LED_OFF) | 661 PHY_M_LED_MO_10(MO_LED_OFF) |
705 PHY_M_LED_MO_100(MO_LED_OFF) | 662 PHY_M_LED_MO_100(MO_LED_OFF) |
@@ -730,7 +687,7 @@ static int skge_phys_id(struct net_device *dev, u32 data)
730{ 687{
731 struct skge_port *skge = netdev_priv(dev); 688 struct skge_port *skge = netdev_priv(dev);
732 689
733 if(!data || data > (u32)(MAX_SCHEDULE_TIMEOUT / HZ)) 690 if (!data || data > (u32)(MAX_SCHEDULE_TIMEOUT / HZ))
734 data = (u32)(MAX_SCHEDULE_TIMEOUT / HZ); 691 data = (u32)(MAX_SCHEDULE_TIMEOUT / HZ);
735 692
736 /* start blinking */ 693 /* start blinking */
@@ -763,8 +720,6 @@ static struct ethtool_ops skge_ethtool_ops = {
763 .set_pauseparam = skge_set_pauseparam, 720 .set_pauseparam = skge_set_pauseparam,
764 .get_coalesce = skge_get_coalesce, 721 .get_coalesce = skge_get_coalesce,
765 .set_coalesce = skge_set_coalesce, 722 .set_coalesce = skge_set_coalesce,
766 .get_tso = ethtool_op_get_tso,
767 .set_tso = skge_set_tso,
768 .get_sg = ethtool_op_get_sg, 723 .get_sg = ethtool_op_get_sg,
769 .set_sg = skge_set_sg, 724 .set_sg = skge_set_sg,
770 .get_tx_csum = ethtool_op_get_tx_csum, 725 .get_tx_csum = ethtool_op_get_tx_csum,
@@ -793,6 +748,7 @@ static int skge_ring_alloc(struct skge_ring *ring, void *vaddr, u64 base)
793 748
794 for (i = 0, e = ring->start, d = vaddr; i < ring->count; i++, e++, d++) { 749 for (i = 0, e = ring->start, d = vaddr; i < ring->count; i++, e++, d++) {
795 e->desc = d; 750 e->desc = d;
751 e->skb = NULL;
796 if (i == ring->count - 1) { 752 if (i == ring->count - 1) {
797 e->next = ring->start; 753 e->next = ring->start;
798 d->next_offset = base; 754 d->next_offset = base;
@@ -806,24 +762,23 @@ static int skge_ring_alloc(struct skge_ring *ring, void *vaddr, u64 base)
806 return 0; 762 return 0;
807} 763}
808 764
809/* Setup buffer for receiving */ 765static struct sk_buff *skge_rx_alloc(struct net_device *dev, unsigned int size)
810static inline int skge_rx_alloc(struct skge_port *skge,
811 struct skge_element *e)
812{ 766{
813 unsigned long bufsize = skge->netdev->mtu + ETH_HLEN; /* VLAN? */ 767 struct sk_buff *skb = dev_alloc_skb(size);
814 struct skge_rx_desc *rd = e->desc;
815 struct sk_buff *skb;
816 u64 map;
817 768
818 skb = dev_alloc_skb(bufsize + NET_IP_ALIGN); 769 if (likely(skb)) {
819 if (unlikely(!skb)) { 770 skb->dev = dev;
820 printk(KERN_DEBUG PFX "%s: out of memory for receive\n", 771 skb_reserve(skb, NET_IP_ALIGN);
821 skge->netdev->name);
822 return -ENOMEM;
823 } 772 }
773 return skb;
774}
824 775
825 skb->dev = skge->netdev; 776/* Allocate and setup a new buffer for receiving */
826 skb_reserve(skb, NET_IP_ALIGN); 777static void skge_rx_setup(struct skge_port *skge, struct skge_element *e,
778 struct sk_buff *skb, unsigned int bufsize)
779{
780 struct skge_rx_desc *rd = e->desc;
781 u64 map;
827 782
828 map = pci_map_single(skge->hw->pdev, skb->data, bufsize, 783 map = pci_map_single(skge->hw->pdev, skb->data, bufsize,
829 PCI_DMA_FROMDEVICE); 784 PCI_DMA_FROMDEVICE);
@@ -841,55 +796,69 @@ static inline int skge_rx_alloc(struct skge_port *skge,
841 rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | bufsize; 796 rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | bufsize;
842 pci_unmap_addr_set(e, mapaddr, map); 797 pci_unmap_addr_set(e, mapaddr, map);
843 pci_unmap_len_set(e, maplen, bufsize); 798 pci_unmap_len_set(e, maplen, bufsize);
844 return 0;
845} 799}
846 800
847/* Free all unused buffers in receive ring, assumes receiver stopped */ 801/* Resume receiving using existing skb,
802 * Note: DMA address is not changed by chip.
803 * MTU not changed while receiver active.
804 */
805static void skge_rx_reuse(struct skge_element *e, unsigned int size)
806{
807 struct skge_rx_desc *rd = e->desc;
808
809 rd->csum2 = 0;
810 rd->csum2_start = ETH_HLEN;
811
812 wmb();
813
814 rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | size;
815}
816
817
818/* Free all buffers in receive ring, assumes receiver stopped */
848static void skge_rx_clean(struct skge_port *skge) 819static void skge_rx_clean(struct skge_port *skge)
849{ 820{
850 struct skge_hw *hw = skge->hw; 821 struct skge_hw *hw = skge->hw;
851 struct skge_ring *ring = &skge->rx_ring; 822 struct skge_ring *ring = &skge->rx_ring;
852 struct skge_element *e; 823 struct skge_element *e;
853 824
854 for (e = ring->to_clean; e != ring->to_use; e = e->next) { 825 e = ring->start;
826 do {
855 struct skge_rx_desc *rd = e->desc; 827 struct skge_rx_desc *rd = e->desc;
856 rd->control = 0; 828 rd->control = 0;
857 829 if (e->skb) {
858 pci_unmap_single(hw->pdev, 830 pci_unmap_single(hw->pdev,
859 pci_unmap_addr(e, mapaddr), 831 pci_unmap_addr(e, mapaddr),
860 pci_unmap_len(e, maplen), 832 pci_unmap_len(e, maplen),
861 PCI_DMA_FROMDEVICE); 833 PCI_DMA_FROMDEVICE);
862 dev_kfree_skb(e->skb); 834 dev_kfree_skb(e->skb);
863 e->skb = NULL; 835 e->skb = NULL;
864 } 836 }
865 ring->to_clean = e; 837 } while ((e = e->next) != ring->start);
866} 838}
867 839
840
868/* Allocate buffers for receive ring 841/* Allocate buffers for receive ring
869 * For receive: to_use is refill location 842 * For receive: to_clean is next received frame.
870 * to_clean is next received frame.
871 *
872 * if (to_use == to_clean)
873 * then ring all frames in ring need buffers
874 * if (to_use->next == to_clean)
875 * then ring all frames in ring have buffers
876 */ 843 */
877static int skge_rx_fill(struct skge_port *skge) 844static int skge_rx_fill(struct skge_port *skge)
878{ 845{
879 struct skge_ring *ring = &skge->rx_ring; 846 struct skge_ring *ring = &skge->rx_ring;
880 struct skge_element *e; 847 struct skge_element *e;
881 int ret = 0; 848 unsigned int bufsize = skge->rx_buf_size;
882 849
883 for (e = ring->to_use; e->next != ring->to_clean; e = e->next) { 850 e = ring->start;
884 if (skge_rx_alloc(skge, e)) { 851 do {
885 ret = 1; 852 struct sk_buff *skb = skge_rx_alloc(skge->netdev, bufsize);
886 break;
887 }
888 853
889 } 854 if (!skb)
890 ring->to_use = e; 855 return -ENOMEM;
856
857 skge_rx_setup(skge, e, skb, bufsize);
858 } while ( (e = e->next) != ring->start);
891 859
892 return ret; 860 ring->to_clean = ring->start;
861 return 0;
893} 862}
894 863
895static void skge_link_up(struct skge_port *skge) 864static void skge_link_up(struct skge_port *skge)
@@ -919,50 +888,50 @@ static void skge_link_down(struct skge_port *skge)
919 printk(KERN_INFO PFX "%s: Link is down.\n", skge->netdev->name); 888 printk(KERN_INFO PFX "%s: Link is down.\n", skge->netdev->name);
920} 889}
921 890
922static u16 skge_xm_phy_read(struct skge_hw *hw, int port, u16 reg) 891static u16 xm_phy_read(struct skge_hw *hw, int port, u16 reg)
923{ 892{
924 int i; 893 int i;
925 u16 v; 894 u16 v;
926 895
927 skge_xm_write16(hw, port, XM_PHY_ADDR, reg | hw->phy_addr); 896 xm_write16(hw, port, XM_PHY_ADDR, reg | hw->phy_addr);
928 v = skge_xm_read16(hw, port, XM_PHY_DATA); 897 v = xm_read16(hw, port, XM_PHY_DATA);
929 if (hw->phy_type != SK_PHY_XMAC) {
930 for (i = 0; i < PHY_RETRIES; i++) {
931 udelay(1);
932 if (skge_xm_read16(hw, port, XM_MMU_CMD)
933 & XM_MMU_PHY_RDY)
934 goto ready;
935 }
936 898
937 printk(KERN_WARNING PFX "%s: phy read timed out\n", 899 /* Need to wait for external PHY */
938 hw->dev[port]->name); 900 for (i = 0; i < PHY_RETRIES; i++) {
939 return 0; 901 udelay(1);
940 ready: 902 if (xm_read16(hw, port, XM_MMU_CMD)
941 v = skge_xm_read16(hw, port, XM_PHY_DATA); 903 & XM_MMU_PHY_RDY)
904 goto ready;
942 } 905 }
943 906
907 printk(KERN_WARNING PFX "%s: phy read timed out\n",
908 hw->dev[port]->name);
909 return 0;
910 ready:
911 v = xm_read16(hw, port, XM_PHY_DATA);
912
944 return v; 913 return v;
945} 914}
946 915
947static void skge_xm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val) 916static void xm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val)
948{ 917{
949 int i; 918 int i;
950 919
951 skge_xm_write16(hw, port, XM_PHY_ADDR, reg | hw->phy_addr); 920 xm_write16(hw, port, XM_PHY_ADDR, reg | hw->phy_addr);
952 for (i = 0; i < PHY_RETRIES; i++) { 921 for (i = 0; i < PHY_RETRIES; i++) {
953 if (!(skge_xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_BUSY)) 922 if (!(xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_BUSY))
954 goto ready; 923 goto ready;
955 cpu_relax(); 924 udelay(1);
956 } 925 }
957 printk(KERN_WARNING PFX "%s: phy write failed to come ready\n", 926 printk(KERN_WARNING PFX "%s: phy write failed to come ready\n",
958 hw->dev[port]->name); 927 hw->dev[port]->name);
959 928
960 929
961 ready: 930 ready:
962 skge_xm_write16(hw, port, XM_PHY_DATA, val); 931 xm_write16(hw, port, XM_PHY_DATA, val);
963 for (i = 0; i < PHY_RETRIES; i++) { 932 for (i = 0; i < PHY_RETRIES; i++) {
964 udelay(1); 933 udelay(1);
965 if (!(skge_xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_BUSY)) 934 if (!(xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_BUSY))
966 return; 935 return;
967 } 936 }
968 printk(KERN_WARNING PFX "%s: phy write timed out\n", 937 printk(KERN_WARNING PFX "%s: phy write timed out\n",
@@ -999,34 +968,112 @@ static void genesis_init(struct skge_hw *hw)
999 968
1000static void genesis_reset(struct skge_hw *hw, int port) 969static void genesis_reset(struct skge_hw *hw, int port)
1001{ 970{
1002 int i; 971 const u8 zero[8] = { 0 };
1003 u64 zero = 0;
1004 972
1005 /* reset the statistics module */ 973 /* reset the statistics module */
1006 skge_xm_write32(hw, port, XM_GP_PORT, XM_GP_RES_STAT); 974 xm_write32(hw, port, XM_GP_PORT, XM_GP_RES_STAT);
1007 skge_xm_write16(hw, port, XM_IMSK, 0xffff); /* disable XMAC IRQs */ 975 xm_write16(hw, port, XM_IMSK, 0xffff); /* disable XMAC IRQs */
1008 skge_xm_write32(hw, port, XM_MODE, 0); /* clear Mode Reg */ 976 xm_write32(hw, port, XM_MODE, 0); /* clear Mode Reg */
1009 skge_xm_write16(hw, port, XM_TX_CMD, 0); /* reset TX CMD Reg */ 977 xm_write16(hw, port, XM_TX_CMD, 0); /* reset TX CMD Reg */
1010 skge_xm_write16(hw, port, XM_RX_CMD, 0); /* reset RX CMD Reg */ 978 xm_write16(hw, port, XM_RX_CMD, 0); /* reset RX CMD Reg */
1011 979
1012 /* disable all PHY IRQs */ 980 /* disable Broadcom PHY IRQ */
1013 if (hw->phy_type == SK_PHY_BCOM) 981 xm_write16(hw, port, PHY_BCOM_INT_MASK, 0xffff);
1014 skge_xm_write16(hw, port, PHY_BCOM_INT_MASK, 0xffff);
1015 982
1016 skge_xm_outhash(hw, port, XM_HSM, (u8 *) &zero); 983 xm_outhash(hw, port, XM_HSM, zero);
1017 for (i = 0; i < 15; i++)
1018 skge_xm_outaddr(hw, port, XM_EXM(i), (u8 *) &zero);
1019 skge_xm_outhash(hw, port, XM_SRC_CHK, (u8 *) &zero);
1020} 984}
1021 985
1022 986
1023static void genesis_mac_init(struct skge_hw *hw, int port) 987/* Convert mode to MII values */
988static const u16 phy_pause_map[] = {
989 [FLOW_MODE_NONE] = 0,
990 [FLOW_MODE_LOC_SEND] = PHY_AN_PAUSE_ASYM,
991 [FLOW_MODE_SYMMETRIC] = PHY_AN_PAUSE_CAP,
992 [FLOW_MODE_REM_SEND] = PHY_AN_PAUSE_CAP | PHY_AN_PAUSE_ASYM,
993};
994
995
996/* Check status of Broadcom phy link */
997static void bcom_check_link(struct skge_hw *hw, int port)
1024{ 998{
1025 struct skge_port *skge = netdev_priv(hw->dev[port]); 999 struct net_device *dev = hw->dev[port];
1000 struct skge_port *skge = netdev_priv(dev);
1001 u16 status;
1002
1003 /* read twice because of latch */
1004 (void) xm_phy_read(hw, port, PHY_BCOM_STAT);
1005 status = xm_phy_read(hw, port, PHY_BCOM_STAT);
1006
1007 pr_debug("bcom_check_link status=0x%x\n", status);
1008
1009 if ((status & PHY_ST_LSYNC) == 0) {
1010 u16 cmd = xm_read16(hw, port, XM_MMU_CMD);
1011 cmd &= ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX);
1012 xm_write16(hw, port, XM_MMU_CMD, cmd);
1013 /* dummy read to ensure writing */
1014 (void) xm_read16(hw, port, XM_MMU_CMD);
1015
1016 if (netif_carrier_ok(dev))
1017 skge_link_down(skge);
1018 } else {
1019 if (skge->autoneg == AUTONEG_ENABLE &&
1020 (status & PHY_ST_AN_OVER)) {
1021 u16 lpa = xm_phy_read(hw, port, PHY_BCOM_AUNE_LP);
1022 u16 aux = xm_phy_read(hw, port, PHY_BCOM_AUX_STAT);
1023
1024 if (lpa & PHY_B_AN_RF) {
1025 printk(KERN_NOTICE PFX "%s: remote fault\n",
1026 dev->name);
1027 return;
1028 }
1029
1030 /* Check Duplex mismatch */
1031 switch(aux & PHY_B_AS_AN_RES_MSK) {
1032 case PHY_B_RES_1000FD:
1033 skge->duplex = DUPLEX_FULL;
1034 break;
1035 case PHY_B_RES_1000HD:
1036 skge->duplex = DUPLEX_HALF;
1037 break;
1038 default:
1039 printk(KERN_NOTICE PFX "%s: duplex mismatch\n",
1040 dev->name);
1041 return;
1042 }
1043
1044
1045 /* We are using IEEE 802.3z/D5.0 Table 37-4 */
1046 switch (aux & PHY_B_AS_PAUSE_MSK) {
1047 case PHY_B_AS_PAUSE_MSK:
1048 skge->flow_control = FLOW_MODE_SYMMETRIC;
1049 break;
1050 case PHY_B_AS_PRR:
1051 skge->flow_control = FLOW_MODE_REM_SEND;
1052 break;
1053 case PHY_B_AS_PRT:
1054 skge->flow_control = FLOW_MODE_LOC_SEND;
1055 break;
1056 default:
1057 skge->flow_control = FLOW_MODE_NONE;
1058 }
1059
1060 skge->speed = SPEED_1000;
1061 }
1062
1063 if (!netif_carrier_ok(dev))
1064 genesis_link_up(skge);
1065 }
1066}
1067
1068/* Broadcom 5400 only supports gigabit! SysKonnect did not put an additional
1069 * Phy on for 100 or 10Mbit operation
1070 */
1071static void bcom_phy_init(struct skge_port *skge, int jumbo)
1072{
1073 struct skge_hw *hw = skge->hw;
1074 int port = skge->port;
1026 int i; 1075 int i;
1027 u32 r; 1076 u16 id1, r, ext, ctl;
1028 u16 id1;
1029 u16 ctrl1, ctrl2, ctrl3, ctrl4, ctrl5;
1030 1077
1031 /* magic workaround patterns for Broadcom */ 1078 /* magic workaround patterns for Broadcom */
1032 static const struct { 1079 static const struct {
@@ -1042,16 +1089,120 @@ static void genesis_mac_init(struct skge_hw *hw, int port)
1042 { 0x17, 0x0013 }, { 0x15, 0x0A04 }, { 0x18, 0x0420 }, 1089 { 0x17, 0x0013 }, { 0x15, 0x0A04 }, { 0x18, 0x0420 },
1043 }; 1090 };
1044 1091
1092 pr_debug("bcom_phy_init\n");
1093
1094 /* read Id from external PHY (all have the same address) */
1095 id1 = xm_phy_read(hw, port, PHY_XMAC_ID1);
1096
1097 /* Optimize MDIO transfer by suppressing preamble. */
1098 r = xm_read16(hw, port, XM_MMU_CMD);
1099 r |= XM_MMU_NO_PRE;
1100 xm_write16(hw, port, XM_MMU_CMD,r);
1101
1102 switch(id1) {
1103 case PHY_BCOM_ID1_C0:
1104 /*
1105 * Workaround BCOM Errata for the C0 type.
1106 * Write magic patterns to reserved registers.
1107 */
1108 for (i = 0; i < ARRAY_SIZE(C0hack); i++)
1109 xm_phy_write(hw, port,
1110 C0hack[i].reg, C0hack[i].val);
1111
1112 break;
1113 case PHY_BCOM_ID1_A1:
1114 /*
1115 * Workaround BCOM Errata for the A1 type.
1116 * Write magic patterns to reserved registers.
1117 */
1118 for (i = 0; i < ARRAY_SIZE(A1hack); i++)
1119 xm_phy_write(hw, port,
1120 A1hack[i].reg, A1hack[i].val);
1121 break;
1122 }
1123
1124 /*
1125 * Workaround BCOM Errata (#10523) for all BCom PHYs.
1126 * Disable Power Management after reset.
1127 */
1128 r = xm_phy_read(hw, port, PHY_BCOM_AUX_CTRL);
1129 r |= PHY_B_AC_DIS_PM;
1130 xm_phy_write(hw, port, PHY_BCOM_AUX_CTRL, r);
1131
1132 /* Dummy read */
1133 xm_read16(hw, port, XM_ISRC);
1134
1135 ext = PHY_B_PEC_EN_LTR; /* enable tx led */
1136 ctl = PHY_CT_SP1000; /* always 1000mbit */
1137
1138 if (skge->autoneg == AUTONEG_ENABLE) {
1139 /*
1140 * Workaround BCOM Errata #1 for the C5 type.
1141 * 1000Base-T Link Acquisition Failure in Slave Mode
1142 * Set Repeater/DTE bit 10 of the 1000Base-T Control Register
1143 */
1144 u16 adv = PHY_B_1000C_RD;
1145 if (skge->advertising & ADVERTISED_1000baseT_Half)
1146 adv |= PHY_B_1000C_AHD;
1147 if (skge->advertising & ADVERTISED_1000baseT_Full)
1148 adv |= PHY_B_1000C_AFD;
1149 xm_phy_write(hw, port, PHY_BCOM_1000T_CTRL, adv);
1150
1151 ctl |= PHY_CT_ANE | PHY_CT_RE_CFG;
1152 } else {
1153 if (skge->duplex == DUPLEX_FULL)
1154 ctl |= PHY_CT_DUP_MD;
1155 /* Force to slave */
1156 xm_phy_write(hw, port, PHY_BCOM_1000T_CTRL, PHY_B_1000C_MSE);
1157 }
1158
1159 /* Set autonegotiation pause parameters */
1160 xm_phy_write(hw, port, PHY_BCOM_AUNE_ADV,
1161 phy_pause_map[skge->flow_control] | PHY_AN_CSMA);
1162
1163 /* Handle Jumbo frames */
1164 if (jumbo) {
1165 xm_phy_write(hw, port, PHY_BCOM_AUX_CTRL,
1166 PHY_B_AC_TX_TST | PHY_B_AC_LONG_PACK);
1167
1168 ext |= PHY_B_PEC_HIGH_LA;
1169
1170 }
1171
1172 xm_phy_write(hw, port, PHY_BCOM_P_EXT_CTRL, ext);
1173 xm_phy_write(hw, port, PHY_BCOM_CTRL, ctl);
1174
1175 /* Use link status change interrupt */
1176 xm_phy_write(hw, port, PHY_BCOM_INT_MASK, PHY_B_DEF_MSK);
1177
1178 bcom_check_link(hw, port);
1179}
1180
1181static void genesis_mac_init(struct skge_hw *hw, int port)
1182{
1183 struct net_device *dev = hw->dev[port];
1184 struct skge_port *skge = netdev_priv(dev);
1185 int jumbo = hw->dev[port]->mtu > ETH_DATA_LEN;
1186 int i;
1187 u32 r;
1188 const u8 zero[6] = { 0 };
1189
1190 /* Clear MIB counters */
1191 xm_write16(hw, port, XM_STAT_CMD,
1192 XM_SC_CLR_RXC | XM_SC_CLR_TXC);
1193 /* Clear two times according to Errata #3 */
1194 xm_write16(hw, port, XM_STAT_CMD,
1195 XM_SC_CLR_RXC | XM_SC_CLR_TXC);
1045 1196
1046 /* initialize Rx, Tx and Link LED */ 1197 /* initialize Rx, Tx and Link LED */
1047 skge_write8(hw, SKGEMAC_REG(port, LNK_LED_REG), LINKLED_ON); 1198 skge_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_ON);
1048 skge_write8(hw, SKGEMAC_REG(port, LNK_LED_REG), LINKLED_LINKSYNC_ON); 1199 skge_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_LINKSYNC_ON);
1049 1200
1050 skge_write8(hw, SKGEMAC_REG(port, RX_LED_CTRL), LED_START); 1201 skge_write8(hw, SK_REG(port, RX_LED_CTRL), LED_START);
1051 skge_write8(hw, SKGEMAC_REG(port, TX_LED_CTRL), LED_START); 1202 skge_write8(hw, SK_REG(port, TX_LED_CTRL), LED_START);
1052 1203
1053 /* Unreset the XMAC. */ 1204 /* Unreset the XMAC. */
1054 skge_write16(hw, SKGEMAC_REG(port, TX_MFF_CTRL1), MFF_CLR_MAC_RST); 1205 skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_CLR_MAC_RST);
1055 1206
1056 /* 1207 /*
1057 * Perform additional initialization for external PHYs, 1208 * Perform additional initialization for external PHYs,
@@ -1059,67 +1210,56 @@ static void genesis_mac_init(struct skge_hw *hw, int port)
1059 * GMII mode. 1210 * GMII mode.
1060 */ 1211 */
1061 spin_lock_bh(&hw->phy_lock); 1212 spin_lock_bh(&hw->phy_lock);
1062 if (hw->phy_type != SK_PHY_XMAC) { 1213 /* Take external Phy out of reset */
1063 /* Take PHY out of reset. */ 1214 r = skge_read32(hw, B2_GP_IO);
1064 r = skge_read32(hw, B2_GP_IO); 1215 if (port == 0)
1065 if (port == 0) 1216 r |= GP_DIR_0|GP_IO_0;
1066 r |= GP_DIR_0|GP_IO_0; 1217 else
1067 else 1218 r |= GP_DIR_2|GP_IO_2;
1068 r |= GP_DIR_2|GP_IO_2;
1069
1070 skge_write32(hw, B2_GP_IO, r);
1071 skge_read32(hw, B2_GP_IO);
1072
1073 /* Enable GMII mode on the XMAC. */
1074 skge_xm_write16(hw, port, XM_HW_CFG, XM_HW_GMII_MD);
1075
1076 id1 = skge_xm_phy_read(hw, port, PHY_XMAC_ID1);
1077
1078 /* Optimize MDIO transfer by suppressing preamble. */
1079 skge_xm_write16(hw, port, XM_MMU_CMD,
1080 skge_xm_read16(hw, port, XM_MMU_CMD)
1081 | XM_MMU_NO_PRE);
1082
1083 if (id1 == PHY_BCOM_ID1_C0) {
1084 /*
1085 * Workaround BCOM Errata for the C0 type.
1086 * Write magic patterns to reserved registers.
1087 */
1088 for (i = 0; i < ARRAY_SIZE(C0hack); i++)
1089 skge_xm_phy_write(hw, port,
1090 C0hack[i].reg, C0hack[i].val);
1091
1092 } else if (id1 == PHY_BCOM_ID1_A1) {
1093 /*
1094 * Workaround BCOM Errata for the A1 type.
1095 * Write magic patterns to reserved registers.
1096 */
1097 for (i = 0; i < ARRAY_SIZE(A1hack); i++)
1098 skge_xm_phy_write(hw, port,
1099 A1hack[i].reg, A1hack[i].val);
1100 }
1101 1219
1102 /* 1220 skge_write32(hw, B2_GP_IO, r);
1103 * Workaround BCOM Errata (#10523) for all BCom PHYs. 1221 skge_read32(hw, B2_GP_IO);
1104 * Disable Power Management after reset. 1222 spin_unlock_bh(&hw->phy_lock);
1105 */
1106 r = skge_xm_phy_read(hw, port, PHY_BCOM_AUX_CTRL);
1107 skge_xm_phy_write(hw, port, PHY_BCOM_AUX_CTRL, r | PHY_B_AC_DIS_PM);
1108 }
1109 1223
1110 /* Dummy read */ 1224 /* Enable GMII interface */
1111 skge_xm_read16(hw, port, XM_ISRC); 1225 xm_write16(hw, port, XM_HW_CFG, XM_HW_GMII_MD);
1226
1227 bcom_phy_init(skge, jumbo);
1228
1229 /* Set Station Address */
1230 xm_outaddr(hw, port, XM_SA, dev->dev_addr);
1231
1232 /* We don't use match addresses so clear */
1233 for (i = 1; i < 16; i++)
1234 xm_outaddr(hw, port, XM_EXM(i), zero);
1112 1235
1113 r = skge_xm_read32(hw, port, XM_MODE); 1236 /* configure Rx High Water Mark (XM_RX_HI_WM) */
1114 skge_xm_write32(hw, port, XM_MODE, r|XM_MD_CSA); 1237 xm_write16(hw, port, XM_RX_HI_WM, 1450);
1115 1238
1116 /* We don't need the FCS appended to the packet. */ 1239 /* We don't need the FCS appended to the packet. */
1117 r = skge_xm_read16(hw, port, XM_RX_CMD); 1240 r = XM_RX_LENERR_OK | XM_RX_STRIP_FCS;
1118 skge_xm_write16(hw, port, XM_RX_CMD, r | XM_RX_STRIP_FCS); 1241 if (jumbo)
1242 r |= XM_RX_BIG_PK_OK;
1243
1244 if (skge->duplex == DUPLEX_HALF) {
1245 /*
1246 * If in manual half duplex mode the other side might be in
1247 * full duplex mode, so ignore if a carrier extension is not seen
1248 * on frames received
1249 */
1250 r |= XM_RX_DIS_CEXT;
1251 }
1252 xm_write16(hw, port, XM_RX_CMD, r);
1253
1119 1254
1120 /* We want short frames padded to 60 bytes. */ 1255 /* We want short frames padded to 60 bytes. */
1121 r = skge_xm_read16(hw, port, XM_TX_CMD); 1256 xm_write16(hw, port, XM_TX_CMD, XM_TX_AUTO_PAD);
1122 skge_xm_write16(hw, port, XM_TX_CMD, r | XM_TX_AUTO_PAD); 1257
1258 /*
1259 * Bump up the transmit threshold. This helps hold off transmit
1260 * underruns when we're blasting traffic from both ports at once.
1261 */
1262 xm_write16(hw, port, XM_TX_THR, 512);
1123 1263
1124 /* 1264 /*
1125 * Enable the reception of all error frames. This is 1265 * Enable the reception of all error frames. This is
@@ -1135,19 +1275,22 @@ static void genesis_mac_init(struct skge_hw *hw, int port)
1135 * case the XMAC will start transferring frames out of the 1275 * case the XMAC will start transferring frames out of the
1136 * RX FIFO as soon as the FIFO threshold is reached. 1276 * RX FIFO as soon as the FIFO threshold is reached.
1137 */ 1277 */
1138 r = skge_xm_read32(hw, port, XM_MODE); 1278 xm_write32(hw, port, XM_MODE, XM_DEF_MODE);
1139 skge_xm_write32(hw, port, XM_MODE,
1140 XM_MD_RX_CRCE|XM_MD_RX_LONG|XM_MD_RX_RUNT|
1141 XM_MD_RX_ERR|XM_MD_RX_IRLE);
1142 1279
1143 skge_xm_outaddr(hw, port, XM_SA, hw->dev[port]->dev_addr);
1144 skge_xm_outaddr(hw, port, XM_EXM(0), hw->dev[port]->dev_addr);
1145 1280
1146 /* 1281 /*
1147 * Bump up the transmit threshold. This helps hold off transmit 1282 * Initialize the Receive Counter Event Mask (XM_RX_EV_MSK)
1148 * underruns when we're blasting traffic from both ports at once. 1283 * - Enable all bits excepting 'Octets Rx OK Low CntOv'
1284 * and 'Octets Rx OK Hi Cnt Ov'.
1149 */ 1285 */
1150 skge_xm_write16(hw, port, XM_TX_THR, 512); 1286 xm_write32(hw, port, XM_RX_EV_MSK, XMR_DEF_MSK);
1287
1288 /*
1289 * Initialize the Transmit Counter Event Mask (XM_TX_EV_MSK)
1290 * - Enable all bits excepting 'Octets Tx OK Low CntOv'
1291 * and 'Octets Tx OK Hi Cnt Ov'.
1292 */
1293 xm_write32(hw, port, XM_TX_EV_MSK, XMT_DEF_MSK);
1151 1294
1152 /* Configure MAC arbiter */ 1295 /* Configure MAC arbiter */
1153 skge_write16(hw, B3_MA_TO_CTRL, MA_RST_CLR); 1296 skge_write16(hw, B3_MA_TO_CTRL, MA_RST_CLR);
@@ -1164,137 +1307,30 @@ static void genesis_mac_init(struct skge_hw *hw, int port)
1164 skge_write8(hw, B3_MA_RCINI_TX2, 0); 1307 skge_write8(hw, B3_MA_RCINI_TX2, 0);
1165 1308
1166 /* Configure Rx MAC FIFO */ 1309 /* Configure Rx MAC FIFO */
1167 skge_write8(hw, SKGEMAC_REG(port, RX_MFF_CTRL2), MFF_RST_CLR); 1310 skge_write8(hw, SK_REG(port, RX_MFF_CTRL2), MFF_RST_CLR);
1168 skge_write16(hw, SKGEMAC_REG(port, RX_MFF_CTRL1), MFF_ENA_TIM_PAT); 1311 skge_write16(hw, SK_REG(port, RX_MFF_CTRL1), MFF_ENA_TIM_PAT);
1169 skge_write8(hw, SKGEMAC_REG(port, RX_MFF_CTRL2), MFF_ENA_OP_MD); 1312 skge_write8(hw, SK_REG(port, RX_MFF_CTRL2), MFF_ENA_OP_MD);
1170 1313
1171 /* Configure Tx MAC FIFO */ 1314 /* Configure Tx MAC FIFO */
1172 skge_write8(hw, SKGEMAC_REG(port, TX_MFF_CTRL2), MFF_RST_CLR); 1315 skge_write8(hw, SK_REG(port, TX_MFF_CTRL2), MFF_RST_CLR);
1173 skge_write16(hw, SKGEMAC_REG(port, TX_MFF_CTRL1), MFF_TX_CTRL_DEF); 1316 skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_TX_CTRL_DEF);
1174 skge_write8(hw, SKGEMAC_REG(port, TX_MFF_CTRL2), MFF_ENA_OP_MD); 1317 skge_write8(hw, SK_REG(port, TX_MFF_CTRL2), MFF_ENA_OP_MD);
1175 1318
1176 if (hw->dev[port]->mtu > ETH_DATA_LEN) { 1319 if (jumbo) {
1177 /* Enable frame flushing if jumbo frames used */ 1320 /* Enable frame flushing if jumbo frames used */
1178 skge_write16(hw, SKGEMAC_REG(port,RX_MFF_CTRL1), MFF_ENA_FLUSH); 1321 skge_write16(hw, SK_REG(port,RX_MFF_CTRL1), MFF_ENA_FLUSH);
1179 } else { 1322 } else {
1180 /* enable timeout timers if normal frames */ 1323 /* enable timeout timers if normal frames */
1181 skge_write16(hw, B3_PA_CTRL, 1324 skge_write16(hw, B3_PA_CTRL,
1182 port == 0 ? PA_ENA_TO_TX1 : PA_ENA_TO_TX2); 1325 (port == 0) ? PA_ENA_TO_TX1 : PA_ENA_TO_TX2);
1183 } 1326 }
1184
1185
1186 r = skge_xm_read16(hw, port, XM_RX_CMD);
1187 if (hw->dev[port]->mtu > ETH_DATA_LEN)
1188 skge_xm_write16(hw, port, XM_RX_CMD, r | XM_RX_BIG_PK_OK);
1189 else
1190 skge_xm_write16(hw, port, XM_RX_CMD, r & ~(XM_RX_BIG_PK_OK));
1191
1192 switch (hw->phy_type) {
1193 case SK_PHY_XMAC:
1194 if (skge->autoneg == AUTONEG_ENABLE) {
1195 ctrl1 = PHY_X_AN_FD | PHY_X_AN_HD;
1196
1197 switch (skge->flow_control) {
1198 case FLOW_MODE_NONE:
1199 ctrl1 |= PHY_X_P_NO_PAUSE;
1200 break;
1201 case FLOW_MODE_LOC_SEND:
1202 ctrl1 |= PHY_X_P_ASYM_MD;
1203 break;
1204 case FLOW_MODE_SYMMETRIC:
1205 ctrl1 |= PHY_X_P_SYM_MD;
1206 break;
1207 case FLOW_MODE_REM_SEND:
1208 ctrl1 |= PHY_X_P_BOTH_MD;
1209 break;
1210 }
1211
1212 skge_xm_phy_write(hw, port, PHY_XMAC_AUNE_ADV, ctrl1);
1213 ctrl2 = PHY_CT_ANE | PHY_CT_RE_CFG;
1214 } else {
1215 ctrl2 = 0;
1216 if (skge->duplex == DUPLEX_FULL)
1217 ctrl2 |= PHY_CT_DUP_MD;
1218 }
1219
1220 skge_xm_phy_write(hw, port, PHY_XMAC_CTRL, ctrl2);
1221 break;
1222
1223 case SK_PHY_BCOM:
1224 ctrl1 = PHY_CT_SP1000;
1225 ctrl2 = 0;
1226 ctrl3 = PHY_SEL_TYPE;
1227 ctrl4 = PHY_B_PEC_EN_LTR;
1228 ctrl5 = PHY_B_AC_TX_TST;
1229
1230 if (skge->autoneg == AUTONEG_ENABLE) {
1231 /*
1232 * Workaround BCOM Errata #1 for the C5 type.
1233 * 1000Base-T Link Acquisition Failure in Slave Mode
1234 * Set Repeater/DTE bit 10 of the 1000Base-T Control Register
1235 */
1236 ctrl2 |= PHY_B_1000C_RD;
1237 if (skge->advertising & ADVERTISED_1000baseT_Half)
1238 ctrl2 |= PHY_B_1000C_AHD;
1239 if (skge->advertising & ADVERTISED_1000baseT_Full)
1240 ctrl2 |= PHY_B_1000C_AFD;
1241
1242 /* Set Flow-control capabilities */
1243 switch (skge->flow_control) {
1244 case FLOW_MODE_NONE:
1245 ctrl3 |= PHY_B_P_NO_PAUSE;
1246 break;
1247 case FLOW_MODE_LOC_SEND:
1248 ctrl3 |= PHY_B_P_ASYM_MD;
1249 break;
1250 case FLOW_MODE_SYMMETRIC:
1251 ctrl3 |= PHY_B_P_SYM_MD;
1252 break;
1253 case FLOW_MODE_REM_SEND:
1254 ctrl3 |= PHY_B_P_BOTH_MD;
1255 break;
1256 }
1257
1258 /* Restart Auto-negotiation */
1259 ctrl1 |= PHY_CT_ANE | PHY_CT_RE_CFG;
1260 } else {
1261 if (skge->duplex == DUPLEX_FULL)
1262 ctrl1 |= PHY_CT_DUP_MD;
1263
1264 ctrl2 |= PHY_B_1000C_MSE; /* set it to Slave */
1265 }
1266
1267 skge_xm_phy_write(hw, port, PHY_BCOM_1000T_CTRL, ctrl2);
1268 skge_xm_phy_write(hw, port, PHY_BCOM_AUNE_ADV, ctrl3);
1269
1270 if (skge->netdev->mtu > ETH_DATA_LEN) {
1271 ctrl4 |= PHY_B_PEC_HIGH_LA;
1272 ctrl5 |= PHY_B_AC_LONG_PACK;
1273
1274 skge_xm_phy_write(hw, port,PHY_BCOM_AUX_CTRL, ctrl5);
1275 }
1276
1277 skge_xm_phy_write(hw, port, PHY_BCOM_P_EXT_CTRL, ctrl4);
1278 skge_xm_phy_write(hw, port, PHY_BCOM_CTRL, ctrl1);
1279 break;
1280 }
1281 spin_unlock_bh(&hw->phy_lock);
1282
1283 /* Clear MIB counters */
1284 skge_xm_write16(hw, port, XM_STAT_CMD,
1285 XM_SC_CLR_RXC | XM_SC_CLR_TXC);
1286 /* Clear two times according to Errata #3 */
1287 skge_xm_write16(hw, port, XM_STAT_CMD,
1288 XM_SC_CLR_RXC | XM_SC_CLR_TXC);
1289
1290 /* Start polling for link status */
1291 mod_timer(&skge->link_check, jiffies + LINK_POLL_HZ);
1292} 1327}
1293 1328
1294static void genesis_stop(struct skge_port *skge) 1329static void genesis_stop(struct skge_port *skge)
1295{ 1330{
1296 struct skge_hw *hw = skge->hw; 1331 struct skge_hw *hw = skge->hw;
1297 int port = skge->port; 1332 int port = skge->port;
1333 u32 reg;
1298 1334
1299 /* Clear Tx packet arbiter timeout IRQ */ 1335 /* Clear Tx packet arbiter timeout IRQ */
1300 skge_write16(hw, B3_PA_CTRL, 1336 skge_write16(hw, B3_PA_CTRL,
@@ -1304,33 +1340,30 @@ static void genesis_stop(struct skge_port *skge)
1304 * If the transfer gets stuck at the MAC the STOP command will not 1340 * If the transfer gets stuck at the MAC the STOP command will not
1305 * terminate if we don't flush the XMAC's transmit FIFO ! 1341 * terminate if we don't flush the XMAC's transmit FIFO !
1306 */ 1342 */
1307 skge_xm_write32(hw, port, XM_MODE, 1343 xm_write32(hw, port, XM_MODE,
1308 skge_xm_read32(hw, port, XM_MODE)|XM_MD_FTF); 1344 xm_read32(hw, port, XM_MODE)|XM_MD_FTF);
1309 1345
1310 1346
1311 /* Reset the MAC */ 1347 /* Reset the MAC */
1312 skge_write16(hw, SKGEMAC_REG(port, TX_MFF_CTRL1), MFF_SET_MAC_RST); 1348 skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_SET_MAC_RST);
1313 1349
1314 /* For external PHYs there must be special handling */ 1350 /* For external PHYs there must be special handling */
1315 if (hw->phy_type != SK_PHY_XMAC) { 1351 reg = skge_read32(hw, B2_GP_IO);
1316 u32 reg = skge_read32(hw, B2_GP_IO); 1352 if (port == 0) {
1317 1353 reg |= GP_DIR_0;
1318 if (port == 0) { 1354 reg &= ~GP_IO_0;
1319 reg |= GP_DIR_0; 1355 } else {
1320 reg &= ~GP_IO_0; 1356 reg |= GP_DIR_2;
1321 } else { 1357 reg &= ~GP_IO_2;
1322 reg |= GP_DIR_2;
1323 reg &= ~GP_IO_2;
1324 }
1325 skge_write32(hw, B2_GP_IO, reg);
1326 skge_read32(hw, B2_GP_IO);
1327 } 1358 }
1359 skge_write32(hw, B2_GP_IO, reg);
1360 skge_read32(hw, B2_GP_IO);
1328 1361
1329 skge_xm_write16(hw, port, XM_MMU_CMD, 1362 xm_write16(hw, port, XM_MMU_CMD,
1330 skge_xm_read16(hw, port, XM_MMU_CMD) 1363 xm_read16(hw, port, XM_MMU_CMD)
1331 & ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX)); 1364 & ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX));
1332 1365
1333 skge_xm_read16(hw, port, XM_MMU_CMD); 1366 xm_read16(hw, port, XM_MMU_CMD);
1334} 1367}
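
genesis_stop() above puts the external PHY back into reset by read-modify-writing the B2_GP_IO word: set the port's direction bit, clear its data bit, write back, then read once more to post the write. A small sketch of that idiom, with a plain variable standing in for the MMIO register and placeholder bit positions:

/*
 * Read-modify-write on a shared GPIO register, followed by a dummy
 * read to flush the posted write.  Bit positions are illustrative.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t fake_gp_io;			/* stands in for B2_GP_IO */

static uint32_t reg_read(void)    { return fake_gp_io; }
static void reg_write(uint32_t v) { fake_gp_io = v; }

static void hold_phy_in_reset(int port)
{
	/* placeholder bit pair per port (direction + data) */
	const uint32_t dir_bit = port == 0 ? 1u << 0  : 1u << 2;
	const uint32_t io_bit  = port == 0 ? 1u << 16 : 1u << 18;
	uint32_t r = reg_read();

	r |= dir_bit;			/* drive the pin */
	r &= ~io_bit;			/* pull it low -> PHY held in reset */
	reg_write(r);
	(void)reg_read();		/* dummy read posts the write */
}

int main(void)
{
	fake_gp_io = 0xffffffff;
	hold_phy_in_reset(0);
	printf("GP_IO = 0x%08lx\n", (unsigned long)fake_gp_io);
	return 0;
}
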
1335 1368
1336 1369
@@ -1341,11 +1374,11 @@ static void genesis_get_stats(struct skge_port *skge, u64 *data)
1341 int i; 1374 int i;
1342 unsigned long timeout = jiffies + HZ; 1375 unsigned long timeout = jiffies + HZ;
1343 1376
1344 skge_xm_write16(hw, port, 1377 xm_write16(hw, port,
1345 XM_STAT_CMD, XM_SC_SNP_TXC | XM_SC_SNP_RXC); 1378 XM_STAT_CMD, XM_SC_SNP_TXC | XM_SC_SNP_RXC);
1346 1379
1347 /* wait for update to complete */ 1380 /* wait for update to complete */
1348 while (skge_xm_read16(hw, port, XM_STAT_CMD) 1381 while (xm_read16(hw, port, XM_STAT_CMD)
1349 & (XM_SC_SNP_TXC | XM_SC_SNP_RXC)) { 1382 & (XM_SC_SNP_TXC | XM_SC_SNP_RXC)) {
1350 if (time_after(jiffies, timeout)) 1383 if (time_after(jiffies, timeout))
1351 break; 1384 break;
@@ -1353,68 +1386,60 @@ static void genesis_get_stats(struct skge_port *skge, u64 *data)
1353 } 1386 }
1354 1387
1355 /* special case for 64 bit octet counter */ 1388 /* special case for 64 bit octet counter */
1356 data[0] = (u64) skge_xm_read32(hw, port, XM_TXO_OK_HI) << 32 1389 data[0] = (u64) xm_read32(hw, port, XM_TXO_OK_HI) << 32
1357 | skge_xm_read32(hw, port, XM_TXO_OK_LO); 1390 | xm_read32(hw, port, XM_TXO_OK_LO);
1358 data[1] = (u64) skge_xm_read32(hw, port, XM_RXO_OK_HI) << 32 1391 data[1] = (u64) xm_read32(hw, port, XM_RXO_OK_HI) << 32
1359 | skge_xm_read32(hw, port, XM_RXO_OK_LO); 1392 | xm_read32(hw, port, XM_RXO_OK_LO);
1360 1393
1361 for (i = 2; i < ARRAY_SIZE(skge_stats); i++) 1394 for (i = 2; i < ARRAY_SIZE(skge_stats); i++)
1362 data[i] = skge_xm_read32(hw, port, skge_stats[i].xmac_offset); 1395 data[i] = xm_read32(hw, port, skge_stats[i].xmac_offset);
1363} 1396}
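
genesis_get_stats() above snapshots the MIB counters, waits (with a one-second bound) for the snapshot to complete, and then glues each 64-bit octet counter together from two 32-bit register reads. A minimal sketch of that composition, with the hardware reads stubbed out:

/*
 * Combine a 64-bit counter from separate high and low 32-bit reads.
 * The read_hi()/read_lo() stubs return made-up values.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t read_hi(void) { return 0x00000002;  }	/* fake XM_TXO_OK_HI */
static uint32_t read_lo(void) { return 0x8000001aU; }	/* fake XM_TXO_OK_LO */

int main(void)
{
	uint64_t octets = (uint64_t)read_hi() << 32 | read_lo();

	printf("tx octets = %llu\n", (unsigned long long)octets);
	return 0;
}
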
1364 1397
1365static void genesis_mac_intr(struct skge_hw *hw, int port) 1398static void genesis_mac_intr(struct skge_hw *hw, int port)
1366{ 1399{
1367 struct skge_port *skge = netdev_priv(hw->dev[port]); 1400 struct skge_port *skge = netdev_priv(hw->dev[port]);
1368 u16 status = skge_xm_read16(hw, port, XM_ISRC); 1401 u16 status = xm_read16(hw, port, XM_ISRC);
1369 1402
1370 pr_debug("genesis_intr status %x\n", status); 1403 if (netif_msg_intr(skge))
1371 if (hw->phy_type == SK_PHY_XMAC) { 1404 printk(KERN_DEBUG PFX "%s: mac interrupt status 0x%x\n",
1372 /* Link down, start polling for state change */
1373 if (status & XM_IS_INP_ASS) {
1374 skge_xm_write16(hw, port, XM_IMSK,
1375 skge_xm_read16(hw, port, XM_IMSK) | XM_IS_INP_ASS);
1376 mod_timer(&skge->link_check, jiffies + LINK_POLL_HZ);
1377 }
1378 else if (status & XM_IS_AND)
1379 mod_timer(&skge->link_check, jiffies + LINK_POLL_HZ);
1380 }
1381 1406
1382 if (status & XM_IS_TXF_UR) { 1407 if (status & XM_IS_TXF_UR) {
1383 skge_xm_write32(hw, port, XM_MODE, XM_MD_FTF); 1408 xm_write32(hw, port, XM_MODE, XM_MD_FTF);
1384 ++skge->net_stats.tx_fifo_errors; 1409 ++skge->net_stats.tx_fifo_errors;
1385 } 1410 }
1386 if (status & XM_IS_RXF_OV) { 1411 if (status & XM_IS_RXF_OV) {
1387 skge_xm_write32(hw, port, XM_MODE, XM_MD_FRF); 1412 xm_write32(hw, port, XM_MODE, XM_MD_FRF);
1388 ++skge->net_stats.rx_fifo_errors; 1413 ++skge->net_stats.rx_fifo_errors;
1389 } 1414 }
1390} 1415}
1391 1416
1392static void skge_gm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val) 1417static void gm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val)
1393{ 1418{
1394 int i; 1419 int i;
1395 1420
1396 skge_gma_write16(hw, port, GM_SMI_DATA, val); 1421 gma_write16(hw, port, GM_SMI_DATA, val);
1397 skge_gma_write16(hw, port, GM_SMI_CTRL, 1422 gma_write16(hw, port, GM_SMI_CTRL,
1398 GM_SMI_CT_PHY_AD(hw->phy_addr) | GM_SMI_CT_REG_AD(reg)); 1423 GM_SMI_CT_PHY_AD(hw->phy_addr) | GM_SMI_CT_REG_AD(reg));
1399 for (i = 0; i < PHY_RETRIES; i++) { 1424 for (i = 0; i < PHY_RETRIES; i++) {
1400 udelay(1); 1425 udelay(1);
1401 1426
1402 if (!(skge_gma_read16(hw, port, GM_SMI_CTRL) & GM_SMI_CT_BUSY)) 1427 if (!(gma_read16(hw, port, GM_SMI_CTRL) & GM_SMI_CT_BUSY))
1403 break; 1428 break;
1404 } 1429 }
1405} 1430}
1406 1431
1407static u16 skge_gm_phy_read(struct skge_hw *hw, int port, u16 reg) 1432static u16 gm_phy_read(struct skge_hw *hw, int port, u16 reg)
1408{ 1433{
1409 int i; 1434 int i;
1410 1435
1411 skge_gma_write16(hw, port, GM_SMI_CTRL, 1436 gma_write16(hw, port, GM_SMI_CTRL,
1412 GM_SMI_CT_PHY_AD(hw->phy_addr) 1437 GM_SMI_CT_PHY_AD(hw->phy_addr)
1413 | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD); 1438 | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD);
1414 1439
1415 for (i = 0; i < PHY_RETRIES; i++) { 1440 for (i = 0; i < PHY_RETRIES; i++) {
1416 udelay(1); 1441 udelay(1);
1417 if (skge_gma_read16(hw, port, GM_SMI_CTRL) & GM_SMI_CT_RD_VAL) 1442 if (gma_read16(hw, port, GM_SMI_CTRL) & GM_SMI_CT_RD_VAL)
1418 goto ready; 1443 goto ready;
1419 } 1444 }
1420 1445
@@ -1422,24 +1447,7 @@ static u16 skge_gm_phy_read(struct skge_hw *hw, int port, u16 reg)
1422 hw->dev[port]->name); 1447 hw->dev[port]->name);
1423 return 0; 1448 return 0;
1424 ready: 1449 ready:
1425 return skge_gma_read16(hw, port, GM_SMI_DATA); 1450 return gma_read16(hw, port, GM_SMI_DATA);
1426}
1427
1428static void genesis_link_down(struct skge_port *skge)
1429{
1430 struct skge_hw *hw = skge->hw;
1431 int port = skge->port;
1432
1433 pr_debug("genesis_link_down\n");
1434
1435 skge_xm_write16(hw, port, XM_MMU_CMD,
1436 skge_xm_read16(hw, port, XM_MMU_CMD)
1437 & ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX));
1438
1439 /* dummy read to ensure writing */
1440 (void) skge_xm_read16(hw, port, XM_MMU_CMD);
1441
1442 skge_link_down(skge);
1443} 1451}
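
gm_phy_read()/gm_phy_write() above never spin forever on the SMI interface: they poll the busy/ready bit up to PHY_RETRIES times with a short delay, then give up. A userspace sketch of that bounded-poll idiom, with the hardware and udelay() replaced by stubs:

/*
 * Bounded busy-wait: poll a ready condition a fixed number of times,
 * then report a timeout instead of hanging.
 */
#include <stdbool.h>
#include <stdio.h>

#define PHY_RETRIES 1000

static int polls_left = 3;		/* pretend hardware: ready on 3rd poll */

static bool smi_ready(void)     { return --polls_left <= 0; }
static void udelay_stub(int us) { (void)us; }

static int wait_smi_ready(void)
{
	int i;

	for (i = 0; i < PHY_RETRIES; i++) {
		udelay_stub(1);
		if (smi_ready())
			return 0;	/* ready: caller may fetch the data word */
	}
	return -1;			/* timed out, report failure */
}

int main(void)
{
	printf("wait_smi_ready() = %d\n", wait_smi_ready());
	return 0;
}
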
1444 1452
1445static void genesis_link_up(struct skge_port *skge) 1453static void genesis_link_up(struct skge_port *skge)
@@ -1450,7 +1458,7 @@ static void genesis_link_up(struct skge_port *skge)
1450 u32 mode, msk; 1458 u32 mode, msk;
1451 1459
1452 pr_debug("genesis_link_up\n"); 1460 pr_debug("genesis_link_up\n");
1453 cmd = skge_xm_read16(hw, port, XM_MMU_CMD); 1461 cmd = xm_read16(hw, port, XM_MMU_CMD);
1454 1462
1455 /* 1463 /*
1456 * enabling pause frame reception is required for 1000BT 1464 * enabling pause frame reception is required for 1000BT
@@ -1458,14 +1466,15 @@ static void genesis_link_up(struct skge_port *skge)
1458 */ 1466 */
1459 if (skge->flow_control == FLOW_MODE_NONE || 1467 if (skge->flow_control == FLOW_MODE_NONE ||
1460 skge->flow_control == FLOW_MODE_LOC_SEND) 1468 skge->flow_control == FLOW_MODE_LOC_SEND)
1469 /* Disable Pause Frame Reception */
1461 cmd |= XM_MMU_IGN_PF; 1470 cmd |= XM_MMU_IGN_PF;
1462 else 1471 else
1463 /* Enable Pause Frame Reception */ 1472 /* Enable Pause Frame Reception */
1464 cmd &= ~XM_MMU_IGN_PF; 1473 cmd &= ~XM_MMU_IGN_PF;
1465 1474
1466 skge_xm_write16(hw, port, XM_MMU_CMD, cmd); 1475 xm_write16(hw, port, XM_MMU_CMD, cmd);
1467 1476
1468 mode = skge_xm_read32(hw, port, XM_MODE); 1477 mode = xm_read32(hw, port, XM_MODE);
1469 if (skge->flow_control == FLOW_MODE_SYMMETRIC || 1478 if (skge->flow_control == FLOW_MODE_SYMMETRIC ||
1470 skge->flow_control == FLOW_MODE_LOC_SEND) { 1479 skge->flow_control == FLOW_MODE_LOC_SEND) {
1471 /* 1480 /*
@@ -1479,10 +1488,10 @@ static void genesis_link_up(struct skge_port *skge)
1479 /* XM_PAUSE_DA = '010000C28001' (default) */ 1488 /* XM_PAUSE_DA = '010000C28001' (default) */
1480 /* XM_MAC_PTIME = 0xffff (maximum) */ 1489 /* XM_MAC_PTIME = 0xffff (maximum) */
1481 /* remember this value is defined in big endian (!) */ 1490 /* remember this value is defined in big endian (!) */
1482 skge_xm_write16(hw, port, XM_MAC_PTIME, 0xffff); 1491 xm_write16(hw, port, XM_MAC_PTIME, 0xffff);
1483 1492
1484 mode |= XM_PAUSE_MODE; 1493 mode |= XM_PAUSE_MODE;
1485 skge_write16(hw, SKGEMAC_REG(port, RX_MFF_CTRL1), MFF_ENA_PAUSE); 1494 skge_write16(hw, SK_REG(port, RX_MFF_CTRL1), MFF_ENA_PAUSE);
1486 } else { 1495 } else {
1487 /* 1496 /*
1488 * disabling pause frame generation is required for 1000BT 1497 * disabling pause frame generation is required for 1000BT
@@ -1491,125 +1500,68 @@ static void genesis_link_up(struct skge_port *skge)
1491 /* Disable Pause Mode in Mode Register */ 1500 /* Disable Pause Mode in Mode Register */
1492 mode &= ~XM_PAUSE_MODE; 1501 mode &= ~XM_PAUSE_MODE;
1493 1502
1494 skge_write16(hw, SKGEMAC_REG(port, RX_MFF_CTRL1), MFF_DIS_PAUSE); 1503 skge_write16(hw, SK_REG(port, RX_MFF_CTRL1), MFF_DIS_PAUSE);
1495 } 1504 }
1496 1505
1497 skge_xm_write32(hw, port, XM_MODE, mode); 1506 xm_write32(hw, port, XM_MODE, mode);
1498 1507
1499 msk = XM_DEF_MSK; 1508 msk = XM_DEF_MSK;
1500 if (hw->phy_type != SK_PHY_XMAC) 1509 /* disable GP0 interrupt bit for external Phy */
1501 msk |= XM_IS_INP_ASS; /* disable GP0 interrupt bit */ 1510 msk |= XM_IS_INP_ASS;
1502 1511
1503 skge_xm_write16(hw, port, XM_IMSK, msk); 1512 xm_write16(hw, port, XM_IMSK, msk);
1504 skge_xm_read16(hw, port, XM_ISRC); 1513 xm_read16(hw, port, XM_ISRC);
1505 1514
1506 /* get MMU Command Reg. */ 1515 /* get MMU Command Reg. */
1507 cmd = skge_xm_read16(hw, port, XM_MMU_CMD); 1516 cmd = xm_read16(hw, port, XM_MMU_CMD);
1508 if (hw->phy_type != SK_PHY_XMAC && skge->duplex == DUPLEX_FULL) 1517 if (skge->duplex == DUPLEX_FULL)
1509 cmd |= XM_MMU_GMII_FD; 1518 cmd |= XM_MMU_GMII_FD;
1510 1519
1511 if (hw->phy_type == SK_PHY_BCOM) { 1520 /*
1512 /* 1521 * Workaround BCOM Errata (#10523) for all BCom Phys
1513 * Workaround BCOM Errata (#10523) for all BCom Phys 1522 * Enable Power Management after link up
1514 * Enable Power Management after link up 1523 */
1515 */ 1524 xm_phy_write(hw, port, PHY_BCOM_AUX_CTRL,
1516 skge_xm_phy_write(hw, port, PHY_BCOM_AUX_CTRL, 1525 xm_phy_read(hw, port, PHY_BCOM_AUX_CTRL)
1517 skge_xm_phy_read(hw, port, PHY_BCOM_AUX_CTRL) 1526 & ~PHY_B_AC_DIS_PM);
1518 & ~PHY_B_AC_DIS_PM); 1527 xm_phy_write(hw, port, PHY_BCOM_INT_MASK, PHY_B_DEF_MSK);
1519 skge_xm_phy_write(hw, port, PHY_BCOM_INT_MASK,
1520 PHY_B_DEF_MSK);
1521 }
1522 1528
1523 /* enable Rx/Tx */ 1529 /* enable Rx/Tx */
1524 skge_xm_write16(hw, port, XM_MMU_CMD, 1530 xm_write16(hw, port, XM_MMU_CMD,
1525 cmd | XM_MMU_ENA_RX | XM_MMU_ENA_TX); 1531 cmd | XM_MMU_ENA_RX | XM_MMU_ENA_TX);
1526 skge_link_up(skge); 1532 skge_link_up(skge);
1527} 1533}
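
genesis_link_up() above makes two flow-control decisions: whether the MAC should honour incoming pause frames (XM_MMU_IGN_PF) and whether it may generate them (XM_PAUSE_MODE plus the FIFO pause enable). A small decision sketch mirroring those checks; the enum mirrors the driver's FLOW_MODE_* values and the booleans are only for illustration:

/*
 * Pause-frame policy derived from the configured flow-control mode,
 * following the same conditions as genesis_link_up().
 */
#include <stdbool.h>
#include <stdio.h>

enum flow_mode { FLOW_NONE, FLOW_LOC_SEND, FLOW_SYMMETRIC, FLOW_REM_SEND };

struct pause_cfg { bool honour_rx_pause; bool send_pause; };

static struct pause_cfg pause_policy(enum flow_mode m)
{
	struct pause_cfg c;

	/* ignore incoming pause frames if we neither want nor accept them */
	c.honour_rx_pause = !(m == FLOW_NONE || m == FLOW_LOC_SEND);
	/* generate pause frames when we send locally or run symmetric pause */
	c.send_pause = (m == FLOW_SYMMETRIC || m == FLOW_LOC_SEND);
	return c;
}

int main(void)
{
	struct pause_cfg c = pause_policy(FLOW_SYMMETRIC);

	printf("rx pause honoured: %d, pause tx: %d\n",
	       c.honour_rx_pause, c.send_pause);
	return 0;
}
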
1528 1534
1529 1535
1530static void genesis_bcom_intr(struct skge_port *skge) 1536static inline void bcom_phy_intr(struct skge_port *skge)
1531{ 1537{
1532 struct skge_hw *hw = skge->hw; 1538 struct skge_hw *hw = skge->hw;
1533 int port = skge->port; 1539 int port = skge->port;
1534 u16 stat = skge_xm_phy_read(hw, port, PHY_BCOM_INT_STAT); 1540 u16 isrc;
1541
1542 isrc = xm_phy_read(hw, port, PHY_BCOM_INT_STAT);
1543 if (netif_msg_intr(skge))
1544 printk(KERN_DEBUG PFX "%s: phy interrupt status 0x%x\n",
1545 skge->netdev->name, isrc);
1535 1546
1536 pr_debug("genesis_bcom intr stat=%x\n", stat); 1547 if (isrc & PHY_B_IS_PSE)
1548 printk(KERN_ERR PFX "%s: uncorrectable pair swap error\n",
1549 hw->dev[port]->name);
1537 1550
1538 /* Workaround BCom Errata: 1551 /* Workaround BCom Errata:
1539 * enable and disable loopback mode if "NO HCD" occurs. 1552 * enable and disable loopback mode if "NO HCD" occurs.
1540 */ 1553 */
1541 if (stat & PHY_B_IS_NO_HDCL) { 1554 if (isrc & PHY_B_IS_NO_HDCL) {
1542 u16 ctrl = skge_xm_phy_read(hw, port, PHY_BCOM_CTRL); 1555 u16 ctrl = xm_phy_read(hw, port, PHY_BCOM_CTRL);
1543 skge_xm_phy_write(hw, port, PHY_BCOM_CTRL, 1556 xm_phy_write(hw, port, PHY_BCOM_CTRL,
1544 ctrl | PHY_CT_LOOP); 1557 ctrl | PHY_CT_LOOP);
1545 skge_xm_phy_write(hw, port, PHY_BCOM_CTRL, 1558 xm_phy_write(hw, port, PHY_BCOM_CTRL,
1546 ctrl & ~PHY_CT_LOOP); 1559 ctrl & ~PHY_CT_LOOP);
1547 } 1560 }
1548 1561
1549 stat = skge_xm_phy_read(hw, port, PHY_BCOM_STAT); 1562 if (isrc & (PHY_B_IS_AN_PR | PHY_B_IS_LST_CHANGE))
1550 if (stat & (PHY_B_IS_AN_PR | PHY_B_IS_LST_CHANGE)) { 1563 bcom_check_link(hw, port);
1551 u16 aux = skge_xm_phy_read(hw, port, PHY_BCOM_AUX_STAT);
1552 if ( !(aux & PHY_B_AS_LS) && netif_carrier_ok(skge->netdev))
1553 genesis_link_down(skge);
1554
1555 else if (stat & PHY_B_IS_LST_CHANGE) {
1556 if (aux & PHY_B_AS_AN_C) {
1557 switch (aux & PHY_B_AS_AN_RES_MSK) {
1558 case PHY_B_RES_1000FD:
1559 skge->duplex = DUPLEX_FULL;
1560 break;
1561 case PHY_B_RES_1000HD:
1562 skge->duplex = DUPLEX_HALF;
1563 break;
1564 }
1565
1566 switch (aux & PHY_B_AS_PAUSE_MSK) {
1567 case PHY_B_AS_PAUSE_MSK:
1568 skge->flow_control = FLOW_MODE_SYMMETRIC;
1569 break;
1570 case PHY_B_AS_PRR:
1571 skge->flow_control = FLOW_MODE_REM_SEND;
1572 break;
1573 case PHY_B_AS_PRT:
1574 skge->flow_control = FLOW_MODE_LOC_SEND;
1575 break;
1576 default:
1577 skge->flow_control = FLOW_MODE_NONE;
1578 }
1579 skge->speed = SPEED_1000;
1580 }
1581 genesis_link_up(skge);
1582 }
1583 else
1584 mod_timer(&skge->link_check, jiffies + LINK_POLL_HZ);
1585 }
1586}
1587 1564
1588/* Periodic poll of phy status to check for link transition */
1589static void skge_link_timer(unsigned long __arg)
1590{
1591 struct skge_port *skge = (struct skge_port *) __arg;
1592 struct skge_hw *hw = skge->hw;
1593 int port = skge->port;
1594
1595 if (hw->chip_id != CHIP_ID_GENESIS || !netif_running(skge->netdev))
1596 return;
1597
1598 spin_lock_bh(&hw->phy_lock);
1599 if (hw->phy_type == SK_PHY_BCOM)
1600 genesis_bcom_intr(skge);
1601 else {
1602 int i;
1603 for (i = 0; i < 3; i++)
1604 if (skge_xm_read16(hw, port, XM_ISRC) & XM_IS_INP_ASS)
1605 break;
1606
1607 if (i == 3)
1608 mod_timer(&skge->link_check, jiffies + LINK_POLL_HZ);
1609 else
1610 genesis_link_up(skge);
1611 }
1612 spin_unlock_bh(&hw->phy_lock);
1613} 1565}
1614 1566
1615/* Marvell Phy Initialization */ 1567/* Marvell Phy Initialization */
@@ -1621,31 +1573,27 @@ static void yukon_init(struct skge_hw *hw, int port)
1621 1573
1622 pr_debug("yukon_init\n"); 1574 pr_debug("yukon_init\n");
1623 if (skge->autoneg == AUTONEG_ENABLE) { 1575 if (skge->autoneg == AUTONEG_ENABLE) {
1624 u16 ectrl = skge_gm_phy_read(hw, port, PHY_MARV_EXT_CTRL); 1576 u16 ectrl = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL);
1625 1577
1626 ectrl &= ~(PHY_M_EC_M_DSC_MSK | PHY_M_EC_S_DSC_MSK | 1578 ectrl &= ~(PHY_M_EC_M_DSC_MSK | PHY_M_EC_S_DSC_MSK |
1627 PHY_M_EC_MAC_S_MSK); 1579 PHY_M_EC_MAC_S_MSK);
1628 ectrl |= PHY_M_EC_MAC_S(MAC_TX_CLK_25_MHZ); 1580 ectrl |= PHY_M_EC_MAC_S(MAC_TX_CLK_25_MHZ);
1629 1581
1630 /* on PHY 88E1111 there is a change for downshift control */ 1582 ectrl |= PHY_M_EC_M_DSC(0) | PHY_M_EC_S_DSC(1);
1631 if (hw->chip_id == CHIP_ID_YUKON_EC)
1632 ectrl |= PHY_M_EC_M_DSC_2(0) | PHY_M_EC_DOWN_S_ENA;
1633 else
1634 ectrl |= PHY_M_EC_M_DSC(0) | PHY_M_EC_S_DSC(1);
1635 1583
1636 skge_gm_phy_write(hw, port, PHY_MARV_EXT_CTRL, ectrl); 1584 gm_phy_write(hw, port, PHY_MARV_EXT_CTRL, ectrl);
1637 } 1585 }
1638 1586
1639 ctrl = skge_gm_phy_read(hw, port, PHY_MARV_CTRL); 1587 ctrl = gm_phy_read(hw, port, PHY_MARV_CTRL);
1640 if (skge->autoneg == AUTONEG_DISABLE) 1588 if (skge->autoneg == AUTONEG_DISABLE)
1641 ctrl &= ~PHY_CT_ANE; 1589 ctrl &= ~PHY_CT_ANE;
1642 1590
1643 ctrl |= PHY_CT_RESET; 1591 ctrl |= PHY_CT_RESET;
1644 skge_gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl); 1592 gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl);
1645 1593
1646 ctrl = 0; 1594 ctrl = 0;
1647 ct1000 = 0; 1595 ct1000 = 0;
1648 adv = PHY_SEL_TYPE; 1596 adv = PHY_AN_CSMA;
1649 1597
1650 if (skge->autoneg == AUTONEG_ENABLE) { 1598 if (skge->autoneg == AUTONEG_ENABLE) {
1651 if (iscopper(hw)) { 1599 if (iscopper(hw)) {
@@ -1661,41 +1609,12 @@ static void yukon_init(struct skge_hw *hw, int port)
1661 adv |= PHY_M_AN_10_FD; 1609 adv |= PHY_M_AN_10_FD;
1662 if (skge->advertising & ADVERTISED_10baseT_Half) 1610 if (skge->advertising & ADVERTISED_10baseT_Half)
1663 adv |= PHY_M_AN_10_HD; 1611 adv |= PHY_M_AN_10_HD;
1664 1612 } else /* special defines for FIBER (88E1011S only) */
1665 /* Set Flow-control capabilities */
1666 switch (skge->flow_control) {
1667 case FLOW_MODE_NONE:
1668 adv |= PHY_B_P_NO_PAUSE;
1669 break;
1670 case FLOW_MODE_LOC_SEND:
1671 adv |= PHY_B_P_ASYM_MD;
1672 break;
1673 case FLOW_MODE_SYMMETRIC:
1674 adv |= PHY_B_P_SYM_MD;
1675 break;
1676 case FLOW_MODE_REM_SEND:
1677 adv |= PHY_B_P_BOTH_MD;
1678 break;
1679 }
1680 } else { /* special defines for FIBER (88E1011S only) */
1681 adv |= PHY_M_AN_1000X_AHD | PHY_M_AN_1000X_AFD; 1613 adv |= PHY_M_AN_1000X_AHD | PHY_M_AN_1000X_AFD;
1682 1614
1683 /* Set Flow-control capabilities */ 1615 /* Set Flow-control capabilities */
1684 switch (skge->flow_control) { 1616 adv |= phy_pause_map[skge->flow_control];
1685 case FLOW_MODE_NONE: 1617
1686 adv |= PHY_M_P_NO_PAUSE_X;
1687 break;
1688 case FLOW_MODE_LOC_SEND:
1689 adv |= PHY_M_P_ASYM_MD_X;
1690 break;
1691 case FLOW_MODE_SYMMETRIC:
1692 adv |= PHY_M_P_SYM_MD_X;
1693 break;
1694 case FLOW_MODE_REM_SEND:
1695 adv |= PHY_M_P_BOTH_MD_X;
1696 break;
1697 }
1698 }
1699 /* Restart Auto-negotiation */ 1618 /* Restart Auto-negotiation */
1700 ctrl |= PHY_CT_ANE | PHY_CT_RE_CFG; 1619 ctrl |= PHY_CT_ANE | PHY_CT_RE_CFG;
1701 } else { 1620 } else {
@@ -1717,36 +1636,23 @@ static void yukon_init(struct skge_hw *hw, int port)
1717 ctrl |= PHY_CT_RESET; 1636 ctrl |= PHY_CT_RESET;
1718 } 1637 }
1719 1638
1720 if (hw->chip_id != CHIP_ID_YUKON_FE) 1639 gm_phy_write(hw, port, PHY_MARV_1000T_CTRL, ct1000);
1721 skge_gm_phy_write(hw, port, PHY_MARV_1000T_CTRL, ct1000);
1722 1640
1723 skge_gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, adv); 1641 gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, adv);
1724 skge_gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl); 1642 gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl);
1725 1643
1726 /* Setup Phy LEDs */ 1644 /* Setup Phy LEDs */
1727 ledctrl = PHY_M_LED_PULS_DUR(PULS_170MS); 1645 ledctrl = PHY_M_LED_PULS_DUR(PULS_170MS);
1728 ledover = 0; 1646 ledover = 0;
1729 1647
1730 if (hw->chip_id == CHIP_ID_YUKON_FE) { 1648 ledctrl |= PHY_M_LED_BLINK_RT(BLINK_84MS) | PHY_M_LEDC_TX_CTRL;
1731 /* on 88E3082 these bits are at 11..9 (shifted left) */
1732 ledctrl |= PHY_M_LED_BLINK_RT(BLINK_84MS) << 1;
1733
1734 skge_gm_phy_write(hw, port, PHY_MARV_FE_LED_PAR,
1735 ((skge_gm_phy_read(hw, port, PHY_MARV_FE_LED_PAR)
1736
1737 & ~PHY_M_FELP_LED1_MSK)
1738 | PHY_M_FELP_LED1_CTRL(LED_PAR_CTRL_ACT_BL)));
1739 } else {
1740 /* set Tx LED (LED_TX) to blink mode on Rx OR Tx activity */
1741 ledctrl |= PHY_M_LED_BLINK_RT(BLINK_84MS) | PHY_M_LEDC_TX_CTRL;
1742 1649
1743 /* turn off the Rx LED (LED_RX) */ 1650 /* turn off the Rx LED (LED_RX) */
1744 ledover |= PHY_M_LED_MO_RX(MO_LED_OFF); 1651 ledover |= PHY_M_LED_MO_RX(MO_LED_OFF);
1745 }
1746 1652
1747 /* disable blink mode (LED_DUPLEX) on collisions */ 1653 /* disable blink mode (LED_DUPLEX) on collisions */
1748 ctrl |= PHY_M_LEDC_DP_CTRL; 1654 ctrl |= PHY_M_LEDC_DP_CTRL;
1749 skge_gm_phy_write(hw, port, PHY_MARV_LED_CTRL, ledctrl); 1655 gm_phy_write(hw, port, PHY_MARV_LED_CTRL, ledctrl);
1750 1656
1751 if (skge->autoneg == AUTONEG_DISABLE || skge->speed == SPEED_100) { 1657 if (skge->autoneg == AUTONEG_DISABLE || skge->speed == SPEED_100) {
1752 /* turn on 100 Mbps LED (LED_LINK100) */ 1658 /* turn on 100 Mbps LED (LED_LINK100) */
@@ -1754,25 +1660,25 @@ static void yukon_init(struct skge_hw *hw, int port)
1754 } 1660 }
1755 1661
1756 if (ledover) 1662 if (ledover)
1757 skge_gm_phy_write(hw, port, PHY_MARV_LED_OVER, ledover); 1663 gm_phy_write(hw, port, PHY_MARV_LED_OVER, ledover);
1758 1664
1759 /* Enable phy interrupt on autonegotiation complete (or link up) */ 1665 /* Enable phy interrupt on autonegotiation complete (or link up) */
1760 if (skge->autoneg == AUTONEG_ENABLE) 1666 if (skge->autoneg == AUTONEG_ENABLE)
1761 skge_gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_AN_COMPL); 1667 gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_AN_COMPL);
1762 else 1668 else
1763 skge_gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_DEF_MSK); 1669 gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_DEF_MSK);
1764} 1670}
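
yukon_init() and bcom_phy_init() above replace the old per-PHY switch (skge->flow_control) blocks with a single phy_pause_map[] lookup indexed by the flow-control mode. A sketch of that refactor; the advertisement bit values here are placeholders, not the driver's actual PHY bits:

/*
 * Lookup table keyed by flow-control mode, replacing a repeated
 * switch statement.  Bit values are illustrative only.
 */
#include <stdio.h>
#include <stdint.h>

enum flow_mode { FLOW_NONE, FLOW_LOC_SEND, FLOW_SYMMETRIC, FLOW_REM_SEND };

/* placeholder pause-advertisement bits, one entry per flow mode */
static const uint16_t pause_map[] = {
	[FLOW_NONE]	 = 0x0000,
	[FLOW_LOC_SEND]	 = 0x0800,
	[FLOW_SYMMETRIC] = 0x0400,
	[FLOW_REM_SEND]	 = 0x0c00,
};

int main(void)
{
	enum flow_mode mode = FLOW_SYMMETRIC;
	uint16_t adv = 0x0001 /* base advertisement */ | pause_map[mode];

	printf("advertisement word = 0x%04x\n", adv);
	return 0;
}

The table keeps the Genesis and Yukon code paths advertising pause capabilities consistently from one place instead of four copies of the same switch.
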
1765 1671
1766static void yukon_reset(struct skge_hw *hw, int port) 1672static void yukon_reset(struct skge_hw *hw, int port)
1767{ 1673{
1768 skge_gm_phy_write(hw, port, PHY_MARV_INT_MASK, 0);/* disable PHY IRQs */ 1674 gm_phy_write(hw, port, PHY_MARV_INT_MASK, 0);/* disable PHY IRQs */
1769 skge_gma_write16(hw, port, GM_MC_ADDR_H1, 0); /* clear MC hash */ 1675 gma_write16(hw, port, GM_MC_ADDR_H1, 0); /* clear MC hash */
1770 skge_gma_write16(hw, port, GM_MC_ADDR_H2, 0); 1676 gma_write16(hw, port, GM_MC_ADDR_H2, 0);
1771 skge_gma_write16(hw, port, GM_MC_ADDR_H3, 0); 1677 gma_write16(hw, port, GM_MC_ADDR_H3, 0);
1772 skge_gma_write16(hw, port, GM_MC_ADDR_H4, 0); 1678 gma_write16(hw, port, GM_MC_ADDR_H4, 0);
1773 1679
1774 skge_gma_write16(hw, port, GM_RX_CTRL, 1680 gma_write16(hw, port, GM_RX_CTRL,
1775 skge_gma_read16(hw, port, GM_RX_CTRL) 1681 gma_read16(hw, port, GM_RX_CTRL)
1776 | GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA); 1682 | GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
1777} 1683}
1778 1684
@@ -1785,17 +1691,17 @@ static void yukon_mac_init(struct skge_hw *hw, int port)
1785 1691
1786 /* WA code for COMA mode -- set PHY reset */ 1692 /* WA code for COMA mode -- set PHY reset */
1787 if (hw->chip_id == CHIP_ID_YUKON_LITE && 1693 if (hw->chip_id == CHIP_ID_YUKON_LITE &&
1788 chip_rev(hw) == CHIP_REV_YU_LITE_A3) 1694 hw->chip_rev == CHIP_REV_YU_LITE_A3)
1789 skge_write32(hw, B2_GP_IO, 1695 skge_write32(hw, B2_GP_IO,
1790 (skge_read32(hw, B2_GP_IO) | GP_DIR_9 | GP_IO_9)); 1696 (skge_read32(hw, B2_GP_IO) | GP_DIR_9 | GP_IO_9));
1791 1697
1792 /* hard reset */ 1698 /* hard reset */
1793 skge_write32(hw, SKGEMAC_REG(port, GPHY_CTRL), GPC_RST_SET); 1699 skge_write32(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET);
1794 skge_write32(hw, SKGEMAC_REG(port, GMAC_CTRL), GMC_RST_SET); 1700 skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_RST_SET);
1795 1701
1796 /* WA code for COMA mode -- clear PHY reset */ 1702 /* WA code for COMA mode -- clear PHY reset */
1797 if (hw->chip_id == CHIP_ID_YUKON_LITE && 1703 if (hw->chip_id == CHIP_ID_YUKON_LITE &&
1798 chip_rev(hw) == CHIP_REV_YU_LITE_A3) 1704 hw->chip_rev == CHIP_REV_YU_LITE_A3)
1799 skge_write32(hw, B2_GP_IO, 1705 skge_write32(hw, B2_GP_IO,
1800 (skge_read32(hw, B2_GP_IO) | GP_DIR_9) 1706 (skge_read32(hw, B2_GP_IO) | GP_DIR_9)
1801 & ~GP_IO_9); 1707 & ~GP_IO_9);
@@ -1806,13 +1712,13 @@ static void yukon_mac_init(struct skge_hw *hw, int port)
1806 reg |= iscopper(hw) ? GPC_HWCFG_GMII_COP : GPC_HWCFG_GMII_FIB; 1712 reg |= iscopper(hw) ? GPC_HWCFG_GMII_COP : GPC_HWCFG_GMII_FIB;
1807 1713
1808 /* Clear GMC reset */ 1714 /* Clear GMC reset */
1809 skge_write32(hw, SKGEMAC_REG(port, GPHY_CTRL), reg | GPC_RST_SET); 1715 skge_write32(hw, SK_REG(port, GPHY_CTRL), reg | GPC_RST_SET);
1810 skge_write32(hw, SKGEMAC_REG(port, GPHY_CTRL), reg | GPC_RST_CLR); 1716 skge_write32(hw, SK_REG(port, GPHY_CTRL), reg | GPC_RST_CLR);
1811 skge_write32(hw, SKGEMAC_REG(port, GMAC_CTRL), GMC_PAUSE_ON | GMC_RST_CLR); 1717 skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON | GMC_RST_CLR);
1812 if (skge->autoneg == AUTONEG_DISABLE) { 1718 if (skge->autoneg == AUTONEG_DISABLE) {
1813 reg = GM_GPCR_AU_ALL_DIS; 1719 reg = GM_GPCR_AU_ALL_DIS;
1814 skge_gma_write16(hw, port, GM_GP_CTRL, 1720 gma_write16(hw, port, GM_GP_CTRL,
1815 skge_gma_read16(hw, port, GM_GP_CTRL) | reg); 1721 gma_read16(hw, port, GM_GP_CTRL) | reg);
1816 1722
1817 switch (skge->speed) { 1723 switch (skge->speed) {
1818 case SPEED_1000: 1724 case SPEED_1000:
@@ -1828,7 +1734,7 @@ static void yukon_mac_init(struct skge_hw *hw, int port)
1828 reg = GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100 | GM_GPCR_DUP_FULL; 1734 reg = GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100 | GM_GPCR_DUP_FULL;
1829 switch (skge->flow_control) { 1735 switch (skge->flow_control) {
1830 case FLOW_MODE_NONE: 1736 case FLOW_MODE_NONE:
1831 skge_write32(hw, SKGEMAC_REG(port, GMAC_CTRL), GMC_PAUSE_OFF); 1737 skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
1832 reg |= GM_GPCR_FC_TX_DIS | GM_GPCR_FC_RX_DIS | GM_GPCR_AU_FCT_DIS; 1738 reg |= GM_GPCR_FC_TX_DIS | GM_GPCR_FC_RX_DIS | GM_GPCR_AU_FCT_DIS;
1833 break; 1739 break;
1834 case FLOW_MODE_LOC_SEND: 1740 case FLOW_MODE_LOC_SEND:
@@ -1836,7 +1742,7 @@ static void yukon_mac_init(struct skge_hw *hw, int port)
1836 reg |= GM_GPCR_FC_RX_DIS | GM_GPCR_AU_FCT_DIS; 1742 reg |= GM_GPCR_FC_RX_DIS | GM_GPCR_AU_FCT_DIS;
1837 } 1743 }
1838 1744
1839 skge_gma_write16(hw, port, GM_GP_CTRL, reg); 1745 gma_write16(hw, port, GM_GP_CTRL, reg);
1840 skge_read16(hw, GMAC_IRQ_SRC); 1746 skge_read16(hw, GMAC_IRQ_SRC);
1841 1747
1842 spin_lock_bh(&hw->phy_lock); 1748 spin_lock_bh(&hw->phy_lock);
@@ -1844,25 +1750,25 @@ static void yukon_mac_init(struct skge_hw *hw, int port)
1844 spin_unlock_bh(&hw->phy_lock); 1750 spin_unlock_bh(&hw->phy_lock);
1845 1751
1846 /* MIB clear */ 1752 /* MIB clear */
1847 reg = skge_gma_read16(hw, port, GM_PHY_ADDR); 1753 reg = gma_read16(hw, port, GM_PHY_ADDR);
1848 skge_gma_write16(hw, port, GM_PHY_ADDR, reg | GM_PAR_MIB_CLR); 1754 gma_write16(hw, port, GM_PHY_ADDR, reg | GM_PAR_MIB_CLR);
1849 1755
1850 for (i = 0; i < GM_MIB_CNT_SIZE; i++) 1756 for (i = 0; i < GM_MIB_CNT_SIZE; i++)
1851 skge_gma_read16(hw, port, GM_MIB_CNT_BASE + 8*i); 1757 gma_read16(hw, port, GM_MIB_CNT_BASE + 8*i);
1852 skge_gma_write16(hw, port, GM_PHY_ADDR, reg); 1758 gma_write16(hw, port, GM_PHY_ADDR, reg);
1853 1759
1854 /* transmit control */ 1760 /* transmit control */
1855 skge_gma_write16(hw, port, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF)); 1761 gma_write16(hw, port, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF));
1856 1762
1857 /* receive control reg: unicast + multicast + no FCS */ 1763 /* receive control reg: unicast + multicast + no FCS */
1858 skge_gma_write16(hw, port, GM_RX_CTRL, 1764 gma_write16(hw, port, GM_RX_CTRL,
1859 GM_RXCR_UCF_ENA | GM_RXCR_CRC_DIS | GM_RXCR_MCF_ENA); 1765 GM_RXCR_UCF_ENA | GM_RXCR_CRC_DIS | GM_RXCR_MCF_ENA);
1860 1766
1861 /* transmit flow control */ 1767 /* transmit flow control */
1862 skge_gma_write16(hw, port, GM_TX_FLOW_CTRL, 0xffff); 1768 gma_write16(hw, port, GM_TX_FLOW_CTRL, 0xffff);
1863 1769
1864 /* transmit parameter */ 1770 /* transmit parameter */
1865 skge_gma_write16(hw, port, GM_TX_PARAM, 1771 gma_write16(hw, port, GM_TX_PARAM,
1866 TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) | 1772 TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) |
1867 TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) | 1773 TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) |
1868 TX_IPG_JAM_DATA(TX_IPG_JAM_DEF)); 1774 TX_IPG_JAM_DATA(TX_IPG_JAM_DEF));
@@ -1872,33 +1778,33 @@ static void yukon_mac_init(struct skge_hw *hw, int port)
1872 if (hw->dev[port]->mtu > 1500) 1778 if (hw->dev[port]->mtu > 1500)
1873 reg |= GM_SMOD_JUMBO_ENA; 1779 reg |= GM_SMOD_JUMBO_ENA;
1874 1780
1875 skge_gma_write16(hw, port, GM_SERIAL_MODE, reg); 1781 gma_write16(hw, port, GM_SERIAL_MODE, reg);
1876 1782
1877 /* physical address: used for pause frames */ 1783 /* physical address: used for pause frames */
1878 skge_gm_set_addr(hw, port, GM_SRC_ADDR_1L, addr); 1784 gma_set_addr(hw, port, GM_SRC_ADDR_1L, addr);
1879 /* virtual address for data */ 1785 /* virtual address for data */
1880 skge_gm_set_addr(hw, port, GM_SRC_ADDR_2L, addr); 1786 gma_set_addr(hw, port, GM_SRC_ADDR_2L, addr);
1881 1787
1882 /* enable interrupt mask for counter overflows */ 1788 /* enable interrupt mask for counter overflows */
1883 skge_gma_write16(hw, port, GM_TX_IRQ_MSK, 0); 1789 gma_write16(hw, port, GM_TX_IRQ_MSK, 0);
1884 skge_gma_write16(hw, port, GM_RX_IRQ_MSK, 0); 1790 gma_write16(hw, port, GM_RX_IRQ_MSK, 0);
1885 skge_gma_write16(hw, port, GM_TR_IRQ_MSK, 0); 1791 gma_write16(hw, port, GM_TR_IRQ_MSK, 0);
1886 1792
1887 /* Initialize Mac Fifo */ 1793 /* Initialize Mac Fifo */
1888 1794
1889 /* Configure Rx MAC FIFO */ 1795 /* Configure Rx MAC FIFO */
1890 skge_write16(hw, SKGEMAC_REG(port, RX_GMF_FL_MSK), RX_FF_FL_DEF_MSK); 1796 skge_write16(hw, SK_REG(port, RX_GMF_FL_MSK), RX_FF_FL_DEF_MSK);
1891 reg = GMF_OPER_ON | GMF_RX_F_FL_ON; 1797 reg = GMF_OPER_ON | GMF_RX_F_FL_ON;
1892 if (hw->chip_id == CHIP_ID_YUKON_LITE && 1798 if (hw->chip_id == CHIP_ID_YUKON_LITE &&
1893 chip_rev(hw) == CHIP_REV_YU_LITE_A3) 1799 hw->chip_rev == CHIP_REV_YU_LITE_A3)
1894 reg &= ~GMF_RX_F_FL_ON; 1800 reg &= ~GMF_RX_F_FL_ON;
1895 skge_write8(hw, SKGEMAC_REG(port, RX_GMF_CTRL_T), GMF_RST_CLR); 1801 skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_CLR);
1896 skge_write16(hw, SKGEMAC_REG(port, RX_GMF_CTRL_T), reg); 1802 skge_write16(hw, SK_REG(port, RX_GMF_CTRL_T), reg);
1897 skge_write16(hw, SKGEMAC_REG(port, RX_GMF_FL_THR), RX_GMF_FL_THR_DEF); 1803 skge_write16(hw, SK_REG(port, RX_GMF_FL_THR), RX_GMF_FL_THR_DEF);
1898 1804
1899 /* Configure Tx MAC FIFO */ 1805 /* Configure Tx MAC FIFO */
1900 skge_write8(hw, SKGEMAC_REG(port, TX_GMF_CTRL_T), GMF_RST_CLR); 1806 skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_CLR);
1901 skge_write16(hw, SKGEMAC_REG(port, TX_GMF_CTRL_T), GMF_OPER_ON); 1807 skge_write16(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_OPER_ON);
1902} 1808}
1903 1809
1904static void yukon_stop(struct skge_port *skge) 1810static void yukon_stop(struct skge_port *skge)
@@ -1907,19 +1813,19 @@ static void yukon_stop(struct skge_port *skge)
1907 int port = skge->port; 1813 int port = skge->port;
1908 1814
1909 if (hw->chip_id == CHIP_ID_YUKON_LITE && 1815 if (hw->chip_id == CHIP_ID_YUKON_LITE &&
1910 chip_rev(hw) == CHIP_REV_YU_LITE_A3) { 1816 hw->chip_rev == CHIP_REV_YU_LITE_A3) {
1911 skge_write32(hw, B2_GP_IO, 1817 skge_write32(hw, B2_GP_IO,
1912 skge_read32(hw, B2_GP_IO) | GP_DIR_9 | GP_IO_9); 1818 skge_read32(hw, B2_GP_IO) | GP_DIR_9 | GP_IO_9);
1913 } 1819 }
1914 1820
1915 skge_gma_write16(hw, port, GM_GP_CTRL, 1821 gma_write16(hw, port, GM_GP_CTRL,
1916 skge_gma_read16(hw, port, GM_GP_CTRL) 1822 gma_read16(hw, port, GM_GP_CTRL)
1917 & ~(GM_GPCR_RX_ENA|GM_GPCR_TX_ENA)); 1823 & ~(GM_GPCR_RX_ENA|GM_GPCR_TX_ENA));
1918 skge_gma_read16(hw, port, GM_GP_CTRL); 1824 gma_read16(hw, port, GM_GP_CTRL);
1919 1825
1920 /* set GPHY Control reset */ 1826 /* set GPHY Control reset */
1921 skge_gma_write32(hw, port, GPHY_CTRL, GPC_RST_SET); 1827 gma_write32(hw, port, GPHY_CTRL, GPC_RST_SET);
1922 skge_gma_write32(hw, port, GMAC_CTRL, GMC_RST_SET); 1828 gma_write32(hw, port, GMAC_CTRL, GMC_RST_SET);
1923} 1829}
1924 1830
1925static void yukon_get_stats(struct skge_port *skge, u64 *data) 1831static void yukon_get_stats(struct skge_port *skge, u64 *data)
@@ -1928,39 +1834,40 @@ static void yukon_get_stats(struct skge_port *skge, u64 *data)
1928 int port = skge->port; 1834 int port = skge->port;
1929 int i; 1835 int i;
1930 1836
1931 data[0] = (u64) skge_gma_read32(hw, port, GM_TXO_OK_HI) << 32 1837 data[0] = (u64) gma_read32(hw, port, GM_TXO_OK_HI) << 32
1932 | skge_gma_read32(hw, port, GM_TXO_OK_LO); 1838 | gma_read32(hw, port, GM_TXO_OK_LO);
1933 data[1] = (u64) skge_gma_read32(hw, port, GM_RXO_OK_HI) << 32 1839 data[1] = (u64) gma_read32(hw, port, GM_RXO_OK_HI) << 32
1934 | skge_gma_read32(hw, port, GM_RXO_OK_LO); 1840 | gma_read32(hw, port, GM_RXO_OK_LO);
1935 1841
1936 for (i = 2; i < ARRAY_SIZE(skge_stats); i++) 1842 for (i = 2; i < ARRAY_SIZE(skge_stats); i++)
1937 data[i] = skge_gma_read32(hw, port, 1843 data[i] = gma_read32(hw, port,
1938 skge_stats[i].gma_offset); 1844 skge_stats[i].gma_offset);
1939} 1845}
1940 1846
1941static void yukon_mac_intr(struct skge_hw *hw, int port) 1847static void yukon_mac_intr(struct skge_hw *hw, int port)
1942{ 1848{
1943 struct skge_port *skge = netdev_priv(hw->dev[port]); 1849 struct net_device *dev = hw->dev[port];
1944 u8 status = skge_read8(hw, SKGEMAC_REG(port, GMAC_IRQ_SRC)); 1850 struct skge_port *skge = netdev_priv(dev);
1851 u8 status = skge_read8(hw, SK_REG(port, GMAC_IRQ_SRC));
1852
1853 if (netif_msg_intr(skge))
1854 printk(KERN_DEBUG PFX "%s: mac interrupt status 0x%x\n",
1855 dev->name, status);
1945 1856
1946 pr_debug("yukon_intr status %x\n", status);
1947 if (status & GM_IS_RX_FF_OR) { 1857 if (status & GM_IS_RX_FF_OR) {
1948 ++skge->net_stats.rx_fifo_errors; 1858 ++skge->net_stats.rx_fifo_errors;
1949 skge_gma_write8(hw, port, RX_GMF_CTRL_T, GMF_CLI_RX_FO); 1859 gma_write8(hw, port, RX_GMF_CTRL_T, GMF_CLI_RX_FO);
1950 } 1860 }
1951 if (status & GM_IS_TX_FF_UR) { 1861 if (status & GM_IS_TX_FF_UR) {
1952 ++skge->net_stats.tx_fifo_errors; 1862 ++skge->net_stats.tx_fifo_errors;
1953 skge_gma_write8(hw, port, TX_GMF_CTRL_T, GMF_CLI_TX_FU); 1863 gma_write8(hw, port, TX_GMF_CTRL_T, GMF_CLI_TX_FU);
1954 } 1864 }
1955 1865
1956} 1866}
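
The interrupt handlers above gate their KERN_DEBUG output on netif_msg_intr(skge) instead of unconditional pr_debug() calls, so users can switch message classes on per device. A sketch of the underlying idea, a per-device msg_enable bitmap tested before printing; the bit values are illustrative, not the real NETIF_MSG_* constants:

/*
 * Bitmap-gated debug output: only print when the relevant message
 * class is enabled for this device.
 */
#include <stdio.h>
#include <stdint.h>

#define MSG_INTR	(1u << 0)	/* placeholder for NETIF_MSG_INTR */
#define MSG_IFUP	(1u << 1)	/* placeholder for NETIF_MSG_IFUP */

struct fake_port { const char *name; uint32_t msg_enable; };

static void mac_intr(struct fake_port *p, uint8_t status)
{
	if (p->msg_enable & MSG_INTR)
		printf("%s: mac interrupt status 0x%x\n", p->name, status);
	/* ...handle FIFO overrun/underrun status bits here... */
}

int main(void)
{
	struct fake_port p = { .name = "eth0", .msg_enable = MSG_INTR };

	mac_intr(&p, 0x02);
	return 0;
}
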
1957 1867
1958static u16 yukon_speed(const struct skge_hw *hw, u16 aux) 1868static u16 yukon_speed(const struct skge_hw *hw, u16 aux)
1959{ 1869{
1960 if (hw->chip_id == CHIP_ID_YUKON_FE) 1870 switch (aux & PHY_M_PS_SPEED_MSK) {
1961 return (aux & PHY_M_PS_SPEED_100) ? SPEED_100 : SPEED_10;
1962
1963 switch(aux & PHY_M_PS_SPEED_MSK) {
1964 case PHY_M_PS_SPEED_1000: 1871 case PHY_M_PS_SPEED_1000:
1965 return SPEED_1000; 1872 return SPEED_1000;
1966 case PHY_M_PS_SPEED_100: 1873 case PHY_M_PS_SPEED_100:
@@ -1981,15 +1888,15 @@ static void yukon_link_up(struct skge_port *skge)
1981 /* Enable Transmit FIFO Underrun */ 1888 /* Enable Transmit FIFO Underrun */
1982 skge_write8(hw, GMAC_IRQ_MSK, GMAC_DEF_MSK); 1889 skge_write8(hw, GMAC_IRQ_MSK, GMAC_DEF_MSK);
1983 1890
1984 reg = skge_gma_read16(hw, port, GM_GP_CTRL); 1891 reg = gma_read16(hw, port, GM_GP_CTRL);
1985 if (skge->duplex == DUPLEX_FULL || skge->autoneg == AUTONEG_ENABLE) 1892 if (skge->duplex == DUPLEX_FULL || skge->autoneg == AUTONEG_ENABLE)
1986 reg |= GM_GPCR_DUP_FULL; 1893 reg |= GM_GPCR_DUP_FULL;
1987 1894
1988 /* enable Rx/Tx */ 1895 /* enable Rx/Tx */
1989 reg |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA; 1896 reg |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA;
1990 skge_gma_write16(hw, port, GM_GP_CTRL, reg); 1897 gma_write16(hw, port, GM_GP_CTRL, reg);
1991 1898
1992 skge_gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_DEF_MSK); 1899 gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_DEF_MSK);
1993 skge_link_up(skge); 1900 skge_link_up(skge);
1994} 1901}
1995 1902
@@ -1999,16 +1906,15 @@ static void yukon_link_down(struct skge_port *skge)
1999 int port = skge->port; 1906 int port = skge->port;
2000 1907
2001 pr_debug("yukon_link_down\n"); 1908 pr_debug("yukon_link_down\n");
2002 skge_gm_phy_write(hw, port, PHY_MARV_INT_MASK, 0); 1909 gm_phy_write(hw, port, PHY_MARV_INT_MASK, 0);
2003 skge_gm_phy_write(hw, port, GM_GP_CTRL, 1910 gm_phy_write(hw, port, GM_GP_CTRL,
2004 skge_gm_phy_read(hw, port, GM_GP_CTRL) 1911 gm_phy_read(hw, port, GM_GP_CTRL)
2005 & ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA)); 1912 & ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA));
2006 1913
2007 if (hw->chip_id != CHIP_ID_YUKON_FE && 1914 if (skge->flow_control == FLOW_MODE_REM_SEND) {
2008 skge->flow_control == FLOW_MODE_REM_SEND) {
2009 /* restore Asymmetric Pause bit */ 1915 /* restore Asymmetric Pause bit */
2010 skge_gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, 1916 gm_phy_write(hw, port, PHY_MARV_AUNE_ADV,
2011 skge_gm_phy_read(hw, port, 1917 gm_phy_read(hw, port,
2012 PHY_MARV_AUNE_ADV) 1918 PHY_MARV_AUNE_ADV)
2013 | PHY_M_AN_ASP); 1919 | PHY_M_AN_ASP);
2014 1920
@@ -2027,20 +1933,21 @@ static void yukon_phy_intr(struct skge_port *skge)
2027 const char *reason = NULL; 1933 const char *reason = NULL;
2028 u16 istatus, phystat; 1934 u16 istatus, phystat;
2029 1935
2030 istatus = skge_gm_phy_read(hw, port, PHY_MARV_INT_STAT); 1936 istatus = gm_phy_read(hw, port, PHY_MARV_INT_STAT);
2031 phystat = skge_gm_phy_read(hw, port, PHY_MARV_PHY_STAT); 1937 phystat = gm_phy_read(hw, port, PHY_MARV_PHY_STAT);
2032 pr_debug("yukon phy intr istat=%x phy_stat=%x\n", istatus, phystat); 1938
1939 if (netif_msg_intr(skge))
1940 printk(KERN_DEBUG PFX "%s: phy interrupt status 0x%x 0x%x\n",
1941 skge->netdev->name, istatus, phystat);
2033 1942
2034 if (istatus & PHY_M_IS_AN_COMPL) { 1943 if (istatus & PHY_M_IS_AN_COMPL) {
2035 if (skge_gm_phy_read(hw, port, PHY_MARV_AUNE_LP) 1944 if (gm_phy_read(hw, port, PHY_MARV_AUNE_LP)
2036 & PHY_M_AN_RF) { 1945 & PHY_M_AN_RF) {
2037 reason = "remote fault"; 1946 reason = "remote fault";
2038 goto failed; 1947 goto failed;
2039 } 1948 }
2040 1949
2041 if (!(hw->chip_id == CHIP_ID_YUKON_FE || hw->chip_id == CHIP_ID_YUKON_EC) 1950 if (gm_phy_read(hw, port, PHY_MARV_1000T_STAT) & PHY_B_1000S_MSF) {
2042 && (skge_gm_phy_read(hw, port, PHY_MARV_1000T_STAT)
2043 & PHY_B_1000S_MSF)) {
2044 reason = "master/slave fault"; 1951 reason = "master/slave fault";
2045 goto failed; 1952 goto failed;
2046 } 1953 }
@@ -2054,10 +1961,6 @@ static void yukon_phy_intr(struct skge_port *skge)
2054 ? DUPLEX_FULL : DUPLEX_HALF; 1961 ? DUPLEX_FULL : DUPLEX_HALF;
2055 skge->speed = yukon_speed(hw, phystat); 1962 skge->speed = yukon_speed(hw, phystat);
2056 1963
2057 /* Tx & Rx Pause Enabled bits are at 9..8 */
2058 if (hw->chip_id == CHIP_ID_YUKON_XL)
2059 phystat >>= 6;
2060
2061 /* We are using IEEE 802.3z/D5.0 Table 37-4 */ 1964 /* We are using IEEE 802.3z/D5.0 Table 37-4 */
2062 switch (phystat & PHY_M_PS_PAUSE_MSK) { 1965 switch (phystat & PHY_M_PS_PAUSE_MSK) {
2063 case PHY_M_PS_PAUSE_MSK: 1966 case PHY_M_PS_PAUSE_MSK:
@@ -2075,9 +1978,9 @@ static void yukon_phy_intr(struct skge_port *skge)
2075 1978
2076 if (skge->flow_control == FLOW_MODE_NONE || 1979 if (skge->flow_control == FLOW_MODE_NONE ||
2077 (skge->speed < SPEED_1000 && skge->duplex == DUPLEX_HALF)) 1980 (skge->speed < SPEED_1000 && skge->duplex == DUPLEX_HALF))
2078 skge_write8(hw, SKGEMAC_REG(port, GMAC_CTRL), GMC_PAUSE_OFF); 1981 skge_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
2079 else 1982 else
2080 skge_write8(hw, SKGEMAC_REG(port, GMAC_CTRL), GMC_PAUSE_ON); 1983 skge_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON);
2081 yukon_link_up(skge); 1984 yukon_link_up(skge);
2082 return; 1985 return;
2083 } 1986 }
@@ -2161,6 +2064,12 @@ static int skge_up(struct net_device *dev)
2161 if (netif_msg_ifup(skge)) 2064 if (netif_msg_ifup(skge))
2162 printk(KERN_INFO PFX "%s: enabling interface\n", dev->name); 2065 printk(KERN_INFO PFX "%s: enabling interface\n", dev->name);
2163 2066
2067 if (dev->mtu > RX_BUF_SIZE)
2068 skge->rx_buf_size = dev->mtu + ETH_HLEN + NET_IP_ALIGN;
2069 else
2070 skge->rx_buf_size = RX_BUF_SIZE;
2071
2072
2164 rx_size = skge->rx_ring.count * sizeof(struct skge_rx_desc); 2073 rx_size = skge->rx_ring.count * sizeof(struct skge_rx_desc);
2165 tx_size = skge->tx_ring.count * sizeof(struct skge_tx_desc); 2074 tx_size = skge->tx_ring.count * sizeof(struct skge_tx_desc);
2166 skge->mem_size = tx_size + rx_size; 2075 skge->mem_size = tx_size + rx_size;
@@ -2173,7 +2082,8 @@ static int skge_up(struct net_device *dev)
2173 if ((err = skge_ring_alloc(&skge->rx_ring, skge->mem, skge->dma))) 2082 if ((err = skge_ring_alloc(&skge->rx_ring, skge->mem, skge->dma)))
2174 goto free_pci_mem; 2083 goto free_pci_mem;
2175 2084
2176 if (skge_rx_fill(skge)) 2085 err = skge_rx_fill(skge);
2086 if (err)
2177 goto free_rx_ring; 2087 goto free_rx_ring;
2178 2088
2179 if ((err = skge_ring_alloc(&skge->tx_ring, skge->mem + rx_size, 2089 if ((err = skge_ring_alloc(&skge->tx_ring, skge->mem + rx_size,
@@ -2182,6 +2092,10 @@ static int skge_up(struct net_device *dev)
2182 2092
2183 skge->tx_avail = skge->tx_ring.count - 1; 2093 skge->tx_avail = skge->tx_ring.count - 1;
2184 2094
2095 /* Enable IRQ from port */
2096 hw->intr_mask |= portirqmask[port];
2097 skge_write32(hw, B0_IMSK, hw->intr_mask);
2098
2185 /* Initialize MAC */ 2099 /* Initialize MAC */
2186 if (hw->chip_id == CHIP_ID_GENESIS) 2100 if (hw->chip_id == CHIP_ID_GENESIS)
2187 genesis_mac_init(hw, port); 2101 genesis_mac_init(hw, port);
@@ -2189,7 +2103,7 @@ static int skge_up(struct net_device *dev)
2189 yukon_mac_init(hw, port); 2103 yukon_mac_init(hw, port);
2190 2104
2191 /* Configure RAMbuffers */ 2105 /* Configure RAMbuffers */
2192 chunk = hw->ram_size / (isdualport(hw) ? 4 : 2); 2106 chunk = hw->ram_size / ((hw->ports + 1)*2);
2193 ram_addr = hw->ram_offset + 2 * chunk * port; 2107 ram_addr = hw->ram_offset + 2 * chunk * port;
2194 2108
2195 skge_ramset(hw, rxqaddr[port], ram_addr, chunk); 2109 skge_ramset(hw, rxqaddr[port], ram_addr, chunk);
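
skge_up() above carves the adapter's packet RAM into per-queue chunks: chunk = ram_size / ((ports + 1) * 2), with each port's RX area starting at ram_offset + 2 * chunk * port. A quick arithmetic sketch of that partitioning; placing the TX chunk directly after the RX chunk is an assumption here, since the TX ramset call is outside this hunk:

/*
 * Packet-RAM partitioning arithmetic as used by skge_up().
 * The tx_addr = rx_addr + chunk layout is assumed for illustration.
 */
#include <stdio.h>

int main(void)
{
	unsigned long ram_size = 1024 * 1024;	/* example: 1 MB of packet RAM */
	unsigned long ram_offset = 0;
	int ports, port;

	for (ports = 1; ports <= 2; ports++) {
		unsigned long chunk = ram_size / ((ports + 1) * 2);

		for (port = 0; port < ports; port++) {
			unsigned long rx_addr = ram_offset + 2 * chunk * port;
			unsigned long tx_addr = rx_addr + chunk;	/* assumed */

			printf("%d port(s): port %d rx@%lu tx@%lu size %lu\n",
			       ports, port, rx_addr, tx_addr, chunk);
		}
	}
	return 0;
}
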
@@ -2227,7 +2141,6 @@ static int skge_down(struct net_device *dev)
2227 netif_stop_queue(dev); 2141 netif_stop_queue(dev);
2228 2142
2229 del_timer_sync(&skge->led_blink); 2143 del_timer_sync(&skge->led_blink);
2230 del_timer_sync(&skge->link_check);
2231 2144
2232 /* Stop transmitter */ 2145 /* Stop transmitter */
2233 skge_write8(hw, Q_ADDR(txqaddr[port], Q_CSR), CSR_STOP); 2146 skge_write8(hw, Q_ADDR(txqaddr[port], Q_CSR), CSR_STOP);
@@ -2240,12 +2153,12 @@ static int skge_down(struct net_device *dev)
2240 yukon_stop(skge); 2153 yukon_stop(skge);
2241 2154
2242 /* Disable Force Sync bit and Enable Alloc bit */ 2155 /* Disable Force Sync bit and Enable Alloc bit */
2243 skge_write8(hw, SKGEMAC_REG(port, TXA_CTRL), 2156 skge_write8(hw, SK_REG(port, TXA_CTRL),
2244 TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC); 2157 TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC);
2245 2158
2246 /* Stop Interval Timer and Limit Counter of Tx Arbiter */ 2159 /* Stop Interval Timer and Limit Counter of Tx Arbiter */
2247 skge_write32(hw, SKGEMAC_REG(port, TXA_ITI_INI), 0L); 2160 skge_write32(hw, SK_REG(port, TXA_ITI_INI), 0L);
2248 skge_write32(hw, SKGEMAC_REG(port, TXA_LIM_INI), 0L); 2161 skge_write32(hw, SK_REG(port, TXA_LIM_INI), 0L);
2249 2162
2250 /* Reset PCI FIFO */ 2163 /* Reset PCI FIFO */
2251 skge_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), CSR_SET_RESET); 2164 skge_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), CSR_SET_RESET);
@@ -2260,13 +2173,13 @@ static int skge_down(struct net_device *dev)
2260 skge_write32(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_SET_RESET); 2173 skge_write32(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_SET_RESET);
2261 2174
2262 if (hw->chip_id == CHIP_ID_GENESIS) { 2175 if (hw->chip_id == CHIP_ID_GENESIS) {
2263 skge_write8(hw, SKGEMAC_REG(port, TX_MFF_CTRL2), MFF_RST_SET); 2176 skge_write8(hw, SK_REG(port, TX_MFF_CTRL2), MFF_RST_SET);
2264 skge_write8(hw, SKGEMAC_REG(port, RX_MFF_CTRL2), MFF_RST_SET); 2177 skge_write8(hw, SK_REG(port, RX_MFF_CTRL2), MFF_RST_SET);
2265 skge_write8(hw, SKGEMAC_REG(port, TX_LED_CTRL), LED_STOP); 2178 skge_write8(hw, SK_REG(port, TX_LED_CTRL), LED_STOP);
2266 skge_write8(hw, SKGEMAC_REG(port, RX_LED_CTRL), LED_STOP); 2179 skge_write8(hw, SK_REG(port, RX_LED_CTRL), LED_STOP);
2267 } else { 2180 } else {
2268 skge_write8(hw, SKGEMAC_REG(port, RX_GMF_CTRL_T), GMF_RST_SET); 2181 skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET);
2269 skge_write8(hw, SKGEMAC_REG(port, TX_GMF_CTRL_T), GMF_RST_SET); 2182 skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_SET);
2270 } 2183 }
2271 2184
2272 /* turn off LEDs */ 2185 /* turn off LEDs */
@@ -2299,10 +2212,10 @@ static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev)
2299 2212
2300 local_irq_save(flags); 2213 local_irq_save(flags);
2301 if (!spin_trylock(&skge->tx_lock)) { 2214 if (!spin_trylock(&skge->tx_lock)) {
2302 /* Collision - tell upper layer to requeue */ 2215 /* Collision - tell upper layer to requeue */
2303 local_irq_restore(flags); 2216 local_irq_restore(flags);
2304 return NETDEV_TX_LOCKED; 2217 return NETDEV_TX_LOCKED;
2305 } 2218 }
2306 2219
2307 if (unlikely(skge->tx_avail < skb_shinfo(skb)->nr_frags +1)) { 2220 if (unlikely(skge->tx_avail < skb_shinfo(skb)->nr_frags +1)) {
2308 netif_stop_queue(dev); 2221 netif_stop_queue(dev);
@@ -2333,7 +2246,7 @@ static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev)
2333 * does. Looks like hardware is wrong? 2246 * does. Looks like hardware is wrong?
2334 */ 2247 */
2335 if (ip->protocol == IPPROTO_UDP 2248 if (ip->protocol == IPPROTO_UDP
2336 && chip_rev(hw) == 0 && hw->chip_id == CHIP_ID_YUKON) 2249 && hw->chip_rev == 0 && hw->chip_id == CHIP_ID_YUKON)
2337 control = BMU_TCP_CHECK; 2250 control = BMU_TCP_CHECK;
2338 else 2251 else
2339 control = BMU_UDP_CHECK; 2252 control = BMU_UDP_CHECK;
@@ -2394,6 +2307,7 @@ static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev)
2394 2307
2395static inline void skge_tx_free(struct skge_hw *hw, struct skge_element *e) 2308static inline void skge_tx_free(struct skge_hw *hw, struct skge_element *e)
2396{ 2309{
2310 /* This ring element can be an skb or a fragment */
2397 if (e->skb) { 2311 if (e->skb) {
2398 pci_unmap_single(hw->pdev, 2312 pci_unmap_single(hw->pdev,
2399 pci_unmap_addr(e, mapaddr), 2313 pci_unmap_addr(e, mapaddr),
@@ -2438,16 +2352,17 @@ static void skge_tx_timeout(struct net_device *dev)
2438static int skge_change_mtu(struct net_device *dev, int new_mtu) 2352static int skge_change_mtu(struct net_device *dev, int new_mtu)
2439{ 2353{
2440 int err = 0; 2354 int err = 0;
2355 int running = netif_running(dev);
2441 2356
2442 if(new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU) 2357 if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU)
2443 return -EINVAL; 2358 return -EINVAL;
2444 2359
2445 dev->mtu = new_mtu;
2446 2360
2447 if (netif_running(dev)) { 2361 if (running)
2448 skge_down(dev); 2362 skge_down(dev);
2363 dev->mtu = new_mtu;
2364 if (running)
2449 skge_up(dev); 2365 skge_up(dev);
2450 }
2451 2366
2452 return err; 2367 return err;
2453} 2368}
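
For readability, the new (right-hand) column of this hunk reads roughly as the skge_change_mtu() below: the port is torn down only if it was already running, dev->mtu is changed while the rings are freed, and the port is brought back up so its receive buffers are sized for the new MTU.

	static int skge_change_mtu(struct net_device *dev, int new_mtu)
	{
		int err = 0;
		int running = netif_running(dev);

		if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU)
			return -EINVAL;

		if (running)
			skge_down(dev);		/* frees rings sized for the old MTU */
		dev->mtu = new_mtu;
		if (running)
			skge_up(dev);		/* reallocates rings for the new MTU */

		return err;
	}
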
@@ -2462,7 +2377,9 @@ static void genesis_set_multicast(struct net_device *dev)
2462 u32 mode; 2377 u32 mode;
2463 u8 filter[8]; 2378 u8 filter[8];
2464 2379
2465 mode = skge_xm_read32(hw, port, XM_MODE); 2380 pr_debug("genesis_set_multicast flags=%x count=%d\n", dev->flags, dev->mc_count);
2381
2382 mode = xm_read32(hw, port, XM_MODE);
2466 mode |= XM_MD_ENA_HASH; 2383 mode |= XM_MD_ENA_HASH;
2467 if (dev->flags & IFF_PROMISC) 2384 if (dev->flags & IFF_PROMISC)
2468 mode |= XM_MD_ENA_PROM; 2385 mode |= XM_MD_ENA_PROM;
@@ -2473,17 +2390,16 @@ static void genesis_set_multicast(struct net_device *dev)
2473 memset(filter, 0xff, sizeof(filter)); 2390 memset(filter, 0xff, sizeof(filter));
2474 else { 2391 else {
2475 memset(filter, 0, sizeof(filter)); 2392 memset(filter, 0, sizeof(filter));
2476 for(i = 0; list && i < count; i++, list = list->next) { 2393 for (i = 0; list && i < count; i++, list = list->next) {
2477 u32 crc = crc32_le(~0, list->dmi_addr, ETH_ALEN); 2394 u32 crc, bit;
2478 u8 bit = 63 - (crc & 63); 2395 crc = ether_crc_le(ETH_ALEN, list->dmi_addr);
2479 2396 bit = ~crc & 0x3f;
2480 filter[bit/8] |= 1 << (bit%8); 2397 filter[bit/8] |= 1 << (bit%8);
2481 } 2398 }
2482 } 2399 }
2483 2400
2484 skge_xm_outhash(hw, port, XM_HSM, filter); 2401 xm_write32(hw, port, XM_MODE, mode);
2485 2402 xm_outhash(hw, port, XM_HSM, filter);
2486 skge_xm_write32(hw, port, XM_MODE, mode);
2487} 2403}
2488 2404
2489static void yukon_set_multicast(struct net_device *dev) 2405static void yukon_set_multicast(struct net_device *dev)
@@ -2497,7 +2413,7 @@ static void yukon_set_multicast(struct net_device *dev)
2497 2413
2498 memset(filter, 0, sizeof(filter)); 2414 memset(filter, 0, sizeof(filter));
2499 2415
2500 reg = skge_gma_read16(hw, port, GM_RX_CTRL); 2416 reg = gma_read16(hw, port, GM_RX_CTRL);
2501 reg |= GM_RXCR_UCF_ENA; 2417 reg |= GM_RXCR_UCF_ENA;
2502 2418
 2503 if (dev->flags & IFF_PROMISC) /* promiscuous */ 2419 if (dev->flags & IFF_PROMISC) /* promiscuous */
@@ -2510,23 +2426,23 @@ static void yukon_set_multicast(struct net_device *dev)
2510 int i; 2426 int i;
2511 reg |= GM_RXCR_MCF_ENA; 2427 reg |= GM_RXCR_MCF_ENA;
2512 2428
2513 for(i = 0; list && i < dev->mc_count; i++, list = list->next) { 2429 for (i = 0; list && i < dev->mc_count; i++, list = list->next) {
2514 u32 bit = ether_crc(ETH_ALEN, list->dmi_addr) & 0x3f; 2430 u32 bit = ether_crc(ETH_ALEN, list->dmi_addr) & 0x3f;
2515 filter[bit/8] |= 1 << (bit%8); 2431 filter[bit/8] |= 1 << (bit%8);
2516 } 2432 }
2517 } 2433 }
2518 2434
2519 2435
2520 skge_gma_write16(hw, port, GM_MC_ADDR_H1, 2436 gma_write16(hw, port, GM_MC_ADDR_H1,
2521 (u16)filter[0] | ((u16)filter[1] << 8)); 2437 (u16)filter[0] | ((u16)filter[1] << 8));
2522 skge_gma_write16(hw, port, GM_MC_ADDR_H2, 2438 gma_write16(hw, port, GM_MC_ADDR_H2,
2523 (u16)filter[2] | ((u16)filter[3] << 8)); 2439 (u16)filter[2] | ((u16)filter[3] << 8));
2524 skge_gma_write16(hw, port, GM_MC_ADDR_H3, 2440 gma_write16(hw, port, GM_MC_ADDR_H3,
2525 (u16)filter[4] | ((u16)filter[5] << 8)); 2441 (u16)filter[4] | ((u16)filter[5] << 8));
2526 skge_gma_write16(hw, port, GM_MC_ADDR_H4, 2442 gma_write16(hw, port, GM_MC_ADDR_H4,
2527 (u16)filter[6] | ((u16)filter[7] << 8)); 2443 (u16)filter[6] | ((u16)filter[7] << 8));
2528 2444
2529 skge_gma_write16(hw, port, GM_RX_CTRL, reg); 2445 gma_write16(hw, port, GM_RX_CTRL, reg);
2530} 2446}
2531 2447
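
Both multicast paths above program a 64-entry hash filter: each multicast address is CRC-hashed and the low six bits of the CRC select one of 64 filter bits, stored as bit N%8 of byte N/8. The XMAC (Genesis) loop now uses ether_crc_le() and complements the bits, while the GMAC (YukON/Yukon) loop uses the big-endian ether_crc() directly. A minimal sketch of the two bit selections, using only the kernel CRC helpers; the function names here are illustrative, not skge symbols:

	#include <linux/types.h>
	#include <linux/crc32.h>	/* ether_crc(), ether_crc_le() */
	#include <linux/if_ether.h>	/* ETH_ALEN */

	/* XMAC/Genesis: complement of the low 6 bits of the little-endian CRC */
	static void genesis_hash_set(u8 filter[8], const u8 *addr)
	{
		u32 bit = ~ether_crc_le(ETH_ALEN, addr) & 0x3f;

		filter[bit / 8] |= 1 << (bit % 8);
	}

	/* GMAC/Yukon: low 6 bits of the big-endian CRC */
	static void yukon_hash_set(u8 filter[8], const u8 *addr)
	{
		u32 bit = ether_crc(ETH_ALEN, addr) & 0x3f;

		filter[bit / 8] |= 1 << (bit % 8);
	}
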
2532static inline int bad_phy_status(const struct skge_hw *hw, u32 status) 2448static inline int bad_phy_status(const struct skge_hw *hw, u32 status)
@@ -2545,28 +2461,76 @@ static void skge_rx_error(struct skge_port *skge, int slot,
2545 printk(KERN_DEBUG PFX "%s: rx err, slot %d control 0x%x status 0x%x\n", 2461 printk(KERN_DEBUG PFX "%s: rx err, slot %d control 0x%x status 0x%x\n",
2546 skge->netdev->name, slot, control, status); 2462 skge->netdev->name, slot, control, status);
2547 2463
2548 if ((control & (BMU_EOF|BMU_STF)) != (BMU_STF|BMU_EOF) 2464 if ((control & (BMU_EOF|BMU_STF)) != (BMU_STF|BMU_EOF))
2549 || (control & BMU_BBC) > skge->netdev->mtu + VLAN_ETH_HLEN)
2550 skge->net_stats.rx_length_errors++; 2465 skge->net_stats.rx_length_errors++;
2551 else { 2466 else if (skge->hw->chip_id == CHIP_ID_GENESIS) {
2552 if (skge->hw->chip_id == CHIP_ID_GENESIS) { 2467 if (status & (XMR_FS_RUNT|XMR_FS_LNG_ERR))
2553 if (status & (XMR_FS_RUNT|XMR_FS_LNG_ERR)) 2468 skge->net_stats.rx_length_errors++;
2554 skge->net_stats.rx_length_errors++; 2469 if (status & XMR_FS_FRA_ERR)
2555 if (status & XMR_FS_FRA_ERR) 2470 skge->net_stats.rx_frame_errors++;
2556 skge->net_stats.rx_frame_errors++; 2471 if (status & XMR_FS_FCS_ERR)
2557 if (status & XMR_FS_FCS_ERR) 2472 skge->net_stats.rx_crc_errors++;
2558 skge->net_stats.rx_crc_errors++; 2473 } else {
2559 } else { 2474 if (status & (GMR_FS_LONG_ERR|GMR_FS_UN_SIZE))
2560 if (status & (GMR_FS_LONG_ERR|GMR_FS_UN_SIZE)) 2475 skge->net_stats.rx_length_errors++;
2561 skge->net_stats.rx_length_errors++; 2476 if (status & GMR_FS_FRAGMENT)
2562 if (status & GMR_FS_FRAGMENT) 2477 skge->net_stats.rx_frame_errors++;
2563 skge->net_stats.rx_frame_errors++; 2478 if (status & GMR_FS_CRC_ERR)
2564 if (status & GMR_FS_CRC_ERR) 2479 skge->net_stats.rx_crc_errors++;
2565 skge->net_stats.rx_crc_errors++; 2480 }
2481}
2482
2483/* Get receive buffer from descriptor.
2484 * Handles copy of small buffers and reallocation failures
2485 */
2486static inline struct sk_buff *skge_rx_get(struct skge_port *skge,
2487 struct skge_element *e,
2488 unsigned int len)
2489{
2490 struct sk_buff *nskb, *skb;
2491
2492 if (len < RX_COPY_THRESHOLD) {
2493 nskb = skge_rx_alloc(skge->netdev, len + NET_IP_ALIGN);
2494 if (unlikely(!nskb))
2495 return NULL;
2496
2497 pci_dma_sync_single_for_cpu(skge->hw->pdev,
2498 pci_unmap_addr(e, mapaddr),
2499 len, PCI_DMA_FROMDEVICE);
2500 memcpy(nskb->data, e->skb->data, len);
2501 pci_dma_sync_single_for_device(skge->hw->pdev,
2502 pci_unmap_addr(e, mapaddr),
2503 len, PCI_DMA_FROMDEVICE);
2504
2505 if (skge->rx_csum) {
2506 struct skge_rx_desc *rd = e->desc;
2507 nskb->csum = le16_to_cpu(rd->csum2);
2508 nskb->ip_summed = CHECKSUM_HW;
2566 } 2509 }
2510 skge_rx_reuse(e, skge->rx_buf_size);
2511 return nskb;
2512 } else {
2513 nskb = skge_rx_alloc(skge->netdev, skge->rx_buf_size);
2514 if (unlikely(!nskb))
2515 return NULL;
2516
2517 pci_unmap_single(skge->hw->pdev,
2518 pci_unmap_addr(e, mapaddr),
2519 pci_unmap_len(e, maplen),
2520 PCI_DMA_FROMDEVICE);
2521 skb = e->skb;
2522 if (skge->rx_csum) {
2523 struct skge_rx_desc *rd = e->desc;
2524 skb->csum = le16_to_cpu(rd->csum2);
2525 skb->ip_summed = CHECKSUM_HW;
2526 }
2527
2528 skge_rx_setup(skge, e, nskb, skge->rx_buf_size);
2529 return skb;
2567 } 2530 }
2568} 2531}
2569 2532
2533
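
The new skge_rx_get() above implements copy-break on receive: frames shorter than RX_COPY_THRESHOLD are copied into a freshly allocated small skb so the original DMA buffer stays mapped and is recycled onto the ring, while larger frames are handed up as-is and the ring slot gets a brand-new full-sized buffer. A condensed restatement of that decision follows (helper name is illustrative, checksum handling omitted; skge_rx_alloc/skge_rx_reuse/skge_rx_setup and the unmap bookkeeping are the driver's own, so this fragment only builds inside skge.c):

	static struct sk_buff *rx_copybreak(struct skge_port *skge,
					    struct skge_element *e,
					    unsigned int len)
	{
		struct sk_buff *skb, *nskb;

		if (len < RX_COPY_THRESHOLD) {
			/* Small frame: copy into a right-sized skb and put the
			 * still-mapped ring buffer straight back on the ring. */
			nskb = skge_rx_alloc(skge->netdev, len + NET_IP_ALIGN);
			if (unlikely(!nskb))
				return NULL;	/* caller recycles the old buffer */

			pci_dma_sync_single_for_cpu(skge->hw->pdev,
						    pci_unmap_addr(e, mapaddr),
						    len, PCI_DMA_FROMDEVICE);
			memcpy(nskb->data, e->skb->data, len);
			pci_dma_sync_single_for_device(skge->hw->pdev,
						       pci_unmap_addr(e, mapaddr),
						       len, PCI_DMA_FROMDEVICE);
			skge_rx_reuse(e, skge->rx_buf_size);
			return nskb;
		}

		/* Large frame: unmap and hand the original buffer up, then give
		 * the ring slot a fresh full-sized replacement. */
		nskb = skge_rx_alloc(skge->netdev, skge->rx_buf_size);
		if (unlikely(!nskb))
			return NULL;

		pci_unmap_single(skge->hw->pdev,
				 pci_unmap_addr(e, mapaddr),
				 pci_unmap_len(e, maplen),
				 PCI_DMA_FROMDEVICE);
		skb = e->skb;
		skge_rx_setup(skge, e, nskb, skge->rx_buf_size);
		return skb;
	}
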
2570static int skge_poll(struct net_device *dev, int *budget) 2534static int skge_poll(struct net_device *dev, int *budget)
2571{ 2535{
2572 struct skge_port *skge = netdev_priv(dev); 2536 struct skge_port *skge = netdev_priv(dev);
@@ -2575,13 +2539,12 @@ static int skge_poll(struct net_device *dev, int *budget)
2575 struct skge_element *e; 2539 struct skge_element *e;
2576 unsigned int to_do = min(dev->quota, *budget); 2540 unsigned int to_do = min(dev->quota, *budget);
2577 unsigned int work_done = 0; 2541 unsigned int work_done = 0;
2578 int done;
2579 static const u32 irqmask[] = { IS_PORT_1, IS_PORT_2 };
2580 2542
2581 for (e = ring->to_clean; e != ring->to_use && work_done < to_do; 2543 pr_debug("skge_poll\n");
2582 e = e->next) { 2544
2545 for (e = ring->to_clean; work_done < to_do; e = e->next) {
2583 struct skge_rx_desc *rd = e->desc; 2546 struct skge_rx_desc *rd = e->desc;
2584 struct sk_buff *skb = e->skb; 2547 struct sk_buff *skb;
2585 u32 control, len, status; 2548 u32 control, len, status;
2586 2549
2587 rmb(); 2550 rmb();
@@ -2590,19 +2553,12 @@ static int skge_poll(struct net_device *dev, int *budget)
2590 break; 2553 break;
2591 2554
2592 len = control & BMU_BBC; 2555 len = control & BMU_BBC;
2593 e->skb = NULL;
2594
2595 pci_unmap_single(hw->pdev,
2596 pci_unmap_addr(e, mapaddr),
2597 pci_unmap_len(e, maplen),
2598 PCI_DMA_FROMDEVICE);
2599
2600 status = rd->status; 2556 status = rd->status;
2601 if ((control & (BMU_EOF|BMU_STF)) != (BMU_STF|BMU_EOF) 2557
2602 || len > dev->mtu + VLAN_ETH_HLEN 2558 if (unlikely((control & (BMU_EOF|BMU_STF)) != (BMU_STF|BMU_EOF)
2603 || bad_phy_status(hw, status)) { 2559 || bad_phy_status(hw, status))) {
2604 skge_rx_error(skge, e - ring->start, control, status); 2560 skge_rx_error(skge, e - ring->start, control, status);
2605 dev_kfree_skb(skb); 2561 skge_rx_reuse(e, skge->rx_buf_size);
2606 continue; 2562 continue;
2607 } 2563 }
2608 2564
@@ -2610,43 +2566,37 @@ static int skge_poll(struct net_device *dev, int *budget)
2610 printk(KERN_DEBUG PFX "%s: rx slot %td status 0x%x len %d\n", 2566 printk(KERN_DEBUG PFX "%s: rx slot %td status 0x%x len %d\n",
2611 dev->name, e - ring->start, rd->status, len); 2567 dev->name, e - ring->start, rd->status, len);
2612 2568
2613 skb_put(skb, len); 2569 skb = skge_rx_get(skge, e, len);
2614 skb->protocol = eth_type_trans(skb, dev); 2570 if (likely(skb)) {
2615 2571 skb_put(skb, len);
2616 if (skge->rx_csum) { 2572 skb->protocol = eth_type_trans(skb, dev);
2617 skb->csum = le16_to_cpu(rd->csum2);
2618 skb->ip_summed = CHECKSUM_HW;
2619 }
2620 2573
2621 dev->last_rx = jiffies; 2574 dev->last_rx = jiffies;
2622 netif_receive_skb(skb); 2575 netif_receive_skb(skb);
2623 2576
2624 ++work_done; 2577 ++work_done;
2578 } else
2579 skge_rx_reuse(e, skge->rx_buf_size);
2625 } 2580 }
2626 ring->to_clean = e; 2581 ring->to_clean = e;
2627 2582
2628 *budget -= work_done;
2629 dev->quota -= work_done;
2630 done = work_done < to_do;
2631
2632 if (skge_rx_fill(skge))
2633 done = 0;
2634
2635 /* restart receiver */ 2583 /* restart receiver */
2636 wmb(); 2584 wmb();
2637 skge_write8(hw, Q_ADDR(rxqaddr[skge->port], Q_CSR), 2585 skge_write8(hw, Q_ADDR(rxqaddr[skge->port], Q_CSR),
2638 CSR_START | CSR_IRQ_CL_F); 2586 CSR_START | CSR_IRQ_CL_F);
2639 2587
2640 if (done) { 2588 *budget -= work_done;
2641 local_irq_disable(); 2589 dev->quota -= work_done;
2642 hw->intr_mask |= irqmask[skge->port];
2643 /* Order is important since data can get interrupted */
2644 skge_write32(hw, B0_IMSK, hw->intr_mask);
2645 __netif_rx_complete(dev);
2646 local_irq_enable();
2647 }
2648 2590
2649 return !done; 2591 if (work_done >= to_do)
2592 return 1; /* not done */
2593
2594 local_irq_disable();
2595 __netif_rx_complete(dev);
2596 hw->intr_mask |= portirqmask[skge->port];
2597 skge_write32(hw, B0_IMSK, hw->intr_mask);
2598 local_irq_enable();
2599 return 0;
2650} 2600}
2651 2601
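
The exit path of skge_poll() above now follows the standard ->poll() contract of this kernel's NAPI: account the completed work against *budget and dev->quota, return 1 while the quota was exhausted so the core keeps polling, and otherwise complete the poll and re-enable only this port's receive interrupt (via the per-port portirqmask[] table). A minimal sketch of that contract for a hypothetical driver; foo_rx_clean() and foo_enable_rx_irq() are placeholders, not skge functions:

	#include <linux/kernel.h>
	#include <linux/netdevice.h>
	#include <linux/interrupt.h>

	unsigned int foo_rx_clean(struct net_device *dev, int quota);	/* deliver up to quota frames */
	void foo_enable_rx_irq(struct net_device *dev);			/* unmask this port's rx interrupt */

	static int foo_poll(struct net_device *dev, int *budget)
	{
		int to_do = min(dev->quota, *budget);
		int work_done = foo_rx_clean(dev, to_do);

		*budget -= work_done;
		dev->quota -= work_done;

		if (work_done >= to_do)
			return 1;	/* quota used up: core will poll again */

		/* Ring drained: leave polling mode, then re-arm the interrupt. */
		local_irq_disable();
		__netif_rx_complete(dev);
		foo_enable_rx_irq(dev);
		local_irq_enable();
		return 0;
	}
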
2652static inline void skge_tx_intr(struct net_device *dev) 2602static inline void skge_tx_intr(struct net_device *dev)
@@ -2657,7 +2607,7 @@ static inline void skge_tx_intr(struct net_device *dev)
2657 struct skge_element *e; 2607 struct skge_element *e;
2658 2608
2659 spin_lock(&skge->tx_lock); 2609 spin_lock(&skge->tx_lock);
2660 for(e = ring->to_clean; e != ring->to_use; e = e->next) { 2610 for (e = ring->to_clean; e != ring->to_use; e = e->next) {
2661 struct skge_tx_desc *td = e->desc; 2611 struct skge_tx_desc *td = e->desc;
2662 u32 control; 2612 u32 control;
2663 2613
@@ -2690,12 +2640,12 @@ static void skge_mac_parity(struct skge_hw *hw, int port)
2690 : (port == 0 ? "(port A)": "(port B")); 2640 : (port == 0 ? "(port A)": "(port B"));
2691 2641
2692 if (hw->chip_id == CHIP_ID_GENESIS) 2642 if (hw->chip_id == CHIP_ID_GENESIS)
2693 skge_write16(hw, SKGEMAC_REG(port, TX_MFF_CTRL1), 2643 skge_write16(hw, SK_REG(port, TX_MFF_CTRL1),
2694 MFF_CLR_PERR); 2644 MFF_CLR_PERR);
2695 else 2645 else
2696 /* HW-Bug #8: cleared by GMF_CLI_TX_FC instead of GMF_CLI_TX_PE */ 2646 /* HW-Bug #8: cleared by GMF_CLI_TX_FC instead of GMF_CLI_TX_PE */
2697 skge_write8(hw, SKGEMAC_REG(port, TX_GMF_CTRL_T), 2647 skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T),
2698 (hw->chip_id == CHIP_ID_YUKON && chip_rev(hw) == 0) 2648 (hw->chip_id == CHIP_ID_YUKON && hw->chip_rev == 0)
2699 ? GMF_CLI_TX_FC : GMF_CLI_TX_PE); 2649 ? GMF_CLI_TX_FC : GMF_CLI_TX_PE);
2700} 2650}
2701 2651
@@ -2703,16 +2653,16 @@ static void skge_pci_clear(struct skge_hw *hw)
2703{ 2653{
2704 u16 status; 2654 u16 status;
2705 2655
2706 status = skge_read16(hw, SKGEPCI_REG(PCI_STATUS)); 2656 pci_read_config_word(hw->pdev, PCI_STATUS, &status);
2707 skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); 2657 skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
2708 skge_write16(hw, SKGEPCI_REG(PCI_STATUS), 2658 pci_write_config_word(hw->pdev, PCI_STATUS,
2709 status | PCI_STATUS_ERROR_BITS); 2659 status | PCI_STATUS_ERROR_BITS);
2710 skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); 2660 skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
2711} 2661}
2712 2662
2713static void skge_mac_intr(struct skge_hw *hw, int port) 2663static void skge_mac_intr(struct skge_hw *hw, int port)
2714{ 2664{
2715 if (hw->chip_id == CHIP_ID_GENESIS) 2665 if (hw->chip_id == CHIP_ID_GENESIS)
2716 genesis_mac_intr(hw, port); 2666 genesis_mac_intr(hw, port);
2717 else 2667 else
2718 yukon_mac_intr(hw, port); 2668 yukon_mac_intr(hw, port);
@@ -2726,9 +2676,9 @@ static void skge_error_irq(struct skge_hw *hw)
2726 if (hw->chip_id == CHIP_ID_GENESIS) { 2676 if (hw->chip_id == CHIP_ID_GENESIS) {
2727 /* clear xmac errors */ 2677 /* clear xmac errors */
2728 if (hwstatus & (IS_NO_STAT_M1|IS_NO_TIST_M1)) 2678 if (hwstatus & (IS_NO_STAT_M1|IS_NO_TIST_M1))
2729 skge_write16(hw, SKGEMAC_REG(0, RX_MFF_CTRL1), MFF_CLR_INSTAT); 2679 skge_write16(hw, SK_REG(0, RX_MFF_CTRL1), MFF_CLR_INSTAT);
2730 if (hwstatus & (IS_NO_STAT_M2|IS_NO_TIST_M2)) 2680 if (hwstatus & (IS_NO_STAT_M2|IS_NO_TIST_M2))
2731 skge_write16(hw, SKGEMAC_REG(0, RX_MFF_CTRL2), MFF_CLR_INSTAT); 2681 skge_write16(hw, SK_REG(0, RX_MFF_CTRL2), MFF_CLR_INSTAT);
2732 } else { 2682 } else {
2733 /* Timestamp (unused) overflow */ 2683 /* Timestamp (unused) overflow */
2734 if (hwstatus & IS_IRQ_TIST_OV) 2684 if (hwstatus & IS_IRQ_TIST_OV)
@@ -2803,8 +2753,8 @@ static void skge_extirq(unsigned long data)
2803 2753
2804 if (hw->chip_id != CHIP_ID_GENESIS) 2754 if (hw->chip_id != CHIP_ID_GENESIS)
2805 yukon_phy_intr(skge); 2755 yukon_phy_intr(skge);
2806 else if (hw->phy_type == SK_PHY_BCOM) 2756 else
2807 genesis_bcom_intr(skge); 2757 bcom_phy_intr(skge);
2808 } 2758 }
2809 } 2759 }
2810 spin_unlock(&hw->phy_lock); 2760 spin_unlock(&hw->phy_lock);
@@ -2824,19 +2774,14 @@ static irqreturn_t skge_intr(int irq, void *dev_id, struct pt_regs *regs)
2824 return IRQ_NONE; 2774 return IRQ_NONE;
2825 2775
2826 status &= hw->intr_mask; 2776 status &= hw->intr_mask;
2827 2777 if (status & IS_R1_F) {
2828 if ((status & IS_R1_F) && netif_rx_schedule_prep(hw->dev[0])) {
2829 status &= ~IS_R1_F;
2830 hw->intr_mask &= ~IS_R1_F; 2778 hw->intr_mask &= ~IS_R1_F;
2831 skge_write32(hw, B0_IMSK, hw->intr_mask); 2779 netif_rx_schedule(hw->dev[0]);
2832 __netif_rx_schedule(hw->dev[0]);
2833 } 2780 }
2834 2781
2835 if ((status & IS_R2_F) && netif_rx_schedule_prep(hw->dev[1])) { 2782 if (status & IS_R2_F) {
2836 status &= ~IS_R2_F;
2837 hw->intr_mask &= ~IS_R2_F; 2783 hw->intr_mask &= ~IS_R2_F;
2838 skge_write32(hw, B0_IMSK, hw->intr_mask); 2784 netif_rx_schedule(hw->dev[1]);
2839 __netif_rx_schedule(hw->dev[1]);
2840 } 2785 }
2841 2786
2842 if (status & IS_XA1_F) 2787 if (status & IS_XA1_F)
@@ -2845,9 +2790,27 @@ static irqreturn_t skge_intr(int irq, void *dev_id, struct pt_regs *regs)
2845 if (status & IS_XA2_F) 2790 if (status & IS_XA2_F)
2846 skge_tx_intr(hw->dev[1]); 2791 skge_tx_intr(hw->dev[1]);
2847 2792
2793 if (status & IS_PA_TO_RX1) {
2794 struct skge_port *skge = netdev_priv(hw->dev[0]);
2795 ++skge->net_stats.rx_over_errors;
2796 skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_RX1);
2797 }
2798
2799 if (status & IS_PA_TO_RX2) {
2800 struct skge_port *skge = netdev_priv(hw->dev[1]);
2801 ++skge->net_stats.rx_over_errors;
2802 skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_RX2);
2803 }
2804
2805 if (status & IS_PA_TO_TX1)
2806 skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_TX1);
2807
2808 if (status & IS_PA_TO_TX2)
2809 skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_TX2);
2810
2848 if (status & IS_MAC1) 2811 if (status & IS_MAC1)
2849 skge_mac_intr(hw, 0); 2812 skge_mac_intr(hw, 0);
2850 2813
2851 if (status & IS_MAC2) 2814 if (status & IS_MAC2)
2852 skge_mac_intr(hw, 1); 2815 skge_mac_intr(hw, 1);
2853 2816
@@ -2859,8 +2822,7 @@ static irqreturn_t skge_intr(int irq, void *dev_id, struct pt_regs *regs)
2859 tasklet_schedule(&hw->ext_tasklet); 2822 tasklet_schedule(&hw->ext_tasklet);
2860 } 2823 }
2861 2824
2862 if (status) 2825 skge_write32(hw, B0_IMSK, hw->intr_mask);
2863 skge_write32(hw, B0_IMSK, hw->intr_mask);
2864 2826
2865 return IRQ_HANDLED; 2827 return IRQ_HANDLED;
2866} 2828}
@@ -2904,9 +2866,6 @@ static const struct {
2904 { CHIP_ID_YUKON, "Yukon" }, 2866 { CHIP_ID_YUKON, "Yukon" },
2905 { CHIP_ID_YUKON_LITE, "Yukon-Lite"}, 2867 { CHIP_ID_YUKON_LITE, "Yukon-Lite"},
2906 { CHIP_ID_YUKON_LP, "Yukon-LP"}, 2868 { CHIP_ID_YUKON_LP, "Yukon-LP"},
2907 { CHIP_ID_YUKON_XL, "Yukon-2 XL"},
2908 { CHIP_ID_YUKON_EC, "YUKON-2 EC"},
2909 { CHIP_ID_YUKON_FE, "YUKON-2 FE"},
2910}; 2869};
2911 2870
2912static const char *skge_board_name(const struct skge_hw *hw) 2871static const char *skge_board_name(const struct skge_hw *hw)
@@ -2930,8 +2889,8 @@ static const char *skge_board_name(const struct skge_hw *hw)
2930static int skge_reset(struct skge_hw *hw) 2889static int skge_reset(struct skge_hw *hw)
2931{ 2890{
2932 u16 ctst; 2891 u16 ctst;
2933 u8 t8; 2892 u8 t8, mac_cfg;
2934 int i, ports; 2893 int i;
2935 2894
2936 ctst = skge_read16(hw, B0_CTST); 2895 ctst = skge_read16(hw, B0_CTST);
2937 2896
@@ -2952,12 +2911,9 @@ static int skge_reset(struct skge_hw *hw)
2952 hw->phy_type = skge_read8(hw, B2_E_1) & 0xf; 2911 hw->phy_type = skge_read8(hw, B2_E_1) & 0xf;
2953 hw->pmd_type = skge_read8(hw, B2_PMD_TYP); 2912 hw->pmd_type = skge_read8(hw, B2_PMD_TYP);
2954 2913
2955 switch(hw->chip_id) { 2914 switch (hw->chip_id) {
2956 case CHIP_ID_GENESIS: 2915 case CHIP_ID_GENESIS:
2957 switch (hw->phy_type) { 2916 switch (hw->phy_type) {
2958 case SK_PHY_XMAC:
2959 hw->phy_addr = PHY_ADDR_XMAC;
2960 break;
2961 case SK_PHY_BCOM: 2917 case SK_PHY_BCOM:
2962 hw->phy_addr = PHY_ADDR_BCOM; 2918 hw->phy_addr = PHY_ADDR_BCOM;
2963 break; 2919 break;
@@ -2986,8 +2942,9 @@ static int skge_reset(struct skge_hw *hw)
2986 return -EOPNOTSUPP; 2942 return -EOPNOTSUPP;
2987 } 2943 }
2988 2944
2989 hw->mac_cfg = skge_read8(hw, B2_MAC_CFG); 2945 mac_cfg = skge_read8(hw, B2_MAC_CFG);
2990 ports = isdualport(hw) ? 2 : 1; 2946 hw->ports = (mac_cfg & CFG_SNG_MAC) ? 1 : 2;
2947 hw->chip_rev = (mac_cfg & CFG_CHIP_R_MSK) >> 4;
2991 2948
2992 /* read the adapters RAM size */ 2949 /* read the adapters RAM size */
2993 t8 = skge_read8(hw, B2_E_0); 2950 t8 = skge_read8(hw, B2_E_0);
@@ -3010,9 +2967,9 @@ static int skge_reset(struct skge_hw *hw)
3010 /* switch power to VCC (WA for VAUX problem) */ 2967 /* switch power to VCC (WA for VAUX problem) */
3011 skge_write8(hw, B0_POWER_CTRL, 2968 skge_write8(hw, B0_POWER_CTRL,
3012 PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON); 2969 PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON);
3013 for (i = 0; i < ports; i++) { 2970 for (i = 0; i < hw->ports; i++) {
3014 skge_write16(hw, SKGEMAC_REG(i, GMAC_LINK_CTRL), GMLC_RST_SET); 2971 skge_write16(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_SET);
3015 skge_write16(hw, SKGEMAC_REG(i, GMAC_LINK_CTRL), GMLC_RST_CLR); 2972 skge_write16(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_CLR);
3016 } 2973 }
3017 } 2974 }
3018 2975
@@ -3022,8 +2979,8 @@ static int skge_reset(struct skge_hw *hw)
3022 skge_write8(hw, B0_LED, LED_STAT_ON); 2979 skge_write8(hw, B0_LED, LED_STAT_ON);
3023 2980
3024 /* enable the Tx Arbiters */ 2981 /* enable the Tx Arbiters */
3025 for (i = 0; i < ports; i++) 2982 for (i = 0; i < hw->ports; i++)
3026 skge_write8(hw, SKGEMAC_REG(i, TXA_CTRL), TXA_ENA_ARB); 2983 skge_write8(hw, SK_REG(i, TXA_CTRL), TXA_ENA_ARB);
3027 2984
3028 /* Initialize ram interface */ 2985 /* Initialize ram interface */
3029 skge_write16(hw, B3_RI_CTRL, RI_RST_CLR); 2986 skge_write16(hw, B3_RI_CTRL, RI_RST_CLR);
@@ -3050,16 +3007,14 @@ static int skge_reset(struct skge_hw *hw)
3050 skge_write32(hw, B2_IRQM_INI, skge_usecs2clk(hw, 100)); 3007 skge_write32(hw, B2_IRQM_INI, skge_usecs2clk(hw, 100));
3051 skge_write32(hw, B2_IRQM_CTRL, TIM_START); 3008 skge_write32(hw, B2_IRQM_CTRL, TIM_START);
3052 3009
3053 hw->intr_mask = IS_HW_ERR | IS_EXT_REG | IS_PORT_1; 3010 hw->intr_mask = IS_HW_ERR | IS_EXT_REG;
3054 if (isdualport(hw))
3055 hw->intr_mask |= IS_PORT_2;
3056 skge_write32(hw, B0_IMSK, hw->intr_mask); 3011 skge_write32(hw, B0_IMSK, hw->intr_mask);
3057 3012
3058 if (hw->chip_id != CHIP_ID_GENESIS) 3013 if (hw->chip_id != CHIP_ID_GENESIS)
3059 skge_write8(hw, GMAC_IRQ_MSK, 0); 3014 skge_write8(hw, GMAC_IRQ_MSK, 0);
3060 3015
3061 spin_lock_bh(&hw->phy_lock); 3016 spin_lock_bh(&hw->phy_lock);
3062 for (i = 0; i < ports; i++) { 3017 for (i = 0; i < hw->ports; i++) {
3063 if (hw->chip_id == CHIP_ID_GENESIS) 3018 if (hw->chip_id == CHIP_ID_GENESIS)
3064 genesis_reset(hw, i); 3019 genesis_reset(hw, i);
3065 else 3020 else
@@ -3071,7 +3026,8 @@ static int skge_reset(struct skge_hw *hw)
3071} 3026}
3072 3027
3073/* Initialize network device */ 3028/* Initialize network device */
3074static struct net_device *skge_devinit(struct skge_hw *hw, int port) 3029static struct net_device *skge_devinit(struct skge_hw *hw, int port,
3030 int highmem)
3075{ 3031{
3076 struct skge_port *skge; 3032 struct skge_port *skge;
3077 struct net_device *dev = alloc_etherdev(sizeof(*skge)); 3033 struct net_device *dev = alloc_etherdev(sizeof(*skge));
@@ -3104,6 +3060,8 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port)
3104#endif 3060#endif
3105 dev->irq = hw->pdev->irq; 3061 dev->irq = hw->pdev->irq;
3106 dev->features = NETIF_F_LLTX; 3062 dev->features = NETIF_F_LLTX;
3063 if (highmem)
3064 dev->features |= NETIF_F_HIGHDMA;
3107 3065
3108 skge = netdev_priv(dev); 3066 skge = netdev_priv(dev);
3109 skge->netdev = dev; 3067 skge->netdev = dev;
@@ -3117,7 +3075,7 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port)
3117 skge->flow_control = FLOW_MODE_SYMMETRIC; 3075 skge->flow_control = FLOW_MODE_SYMMETRIC;
3118 skge->duplex = -1; 3076 skge->duplex = -1;
3119 skge->speed = -1; 3077 skge->speed = -1;
3120 skge->advertising = skge_modes(hw); 3078 skge->advertising = skge_supported_modes(hw);
3121 3079
3122 hw->dev[port] = dev; 3080 hw->dev[port] = dev;
3123 3081
@@ -3125,10 +3083,6 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port)
3125 3083
3126 spin_lock_init(&skge->tx_lock); 3084 spin_lock_init(&skge->tx_lock);
3127 3085
3128 init_timer(&skge->link_check);
3129 skge->link_check.function = skge_link_timer;
3130 skge->link_check.data = (unsigned long) skge;
3131
3132 init_timer(&skge->led_blink); 3086 init_timer(&skge->led_blink);
3133 skge->led_blink.function = skge_blink_timer; 3087 skge->led_blink.function = skge_blink_timer;
3134 skge->led_blink.data = (unsigned long) skge; 3088 skge->led_blink.data = (unsigned long) skge;
@@ -3232,14 +3186,11 @@ static int __devinit skge_probe(struct pci_dev *pdev,
3232 3186
3233 printk(KERN_INFO PFX "addr 0x%lx irq %d chip %s rev %d\n", 3187 printk(KERN_INFO PFX "addr 0x%lx irq %d chip %s rev %d\n",
3234 pci_resource_start(pdev, 0), pdev->irq, 3188 pci_resource_start(pdev, 0), pdev->irq,
3235 skge_board_name(hw), chip_rev(hw)); 3189 skge_board_name(hw), hw->chip_rev);
3236 3190
3237 if ((dev = skge_devinit(hw, 0)) == NULL) 3191 if ((dev = skge_devinit(hw, 0, using_dac)) == NULL)
3238 goto err_out_led_off; 3192 goto err_out_led_off;
3239 3193
3240 if (using_dac)
3241 dev->features |= NETIF_F_HIGHDMA;
3242
3243 if ((err = register_netdev(dev))) { 3194 if ((err = register_netdev(dev))) {
3244 printk(KERN_ERR PFX "%s: cannot register net device\n", 3195 printk(KERN_ERR PFX "%s: cannot register net device\n",
3245 pci_name(pdev)); 3196 pci_name(pdev));
@@ -3248,10 +3199,7 @@ static int __devinit skge_probe(struct pci_dev *pdev,
3248 3199
3249 skge_show_addr(dev); 3200 skge_show_addr(dev);
3250 3201
3251 if (isdualport(hw) && (dev1 = skge_devinit(hw, 1))) { 3202 if (hw->ports > 1 && (dev1 = skge_devinit(hw, 1, using_dac))) {
3252 if (using_dac)
3253 dev1->features |= NETIF_F_HIGHDMA;
3254
3255 if (register_netdev(dev1) == 0) 3203 if (register_netdev(dev1) == 0)
3256 skge_show_addr(dev1); 3204 skge_show_addr(dev1);
3257 else { 3205 else {
@@ -3288,7 +3236,7 @@ static void __devexit skge_remove(struct pci_dev *pdev)
3288 struct skge_hw *hw = pci_get_drvdata(pdev); 3236 struct skge_hw *hw = pci_get_drvdata(pdev);
3289 struct net_device *dev0, *dev1; 3237 struct net_device *dev0, *dev1;
3290 3238
3291 if(!hw) 3239 if (!hw)
3292 return; 3240 return;
3293 3241
3294 if ((dev1 = hw->dev[1])) 3242 if ((dev1 = hw->dev[1]))
@@ -3316,7 +3264,7 @@ static int skge_suspend(struct pci_dev *pdev, u32 state)
3316 struct skge_hw *hw = pci_get_drvdata(pdev); 3264 struct skge_hw *hw = pci_get_drvdata(pdev);
3317 int i, wol = 0; 3265 int i, wol = 0;
3318 3266
3319 for(i = 0; i < 2; i++) { 3267 for (i = 0; i < 2; i++) {
3320 struct net_device *dev = hw->dev[i]; 3268 struct net_device *dev = hw->dev[i];
3321 3269
3322 if (dev) { 3270 if (dev) {
@@ -3349,11 +3297,11 @@ static int skge_resume(struct pci_dev *pdev)
3349 3297
3350 skge_reset(hw); 3298 skge_reset(hw);
3351 3299
3352 for(i = 0; i < 2; i++) { 3300 for (i = 0; i < 2; i++) {
3353 struct net_device *dev = hw->dev[i]; 3301 struct net_device *dev = hw->dev[i];
3354 if (dev) { 3302 if (dev) {
3355 netif_device_attach(dev); 3303 netif_device_attach(dev);
3356 if(netif_running(dev)) 3304 if (netif_running(dev))
3357 skge_up(dev); 3305 skge_up(dev);
3358 } 3306 }
3359 } 3307 }
diff --git a/drivers/net/skge.h b/drivers/net/skge.h
index 36c62b68fab4..14d0cc01fb9a 100644
--- a/drivers/net/skge.h
+++ b/drivers/net/skge.h
@@ -7,31 +7,6 @@
7/* PCI config registers */ 7/* PCI config registers */
8#define PCI_DEV_REG1 0x40 8#define PCI_DEV_REG1 0x40
9#define PCI_DEV_REG2 0x44 9#define PCI_DEV_REG2 0x44
10#ifndef PCI_VPD
11#define PCI_VPD 0x50
12#endif
13
14/* PCI_OUR_REG_2 32 bit Our Register 2 */
15enum {
16 PCI_VPD_WR_THR = 0xff<<24, /* Bit 31..24: VPD Write Threshold */
17 PCI_DEV_SEL = 0x7f<<17, /* Bit 23..17: EEPROM Device Select */
18 PCI_VPD_ROM_SZ = 7 <<14, /* Bit 16..14: VPD ROM Size */
19 /* Bit 13..12: reserved */
20 PCI_EN_DUMMY_RD = 1<<3, /* Enable Dummy Read */
21 PCI_REV_DESC = 1<<2, /* Reverse Desc. Bytes */
22 PCI_USEDATA64 = 1<<0, /* Use 64Bit Data bus ext */
23};
24
25/* PCI_VPD_ADR_REG 16 bit VPD Address Register */
26enum {
27 PCI_VPD_FLAG = 1<<15, /* starts VPD rd/wr cycle */
28 PCI_VPD_ADR_MSK =0x7fffL, /* Bit 14.. 0: VPD Address Mask */
29 VPD_RES_ID = 0x82,
30 VPD_RES_READ = 0x90,
31 VPD_RES_WRITE = 0x81,
32 VPD_RES_END = 0x78,
33};
34
35 10
36#define PCI_STATUS_ERROR_BITS (PCI_STATUS_DETECTED_PARITY | \ 11#define PCI_STATUS_ERROR_BITS (PCI_STATUS_DETECTED_PARITY | \
37 PCI_STATUS_SIG_SYSTEM_ERROR | \ 12 PCI_STATUS_SIG_SYSTEM_ERROR | \
@@ -39,7 +14,6 @@ enum {
39 PCI_STATUS_REC_TARGET_ABORT | \ 14 PCI_STATUS_REC_TARGET_ABORT | \
40 PCI_STATUS_PARITY) 15 PCI_STATUS_PARITY)
41 16
42
43enum csr_regs { 17enum csr_regs {
44 B0_RAP = 0x0000, 18 B0_RAP = 0x0000,
45 B0_CTST = 0x0004, 19 B0_CTST = 0x0004,
@@ -229,8 +203,11 @@ enum {
229 IS_XA2_F = 1<<1, /* Q_XA2 End of Frame */ 203 IS_XA2_F = 1<<1, /* Q_XA2 End of Frame */
230 IS_XA2_C = 1<<0, /* Q_XA2 Encoding Error */ 204 IS_XA2_C = 1<<0, /* Q_XA2 Encoding Error */
231 205
232 IS_PORT_1 = IS_XA1_F| IS_R1_F| IS_MAC1, 206 IS_TO_PORT1 = IS_PA_TO_RX1 | IS_PA_TO_TX1,
233 IS_PORT_2 = IS_XA2_F| IS_R2_F| IS_MAC2, 207 IS_TO_PORT2 = IS_PA_TO_RX2 | IS_PA_TO_TX2,
208
209 IS_PORT_1 = IS_XA1_F| IS_R1_F | IS_TO_PORT1 | IS_MAC1,
210 IS_PORT_2 = IS_XA2_F| IS_R2_F | IS_TO_PORT2 | IS_MAC2,
234}; 211};
235 212
236 213
@@ -288,14 +265,6 @@ enum {
288 CHIP_REV_YU_LITE_A3 = 7, /* Chip Rev. for YUKON-Lite A3 */ 265 CHIP_REV_YU_LITE_A3 = 7, /* Chip Rev. for YUKON-Lite A3 */
289}; 266};
290 267
291/* B2_LD_TEST 8 bit EPROM loader test register */
292enum {
293 LD_T_ON = 1<<3, /* Loader Test mode on */
294 LD_T_OFF = 1<<2, /* Loader Test mode off */
295 LD_T_STEP = 1<<1, /* Decrement FPROM addr. Counter */
296 LD_START = 1<<0, /* Start loading FPROM */
297};
298
299/* B2_TI_CTRL 8 bit Timer control */ 268/* B2_TI_CTRL 8 bit Timer control */
300/* B2_IRQM_CTRL 8 bit IRQ Moderation Timer Control */ 269/* B2_IRQM_CTRL 8 bit IRQ Moderation Timer Control */
301enum { 270enum {
@@ -313,16 +282,6 @@ enum {
313 TIM_T_STEP = 1<<0, /* Test step */ 282 TIM_T_STEP = 1<<0, /* Test step */
314}; 283};
315 284
316/* B28_DPT_INI 32 bit Descriptor Poll Timer Init Val */
317/* B28_DPT_VAL 32 bit Descriptor Poll Timer Curr Val */
318/* B28_DPT_CTRL 8 bit Descriptor Poll Timer Ctrl Reg */
319enum {
320 DPT_MSK = 0x00ffffffL, /* Bit 23.. 0: Desc Poll Timer Bits */
321
322 DPT_START = 1<<1, /* Start Descriptor Poll Timer */
323 DPT_STOP = 1<<0, /* Stop Descriptor Poll Timer */
324};
325
326/* B2_GP_IO 32 bit General Purpose I/O Register */ 285/* B2_GP_IO 32 bit General Purpose I/O Register */
327enum { 286enum {
328 GP_DIR_9 = 1<<25, /* IO_9 direct, 0=In/1=Out */ 287 GP_DIR_9 = 1<<25, /* IO_9 direct, 0=In/1=Out */
@@ -348,30 +307,6 @@ enum {
348 GP_IO_0 = 1<<0, /* IO_0 pin */ 307 GP_IO_0 = 1<<0, /* IO_0 pin */
349}; 308};
350 309
351/* Rx/Tx Path related Arbiter Test Registers */
352/* B3_MA_TO_TEST 16 bit MAC Arbiter Timeout Test Reg */
353/* B3_MA_RC_TEST 16 bit MAC Arbiter Recovery Test Reg */
354/* B3_PA_TEST 16 bit Packet Arbiter Test Register */
355/* Bit 15, 11, 7, and 3 are reserved in B3_PA_TEST */
356enum {
357 TX2_T_EV = 1<<15,/* TX2 Timeout/Recv Event occured */
358 TX2_T_ON = 1<<14,/* TX2 Timeout/Recv Timer Test On */
359 TX2_T_OFF = 1<<13,/* TX2 Timeout/Recv Timer Tst Off */
360 TX2_T_STEP = 1<<12,/* TX2 Timeout/Recv Timer Step */
361 TX1_T_EV = 1<<11,/* TX1 Timeout/Recv Event occured */
362 TX1_T_ON = 1<<10,/* TX1 Timeout/Recv Timer Test On */
363 TX1_T_OFF = 1<<9, /* TX1 Timeout/Recv Timer Tst Off */
364 TX1_T_STEP = 1<<8, /* TX1 Timeout/Recv Timer Step */
365 RX2_T_EV = 1<<7, /* RX2 Timeout/Recv Event occured */
366 RX2_T_ON = 1<<6, /* RX2 Timeout/Recv Timer Test On */
367 RX2_T_OFF = 1<<5, /* RX2 Timeout/Recv Timer Tst Off */
368 RX2_T_STEP = 1<<4, /* RX2 Timeout/Recv Timer Step */
369 RX1_T_EV = 1<<3, /* RX1 Timeout/Recv Event occured */
370 RX1_T_ON = 1<<2, /* RX1 Timeout/Recv Timer Test On */
371 RX1_T_OFF = 1<<1, /* RX1 Timeout/Recv Timer Tst Off */
372 RX1_T_STEP = 1<<0, /* RX1 Timeout/Recv Timer Step */
373};
374
375/* Descriptor Bit Definition */ 310/* Descriptor Bit Definition */
376/* TxCtrl Transmit Buffer Control Field */ 311/* TxCtrl Transmit Buffer Control Field */
377/* RxCtrl Receive Buffer Control Field */ 312/* RxCtrl Receive Buffer Control Field */
@@ -428,14 +363,6 @@ enum {
428 RI_RST_SET = 1<<0, /* Set RAM Interface Reset */ 363 RI_RST_SET = 1<<0, /* Set RAM Interface Reset */
429}; 364};
430 365
431/* B3_RI_TEST 8 bit RAM Iface Test Register */
432enum {
433 RI_T_EV = 1<<3, /* Timeout Event occured */
434 RI_T_ON = 1<<2, /* Timeout Timer Test On */
435 RI_T_OFF = 1<<1, /* Timeout Timer Test Off */
436 RI_T_STEP = 1<<0, /* Timeout Timer Step */
437};
438
439/* MAC Arbiter Registers */ 366/* MAC Arbiter Registers */
440/* B3_MA_TO_CTRL 16 bit MAC Arbiter Timeout Ctrl Reg */ 367/* B3_MA_TO_CTRL 16 bit MAC Arbiter Timeout Ctrl Reg */
441enum { 368enum {
@@ -452,19 +379,6 @@ enum {
452#define SK_PKT_TO_MAX 0xffff /* Maximum value */ 379#define SK_PKT_TO_MAX 0xffff /* Maximum value */
453#define SK_RI_TO_53 36 /* RAM interface timeout */ 380#define SK_RI_TO_53 36 /* RAM interface timeout */
454 381
455
456/* B3_MA_RC_CTRL 16 bit MAC Arbiter Recovery Ctrl Reg */
457enum {
458 MA_ENA_REC_TX2 = 1<<7, /* Enable Recovery Timer TX2 */
459 MA_DIS_REC_TX2 = 1<<6, /* Disable Recovery Timer TX2 */
460 MA_ENA_REC_TX1 = 1<<5, /* Enable Recovery Timer TX1 */
461 MA_DIS_REC_TX1 = 1<<4, /* Disable Recovery Timer TX1 */
462 MA_ENA_REC_RX2 = 1<<3, /* Enable Recovery Timer RX2 */
463 MA_DIS_REC_RX2 = 1<<2, /* Disable Recovery Timer RX2 */
464 MA_ENA_REC_RX1 = 1<<1, /* Enable Recovery Timer RX1 */
465 MA_DIS_REC_RX1 = 1<<0, /* Disable Recovery Timer RX1 */
466};
467
468/* Packet Arbiter Registers */ 382/* Packet Arbiter Registers */
469/* B3_PA_CTRL 16 bit Packet Arbiter Ctrl Register */ 383/* B3_PA_CTRL 16 bit Packet Arbiter Ctrl Register */
470enum { 384enum {
@@ -488,7 +402,7 @@ enum {
488 PA_ENA_TO_TX1 | PA_ENA_TO_TX2) 402 PA_ENA_TO_TX1 | PA_ENA_TO_TX2)
489 403
490 404
491/* Transmit Arbiter Registers MAC 1 and 2, use MR_ADDR() to access */ 405/* Transmit Arbiter Registers MAC 1 and 2, use SK_REG() to access */
492/* TXA_ITI_INI 32 bit Tx Arb Interval Timer Init Val */ 406/* TXA_ITI_INI 32 bit Tx Arb Interval Timer Init Val */
493/* TXA_ITI_VAL 32 bit Tx Arb Interval Timer Value */ 407/* TXA_ITI_VAL 32 bit Tx Arb Interval Timer Value */
494/* TXA_LIM_INI 32 bit Tx Arb Limit Counter Init Val */ 408/* TXA_LIM_INI 32 bit Tx Arb Limit Counter Init Val */
@@ -511,7 +425,7 @@ enum {
511/* 425/*
512 * Bank 4 - 5 426 * Bank 4 - 5
513 */ 427 */
514/* Transmit Arbiter Registers MAC 1 and 2, use MR_ADDR() to access */ 428/* Transmit Arbiter Registers MAC 1 and 2, use SK_REG() to access */
515enum { 429enum {
516 TXA_ITI_INI = 0x0200,/* 32 bit Tx Arb Interval Timer Init Val*/ 430 TXA_ITI_INI = 0x0200,/* 32 bit Tx Arb Interval Timer Init Val*/
517 TXA_ITI_VAL = 0x0204,/* 32 bit Tx Arb Interval Timer Value */ 431 TXA_ITI_VAL = 0x0204,/* 32 bit Tx Arb Interval Timer Value */
@@ -537,7 +451,7 @@ enum {
537 451
538/* Queue Register Offsets, use Q_ADDR() to access */ 452/* Queue Register Offsets, use Q_ADDR() to access */
539enum { 453enum {
540 B8_Q_REGS = 0x0400, /* base of Queue registers */ 454 B8_Q_REGS = 0x0400, /* base of Queue registers */
541 Q_D = 0x00, /* 8*32 bit Current Descriptor */ 455 Q_D = 0x00, /* 8*32 bit Current Descriptor */
542 Q_DA_L = 0x20, /* 32 bit Current Descriptor Address Low dWord */ 456 Q_DA_L = 0x20, /* 32 bit Current Descriptor Address Low dWord */
543 Q_DA_H = 0x24, /* 32 bit Current Descriptor Address High dWord */ 457 Q_DA_H = 0x24, /* 32 bit Current Descriptor Address High dWord */
@@ -618,8 +532,7 @@ enum {
618enum { 532enum {
619 PHY_ADDR_XMAC = 0<<8, 533 PHY_ADDR_XMAC = 0<<8,
620 PHY_ADDR_BCOM = 1<<8, 534 PHY_ADDR_BCOM = 1<<8,
621 PHY_ADDR_LONE = 3<<8, 535
622 PHY_ADDR_NAT = 0<<8,
623/* GPHY address (bits 15..11 of SMI control reg) */ 536/* GPHY address (bits 15..11 of SMI control reg) */
624 PHY_ADDR_MARV = 0, 537 PHY_ADDR_MARV = 0,
625}; 538};
@@ -986,7 +899,7 @@ enum {
986 LINKLED_BLINK_OFF = 0x10, 899 LINKLED_BLINK_OFF = 0x10,
987 LINKLED_BLINK_ON = 0x20, 900 LINKLED_BLINK_ON = 0x20,
988}; 901};
989 902
990/* GMAC and GPHY Control Registers (YUKON only) */ 903/* GMAC and GPHY Control Registers (YUKON only) */
991enum { 904enum {
992 GMAC_CTRL = 0x0f00,/* 32 bit GMAC Control Reg */ 905 GMAC_CTRL = 0x0f00,/* 32 bit GMAC Control Reg */
@@ -1151,54 +1064,6 @@ enum {
1151 PHY_MARV_FE_SPEC_2 = 0x1c,/* 16 bit r/w Specific Control Reg. 2 */ 1064 PHY_MARV_FE_SPEC_2 = 0x1c,/* 16 bit r/w Specific Control Reg. 2 */
1152}; 1065};
1153 1066
1154/* Level One-PHY Registers, indirect addressed over XMAC */
1155enum {
1156 PHY_LONE_CTRL = 0x00,/* 16 bit r/w PHY Control Register */
1157 PHY_LONE_STAT = 0x01,/* 16 bit r/o PHY Status Register */
1158 PHY_LONE_ID0 = 0x02,/* 16 bit r/o PHY ID0 Register */
1159 PHY_LONE_ID1 = 0x03,/* 16 bit r/o PHY ID1 Register */
1160 PHY_LONE_AUNE_ADV = 0x04,/* 16 bit r/w Auto-Neg. Advertisement */
1161 PHY_LONE_AUNE_LP = 0x05,/* 16 bit r/o Link Part Ability Reg */
1162 PHY_LONE_AUNE_EXP = 0x06,/* 16 bit r/o Auto-Neg. Expansion Reg */
1163 PHY_LONE_NEPG = 0x07,/* 16 bit r/w Next Page Register */
1164 PHY_LONE_NEPG_LP = 0x08,/* 16 bit r/o Next Page Link Partner */
1165 /* Level One-specific registers */
1166 PHY_LONE_1000T_CTRL = 0x09,/* 16 bit r/w 1000Base-T Control Reg */
1167 PHY_LONE_1000T_STAT = 0x0a,/* 16 bit r/o 1000Base-T Status Reg */
1168 PHY_LONE_EXT_STAT = 0x0f,/* 16 bit r/o Extended Status Reg */
1169 PHY_LONE_PORT_CFG = 0x10,/* 16 bit r/w Port Configuration Reg*/
1170 PHY_LONE_Q_STAT = 0x11,/* 16 bit r/o Quick Status Reg */
1171 PHY_LONE_INT_ENAB = 0x12,/* 16 bit r/w Interrupt Enable Reg */
1172 PHY_LONE_INT_STAT = 0x13,/* 16 bit r/o Interrupt Status Reg */
1173 PHY_LONE_LED_CFG = 0x14,/* 16 bit r/w LED Configuration Reg */
1174 PHY_LONE_PORT_CTRL = 0x15,/* 16 bit r/w Port Control Reg */
1175 PHY_LONE_CIM = 0x16,/* 16 bit r/o CIM Reg */
1176};
1177
1178/* National-PHY Registers, indirect addressed over XMAC */
1179enum {
1180 PHY_NAT_CTRL = 0x00,/* 16 bit r/w PHY Control Register */
1181 PHY_NAT_STAT = 0x01,/* 16 bit r/w PHY Status Register */
1182 PHY_NAT_ID0 = 0x02,/* 16 bit r/o PHY ID0 Register */
1183 PHY_NAT_ID1 = 0x03,/* 16 bit r/o PHY ID1 Register */
1184 PHY_NAT_AUNE_ADV = 0x04,/* 16 bit r/w Auto-Neg. Advertisement */
1185 PHY_NAT_AUNE_LP = 0x05,/* 16 bit r/o Link Partner Ability Reg */
1186 PHY_NAT_AUNE_EXP = 0x06,/* 16 bit r/o Auto-Neg. Expansion Reg */
1187 PHY_NAT_NEPG = 0x07,/* 16 bit r/w Next Page Register */
1188 PHY_NAT_NEPG_LP = 0x08,/* 16 bit r/o Next Page Link Partner Reg */
1189 /* National-specific registers */
1190 PHY_NAT_1000T_CTRL = 0x09,/* 16 bit r/w 1000Base-T Control Reg */
1191 PHY_NAT_1000T_STAT = 0x0a,/* 16 bit r/o 1000Base-T Status Reg */
1192 PHY_NAT_EXT_STAT = 0x0f,/* 16 bit r/o Extended Status Register */
1193 PHY_NAT_EXT_CTRL1 = 0x10,/* 16 bit r/o Extended Control Reg1 */
1194 PHY_NAT_Q_STAT1 = 0x11,/* 16 bit r/o Quick Status Reg1 */
1195 PHY_NAT_10B_OP = 0x12,/* 16 bit r/o 10Base-T Operations Reg */
1196 PHY_NAT_EXT_CTRL2 = 0x13,/* 16 bit r/o Extended Control Reg1 */
1197 PHY_NAT_Q_STAT2 = 0x14,/* 16 bit r/o Quick Status Reg2 */
1198
1199 PHY_NAT_PHY_ADDR = 0x19,/* 16 bit r/o PHY Address Register */
1200};
1201
1202enum { 1067enum {
1203 PHY_CT_RESET = 1<<15, /* Bit 15: (sc) clear all PHY related regs */ 1068 PHY_CT_RESET = 1<<15, /* Bit 15: (sc) clear all PHY related regs */
1204 PHY_CT_LOOP = 1<<14, /* Bit 14: enable Loopback over PHY */ 1069 PHY_CT_LOOP = 1<<14, /* Bit 14: enable Loopback over PHY */
@@ -1253,8 +1118,29 @@ enum {
1253 PHY_MARV_ID1_Y2 = 0x0C91, /* Yukon-2 (PHY 88E1112) */ 1118 PHY_MARV_ID1_Y2 = 0x0C91, /* Yukon-2 (PHY 88E1112) */
1254}; 1119};
1255 1120
1121/* Advertisement register bits */
1256enum { 1122enum {
1257 PHY_AN_NXT_PG = 1<<15, /* Bit 15: Request Next Page */ 1123 PHY_AN_NXT_PG = 1<<15, /* Bit 15: Request Next Page */
1124 PHY_AN_ACK = 1<<14, /* Bit 14: (ro) Acknowledge Received */
1125 PHY_AN_RF = 1<<13, /* Bit 13: Remote Fault Bits */
1126
1127 PHY_AN_PAUSE_ASYM = 1<<11,/* Bit 11: Try for asymmetric */
1128 PHY_AN_PAUSE_CAP = 1<<10, /* Bit 10: Try for pause */
1129 PHY_AN_100BASE4 = 1<<9, /* Bit 9: Try for 100mbps 4k packets */
1130 PHY_AN_100FULL = 1<<8, /* Bit 8: Try for 100mbps full-duplex */
1131 PHY_AN_100HALF = 1<<7, /* Bit 7: Try for 100mbps half-duplex */
1132 PHY_AN_10FULL = 1<<6, /* Bit 6: Try for 10mbps full-duplex */
1133 PHY_AN_10HALF = 1<<5, /* Bit 5: Try for 10mbps half-duplex */
1134 PHY_AN_CSMA = 1<<0, /* Bit 0: Only selector supported */
1135 PHY_AN_SEL = 0x1f, /* Bit 4..0: Selector Field, 00001=Ethernet*/
1136 PHY_AN_FULL = PHY_AN_100FULL | PHY_AN_10FULL | PHY_AN_CSMA,
1137 PHY_AN_ALL = PHY_AN_10HALF | PHY_AN_10FULL |
1138 PHY_AN_100HALF | PHY_AN_100FULL,
1139};
1140
1141/* Xmac Specific */
1142enum {
1143 PHY_X_AN_NXT_PG = 1<<15, /* Bit 15: Request Next Page */
1258 PHY_X_AN_ACK = 1<<14, /* Bit 14: (ro) Acknowledge Received */ 1144 PHY_X_AN_ACK = 1<<14, /* Bit 14: (ro) Acknowledge Received */
1259 PHY_X_AN_RFB = 3<<12,/* Bit 13..12: Remote Fault Bits */ 1145 PHY_X_AN_RFB = 3<<12,/* Bit 13..12: Remote Fault Bits */
1260 1146
@@ -1263,82 +1149,6 @@ enum {
1263 PHY_X_AN_FD = 1<<5, /* Bit 5: Full Duplex */ 1149 PHY_X_AN_FD = 1<<5, /* Bit 5: Full Duplex */
1264}; 1150};
1265 1151
1266enum {
1267 PHY_B_AN_RF = 1<<13, /* Bit 13: Remote Fault */
1268
1269 PHY_B_AN_ASP = 1<<11, /* Bit 11: Asymmetric Pause */
1270 PHY_B_AN_PC = 1<<10, /* Bit 10: Pause Capable */
1271 PHY_B_AN_SEL = 0x1f, /* Bit 4..0: Selector Field, 00001=Ethernet*/
1272};
1273
1274enum {
1275 PHY_L_AN_RF = 1<<13, /* Bit 13: Remote Fault */
1276 /* Bit 12: reserved */
1277 PHY_L_AN_ASP = 1<<11, /* Bit 11: Asymmetric Pause */
1278 PHY_L_AN_PC = 1<<10, /* Bit 10: Pause Capable */
1279
1280 PHY_L_AN_SEL = 0x1f, /* Bit 4..0: Selector Field, 00001=Ethernet*/
1281};
1282
1283/* PHY_NAT_AUNE_ADV 16 bit r/w Auto-Negotiation Advertisement */
1284/* PHY_NAT_AUNE_LP 16 bit r/o Link Partner Ability Reg *****/
1285/* PHY_AN_NXT_PG (see XMAC) Bit 15: Request Next Page */
1286enum {
1287 PHY_N_AN_RF = 1<<13, /* Bit 13: Remote Fault */
1288
1289 PHY_N_AN_100F = 1<<11, /* Bit 11: 100Base-T2 FD Support */
1290 PHY_N_AN_100H = 1<<10, /* Bit 10: 100Base-T2 HD Support */
1291
1292 PHY_N_AN_SEL = 0x1f, /* Bit 4..0: Selector Field, 00001=Ethernet*/
1293};
1294
1295/* field type definition for PHY_x_AN_SEL */
1296enum {
1297 PHY_SEL_TYPE = 1, /* 00001 = Ethernet */
1298};
1299
1300enum {
1301 PHY_ANE_LP_NP = 1<<3, /* Bit 3: Link Partner can Next Page */
1302 PHY_ANE_LOC_NP = 1<<2, /* Bit 2: Local PHY can Next Page */
1303 PHY_ANE_RX_PG = 1<<1, /* Bit 1: Page Received */
1304};
1305
1306enum {
1307 PHY_ANE_PAR_DF = 1<<4, /* Bit 4: Parallel Detection Fault */
1308
1309 PHY_ANE_LP_CAP = 1<<0, /* Bit 0: Link Partner Auto-Neg. Cap. */
1310};
1311
1312enum {
1313 PHY_NP_MORE = 1<<15, /* Bit 15: More, Next Pages to follow */
1314 PHY_NP_ACK1 = 1<<14, /* Bit 14: (ro) Ack1, for receiving a message */
1315 PHY_NP_MSG_VAL = 1<<13, /* Bit 13: Message Page valid */
1316 PHY_NP_ACK2 = 1<<12, /* Bit 12: Ack2, comply with msg content */
1317 PHY_NP_TOG = 1<<11, /* Bit 11: Toggle Bit, ensure sync */
1318 PHY_NP_MSG = 0x07ff, /* Bit 10..0: Message from/to Link Partner */
1319};
1320
1321enum {
1322 PHY_X_EX_FD = 1<<15, /* Bit 15: Device Supports Full Duplex */
1323 PHY_X_EX_HD = 1<<14, /* Bit 14: Device Supports Half Duplex */
1324};
1325
1326enum {
1327 PHY_X_RS_PAUSE = 3<<7,/* Bit 8..7: selected Pause Mode */
1328 PHY_X_RS_HD = 1<<6, /* Bit 6: Half Duplex Mode selected */
1329 PHY_X_RS_FD = 1<<5, /* Bit 5: Full Duplex Mode selected */
1330 PHY_X_RS_ABLMIS = 1<<4, /* Bit 4: duplex or pause cap mismatch */
1331 PHY_X_RS_PAUMIS = 1<<3, /* Bit 3: pause capability mismatch */
1332};
1333
1334/** Remote Fault Bits (PHY_X_AN_RFB) encoding */
1335enum {
1336 X_RFB_OK = 0<<12,/* Bit 13..12 No errors, Link OK */
1337 X_RFB_LF = 1<<12, /* Bit 13..12 Link Failure */
1338 X_RFB_OFF = 2<<12,/* Bit 13..12 Offline */
1339 X_RFB_AN_ERR = 3<<12,/* Bit 13..12 Auto-Negotiation Error */
1340};
1341
1342/* Pause Bits (PHY_X_AN_PAUSE and PHY_X_RS_PAUSE) encoding */ 1152/* Pause Bits (PHY_X_AN_PAUSE and PHY_X_RS_PAUSE) encoding */
1343enum { 1153enum {
1344 PHY_X_P_NO_PAUSE = 0<<7,/* Bit 8..7: no Pause Mode */ 1154 PHY_X_P_NO_PAUSE = 0<<7,/* Bit 8..7: no Pause Mode */
@@ -1418,6 +1228,16 @@ enum {
1418 PHY_B_PES_MLT3_ER = 1<<0, /* Bit 0: MLT3 code Error */ 1228 PHY_B_PES_MLT3_ER = 1<<0, /* Bit 0: MLT3 code Error */
1419}; 1229};
1420 1230
1231/* PHY_BCOM_AUNE_ADV 16 bit r/w Auto-Negotiation Advertisement *****/
1232/* PHY_BCOM_AUNE_LP 16 bit r/o Link Partner Ability Reg *****/
1233enum {
1234 PHY_B_AN_RF = 1<<13, /* Bit 13: Remote Fault */
1235
1236 PHY_B_AN_ASP = 1<<11, /* Bit 11: Asymmetric Pause */
1237 PHY_B_AN_PC = 1<<10, /* Bit 10: Pause Capable */
1238};
1239
1240
1421/***** PHY_BCOM_FC_CTR 16 bit r/w False Carrier Counter *****/ 1241/***** PHY_BCOM_FC_CTR 16 bit r/w False Carrier Counter *****/
1422enum { 1242enum {
1423 PHY_B_FC_CTR = 0xff, /* Bit 7..0: False Carrier Counter */ 1243 PHY_B_FC_CTR = 0xff, /* Bit 7..0: False Carrier Counter */
@@ -1478,7 +1298,9 @@ enum {
1478 PHY_B_IS_LST_CHANGE = 1<<1, /* Bit 1: Link Status Changed */ 1298 PHY_B_IS_LST_CHANGE = 1<<1, /* Bit 1: Link Status Changed */
1479 PHY_B_IS_CRC_ER = 1<<0, /* Bit 0: CRC Error */ 1299 PHY_B_IS_CRC_ER = 1<<0, /* Bit 0: CRC Error */
1480}; 1300};
1481#define PHY_B_DEF_MSK (~(PHY_B_IS_AN_PR | PHY_B_IS_LST_CHANGE)) 1301#define PHY_B_DEF_MSK \
1302 (~(PHY_B_IS_PSE | PHY_B_IS_AN_PR | PHY_B_IS_DUP_CHANGE | \
1303 PHY_B_IS_LSP_CHANGE | PHY_B_IS_LST_CHANGE))
1482 1304
1483/* Pause Bits (PHY_B_AN_ASP and PHY_B_AN_PC) encoding */ 1305/* Pause Bits (PHY_B_AN_ASP and PHY_B_AN_PC) encoding */
1484enum { 1306enum {
@@ -1495,166 +1317,6 @@ enum {
1495 PHY_B_RES_1000HD = 6<<8,/* Bit 10..8: 1000Base-T Half Dup. */ 1317 PHY_B_RES_1000HD = 6<<8,/* Bit 10..8: 1000Base-T Half Dup. */
1496}; 1318};
1497 1319
1498/*
1499 * Level One-Specific
1500 */
1501/***** PHY_LONE_1000T_CTRL 16 bit r/w 1000Base-T Control Reg *****/
1502enum {
1503 PHY_L_1000C_TEST = 7<<13,/* Bit 15..13: Test Modes */
1504 PHY_L_1000C_MSE = 1<<12, /* Bit 12: Master/Slave Enable */
1505 PHY_L_1000C_MSC = 1<<11, /* Bit 11: M/S Configuration */
1506 PHY_L_1000C_RD = 1<<10, /* Bit 10: Repeater/DTE */
1507 PHY_L_1000C_AFD = 1<<9, /* Bit 9: Advertise Full Duplex */
1508 PHY_L_1000C_AHD = 1<<8, /* Bit 8: Advertise Half Duplex */
1509};
1510
1511/***** PHY_LONE_1000T_STAT 16 bit r/o 1000Base-T Status Reg *****/
1512enum {
1513 PHY_L_1000S_MSF = 1<<15, /* Bit 15: Master/Slave Fault */
1514 PHY_L_1000S_MSR = 1<<14, /* Bit 14: Master/Slave Result */
1515 PHY_L_1000S_LRS = 1<<13, /* Bit 13: Local Receiver Status */
1516 PHY_L_1000S_RRS = 1<<12, /* Bit 12: Remote Receiver Status */
1517 PHY_L_1000S_LP_FD = 1<<11, /* Bit 11: Link Partner can FD */
1518 PHY_L_1000S_LP_HD = 1<<10, /* Bit 10: Link Partner can HD */
1519
1520 PHY_L_1000S_IEC = 0xff, /* Bit 7..0: Idle Error Count */
1521
1522/***** PHY_LONE_EXT_STAT 16 bit r/o Extended Status Register *****/
1523 PHY_L_ES_X_FD_CAP = 1<<15, /* Bit 15: 1000Base-X FD capable */
1524 PHY_L_ES_X_HD_CAP = 1<<14, /* Bit 14: 1000Base-X HD capable */
1525 PHY_L_ES_T_FD_CAP = 1<<13, /* Bit 13: 1000Base-T FD capable */
1526 PHY_L_ES_T_HD_CAP = 1<<12, /* Bit 12: 1000Base-T HD capable */
1527};
1528
1529/***** PHY_LONE_PORT_CFG 16 bit r/w Port Configuration Reg *****/
1530enum {
1531 PHY_L_PC_REP_MODE = 1<<15, /* Bit 15: Repeater Mode */
1532
1533 PHY_L_PC_TX_DIS = 1<<13, /* Bit 13: Tx output Disabled */
1534 PHY_L_PC_BY_SCR = 1<<12, /* Bit 12: Bypass Scrambler */
1535 PHY_L_PC_BY_45 = 1<<11, /* Bit 11: Bypass 4B5B-Decoder */
1536 PHY_L_PC_JAB_DIS = 1<<10, /* Bit 10: Jabber Disabled */
1537 PHY_L_PC_SQE = 1<<9, /* Bit 9: Enable Heartbeat */
1538 PHY_L_PC_TP_LOOP = 1<<8, /* Bit 8: TP Loopback */
1539 PHY_L_PC_SSS = 1<<7, /* Bit 7: Smart Speed Selection */
1540 PHY_L_PC_FIFO_SIZE = 1<<6, /* Bit 6: FIFO Size */
1541 PHY_L_PC_PRE_EN = 1<<5, /* Bit 5: Preamble Enable */
1542 PHY_L_PC_CIM = 1<<4, /* Bit 4: Carrier Integrity Mon */
1543 PHY_L_PC_10_SER = 1<<3, /* Bit 3: Use Serial Output */
1544 PHY_L_PC_ANISOL = 1<<2, /* Bit 2: Unisolate Port */
1545 PHY_L_PC_TEN_BIT = 1<<1, /* Bit 1: 10bit iface mode on */
1546 PHY_L_PC_ALTCLOCK = 1<<0, /* Bit 0: (ro) ALTCLOCK Mode on */
1547};
1548
1549/***** PHY_LONE_Q_STAT 16 bit r/o Quick Status Reg *****/
1550enum {
1551 PHY_L_QS_D_RATE = 3<<14,/* Bit 15..14: Data Rate */
1552 PHY_L_QS_TX_STAT = 1<<13, /* Bit 13: Transmitting */
1553 PHY_L_QS_RX_STAT = 1<<12, /* Bit 12: Receiving */
1554 PHY_L_QS_COL_STAT = 1<<11, /* Bit 11: Collision */
1555 PHY_L_QS_L_STAT = 1<<10, /* Bit 10: Link is up */
1556 PHY_L_QS_DUP_MOD = 1<<9, /* Bit 9: Full/Half Duplex */
1557 PHY_L_QS_AN = 1<<8, /* Bit 8: AutoNeg is On */
1558 PHY_L_QS_AN_C = 1<<7, /* Bit 7: AN is Complete */
1559 PHY_L_QS_LLE = 7<<4,/* Bit 6..4: Line Length Estim. */
1560 PHY_L_QS_PAUSE = 1<<3, /* Bit 3: LP advertised Pause */
1561 PHY_L_QS_AS_PAUSE = 1<<2, /* Bit 2: LP adv. asym. Pause */
1562 PHY_L_QS_ISOLATE = 1<<1, /* Bit 1: CIM Isolated */
1563 PHY_L_QS_EVENT = 1<<0, /* Bit 0: Event has occurred */
1564};
1565
1566/***** PHY_LONE_INT_ENAB 16 bit r/w Interrupt Enable Reg *****/
1567/***** PHY_LONE_INT_STAT 16 bit r/o Interrupt Status Reg *****/
1568enum {
1569 PHY_L_IS_AN_F = 1<<13, /* Bit 13: Auto-Negotiation fault */
1570 PHY_L_IS_CROSS = 1<<11, /* Bit 11: Crossover used */
1571 PHY_L_IS_POL = 1<<10, /* Bit 10: Polarity correct. used */
1572 PHY_L_IS_SS = 1<<9, /* Bit 9: Smart Speed Downgrade */
1573 PHY_L_IS_CFULL = 1<<8, /* Bit 8: Counter Full */
1574 PHY_L_IS_AN_C = 1<<7, /* Bit 7: AutoNeg Complete */
1575 PHY_L_IS_SPEED = 1<<6, /* Bit 6: Speed Changed */
1576 PHY_L_IS_DUP = 1<<5, /* Bit 5: Duplex Changed */
1577 PHY_L_IS_LS = 1<<4, /* Bit 4: Link Status Changed */
1578 PHY_L_IS_ISOL = 1<<3, /* Bit 3: Isolate Occured */
1579 PHY_L_IS_MDINT = 1<<2, /* Bit 2: (ro) STAT: MII Int Pending */
1580 PHY_L_IS_INTEN = 1<<1, /* Bit 1: ENAB: Enable IRQs */
1581 PHY_L_IS_FORCE = 1<<0, /* Bit 0: ENAB: Force Interrupt */
1582};
1583
1584/* int. mask */
1585#define PHY_L_DEF_MSK (PHY_L_IS_LS | PHY_L_IS_ISOL | PHY_L_IS_INTEN)
1586
1587/***** PHY_LONE_LED_CFG 16 bit r/w LED Configuration Reg *****/
1588enum {
1589 PHY_L_LC_LEDC = 3<<14,/* Bit 15..14: Col/Blink/On/Off */
1590 PHY_L_LC_LEDR = 3<<12,/* Bit 13..12: Rx/Blink/On/Off */
1591 PHY_L_LC_LEDT = 3<<10,/* Bit 11..10: Tx/Blink/On/Off */
1592 PHY_L_LC_LEDG = 3<<8,/* Bit 9..8: Giga/Blink/On/Off */
1593 PHY_L_LC_LEDS = 3<<6,/* Bit 7..6: 10-100/Blink/On/Off */
1594 PHY_L_LC_LEDL = 3<<4,/* Bit 5..4: Link/Blink/On/Off */
1595 PHY_L_LC_LEDF = 3<<2,/* Bit 3..2: Duplex/Blink/On/Off */
1596 PHY_L_LC_PSTRECH= 1<<1, /* Bit 1: Strech LED Pulses */
1597 PHY_L_LC_FREQ = 1<<0, /* Bit 0: 30/100 ms */
1598};
1599
1600/***** PHY_LONE_PORT_CTRL 16 bit r/w Port Control Reg *****/
1601enum {
1602 PHY_L_PC_TX_TCLK = 1<<15, /* Bit 15: Enable TX_TCLK */
1603 PHY_L_PC_ALT_NP = 1<<13, /* Bit 14: Alternate Next Page */
1604 PHY_L_PC_GMII_ALT= 1<<12, /* Bit 13: Alternate GMII driver */
1605 PHY_L_PC_TEN_CRS = 1<<10, /* Bit 10: Extend CRS*/
1606};
1607
1608/***** PHY_LONE_CIM 16 bit r/o CIM Reg *****/
1609enum {
1610 PHY_L_CIM_ISOL = 0xff<<8,/* Bit 15..8: Isolate Count */
1611 PHY_L_CIM_FALSE_CAR = 0xff, /* Bit 7..0: False Carrier Count */
1612};
1613
1614/*
1615 * Pause Bits (PHY_L_AN_ASP and PHY_L_AN_PC) encoding
1616 */
1617enum {
1618 PHY_L_P_NO_PAUSE= 0<<10,/* Bit 11..10: no Pause Mode */
1619 PHY_L_P_SYM_MD = 1<<10, /* Bit 11..10: symmetric Pause Mode */
1620 PHY_L_P_ASYM_MD = 2<<10,/* Bit 11..10: asymmetric Pause Mode */
1621 PHY_L_P_BOTH_MD = 3<<10,/* Bit 11..10: both Pause Mode */
1622};
1623
1624/*
1625 * National-Specific
1626 */
1627/***** PHY_NAT_1000T_CTRL 16 bit r/w 1000Base-T Control Reg *****/
1628enum {
1629 PHY_N_1000C_TEST= 7<<13,/* Bit 15..13: Test Modes */
1630 PHY_N_1000C_MSE = 1<<12, /* Bit 12: Master/Slave Enable */
1631 PHY_N_1000C_MSC = 1<<11, /* Bit 11: M/S Configuration */
1632 PHY_N_1000C_RD = 1<<10, /* Bit 10: Repeater/DTE */
1633 PHY_N_1000C_AFD = 1<<9, /* Bit 9: Advertise Full Duplex */
1634 PHY_N_1000C_AHD = 1<<8, /* Bit 8: Advertise Half Duplex */
1635 PHY_N_1000C_APC = 1<<7, /* Bit 7: Asymmetric Pause Cap. */};
1636
1637
1638/***** PHY_NAT_1000T_STAT 16 bit r/o 1000Base-T Status Reg *****/
1639enum {
1640 PHY_N_1000S_MSF = 1<<15, /* Bit 15: Master/Slave Fault */
1641 PHY_N_1000S_MSR = 1<<14, /* Bit 14: Master/Slave Result */
1642 PHY_N_1000S_LRS = 1<<13, /* Bit 13: Local Receiver Status */
1643 PHY_N_1000S_RRS = 1<<12, /* Bit 12: Remote Receiver Status*/
1644 PHY_N_1000S_LP_FD= 1<<11, /* Bit 11: Link Partner can FD */
1645 PHY_N_1000S_LP_HD= 1<<10, /* Bit 10: Link Partner can HD */
1646 PHY_N_1000C_LP_APC= 1<<9, /* Bit 9: LP Asym. Pause Cap. */
1647 PHY_N_1000S_IEC = 0xff, /* Bit 7..0: Idle Error Count */
1648};
1649
1650/***** PHY_NAT_EXT_STAT 16 bit r/o Extended Status Register *****/
1651enum {
1652 PHY_N_ES_X_FD_CAP= 1<<15, /* Bit 15: 1000Base-X FD capable */
1653 PHY_N_ES_X_HD_CAP= 1<<14, /* Bit 14: 1000Base-X HD capable */
1654 PHY_N_ES_T_FD_CAP= 1<<13, /* Bit 13: 1000Base-T FD capable */
1655 PHY_N_ES_T_HD_CAP= 1<<12, /* Bit 12: 1000Base-T HD capable */
1656};
1657
1658/** Marvell-Specific */ 1320/** Marvell-Specific */
1659enum { 1321enum {
1660 PHY_M_AN_NXT_PG = 1<<15, /* Request Next Page */ 1322 PHY_M_AN_NXT_PG = 1<<15, /* Request Next Page */
@@ -1718,7 +1380,7 @@ enum {
1718 PHY_M_PC_EN_DET_PLUS = 3<<8, /* Energy Detect Plus (Mode 2) */ 1380 PHY_M_PC_EN_DET_PLUS = 3<<8, /* Energy Detect Plus (Mode 2) */
1719}; 1381};
1720 1382
1721#define PHY_M_PC_MDI_XMODE(x) (((x)<<5) & PHY_M_PC_MDIX_MSK) 1383#define PHY_M_PC_MDI_XMODE(x) (((x)<<5) & PHY_M_PC_MDIX_MSK)
1722 1384
1723enum { 1385enum {
1724 PHY_M_PC_MAN_MDI = 0, /* 00 = Manual MDI configuration */ 1386 PHY_M_PC_MAN_MDI = 0, /* 00 = Manual MDI configuration */
@@ -2105,7 +1767,7 @@ enum {
2105 GM_GPSR_FC_RX_DIS = 1<<2, /* Bit 2: Rx Flow-Control Mode Disabled */ 1767 GM_GPSR_FC_RX_DIS = 1<<2, /* Bit 2: Rx Flow-Control Mode Disabled */
2106 GM_GPSR_PROM_EN = 1<<1, /* Bit 1: Promiscuous Mode Enabled */ 1768 GM_GPSR_PROM_EN = 1<<1, /* Bit 1: Promiscuous Mode Enabled */
2107}; 1769};
2108 1770
2109/* GM_GP_CTRL 16 bit r/w General Purpose Control Register */ 1771/* GM_GP_CTRL 16 bit r/w General Purpose Control Register */
2110enum { 1772enum {
2111 GM_GPCR_PROM_ENA = 1<<14, /* Bit 14: Enable Promiscuous Mode */ 1773 GM_GPCR_PROM_ENA = 1<<14, /* Bit 14: Enable Promiscuous Mode */
@@ -2127,7 +1789,7 @@ enum {
2127 1789
2128#define GM_GPCR_SPEED_1000 (GM_GPCR_GIGS_ENA | GM_GPCR_SPEED_100) 1790#define GM_GPCR_SPEED_1000 (GM_GPCR_GIGS_ENA | GM_GPCR_SPEED_100)
2129#define GM_GPCR_AU_ALL_DIS (GM_GPCR_AU_DUP_DIS | GM_GPCR_AU_FCT_DIS|GM_GPCR_AU_SPD_DIS) 1791#define GM_GPCR_AU_ALL_DIS (GM_GPCR_AU_DUP_DIS | GM_GPCR_AU_FCT_DIS|GM_GPCR_AU_SPD_DIS)
2130 1792
2131/* GM_TX_CTRL 16 bit r/w Transmit Control Register */ 1793/* GM_TX_CTRL 16 bit r/w Transmit Control Register */
2132enum { 1794enum {
2133 GM_TXCR_FORCE_JAM = 1<<15, /* Bit 15: Force Jam / Flow-Control */ 1795 GM_TXCR_FORCE_JAM = 1<<15, /* Bit 15: Force Jam / Flow-Control */
@@ -2138,7 +1800,7 @@ enum {
2138 1800
2139#define TX_COL_THR(x) (((x)<<10) & GM_TXCR_COL_THR_MSK) 1801#define TX_COL_THR(x) (((x)<<10) & GM_TXCR_COL_THR_MSK)
2140#define TX_COL_DEF 0x04 1802#define TX_COL_DEF 0x04
2141 1803
2142/* GM_RX_CTRL 16 bit r/w Receive Control Register */ 1804/* GM_RX_CTRL 16 bit r/w Receive Control Register */
2143enum { 1805enum {
2144 GM_RXCR_UCF_ENA = 1<<15, /* Bit 15: Enable Unicast filtering */ 1806 GM_RXCR_UCF_ENA = 1<<15, /* Bit 15: Enable Unicast filtering */
@@ -2146,7 +1808,7 @@ enum {
2146 GM_RXCR_CRC_DIS = 1<<13, /* Bit 13: Remove 4-byte CRC */ 1808 GM_RXCR_CRC_DIS = 1<<13, /* Bit 13: Remove 4-byte CRC */
2147 GM_RXCR_PASS_FC = 1<<12, /* Bit 12: Pass FC packets to FIFO */ 1809 GM_RXCR_PASS_FC = 1<<12, /* Bit 12: Pass FC packets to FIFO */
2148}; 1810};
2149 1811
2150/* GM_TX_PARAM 16 bit r/w Transmit Parameter Register */ 1812/* GM_TX_PARAM 16 bit r/w Transmit Parameter Register */
2151enum { 1813enum {
2152 GM_TXPA_JAMLEN_MSK = 0x03<<14, /* Bit 15..14: Jam Length */ 1814 GM_TXPA_JAMLEN_MSK = 0x03<<14, /* Bit 15..14: Jam Length */
@@ -2171,7 +1833,7 @@ enum {
2171 GM_SMOD_JUMBO_ENA = 1<<8, /* Bit 8: Enable Jumbo (Max. Frame Len) */ 1833 GM_SMOD_JUMBO_ENA = 1<<8, /* Bit 8: Enable Jumbo (Max. Frame Len) */
2172 GM_SMOD_IPG_MSK = 0x1f /* Bit 4..0: Inter-Packet Gap (IPG) */ 1834 GM_SMOD_IPG_MSK = 0x1f /* Bit 4..0: Inter-Packet Gap (IPG) */
2173}; 1835};
2174 1836
2175#define DATA_BLIND_VAL(x) (((x)<<11) & GM_SMOD_DATABL_MSK) 1837#define DATA_BLIND_VAL(x) (((x)<<11) & GM_SMOD_DATABL_MSK)
2176#define DATA_BLIND_DEF 0x04 1838#define DATA_BLIND_DEF 0x04
2177 1839
@@ -2186,7 +1848,7 @@ enum {
2186 GM_SMI_CT_RD_VAL = 1<<4, /* Bit 4: Read Valid (Read completed) */ 1848 GM_SMI_CT_RD_VAL = 1<<4, /* Bit 4: Read Valid (Read completed) */
2187 GM_SMI_CT_BUSY = 1<<3, /* Bit 3: Busy (Operation in progress) */ 1849 GM_SMI_CT_BUSY = 1<<3, /* Bit 3: Busy (Operation in progress) */
2188}; 1850};
2189 1851
2190#define GM_SMI_CT_PHY_AD(x) (((x)<<11) & GM_SMI_CT_PHY_A_MSK) 1852#define GM_SMI_CT_PHY_AD(x) (((x)<<11) & GM_SMI_CT_PHY_A_MSK)
2191#define GM_SMI_CT_REG_AD(x) (((x)<<6) & GM_SMI_CT_REG_A_MSK) 1853#define GM_SMI_CT_REG_AD(x) (((x)<<6) & GM_SMI_CT_REG_A_MSK)
2192 1854
@@ -2195,7 +1857,7 @@ enum {
2195 GM_PAR_MIB_CLR = 1<<5, /* Bit 5: Set MIB Clear Counter Mode */ 1857 GM_PAR_MIB_CLR = 1<<5, /* Bit 5: Set MIB Clear Counter Mode */
2196 GM_PAR_MIB_TST = 1<<4, /* Bit 4: MIB Load Counter (Test Mode) */ 1858 GM_PAR_MIB_TST = 1<<4, /* Bit 4: MIB Load Counter (Test Mode) */
2197}; 1859};
2198 1860
2199/* Receive Frame Status Encoding */ 1861/* Receive Frame Status Encoding */
2200enum { 1862enum {
2201 GMR_FS_LEN = 0xffff<<16, /* Bit 31..16: Rx Frame Length */ 1863 GMR_FS_LEN = 0xffff<<16, /* Bit 31..16: Rx Frame Length */
@@ -2217,12 +1879,12 @@ enum {
2217/* 1879/*
2218 * GMR_FS_ANY_ERR (analogous to XMR_FS_ANY_ERR) 1880 * GMR_FS_ANY_ERR (analogous to XMR_FS_ANY_ERR)
2219 */ 1881 */
2220 GMR_FS_ANY_ERR = GMR_FS_CRC_ERR | GMR_FS_LONG_ERR | 1882 GMR_FS_ANY_ERR = GMR_FS_CRC_ERR | GMR_FS_LONG_ERR |
2221 GMR_FS_MII_ERR | GMR_FS_BAD_FC | GMR_FS_GOOD_FC | 1883 GMR_FS_MII_ERR | GMR_FS_BAD_FC | GMR_FS_GOOD_FC |
2222 GMR_FS_JABBER, 1884 GMR_FS_JABBER,
2223/* Rx GMAC FIFO Flush Mask (default) */ 1885/* Rx GMAC FIFO Flush Mask (default) */
2224 RX_FF_FL_DEF_MSK = GMR_FS_CRC_ERR | GMR_FS_RX_FF_OV |GMR_FS_MII_ERR | 1886 RX_FF_FL_DEF_MSK = GMR_FS_CRC_ERR | GMR_FS_RX_FF_OV |GMR_FS_MII_ERR |
2225 GMR_FS_BAD_FC | GMR_FS_GOOD_FC | GMR_FS_UN_SIZE | 1887 GMR_FS_BAD_FC | GMR_FS_GOOD_FC | GMR_FS_UN_SIZE |
2226 GMR_FS_JABBER, 1888 GMR_FS_JABBER,
2227}; 1889};
2228 1890
@@ -2540,10 +2202,6 @@ enum {
2540}; 2202};
2541 2203
2542 2204
2543/* XM_PHY_ADDR 16 bit r/w PHY Address Register */
2544#define XM_PHY_ADDR_SZ 0x1f /* Bit 4..0: PHY Address bits */
2545
2546
2547/* XM_GP_PORT 32 bit r/w General Purpose Port Register */ 2205/* XM_GP_PORT 32 bit r/w General Purpose Port Register */
2548enum { 2206enum {
2549 XM_GP_ANIP = 1<<6, /* Bit 6: (ro) Auto-Neg. in progress */ 2207 XM_GP_ANIP = 1<<6, /* Bit 6: (ro) Auto-Neg. in progress */
@@ -2662,8 +2320,8 @@ enum {
2662}; 2320};
2663 2321
2664#define XM_PAUSE_MODE (XM_MD_SPOE_E | XM_MD_SPOL_I | XM_MD_SPOH_I) 2322#define XM_PAUSE_MODE (XM_MD_SPOE_E | XM_MD_SPOL_I | XM_MD_SPOH_I)
2665#define XM_DEF_MODE (XM_MD_RX_RUNT | XM_MD_RX_IRLE | XM_MD_RX_LONG |\ 2323#define XM_DEF_MODE (XM_MD_RX_RUNT | XM_MD_RX_IRLE | XM_MD_RX_LONG |\
2666 XM_MD_RX_CRCE | XM_MD_RX_ERR | XM_MD_CSA | XM_MD_CAA) 2324 XM_MD_RX_CRCE | XM_MD_RX_ERR | XM_MD_CSA)
2667 2325
2668/* XM_STAT_CMD 16 bit r/w Statistics Command Register */ 2326/* XM_STAT_CMD 16 bit r/w Statistics Command Register */
2669enum { 2327enum {
@@ -2793,28 +2451,20 @@ struct skge_hw {
2793 u32 intr_mask; 2451 u32 intr_mask;
2794 struct net_device *dev[2]; 2452 struct net_device *dev[2];
2795 2453
2796 u8 mac_cfg;
2797 u8 chip_id; 2454 u8 chip_id;
2455 u8 chip_rev;
2798 u8 phy_type; 2456 u8 phy_type;
2799 u8 pmd_type; 2457 u8 pmd_type;
2800 u16 phy_addr; 2458 u16 phy_addr;
2459 u8 ports;
2801 2460
2802 u32 ram_size; 2461 u32 ram_size;
2803 u32 ram_offset; 2462 u32 ram_offset;
2804 2463
2805 struct tasklet_struct ext_tasklet; 2464 struct tasklet_struct ext_tasklet;
2806 spinlock_t phy_lock; 2465 spinlock_t phy_lock;
2807}; 2466};
2808 2467
2809static inline int isdualport(const struct skge_hw *hw)
2810{
2811 return !(hw->mac_cfg & CFG_SNG_MAC);
2812}
2813
2814static inline u8 chip_rev(const struct skge_hw *hw)
2815{
2816 return (hw->mac_cfg & CFG_CHIP_R_MSK) >> 4;
2817}
2818 2468
2819static inline int iscopper(const struct skge_hw *hw) 2469static inline int iscopper(const struct skge_hw *hw)
2820{ 2470{
@@ -2827,7 +2477,7 @@ enum {
2827 FLOW_MODE_REM_SEND = 2, /* Symmetric or just remote */ 2477 FLOW_MODE_REM_SEND = 2, /* Symmetric or just remote */
2828 FLOW_MODE_SYMMETRIC = 3, /* Both stations may send PAUSE */ 2478 FLOW_MODE_SYMMETRIC = 3, /* Both stations may send PAUSE */
2829}; 2479};
2830 2480
2831struct skge_port { 2481struct skge_port {
2832 u32 msg_enable; 2482 u32 msg_enable;
2833 struct skge_hw *hw; 2483 struct skge_hw *hw;
@@ -2853,8 +2503,8 @@ struct skge_port {
2853 void *mem; /* PCI memory for rings */ 2503 void *mem; /* PCI memory for rings */
2854 dma_addr_t dma; 2504 dma_addr_t dma;
2855 unsigned long mem_size; 2505 unsigned long mem_size;
2506 unsigned int rx_buf_size;
2856 2507
2857 struct timer_list link_check;
2858 struct timer_list led_blink; 2508 struct timer_list led_blink;
2859}; 2509};
2860 2510
@@ -2863,7 +2513,6 @@ struct skge_port {
2863static inline u32 skge_read32(const struct skge_hw *hw, int reg) 2513static inline u32 skge_read32(const struct skge_hw *hw, int reg)
2864{ 2514{
2865 return readl(hw->regs + reg); 2515 return readl(hw->regs + reg);
2866
2867} 2516}
2868 2517
2869static inline u16 skge_read16(const struct skge_hw *hw, int reg) 2518static inline u16 skge_read16(const struct skge_hw *hw, int reg)
@@ -2892,114 +2541,87 @@ static inline void skge_write8(const struct skge_hw *hw, int reg, u8 val)
2892} 2541}
2893 2542
2894/* MAC Related Registers inside the device. */ 2543/* MAC Related Registers inside the device. */
2895#define SKGEMAC_REG(port,reg) (((port)<<7)+(reg)) 2544#define SK_REG(port,reg) (((port)<<7)+(reg))
2896 2545#define SK_XMAC_REG(port, reg) \
2897/* PCI config space can be accessed via memory mapped space */
2898#define SKGEPCI_REG(reg) ((reg)+ 0x380)
2899
2900#define SKGEXM_REG(port, reg) \
2901 ((BASE_XMAC_1 + (port) * (BASE_XMAC_2 - BASE_XMAC_1)) | (reg) << 1) 2546 ((BASE_XMAC_1 + (port) * (BASE_XMAC_2 - BASE_XMAC_1)) | (reg) << 1)
2902 2547
2903static inline u32 skge_xm_read32(const struct skge_hw *hw, int port, int reg) 2548static inline u32 xm_read32(const struct skge_hw *hw, int port, int reg)
2904{
2905 return skge_read32(hw, SKGEXM_REG(port,reg));
2906}
2907
2908static inline u16 skge_xm_read16(const struct skge_hw *hw, int port, int reg)
2909{ 2549{
2910 return skge_read16(hw, SKGEXM_REG(port,reg)); 2550 u32 v;
2551 v = skge_read16(hw, SK_XMAC_REG(port, reg));
2552 v |= (u32)skge_read16(hw, SK_XMAC_REG(port, reg+2)) << 16;
2553 return v;
2911} 2554}
2912 2555
2913static inline u8 skge_xm_read8(const struct skge_hw *hw, int port, int reg) 2556static inline u16 xm_read16(const struct skge_hw *hw, int port, int reg)
2914{ 2557{
2915 return skge_read8(hw, SKGEXM_REG(port,reg)); 2558 return skge_read16(hw, SK_XMAC_REG(port,reg));
2916} 2559}
2917 2560
2918static inline void skge_xm_write32(const struct skge_hw *hw, int port, int r, u32 v) 2561static inline void xm_write32(const struct skge_hw *hw, int port, int r, u32 v)
2919{ 2562{
2920 skge_write32(hw, SKGEXM_REG(port,r), v); 2563 skge_write16(hw, SK_XMAC_REG(port,r), v & 0xffff);
2564 skge_write16(hw, SK_XMAC_REG(port,r+2), v >> 16);
2921} 2565}
2922 2566
2923static inline void skge_xm_write16(const struct skge_hw *hw, int port, int r, u16 v) 2567static inline void xm_write16(const struct skge_hw *hw, int port, int r, u16 v)
2924{ 2568{
2925 skge_write16(hw, SKGEXM_REG(port,r), v); 2569 skge_write16(hw, SK_XMAC_REG(port,r), v);
2926} 2570}
2927 2571
2928static inline void skge_xm_write8(const struct skge_hw *hw, int port, int r, u8 v) 2572static inline void xm_outhash(const struct skge_hw *hw, int port, int reg,
2929{
2930 skge_write8(hw, SKGEXM_REG(port,r), v);
2931}
2932
2933static inline void skge_xm_outhash(const struct skge_hw *hw, int port, int reg,
2934 const u8 *hash) 2573 const u8 *hash)
2935{ 2574{
2936 skge_xm_write16(hw, port, reg, 2575 xm_write16(hw, port, reg, (u16)hash[0] | ((u16)hash[1] << 8));
2937 (u16)hash[0] | ((u16)hash[1] << 8)); 2576 xm_write16(hw, port, reg+2, (u16)hash[2] | ((u16)hash[3] << 8));
2938 skge_xm_write16(hw, port, reg+2, 2577 xm_write16(hw, port, reg+4, (u16)hash[4] | ((u16)hash[5] << 8));
2939 (u16)hash[2] | ((u16)hash[3] << 8)); 2578 xm_write16(hw, port, reg+6, (u16)hash[6] | ((u16)hash[7] << 8));
2940 skge_xm_write16(hw, port, reg+4,
2941 (u16)hash[4] | ((u16)hash[5] << 8));
2942 skge_xm_write16(hw, port, reg+6,
2943 (u16)hash[6] | ((u16)hash[7] << 8));
2944} 2579}
2945 2580
2946static inline void skge_xm_outaddr(const struct skge_hw *hw, int port, int reg, 2581static inline void xm_outaddr(const struct skge_hw *hw, int port, int reg,
2947 const u8 *addr) 2582 const u8 *addr)
2948{ 2583{
2949 skge_xm_write16(hw, port, reg, 2584 xm_write16(hw, port, reg, (u16)addr[0] | ((u16)addr[1] << 8));
2950 (u16)addr[0] | ((u16)addr[1] << 8)); 2585 xm_write16(hw, port, reg+2, (u16)addr[2] | ((u16)addr[3] << 8));
2951 skge_xm_write16(hw, port, reg, 2586 xm_write16(hw, port, reg+4, (u16)addr[4] | ((u16)addr[5] << 8));
2952 (u16)addr[2] | ((u16)addr[3] << 8));
2953 skge_xm_write16(hw, port, reg,
2954 (u16)addr[4] | ((u16)addr[5] << 8));
2955} 2587}
2956 2588
2589#define SK_GMAC_REG(port,reg) \
2590 (BASE_GMAC_1 + (port) * (BASE_GMAC_2-BASE_GMAC_1) + (reg))
2957 2591
2958#define SKGEGMA_REG(port,reg) \ 2592static inline u16 gma_read16(const struct skge_hw *hw, int port, int reg)
2959 ((reg) + BASE_GMAC_1 + \
2960 (port) * (BASE_GMAC_2-BASE_GMAC_1))
2961
2962static inline u16 skge_gma_read16(const struct skge_hw *hw, int port, int reg)
2963{ 2593{
2964 return skge_read16(hw, SKGEGMA_REG(port,reg)); 2594 return skge_read16(hw, SK_GMAC_REG(port,reg));
2965} 2595}
2966 2596
2967static inline u32 skge_gma_read32(const struct skge_hw *hw, int port, int reg) 2597static inline u32 gma_read32(const struct skge_hw *hw, int port, int reg)
2968{ 2598{
2969 return (u32) skge_read16(hw, SKGEGMA_REG(port,reg)) 2599 return (u32) skge_read16(hw, SK_GMAC_REG(port,reg))
2970 | ((u32)skge_read16(hw, SKGEGMA_REG(port,reg+4)) << 16); 2600 | ((u32)skge_read16(hw, SK_GMAC_REG(port,reg+4)) << 16);
2971} 2601}
2972 2602
2973static inline u8 skge_gma_read8(const struct skge_hw *hw, int port, int reg) 2603static inline void gma_write16(const struct skge_hw *hw, int port, int r, u16 v)
2974{ 2604{
2975 return skge_read8(hw, SKGEGMA_REG(port,reg)); 2605 skge_write16(hw, SK_GMAC_REG(port,r), v);
2976} 2606}
2977 2607
2978static inline void skge_gma_write16(const struct skge_hw *hw, int port, int r, u16 v) 2608static inline void gma_write32(const struct skge_hw *hw, int port, int r, u32 v)
2979{ 2609{
2980 skge_write16(hw, SKGEGMA_REG(port,r), v); 2610 skge_write16(hw, SK_GMAC_REG(port, r), (u16) v);
2611 skge_write32(hw, SK_GMAC_REG(port, r+4), (u16)(v >> 16));
2981} 2612}
2982 2613
2983static inline void skge_gma_write32(const struct skge_hw *hw, int port, int r, u32 v) 2614static inline void gma_write8(const struct skge_hw *hw, int port, int r, u8 v)
2984{ 2615{
2985 skge_write16(hw, SKGEGMA_REG(port, r), (u16) v); 2616 skge_write8(hw, SK_GMAC_REG(port,r), v);
2986 skge_write32(hw, SKGEGMA_REG(port, r+4), (u16)(v >> 16));
2987} 2617}
2988 2618
2989static inline void skge_gma_write8(const struct skge_hw *hw, int port, int r, u8 v) 2619static inline void gma_set_addr(struct skge_hw *hw, int port, int reg,
2990{
2991 skge_write8(hw, SKGEGMA_REG(port,r), v);
2992}
2993
2994static inline void skge_gm_set_addr(struct skge_hw *hw, int port, int reg,
2995 const u8 *addr) 2620 const u8 *addr)
2996{ 2621{
2997 skge_gma_write16(hw, port, reg, 2622 gma_write16(hw, port, reg, (u16) addr[0] | ((u16) addr[1] << 8));
2998 (u16) addr[0] | ((u16) addr[1] << 8)); 2623 gma_write16(hw, port, reg+4,(u16) addr[2] | ((u16) addr[3] << 8));
2999 skge_gma_write16(hw, port, reg+4, 2624 gma_write16(hw, port, reg+8,(u16) addr[4] | ((u16) addr[5] << 8));
3000 (u16) addr[2] | ((u16) addr[3] << 8));
3001 skge_gma_write16(hw, port, reg+8,
3002 (u16) addr[4] | ((u16) addr[5] << 8));
3003} 2625}
3004 2626
3005#endif 2627#endif
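The renamed xm_read32()/xm_write32() helpers above build each 32-bit XMAC access out of two 16-bit operations on consecutive register offsets (low half at reg, high half at reg+2). A minimal standalone C sketch of that split-access pattern, with hypothetical reg_read16()/reg_write16() stand-ins for the real skge_read16()/skge_write16() MMIO helpers:

#include <stdint.h>
#include <stdio.h>

/* Fake 16-bit register file standing in for the memory-mapped XMAC. */
static uint16_t fake_regs[8];

static uint16_t reg_read16(int reg)              { return fake_regs[reg / 2]; }
static void     reg_write16(int reg, uint16_t v) { fake_regs[reg / 2] = v; }

/* Low 16 bits live at 'reg', high 16 bits at 'reg + 2'. */
static uint32_t xm_read32_sketch(int reg)
{
	uint32_t v = reg_read16(reg);
	v |= (uint32_t)reg_read16(reg + 2) << 16;
	return v;
}

static void xm_write32_sketch(int reg, uint32_t v)
{
	reg_write16(reg, v & 0xffff);
	reg_write16(reg + 2, v >> 16);
}

int main(void)
{
	xm_write32_sketch(4, 0xdeadbeef);
	printf("0x%08x\n", xm_read32_sketch(4));   /* prints 0xdeadbeef */
	return 0;
}

The gma_read32()/gma_write32() helpers in the hunk follow the same idea, but with the second half at reg+4 to match the GMAC register spacing.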
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index 1d3231cc471a..d20e0da05a26 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -137,6 +137,110 @@ config PCMCIA_RAYCS
137comment "Wireless 802.11b ISA/PCI cards support" 137comment "Wireless 802.11b ISA/PCI cards support"
138 depends on NET_RADIO && (ISA || PCI || PPC_PMAC || PCMCIA) 138 depends on NET_RADIO && (ISA || PCI || PPC_PMAC || PCMCIA)
139 139
140config IPW2100
141 tristate "Intel PRO/Wireless 2100 Network Connection"
142 depends on NET_RADIO && PCI && IEEE80211
143 select FW_LOADER
144 ---help---
145 A driver for the Intel PRO/Wireless 2100 Network
146 Connection 802.11b wireless network adapter.
147
148 See <file:Documentation/networking/README.ipw2100> for information on
149 the capabilities currently enabled in this driver and for tips
150 for debugging issues and problems.
151
152 In order to use this driver, you will need a firmware image for it.
153 You can obtain the firmware from
154 <http://ipw2100.sf.net/>. Once you have the firmware image, you
155 will need to place it in /etc/firmware.
156
157 You will also very likely need the Wireless Tools in order to
158 configure your card:
159
160 <http://www.hpl.hp.com/personal/Jean_Tourrilhes/Linux/Tools.html>.
161
162 If you want to compile the driver as a module ( = code which can be
 163	  inserted in and removed from the running kernel whenever you want),
164 say M here and read <file:Documentation/modules.txt>. The module
165 will be called ipw2100.ko.
166
167config IPW2100_PROMISC
168 bool "Enable promiscuous mode"
169 depends on IPW2100
170 ---help---
171 Enables promiscuous/monitor mode support for the ipw2100 driver.
172 With this feature compiled into the driver, you can switch to
 173	  promiscuous mode via the Wireless Tools' Monitor mode. While in this
174 mode, no packets can be sent.
175
176config IPW_DEBUG
177 bool "Enable full debugging output in IPW2100 module."
178 depends on IPW2100
179 ---help---
180 This option will enable debug tracing output for the IPW2100.
181
182 This will result in the kernel module being ~60k larger. You can
183 control which debug output is sent to the kernel log by setting the
184 value in
185
186 /sys/bus/pci/drivers/ipw2100/debug_level
187
188 This entry will only exist if this option is enabled.
189
190 If you are not trying to debug or develop the IPW2100 driver, you
191 most likely want to say N here.
192
193config IPW2200
194 tristate "Intel PRO/Wireless 2200BG and 2915ABG Network Connection"
195 depends on IEEE80211 && PCI
196 select FW_LOADER
197 ---help---
198 A driver for the Intel PRO/Wireless 2200BG and 2915ABG Network
199 Connection adapters.
200
201 See <file:Documentation/networking/README.ipw2200> for
202 information on the capabilities currently enabled in this
203 driver and for tips for debugging issues and problems.
204
205 In order to use this driver, you will need a firmware image for it.
206 You can obtain the firmware from
207 <http://ipw2200.sf.net/>. See the above referenced README.ipw2200
 208	  for information on where to install the firmware images.
209
210 You will also very likely need the Wireless Tools in order to
211 configure your card:
212
213 <http://www.hpl.hp.com/personal/Jean_Tourrilhes/Linux/Tools.html>.
214
215 If you want to compile the driver as a module ( = code which can be
 216	  inserted in and removed from the running kernel whenever you want),
217 say M here and read <file:Documentation/modules.txt>. The module
218 will be called ipw2200.ko.
219
220config IPW_DEBUG
221 bool "Enable full debugging output in IPW2200 module."
222 depends on IPW2200
223 ---help---
224 This option will enable debug tracing output for the IPW2200.
225
226 This will result in the kernel module being ~100k larger. You can
227 control which debug output is sent to the kernel log by setting the
228 value in
229
230 /sys/bus/pci/drivers/ipw2200/debug_level
231
232 This entry will only exist if this option is enabled.
233
 234	  To set a value, simply echo an 8-digit hex value to the same file:
235
 236	  % echo 0x00000FF0 > /sys/bus/pci/drivers/ipw2200/debug_level
237
238 You can find the list of debug mask values in
239 drivers/net/wireless/ipw2200.h
240
241 If you are not trying to debug or develop the IPW2200 driver, you
242 most likely want to say N here.
243
140config AIRO 244config AIRO
141 tristate "Cisco/Aironet 34X/35X/4500/4800 ISA and PCI cards" 245 tristate "Cisco/Aironet 34X/35X/4500/4800 ISA and PCI cards"
142 depends on NET_RADIO && ISA && (PCI || BROKEN) 246 depends on NET_RADIO && ISA && (PCI || BROKEN)
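Both new entries select FW_LOADER, so the firmware images named in the help text above are pulled in at run time through the kernel firmware-loader interface rather than built into the module. A minimal sketch of that request/release pattern as a driver might use it; the helper name and image file name below are illustrative only, not the actual ipw2100/ipw2200 loading code:

#include <linux/kernel.h>
#include <linux/firmware.h>
#include <linux/pci.h>

/* Hypothetical example: ask the firmware loader for an image and pass it
 * on to an upload routine; "ipw2100-1.1.fw" is an illustrative name only. */
static int example_request_fw(struct pci_dev *pdev)
{
	const struct firmware *fw;
	int err;

	err = request_firmware(&fw, "ipw2100-1.1.fw", &pdev->dev);
	if (err) {
		printk(KERN_ERR "firmware image not available (%d)\n", err);
		return err;
	}

	/* fw->data and fw->size describe the image; upload it here. */

	release_firmware(fw);
	return 0;
}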
diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile
index 2b87841322cc..0859787581bb 100644
--- a/drivers/net/wireless/Makefile
+++ b/drivers/net/wireless/Makefile
@@ -2,6 +2,10 @@
2# Makefile for the Linux Wireless network device drivers. 2# Makefile for the Linux Wireless network device drivers.
3# 3#
4 4
5obj-$(CONFIG_IPW2100) += ipw2100.o
6
7obj-$(CONFIG_IPW2200) += ipw2200.o
8
5obj-$(CONFIG_STRIP) += strip.o 9obj-$(CONFIG_STRIP) += strip.o
6obj-$(CONFIG_ARLAN) += arlan.o 10obj-$(CONFIG_ARLAN) += arlan.o
7 11
diff --git a/drivers/net/wireless/atmel.c b/drivers/net/wireless/atmel.c
index 18a7d38d2a13..bed160a25cab 100644
--- a/drivers/net/wireless/atmel.c
+++ b/drivers/net/wireless/atmel.c
@@ -68,7 +68,7 @@
68#include <linux/device.h> 68#include <linux/device.h>
69#include <linux/moduleparam.h> 69#include <linux/moduleparam.h>
70#include <linux/firmware.h> 70#include <linux/firmware.h>
71#include "ieee802_11.h" 71#include <net/ieee80211.h>
72#include "atmel.h" 72#include "atmel.h"
73 73
74#define DRIVER_MAJOR 0 74#define DRIVER_MAJOR 0
@@ -618,12 +618,12 @@ static int atmel_lock_mac(struct atmel_private *priv);
618static void atmel_wmem32(struct atmel_private *priv, u16 pos, u32 data); 618static void atmel_wmem32(struct atmel_private *priv, u16 pos, u32 data);
619static void atmel_command_irq(struct atmel_private *priv); 619static void atmel_command_irq(struct atmel_private *priv);
620static int atmel_validate_channel(struct atmel_private *priv, int channel); 620static int atmel_validate_channel(struct atmel_private *priv, int channel);
621static void atmel_management_frame(struct atmel_private *priv, struct ieee802_11_hdr *header, 621static void atmel_management_frame(struct atmel_private *priv, struct ieee80211_hdr *header,
622 u16 frame_len, u8 rssi); 622 u16 frame_len, u8 rssi);
623static void atmel_management_timer(u_long a); 623static void atmel_management_timer(u_long a);
624static void atmel_send_command(struct atmel_private *priv, int command, void *cmd, int cmd_size); 624static void atmel_send_command(struct atmel_private *priv, int command, void *cmd, int cmd_size);
625static int atmel_send_command_wait(struct atmel_private *priv, int command, void *cmd, int cmd_size); 625static int atmel_send_command_wait(struct atmel_private *priv, int command, void *cmd, int cmd_size);
626static void atmel_transmit_management_frame(struct atmel_private *priv, struct ieee802_11_hdr *header, 626static void atmel_transmit_management_frame(struct atmel_private *priv, struct ieee80211_hdr *header,
627 u8 *body, int body_len); 627 u8 *body, int body_len);
628 628
629static u8 atmel_get_mib8(struct atmel_private *priv, u8 type, u8 index); 629static u8 atmel_get_mib8(struct atmel_private *priv, u8 type, u8 index);
@@ -827,7 +827,7 @@ static void tx_update_descriptor(struct atmel_private *priv, int is_bcast, u16 l
827static int start_tx (struct sk_buff *skb, struct net_device *dev) 827static int start_tx (struct sk_buff *skb, struct net_device *dev)
828{ 828{
829 struct atmel_private *priv = netdev_priv(dev); 829 struct atmel_private *priv = netdev_priv(dev);
830 struct ieee802_11_hdr header; 830 struct ieee80211_hdr header;
831 unsigned long flags; 831 unsigned long flags;
832 u16 buff, frame_ctl, len = (ETH_ZLEN < skb->len) ? skb->len : ETH_ZLEN; 832 u16 buff, frame_ctl, len = (ETH_ZLEN < skb->len) ? skb->len : ETH_ZLEN;
833 u8 SNAP_RFC1024[6] = {0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00}; 833 u8 SNAP_RFC1024[6] = {0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00};
@@ -863,17 +863,17 @@ static int start_tx (struct sk_buff *skb, struct net_device *dev)
863 return 1; 863 return 1;
864 } 864 }
865 865
866 frame_ctl = IEEE802_11_FTYPE_DATA; 866 frame_ctl = IEEE80211_FTYPE_DATA;
867 header.duration_id = 0; 867 header.duration_id = 0;
868 header.seq_ctl = 0; 868 header.seq_ctl = 0;
869 if (priv->wep_is_on) 869 if (priv->wep_is_on)
870 frame_ctl |= IEEE802_11_FCTL_WEP; 870 frame_ctl |= IEEE80211_FCTL_WEP;
871 if (priv->operating_mode == IW_MODE_ADHOC) { 871 if (priv->operating_mode == IW_MODE_ADHOC) {
872 memcpy(&header.addr1, skb->data, 6); 872 memcpy(&header.addr1, skb->data, 6);
873 memcpy(&header.addr2, dev->dev_addr, 6); 873 memcpy(&header.addr2, dev->dev_addr, 6);
874 memcpy(&header.addr3, priv->BSSID, 6); 874 memcpy(&header.addr3, priv->BSSID, 6);
875 } else { 875 } else {
876 frame_ctl |= IEEE802_11_FCTL_TODS; 876 frame_ctl |= IEEE80211_FCTL_TODS;
877 memcpy(&header.addr1, priv->CurrentBSSID, 6); 877 memcpy(&header.addr1, priv->CurrentBSSID, 6);
878 memcpy(&header.addr2, dev->dev_addr, 6); 878 memcpy(&header.addr2, dev->dev_addr, 6);
879 memcpy(&header.addr3, skb->data, 6); 879 memcpy(&header.addr3, skb->data, 6);
@@ -902,7 +902,7 @@ static int start_tx (struct sk_buff *skb, struct net_device *dev)
902} 902}
903 903
904static void atmel_transmit_management_frame(struct atmel_private *priv, 904static void atmel_transmit_management_frame(struct atmel_private *priv,
905 struct ieee802_11_hdr *header, 905 struct ieee80211_hdr *header,
906 u8 *body, int body_len) 906 u8 *body, int body_len)
907{ 907{
908 u16 buff; 908 u16 buff;
@@ -917,7 +917,7 @@ static void atmel_transmit_management_frame(struct atmel_private *priv,
917 tx_update_descriptor(priv, header->addr1[0] & 0x01, len, buff, TX_PACKET_TYPE_MGMT); 917 tx_update_descriptor(priv, header->addr1[0] & 0x01, len, buff, TX_PACKET_TYPE_MGMT);
918} 918}
919 919
920static void fast_rx_path(struct atmel_private *priv, struct ieee802_11_hdr *header, 920static void fast_rx_path(struct atmel_private *priv, struct ieee80211_hdr *header,
921 u16 msdu_size, u16 rx_packet_loc, u32 crc) 921 u16 msdu_size, u16 rx_packet_loc, u32 crc)
922{ 922{
923 /* fast path: unfragmented packet copy directly into skbuf */ 923 /* fast path: unfragmented packet copy directly into skbuf */
@@ -955,7 +955,7 @@ static void fast_rx_path(struct atmel_private *priv, struct ieee802_11_hdr *head
955 } 955 }
956 956
957 memcpy(skbp, header->addr1, 6); /* destination address */ 957 memcpy(skbp, header->addr1, 6); /* destination address */
958 if (le16_to_cpu(header->frame_ctl) & IEEE802_11_FCTL_FROMDS) 958 if (le16_to_cpu(header->frame_ctl) & IEEE80211_FCTL_FROMDS)
959 memcpy(&skbp[6], header->addr3, 6); 959 memcpy(&skbp[6], header->addr3, 6);
960 else 960 else
961 memcpy(&skbp[6], header->addr2, 6); /* source address */ 961 memcpy(&skbp[6], header->addr2, 6); /* source address */
@@ -990,14 +990,14 @@ static int probe_crc(struct atmel_private *priv, u16 packet_loc, u16 msdu_size)
990 return (crc ^ 0xffffffff) == netcrc; 990 return (crc ^ 0xffffffff) == netcrc;
991} 991}
992 992
993static void frag_rx_path(struct atmel_private *priv, struct ieee802_11_hdr *header, 993static void frag_rx_path(struct atmel_private *priv, struct ieee80211_hdr *header,
994 u16 msdu_size, u16 rx_packet_loc, u32 crc, u16 seq_no, u8 frag_no, int more_frags) 994 u16 msdu_size, u16 rx_packet_loc, u32 crc, u16 seq_no, u8 frag_no, int more_frags)
995{ 995{
996 u8 mac4[6]; 996 u8 mac4[6];
997 u8 source[6]; 997 u8 source[6];
998 struct sk_buff *skb; 998 struct sk_buff *skb;
999 999
1000 if (le16_to_cpu(header->frame_ctl) & IEEE802_11_FCTL_FROMDS) 1000 if (le16_to_cpu(header->frame_ctl) & IEEE80211_FCTL_FROMDS)
1001 memcpy(source, header->addr3, 6); 1001 memcpy(source, header->addr3, 6);
1002 else 1002 else
1003 memcpy(source, header->addr2, 6); 1003 memcpy(source, header->addr2, 6);
@@ -1082,7 +1082,7 @@ static void frag_rx_path(struct atmel_private *priv, struct ieee802_11_hdr *head
1082static void rx_done_irq(struct atmel_private *priv) 1082static void rx_done_irq(struct atmel_private *priv)
1083{ 1083{
1084 int i; 1084 int i;
1085 struct ieee802_11_hdr header; 1085 struct ieee80211_hdr header;
1086 1086
1087 for (i = 0; 1087 for (i = 0;
1088 atmel_rmem8(priv, atmel_rx(priv, RX_DESC_FLAGS_OFFSET, priv->rx_desc_head)) == RX_DESC_FLAG_VALID && 1088 atmel_rmem8(priv, atmel_rx(priv, RX_DESC_FLAGS_OFFSET, priv->rx_desc_head)) == RX_DESC_FLAG_VALID &&
@@ -1117,7 +1117,7 @@ static void rx_done_irq(struct atmel_private *priv)
1117 /* probe for CRC use here if needed once five packets have arrived with 1117 /* probe for CRC use here if needed once five packets have arrived with
1118 the same crc status, we assume we know what's happening and stop probing */ 1118 the same crc status, we assume we know what's happening and stop probing */
1119 if (priv->probe_crc) { 1119 if (priv->probe_crc) {
1120 if (!priv->wep_is_on || !(frame_ctl & IEEE802_11_FCTL_WEP)) { 1120 if (!priv->wep_is_on || !(frame_ctl & IEEE80211_FCTL_WEP)) {
1121 priv->do_rx_crc = probe_crc(priv, rx_packet_loc, msdu_size); 1121 priv->do_rx_crc = probe_crc(priv, rx_packet_loc, msdu_size);
1122 } else { 1122 } else {
1123 priv->do_rx_crc = probe_crc(priv, rx_packet_loc + 24, msdu_size - 24); 1123 priv->do_rx_crc = probe_crc(priv, rx_packet_loc + 24, msdu_size - 24);
@@ -1132,16 +1132,16 @@ static void rx_done_irq(struct atmel_private *priv)
1132 } 1132 }
1133 1133
1134 /* don't CRC header when WEP in use */ 1134 /* don't CRC header when WEP in use */
1135 if (priv->do_rx_crc && (!priv->wep_is_on || !(frame_ctl & IEEE802_11_FCTL_WEP))) { 1135 if (priv->do_rx_crc && (!priv->wep_is_on || !(frame_ctl & IEEE80211_FCTL_WEP))) {
1136 crc = crc32_le(0xffffffff, (unsigned char *)&header, 24); 1136 crc = crc32_le(0xffffffff, (unsigned char *)&header, 24);
1137 } 1137 }
1138 msdu_size -= 24; /* header */ 1138 msdu_size -= 24; /* header */
1139 1139
1140 if ((frame_ctl & IEEE802_11_FCTL_FTYPE) == IEEE802_11_FTYPE_DATA) { 1140 if ((frame_ctl & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA) {
1141 1141
1142 int more_fragments = frame_ctl & IEEE802_11_FCTL_MOREFRAGS; 1142 int more_fragments = frame_ctl & IEEE80211_FCTL_MOREFRAGS;
1143 u8 packet_fragment_no = seq_control & IEEE802_11_SCTL_FRAG; 1143 u8 packet_fragment_no = seq_control & IEEE80211_SCTL_FRAG;
1144 u16 packet_sequence_no = (seq_control & IEEE802_11_SCTL_SEQ) >> 4; 1144 u16 packet_sequence_no = (seq_control & IEEE80211_SCTL_SEQ) >> 4;
1145 1145
1146 if (!more_fragments && packet_fragment_no == 0 ) { 1146 if (!more_fragments && packet_fragment_no == 0 ) {
1147 fast_rx_path(priv, &header, msdu_size, rx_packet_loc, crc); 1147 fast_rx_path(priv, &header, msdu_size, rx_packet_loc, crc);
@@ -1151,7 +1151,7 @@ static void rx_done_irq(struct atmel_private *priv)
1151 } 1151 }
1152 } 1152 }
1153 1153
1154 if ((frame_ctl & IEEE802_11_FCTL_FTYPE) == IEEE802_11_FTYPE_MGMT) { 1154 if ((frame_ctl & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) {
1155 /* copy rest of packet into buffer */ 1155 /* copy rest of packet into buffer */
1156 atmel_copy_to_host(priv->dev, (unsigned char *)&priv->rx_buf, rx_packet_loc + 24, msdu_size); 1156 atmel_copy_to_host(priv->dev, (unsigned char *)&priv->rx_buf, rx_packet_loc + 24, msdu_size);
1157 1157
@@ -2663,10 +2663,10 @@ static void handle_beacon_probe(struct atmel_private *priv, u16 capability, u8 c
2663 2663
2664static void send_authentication_request(struct atmel_private *priv, u8 *challenge, int challenge_len) 2664static void send_authentication_request(struct atmel_private *priv, u8 *challenge, int challenge_len)
2665{ 2665{
2666 struct ieee802_11_hdr header; 2666 struct ieee80211_hdr header;
2667 struct auth_body auth; 2667 struct auth_body auth;
2668 2668
2669 header.frame_ctl = cpu_to_le16(IEEE802_11_FTYPE_MGMT | IEEE802_11_STYPE_AUTH); 2669 header.frame_ctl = cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_AUTH);
2670 header.duration_id = cpu_to_le16(0x8000); 2670 header.duration_id = cpu_to_le16(0x8000);
2671 header.seq_ctl = 0; 2671 header.seq_ctl = 0;
2672 memcpy(header.addr1, priv->CurrentBSSID, 6); 2672 memcpy(header.addr1, priv->CurrentBSSID, 6);
@@ -2677,7 +2677,7 @@ static void send_authentication_request(struct atmel_private *priv, u8 *challeng
2677 auth.alg = cpu_to_le16(C80211_MGMT_AAN_SHAREDKEY); 2677 auth.alg = cpu_to_le16(C80211_MGMT_AAN_SHAREDKEY);
2678 /* no WEP for authentication frames with TrSeqNo 1 */ 2678 /* no WEP for authentication frames with TrSeqNo 1 */
2679 if (priv->CurrentAuthentTransactionSeqNum != 1) 2679 if (priv->CurrentAuthentTransactionSeqNum != 1)
2680 header.frame_ctl |= cpu_to_le16(IEEE802_11_FCTL_WEP); 2680 header.frame_ctl |= cpu_to_le16(IEEE80211_FCTL_WEP);
2681 } else { 2681 } else {
2682 auth.alg = cpu_to_le16(C80211_MGMT_AAN_OPENSYSTEM); 2682 auth.alg = cpu_to_le16(C80211_MGMT_AAN_OPENSYSTEM);
2683 } 2683 }
@@ -2701,7 +2701,7 @@ static void send_association_request(struct atmel_private *priv, int is_reassoc)
2701{ 2701{
2702 u8 *ssid_el_p; 2702 u8 *ssid_el_p;
2703 int bodysize; 2703 int bodysize;
2704 struct ieee802_11_hdr header; 2704 struct ieee80211_hdr header;
2705 struct ass_req_format { 2705 struct ass_req_format {
2706 u16 capability; 2706 u16 capability;
2707 u16 listen_interval; 2707 u16 listen_interval;
@@ -2714,8 +2714,8 @@ static void send_association_request(struct atmel_private *priv, int is_reassoc)
2714 u8 rates[4]; 2714 u8 rates[4];
2715 } body; 2715 } body;
2716 2716
2717 header.frame_ctl = cpu_to_le16(IEEE802_11_FTYPE_MGMT | 2717 header.frame_ctl = cpu_to_le16(IEEE80211_FTYPE_MGMT |
2718 (is_reassoc ? IEEE802_11_STYPE_REASSOC_REQ : IEEE802_11_STYPE_ASSOC_REQ)); 2718 (is_reassoc ? IEEE80211_STYPE_REASSOC_REQ : IEEE80211_STYPE_ASSOC_REQ));
2719 header.duration_id = cpu_to_le16(0x8000); 2719 header.duration_id = cpu_to_le16(0x8000);
2720 header.seq_ctl = 0; 2720 header.seq_ctl = 0;
2721 2721
@@ -2751,9 +2751,9 @@ static void send_association_request(struct atmel_private *priv, int is_reassoc)
2751 atmel_transmit_management_frame(priv, &header, (void *)&body, bodysize); 2751 atmel_transmit_management_frame(priv, &header, (void *)&body, bodysize);
2752} 2752}
2753 2753
2754static int is_frame_from_current_bss(struct atmel_private *priv, struct ieee802_11_hdr *header) 2754static int is_frame_from_current_bss(struct atmel_private *priv, struct ieee80211_hdr *header)
2755{ 2755{
2756 if (le16_to_cpu(header->frame_ctl) & IEEE802_11_FCTL_FROMDS) 2756 if (le16_to_cpu(header->frame_ctl) & IEEE80211_FCTL_FROMDS)
2757 return memcmp(header->addr3, priv->CurrentBSSID, 6) == 0; 2757 return memcmp(header->addr3, priv->CurrentBSSID, 6) == 0;
2758 else 2758 else
2759 return memcmp(header->addr2, priv->CurrentBSSID, 6) == 0; 2759 return memcmp(header->addr2, priv->CurrentBSSID, 6) == 0;
@@ -2801,7 +2801,7 @@ static int retrieve_bss(struct atmel_private *priv)
2801} 2801}
2802 2802
2803 2803
2804static void store_bss_info(struct atmel_private *priv, struct ieee802_11_hdr *header, 2804static void store_bss_info(struct atmel_private *priv, struct ieee80211_hdr *header,
2805 u16 capability, u16 beacon_period, u8 channel, u8 rssi, 2805 u16 capability, u16 beacon_period, u8 channel, u8 rssi,
2806 u8 ssid_len, u8 *ssid, int is_beacon) 2806 u8 ssid_len, u8 *ssid, int is_beacon)
2807{ 2807{
@@ -3085,12 +3085,12 @@ static void atmel_smooth_qual(struct atmel_private *priv)
3085} 3085}
3086 3086
 3087/* deals with incoming management frames. */ 3087
3088static void atmel_management_frame(struct atmel_private *priv, struct ieee802_11_hdr *header, 3088static void atmel_management_frame(struct atmel_private *priv, struct ieee80211_hdr *header,
3089 u16 frame_len, u8 rssi) 3089 u16 frame_len, u8 rssi)
3090{ 3090{
3091 u16 subtype; 3091 u16 subtype;
3092 3092
3093 switch (subtype = le16_to_cpu(header->frame_ctl) & IEEE802_11_FCTL_STYPE) { 3093 switch (subtype = le16_to_cpu(header->frame_ctl) & IEEE80211_FCTL_STYPE) {
3094 case C80211_SUBTYPE_MGMT_BEACON : 3094 case C80211_SUBTYPE_MGMT_BEACON :
3095 case C80211_SUBTYPE_MGMT_ProbeResponse: 3095 case C80211_SUBTYPE_MGMT_ProbeResponse:
3096 3096
diff --git a/drivers/net/wireless/ieee802_11.h b/drivers/net/wireless/ieee802_11.h
deleted file mode 100644
index 53dd5248f9f1..000000000000
--- a/drivers/net/wireless/ieee802_11.h
+++ /dev/null
@@ -1,78 +0,0 @@
1#ifndef _IEEE802_11_H
2#define _IEEE802_11_H
3
4#define IEEE802_11_DATA_LEN 2304
5/* Maximum size for the MA-UNITDATA primitive, 802.11 standard section
6 6.2.1.1.2.
7
8 The figure in section 7.1.2 suggests a body size of up to 2312
9 bytes is allowed, which is a bit confusing, I suspect this
10 represents the 2304 bytes of real data, plus a possible 8 bytes of
11 WEP IV and ICV. (this interpretation suggested by Ramiro Barreiro) */
12
13
14#define IEEE802_11_HLEN 30
15#define IEEE802_11_FRAME_LEN (IEEE802_11_DATA_LEN + IEEE802_11_HLEN)
16
17struct ieee802_11_hdr {
18 u16 frame_ctl;
19 u16 duration_id;
20 u8 addr1[ETH_ALEN];
21 u8 addr2[ETH_ALEN];
22 u8 addr3[ETH_ALEN];
23 u16 seq_ctl;
24 u8 addr4[ETH_ALEN];
25} __attribute__ ((packed));
26
27/* Frame control field constants */
28#define IEEE802_11_FCTL_VERS 0x0002
29#define IEEE802_11_FCTL_FTYPE 0x000c
30#define IEEE802_11_FCTL_STYPE 0x00f0
31#define IEEE802_11_FCTL_TODS 0x0100
32#define IEEE802_11_FCTL_FROMDS 0x0200
33#define IEEE802_11_FCTL_MOREFRAGS 0x0400
34#define IEEE802_11_FCTL_RETRY 0x0800
35#define IEEE802_11_FCTL_PM 0x1000
36#define IEEE802_11_FCTL_MOREDATA 0x2000
37#define IEEE802_11_FCTL_WEP 0x4000
38#define IEEE802_11_FCTL_ORDER 0x8000
39
40#define IEEE802_11_FTYPE_MGMT 0x0000
41#define IEEE802_11_FTYPE_CTL 0x0004
42#define IEEE802_11_FTYPE_DATA 0x0008
43
44/* management */
45#define IEEE802_11_STYPE_ASSOC_REQ 0x0000
46#define IEEE802_11_STYPE_ASSOC_RESP 0x0010
47#define IEEE802_11_STYPE_REASSOC_REQ 0x0020
48#define IEEE802_11_STYPE_REASSOC_RESP 0x0030
49#define IEEE802_11_STYPE_PROBE_REQ 0x0040
50#define IEEE802_11_STYPE_PROBE_RESP 0x0050
51#define IEEE802_11_STYPE_BEACON 0x0080
52#define IEEE802_11_STYPE_ATIM 0x0090
53#define IEEE802_11_STYPE_DISASSOC 0x00A0
54#define IEEE802_11_STYPE_AUTH 0x00B0
55#define IEEE802_11_STYPE_DEAUTH 0x00C0
56
57/* control */
58#define IEEE802_11_STYPE_PSPOLL 0x00A0
59#define IEEE802_11_STYPE_RTS 0x00B0
60#define IEEE802_11_STYPE_CTS 0x00C0
61#define IEEE802_11_STYPE_ACK 0x00D0
62#define IEEE802_11_STYPE_CFEND 0x00E0
63#define IEEE802_11_STYPE_CFENDACK 0x00F0
64
65/* data */
66#define IEEE802_11_STYPE_DATA 0x0000
67#define IEEE802_11_STYPE_DATA_CFACK 0x0010
68#define IEEE802_11_STYPE_DATA_CFPOLL 0x0020
69#define IEEE802_11_STYPE_DATA_CFACKPOLL 0x0030
70#define IEEE802_11_STYPE_NULLFUNC 0x0040
71#define IEEE802_11_STYPE_CFACK 0x0050
72#define IEEE802_11_STYPE_CFPOLL 0x0060
73#define IEEE802_11_STYPE_CFACKPOLL 0x0070
74
75#define IEEE802_11_SCTL_FRAG 0x000F
76#define IEEE802_11_SCTL_SEQ 0xFFF0
77
78#endif /* _IEEE802_11_H */
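The deleted ieee802_11.h above carried a private copy of the 802.11 frame-control constants; the atmel.c hunks now pick up the equivalent IEEE80211_* definitions from <net/ieee80211.h> and classify frames by masking the 16-bit frame_ctl word. A small standalone sketch of that decode, reusing the mask values from the deleted header (names shortened for illustration):

#include <stdint.h>
#include <stdio.h>

/* Same bit layout as the deleted header: type in bits 2..3,
 * subtype in bits 4..7 of the frame control word. */
#define FCTL_FTYPE   0x000c
#define FCTL_STYPE   0x00f0
#define FTYPE_MGMT   0x0000
#define FTYPE_DATA   0x0008
#define STYPE_BEACON 0x0080

int main(void)
{
	uint16_t frame_ctl = FTYPE_MGMT | STYPE_BEACON;   /* a beacon frame */

	if ((frame_ctl & FCTL_FTYPE) == FTYPE_DATA)
		puts("data frame");
	else if ((frame_ctl & FCTL_FTYPE) == FTYPE_MGMT)
		printf("management frame, subtype 0x%02x\n",
		       frame_ctl & FCTL_STYPE);
	return 0;
}

The same two masks drive the FTYPE_DATA/FTYPE_MGMT split in the rx_done_irq() hunk of atmel.c above.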
diff --git a/drivers/net/wireless/ipw2100.c b/drivers/net/wireless/ipw2100.c
new file mode 100644
index 000000000000..189ad7b2cec9
--- /dev/null
+++ b/drivers/net/wireless/ipw2100.c
@@ -0,0 +1,8641 @@
1/******************************************************************************
2
3 Copyright(c) 2003 - 2005 Intel Corporation. All rights reserved.
4
5 This program is free software; you can redistribute it and/or modify it
6 under the terms of version 2 of the GNU General Public License as
7 published by the Free Software Foundation.
8
9 This program is distributed in the hope that it will be useful, but WITHOUT
10 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 more details.
13
14 You should have received a copy of the GNU General Public License along with
15 this program; if not, write to the Free Software Foundation, Inc., 59
16 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17
18 The full GNU General Public License is included in this distribution in the
19 file called LICENSE.
20
21 Contact Information:
22 James P. Ketrenos <ipw2100-admin@linux.intel.com>
23 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24
25 Portions of this file are based on the sample_* files provided by Wireless
26 Extensions 0.26 package and copyright (c) 1997-2003 Jean Tourrilhes
27 <jt@hpl.hp.com>
28
29 Portions of this file are based on the Host AP project,
30 Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen
31 <jkmaline@cc.hut.fi>
32 Copyright (c) 2002-2003, Jouni Malinen <jkmaline@cc.hut.fi>
33
34 Portions of ipw2100_mod_firmware_load, ipw2100_do_mod_firmware_load, and
35 ipw2100_fw_load are loosely based on drivers/sound/sound_firmware.c
36 available in the 2.4.25 kernel sources, and are copyright (c) Alan Cox
37
38******************************************************************************/
39/*
40
41 Initial driver on which this is based was developed by Janusz Gorycki,
42 Maciej Urbaniak, and Maciej Sosnowski.
43
44 Promiscuous mode support added by Jacek Wysoczynski and Maciej Urbaniak.
45
46Theory of Operation
47
48Tx - Commands and Data
49
 50Firmware and host share a circular queue of Transmit Buffer Descriptors (TBDs).
51Each TBD contains a pointer to the physical (dma_addr_t) address of data being
52sent to the firmware as well as the length of the data.
53
54The host writes to the TBD queue at the WRITE index. The WRITE index points
 55to the _next_ packet to be written and is advanced after the TBD has been
56filled.
57
58The firmware pulls from the TBD queue at the READ index. The READ index points
 59to the entry currently being read, and is advanced once the firmware is
60done with a packet.
61
62When data is sent to the firmware, the first TBD is used to indicate to the
63firmware if a Command or Data is being sent. If it is Command, all of the
64command information is contained within the physical address referred to by the
65TBD. If it is Data, the first TBD indicates the type of data packet, number
 66of fragments, etc. The next TBD then refers to the actual packet location.
67
68The Tx flow cycle is as follows:
69
701) ipw2100_tx() is called by kernel with SKB to transmit
 712) Packet is moved from the tx_free_list and appended to the transmit pending
72 list (tx_pend_list)
733) work is scheduled to move pending packets into the shared circular queue.
 744) when placing a packet in the circular queue, the incoming SKB is DMA mapped
75 to a physical address. That address is entered into a TBD. Two TBDs are
 76 filled out: the first indicates a data packet, the second refers to the
77 actual payload data.
785) the packet is removed from tx_pend_list and placed on the end of the
79 firmware pending list (fw_pend_list)
 806) firmware is notified that the WRITE index has been updated (a ring-index sketch follows this comment)
817) Once the firmware has processed the TBD, INTA is triggered.
828) For each Tx interrupt received from the firmware, the READ index is checked
83 to see which TBDs are done being processed.
849) For each TBD that has been processed, the ISR pulls the oldest packet
85 from the fw_pend_list.
8610)The packet structure contained in the fw_pend_list is then used
87 to unmap the DMA address and to free the SKB originally passed to the driver
88 from the kernel.
8911)The packet structure is placed onto the tx_free_list
90
91The above steps are the same for commands, only the msg_free_list/msg_pend_list
92are used instead of tx_free_list/tx_pend_list
93
94...
95
96Critical Sections / Locking :
97
98There are two locks utilized. The first is the low level lock (priv->low_lock)
99that protects the following:
100
101- Access to the Tx/Rx queue lists via priv->low_lock. The lists are as follows:
102
103 tx_free_list : Holds pre-allocated Tx buffers.
104 TAIL modified in __ipw2100_tx_process()
105 HEAD modified in ipw2100_tx()
106
107 tx_pend_list : Holds used Tx buffers waiting to go into the TBD ring
108 TAIL modified ipw2100_tx()
109 HEAD modified by X__ipw2100_tx_send_data()
110
111 msg_free_list : Holds pre-allocated Msg (Command) buffers
112 TAIL modified in __ipw2100_tx_process()
113 HEAD modified in ipw2100_hw_send_command()
114
115 msg_pend_list : Holds used Msg buffers waiting to go into the TBD ring
116 TAIL modified in ipw2100_hw_send_command()
117 HEAD modified in X__ipw2100_tx_send_commands()
118
119 The flow of data on the TX side is as follows:
120
121 MSG_FREE_LIST + COMMAND => MSG_PEND_LIST => TBD => MSG_FREE_LIST
122 TX_FREE_LIST + DATA => TX_PEND_LIST => TBD => TX_FREE_LIST
123
124 The methods that work on the TBD ring are protected via priv->low_lock.
125
126- The internal data state of the device itself
127- Access to the firmware read/write indexes for the BD queues
128 and associated logic
129
130All external entry functions are locked with the priv->action_lock to ensure
131that only one external action is invoked at a time.
132
133
134*/
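A standalone sketch of the WRITE/READ ring-index arithmetic described in the comment above; the ring size, helper names, and full/empty convention are illustrative rather than the driver's actual queue code:

#include <stdint.h>

#define TBD_ENTRIES 256u   /* illustrative; a power of two keeps the
                            * modulo arithmetic wrap-safe */

struct tbd_ring {
	uint32_t write;    /* next slot the host will fill     */
	uint32_t read;     /* next slot the firmware will read */
};

/* number of slots the host may still fill (one slot kept empty) */
uint32_t tbd_free(const struct tbd_ring *q)
{
	return TBD_ENTRIES - 1 - ((q->write - q->read) % TBD_ENTRIES);
}

/* host side: claim a slot and advance WRITE, then notify the firmware */
int tbd_push(struct tbd_ring *q)
{
	if (!tbd_free(q))
		return -1;   /* ring full */
	q->write = (q->write + 1) % TBD_ENTRIES;
	return 0;
}

/* interrupt side: the firmware reported its READ index; reclaim TBDs */
void tbd_reclaim(struct tbd_ring *q, uint32_t fw_read)
{
	q->read = fw_read % TBD_ENTRIES;
}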
135
136#include <linux/compiler.h>
137#include <linux/config.h>
138#include <linux/errno.h>
139#include <linux/if_arp.h>
140#include <linux/in6.h>
141#include <linux/in.h>
142#include <linux/ip.h>
143#include <linux/kernel.h>
144#include <linux/kmod.h>
145#include <linux/module.h>
146#include <linux/netdevice.h>
147#include <linux/ethtool.h>
148#include <linux/pci.h>
149#include <linux/dma-mapping.h>
150#include <linux/proc_fs.h>
151#include <linux/skbuff.h>
152#include <asm/uaccess.h>
153#include <asm/io.h>
154#define __KERNEL_SYSCALLS__
155#include <linux/fs.h>
156#include <linux/mm.h>
157#include <linux/slab.h>
158#include <linux/unistd.h>
159#include <linux/stringify.h>
160#include <linux/tcp.h>
161#include <linux/types.h>
162#include <linux/version.h>
163#include <linux/time.h>
164#include <linux/firmware.h>
165#include <linux/acpi.h>
166#include <linux/ctype.h>
167
168#include "ipw2100.h"
169
170#define IPW2100_VERSION "1.1.0"
171
172#define DRV_NAME "ipw2100"
173#define DRV_VERSION IPW2100_VERSION
174#define DRV_DESCRIPTION "Intel(R) PRO/Wireless 2100 Network Driver"
175#define DRV_COPYRIGHT "Copyright(c) 2003-2004 Intel Corporation"
176
177
178/* Debugging stuff */
179#ifdef CONFIG_IPW_DEBUG
180#define CONFIG_IPW2100_RX_DEBUG /* Reception debugging */
181#endif
182
183MODULE_DESCRIPTION(DRV_DESCRIPTION);
184MODULE_VERSION(DRV_VERSION);
185MODULE_AUTHOR(DRV_COPYRIGHT);
186MODULE_LICENSE("GPL");
187
188static int debug = 0;
189static int mode = 0;
190static int channel = 0;
191static int associate = 1;
192static int disable = 0;
193#ifdef CONFIG_PM
194static struct ipw2100_fw ipw2100_firmware;
195#endif
196
197#include <linux/moduleparam.h>
198module_param(debug, int, 0444);
199module_param(mode, int, 0444);
200module_param(channel, int, 0444);
201module_param(associate, int, 0444);
202module_param(disable, int, 0444);
203
204MODULE_PARM_DESC(debug, "debug level");
205MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS,2=Monitor)");
206MODULE_PARM_DESC(channel, "channel");
207MODULE_PARM_DESC(associate, "auto associate when scanning (default on)");
208MODULE_PARM_DESC(disable, "manually disable the radio (default 0 [radio on])");
209
210u32 ipw2100_debug_level = IPW_DL_NONE;
211
212#ifdef CONFIG_IPW_DEBUG
213static const char *command_types[] = {
214 "undefined",
215 "unused", /* HOST_ATTENTION */
216 "HOST_COMPLETE",
217 "unused", /* SLEEP */
218 "unused", /* HOST_POWER_DOWN */
219 "unused",
220 "SYSTEM_CONFIG",
221 "unused", /* SET_IMR */
222 "SSID",
223 "MANDATORY_BSSID",
224 "AUTHENTICATION_TYPE",
225 "ADAPTER_ADDRESS",
226 "PORT_TYPE",
227 "INTERNATIONAL_MODE",
228 "CHANNEL",
229 "RTS_THRESHOLD",
230 "FRAG_THRESHOLD",
231 "POWER_MODE",
232 "TX_RATES",
233 "BASIC_TX_RATES",
234 "WEP_KEY_INFO",
235 "unused",
236 "unused",
237 "unused",
238 "unused",
239 "WEP_KEY_INDEX",
240 "WEP_FLAGS",
241 "ADD_MULTICAST",
242 "CLEAR_ALL_MULTICAST",
243 "BEACON_INTERVAL",
244 "ATIM_WINDOW",
245 "CLEAR_STATISTICS",
246 "undefined",
247 "undefined",
248 "undefined",
249 "undefined",
250 "TX_POWER_INDEX",
251 "undefined",
252 "undefined",
253 "undefined",
254 "undefined",
255 "undefined",
256 "undefined",
257 "BROADCAST_SCAN",
258 "CARD_DISABLE",
259 "PREFERRED_BSSID",
260 "SET_SCAN_OPTIONS",
261 "SCAN_DWELL_TIME",
262 "SWEEP_TABLE",
263 "AP_OR_STATION_TABLE",
264 "GROUP_ORDINALS",
265 "SHORT_RETRY_LIMIT",
266 "LONG_RETRY_LIMIT",
267 "unused", /* SAVE_CALIBRATION */
268 "unused", /* RESTORE_CALIBRATION */
269 "undefined",
270 "undefined",
271 "undefined",
272 "HOST_PRE_POWER_DOWN",
273 "unused", /* HOST_INTERRUPT_COALESCING */
274 "undefined",
275 "CARD_DISABLE_PHY_OFF",
 276	"MSDU_TX_RATES",
277 "undefined",
278 "undefined",
279 "SET_STATION_STAT_BITS",
280 "CLEAR_STATIONS_STAT_BITS",
281 "LEAP_ROGUE_MODE",
282 "SET_SECURITY_INFORMATION",
283 "DISASSOCIATION_BSSID",
284 "SET_WPA_ASS_IE"
285};
286#endif
287
288
289/* Pre-decl until we get the code solid and then we can clean it up */
290static void X__ipw2100_tx_send_commands(struct ipw2100_priv *priv);
291static void X__ipw2100_tx_send_data(struct ipw2100_priv *priv);
292static int ipw2100_adapter_setup(struct ipw2100_priv *priv);
293
294static void ipw2100_queues_initialize(struct ipw2100_priv *priv);
295static void ipw2100_queues_free(struct ipw2100_priv *priv);
296static int ipw2100_queues_allocate(struct ipw2100_priv *priv);
297
298
299static inline void read_register(struct net_device *dev, u32 reg, u32 *val)
300{
301 *val = readl((void *)(dev->base_addr + reg));
302 IPW_DEBUG_IO("r: 0x%08X => 0x%08X\n", reg, *val);
303}
304
305static inline void write_register(struct net_device *dev, u32 reg, u32 val)
306{
307 writel(val, (void *)(dev->base_addr + reg));
308 IPW_DEBUG_IO("w: 0x%08X <= 0x%08X\n", reg, val);
309}
310
311static inline void read_register_word(struct net_device *dev, u32 reg, u16 *val)
312{
313 *val = readw((void *)(dev->base_addr + reg));
314 IPW_DEBUG_IO("r: 0x%08X => %04X\n", reg, *val);
315}
316
317static inline void read_register_byte(struct net_device *dev, u32 reg, u8 *val)
318{
319 *val = readb((void *)(dev->base_addr + reg));
320 IPW_DEBUG_IO("r: 0x%08X => %02X\n", reg, *val);
321}
322
323static inline void write_register_word(struct net_device *dev, u32 reg, u16 val)
324{
325 writew(val, (void *)(dev->base_addr + reg));
326 IPW_DEBUG_IO("w: 0x%08X <= %04X\n", reg, val);
327}
328
329
330static inline void write_register_byte(struct net_device *dev, u32 reg, u8 val)
331{
332 writeb(val, (void *)(dev->base_addr + reg));
333 IPW_DEBUG_IO("w: 0x%08X =< %02X\n", reg, val);
334}
335
336static inline void read_nic_dword(struct net_device *dev, u32 addr, u32 *val)
337{
338 write_register(dev, IPW_REG_INDIRECT_ACCESS_ADDRESS,
339 addr & IPW_REG_INDIRECT_ADDR_MASK);
340 read_register(dev, IPW_REG_INDIRECT_ACCESS_DATA, val);
341}
342
343static inline void write_nic_dword(struct net_device *dev, u32 addr, u32 val)
344{
345 write_register(dev, IPW_REG_INDIRECT_ACCESS_ADDRESS,
346 addr & IPW_REG_INDIRECT_ADDR_MASK);
347 write_register(dev, IPW_REG_INDIRECT_ACCESS_DATA, val);
348}
349
350static inline void read_nic_word(struct net_device *dev, u32 addr, u16 *val)
351{
352 write_register(dev, IPW_REG_INDIRECT_ACCESS_ADDRESS,
353 addr & IPW_REG_INDIRECT_ADDR_MASK);
354 read_register_word(dev, IPW_REG_INDIRECT_ACCESS_DATA, val);
355}
356
357static inline void write_nic_word(struct net_device *dev, u32 addr, u16 val)
358{
359 write_register(dev, IPW_REG_INDIRECT_ACCESS_ADDRESS,
360 addr & IPW_REG_INDIRECT_ADDR_MASK);
361 write_register_word(dev, IPW_REG_INDIRECT_ACCESS_DATA, val);
362}
363
364static inline void read_nic_byte(struct net_device *dev, u32 addr, u8 *val)
365{
366 write_register(dev, IPW_REG_INDIRECT_ACCESS_ADDRESS,
367 addr & IPW_REG_INDIRECT_ADDR_MASK);
368 read_register_byte(dev, IPW_REG_INDIRECT_ACCESS_DATA, val);
369}
370
371static inline void write_nic_byte(struct net_device *dev, u32 addr, u8 val)
372{
373 write_register(dev, IPW_REG_INDIRECT_ACCESS_ADDRESS,
374 addr & IPW_REG_INDIRECT_ADDR_MASK);
375 write_register_byte(dev, IPW_REG_INDIRECT_ACCESS_DATA, val);
376}
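The read_nic_*/write_nic_* helpers above reach device memory through a two-register indirect window: the target address is latched in IPW_REG_INDIRECT_ACCESS_ADDRESS, and IPW_REG_INDIRECT_ACCESS_DATA then proxies the transfer. A toy, array-backed model of that indirection (purely illustrative, not driver code):

#include <stdint.h>
#include <stdio.h>

/* Array-backed stand-in for device SRAM plus the two window registers. */
static uint32_t sram[1024];
static uint32_t indirect_addr;

static void     latch_address(uint32_t addr) { indirect_addr = addr & ~3u; }
static void     data_write(uint32_t val)     { sram[indirect_addr / 4] = val; }
static uint32_t data_read(void)              { return sram[indirect_addr / 4]; }

static void write_nic_dword_sketch(uint32_t addr, uint32_t val)
{
	latch_address(addr);   /* step 1: tell the device where       */
	data_write(val);       /* step 2: the data register forwards it */
}

static uint32_t read_nic_dword_sketch(uint32_t addr)
{
	latch_address(addr);
	return data_read();
}

int main(void)
{
	write_nic_dword_sketch(0x40, 0x12345678);
	printf("0x%08x\n", read_nic_dword_sketch(0x40));   /* 0x12345678 */
	return 0;
}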
377
378static inline void write_nic_auto_inc_address(struct net_device *dev, u32 addr)
379{
380 write_register(dev, IPW_REG_AUTOINCREMENT_ADDRESS,
381 addr & IPW_REG_INDIRECT_ADDR_MASK);
382}
383
384static inline void write_nic_dword_auto_inc(struct net_device *dev, u32 val)
385{
386 write_register(dev, IPW_REG_AUTOINCREMENT_DATA, val);
387}
388
389static inline void write_nic_memory(struct net_device *dev, u32 addr, u32 len,
390 const u8 *buf)
391{
392 u32 aligned_addr;
393 u32 aligned_len;
394 u32 dif_len;
395 u32 i;
396
 397	/* write the leading unaligned bytes one at a time */
398 aligned_addr = addr & (~0x3);
399 dif_len = addr - aligned_addr;
400 if (dif_len) {
 401		/* Start writing at aligned_addr + dif_len */
402 write_register(dev, IPW_REG_INDIRECT_ACCESS_ADDRESS,
403 aligned_addr);
404 for (i = dif_len; i < 4; i++, buf++)
405 write_register_byte(
406 dev, IPW_REG_INDIRECT_ACCESS_DATA + i,
407 *buf);
408
409 len -= dif_len;
410 aligned_addr += 4;
411 }
412
 413	/* write dwords through the auto-increment registers */
414 write_register(dev, IPW_REG_AUTOINCREMENT_ADDRESS,
415 aligned_addr);
416 aligned_len = len & (~0x3);
417 for (i = 0; i < aligned_len; i += 4, buf += 4, aligned_addr += 4)
418 write_register(
419 dev, IPW_REG_AUTOINCREMENT_DATA, *(u32 *)buf);
420
 421	/* write the trailing unaligned bytes */
422 dif_len = len - aligned_len;
423 write_register(dev, IPW_REG_INDIRECT_ACCESS_ADDRESS, aligned_addr);
424 for (i = 0; i < dif_len; i++, buf++)
425 write_register_byte(
426 dev, IPW_REG_INDIRECT_ACCESS_DATA + i, *buf);
427}
428
429static inline void read_nic_memory(struct net_device *dev, u32 addr, u32 len,
430 u8 *buf)
431{
432 u32 aligned_addr;
433 u32 aligned_len;
434 u32 dif_len;
435 u32 i;
436
 437	/* read the leading unaligned bytes one at a time */
438 aligned_addr = addr & (~0x3);
439 dif_len = addr - aligned_addr;
440 if (dif_len) {
441 /* Start reading at aligned_addr + dif_len */
442 write_register(dev, IPW_REG_INDIRECT_ACCESS_ADDRESS,
443 aligned_addr);
444 for (i = dif_len; i < 4; i++, buf++)
445 read_register_byte(
446 dev, IPW_REG_INDIRECT_ACCESS_DATA + i, buf);
447
448 len -= dif_len;
449 aligned_addr += 4;
450 }
451
452 /* read DWs through autoincrement registers */
453 write_register(dev, IPW_REG_AUTOINCREMENT_ADDRESS,
454 aligned_addr);
455 aligned_len = len & (~0x3);
456 for (i = 0; i < aligned_len; i += 4, buf += 4, aligned_addr += 4)
457 read_register(dev, IPW_REG_AUTOINCREMENT_DATA,
458 (u32 *)buf);
459
 460	/* read the trailing unaligned bytes */
461 dif_len = len - aligned_len;
462 write_register(dev, IPW_REG_INDIRECT_ACCESS_ADDRESS,
463 aligned_addr);
464 for (i = 0; i < dif_len; i++, buf++)
465 read_register_byte(dev, IPW_REG_INDIRECT_ACCESS_DATA +
466 i, buf);
467}
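write_nic_memory() and read_nic_memory() above split each transfer into leading unaligned bytes, a run of aligned dwords moved through the auto-increment registers, and trailing bytes. The same head/body/tail split applied to a plain memory copy, as a standalone sketch:

#include <stdint.h>
#include <string.h>

/* Copy 'len' bytes starting at device offset 'addr' (only the offset's
 * alignment matters here), splitting the work into head, body, and tail. */
void copy_split(uint8_t *dst, const uint8_t *src, uint32_t addr, uint32_t len)
{
	uint32_t head = (4 - (addr & 3)) & 3;   /* bytes up to the next dword */
	uint32_t i = 0;

	if (head > len)
		head = len;
	for (; i < head; i++)                   /* leading unaligned bytes */
		dst[i] = src[i];

	for (; i + 4 <= len; i += 4)            /* aligned dword body */
		memcpy(&dst[i], &src[i], 4);

	for (; i < len; i++)                    /* trailing bytes */
		dst[i] = src[i];
}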
468
469static inline int ipw2100_hw_is_adapter_in_system(struct net_device *dev)
470{
471 return (dev->base_addr &&
472 (readl((void *)(dev->base_addr + IPW_REG_DOA_DEBUG_AREA_START))
473 == IPW_DATA_DOA_DEBUG_VALUE));
474}
475
476int ipw2100_get_ordinal(struct ipw2100_priv *priv, u32 ord,
477 void *val, u32 *len)
478{
479 struct ipw2100_ordinals *ordinals = &priv->ordinals;
480 u32 addr;
481 u32 field_info;
482 u16 field_len;
483 u16 field_count;
484 u32 total_length;
485
486 if (ordinals->table1_addr == 0) {
487 IPW_DEBUG_WARNING(DRV_NAME ": attempt to use fw ordinals "
488 "before they have been loaded.\n");
489 return -EINVAL;
490 }
491
492 if (IS_ORDINAL_TABLE_ONE(ordinals, ord)) {
493 if (*len < IPW_ORD_TAB_1_ENTRY_SIZE) {
494 *len = IPW_ORD_TAB_1_ENTRY_SIZE;
495
496 IPW_DEBUG_WARNING(DRV_NAME
497 ": ordinal buffer length too small, need %zd\n",
498 IPW_ORD_TAB_1_ENTRY_SIZE);
499
500 return -EINVAL;
501 }
502
503 read_nic_dword(priv->net_dev, ordinals->table1_addr + (ord << 2),
504 &addr);
505 read_nic_dword(priv->net_dev, addr, val);
506
507 *len = IPW_ORD_TAB_1_ENTRY_SIZE;
508
509 return 0;
510 }
511
512 if (IS_ORDINAL_TABLE_TWO(ordinals, ord)) {
513
514 ord -= IPW_START_ORD_TAB_2;
515
 516		/* get the address of the statistic */
517 read_nic_dword(priv->net_dev, ordinals->table2_addr + (ord << 3),
518 &addr);
519
520 /* get the second DW of statistics ;
521 * two 16-bit words - first is length, second is count */
522 read_nic_dword(priv->net_dev,
523 ordinals->table2_addr + (ord << 3) + sizeof(u32),
524 &field_info);
525
526 /* get each entry length */
527 field_len = *((u16 *)&field_info);
528
529 /* get number of entries */
530 field_count = *(((u16 *)&field_info) + 1);
531
 532		/* abort if there is not enough memory */
533 total_length = field_len * field_count;
534 if (total_length > *len) {
535 *len = total_length;
536 return -EINVAL;
537 }
538
539 *len = total_length;
540 if (!total_length)
541 return 0;
542
543 /* read the ordinal data from the SRAM */
544 read_nic_memory(priv->net_dev, addr, total_length, val);
545
546 return 0;
547 }
548
549 IPW_DEBUG_WARNING(DRV_NAME ": ordinal %d neither in table 1 nor "
550 "in table 2\n", ord);
551
552 return -EINVAL;
553}
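ipw2100_get_ordinal() above unpacks a table-2 descriptor dword whose two 16-bit halves give the per-entry length and the entry count (extracted there with u16 pointer casts). A shift/mask sketch of the same unpacking, written to match the little-endian layout those casts assume:

#include <stdint.h>
#include <stdio.h>

/* field_info: low 16 bits = entry length, high 16 bits = entry count. */
static void unpack_field_info(uint32_t field_info,
                              uint16_t *field_len, uint16_t *field_count)
{
	*field_len   = (uint16_t)(field_info & 0xffff);
	*field_count = (uint16_t)(field_info >> 16);
}

int main(void)
{
	uint16_t len, count;

	unpack_field_info(0x00040010u, &len, &count);   /* 4 entries, 16 bytes each */
	printf("total %u bytes\n", (unsigned)len * count);  /* total 64 bytes */
	return 0;
}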
554
555static int ipw2100_set_ordinal(struct ipw2100_priv *priv, u32 ord, u32 *val,
556 u32 *len)
557{
558 struct ipw2100_ordinals *ordinals = &priv->ordinals;
559 u32 addr;
560
561 if (IS_ORDINAL_TABLE_ONE(ordinals, ord)) {
562 if (*len != IPW_ORD_TAB_1_ENTRY_SIZE) {
563 *len = IPW_ORD_TAB_1_ENTRY_SIZE;
564 IPW_DEBUG_INFO("wrong size\n");
565 return -EINVAL;
566 }
567
568 read_nic_dword(priv->net_dev, ordinals->table1_addr + (ord << 2),
569 &addr);
570
571 write_nic_dword(priv->net_dev, addr, *val);
572
573 *len = IPW_ORD_TAB_1_ENTRY_SIZE;
574
575 return 0;
576 }
577
578 IPW_DEBUG_INFO("wrong table\n");
579 if (IS_ORDINAL_TABLE_TWO(ordinals, ord))
580 return -EINVAL;
581
582 return -EINVAL;
583}
584
585static char *snprint_line(char *buf, size_t count,
586 const u8 *data, u32 len, u32 ofs)
587{
588 int out, i, j, l;
589 char c;
590
591 out = snprintf(buf, count, "%08X", ofs);
592
593 for (l = 0, i = 0; i < 2; i++) {
594 out += snprintf(buf + out, count - out, " ");
595 for (j = 0; j < 8 && l < len; j++, l++)
596 out += snprintf(buf + out, count - out, "%02X ",
597 data[(i * 8 + j)]);
598 for (; j < 8; j++)
599 out += snprintf(buf + out, count - out, " ");
600 }
601
602 out += snprintf(buf + out, count - out, " ");
603 for (l = 0, i = 0; i < 2; i++) {
604 out += snprintf(buf + out, count - out, " ");
605 for (j = 0; j < 8 && l < len; j++, l++) {
606 c = data[(i * 8 + j)];
607 if (!isascii(c) || !isprint(c))
608 c = '.';
609
610 out += snprintf(buf + out, count - out, "%c", c);
611 }
612
613 for (; j < 8; j++)
614 out += snprintf(buf + out, count - out, " ");
615 }
616
617 return buf;
618}
619
620static void printk_buf(int level, const u8 *data, u32 len)
621{
622 char line[81];
623 u32 ofs = 0;
624 if (!(ipw2100_debug_level & level))
625 return;
626
627 while (len) {
628 printk(KERN_DEBUG "%s\n",
629 snprint_line(line, sizeof(line), &data[ofs],
630 min(len, 16U), ofs));
631 ofs += 16;
632 len -= min(len, 16U);
633 }
634}
635
636
637
638#define MAX_RESET_BACKOFF 10
639
640static inline void schedule_reset(struct ipw2100_priv *priv)
641{
642 unsigned long now = get_seconds();
643
644 /* If we haven't received a reset request within the backoff period,
645 * then we can reset the backoff interval so this reset occurs
646 * immediately */
647 if (priv->reset_backoff &&
648 (now - priv->last_reset > priv->reset_backoff))
649 priv->reset_backoff = 0;
650
651 priv->last_reset = get_seconds();
652
653 if (!(priv->status & STATUS_RESET_PENDING)) {
654 IPW_DEBUG_INFO("%s: Scheduling firmware restart (%ds).\n",
655 priv->net_dev->name, priv->reset_backoff);
656 netif_carrier_off(priv->net_dev);
657 netif_stop_queue(priv->net_dev);
658 priv->status |= STATUS_RESET_PENDING;
659 if (priv->reset_backoff)
660 queue_delayed_work(priv->workqueue, &priv->reset_work,
661 priv->reset_backoff * HZ);
662 else
663 queue_work(priv->workqueue, &priv->reset_work);
664
665 if (priv->reset_backoff < MAX_RESET_BACKOFF)
666 priv->reset_backoff++;
667
668 wake_up_interruptible(&priv->wait_command_queue);
669 } else
670 IPW_DEBUG_INFO("%s: Firmware restart already in progress.\n",
671 priv->net_dev->name);
672
673}
674
675#define HOST_COMPLETE_TIMEOUT (2 * HZ)
676static int ipw2100_hw_send_command(struct ipw2100_priv *priv,
677 struct host_command * cmd)
678{
679 struct list_head *element;
680 struct ipw2100_tx_packet *packet;
681 unsigned long flags;
682 int err = 0;
683
684 IPW_DEBUG_HC("Sending %s command (#%d), %d bytes\n",
685 command_types[cmd->host_command], cmd->host_command,
686 cmd->host_command_length);
687 printk_buf(IPW_DL_HC, (u8*)cmd->host_command_parameters,
688 cmd->host_command_length);
689
690 spin_lock_irqsave(&priv->low_lock, flags);
691
692 if (priv->fatal_error) {
693 IPW_DEBUG_INFO("Attempt to send command while hardware in fatal error condition.\n");
694 err = -EIO;
695 goto fail_unlock;
696 }
697
698 if (!(priv->status & STATUS_RUNNING)) {
699 IPW_DEBUG_INFO("Attempt to send command while hardware is not running.\n");
700 err = -EIO;
701 goto fail_unlock;
702 }
703
704 if (priv->status & STATUS_CMD_ACTIVE) {
705 IPW_DEBUG_INFO("Attempt to send command while another command is pending.\n");
706 err = -EBUSY;
707 goto fail_unlock;
708 }
709
710 if (list_empty(&priv->msg_free_list)) {
711 IPW_DEBUG_INFO("no available msg buffers\n");
712 goto fail_unlock;
713 }
714
715 priv->status |= STATUS_CMD_ACTIVE;
716 priv->messages_sent++;
717
718 element = priv->msg_free_list.next;
719
720 packet = list_entry(element, struct ipw2100_tx_packet, list);
721 packet->jiffy_start = jiffies;
722
723 /* initialize the firmware command packet */
724 packet->info.c_struct.cmd->host_command_reg = cmd->host_command;
725 packet->info.c_struct.cmd->host_command_reg1 = cmd->host_command1;
726 packet->info.c_struct.cmd->host_command_len_reg = cmd->host_command_length;
727 packet->info.c_struct.cmd->sequence = cmd->host_command_sequence;
728
729 memcpy(packet->info.c_struct.cmd->host_command_params_reg,
730 cmd->host_command_parameters,
731 sizeof(packet->info.c_struct.cmd->host_command_params_reg));
732
733 list_del(element);
734 DEC_STAT(&priv->msg_free_stat);
735
736 list_add_tail(element, &priv->msg_pend_list);
737 INC_STAT(&priv->msg_pend_stat);
738
739 X__ipw2100_tx_send_commands(priv);
740 X__ipw2100_tx_send_data(priv);
741
742 spin_unlock_irqrestore(&priv->low_lock, flags);
743
744 /*
745 * We must wait for this command to complete before another
746 	 * command can be sent... but if we wait more than
747 	 * HOST_COMPLETE_TIMEOUT (currently two seconds) then there is a problem.
748 */
749
750 err = wait_event_interruptible_timeout(
751 priv->wait_command_queue, !(priv->status & STATUS_CMD_ACTIVE),
752 HOST_COMPLETE_TIMEOUT);
753
754 if (err == 0) {
755 		IPW_DEBUG_INFO("Command completion timed out after %dms.\n",
756 			       1000 * HOST_COMPLETE_TIMEOUT / HZ);
757 priv->fatal_error = IPW2100_ERR_MSG_TIMEOUT;
758 priv->status &= ~STATUS_CMD_ACTIVE;
759 schedule_reset(priv);
760 return -EIO;
761 }
762
763 if (priv->fatal_error) {
764 IPW_DEBUG_WARNING("%s: firmware fatal error\n",
765 priv->net_dev->name);
766 return -EIO;
767 }
768
769 /* !!!!! HACK TEST !!!!!
770 * When lots of debug trace statements are enabled, the driver
771 * doesn't seem to have as many firmware restart cycles...
772 *
773 * As a test, we're sticking in a 1/100s delay here */
774 set_current_state(TASK_UNINTERRUPTIBLE);
775 schedule_timeout(HZ / 100);
776
777 return 0;
778
779 fail_unlock:
780 spin_unlock_irqrestore(&priv->low_lock, flags);
781
782 return err;
783}
784
785
786/*
787 * Verify the values and data access of the hardware
788 * No locks needed or used. No functions called.
789 */
790static int ipw2100_verify(struct ipw2100_priv *priv)
791{
792 u32 data1, data2;
793 u32 address;
794
795 u32 val1 = 0x76543210;
796 u32 val2 = 0xFEDCBA98;
797
798 /* Domain 0 check - all values should be DOA_DEBUG */
799 for (address = IPW_REG_DOA_DEBUG_AREA_START;
800 address < IPW_REG_DOA_DEBUG_AREA_END;
801 address += sizeof(u32)) {
802 read_register(priv->net_dev, address, &data1);
803 if (data1 != IPW_DATA_DOA_DEBUG_VALUE)
804 return -EIO;
805 }
806
807 /* Domain 1 check - use arbitrary read/write compare */
808 for (address = 0; address < 5; address++) {
809 /* The memory area is not used now */
810 write_register(priv->net_dev, IPW_REG_DOMAIN_1_OFFSET + 0x32,
811 val1);
812 write_register(priv->net_dev, IPW_REG_DOMAIN_1_OFFSET + 0x36,
813 val2);
814 read_register(priv->net_dev, IPW_REG_DOMAIN_1_OFFSET + 0x32,
815 &data1);
816 read_register(priv->net_dev, IPW_REG_DOMAIN_1_OFFSET + 0x36,
817 &data2);
818 if (val1 == data1 && val2 == data2)
819 return 0;
820 }
821
822 return -EIO;
823}
824
825/*
826 *
827 * Loop until the CARD_DISABLED bit is the same value as the
828 * supplied parameter
829 *
830 * TODO: See if it would be more efficient to do a wait/wake
831 * cycle and have the completion event trigger the wakeup
832 *
833 */
834#define IPW_CARD_DISABLE_COMPLETE_WAIT		100	/* 100 milliseconds */
835static int ipw2100_wait_for_card_state(struct ipw2100_priv *priv, int state)
836{
837 int i;
838 u32 card_state;
839 u32 len = sizeof(card_state);
840 int err;
841
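	/* Poll the CARD_DISABLED ordinal every 50 us, for at most
	 * IPW_CARD_DISABLE_COMPLETE_WAIT milliseconds in total. */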
842 for (i = 0; i <= IPW_CARD_DISABLE_COMPLETE_WAIT * 1000; i += 50) {
843 err = ipw2100_get_ordinal(priv, IPW_ORD_CARD_DISABLED,
844 &card_state, &len);
845 if (err) {
846 IPW_DEBUG_INFO("Query of CARD_DISABLED ordinal "
847 "failed.\n");
848 return 0;
849 }
850
851 /* We'll break out if either the HW state says it is
852 * in the state we want, or if HOST_COMPLETE command
853 * finishes */
854 if ((card_state == state) ||
855 ((priv->status & STATUS_ENABLED) ?
856 IPW_HW_STATE_ENABLED : IPW_HW_STATE_DISABLED) == state) {
857 if (state == IPW_HW_STATE_ENABLED)
858 priv->status |= STATUS_ENABLED;
859 else
860 priv->status &= ~STATUS_ENABLED;
861
862 return 0;
863 }
864
865 udelay(50);
866 }
867
868 IPW_DEBUG_INFO("ipw2100_wait_for_card_state to %s state timed out\n",
869 state ? "DISABLED" : "ENABLED");
870 return -EIO;
871}
872
873
874/*********************************************************************
875 Procedure : sw_reset_and_clock
876 Purpose : Asserts s/w reset, asserts clock initialization
877 and waits for clock stabilization
878 ********************************************************************/
879static int sw_reset_and_clock(struct ipw2100_priv *priv)
880{
881 int i;
882 u32 r;
883
884 	/* assert s/w reset */
885 write_register(priv->net_dev, IPW_REG_RESET_REG,
886 IPW_AUX_HOST_RESET_REG_SW_RESET);
887
888 	/* wait for clock stabilization */
889 for (i = 0; i < 1000; i++) {
890 udelay(IPW_WAIT_RESET_ARC_COMPLETE_DELAY);
891
892 		/* check clock ready bit */
893 read_register(priv->net_dev, IPW_REG_RESET_REG, &r);
894 if (r & IPW_AUX_HOST_RESET_REG_PRINCETON_RESET)
895 break;
896 }
897
898 if (i == 1000)
899 		return -EIO;	/* TODO: better error value */
900
901 /* set "initialization complete" bit to move adapter to
902 * D0 state */
903 write_register(priv->net_dev, IPW_REG_GP_CNTRL,
904 IPW_AUX_HOST_GP_CNTRL_BIT_INIT_DONE);
905
906 /* wait for clock stabilization */
907 for (i = 0; i < 10000; i++) {
908 udelay(IPW_WAIT_CLOCK_STABILIZATION_DELAY * 4);
909
910 /* check clock ready bit */
911 read_register(priv->net_dev, IPW_REG_GP_CNTRL, &r);
912 if (r & IPW_AUX_HOST_GP_CNTRL_BIT_CLOCK_READY)
913 break;
914 }
915
916 if (i == 10000)
917 return -EIO; /* TODO: better error value */
918
919 /* set D0 standby bit */
920 read_register(priv->net_dev, IPW_REG_GP_CNTRL, &r);
921 write_register(priv->net_dev, IPW_REG_GP_CNTRL,
922 r | IPW_AUX_HOST_GP_CNTRL_BIT_HOST_ALLOWS_STANDBY);
923
924 return 0;
925}
926
927/*********************************************************************
928 Procedure : ipw2100_download_firmware
929  Purpose     :  Initialize adapter after power on.
930                 The sequence is:
931                 1. assert s/w reset first!
932                 2. wake clocks & wait for clock stabilization
933 3. hold ARC (don't ask me why...)
934 4. load Dino ucode and reset/clock init again
935 5. zero-out shared mem
936 6. download f/w
937 *******************************************************************/
938static int ipw2100_download_firmware(struct ipw2100_priv *priv)
939{
940 u32 address;
941 int err;
942
943#ifndef CONFIG_PM
944 /* Fetch the firmware and microcode */
945 struct ipw2100_fw ipw2100_firmware;
946#endif
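	/* With CONFIG_PM the firmware image lives in a global that is fetched
	 * once and kept cached across suspend/resume; without CONFIG_PM a
	 * local copy is fetched here and released before this function
	 * returns. */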
947
948 if (priv->fatal_error) {
949 IPW_DEBUG_ERROR("%s: ipw2100_download_firmware called after "
950 "fatal error %d. Interface must be brought down.\n",
951 priv->net_dev->name, priv->fatal_error);
952 return -EINVAL;
953 }
954
955#ifdef CONFIG_PM
956 if (!ipw2100_firmware.version) {
957 err = ipw2100_get_firmware(priv, &ipw2100_firmware);
958 if (err) {
959 IPW_DEBUG_ERROR("%s: ipw2100_get_firmware failed: %d\n",
960 priv->net_dev->name, err);
961 priv->fatal_error = IPW2100_ERR_FW_LOAD;
962 goto fail;
963 }
964 }
965#else
966 err = ipw2100_get_firmware(priv, &ipw2100_firmware);
967 if (err) {
968 IPW_DEBUG_ERROR("%s: ipw2100_get_firmware failed: %d\n",
969 priv->net_dev->name, err);
970 priv->fatal_error = IPW2100_ERR_FW_LOAD;
971 goto fail;
972 }
973#endif
974 priv->firmware_version = ipw2100_firmware.version;
975
976 /* s/w reset and clock stabilization */
977 err = sw_reset_and_clock(priv);
978 if (err) {
979 IPW_DEBUG_ERROR("%s: sw_reset_and_clock failed: %d\n",
980 priv->net_dev->name, err);
981 goto fail;
982 }
983
984 err = ipw2100_verify(priv);
985 if (err) {
986 IPW_DEBUG_ERROR("%s: ipw2100_verify failed: %d\n",
987 priv->net_dev->name, err);
988 goto fail;
989 }
990
991 /* Hold ARC */
992 write_nic_dword(priv->net_dev,
993 IPW_INTERNAL_REGISTER_HALT_AND_RESET,
994 0x80000000);
995
996 /* allow ARC to run */
997 write_register(priv->net_dev, IPW_REG_RESET_REG, 0);
998
999 /* load microcode */
1000 err = ipw2100_ucode_download(priv, &ipw2100_firmware);
1001 if (err) {
1002 IPW_DEBUG_ERROR("%s: Error loading microcode: %d\n",
1003 priv->net_dev->name, err);
1004 goto fail;
1005 }
1006
1007 /* release ARC */
1008 write_nic_dword(priv->net_dev,
1009 IPW_INTERNAL_REGISTER_HALT_AND_RESET,
1010 0x00000000);
1011
1012 /* s/w reset and clock stabilization (again!!!) */
1013 err = sw_reset_and_clock(priv);
1014 if (err) {
1015 IPW_DEBUG_ERROR("%s: sw_reset_and_clock failed: %d\n",
1016 priv->net_dev->name, err);
1017 goto fail;
1018 }
1019
1020 /* load f/w */
1021 err = ipw2100_fw_download(priv, &ipw2100_firmware);
1022 if (err) {
1023 IPW_DEBUG_ERROR("%s: Error loading firmware: %d\n",
1024 priv->net_dev->name, err);
1025 goto fail;
1026 }
1027
1028#ifndef CONFIG_PM
1029 /*
1030 	 * When the driver's .resume method is called, other parts of
1031 	 * the system (e.g. the IDE driver) may still be suspended,
1032 	 * which would prevent us from reloading the firmware
1033 	 * from disk. --YZ
1034 */
1035
1036 /* free any storage allocated for firmware image */
1037 ipw2100_release_firmware(priv, &ipw2100_firmware);
1038#endif
1039
1040 /* zero out Domain 1 area indirectly (Si requirement) */
1041 for (address = IPW_HOST_FW_SHARED_AREA0;
1042 address < IPW_HOST_FW_SHARED_AREA0_END; address += 4)
1043 write_nic_dword(priv->net_dev, address, 0);
1044 for (address = IPW_HOST_FW_SHARED_AREA1;
1045 address < IPW_HOST_FW_SHARED_AREA1_END; address += 4)
1046 write_nic_dword(priv->net_dev, address, 0);
1047 for (address = IPW_HOST_FW_SHARED_AREA2;
1048 address < IPW_HOST_FW_SHARED_AREA2_END; address += 4)
1049 write_nic_dword(priv->net_dev, address, 0);
1050 for (address = IPW_HOST_FW_SHARED_AREA3;
1051 address < IPW_HOST_FW_SHARED_AREA3_END; address += 4)
1052 write_nic_dword(priv->net_dev, address, 0);
1053 for (address = IPW_HOST_FW_INTERRUPT_AREA;
1054 address < IPW_HOST_FW_INTERRUPT_AREA_END; address += 4)
1055 write_nic_dword(priv->net_dev, address, 0);
1056
1057 return 0;
1058
1059 fail:
1060 ipw2100_release_firmware(priv, &ipw2100_firmware);
1061 return err;
1062}
1063
1064static inline void ipw2100_enable_interrupts(struct ipw2100_priv *priv)
1065{
1066 if (priv->status & STATUS_INT_ENABLED)
1067 return;
1068 priv->status |= STATUS_INT_ENABLED;
1069 write_register(priv->net_dev, IPW_REG_INTA_MASK, IPW_INTERRUPT_MASK);
1070}
1071
1072static inline void ipw2100_disable_interrupts(struct ipw2100_priv *priv)
1073{
1074 if (!(priv->status & STATUS_INT_ENABLED))
1075 return;
1076 priv->status &= ~STATUS_INT_ENABLED;
1077 write_register(priv->net_dev, IPW_REG_INTA_MASK, 0x0);
1078}
1079
1080
1081static void ipw2100_initialize_ordinals(struct ipw2100_priv *priv)
1082{
1083 struct ipw2100_ordinals *ord = &priv->ordinals;
1084
1085 IPW_DEBUG_INFO("enter\n");
1086
1087 read_register(priv->net_dev, IPW_MEM_HOST_SHARED_ORDINALS_TABLE_1,
1088 &ord->table1_addr);
1089
1090 read_register(priv->net_dev, IPW_MEM_HOST_SHARED_ORDINALS_TABLE_2,
1091 &ord->table2_addr);
1092
1093 read_nic_dword(priv->net_dev, ord->table1_addr, &ord->table1_size);
1094 read_nic_dword(priv->net_dev, ord->table2_addr, &ord->table2_size);
1095
1096 ord->table2_size &= 0x0000FFFF;
1097
1098 IPW_DEBUG_INFO("table 1 size: %d\n", ord->table1_size);
1099 IPW_DEBUG_INFO("table 2 size: %d\n", ord->table2_size);
1100 IPW_DEBUG_INFO("exit\n");
1101}
1102
1103static inline void ipw2100_hw_set_gpio(struct ipw2100_priv *priv)
1104{
1105 u32 reg = 0;
1106 /*
1107 * Set GPIO 3 writable by FW; GPIO 1 writable
1108 * by driver and enable clock
1109 */
1110 reg = (IPW_BIT_GPIO_GPIO3_MASK | IPW_BIT_GPIO_GPIO1_ENABLE |
1111 IPW_BIT_GPIO_LED_OFF);
1112 write_register(priv->net_dev, IPW_REG_GPIO, reg);
1113}
1114
1115static inline int rf_kill_active(struct ipw2100_priv *priv)
1116{
1117#define MAX_RF_KILL_CHECKS 5
1118#define RF_KILL_CHECK_DELAY 40
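	/* Debounce the RF kill GPIO: sample it MAX_RF_KILL_CHECKS times,
	 * RF_KILL_CHECK_DELAY microseconds apart, and only report the switch
	 * as active if every sample read as asserted. */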
1119
1120 unsigned short value = 0;
1121 u32 reg = 0;
1122 int i;
1123
1124 if (!(priv->hw_features & HW_FEATURE_RFKILL)) {
1125 priv->status &= ~STATUS_RF_KILL_HW;
1126 return 0;
1127 }
1128
1129 for (i = 0; i < MAX_RF_KILL_CHECKS; i++) {
1130 udelay(RF_KILL_CHECK_DELAY);
1131 read_register(priv->net_dev, IPW_REG_GPIO, &reg);
1132 value = (value << 1) | ((reg & IPW_BIT_GPIO_RF_KILL) ? 0 : 1);
1133 }
1134
1135 if (value == 0)
1136 priv->status |= STATUS_RF_KILL_HW;
1137 else
1138 priv->status &= ~STATUS_RF_KILL_HW;
1139
1140 return (value == 0);
1141}
1142
1143static int ipw2100_get_hw_features(struct ipw2100_priv *priv)
1144{
1145 u32 addr, len;
1146 u32 val;
1147
1148 /*
1149 * EEPROM_SRAM_DB_START_ADDRESS using ordinal in ordinal table 1
1150 */
1151 len = sizeof(addr);
1152 if (ipw2100_get_ordinal(
1153 priv, IPW_ORD_EEPROM_SRAM_DB_BLOCK_START_ADDRESS,
1154 &addr, &len)) {
1155 IPW_DEBUG_INFO("failed querying ordinals at line %d\n",
1156 __LINE__);
1157 return -EIO;
1158 }
1159
1160 IPW_DEBUG_INFO("EEPROM address: %08X\n", addr);
1161
1162 /*
1163 * EEPROM version is the byte at offset 0xfd in firmware
1164 * We read 4 bytes, then shift out the byte we actually want */
1165 read_nic_dword(priv->net_dev, addr + 0xFC, &val);
1166 priv->eeprom_version = (val >> 24) & 0xFF;
1167 IPW_DEBUG_INFO("EEPROM version: %d\n", priv->eeprom_version);
1168
1169 /*
1170 * HW RF Kill enable is bit 0 in byte at offset 0x21 in firmware
1171 *
1172 * notice that the EEPROM bit is reverse polarity, i.e.
1173 * bit = 0 signifies HW RF kill switch is supported
1174 * bit = 1 signifies HW RF kill switch is NOT supported
1175 */
1176 read_nic_dword(priv->net_dev, addr + 0x20, &val);
1177 if (!((val >> 24) & 0x01))
1178 priv->hw_features |= HW_FEATURE_RFKILL;
1179
1180 IPW_DEBUG_INFO("HW RF Kill: %ssupported.\n",
1181 (priv->hw_features & HW_FEATURE_RFKILL) ?
1182 "" : "not ");
1183
1184 return 0;
1185}
1186
1187/*
1188  * Start firmware execution after power on and initialization
1189  * The sequence is:
1190  *  1. Release ARC
1191  *  2. Wait for f/w initialization to complete
1192 */
1193static int ipw2100_start_adapter(struct ipw2100_priv *priv)
1194{
1195 int i;
1196 u32 inta, inta_mask, gpio;
1197
1198 IPW_DEBUG_INFO("enter\n");
1199
1200 if (priv->status & STATUS_RUNNING)
1201 return 0;
1202
1203 /*
1204 	 * Initialize the hw - drive adapter to D0 state by setting
1205 	 * the init_done bit. Wait for the clk_ready bit and download
1206 	 * the f/w & dino ucode.
1207 */
1208 if (ipw2100_download_firmware(priv)) {
1209 IPW_DEBUG_ERROR("%s: Failed to power on the adapter.\n",
1210 priv->net_dev->name);
1211 return -EIO;
1212 }
1213
1214 /* Clear the Tx, Rx and Msg queues and the r/w indexes
1215 * in the firmware RBD and TBD ring queue */
1216 ipw2100_queues_initialize(priv);
1217
1218 ipw2100_hw_set_gpio(priv);
1219
1220 /* TODO -- Look at disabling interrupts here to make sure none
1221 * get fired during FW initialization */
1222
1223 /* Release ARC - clear reset bit */
1224 write_register(priv->net_dev, IPW_REG_RESET_REG, 0);
1225
1226 	/* wait for f/w initialization complete */
1227 IPW_DEBUG_FW("Waiting for f/w initialization to complete...\n");
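	/* Poll INTA for the FW_INIT_DONE bit, sleeping roughly 40 ms between
	 * reads, for up to 5000 polls before giving up. */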
1228 i = 5000;
1229 do {
1230 set_current_state(TASK_UNINTERRUPTIBLE);
1231 schedule_timeout(40 * HZ / 1000);
1232 /* Todo... wait for sync command ... */
1233
1234 read_register(priv->net_dev, IPW_REG_INTA, &inta);
1235
1236 /* check "init done" bit */
1237 if (inta & IPW2100_INTA_FW_INIT_DONE) {
1238 /* reset "init done" bit */
1239 write_register(priv->net_dev, IPW_REG_INTA,
1240 IPW2100_INTA_FW_INIT_DONE);
1241 break;
1242 }
1243
1244 /* check error conditions : we check these after the firmware
1245 * check so that if there is an error, the interrupt handler
1246 * will see it and the adapter will be reset */
1247 if (inta &
1248 (IPW2100_INTA_FATAL_ERROR | IPW2100_INTA_PARITY_ERROR)) {
1249 /* clear error conditions */
1250 write_register(priv->net_dev, IPW_REG_INTA,
1251 IPW2100_INTA_FATAL_ERROR |
1252 IPW2100_INTA_PARITY_ERROR);
1253 }
1254 } while (i--);
1255
1256 /* Clear out any pending INTAs since we aren't supposed to have
1257 * interrupts enabled at this point... */
1258 read_register(priv->net_dev, IPW_REG_INTA, &inta);
1259 read_register(priv->net_dev, IPW_REG_INTA_MASK, &inta_mask);
1260 inta &= IPW_INTERRUPT_MASK;
1261 /* Clear out any pending interrupts */
1262 if (inta & inta_mask)
1263 write_register(priv->net_dev, IPW_REG_INTA, inta);
1264
1265 IPW_DEBUG_FW("f/w initialization complete: %s\n",
1266 i ? "SUCCESS" : "FAILED");
1267
1268 if (!i) {
1269 IPW_DEBUG_WARNING("%s: Firmware did not initialize.\n",
1270 priv->net_dev->name);
1271 return -EIO;
1272 }
1273
1274 /* allow firmware to write to GPIO1 & GPIO3 */
1275 read_register(priv->net_dev, IPW_REG_GPIO, &gpio);
1276
1277 gpio |= (IPW_BIT_GPIO_GPIO1_MASK | IPW_BIT_GPIO_GPIO3_MASK);
1278
1279 write_register(priv->net_dev, IPW_REG_GPIO, gpio);
1280
1281 /* Ready to receive commands */
1282 priv->status |= STATUS_RUNNING;
1283
1284 /* The adapter has been reset; we are not associated */
1285 priv->status &= ~(STATUS_ASSOCIATING | STATUS_ASSOCIATED);
1286
1287 IPW_DEBUG_INFO("exit\n");
1288
1289 return 0;
1290}
1291
1292static inline void ipw2100_reset_fatalerror(struct ipw2100_priv *priv)
1293{
1294 if (!priv->fatal_error)
1295 return;
1296
1297 priv->fatal_errors[priv->fatal_index++] = priv->fatal_error;
1298 priv->fatal_index %= IPW2100_ERROR_QUEUE;
1299 priv->fatal_error = 0;
1300}
1301
1302
1303/* NOTE: Our interrupt is disabled when this method is called */
1304static int ipw2100_power_cycle_adapter(struct ipw2100_priv *priv)
1305{
1306 u32 reg;
1307 int i;
1308
1309 IPW_DEBUG_INFO("Power cycling the hardware.\n");
1310
1311 ipw2100_hw_set_gpio(priv);
1312
1313 /* Step 1. Stop Master Assert */
1314 write_register(priv->net_dev, IPW_REG_RESET_REG,
1315 IPW_AUX_HOST_RESET_REG_STOP_MASTER);
1316
1317 /* Step 2. Wait for stop Master Assert
1318 	 *	   (not more than 50 us, otherwise return an error) */
1319 i = 5;
1320 do {
1321 udelay(IPW_WAIT_RESET_MASTER_ASSERT_COMPLETE_DELAY);
1322 read_register(priv->net_dev, IPW_REG_RESET_REG, &reg);
1323
1324 if (reg & IPW_AUX_HOST_RESET_REG_MASTER_DISABLED)
1325 break;
1326 } while(i--);
1327
1328 priv->status &= ~STATUS_RESET_PENDING;
1329
1330 if (!i) {
1331 IPW_DEBUG_INFO("exit - waited too long for master assert stop\n");
1332 return -EIO;
1333 }
1334
1335 write_register(priv->net_dev, IPW_REG_RESET_REG,
1336 IPW_AUX_HOST_RESET_REG_SW_RESET);
1337
1338
1339 /* Reset any fatal_error conditions */
1340 ipw2100_reset_fatalerror(priv);
1341
1342 /* At this point, the adapter is now stopped and disabled */
1343 priv->status &= ~(STATUS_RUNNING | STATUS_ASSOCIATING |
1344 STATUS_ASSOCIATED | STATUS_ENABLED);
1345
1346 return 0;
1347}
1348
1349/*
1350  * Send the CARD_DISABLE_PHY_OFF command to the card to disable it
1351  *
1352  * After disabling, if the card was associated, a STATUS_ASSN_LOST will be sent.
1353  *
1354  * STATUS_CARD_DISABLE_NOTIFICATION will be sent regardless of
1355  * whether STATUS_ASSN_LOST is sent.
1356 */
1357static int ipw2100_hw_phy_off(struct ipw2100_priv *priv)
1358{
1359
1360#define HW_PHY_OFF_LOOP_DELAY (HZ / 5000)
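/* Note: with typical HZ values HZ / 5000 evaluates to 0 jiffies, so the
 * delay between polls in the loop below is at most on the order of one
 * scheduler tick. */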
1361
1362 struct host_command cmd = {
1363 .host_command = CARD_DISABLE_PHY_OFF,
1364 .host_command_sequence = 0,
1365 .host_command_length = 0,
1366 };
1367 int err, i;
1368 u32 val1, val2;
1369
1370 IPW_DEBUG_HC("CARD_DISABLE_PHY_OFF\n");
1371
1372 /* Turn off the radio */
1373 err = ipw2100_hw_send_command(priv, &cmd);
1374 if (err)
1375 return err;
1376
1377 for (i = 0; i < 2500; i++) {
1378 read_nic_dword(priv->net_dev, IPW2100_CONTROL_REG, &val1);
1379 read_nic_dword(priv->net_dev, IPW2100_COMMAND, &val2);
1380
1381 if ((val1 & IPW2100_CONTROL_PHY_OFF) &&
1382 (val2 & IPW2100_COMMAND_PHY_OFF))
1383 return 0;
1384
1385 set_current_state(TASK_UNINTERRUPTIBLE);
1386 schedule_timeout(HW_PHY_OFF_LOOP_DELAY);
1387 }
1388
1389 return -EIO;
1390}
1391
1392
1393static int ipw2100_enable_adapter(struct ipw2100_priv *priv)
1394{
1395 struct host_command cmd = {
1396 .host_command = HOST_COMPLETE,
1397 .host_command_sequence = 0,
1398 .host_command_length = 0
1399 };
1400 int err = 0;
1401
1402 IPW_DEBUG_HC("HOST_COMPLETE\n");
1403
1404 if (priv->status & STATUS_ENABLED)
1405 return 0;
1406
1407 down(&priv->adapter_sem);
1408
1409 if (rf_kill_active(priv)) {
1410 IPW_DEBUG_HC("Command aborted due to RF kill active.\n");
1411 goto fail_up;
1412 }
1413
1414 err = ipw2100_hw_send_command(priv, &cmd);
1415 if (err) {
1416 IPW_DEBUG_INFO("Failed to send HOST_COMPLETE command\n");
1417 goto fail_up;
1418 }
1419
1420 err = ipw2100_wait_for_card_state(priv, IPW_HW_STATE_ENABLED);
1421 if (err) {
1422 IPW_DEBUG_INFO(
1423 "%s: card not responding to init command.\n",
1424 priv->net_dev->name);
1425 goto fail_up;
1426 }
1427
1428 if (priv->stop_hang_check) {
1429 priv->stop_hang_check = 0;
1430 queue_delayed_work(priv->workqueue, &priv->hang_check, HZ / 2);
1431 }
1432
1433fail_up:
1434 up(&priv->adapter_sem);
1435 return err;
1436}
1437
1438static int ipw2100_hw_stop_adapter(struct ipw2100_priv *priv)
1439{
1440#define HW_POWER_DOWN_DELAY (HZ / 10)
1441
1442 struct host_command cmd = {
1443 .host_command = HOST_PRE_POWER_DOWN,
1444 .host_command_sequence = 0,
1445 .host_command_length = 0,
1446 };
1447 int err, i;
1448 u32 reg;
1449
1450 if (!(priv->status & STATUS_RUNNING))
1451 return 0;
1452
1453 priv->status |= STATUS_STOPPING;
1454
1455 /* We can only shut down the card if the firmware is operational. So,
1456 	 * if we haven't reset since a fatal_error, then we cannot send the
1457 * shutdown commands. */
1458 if (!priv->fatal_error) {
1459 /* First, make sure the adapter is enabled so that the PHY_OFF
1460 * command can shut it down */
1461 ipw2100_enable_adapter(priv);
1462
1463 err = ipw2100_hw_phy_off(priv);
1464 if (err)
1465 IPW_DEBUG_WARNING("Error disabling radio %d\n", err);
1466
1467 /*
1468 * If in D0-standby mode going directly to D3 may cause a
1469 * PCI bus violation. Therefore we must change out of the D0
1470 * state.
1471 *
1472 * Sending the PREPARE_FOR_POWER_DOWN will restrict the
1473 * hardware from going into standby mode and will transition
1474 		 * out of D0-standby if it is already in that state.
1475 *
1476 * STATUS_PREPARE_POWER_DOWN_COMPLETE will be sent by the
1477 * driver upon completion. Once received, the driver can
1478 * proceed to the D3 state.
1479 *
1480 * Prepare for power down command to fw. This command would
1481 * take HW out of D0-standby and prepare it for D3 state.
1482 *
1483 * Currently FW does not support event notification for this
1484 		 * event. Therefore, skip waiting for it and just wait a fixed
1485 		 * 100 ms instead.
1486 */
1487 IPW_DEBUG_HC("HOST_PRE_POWER_DOWN\n");
1488
1489 err = ipw2100_hw_send_command(priv, &cmd);
1490 if (err)
1491 IPW_DEBUG_WARNING(
1492 "%s: Power down command failed: Error %d\n",
1493 priv->net_dev->name, err);
1494 else {
1495 set_current_state(TASK_UNINTERRUPTIBLE);
1496 schedule_timeout(HW_POWER_DOWN_DELAY);
1497 }
1498 }
1499
1500 priv->status &= ~STATUS_ENABLED;
1501
1502 /*
1503 * Set GPIO 3 writable by FW; GPIO 1 writable
1504 * by driver and enable clock
1505 */
1506 ipw2100_hw_set_gpio(priv);
1507
1508 /*
1509 * Power down adapter. Sequence:
1510 * 1. Stop master assert (RESET_REG[9]=1)
1511 * 2. Wait for stop master (RESET_REG[8]==1)
1512 * 3. S/w reset assert (RESET_REG[7] = 1)
1513 */
1514
1515 /* Stop master assert */
1516 write_register(priv->net_dev, IPW_REG_RESET_REG,
1517 IPW_AUX_HOST_RESET_REG_STOP_MASTER);
1518
1519 	/* Wait for the master to stop, but not more than 50 usec;
1520 	 * otherwise log a warning. */
1521 for (i = 5; i > 0; i--) {
1522 udelay(10);
1523
1524 /* Check master stop bit */
1525 read_register(priv->net_dev, IPW_REG_RESET_REG, &reg);
1526
1527 if (reg & IPW_AUX_HOST_RESET_REG_MASTER_DISABLED)
1528 break;
1529 }
1530
1531 if (i == 0)
1532 IPW_DEBUG_WARNING(DRV_NAME
1533 				  ": %s: Could not power down adapter.\n",
1534 priv->net_dev->name);
1535
1536 /* assert s/w reset */
1537 write_register(priv->net_dev, IPW_REG_RESET_REG,
1538 IPW_AUX_HOST_RESET_REG_SW_RESET);
1539
1540 priv->status &= ~(STATUS_RUNNING | STATUS_STOPPING);
1541
1542 return 0;
1543}
1544
1545
1546static int ipw2100_disable_adapter(struct ipw2100_priv *priv)
1547{
1548 struct host_command cmd = {
1549 .host_command = CARD_DISABLE,
1550 .host_command_sequence = 0,
1551 .host_command_length = 0
1552 };
1553 int err = 0;
1554
1555 IPW_DEBUG_HC("CARD_DISABLE\n");
1556
1557 if (!(priv->status & STATUS_ENABLED))
1558 return 0;
1559
1560 /* Make sure we clear the associated state */
1561 priv->status &= ~(STATUS_ASSOCIATED | STATUS_ASSOCIATING);
1562
1563 if (!priv->stop_hang_check) {
1564 priv->stop_hang_check = 1;
1565 cancel_delayed_work(&priv->hang_check);
1566 }
1567
1568 down(&priv->adapter_sem);
1569
1570 err = ipw2100_hw_send_command(priv, &cmd);
1571 if (err) {
1572 IPW_DEBUG_WARNING("exit - failed to send CARD_DISABLE command\n");
1573 goto fail_up;
1574 }
1575
1576 err = ipw2100_wait_for_card_state(priv, IPW_HW_STATE_DISABLED);
1577 if (err) {
1578 IPW_DEBUG_WARNING("exit - card failed to change to DISABLED\n");
1579 goto fail_up;
1580 }
1581
1582 IPW_DEBUG_INFO("TODO: implement scan state machine\n");
1583
1584fail_up:
1585 up(&priv->adapter_sem);
1586 return err;
1587}
1588
1589int ipw2100_set_scan_options(struct ipw2100_priv *priv)
1590{
1591 struct host_command cmd = {
1592 .host_command = SET_SCAN_OPTIONS,
1593 .host_command_sequence = 0,
1594 .host_command_length = 8
1595 };
1596 int err;
1597
1598 IPW_DEBUG_INFO("enter\n");
1599
1600 IPW_DEBUG_SCAN("setting scan options\n");
1601
1602 cmd.host_command_parameters[0] = 0;
1603
1604 if (!(priv->config & CFG_ASSOCIATE))
1605 cmd.host_command_parameters[0] |= IPW_SCAN_NOASSOCIATE;
1606 if ((priv->sec.flags & SEC_ENABLED) && priv->sec.enabled)
1607 cmd.host_command_parameters[0] |= IPW_SCAN_MIXED_CELL;
1608 if (priv->config & CFG_PASSIVE_SCAN)
1609 cmd.host_command_parameters[0] |= IPW_SCAN_PASSIVE;
1610
1611 cmd.host_command_parameters[1] = priv->channel_mask;
1612
1613 err = ipw2100_hw_send_command(priv, &cmd);
1614
1615 IPW_DEBUG_HC("SET_SCAN_OPTIONS 0x%04X\n",
1616 cmd.host_command_parameters[0]);
1617
1618 return err;
1619}
1620
1621int ipw2100_start_scan(struct ipw2100_priv *priv)
1622{
1623 struct host_command cmd = {
1624 .host_command = BROADCAST_SCAN,
1625 .host_command_sequence = 0,
1626 .host_command_length = 4
1627 };
1628 int err;
1629
1630 IPW_DEBUG_HC("START_SCAN\n");
1631
1632 cmd.host_command_parameters[0] = 0;
1633
1634 /* No scanning if in monitor mode */
1635 if (priv->ieee->iw_mode == IW_MODE_MONITOR)
1636 return 1;
1637
1638 if (priv->status & STATUS_SCANNING) {
1639 IPW_DEBUG_SCAN("Scan requested while already in scan...\n");
1640 return 0;
1641 }
1642
1643 IPW_DEBUG_INFO("enter\n");
1644
1645 /* Not clearing here; doing so makes iwlist always return nothing...
1646 *
1647 * We should modify the table logic to use aging tables vs. clearing
1648 * the table on each scan start.
1649 */
1650 IPW_DEBUG_SCAN("starting scan\n");
1651
1652 priv->status |= STATUS_SCANNING;
1653 err = ipw2100_hw_send_command(priv, &cmd);
1654 if (err)
1655 priv->status &= ~STATUS_SCANNING;
1656
1657 IPW_DEBUG_INFO("exit\n");
1658
1659 return err;
1660}
1661
1662static int ipw2100_up(struct ipw2100_priv *priv, int deferred)
1663{
1664 unsigned long flags;
1665 int rc = 0;
1666 u32 lock;
1667 u32 ord_len = sizeof(lock);
1668
1669 	/* Quit if manually disabled. */
1670 if (priv->status & STATUS_RF_KILL_SW) {
1671 IPW_DEBUG_INFO("%s: Radio is disabled by Manual Disable "
1672 "switch\n", priv->net_dev->name);
1673 return 0;
1674 }
1675
1676 /* If the interrupt is enabled, turn it off... */
1677 spin_lock_irqsave(&priv->low_lock, flags);
1678 ipw2100_disable_interrupts(priv);
1679
1680 /* Reset any fatal_error conditions */
1681 ipw2100_reset_fatalerror(priv);
1682 spin_unlock_irqrestore(&priv->low_lock, flags);
1683
1684 if (priv->status & STATUS_POWERED ||
1685 (priv->status & STATUS_RESET_PENDING)) {
1686 /* Power cycle the card ... */
1687 if (ipw2100_power_cycle_adapter(priv)) {
1688 IPW_DEBUG_WARNING("%s: Could not cycle adapter.\n",
1689 priv->net_dev->name);
1690 rc = 1;
1691 goto exit;
1692 }
1693 } else
1694 priv->status |= STATUS_POWERED;
1695
1696 /* Load the firmware, start the clocks, etc. */
1697 if (ipw2100_start_adapter(priv)) {
1698 IPW_DEBUG_ERROR("%s: Failed to start the firmware.\n",
1699 priv->net_dev->name);
1700 rc = 1;
1701 goto exit;
1702 }
1703
1704 ipw2100_initialize_ordinals(priv);
1705
1706 /* Determine capabilities of this particular HW configuration */
1707 if (ipw2100_get_hw_features(priv)) {
1708 IPW_DEBUG_ERROR("%s: Failed to determine HW features.\n",
1709 priv->net_dev->name);
1710 rc = 1;
1711 goto exit;
1712 }
1713
1714 lock = LOCK_NONE;
1715 if (ipw2100_set_ordinal(priv, IPW_ORD_PERS_DB_LOCK, &lock, &ord_len)) {
1716 IPW_DEBUG_ERROR("%s: Failed to clear ordinal lock.\n",
1717 priv->net_dev->name);
1718 rc = 1;
1719 goto exit;
1720 }
1721
1722 priv->status &= ~STATUS_SCANNING;
1723
1724 if (rf_kill_active(priv)) {
1725 printk(KERN_INFO "%s: Radio is disabled by RF switch.\n",
1726 priv->net_dev->name);
1727
1728 if (priv->stop_rf_kill) {
1729 priv->stop_rf_kill = 0;
1730 queue_delayed_work(priv->workqueue, &priv->rf_kill, HZ);
1731 }
1732
1733 deferred = 1;
1734 }
1735
1736 /* Turn on the interrupt so that commands can be processed */
1737 ipw2100_enable_interrupts(priv);
1738
1739 /* Send all of the commands that must be sent prior to
1740 * HOST_COMPLETE */
1741 if (ipw2100_adapter_setup(priv)) {
1742 IPW_DEBUG_ERROR("%s: Failed to start the card.\n",
1743 priv->net_dev->name);
1744 rc = 1;
1745 goto exit;
1746 }
1747
1748 if (!deferred) {
1749 /* Enable the adapter - sends HOST_COMPLETE */
1750 if (ipw2100_enable_adapter(priv)) {
1751 IPW_DEBUG_ERROR(
1752 "%s: failed in call to enable adapter.\n",
1753 priv->net_dev->name);
1754 ipw2100_hw_stop_adapter(priv);
1755 rc = 1;
1756 goto exit;
1757 }
1758
1759
1760 /* Start a scan . . . */
1761 ipw2100_set_scan_options(priv);
1762 ipw2100_start_scan(priv);
1763 }
1764
1765 exit:
1766 return rc;
1767}
1768
1769/* Called by register_netdev() */
1770static int ipw2100_net_init(struct net_device *dev)
1771{
1772 struct ipw2100_priv *priv = ieee80211_priv(dev);
1773 return ipw2100_up(priv, 1);
1774}
1775
1776static void ipw2100_down(struct ipw2100_priv *priv)
1777{
1778 unsigned long flags;
1779 union iwreq_data wrqu = {
1780 .ap_addr = {
1781 .sa_family = ARPHRD_ETHER
1782 }
1783 };
1784 int associated = priv->status & STATUS_ASSOCIATED;
1785
1786 /* Kill the RF switch timer */
1787 if (!priv->stop_rf_kill) {
1788 priv->stop_rf_kill = 1;
1789 cancel_delayed_work(&priv->rf_kill);
1790 }
1791
1792 	/* Kill the firmware hang check timer */
1793 if (!priv->stop_hang_check) {
1794 priv->stop_hang_check = 1;
1795 cancel_delayed_work(&priv->hang_check);
1796 }
1797
1798 /* Kill any pending resets */
1799 if (priv->status & STATUS_RESET_PENDING)
1800 cancel_delayed_work(&priv->reset_work);
1801
1802 /* Make sure the interrupt is on so that FW commands will be
1803 * processed correctly */
1804 spin_lock_irqsave(&priv->low_lock, flags);
1805 ipw2100_enable_interrupts(priv);
1806 spin_unlock_irqrestore(&priv->low_lock, flags);
1807
1808 if (ipw2100_hw_stop_adapter(priv))
1809 IPW_DEBUG_ERROR("%s: Error stopping adapter.\n",
1810 priv->net_dev->name);
1811
1812 /* Do not disable the interrupt until _after_ we disable
1813 	 * the adapter. Otherwise the CARD_DISABLE command will never
1814 * be ack'd by the firmware */
1815 spin_lock_irqsave(&priv->low_lock, flags);
1816 ipw2100_disable_interrupts(priv);
1817 spin_unlock_irqrestore(&priv->low_lock, flags);
1818
1819#ifdef ACPI_CSTATE_LIMIT_DEFINED
1820 if (priv->config & CFG_C3_DISABLED) {
1821 IPW_DEBUG_INFO(DRV_NAME ": Resetting C3 transitions.\n");
1822 acpi_set_cstate_limit(priv->cstate_limit);
1823 priv->config &= ~CFG_C3_DISABLED;
1824 }
1825#endif
1826
1827 /* We have to signal any supplicant if we are disassociating */
1828 if (associated)
1829 wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL);
1830
1831 priv->status &= ~(STATUS_ASSOCIATED | STATUS_ASSOCIATING);
1832 netif_carrier_off(priv->net_dev);
1833 netif_stop_queue(priv->net_dev);
1834}
1835
1836void ipw2100_reset_adapter(struct ipw2100_priv *priv)
1837{
1838 unsigned long flags;
1839 union iwreq_data wrqu = {
1840 .ap_addr = {
1841 .sa_family = ARPHRD_ETHER
1842 }
1843 };
1844 int associated = priv->status & STATUS_ASSOCIATED;
1845
1846 spin_lock_irqsave(&priv->low_lock, flags);
1847 IPW_DEBUG_INFO(DRV_NAME ": %s: Restarting adapter.\n",
1848 priv->net_dev->name);
1849 priv->resets++;
1850 priv->status &= ~(STATUS_ASSOCIATED | STATUS_ASSOCIATING);
1851 priv->status |= STATUS_SECURITY_UPDATED;
1852
1853 /* Force a power cycle even if interface hasn't been opened
1854 * yet */
1855 cancel_delayed_work(&priv->reset_work);
1856 priv->status |= STATUS_RESET_PENDING;
1857 spin_unlock_irqrestore(&priv->low_lock, flags);
1858
1859 down(&priv->action_sem);
1860 /* stop timed checks so that they don't interfere with reset */
1861 priv->stop_hang_check = 1;
1862 cancel_delayed_work(&priv->hang_check);
1863
1864 /* We have to signal any supplicant if we are disassociating */
1865 if (associated)
1866 wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL);
1867
1868 ipw2100_up(priv, 0);
1869 up(&priv->action_sem);
1870
1871}
1872
1873
1874static void isr_indicate_associated(struct ipw2100_priv *priv, u32 status)
1875{
1876
1877#define MAC_ASSOCIATION_READ_DELAY (HZ)
1878 int ret, len, essid_len;
1879 char essid[IW_ESSID_MAX_SIZE];
1880 u32 txrate;
1881 u32 chan;
1882 char *txratename;
1883 u8 bssid[ETH_ALEN];
1884
1885 /*
1886 * TBD: BSSID is usually 00:00:00:00:00:00 here and not
1887 * an actual MAC of the AP. Seems like FW sets this
1888 * address too late. Read it later and expose through
1889 * /proc or schedule a later task to query and update
1890 */
1891
1892 essid_len = IW_ESSID_MAX_SIZE;
1893 ret = ipw2100_get_ordinal(priv, IPW_ORD_STAT_ASSN_SSID,
1894 essid, &essid_len);
1895 if (ret) {
1896 IPW_DEBUG_INFO("failed querying ordinals at line %d\n",
1897 __LINE__);
1898 return;
1899 }
1900
1901 len = sizeof(u32);
1902 ret = ipw2100_get_ordinal(priv, IPW_ORD_CURRENT_TX_RATE,
1903 &txrate, &len);
1904 if (ret) {
1905 IPW_DEBUG_INFO("failed querying ordinals at line %d\n",
1906 __LINE__);
1907 return;
1908 }
1909
1910 len = sizeof(u32);
1911 ret = ipw2100_get_ordinal(priv, IPW_ORD_OUR_FREQ, &chan, &len);
1912 if (ret) {
1913 IPW_DEBUG_INFO("failed querying ordinals at line %d\n",
1914 __LINE__);
1915 return;
1916 }
1917 len = ETH_ALEN;
1918 	ret = ipw2100_get_ordinal(priv, IPW_ORD_STAT_ASSN_AP_BSSID, &bssid, &len);
1919 if (ret) {
1920 IPW_DEBUG_INFO("failed querying ordinals at line %d\n",
1921 __LINE__);
1922 return;
1923 }
1924 memcpy(priv->ieee->bssid, bssid, ETH_ALEN);
1925
1926
1927 switch (txrate) {
1928 case TX_RATE_1_MBIT:
1929 txratename = "1Mbps";
1930 break;
1931 case TX_RATE_2_MBIT:
1932 		txratename = "2Mbps";
1933 break;
1934 case TX_RATE_5_5_MBIT:
1935 txratename = "5.5Mbps";
1936 break;
1937 case TX_RATE_11_MBIT:
1938 txratename = "11Mbps";
1939 break;
1940 default:
1941 IPW_DEBUG_INFO("Unknown rate: %d\n", txrate);
1942 txratename = "unknown rate";
1943 break;
1944 }
1945
1946 IPW_DEBUG_INFO("%s: Associated with '%s' at %s, channel %d (BSSID="
1947 MAC_FMT ")\n",
1948 priv->net_dev->name, escape_essid(essid, essid_len),
1949 txratename, chan, MAC_ARG(bssid));
1950
1951 	/* Now copy the SSID we read into the driver's state */
1952 if (!(priv->config & CFG_STATIC_ESSID)) {
1953 priv->essid_len = min((u8)essid_len, (u8)IW_ESSID_MAX_SIZE);
1954 memcpy(priv->essid, essid, priv->essid_len);
1955 }
1956 priv->channel = chan;
1957 memcpy(priv->bssid, bssid, ETH_ALEN);
1958
1959 priv->status |= STATUS_ASSOCIATING;
1960 priv->connect_start = get_seconds();
1961
1962 queue_delayed_work(priv->workqueue, &priv->wx_event_work, HZ / 10);
1963}
1964
1965
1966int ipw2100_set_essid(struct ipw2100_priv *priv, char *essid,
1967 int length, int batch_mode)
1968{
1969 int ssid_len = min(length, IW_ESSID_MAX_SIZE);
1970 struct host_command cmd = {
1971 .host_command = SSID,
1972 .host_command_sequence = 0,
1973 .host_command_length = ssid_len
1974 };
1975 int err;
1976
1977 IPW_DEBUG_HC("SSID: '%s'\n", escape_essid(essid, ssid_len));
1978
1979 if (ssid_len)
1980 memcpy((char*)cmd.host_command_parameters,
1981 essid, ssid_len);
1982
1983 if (!batch_mode) {
1984 err = ipw2100_disable_adapter(priv);
1985 if (err)
1986 return err;
1987 }
1988
1989 	/* The FW currently has a bug and does not honor bit 0 in SET_SCAN_OPTIONS
1990 	 * to disable auto association -- so we cheat by setting a bogus SSID */
1991 if (!ssid_len && !(priv->config & CFG_ASSOCIATE)) {
1992 int i;
1993 u8 *bogus = (u8*)cmd.host_command_parameters;
1994 for (i = 0; i < IW_ESSID_MAX_SIZE; i++)
1995 bogus[i] = 0x18 + i;
1996 cmd.host_command_length = IW_ESSID_MAX_SIZE;
1997 }
1998
1999 /* NOTE: We always send the SSID command even if the provided ESSID is
2000 * the same as what we currently think is set. */
2001
2002 err = ipw2100_hw_send_command(priv, &cmd);
2003 if (!err) {
2004 memset(priv->essid + ssid_len, 0,
2005 IW_ESSID_MAX_SIZE - ssid_len);
2006 memcpy(priv->essid, essid, ssid_len);
2007 priv->essid_len = ssid_len;
2008 }
2009
2010 if (!batch_mode) {
2011 if (ipw2100_enable_adapter(priv))
2012 err = -EIO;
2013 }
2014
2015 return err;
2016}
2017
2018static void isr_indicate_association_lost(struct ipw2100_priv *priv, u32 status)
2019{
2020 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC,
2021 "disassociated: '%s' " MAC_FMT " \n",
2022 escape_essid(priv->essid, priv->essid_len),
2023 MAC_ARG(priv->bssid));
2024
2025 priv->status &= ~(STATUS_ASSOCIATED | STATUS_ASSOCIATING);
2026
2027 if (priv->status & STATUS_STOPPING) {
2028 IPW_DEBUG_INFO("Card is stopping itself, discard ASSN_LOST.\n");
2029 return;
2030 }
2031
2032 memset(priv->bssid, 0, ETH_ALEN);
2033 memset(priv->ieee->bssid, 0, ETH_ALEN);
2034
2035 netif_carrier_off(priv->net_dev);
2036 netif_stop_queue(priv->net_dev);
2037
2038 if (!(priv->status & STATUS_RUNNING))
2039 return;
2040
2041 if (priv->status & STATUS_SECURITY_UPDATED)
2042 queue_work(priv->workqueue, &priv->security_work);
2043
2044 queue_work(priv->workqueue, &priv->wx_event_work);
2045}
2046
2047static void isr_indicate_rf_kill(struct ipw2100_priv *priv, u32 status)
2048{
2049 IPW_DEBUG_INFO("%s: RF Kill state changed to radio OFF.\n",
2050 priv->net_dev->name);
2051
2052 /* RF_KILL is now enabled (else we wouldn't be here) */
2053 priv->status |= STATUS_RF_KILL_HW;
2054
2055#ifdef ACPI_CSTATE_LIMIT_DEFINED
2056 if (priv->config & CFG_C3_DISABLED) {
2057 IPW_DEBUG_INFO(DRV_NAME ": Resetting C3 transitions.\n");
2058 acpi_set_cstate_limit(priv->cstate_limit);
2059 priv->config &= ~CFG_C3_DISABLED;
2060 }
2061#endif
2062
2063 /* Make sure the RF Kill check timer is running */
2064 priv->stop_rf_kill = 0;
2065 cancel_delayed_work(&priv->rf_kill);
2066 queue_delayed_work(priv->workqueue, &priv->rf_kill, HZ);
2067}
2068
2069static void isr_scan_complete(struct ipw2100_priv *priv, u32 status)
2070{
2071 IPW_DEBUG_SCAN("scan complete\n");
2072 /* Age the scan results... */
2073 priv->ieee->scans++;
2074 priv->status &= ~STATUS_SCANNING;
2075}
2076
2077#ifdef CONFIG_IPW_DEBUG
2078#define IPW2100_HANDLER(v, f) { v, f, # v }
2079struct ipw2100_status_indicator {
2080 int status;
2081 void (*cb)(struct ipw2100_priv *priv, u32 status);
2082 char *name;
2083};
2084#else
2085#define IPW2100_HANDLER(v, f) { v, f }
2086struct ipw2100_status_indicator {
2087 int status;
2088 void (*cb)(struct ipw2100_priv *priv, u32 status);
2089};
2090#endif /* CONFIG_IPW_DEBUG */
2091
2092static void isr_indicate_scanning(struct ipw2100_priv *priv, u32 status)
2093{
2094 IPW_DEBUG_SCAN("Scanning...\n");
2095 priv->status |= STATUS_SCANNING;
2096}
2097
2098const struct ipw2100_status_indicator status_handlers[] = {
2099 IPW2100_HANDLER(IPW_STATE_INITIALIZED, 0),
2100 IPW2100_HANDLER(IPW_STATE_COUNTRY_FOUND, 0),
2101 IPW2100_HANDLER(IPW_STATE_ASSOCIATED, isr_indicate_associated),
2102 IPW2100_HANDLER(IPW_STATE_ASSN_LOST, isr_indicate_association_lost),
2103 IPW2100_HANDLER(IPW_STATE_ASSN_CHANGED, 0),
2104 IPW2100_HANDLER(IPW_STATE_SCAN_COMPLETE, isr_scan_complete),
2105 IPW2100_HANDLER(IPW_STATE_ENTERED_PSP, 0),
2106 IPW2100_HANDLER(IPW_STATE_LEFT_PSP, 0),
2107 IPW2100_HANDLER(IPW_STATE_RF_KILL, isr_indicate_rf_kill),
2108 IPW2100_HANDLER(IPW_STATE_DISABLED, 0),
2109 IPW2100_HANDLER(IPW_STATE_POWER_DOWN, 0),
2110 IPW2100_HANDLER(IPW_STATE_SCANNING, isr_indicate_scanning),
2111 IPW2100_HANDLER(-1, 0)
2112};
2113
2114
2115static void isr_status_change(struct ipw2100_priv *priv, int status)
2116{
2117 int i;
2118
2119 if (status == IPW_STATE_SCANNING &&
2120 priv->status & STATUS_ASSOCIATED &&
2121 !(priv->status & STATUS_SCANNING)) {
2122 IPW_DEBUG_INFO("Scan detected while associated, with "
2123 "no scan request. Restarting firmware.\n");
2124
2125 /* Wake up any sleeping jobs */
2126 schedule_reset(priv);
2127 }
2128
2129 for (i = 0; status_handlers[i].status != -1; i++) {
2130 if (status == status_handlers[i].status) {
2131 IPW_DEBUG_NOTIF("Status change: %s\n",
2132 status_handlers[i].name);
2133 if (status_handlers[i].cb)
2134 status_handlers[i].cb(priv, status);
2135 priv->wstats.status = status;
2136 return;
2137 }
2138 }
2139
2140 IPW_DEBUG_NOTIF("unknown status received: %04x\n", status);
2141}
2142
2143static void isr_rx_complete_command(
2144 struct ipw2100_priv *priv,
2145 struct ipw2100_cmd_header *cmd)
2146{
2147#ifdef CONFIG_IPW_DEBUG
2148 if (cmd->host_command_reg < ARRAY_SIZE(command_types)) {
2149 IPW_DEBUG_HC("Command completed '%s (%d)'\n",
2150 command_types[cmd->host_command_reg],
2151 cmd->host_command_reg);
2152 }
2153#endif
2154 if (cmd->host_command_reg == HOST_COMPLETE)
2155 priv->status |= STATUS_ENABLED;
2156
2157 if (cmd->host_command_reg == CARD_DISABLE)
2158 priv->status &= ~STATUS_ENABLED;
2159
2160 priv->status &= ~STATUS_CMD_ACTIVE;
2161
2162 wake_up_interruptible(&priv->wait_command_queue);
2163}
2164
2165#ifdef CONFIG_IPW_DEBUG
2166const char *frame_types[] = {
2167 "COMMAND_STATUS_VAL",
2168 "STATUS_CHANGE_VAL",
2169 "P80211_DATA_VAL",
2170 "P8023_DATA_VAL",
2171 "HOST_NOTIFICATION_VAL"
2172};
2173#endif
2174
2175
2176static inline int ipw2100_alloc_skb(
2177 struct ipw2100_priv *priv,
2178 struct ipw2100_rx_packet *packet)
2179{
2180 packet->skb = dev_alloc_skb(sizeof(struct ipw2100_rx));
2181 if (!packet->skb)
2182 return -ENOMEM;
2183
2184 packet->rxp = (struct ipw2100_rx *)packet->skb->data;
2185 packet->dma_addr = pci_map_single(priv->pci_dev, packet->skb->data,
2186 sizeof(struct ipw2100_rx),
2187 PCI_DMA_FROMDEVICE);
2188 /* NOTE: pci_map_single does not return an error code, and 0 is a valid
2189 * dma_addr */
2190
2191 return 0;
2192}
2193
2194
2195#define SEARCH_ERROR 0xffffffff
2196#define SEARCH_FAIL 0xfffffffe
2197#define SEARCH_SUCCESS 0xfffffff0
2198#define SEARCH_DISCARD 0
2199#define SEARCH_SNAPSHOT 1
2200
2201#define SNAPSHOT_ADDR(ofs) (priv->snapshot[((ofs) >> 12) & 0xff] + ((ofs) & 0xfff))
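/* The snapshot is 0x30 separately allocated 4 KB pages (192 KB total),
 * enough to mirror the 0x30000 bytes of NIC memory walked by
 * ipw2100_match_buf(); SNAPSHOT_ADDR() maps a NIC offset to the
 * corresponding location within those pages. */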
2202static inline int ipw2100_snapshot_alloc(struct ipw2100_priv *priv)
2203{
2204 int i;
2205 if (priv->snapshot[0])
2206 return 1;
2207 for (i = 0; i < 0x30; i++) {
2208 priv->snapshot[i] = (u8*)kmalloc(0x1000, GFP_ATOMIC);
2209 if (!priv->snapshot[i]) {
2210 IPW_DEBUG_INFO("%s: Error allocating snapshot "
2211 "buffer %d\n", priv->net_dev->name, i);
2212 while (i > 0)
2213 kfree(priv->snapshot[--i]);
2214 priv->snapshot[0] = NULL;
2215 return 0;
2216 }
2217 }
2218
2219 return 1;
2220}
2221
2222static inline void ipw2100_snapshot_free(struct ipw2100_priv *priv)
2223{
2224 int i;
2225 if (!priv->snapshot[0])
2226 return;
2227 for (i = 0; i < 0x30; i++)
2228 kfree(priv->snapshot[i]);
2229 priv->snapshot[0] = NULL;
2230}
2231
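/* Linearly scan the first 0x30000 bytes of NIC memory, one dword at a time,
 * looking for the byte pattern in in_buf.  In SEARCH_SNAPSHOT mode the
 * memory is also copied into the snapshot buffers as it is read. */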
2232static inline u32 ipw2100_match_buf(struct ipw2100_priv *priv, u8 *in_buf,
2233 size_t len, int mode)
2234{
2235 u32 i, j;
2236 u32 tmp;
2237 u8 *s, *d;
2238 u32 ret;
2239
2240 s = in_buf;
2241 if (mode == SEARCH_SNAPSHOT) {
2242 if (!ipw2100_snapshot_alloc(priv))
2243 mode = SEARCH_DISCARD;
2244 }
2245
2246 for (ret = SEARCH_FAIL, i = 0; i < 0x30000; i += 4) {
2247 read_nic_dword(priv->net_dev, i, &tmp);
2248 if (mode == SEARCH_SNAPSHOT)
2249 *(u32 *)SNAPSHOT_ADDR(i) = tmp;
2250 if (ret == SEARCH_FAIL) {
2251 d = (u8*)&tmp;
2252 for (j = 0; j < 4; j++) {
2253 if (*s != *d) {
2254 s = in_buf;
2255 continue;
2256 }
2257
2258 s++;
2259 d++;
2260
2261 if ((s - in_buf) == len)
2262 ret = (i + j) - len + 1;
2263 }
2264 } else if (mode == SEARCH_DISCARD)
2265 return ret;
2266 }
2267
2268 return ret;
2269}
2270
2271/*
2272 *
2273 * 0) Disconnect the SKB from the firmware (just unmap)
2274 * 1) Pack the ETH header into the SKB
2275 * 2) Pass the SKB to the network stack
2276 *
2277 * When packet is provided by the firmware, it contains the following:
2278 *
2279 * . ieee80211_hdr
2280 * . ieee80211_snap_hdr
2281 *
2282 * The size of the constructed ethernet
2283 *
2284 */
2285#ifdef CONFIG_IPW2100_RX_DEBUG
2286u8 packet_data[IPW_RX_NIC_BUFFER_LENGTH];
2287#endif
2288
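/* Called when a corrupted RX status entry is detected.  The usual cause is
 * PCI latency while the CPU is in a deep (C3) power state, so if the ACPI
 * C-state limit interface is available we clamp the processor to C2 before
 * restarting the firmware. */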
2289static inline void ipw2100_corruption_detected(struct ipw2100_priv *priv,
2290 int i)
2291{
2292#ifdef CONFIG_IPW_DEBUG_C3
2293 struct ipw2100_status *status = &priv->status_queue.drv[i];
2294 u32 match, reg;
2295 int j;
2296#endif
2297#ifdef ACPI_CSTATE_LIMIT_DEFINED
2298 int limit;
2299#endif
2300
2301 IPW_DEBUG_INFO(DRV_NAME ": PCI latency error detected at "
2302 "0x%04zX.\n", i * sizeof(struct ipw2100_status));
2303
2304#ifdef ACPI_CSTATE_LIMIT_DEFINED
2305 IPW_DEBUG_INFO(DRV_NAME ": Disabling C3 transitions.\n");
2306 limit = acpi_get_cstate_limit();
2307 if (limit > 2) {
2308 priv->cstate_limit = limit;
2309 acpi_set_cstate_limit(2);
2310 priv->config |= CFG_C3_DISABLED;
2311 }
2312#endif
2313
2314#ifdef CONFIG_IPW_DEBUG_C3
2315 	/* Halt the firmware so we can get a good image */
2316 write_register(priv->net_dev, IPW_REG_RESET_REG,
2317 IPW_AUX_HOST_RESET_REG_STOP_MASTER);
2318 j = 5;
2319 do {
2320 udelay(IPW_WAIT_RESET_MASTER_ASSERT_COMPLETE_DELAY);
2321 read_register(priv->net_dev, IPW_REG_RESET_REG, &reg);
2322
2323 if (reg & IPW_AUX_HOST_RESET_REG_MASTER_DISABLED)
2324 break;
2325 } while (j--);
2326
2327 match = ipw2100_match_buf(priv, (u8*)status,
2328 sizeof(struct ipw2100_status),
2329 SEARCH_SNAPSHOT);
2330 if (match < SEARCH_SUCCESS)
2331 IPW_DEBUG_INFO("%s: DMA status match in Firmware at "
2332 "offset 0x%06X, length %d:\n",
2333 priv->net_dev->name, match,
2334 sizeof(struct ipw2100_status));
2335 else
2336 IPW_DEBUG_INFO("%s: No DMA status match in "
2337 "Firmware.\n", priv->net_dev->name);
2338
2339 printk_buf((u8*)priv->status_queue.drv,
2340 sizeof(struct ipw2100_status) * RX_QUEUE_LENGTH);
2341#endif
2342
2343 priv->fatal_error = IPW2100_ERR_C3_CORRUPTION;
2344 priv->ieee->stats.rx_errors++;
2345 schedule_reset(priv);
2346}
2347
2348static inline void isr_rx(struct ipw2100_priv *priv, int i,
2349 struct ieee80211_rx_stats *stats)
2350{
2351 struct ipw2100_status *status = &priv->status_queue.drv[i];
2352 struct ipw2100_rx_packet *packet = &priv->rx_buffers[i];
2353
2354 IPW_DEBUG_RX("Handler...\n");
2355
2356 if (unlikely(status->frame_size > skb_tailroom(packet->skb))) {
2357 IPW_DEBUG_INFO("%s: frame_size (%u) > skb_tailroom (%u)!"
2358 " Dropping.\n",
2359 priv->net_dev->name,
2360 status->frame_size, skb_tailroom(packet->skb));
2361 priv->ieee->stats.rx_errors++;
2362 return;
2363 }
2364
2365 if (unlikely(!netif_running(priv->net_dev))) {
2366 priv->ieee->stats.rx_errors++;
2367 priv->wstats.discard.misc++;
2368 IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
2369 return;
2370 }
2371
2372 if (unlikely(priv->ieee->iw_mode == IW_MODE_MONITOR &&
2373 status->flags & IPW_STATUS_FLAG_CRC_ERROR)) {
2374 IPW_DEBUG_RX("CRC error in packet. Dropping.\n");
2375 priv->ieee->stats.rx_errors++;
2376 return;
2377 }
2378
2379 if (unlikely(priv->ieee->iw_mode != IW_MODE_MONITOR &&
2380 !(priv->status & STATUS_ASSOCIATED))) {
2381 IPW_DEBUG_DROP("Dropping packet while not associated.\n");
2382 priv->wstats.discard.misc++;
2383 return;
2384 }
2385
2386
2387 pci_unmap_single(priv->pci_dev,
2388 packet->dma_addr,
2389 sizeof(struct ipw2100_rx),
2390 PCI_DMA_FROMDEVICE);
2391
2392 skb_put(packet->skb, status->frame_size);
2393
2394#ifdef CONFIG_IPW2100_RX_DEBUG
2395 /* Make a copy of the frame so we can dump it to the logs if
2396 * ieee80211_rx fails */
2397 memcpy(packet_data, packet->skb->data,
2398 min_t(u32, status->frame_size, IPW_RX_NIC_BUFFER_LENGTH));
2399#endif
2400
2401 if (!ieee80211_rx(priv->ieee, packet->skb, stats)) {
2402#ifdef CONFIG_IPW2100_RX_DEBUG
2403 IPW_DEBUG_DROP("%s: Non consumed packet:\n",
2404 priv->net_dev->name);
2405 printk_buf(IPW_DL_DROP, packet_data, status->frame_size);
2406#endif
2407 priv->ieee->stats.rx_errors++;
2408
2409 /* ieee80211_rx failed, so it didn't free the SKB */
2410 dev_kfree_skb_any(packet->skb);
2411 packet->skb = NULL;
2412 }
2413
2414 	/* We need to allocate a new SKB and attach it to the RBD. */
2415 if (unlikely(ipw2100_alloc_skb(priv, packet))) {
2416 IPW_DEBUG_WARNING(
2417 "%s: Unable to allocate SKB onto RBD ring - disabling "
2418 "adapter.\n", priv->net_dev->name);
2419 /* TODO: schedule adapter shutdown */
2420 IPW_DEBUG_INFO("TODO: Shutdown adapter...\n");
2421 }
2422
2423 	/* Update the RBD entry */
2424 priv->rx_queue.drv[i].host_addr = packet->dma_addr;
2425}
2426
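/* Sanity-check a status ring entry: return nonzero if its frame_size is
 * inconsistent with the frame type it claims to carry, which the caller
 * treats as ring corruption. */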
2427static inline int ipw2100_corruption_check(struct ipw2100_priv *priv, int i)
2428{
2429 struct ipw2100_status *status = &priv->status_queue.drv[i];
2430 struct ipw2100_rx *u = priv->rx_buffers[i].rxp;
2431 u16 frame_type = status->status_fields & STATUS_TYPE_MASK;
2432
2433 switch (frame_type) {
2434 case COMMAND_STATUS_VAL:
2435 return (status->frame_size != sizeof(u->rx_data.command));
2436 case STATUS_CHANGE_VAL:
2437 return (status->frame_size != sizeof(u->rx_data.status));
2438 case HOST_NOTIFICATION_VAL:
2439 return (status->frame_size < sizeof(u->rx_data.notification));
2440 case P80211_DATA_VAL:
2441 case P8023_DATA_VAL:
2442#ifdef CONFIG_IPW2100_MONITOR
2443 return 0;
2444#else
2445 switch (WLAN_FC_GET_TYPE(u->rx_data.header.frame_ctl)) {
2446 case IEEE80211_FTYPE_MGMT:
2447 case IEEE80211_FTYPE_CTL:
2448 return 0;
2449 case IEEE80211_FTYPE_DATA:
2450 return (status->frame_size >
2451 IPW_MAX_802_11_PAYLOAD_LENGTH);
2452 }
2453#endif
2454 }
2455
2456 return 1;
2457}
2458
2459/*
2460 * ipw2100 interrupts are disabled at this point, and the ISR
2461 * is the only code that calls this method. So, we do not need
2462 * to play with any locks.
2463 *
2464 * RX Queue works as follows:
2465 *
2466 * Read index - firmware places packet in entry identified by the
2467 * Read index and advances Read index. In this manner,
2468 * Read index will always point to the next packet to
2469 * be filled--but not yet valid.
2470 *
2471 * Write index - driver fills this entry with an unused RBD entry.
2472  *               This entry has not been filled by the firmware yet.
2473 *
2474 * In between the W and R indexes are the RBDs that have been received
2475 * but not yet processed.
2476 *
2477 * The process of handling packets will start at WRITE + 1 and advance
2478 * until it reaches the READ index.
2479 *
2480 * The WRITE index is cached in the variable 'priv->rx_queue.next'.
2481 *
2482 */
2483static inline void __ipw2100_rx_process(struct ipw2100_priv *priv)
2484{
2485 struct ipw2100_bd_queue *rxq = &priv->rx_queue;
2486 struct ipw2100_status_queue *sq = &priv->status_queue;
2487 struct ipw2100_rx_packet *packet;
2488 u16 frame_type;
2489 u32 r, w, i, s;
2490 struct ipw2100_rx *u;
2491 struct ieee80211_rx_stats stats = {
2492 .mac_time = jiffies,
2493 };
2494
2495 read_register(priv->net_dev, IPW_MEM_HOST_SHARED_RX_READ_INDEX, &r);
2496 read_register(priv->net_dev, IPW_MEM_HOST_SHARED_RX_WRITE_INDEX, &w);
2497
2498 if (r >= rxq->entries) {
2499 IPW_DEBUG_RX("exit - bad read index\n");
2500 return;
2501 }
2502
2503 i = (rxq->next + 1) % rxq->entries;
2504 s = i;
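	/* Remember where we started so we only push a new write index to the
	 * hardware if at least one entry was actually processed. */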
2505 while (i != r) {
2506 /* IPW_DEBUG_RX("r = %d : w = %d : processing = %d\n",
2507 r, rxq->next, i); */
2508
2509 packet = &priv->rx_buffers[i];
2510
2511 /* Sync the DMA for the STATUS buffer so CPU is sure to get
2512 * the correct values */
2513 pci_dma_sync_single_for_cpu(
2514 priv->pci_dev,
2515 sq->nic + sizeof(struct ipw2100_status) * i,
2516 sizeof(struct ipw2100_status),
2517 PCI_DMA_FROMDEVICE);
2518
2519 /* Sync the DMA for the RX buffer so CPU is sure to get
2520 * the correct values */
2521 pci_dma_sync_single_for_cpu(priv->pci_dev, packet->dma_addr,
2522 sizeof(struct ipw2100_rx),
2523 PCI_DMA_FROMDEVICE);
2524
2525 if (unlikely(ipw2100_corruption_check(priv, i))) {
2526 ipw2100_corruption_detected(priv, i);
2527 goto increment;
2528 }
2529
2530 u = packet->rxp;
2531 frame_type = sq->drv[i].status_fields &
2532 STATUS_TYPE_MASK;
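		/* IPW2100_RSSI_TO_DBM converts the firmware's RSSI reading
		 * into a dBm value for the stack. */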
2533 stats.rssi = sq->drv[i].rssi + IPW2100_RSSI_TO_DBM;
2534 stats.len = sq->drv[i].frame_size;
2535
2536 stats.mask = 0;
2537 if (stats.rssi != 0)
2538 stats.mask |= IEEE80211_STATMASK_RSSI;
2539 stats.freq = IEEE80211_24GHZ_BAND;
2540
2541 IPW_DEBUG_RX(
2542 "%s: '%s' frame type received (%d).\n",
2543 priv->net_dev->name, frame_types[frame_type],
2544 stats.len);
2545
2546 switch (frame_type) {
2547 case COMMAND_STATUS_VAL:
2548 /* Reset Rx watchdog */
2549 isr_rx_complete_command(
2550 priv, &u->rx_data.command);
2551 break;
2552
2553 case STATUS_CHANGE_VAL:
2554 isr_status_change(priv, u->rx_data.status);
2555 break;
2556
2557 case P80211_DATA_VAL:
2558 case P8023_DATA_VAL:
2559#ifdef CONFIG_IPW2100_MONITOR
2560 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
2561 isr_rx(priv, i, &stats);
2562 break;
2563 }
2564#endif
2565 if (stats.len < sizeof(u->rx_data.header))
2566 break;
2567 switch (WLAN_FC_GET_TYPE(u->rx_data.header.
2568 frame_ctl)) {
2569 case IEEE80211_FTYPE_MGMT:
2570 ieee80211_rx_mgt(priv->ieee,
2571 &u->rx_data.header,
2572 &stats);
2573 break;
2574
2575 case IEEE80211_FTYPE_CTL:
2576 break;
2577
2578 case IEEE80211_FTYPE_DATA:
2579 isr_rx(priv, i, &stats);
2580 break;
2581
2582 }
2583 break;
2584 }
2585
2586 increment:
2587 /* clear status field associated with this RBD */
2588 rxq->drv[i].status.info.field = 0;
2589
2590 i = (i + 1) % rxq->entries;
2591 }
2592
2593 if (i != s) {
2594 /* backtrack one entry, wrapping to end if at 0 */
2595 rxq->next = (i ? i : rxq->entries) - 1;
2596
2597 write_register(priv->net_dev,
2598 IPW_MEM_HOST_SHARED_RX_WRITE_INDEX,
2599 rxq->next);
2600 }
2601}
2602
2603
2604/*
2605 * __ipw2100_tx_process
2606 *
2607 * This routine will determine whether the next packet on
2608 * the fw_pend_list has been processed by the firmware yet.
2609 *
2610 * If not, then it does nothing and returns.
2611 *
2612 * If so, then it removes the item from the fw_pend_list, frees
2613 * any associated storage, and places the item back on the
2614 * free list of its source (either msg_free_list or tx_free_list)
2615 *
2616 * TX Queue works as follows:
2617 *
2618 * Read index - points to the next TBD that the firmware will
2619 * process. The firmware will read the data, and once
2620 * done processing, it will advance the Read index.
2621 *
2622  * Write index - driver fills this entry with a constructed TBD
2623 * entry. The Write index is not advanced until the
2624 * packet has been configured.
2625 *
2626 * In between the W and R indexes are the TBDs that have NOT been
2627 * processed. Lagging behind the R index are packets that have
2628 * been processed but have not been freed by the driver.
2629 *
2630 * In order to free old storage, an internal index will be maintained
2631 * that points to the next packet to be freed. When all used
2632 * packets have been freed, the oldest index will be the same as the
2633 * firmware's read index.
2634 *
2635 * The OLDEST index is cached in the variable 'priv->tx_queue.oldest'
2636 *
2637  * Because the TBD structure cannot contain arbitrary data, the
2638 * driver must keep an internal queue of cached allocations such that
2639 * it can put that data back into the tx_free_list and msg_free_list
2640 * for use by future command and data packets.
2641 *
2642 */
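/* For example (illustrative): with 12 TBD entries, txq->oldest == 10 and a
 * two-fragment DATA packet, the last fragment occupies slot 11.  Once the
 * firmware's read index has advanced past slot 11 (wrapping back toward 0),
 * both slots can be reclaimed below and the packet returned to
 * tx_free_list. */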
2643static inline int __ipw2100_tx_process(struct ipw2100_priv *priv)
2644{
2645 struct ipw2100_bd_queue *txq = &priv->tx_queue;
2646 struct ipw2100_bd *tbd;
2647 struct list_head *element;
2648 struct ipw2100_tx_packet *packet;
2649 int descriptors_used;
2650 int e, i;
2651 u32 r, w, frag_num = 0;
2652
2653 if (list_empty(&priv->fw_pend_list))
2654 return 0;
2655
2656 element = priv->fw_pend_list.next;
2657
2658 packet = list_entry(element, struct ipw2100_tx_packet, list);
2659 tbd = &txq->drv[packet->index];
2660
2661 /* Determine how many TBD entries must be finished... */
2662 switch (packet->type) {
2663 case COMMAND:
2664 /* COMMAND uses only one slot; don't advance */
2665 descriptors_used = 1;
2666 e = txq->oldest;
2667 break;
2668
2669 case DATA:
2670 /* DATA uses two or more slots; advance past the last fragment, wrapping if needed. */
2671 descriptors_used = tbd->num_fragments;
2672 frag_num = tbd->num_fragments - 1;
2673 e = txq->oldest + frag_num;
2674 e %= txq->entries;
2675 break;
2676
2677 default:
2678 IPW_DEBUG_WARNING("%s: Bad fw_pend_list entry!\n",
2679 priv->net_dev->name);
2680 return 0;
2681 }
2682
2683 /* If the last TBD has not been processed by the firmware yet, then
2684 * the packet is not ready to be released and we must not advance
2685 * the oldest index.
2686 */
2687 read_register(priv->net_dev, IPW_MEM_HOST_SHARED_TX_QUEUE_READ_INDEX,
2688 &r);
2689 read_register(priv->net_dev, IPW_MEM_HOST_SHARED_TX_QUEUE_WRITE_INDEX,
2690 &w);
2691 if (w != txq->next)
2692 IPW_DEBUG_WARNING("%s: write index mismatch\n",
2693 priv->net_dev->name);
2694
2695 /*
2696 * txq->next is the driver's write index (one past the last TBD written),
2697 * txq->oldest is the index of the oldest TBD not yet freed, and r is the
2698 * firmware's read index, i.e. the next TBD the firmware will process.
2699 */
2700
2701
2702 /*
2703 * Quick graphic to help you visualize the following
2704 * if / else statement
2705 *
2706 * ===>| s---->|===============
2707 * e>|
2708 * | a | b | c | d | e | f | g | h | i | j | k | l
2709 * r---->|
2710 * w
2711 *
2712 * w - updated by driver
2713 * r - updated by firmware
2714 * s - start of oldest BD entry (txq->oldest)
2715 * e - end of oldest BD entry
2716 *
2717 */
2718 if (!((r <= w && (e < r || e >= w)) || (e < r && e >= w))) {
2719 IPW_DEBUG_TX("exit - no processed packets ready to release.\n");
2720 return 0;
2721 }
2722
2723 list_del(element);
2724 DEC_STAT(&priv->fw_pend_stat);
2725
2726#ifdef CONFIG_IPW_DEBUG
2727 {
2728 int i = txq->oldest;
2729 IPW_DEBUG_TX(
2730 "TX%d V=%p P=%04X T=%04X L=%d\n", i,
2731 &txq->drv[i],
2732 (u32)(txq->nic + i * sizeof(struct ipw2100_bd)),
2733 txq->drv[i].host_addr,
2734 txq->drv[i].buf_length);
2735
2736 if (packet->type == DATA) {
2737 i = (i + 1) % txq->entries;
2738
2739 IPW_DEBUG_TX(
2740 "TX%d V=%p P=%04X T=%04X L=%d\n", i,
2741 &txq->drv[i],
2742 (u32)(txq->nic + i *
2743 sizeof(struct ipw2100_bd)),
2744 (u32)txq->drv[i].host_addr,
2745 txq->drv[i].buf_length);
2746 }
2747 }
2748#endif
2749
2750 switch (packet->type) {
2751 case DATA:
2752 if (txq->drv[txq->oldest].status.info.fields.txType != 0)
2753 IPW_DEBUG_WARNING("%s: Queue mismatch. "
2754 "Expecting DATA TBD but pulled "
2755 "something else: ids %d=%d.\n",
2756 priv->net_dev->name, txq->oldest, packet->index);
2757
2758 /* DATA packet; we have to unmap and free the SKB */
2759 priv->ieee->stats.tx_packets++;
2760 for (i = 0; i < frag_num; i++) {
2761 tbd = &txq->drv[(packet->index + 1 + i) %
2762 txq->entries];
2763
2764 IPW_DEBUG_TX(
2765 "TX%d P=%08x L=%d\n",
2766 (packet->index + 1 + i) % txq->entries,
2767 tbd->host_addr, tbd->buf_length);
2768
2769 pci_unmap_single(priv->pci_dev,
2770 tbd->host_addr,
2771 tbd->buf_length,
2772 PCI_DMA_TODEVICE);
2773 }
2774
2775 priv->ieee->stats.tx_bytes += packet->info.d_struct.txb->payload_size;
2776 ieee80211_txb_free(packet->info.d_struct.txb);
2777 packet->info.d_struct.txb = NULL;
2778
2779 list_add_tail(element, &priv->tx_free_list);
2780 INC_STAT(&priv->tx_free_stat);
2781
2782 /* We have a free slot in the Tx queue, so wake up the
2783 * transmit layer if it is stopped. */
2784 if (priv->status & STATUS_ASSOCIATED &&
2785 netif_queue_stopped(priv->net_dev)) {
2786 IPW_DEBUG_INFO(KERN_INFO
2787 "%s: Waking net queue.\n",
2788 priv->net_dev->name);
2789 netif_wake_queue(priv->net_dev);
2790 }
2791
2792 /* A packet was processed by the hardware, so update the
2793 * watchdog */
2794 priv->net_dev->trans_start = jiffies;
2795
2796 break;
2797
2798 case COMMAND:
2799 if (txq->drv[txq->oldest].status.info.fields.txType != 1)
2800 IPW_DEBUG_WARNING("%s: Queue mismatch. "
2801 "Expecting COMMAND TBD but pulled "
2802 "something else: ids %d=%d.\n",
2803 priv->net_dev->name, txq->oldest, packet->index);
2804
2805#ifdef CONFIG_IPW_DEBUG
2806 if (packet->info.c_struct.cmd->host_command_reg <
2807 sizeof(command_types) / sizeof(*command_types))
2808 IPW_DEBUG_TX(
2809 "Command '%s (%d)' processed: %d.\n",
2810 command_types[packet->info.c_struct.cmd->host_command_reg],
2811 packet->info.c_struct.cmd->host_command_reg,
2812 packet->info.c_struct.cmd->cmd_status_reg);
2813#endif
2814
2815 list_add_tail(element, &priv->msg_free_list);
2816 INC_STAT(&priv->msg_free_stat);
2817 break;
2818 }
2819
2820 /* advance oldest used TBD pointer to start of next entry */
2821 txq->oldest = (e + 1) % txq->entries;
2822 /* increase available TBDs number */
2823 txq->available += descriptors_used;
2824 SET_STAT(&priv->txq_stat, txq->available);
2825
2826 IPW_DEBUG_TX("packet latency (send to process) %ld jiffies\n",
2827 jiffies - packet->jiffy_start);
2828
2829 return (!list_empty(&priv->fw_pend_list));
2830}
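
Editor's note: the wrap-around test guarding the release in __ipw2100_tx_process() is the heart of the routine, so here is a minimal standalone sketch of it. This is illustration only, not driver code; tbd_processed() and the index values are invented. The rule it encodes is that an entry may be released once it lies outside the firmware's pending window [r, w), and its negation is exactly the compound early-return condition used above.

/* Standalone sketch (illustrative only) of the release check used in
 * __ipw2100_tx_process() above. */
#include <assert.h>
#include <stdbool.h>

/* r = firmware read index, w = driver write index, e = last TBD of the
 * oldest packet; all indexes wrap within [0, entries). */
static bool tbd_processed(unsigned int r, unsigned int w, unsigned int e)
{
	if (r <= w)			/* pending region [r, w) does not wrap */
		return e < r || e >= w;
	return e < r && e >= w;		/* pending region wraps past the end */
}

int main(void)
{
	/* 8-entry ring: firmware has read up to 5, driver has written up to 7 */
	assert(tbd_processed(5, 7, 3));		/* entry 3 already consumed */
	assert(!tbd_processed(5, 7, 6));	/* entry 6 still pending */
	/* wrapped case: pending entries are 6, 7 and 0 */
	assert(tbd_processed(6, 1, 4));
	assert(!tbd_processed(6, 1, 7));
	return 0;
}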
2831
2832
2833static inline void __ipw2100_tx_complete(struct ipw2100_priv *priv)
2834{
2835 int i = 0;
2836
2837 while (__ipw2100_tx_process(priv) && i < 200) i++;
2838
2839 if (i == 200) {
2840 IPW_DEBUG_WARNING(
2841 "%s: Driver is running slow (%d iters).\n",
2842 priv->net_dev->name, i);
2843 }
2844}
2845
2846
2847static void X__ipw2100_tx_send_commands(struct ipw2100_priv *priv)
2848{
2849 struct list_head *element;
2850 struct ipw2100_tx_packet *packet;
2851 struct ipw2100_bd_queue *txq = &priv->tx_queue;
2852 struct ipw2100_bd *tbd;
2853 int next = txq->next;
2854
2855 while (!list_empty(&priv->msg_pend_list)) {
2856 /* if there isn't enough space in TBD queue, then
2857 * don't stuff a new one in.
2858 * NOTE: 3 are needed as a command will take one,
2859 * and there is a minimum of 2 that must be
2860 * maintained between the r and w indexes
2861 */
2862 if (txq->available <= 3) {
2863 IPW_DEBUG_TX("no room in tx_queue\n");
2864 break;
2865 }
2866
2867 element = priv->msg_pend_list.next;
2868 list_del(element);
2869 DEC_STAT(&priv->msg_pend_stat);
2870
2871 packet = list_entry(element,
2872 struct ipw2100_tx_packet, list);
2873
2874 IPW_DEBUG_TX("using TBD at virt=%p, phys=%p\n",
2875 &txq->drv[txq->next],
2876 (void*)(txq->nic + txq->next *
2877 sizeof(struct ipw2100_bd)));
2878
2879 packet->index = txq->next;
2880
2881 tbd = &txq->drv[txq->next];
2882
2883 /* initialize TBD */
2884 tbd->host_addr = packet->info.c_struct.cmd_phys;
2885 tbd->buf_length = sizeof(struct ipw2100_cmd_header);
2886 /* not marking number of fragments causes problems
2887 * with f/w debug version */
2888 tbd->num_fragments = 1;
2889 tbd->status.info.field =
2890 IPW_BD_STATUS_TX_FRAME_COMMAND |
2891 IPW_BD_STATUS_TX_INTERRUPT_ENABLE;
2892
2893 /* update TBD queue counters */
2894 txq->next++;
2895 txq->next %= txq->entries;
2896 txq->available--;
2897 DEC_STAT(&priv->txq_stat);
2898
2899 list_add_tail(element, &priv->fw_pend_list);
2900 INC_STAT(&priv->fw_pend_stat);
2901 }
2902
2903 if (txq->next != next) {
2904 /* kick off the DMA by notifying firmware the
2905 * write index has moved; make sure TBD stores are sync'd */
2906 wmb();
2907 write_register(priv->net_dev,
2908 IPW_MEM_HOST_SHARED_TX_QUEUE_WRITE_INDEX,
2909 txq->next);
2910 }
2911}
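
Editor's note: the NOTE at the top of the loop above describes a slot-reservation rule. The following small sketch (not driver code; the ring type and ring_push() are invented names) shows the same rule: a packet needing 'slots' descriptors is only queued while at least two descriptors remain free between the write and read indexes, so the firmware can always tell a full ring from an empty one.

#include <assert.h>
#include <stdbool.h>

struct ring {
	unsigned int next;		/* driver write index */
	unsigned int entries;		/* ring size */
	unsigned int available;		/* free descriptors */
};

/* Refuse the push unless the two-descriptor gap survives it. */
static bool ring_push(struct ring *q, unsigned int slots)
{
	if (q->available <= slots + 2)
		return false;
	q->next = (q->next + slots) % q->entries;
	q->available -= slots;
	return true;
}

int main(void)
{
	struct ring q = { .next = 0, .entries = 8, .available = 8 };

	assert(ring_push(&q, 3));	/* 1 header + 2 fragments fits */
	assert(!ring_push(&q, 3));	/* would eat into the reserved gap */
	return 0;
}

With slots == 1 this reduces to the "available <= 3" test used for commands above, and with slots == 1 + nr_frags to the "available <= 3 + nr_frags" test used for data below.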
2912
2913
2914/*
2915 * X__ipw2100_tx_send_data
2916 *
2917 */
2918static void X__ipw2100_tx_send_data(struct ipw2100_priv *priv)
2919{
2920 struct list_head *element;
2921 struct ipw2100_tx_packet *packet;
2922 struct ipw2100_bd_queue *txq = &priv->tx_queue;
2923 struct ipw2100_bd *tbd;
2924 int next = txq->next;
2925 int i = 0;
2926 struct ipw2100_data_header *ipw_hdr;
2927 struct ieee80211_hdr *hdr;
2928
2929 while (!list_empty(&priv->tx_pend_list)) {
2930 /* if there isn't enough space in TBD queue, then
2931 * don't stuff a new one in.
2932 * NOTE: at least 4 are needed, as a data packet will take two or more,
2933 * and there is a minimum of 2 that must be
2934 * maintained between the r and w indexes
2935 */
2936 element = priv->tx_pend_list.next;
2937 packet = list_entry(element, struct ipw2100_tx_packet, list);
2938
2939 if (unlikely(1 + packet->info.d_struct.txb->nr_frags >
2940 IPW_MAX_BDS)) {
2941 /* TODO: Support merging buffers if more than
2942 * IPW_MAX_BDS are used */
2943 IPW_DEBUG_INFO(
2944 "%s: Maximum BD theshold exceeded. "
2945 "Increase fragmentation level.\n",
2946 priv->net_dev->name);
2947 }
2948
2949 if (txq->available <= 3 +
2950 packet->info.d_struct.txb->nr_frags) {
2951 IPW_DEBUG_TX("no room in tx_queue\n");
2952 break;
2953 }
2954
2955 list_del(element);
2956 DEC_STAT(&priv->tx_pend_stat);
2957
2958 tbd = &txq->drv[txq->next];
2959
2960 packet->index = txq->next;
2961
2962 ipw_hdr = packet->info.d_struct.data;
2963 hdr = (struct ieee80211_hdr *)packet->info.d_struct.txb->
2964 fragments[0]->data;
2965
2966 if (priv->ieee->iw_mode == IW_MODE_INFRA) {
2967 /* To DS: Addr1 = BSSID, Addr2 = SA,
2968 Addr3 = DA */
2969 memcpy(ipw_hdr->src_addr, hdr->addr2, ETH_ALEN);
2970 memcpy(ipw_hdr->dst_addr, hdr->addr3, ETH_ALEN);
2971 } else if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
2972 /* not From/To DS: Addr1 = DA, Addr2 = SA,
2973 Addr3 = BSSID */
2974 memcpy(ipw_hdr->src_addr, hdr->addr2, ETH_ALEN);
2975 memcpy(ipw_hdr->dst_addr, hdr->addr1, ETH_ALEN);
2976 }
2977
2978 ipw_hdr->host_command_reg = SEND;
2979 ipw_hdr->host_command_reg1 = 0;
2980
2981 /* For now we only support host based encryption */
2982 ipw_hdr->needs_encryption = 0;
2983 ipw_hdr->encrypted = packet->info.d_struct.txb->encrypted;
2984 if (packet->info.d_struct.txb->nr_frags > 1)
2985 ipw_hdr->fragment_size =
2986 packet->info.d_struct.txb->frag_size - IEEE80211_3ADDR_LEN;
2987 else
2988 ipw_hdr->fragment_size = 0;
2989
2990 tbd->host_addr = packet->info.d_struct.data_phys;
2991 tbd->buf_length = sizeof(struct ipw2100_data_header);
2992 tbd->num_fragments = 1 + packet->info.d_struct.txb->nr_frags;
2993 tbd->status.info.field =
2994 IPW_BD_STATUS_TX_FRAME_802_3 |
2995 IPW_BD_STATUS_TX_FRAME_NOT_LAST_FRAGMENT;
2996 txq->next++;
2997 txq->next %= txq->entries;
2998
2999 IPW_DEBUG_TX(
3000 "data header tbd TX%d P=%08x L=%d\n",
3001 packet->index, tbd->host_addr,
3002 tbd->buf_length);
3003#ifdef CONFIG_IPW_DEBUG
3004 if (packet->info.d_struct.txb->nr_frags > 1)
3005 IPW_DEBUG_FRAG("fragment Tx: %d frames\n",
3006 packet->info.d_struct.txb->nr_frags);
3007#endif
3008
3009 for (i = 0; i < packet->info.d_struct.txb->nr_frags; i++) {
3010 tbd = &txq->drv[txq->next];
3011 if (i == packet->info.d_struct.txb->nr_frags - 1)
3012 tbd->status.info.field =
3013 IPW_BD_STATUS_TX_FRAME_802_3 |
3014 IPW_BD_STATUS_TX_INTERRUPT_ENABLE;
3015 else
3016 tbd->status.info.field =
3017 IPW_BD_STATUS_TX_FRAME_802_3 |
3018 IPW_BD_STATUS_TX_FRAME_NOT_LAST_FRAGMENT;
3019
3020 tbd->buf_length = packet->info.d_struct.txb->
3021 fragments[i]->len - IEEE80211_3ADDR_LEN;
3022
3023 tbd->host_addr = pci_map_single(
3024 priv->pci_dev,
3025 packet->info.d_struct.txb->fragments[i]->data +
3026 IEEE80211_3ADDR_LEN,
3027 tbd->buf_length,
3028 PCI_DMA_TODEVICE);
3029
3030 IPW_DEBUG_TX(
3031 "data frag tbd TX%d P=%08x L=%d\n",
3032 txq->next, tbd->host_addr, tbd->buf_length);
3033
3034 pci_dma_sync_single_for_device(
3035 priv->pci_dev, tbd->host_addr,
3036 tbd->buf_length,
3037 PCI_DMA_TODEVICE);
3038
3039 txq->next++;
3040 txq->next %= txq->entries;
3041 }
3042
3043 txq->available -= 1 + packet->info.d_struct.txb->nr_frags;
3044 SET_STAT(&priv->txq_stat, txq->available);
3045
3046 list_add_tail(element, &priv->fw_pend_list);
3047 INC_STAT(&priv->fw_pend_stat);
3048 }
3049
3050 if (txq->next != next) {
3051 /* kick off the DMA by notifying firmware the
3052 * write index has moved; make sure TBD stores are sync'd */
		wmb();
3053 write_register(priv->net_dev,
3054 IPW_MEM_HOST_SHARED_TX_QUEUE_WRITE_INDEX,
3055 txq->next);
3056 }
3057 return;
3058}
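
Editor's note: the address handling in the infrastructure/ad-hoc branches above is compact, so here is an illustrative sketch of the same mapping (the hdr_3addr stand-in and map_addresses() are invented for the example). For frames the driver itself queued, addr2 always carries the source; the destination is addr3 when the frame goes To-DS (managed mode) and addr1 in IBSS.

#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

struct hdr_3addr {			/* minimal stand-in for ieee80211_hdr */
	unsigned char addr1[ETH_ALEN];	/* RA: BSSID (infra) or DA (ad-hoc) */
	unsigned char addr2[ETH_ALEN];	/* TA: SA in both modes */
	unsigned char addr3[ETH_ALEN];	/* DA (infra) or BSSID (ad-hoc) */
};

static void map_addresses(const struct hdr_3addr *h, int infrastructure,
			  unsigned char *src, unsigned char *dst)
{
	memcpy(src, h->addr2, ETH_ALEN);
	memcpy(dst, infrastructure ? h->addr3 : h->addr1, ETH_ALEN);
}

int main(void)
{
	struct hdr_3addr h = {
		.addr1 = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },	/* BSSID */
		.addr2 = { 0x00, 0xaa, 0xbb, 0xcc, 0xdd, 0xee },	/* SA    */
		.addr3 = { 0x00, 0x66, 0x77, 0x88, 0x99, 0xaa },	/* DA    */
	};
	unsigned char src[ETH_ALEN], dst[ETH_ALEN];

	map_addresses(&h, 1, src, dst);		/* infrastructure: dst = addr3 */
	printf("dst ends in %02x\n", dst[5]);	/* prints "dst ends in aa" */
	return 0;
}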
3059
3060static void ipw2100_irq_tasklet(struct ipw2100_priv *priv)
3061{
3062 struct net_device *dev = priv->net_dev;
3063 unsigned long flags;
3064 u32 inta, tmp;
3065
3066 spin_lock_irqsave(&priv->low_lock, flags);
3067 ipw2100_disable_interrupts(priv);
3068
3069 read_register(dev, IPW_REG_INTA, &inta);
3070
3071 IPW_DEBUG_ISR("enter - INTA: 0x%08lX\n",
3072 (unsigned long)inta & IPW_INTERRUPT_MASK);
3073
3074 priv->in_isr++;
3075 priv->interrupts++;
3076
3077 /* We do not loop and keep polling for more interrupts as this
3078 * is frowned upon and doesn't play nicely with other potentially
3079 * chained IRQs */
3080 IPW_DEBUG_ISR("INTA: 0x%08lX\n",
3081 (unsigned long)inta & IPW_INTERRUPT_MASK);
3082
3083 if (inta & IPW2100_INTA_FATAL_ERROR) {
3084 IPW_DEBUG_WARNING(DRV_NAME
3085 ": Fatal interrupt. Scheduling firmware restart.\n");
3086 priv->inta_other++;
3087 write_register(
3088 dev, IPW_REG_INTA,
3089 IPW2100_INTA_FATAL_ERROR);
3090
3091 read_nic_dword(dev, IPW_NIC_FATAL_ERROR, &priv->fatal_error);
3092 IPW_DEBUG_INFO("%s: Fatal error value: 0x%08X\n",
3093 priv->net_dev->name, priv->fatal_error);
3094
3095 read_nic_dword(dev, IPW_ERROR_ADDR(priv->fatal_error), &tmp);
3096 IPW_DEBUG_INFO("%s: Fatal error address value: 0x%08X\n",
3097 priv->net_dev->name, tmp);
3098
3099 /* Wake up any sleeping jobs */
3100 schedule_reset(priv);
3101 }
3102
3103 if (inta & IPW2100_INTA_PARITY_ERROR) {
3104 IPW_DEBUG_ERROR("***** PARITY ERROR INTERRUPT !!!! \n");
3105 priv->inta_other++;
3106 write_register(
3107 dev, IPW_REG_INTA,
3108 IPW2100_INTA_PARITY_ERROR);
3109 }
3110
3111 if (inta & IPW2100_INTA_RX_TRANSFER) {
3112 IPW_DEBUG_ISR("RX interrupt\n");
3113
3114 priv->rx_interrupts++;
3115
3116 write_register(
3117 dev, IPW_REG_INTA,
3118 IPW2100_INTA_RX_TRANSFER);
3119
3120 __ipw2100_rx_process(priv);
3121 __ipw2100_tx_complete(priv);
3122 }
3123
3124 if (inta & IPW2100_INTA_TX_TRANSFER) {
3125 IPW_DEBUG_ISR("TX interrupt\n");
3126
3127 priv->tx_interrupts++;
3128
3129 write_register(dev, IPW_REG_INTA,
3130 IPW2100_INTA_TX_TRANSFER);
3131
3132 __ipw2100_tx_complete(priv);
3133 X__ipw2100_tx_send_commands(priv);
3134 X__ipw2100_tx_send_data(priv);
3135 }
3136
3137 if (inta & IPW2100_INTA_TX_COMPLETE) {
3138 IPW_DEBUG_ISR("TX complete\n");
3139 priv->inta_other++;
3140 write_register(
3141 dev, IPW_REG_INTA,
3142 IPW2100_INTA_TX_COMPLETE);
3143
3144 __ipw2100_tx_complete(priv);
3145 }
3146
3147 if (inta & IPW2100_INTA_EVENT_INTERRUPT) {
3148 /* ipw2100_handle_event(dev); */
3149 priv->inta_other++;
3150 write_register(
3151 dev, IPW_REG_INTA,
3152 IPW2100_INTA_EVENT_INTERRUPT);
3153 }
3154
3155 if (inta & IPW2100_INTA_FW_INIT_DONE) {
3156 IPW_DEBUG_ISR("FW init done interrupt\n");
3157 priv->inta_other++;
3158
3159 read_register(dev, IPW_REG_INTA, &tmp);
3160 if (tmp & (IPW2100_INTA_FATAL_ERROR |
3161 IPW2100_INTA_PARITY_ERROR)) {
3162 write_register(
3163 dev, IPW_REG_INTA,
3164 IPW2100_INTA_FATAL_ERROR |
3165 IPW2100_INTA_PARITY_ERROR);
3166 }
3167
3168 write_register(dev, IPW_REG_INTA,
3169 IPW2100_INTA_FW_INIT_DONE);
3170 }
3171
3172 if (inta & IPW2100_INTA_STATUS_CHANGE) {
3173 IPW_DEBUG_ISR("Status change interrupt\n");
3174 priv->inta_other++;
3175 write_register(
3176 dev, IPW_REG_INTA,
3177 IPW2100_INTA_STATUS_CHANGE);
3178 }
3179
3180 if (inta & IPW2100_INTA_SLAVE_MODE_HOST_COMMAND_DONE) {
3181 IPW_DEBUG_ISR("slave host mode interrupt\n");
3182 priv->inta_other++;
3183 write_register(
3184 dev, IPW_REG_INTA,
3185 IPW2100_INTA_SLAVE_MODE_HOST_COMMAND_DONE);
3186 }
3187
3188 priv->in_isr--;
3189 ipw2100_enable_interrupts(priv);
3190
3191 spin_unlock_irqrestore(&priv->low_lock, flags);
3192
3193 IPW_DEBUG_ISR("exit\n");
3194}
3195
3196
3197static irqreturn_t ipw2100_interrupt(int irq, void *data,
3198 struct pt_regs *regs)
3199{
3200 struct ipw2100_priv *priv = data;
3201 u32 inta, inta_mask;
3202
3203 if (!data)
3204 return IRQ_NONE;
3205
3206 spin_lock(&priv->low_lock);
3207
3208 /* We check to see if we should be ignoring interrupts before
3209 * we touch the hardware. During ucode load, if we try to handle
3210 * an interrupt we can cause keyboard problems as well as cause
3211 * the ucode to fail to initialize */
3212 if (!(priv->status & STATUS_INT_ENABLED)) {
3213 /* Shared IRQ */
3214 goto none;
3215 }
3216
3217 read_register(priv->net_dev, IPW_REG_INTA_MASK, &inta_mask);
3218 read_register(priv->net_dev, IPW_REG_INTA, &inta);
3219
3220 if (inta == 0xFFFFFFFF) {
3221 /* Hardware disappeared */
3222 IPW_DEBUG_WARNING("IRQ INTA == 0xFFFFFFFF\n");
3223 goto none;
3224 }
3225
3226 inta &= IPW_INTERRUPT_MASK;
3227
3228 if (!(inta & inta_mask)) {
3229 /* Shared interrupt */
3230 goto none;
3231 }
3232
3233 /* We disable the hardware interrupt here just to prevent unneeded
3234 * calls from being made. The interrupt is disabled again at the
3235 * start of the work tasklet, so if another part of the code
3236 * re-enables it in the meantime, that is fine */
3237 ipw2100_disable_interrupts(priv);
3238
3239 tasklet_schedule(&priv->irq_tasklet);
3240 spin_unlock(&priv->low_lock);
3241
3242 return IRQ_HANDLED;
3243 none:
3244 spin_unlock(&priv->low_lock);
3245 return IRQ_NONE;
3246}
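
Editor's note: the two routines above follow the usual split between a hard IRQ handler (confirm the interrupt is ours, mask further interrupts, schedule the tasklet) and a tasklet that acknowledges and services each status bit. Below is a user-space sketch of the ack-and-dispatch part only; the bit names and the fake latched register are invented for illustration and are not the real IPW2100_INTA_* values.

#include <stdint.h>
#include <stdio.h>

#define INTA_ERR 0x01		/* illustrative bit values only */
#define INTA_RX  0x02
#define INTA_TX  0x04

static uint32_t latched = INTA_RX | INTA_TX;	/* pretend status register */

static void ack(uint32_t bit)
{
	latched &= ~bit;	/* models write-one-to-clear acknowledgement */
}

int main(void)
{
	uint32_t inta = latched;	/* snapshot once, as the tasklet does */

	if (inta & INTA_ERR) { ack(INTA_ERR); puts("schedule firmware reset"); }
	if (inta & INTA_RX)  { ack(INTA_RX);  puts("process rx, reap tx"); }
	if (inta & INTA_TX)  { ack(INTA_TX);  puts("reap tx, queue more work"); }
	return 0;
}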
3247
3248static int ipw2100_tx(struct ieee80211_txb *txb, struct net_device *dev)
3249{
3250 struct ipw2100_priv *priv = ieee80211_priv(dev);
3251 struct list_head *element;
3252 struct ipw2100_tx_packet *packet;
3253 unsigned long flags;
3254
3255 spin_lock_irqsave(&priv->low_lock, flags);
3256
3257 if (!(priv->status & STATUS_ASSOCIATED)) {
3258 IPW_DEBUG_INFO("Can not transmit when not connected.\n");
3259 priv->ieee->stats.tx_carrier_errors++;
3260 netif_stop_queue(dev);
3261 goto fail_unlock;
3262 }
3263
3264 if (list_empty(&priv->tx_free_list))
3265 goto fail_unlock;
3266
3267 element = priv->tx_free_list.next;
3268 packet = list_entry(element, struct ipw2100_tx_packet, list);
3269
3270 packet->info.d_struct.txb = txb;
3271
3272 IPW_DEBUG_TX("Sending fragment (%d bytes):\n",
3273 txb->fragments[0]->len);
3274 printk_buf(IPW_DL_TX, txb->fragments[0]->data,
3275 txb->fragments[0]->len);
3276
3277 packet->jiffy_start = jiffies;
3278
3279 list_del(element);
3280 DEC_STAT(&priv->tx_free_stat);
3281
3282 list_add_tail(element, &priv->tx_pend_list);
3283 INC_STAT(&priv->tx_pend_stat);
3284
3285 X__ipw2100_tx_send_data(priv);
3286
3287 spin_unlock_irqrestore(&priv->low_lock, flags);
3288 return 0;
3289
3290 fail_unlock:
3291 netif_stop_queue(dev);
3292 spin_unlock_irqrestore(&priv->low_lock, flags);
3293 return 1;
3294}
3295
3296
3297static int ipw2100_msg_allocate(struct ipw2100_priv *priv)
3298{
3299 int i, j, err = -EINVAL;
3300 void *v;
3301 dma_addr_t p;
3302
3303 priv->msg_buffers = (struct ipw2100_tx_packet *)kmalloc(
3304 IPW_COMMAND_POOL_SIZE * sizeof(struct ipw2100_tx_packet),
3305 GFP_KERNEL);
3306 if (!priv->msg_buffers) {
3307 IPW_DEBUG_ERROR("%s: kmalloc failed for msg "
3308 "buffer table.\n", priv->net_dev->name);
3309 return -ENOMEM;
3310 }
3311
3312 for (i = 0; i < IPW_COMMAND_POOL_SIZE; i++) {
3313 v = pci_alloc_consistent(
3314 priv->pci_dev,
3315 sizeof(struct ipw2100_cmd_header),
3316 &p);
3317 if (!v) {
3318 IPW_DEBUG_ERROR(
3319 "%s: PCI alloc failed for msg "
3320 "buffers.\n",
3321 priv->net_dev->name);
3322 err = -ENOMEM;
3323 break;
3324 }
3325
3326 memset(v, 0, sizeof(struct ipw2100_cmd_header));
3327
3328 priv->msg_buffers[i].type = COMMAND;
3329 priv->msg_buffers[i].info.c_struct.cmd =
3330 (struct ipw2100_cmd_header*)v;
3331 priv->msg_buffers[i].info.c_struct.cmd_phys = p;
3332 }
3333
3334 if (i == IPW_COMMAND_POOL_SIZE)
3335 return 0;
3336
3337 for (j = 0; j < i; j++) {
3338 pci_free_consistent(
3339 priv->pci_dev,
3340 sizeof(struct ipw2100_cmd_header),
3341 priv->msg_buffers[j].info.c_struct.cmd,
3342 priv->msg_buffers[j].info.c_struct.cmd_phys);
3343 }
3344
3345 kfree(priv->msg_buffers);
3346 priv->msg_buffers = NULL;
3347
3348 return err;
3349}
3350
3351static int ipw2100_msg_initialize(struct ipw2100_priv *priv)
3352{
3353 int i;
3354
3355 INIT_LIST_HEAD(&priv->msg_free_list);
3356 INIT_LIST_HEAD(&priv->msg_pend_list);
3357
3358 for (i = 0; i < IPW_COMMAND_POOL_SIZE; i++)
3359 list_add_tail(&priv->msg_buffers[i].list, &priv->msg_free_list);
3360 SET_STAT(&priv->msg_free_stat, i);
3361
3362 return 0;
3363}
3364
3365static void ipw2100_msg_free(struct ipw2100_priv *priv)
3366{
3367 int i;
3368
3369 if (!priv->msg_buffers)
3370 return;
3371
3372 for (i = 0; i < IPW_COMMAND_POOL_SIZE; i++) {
3373 pci_free_consistent(priv->pci_dev,
3374 sizeof(struct ipw2100_cmd_header),
3375 priv->msg_buffers[i].info.c_struct.cmd,
3376 priv->msg_buffers[i].info.c_struct.cmd_phys);
3377 }
3378
3379 kfree(priv->msg_buffers);
3380 priv->msg_buffers = NULL;
3381}
3382
3383static ssize_t show_pci(struct device *d, struct device_attribute *attr,
3384 char *buf)
3385{
3386 struct pci_dev *pci_dev = container_of(d, struct pci_dev, dev);
3387 char *out = buf;
3388 int i, j;
3389 u32 val;
3390
3391 for (i = 0; i < 16; i++) {
3392 out += sprintf(out, "[%08X] ", i * 16);
3393 for (j = 0; j < 16; j += 4) {
3394 pci_read_config_dword(pci_dev, i * 16 + j, &val);
3395 out += sprintf(out, "%08X ", val);
3396 }
3397 out += sprintf(out, "\n");
3398 }
3399
3400 return out - buf;
3401}
3402static DEVICE_ATTR(pci, S_IRUGO, show_pci, NULL);
3403
3404static ssize_t show_cfg(struct device *d, struct device_attribute *attr,
3405 char *buf)
3406{
3407 struct ipw2100_priv *p = d->driver_data;
3408 return sprintf(buf, "0x%08x\n", (int)p->config);
3409}
3410static DEVICE_ATTR(cfg, S_IRUGO, show_cfg, NULL);
3411
3412static ssize_t show_status(struct device *d, struct device_attribute *attr,
3413 char *buf)
3414{
3415 struct ipw2100_priv *p = d->driver_data;
3416 return sprintf(buf, "0x%08x\n", (int)p->status);
3417}
3418static DEVICE_ATTR(status, S_IRUGO, show_status, NULL);
3419
3420static ssize_t show_capability(struct device *d, struct device_attribute *attr,
3421 char *buf)
3422{
3423 struct ipw2100_priv *p = d->driver_data;
3424 return sprintf(buf, "0x%08x\n", (int)p->capability);
3425}
3426static DEVICE_ATTR(capability, S_IRUGO, show_capability, NULL);
3427
3428
3429#define IPW2100_REG(x) { IPW_ ##x, #x }
3430const struct {
3431 u32 addr;
3432 const char *name;
3433} hw_data[] = {
3434 IPW2100_REG(REG_GP_CNTRL),
3435 IPW2100_REG(REG_GPIO),
3436 IPW2100_REG(REG_INTA),
3437 IPW2100_REG(REG_INTA_MASK),
3438 IPW2100_REG(REG_RESET_REG),
3439};
3440#define IPW2100_NIC(x, s) { x, #x, s }
3441const struct {
3442 u32 addr;
3443 const char *name;
3444 size_t size;
3445} nic_data[] = {
3446 IPW2100_NIC(IPW2100_CONTROL_REG, 2),
3447 IPW2100_NIC(0x210014, 1),
3448 IPW2100_NIC(0x210000, 1),
3449};
3450#define IPW2100_ORD(x, d) { IPW_ORD_ ##x, #x, d }
3451const struct {
3452 u8 index;
3453 const char *name;
3454 const char *desc;
3455} ord_data[] = {
3456 IPW2100_ORD(STAT_TX_HOST_REQUESTS, "requested Host Tx's (MSDU)"),
3457 IPW2100_ORD(STAT_TX_HOST_COMPLETE, "successful Host Tx's (MSDU)"),
3458 IPW2100_ORD(STAT_TX_DIR_DATA, "successful Directed Tx's (MSDU)"),
3459 IPW2100_ORD(STAT_TX_DIR_DATA1, "successful Directed Tx's (MSDU) @ 1MB"),
3460 IPW2100_ORD(STAT_TX_DIR_DATA2, "successful Directed Tx's (MSDU) @ 2MB"),
3461 IPW2100_ORD(STAT_TX_DIR_DATA5_5, "successful Directed Tx's (MSDU) @ 5_5MB"),
3462 IPW2100_ORD(STAT_TX_DIR_DATA11, "successful Directed Tx's (MSDU) @ 11MB"),
3463 IPW2100_ORD(STAT_TX_NODIR_DATA1, "successful Non_Directed Tx's (MSDU) @ 1MB"),
3464 IPW2100_ORD(STAT_TX_NODIR_DATA2, "successful Non_Directed Tx's (MSDU) @ 2MB"),
3465 IPW2100_ORD(STAT_TX_NODIR_DATA5_5, "successful Non_Directed Tx's (MSDU) @ 5.5MB"),
3466 IPW2100_ORD(STAT_TX_NODIR_DATA11, "successful Non_Directed Tx's (MSDU) @ 11MB"),
3467 IPW2100_ORD(STAT_NULL_DATA, "successful NULL data Tx's"),
3468 IPW2100_ORD(STAT_TX_RTS, "successful Tx RTS"),
3469 IPW2100_ORD(STAT_TX_CTS, "successful Tx CTS"),
3470 IPW2100_ORD(STAT_TX_ACK, "successful Tx ACK"),
3471 IPW2100_ORD(STAT_TX_ASSN, "successful Association Tx's"),
3472 IPW2100_ORD(STAT_TX_ASSN_RESP, "successful Association response Tx's"),
3473 IPW2100_ORD(STAT_TX_REASSN, "successful Reassociation Tx's"),
3474 IPW2100_ORD(STAT_TX_REASSN_RESP, "successful Reassociation response Tx's"),
3475 IPW2100_ORD(STAT_TX_PROBE, "probes successfully transmitted"),
3476 IPW2100_ORD(STAT_TX_PROBE_RESP, "probe responses successfully transmitted"),
3477 IPW2100_ORD(STAT_TX_BEACON, "tx beacon"),
3478 IPW2100_ORD(STAT_TX_ATIM, "Tx ATIM"),
3479 IPW2100_ORD(STAT_TX_DISASSN, "successful Disassociation TX"),
3480 IPW2100_ORD(STAT_TX_AUTH, "successful Authentication Tx"),
3481 IPW2100_ORD(STAT_TX_DEAUTH, "successful Deauthentication TX"),
3482 IPW2100_ORD(STAT_TX_TOTAL_BYTES, "Total successful Tx data bytes"),
3483 IPW2100_ORD(STAT_TX_RETRIES, "Tx retries"),
3484 IPW2100_ORD(STAT_TX_RETRY1, "Tx retries at 1MBPS"),
3485 IPW2100_ORD(STAT_TX_RETRY2, "Tx retries at 2MBPS"),
3486 IPW2100_ORD(STAT_TX_RETRY5_5, "Tx retries at 5.5MBPS"),
3487 IPW2100_ORD(STAT_TX_RETRY11, "Tx retries at 11MBPS"),
3488 IPW2100_ORD(STAT_TX_FAILURES, "Tx Failures"),
3489 IPW2100_ORD(STAT_TX_MAX_TRIES_IN_HOP,"times max tries in a hop failed"),
3490 IPW2100_ORD(STAT_TX_DISASSN_FAIL, "times disassociation failed"),
3491 IPW2100_ORD(STAT_TX_ERR_CTS, "missed/bad CTS frames"),
3492 IPW2100_ORD(STAT_TX_ERR_ACK, "tx err due to acks"),
3493 IPW2100_ORD(STAT_RX_HOST, "packets passed to host"),
3494 IPW2100_ORD(STAT_RX_DIR_DATA, "directed packets"),
3495 IPW2100_ORD(STAT_RX_DIR_DATA1, "directed packets at 1MB"),
3496 IPW2100_ORD(STAT_RX_DIR_DATA2, "directed packets at 2MB"),
3497 IPW2100_ORD(STAT_RX_DIR_DATA5_5, "directed packets at 5.5MB"),
3498 IPW2100_ORD(STAT_RX_DIR_DATA11, "directed packets at 11MB"),
3499 IPW2100_ORD(STAT_RX_NODIR_DATA,"nondirected packets"),
3500 IPW2100_ORD(STAT_RX_NODIR_DATA1, "nondirected packets at 1MB"),
3501 IPW2100_ORD(STAT_RX_NODIR_DATA2, "nondirected packets at 2MB"),
3502 IPW2100_ORD(STAT_RX_NODIR_DATA5_5, "nondirected packets at 5.5MB"),
3503 IPW2100_ORD(STAT_RX_NODIR_DATA11, "nondirected packets at 11MB"),
3504 IPW2100_ORD(STAT_RX_NULL_DATA, "null data rx's"),
3505 IPW2100_ORD(STAT_RX_RTS, "Rx RTS"),
3506 IPW2100_ORD(STAT_RX_CTS, "Rx CTS"),
3507 IPW2100_ORD(STAT_RX_ACK, "Rx ACK"),
3508 IPW2100_ORD(STAT_RX_CFEND, "Rx CF End"),
3509 IPW2100_ORD(STAT_RX_CFEND_ACK, "Rx CF End + CF Ack"),
3510 IPW2100_ORD(STAT_RX_ASSN, "Association Rx's"),
3511 IPW2100_ORD(STAT_RX_ASSN_RESP, "Association response Rx's"),
3512 IPW2100_ORD(STAT_RX_REASSN, "Reassociation Rx's"),
3513 IPW2100_ORD(STAT_RX_REASSN_RESP, "Reassociation response Rx's"),
3514 IPW2100_ORD(STAT_RX_PROBE, "probe Rx's"),
3515 IPW2100_ORD(STAT_RX_PROBE_RESP, "probe response Rx's"),
3516 IPW2100_ORD(STAT_RX_BEACON, "Rx beacon"),
3517 IPW2100_ORD(STAT_RX_ATIM, "Rx ATIM"),
3518 IPW2100_ORD(STAT_RX_DISASSN, "disassociation Rx"),
3519 IPW2100_ORD(STAT_RX_AUTH, "authentication Rx"),
3520 IPW2100_ORD(STAT_RX_DEAUTH, "deauthentication Rx"),
3521 IPW2100_ORD(STAT_RX_TOTAL_BYTES,"Total rx data bytes received"),
3522 IPW2100_ORD(STAT_RX_ERR_CRC, "packets with Rx CRC error"),
3523 IPW2100_ORD(STAT_RX_ERR_CRC1, "Rx CRC errors at 1MB"),
3524 IPW2100_ORD(STAT_RX_ERR_CRC2, "Rx CRC errors at 2MB"),
3525 IPW2100_ORD(STAT_RX_ERR_CRC5_5, "Rx CRC errors at 5.5MB"),
3526 IPW2100_ORD(STAT_RX_ERR_CRC11, "Rx CRC errors at 11MB"),
3527 IPW2100_ORD(STAT_RX_DUPLICATE1, "duplicate rx packets at 1MB"),
3528 IPW2100_ORD(STAT_RX_DUPLICATE2, "duplicate rx packets at 2MB"),
3529 IPW2100_ORD(STAT_RX_DUPLICATE5_5, "duplicate rx packets at 5.5MB"),
3530 IPW2100_ORD(STAT_RX_DUPLICATE11, "duplicate rx packets at 11MB"),
3531 IPW2100_ORD(STAT_RX_DUPLICATE, "duplicate rx packets"),
3532 IPW2100_ORD(PERS_DB_LOCK, "locking fw permanent db"),
3533 IPW2100_ORD(PERS_DB_SIZE, "size of fw permanent db"),
3534 IPW2100_ORD(PERS_DB_ADDR, "address of fw permanent db"),
3535 IPW2100_ORD(STAT_RX_INVALID_PROTOCOL, "rx frames with invalid protocol"),
3536 IPW2100_ORD(SYS_BOOT_TIME, "Boot time"),
3537 IPW2100_ORD(STAT_RX_NO_BUFFER, "rx frames rejected due to no buffer"),
3538 IPW2100_ORD(STAT_RX_MISSING_FRAG, "rx frames dropped due to missing fragment"),
3539 IPW2100_ORD(STAT_RX_ORPHAN_FRAG, "rx frames dropped due to non-sequential fragment"),
3540 IPW2100_ORD(STAT_RX_ORPHAN_FRAME, "rx frames dropped due to unmatched 1st frame"),
3541 IPW2100_ORD(STAT_RX_FRAG_AGEOUT, "rx frames dropped due to uncompleted frame"),
3542 IPW2100_ORD(STAT_RX_ICV_ERRORS, "ICV errors during decryption"),
3543 IPW2100_ORD(STAT_PSP_SUSPENSION,"times adapter suspended"),
3544 IPW2100_ORD(STAT_PSP_BCN_TIMEOUT, "beacon timeout"),
3545 IPW2100_ORD(STAT_PSP_POLL_TIMEOUT, "poll response timeouts"),
3546 IPW2100_ORD(STAT_PSP_NONDIR_TIMEOUT, "timeouts waiting for last {broad,multi}cast pkt"),
3547 IPW2100_ORD(STAT_PSP_RX_DTIMS, "PSP DTIMs received"),
3548 IPW2100_ORD(STAT_PSP_RX_TIMS, "PSP TIMs received"),
3549 IPW2100_ORD(STAT_PSP_STATION_ID,"PSP Station ID"),
3550 IPW2100_ORD(LAST_ASSN_TIME, "RTC time of last association"),
3551 IPW2100_ORD(STAT_PERCENT_MISSED_BCNS,"current calculation of % missed beacons"),
3552 IPW2100_ORD(STAT_PERCENT_RETRIES,"current calculation of % missed tx retries"),
3553 IPW2100_ORD(ASSOCIATED_AP_PTR, "0 if not associated, else pointer to AP table entry"),
3554 IPW2100_ORD(AVAILABLE_AP_CNT, "APs described in the AP table"),
3555 IPW2100_ORD(AP_LIST_PTR, "Ptr to list of available APs"),
3556 IPW2100_ORD(STAT_AP_ASSNS, "associations"),
3557 IPW2100_ORD(STAT_ASSN_FAIL, "association failures"),
3558 IPW2100_ORD(STAT_ASSN_RESP_FAIL,"failures due to response fail"),
3559 IPW2100_ORD(STAT_FULL_SCANS, "full scans"),
3560 IPW2100_ORD(CARD_DISABLED, "Card Disabled"),
3561 IPW2100_ORD(STAT_ROAM_INHIBIT, "times roaming was inhibited due to activity"),
3562 IPW2100_ORD(RSSI_AT_ASSN, "RSSI of associated AP at time of association"),
3563 IPW2100_ORD(STAT_ASSN_CAUSE1, "reassociation: no probe response or TX on hop"),
3564 IPW2100_ORD(STAT_ASSN_CAUSE2, "reassociation: poor tx/rx quality"),
3565 IPW2100_ORD(STAT_ASSN_CAUSE3, "reassociation: tx/rx quality (excessive AP load)"),
3566 IPW2100_ORD(STAT_ASSN_CAUSE4, "reassociation: AP RSSI level"),
3567 IPW2100_ORD(STAT_ASSN_CAUSE5, "reassociations due to load leveling"),
3568 IPW2100_ORD(STAT_AUTH_FAIL, "times authentication failed"),
3569 IPW2100_ORD(STAT_AUTH_RESP_FAIL,"times authentication response failed"),
3570 IPW2100_ORD(STATION_TABLE_CNT, "entries in association table"),
3571 IPW2100_ORD(RSSI_AVG_CURR, "Current avg RSSI"),
3572 IPW2100_ORD(POWER_MGMT_MODE, "Power mode - 0=CAM, 1=PSP"),
3573 IPW2100_ORD(COUNTRY_CODE, "IEEE country code as recv'd from beacon"),
3574 IPW2100_ORD(COUNTRY_CHANNELS, "channels supported by country"),
3575 IPW2100_ORD(RESET_CNT, "adapter resets (warm)"),
3576 IPW2100_ORD(BEACON_INTERVAL, "Beacon interval"),
3577 IPW2100_ORD(ANTENNA_DIVERSITY, "TRUE if antenna diversity is disabled"),
3578 IPW2100_ORD(DTIM_PERIOD, "beacon intervals between DTIMs"),
3579 IPW2100_ORD(OUR_FREQ, "current radio freq lower digits - channel ID"),
3580 IPW2100_ORD(RTC_TIME, "current RTC time"),
3581 IPW2100_ORD(PORT_TYPE, "operating mode"),
3582 IPW2100_ORD(CURRENT_TX_RATE, "current tx rate"),
3583 IPW2100_ORD(SUPPORTED_RATES, "supported tx rates"),
3584 IPW2100_ORD(ATIM_WINDOW, "current ATIM Window"),
3585 IPW2100_ORD(BASIC_RATES, "basic tx rates"),
3586 IPW2100_ORD(NIC_HIGHEST_RATE, "NIC highest tx rate"),
3587 IPW2100_ORD(AP_HIGHEST_RATE, "AP highest tx rate"),
3588 IPW2100_ORD(CAPABILITIES, "Management frame capability field"),
3589 IPW2100_ORD(AUTH_TYPE, "Type of authentication"),
3590 IPW2100_ORD(RADIO_TYPE, "Adapter card platform type"),
3591 IPW2100_ORD(RTS_THRESHOLD, "Min packet length for RTS handshaking"),
3592 IPW2100_ORD(INT_MODE, "International mode"),
3593 IPW2100_ORD(FRAGMENTATION_THRESHOLD, "protocol frag threshold"),
3594 IPW2100_ORD(EEPROM_SRAM_DB_BLOCK_START_ADDRESS, "EEPROM offset in SRAM"),
3595 IPW2100_ORD(EEPROM_SRAM_DB_BLOCK_SIZE, "EEPROM size in SRAM"),
3596 IPW2100_ORD(EEPROM_SKU_CAPABILITY, "EEPROM SKU Capability"),
3597 IPW2100_ORD(EEPROM_IBSS_11B_CHANNELS, "EEPROM IBSS 11b channel set"),
3598 IPW2100_ORD(MAC_VERSION, "MAC Version"),
3599 IPW2100_ORD(MAC_REVISION, "MAC Revision"),
3600 IPW2100_ORD(RADIO_VERSION, "Radio Version"),
3601 IPW2100_ORD(NIC_MANF_DATE_TIME, "MANF Date/Time STAMP"),
3602 IPW2100_ORD(UCODE_VERSION, "Ucode Version"),
3603};
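
Editor's note: the three helper macros that build the tables above lean on the preprocessor's token-pasting (##) and stringizing (#) operators, so a single argument yields both the numeric identifier and its printable name, keeping the long table and the symbolic names from drifting apart. A small self-contained sketch of the same trick (ORD_STAT_TX_RETRIES and its value are invented, purely for illustration):

#include <stdio.h>

#define ORD_STAT_TX_RETRIES 42		/* illustrative value only */
#define MK_ORD(x, d) { ORD_ ##x, #x, d }

static const struct { unsigned int index; const char *name; const char *desc; }
demo[] = {
	MK_ORD(STAT_TX_RETRIES, "Tx retries"),
};

int main(void)
{
	printf("[0x%02X] %s: %s\n", demo[0].index, demo[0].name, demo[0].desc);
	return 0;	/* prints "[0x2A] STAT_TX_RETRIES: Tx retries" */
}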
3604
3605
3606static ssize_t show_registers(struct device *d, struct device_attribute *attr,
3607 char *buf)
3608{
3609 int i;
3610 struct ipw2100_priv *priv = dev_get_drvdata(d);
3611 struct net_device *dev = priv->net_dev;
3612 char * out = buf;
3613 u32 val = 0;
3614
3615 out += sprintf(out, "%30s [Address ] : Hex\n", "Register");
3616
3617 for (i = 0; i < (sizeof(hw_data) / sizeof(*hw_data)); i++) {
3618 read_register(dev, hw_data[i].addr, &val);
3619 out += sprintf(out, "%30s [%08X] : %08X\n",
3620 hw_data[i].name, hw_data[i].addr, val);
3621 }
3622
3623 return out - buf;
3624}
3625static DEVICE_ATTR(registers, S_IRUGO, show_registers, NULL);
3626
3627
3628static ssize_t show_hardware(struct device *d, struct device_attribute *attr,
3629 char *buf)
3630{
3631 struct ipw2100_priv *priv = dev_get_drvdata(d);
3632 struct net_device *dev = priv->net_dev;
3633 char * out = buf;
3634 int i;
3635
3636 out += sprintf(out, "%30s [Address ] : Hex\n", "NIC entry");
3637
3638 for (i = 0; i < (sizeof(nic_data) / sizeof(*nic_data)); i++) {
3639 u8 tmp8;
3640 u16 tmp16;
3641 u32 tmp32;
3642
3643 switch (nic_data[i].size) {
3644 case 1:
3645 read_nic_byte(dev, nic_data[i].addr, &tmp8);
3646 out += sprintf(out, "%30s [%08X] : %02X\n",
3647 nic_data[i].name, nic_data[i].addr,
3648 tmp8);
3649 break;
3650 case 2:
3651 read_nic_word(dev, nic_data[i].addr, &tmp16);
3652 out += sprintf(out, "%30s [%08X] : %04X\n",
3653 nic_data[i].name, nic_data[i].addr,
3654 tmp16);
3655 break;
3656 case 4:
3657 read_nic_dword(dev, nic_data[i].addr, &tmp32);
3658 out += sprintf(out, "%30s [%08X] : %08X\n",
3659 nic_data[i].name, nic_data[i].addr,
3660 tmp32);
3661 break;
3662 }
3663 }
3664 return out - buf;
3665}
3666static DEVICE_ATTR(hardware, S_IRUGO, show_hardware, NULL);
3667
3668
3669static ssize_t show_memory(struct device *d, struct device_attribute *attr,
3670 char *buf)
3671{
3672 struct ipw2100_priv *priv = dev_get_drvdata(d);
3673 struct net_device *dev = priv->net_dev;
3674 static unsigned long loop = 0;
3675 int len = 0;
3676 u32 buffer[4];
3677 int i;
3678 char line[81];
3679
3680 if (loop >= 0x30000)
3681 loop = 0;
3682
3683 /* sysfs provides us a PAGE_SIZE buffer */
3684 while (len < PAGE_SIZE - 128 && loop < 0x30000) {
3685
3686 if (priv->snapshot[0]) for (i = 0; i < 4; i++)
3687 buffer[i] = *(u32 *)SNAPSHOT_ADDR(loop + i * 4);
3688 else for (i = 0; i < 4; i++)
3689 read_nic_dword(dev, loop + i * 4, &buffer[i]);
3690
3691 if (priv->dump_raw)
3692 len += sprintf(buf + len,
3693 "%c%c%c%c"
3694 "%c%c%c%c"
3695 "%c%c%c%c"
3696 "%c%c%c%c",
3697 ((u8*)buffer)[0x0],
3698 ((u8*)buffer)[0x1],
3699 ((u8*)buffer)[0x2],
3700 ((u8*)buffer)[0x3],
3701 ((u8*)buffer)[0x4],
3702 ((u8*)buffer)[0x5],
3703 ((u8*)buffer)[0x6],
3704 ((u8*)buffer)[0x7],
3705 ((u8*)buffer)[0x8],
3706 ((u8*)buffer)[0x9],
3707 ((u8*)buffer)[0xa],
3708 ((u8*)buffer)[0xb],
3709 ((u8*)buffer)[0xc],
3710 ((u8*)buffer)[0xd],
3711 ((u8*)buffer)[0xe],
3712 ((u8*)buffer)[0xf]);
3713 else
3714 len += sprintf(buf + len, "%s\n",
3715 snprint_line(line, sizeof(line),
3716 (u8*)buffer, 16, loop));
3717 loop += 16;
3718 }
3719
3720 return len;
3721}
3722
3723static ssize_t store_memory(struct device *d, struct device_attribute *attr,
3724 const char *buf, size_t count)
3725{
3726 struct ipw2100_priv *priv = dev_get_drvdata(d);
3727 struct net_device *dev = priv->net_dev;
3728 const char *p = buf;
3729
3730 if (count < 1)
3731 return count;
3732
3733 if (p[0] == '1' ||
3734 (count >= 2 && tolower(p[0]) == 'o' && tolower(p[1]) == 'n')) {
3735 IPW_DEBUG_INFO("%s: Setting memory dump to RAW mode.\n",
3736 dev->name);
3737 priv->dump_raw = 1;
3738
3739 } else if (p[0] == '0' || (count >= 2 && tolower(p[0]) == 'o' &&
3740 tolower(p[1]) == 'f')) {
3741 IPW_DEBUG_INFO("%s: Setting memory dump to HEX mode.\n",
3742 dev->name);
3743 priv->dump_raw = 0;
3744
3745 } else if (tolower(p[0]) == 'r') {
3746 IPW_DEBUG_INFO("%s: Resetting firmware snapshot.\n",
3747 dev->name);
3748 ipw2100_snapshot_free(priv);
3749
3750 } else
3751 IPW_DEBUG_INFO("%s: Usage: 1|on = RAW, 0|off = HEX, "
3752 "reset = clear memory snapshot\n",
3753 dev->name);
3754
3755 return count;
3756}
3757static DEVICE_ATTR(memory, S_IWUSR|S_IRUGO, show_memory, store_memory);
3758
3759
3760static ssize_t show_ordinals(struct device *d, struct device_attribute *attr,
3761 char *buf)
3762{
3763 struct ipw2100_priv *priv = dev_get_drvdata(d);
3764 u32 val = 0;
3765 int len = 0;
3766 u32 val_len;
3767 static int loop = 0;
3768
3769 if (loop >= sizeof(ord_data) / sizeof(*ord_data))
3770 loop = 0;
3771
3772 /* sysfs provides us a PAGE_SIZE buffer */
3773 while (len < PAGE_SIZE - 128 &&
3774 loop < (sizeof(ord_data) / sizeof(*ord_data))) {
3775
3776 val_len = sizeof(u32);
3777
3778 if (ipw2100_get_ordinal(priv, ord_data[loop].index, &val,
3779 &val_len))
3780 len += sprintf(buf + len, "[0x%02X] = ERROR %s\n",
3781 ord_data[loop].index,
3782 ord_data[loop].desc);
3783 else
3784 len += sprintf(buf + len, "[0x%02X] = 0x%08X %s\n",
3785 ord_data[loop].index, val,
3786 ord_data[loop].desc);
3787 loop++;
3788 }
3789
3790 return len;
3791}
3792static DEVICE_ATTR(ordinals, S_IRUGO, show_ordinals, NULL);
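
Editor's note: both show_memory() and show_ordinals() page through data far larger than the single PAGE_SIZE buffer sysfs hands them, by keeping a static cursor that survives between reads. A user-space sketch of the same trick (buffer size, item count and names are invented):

#include <stdio.h>
#include <string.h>

#define TOTAL 10
#define BUFSZ 32		/* stand-in for PAGE_SIZE */

static size_t chunked_read(char *buf)
{
	static int cursor;	/* persists across calls, like 'loop' above */
	size_t len = 0;

	if (cursor >= TOTAL)
		cursor = 0;
	while (len < BUFSZ - 16 && cursor < TOTAL)
		len += sprintf(buf + len, "item %02d\n", cursor++);
	return len;
}

int main(void)
{
	char buf[BUFSZ];
	int i;

	for (i = 0; i < 2; i++) {	/* two consecutive "reads" */
		size_t n = chunked_read(buf);
		fwrite(buf, 1, n, stdout);	/* second read resumes at item 02 */
	}
	return 0;
}

As in the driver, the cursor is shared state, so concurrent readers would interleave; that is a simplification the original accepts as well.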
3793
3794
3795static ssize_t show_stats(struct device *d, struct device_attribute *attr,
3796 char *buf)
3797{
3798 struct ipw2100_priv *priv = dev_get_drvdata(d);
3799 char * out = buf;
3800
3801 out += sprintf(out, "interrupts: %d {tx: %d, rx: %d, other: %d}\n",
3802 priv->interrupts, priv->tx_interrupts,
3803 priv->rx_interrupts, priv->inta_other);
3804 out += sprintf(out, "firmware resets: %d\n", priv->resets);
3805 out += sprintf(out, "firmware hangs: %d\n", priv->hangs);
3806#ifdef CONFIG_IPW_DEBUG
3807 out += sprintf(out, "packet mismatch image: %s\n",
3808 priv->snapshot[0] ? "YES" : "NO");
3809#endif
3810
3811 return out - buf;
3812}
3813static DEVICE_ATTR(stats, S_IRUGO, show_stats, NULL);
3814
3815
3816int ipw2100_switch_mode(struct ipw2100_priv *priv, u32 mode)
3817{
3818 int err;
3819
3820 if (mode == priv->ieee->iw_mode)
3821 return 0;
3822
3823 err = ipw2100_disable_adapter(priv);
3824 if (err) {
3825 IPW_DEBUG_ERROR("%s: Could not disable adapter %d\n",
3826 priv->net_dev->name, err);
3827 return err;
3828 }
3829
3830 switch (mode) {
3831 case IW_MODE_INFRA:
3832 priv->net_dev->type = ARPHRD_ETHER;
3833 break;
3834 case IW_MODE_ADHOC:
3835 priv->net_dev->type = ARPHRD_ETHER;
3836 break;
3837#ifdef CONFIG_IPW2100_MONITOR
3838 case IW_MODE_MONITOR:
3839 priv->last_mode = priv->ieee->iw_mode;
3840 priv->net_dev->type = ARPHRD_IEEE80211;
3841 break;
3842#endif /* CONFIG_IPW2100_MONITOR */
3843 }
3844
3845 priv->ieee->iw_mode = mode;
3846
3847#ifdef CONFIG_PM
3848 /* Indicate to ipw2100_download_firmware that it should load the
3849 * firmware from disk instead of from memory. */
3850 ipw2100_firmware.version = 0;
3851#endif
3852
3853 printk(KERN_INFO "%s: Resetting on mode change.\n",
3854 priv->net_dev->name);
3855 priv->reset_backoff = 0;
3856 schedule_reset(priv);
3857
3858 return 0;
3859}
3860
3861static ssize_t show_internals(struct device *d, struct device_attribute *attr,
3862 char *buf)
3863{
3864 struct ipw2100_priv *priv = dev_get_drvdata(d);
3865 int len = 0;
3866
3867#define DUMP_VAR(x,y) len += sprintf(buf + len, # x ": %" # y "\n", priv-> x)
3868
3869 if (priv->status & STATUS_ASSOCIATED)
3870 len += sprintf(buf + len, "connected: %lu\n",
3871 get_seconds() - priv->connect_start);
3872 else
3873 len += sprintf(buf + len, "not connected\n");
3874
3875 DUMP_VAR(ieee->crypt[priv->ieee->tx_keyidx], p);
3876 DUMP_VAR(status, 08lx);
3877 DUMP_VAR(config, 08lx);
3878 DUMP_VAR(capability, 08lx);
3879
3880 len += sprintf(buf + len, "last_rtc: %lu\n", (unsigned long)priv->last_rtc);
3881
3882 DUMP_VAR(fatal_error, d);
3883 DUMP_VAR(stop_hang_check, d);
3884 DUMP_VAR(stop_rf_kill, d);
3885 DUMP_VAR(messages_sent, d);
3886
3887 DUMP_VAR(tx_pend_stat.value, d);
3888 DUMP_VAR(tx_pend_stat.hi, d);
3889
3890 DUMP_VAR(tx_free_stat.value, d);
3891 DUMP_VAR(tx_free_stat.lo, d);
3892
3893 DUMP_VAR(msg_free_stat.value, d);
3894 DUMP_VAR(msg_free_stat.lo, d);
3895
3896 DUMP_VAR(msg_pend_stat.value, d);
3897 DUMP_VAR(msg_pend_stat.hi, d);
3898
3899 DUMP_VAR(fw_pend_stat.value, d);
3900 DUMP_VAR(fw_pend_stat.hi, d);
3901
3902 DUMP_VAR(txq_stat.value, d);
3903 DUMP_VAR(txq_stat.lo, d);
3904
3905 DUMP_VAR(ieee->scans, d);
3906 DUMP_VAR(reset_backoff, d);
3907
3908 return len;
3909}
3910static DEVICE_ATTR(internals, S_IRUGO, show_internals, NULL);
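
Editor's note: the DUMP_VAR() macro in show_internals() is another stringizing trick: #x prints the member name and "%" #y splices the conversion letters into the format string, so DUMP_VAR(status, 08lx) expands to a sprintf() of "status: %08lx\n". A stand-alone sketch (struct and member names invented):

#include <stdio.h>

struct demo { unsigned long status; int resets; };

#define DUMP_VAR(p, buf, len, x, y) \
	((len) += sprintf((buf) + (len), #x ": %" #y "\n", (p)->x))

int main(void)
{
	struct demo d = { .status = 0x1234, .resets = 3 };
	char buf[128];
	int len = 0;

	DUMP_VAR(&d, buf, len, status, 08lx);
	DUMP_VAR(&d, buf, len, resets, d);
	fputs(buf, stdout);	/* prints "status: 00001234" and "resets: 3" */
	return 0;
}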
3911
3912
3913static ssize_t show_bssinfo(struct device *d, struct device_attribute *attr,
3914 char *buf)
3915{
3916 struct ipw2100_priv *priv = dev_get_drvdata(d);
3917 char essid[IW_ESSID_MAX_SIZE + 1];
3918 u8 bssid[ETH_ALEN];
3919 u32 chan = 0;
3920 char * out = buf;
3921 int length;
3922 int ret;
3923
3924 memset(essid, 0, sizeof(essid));
3925 memset(bssid, 0, sizeof(bssid));
3926
3927 length = IW_ESSID_MAX_SIZE;
3928 ret = ipw2100_get_ordinal(priv, IPW_ORD_STAT_ASSN_SSID, essid, &length);
3929 if (ret)
3930 IPW_DEBUG_INFO("failed querying ordinals at line %d\n",
3931 __LINE__);
3932
3933 length = sizeof(bssid);
3934 ret = ipw2100_get_ordinal(priv, IPW_ORD_STAT_ASSN_AP_BSSID,
3935 bssid, &length);
3936 if (ret)
3937 IPW_DEBUG_INFO("failed querying ordinals at line %d\n",
3938 __LINE__);
3939
3940 length = sizeof(u32);
3941 ret = ipw2100_get_ordinal(priv, IPW_ORD_OUR_FREQ, &chan, &length);
3942 if (ret)
3943 IPW_DEBUG_INFO("failed querying ordinals at line %d\n",
3944 __LINE__);
3945
3946 out += sprintf(out, "ESSID: %s\n", essid);
3947 out += sprintf(out, "BSSID: %02x:%02x:%02x:%02x:%02x:%02x\n",
3948 bssid[0], bssid[1], bssid[2],
3949 bssid[3], bssid[4], bssid[5]);
3950 out += sprintf(out, "Channel: %d\n", chan);
3951
3952 return out - buf;
3953}
3954static DEVICE_ATTR(bssinfo, S_IRUGO, show_bssinfo, NULL);
3955
3956
3957#ifdef CONFIG_IPW_DEBUG
3958static ssize_t show_debug_level(struct device_driver *d, char *buf)
3959{
3960 return sprintf(buf, "0x%08X\n", ipw2100_debug_level);
3961}
3962
3963static ssize_t store_debug_level(struct device_driver *d, const char *buf,
3964 size_t count)
3965{
3966 char *p = (char *)buf;
3967 u32 val;
3968
3969 if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
3970 p++;
3971 if (p[0] == 'x' || p[0] == 'X')
3972 p++;
3973 val = simple_strtoul(p, &p, 16);
3974 } else
3975 val = simple_strtoul(p, &p, 10);
3976 if (p == buf)
3977 IPW_DEBUG_INFO(DRV_NAME
3978 ": %s is not in hex or decimal form.\n", buf);
3979 else
3980 ipw2100_debug_level = val;
3981
3982 return strnlen(buf, count);
3983}
3984static DRIVER_ATTR(debug_level, S_IWUSR | S_IRUGO, show_debug_level,
3985 store_debug_level);
3986#endif /* CONFIG_IPW_DEBUG */
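
Editor's note: a user-space sketch of the "hex or decimal" parsing done by store_debug_level() above and store_scan_age() below; strtoul() with base 0 gives nearly the same behaviour in one call (0x... is hex, otherwise decimal, though base 0 also treats a leading 0 as octal). parse_level() is an invented name for illustration.

#include <stdio.h>
#include <stdlib.h>

static unsigned long parse_level(const char *s, int *ok)
{
	char *end;
	unsigned long val = strtoul(s, &end, 0);

	*ok = (end != s);	/* mirrors the 'p == buf' error check above */
	return val;
}

int main(void)
{
	int ok;

	printf("%lu\n", parse_level("0x20", &ok));	/* 32 */
	printf("%lu\n", parse_level("15", &ok));	/* 15 */
	return 0;
}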
3987
3988
3989static ssize_t show_fatal_error(struct device *d,
3990 struct device_attribute *attr, char *buf)
3991{
3992 struct ipw2100_priv *priv = dev_get_drvdata(d);
3993 char *out = buf;
3994 int i;
3995
3996 if (priv->fatal_error)
3997 out += sprintf(out, "0x%08X\n",
3998 priv->fatal_error);
3999 else
4000 out += sprintf(out, "0\n");
4001
4002 for (i = 1; i <= IPW2100_ERROR_QUEUE; i++) {
4003 if (!priv->fatal_errors[(priv->fatal_index - i) %
4004 IPW2100_ERROR_QUEUE])
4005 continue;
4006
4007 out += sprintf(out, "%d. 0x%08X\n", i,
4008 priv->fatal_errors[(priv->fatal_index - i) %
4009 IPW2100_ERROR_QUEUE]);
4010 }
4011
4012 return out - buf;
4013}
4014
4015static ssize_t store_fatal_error(struct device *d,
4016 struct device_attribute *attr, const char *buf, size_t count)
4017{
4018 struct ipw2100_priv *priv = dev_get_drvdata(d);
4019 schedule_reset(priv);
4020 return count;
4021}
4022static DEVICE_ATTR(fatal_error, S_IWUSR|S_IRUGO, show_fatal_error, store_fatal_error);
4023
4024
4025static ssize_t show_scan_age(struct device *d, struct device_attribute *attr,
4026 char *buf)
4027{
4028 struct ipw2100_priv *priv = dev_get_drvdata(d);
4029 return sprintf(buf, "%d\n", priv->ieee->scan_age);
4030}
4031
4032static ssize_t store_scan_age(struct device *d, struct device_attribute *attr,
4033 const char *buf, size_t count)
4034{
4035 struct ipw2100_priv *priv = dev_get_drvdata(d);
4036 struct net_device *dev = priv->net_dev;
4037 char buffer[] = "00000000";
4038 unsigned long len =
4039 (sizeof(buffer) - 1) > count ? count : sizeof(buffer) - 1;
4040 unsigned long val;
4041 char *p = buffer;
4042
4043 IPW_DEBUG_INFO("enter\n");
4044
4045 strncpy(buffer, buf, len);
4046 buffer[len] = 0;
4047
4048 if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
4049 p++;
4050 if (p[0] == 'x' || p[0] == 'X')
4051 p++;
4052 val = simple_strtoul(p, &p, 16);
4053 } else
4054 val = simple_strtoul(p, &p, 10);
4055 if (p == buffer) {
4056 IPW_DEBUG_INFO("%s: user supplied invalid value.\n",
4057 dev->name);
4058 } else {
4059 priv->ieee->scan_age = val;
4060 IPW_DEBUG_INFO("set scan_age = %u\n", priv->ieee->scan_age);
4061 }
4062
4063 IPW_DEBUG_INFO("exit\n");
4064 return len;
4065}
4066static DEVICE_ATTR(scan_age, S_IWUSR | S_IRUGO, show_scan_age, store_scan_age);
4067
4068
4069static ssize_t show_rf_kill(struct device *d, struct device_attribute *attr,
4070 char *buf)
4071{
4072 /* 0 - RF kill not enabled
4073 1 - SW based RF kill active (sysfs)
4074 2 - HW based RF kill active
4075 3 - Both HW and SW based RF kill active */
4076 struct ipw2100_priv *priv = (struct ipw2100_priv *)d->driver_data;
4077 int val = ((priv->status & STATUS_RF_KILL_SW) ? 0x1 : 0x0) |
4078 (rf_kill_active(priv) ? 0x2 : 0x0);
4079 return sprintf(buf, "%i\n", val);
4080}
4081
4082static int ipw_radio_kill_sw(struct ipw2100_priv *priv, int disable_radio)
4083{
4084 if ((disable_radio ? 1 : 0) ==
4085 (priv->status & STATUS_RF_KILL_SW ? 1 : 0))
4086 return 0;
4087
4088 IPW_DEBUG_RF_KILL("Manual SW RF Kill set to: RADIO %s\n",
4089 disable_radio ? "OFF" : "ON");
4090
4091 down(&priv->action_sem);
4092
4093 if (disable_radio) {
4094 priv->status |= STATUS_RF_KILL_SW;
4095 ipw2100_down(priv);
4096 } else {
4097 priv->status &= ~STATUS_RF_KILL_SW;
4098 if (rf_kill_active(priv)) {
4099 IPW_DEBUG_RF_KILL("Can not turn radio back on - "
4100 "disabled by HW switch\n");
4101 /* Make sure the RF_KILL check timer is running */
4102 priv->stop_rf_kill = 0;
4103 cancel_delayed_work(&priv->rf_kill);
4104 queue_delayed_work(priv->workqueue, &priv->rf_kill,
4105 HZ);
4106 } else
4107 schedule_reset(priv);
4108 }
4109
4110 up(&priv->action_sem);
4111 return 1;
4112}
4113
4114static ssize_t store_rf_kill(struct device *d, struct device_attribute *attr,
4115 const char *buf, size_t count)
4116{
4117 struct ipw2100_priv *priv = dev_get_drvdata(d);
4118 ipw_radio_kill_sw(priv, buf[0] == '1');
4119 return count;
4120}
4121static DEVICE_ATTR(rf_kill, S_IWUSR|S_IRUGO, show_rf_kill, store_rf_kill);
4122
4123
4124static struct attribute *ipw2100_sysfs_entries[] = {
4125 &dev_attr_hardware.attr,
4126 &dev_attr_registers.attr,
4127 &dev_attr_ordinals.attr,
4128 &dev_attr_pci.attr,
4129 &dev_attr_stats.attr,
4130 &dev_attr_internals.attr,
4131 &dev_attr_bssinfo.attr,
4132 &dev_attr_memory.attr,
4133 &dev_attr_scan_age.attr,
4134 &dev_attr_fatal_error.attr,
4135 &dev_attr_rf_kill.attr,
4136 &dev_attr_cfg.attr,
4137 &dev_attr_status.attr,
4138 &dev_attr_capability.attr,
4139 NULL,
4140};
4141
4142static struct attribute_group ipw2100_attribute_group = {
4143 .attrs = ipw2100_sysfs_entries,
4144};
4145
4146
4147static int status_queue_allocate(struct ipw2100_priv *priv, int entries)
4148{
4149 struct ipw2100_status_queue *q = &priv->status_queue;
4150
4151 IPW_DEBUG_INFO("enter\n");
4152
4153 q->size = entries * sizeof(struct ipw2100_status);
4154 q->drv = (struct ipw2100_status *)pci_alloc_consistent(
4155 priv->pci_dev, q->size, &q->nic);
4156 if (!q->drv) {
4157 IPW_DEBUG_WARNING(
4158 "Can not allocate status queue.\n");
4159 return -ENOMEM;
4160 }
4161
4162 memset(q->drv, 0, q->size);
4163
4164 IPW_DEBUG_INFO("exit\n");
4165
4166 return 0;
4167}
4168
4169static void status_queue_free(struct ipw2100_priv *priv)
4170{
4171 IPW_DEBUG_INFO("enter\n");
4172
4173 if (priv->status_queue.drv) {
4174 pci_free_consistent(
4175 priv->pci_dev, priv->status_queue.size,
4176 priv->status_queue.drv, priv->status_queue.nic);
4177 priv->status_queue.drv = NULL;
4178 }
4179
4180 IPW_DEBUG_INFO("exit\n");
4181}
4182
4183static int bd_queue_allocate(struct ipw2100_priv *priv,
4184 struct ipw2100_bd_queue *q, int entries)
4185{
4186 IPW_DEBUG_INFO("enter\n");
4187
4188 memset(q, 0, sizeof(struct ipw2100_bd_queue));
4189
4190 q->entries = entries;
4191 q->size = entries * sizeof(struct ipw2100_bd);
4192 q->drv = pci_alloc_consistent(priv->pci_dev, q->size, &q->nic);
4193 if (!q->drv) {
4194 IPW_DEBUG_INFO("can't allocate shared memory for buffer descriptors\n");
4195 return -ENOMEM;
4196 }
4197 memset(q->drv, 0, q->size);
4198
4199 IPW_DEBUG_INFO("exit\n");
4200
4201 return 0;
4202}
4203
4204static void bd_queue_free(struct ipw2100_priv *priv,
4205 struct ipw2100_bd_queue *q)
4206{
4207 IPW_DEBUG_INFO("enter\n");
4208
4209 if (!q)
4210 return;
4211
4212 if (q->drv) {
4213 pci_free_consistent(priv->pci_dev,
4214 q->size, q->drv, q->nic);
4215 q->drv = NULL;
4216 }
4217
4218 IPW_DEBUG_INFO("exit\n");
4219}
4220
4221static void bd_queue_initialize(
4222 struct ipw2100_priv *priv, struct ipw2100_bd_queue * q,
4223 u32 base, u32 size, u32 r, u32 w)
4224{
4225 IPW_DEBUG_INFO("enter\n");
4226
4227 IPW_DEBUG_INFO("initializing bd queue at virt=%p, phys=%08x\n", q->drv, (u32)q->nic);
4228
4229 write_register(priv->net_dev, base, q->nic);
4230 write_register(priv->net_dev, size, q->entries);
4231 write_register(priv->net_dev, r, q->oldest);
4232 write_register(priv->net_dev, w, q->next);
4233
4234 IPW_DEBUG_INFO("exit\n");
4235}
4236
4237static void ipw2100_kill_workqueue(struct ipw2100_priv *priv)
4238{
4239 if (priv->workqueue) {
4240 priv->stop_rf_kill = 1;
4241 priv->stop_hang_check = 1;
4242 cancel_delayed_work(&priv->reset_work);
4243 cancel_delayed_work(&priv->security_work);
4244 cancel_delayed_work(&priv->wx_event_work);
4245 cancel_delayed_work(&priv->hang_check);
4246 cancel_delayed_work(&priv->rf_kill);
4247 destroy_workqueue(priv->workqueue);
4248 priv->workqueue = NULL;
4249 }
4250}
4251
4252static int ipw2100_tx_allocate(struct ipw2100_priv *priv)
4253{
4254 int i, j, err = -EINVAL;
4255 void *v;
4256 dma_addr_t p;
4257
4258 IPW_DEBUG_INFO("enter\n");
4259
4260 err = bd_queue_allocate(priv, &priv->tx_queue, TX_QUEUE_LENGTH);
4261 if (err) {
4262 IPW_DEBUG_ERROR("%s: failed bd_queue_allocate\n",
4263 priv->net_dev->name);
4264 return err;
4265 }
4266
4267 priv->tx_buffers = (struct ipw2100_tx_packet *)kmalloc(
4268 TX_PENDED_QUEUE_LENGTH * sizeof(struct ipw2100_tx_packet),
4269 GFP_ATOMIC);
4270 if (!priv->tx_buffers) {
4271 IPW_DEBUG_ERROR("%s: alloc failed for tx buffers.\n",
4272 priv->net_dev->name);
4273 bd_queue_free(priv, &priv->tx_queue);
4274 return -ENOMEM;
4275 }
4276
4277 for (i = 0; i < TX_PENDED_QUEUE_LENGTH; i++) {
4278 v = pci_alloc_consistent(
4279 priv->pci_dev, sizeof(struct ipw2100_data_header), &p);
4280 if (!v) {
4281 IPW_DEBUG_ERROR("%s: PCI alloc failed for tx "
4282 "buffers.\n", priv->net_dev->name);
4283 err = -ENOMEM;
4284 break;
4285 }
4286
4287 priv->tx_buffers[i].type = DATA;
4288 priv->tx_buffers[i].info.d_struct.data = (struct ipw2100_data_header*)v;
4289 priv->tx_buffers[i].info.d_struct.data_phys = p;
4290 priv->tx_buffers[i].info.d_struct.txb = NULL;
4291 }
4292
4293 if (i == TX_PENDED_QUEUE_LENGTH)
4294 return 0;
4295
4296 for (j = 0; j < i; j++) {
4297 pci_free_consistent(
4298 priv->pci_dev,
4299 sizeof(struct ipw2100_data_header),
4300 priv->tx_buffers[j].info.d_struct.data,
4301 priv->tx_buffers[j].info.d_struct.data_phys);
4302 }
4303
4304 kfree(priv->tx_buffers);
4305 priv->tx_buffers = NULL;
4306
4307 return err;
4308}
4309
4310static void ipw2100_tx_initialize(struct ipw2100_priv *priv)
4311{
4312 int i;
4313
4314 IPW_DEBUG_INFO("enter\n");
4315
4316 /*
4317 * reinitialize packet info lists
4318 */
4319 INIT_LIST_HEAD(&priv->fw_pend_list);
4320 INIT_STAT(&priv->fw_pend_stat);
4321
4322 /*
4323 * reinitialize lists
4324 */
4325 INIT_LIST_HEAD(&priv->tx_pend_list);
4326 INIT_LIST_HEAD(&priv->tx_free_list);
4327 INIT_STAT(&priv->tx_pend_stat);
4328 INIT_STAT(&priv->tx_free_stat);
4329
4330 for (i = 0; i < TX_PENDED_QUEUE_LENGTH; i++) {
4331 /* We simply drop any SKBs that have been queued for
4332 * transmit */
4333 if (priv->tx_buffers[i].info.d_struct.txb) {
4334 ieee80211_txb_free(priv->tx_buffers[i].info.d_struct.txb);
4335 priv->tx_buffers[i].info.d_struct.txb = NULL;
4336 }
4337
4338 list_add_tail(&priv->tx_buffers[i].list, &priv->tx_free_list);
4339 }
4340
4341 SET_STAT(&priv->tx_free_stat, i);
4342
4343 priv->tx_queue.oldest = 0;
4344 priv->tx_queue.available = priv->tx_queue.entries;
4345 priv->tx_queue.next = 0;
4346 INIT_STAT(&priv->txq_stat);
4347 SET_STAT(&priv->txq_stat, priv->tx_queue.available);
4348
4349 bd_queue_initialize(priv, &priv->tx_queue,
4350 IPW_MEM_HOST_SHARED_TX_QUEUE_BD_BASE,
4351 IPW_MEM_HOST_SHARED_TX_QUEUE_BD_SIZE,
4352 IPW_MEM_HOST_SHARED_TX_QUEUE_READ_INDEX,
4353 IPW_MEM_HOST_SHARED_TX_QUEUE_WRITE_INDEX);
4354
4355 IPW_DEBUG_INFO("exit\n");
4356
4357}
4358
4359static void ipw2100_tx_free(struct ipw2100_priv *priv)
4360{
4361 int i;
4362
4363 IPW_DEBUG_INFO("enter\n");
4364
4365 bd_queue_free(priv, &priv->tx_queue);
4366
4367 if (!priv->tx_buffers)
4368 return;
4369
4370 for (i = 0; i < TX_PENDED_QUEUE_LENGTH; i++) {
4371 if (priv->tx_buffers[i].info.d_struct.txb) {
4372 ieee80211_txb_free(priv->tx_buffers[i].info.d_struct.txb);
4373 priv->tx_buffers[i].info.d_struct.txb = NULL;
4374 }
4375 if (priv->tx_buffers[i].info.d_struct.data)
4376 pci_free_consistent(
4377 priv->pci_dev,
4378 sizeof(struct ipw2100_data_header),
4379 priv->tx_buffers[i].info.d_struct.data,
4380 priv->tx_buffers[i].info.d_struct.data_phys);
4381 }
4382
4383 kfree(priv->tx_buffers);
4384 priv->tx_buffers = NULL;
4385
4386 IPW_DEBUG_INFO("exit\n");
4387}
4388
4389
4390
4391static int ipw2100_rx_allocate(struct ipw2100_priv *priv)
4392{
4393 int i, j, err = -EINVAL;
4394
4395 IPW_DEBUG_INFO("enter\n");
4396
4397 err = bd_queue_allocate(priv, &priv->rx_queue, RX_QUEUE_LENGTH);
4398 if (err) {
4399 IPW_DEBUG_INFO("failed bd_queue_allocate\n");
4400 return err;
4401 }
4402
4403 err = status_queue_allocate(priv, RX_QUEUE_LENGTH);
4404 if (err) {
4405 IPW_DEBUG_INFO("failed status_queue_allocate\n");
4406 bd_queue_free(priv, &priv->rx_queue);
4407 return err;
4408 }
4409
4410 /*
4411 * allocate packets
4412 */
4413 priv->rx_buffers = (struct ipw2100_rx_packet *)
4414 kmalloc(RX_QUEUE_LENGTH * sizeof(struct ipw2100_rx_packet),
4415 GFP_KERNEL);
4416 if (!priv->rx_buffers) {
4417 IPW_DEBUG_INFO("can't allocate rx packet buffer table\n");
4418
4419 bd_queue_free(priv, &priv->rx_queue);
4420
4421 status_queue_free(priv);
4422
4423 return -ENOMEM;
4424 }
4425
4426 for (i = 0; i < RX_QUEUE_LENGTH; i++) {
4427 struct ipw2100_rx_packet *packet = &priv->rx_buffers[i];
4428
4429 err = ipw2100_alloc_skb(priv, packet);
4430 if (unlikely(err)) {
4431 err = -ENOMEM;
4432 break;
4433 }
4434
4435 /* The BD holds the cache aligned address */
4436 priv->rx_queue.drv[i].host_addr = packet->dma_addr;
4437 priv->rx_queue.drv[i].buf_length = IPW_RX_NIC_BUFFER_LENGTH;
4438 priv->status_queue.drv[i].status_fields = 0;
4439 }
4440
4441 if (i == RX_QUEUE_LENGTH)
4442 return 0;
4443
4444 for (j = 0; j < i; j++) {
4445 pci_unmap_single(priv->pci_dev, priv->rx_buffers[j].dma_addr,
4446 sizeof(struct ipw2100_rx),
4447 PCI_DMA_FROMDEVICE);
4448 dev_kfree_skb(priv->rx_buffers[j].skb);
4449 }
4450
4451 kfree(priv->rx_buffers);
4452 priv->rx_buffers = NULL;
4453
4454 bd_queue_free(priv, &priv->rx_queue);
4455
4456 status_queue_free(priv);
4457
4458 return err;
4459}
4460
4461static void ipw2100_rx_initialize(struct ipw2100_priv *priv)
4462{
4463 IPW_DEBUG_INFO("enter\n");
4464
4465 priv->rx_queue.oldest = 0;
4466 priv->rx_queue.available = priv->rx_queue.entries - 1;
4467 priv->rx_queue.next = priv->rx_queue.entries - 1;
4468
4469 INIT_STAT(&priv->rxq_stat);
4470 SET_STAT(&priv->rxq_stat, priv->rx_queue.available);
4471
4472 bd_queue_initialize(priv, &priv->rx_queue,
4473 IPW_MEM_HOST_SHARED_RX_BD_BASE,
4474 IPW_MEM_HOST_SHARED_RX_BD_SIZE,
4475 IPW_MEM_HOST_SHARED_RX_READ_INDEX,
4476 IPW_MEM_HOST_SHARED_RX_WRITE_INDEX);
4477
4478 /* set up the status queue */
4479 write_register(priv->net_dev, IPW_MEM_HOST_SHARED_RX_STATUS_BASE,
4480 priv->status_queue.nic);
4481
4482 IPW_DEBUG_INFO("exit\n");
4483}
4484
4485static void ipw2100_rx_free(struct ipw2100_priv *priv)
4486{
4487 int i;
4488
4489 IPW_DEBUG_INFO("enter\n");
4490
4491 bd_queue_free(priv, &priv->rx_queue);
4492 status_queue_free(priv);
4493
4494 if (!priv->rx_buffers)
4495 return;
4496
4497 for (i = 0; i < RX_QUEUE_LENGTH; i++) {
4498 if (priv->rx_buffers[i].rxp) {
4499 pci_unmap_single(priv->pci_dev,
4500 priv->rx_buffers[i].dma_addr,
4501 sizeof(struct ipw2100_rx),
4502 PCI_DMA_FROMDEVICE);
4503 dev_kfree_skb(priv->rx_buffers[i].skb);
4504 }
4505 }
4506
4507 kfree(priv->rx_buffers);
4508 priv->rx_buffers = NULL;
4509
4510 IPW_DEBUG_INFO("exit\n");
4511}
4512
4513static int ipw2100_read_mac_address(struct ipw2100_priv *priv)
4514{
4515 u32 length = ETH_ALEN;
4516 u8 mac[ETH_ALEN];
4517
4518 int err;
4519
4520 err = ipw2100_get_ordinal(priv, IPW_ORD_STAT_ADAPTER_MAC,
4521 mac, &length);
4522 if (err) {
4523 IPW_DEBUG_INFO("MAC address read failed\n");
4524 return -EIO;
4525 }
4526 IPW_DEBUG_INFO("card MAC is %02X:%02X:%02X:%02X:%02X:%02X\n",
4527 mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
4528
4529 memcpy(priv->net_dev->dev_addr, mac, ETH_ALEN);
4530
4531 return 0;
4532}
4533
4534/********************************************************************
4535 *
4536 * Firmware Commands
4537 *
4538 ********************************************************************/
4539
4540int ipw2100_set_mac_address(struct ipw2100_priv *priv, int batch_mode)
4541{
4542 struct host_command cmd = {
4543 .host_command = ADAPTER_ADDRESS,
4544 .host_command_sequence = 0,
4545 .host_command_length = ETH_ALEN
4546 };
4547 int err;
4548
4549 IPW_DEBUG_HC("SET_MAC_ADDRESS\n");
4550
4551 IPW_DEBUG_INFO("enter\n");
4552
4553 if (priv->config & CFG_CUSTOM_MAC) {
4554 memcpy(cmd.host_command_parameters, priv->mac_addr,
4555 ETH_ALEN);
4556 memcpy(priv->net_dev->dev_addr, priv->mac_addr, ETH_ALEN);
4557 } else
4558 memcpy(cmd.host_command_parameters, priv->net_dev->dev_addr,
4559 ETH_ALEN);
4560
4561 err = ipw2100_hw_send_command(priv, &cmd);
4562
4563 IPW_DEBUG_INFO("exit\n");
4564 return err;
4565}
4566
4567int ipw2100_set_port_type(struct ipw2100_priv *priv, u32 port_type,
4568 int batch_mode)
4569{
4570 struct host_command cmd = {
4571 .host_command = PORT_TYPE,
4572 .host_command_sequence = 0,
4573 .host_command_length = sizeof(u32)
4574 };
4575 int err;
4576
4577 switch (port_type) {
4578 case IW_MODE_INFRA:
4579 cmd.host_command_parameters[0] = IPW_BSS;
4580 break;
4581 case IW_MODE_ADHOC:
4582 cmd.host_command_parameters[0] = IPW_IBSS;
4583 break;
4584 }
4585
4586 IPW_DEBUG_HC("PORT_TYPE: %s\n",
4587 port_type == IPW_IBSS ? "Ad-Hoc" : "Managed");
4588
4589 if (!batch_mode) {
4590 err = ipw2100_disable_adapter(priv);
4591 if (err) {
4592 IPW_DEBUG_ERROR("%s: Could not disable adapter %d\n",
4593 priv->net_dev->name, err);
4594 return err;
4595 }
4596 }
4597
4598 /* send cmd to firmware */
4599 err = ipw2100_hw_send_command(priv, &cmd);
4600
4601 if (!batch_mode)
4602 ipw2100_enable_adapter(priv);
4603
4604 return err;
4605}
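
The command helpers in this block all follow the same shape: fill in a struct host_command, disable the adapter when not called in batch mode, send the command, then re-enable. A minimal sketch of that shared pattern, with ipw2100_send_simple_command as an invented name (the driver does not define such a helper):

#if 0
static int ipw2100_send_simple_command(struct ipw2100_priv *priv,
				       u32 command, u32 param,
				       int batch_mode)
{
	struct host_command cmd = {
		.host_command = command,
		.host_command_sequence = 0,
		.host_command_length = sizeof(u32)
	};
	int err;

	cmd.host_command_parameters[0] = param;

	/* batch_mode == 1 means the caller already disabled the adapter */
	if (!batch_mode) {
		err = ipw2100_disable_adapter(priv);
		if (err)
			return err;
	}

	err = ipw2100_hw_send_command(priv, &cmd);

	if (!batch_mode)
		ipw2100_enable_adapter(priv);

	return err;
}
#endif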
4606
4607
4608int ipw2100_set_channel(struct ipw2100_priv *priv, u32 channel, int batch_mode)
4609{
4610 struct host_command cmd = {
4611 .host_command = CHANNEL,
4612 .host_command_sequence = 0,
4613 .host_command_length = sizeof(u32)
4614 };
4615 int err;
4616
4617 cmd.host_command_parameters[0] = channel;
4618
4619 IPW_DEBUG_HC("CHANNEL: %d\n", channel);
4620
4621 /* If BSS then we don't support channel selection */
4622 if (priv->ieee->iw_mode == IW_MODE_INFRA)
4623 return 0;
4624
4625 if ((channel != 0) &&
4626 ((channel < REG_MIN_CHANNEL) || (channel > REG_MAX_CHANNEL)))
4627 return -EINVAL;
4628
4629 if (!batch_mode) {
4630 err = ipw2100_disable_adapter(priv);
4631 if (err)
4632 return err;
4633 }
4634
4635 err = ipw2100_hw_send_command(priv, &cmd);
4636 if (err) {
4637		IPW_DEBUG_INFO("Failed to set channel to %d\n",
4638 channel);
4639 return err;
4640 }
4641
4642 if (channel)
4643 priv->config |= CFG_STATIC_CHANNEL;
4644 else
4645 priv->config &= ~CFG_STATIC_CHANNEL;
4646
4647 priv->channel = channel;
4648
4649 if (!batch_mode) {
4650 err = ipw2100_enable_adapter(priv);
4651 if (err)
4652 return err;
4653 }
4654
4655 return 0;
4656}
4657
4658int ipw2100_system_config(struct ipw2100_priv *priv, int batch_mode)
4659{
4660 struct host_command cmd = {
4661 .host_command = SYSTEM_CONFIG,
4662 .host_command_sequence = 0,
4663 .host_command_length = 12,
4664 };
4665 u32 ibss_mask, len = sizeof(u32);
4666 int err;
4667
4668 /* Set system configuration */
4669
4670 if (!batch_mode) {
4671 err = ipw2100_disable_adapter(priv);
4672 if (err)
4673 return err;
4674 }
4675
4676 if (priv->ieee->iw_mode == IW_MODE_ADHOC)
4677 cmd.host_command_parameters[0] |= IPW_CFG_IBSS_AUTO_START;
4678
4679 cmd.host_command_parameters[0] |= IPW_CFG_IBSS_MASK |
4680 IPW_CFG_BSS_MASK |
4681 IPW_CFG_802_1x_ENABLE;
4682
4683 if (!(priv->config & CFG_LONG_PREAMBLE))
4684 cmd.host_command_parameters[0] |= IPW_CFG_PREAMBLE_AUTO;
4685
4686 err = ipw2100_get_ordinal(priv,
4687 IPW_ORD_EEPROM_IBSS_11B_CHANNELS,
4688 &ibss_mask, &len);
4689 if (err)
4690 ibss_mask = IPW_IBSS_11B_DEFAULT_MASK;
4691
4692 cmd.host_command_parameters[1] = REG_CHANNEL_MASK;
4693 cmd.host_command_parameters[2] = REG_CHANNEL_MASK & ibss_mask;
4694
4695 /* 11b only */
4696 /*cmd.host_command_parameters[0] |= DIVERSITY_ANTENNA_A;*/
4697
4698 err = ipw2100_hw_send_command(priv, &cmd);
4699 if (err)
4700 return err;
4701
4702/* If IPv6 is configured in the kernel then we don't want to filter out all
4703 * of the multicast packets as IPv6 needs some. */
4704#if !defined(CONFIG_IPV6) && !defined(CONFIG_IPV6_MODULE)
4705 cmd.host_command = ADD_MULTICAST;
4706 cmd.host_command_sequence = 0;
4707 cmd.host_command_length = 0;
4708
4709 ipw2100_hw_send_command(priv, &cmd);
4710#endif
4711 if (!batch_mode) {
4712 err = ipw2100_enable_adapter(priv);
4713 if (err)
4714 return err;
4715 }
4716
4717 return 0;
4718}
4719
4720int ipw2100_set_tx_rates(struct ipw2100_priv *priv, u32 rate, int batch_mode)
4721{
4722 struct host_command cmd = {
4723 .host_command = BASIC_TX_RATES,
4724 .host_command_sequence = 0,
4725 .host_command_length = 4
4726 };
4727 int err;
4728
4729 cmd.host_command_parameters[0] = rate & TX_RATE_MASK;
4730
4731 if (!batch_mode) {
4732 err = ipw2100_disable_adapter(priv);
4733 if (err)
4734 return err;
4735 }
4736
4737 /* Set BASIC TX Rate first */
4738 ipw2100_hw_send_command(priv, &cmd);
4739
4740 /* Set TX Rate */
4741 cmd.host_command = TX_RATES;
4742 ipw2100_hw_send_command(priv, &cmd);
4743
4744 /* Set MSDU TX Rate */
4745 cmd.host_command = MSDU_TX_RATES;
4746 ipw2100_hw_send_command(priv, &cmd);
4747
4748 if (!batch_mode) {
4749 err = ipw2100_enable_adapter(priv);
4750 if (err)
4751 return err;
4752 }
4753
4754 priv->tx_rates = rate;
4755
4756 return 0;
4757}
4758
4759int ipw2100_set_power_mode(struct ipw2100_priv *priv,
4760 int power_level)
4761{
4762 struct host_command cmd = {
4763 .host_command = POWER_MODE,
4764 .host_command_sequence = 0,
4765 .host_command_length = 4
4766 };
4767 int err;
4768
4769 cmd.host_command_parameters[0] = power_level;
4770
4771 err = ipw2100_hw_send_command(priv, &cmd);
4772 if (err)
4773 return err;
4774
4775 if (power_level == IPW_POWER_MODE_CAM)
4776 priv->power_mode = IPW_POWER_LEVEL(priv->power_mode);
4777 else
4778 priv->power_mode = IPW_POWER_ENABLED | power_level;
4779
4780#ifdef CONFIG_IPW2100_TX_POWER
4781 if (priv->port_type == IBSS &&
4782 priv->adhoc_power != DFTL_IBSS_TX_POWER) {
4783		/* Set TX power index for Ad-Hoc */
4784 cmd.host_command = TX_POWER_INDEX;
4785 cmd.host_command_parameters[0] = (u32)priv->adhoc_power;
4786
4787 err = ipw2100_hw_send_command(priv, &cmd);
4788 if (err)
4789 return err;
4790 }
4791#endif
4792
4793 return 0;
4794}
4795
4796
4797int ipw2100_set_rts_threshold(struct ipw2100_priv *priv, u32 threshold)
4798{
4799 struct host_command cmd = {
4800 .host_command = RTS_THRESHOLD,
4801 .host_command_sequence = 0,
4802 .host_command_length = 4
4803 };
4804 int err;
4805
4806 if (threshold & RTS_DISABLED)
4807 cmd.host_command_parameters[0] = MAX_RTS_THRESHOLD;
4808 else
4809 cmd.host_command_parameters[0] = threshold & ~RTS_DISABLED;
4810
4811 err = ipw2100_hw_send_command(priv, &cmd);
4812 if (err)
4813 return err;
4814
4815 priv->rts_threshold = threshold;
4816
4817 return 0;
4818}
4819
4820#if 0
4821int ipw2100_set_fragmentation_threshold(struct ipw2100_priv *priv,
4822 u32 threshold, int batch_mode)
4823{
4824 struct host_command cmd = {
4825 .host_command = FRAG_THRESHOLD,
4826 .host_command_sequence = 0,
4827 .host_command_length = 4,
4828 .host_command_parameters[0] = 0,
4829 };
4830 int err;
4831
4832 if (!batch_mode) {
4833 err = ipw2100_disable_adapter(priv);
4834 if (err)
4835 return err;
4836 }
4837
4838 if (threshold == 0)
4839 threshold = DEFAULT_FRAG_THRESHOLD;
4840 else {
4841 threshold = max(threshold, MIN_FRAG_THRESHOLD);
4842 threshold = min(threshold, MAX_FRAG_THRESHOLD);
4843 }
4844
4845 cmd.host_command_parameters[0] = threshold;
4846
4847 IPW_DEBUG_HC("FRAG_THRESHOLD: %u\n", threshold);
4848
4849 err = ipw2100_hw_send_command(priv, &cmd);
4850
4851 if (!batch_mode)
4852 ipw2100_enable_adapter(priv);
4853
4854 if (!err)
4855 priv->frag_threshold = threshold;
4856
4857 return err;
4858}
4859#endif
4860
4861int ipw2100_set_short_retry(struct ipw2100_priv *priv, u32 retry)
4862{
4863 struct host_command cmd = {
4864 .host_command = SHORT_RETRY_LIMIT,
4865 .host_command_sequence = 0,
4866 .host_command_length = 4
4867 };
4868 int err;
4869
4870 cmd.host_command_parameters[0] = retry;
4871
4872 err = ipw2100_hw_send_command(priv, &cmd);
4873 if (err)
4874 return err;
4875
4876 priv->short_retry_limit = retry;
4877
4878 return 0;
4879}
4880
4881int ipw2100_set_long_retry(struct ipw2100_priv *priv, u32 retry)
4882{
4883 struct host_command cmd = {
4884 .host_command = LONG_RETRY_LIMIT,
4885 .host_command_sequence = 0,
4886 .host_command_length = 4
4887 };
4888 int err;
4889
4890 cmd.host_command_parameters[0] = retry;
4891
4892 err = ipw2100_hw_send_command(priv, &cmd);
4893 if (err)
4894 return err;
4895
4896 priv->long_retry_limit = retry;
4897
4898 return 0;
4899}
4900
4901
4902int ipw2100_set_mandatory_bssid(struct ipw2100_priv *priv, u8 *bssid,
4903 int batch_mode)
4904{
4905 struct host_command cmd = {
4906 .host_command = MANDATORY_BSSID,
4907 .host_command_sequence = 0,
4908 .host_command_length = (bssid == NULL) ? 0 : ETH_ALEN
4909 };
4910 int err;
4911
4912#ifdef CONFIG_IPW_DEBUG
4913 if (bssid != NULL)
4914 IPW_DEBUG_HC(
4915 "MANDATORY_BSSID: %02X:%02X:%02X:%02X:%02X:%02X\n",
4916 bssid[0], bssid[1], bssid[2], bssid[3], bssid[4],
4917 bssid[5]);
4918 else
4919 IPW_DEBUG_HC("MANDATORY_BSSID: <clear>\n");
4920#endif
4921 /* if BSSID is empty then we disable mandatory bssid mode */
4922 if (bssid != NULL)
4923 memcpy((u8 *)cmd.host_command_parameters, bssid, ETH_ALEN);
4924
4925 if (!batch_mode) {
4926 err = ipw2100_disable_adapter(priv);
4927 if (err)
4928 return err;
4929 }
4930
4931 err = ipw2100_hw_send_command(priv, &cmd);
4932
4933 if (!batch_mode)
4934 ipw2100_enable_adapter(priv);
4935
4936 return err;
4937}
4938
4939#ifdef CONFIG_IEEE80211_WPA
4940static int ipw2100_disassociate_bssid(struct ipw2100_priv *priv)
4941{
4942 struct host_command cmd = {
4943 .host_command = DISASSOCIATION_BSSID,
4944 .host_command_sequence = 0,
4945 .host_command_length = ETH_ALEN
4946 };
4947 int err;
4948 int len;
4949
4950 IPW_DEBUG_HC("DISASSOCIATION_BSSID\n");
4951
4952 len = ETH_ALEN;
4953 /* The Firmware currently ignores the BSSID and just disassociates from
4954	 * the currently associated AP -- but on the off chance that a future
4955	 * firmware does use the BSSID provided here, we go ahead and try to
4956 * set it to the currently associated AP's BSSID */
4957 memcpy(cmd.host_command_parameters, priv->bssid, ETH_ALEN);
4958
4959 err = ipw2100_hw_send_command(priv, &cmd);
4960
4961 return err;
4962}
4963#endif
4964
4965/*
4966 * Pseudo code for setting up wpa_frame:
4967 */
4968#if 0
4969void x(struct ieee80211_assoc_frame *wpa_assoc)
4970{
4971 struct ipw2100_wpa_assoc_frame frame;
4972 frame->fixed_ie_mask = IPW_WPA_CAPABILTIES |
4973 IPW_WPA_LISTENINTERVAL |
4974 IPW_WPA_AP_ADDRESS;
4975 frame->capab_info = wpa_assoc->capab_info;
4976 frame->lisen_interval = wpa_assoc->listent_interval;
4977 memcpy(frame->current_ap, wpa_assoc->current_ap, ETH_ALEN);
4978
4979	/* UNKNOWN -- I'm not positive about this part; don't have any WPA
4980 * setup here to test it with.
4981 *
4982 * Walk the IEs in the wpa_assoc and figure out the total size of all
4983 * that data. Stick that into frame->var_ie_len. Then memcpy() all of
4984 * the IEs from wpa_frame into frame.
4985 */
4986 frame->var_ie_len = calculate_ie_len(wpa_assoc);
4987 memcpy(frame->var_ie, wpa_assoc->variable, frame->var_ie_len);
4988
4989 ipw2100_set_wpa_ie(priv, &frame, 0);
4990}
4991#endif
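
The pseudo code above leans on an undefined calculate_ie_len(). Assuming the variable portion of the association frame is a flat run of 802.11 information elements (one byte ID, one byte length, then the payload), a hypothetical helper could total them up like this; the signature is adapted to take the raw buffer and its size, since the pseudo code does not say how that length is known:

#if 0
/* Hypothetical helper -- not part of the driver. */
static size_t calculate_ie_len(const u8 *ies, size_t buf_len)
{
	size_t offset = 0;

	/* Walk ID/length pairs until we run past the end of the buffer */
	while (offset + 2 <= buf_len) {
		u8 ie_len = ies[offset + 1];

		if (offset + 2 + ie_len > buf_len)
			break;
		offset += 2 + ie_len;
	}

	return offset;
}
#endif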
4992
4993
4994
4995
4996static int ipw2100_set_wpa_ie(struct ipw2100_priv *,
4997 struct ipw2100_wpa_assoc_frame *, int)
4998__attribute__ ((unused));
4999
5000static int ipw2100_set_wpa_ie(struct ipw2100_priv *priv,
5001 struct ipw2100_wpa_assoc_frame *wpa_frame,
5002 int batch_mode)
5003{
5004 struct host_command cmd = {
5005 .host_command = SET_WPA_IE,
5006 .host_command_sequence = 0,
5007 .host_command_length = sizeof(struct ipw2100_wpa_assoc_frame),
5008 };
5009 int err;
5010
5011 IPW_DEBUG_HC("SET_WPA_IE\n");
5012
5013 if (!batch_mode) {
5014 err = ipw2100_disable_adapter(priv);
5015 if (err)
5016 return err;
5017 }
5018
5019 memcpy(cmd.host_command_parameters, wpa_frame,
5020 sizeof(struct ipw2100_wpa_assoc_frame));
5021
5022 err = ipw2100_hw_send_command(priv, &cmd);
5023
5024 if (!batch_mode) {
5025 if (ipw2100_enable_adapter(priv))
5026 err = -EIO;
5027 }
5028
5029 return err;
5030}
5031
5032struct security_info_params {
5033 u32 allowed_ciphers;
5034 u16 version;
5035 u8 auth_mode;
5036 u8 replay_counters_number;
5037 u8 unicast_using_group;
5038} __attribute__ ((packed));
5039
5040int ipw2100_set_security_information(struct ipw2100_priv *priv,
5041 int auth_mode,
5042 int security_level,
5043 int unicast_using_group,
5044 int batch_mode)
5045{
5046 struct host_command cmd = {
5047 .host_command = SET_SECURITY_INFORMATION,
5048 .host_command_sequence = 0,
5049 .host_command_length = sizeof(struct security_info_params)
5050 };
5051 struct security_info_params *security =
5052 (struct security_info_params *)&cmd.host_command_parameters;
5053 int err;
5054 memset(security, 0, sizeof(*security));
5055
5056 /* If shared key AP authentication is turned on, then we need to
5057	 * configure the firmware to try to use it.
5058 *
5059 * Actual data encryption/decryption is handled by the host. */
5060 security->auth_mode = auth_mode;
5061 security->unicast_using_group = unicast_using_group;
5062
5063 switch (security_level) {
5064 default:
5065 case SEC_LEVEL_0:
5066 security->allowed_ciphers = IPW_NONE_CIPHER;
5067 break;
5068 case SEC_LEVEL_1:
5069 security->allowed_ciphers = IPW_WEP40_CIPHER |
5070 IPW_WEP104_CIPHER;
5071 break;
5072 case SEC_LEVEL_2:
5073 security->allowed_ciphers = IPW_WEP40_CIPHER |
5074 IPW_WEP104_CIPHER | IPW_TKIP_CIPHER;
5075 break;
5076 case SEC_LEVEL_2_CKIP:
5077 security->allowed_ciphers = IPW_WEP40_CIPHER |
5078 IPW_WEP104_CIPHER | IPW_CKIP_CIPHER;
5079 break;
5080 case SEC_LEVEL_3:
5081 security->allowed_ciphers = IPW_WEP40_CIPHER |
5082 IPW_WEP104_CIPHER | IPW_TKIP_CIPHER | IPW_CCMP_CIPHER;
5083 break;
5084 }
5085
5086 IPW_DEBUG_HC(
5087 "SET_SECURITY_INFORMATION: auth:%d cipher:0x%02X (level %d)\n",
5088 security->auth_mode, security->allowed_ciphers, security_level);
5089
5090 security->replay_counters_number = 0;
5091
5092 if (!batch_mode) {
5093 err = ipw2100_disable_adapter(priv);
5094 if (err)
5095 return err;
5096 }
5097
5098 err = ipw2100_hw_send_command(priv, &cmd);
5099
5100 if (!batch_mode)
5101 ipw2100_enable_adapter(priv);
5102
5103 return err;
5104}
5105
5106int ipw2100_set_tx_power(struct ipw2100_priv *priv,
5107 u32 tx_power)
5108{
5109 struct host_command cmd = {
5110 .host_command = TX_POWER_INDEX,
5111 .host_command_sequence = 0,
5112 .host_command_length = 4
5113 };
5114 int err = 0;
5115
5116 cmd.host_command_parameters[0] = tx_power;
5117
5118 if (priv->ieee->iw_mode == IW_MODE_ADHOC)
5119 err = ipw2100_hw_send_command(priv, &cmd);
5120 if (!err)
5121 priv->tx_power = tx_power;
5122
5123 return 0;
5124}
5125
5126int ipw2100_set_ibss_beacon_interval(struct ipw2100_priv *priv,
5127 u32 interval, int batch_mode)
5128{
5129 struct host_command cmd = {
5130 .host_command = BEACON_INTERVAL,
5131 .host_command_sequence = 0,
5132 .host_command_length = 4
5133 };
5134 int err;
5135
5136 cmd.host_command_parameters[0] = interval;
5137
5138 IPW_DEBUG_INFO("enter\n");
5139
5140 if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
5141 if (!batch_mode) {
5142 err = ipw2100_disable_adapter(priv);
5143 if (err)
5144 return err;
5145 }
5146
5147 ipw2100_hw_send_command(priv, &cmd);
5148
5149 if (!batch_mode) {
5150 err = ipw2100_enable_adapter(priv);
5151 if (err)
5152 return err;
5153 }
5154 }
5155
5156 IPW_DEBUG_INFO("exit\n");
5157
5158 return 0;
5159}
5160
5161
5162void ipw2100_queues_initialize(struct ipw2100_priv *priv)
5163{
5164 ipw2100_tx_initialize(priv);
5165 ipw2100_rx_initialize(priv);
5166 ipw2100_msg_initialize(priv);
5167}
5168
5169void ipw2100_queues_free(struct ipw2100_priv *priv)
5170{
5171 ipw2100_tx_free(priv);
5172 ipw2100_rx_free(priv);
5173 ipw2100_msg_free(priv);
5174}
5175
5176int ipw2100_queues_allocate(struct ipw2100_priv *priv)
5177{
5178 if (ipw2100_tx_allocate(priv) ||
5179 ipw2100_rx_allocate(priv) ||
5180 ipw2100_msg_allocate(priv))
5181 goto fail;
5182
5183 return 0;
5184
5185 fail:
5186 ipw2100_tx_free(priv);
5187 ipw2100_rx_free(priv);
5188 ipw2100_msg_free(priv);
5189 return -ENOMEM;
5190}
5191
5192#define IPW_PRIVACY_CAPABLE 0x0008
5193
5194static int ipw2100_set_wep_flags(struct ipw2100_priv *priv, u32 flags,
5195 int batch_mode)
5196{
5197 struct host_command cmd = {
5198 .host_command = WEP_FLAGS,
5199 .host_command_sequence = 0,
5200 .host_command_length = 4
5201 };
5202 int err;
5203
5204 cmd.host_command_parameters[0] = flags;
5205
5206 IPW_DEBUG_HC("WEP_FLAGS: flags = 0x%08X\n", flags);
5207
5208 if (!batch_mode) {
5209 err = ipw2100_disable_adapter(priv);
5210 if (err) {
5211 IPW_DEBUG_ERROR("%s: Could not disable adapter %d\n",
5212 priv->net_dev->name, err);
5213 return err;
5214 }
5215 }
5216
5217 /* send cmd to firmware */
5218 err = ipw2100_hw_send_command(priv, &cmd);
5219
5220 if (!batch_mode)
5221 ipw2100_enable_adapter(priv);
5222
5223 return err;
5224}
5225
5226struct ipw2100_wep_key {
5227 u8 idx;
5228 u8 len;
5229 u8 key[13];
5230};
5231
5232/* Macros to ease printing WEP keys */
5233#define WEP_FMT_64 "%02X%02X%02X%02X-%02X"
5234#define WEP_FMT_128 "%02X%02X%02X%02X-%02X%02X%02X%02X-%02X%02X%02X"
5235#define WEP_STR_64(x) x[0],x[1],x[2],x[3],x[4]
5236#define WEP_STR_128(x) x[0],x[1],x[2],x[3],x[4],x[5],x[6],x[7],x[8],x[9],x[10]
5237
5238
5239/**
5240 * Set the WEP key
5241 *
5242 * @priv: struct to work on
5243 * @idx: index of the key we want to set
5244 * @key: ptr to the key data to set
5245 * @len: length of the buffer at @key
5246 * @batch_mode: FIXME: when set, perform the operation in batch mode, without
5247 * disabling the device.
5248 *
5249 * @returns 0 if OK, < 0 errno code on error.
5250 *
5251 * Fill out a command structure with the new WEP key, length and
5252 * index and send it down the wire.
5253 */
5254static int ipw2100_set_key(struct ipw2100_priv *priv,
5255 int idx, char *key, int len, int batch_mode)
5256{
5257 int keylen = len ? (len <= 5 ? 5 : 13) : 0;
5258 struct host_command cmd = {
5259 .host_command = WEP_KEY_INFO,
5260 .host_command_sequence = 0,
5261 .host_command_length = sizeof(struct ipw2100_wep_key),
5262 };
5263 struct ipw2100_wep_key *wep_key = (void*)cmd.host_command_parameters;
5264 int err;
5265
5266 IPW_DEBUG_HC("WEP_KEY_INFO: index = %d, len = %d/%d\n",
5267 idx, keylen, len);
5268
5269 /* NOTE: We don't check cached values in case the firmware was reset
5270	 * or some other problem is occurring. If the user is setting the key,
5271 * then we push the change */
5272
5273 wep_key->idx = idx;
5274 wep_key->len = keylen;
5275
5276 if (keylen) {
5277 memcpy(wep_key->key, key, len);
5278 memset(wep_key->key + len, 0, keylen - len);
5279 }
5280
5281	/* Will be optimized out if debug is not configured in */
5282 if (keylen == 0)
5283 IPW_DEBUG_WEP("%s: Clearing key %d\n",
5284 priv->net_dev->name, wep_key->idx);
5285 else if (keylen == 5)
5286 IPW_DEBUG_WEP("%s: idx: %d, len: %d key: " WEP_FMT_64 "\n",
5287 priv->net_dev->name, wep_key->idx, wep_key->len,
5288 WEP_STR_64(wep_key->key));
5289 else
5290 IPW_DEBUG_WEP("%s: idx: %d, len: %d key: " WEP_FMT_128
5291 "\n",
5292 priv->net_dev->name, wep_key->idx, wep_key->len,
5293 WEP_STR_128(wep_key->key));
5294
5295 if (!batch_mode) {
5296 err = ipw2100_disable_adapter(priv);
5297		/* FIXME: IPG: shouldn't this printk be in _disable_adapter()? */
5298 if (err) {
5299 IPW_DEBUG_ERROR("%s: Could not disable adapter %d\n",
5300 priv->net_dev->name, err);
5301 return err;
5302 }
5303 }
5304
5305 /* send cmd to firmware */
5306 err = ipw2100_hw_send_command(priv, &cmd);
5307
5308 if (!batch_mode) {
5309 int err2 = ipw2100_enable_adapter(priv);
5310 if (err == 0)
5311 err = err2;
5312 }
5313 return err;
5314}
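
For reference, the keylen rounding above pads any non-zero key out to either a 40-bit (5 byte) or 104-bit (13 byte) slot, and a zero length clears the key. A hedged usage sketch (the wrapper name and key bytes are made up; batch_mode = 1 assumes the caller already holds the adapter disabled):

#if 0
static void example_install_wep_keys(struct ipw2100_priv *priv)
{
	char wep40[5] = { 0x01, 0x02, 0x03, 0x04, 0x05 };

	/* 5-byte key -> keylen stays 5 (40-bit WEP) */
	ipw2100_set_key(priv, 0, wep40, sizeof(wep40), 1);

	/* len == 0 -> keylen 0, which clears key index 1 */
	ipw2100_set_key(priv, 1, NULL, 0, 1);
}
#endif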
5315
5316static int ipw2100_set_key_index(struct ipw2100_priv *priv,
5317 int idx, int batch_mode)
5318{
5319 struct host_command cmd = {
5320 .host_command = WEP_KEY_INDEX,
5321 .host_command_sequence = 0,
5322 .host_command_length = 4,
5323 .host_command_parameters = { idx },
5324 };
5325 int err;
5326
5327 IPW_DEBUG_HC("WEP_KEY_INDEX: index = %d\n", idx);
5328
5329 if (idx < 0 || idx > 3)
5330 return -EINVAL;
5331
5332 if (!batch_mode) {
5333 err = ipw2100_disable_adapter(priv);
5334 if (err) {
5335 IPW_DEBUG_ERROR("%s: Could not disable adapter %d\n",
5336 priv->net_dev->name, err);
5337 return err;
5338 }
5339 }
5340
5341 /* send cmd to firmware */
5342 err = ipw2100_hw_send_command(priv, &cmd);
5343
5344 if (!batch_mode)
5345 ipw2100_enable_adapter(priv);
5346
5347 return err;
5348}
5349
5350
5351static int ipw2100_configure_security(struct ipw2100_priv *priv,
5352 int batch_mode)
5353{
5354 int i, err, auth_mode, sec_level, use_group;
5355
5356 if (!(priv->status & STATUS_RUNNING))
5357 return 0;
5358
5359 if (!batch_mode) {
5360 err = ipw2100_disable_adapter(priv);
5361 if (err)
5362 return err;
5363 }
5364
5365 if (!priv->sec.enabled) {
5366 err = ipw2100_set_security_information(
5367 priv, IPW_AUTH_OPEN, SEC_LEVEL_0, 0, 1);
5368 } else {
5369 auth_mode = IPW_AUTH_OPEN;
5370 if ((priv->sec.flags & SEC_AUTH_MODE) &&
5371 (priv->sec.auth_mode == WLAN_AUTH_SHARED_KEY))
5372 auth_mode = IPW_AUTH_SHARED;
5373
5374 sec_level = SEC_LEVEL_0;
5375 if (priv->sec.flags & SEC_LEVEL)
5376 sec_level = priv->sec.level;
5377
5378 use_group = 0;
5379 if (priv->sec.flags & SEC_UNICAST_GROUP)
5380 use_group = priv->sec.unicast_uses_group;
5381
5382 err = ipw2100_set_security_information(
5383 priv, auth_mode, sec_level, use_group, 1);
5384 }
5385
5386 if (err)
5387 goto exit;
5388
5389 if (priv->sec.enabled) {
5390 for (i = 0; i < 4; i++) {
5391 if (!(priv->sec.flags & (1 << i))) {
5392 memset(priv->sec.keys[i], 0, WEP_KEY_LEN);
5393 priv->sec.key_sizes[i] = 0;
5394 } else {
5395 err = ipw2100_set_key(priv, i,
5396 priv->sec.keys[i],
5397 priv->sec.key_sizes[i],
5398 1);
5399 if (err)
5400 goto exit;
5401 }
5402 }
5403
5404 ipw2100_set_key_index(priv, priv->ieee->tx_keyidx, 1);
5405 }
5406
5407 /* Always enable privacy so the Host can filter WEP packets if
5408 * encrypted data is sent up */
5409 err = ipw2100_set_wep_flags(
5410 priv, priv->sec.enabled ? IPW_PRIVACY_CAPABLE : 0, 1);
5411 if (err)
5412 goto exit;
5413
5414 priv->status &= ~STATUS_SECURITY_UPDATED;
5415
5416 exit:
5417 if (!batch_mode)
5418 ipw2100_enable_adapter(priv);
5419
5420 return err;
5421}
5422
5423static void ipw2100_security_work(struct ipw2100_priv *priv)
5424{
5425 /* If we happen to have reconnected before we get a chance to
5426 * process this, then update the security settings--which causes
5427 * a disassociation to occur */
5428 if (!(priv->status & STATUS_ASSOCIATED) &&
5429 priv->status & STATUS_SECURITY_UPDATED)
5430 ipw2100_configure_security(priv, 0);
5431}
5432
5433static void shim__set_security(struct net_device *dev,
5434 struct ieee80211_security *sec)
5435{
5436 struct ipw2100_priv *priv = ieee80211_priv(dev);
5437 int i, force_update = 0;
5438
5439 down(&priv->action_sem);
5440 if (!(priv->status & STATUS_INITIALIZED))
5441 goto done;
5442
5443 for (i = 0; i < 4; i++) {
5444 if (sec->flags & (1 << i)) {
5445 priv->sec.key_sizes[i] = sec->key_sizes[i];
5446 if (sec->key_sizes[i] == 0)
5447 priv->sec.flags &= ~(1 << i);
5448 else
5449 memcpy(priv->sec.keys[i], sec->keys[i],
5450 sec->key_sizes[i]);
5451 priv->sec.flags |= (1 << i);
5452 priv->status |= STATUS_SECURITY_UPDATED;
5453 }
5454 }
5455
5456 if ((sec->flags & SEC_ACTIVE_KEY) &&
5457 priv->sec.active_key != sec->active_key) {
5458 if (sec->active_key <= 3) {
5459 priv->sec.active_key = sec->active_key;
5460 priv->sec.flags |= SEC_ACTIVE_KEY;
5461 } else
5462 priv->sec.flags &= ~SEC_ACTIVE_KEY;
5463
5464 priv->status |= STATUS_SECURITY_UPDATED;
5465 }
5466
5467 if ((sec->flags & SEC_AUTH_MODE) &&
5468 (priv->sec.auth_mode != sec->auth_mode)) {
5469 priv->sec.auth_mode = sec->auth_mode;
5470 priv->sec.flags |= SEC_AUTH_MODE;
5471 priv->status |= STATUS_SECURITY_UPDATED;
5472 }
5473
5474 if (sec->flags & SEC_ENABLED &&
5475 priv->sec.enabled != sec->enabled) {
5476 priv->sec.flags |= SEC_ENABLED;
5477 priv->sec.enabled = sec->enabled;
5478 priv->status |= STATUS_SECURITY_UPDATED;
5479 force_update = 1;
5480 }
5481
5482 if (sec->flags & SEC_LEVEL &&
5483 priv->sec.level != sec->level) {
5484 priv->sec.level = sec->level;
5485 priv->sec.flags |= SEC_LEVEL;
5486 priv->status |= STATUS_SECURITY_UPDATED;
5487 }
5488
5489 IPW_DEBUG_WEP("Security flags: %c %c%c%c%c %c%c%c%c\n",
5490 priv->sec.flags & (1<<8) ? '1' : '0',
5491 priv->sec.flags & (1<<7) ? '1' : '0',
5492 priv->sec.flags & (1<<6) ? '1' : '0',
5493 priv->sec.flags & (1<<5) ? '1' : '0',
5494 priv->sec.flags & (1<<4) ? '1' : '0',
5495 priv->sec.flags & (1<<3) ? '1' : '0',
5496 priv->sec.flags & (1<<2) ? '1' : '0',
5497 priv->sec.flags & (1<<1) ? '1' : '0',
5498 priv->sec.flags & (1<<0) ? '1' : '0');
5499
5500/* As a temporary workaround to enable WPA until we figure out why
5501 * wpa_supplicant toggles the security capability of the driver, which
5502 * forces a disassociation with force_update...
5503 *
5504 * if (force_update || !(priv->status & STATUS_ASSOCIATED))*/
5505 if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)))
5506 ipw2100_configure_security(priv, 0);
5507done:
5508 up(&priv->action_sem);
5509}
5510
5511static int ipw2100_adapter_setup(struct ipw2100_priv *priv)
5512{
5513 int err;
5514 int batch_mode = 1;
5515 u8 *bssid;
5516
5517 IPW_DEBUG_INFO("enter\n");
5518
5519 err = ipw2100_disable_adapter(priv);
5520 if (err)
5521 return err;
5522#ifdef CONFIG_IPW2100_MONITOR
5523 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
5524 err = ipw2100_set_channel(priv, priv->channel, batch_mode);
5525 if (err)
5526 return err;
5527
5528 IPW_DEBUG_INFO("exit\n");
5529
5530 return 0;
5531 }
5532#endif /* CONFIG_IPW2100_MONITOR */
5533
5534 err = ipw2100_read_mac_address(priv);
5535 if (err)
5536 return -EIO;
5537
5538 err = ipw2100_set_mac_address(priv, batch_mode);
5539 if (err)
5540 return err;
5541
5542 err = ipw2100_set_port_type(priv, priv->ieee->iw_mode, batch_mode);
5543 if (err)
5544 return err;
5545
5546 if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
5547 err = ipw2100_set_channel(priv, priv->channel, batch_mode);
5548 if (err)
5549 return err;
5550 }
5551
5552 err = ipw2100_system_config(priv, batch_mode);
5553 if (err)
5554 return err;
5555
5556 err = ipw2100_set_tx_rates(priv, priv->tx_rates, batch_mode);
5557 if (err)
5558 return err;
5559
5560 /* Default to power mode OFF */
5561 err = ipw2100_set_power_mode(priv, IPW_POWER_MODE_CAM);
5562 if (err)
5563 return err;
5564
5565 err = ipw2100_set_rts_threshold(priv, priv->rts_threshold);
5566 if (err)
5567 return err;
5568
5569 if (priv->config & CFG_STATIC_BSSID)
5570 bssid = priv->bssid;
5571 else
5572 bssid = NULL;
5573 err = ipw2100_set_mandatory_bssid(priv, bssid, batch_mode);
5574 if (err)
5575 return err;
5576
5577 if (priv->config & CFG_STATIC_ESSID)
5578 err = ipw2100_set_essid(priv, priv->essid, priv->essid_len,
5579 batch_mode);
5580 else
5581 err = ipw2100_set_essid(priv, NULL, 0, batch_mode);
5582 if (err)
5583 return err;
5584
5585 err = ipw2100_configure_security(priv, batch_mode);
5586 if (err)
5587 return err;
5588
5589 if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
5590 err = ipw2100_set_ibss_beacon_interval(
5591 priv, priv->beacon_interval, batch_mode);
5592 if (err)
5593 return err;
5594
5595 err = ipw2100_set_tx_power(priv, priv->tx_power);
5596 if (err)
5597 return err;
5598 }
5599
5600 /*
5601 err = ipw2100_set_fragmentation_threshold(
5602 priv, priv->frag_threshold, batch_mode);
5603 if (err)
5604 return err;
5605 */
5606
5607 IPW_DEBUG_INFO("exit\n");
5608
5609 return 0;
5610}
5611
5612
5613/*************************************************************************
5614 *
5615 * EXTERNALLY CALLED METHODS
5616 *
5617 *************************************************************************/
5618
5619/* This method is called by the network layer -- not to be confused with
5620 * ipw2100_set_mac_address() declared above, which this driver (and this
5621 * method as well) calls to talk to the firmware */
5622static int ipw2100_set_address(struct net_device *dev, void *p)
5623{
5624 struct ipw2100_priv *priv = ieee80211_priv(dev);
5625 struct sockaddr *addr = p;
5626 int err = 0;
5627
5628 if (!is_valid_ether_addr(addr->sa_data))
5629 return -EADDRNOTAVAIL;
5630
5631 down(&priv->action_sem);
5632
5633 priv->config |= CFG_CUSTOM_MAC;
5634 memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN);
5635
5636 err = ipw2100_set_mac_address(priv, 0);
5637 if (err)
5638 goto done;
5639
5640 priv->reset_backoff = 0;
5641 up(&priv->action_sem);
5642 ipw2100_reset_adapter(priv);
5643 return 0;
5644
5645 done:
5646 up(&priv->action_sem);
5647 return err;
5648}
5649
5650static int ipw2100_open(struct net_device *dev)
5651{
5652 struct ipw2100_priv *priv = ieee80211_priv(dev);
5653 unsigned long flags;
5654 IPW_DEBUG_INFO("dev->open\n");
5655
5656 spin_lock_irqsave(&priv->low_lock, flags);
5657 if (priv->status & STATUS_ASSOCIATED)
5658 netif_start_queue(dev);
5659 spin_unlock_irqrestore(&priv->low_lock, flags);
5660
5661 return 0;
5662}
5663
5664static int ipw2100_close(struct net_device *dev)
5665{
5666 struct ipw2100_priv *priv = ieee80211_priv(dev);
5667 unsigned long flags;
5668 struct list_head *element;
5669 struct ipw2100_tx_packet *packet;
5670
5671 IPW_DEBUG_INFO("enter\n");
5672
5673 spin_lock_irqsave(&priv->low_lock, flags);
5674
5675 if (priv->status & STATUS_ASSOCIATED)
5676 netif_carrier_off(dev);
5677 netif_stop_queue(dev);
5678
5679 /* Flush the TX queue ... */
5680 while (!list_empty(&priv->tx_pend_list)) {
5681 element = priv->tx_pend_list.next;
5682 packet = list_entry(element, struct ipw2100_tx_packet, list);
5683
5684 list_del(element);
5685 DEC_STAT(&priv->tx_pend_stat);
5686
5687 ieee80211_txb_free(packet->info.d_struct.txb);
5688 packet->info.d_struct.txb = NULL;
5689
5690 list_add_tail(element, &priv->tx_free_list);
5691 INC_STAT(&priv->tx_free_stat);
5692 }
5693 spin_unlock_irqrestore(&priv->low_lock, flags);
5694
5695 IPW_DEBUG_INFO("exit\n");
5696
5697 return 0;
5698}
5699
5700
5701
5702/*
5703 * TODO: Fix this function... it's just wrong
5704 */
5705static void ipw2100_tx_timeout(struct net_device *dev)
5706{
5707 struct ipw2100_priv *priv = ieee80211_priv(dev);
5708
5709 priv->ieee->stats.tx_errors++;
5710
5711#ifdef CONFIG_IPW2100_MONITOR
5712 if (priv->ieee->iw_mode == IW_MODE_MONITOR)
5713 return;
5714#endif
5715
5716 IPW_DEBUG_INFO("%s: TX timed out. Scheduling firmware restart.\n",
5717 dev->name);
5718 schedule_reset(priv);
5719}
5720
5721
5722/*
5723 * TODO: reimplement it so that it reads statistics
5724 * from the adapter using ordinal tables
5725 * instead of/in addition to collecting them
5726 * in the driver
5727 */
5728static struct net_device_stats *ipw2100_stats(struct net_device *dev)
5729{
5730 struct ipw2100_priv *priv = ieee80211_priv(dev);
5731
5732 return &priv->ieee->stats;
5733}
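
The TODO above suggests pulling counters straight from the adapter's ordinal tables instead of only accumulating them in the driver. A minimal sketch of that approach, reusing ipw2100_get_ordinal() as it is used elsewhere in this file; IPW_ORD_STAT_TX_FAILURES is a stand-in ordinal name and may not match the real table:

#if 0
static void example_refresh_tx_errors(struct ipw2100_priv *priv)
{
	u32 failures = 0;
	u32 len = sizeof(failures);

	/* Query one counter from the firmware's ordinal table */
	if (!ipw2100_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURES,
				 &failures, &len))
		priv->ieee->stats.tx_errors = failures;
}
#endif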
5734
5735/* Support for wpa_supplicant. Will be replaced with WEXT once
5736 * it gets WPA support. */
5737#ifdef CONFIG_IEEE80211_WPA
5738
5739/* following definitions must match definitions in driver_ipw2100.c */
5740
5741#define IPW2100_IOCTL_WPA_SUPPLICANT SIOCIWFIRSTPRIV+30
5742
5743#define IPW2100_CMD_SET_WPA_PARAM 1
5744#define IPW2100_CMD_SET_WPA_IE 2
5745#define IPW2100_CMD_SET_ENCRYPTION 3
5746#define IPW2100_CMD_MLME 4
5747
5748#define IPW2100_PARAM_WPA_ENABLED 1
5749#define IPW2100_PARAM_TKIP_COUNTERMEASURES 2
5750#define IPW2100_PARAM_DROP_UNENCRYPTED 3
5751#define IPW2100_PARAM_PRIVACY_INVOKED 4
5752#define IPW2100_PARAM_AUTH_ALGS 5
5753#define IPW2100_PARAM_IEEE_802_1X 6
5754
5755#define IPW2100_MLME_STA_DEAUTH 1
5756#define IPW2100_MLME_STA_DISASSOC 2
5757
5758#define IPW2100_CRYPT_ERR_UNKNOWN_ALG 2
5759#define IPW2100_CRYPT_ERR_UNKNOWN_ADDR 3
5760#define IPW2100_CRYPT_ERR_CRYPT_INIT_FAILED 4
5761#define IPW2100_CRYPT_ERR_KEY_SET_FAILED 5
5762#define IPW2100_CRYPT_ERR_TX_KEY_SET_FAILED 6
5763#define IPW2100_CRYPT_ERR_CARD_CONF_FAILED 7
5764
5765#define IPW2100_CRYPT_ALG_NAME_LEN 16
5766
5767struct ipw2100_param {
5768 u32 cmd;
5769 u8 sta_addr[ETH_ALEN];
5770 union {
5771 struct {
5772 u8 name;
5773 u32 value;
5774 } wpa_param;
5775 struct {
5776 u32 len;
5777 u8 *data;
5778 } wpa_ie;
5779 struct{
5780 int command;
5781 int reason_code;
5782 } mlme;
5783 struct {
5784 u8 alg[IPW2100_CRYPT_ALG_NAME_LEN];
5785 u8 set_tx;
5786 u32 err;
5787 u8 idx;
5788 u8 seq[8]; /* sequence counter (set: RX, get: TX) */
5789 u16 key_len;
5790 u8 key[0];
5791 } crypt;
5792
5793 } u;
5794};
5795
5796/* end of driver_ipw2100.c code */
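
For context, a wpa_supplicant driver backend would typically wrap a struct ipw2100_param in an iwreq and push it through the private ioctl, in the usual hostap style. This is a hedged userspace sketch only: the helper name is invented, and it assumes <sys/ioctl.h>, <string.h> and <linux/wireless.h> are included and ioctl_sock is an open socket.

#if 0
static int example_set_wpa_param(int ioctl_sock, const char *ifname,
				 u8 name, u32 value)
{
	struct ipw2100_param param;
	struct iwreq iwr;

	memset(&param, 0, sizeof(param));
	param.cmd = IPW2100_CMD_SET_WPA_PARAM;
	param.u.wpa_param.name = name;
	param.u.wpa_param.value = value;

	memset(&iwr, 0, sizeof(iwr));
	strncpy(iwr.ifr_name, ifname, IFNAMSIZ);
	iwr.u.data.pointer = (caddr_t) &param;
	iwr.u.data.length = sizeof(param);

	/* Kernel side rejects lengths below sizeof(struct ipw2100_param) */
	return ioctl(ioctl_sock, IPW2100_IOCTL_WPA_SUPPLICANT, &iwr);
}
#endif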
5797
5798static int ipw2100_wpa_enable(struct ipw2100_priv *priv, int value){
5799
5800 struct ieee80211_device *ieee = priv->ieee;
5801 struct ieee80211_security sec = {
5802 .flags = SEC_LEVEL | SEC_ENABLED,
5803 };
5804 int ret = 0;
5805
5806 ieee->wpa_enabled = value;
5807
5808 if (value){
5809 sec.level = SEC_LEVEL_3;
5810 sec.enabled = 1;
5811 } else {
5812 sec.level = SEC_LEVEL_0;
5813 sec.enabled = 0;
5814 }
5815
5816 if (ieee->set_security)
5817 ieee->set_security(ieee->dev, &sec);
5818 else
5819 ret = -EOPNOTSUPP;
5820
5821 return ret;
5822}
5823
5824#define AUTH_ALG_OPEN_SYSTEM 0x1
5825#define AUTH_ALG_SHARED_KEY 0x2
5826
5827static int ipw2100_wpa_set_auth_algs(struct ipw2100_priv *priv, int value){
5828
5829 struct ieee80211_device *ieee = priv->ieee;
5830 struct ieee80211_security sec = {
5831 .flags = SEC_AUTH_MODE,
5832 };
5833 int ret = 0;
5834
5835 if (value & AUTH_ALG_SHARED_KEY){
5836 sec.auth_mode = WLAN_AUTH_SHARED_KEY;
5837 ieee->open_wep = 0;
5838 } else {
5839 sec.auth_mode = WLAN_AUTH_OPEN;
5840 ieee->open_wep = 1;
5841 }
5842
5843 if (ieee->set_security)
5844 ieee->set_security(ieee->dev, &sec);
5845 else
5846 ret = -EOPNOTSUPP;
5847
5848 return ret;
5849}
5850
5851
5852static int ipw2100_wpa_set_param(struct net_device *dev, u8 name, u32 value){
5853
5854 struct ipw2100_priv *priv = ieee80211_priv(dev);
5855 int ret=0;
5856
5857 switch(name){
5858 case IPW2100_PARAM_WPA_ENABLED:
5859 ret = ipw2100_wpa_enable(priv, value);
5860 break;
5861
5862 case IPW2100_PARAM_TKIP_COUNTERMEASURES:
5863 priv->ieee->tkip_countermeasures=value;
5864 break;
5865
5866 case IPW2100_PARAM_DROP_UNENCRYPTED:
5867 priv->ieee->drop_unencrypted=value;
5868 break;
5869
5870 case IPW2100_PARAM_PRIVACY_INVOKED:
5871 priv->ieee->privacy_invoked=value;
5872 break;
5873
5874 case IPW2100_PARAM_AUTH_ALGS:
5875 ret = ipw2100_wpa_set_auth_algs(priv, value);
5876 break;
5877
5878 case IPW2100_PARAM_IEEE_802_1X:
5879 priv->ieee->ieee802_1x=value;
5880 break;
5881
5882 default:
5883 IPW_DEBUG_ERROR("%s: Unknown WPA param: %d\n",
5884 dev->name, name);
5885 ret = -EOPNOTSUPP;
5886 }
5887
5888 return ret;
5889}
5890
5891static int ipw2100_wpa_mlme(struct net_device *dev, int command, int reason){
5892
5893 struct ipw2100_priv *priv = ieee80211_priv(dev);
5894 int ret=0;
5895
5896 switch(command){
5897 case IPW2100_MLME_STA_DEAUTH:
5898 // silently ignore
5899 break;
5900
5901 case IPW2100_MLME_STA_DISASSOC:
5902 ipw2100_disassociate_bssid(priv);
5903 break;
5904
5905 default:
5906 IPW_DEBUG_ERROR("%s: Unknown MLME request: %d\n",
5907 dev->name, command);
5908 ret = -EOPNOTSUPP;
5909 }
5910
5911 return ret;
5912}
5913
5914
5915void ipw2100_wpa_assoc_frame(struct ipw2100_priv *priv,
5916 char *wpa_ie, int wpa_ie_len){
5917
5918 struct ipw2100_wpa_assoc_frame frame;
5919
5920 frame.fixed_ie_mask = 0;
5921
5922 /* copy WPA IE */
5923 memcpy(frame.var_ie, wpa_ie, wpa_ie_len);
5924 frame.var_ie_len = wpa_ie_len;
5925
5926 /* make sure WPA is enabled */
5927 ipw2100_wpa_enable(priv, 1);
5928 ipw2100_set_wpa_ie(priv, &frame, 0);
5929}
5930
5931
5932static int ipw2100_wpa_set_wpa_ie(struct net_device *dev,
5933 struct ipw2100_param *param, int plen){
5934
5935 struct ipw2100_priv *priv = ieee80211_priv(dev);
5936 struct ieee80211_device *ieee = priv->ieee;
5937 u8 *buf;
5938
5939 if (! ieee->wpa_enabled)
5940 return -EOPNOTSUPP;
5941
5942 if (param->u.wpa_ie.len > MAX_WPA_IE_LEN ||
5943 (param->u.wpa_ie.len &&
5944 param->u.wpa_ie.data==NULL))
5945 return -EINVAL;
5946
5947 if (param->u.wpa_ie.len){
5948 buf = kmalloc(param->u.wpa_ie.len, GFP_KERNEL);
5949 if (buf == NULL)
5950 return -ENOMEM;
5951
5952 memcpy(buf, param->u.wpa_ie.data, param->u.wpa_ie.len);
5953
5954 kfree(ieee->wpa_ie);
5955 ieee->wpa_ie = buf;
5956 ieee->wpa_ie_len = param->u.wpa_ie.len;
5957
5958 } else {
5959 kfree(ieee->wpa_ie);
5960 ieee->wpa_ie = NULL;
5961 ieee->wpa_ie_len = 0;
5962 }
5963
5964 ipw2100_wpa_assoc_frame(priv, ieee->wpa_ie, ieee->wpa_ie_len);
5965
5966 return 0;
5967}
5968
5969/* implementation borrowed from hostap driver */
5970
5971static int ipw2100_wpa_set_encryption(struct net_device *dev,
5972 struct ipw2100_param *param, int param_len){
5973
5974 int ret = 0;
5975 struct ipw2100_priv *priv = ieee80211_priv(dev);
5976 struct ieee80211_device *ieee = priv->ieee;
5977 struct ieee80211_crypto_ops *ops;
5978 struct ieee80211_crypt_data **crypt;
5979
5980 struct ieee80211_security sec = {
5981 .flags = 0,
5982 };
5983
5984 param->u.crypt.err = 0;
5985 param->u.crypt.alg[IPW2100_CRYPT_ALG_NAME_LEN - 1] = '\0';
5986
5987 if (param_len !=
5988 (int) ((char *) param->u.crypt.key - (char *) param) +
5989 param->u.crypt.key_len){
5990 IPW_DEBUG_INFO("Len mismatch %d, %d\n", param_len, param->u.crypt.key_len);
5991 return -EINVAL;
5992 }
5993 if (param->sta_addr[0] == 0xff && param->sta_addr[1] == 0xff &&
5994 param->sta_addr[2] == 0xff && param->sta_addr[3] == 0xff &&
5995 param->sta_addr[4] == 0xff && param->sta_addr[5] == 0xff) {
5996 if (param->u.crypt.idx >= WEP_KEYS)
5997 return -EINVAL;
5998 crypt = &ieee->crypt[param->u.crypt.idx];
5999 } else {
6000 return -EINVAL;
6001 }
6002
6003 if (strcmp(param->u.crypt.alg, "none") == 0) {
6004 if (crypt){
6005 sec.enabled = 0;
6006 sec.level = SEC_LEVEL_0;
6007 sec.flags |= SEC_ENABLED | SEC_LEVEL;
6008 ieee80211_crypt_delayed_deinit(ieee, crypt);
6009 }
6010 goto done;
6011 }
6012 sec.enabled = 1;
6013 sec.flags |= SEC_ENABLED;
6014
6015 ops = ieee80211_get_crypto_ops(param->u.crypt.alg);
6016 if (ops == NULL && strcmp(param->u.crypt.alg, "WEP") == 0) {
6017 request_module("ieee80211_crypt_wep");
6018 ops = ieee80211_get_crypto_ops(param->u.crypt.alg);
6019 } else if (ops == NULL && strcmp(param->u.crypt.alg, "TKIP") == 0) {
6020 request_module("ieee80211_crypt_tkip");
6021 ops = ieee80211_get_crypto_ops(param->u.crypt.alg);
6022 } else if (ops == NULL && strcmp(param->u.crypt.alg, "CCMP") == 0) {
6023 request_module("ieee80211_crypt_ccmp");
6024 ops = ieee80211_get_crypto_ops(param->u.crypt.alg);
6025 }
6026 if (ops == NULL) {
6027 IPW_DEBUG_INFO("%s: unknown crypto alg '%s'\n",
6028 dev->name, param->u.crypt.alg);
6029 param->u.crypt.err = IPW2100_CRYPT_ERR_UNKNOWN_ALG;
6030 ret = -EINVAL;
6031 goto done;
6032 }
6033
6034 if (*crypt == NULL || (*crypt)->ops != ops) {
6035 struct ieee80211_crypt_data *new_crypt;
6036
6037 ieee80211_crypt_delayed_deinit(ieee, crypt);
6038
6039 new_crypt = (struct ieee80211_crypt_data *)
6040 kmalloc(sizeof(struct ieee80211_crypt_data), GFP_KERNEL);
6041 if (new_crypt == NULL) {
6042 ret = -ENOMEM;
6043 goto done;
6044 }
6045 memset(new_crypt, 0, sizeof(struct ieee80211_crypt_data));
6046 new_crypt->ops = ops;
6047 if (new_crypt->ops && try_module_get(new_crypt->ops->owner))
6048 new_crypt->priv = new_crypt->ops->init(param->u.crypt.idx);
6049
6050 if (new_crypt->priv == NULL) {
6051 kfree(new_crypt);
6052 param->u.crypt.err =
6053 IPW2100_CRYPT_ERR_CRYPT_INIT_FAILED;
6054 ret = -EINVAL;
6055 goto done;
6056 }
6057
6058 *crypt = new_crypt;
6059 }
6060
6061 if (param->u.crypt.key_len > 0 && (*crypt)->ops->set_key &&
6062 (*crypt)->ops->set_key(param->u.crypt.key,
6063 param->u.crypt.key_len, param->u.crypt.seq,
6064 (*crypt)->priv) < 0) {
6065 IPW_DEBUG_INFO("%s: key setting failed\n",
6066 dev->name);
6067 param->u.crypt.err = IPW2100_CRYPT_ERR_KEY_SET_FAILED;
6068 ret = -EINVAL;
6069 goto done;
6070 }
6071
6072 if (param->u.crypt.set_tx){
6073 ieee->tx_keyidx = param->u.crypt.idx;
6074 sec.active_key = param->u.crypt.idx;
6075 sec.flags |= SEC_ACTIVE_KEY;
6076 }
6077
6078 if (ops->name != NULL){
6079
6080 if (strcmp(ops->name, "WEP") == 0) {
6081 memcpy(sec.keys[param->u.crypt.idx], param->u.crypt.key, param->u.crypt.key_len);
6082 sec.key_sizes[param->u.crypt.idx] = param->u.crypt.key_len;
6083 sec.flags |= (1 << param->u.crypt.idx);
6084 sec.flags |= SEC_LEVEL;
6085 sec.level = SEC_LEVEL_1;
6086 } else if (strcmp(ops->name, "TKIP") == 0) {
6087 sec.flags |= SEC_LEVEL;
6088 sec.level = SEC_LEVEL_2;
6089 } else if (strcmp(ops->name, "CCMP") == 0) {
6090 sec.flags |= SEC_LEVEL;
6091 sec.level = SEC_LEVEL_3;
6092 }
6093 }
6094 done:
6095 if (ieee->set_security)
6096 ieee->set_security(ieee->dev, &sec);
6097
6098 /* Do not reset port if card is in Managed mode since resetting will
6099 * generate new IEEE 802.11 authentication which may end up in looping
6100 * with IEEE 802.1X. If your hardware requires a reset after WEP
6101 * configuration (for example... Prism2), implement the reset_port in
6102	 * the callback structures used to initialize the 802.11 stack. */
6103 if (ieee->reset_on_keychange &&
6104 ieee->iw_mode != IW_MODE_INFRA &&
6105 ieee->reset_port &&
6106 ieee->reset_port(dev)) {
6107 IPW_DEBUG_INFO("%s: reset_port failed\n", dev->name);
6108 param->u.crypt.err = IPW2100_CRYPT_ERR_CARD_CONF_FAILED;
6109 return -EINVAL;
6110 }
6111
6112 return ret;
6113}
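
The length check at the top of ipw2100_wpa_set_encryption() only accepts a buffer that is exactly the offset of u.crypt.key[] within struct ipw2100_param plus the key length (and only a broadcast sta_addr of all 0xff). A small sketch of how a caller would size such a request; the helper name is invented, and offsetof() (from <stddef.h> or the kernel's stddef) simply mirrors the pointer arithmetic in the check:

#if 0
static size_t example_encryption_request_len(size_t key_len)
{
	/* Same value the kernel computes with pointer subtraction above */
	return offsetof(struct ipw2100_param, u.crypt.key) + key_len;
}
#endif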
6114
6115
6116static int ipw2100_wpa_supplicant(struct net_device *dev, struct iw_point *p){
6117
6118 struct ipw2100_param *param;
6119 int ret=0;
6120
6121 IPW_DEBUG_IOCTL("wpa_supplicant: len=%d\n", p->length);
6122
6123 if (p->length < sizeof(struct ipw2100_param) || !p->pointer)
6124 return -EINVAL;
6125
6126 param = (struct ipw2100_param *)kmalloc(p->length, GFP_KERNEL);
6127 if (param == NULL)
6128 return -ENOMEM;
6129
6130 if (copy_from_user(param, p->pointer, p->length)){
6131 kfree(param);
6132 return -EFAULT;
6133 }
6134
6135 switch (param->cmd){
6136
6137 case IPW2100_CMD_SET_WPA_PARAM:
6138 ret = ipw2100_wpa_set_param(dev, param->u.wpa_param.name,
6139 param->u.wpa_param.value);
6140 break;
6141
6142 case IPW2100_CMD_SET_WPA_IE:
6143 ret = ipw2100_wpa_set_wpa_ie(dev, param, p->length);
6144 break;
6145
6146 case IPW2100_CMD_SET_ENCRYPTION:
6147 ret = ipw2100_wpa_set_encryption(dev, param, p->length);
6148 break;
6149
6150 case IPW2100_CMD_MLME:
6151 ret = ipw2100_wpa_mlme(dev, param->u.mlme.command,
6152 param->u.mlme.reason_code);
6153 break;
6154
6155 default:
6156 IPW_DEBUG_ERROR("%s: Unknown WPA supplicant request: %d\n",
6157 dev->name, param->cmd);
6158 ret = -EOPNOTSUPP;
6159
6160 }
6161
6162 if (ret == 0 && copy_to_user(p->pointer, param, p->length))
6163 ret = -EFAULT;
6164
6165 kfree(param);
6166 return ret;
6167}
6168#endif /* CONFIG_IEEE80211_WPA */
6169
6170static int ipw2100_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6171{
6172#ifdef CONFIG_IEEE80211_WPA
6173 struct iwreq *wrq = (struct iwreq *) rq;
6174 int ret=-1;
6175 switch (cmd){
6176 case IPW2100_IOCTL_WPA_SUPPLICANT:
6177 ret = ipw2100_wpa_supplicant(dev, &wrq->u.data);
6178 return ret;
6179
6180 default:
6181 return -EOPNOTSUPP;
6182 }
6183
6184#endif /* CONFIG_IEEE80211_WPA */
6185
6186 return -EOPNOTSUPP;
6187}
6188
6189
6190static void ipw_ethtool_get_drvinfo(struct net_device *dev,
6191 struct ethtool_drvinfo *info)
6192{
6193 struct ipw2100_priv *priv = ieee80211_priv(dev);
6194 char fw_ver[64], ucode_ver[64];
6195
6196 strcpy(info->driver, DRV_NAME);
6197 strcpy(info->version, DRV_VERSION);
6198
6199 ipw2100_get_fwversion(priv, fw_ver, sizeof(fw_ver));
6200 ipw2100_get_ucodeversion(priv, ucode_ver, sizeof(ucode_ver));
6201
6202 snprintf(info->fw_version, sizeof(info->fw_version), "%s:%d:%s",
6203 fw_ver, priv->eeprom_version, ucode_ver);
6204
6205 strcpy(info->bus_info, pci_name(priv->pci_dev));
6206}
6207
6208static u32 ipw2100_ethtool_get_link(struct net_device *dev)
6209{
6210 struct ipw2100_priv *priv = ieee80211_priv(dev);
6211 return (priv->status & STATUS_ASSOCIATED) ? 1 : 0;
6212}
6213
6214
6215static struct ethtool_ops ipw2100_ethtool_ops = {
6216 .get_link = ipw2100_ethtool_get_link,
6217 .get_drvinfo = ipw_ethtool_get_drvinfo,
6218};
6219
6220static void ipw2100_hang_check(void *adapter)
6221{
6222 struct ipw2100_priv *priv = adapter;
6223 unsigned long flags;
6224 u32 rtc = 0xa5a5a5a5;
6225 u32 len = sizeof(rtc);
6226 int restart = 0;
6227
6228 spin_lock_irqsave(&priv->low_lock, flags);
6229
6230 if (priv->fatal_error != 0) {
6231 /* If fatal_error is set then we need to restart */
6232 IPW_DEBUG_INFO("%s: Hardware fatal error detected.\n",
6233 priv->net_dev->name);
6234
6235 restart = 1;
6236 } else if (ipw2100_get_ordinal(priv, IPW_ORD_RTC_TIME, &rtc, &len) ||
6237 (rtc == priv->last_rtc)) {
6238 /* Check if firmware is hung */
6239 IPW_DEBUG_INFO("%s: Firmware RTC stalled.\n",
6240 priv->net_dev->name);
6241
6242 restart = 1;
6243 }
6244
6245 if (restart) {
6246 /* Kill timer */
6247 priv->stop_hang_check = 1;
6248 priv->hangs++;
6249
6250 /* Restart the NIC */
6251 schedule_reset(priv);
6252 }
6253
6254 priv->last_rtc = rtc;
6255
6256 if (!priv->stop_hang_check)
6257 queue_delayed_work(priv->workqueue, &priv->hang_check, HZ / 2);
6258
6259 spin_unlock_irqrestore(&priv->low_lock, flags);
6260}
6261
6262
6263static void ipw2100_rf_kill(void *adapter)
6264{
6265 struct ipw2100_priv *priv = adapter;
6266 unsigned long flags;
6267
6268 spin_lock_irqsave(&priv->low_lock, flags);
6269
6270 if (rf_kill_active(priv)) {
6271 IPW_DEBUG_RF_KILL("RF Kill active, rescheduling GPIO check\n");
6272 if (!priv->stop_rf_kill)
6273 queue_delayed_work(priv->workqueue, &priv->rf_kill, HZ);
6274 goto exit_unlock;
6275 }
6276
6277 /* RF Kill is now disabled, so bring the device back up */
6278
6279 if (!(priv->status & STATUS_RF_KILL_MASK)) {
6280 IPW_DEBUG_RF_KILL("HW RF Kill no longer active, restarting "
6281 "device\n");
6282 schedule_reset(priv);
6283 } else
6284 IPW_DEBUG_RF_KILL("HW RF Kill deactivated. SW RF Kill still "
6285 "enabled\n");
6286
6287 exit_unlock:
6288 spin_unlock_irqrestore(&priv->low_lock, flags);
6289}
6290
6291static void ipw2100_irq_tasklet(struct ipw2100_priv *priv);
6292
6293/* Look into using netdev destructor to shutdown ieee80211? */
6294
6295static struct net_device *ipw2100_alloc_device(
6296 struct pci_dev *pci_dev,
6297 char *base_addr,
6298 unsigned long mem_start,
6299 unsigned long mem_len)
6300{
6301 struct ipw2100_priv *priv;
6302 struct net_device *dev;
6303
6304 dev = alloc_ieee80211(sizeof(struct ipw2100_priv));
6305 if (!dev)
6306 return NULL;
6307 priv = ieee80211_priv(dev);
6308 priv->ieee = netdev_priv(dev);
6309 priv->pci_dev = pci_dev;
6310 priv->net_dev = dev;
6311
6312 priv->ieee->hard_start_xmit = ipw2100_tx;
6313 priv->ieee->set_security = shim__set_security;
6314
6315 dev->open = ipw2100_open;
6316 dev->stop = ipw2100_close;
6317 dev->init = ipw2100_net_init;
6318 dev->do_ioctl = ipw2100_ioctl;
6319 dev->get_stats = ipw2100_stats;
6320 dev->ethtool_ops = &ipw2100_ethtool_ops;
6321 dev->tx_timeout = ipw2100_tx_timeout;
6322 dev->wireless_handlers = &ipw2100_wx_handler_def;
6323 dev->get_wireless_stats = ipw2100_wx_wireless_stats;
6324 dev->set_mac_address = ipw2100_set_address;
6325 dev->watchdog_timeo = 3*HZ;
6326 dev->irq = 0;
6327
6328 dev->base_addr = (unsigned long)base_addr;
6329 dev->mem_start = mem_start;
6330 dev->mem_end = dev->mem_start + mem_len - 1;
6331
6332 /* NOTE: We don't use the wireless_handlers hook
6333 * in dev as the system will start throwing WX requests
6334 * to us before we're actually initialized and it just
6335 * ends up causing problems. So, we just handle
6336 * the WX extensions through the ipw2100_ioctl interface */
6337
6338
6339	/* memset() puts everything to 0, so we only have to explicitly set
6340 * those values that need to be something else */
6341
6342 /* If power management is turned on, default to AUTO mode */
6343 priv->power_mode = IPW_POWER_AUTO;
6344
6345
6346
6347#ifdef CONFIG_IEEE80211_WPA
6348 priv->ieee->wpa_enabled = 0;
6349 priv->ieee->tkip_countermeasures = 0;
6350 priv->ieee->drop_unencrypted = 0;
6351 priv->ieee->privacy_invoked = 0;
6352 priv->ieee->ieee802_1x = 1;
6353#endif /* CONFIG_IEEE80211_WPA */
6354
6355 /* Set module parameters */
6356 switch (mode) {
6357 case 1:
6358 priv->ieee->iw_mode = IW_MODE_ADHOC;
6359 break;
6360#ifdef CONFIG_IPW2100_MONITOR
6361 case 2:
6362 priv->ieee->iw_mode = IW_MODE_MONITOR;
6363 break;
6364#endif
6365 default:
6366 case 0:
6367 priv->ieee->iw_mode = IW_MODE_INFRA;
6368 break;
6369 }
6370
6371 if (disable == 1)
6372 priv->status |= STATUS_RF_KILL_SW;
6373
6374 if (channel != 0 &&
6375 ((channel >= REG_MIN_CHANNEL) &&
6376 (channel <= REG_MAX_CHANNEL))) {
6377 priv->config |= CFG_STATIC_CHANNEL;
6378 priv->channel = channel;
6379 }
6380
6381 if (associate)
6382 priv->config |= CFG_ASSOCIATE;
6383
6384 priv->beacon_interval = DEFAULT_BEACON_INTERVAL;
6385 priv->short_retry_limit = DEFAULT_SHORT_RETRY_LIMIT;
6386 priv->long_retry_limit = DEFAULT_LONG_RETRY_LIMIT;
6387 priv->rts_threshold = DEFAULT_RTS_THRESHOLD | RTS_DISABLED;
6388 priv->frag_threshold = DEFAULT_FTS | FRAG_DISABLED;
6389 priv->tx_power = IPW_TX_POWER_DEFAULT;
6390 priv->tx_rates = DEFAULT_TX_RATES;
6391
6392 strcpy(priv->nick, "ipw2100");
6393
6394 spin_lock_init(&priv->low_lock);
6395 sema_init(&priv->action_sem, 1);
6396 sema_init(&priv->adapter_sem, 1);
6397
6398 init_waitqueue_head(&priv->wait_command_queue);
6399
6400 netif_carrier_off(dev);
6401
6402 INIT_LIST_HEAD(&priv->msg_free_list);
6403 INIT_LIST_HEAD(&priv->msg_pend_list);
6404 INIT_STAT(&priv->msg_free_stat);
6405 INIT_STAT(&priv->msg_pend_stat);
6406
6407 INIT_LIST_HEAD(&priv->tx_free_list);
6408 INIT_LIST_HEAD(&priv->tx_pend_list);
6409 INIT_STAT(&priv->tx_free_stat);
6410 INIT_STAT(&priv->tx_pend_stat);
6411
6412 INIT_LIST_HEAD(&priv->fw_pend_list);
6413 INIT_STAT(&priv->fw_pend_stat);
6414
6415
6416#ifdef CONFIG_SOFTWARE_SUSPEND2
6417 priv->workqueue = create_workqueue(DRV_NAME, 0);
6418#else
6419 priv->workqueue = create_workqueue(DRV_NAME);
6420#endif
6421 INIT_WORK(&priv->reset_work,
6422 (void (*)(void *))ipw2100_reset_adapter, priv);
6423 INIT_WORK(&priv->security_work,
6424 (void (*)(void *))ipw2100_security_work, priv);
6425 INIT_WORK(&priv->wx_event_work,
6426 (void (*)(void *))ipw2100_wx_event_work, priv);
6427 INIT_WORK(&priv->hang_check, ipw2100_hang_check, priv);
6428 INIT_WORK(&priv->rf_kill, ipw2100_rf_kill, priv);
6429
6430 tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
6431 ipw2100_irq_tasklet, (unsigned long)priv);
6432
6433 /* NOTE: We do not start the deferred work for status checks yet */
6434 priv->stop_rf_kill = 1;
6435 priv->stop_hang_check = 1;
6436
6437 return dev;
6438}
6439
6440static int ipw2100_pci_init_one(struct pci_dev *pci_dev,
6441 const struct pci_device_id *ent)
6442{
6443 unsigned long mem_start, mem_len, mem_flags;
6444 char *base_addr = NULL;
6445 struct net_device *dev = NULL;
6446 struct ipw2100_priv *priv = NULL;
6447 int err = 0;
6448 int registered = 0;
6449 u32 val;
6450
6451 IPW_DEBUG_INFO("enter\n");
6452
6453 mem_start = pci_resource_start(pci_dev, 0);
6454 mem_len = pci_resource_len(pci_dev, 0);
6455 mem_flags = pci_resource_flags(pci_dev, 0);
6456
6457 if ((mem_flags & IORESOURCE_MEM) != IORESOURCE_MEM) {
6458 IPW_DEBUG_INFO("weird - resource type is not memory\n");
6459 err = -ENODEV;
6460 goto fail;
6461 }
6462
6463 base_addr = ioremap_nocache(mem_start, mem_len);
6464 if (!base_addr) {
6465 printk(KERN_WARNING DRV_NAME
6466 "Error calling ioremap_nocache.\n");
6467 err = -EIO;
6468 goto fail;
6469 }
6470
6471 /* allocate and initialize our net_device */
6472 dev = ipw2100_alloc_device(pci_dev, base_addr, mem_start, mem_len);
6473 if (!dev) {
6474 printk(KERN_WARNING DRV_NAME
6475 "Error calling ipw2100_alloc_device.\n");
6476 err = -ENOMEM;
6477 goto fail;
6478 }
6479
6480 /* set up PCI mappings for device */
6481 err = pci_enable_device(pci_dev);
6482 if (err) {
6483 printk(KERN_WARNING DRV_NAME
6484 "Error calling pci_enable_device.\n");
6485 return err;
6486 }
6487
6488 priv = ieee80211_priv(dev);
6489
6490 pci_set_master(pci_dev);
6491 pci_set_drvdata(pci_dev, priv);
6492
6493 err = pci_set_dma_mask(pci_dev, DMA_32BIT_MASK);
6494 if (err) {
6495 printk(KERN_WARNING DRV_NAME
6496 "Error calling pci_set_dma_mask.\n");
6497 pci_disable_device(pci_dev);
6498 return err;
6499 }
6500
6501 err = pci_request_regions(pci_dev, DRV_NAME);
6502 if (err) {
6503 printk(KERN_WARNING DRV_NAME
6504 "Error calling pci_request_regions.\n");
6505 pci_disable_device(pci_dev);
6506 return err;
6507 }
6508
6509 /* We disable the RETRY_TIMEOUT register (0x41) to keep
6510 * PCI Tx retries from interfering with C3 CPU state */
6511 pci_read_config_dword(pci_dev, 0x40, &val);
6512 if ((val & 0x0000ff00) != 0)
6513 pci_write_config_dword(pci_dev, 0x40, val & 0xffff00ff);
6514
6515 pci_set_power_state(pci_dev, PCI_D0);
6516
6517 if (!ipw2100_hw_is_adapter_in_system(dev)) {
6518 printk(KERN_WARNING DRV_NAME
6519 "Device not found via register read.\n");
6520 err = -ENODEV;
6521 goto fail;
6522 }
6523
6524 SET_NETDEV_DEV(dev, &pci_dev->dev);
6525
6526 /* Force interrupts to be shut off on the device */
6527 priv->status |= STATUS_INT_ENABLED;
6528 ipw2100_disable_interrupts(priv);
6529
6530 /* Allocate and initialize the Tx/Rx queues and lists */
6531 if (ipw2100_queues_allocate(priv)) {
6532 printk(KERN_WARNING DRV_NAME
6533		       "Error calling ipw2100_queues_allocate.\n");
6534 err = -ENOMEM;
6535 goto fail;
6536 }
6537 ipw2100_queues_initialize(priv);
6538
6539 err = request_irq(pci_dev->irq,
6540 ipw2100_interrupt, SA_SHIRQ,
6541 dev->name, priv);
6542 if (err) {
6543 printk(KERN_WARNING DRV_NAME
6544 "Error calling request_irq: %d.\n",
6545 pci_dev->irq);
6546 goto fail;
6547 }
6548 dev->irq = pci_dev->irq;
6549
6550 IPW_DEBUG_INFO("Attempting to register device...\n");
6551
6552 SET_MODULE_OWNER(dev);
6553
6554 printk(KERN_INFO DRV_NAME
6555 ": Detected Intel PRO/Wireless 2100 Network Connection\n");
6556
6557 /* Bring up the interface. Pre 0.46, after we registered the
6558 * network device we would call ipw2100_up. This introduced a race
6559 * condition with newer hotplug configurations (network was coming
6560 * up and making calls before the device was initialized).
6561 *
6562 * If we called ipw2100_up before we registered the device, then the
6563 * device name wasn't registered. So, we instead use the net_dev->init
6564 * member to call a function that then just turns and calls ipw2100_up.
6565 * net_dev->init is called after name allocation but before the
6566 * notifier chain is called */
6567 down(&priv->action_sem);
6568 err = register_netdev(dev);
6569 if (err) {
6570 printk(KERN_WARNING DRV_NAME
6571 "Error calling register_netdev.\n");
6572 goto fail_unlock;
6573 }
6574 registered = 1;
6575
6576 IPW_DEBUG_INFO("%s: Bound to %s\n", dev->name, pci_name(pci_dev));
6577
6578 /* perform this after register_netdev so that dev->name is set */
6579 sysfs_create_group(&pci_dev->dev.kobj, &ipw2100_attribute_group);
6580 netif_carrier_off(dev);
6581
6582 /* If the RF Kill switch is disabled, go ahead and complete the
6583 * startup sequence */
6584 if (!(priv->status & STATUS_RF_KILL_MASK)) {
6585 /* Enable the adapter - sends HOST_COMPLETE */
6586 if (ipw2100_enable_adapter(priv)) {
6587 printk(KERN_WARNING DRV_NAME
6588 ": %s: failed in call to enable adapter.\n",
6589 priv->net_dev->name);
6590 ipw2100_hw_stop_adapter(priv);
6591 err = -EIO;
6592 goto fail_unlock;
6593 }
6594
6595 /* Start a scan . . . */
6596 ipw2100_set_scan_options(priv);
6597 ipw2100_start_scan(priv);
6598 }
6599
6600 IPW_DEBUG_INFO("exit\n");
6601
6602 priv->status |= STATUS_INITIALIZED;
6603
6604 up(&priv->action_sem);
6605
6606 return 0;
6607
6608 fail_unlock:
6609 up(&priv->action_sem);
6610
6611 fail:
6612 if (dev) {
6613 if (registered)
6614 unregister_netdev(dev);
6615
6616 ipw2100_hw_stop_adapter(priv);
6617
6618 ipw2100_disable_interrupts(priv);
6619
6620 if (dev->irq)
6621 free_irq(dev->irq, priv);
6622
6623 ipw2100_kill_workqueue(priv);
6624
6625 /* These are safe to call even if they weren't allocated */
6626 ipw2100_queues_free(priv);
6627 sysfs_remove_group(&pci_dev->dev.kobj, &ipw2100_attribute_group);
6628
6629 free_ieee80211(dev);
6630 pci_set_drvdata(pci_dev, NULL);
6631 }
6632
6633 if (base_addr)
6634 iounmap((char*)base_addr);
6635
6636 pci_release_regions(pci_dev);
6637 pci_disable_device(pci_dev);
6638
6639 return err;
6640}
6641
6642static void __devexit ipw2100_pci_remove_one(struct pci_dev *pci_dev)
6643{
6644 struct ipw2100_priv *priv = pci_get_drvdata(pci_dev);
6645 struct net_device *dev;
6646
6647 if (priv) {
6648 down(&priv->action_sem);
6649
6650 priv->status &= ~STATUS_INITIALIZED;
6651
6652 dev = priv->net_dev;
6653 sysfs_remove_group(&pci_dev->dev.kobj, &ipw2100_attribute_group);
6654
6655#ifdef CONFIG_PM
6656 if (ipw2100_firmware.version)
6657 ipw2100_release_firmware(priv, &ipw2100_firmware);
6658#endif
6659 /* Take down the hardware */
6660 ipw2100_down(priv);
6661
6662 /* Release the semaphore so that the network subsystem can
6663 * complete any needed calls into the driver... */
6664 up(&priv->action_sem);
6665
6666 /* Unregister the device first - this results in close()
6667 * being called if the device is open. If we free storage
6668 * first, then close() will crash. */
6669 unregister_netdev(dev);
6670
6671 /* ipw2100_down will ensure that there is no more pending work
6672 * in the workqueue's, so we can safely remove them now. */
6673 ipw2100_kill_workqueue(priv);
6674
6675 ipw2100_queues_free(priv);
6676
6677 /* Free potential debugging firmware snapshot */
6678 ipw2100_snapshot_free(priv);
6679
6680 if (dev->irq)
6681 free_irq(dev->irq, priv);
6682
6683 if (dev->base_addr)
6684 iounmap((unsigned char *)dev->base_addr);
6685
6686 free_ieee80211(dev);
6687 }
6688
6689 pci_release_regions(pci_dev);
6690 pci_disable_device(pci_dev);
6691
6692 IPW_DEBUG_INFO("exit\n");
6693}
6694
6695
6696#ifdef CONFIG_PM
6697#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11)
6698static int ipw2100_suspend(struct pci_dev *pci_dev, u32 state)
6699#else
6700static int ipw2100_suspend(struct pci_dev *pci_dev, pm_message_t state)
6701#endif
6702{
6703 struct ipw2100_priv *priv = pci_get_drvdata(pci_dev);
6704 struct net_device *dev = priv->net_dev;
6705
6706 IPW_DEBUG_INFO("%s: Going into suspend...\n",
6707 dev->name);
6708
6709 down(&priv->action_sem);
6710 if (priv->status & STATUS_INITIALIZED) {
6711 /* Take down the device; powers it off, etc. */
6712 ipw2100_down(priv);
6713 }
6714
6715 /* Remove the PRESENT state of the device */
6716 netif_device_detach(dev);
6717
6718 pci_save_state(pci_dev);
6719 pci_disable_device (pci_dev);
6720 pci_set_power_state(pci_dev, PCI_D3hot);
6721
6722 up(&priv->action_sem);
6723
6724 return 0;
6725}
6726
6727static int ipw2100_resume(struct pci_dev *pci_dev)
6728{
6729 struct ipw2100_priv *priv = pci_get_drvdata(pci_dev);
6730 struct net_device *dev = priv->net_dev;
6731 u32 val;
6732
6733 if (IPW2100_PM_DISABLED)
6734 return 0;
6735
6736 down(&priv->action_sem);
6737
6738 IPW_DEBUG_INFO("%s: Coming out of suspend...\n",
6739 dev->name);
6740
6741 pci_set_power_state(pci_dev, PCI_D0);
6742 pci_enable_device(pci_dev);
6743 pci_restore_state(pci_dev);
6744
6745 /*
6746 * Suspend/Resume resets the PCI configuration space, so we have to
6747 * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
6748 * from interfering with C3 CPU state. pci_restore_state won't help
6749 * here since it only restores the first 64 bytes pci config header.
6750 */
6751 pci_read_config_dword(pci_dev, 0x40, &val);
6752 if ((val & 0x0000ff00) != 0)
6753 pci_write_config_dword(pci_dev, 0x40, val & 0xffff00ff);
6754
6755 /* Set the device back into the PRESENT state; this will also wake
6756 * the queue if needed */
6757 netif_device_attach(dev);
6758
6759 /* Bring the device back up */
6760 if (!(priv->status & STATUS_RF_KILL_SW))
6761 ipw2100_up(priv, 0);
6762
6763 up(&priv->action_sem);
6764
6765 return 0;
6766}
6767#endif
6768
6769
6770#define IPW2100_DEV_ID(x) { PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, x }
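/* Each IPW2100_DEV_ID entry matches vendor PCI_VENDOR_ID_INTEL, device 0x1043
 * and subsystem vendor 0x8086, so the table below distinguishes the individual
 * adapters purely by their subsystem device ID (the x argument). */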
6771
6772static struct pci_device_id ipw2100_pci_id_table[] __devinitdata = {
6773 IPW2100_DEV_ID(0x2520), /* IN 2100A mPCI 3A */
6774 IPW2100_DEV_ID(0x2521), /* IN 2100A mPCI 3B */
6775 IPW2100_DEV_ID(0x2524), /* IN 2100A mPCI 3B */
6776 IPW2100_DEV_ID(0x2525), /* IN 2100A mPCI 3B */
6777 IPW2100_DEV_ID(0x2526), /* IN 2100A mPCI Gen A3 */
6778 IPW2100_DEV_ID(0x2522), /* IN 2100 mPCI 3B */
6779 IPW2100_DEV_ID(0x2523), /* IN 2100 mPCI 3A */
6780 IPW2100_DEV_ID(0x2527), /* IN 2100 mPCI 3B */
6781 IPW2100_DEV_ID(0x2528), /* IN 2100 mPCI 3B */
6782 IPW2100_DEV_ID(0x2529), /* IN 2100 mPCI 3B */
6783 IPW2100_DEV_ID(0x252B), /* IN 2100 mPCI 3A */
6784 IPW2100_DEV_ID(0x252C), /* IN 2100 mPCI 3A */
6785 IPW2100_DEV_ID(0x252D), /* IN 2100 mPCI 3A */
6786
6787 IPW2100_DEV_ID(0x2550), /* IB 2100A mPCI 3B */
6788 IPW2100_DEV_ID(0x2551), /* IB 2100 mPCI 3B */
6789 IPW2100_DEV_ID(0x2553), /* IB 2100 mPCI 3B */
6790 IPW2100_DEV_ID(0x2554), /* IB 2100 mPCI 3B */
6791 IPW2100_DEV_ID(0x2555), /* IB 2100 mPCI 3B */
6792
6793 IPW2100_DEV_ID(0x2560), /* DE 2100A mPCI 3A */
6794 IPW2100_DEV_ID(0x2562), /* DE 2100A mPCI 3A */
6795 IPW2100_DEV_ID(0x2563), /* DE 2100A mPCI 3A */
6796 IPW2100_DEV_ID(0x2561), /* DE 2100 mPCI 3A */
6797 IPW2100_DEV_ID(0x2565), /* DE 2100 mPCI 3A */
6798 IPW2100_DEV_ID(0x2566), /* DE 2100 mPCI 3A */
6799 IPW2100_DEV_ID(0x2567), /* DE 2100 mPCI 3A */
6800
6801 IPW2100_DEV_ID(0x2570), /* GA 2100 mPCI 3B */
6802
6803 IPW2100_DEV_ID(0x2580), /* TO 2100A mPCI 3B */
6804 IPW2100_DEV_ID(0x2582), /* TO 2100A mPCI 3B */
6805 IPW2100_DEV_ID(0x2583), /* TO 2100A mPCI 3B */
6806 IPW2100_DEV_ID(0x2581), /* TO 2100 mPCI 3B */
6807 IPW2100_DEV_ID(0x2585), /* TO 2100 mPCI 3B */
6808 IPW2100_DEV_ID(0x2586), /* TO 2100 mPCI 3B */
6809 IPW2100_DEV_ID(0x2587), /* TO 2100 mPCI 3B */
6810
6811 IPW2100_DEV_ID(0x2590), /* SO 2100A mPCI 3B */
6812 IPW2100_DEV_ID(0x2592), /* SO 2100A mPCI 3B */
6813 IPW2100_DEV_ID(0x2591), /* SO 2100 mPCI 3B */
6814 IPW2100_DEV_ID(0x2593), /* SO 2100 mPCI 3B */
6815 IPW2100_DEV_ID(0x2596), /* SO 2100 mPCI 3B */
6816 IPW2100_DEV_ID(0x2598), /* SO 2100 mPCI 3B */
6817
6818 IPW2100_DEV_ID(0x25A0), /* HP 2100 mPCI 3B */
6819 {0,},
6820};
6821
6822MODULE_DEVICE_TABLE(pci, ipw2100_pci_id_table);
6823
6824static struct pci_driver ipw2100_pci_driver = {
6825 .name = DRV_NAME,
6826 .id_table = ipw2100_pci_id_table,
6827 .probe = ipw2100_pci_init_one,
6828 .remove = __devexit_p(ipw2100_pci_remove_one),
6829#ifdef CONFIG_PM
6830 .suspend = ipw2100_suspend,
6831 .resume = ipw2100_resume,
6832#endif
6833};
6834
6835
6836/**
6837 * Initialize the ipw2100 driver/module
6838 *
6839 * @returns 0 if ok, < 0 errno code on error.
6840 *
6841 * Note: we cannot init the /proc stuff until the PCI driver is there,
6842 * or we risk an unlikely race condition on someone accessing
6843 * uninitialized data in the PCI dev struct through /proc.
6844 */
6845static int __init ipw2100_init(void)
6846{
6847 int ret;
6848
6849 printk(KERN_INFO DRV_NAME ": %s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
6850 printk(KERN_INFO DRV_NAME ": %s\n", DRV_COPYRIGHT);
6851
6852#ifdef CONFIG_IEEE80211_NOWEP
6853 IPW_DEBUG_INFO(DRV_NAME ": Compiled with WEP disabled.\n");
6854#endif
6855
6856 ret = pci_module_init(&ipw2100_pci_driver);
6857
6858#ifdef CONFIG_IPW_DEBUG
6859 ipw2100_debug_level = debug;
6860 driver_create_file(&ipw2100_pci_driver.driver,
6861 &driver_attr_debug_level);
6862#endif
6863
6864 return ret;
6865}
6866
6867
6868/**
6869 * Cleanup ipw2100 driver registration
6870 */
6871static void __exit ipw2100_exit(void)
6872{
6873 /* FIXME: IPG: check that we have no instances of the devices open */
6874#ifdef CONFIG_IPW_DEBUG
6875 driver_remove_file(&ipw2100_pci_driver.driver,
6876 &driver_attr_debug_level);
6877#endif
6878 pci_unregister_driver(&ipw2100_pci_driver);
6879}
6880
6881module_init(ipw2100_init);
6882module_exit(ipw2100_exit);
6883
6884#define WEXT_USECHANNELS 1
6885
6886const long ipw2100_frequencies[] = {
6887 2412, 2417, 2422, 2427,
6888 2432, 2437, 2442, 2447,
6889 2452, 2457, 2462, 2467,
6890 2472, 2484
6891};
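/* These are the 802.11b channel center frequencies in MHz; index i corresponds
 * to channel i + 1, so 2412 MHz is channel 1 and 2484 MHz is channel 14. */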
6892
6893#define FREQ_COUNT (sizeof(ipw2100_frequencies) / \
6894 sizeof(ipw2100_frequencies[0]))
6895
6896const long ipw2100_rates_11b[] = {
6897 1000000,
6898 2000000,
6899 5500000,
6900 11000000
6901};
6902
6903#define RATE_COUNT (sizeof(ipw2100_rates_11b) / sizeof(ipw2100_rates_11b[0]))
6904
6905static int ipw2100_wx_get_name(struct net_device *dev,
6906 struct iw_request_info *info,
6907 union iwreq_data *wrqu, char *extra)
6908{
6909 /*
6910 * This can be called at any time. No action lock required
6911 */
6912
6913 struct ipw2100_priv *priv = ieee80211_priv(dev);
6914 if (!(priv->status & STATUS_ASSOCIATED))
6915 strcpy(wrqu->name, "unassociated");
6916 else
6917 snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11b");
6918
6919 IPW_DEBUG_WX("Name: %s\n", wrqu->name);
6920 return 0;
6921}
6922
6923
6924static int ipw2100_wx_set_freq(struct net_device *dev,
6925 struct iw_request_info *info,
6926 union iwreq_data *wrqu, char *extra)
6927{
6928 struct ipw2100_priv *priv = ieee80211_priv(dev);
6929 struct iw_freq *fwrq = &wrqu->freq;
6930 int err = 0;
6931
6932 if (priv->ieee->iw_mode == IW_MODE_INFRA)
6933 return -EOPNOTSUPP;
6934
6935 down(&priv->action_sem);
6936 if (!(priv->status & STATUS_INITIALIZED)) {
6937 err = -EIO;
6938 goto done;
6939 }
6940
6941 /* if setting by freq convert to channel */
6942 if (fwrq->e == 1) {
6943 if ((fwrq->m >= (int) 2.412e8 &&
6944 fwrq->m <= (int) 2.487e8)) {
6945 int f = fwrq->m / 100000;
6946 int c = 0;
6947
6948 while ((c < REG_MAX_CHANNEL) &&
6949 (f != ipw2100_frequencies[c]))
6950 c++;
6951
6952 /* hack to fall through */
6953 fwrq->e = 0;
6954 fwrq->m = c + 1;
6955 }
6956 }
6957
6958 if (fwrq->e > 0 || fwrq->m > 1000) {
6959 err = -EOPNOTSUPP; /* don't return with action_sem still held */
6960 } else { /* Set the channel */
6961 IPW_DEBUG_WX("SET Freq/Channel -> %d \n", fwrq->m);
6962 err = ipw2100_set_channel(priv, fwrq->m, 0);
6963 }
6964
6965 done:
6966 up(&priv->action_sem);
6967 return err;
6968}
6969
6970
6971static int ipw2100_wx_get_freq(struct net_device *dev,
6972 struct iw_request_info *info,
6973 union iwreq_data *wrqu, char *extra)
6974{
6975 /*
6976 * This can be called at any time. No action lock required
6977 */
6978
6979 struct ipw2100_priv *priv = ieee80211_priv(dev);
6980
6981 wrqu->freq.e = 0;
6982
6983 /* If we are associated, trying to associate, or have a statically
6984 * configured CHANNEL then return that; otherwise return ANY */
6985 if (priv->config & CFG_STATIC_CHANNEL ||
6986 priv->status & STATUS_ASSOCIATED)
6987 wrqu->freq.m = priv->channel;
6988 else
6989 wrqu->freq.m = 0;
6990
6991 IPW_DEBUG_WX("GET Freq/Channel -> %d \n", priv->channel);
6992 return 0;
6993
6994}
6995
6996static int ipw2100_wx_set_mode(struct net_device *dev,
6997 struct iw_request_info *info,
6998 union iwreq_data *wrqu, char *extra)
6999{
7000 struct ipw2100_priv *priv = ieee80211_priv(dev);
7001 int err = 0;
7002
7003 IPW_DEBUG_WX("SET Mode -> %d \n", wrqu->mode);
7004
7005 if (wrqu->mode == priv->ieee->iw_mode)
7006 return 0;
7007
7008 down(&priv->action_sem);
7009 if (!(priv->status & STATUS_INITIALIZED)) {
7010 err = -EIO;
7011 goto done;
7012 }
7013
7014 switch (wrqu->mode) {
7015#ifdef CONFIG_IPW2100_MONITOR
7016 case IW_MODE_MONITOR:
7017 err = ipw2100_switch_mode(priv, IW_MODE_MONITOR);
7018 break;
7019#endif /* CONFIG_IPW2100_MONITOR */
7020 case IW_MODE_ADHOC:
7021 err = ipw2100_switch_mode(priv, IW_MODE_ADHOC);
7022 break;
7023 case IW_MODE_INFRA:
7024 case IW_MODE_AUTO:
7025 default:
7026 err = ipw2100_switch_mode(priv, IW_MODE_INFRA);
7027 break;
7028 }
7029
7030done:
7031 up(&priv->action_sem);
7032 return err;
7033}
7034
7035static int ipw2100_wx_get_mode(struct net_device *dev,
7036 struct iw_request_info *info,
7037 union iwreq_data *wrqu, char *extra)
7038{
7039 /*
7040 * This can be called at any time. No action lock required
7041 */
7042
7043 struct ipw2100_priv *priv = ieee80211_priv(dev);
7044
7045 wrqu->mode = priv->ieee->iw_mode;
7046 IPW_DEBUG_WX("GET Mode -> %d\n", wrqu->mode);
7047
7048 return 0;
7049}
7050
7051
7052#define POWER_MODES 5
7053
7054/* Values are in microseconds */
7055const s32 timeout_duration[POWER_MODES] = {
7056 350000,
7057 250000,
7058 75000,
7059 37000,
7060 25000,
7061};
7062
7063const s32 period_duration[POWER_MODES] = {
7064 400000,
7065 700000,
7066 1000000,
7067 1000000,
7068 1000000
7069};
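/* Together these tables define the five power-save levels reported by
 * ipw2100_wx_get_powermode(): level 1 pairs a 350 ms timeout with a 400 ms
 * period, level 5 a 25 ms timeout with a 1000 ms period. */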
7070
7071static int ipw2100_wx_get_range(struct net_device *dev,
7072 struct iw_request_info *info,
7073 union iwreq_data *wrqu, char *extra)
7074{
7075 /*
7076 * This can be called at any time. No action lock required
7077 */
7078
7079 struct ipw2100_priv *priv = ieee80211_priv(dev);
7080 struct iw_range *range = (struct iw_range *)extra;
7081 u16 val;
7082 int i, level;
7083
7084 wrqu->data.length = sizeof(*range);
7085 memset(range, 0, sizeof(*range));
7086
7087 /* Let's try to keep this struct in the same order as in
7088 * linux/include/wireless.h
7089 */
7090
7091 /* TODO: See what values we can set, and remove the ones we can't
7092 * set, or fill them with some default data.
7093 */
7094
7095 /* ~5 Mb/s real (802.11b) */
7096 range->throughput = 5 * 1000 * 1000;
7097
7098// range->sensitivity; /* signal level threshold range */
7099
7100 range->max_qual.qual = 100;
7101 /* TODO: Find real max RSSI and stick here */
7102 range->max_qual.level = 0;
7103 range->max_qual.noise = 0;
7104 range->max_qual.updated = 7; /* Updated all three */
7105
7106 range->avg_qual.qual = 70; /* > 8% missed beacons is 'bad' */
7107 /* TODO: Find real 'good' to 'bad' threshold value for RSSI */
7108 range->avg_qual.level = 20 + IPW2100_RSSI_TO_DBM;
7109 range->avg_qual.noise = 0;
7110 range->avg_qual.updated = 7; /* Updated all three */
7111
7112 range->num_bitrates = RATE_COUNT;
7113
7114 for (i = 0; i < RATE_COUNT && i < IW_MAX_BITRATES; i++) {
7115 range->bitrate[i] = ipw2100_rates_11b[i];
7116 }
7117
7118 range->min_rts = MIN_RTS_THRESHOLD;
7119 range->max_rts = MAX_RTS_THRESHOLD;
7120 range->min_frag = MIN_FRAG_THRESHOLD;
7121 range->max_frag = MAX_FRAG_THRESHOLD;
7122
7123 range->min_pmp = period_duration[0]; /* Minimal PM period */
7124 range->max_pmp = period_duration[POWER_MODES-1];/* Maximal PM period */
7125 range->min_pmt = timeout_duration[POWER_MODES-1]; /* Minimal PM timeout */
7126 range->max_pmt = timeout_duration[0];/* Maximal PM timeout */
7127
7128 /* How to decode max/min PM period */
7129 range->pmp_flags = IW_POWER_PERIOD;
7130 /* How to decode max/min PM timeout */
7131 range->pmt_flags = IW_POWER_TIMEOUT;
7132 /* What PM options are supported */
7133 range->pm_capa = IW_POWER_TIMEOUT | IW_POWER_PERIOD;
7134
7135 range->encoding_size[0] = 5;
7136 range->encoding_size[1] = 13; /* Different token sizes */
7137 range->num_encoding_sizes = 2; /* Number of entries in the list */
7138 range->max_encoding_tokens = WEP_KEYS; /* Max number of tokens */
7139// range->encoding_login_index; /* token index for login token */
7140
7141 if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
7142 range->txpower_capa = IW_TXPOW_DBM;
7143 range->num_txpower = IW_MAX_TXPOWER;
7144 for (i = 0, level = (IPW_TX_POWER_MAX_DBM * 16); i < IW_MAX_TXPOWER;
7145 i++, level -= ((IPW_TX_POWER_MAX_DBM - IPW_TX_POWER_MIN_DBM) * 16) /
7146 (IW_MAX_TXPOWER - 1))
7147 range->txpower[i] = level / 16;
7148 } else {
7149 range->txpower_capa = 0;
7150 range->num_txpower = 0;
7151 }
7152
7153
7154 /* Set the Wireless Extension versions */
7155 range->we_version_compiled = WIRELESS_EXT;
7156 range->we_version_source = 16;
7157
7158// range->retry_capa; /* What retry options are supported */
7159// range->retry_flags; /* How to decode max/min retry limit */
7160// range->r_time_flags; /* How to decode max/min retry life */
7161// range->min_retry; /* Minimal number of retries */
7162// range->max_retry; /* Maximal number of retries */
7163// range->min_r_time; /* Minimal retry lifetime */
7164// range->max_r_time; /* Maximal retry lifetime */
7165
7166 range->num_channels = FREQ_COUNT;
7167
7168 val = 0;
7169 for (i = 0; i < FREQ_COUNT; i++) {
7170 // TODO: Include only legal frequencies for some countries
7171// if (local->channel_mask & (1 << i)) {
7172 range->freq[val].i = i + 1;
7173 range->freq[val].m = ipw2100_frequencies[i] * 100000;
7174 range->freq[val].e = 1;
7175 val++;
7176// }
7177 if (val == IW_MAX_FREQUENCIES)
7178 break;
7179 }
7180 range->num_frequency = val;
7181
7182 IPW_DEBUG_WX("GET Range\n");
7183
7184 return 0;
7185}
7186
7187static int ipw2100_wx_set_wap(struct net_device *dev,
7188 struct iw_request_info *info,
7189 union iwreq_data *wrqu, char *extra)
7190{
7191 struct ipw2100_priv *priv = ieee80211_priv(dev);
7192 int err = 0;
7193
7194 static const unsigned char any[] = {
7195 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
7196 };
7197 static const unsigned char off[] = {
7198 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
7199 };
7200
7201 // sanity checks
7202 if (wrqu->ap_addr.sa_family != ARPHRD_ETHER)
7203 return -EINVAL;
7204
7205 down(&priv->action_sem);
7206 if (!(priv->status & STATUS_INITIALIZED)) {
7207 err = -EIO;
7208 goto done;
7209 }
7210
7211 if (!memcmp(any, wrqu->ap_addr.sa_data, ETH_ALEN) ||
7212 !memcmp(off, wrqu->ap_addr.sa_data, ETH_ALEN)) {
7213 /* we disable mandatory BSSID association */
7214 IPW_DEBUG_WX("exit - disable mandatory BSSID\n");
7215 priv->config &= ~CFG_STATIC_BSSID;
7216 err = ipw2100_set_mandatory_bssid(priv, NULL, 0);
7217 goto done;
7218 }
7219
7220 priv->config |= CFG_STATIC_BSSID;
7221 memcpy(priv->mandatory_bssid_mac, wrqu->ap_addr.sa_data, ETH_ALEN);
7222
7223 err = ipw2100_set_mandatory_bssid(priv, wrqu->ap_addr.sa_data, 0);
7224
7225 IPW_DEBUG_WX("SET BSSID -> %02X:%02X:%02X:%02X:%02X:%02X\n",
7226 wrqu->ap_addr.sa_data[0] & 0xff,
7227 wrqu->ap_addr.sa_data[1] & 0xff,
7228 wrqu->ap_addr.sa_data[2] & 0xff,
7229 wrqu->ap_addr.sa_data[3] & 0xff,
7230 wrqu->ap_addr.sa_data[4] & 0xff,
7231 wrqu->ap_addr.sa_data[5] & 0xff);
7232
7233 done:
7234 up(&priv->action_sem);
7235 return err;
7236}
7237
7238static int ipw2100_wx_get_wap(struct net_device *dev,
7239 struct iw_request_info *info,
7240 union iwreq_data *wrqu, char *extra)
7241{
7242 /*
7243 * This can be called at any time. No action lock required
7244 */
7245
7246 struct ipw2100_priv *priv = ieee80211_priv(dev);
7247
7248 /* If we are associated, trying to associate, or have a statically
7249 * configured BSSID then return that; otherwise return ANY */
7250 if (priv->config & CFG_STATIC_BSSID ||
7251 priv->status & STATUS_ASSOCIATED) {
7252 wrqu->ap_addr.sa_family = ARPHRD_ETHER;
7253 memcpy(wrqu->ap_addr.sa_data, &priv->bssid, ETH_ALEN);
7254 } else
7255 memset(wrqu->ap_addr.sa_data, 0, ETH_ALEN);
7256
7257 IPW_DEBUG_WX("Getting WAP BSSID: " MAC_FMT "\n",
7258 MAC_ARG(wrqu->ap_addr.sa_data));
7259 return 0;
7260}
7261
7262static int ipw2100_wx_set_essid(struct net_device *dev,
7263 struct iw_request_info *info,
7264 union iwreq_data *wrqu, char *extra)
7265{
7266 struct ipw2100_priv *priv = ieee80211_priv(dev);
7267 char *essid = ""; /* ANY */
7268 int length = 0;
7269 int err = 0;
7270
7271 down(&priv->action_sem);
7272 if (!(priv->status & STATUS_INITIALIZED)) {
7273 err = -EIO;
7274 goto done;
7275 }
7276
7277 if (wrqu->essid.flags && wrqu->essid.length) {
7278 length = wrqu->essid.length - 1;
7279 essid = extra;
7280 }
7281
7282 if (length == 0) {
7283 IPW_DEBUG_WX("Setting ESSID to ANY\n");
7284 priv->config &= ~CFG_STATIC_ESSID;
7285 err = ipw2100_set_essid(priv, NULL, 0, 0);
7286 goto done;
7287 }
7288
7289 length = min(length, IW_ESSID_MAX_SIZE);
7290
7291 priv->config |= CFG_STATIC_ESSID;
7292
7293 if (priv->essid_len == length && !memcmp(priv->essid, extra, length)) {
7294 IPW_DEBUG_WX("ESSID set to current ESSID.\n");
7295 err = 0;
7296 goto done;
7297 }
7298
7299 IPW_DEBUG_WX("Setting ESSID: '%s' (%d)\n", escape_essid(essid, length),
7300 length);
7301
7302 priv->essid_len = length;
7303 memcpy(priv->essid, essid, priv->essid_len);
7304
7305 err = ipw2100_set_essid(priv, essid, length, 0);
7306
7307 done:
7308 up(&priv->action_sem);
7309 return err;
7310}
7311
7312static int ipw2100_wx_get_essid(struct net_device *dev,
7313 struct iw_request_info *info,
7314 union iwreq_data *wrqu, char *extra)
7315{
7316 /*
7317 * This can be called at any time. No action lock required
7318 */
7319
7320 struct ipw2100_priv *priv = ieee80211_priv(dev);
7321
7322 /* If we are associated, trying to associate, or have a statically
7323 * configured ESSID then return that; otherwise return ANY */
7324 if (priv->config & CFG_STATIC_ESSID ||
7325 priv->status & STATUS_ASSOCIATED) {
7326 IPW_DEBUG_WX("Getting essid: '%s'\n",
7327 escape_essid(priv->essid, priv->essid_len));
7328 memcpy(extra, priv->essid, priv->essid_len);
7329 wrqu->essid.length = priv->essid_len;
7330 wrqu->essid.flags = 1; /* active */
7331 } else {
7332 IPW_DEBUG_WX("Getting essid: ANY\n");
7333 wrqu->essid.length = 0;
7334 wrqu->essid.flags = 0; /* active */
7335 }
7336
7337 return 0;
7338}
7339
7340static int ipw2100_wx_set_nick(struct net_device *dev,
7341 struct iw_request_info *info,
7342 union iwreq_data *wrqu, char *extra)
7343{
7344 /*
7345 * This can be called at any time. No action lock required
7346 */
7347
7348 struct ipw2100_priv *priv = ieee80211_priv(dev);
7349
7350 if (wrqu->data.length > IW_ESSID_MAX_SIZE)
7351 return -E2BIG;
7352
7353 wrqu->data.length = min((size_t)wrqu->data.length, sizeof(priv->nick));
7354 memset(priv->nick, 0, sizeof(priv->nick));
7355 memcpy(priv->nick, extra, wrqu->data.length);
7356
7357 IPW_DEBUG_WX("SET Nickname -> %s \n", priv->nick);
7358
7359 return 0;
7360}
7361
7362static int ipw2100_wx_get_nick(struct net_device *dev,
7363 struct iw_request_info *info,
7364 union iwreq_data *wrqu, char *extra)
7365{
7366 /*
7367 * This can be called at any time. No action lock required
7368 */
7369
7370 struct ipw2100_priv *priv = ieee80211_priv(dev);
7371
7372 wrqu->data.length = strlen(priv->nick) + 1;
7373 memcpy(extra, priv->nick, wrqu->data.length);
7374 wrqu->data.flags = 1; /* active */
7375
7376 IPW_DEBUG_WX("GET Nickname -> %s \n", extra);
7377
7378 return 0;
7379}
7380
7381static int ipw2100_wx_set_rate(struct net_device *dev,
7382 struct iw_request_info *info,
7383 union iwreq_data *wrqu, char *extra)
7384{
7385 struct ipw2100_priv *priv = ieee80211_priv(dev);
7386 u32 target_rate = wrqu->bitrate.value;
7387 u32 rate;
7388 int err = 0;
7389
7390 down(&priv->action_sem);
7391 if (!(priv->status & STATUS_INITIALIZED)) {
7392 err = -EIO;
7393 goto done;
7394 }
7395
7396 rate = 0;
7397
7398 if (target_rate == 1000000 ||
7399 (!wrqu->bitrate.fixed && target_rate > 1000000))
7400 rate |= TX_RATE_1_MBIT;
7401 if (target_rate == 2000000 ||
7402 (!wrqu->bitrate.fixed && target_rate > 2000000))
7403 rate |= TX_RATE_2_MBIT;
7404 if (target_rate == 5500000 ||
7405 (!wrqu->bitrate.fixed && target_rate > 5500000))
7406 rate |= TX_RATE_5_5_MBIT;
7407 if (target_rate == 11000000 ||
7408 (!wrqu->bitrate.fixed && target_rate > 11000000))
7409 rate |= TX_RATE_11_MBIT;
7410 if (rate == 0)
7411 rate = DEFAULT_TX_RATES;
7412
7413 err = ipw2100_set_tx_rates(priv, rate, 0);
7414
7415 IPW_DEBUG_WX("SET Rate -> %04X \n", rate);
7416 done:
7417 up(&priv->action_sem);
7418 return err;
7419}
7420
7421
7422static int ipw2100_wx_get_rate(struct net_device *dev,
7423 struct iw_request_info *info,
7424 union iwreq_data *wrqu, char *extra)
7425{
7426 struct ipw2100_priv *priv = ieee80211_priv(dev);
7427 int val;
7428 int len = sizeof(val);
7429 int err = 0;
7430
7431 if (!(priv->status & STATUS_ENABLED) ||
7432 priv->status & STATUS_RF_KILL_MASK ||
7433 !(priv->status & STATUS_ASSOCIATED)) {
7434 wrqu->bitrate.value = 0;
7435 return 0;
7436 }
7437
7438 down(&priv->action_sem);
7439 if (!(priv->status & STATUS_INITIALIZED)) {
7440 err = -EIO;
7441 goto done;
7442 }
7443
7444 err = ipw2100_get_ordinal(priv, IPW_ORD_CURRENT_TX_RATE, &val, &len);
7445 if (err) {
7446 IPW_DEBUG_WX("failed querying ordinals.\n");
7447 goto done; /* don't return with action_sem still held */
7448 }
7449
7450 switch (val & TX_RATE_MASK) {
7451 case TX_RATE_1_MBIT:
7452 wrqu->bitrate.value = 1000000;
7453 break;
7454 case TX_RATE_2_MBIT:
7455 wrqu->bitrate.value = 2000000;
7456 break;
7457 case TX_RATE_5_5_MBIT:
7458 wrqu->bitrate.value = 5500000;
7459 break;
7460 case TX_RATE_11_MBIT:
7461 wrqu->bitrate.value = 11000000;
7462 break;
7463 default:
7464 wrqu->bitrate.value = 0;
7465 }
7466
7467 IPW_DEBUG_WX("GET Rate -> %d \n", wrqu->bitrate.value);
7468
7469 done:
7470 up(&priv->action_sem);
7471 return err;
7472}
7473
7474static int ipw2100_wx_set_rts(struct net_device *dev,
7475 struct iw_request_info *info,
7476 union iwreq_data *wrqu, char *extra)
7477{
7478 struct ipw2100_priv *priv = ieee80211_priv(dev);
7479 int value, err;
7480
7481 /* Auto RTS not yet supported */
7482 if (wrqu->rts.fixed == 0)
7483 return -EINVAL;
7484
7485 down(&priv->action_sem);
7486 if (!(priv->status & STATUS_INITIALIZED)) {
7487 err = -EIO;
7488 goto done;
7489 }
7490
7491 if (wrqu->rts.disabled)
7492 value = priv->rts_threshold | RTS_DISABLED;
7493 else {
7494 if (wrqu->rts.value < 1 ||
7495 wrqu->rts.value > 2304) {
7496 err = -EINVAL;
7497 goto done;
7498 }
7499 value = wrqu->rts.value;
7500 }
7501
7502 err = ipw2100_set_rts_threshold(priv, value);
7503
7504 IPW_DEBUG_WX("SET RTS Threshold -> 0x%08X \n", value);
7505 done:
7506 up(&priv->action_sem);
7507 return err;
7508}
7509
7510static int ipw2100_wx_get_rts(struct net_device *dev,
7511 struct iw_request_info *info,
7512 union iwreq_data *wrqu, char *extra)
7513{
7514 /*
7515 * This can be called at any time. No action lock required
7516 */
7517
7518 struct ipw2100_priv *priv = ieee80211_priv(dev);
7519
7520 wrqu->rts.value = priv->rts_threshold & ~RTS_DISABLED;
7521 wrqu->rts.fixed = 1; /* no auto select */
7522
7523 /* If RTS is set to the default value, then it is disabled */
7524 wrqu->rts.disabled = (priv->rts_threshold & RTS_DISABLED) ? 1 : 0;
7525
7526 IPW_DEBUG_WX("GET RTS Threshold -> 0x%08X \n", wrqu->rts.value);
7527
7528 return 0;
7529}
7530
7531static int ipw2100_wx_set_txpow(struct net_device *dev,
7532 struct iw_request_info *info,
7533 union iwreq_data *wrqu, char *extra)
7534{
7535 struct ipw2100_priv *priv = ieee80211_priv(dev);
7536 int err = 0, value;
7537
7538 if (priv->ieee->iw_mode != IW_MODE_ADHOC)
7539 return -EINVAL;
7540
7541 if (wrqu->txpower.disabled == 1 || wrqu->txpower.fixed == 0)
7542 value = IPW_TX_POWER_DEFAULT;
7543 else {
7544 if (wrqu->txpower.value < IPW_TX_POWER_MIN_DBM ||
7545 wrqu->txpower.value > IPW_TX_POWER_MAX_DBM)
7546 return -EINVAL;
7547
7548 value = (wrqu->txpower.value - IPW_TX_POWER_MIN_DBM) * 16 /
7549 (IPW_TX_POWER_MAX_DBM - IPW_TX_POWER_MIN_DBM);
7550 }
7551
7552 down(&priv->action_sem);
7553 if (!(priv->status & STATUS_INITIALIZED)) {
7554 err = -EIO;
7555 goto done;
7556 }
7557
7558 err = ipw2100_set_tx_power(priv, value);
7559
7560 IPW_DEBUG_WX("SET TX Power -> %d \n", value);
7561
7562 done:
7563 up(&priv->action_sem);
7564 return err;
7565}
7566
7567static int ipw2100_wx_get_txpow(struct net_device *dev,
7568 struct iw_request_info *info,
7569 union iwreq_data *wrqu, char *extra)
7570{
7571 /*
7572 * This can be called at any time. No action lock required
7573 */
7574
7575 struct ipw2100_priv *priv = ieee80211_priv(dev);
7576
7577 if (priv->ieee->iw_mode != IW_MODE_ADHOC) {
7578 wrqu->power.disabled = 1;
7579 return 0;
7580 }
7581
7582 if (priv->tx_power == IPW_TX_POWER_DEFAULT) {
7583 wrqu->power.fixed = 0;
7584 wrqu->power.value = IPW_TX_POWER_MAX_DBM;
7585 wrqu->power.disabled = 1;
7586 } else {
7587 wrqu->power.disabled = 0;
7588 wrqu->power.fixed = 1;
7589 wrqu->power.value =
7590 (priv->tx_power *
7591 (IPW_TX_POWER_MAX_DBM - IPW_TX_POWER_MIN_DBM)) /
7592 (IPW_TX_POWER_MAX - IPW_TX_POWER_MIN) +
7593 IPW_TX_POWER_MIN_DBM;
7594 }
7595
7596 wrqu->power.flags = IW_TXPOW_DBM;
7597
7598 IPW_DEBUG_WX("GET TX Power -> %d \n", wrqu->power.value);
7599
7600 return 0;
7601}
7602
7603static int ipw2100_wx_set_frag(struct net_device *dev,
7604 struct iw_request_info *info,
7605 union iwreq_data *wrqu, char *extra)
7606{
7607 /*
7608 * This can be called at any time. No action lock required
7609 */
7610
7611 struct ipw2100_priv *priv = ieee80211_priv(dev);
7612
7613 if (!wrqu->frag.fixed)
7614 return -EINVAL;
7615
7616 if (wrqu->frag.disabled) {
7617 priv->frag_threshold |= FRAG_DISABLED;
7618 priv->ieee->fts = DEFAULT_FTS;
7619 } else {
7620 if (wrqu->frag.value < MIN_FRAG_THRESHOLD ||
7621 wrqu->frag.value > MAX_FRAG_THRESHOLD)
7622 return -EINVAL;
7623
7624 priv->ieee->fts = wrqu->frag.value & ~0x1;
7625 priv->frag_threshold = priv->ieee->fts;
7626 }
7627
7628 IPW_DEBUG_WX("SET Frag Threshold -> %d \n", priv->ieee->fts);
7629
7630 return 0;
7631}
7632
7633static int ipw2100_wx_get_frag(struct net_device *dev,
7634 struct iw_request_info *info,
7635 union iwreq_data *wrqu, char *extra)
7636{
7637 /*
7638 * This can be called at any time. No action lock required
7639 */
7640
7641 struct ipw2100_priv *priv = ieee80211_priv(dev);
7642 wrqu->frag.value = priv->frag_threshold & ~FRAG_DISABLED;
7643 wrqu->frag.fixed = 0; /* no auto select */
7644 wrqu->frag.disabled = (priv->frag_threshold & FRAG_DISABLED) ? 1 : 0;
7645
7646 IPW_DEBUG_WX("GET Frag Threshold -> %d \n", wrqu->frag.value);
7647
7648 return 0;
7649}
7650
7651static int ipw2100_wx_set_retry(struct net_device *dev,
7652 struct iw_request_info *info,
7653 union iwreq_data *wrqu, char *extra)
7654{
7655 struct ipw2100_priv *priv = ieee80211_priv(dev);
7656 int err = 0;
7657
7658 if (wrqu->retry.flags & IW_RETRY_LIFETIME ||
7659 wrqu->retry.disabled)
7660 return -EINVAL;
7661
7662 if (!(wrqu->retry.flags & IW_RETRY_LIMIT))
7663 return 0;
7664
7665 down(&priv->action_sem);
7666 if (!(priv->status & STATUS_INITIALIZED)) {
7667 err = -EIO;
7668 goto done;
7669 }
7670
7671 if (wrqu->retry.flags & IW_RETRY_MIN) {
7672 err = ipw2100_set_short_retry(priv, wrqu->retry.value);
7673 IPW_DEBUG_WX("SET Short Retry Limit -> %d \n",
7674 wrqu->retry.value);
7675 goto done;
7676 }
7677
7678 if (wrqu->retry.flags & IW_RETRY_MAX) {
7679 err = ipw2100_set_long_retry(priv, wrqu->retry.value);
7680 IPW_DEBUG_WX("SET Long Retry Limit -> %d \n",
7681 wrqu->retry.value);
7682 goto done;
7683 }
7684
7685 err = ipw2100_set_short_retry(priv, wrqu->retry.value);
7686 if (!err)
7687 err = ipw2100_set_long_retry(priv, wrqu->retry.value);
7688
7689 IPW_DEBUG_WX("SET Both Retry Limits -> %d \n", wrqu->retry.value);
7690
7691 done:
7692 up(&priv->action_sem);
7693 return err;
7694}
7695
7696static int ipw2100_wx_get_retry(struct net_device *dev,
7697 struct iw_request_info *info,
7698 union iwreq_data *wrqu, char *extra)
7699{
7700 /*
7701 * This can be called at any time. No action lock required
7702 */
7703
7704 struct ipw2100_priv *priv = ieee80211_priv(dev);
7705
7706 wrqu->retry.disabled = 0; /* can't be disabled */
7707
7708 if ((wrqu->retry.flags & IW_RETRY_TYPE) ==
7709 IW_RETRY_LIFETIME)
7710 return -EINVAL;
7711
7712 if (wrqu->retry.flags & IW_RETRY_MAX) {
7713 wrqu->retry.flags = IW_RETRY_LIMIT & IW_RETRY_MAX;
7714 wrqu->retry.value = priv->long_retry_limit;
7715 } else {
7716 wrqu->retry.flags =
7717 (priv->short_retry_limit !=
7718 priv->long_retry_limit) ?
7719 IW_RETRY_LIMIT & IW_RETRY_MIN : IW_RETRY_LIMIT;
7720
7721 wrqu->retry.value = priv->short_retry_limit;
7722 }
7723
7724 IPW_DEBUG_WX("GET Retry -> %d \n", wrqu->retry.value);
7725
7726 return 0;
7727}
7728
7729static int ipw2100_wx_set_scan(struct net_device *dev,
7730 struct iw_request_info *info,
7731 union iwreq_data *wrqu, char *extra)
7732{
7733 struct ipw2100_priv *priv = ieee80211_priv(dev);
7734 int err = 0;
7735
7736 down(&priv->action_sem);
7737 if (!(priv->status & STATUS_INITIALIZED)) {
7738 err = -EIO;
7739 goto done;
7740 }
7741
7742 IPW_DEBUG_WX("Initiating scan...\n");
7743 if (ipw2100_set_scan_options(priv) ||
7744 ipw2100_start_scan(priv)) {
7745 IPW_DEBUG_WX("Start scan failed.\n");
7746
7747 /* TODO: Mark a scan as pending so when hardware initialized
7748 * a scan starts */
7749 }
7750
7751 done:
7752 up(&priv->action_sem);
7753 return err;
7754}
7755
7756static int ipw2100_wx_get_scan(struct net_device *dev,
7757 struct iw_request_info *info,
7758 union iwreq_data *wrqu, char *extra)
7759{
7760 /*
7761 * This can be called at any time. No action lock required
7762 */
7763
7764 struct ipw2100_priv *priv = ieee80211_priv(dev);
7765 return ieee80211_wx_get_scan(priv->ieee, info, wrqu, extra);
7766}
7767
7768
7769/*
7770 * Implementation based on code in hostap-driver v0.1.3 hostap_ioctl.c
7771 */
7772static int ipw2100_wx_set_encode(struct net_device *dev,
7773 struct iw_request_info *info,
7774 union iwreq_data *wrqu, char *key)
7775{
7776 /*
7777 * No check of STATUS_INITIALIZED required
7778 */
7779
7780 struct ipw2100_priv *priv = ieee80211_priv(dev);
7781 return ieee80211_wx_set_encode(priv->ieee, info, wrqu, key);
7782}
7783
7784static int ipw2100_wx_get_encode(struct net_device *dev,
7785 struct iw_request_info *info,
7786 union iwreq_data *wrqu, char *key)
7787{
7788 /*
7789 * This can be called at any time. No action lock required
7790 */
7791
7792 struct ipw2100_priv *priv = ieee80211_priv(dev);
7793 return ieee80211_wx_get_encode(priv->ieee, info, wrqu, key);
7794}
7795
7796static int ipw2100_wx_set_power(struct net_device *dev,
7797 struct iw_request_info *info,
7798 union iwreq_data *wrqu, char *extra)
7799{
7800 struct ipw2100_priv *priv = ieee80211_priv(dev);
7801 int err = 0;
7802
7803 down(&priv->action_sem);
7804 if (!(priv->status & STATUS_INITIALIZED)) {
7805 err = -EIO;
7806 goto done;
7807 }
7808
7809 if (wrqu->power.disabled) {
7810 priv->power_mode = IPW_POWER_LEVEL(priv->power_mode);
7811 err = ipw2100_set_power_mode(priv, IPW_POWER_MODE_CAM);
7812 IPW_DEBUG_WX("SET Power Management Mode -> off\n");
7813 goto done;
7814 }
7815
7816 switch (wrqu->power.flags & IW_POWER_MODE) {
7817 case IW_POWER_ON: /* If not specified */
7818 case IW_POWER_MODE: /* If set all mask */
7819 case IW_POWER_ALL_R: /* If explicitly stated all */
7820 break;
7821 default: /* Otherwise we don't support it */
7822 IPW_DEBUG_WX("SET PM Mode: %X not supported.\n",
7823 wrqu->power.flags);
7824 err = -EOPNOTSUPP;
7825 goto done;
7826 }
7827
7828 /* If the user hasn't specified a power management mode yet, default
7829 * to BATTERY */
7830 priv->power_mode = IPW_POWER_ENABLED | priv->power_mode;
7831 err = ipw2100_set_power_mode(priv, IPW_POWER_LEVEL(priv->power_mode));
7832
7833 IPW_DEBUG_WX("SET Power Management Mode -> 0x%02X\n",
7834 priv->power_mode);
7835
7836 done:
7837 up(&priv->action_sem);
7838 return err;
7839
7840}
7841
7842static int ipw2100_wx_get_power(struct net_device *dev,
7843 struct iw_request_info *info,
7844 union iwreq_data *wrqu, char *extra)
7845{
7846 /*
7847 * This can be called at any time. No action lock required
7848 */
7849
7850 struct ipw2100_priv *priv = ieee80211_priv(dev);
7851
7852 if (!(priv->power_mode & IPW_POWER_ENABLED)) {
7853 wrqu->power.disabled = 1;
7854 } else {
7855 wrqu->power.disabled = 0;
7856 wrqu->power.flags = 0;
7857 }
7858
7859 IPW_DEBUG_WX("GET Power Management Mode -> %02X\n", priv->power_mode);
7860
7861 return 0;
7862}
7863
7864
7865/*
7866 *
7867 * IWPRIV handlers
7868 *
7869 */
7870#ifdef CONFIG_IPW2100_MONITOR
7871static int ipw2100_wx_set_promisc(struct net_device *dev,
7872 struct iw_request_info *info,
7873 union iwreq_data *wrqu, char *extra)
7874{
7875 struct ipw2100_priv *priv = ieee80211_priv(dev);
7876 int *parms = (int *)extra;
7877 int enable = (parms[0] > 0);
7878 int err = 0;
7879
7880 down(&priv->action_sem);
7881 if (!(priv->status & STATUS_INITIALIZED)) {
7882 err = -EIO;
7883 goto done;
7884 }
7885
7886 if (enable) {
7887 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
7888 err = ipw2100_set_channel(priv, parms[1], 0);
7889 goto done;
7890 }
7891 priv->channel = parms[1];
7892 err = ipw2100_switch_mode(priv, IW_MODE_MONITOR);
7893 } else {
7894 if (priv->ieee->iw_mode == IW_MODE_MONITOR)
7895 err = ipw2100_switch_mode(priv, priv->last_mode);
7896 }
7897 done:
7898 up(&priv->action_sem);
7899 return err;
7900}
7901
7902static int ipw2100_wx_reset(struct net_device *dev,
7903 struct iw_request_info *info,
7904 union iwreq_data *wrqu, char *extra)
7905{
7906 struct ipw2100_priv *priv = ieee80211_priv(dev);
7907 if (priv->status & STATUS_INITIALIZED)
7908 schedule_reset(priv);
7909 return 0;
7910}
7911
7912#endif
7913
7914static int ipw2100_wx_set_powermode(struct net_device *dev,
7915 struct iw_request_info *info,
7916 union iwreq_data *wrqu, char *extra)
7917{
7918 struct ipw2100_priv *priv = ieee80211_priv(dev);
7919 int err = 0, mode = *(int *)extra;
7920
7921 down(&priv->action_sem);
7922 if (!(priv->status & STATUS_INITIALIZED)) {
7923 err = -EIO;
7924 goto done;
7925 }
7926
7927 if ((mode < 1) || (mode > POWER_MODES))
7928 mode = IPW_POWER_AUTO;
7929
7930 if (priv->power_mode != mode)
7931 err = ipw2100_set_power_mode(priv, mode);
7932 done:
7933 up(&priv->action_sem);
7934 return err;
7935}
7936
7937#define MAX_POWER_STRING 80
7938static int ipw2100_wx_get_powermode(struct net_device *dev,
7939 struct iw_request_info *info,
7940 union iwreq_data *wrqu, char *extra)
7941{
7942 /*
7943 * This can be called at any time. No action lock required
7944 */
7945
7946 struct ipw2100_priv *priv = ieee80211_priv(dev);
7947 int level = IPW_POWER_LEVEL(priv->power_mode);
7948 s32 timeout, period;
7949
7950 if (!(priv->power_mode & IPW_POWER_ENABLED)) {
7951 snprintf(extra, MAX_POWER_STRING,
7952 "Power save level: %d (Off)", level);
7953 } else {
7954 switch (level) {
7955 case IPW_POWER_MODE_CAM:
7956 snprintf(extra, MAX_POWER_STRING,
7957 "Power save level: %d (None)", level);
7958 break;
7959 case IPW_POWER_AUTO:
7960 snprintf(extra, MAX_POWER_STRING,
7961 "Power save level: %d (Auto)", 0);
7962 break;
7963 default:
7964 timeout = timeout_duration[level - 1] / 1000;
7965 period = period_duration[level - 1] / 1000;
7966 snprintf(extra, MAX_POWER_STRING,
7967 "Power save level: %d "
7968 "(Timeout %dms, Period %dms)",
7969 level, timeout, period);
7970 }
7971 }
7972
7973 wrqu->data.length = strlen(extra) + 1;
7974
7975 return 0;
7976}
7977
7978
7979static int ipw2100_wx_set_preamble(struct net_device *dev,
7980 struct iw_request_info *info,
7981 union iwreq_data *wrqu, char *extra)
7982{
7983 struct ipw2100_priv *priv = ieee80211_priv(dev);
7984 int err, mode = *(int *)extra;
7985
7986 down(&priv->action_sem);
7987 if (!(priv->status & STATUS_INITIALIZED)) {
7988 err = -EIO;
7989 goto done;
7990 }
7991
7992 if (mode == 1)
7993 priv->config |= CFG_LONG_PREAMBLE;
7994 else if (mode == 0)
7995 priv->config &= ~CFG_LONG_PREAMBLE;
7996 else {
7997 err = -EINVAL;
7998 goto done;
7999 }
8000
8001 err = ipw2100_system_config(priv, 0);
8002
8003done:
8004 up(&priv->action_sem);
8005 return err;
8006}
8007
8008static int ipw2100_wx_get_preamble(struct net_device *dev,
8009 struct iw_request_info *info,
8010 union iwreq_data *wrqu, char *extra)
8011{
8012 /*
8013 * This can be called at any time. No action lock required
8014 */
8015
8016 struct ipw2100_priv *priv = ieee80211_priv(dev);
8017
8018 if (priv->config & CFG_LONG_PREAMBLE)
8019 snprintf(wrqu->name, IFNAMSIZ, "long (1)");
8020 else
8021 snprintf(wrqu->name, IFNAMSIZ, "auto (0)");
8022
8023 return 0;
8024}
8025
8026static iw_handler ipw2100_wx_handlers[] =
8027{
8028 NULL, /* SIOCSIWCOMMIT */
8029 ipw2100_wx_get_name, /* SIOCGIWNAME */
8030 NULL, /* SIOCSIWNWID */
8031 NULL, /* SIOCGIWNWID */
8032 ipw2100_wx_set_freq, /* SIOCSIWFREQ */
8033 ipw2100_wx_get_freq, /* SIOCGIWFREQ */
8034 ipw2100_wx_set_mode, /* SIOCSIWMODE */
8035 ipw2100_wx_get_mode, /* SIOCGIWMODE */
8036 NULL, /* SIOCSIWSENS */
8037 NULL, /* SIOCGIWSENS */
8038 NULL, /* SIOCSIWRANGE */
8039 ipw2100_wx_get_range, /* SIOCGIWRANGE */
8040 NULL, /* SIOCSIWPRIV */
8041 NULL, /* SIOCGIWPRIV */
8042 NULL, /* SIOCSIWSTATS */
8043 NULL, /* SIOCGIWSTATS */
8044 NULL, /* SIOCSIWSPY */
8045 NULL, /* SIOCGIWSPY */
8046 NULL, /* SIOCGIWTHRSPY */
8047 NULL, /* SIOCWIWTHRSPY */
8048 ipw2100_wx_set_wap, /* SIOCSIWAP */
8049 ipw2100_wx_get_wap, /* SIOCGIWAP */
8050 NULL, /* -- hole -- */
8051 NULL, /* SIOCGIWAPLIST -- deprecated */
8052 ipw2100_wx_set_scan, /* SIOCSIWSCAN */
8053 ipw2100_wx_get_scan, /* SIOCGIWSCAN */
8054 ipw2100_wx_set_essid, /* SIOCSIWESSID */
8055 ipw2100_wx_get_essid, /* SIOCGIWESSID */
8056 ipw2100_wx_set_nick, /* SIOCSIWNICKN */
8057 ipw2100_wx_get_nick, /* SIOCGIWNICKN */
8058 NULL, /* -- hole -- */
8059 NULL, /* -- hole -- */
8060 ipw2100_wx_set_rate, /* SIOCSIWRATE */
8061 ipw2100_wx_get_rate, /* SIOCGIWRATE */
8062 ipw2100_wx_set_rts, /* SIOCSIWRTS */
8063 ipw2100_wx_get_rts, /* SIOCGIWRTS */
8064 ipw2100_wx_set_frag, /* SIOCSIWFRAG */
8065 ipw2100_wx_get_frag, /* SIOCGIWFRAG */
8066 ipw2100_wx_set_txpow, /* SIOCSIWTXPOW */
8067 ipw2100_wx_get_txpow, /* SIOCGIWTXPOW */
8068 ipw2100_wx_set_retry, /* SIOCSIWRETRY */
8069 ipw2100_wx_get_retry, /* SIOCGIWRETRY */
8070 ipw2100_wx_set_encode, /* SIOCSIWENCODE */
8071 ipw2100_wx_get_encode, /* SIOCGIWENCODE */
8072 ipw2100_wx_set_power, /* SIOCSIWPOWER */
8073 ipw2100_wx_get_power, /* SIOCGIWPOWER */
8074};
8075
8076#define IPW2100_PRIV_SET_MONITOR SIOCIWFIRSTPRIV
8077#define IPW2100_PRIV_RESET SIOCIWFIRSTPRIV+1
8078#define IPW2100_PRIV_SET_POWER SIOCIWFIRSTPRIV+2
8079#define IPW2100_PRIV_GET_POWER SIOCIWFIRSTPRIV+3
8080#define IPW2100_PRIV_SET_LONGPREAMBLE SIOCIWFIRSTPRIV+4
8081#define IPW2100_PRIV_GET_LONGPREAMBLE SIOCIWFIRSTPRIV+5
8082
8083static const struct iw_priv_args ipw2100_private_args[] = {
8084
8085#ifdef CONFIG_IPW2100_MONITOR
8086 {
8087 IPW2100_PRIV_SET_MONITOR,
8088 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2, 0, "monitor"
8089 },
8090 {
8091 IPW2100_PRIV_RESET,
8092 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "reset"
8093 },
8094#endif /* CONFIG_IPW2100_MONITOR */
8095
8096 {
8097 IPW2100_PRIV_SET_POWER,
8098 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "set_power"
8099 },
8100 {
8101 IPW2100_PRIV_GET_POWER,
8102 0, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_POWER_STRING, "get_power"
8103 },
8104 {
8105 IPW2100_PRIV_SET_LONGPREAMBLE,
8106 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "set_preamble"
8107 },
8108 {
8109 IPW2100_PRIV_GET_LONGPREAMBLE,
8110 0, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | IFNAMSIZ, "get_preamble"
8111 },
8112};
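/* For reference, these private ioctls are reachable through the wireless
 * tools as iwpriv sub-commands; a typical invocation (interface name purely
 * illustrative) would be:
 *
 *	iwpriv eth1 set_power 5
 *	iwpriv eth1 get_power
 *	iwpriv eth1 set_preamble 1
 */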
8113
8114static iw_handler ipw2100_private_handler[] = {
8115#ifdef CONFIG_IPW2100_MONITOR
8116 ipw2100_wx_set_promisc,
8117 ipw2100_wx_reset,
8118#else /* CONFIG_IPW2100_MONITOR */
8119 NULL,
8120 NULL,
8121#endif /* CONFIG_IPW2100_MONITOR */
8122 ipw2100_wx_set_powermode,
8123 ipw2100_wx_get_powermode,
8124 ipw2100_wx_set_preamble,
8125 ipw2100_wx_get_preamble,
8126};
8127
8128struct iw_handler_def ipw2100_wx_handler_def =
8129{
8130 .standard = ipw2100_wx_handlers,
8131 .num_standard = sizeof(ipw2100_wx_handlers) / sizeof(iw_handler),
8132 .num_private = sizeof(ipw2100_private_handler) / sizeof(iw_handler),
8133 .num_private_args = sizeof(ipw2100_private_args) /
8134 sizeof(struct iw_priv_args),
8135 .private = (iw_handler *)ipw2100_private_handler,
8136 .private_args = (struct iw_priv_args *)ipw2100_private_args,
8137};
8138
8139/*
8140 * Get wireless statistics.
8141 * Called by /proc/net/wireless
8142 * Also called by SIOCGIWSTATS
8143 */
8144struct iw_statistics *ipw2100_wx_wireless_stats(struct net_device * dev)
8145{
8146 enum {
8147 POOR = 30,
8148 FAIR = 60,
8149 GOOD = 80,
8150 VERY_GOOD = 90,
8151 EXCELLENT = 95,
8152 PERFECT = 100
8153 };
8154 int rssi_qual;
8155 int tx_qual;
8156 int beacon_qual;
8157
8158 struct ipw2100_priv *priv = ieee80211_priv(dev);
8159 struct iw_statistics *wstats;
8160 u32 rssi, quality, tx_retries, missed_beacons, tx_failures;
8161 u32 ord_len = sizeof(u32);
8162
8163 if (!priv)
8164 return (struct iw_statistics *) NULL;
8165
8166 wstats = &priv->wstats;
8167
8168 /* if hw is disabled, then ipw2100_get_ordinal() can't be called.
8169 * ipw2100_wx_wireless_stats seems to be called before fw is
8170 * initialized. STATUS_ASSOCIATED will only be set if the hw is up
8171 * and associated; if not associated, the values are all meaningless
8172 * anyway, so set them all to NULL and INVALID */
8173 if (!(priv->status & STATUS_ASSOCIATED)) {
8174 wstats->miss.beacon = 0;
8175 wstats->discard.retries = 0;
8176 wstats->qual.qual = 0;
8177 wstats->qual.level = 0;
8178 wstats->qual.noise = 0;
8179 wstats->qual.updated = 7;
8180 wstats->qual.updated |= IW_QUAL_NOISE_INVALID |
8181 IW_QUAL_QUAL_INVALID | IW_QUAL_LEVEL_INVALID;
8182 return wstats;
8183 }
8184
8185 if (ipw2100_get_ordinal(priv, IPW_ORD_STAT_PERCENT_MISSED_BCNS,
8186 &missed_beacons, &ord_len))
8187 goto fail_get_ordinal;
8188
8189 /* If we don't have a connection, the quality and level are 0 */
8190 if (!(priv->status & STATUS_ASSOCIATED)) {
8191 wstats->qual.qual = 0;
8192 wstats->qual.level = 0;
8193 } else {
8194 if (ipw2100_get_ordinal(priv, IPW_ORD_RSSI_AVG_CURR,
8195 &rssi, &ord_len))
8196 goto fail_get_ordinal;
8197 wstats->qual.level = rssi + IPW2100_RSSI_TO_DBM;
8198 if (rssi < 10)
8199 rssi_qual = rssi * POOR / 10;
8200 else if (rssi < 15)
8201 rssi_qual = (rssi - 10) * (FAIR - POOR) / 5 + POOR;
8202 else if (rssi < 20)
8203 rssi_qual = (rssi - 15) * (GOOD - FAIR) / 5 + FAIR;
8204 else if (rssi < 30)
8205 rssi_qual = (rssi - 20) * (VERY_GOOD - GOOD) /
8206 10 + GOOD;
8207 else
8208 rssi_qual = (rssi - 30) * (PERFECT - VERY_GOOD) /
8209 10 + VERY_GOOD;
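		/* Piecewise-linear mapping; e.g. rssi = 17 lands in the 15-20
		 * band, giving (17 - 15) * (GOOD - FAIR) / 5 + FAIR = 68. */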
8210
8211 if (ipw2100_get_ordinal(priv, IPW_ORD_STAT_PERCENT_RETRIES,
8212 &tx_retries, &ord_len))
8213 goto fail_get_ordinal;
8214
8215 if (tx_retries > 75)
8216 tx_qual = (90 - tx_retries) * POOR / 15;
8217 else if (tx_retries > 70)
8218 tx_qual = (75 - tx_retries) * (FAIR - POOR) / 5 + POOR;
8219 else if (tx_retries > 65)
8220 tx_qual = (70 - tx_retries) * (GOOD - FAIR) / 5 + FAIR;
8221 else if (tx_retries > 50)
8222 tx_qual = (65 - tx_retries) * (VERY_GOOD - GOOD) /
8223 15 + GOOD;
8224 else
8225 tx_qual = (50 - tx_retries) *
8226 (PERFECT - VERY_GOOD) / 50 + VERY_GOOD;
8227
8228 if (missed_beacons > 50)
8229 beacon_qual = (60 - missed_beacons) * POOR / 10;
8230 else if (missed_beacons > 40)
8231 beacon_qual = (50 - missed_beacons) * (FAIR - POOR) /
8232 10 + POOR;
8233 else if (missed_beacons > 32)
8234 beacon_qual = (40 - missed_beacons) * (GOOD - FAIR) /
8235 18 + FAIR;
8236 else if (missed_beacons > 20)
8237 beacon_qual = (32 - missed_beacons) *
8238 (VERY_GOOD - GOOD) / 20 + GOOD;
8239 else
8240 beacon_qual = (20 - missed_beacons) *
8241 (PERFECT - VERY_GOOD) / 20 + VERY_GOOD;
8242
8243 quality = min(beacon_qual, min(tx_qual, rssi_qual));
8244
8245#ifdef CONFIG_IPW_DEBUG
8246 if (beacon_qual == quality)
8247 IPW_DEBUG_WX("Quality clamped by Missed Beacons\n");
8248 else if (tx_qual == quality)
8249 IPW_DEBUG_WX("Quality clamped by Tx Retries\n");
8250 else if (quality != 100)
8251 IPW_DEBUG_WX("Quality clamped by Signal Strength\n");
8252 else
8253 IPW_DEBUG_WX("Quality not clamped.\n");
8254#endif
8255
8256 wstats->qual.qual = quality;
8257 wstats->qual.level = rssi + IPW2100_RSSI_TO_DBM;
8258 }
8259
8260 wstats->qual.noise = 0;
8261 wstats->qual.updated = 7;
8262 wstats->qual.updated |= IW_QUAL_NOISE_INVALID;
8263
8264 /* FIXME: this is percent and not a # */
8265 wstats->miss.beacon = missed_beacons;
8266
8267 if (ipw2100_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURES,
8268 &tx_failures, &ord_len))
8269 goto fail_get_ordinal;
8270 wstats->discard.retries = tx_failures;
8271
8272 return wstats;
8273
8274 fail_get_ordinal:
8275 IPW_DEBUG_WX("failed querying ordinals.\n");
8276
8277 return (struct iw_statistics *) NULL;
8278}
8279
8280void ipw2100_wx_event_work(struct ipw2100_priv *priv)
8281{
8282 union iwreq_data wrqu;
8283 int len = ETH_ALEN;
8284
8285 if (priv->status & STATUS_STOPPING)
8286 return;
8287
8288 down(&priv->action_sem);
8289
8290 IPW_DEBUG_WX("enter\n");
8291
8292 up(&priv->action_sem);
8293
8294 wrqu.ap_addr.sa_family = ARPHRD_ETHER;
8295
8296 /* Fetch BSSID from the hardware */
8297 if (!(priv->status & (STATUS_ASSOCIATING | STATUS_ASSOCIATED)) ||
8298 priv->status & STATUS_RF_KILL_MASK ||
8299 ipw2100_get_ordinal(priv, IPW_ORD_STAT_ASSN_AP_BSSID,
8300 &priv->bssid, &len)) {
8301 memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN);
8302 } else {
8303 /* We now have the BSSID, so can finish setting to the full
8304 * associated state */
8305 memcpy(wrqu.ap_addr.sa_data, priv->bssid, ETH_ALEN);
8306 memcpy(&priv->ieee->bssid, priv->bssid, ETH_ALEN);
8307 priv->status &= ~STATUS_ASSOCIATING;
8308 priv->status |= STATUS_ASSOCIATED;
8309 netif_carrier_on(priv->net_dev);
8310 if (netif_queue_stopped(priv->net_dev)) {
8311 IPW_DEBUG_INFO("Waking net queue.\n");
8312 netif_wake_queue(priv->net_dev);
8313 } else {
8314 IPW_DEBUG_INFO("Starting net queue.\n");
8315 netif_start_queue(priv->net_dev);
8316 }
8317 }
8318
8319 if (!(priv->status & STATUS_ASSOCIATED)) {
8320 IPW_DEBUG_WX("Configuring ESSID\n");
8321 down(&priv->action_sem);
8322 /* This is a disassociation event, so kick the firmware to
8323 * look for another AP */
8324 if (priv->config & CFG_STATIC_ESSID)
8325 ipw2100_set_essid(priv, priv->essid, priv->essid_len, 0);
8326 else
8327 ipw2100_set_essid(priv, NULL, 0, 0);
8328 up(&priv->action_sem);
8329 }
8330
8331 wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL);
8332}
8333
8334#define IPW2100_FW_MAJOR_VERSION 1
8335#define IPW2100_FW_MINOR_VERSION 3
8336
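/* The packed firmware version word carries the major number in its low byte
 * and the minor number in the next byte, matching IPW2100_FW_VERSION below. */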
8337#define IPW2100_FW_MINOR(x) ((x & 0xff00) >> 8)
8338#define IPW2100_FW_MAJOR(x) (x & 0xff)
8339
8340#define IPW2100_FW_VERSION ((IPW2100_FW_MINOR_VERSION << 8) | \
8341 IPW2100_FW_MAJOR_VERSION)
8342
8343#define IPW2100_FW_PREFIX "ipw2100-" __stringify(IPW2100_FW_MAJOR_VERSION) \
8344"." __stringify(IPW2100_FW_MINOR_VERSION)
8345
8346#define IPW2100_FW_NAME(x) IPW2100_FW_PREFIX "" x ".fw"
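/* With the version constants above, this yields image names of the form
 * ipw2100-1.3.fw (BSS), ipw2100-1.3-i.fw (IBSS) and ipw2100-1.3-p.fw
 * (monitor), matching the mode switch in ipw2100_get_firmware() below. */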
8347
8348
8349/*
8350
8351BINARY FIRMWARE HEADER FORMAT
8352
8353offset length desc
83540 2 version
83552 2 mode == 0:BSS,1:IBSS,2:MONITOR
83564 4 fw_len
83578 4 uc_len
835812 fw_len firmware data
835912 + fw_len uc_len microcode data
8360
8361*/
8362
8363struct ipw2100_fw_header {
8364 short version;
8365 short mode;
8366 unsigned int fw_size;
8367 unsigned int uc_size;
8368} __attribute__ ((packed));
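/* Assuming 2-byte shorts and 4-byte ints, the packed header spans the first
 * 12 bytes of the image; firmware data follows immediately and microcode data
 * begins fw_size bytes later, which is how ipw2100_mod_firmware_load() slices
 * the blob below. */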
8369
8370
8371
8372static int ipw2100_mod_firmware_load(struct ipw2100_fw *fw)
8373{
8374 struct ipw2100_fw_header *h =
8375 (struct ipw2100_fw_header *)fw->fw_entry->data;
8376
8377 if (IPW2100_FW_MAJOR(h->version) != IPW2100_FW_MAJOR_VERSION) {
8378 IPW_DEBUG_WARNING("Firmware image not compatible "
8379 "(detected version id of %u). "
8380 "See Documentation/networking/README.ipw2100\n",
8381 h->version);
8382 return 1;
8383 }
8384
8385 fw->version = h->version;
8386 fw->fw.data = fw->fw_entry->data + sizeof(struct ipw2100_fw_header);
8387 fw->fw.size = h->fw_size;
8388 fw->uc.data = fw->fw.data + h->fw_size;
8389 fw->uc.size = h->uc_size;
8390
8391 return 0;
8392}
8393
8394
8395int ipw2100_get_firmware(struct ipw2100_priv *priv, struct ipw2100_fw *fw)
8396{
8397 char *fw_name;
8398 int rc;
8399
8400 IPW_DEBUG_INFO("%s: Using hotplug firmware load.\n",
8401 priv->net_dev->name);
8402
8403 switch (priv->ieee->iw_mode) {
8404 case IW_MODE_ADHOC:
8405 fw_name = IPW2100_FW_NAME("-i");
8406 break;
8407#ifdef CONFIG_IPW2100_MONITOR
8408 case IW_MODE_MONITOR:
8409 fw_name = IPW2100_FW_NAME("-p");
8410 break;
8411#endif
8412 case IW_MODE_INFRA:
8413 default:
8414 fw_name = IPW2100_FW_NAME("");
8415 break;
8416 }
8417
8418 rc = request_firmware(&fw->fw_entry, fw_name, &priv->pci_dev->dev);
8419
8420 if (rc < 0) {
8421 IPW_DEBUG_ERROR(
8422 "%s: Firmware '%s' not available or load failed.\n",
8423 priv->net_dev->name, fw_name);
8424 return rc;
8425 }
8426 IPW_DEBUG_INFO("firmware data %p size %zd\n", fw->fw_entry->data,
8427 fw->fw_entry->size);
8428
8429 ipw2100_mod_firmware_load(fw);
8430
8431 return 0;
8432}
8433
8434void ipw2100_release_firmware(struct ipw2100_priv *priv,
8435 struct ipw2100_fw *fw)
8436{
8437 fw->version = 0;
8438 if (fw->fw_entry)
8439 release_firmware(fw->fw_entry);
8440 fw->fw_entry = NULL;
8441}
8442
8443
8444int ipw2100_get_fwversion(struct ipw2100_priv *priv, char *buf, size_t max)
8445{
8446 char ver[MAX_FW_VERSION_LEN];
8447 u32 len = MAX_FW_VERSION_LEN;
8448 u32 tmp;
8449 int i;
8450 /* firmware version is an ascii string (max len of 14) */
8451 if (ipw2100_get_ordinal(priv, IPW_ORD_STAT_FW_VER_NUM,
8452 ver, &len))
8453 return -EIO;
8454 tmp = max;
8455 if (len >= max)
8456 len = max - 1;
8457 for (i = 0; i < len; i++)
8458 buf[i] = ver[i];
8459 buf[i] = '\0';
8460 return tmp;
8461}
8462
8463int ipw2100_get_ucodeversion(struct ipw2100_priv *priv, char *buf, size_t max)
8464{
8465 u32 ver;
8466 u32 len = sizeof(ver);
8467 /* microcode version is a 32 bit integer */
8468 if (ipw2100_get_ordinal(priv, IPW_ORD_UCODE_VERSION,
8469 &ver, &len))
8470 return -EIO;
8471 return snprintf(buf, max, "%08X", ver);
8472}
8473
8474/*
8475 * On exit, the firmware will have been freed from the fw list
8476 */
8477int ipw2100_fw_download(struct ipw2100_priv *priv, struct ipw2100_fw *fw)
8478{
8479 /* firmware is constructed of N contiguous entries, each entry is
8480 * structured as:
8481 *
8482 * offset size desc
8483 * 0 4 address to write to
8484 * 4 2 length of data run
8485 * 6 length data
8486 */
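	/* For example, one record that writes 8 bytes to address 0x02000000
	 * (an illustrative address, not taken from a real image) consumes
	 * 4 + 2 + 8 = 14 bytes of the stream; fields are read in the host's
	 * native byte order via the casts below. */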
8487 unsigned int addr;
8488 unsigned short len;
8489
8490 const unsigned char *firmware_data = fw->fw.data;
8491 unsigned int firmware_data_left = fw->fw.size;
8492
8493 while (firmware_data_left > 0) {
8494 addr = *(u32 *)(firmware_data);
8495 firmware_data += 4;
8496 firmware_data_left -= 4;
8497
8498 len = *(u16 *)(firmware_data);
8499 firmware_data += 2;
8500 firmware_data_left -= 2;
8501
8502 if (len > 32) {
8503 IPW_DEBUG_ERROR(
8504 "Invalid firmware run-length of %d bytes\n",
8505 len);
8506 return -EINVAL;
8507 }
8508
8509 write_nic_memory(priv->net_dev, addr, len, firmware_data);
8510 firmware_data += len;
8511 firmware_data_left -= len;
8512 }
8513
8514 return 0;
8515}
8516
8517struct symbol_alive_response {
8518 u8 cmd_id;
8519 u8 seq_num;
8520 u8 ucode_rev;
8521 u8 eeprom_valid;
8522 u16 valid_flags;
8523 u8 IEEE_addr[6];
8524 u16 flags;
8525 u16 pcb_rev;
8526 u16 clock_settle_time; // 1us LSB
8527 u16 powerup_settle_time; // 1us LSB
8528 u16 hop_settle_time; // 1us LSB
8529 u8 date[3]; // month, day, year
8530 u8 time[2]; // hours, minutes
8531 u8 ucode_valid;
8532};
8533
8534int ipw2100_ucode_download(struct ipw2100_priv *priv, struct ipw2100_fw *fw)
8535{
8536 struct net_device *dev = priv->net_dev;
8537 const unsigned char *microcode_data = fw->uc.data;
8538 unsigned int microcode_data_left = fw->uc.size;
8539
8540 struct symbol_alive_response response;
8541 int i, j;
8542 u8 data;
8543
8544 /* Symbol control */
8545 write_nic_word(dev, IPW2100_CONTROL_REG, 0x703);
8546 readl((void *)(dev->base_addr));
8547 write_nic_word(dev, IPW2100_CONTROL_REG, 0x707);
8548 readl((void *)(dev->base_addr));
8549
8550 /* HW config */
8551 write_nic_byte(dev, 0x210014, 0x72); /* fifo width =16 */
8552 readl((void *)(dev->base_addr));
8553 write_nic_byte(dev, 0x210014, 0x72); /* fifo width =16 */
8554 readl((void *)(dev->base_addr));
8555
8556 /* EN_CS_ACCESS bit to reset control store pointer */
8557 write_nic_byte(dev, 0x210000, 0x40);
8558 readl((void *)(dev->base_addr));
8559 write_nic_byte(dev, 0x210000, 0x0);
8560 readl((void *)(dev->base_addr));
8561 write_nic_byte(dev, 0x210000, 0x40);
8562 readl((void *)(dev->base_addr));
8563
8564 /* copy microcode from buffer into Symbol */
8565
8566 while (microcode_data_left > 0) {
8567 write_nic_byte(dev, 0x210010, *microcode_data++);
8568 write_nic_byte(dev, 0x210010, *microcode_data++);
8569 microcode_data_left -= 2;
8570 }
8571
8572 /* EN_CS_ACCESS bit to reset the control store pointer */
8573 write_nic_byte(dev, 0x210000, 0x0);
8574 readl((void *)(dev->base_addr));
8575
8576 /* Enable System (Reg 0)
8577 * first enable causes garbage in RX FIFO */
8578 write_nic_byte(dev, 0x210000, 0x0);
8579 readl((void *)(dev->base_addr));
8580 write_nic_byte(dev, 0x210000, 0x80);
8581 readl((void *)(dev->base_addr));
8582
8583 /* Reset External Baseband Reg */
8584 write_nic_word(dev, IPW2100_CONTROL_REG, 0x703);
8585 readl((void *)(dev->base_addr));
8586 write_nic_word(dev, IPW2100_CONTROL_REG, 0x707);
8587 readl((void *)(dev->base_addr));
8588
8589 /* HW Config (Reg 5) */
8590 write_nic_byte(dev, 0x210014, 0x72); // fifo width =16
8591 readl((void *)(dev->base_addr));
8592 write_nic_byte(dev, 0x210014, 0x72); // fifo width =16
8593 readl((void *)(dev->base_addr));
8594
8595 /* Enable System (Reg 0)
8596 * second enable should be OK */
8597 write_nic_byte(dev, 0x210000, 0x00); // clear enable system
8598 readl((void *)(dev->base_addr));
8599 write_nic_byte(dev, 0x210000, 0x80); // set enable system
8600
 8601 /* check that Symbol is enabled - the loop count was upped from 5 as it
 8602 * wasn't always catching the update */
8603 for (i = 0; i < 10; i++) {
8604 udelay(10);
8605
 8606 /* check the Dino enabled bit */
8607 read_nic_byte(dev, 0x210000, &data);
8608 if (data & 0x1)
8609 break;
8610 }
8611
8612 if (i == 10) {
8613 IPW_DEBUG_ERROR("%s: Error initializing Symbol\n",
8614 dev->name);
8615 return -EIO;
8616 }
8617
8618 /* Get Symbol alive response */
8619 for (i = 0; i < 30; i++) {
8620 /* Read alive response structure */
8621 for (j = 0;
8622 j < (sizeof(struct symbol_alive_response) >> 1);
8623 j++)
8624 read_nic_word(dev, 0x210004,
8625 ((u16 *)&response) + j);
8626
8627 if ((response.cmd_id == 1) &&
8628 (response.ucode_valid == 0x1))
8629 break;
8630 udelay(10);
8631 }
8632
8633 if (i == 30) {
8634 IPW_DEBUG_ERROR("%s: No response from Symbol - hw not alive\n",
8635 dev->name);
8636 printk_buf(IPW_DL_ERROR, (u8*)&response, sizeof(response));
8637 return -EIO;
8638 }
8639
8640 return 0;
8641}
diff --git a/drivers/net/wireless/ipw2100.h b/drivers/net/wireless/ipw2100.h
new file mode 100644
index 000000000000..95a05b554c1a
--- /dev/null
+++ b/drivers/net/wireless/ipw2100.h
@@ -0,0 +1,1195 @@
1/******************************************************************************
2
3 Copyright(c) 2003 - 2005 Intel Corporation. All rights reserved.
4
5 This program is free software; you can redistribute it and/or modify it
6 under the terms of version 2 of the GNU General Public License as
7 published by the Free Software Foundation.
8
9 This program is distributed in the hope that it will be useful, but WITHOUT
10 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 more details.
13
14 You should have received a copy of the GNU General Public License along with
15 this program; if not, write to the Free Software Foundation, Inc., 59
16 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17
18 The full GNU General Public License is included in this distribution in the
19 file called LICENSE.
20
21 Contact Information:
22 James P. Ketrenos <ipw2100-admin@linux.intel.com>
23 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24
25******************************************************************************/
26#ifndef _IPW2100_H
27#define _IPW2100_H
28
29#include <linux/sched.h>
30#include <linux/interrupt.h>
31#include <linux/netdevice.h>
32#include <linux/etherdevice.h>
33#include <linux/list.h>
34#include <linux/delay.h>
35#include <linux/skbuff.h>
36#include <asm/io.h>
37#include <linux/socket.h>
38#include <linux/if_arp.h>
39#include <linux/wireless.h>
40#include <linux/version.h>
41#include <net/iw_handler.h> // new driver API
42
43#include <net/ieee80211.h>
44
45#include <linux/workqueue.h>
46
47struct ipw2100_priv;
48struct ipw2100_tx_packet;
49struct ipw2100_rx_packet;
50
51#ifdef CONFIG_IPW_DEBUG
52enum { IPW_DEBUG_ENABLED = 1 };
53extern u32 ipw2100_debug_level;
54#define IPW_DEBUG(level, message...) \
55do { \
56 if (ipw2100_debug_level & (level)) { \
57 printk(KERN_DEBUG "ipw2100: %c %s ", \
58 in_interrupt() ? 'I' : 'U', __FUNCTION__); \
59 printk(message); \
60 } \
61} while (0)
62#else
63enum { IPW_DEBUG_ENABLED = 0 };
64#define IPW_DEBUG(level, message...) do {} while (0)
65#endif /* CONFIG_IPW_DEBUG */
66
67#define IPW_DL_UNINIT 0x80000000
68#define IPW_DL_NONE 0x00000000
69#define IPW_DL_ALL 0x7FFFFFFF
70
71/*
72 * To use the debug system;
73 *
74 * If you are defining a new debug classification, simply add it to the #define
75 * list here in the form of:
76 *
77 * #define IPW_DL_xxxx VALUE
78 *
79 * shifting value to the left one bit from the previous entry. xxxx should be
80 * the name of the classification (for example, WEP)
81 *
 82 * You then need to either add an IPW_DEBUG_xxxx() macro definition for your
83 * classification, or use IPW_DEBUG(IPW_DL_xxxx, ...) whenever you want
84 * to send output to that classification.
85 *
86 * To add your debug level to the list of levels seen when you perform
87 *
88 * % cat /proc/net/ipw2100/debug_level
89 *
90 * you simply need to add your entry to the ipw2100_debug_levels array.
91 *
92 * If you do not see debug_level in /proc/net/ipw2100 then you do not have
93 * CONFIG_IPW_DEBUG defined in your kernel configuration
94 *
95 */
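/*
 * A minimal sketch of adding a new classification (the name and the bit,
 * here the currently unused bit 27, are purely illustrative):
 *
 *	#define IPW_DL_EXAMPLE (1<<27)
 *	#define IPW_DEBUG_EXAMPLE(f...) IPW_DEBUG(IPW_DL_EXAMPLE, ## f)
 *
 * IPW_DEBUG_EXAMPLE("...") can then be called wherever that output is
 * wanted; adding a matching entry to the ipw2100_debug_levels array makes
 * the new level show up in /proc/net/ipw2100/debug_level.
 */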
96
97#define IPW_DL_ERROR (1<<0)
98#define IPW_DL_WARNING (1<<1)
99#define IPW_DL_INFO (1<<2)
100#define IPW_DL_WX (1<<3)
101#define IPW_DL_HC (1<<5)
102#define IPW_DL_STATE (1<<6)
103
104#define IPW_DL_NOTIF (1<<10)
105#define IPW_DL_SCAN (1<<11)
106#define IPW_DL_ASSOC (1<<12)
107#define IPW_DL_DROP (1<<13)
108
109#define IPW_DL_IOCTL (1<<14)
110#define IPW_DL_RF_KILL (1<<17)
111
112
113#define IPW_DL_MANAGE (1<<15)
114#define IPW_DL_FW (1<<16)
115
116#define IPW_DL_FRAG (1<<21)
117#define IPW_DL_WEP (1<<22)
118#define IPW_DL_TX (1<<23)
119#define IPW_DL_RX (1<<24)
120#define IPW_DL_ISR (1<<25)
121#define IPW_DL_IO (1<<26)
122#define IPW_DL_TRACE (1<<28)
123
124#define IPW_DEBUG_ERROR(f, a...) printk(KERN_ERR DRV_NAME ": " f, ## a)
125#define IPW_DEBUG_WARNING(f, a...) printk(KERN_WARNING DRV_NAME ": " f, ## a)
126#define IPW_DEBUG_INFO(f...) IPW_DEBUG(IPW_DL_INFO, ## f)
127#define IPW_DEBUG_WX(f...) IPW_DEBUG(IPW_DL_WX, ## f)
128#define IPW_DEBUG_SCAN(f...) IPW_DEBUG(IPW_DL_SCAN, ## f)
129#define IPW_DEBUG_NOTIF(f...) IPW_DEBUG(IPW_DL_NOTIF, ## f)
130#define IPW_DEBUG_TRACE(f...) IPW_DEBUG(IPW_DL_TRACE, ## f)
131#define IPW_DEBUG_RX(f...) IPW_DEBUG(IPW_DL_RX, ## f)
132#define IPW_DEBUG_TX(f...) IPW_DEBUG(IPW_DL_TX, ## f)
133#define IPW_DEBUG_ISR(f...) IPW_DEBUG(IPW_DL_ISR, ## f)
134#define IPW_DEBUG_MANAGEMENT(f...) IPW_DEBUG(IPW_DL_MANAGE, ## f)
135#define IPW_DEBUG_WEP(f...) IPW_DEBUG(IPW_DL_WEP, ## f)
136#define IPW_DEBUG_HC(f...) IPW_DEBUG(IPW_DL_HC, ## f)
137#define IPW_DEBUG_FRAG(f...) IPW_DEBUG(IPW_DL_FRAG, ## f)
138#define IPW_DEBUG_FW(f...) IPW_DEBUG(IPW_DL_FW, ## f)
139#define IPW_DEBUG_RF_KILL(f...) IPW_DEBUG(IPW_DL_RF_KILL, ## f)
140#define IPW_DEBUG_DROP(f...) IPW_DEBUG(IPW_DL_DROP, ## f)
141#define IPW_DEBUG_IO(f...) IPW_DEBUG(IPW_DL_IO, ## f)
142#define IPW_DEBUG_IOCTL(f...) IPW_DEBUG(IPW_DL_IOCTL, ## f)
143#define IPW_DEBUG_STATE(f, a...) IPW_DEBUG(IPW_DL_STATE | IPW_DL_ASSOC | IPW_DL_INFO, f, ## a)
144#define IPW_DEBUG_ASSOC(f, a...) IPW_DEBUG(IPW_DL_ASSOC | IPW_DL_INFO, f, ## a)
145
146enum {
147 IPW_HW_STATE_DISABLED = 1,
148 IPW_HW_STATE_ENABLED = 0
149};
150
151struct ssid_context {
152 char ssid[IW_ESSID_MAX_SIZE + 1];
153 int ssid_len;
154 unsigned char bssid[ETH_ALEN];
155 int port_type;
156 int channel;
157
158};
159
160extern const char *port_type_str[];
161extern const char *band_str[];
162
163#define NUMBER_OF_BD_PER_COMMAND_PACKET 1
164#define NUMBER_OF_BD_PER_DATA_PACKET 2
165
166#define IPW_MAX_BDS 6
167#define NUMBER_OF_OVERHEAD_BDS_PER_PACKETR 2
168#define NUMBER_OF_BDS_TO_LEAVE_FOR_COMMANDS 1
169
170#define REQUIRED_SPACE_IN_RING_FOR_COMMAND_PACKET \
171 (IPW_BD_QUEUE_W_R_MIN_SPARE + NUMBER_OF_BD_PER_COMMAND_PACKET)
172
173struct bd_status {
174 union {
175 struct { u8 nlf:1, txType:2, intEnabled:1, reserved:4;} fields;
176 u8 field;
177 } info;
178} __attribute__ ((packed));
179
180struct ipw2100_bd {
181 u32 host_addr;
182 u32 buf_length;
183 struct bd_status status;
184 /* number of fragments for frame (should be set only for
185 * 1st TBD) */
186 u8 num_fragments;
187 u8 reserved[6];
188} __attribute__ ((packed));
189
190#define IPW_BD_QUEUE_LENGTH(n) (1<<n)
191#define IPW_BD_ALIGNMENT(L) (L*sizeof(struct ipw2100_bd))
192
193#define IPW_BD_STATUS_TX_FRAME_802_3 0x00
194#define IPW_BD_STATUS_TX_FRAME_NOT_LAST_FRAGMENT 0x01
195#define IPW_BD_STATUS_TX_FRAME_COMMAND 0x02
196#define IPW_BD_STATUS_TX_FRAME_802_11 0x04
197#define IPW_BD_STATUS_TX_INTERRUPT_ENABLE 0x08
198
199struct ipw2100_bd_queue {
200 /* driver (virtual) pointer to queue */
201 struct ipw2100_bd *drv;
202
203 /* firmware (physical) pointer to queue */
204 dma_addr_t nic;
205
206 /* Length of phy memory allocated for BDs */
207 u32 size;
208
209 /* Number of BDs in queue (and in array) */
210 u32 entries;
211
212 /* Number of available BDs (invalid for NIC BDs) */
213 u32 available;
214
215 /* Offset of oldest used BD in array (next one to
216 * check for completion) */
217 u32 oldest;
218
219 /* Offset of next available (unused) BD */
220 u32 next;
221};
222
223#define RX_QUEUE_LENGTH 256
224#define TX_QUEUE_LENGTH 256
225#define HW_QUEUE_LENGTH 256
226
227#define TX_PENDED_QUEUE_LENGTH (TX_QUEUE_LENGTH / NUMBER_OF_BD_PER_DATA_PACKET)
228
229#define STATUS_TYPE_MASK 0x0000000f
230#define COMMAND_STATUS_VAL 0
231#define STATUS_CHANGE_VAL 1
232#define P80211_DATA_VAL 2
233#define P8023_DATA_VAL 3
234#define HOST_NOTIFICATION_VAL 4
235
236#define IPW2100_RSSI_TO_DBM (-98)
237
238struct ipw2100_status {
239 u32 frame_size;
240 u16 status_fields;
241 u8 flags;
242#define IPW_STATUS_FLAG_DECRYPTED (1<<0)
243#define IPW_STATUS_FLAG_WEP_ENCRYPTED (1<<1)
244#define IPW_STATUS_FLAG_CRC_ERROR (1<<2)
245 u8 rssi;
246} __attribute__ ((packed));
247
248struct ipw2100_status_queue {
249 /* driver (virtual) pointer to queue */
250 struct ipw2100_status *drv;
251
252 /* firmware (physical) pointer to queue */
253 dma_addr_t nic;
254
255 /* Length of phy memory allocated for BDs */
256 u32 size;
257};
258
259#define HOST_COMMAND_PARAMS_REG_LEN 100
260#define CMD_STATUS_PARAMS_REG_LEN 3
261
262#define IPW_WPA_CAPABILITIES 0x1
263#define IPW_WPA_LISTENINTERVAL 0x2
264#define IPW_WPA_AP_ADDRESS 0x4
265
266#define IPW_MAX_VAR_IE_LEN ((HOST_COMMAND_PARAMS_REG_LEN - 4) * sizeof(u32))
267
268struct ipw2100_wpa_assoc_frame {
269 u16 fixed_ie_mask;
270 struct {
271 u16 capab_info;
272 u16 listen_interval;
273 u8 current_ap[ETH_ALEN];
274 } fixed_ies;
275 u32 var_ie_len;
276 u8 var_ie[IPW_MAX_VAR_IE_LEN];
277};
278
279#define IPW_BSS 1
280#define IPW_MONITOR 2
281#define IPW_IBSS 3
282
283/**
284 * @struct _tx_cmd - HWCommand
285 * @brief H/W command structure.
286 */
287struct ipw2100_cmd_header {
288 u32 host_command_reg;
289 u32 host_command_reg1;
290 u32 sequence;
291 u32 host_command_len_reg;
292 u32 host_command_params_reg[HOST_COMMAND_PARAMS_REG_LEN];
293 u32 cmd_status_reg;
294 u32 cmd_status_params_reg[CMD_STATUS_PARAMS_REG_LEN];
295 u32 rxq_base_ptr;
296 u32 rxq_next_ptr;
297 u32 rxq_host_ptr;
298 u32 txq_base_ptr;
299 u32 txq_next_ptr;
300 u32 txq_host_ptr;
301 u32 tx_status_reg;
302 u32 reserved;
303 u32 status_change_reg;
304 u32 reserved1[3];
305 u32 *ordinal1_ptr;
306 u32 *ordinal2_ptr;
307} __attribute__ ((packed));
308
309struct ipw2100_data_header {
310 u32 host_command_reg;
311 u32 host_command_reg1;
 312 u8 encrypted; // BOOLEAN in win! TRUE if frame is encrypted by the driver
 313 u8 needs_encryption; // BOOLEAN in win! TRUE if frame needs to be encrypted by the NIC
314 u8 wep_index; // 0 no key, 1-4 key index, 0xff immediate key
315 u8 key_size; // 0 no imm key, 0x5 64bit encr, 0xd 128bit encr, 0x10 128bit encr and 128bit IV
316 u8 key[16];
317 u8 reserved[10]; // f/w reserved
318 u8 src_addr[ETH_ALEN];
319 u8 dst_addr[ETH_ALEN];
320 u16 fragment_size;
321} __attribute__ ((packed));
322
323/* Host command data structure */
324struct host_command {
325 u32 host_command; // COMMAND ID
326 u32 host_command1; // COMMAND ID
327 u32 host_command_sequence; // UNIQUE COMMAND NUMBER (ID)
328 u32 host_command_length; // LENGTH
329 u32 host_command_parameters[HOST_COMMAND_PARAMS_REG_LEN]; // COMMAND PARAMETERS
330} __attribute__ ((packed));
331
332
333typedef enum {
334 POWER_ON_RESET,
335 EXIT_POWER_DOWN_RESET,
336 SW_RESET,
337 EEPROM_RW,
338 SW_RE_INIT
339} ipw2100_reset_event;
340
341enum {
342 COMMAND = 0xCAFE,
343 DATA,
344 RX
345};
346
347
348struct ipw2100_tx_packet {
349 int type;
350 int index;
351 union {
352 struct { /* COMMAND */
353 struct ipw2100_cmd_header* cmd;
354 dma_addr_t cmd_phys;
355 } c_struct;
356 struct { /* DATA */
357 struct ipw2100_data_header* data;
358 dma_addr_t data_phys;
359 struct ieee80211_txb *txb;
360 } d_struct;
361 } info;
362 int jiffy_start;
363
364 struct list_head list;
365};
366
367
368struct ipw2100_rx_packet {
369 struct ipw2100_rx *rxp;
370 dma_addr_t dma_addr;
371 int jiffy_start;
372 struct sk_buff *skb;
373 struct list_head list;
374};
375
376#define FRAG_DISABLED (1<<31)
377#define RTS_DISABLED (1<<31)
378#define MAX_RTS_THRESHOLD 2304U
379#define MIN_RTS_THRESHOLD 1U
380#define DEFAULT_RTS_THRESHOLD 1000U
381
382#define DEFAULT_BEACON_INTERVAL 100U
383#define DEFAULT_SHORT_RETRY_LIMIT 7U
384#define DEFAULT_LONG_RETRY_LIMIT 4U
385
386struct ipw2100_ordinals {
387 u32 table1_addr;
388 u32 table2_addr;
389 u32 table1_size;
390 u32 table2_size;
391};
392
393/* Host Notification header */
394struct ipw2100_notification {
395 u32 hnhdr_subtype; /* type of host notification */
396 u32 hnhdr_size; /* size in bytes of data
397 or number of entries, if table.
398 Does NOT include header */
399} __attribute__ ((packed));
400
401#define MAX_KEY_SIZE 16
402#define MAX_KEYS 8
403
404#define IPW2100_WEP_ENABLE (1<<1)
405#define IPW2100_WEP_DROP_CLEAR (1<<2)
406
407#define IPW_NONE_CIPHER (1<<0)
408#define IPW_WEP40_CIPHER (1<<1)
409#define IPW_TKIP_CIPHER (1<<2)
410#define IPW_CCMP_CIPHER (1<<4)
411#define IPW_WEP104_CIPHER (1<<5)
412#define IPW_CKIP_CIPHER (1<<6)
413
414#define IPW_AUTH_OPEN 0
415#define IPW_AUTH_SHARED 1
416
417struct statistic {
418 int value;
419 int hi;
420 int lo;
421};
422
423#define INIT_STAT(x) do { \
424 (x)->value = (x)->hi = 0; \
425 (x)->lo = 0x7fffffff; \
426} while (0)
427#define SET_STAT(x,y) do { \
428 (x)->value = y; \
429 if ((x)->value > (x)->hi) (x)->hi = (x)->value; \
430 if ((x)->value < (x)->lo) (x)->lo = (x)->value; \
431} while (0)
432#define INC_STAT(x) do { if (++(x)->value > (x)->hi) (x)->hi = (x)->value; } \
433while (0)
434#define DEC_STAT(x) do { if (--(x)->value < (x)->lo) (x)->lo = (x)->value; } \
435while (0)
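/*
 * Usage sketch (illustrative only), tracking a high/low watermark on the
 * tx free list via the tx_free_stat member of struct ipw2100_priv below:
 *
 *	INIT_STAT(&priv->tx_free_stat);
 *	SET_STAT(&priv->tx_free_stat, TX_QUEUE_LENGTH);
 *	DEC_STAT(&priv->tx_free_stat);	- a buffer was taken off the free list
 *	INC_STAT(&priv->tx_free_stat);	- the buffer was returned
 */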
436
437#define IPW2100_ERROR_QUEUE 5
438
439/* Power management code: enable or disable? */
440enum {
441#ifdef CONFIG_PM
442 IPW2100_PM_DISABLED = 0,
443 PM_STATE_SIZE = 16,
444#else
445 IPW2100_PM_DISABLED = 1,
446 PM_STATE_SIZE = 0,
447#endif
448};
449
450#define STATUS_POWERED (1<<0)
451#define STATUS_CMD_ACTIVE (1<<1) /**< host command in progress */
452#define STATUS_RUNNING (1<<2) /* Card initialized, but not enabled */
453#define STATUS_ENABLED (1<<3) /* Card enabled -- can scan,Tx,Rx */
454#define STATUS_STOPPING (1<<4) /* Card is in shutdown phase */
455#define STATUS_INITIALIZED (1<<5) /* Card is ready for external calls */
456#define STATUS_ASSOCIATING (1<<9) /* Associated, but no BSSID yet */
457#define STATUS_ASSOCIATED (1<<10) /* Associated and BSSID valid */
458#define STATUS_INT_ENABLED (1<<11)
459#define STATUS_RF_KILL_HW (1<<12)
460#define STATUS_RF_KILL_SW (1<<13)
461#define STATUS_RF_KILL_MASK (STATUS_RF_KILL_HW | STATUS_RF_KILL_SW)
462#define STATUS_EXIT_PENDING (1<<14)
463
464#define STATUS_SCAN_PENDING (1<<23)
465#define STATUS_SCANNING (1<<24)
466#define STATUS_SCAN_ABORTING (1<<25)
467#define STATUS_SCAN_COMPLETE (1<<26)
468#define STATUS_WX_EVENT_PENDING (1<<27)
469#define STATUS_RESET_PENDING (1<<29)
470#define STATUS_SECURITY_UPDATED (1<<30) /* Security sync needed */
471
472
473
474/* Internal NIC states */
475#define IPW_STATE_INITIALIZED (1<<0)
476#define IPW_STATE_COUNTRY_FOUND (1<<1)
477#define IPW_STATE_ASSOCIATED (1<<2)
478#define IPW_STATE_ASSN_LOST (1<<3)
479#define IPW_STATE_ASSN_CHANGED (1<<4)
480#define IPW_STATE_SCAN_COMPLETE (1<<5)
481#define IPW_STATE_ENTERED_PSP (1<<6)
482#define IPW_STATE_LEFT_PSP (1<<7)
483#define IPW_STATE_RF_KILL (1<<8)
484#define IPW_STATE_DISABLED (1<<9)
485#define IPW_STATE_POWER_DOWN (1<<10)
486#define IPW_STATE_SCANNING (1<<11)
487
488
489
490#define CFG_STATIC_CHANNEL (1<<0) /* Restrict assoc. to single channel */
491#define CFG_STATIC_ESSID (1<<1) /* Restrict assoc. to single SSID */
492#define CFG_STATIC_BSSID (1<<2) /* Restrict assoc. to single BSSID */
493#define CFG_CUSTOM_MAC (1<<3)
494#define CFG_LONG_PREAMBLE (1<<4)
495#define CFG_ASSOCIATE (1<<6)
496#define CFG_FIXED_RATE (1<<7)
497#define CFG_ADHOC_CREATE (1<<8)
498#define CFG_C3_DISABLED (1<<9)
499#define CFG_PASSIVE_SCAN (1<<10)
500
501#define CAP_SHARED_KEY (1<<0) /* Off = OPEN */
502#define CAP_PRIVACY_ON (1<<1) /* Off = No privacy */
503
504struct ipw2100_priv {
505
506 int stop_hang_check; /* Set 1 when shutting down to kill hang_check */
507 int stop_rf_kill; /* Set 1 when shutting down to kill rf_kill */
508
509 struct ieee80211_device *ieee;
510 unsigned long status;
511 unsigned long config;
512 unsigned long capability;
513
514 /* Statistics */
515 int resets;
516 int reset_backoff;
517
518 /* Context */
519 u8 essid[IW_ESSID_MAX_SIZE];
520 u8 essid_len;
521 u8 bssid[ETH_ALEN];
522 u8 channel;
523 int last_mode;
524 int cstate_limit;
525
526 unsigned long connect_start;
527 unsigned long last_reset;
528
529 u32 channel_mask;
530 u32 fatal_error;
531 u32 fatal_errors[IPW2100_ERROR_QUEUE];
532 u32 fatal_index;
533 int eeprom_version;
534 int firmware_version;
535 unsigned long hw_features;
536 int hangs;
537 u32 last_rtc;
538 int dump_raw; /* 1 to dump raw bytes in /sys/.../memory */
539 u8* snapshot[0x30];
540
541 u8 mandatory_bssid_mac[ETH_ALEN];
542 u8 mac_addr[ETH_ALEN];
543
544 int power_mode;
545
546 /* WEP data */
547 struct ieee80211_security sec;
548 int messages_sent;
549
550
551 int short_retry_limit;
552 int long_retry_limit;
553
554 u32 rts_threshold;
555 u32 frag_threshold;
556
557 int in_isr;
558
559 u32 tx_rates;
560 int tx_power;
561 u32 beacon_interval;
562
563 char nick[IW_ESSID_MAX_SIZE + 1];
564
565 struct ipw2100_status_queue status_queue;
566
567 struct statistic txq_stat;
568 struct statistic rxq_stat;
569 struct ipw2100_bd_queue rx_queue;
570 struct ipw2100_bd_queue tx_queue;
571 struct ipw2100_rx_packet *rx_buffers;
572
573 struct statistic fw_pend_stat;
574 struct list_head fw_pend_list;
575
576 struct statistic msg_free_stat;
577 struct statistic msg_pend_stat;
578 struct list_head msg_free_list;
579 struct list_head msg_pend_list;
580 struct ipw2100_tx_packet *msg_buffers;
581
582 struct statistic tx_free_stat;
583 struct statistic tx_pend_stat;
584 struct list_head tx_free_list;
585 struct list_head tx_pend_list;
586 struct ipw2100_tx_packet *tx_buffers;
587
588 struct ipw2100_ordinals ordinals;
589
590 struct pci_dev *pci_dev;
591
592 struct proc_dir_entry *dir_dev;
593
594 struct net_device *net_dev;
595 struct iw_statistics wstats;
596
597 struct tasklet_struct irq_tasklet;
598
599 struct workqueue_struct *workqueue;
600 struct work_struct reset_work;
601 struct work_struct security_work;
602 struct work_struct wx_event_work;
603 struct work_struct hang_check;
604 struct work_struct rf_kill;
605
606 u32 interrupts;
607 int tx_interrupts;
608 int rx_interrupts;
609 int inta_other;
610
611 spinlock_t low_lock;
612 struct semaphore action_sem;
613 struct semaphore adapter_sem;
614
615 wait_queue_head_t wait_command_queue;
616};
617
618
619/*********************************************************
620 * Host Command -> From Driver to FW
621 *********************************************************/
622
623/**
624 * Host command identifiers
625 */
626#define HOST_COMPLETE 2
627#define SYSTEM_CONFIG 6
628#define SSID 8
629#define MANDATORY_BSSID 9
630#define AUTHENTICATION_TYPE 10
631#define ADAPTER_ADDRESS 11
632#define PORT_TYPE 12
633#define INTERNATIONAL_MODE 13
634#define CHANNEL 14
635#define RTS_THRESHOLD 15
636#define FRAG_THRESHOLD 16
637#define POWER_MODE 17
638#define TX_RATES 18
639#define BASIC_TX_RATES 19
640#define WEP_KEY_INFO 20
641#define WEP_KEY_INDEX 25
642#define WEP_FLAGS 26
643#define ADD_MULTICAST 27
644#define CLEAR_ALL_MULTICAST 28
645#define BEACON_INTERVAL 29
646#define ATIM_WINDOW 30
647#define CLEAR_STATISTICS 31
648#define SEND 33
649#define TX_POWER_INDEX 36
650#define BROADCAST_SCAN 43
651#define CARD_DISABLE 44
652#define PREFERRED_BSSID 45
653#define SET_SCAN_OPTIONS 46
654#define SCAN_DWELL_TIME 47
655#define SWEEP_TABLE 48
656#define AP_OR_STATION_TABLE 49
657#define GROUP_ORDINALS 50
658#define SHORT_RETRY_LIMIT 51
659#define LONG_RETRY_LIMIT 52
660
661#define HOST_PRE_POWER_DOWN 58
662#define CARD_DISABLE_PHY_OFF 61
663#define MSDU_TX_RATES 62
664
665
666/* Rogue AP Detection */
667#define SET_STATION_STAT_BITS 64
668#define CLEAR_STATIONS_STAT_BITS 65
669#define LEAP_ROGUE_MODE 66 //TODO tbw replaced by CFG_LEAP_ROGUE_AP
670#define SET_SECURITY_INFORMATION 67
671#define DISASSOCIATION_BSSID 68
672#define SET_WPA_IE 69
673
674
675
676/* system configuration bit mask: */
677#define IPW_CFG_MONITOR 0x00004
678#define IPW_CFG_PREAMBLE_AUTO 0x00010
679#define IPW_CFG_IBSS_AUTO_START 0x00020
680#define IPW_CFG_LOOPBACK 0x00100
681#define IPW_CFG_ANSWER_BCSSID_PROBE 0x00800
682#define IPW_CFG_BT_SIDEBAND_SIGNAL 0x02000
683#define IPW_CFG_802_1x_ENABLE 0x04000
684#define IPW_CFG_BSS_MASK 0x08000
685#define IPW_CFG_IBSS_MASK 0x10000
686
687#define IPW_SCAN_NOASSOCIATE (1<<0)
688#define IPW_SCAN_MIXED_CELL (1<<1)
689/* RESERVED (1<<2) */
690#define IPW_SCAN_PASSIVE (1<<3)
691
692#define IPW_NIC_FATAL_ERROR 0x2A7F0
693#define IPW_ERROR_ADDR(x) (x & 0x3FFFF)
694#define IPW_ERROR_CODE(x) ((x & 0xFF000000) >> 24)
695#define IPW2100_ERR_C3_CORRUPTION (0x10 << 24)
696#define IPW2100_ERR_MSG_TIMEOUT (0x11 << 24)
697#define IPW2100_ERR_FW_LOAD (0x12 << 24)
698
699#define IPW_MEM_SRAM_HOST_SHARED_LOWER_BOUND 0x200
700#define IPW_MEM_SRAM_HOST_INTERRUPT_AREA_LOWER_BOUND IPW_MEM_SRAM_HOST_SHARED_LOWER_BOUND + 0x0D80
701
702#define IPW_MEM_HOST_SHARED_RX_BD_BASE (IPW_MEM_SRAM_HOST_SHARED_LOWER_BOUND + 0x40)
703#define IPW_MEM_HOST_SHARED_RX_STATUS_BASE (IPW_MEM_SRAM_HOST_SHARED_LOWER_BOUND + 0x44)
704#define IPW_MEM_HOST_SHARED_RX_BD_SIZE (IPW_MEM_SRAM_HOST_SHARED_LOWER_BOUND + 0x48)
705#define IPW_MEM_HOST_SHARED_RX_READ_INDEX (IPW_MEM_SRAM_HOST_SHARED_LOWER_BOUND + 0xa0)
706
707#define IPW_MEM_HOST_SHARED_TX_QUEUE_BD_BASE (IPW_MEM_SRAM_HOST_SHARED_LOWER_BOUND + 0x00)
708#define IPW_MEM_HOST_SHARED_TX_QUEUE_BD_SIZE (IPW_MEM_SRAM_HOST_SHARED_LOWER_BOUND + 0x04)
709#define IPW_MEM_HOST_SHARED_TX_QUEUE_READ_INDEX (IPW_MEM_SRAM_HOST_SHARED_LOWER_BOUND + 0x80)
710
711#define IPW_MEM_HOST_SHARED_RX_WRITE_INDEX \
712 (IPW_MEM_SRAM_HOST_INTERRUPT_AREA_LOWER_BOUND + 0x20)
713
714#define IPW_MEM_HOST_SHARED_TX_QUEUE_WRITE_INDEX \
715 (IPW_MEM_SRAM_HOST_INTERRUPT_AREA_LOWER_BOUND)
716
717#define IPW_MEM_HOST_SHARED_ORDINALS_TABLE_1 (IPW_MEM_SRAM_HOST_SHARED_LOWER_BOUND + 0x180)
718#define IPW_MEM_HOST_SHARED_ORDINALS_TABLE_2 (IPW_MEM_SRAM_HOST_SHARED_LOWER_BOUND + 0x184)
719
720#define IPW2100_INTA_TX_TRANSFER (0x00000001) // Bit 0 (LSB)
721#define IPW2100_INTA_RX_TRANSFER (0x00000002) // Bit 1
722#define IPW2100_INTA_TX_COMPLETE (0x00000004) // Bit 2
723#define IPW2100_INTA_EVENT_INTERRUPT (0x00000008) // Bit 3
724#define IPW2100_INTA_STATUS_CHANGE (0x00000010) // Bit 4
725#define IPW2100_INTA_BEACON_PERIOD_EXPIRED (0x00000020) // Bit 5
726#define IPW2100_INTA_SLAVE_MODE_HOST_COMMAND_DONE (0x00010000) // Bit 16
727#define IPW2100_INTA_FW_INIT_DONE (0x01000000) // Bit 24
728#define IPW2100_INTA_FW_CALIBRATION_CALC (0x02000000) // Bit 25
729#define IPW2100_INTA_FATAL_ERROR (0x40000000) // Bit 30
730#define IPW2100_INTA_PARITY_ERROR (0x80000000) // Bit 31 (MSB)
731
732#define IPW_AUX_HOST_RESET_REG_PRINCETON_RESET (0x00000001)
733#define IPW_AUX_HOST_RESET_REG_FORCE_NMI (0x00000002)
734#define IPW_AUX_HOST_RESET_REG_PCI_HOST_CLUSTER_FATAL_NMI (0x00000004)
735#define IPW_AUX_HOST_RESET_REG_CORE_FATAL_NMI (0x00000008)
736#define IPW_AUX_HOST_RESET_REG_SW_RESET (0x00000080)
737#define IPW_AUX_HOST_RESET_REG_MASTER_DISABLED (0x00000100)
738#define IPW_AUX_HOST_RESET_REG_STOP_MASTER (0x00000200)
739
740#define IPW_AUX_HOST_GP_CNTRL_BIT_CLOCK_READY (0x00000001) // Bit 0 (LSB)
741#define IPW_AUX_HOST_GP_CNTRL_BIT_HOST_ALLOWS_STANDBY (0x00000002) // Bit 1
742#define IPW_AUX_HOST_GP_CNTRL_BIT_INIT_DONE (0x00000004) // Bit 2
743#define IPW_AUX_HOST_GP_CNTRL_BITS_SYS_CONFIG (0x000007c0) // Bits 6-10
744#define IPW_AUX_HOST_GP_CNTRL_BIT_BUS_TYPE (0x00000200) // Bit 9
745#define IPW_AUX_HOST_GP_CNTRL_BIT_BAR0_BLOCK_SIZE (0x00000400) // Bit 10
746#define IPW_AUX_HOST_GP_CNTRL_BIT_USB_MODE (0x20000000) // Bit 29
747#define IPW_AUX_HOST_GP_CNTRL_BIT_HOST_FORCES_SYS_CLK (0x40000000) // Bit 30
748#define IPW_AUX_HOST_GP_CNTRL_BIT_FW_FORCES_SYS_CLK (0x80000000) // Bit 31 (MSB)
749
750#define IPW_BIT_GPIO_GPIO1_MASK 0x0000000C
751#define IPW_BIT_GPIO_GPIO3_MASK 0x000000C0
752#define IPW_BIT_GPIO_GPIO1_ENABLE 0x00000008
753#define IPW_BIT_GPIO_RF_KILL 0x00010000
754
755#define IPW_BIT_GPIO_LED_OFF 0x00002000 // Bit 13 = 1
756
757#define IPW_REG_DOMAIN_0_OFFSET 0x0000
758#define IPW_REG_DOMAIN_1_OFFSET IPW_MEM_SRAM_HOST_SHARED_LOWER_BOUND
759
760#define IPW_REG_INTA IPW_REG_DOMAIN_0_OFFSET + 0x0008
761#define IPW_REG_INTA_MASK IPW_REG_DOMAIN_0_OFFSET + 0x000C
762#define IPW_REG_INDIRECT_ACCESS_ADDRESS IPW_REG_DOMAIN_0_OFFSET + 0x0010
763#define IPW_REG_INDIRECT_ACCESS_DATA IPW_REG_DOMAIN_0_OFFSET + 0x0014
764#define IPW_REG_AUTOINCREMENT_ADDRESS IPW_REG_DOMAIN_0_OFFSET + 0x0018
765#define IPW_REG_AUTOINCREMENT_DATA IPW_REG_DOMAIN_0_OFFSET + 0x001C
766#define IPW_REG_RESET_REG IPW_REG_DOMAIN_0_OFFSET + 0x0020
767#define IPW_REG_GP_CNTRL IPW_REG_DOMAIN_0_OFFSET + 0x0024
768#define IPW_REG_GPIO IPW_REG_DOMAIN_0_OFFSET + 0x0030
769#define IPW_REG_FW_TYPE IPW_REG_DOMAIN_1_OFFSET + 0x0188
770#define IPW_REG_FW_VERSION IPW_REG_DOMAIN_1_OFFSET + 0x018C
771#define IPW_REG_FW_COMPATABILITY_VERSION IPW_REG_DOMAIN_1_OFFSET + 0x0190
772
773#define IPW_REG_INDIRECT_ADDR_MASK 0x00FFFFFC
774
775#define IPW_INTERRUPT_MASK 0xC1010013
776
777#define IPW2100_CONTROL_REG 0x220000
778#define IPW2100_CONTROL_PHY_OFF 0x8
779
780#define IPW2100_COMMAND 0x00300004
781#define IPW2100_COMMAND_PHY_ON 0x0
782#define IPW2100_COMMAND_PHY_OFF 0x1
783
 784/* in the DEBUG_AREA, memory values always read back as 0xd55555d5 */
785#define IPW_REG_DOA_DEBUG_AREA_START IPW_REG_DOMAIN_0_OFFSET + 0x0090
786#define IPW_REG_DOA_DEBUG_AREA_END IPW_REG_DOMAIN_0_OFFSET + 0x00FF
787#define IPW_DATA_DOA_DEBUG_VALUE 0xd55555d5
788
789#define IPW_INTERNAL_REGISTER_HALT_AND_RESET 0x003000e0
790
791#define IPW_WAIT_CLOCK_STABILIZATION_DELAY 50 // micro seconds
792#define IPW_WAIT_RESET_ARC_COMPLETE_DELAY 10 // micro seconds
793#define IPW_WAIT_RESET_MASTER_ASSERT_COMPLETE_DELAY 10 // micro seconds
794
795// BD ring queue read/write difference
796#define IPW_BD_QUEUE_W_R_MIN_SPARE 2
797
798#define IPW_CACHE_LINE_LENGTH_DEFAULT 0x80
799
800#define IPW_CARD_DISABLE_PHY_OFF_COMPLETE_WAIT 100 // 100 milli
801#define IPW_PREPARE_POWER_DOWN_COMPLETE_WAIT 100 // 100 milli
802
803
804
805
806#define IPW_HEADER_802_11_SIZE sizeof(struct ieee80211_hdr_3addr)
807#define IPW_MAX_80211_PAYLOAD_SIZE 2304U
808#define IPW_MAX_802_11_PAYLOAD_LENGTH 2312
809#define IPW_MAX_ACCEPTABLE_TX_FRAME_LENGTH 1536
810#define IPW_MIN_ACCEPTABLE_RX_FRAME_LENGTH 60
811#define IPW_MAX_ACCEPTABLE_RX_FRAME_LENGTH \
812 (IPW_MAX_ACCEPTABLE_TX_FRAME_LENGTH + IPW_HEADER_802_11_SIZE - \
813 sizeof(struct ethhdr))
814
815#define IPW_802_11_FCS_LENGTH 4
816#define IPW_RX_NIC_BUFFER_LENGTH \
817 (IPW_MAX_802_11_PAYLOAD_LENGTH + IPW_HEADER_802_11_SIZE + \
818 IPW_802_11_FCS_LENGTH)
819
820#define IPW_802_11_PAYLOAD_OFFSET \
821 (sizeof(struct ieee80211_hdr_3addr) + \
822 sizeof(struct ieee80211_snap_hdr))
823
824struct ipw2100_rx {
825 union {
826 unsigned char payload[IPW_RX_NIC_BUFFER_LENGTH];
827 struct ieee80211_hdr header;
828 u32 status;
829 struct ipw2100_notification notification;
830 struct ipw2100_cmd_header command;
831 } rx_data;
832} __attribute__ ((packed));
833
 834/* Bits 0-7 are for 802.11b tx rates; bits 5-7 are reserved */
835#define TX_RATE_1_MBIT 0x0001
836#define TX_RATE_2_MBIT 0x0002
837#define TX_RATE_5_5_MBIT 0x0004
838#define TX_RATE_11_MBIT 0x0008
839#define TX_RATE_MASK 0x000F
840#define DEFAULT_TX_RATES 0x000F
841
842#define IPW_POWER_MODE_CAM 0x00 //(always on)
843#define IPW_POWER_INDEX_1 0x01
844#define IPW_POWER_INDEX_2 0x02
845#define IPW_POWER_INDEX_3 0x03
846#define IPW_POWER_INDEX_4 0x04
847#define IPW_POWER_INDEX_5 0x05
848#define IPW_POWER_AUTO 0x06
849#define IPW_POWER_MASK 0x0F
850#define IPW_POWER_ENABLED 0x10
851#define IPW_POWER_LEVEL(x) ((x) & IPW_POWER_MASK)
852
853#define IPW_TX_POWER_AUTO 0
854#define IPW_TX_POWER_ENHANCED 1
855
856#define IPW_TX_POWER_DEFAULT 32
857#define IPW_TX_POWER_MIN 0
858#define IPW_TX_POWER_MAX 16
859#define IPW_TX_POWER_MIN_DBM (-12)
860#define IPW_TX_POWER_MAX_DBM 16
861
 862#define FW_SCAN_DONOT_ASSOCIATE 0x0001 // Don't attempt to associate after scan
 863#define FW_SCAN_PASSIVE 0x0008 // Force PASSIVE scan
864
865#define REG_MIN_CHANNEL 0
866#define REG_MAX_CHANNEL 14
867
868#define REG_CHANNEL_MASK 0x00003FFF
869#define IPW_IBSS_11B_DEFAULT_MASK 0x87ff
870
871#define DIVERSITY_EITHER 0 // Use both antennas
872#define DIVERSITY_ANTENNA_A 1 // Use antenna A
873#define DIVERSITY_ANTENNA_B 2 // Use antenna B
874
875
876#define HOST_COMMAND_WAIT 0
877#define HOST_COMMAND_NO_WAIT 1
878
879#define LOCK_NONE 0
880#define LOCK_DRIVER 1
881#define LOCK_FW 2
882
883#define TYPE_SWEEP_ORD 0x000D
884#define TYPE_IBSS_STTN_ORD 0x000E
885#define TYPE_BSS_AP_ORD 0x000F
886#define TYPE_RAW_BEACON_ENTRY 0x0010
887#define TYPE_CALIBRATION_DATA 0x0011
888#define TYPE_ROGUE_AP_DATA 0x0012
889#define TYPE_ASSOCIATION_REQUEST 0x0013
890#define TYPE_REASSOCIATION_REQUEST 0x0014
891
892
893#define HW_FEATURE_RFKILL (0x0001)
894#define RF_KILLSWITCH_OFF (1)
895#define RF_KILLSWITCH_ON (0)
896
897#define IPW_COMMAND_POOL_SIZE 40
898
899#define IPW_START_ORD_TAB_1 1
900#define IPW_START_ORD_TAB_2 1000
901
902#define IPW_ORD_TAB_1_ENTRY_SIZE sizeof(u32)
903
904#define IS_ORDINAL_TABLE_ONE(mgr,id) \
905 ((id >= IPW_START_ORD_TAB_1) && (id < mgr->table1_size))
906#define IS_ORDINAL_TABLE_TWO(mgr,id) \
907 ((id >= IPW_START_ORD_TAB_2) && (id < (mgr->table2_size + IPW_START_ORD_TAB_2)))
908
909#define BSS_ID_LENGTH 6
910
911// Fixed size data: Ordinal Table 1
912typedef enum _ORDINAL_TABLE_1 { // NS - means Not Supported by FW
913// Transmit statistics
914 IPW_ORD_STAT_TX_HOST_REQUESTS = 1,// # of requested Host Tx's (MSDU)
915 IPW_ORD_STAT_TX_HOST_COMPLETE, // # of successful Host Tx's (MSDU)
916 IPW_ORD_STAT_TX_DIR_DATA, // # of successful Directed Tx's (MSDU)
917
918 IPW_ORD_STAT_TX_DIR_DATA1 = 4, // # of successful Directed Tx's (MSDU) @ 1MB
919 IPW_ORD_STAT_TX_DIR_DATA2, // # of successful Directed Tx's (MSDU) @ 2MB
920 IPW_ORD_STAT_TX_DIR_DATA5_5, // # of successful Directed Tx's (MSDU) @ 5_5MB
921 IPW_ORD_STAT_TX_DIR_DATA11, // # of successful Directed Tx's (MSDU) @ 11MB
922 IPW_ORD_STAT_TX_DIR_DATA22, // # of successful Directed Tx's (MSDU) @ 22MB
923
924 IPW_ORD_STAT_TX_NODIR_DATA1 = 13,// # of successful Non_Directed Tx's (MSDU) @ 1MB
925 IPW_ORD_STAT_TX_NODIR_DATA2, // # of successful Non_Directed Tx's (MSDU) @ 2MB
926 IPW_ORD_STAT_TX_NODIR_DATA5_5, // # of successful Non_Directed Tx's (MSDU) @ 5.5MB
927 IPW_ORD_STAT_TX_NODIR_DATA11, // # of successful Non_Directed Tx's (MSDU) @ 11MB
928
929 IPW_ORD_STAT_NULL_DATA = 21, // # of successful NULL data Tx's
930 IPW_ORD_STAT_TX_RTS, // # of successful Tx RTS
931 IPW_ORD_STAT_TX_CTS, // # of successful Tx CTS
932 IPW_ORD_STAT_TX_ACK, // # of successful Tx ACK
933 IPW_ORD_STAT_TX_ASSN, // # of successful Association Tx's
934 IPW_ORD_STAT_TX_ASSN_RESP, // # of successful Association response Tx's
935 IPW_ORD_STAT_TX_REASSN, // # of successful Reassociation Tx's
936 IPW_ORD_STAT_TX_REASSN_RESP, // # of successful Reassociation response Tx's
937 IPW_ORD_STAT_TX_PROBE, // # of probes successfully transmitted
938 IPW_ORD_STAT_TX_PROBE_RESP, // # of probe responses successfully transmitted
939 IPW_ORD_STAT_TX_BEACON, // # of tx beacon
940 IPW_ORD_STAT_TX_ATIM, // # of Tx ATIM
941 IPW_ORD_STAT_TX_DISASSN, // # of successful Disassociation TX
942 IPW_ORD_STAT_TX_AUTH, // # of successful Authentication Tx
943 IPW_ORD_STAT_TX_DEAUTH, // # of successful Deauthentication TX
944
945 IPW_ORD_STAT_TX_TOTAL_BYTES = 41,// Total successful Tx data bytes
946 IPW_ORD_STAT_TX_RETRIES, // # of Tx retries
947 IPW_ORD_STAT_TX_RETRY1, // # of Tx retries at 1MBPS
948 IPW_ORD_STAT_TX_RETRY2, // # of Tx retries at 2MBPS
949 IPW_ORD_STAT_TX_RETRY5_5, // # of Tx retries at 5.5MBPS
950 IPW_ORD_STAT_TX_RETRY11, // # of Tx retries at 11MBPS
951
952 IPW_ORD_STAT_TX_FAILURES = 51, // # of Tx Failures
953 IPW_ORD_STAT_TX_ABORT_AT_HOP, //NS // # of Tx's aborted at hop time
954 IPW_ORD_STAT_TX_MAX_TRIES_IN_HOP,// # of times max tries in a hop failed
955 IPW_ORD_STAT_TX_ABORT_LATE_DMA, //NS // # of times tx aborted due to late dma setup
956 IPW_ORD_STAT_TX_ABORT_STX, //NS // # of times backoff aborted
957 IPW_ORD_STAT_TX_DISASSN_FAIL, // # of times disassociation failed
958 IPW_ORD_STAT_TX_ERR_CTS, // # of missed/bad CTS frames
959 IPW_ORD_STAT_TX_BPDU, //NS // # of spanning tree BPDUs sent
960 IPW_ORD_STAT_TX_ERR_ACK, // # of tx err due to acks
961
962 // Receive statistics
963 IPW_ORD_STAT_RX_HOST = 61, // # of packets passed to host
964 IPW_ORD_STAT_RX_DIR_DATA, // # of directed packets
965 IPW_ORD_STAT_RX_DIR_DATA1, // # of directed packets at 1MB
966 IPW_ORD_STAT_RX_DIR_DATA2, // # of directed packets at 2MB
967 IPW_ORD_STAT_RX_DIR_DATA5_5, // # of directed packets at 5.5MB
968 IPW_ORD_STAT_RX_DIR_DATA11, // # of directed packets at 11MB
969 IPW_ORD_STAT_RX_DIR_DATA22, // # of directed packets at 22MB
970
971 IPW_ORD_STAT_RX_NODIR_DATA = 71,// # of nondirected packets
972 IPW_ORD_STAT_RX_NODIR_DATA1, // # of nondirected packets at 1MB
973 IPW_ORD_STAT_RX_NODIR_DATA2, // # of nondirected packets at 2MB
974 IPW_ORD_STAT_RX_NODIR_DATA5_5, // # of nondirected packets at 5.5MB
975 IPW_ORD_STAT_RX_NODIR_DATA11, // # of nondirected packets at 11MB
976
977 IPW_ORD_STAT_RX_NULL_DATA = 80, // # of null data rx's
978 IPW_ORD_STAT_RX_POLL, //NS // # of poll rx
979 IPW_ORD_STAT_RX_RTS, // # of Rx RTS
980 IPW_ORD_STAT_RX_CTS, // # of Rx CTS
981 IPW_ORD_STAT_RX_ACK, // # of Rx ACK
982 IPW_ORD_STAT_RX_CFEND, // # of Rx CF End
983 IPW_ORD_STAT_RX_CFEND_ACK, // # of Rx CF End + CF Ack
984 IPW_ORD_STAT_RX_ASSN, // # of Association Rx's
985 IPW_ORD_STAT_RX_ASSN_RESP, // # of Association response Rx's
986 IPW_ORD_STAT_RX_REASSN, // # of Reassociation Rx's
987 IPW_ORD_STAT_RX_REASSN_RESP, // # of Reassociation response Rx's
988 IPW_ORD_STAT_RX_PROBE, // # of probe Rx's
989 IPW_ORD_STAT_RX_PROBE_RESP, // # of probe response Rx's
990 IPW_ORD_STAT_RX_BEACON, // # of Rx beacon
991 IPW_ORD_STAT_RX_ATIM, // # of Rx ATIM
992 IPW_ORD_STAT_RX_DISASSN, // # of disassociation Rx
993 IPW_ORD_STAT_RX_AUTH, // # of authentication Rx
994 IPW_ORD_STAT_RX_DEAUTH, // # of deauthentication Rx
995
996 IPW_ORD_STAT_RX_TOTAL_BYTES = 101,// Total rx data bytes received
997 IPW_ORD_STAT_RX_ERR_CRC, // # of packets with Rx CRC error
998 IPW_ORD_STAT_RX_ERR_CRC1, // # of Rx CRC errors at 1MB
999 IPW_ORD_STAT_RX_ERR_CRC2, // # of Rx CRC errors at 2MB
1000 IPW_ORD_STAT_RX_ERR_CRC5_5, // # of Rx CRC errors at 5.5MB
1001 IPW_ORD_STAT_RX_ERR_CRC11, // # of Rx CRC errors at 11MB
1002
1003 IPW_ORD_STAT_RX_DUPLICATE1 = 112, // # of duplicate rx packets at 1MB
1004 IPW_ORD_STAT_RX_DUPLICATE2, // # of duplicate rx packets at 2MB
1005 IPW_ORD_STAT_RX_DUPLICATE5_5, // # of duplicate rx packets at 5.5MB
1006 IPW_ORD_STAT_RX_DUPLICATE11, // # of duplicate rx packets at 11MB
1007 IPW_ORD_STAT_RX_DUPLICATE = 119, // # of duplicate rx packets
1008
1009 IPW_ORD_PERS_DB_LOCK = 120, // # locking fw permanent db
1010 IPW_ORD_PERS_DB_SIZE, // # size of fw permanent db
1011 IPW_ORD_PERS_DB_ADDR, // # address of fw permanent db
1012 IPW_ORD_STAT_RX_INVALID_PROTOCOL, // # of rx frames with invalid protocol
1013 IPW_ORD_SYS_BOOT_TIME, // # Boot time
1014 IPW_ORD_STAT_RX_NO_BUFFER, // # of rx frames rejected due to no buffer
1015 IPW_ORD_STAT_RX_ABORT_LATE_DMA, //NS // # of rx frames rejected due to dma setup too late
1016 IPW_ORD_STAT_RX_ABORT_AT_HOP, //NS // # of rx frames aborted due to hop
1017 IPW_ORD_STAT_RX_MISSING_FRAG, // # of rx frames dropped due to missing fragment
1018 IPW_ORD_STAT_RX_ORPHAN_FRAG, // # of rx frames dropped due to non-sequential fragment
1019 IPW_ORD_STAT_RX_ORPHAN_FRAME, // # of rx frames dropped due to unmatched 1st frame
1020 IPW_ORD_STAT_RX_FRAG_AGEOUT, // # of rx frames dropped due to uncompleted frame
1021 IPW_ORD_STAT_RX_BAD_SSID, //NS // Bad SSID (unused)
1022 IPW_ORD_STAT_RX_ICV_ERRORS, // # of ICV errors during decryption
1023
1024// PSP Statistics
1025 IPW_ORD_STAT_PSP_SUSPENSION = 137,// # of times adapter suspended
1026 IPW_ORD_STAT_PSP_BCN_TIMEOUT, // # of beacon timeout
1027 IPW_ORD_STAT_PSP_POLL_TIMEOUT, // # of poll response timeouts
 1028 IPW_ORD_STAT_PSP_NONDIR_TIMEOUT,// # of timeouts waiting for last broadcast/multicast pkt
1029 IPW_ORD_STAT_PSP_RX_DTIMS, // # of PSP DTIMs received
1030 IPW_ORD_STAT_PSP_RX_TIMS, // # of PSP TIMs received
1031 IPW_ORD_STAT_PSP_STATION_ID, // PSP Station ID
1032
1033// Association and roaming
1034 IPW_ORD_LAST_ASSN_TIME = 147, // RTC time of last association
1035 IPW_ORD_STAT_PERCENT_MISSED_BCNS,// current calculation of % missed beacons
1036 IPW_ORD_STAT_PERCENT_RETRIES, // current calculation of % missed tx retries
1037 IPW_ORD_ASSOCIATED_AP_PTR, // If associated, this is ptr to the associated
1038 // AP table entry. set to 0 if not associated
 1039 IPW_ORD_AVAILABLE_AP_CNT, // # of AP's described in the AP table
1040 IPW_ORD_AP_LIST_PTR, // Ptr to list of available APs
1041 IPW_ORD_STAT_AP_ASSNS, // # of associations
1042 IPW_ORD_STAT_ASSN_FAIL, // # of association failures
 1043 IPW_ORD_STAT_ASSN_RESP_FAIL, // # of failures due to response fail
1044 IPW_ORD_STAT_FULL_SCANS, // # of full scans
1045
1046 IPW_ORD_CARD_DISABLED, // # Card Disabled
1047 IPW_ORD_STAT_ROAM_INHIBIT, // # of times roaming was inhibited due to ongoing activity
1048 IPW_FILLER_40,
1049 IPW_ORD_RSSI_AT_ASSN = 160, // RSSI of associated AP at time of association
1050 IPW_ORD_STAT_ASSN_CAUSE1, // # of reassociations due to no tx from AP in last N
 1051 // hops or no probe responses in last 3 minutes
1052 IPW_ORD_STAT_ASSN_CAUSE2, // # of reassociations due to poor tx/rx quality
1053 IPW_ORD_STAT_ASSN_CAUSE3, // # of reassociations due to tx/rx quality with excessive
1054 // load at the AP
 1055 IPW_ORD_STAT_ASSN_CAUSE4, // # of reassociations due to AP RSSI level falling below
 1056 // the eligible group
1057 IPW_ORD_STAT_ASSN_CAUSE5, // # of reassociations due to load leveling
 1058 IPW_ORD_STAT_ASSN_CAUSE6, //NS // # of reassociations due to being dropped by the AP
1059 IPW_FILLER_41,
1060 IPW_FILLER_42,
1061 IPW_FILLER_43,
1062 IPW_ORD_STAT_AUTH_FAIL, // # of times authentication failed
1063 IPW_ORD_STAT_AUTH_RESP_FAIL, // # of times authentication response failed
1064 IPW_ORD_STATION_TABLE_CNT, // # of entries in association table
1065
1066// Other statistics
1067 IPW_ORD_RSSI_AVG_CURR = 173, // Current avg RSSI
1068 IPW_ORD_STEST_RESULTS_CURR, //NS // Current self test results word
 1069 IPW_ORD_STEST_RESULTS_CUM, //NS // Cumulative self test results word
1070 IPW_ORD_SELF_TEST_STATUS, //NS //
1071 IPW_ORD_POWER_MGMT_MODE, // Power mode - 0=CAM, 1=PSP
1072 IPW_ORD_POWER_MGMT_INDEX, //NS //
1073 IPW_ORD_COUNTRY_CODE, // IEEE country code as recv'd from beacon
 1074 IPW_ORD_COUNTRY_CHANNELS, // channels supported by country
1075// IPW_ORD_COUNTRY_CHANNELS:
1076// For 11b the lower 2-byte are used for channels from 1-14
1077// and the higher 2-byte are not used.
1078 IPW_ORD_RESET_CNT, // # of adapter resets (warm)
1079 IPW_ORD_BEACON_INTERVAL, // Beacon interval
1080
1081 IPW_ORD_PRINCETON_VERSION = 184, //NS // Princeton Version
1082 IPW_ORD_ANTENNA_DIVERSITY, // TRUE if antenna diversity is disabled
1083 IPW_ORD_CCA_RSSI, //NS // CCA RSSI value (factory programmed)
1084 IPW_ORD_STAT_EEPROM_UPDATE, //NS // # of times config EEPROM updated
1085 IPW_ORD_DTIM_PERIOD, // # of beacon intervals between DTIMs
1086 IPW_ORD_OUR_FREQ, // current radio freq lower digits - channel ID
1087
1088 IPW_ORD_RTC_TIME = 190, // current RTC time
1089 IPW_ORD_PORT_TYPE, // operating mode
1090 IPW_ORD_CURRENT_TX_RATE, // current tx rate
1091 IPW_ORD_SUPPORTED_RATES, // Bitmap of supported tx rates
1092 IPW_ORD_ATIM_WINDOW, // current ATIM Window
1093 IPW_ORD_BASIC_RATES, // bitmap of basic tx rates
 1094 IPW_ORD_NIC_HIGHEST_RATE, // highest tx rate supported by the NIC
 1095 IPW_ORD_AP_HIGHEST_RATE, // highest tx rate supported by the AP
1096 IPW_ORD_CAPABILITIES, // Management frame capability field
1097 IPW_ORD_AUTH_TYPE, // Type of authentication
1098 IPW_ORD_RADIO_TYPE, // Adapter card platform type
1099 IPW_ORD_RTS_THRESHOLD = 201, // Min length of packet after which RTS handshaking is used
1100 IPW_ORD_INT_MODE, // International mode
1101 IPW_ORD_FRAGMENTATION_THRESHOLD, // protocol frag threshold
1102 IPW_ORD_EEPROM_SRAM_DB_BLOCK_START_ADDRESS, // EEPROM offset in SRAM
1103 IPW_ORD_EEPROM_SRAM_DB_BLOCK_SIZE, // EEPROM size in SRAM
1104 IPW_ORD_EEPROM_SKU_CAPABILITY, // EEPROM SKU Capability 206 =
1105 IPW_ORD_EEPROM_IBSS_11B_CHANNELS, // EEPROM IBSS 11b channel set
1106
1107 IPW_ORD_MAC_VERSION = 209, // MAC Version
1108 IPW_ORD_MAC_REVISION, // MAC Revision
1109 IPW_ORD_RADIO_VERSION, // Radio Version
1110 IPW_ORD_NIC_MANF_DATE_TIME, // MANF Date/Time STAMP
1111 IPW_ORD_UCODE_VERSION, // Ucode Version
1112 IPW_ORD_HW_RF_SWITCH_STATE = 214, // HW RF Kill Switch State
1113} ORDINALTABLE1;
1114
1115// ordinal table 2
1116// Variable length data:
1117#define IPW_FIRST_VARIABLE_LENGTH_ORDINAL 1001
1118
1119typedef enum _ORDINAL_TABLE_2 { // NS - means Not Supported by FW
1120 IPW_ORD_STAT_BASE = 1000, // contains number of variable ORDs
1121 IPW_ORD_STAT_ADAPTER_MAC = 1001, // 6 bytes: our adapter MAC address
1122 IPW_ORD_STAT_PREFERRED_BSSID = 1002, // 6 bytes: BSSID of the preferred AP
1123 IPW_ORD_STAT_MANDATORY_BSSID = 1003, // 6 bytes: BSSID of the mandatory AP
1124 IPW_FILL_1, //NS //
1125 IPW_ORD_STAT_COUNTRY_TEXT = 1005, // 36 bytes: Country name text, First two bytes are Country code
1126 IPW_ORD_STAT_ASSN_SSID = 1006, // 32 bytes: ESSID String
1127 IPW_ORD_STATION_TABLE = 1007, // ? bytes: Station/AP table (via Direct SSID Scans)
1128 IPW_ORD_STAT_SWEEP_TABLE = 1008, // ? bytes: Sweep/Host Table table (via Broadcast Scans)
1129 IPW_ORD_STAT_ROAM_LOG = 1009, // ? bytes: Roaming log
1130 IPW_ORD_STAT_RATE_LOG = 1010, //NS // 0 bytes: Rate log
1131 IPW_ORD_STAT_FIFO = 1011, //NS // 0 bytes: Fifo buffer data structures
1132 IPW_ORD_STAT_FW_VER_NUM = 1012, // 14 bytes: fw version ID string as in (a.bb.ccc; "0.08.011")
1133 IPW_ORD_STAT_FW_DATE = 1013, // 14 bytes: fw date string (mmm dd yyyy; "Mar 13 2002")
1134 IPW_ORD_STAT_ASSN_AP_BSSID = 1014, // 6 bytes: MAC address of associated AP
1135 IPW_ORD_STAT_DEBUG = 1015, //NS // ? bytes:
1136 IPW_ORD_STAT_NIC_BPA_NUM = 1016, // 11 bytes: NIC BPA number in ASCII
1137 IPW_ORD_STAT_UCODE_DATE = 1017, // 5 bytes: uCode date
1138 IPW_ORD_SECURITY_NGOTIATION_RESULT = 1018,
1139} ORDINALTABLE2; // NS - means Not Supported by FW
1140
1141#define IPW_LAST_VARIABLE_LENGTH_ORDINAL 1018
1142
1143#ifndef WIRELESS_SPY
1144#define WIRELESS_SPY // enable iwspy support
1145#endif
1146
1147extern struct iw_handler_def ipw2100_wx_handler_def;
1148extern struct iw_statistics *ipw2100_wx_wireless_stats(struct net_device * dev);
1149extern void ipw2100_wx_event_work(struct ipw2100_priv *priv);
1150
1151#define IPW_HOST_FW_SHARED_AREA0 0x0002f200
1152#define IPW_HOST_FW_SHARED_AREA0_END 0x0002f510 // 0x310 bytes
1153
1154#define IPW_HOST_FW_SHARED_AREA1 0x0002f610
1155#define IPW_HOST_FW_SHARED_AREA1_END 0x0002f630 // 0x20 bytes
1156
1157#define IPW_HOST_FW_SHARED_AREA2 0x0002fa00
1158#define IPW_HOST_FW_SHARED_AREA2_END 0x0002fa20 // 0x20 bytes
1159
1160#define IPW_HOST_FW_SHARED_AREA3 0x0002fc00
1161#define IPW_HOST_FW_SHARED_AREA3_END 0x0002fc10 // 0x10 bytes
1162
1163#define IPW_HOST_FW_INTERRUPT_AREA 0x0002ff80
1164#define IPW_HOST_FW_INTERRUPT_AREA_END 0x00030000 // 0x80 bytes
1165
1166struct ipw2100_fw_chunk {
1167 unsigned char *buf;
1168 long len;
1169 long pos;
1170 struct list_head list;
1171};
1172
1173struct ipw2100_fw_chunk_set {
1174 const void *data;
1175 unsigned long size;
1176};
1177
1178struct ipw2100_fw {
1179 int version;
1180 struct ipw2100_fw_chunk_set fw;
1181 struct ipw2100_fw_chunk_set uc;
1182 const struct firmware *fw_entry;
1183};
1184
1185int ipw2100_get_firmware(struct ipw2100_priv *priv, struct ipw2100_fw *fw);
1186void ipw2100_release_firmware(struct ipw2100_priv *priv, struct ipw2100_fw *fw);
1187int ipw2100_fw_download(struct ipw2100_priv *priv, struct ipw2100_fw *fw);
1188int ipw2100_ucode_download(struct ipw2100_priv *priv, struct ipw2100_fw *fw);
1189
1190#define MAX_FW_VERSION_LEN 14
1191
1192int ipw2100_get_fwversion(struct ipw2100_priv *priv, char *buf, size_t max);
1193int ipw2100_get_ucodeversion(struct ipw2100_priv *priv, char *buf, size_t max);
1194
1195#endif /* _IPW2100_H */
diff --git a/drivers/net/wireless/ipw2200.c b/drivers/net/wireless/ipw2200.c
new file mode 100644
index 000000000000..16cfd907e715
--- /dev/null
+++ b/drivers/net/wireless/ipw2200.c
@@ -0,0 +1,7361 @@
1/******************************************************************************
2
3 Copyright(c) 2003 - 2004 Intel Corporation. All rights reserved.
4
5 802.11 status code portion of this file from ethereal-0.10.6:
6 Copyright 2000, Axis Communications AB
7 Ethereal - Network traffic analyzer
8 By Gerald Combs <gerald@ethereal.com>
9 Copyright 1998 Gerald Combs
10
11 This program is free software; you can redistribute it and/or modify it
12 under the terms of version 2 of the GNU General Public License as
13 published by the Free Software Foundation.
14
15 This program is distributed in the hope that it will be useful, but WITHOUT
16 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
18 more details.
19
20 You should have received a copy of the GNU General Public License along with
21 this program; if not, write to the Free Software Foundation, Inc., 59
22 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
23
24 The full GNU General Public License is included in this distribution in the
25 file called LICENSE.
26
27 Contact Information:
28 James P. Ketrenos <ipw2100-admin@linux.intel.com>
29 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30
31******************************************************************************/
32
33#include "ipw2200.h"
34
35#define IPW2200_VERSION "1.0.0"
36#define DRV_DESCRIPTION "Intel(R) PRO/Wireless 2200/2915 Network Driver"
37#define DRV_COPYRIGHT "Copyright(c) 2003-2004 Intel Corporation"
38#define DRV_VERSION IPW2200_VERSION
39
40MODULE_DESCRIPTION(DRV_DESCRIPTION);
41MODULE_VERSION(DRV_VERSION);
42MODULE_AUTHOR(DRV_COPYRIGHT);
43MODULE_LICENSE("GPL");
44
45static int debug = 0;
46static int channel = 0;
47static char *ifname;
48static int mode = 0;
49
50static u32 ipw_debug_level;
51static int associate = 1;
52static int auto_create = 1;
53static int disable = 0;
54static const char ipw_modes[] = {
55 'a', 'b', 'g', '?'
56};
57
58static void ipw_rx(struct ipw_priv *priv);
59static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
60 struct clx2_tx_queue *txq, int qindex);
61static int ipw_queue_reset(struct ipw_priv *priv);
62
63static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf,
64 int len, int sync);
65
66static void ipw_tx_queue_free(struct ipw_priv *);
67
68static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *);
69static void ipw_rx_queue_free(struct ipw_priv *, struct ipw_rx_queue *);
70static void ipw_rx_queue_replenish(void *);
71
72static int ipw_up(struct ipw_priv *);
73static void ipw_down(struct ipw_priv *);
74static int ipw_config(struct ipw_priv *);
75static int init_supported_rates(struct ipw_priv *priv, struct ipw_supported_rates *prates);
76
77static u8 band_b_active_channel[MAX_B_CHANNELS] = {
78 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 0
79};
80static u8 band_a_active_channel[MAX_A_CHANNELS] = {
81 36, 40, 44, 48, 149, 153, 157, 161, 165, 52, 56, 60, 64, 0
82};
83
84static int is_valid_channel(int mode_mask, int channel)
85{
86 int i;
87
88 if (!channel)
89 return 0;
90
91 if (mode_mask & IEEE_A)
92 for (i = 0; i < MAX_A_CHANNELS; i++)
93 if (band_a_active_channel[i] == channel)
94 return IEEE_A;
95
96 if (mode_mask & (IEEE_B | IEEE_G))
97 for (i = 0; i < MAX_B_CHANNELS; i++)
98 if (band_b_active_channel[i] == channel)
99 return mode_mask & (IEEE_B | IEEE_G);
100
101 return 0;
102}
103
104static char *snprint_line(char *buf, size_t count,
105 const u8 *data, u32 len, u32 ofs)
106{
107 int out, i, j, l;
108 char c;
109
110 out = snprintf(buf, count, "%08X", ofs);
111
112 for (l = 0, i = 0; i < 2; i++) {
113 out += snprintf(buf + out, count - out, " ");
114 for (j = 0; j < 8 && l < len; j++, l++)
115 out += snprintf(buf + out, count - out, "%02X ",
116 data[(i * 8 + j)]);
117 for (; j < 8; j++)
118 out += snprintf(buf + out, count - out, " ");
119 }
120
121 out += snprintf(buf + out, count - out, " ");
122 for (l = 0, i = 0; i < 2; i++) {
123 out += snprintf(buf + out, count - out, " ");
124 for (j = 0; j < 8 && l < len; j++, l++) {
125 c = data[(i * 8 + j)];
126 if (!isascii(c) || !isprint(c))
127 c = '.';
128
129 out += snprintf(buf + out, count - out, "%c", c);
130 }
131
132 for (; j < 8; j++)
133 out += snprintf(buf + out, count - out, " ");
134 }
135
136 return buf;
137}
138
139static void printk_buf(int level, const u8 *data, u32 len)
140{
141 char line[81];
142 u32 ofs = 0;
143 if (!(ipw_debug_level & level))
144 return;
145
146 while (len) {
147 printk(KERN_DEBUG "%s\n",
148 snprint_line(line, sizeof(line), &data[ofs],
149 min(len, 16U), ofs));
150 ofs += 16;
151 len -= min(len, 16U);
152 }
153}
154
155static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg);
156#define ipw_read_reg32(a, b) _ipw_read_reg32(a, b)
157
158static u8 _ipw_read_reg8(struct ipw_priv *ipw, u32 reg);
159#define ipw_read_reg8(a, b) _ipw_read_reg8(a, b)
160
161static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value);
162static inline void ipw_write_reg8(struct ipw_priv *a, u32 b, u8 c)
163{
164 IPW_DEBUG_IO("%s %d: write_indirect8(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(b), (u32)(c));
165 _ipw_write_reg8(a, b, c);
166}
167
168static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg, u16 value);
169static inline void ipw_write_reg16(struct ipw_priv *a, u32 b, u16 c)
170{
171 IPW_DEBUG_IO("%s %d: write_indirect16(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(b), (u32)(c));
172 _ipw_write_reg16(a, b, c);
173}
174
175static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg, u32 value);
176static inline void ipw_write_reg32(struct ipw_priv *a, u32 b, u32 c)
177{
178 IPW_DEBUG_IO("%s %d: write_indirect32(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(b), (u32)(c));
179 _ipw_write_reg32(a, b, c);
180}
181
182#define _ipw_write8(ipw, ofs, val) writeb((val), (ipw)->hw_base + (ofs))
 183#define ipw_write8(ipw, ofs, val) do { \
 184 IPW_DEBUG_IO("%s %d: write_direct8(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \
 185 _ipw_write8(ipw, ofs, val); } while (0)
186
187#define _ipw_write16(ipw, ofs, val) writew((val), (ipw)->hw_base + (ofs))
 188#define ipw_write16(ipw, ofs, val) do { \
 189 IPW_DEBUG_IO("%s %d: write_direct16(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \
 190 _ipw_write16(ipw, ofs, val); } while (0)
191
192#define _ipw_write32(ipw, ofs, val) writel((val), (ipw)->hw_base + (ofs))
 193#define ipw_write32(ipw, ofs, val) do { \
 194 IPW_DEBUG_IO("%s %d: write_direct32(0x%08X, 0x%08X)\n", __FILE__, __LINE__, (u32)(ofs), (u32)(val)); \
 195 _ipw_write32(ipw, ofs, val); } while (0)
196
197#define _ipw_read8(ipw, ofs) readb((ipw)->hw_base + (ofs))
198static inline u8 __ipw_read8(char *f, u32 l, struct ipw_priv *ipw, u32 ofs) {
199 IPW_DEBUG_IO("%s %d: read_direct8(0x%08X)\n", f, l, (u32)(ofs));
200 return _ipw_read8(ipw, ofs);
201}
202#define ipw_read8(ipw, ofs) __ipw_read8(__FILE__, __LINE__, ipw, ofs)
203
204#define _ipw_read16(ipw, ofs) readw((ipw)->hw_base + (ofs))
205static inline u16 __ipw_read16(char *f, u32 l, struct ipw_priv *ipw, u32 ofs) {
206 IPW_DEBUG_IO("%s %d: read_direct16(0x%08X)\n", f, l, (u32)(ofs));
207 return _ipw_read16(ipw, ofs);
208}
209#define ipw_read16(ipw, ofs) __ipw_read16(__FILE__, __LINE__, ipw, ofs)
210
211#define _ipw_read32(ipw, ofs) readl((ipw)->hw_base + (ofs))
212static inline u32 __ipw_read32(char *f, u32 l, struct ipw_priv *ipw, u32 ofs) {
213 IPW_DEBUG_IO("%s %d: read_direct32(0x%08X)\n", f, l, (u32)(ofs));
214 return _ipw_read32(ipw, ofs);
215}
216#define ipw_read32(ipw, ofs) __ipw_read32(__FILE__, __LINE__, ipw, ofs)
217
218static void _ipw_read_indirect(struct ipw_priv *, u32, u8 *, int);
 219#define ipw_read_indirect(a, b, c, d) do { \
 220 IPW_DEBUG_IO("%s %d: read_indirect(0x%08X) %d bytes\n", __FILE__, __LINE__, (u32)(b), d); \
 221 _ipw_read_indirect(a, b, c, d); } while (0)
222
223static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 *data, int num);
 224#define ipw_write_indirect(a, b, c, d) do { \
 225 IPW_DEBUG_IO("%s %d: write_indirect(0x%08X) %d bytes\n", __FILE__, __LINE__, (u32)(b), d); \
 226 _ipw_write_indirect(a, b, c, d); } while (0)
227
 228/* indirect writes */
229static void _ipw_write_reg32(struct ipw_priv *priv, u32 reg,
230 u32 value)
231{
232 IPW_DEBUG_IO(" %p : reg = 0x%8X : value = 0x%8X\n",
233 priv, reg, value);
234 _ipw_write32(priv, CX2_INDIRECT_ADDR, reg);
235 _ipw_write32(priv, CX2_INDIRECT_DATA, value);
236}
237
238
239static void _ipw_write_reg8(struct ipw_priv *priv, u32 reg, u8 value)
240{
241 IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
242 _ipw_write32(priv, CX2_INDIRECT_ADDR, reg & CX2_INDIRECT_ADDR_MASK);
243 _ipw_write8(priv, CX2_INDIRECT_DATA, value);
244 IPW_DEBUG_IO(" reg = 0x%8lX : value = 0x%8X\n",
245 (unsigned long)(priv->hw_base + CX2_INDIRECT_DATA),
246 value);
247}
248
249static void _ipw_write_reg16(struct ipw_priv *priv, u32 reg,
250 u16 value)
251{
252 IPW_DEBUG_IO(" reg = 0x%8X : value = 0x%8X\n", reg, value);
253 _ipw_write32(priv, CX2_INDIRECT_ADDR, reg & CX2_INDIRECT_ADDR_MASK);
254 _ipw_write16(priv, CX2_INDIRECT_DATA, value);
255}
256
 257/* indirect reads */
258
259static u8 _ipw_read_reg8(struct ipw_priv *priv, u32 reg)
260{
261 u32 word;
262 _ipw_write32(priv, CX2_INDIRECT_ADDR, reg & CX2_INDIRECT_ADDR_MASK);
263 IPW_DEBUG_IO(" reg = 0x%8X : \n", reg);
264 word = _ipw_read32(priv, CX2_INDIRECT_DATA);
265 return (word >> ((reg & 0x3)*8)) & 0xff;
266}
267
268static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg)
269{
270 u32 value;
271
272 IPW_DEBUG_IO("%p : reg = 0x%08x\n", priv, reg);
273
274 _ipw_write32(priv, CX2_INDIRECT_ADDR, reg);
275 value = _ipw_read32(priv, CX2_INDIRECT_DATA);
276 IPW_DEBUG_IO(" reg = 0x%4X : value = 0x%4x \n", reg, value);
277 return value;
278}
279
280/* iterative/auto-increment 32 bit reads and writes */
281static void _ipw_read_indirect(struct ipw_priv *priv, u32 addr, u8 * buf,
282 int num)
283{
284 u32 aligned_addr = addr & CX2_INDIRECT_ADDR_MASK;
285 u32 dif_len = addr - aligned_addr;
286 u32 aligned_len;
287 u32 i;
288
289 IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num);
290
 291 /* Read the unaligned leading bytes (if any) one at a time */
292 if (unlikely(dif_len)) {
293 /* Start reading at aligned_addr + dif_len */
294 _ipw_write32(priv, CX2_INDIRECT_ADDR, aligned_addr);
295 for (i = dif_len; i < 4; i++, buf++)
296 *buf = _ipw_read8(priv, CX2_INDIRECT_DATA + i);
297 num -= dif_len;
298 aligned_addr += 4;
299 }
300
301 /* Read DWs through autoinc register */
302 _ipw_write32(priv, CX2_AUTOINC_ADDR, aligned_addr);
303 aligned_len = num & CX2_INDIRECT_ADDR_MASK;
304 for (i = 0; i < aligned_len; i += 4, buf += 4, aligned_addr += 4)
305 *(u32*)buf = ipw_read32(priv, CX2_AUTOINC_DATA);
306
 307 /* Read the unaligned trailing bytes */
308 dif_len = num - aligned_len;
309 _ipw_write32(priv, CX2_INDIRECT_ADDR, aligned_addr);
310 for (i = 0; i < dif_len; i++, buf++)
311 *buf = ipw_read8(priv, CX2_INDIRECT_DATA + i);
312}
313
314static void _ipw_write_indirect(struct ipw_priv *priv, u32 addr, u8 *buf,
315 int num)
316{
317 u32 aligned_addr = addr & CX2_INDIRECT_ADDR_MASK;
318 u32 dif_len = addr - aligned_addr;
319 u32 aligned_len;
320 u32 i;
321
322 IPW_DEBUG_IO("addr = %i, buf = %p, num = %i\n", addr, buf, num);
323
 324 /* Write the unaligned leading bytes (if any) one at a time */
325 if (unlikely(dif_len)) {
326 /* Start writing at aligned_addr + dif_len */
327 _ipw_write32(priv, CX2_INDIRECT_ADDR, aligned_addr);
328 for (i = dif_len; i < 4; i++, buf++)
329 _ipw_write8(priv, CX2_INDIRECT_DATA + i, *buf);
330 num -= dif_len;
331 aligned_addr += 4;
332 }
333
334 /* Write DWs through autoinc register */
335 _ipw_write32(priv, CX2_AUTOINC_ADDR, aligned_addr);
336 aligned_len = num & CX2_INDIRECT_ADDR_MASK;
337 for (i = 0; i < aligned_len; i += 4, buf += 4, aligned_addr += 4)
338 _ipw_write32(priv, CX2_AUTOINC_DATA, *(u32*)buf);
339
340	/* Write the remaining unaligned tail bytes, one at a time */
341 dif_len = num - aligned_len;
342 _ipw_write32(priv, CX2_INDIRECT_ADDR, aligned_addr);
343 for (i = 0; i < dif_len; i++, buf++)
344 _ipw_write8(priv, CX2_INDIRECT_DATA + i, *buf);
345}
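
Both bulk helpers above carve a transfer into an unaligned head handled byte by byte, a run of whole dwords pushed through the auto-increment port, and a leftover tail. A standalone sketch of the same split arithmetic (hypothetical helper, plain userspace C; ADDR_MASK stands in for CX2_INDIRECT_ADDR_MASK):

#include <stdio.h>
#include <stdint.h>

#define ADDR_MASK 0xfffffffcu	/* stand-in for CX2_INDIRECT_ADDR_MASK */

/* Show how an (addr, num) transfer is divided, mirroring the
 * head / aligned-dword / tail logic of the indirect helpers above. */
static void split_transfer(uint32_t addr, uint32_t num)
{
	uint32_t aligned_addr = addr & ADDR_MASK;
	uint32_t dif_len = addr - aligned_addr;
	uint32_t head = 0, aligned_len, tail;

	if (dif_len) {
		head = 4 - dif_len;	/* bytes until the next dword boundary */
		if (head > num)
			head = num;
		num -= head;
		aligned_addr += 4;
	}

	aligned_len = num & ADDR_MASK;	/* whole dwords via auto-increment */
	tail = num - aligned_len;	/* leftover bytes, one at a time */

	printf("head=%u aligned=%u (from 0x%08x) tail=%u\n",
	       head, aligned_len, aligned_addr, tail);
}

int main(void)
{
	split_transfer(0x00030002, 11);	/* expect head=2 aligned=8 tail=1 */
	return 0;
}
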
346
347static void ipw_write_direct(struct ipw_priv *priv, u32 addr, void *buf,
348 int num)
349{
350 memcpy_toio((priv->hw_base + addr), buf, num);
351}
352
353static inline void ipw_set_bit(struct ipw_priv *priv, u32 reg, u32 mask)
354{
355 ipw_write32(priv, reg, ipw_read32(priv, reg) | mask);
356}
357
358static inline void ipw_clear_bit(struct ipw_priv *priv, u32 reg, u32 mask)
359{
360 ipw_write32(priv, reg, ipw_read32(priv, reg) & ~mask);
361}
362
363static inline void ipw_enable_interrupts(struct ipw_priv *priv)
364{
365 if (priv->status & STATUS_INT_ENABLED)
366 return;
367 priv->status |= STATUS_INT_ENABLED;
368 ipw_write32(priv, CX2_INTA_MASK_R, CX2_INTA_MASK_ALL);
369}
370
371static inline void ipw_disable_interrupts(struct ipw_priv *priv)
372{
373 if (!(priv->status & STATUS_INT_ENABLED))
374 return;
375 priv->status &= ~STATUS_INT_ENABLED;
376 ipw_write32(priv, CX2_INTA_MASK_R, ~CX2_INTA_MASK_ALL);
377}
378
379static char *ipw_error_desc(u32 val)
380{
381 switch (val) {
382 case IPW_FW_ERROR_OK:
383 return "ERROR_OK";
384 case IPW_FW_ERROR_FAIL:
385 return "ERROR_FAIL";
386 case IPW_FW_ERROR_MEMORY_UNDERFLOW:
387 return "MEMORY_UNDERFLOW";
388 case IPW_FW_ERROR_MEMORY_OVERFLOW:
389 return "MEMORY_OVERFLOW";
390 case IPW_FW_ERROR_BAD_PARAM:
391 return "ERROR_BAD_PARAM";
392 case IPW_FW_ERROR_BAD_CHECKSUM:
393 return "ERROR_BAD_CHECKSUM";
394 case IPW_FW_ERROR_NMI_INTERRUPT:
395 return "ERROR_NMI_INTERRUPT";
396 case IPW_FW_ERROR_BAD_DATABASE:
397 return "ERROR_BAD_DATABASE";
398 case IPW_FW_ERROR_ALLOC_FAIL:
399 return "ERROR_ALLOC_FAIL";
400 case IPW_FW_ERROR_DMA_UNDERRUN:
401 return "ERROR_DMA_UNDERRUN";
402 case IPW_FW_ERROR_DMA_STATUS:
403 return "ERROR_DMA_STATUS";
404 case IPW_FW_ERROR_DINOSTATUS_ERROR:
405 return "ERROR_DINOSTATUS_ERROR";
406 case IPW_FW_ERROR_EEPROMSTATUS_ERROR:
407 return "ERROR_EEPROMSTATUS_ERROR";
408 case IPW_FW_ERROR_SYSASSERT:
409 return "ERROR_SYSASSERT";
410 case IPW_FW_ERROR_FATAL_ERROR:
411 return "ERROR_FATALSTATUS_ERROR";
412 default:
413 return "UNKNOWNSTATUS_ERROR";
414 }
415}
416
417static void ipw_dump_nic_error_log(struct ipw_priv *priv)
418{
419 u32 desc, time, blink1, blink2, ilink1, ilink2, idata, i, count, base;
420
421 base = ipw_read32(priv, IPWSTATUS_ERROR_LOG);
422 count = ipw_read_reg32(priv, base);
423
424 if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
425 IPW_ERROR("Start IPW Error Log Dump:\n");
426 IPW_ERROR("Status: 0x%08X, Config: %08X\n",
427 priv->status, priv->config);
428 }
429
430 for (i = ERROR_START_OFFSET;
431 i <= count * ERROR_ELEM_SIZE;
432 i += ERROR_ELEM_SIZE) {
433 desc = ipw_read_reg32(priv, base + i);
434 time = ipw_read_reg32(priv, base + i + 1*sizeof(u32));
435 blink1 = ipw_read_reg32(priv, base + i + 2*sizeof(u32));
436 blink2 = ipw_read_reg32(priv, base + i + 3*sizeof(u32));
437 ilink1 = ipw_read_reg32(priv, base + i + 4*sizeof(u32));
438 ilink2 = ipw_read_reg32(priv, base + i + 5*sizeof(u32));
439 idata = ipw_read_reg32(priv, base + i + 6*sizeof(u32));
440
441 IPW_ERROR(
442 "%s %i 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
443 ipw_error_desc(desc), time, blink1, blink2,
444 ilink1, ilink2, idata);
445 }
446}
447
448static void ipw_dump_nic_event_log(struct ipw_priv *priv)
449{
450 u32 ev, time, data, i, count, base;
451
452 base = ipw_read32(priv, IPW_EVENT_LOG);
453 count = ipw_read_reg32(priv, base);
454
455 if (EVENT_START_OFFSET <= count * EVENT_ELEM_SIZE)
456 IPW_ERROR("Start IPW Event Log Dump:\n");
457
458 for (i = EVENT_START_OFFSET;
459 i <= count * EVENT_ELEM_SIZE;
460 i += EVENT_ELEM_SIZE) {
461 ev = ipw_read_reg32(priv, base + i);
462 time = ipw_read_reg32(priv, base + i + 1*sizeof(u32));
463 data = ipw_read_reg32(priv, base + i + 2*sizeof(u32));
464
465#ifdef CONFIG_IPW_DEBUG
466 IPW_ERROR("%i\t0x%08x\t%i\n", time, data, ev);
467#endif
468 }
469}
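
The two dump routines above walk fixed-size records starting at a base pointer read from the NIC, with the element count stored at the base itself. A hedged sketch of the record layouts implied by those reads (struct and field names are descriptive guesses, not taken from ipw2200.h):

#include <stdio.h>
#include <stdint.h>

/* Layout implied by ipw_dump_nic_error_log(): seven consecutive 32-bit
 * words per element.  If ERROR_ELEM_SIZE is indeed 7 dwords, it should
 * match sizeof(struct nic_error_elem). */
struct nic_error_elem {
	uint32_t desc;		/* decoded by ipw_error_desc() */
	uint32_t time;
	uint32_t blink1;
	uint32_t blink2;
	uint32_t ilink1;
	uint32_t ilink2;
	uint32_t idata;
};

/* Layout implied by ipw_dump_nic_event_log(): three words per element. */
struct nic_event_elem {
	uint32_t event;
	uint32_t time;
	uint32_t data;
};

int main(void)
{
	printf("error elem: %zu bytes, event elem: %zu bytes\n",
	       sizeof(struct nic_error_elem), sizeof(struct nic_event_elem));
	return 0;
}
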
470
471static int ipw_get_ordinal(struct ipw_priv *priv, u32 ord, void *val,
472 u32 *len)
473{
474 u32 addr, field_info, field_len, field_count, total_len;
475
476 IPW_DEBUG_ORD("ordinal = %i\n", ord);
477
478 if (!priv || !val || !len) {
479 IPW_DEBUG_ORD("Invalid argument\n");
480 return -EINVAL;
481 }
482
483 /* verify device ordinal tables have been initialized */
484 if (!priv->table0_addr || !priv->table1_addr || !priv->table2_addr) {
485 IPW_DEBUG_ORD("Access ordinals before initialization\n");
486 return -EINVAL;
487 }
488
489 switch (IPW_ORD_TABLE_ID_MASK & ord) {
490 case IPW_ORD_TABLE_0_MASK:
491 /*
492 * TABLE 0: Direct access to a table of 32 bit values
493 *
494 * This is a very simple table with the data directly
495 * read from the table
496 */
497
498 /* remove the table id from the ordinal */
499 ord &= IPW_ORD_TABLE_VALUE_MASK;
500
501 /* boundary check */
502 if (ord > priv->table0_len) {
503			IPW_DEBUG_ORD("ordinal value (%i) exceeds "
504 "max (%i)\n", ord, priv->table0_len);
505 return -EINVAL;
506 }
507
508 /* verify we have enough room to store the value */
509 if (*len < sizeof(u32)) {
510 IPW_DEBUG_ORD("ordinal buffer length too small, "
511 "need %zd\n", sizeof(u32));
512 return -EINVAL;
513 }
514
515 IPW_DEBUG_ORD("Reading TABLE0[%i] from offset 0x%08x\n",
516 ord, priv->table0_addr + (ord << 2));
517
518 *len = sizeof(u32);
519 ord <<= 2;
520 *((u32 *)val) = ipw_read32(priv, priv->table0_addr + ord);
521 break;
522
523 case IPW_ORD_TABLE_1_MASK:
524 /*
525 * TABLE 1: Indirect access to a table of 32 bit values
526 *
527 * This is a fairly large table of u32 values each
528 * representing starting addr for the data (which is
529 * also a u32)
530 */
531
532 /* remove the table id from the ordinal */
533 ord &= IPW_ORD_TABLE_VALUE_MASK;
534
535 /* boundary check */
536 if (ord > priv->table1_len) {
537			IPW_DEBUG_ORD("ordinal value too large\n");
538 return -EINVAL;
539 }
540
541 /* verify we have enough room to store the value */
542 if (*len < sizeof(u32)) {
543 IPW_DEBUG_ORD("ordinal buffer length too small, "
544 "need %zd\n", sizeof(u32));
545 return -EINVAL;
546 }
547
548 *((u32 *)val) = ipw_read_reg32(priv, (priv->table1_addr + (ord << 2)));
549 *len = sizeof(u32);
550 break;
551
552 case IPW_ORD_TABLE_2_MASK:
553 /*
554 * TABLE 2: Indirect access to a table of variable sized values
555 *
556		 * This table consists of six values, each containing
557		 *     - dword containing the starting offset of the data
558		 *     - dword containing the length in the first 16bits
559 * and the count in the second 16bits
560 */
561
562 /* remove the table id from the ordinal */
563 ord &= IPW_ORD_TABLE_VALUE_MASK;
564
565 /* boundary check */
566 if (ord > priv->table2_len) {
567			IPW_DEBUG_ORD("ordinal value too large\n");
568 return -EINVAL;
569 }
570
571 /* get the address of statistic */
572 addr = ipw_read_reg32(priv, priv->table2_addr + (ord << 3));
573
574 /* get the second DW of statistics ;
575 * two 16-bit words - first is length, second is count */
576 field_info = ipw_read_reg32(priv, priv->table2_addr + (ord << 3) + sizeof(u32));
577
578 /* get each entry length */
579 field_len = *((u16 *)&field_info);
580
581 /* get number of entries */
582 field_count = *(((u16 *)&field_info) + 1);
583
584		/* abort if not enough memory */
585 total_len = field_len * field_count;
586 if (total_len > *len) {
587 *len = total_len;
588 return -EINVAL;
589 }
590
591 *len = total_len;
592 if (!total_len)
593 return 0;
594
595 IPW_DEBUG_ORD("addr = 0x%08x, total_len = %i, "
596 "field_info = 0x%08x\n",
597 addr, total_len, field_info);
598 ipw_read_indirect(priv, addr, val, total_len);
599 break;
600
601 default:
602 IPW_DEBUG_ORD("Invalid ordinal!\n");
603 return -EINVAL;
604
605 }
606
607
608 return 0;
609}
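
For table 2, the second dword packs the per-entry length in one 16-bit half and the entry count in the other; the code above pulls them apart with u16 pointer casts, which picks up the low half first on a little-endian host. A standalone sketch of the same unpacking using shifts (values are illustrative only):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t field_info = 0x00040010;	/* example: count=4, length=16 */
	uint16_t field_len   = field_info & 0xffff;	/* low half: entry length */
	uint16_t field_count = field_info >> 16;	/* high half: entry count */
	uint32_t total_len   = (uint32_t)field_len * field_count;

	printf("len=%u count=%u total=%u bytes\n",
	       field_len, field_count, total_len);
	return 0;
}
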
610
611static void ipw_init_ordinals(struct ipw_priv *priv)
612{
613 priv->table0_addr = IPW_ORDINALS_TABLE_LOWER;
614 priv->table0_len = ipw_read32(priv, priv->table0_addr);
615
616 IPW_DEBUG_ORD("table 0 offset at 0x%08x, len = %i\n",
617 priv->table0_addr, priv->table0_len);
618
619 priv->table1_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_1);
620 priv->table1_len = ipw_read_reg32(priv, priv->table1_addr);
621
622 IPW_DEBUG_ORD("table 1 offset at 0x%08x, len = %i\n",
623 priv->table1_addr, priv->table1_len);
624
625 priv->table2_addr = ipw_read32(priv, IPW_ORDINALS_TABLE_2);
626 priv->table2_len = ipw_read_reg32(priv, priv->table2_addr);
627 priv->table2_len &= 0x0000ffff; /* use first two bytes */
628
629 IPW_DEBUG_ORD("table 2 offset at 0x%08x, len = %i\n",
630 priv->table2_addr, priv->table2_len);
631
632}
633
634/*
635 * The following adds a new attribute to the sysfs representation
636 * of this device driver (i.e. a new file in /sys/bus/pci/drivers/ipw/)
637 * used for controlling the debug level.
638 *
639 * See the level definitions in ipw for details.
640 */
641static ssize_t show_debug_level(struct device_driver *d, char *buf)
642{
643 return sprintf(buf, "0x%08X\n", ipw_debug_level);
644}
645static ssize_t store_debug_level(struct device_driver *d,
646 const char *buf, size_t count)
647{
648 char *p = (char *)buf;
649 u32 val;
650
651 if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
652 p++;
653 if (p[0] == 'x' || p[0] == 'X')
654 p++;
655 val = simple_strtoul(p, &p, 16);
656 } else
657 val = simple_strtoul(p, &p, 10);
658 if (p == buf)
659 printk(KERN_INFO DRV_NAME
660 ": %s is not in hex or decimal form.\n", buf);
661 else
662 ipw_debug_level = val;
663
664 return strnlen(buf, count);
665}
666
667static DRIVER_ATTR(debug_level, S_IWUSR | S_IRUGO,
668 show_debug_level, store_debug_level);
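
Once the module is loaded, the attribute declared above appears under the driver's sysfs directory and accepts decimal or hex input, as store_debug_level() shows. A minimal userspace sketch (the path is taken from the comment above and is an assumption; adjust it to the actual driver directory on your system):

#include <stdio.h>

/* Assumed location, per the comment above about /sys/bus/pci/drivers/ipw/ */
#define DEBUG_LEVEL_PATH "/sys/bus/pci/drivers/ipw/debug_level"

int main(void)
{
	char buf[32];
	FILE *f = fopen(DEBUG_LEVEL_PATH, "r+");

	if (!f) {
		perror(DEBUG_LEVEL_PATH);
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("current debug level: %s", buf);
	rewind(f);			/* switch from reading to writing */
	fputs("0x00000000\n", f);	/* store_debug_level() accepts hex */
	fclose(f);
	return 0;
}
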
669
670static ssize_t show_status(struct device *d,
671 struct device_attribute *attr, char *buf)
672{
673 struct ipw_priv *p = d->driver_data;
674 return sprintf(buf, "0x%08x\n", (int)p->status);
675}
676static DEVICE_ATTR(status, S_IRUGO, show_status, NULL);
677
678static ssize_t show_cfg(struct device *d, struct device_attribute *attr,
679 char *buf)
680{
681 struct ipw_priv *p = d->driver_data;
682 return sprintf(buf, "0x%08x\n", (int)p->config);
683}
684static DEVICE_ATTR(cfg, S_IRUGO, show_cfg, NULL);
685
686static ssize_t show_nic_type(struct device *d,
687 struct device_attribute *attr, char *buf)
688{
689 struct ipw_priv *p = d->driver_data;
690 u8 type = p->eeprom[EEPROM_NIC_TYPE];
691
692 switch (type) {
693 case EEPROM_NIC_TYPE_STANDARD:
694 return sprintf(buf, "STANDARD\n");
695 case EEPROM_NIC_TYPE_DELL:
696 return sprintf(buf, "DELL\n");
697 case EEPROM_NIC_TYPE_FUJITSU:
698 return sprintf(buf, "FUJITSU\n");
699 case EEPROM_NIC_TYPE_IBM:
700 return sprintf(buf, "IBM\n");
701 case EEPROM_NIC_TYPE_HP:
702 return sprintf(buf, "HP\n");
703 }
704
705 return sprintf(buf, "UNKNOWN\n");
706}
707static DEVICE_ATTR(nic_type, S_IRUGO, show_nic_type, NULL);
708
709static ssize_t dump_error_log(struct device *d,
710 struct device_attribute *attr, const char *buf, size_t count)
711{
712 char *p = (char *)buf;
713
714 if (p[0] == '1')
715 ipw_dump_nic_error_log((struct ipw_priv*)d->driver_data);
716
717 return strnlen(buf, count);
718}
719static DEVICE_ATTR(dump_errors, S_IWUSR, NULL, dump_error_log);
720
721static ssize_t dump_event_log(struct device *d,
722 struct device_attribute *attr, const char *buf, size_t count)
723{
724 char *p = (char *)buf;
725
726 if (p[0] == '1')
727 ipw_dump_nic_event_log((struct ipw_priv*)d->driver_data);
728
729 return strnlen(buf, count);
730}
731static DEVICE_ATTR(dump_events, S_IWUSR, NULL, dump_event_log);
732
733static ssize_t show_ucode_version(struct device *d,
734 struct device_attribute *attr, char *buf)
735{
736 u32 len = sizeof(u32), tmp = 0;
737 struct ipw_priv *p = d->driver_data;
738
739 if(ipw_get_ordinal(p, IPW_ORD_STAT_UCODE_VERSION, &tmp, &len))
740 return 0;
741
742 return sprintf(buf, "0x%08x\n", tmp);
743}
744static DEVICE_ATTR(ucode_version, S_IWUSR|S_IRUGO, show_ucode_version, NULL);
745
746static ssize_t show_rtc(struct device *d, struct device_attribute *attr,
747 char *buf)
748{
749 u32 len = sizeof(u32), tmp = 0;
750 struct ipw_priv *p = d->driver_data;
751
752 if(ipw_get_ordinal(p, IPW_ORD_STAT_RTC, &tmp, &len))
753 return 0;
754
755 return sprintf(buf, "0x%08x\n", tmp);
756}
757static DEVICE_ATTR(rtc, S_IWUSR|S_IRUGO, show_rtc, NULL);
758
759/*
760 * Add a device attribute to view/control the delay between eeprom
761 * operations.
762 */
763static ssize_t show_eeprom_delay(struct device *d,
764 struct device_attribute *attr, char *buf)
765{
766 int n = ((struct ipw_priv*)d->driver_data)->eeprom_delay;
767 return sprintf(buf, "%i\n", n);
768}
769static ssize_t store_eeprom_delay(struct device *d,
770 struct device_attribute *attr, const char *buf,
771 size_t count)
772{
773 struct ipw_priv *p = d->driver_data;
774 sscanf(buf, "%i", &p->eeprom_delay);
775 return strnlen(buf, count);
776}
777static DEVICE_ATTR(eeprom_delay, S_IWUSR|S_IRUGO,
778 show_eeprom_delay,store_eeprom_delay);
779
780static ssize_t show_command_event_reg(struct device *d,
781 struct device_attribute *attr, char *buf)
782{
783 u32 reg = 0;
784 struct ipw_priv *p = d->driver_data;
785
786 reg = ipw_read_reg32(p, CX2_INTERNAL_CMD_EVENT);
787 return sprintf(buf, "0x%08x\n", reg);
788}
789static ssize_t store_command_event_reg(struct device *d,
790 struct device_attribute *attr, const char *buf,
791 size_t count)
792{
793 u32 reg;
794 struct ipw_priv *p = d->driver_data;
795
796 sscanf(buf, "%x", &reg);
797 ipw_write_reg32(p, CX2_INTERNAL_CMD_EVENT, reg);
798 return strnlen(buf, count);
799}
800static DEVICE_ATTR(command_event_reg, S_IWUSR|S_IRUGO,
801 show_command_event_reg,store_command_event_reg);
802
803static ssize_t show_mem_gpio_reg(struct device *d,
804 struct device_attribute *attr, char *buf)
805{
806 u32 reg = 0;
807 struct ipw_priv *p = d->driver_data;
808
809 reg = ipw_read_reg32(p, 0x301100);
810 return sprintf(buf, "0x%08x\n", reg);
811}
812static ssize_t store_mem_gpio_reg(struct device *d,
813 struct device_attribute *attr, const char *buf,
814 size_t count)
815{
816 u32 reg;
817 struct ipw_priv *p = d->driver_data;
818
819 sscanf(buf, "%x", &reg);
820 ipw_write_reg32(p, 0x301100, reg);
821 return strnlen(buf, count);
822}
823static DEVICE_ATTR(mem_gpio_reg, S_IWUSR|S_IRUGO,
824 show_mem_gpio_reg,store_mem_gpio_reg);
825
826static ssize_t show_indirect_dword(struct device *d,
827 struct device_attribute *attr, char *buf)
828{
829 u32 reg = 0;
830 struct ipw_priv *priv = d->driver_data;
831 if (priv->status & STATUS_INDIRECT_DWORD)
832 reg = ipw_read_reg32(priv, priv->indirect_dword);
833 else
834 reg = 0;
835
836 return sprintf(buf, "0x%08x\n", reg);
837}
838static ssize_t store_indirect_dword(struct device *d,
839 struct device_attribute *attr, const char *buf,
840 size_t count)
841{
842 struct ipw_priv *priv = d->driver_data;
843
844 sscanf(buf, "%x", &priv->indirect_dword);
845 priv->status |= STATUS_INDIRECT_DWORD;
846 return strnlen(buf, count);
847}
848static DEVICE_ATTR(indirect_dword, S_IWUSR|S_IRUGO,
849 show_indirect_dword,store_indirect_dword);
850
851static ssize_t show_indirect_byte(struct device *d,
852 struct device_attribute *attr, char *buf)
853{
854 u8 reg = 0;
855 struct ipw_priv *priv = d->driver_data;
856 if (priv->status & STATUS_INDIRECT_BYTE)
857 reg = ipw_read_reg8(priv, priv->indirect_byte);
858 else
859 reg = 0;
860
861 return sprintf(buf, "0x%02x\n", reg);
862}
863static ssize_t store_indirect_byte(struct device *d,
864 struct device_attribute *attr, const char *buf,
865 size_t count)
866{
867 struct ipw_priv *priv = d->driver_data;
868
869 sscanf(buf, "%x", &priv->indirect_byte);
870 priv->status |= STATUS_INDIRECT_BYTE;
871 return strnlen(buf, count);
872}
873static DEVICE_ATTR(indirect_byte, S_IWUSR|S_IRUGO,
874 show_indirect_byte, store_indirect_byte);
875
876static ssize_t show_direct_dword(struct device *d,
877 struct device_attribute *attr, char *buf)
878{
879 u32 reg = 0;
880 struct ipw_priv *priv = d->driver_data;
881
882 if (priv->status & STATUS_DIRECT_DWORD)
883 reg = ipw_read32(priv, priv->direct_dword);
884 else
885 reg = 0;
886
887 return sprintf(buf, "0x%08x\n", reg);
888}
889static ssize_t store_direct_dword(struct device *d,
890 struct device_attribute *attr, const char *buf,
891 size_t count)
892{
893 struct ipw_priv *priv = d->driver_data;
894
895 sscanf(buf, "%x", &priv->direct_dword);
896 priv->status |= STATUS_DIRECT_DWORD;
897 return strnlen(buf, count);
898}
899static DEVICE_ATTR(direct_dword, S_IWUSR|S_IRUGO,
900 show_direct_dword,store_direct_dword);
901
902
903static inline int rf_kill_active(struct ipw_priv *priv)
904{
905 if (0 == (ipw_read32(priv, 0x30) & 0x10000))
906 priv->status |= STATUS_RF_KILL_HW;
907 else
908 priv->status &= ~STATUS_RF_KILL_HW;
909
910 return (priv->status & STATUS_RF_KILL_HW) ? 1 : 0;
911}
912
913static ssize_t show_rf_kill(struct device *d, struct device_attribute *attr,
914 char *buf)
915{
916 /* 0 - RF kill not enabled
917 1 - SW based RF kill active (sysfs)
918 2 - HW based RF kill active
919	   3 - Both HW and SW based RF kill active */
920 struct ipw_priv *priv = d->driver_data;
921 int val = ((priv->status & STATUS_RF_KILL_SW) ? 0x1 : 0x0) |
922 (rf_kill_active(priv) ? 0x2 : 0x0);
923 return sprintf(buf, "%i\n", val);
924}
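
The value reported above is a two-bit field: bit 0 mirrors the software kill switch and bit 1 the hardware switch. A small standalone decoder for the four possible readings (illustrative only):

#include <stdio.h>

static const char *rf_kill_desc(int val)
{
	switch (val & 0x3) {
	case 0: return "RF kill not enabled";
	case 1: return "SW based RF kill active (sysfs)";
	case 2: return "HW based RF kill active";
	case 3: return "Both HW and SW based RF kill active";
	}
	return "unreachable";
}

int main(void)
{
	int v;

	for (v = 0; v < 4; v++)
		printf("%d: %s\n", v, rf_kill_desc(v));
	return 0;
}
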
925
926static int ipw_radio_kill_sw(struct ipw_priv *priv, int disable_radio)
927{
928 if ((disable_radio ? 1 : 0) ==
929 (priv->status & STATUS_RF_KILL_SW ? 1 : 0))
930		return 0;
931
932 IPW_DEBUG_RF_KILL("Manual SW RF Kill set to: RADIO %s\n",
933 disable_radio ? "OFF" : "ON");
934
935 if (disable_radio) {
936 priv->status |= STATUS_RF_KILL_SW;
937
938 if (priv->workqueue) {
939 cancel_delayed_work(&priv->request_scan);
940 }
941 wake_up_interruptible(&priv->wait_command_queue);
942 queue_work(priv->workqueue, &priv->down);
943 } else {
944 priv->status &= ~STATUS_RF_KILL_SW;
945 if (rf_kill_active(priv)) {
946 IPW_DEBUG_RF_KILL("Can not turn radio back on - "
947 "disabled by HW switch\n");
948 /* Make sure the RF_KILL check timer is running */
949 cancel_delayed_work(&priv->rf_kill);
950 queue_delayed_work(priv->workqueue, &priv->rf_kill,
951 2 * HZ);
952 } else
953 queue_work(priv->workqueue, &priv->up);
954 }
955
956 return 1;
957}
958
959static ssize_t store_rf_kill(struct device *d, struct device_attribute *attr,
960 const char *buf, size_t count)
961{
962 struct ipw_priv *priv = d->driver_data;
963
964 ipw_radio_kill_sw(priv, buf[0] == '1');
965
966 return count;
967}
968static DEVICE_ATTR(rf_kill, S_IWUSR|S_IRUGO, show_rf_kill, store_rf_kill);
969
970static void ipw_irq_tasklet(struct ipw_priv *priv)
971{
972 u32 inta, inta_mask, handled = 0;
973 unsigned long flags;
974 int rc = 0;
975
976 spin_lock_irqsave(&priv->lock, flags);
977
978 inta = ipw_read32(priv, CX2_INTA_RW);
979 inta_mask = ipw_read32(priv, CX2_INTA_MASK_R);
980 inta &= (CX2_INTA_MASK_ALL & inta_mask);
981
982 /* Add any cached INTA values that need to be handled */
983 inta |= priv->isr_inta;
984
985 /* handle all the justifications for the interrupt */
986 if (inta & CX2_INTA_BIT_RX_TRANSFER) {
987 ipw_rx(priv);
988 handled |= CX2_INTA_BIT_RX_TRANSFER;
989 }
990
991 if (inta & CX2_INTA_BIT_TX_CMD_QUEUE) {
992 IPW_DEBUG_HC("Command completed.\n");
993 rc = ipw_queue_tx_reclaim( priv, &priv->txq_cmd, -1);
994 priv->status &= ~STATUS_HCMD_ACTIVE;
995 wake_up_interruptible(&priv->wait_command_queue);
996 handled |= CX2_INTA_BIT_TX_CMD_QUEUE;
997 }
998
999 if (inta & CX2_INTA_BIT_TX_QUEUE_1) {
1000 IPW_DEBUG_TX("TX_QUEUE_1\n");
1001 rc = ipw_queue_tx_reclaim( priv, &priv->txq[0], 0);
1002 handled |= CX2_INTA_BIT_TX_QUEUE_1;
1003 }
1004
1005 if (inta & CX2_INTA_BIT_TX_QUEUE_2) {
1006 IPW_DEBUG_TX("TX_QUEUE_2\n");
1007 rc = ipw_queue_tx_reclaim( priv, &priv->txq[1], 1);
1008 handled |= CX2_INTA_BIT_TX_QUEUE_2;
1009 }
1010
1011 if (inta & CX2_INTA_BIT_TX_QUEUE_3) {
1012 IPW_DEBUG_TX("TX_QUEUE_3\n");
1013 rc = ipw_queue_tx_reclaim( priv, &priv->txq[2], 2);
1014 handled |= CX2_INTA_BIT_TX_QUEUE_3;
1015 }
1016
1017 if (inta & CX2_INTA_BIT_TX_QUEUE_4) {
1018 IPW_DEBUG_TX("TX_QUEUE_4\n");
1019 rc = ipw_queue_tx_reclaim( priv, &priv->txq[3], 3);
1020 handled |= CX2_INTA_BIT_TX_QUEUE_4;
1021 }
1022
1023 if (inta & CX2_INTA_BIT_STATUS_CHANGE) {
1024 IPW_WARNING("STATUS_CHANGE\n");
1025 handled |= CX2_INTA_BIT_STATUS_CHANGE;
1026 }
1027
1028 if (inta & CX2_INTA_BIT_BEACON_PERIOD_EXPIRED) {
1029		IPW_WARNING("BEACON_PERIOD_EXPIRED\n");
1030 handled |= CX2_INTA_BIT_BEACON_PERIOD_EXPIRED;
1031 }
1032
1033 if (inta & CX2_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE) {
1034 IPW_WARNING("HOST_CMD_DONE\n");
1035 handled |= CX2_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE;
1036 }
1037
1038 if (inta & CX2_INTA_BIT_FW_INITIALIZATION_DONE) {
1039 IPW_WARNING("FW_INITIALIZATION_DONE\n");
1040 handled |= CX2_INTA_BIT_FW_INITIALIZATION_DONE;
1041 }
1042
1043 if (inta & CX2_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE) {
1044 IPW_WARNING("PHY_OFF_DONE\n");
1045 handled |= CX2_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE;
1046 }
1047
1048 if (inta & CX2_INTA_BIT_RF_KILL_DONE) {
1049 IPW_DEBUG_RF_KILL("RF_KILL_DONE\n");
1050 priv->status |= STATUS_RF_KILL_HW;
1051 wake_up_interruptible(&priv->wait_command_queue);
1052 netif_carrier_off(priv->net_dev);
1053 netif_stop_queue(priv->net_dev);
1054 cancel_delayed_work(&priv->request_scan);
1055 queue_delayed_work(priv->workqueue, &priv->rf_kill, 2 * HZ);
1056 handled |= CX2_INTA_BIT_RF_KILL_DONE;
1057 }
1058
1059 if (inta & CX2_INTA_BIT_FATAL_ERROR) {
1060 IPW_ERROR("Firmware error detected. Restarting.\n");
1061#ifdef CONFIG_IPW_DEBUG
1062 if (ipw_debug_level & IPW_DL_FW_ERRORS) {
1063 ipw_dump_nic_error_log(priv);
1064 ipw_dump_nic_event_log(priv);
1065 }
1066#endif
1067 queue_work(priv->workqueue, &priv->adapter_restart);
1068 handled |= CX2_INTA_BIT_FATAL_ERROR;
1069 }
1070
1071 if (inta & CX2_INTA_BIT_PARITY_ERROR) {
1072 IPW_ERROR("Parity error\n");
1073 handled |= CX2_INTA_BIT_PARITY_ERROR;
1074 }
1075
1076 if (handled != inta) {
1077 IPW_ERROR("Unhandled INTA bits 0x%08x\n",
1078 inta & ~handled);
1079 }
1080
1081 /* enable all interrupts */
1082 ipw_enable_interrupts(priv);
1083
1084 spin_unlock_irqrestore(&priv->lock, flags);
1085}
1086
1087#ifdef CONFIG_IPW_DEBUG
1088#define IPW_CMD(x) case IPW_CMD_ ## x : return #x
1089static char *get_cmd_string(u8 cmd)
1090{
1091 switch (cmd) {
1092 IPW_CMD(HOST_COMPLETE);
1093 IPW_CMD(POWER_DOWN);
1094 IPW_CMD(SYSTEM_CONFIG);
1095 IPW_CMD(MULTICAST_ADDRESS);
1096 IPW_CMD(SSID);
1097 IPW_CMD(ADAPTER_ADDRESS);
1098 IPW_CMD(PORT_TYPE);
1099 IPW_CMD(RTS_THRESHOLD);
1100 IPW_CMD(FRAG_THRESHOLD);
1101 IPW_CMD(POWER_MODE);
1102 IPW_CMD(WEP_KEY);
1103 IPW_CMD(TGI_TX_KEY);
1104 IPW_CMD(SCAN_REQUEST);
1105 IPW_CMD(SCAN_REQUEST_EXT);
1106 IPW_CMD(ASSOCIATE);
1107 IPW_CMD(SUPPORTED_RATES);
1108 IPW_CMD(SCAN_ABORT);
1109 IPW_CMD(TX_FLUSH);
1110 IPW_CMD(QOS_PARAMETERS);
1111 IPW_CMD(DINO_CONFIG);
1112 IPW_CMD(RSN_CAPABILITIES);
1113 IPW_CMD(RX_KEY);
1114 IPW_CMD(CARD_DISABLE);
1115 IPW_CMD(SEED_NUMBER);
1116 IPW_CMD(TX_POWER);
1117 IPW_CMD(COUNTRY_INFO);
1118 IPW_CMD(AIRONET_INFO);
1119 IPW_CMD(AP_TX_POWER);
1120 IPW_CMD(CCKM_INFO);
1121 IPW_CMD(CCX_VER_INFO);
1122 IPW_CMD(SET_CALIBRATION);
1123 IPW_CMD(SENSITIVITY_CALIB);
1124 IPW_CMD(RETRY_LIMIT);
1125 IPW_CMD(IPW_PRE_POWER_DOWN);
1126 IPW_CMD(VAP_BEACON_TEMPLATE);
1127 IPW_CMD(VAP_DTIM_PERIOD);
1128 IPW_CMD(EXT_SUPPORTED_RATES);
1129 IPW_CMD(VAP_LOCAL_TX_PWR_CONSTRAINT);
1130 IPW_CMD(VAP_QUIET_INTERVALS);
1131 IPW_CMD(VAP_CHANNEL_SWITCH);
1132 IPW_CMD(VAP_MANDATORY_CHANNELS);
1133 IPW_CMD(VAP_CELL_PWR_LIMIT);
1134 IPW_CMD(VAP_CF_PARAM_SET);
1135 IPW_CMD(VAP_SET_BEACONING_STATE);
1136 IPW_CMD(MEASUREMENT);
1137 IPW_CMD(POWER_CAPABILITY);
1138 IPW_CMD(SUPPORTED_CHANNELS);
1139 IPW_CMD(TPC_REPORT);
1140 IPW_CMD(WME_INFO);
1141 IPW_CMD(PRODUCTION_COMMAND);
1142 default:
1143 return "UNKNOWN";
1144 }
1145}
1146#endif /* CONFIG_IPW_DEBUG */
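
get_cmd_string() leans on the preprocessor: IPW_CMD(x) pastes the argument onto IPW_CMD_ to form the case label and stringizes it for the return value. A standalone sketch of the same pattern with made-up command codes (DEMO_* names are hypothetical stand-ins):

#include <stdio.h>

/* Hypothetical command codes standing in for the IPW_CMD_* values. */
#define DEMO_CMD_HOST_COMPLETE 1
#define DEMO_CMD_SSID          2

/* Same trick as IPW_CMD(x): build the case label by token pasting and
 * the returned string by stringizing the argument. */
#define DEMO_CMD(x) case DEMO_CMD_ ## x: return #x

static const char *demo_cmd_string(int cmd)
{
	switch (cmd) {
	DEMO_CMD(HOST_COMPLETE);
	DEMO_CMD(SSID);
	default:
		return "UNKNOWN";
	}
}

int main(void)
{
	printf("%s %s %s\n", demo_cmd_string(1), demo_cmd_string(2),
	       demo_cmd_string(99));
	return 0;
}
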
1147
1148#define HOST_COMPLETE_TIMEOUT HZ
1149static int ipw_send_cmd(struct ipw_priv *priv, struct host_cmd *cmd)
1150{
1151 int rc = 0;
1152
1153 if (priv->status & STATUS_HCMD_ACTIVE) {
1154 IPW_ERROR("Already sending a command\n");
1155 return -1;
1156 }
1157
1158 priv->status |= STATUS_HCMD_ACTIVE;
1159
1160 IPW_DEBUG_HC("Sending %s command (#%d), %d bytes\n",
1161 get_cmd_string(cmd->cmd), cmd->cmd, cmd->len);
1162 printk_buf(IPW_DL_HOST_COMMAND, (u8*)cmd->param, cmd->len);
1163
1164 rc = ipw_queue_tx_hcmd(priv, cmd->cmd, &cmd->param, cmd->len, 0);
1165 if (rc)
1166 return rc;
1167
1168 rc = wait_event_interruptible_timeout(
1169 priv->wait_command_queue, !(priv->status & STATUS_HCMD_ACTIVE),
1170 HOST_COMPLETE_TIMEOUT);
1171 if (rc == 0) {
1172		IPW_DEBUG_INFO("Command completion timed out after %dms.\n",
1173 HOST_COMPLETE_TIMEOUT / (HZ / 1000));
1174 priv->status &= ~STATUS_HCMD_ACTIVE;
1175 return -EIO;
1176 }
1177 if (priv->status & STATUS_RF_KILL_MASK) {
1178 IPW_DEBUG_INFO("Command aborted due to RF Kill Switch\n");
1179 return -EIO;
1180 }
1181
1182 return 0;
1183}
1184
1185static int ipw_send_host_complete(struct ipw_priv *priv)
1186{
1187 struct host_cmd cmd = {
1188 .cmd = IPW_CMD_HOST_COMPLETE,
1189 .len = 0
1190 };
1191
1192 if (!priv) {
1193 IPW_ERROR("Invalid args\n");
1194 return -1;
1195 }
1196
1197 if (ipw_send_cmd(priv, &cmd)) {
1198 IPW_ERROR("failed to send HOST_COMPLETE command\n");
1199 return -1;
1200 }
1201
1202 return 0;
1203}
1204
1205static int ipw_send_system_config(struct ipw_priv *priv,
1206 struct ipw_sys_config *config)
1207{
1208 struct host_cmd cmd = {
1209 .cmd = IPW_CMD_SYSTEM_CONFIG,
1210 .len = sizeof(*config)
1211 };
1212
1213 if (!priv || !config) {
1214 IPW_ERROR("Invalid args\n");
1215 return -1;
1216 }
1217
1218 memcpy(&cmd.param,config,sizeof(*config));
1219 if (ipw_send_cmd(priv, &cmd)) {
1220 IPW_ERROR("failed to send SYSTEM_CONFIG command\n");
1221 return -1;
1222 }
1223
1224 return 0;
1225}
1226
1227static int ipw_send_ssid(struct ipw_priv *priv, u8 *ssid, int len)
1228{
1229 struct host_cmd cmd = {
1230 .cmd = IPW_CMD_SSID,
1231 .len = min(len, IW_ESSID_MAX_SIZE)
1232 };
1233
1234 if (!priv || !ssid) {
1235 IPW_ERROR("Invalid args\n");
1236 return -1;
1237 }
1238
1239 memcpy(&cmd.param, ssid, cmd.len);
1240 if (ipw_send_cmd(priv, &cmd)) {
1241 IPW_ERROR("failed to send SSID command\n");
1242 return -1;
1243 }
1244
1245 return 0;
1246}
1247
1248static int ipw_send_adapter_address(struct ipw_priv *priv, u8 *mac)
1249{
1250 struct host_cmd cmd = {
1251 .cmd = IPW_CMD_ADAPTER_ADDRESS,
1252 .len = ETH_ALEN
1253 };
1254
1255 if (!priv || !mac) {
1256 IPW_ERROR("Invalid args\n");
1257 return -1;
1258 }
1259
1260 IPW_DEBUG_INFO("%s: Setting MAC to " MAC_FMT "\n",
1261 priv->net_dev->name, MAC_ARG(mac));
1262
1263 memcpy(&cmd.param, mac, ETH_ALEN);
1264
1265 if (ipw_send_cmd(priv, &cmd)) {
1266 IPW_ERROR("failed to send ADAPTER_ADDRESS command\n");
1267 return -1;
1268 }
1269
1270 return 0;
1271}
1272
1273static void ipw_adapter_restart(void *adapter)
1274{
1275 struct ipw_priv *priv = adapter;
1276
1277 if (priv->status & STATUS_RF_KILL_MASK)
1278 return;
1279
1280 ipw_down(priv);
1281 if (ipw_up(priv)) {
1282 IPW_ERROR("Failed to up device\n");
1283 return;
1284 }
1285}
1286
1287
1288
1289
1290#define IPW_SCAN_CHECK_WATCHDOG (5 * HZ)
1291
1292static void ipw_scan_check(void *data)
1293{
1294 struct ipw_priv *priv = data;
1295 if (priv->status & (STATUS_SCANNING | STATUS_SCAN_ABORTING)) {
1296 IPW_DEBUG_SCAN("Scan completion watchdog resetting "
1297 "adapter (%dms).\n",
1298 IPW_SCAN_CHECK_WATCHDOG / 100);
1299 ipw_adapter_restart(priv);
1300 }
1301}
1302
1303static int ipw_send_scan_request_ext(struct ipw_priv *priv,
1304 struct ipw_scan_request_ext *request)
1305{
1306 struct host_cmd cmd = {
1307 .cmd = IPW_CMD_SCAN_REQUEST_EXT,
1308 .len = sizeof(*request)
1309 };
1310
1311 if (!priv || !request) {
1312 IPW_ERROR("Invalid args\n");
1313 return -1;
1314 }
1315
1316 memcpy(&cmd.param,request,sizeof(*request));
1317 if (ipw_send_cmd(priv, &cmd)) {
1318 IPW_ERROR("failed to send SCAN_REQUEST_EXT command\n");
1319 return -1;
1320 }
1321
1322 queue_delayed_work(priv->workqueue, &priv->scan_check,
1323 IPW_SCAN_CHECK_WATCHDOG);
1324 return 0;
1325}
1326
1327static int ipw_send_scan_abort(struct ipw_priv *priv)
1328{
1329 struct host_cmd cmd = {
1330 .cmd = IPW_CMD_SCAN_ABORT,
1331 .len = 0
1332 };
1333
1334 if (!priv) {
1335 IPW_ERROR("Invalid args\n");
1336 return -1;
1337 }
1338
1339 if (ipw_send_cmd(priv, &cmd)) {
1340 IPW_ERROR("failed to send SCAN_ABORT command\n");
1341 return -1;
1342 }
1343
1344 return 0;
1345}
1346
1347static int ipw_set_sensitivity(struct ipw_priv *priv, u16 sens)
1348{
1349 struct host_cmd cmd = {
1350 .cmd = IPW_CMD_SENSITIVITY_CALIB,
1351 .len = sizeof(struct ipw_sensitivity_calib)
1352 };
1353 struct ipw_sensitivity_calib *calib = (struct ipw_sensitivity_calib *)
1354 &cmd.param;
1355 calib->beacon_rssi_raw = sens;
1356 if (ipw_send_cmd(priv, &cmd)) {
1357 IPW_ERROR("failed to send SENSITIVITY CALIB command\n");
1358 return -1;
1359 }
1360
1361 return 0;
1362}
1363
1364static int ipw_send_associate(struct ipw_priv *priv,
1365 struct ipw_associate *associate)
1366{
1367 struct host_cmd cmd = {
1368 .cmd = IPW_CMD_ASSOCIATE,
1369 .len = sizeof(*associate)
1370 };
1371
1372 if (!priv || !associate) {
1373 IPW_ERROR("Invalid args\n");
1374 return -1;
1375 }
1376
1377 memcpy(&cmd.param,associate,sizeof(*associate));
1378 if (ipw_send_cmd(priv, &cmd)) {
1379 IPW_ERROR("failed to send ASSOCIATE command\n");
1380 return -1;
1381 }
1382
1383 return 0;
1384}
1385
1386static int ipw_send_supported_rates(struct ipw_priv *priv,
1387 struct ipw_supported_rates *rates)
1388{
1389 struct host_cmd cmd = {
1390 .cmd = IPW_CMD_SUPPORTED_RATES,
1391 .len = sizeof(*rates)
1392 };
1393
1394 if (!priv || !rates) {
1395 IPW_ERROR("Invalid args\n");
1396 return -1;
1397 }
1398
1399 memcpy(&cmd.param,rates,sizeof(*rates));
1400 if (ipw_send_cmd(priv, &cmd)) {
1401 IPW_ERROR("failed to send SUPPORTED_RATES command\n");
1402 return -1;
1403 }
1404
1405 return 0;
1406}
1407
1408static int ipw_set_random_seed(struct ipw_priv *priv)
1409{
1410 struct host_cmd cmd = {
1411 .cmd = IPW_CMD_SEED_NUMBER,
1412 .len = sizeof(u32)
1413 };
1414
1415 if (!priv) {
1416 IPW_ERROR("Invalid args\n");
1417 return -1;
1418 }
1419
1420 get_random_bytes(&cmd.param, sizeof(u32));
1421
1422 if (ipw_send_cmd(priv, &cmd)) {
1423 IPW_ERROR("failed to send SEED_NUMBER command\n");
1424 return -1;
1425 }
1426
1427 return 0;
1428}
1429
1430#if 0
1431static int ipw_send_card_disable(struct ipw_priv *priv, u32 phy_off)
1432{
1433 struct host_cmd cmd = {
1434 .cmd = IPW_CMD_CARD_DISABLE,
1435 .len = sizeof(u32)
1436 };
1437
1438 if (!priv) {
1439 IPW_ERROR("Invalid args\n");
1440 return -1;
1441 }
1442
1443 *((u32*)&cmd.param) = phy_off;
1444
1445 if (ipw_send_cmd(priv, &cmd)) {
1446 IPW_ERROR("failed to send CARD_DISABLE command\n");
1447 return -1;
1448 }
1449
1450 return 0;
1451}
1452#endif
1453
1454static int ipw_send_tx_power(struct ipw_priv *priv,
1455 struct ipw_tx_power *power)
1456{
1457 struct host_cmd cmd = {
1458 .cmd = IPW_CMD_TX_POWER,
1459 .len = sizeof(*power)
1460 };
1461
1462 if (!priv || !power) {
1463 IPW_ERROR("Invalid args\n");
1464 return -1;
1465 }
1466
1467 memcpy(&cmd.param,power,sizeof(*power));
1468 if (ipw_send_cmd(priv, &cmd)) {
1469 IPW_ERROR("failed to send TX_POWER command\n");
1470 return -1;
1471 }
1472
1473 return 0;
1474}
1475
1476static int ipw_send_rts_threshold(struct ipw_priv *priv, u16 rts)
1477{
1478 struct ipw_rts_threshold rts_threshold = {
1479 .rts_threshold = rts,
1480 };
1481 struct host_cmd cmd = {
1482 .cmd = IPW_CMD_RTS_THRESHOLD,
1483 .len = sizeof(rts_threshold)
1484 };
1485
1486 if (!priv) {
1487 IPW_ERROR("Invalid args\n");
1488 return -1;
1489 }
1490
1491 memcpy(&cmd.param, &rts_threshold, sizeof(rts_threshold));
1492 if (ipw_send_cmd(priv, &cmd)) {
1493 IPW_ERROR("failed to send RTS_THRESHOLD command\n");
1494 return -1;
1495 }
1496
1497 return 0;
1498}
1499
1500static int ipw_send_frag_threshold(struct ipw_priv *priv, u16 frag)
1501{
1502 struct ipw_frag_threshold frag_threshold = {
1503 .frag_threshold = frag,
1504 };
1505 struct host_cmd cmd = {
1506 .cmd = IPW_CMD_FRAG_THRESHOLD,
1507 .len = sizeof(frag_threshold)
1508 };
1509
1510 if (!priv) {
1511 IPW_ERROR("Invalid args\n");
1512 return -1;
1513 }
1514
1515 memcpy(&cmd.param, &frag_threshold, sizeof(frag_threshold));
1516 if (ipw_send_cmd(priv, &cmd)) {
1517 IPW_ERROR("failed to send FRAG_THRESHOLD command\n");
1518 return -1;
1519 }
1520
1521 return 0;
1522}
1523
1524static int ipw_send_power_mode(struct ipw_priv *priv, u32 mode)
1525{
1526 struct host_cmd cmd = {
1527 .cmd = IPW_CMD_POWER_MODE,
1528 .len = sizeof(u32)
1529 };
1530 u32 *param = (u32*)(&cmd.param);
1531
1532 if (!priv) {
1533 IPW_ERROR("Invalid args\n");
1534 return -1;
1535 }
1536
1537	/* If on battery, set to power index 3; if on AC, set to CAM;
1538	 * otherwise pass the user-supplied level through */
1539 switch (mode) {
1540 case IPW_POWER_BATTERY:
1541 *param = IPW_POWER_INDEX_3;
1542 break;
1543 case IPW_POWER_AC:
1544 *param = IPW_POWER_MODE_CAM;
1545 break;
1546 default:
1547 *param = mode;
1548 break;
1549 }
1550
1551 if (ipw_send_cmd(priv, &cmd)) {
1552 IPW_ERROR("failed to send POWER_MODE command\n");
1553 return -1;
1554 }
1555
1556 return 0;
1557}
1558
1559/*
1560 * The IPW device contains a Microwire compatible EEPROM that stores
1561 * various data like the MAC address. Usually the firmware has exclusive
1562 * access to the eeprom, but during device initialization (before the
1563 * device driver has sent the HostComplete command to the firmware) the
1564 * device driver has read access to the EEPROM by way of indirect addressing
1565 * through a couple of memory mapped registers.
1566 *
1567 * The following is a simplified implementation for pulling data out of
1568 * the eeprom, along with some helper functions to find information in
1569 * the per device private data's copy of the eeprom.
1570 *
1571 * NOTE: To better understand how these functions work (i.e. what is a chip
1572 *	select and why do we have to keep driving the eeprom clock?), read
1573 * just about any data sheet for a Microwire compatible EEPROM.
1574 */
1575
1576/* write a 32 bit value into the indirect accessor register */
1577static inline void eeprom_write_reg(struct ipw_priv *p, u32 data)
1578{
1579 ipw_write_reg32(p, FW_MEM_REG_EEPROM_ACCESS, data);
1580
1581 /* the eeprom requires some time to complete the operation */
1582 udelay(p->eeprom_delay);
1583
1584 return;
1585}
1586
1587/* perform a chip select operation */
1588static inline void eeprom_cs(struct ipw_priv* priv)
1589{
1590 eeprom_write_reg(priv,0);
1591 eeprom_write_reg(priv,EEPROM_BIT_CS);
1592 eeprom_write_reg(priv,EEPROM_BIT_CS|EEPROM_BIT_SK);
1593 eeprom_write_reg(priv,EEPROM_BIT_CS);
1594}
1595
1596/* deassert the eeprom chip select */
1597static inline void eeprom_disable_cs(struct ipw_priv* priv)
1598{
1599 eeprom_write_reg(priv,EEPROM_BIT_CS);
1600 eeprom_write_reg(priv,0);
1601 eeprom_write_reg(priv,EEPROM_BIT_SK);
1602}
1603
1604/* push a single bit down to the eeprom */
1605static inline void eeprom_write_bit(struct ipw_priv *p,u8 bit)
1606{
1607 int d = ( bit ? EEPROM_BIT_DI : 0);
1608 eeprom_write_reg(p,EEPROM_BIT_CS|d);
1609 eeprom_write_reg(p,EEPROM_BIT_CS|d|EEPROM_BIT_SK);
1610}
1611
1612/* push an opcode followed by an address down to the eeprom */
1613static void eeprom_op(struct ipw_priv* priv, u8 op, u8 addr)
1614{
1615 int i;
1616
1617 eeprom_cs(priv);
1618 eeprom_write_bit(priv,1);
1619 eeprom_write_bit(priv,op&2);
1620 eeprom_write_bit(priv,op&1);
1621 for ( i=7; i>=0; i-- ) {
1622 eeprom_write_bit(priv,addr&(1<<i));
1623 }
1624}
1625
1626/* pull 16 bits off the eeprom, one bit at a time */
1627static u16 eeprom_read_u16(struct ipw_priv* priv, u8 addr)
1628{
1629 int i;
1630 u16 r=0;
1631
1632 /* Send READ Opcode */
1633 eeprom_op(priv,EEPROM_CMD_READ,addr);
1634
1635 /* Send dummy bit */
1636 eeprom_write_reg(priv,EEPROM_BIT_CS);
1637
1638	/* Read the 16-bit value off the eeprom one bit at a time */
1639 for ( i=0; i<16; i++ ) {
1640 u32 data = 0;
1641 eeprom_write_reg(priv,EEPROM_BIT_CS|EEPROM_BIT_SK);
1642 eeprom_write_reg(priv,EEPROM_BIT_CS);
1643 data = ipw_read_reg32(priv,FW_MEM_REG_EEPROM_ACCESS);
1644 r = (r<<1) | ((data & EEPROM_BIT_DO)?1:0);
1645 }
1646
1647 /* Send another dummy bit */
1648 eeprom_write_reg(priv,0);
1649 eeprom_disable_cs(priv);
1650
1651 return r;
1652}
1653
1654/* helper function for pulling the mac address out of the private */
1655/* data's copy of the eeprom data */
1656static void eeprom_parse_mac(struct ipw_priv* priv, u8* mac)
1657{
1658 u8* ee = (u8*)priv->eeprom;
1659 memcpy(mac, &ee[EEPROM_MAC_ADDRESS], 6);
1660}
1661
1662/*
1663 * Either the device driver (i.e. the host) or the firmware can
1664 * load eeprom data into the designated region in SRAM. If neither
1665 * happens then the FW will shut down with a fatal error.
1666 *
1667 * In order to signal the FW to load the EEPROM, the EEPROM_LOAD_DISABLE
1668 * region of shared SRAM needs to be non-zero.
1669 */
1670static void ipw_eeprom_init_sram(struct ipw_priv *priv)
1671{
1672 int i;
1673 u16 *eeprom = (u16 *)priv->eeprom;
1674
1675 IPW_DEBUG_TRACE(">>\n");
1676
1677 /* read entire contents of eeprom into private buffer */
1678 for ( i=0; i<128; i++ )
1679 eeprom[i] = eeprom_read_u16(priv,(u8)i);
1680
1681 /*
1682 If the data looks correct, then copy it to our private
1683 copy. Otherwise let the firmware know to perform the operation
1684	   on its own.
1685	*/
1686	if (priv->eeprom[EEPROM_VERSION] != 0) {
1687 IPW_DEBUG_INFO("Writing EEPROM data into SRAM\n");
1688
1689 /* write the eeprom data to sram */
1690 for( i=0; i<CX2_EEPROM_IMAGE_SIZE; i++ )
1691 ipw_write8(priv, IPW_EEPROM_DATA + i,
1692 priv->eeprom[i]);
1693
1694 /* Do not load eeprom data on fatal error or suspend */
1695 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);
1696 } else {
1697		IPW_DEBUG_INFO("Enabling FW initialization of SRAM\n");
1698
1699 /* Load eeprom data on fatal error or suspend */
1700 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 1);
1701 }
1702
1703 IPW_DEBUG_TRACE("<<\n");
1704}
1705
1706
1707static inline void ipw_zero_memory(struct ipw_priv *priv, u32 start, u32 count)
1708{
1709 count >>= 2;
1710 if (!count) return;
1711 _ipw_write32(priv, CX2_AUTOINC_ADDR, start);
1712 while (count--)
1713 _ipw_write32(priv, CX2_AUTOINC_DATA, 0);
1714}
1715
1716static inline void ipw_fw_dma_reset_command_blocks(struct ipw_priv *priv)
1717{
1718 ipw_zero_memory(priv, CX2_SHARED_SRAM_DMA_CONTROL,
1719 CB_NUMBER_OF_ELEMENTS_SMALL *
1720 sizeof(struct command_block));
1721}
1722
1723static int ipw_fw_dma_enable(struct ipw_priv *priv)
1724{ /* start dma engine but no transfers yet*/
1725
1726 IPW_DEBUG_FW(">> : \n");
1727
1728 /* Start the dma */
1729 ipw_fw_dma_reset_command_blocks(priv);
1730
1731 /* Write CB base address */
1732 ipw_write_reg32(priv, CX2_DMA_I_CB_BASE, CX2_SHARED_SRAM_DMA_CONTROL);
1733
1734 IPW_DEBUG_FW("<< : \n");
1735 return 0;
1736}
1737
1738static void ipw_fw_dma_abort(struct ipw_priv *priv)
1739{
1740 u32 control = 0;
1741
1742 IPW_DEBUG_FW(">> :\n");
1743
1744	/* set the Stop and Abort bit */
1745 control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_STOP_AND_ABORT;
1746 ipw_write_reg32(priv, CX2_DMA_I_DMA_CONTROL, control);
1747 priv->sram_desc.last_cb_index = 0;
1748
1749 IPW_DEBUG_FW("<< \n");
1750}
1751
1752static int ipw_fw_dma_write_command_block(struct ipw_priv *priv, int index, struct command_block *cb)
1753{
1754 u32 address = CX2_SHARED_SRAM_DMA_CONTROL + (sizeof(struct command_block) * index);
1755 IPW_DEBUG_FW(">> :\n");
1756
1757 ipw_write_indirect(priv, address, (u8*)cb, (int)sizeof(struct command_block));
1758
1759 IPW_DEBUG_FW("<< :\n");
1760 return 0;
1761
1762}
1763
1764static int ipw_fw_dma_kick(struct ipw_priv *priv)
1765{
1766 u32 control = 0;
1767 u32 index=0;
1768
1769 IPW_DEBUG_FW(">> :\n");
1770
1771 for (index = 0; index < priv->sram_desc.last_cb_index; index++)
1772 ipw_fw_dma_write_command_block(priv, index, &priv->sram_desc.cb_list[index]);
1773
1774 /* Enable the DMA in the CSR register */
1775 ipw_clear_bit(priv, CX2_RESET_REG,CX2_RESET_REG_MASTER_DISABLED | CX2_RESET_REG_STOP_MASTER);
1776
1777 /* Set the Start bit. */
1778 control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_START;
1779 ipw_write_reg32(priv, CX2_DMA_I_DMA_CONTROL, control);
1780
1781 IPW_DEBUG_FW("<< :\n");
1782 return 0;
1783}
1784
1785static void ipw_fw_dma_dump_command_block(struct ipw_priv *priv)
1786{
1787 u32 address;
1788 u32 register_value=0;
1789 u32 cb_fields_address=0;
1790
1791 IPW_DEBUG_FW(">> :\n");
1792 address = ipw_read_reg32(priv,CX2_DMA_I_CURRENT_CB);
1793 IPW_DEBUG_FW_INFO("Current CB is 0x%x \n",address);
1794
1795	/* Read the DMA Control register */
1796 register_value = ipw_read_reg32(priv, CX2_DMA_I_DMA_CONTROL);
1797 IPW_DEBUG_FW_INFO("CX2_DMA_I_DMA_CONTROL is 0x%x \n",register_value);
1798
1799 /* Print the CB values*/
1800 cb_fields_address = address;
1801 register_value = ipw_read_reg32(priv, cb_fields_address);
1802 IPW_DEBUG_FW_INFO("Current CB ControlField is 0x%x \n",register_value);
1803
1804 cb_fields_address += sizeof(u32);
1805 register_value = ipw_read_reg32(priv, cb_fields_address);
1806 IPW_DEBUG_FW_INFO("Current CB Source Field is 0x%x \n",register_value);
1807
1808 cb_fields_address += sizeof(u32);
1809 register_value = ipw_read_reg32(priv, cb_fields_address);
1810 IPW_DEBUG_FW_INFO("Current CB Destination Field is 0x%x \n",
1811 register_value);
1812
1813 cb_fields_address += sizeof(u32);
1814 register_value = ipw_read_reg32(priv, cb_fields_address);
1815 IPW_DEBUG_FW_INFO("Current CB Status Field is 0x%x \n",register_value);
1816
1817	IPW_DEBUG_FW("<< :\n");
1818}
1819
1820static int ipw_fw_dma_command_block_index(struct ipw_priv *priv)
1821{
1822 u32 current_cb_address = 0;
1823 u32 current_cb_index = 0;
1824
1825	IPW_DEBUG_FW(">> :\n");
1826 current_cb_address= ipw_read_reg32(priv, CX2_DMA_I_CURRENT_CB);
1827
1828 current_cb_index = (current_cb_address - CX2_SHARED_SRAM_DMA_CONTROL )/
1829 sizeof (struct command_block);
1830
1831 IPW_DEBUG_FW_INFO("Current CB index 0x%x address = 0x%X \n",
1832 current_cb_index, current_cb_address );
1833
1834	IPW_DEBUG_FW("<< :\n");
1835 return current_cb_index;
1836
1837}
1838
1839static int ipw_fw_dma_add_command_block(struct ipw_priv *priv,
1840 u32 src_address,
1841 u32 dest_address,
1842 u32 length,
1843 int interrupt_enabled,
1844 int is_last)
1845{
1846
1847 u32 control = CB_VALID | CB_SRC_LE | CB_DEST_LE | CB_SRC_AUTOINC |
1848 CB_SRC_IO_GATED | CB_DEST_AUTOINC | CB_SRC_SIZE_LONG |
1849 CB_DEST_SIZE_LONG;
1850 struct command_block *cb;
1851 u32 last_cb_element=0;
1852
1853 IPW_DEBUG_FW_INFO("src_address=0x%x dest_address=0x%x length=0x%x\n",
1854 src_address, dest_address, length);
1855
1856 if (priv->sram_desc.last_cb_index >= CB_NUMBER_OF_ELEMENTS_SMALL)
1857 return -1;
1858
1859 last_cb_element = priv->sram_desc.last_cb_index;
1860 cb = &priv->sram_desc.cb_list[last_cb_element];
1861 priv->sram_desc.last_cb_index++;
1862
1863 /* Calculate the new CB control word */
1864 if (interrupt_enabled )
1865 control |= CB_INT_ENABLED;
1866
1867 if (is_last)
1868 control |= CB_LAST_VALID;
1869
1870 control |= length;
1871
1872 /* Calculate the CB Element's checksum value */
1873 cb->status = control ^src_address ^dest_address;
1874
1875 /* Copy the Source and Destination addresses */
1876 cb->dest_addr = dest_address;
1877 cb->source_addr = src_address;
1878
1879 /* Copy the Control Word last */
1880 cb->control = control;
1881
1882 return 0;
1883}
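
Each command block above carries a control word (flags plus length) and a status word computed as the XOR of control, source and destination addresses, with the control word stored last. A standalone sketch that builds one block the same way (struct layout and flag values are illustrative stand-ins, not the real CB_* constants from ipw2200.h):

#include <stdio.h>
#include <stdint.h>

/* Illustrative flag values; the real CB_* constants live in ipw2200.h. */
#define DEMO_CB_VALID       (1u << 31)
#define DEMO_CB_LAST_VALID  (1u << 30)
#define DEMO_CB_INT_ENABLED (1u << 29)

struct demo_command_block {
	uint32_t control;
	uint32_t source_addr;
	uint32_t dest_addr;
	uint32_t status;
};

static void fill_cb(struct demo_command_block *cb, uint32_t src,
		    uint32_t dst, uint32_t length, int last)
{
	uint32_t control = DEMO_CB_VALID | length;

	if (last)
		control |= DEMO_CB_LAST_VALID;

	cb->status = control ^ src ^ dst;	/* same XOR as the driver */
	cb->source_addr = src;
	cb->dest_addr = dst;
	cb->control = control;			/* control word written last */
}

int main(void)
{
	struct demo_command_block cb;

	fill_cb(&cb, 0x10000000, 0x00020000, 0x800, 1);
	printf("control=0x%08x status=0x%08x\n", cb.control, cb.status);
	return 0;
}
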
1884
1885static int ipw_fw_dma_add_buffer(struct ipw_priv *priv,
1886 u32 src_phys,
1887 u32 dest_address,
1888 u32 length)
1889{
1890 u32 bytes_left = length;
1891 u32 src_offset=0;
1892 u32 dest_offset=0;
1893 int status = 0;
1894 IPW_DEBUG_FW(">> \n");
1895 IPW_DEBUG_FW_INFO("src_phys=0x%x dest_address=0x%x length=0x%x\n",
1896 src_phys, dest_address, length);
1897 while (bytes_left > CB_MAX_LENGTH) {
1898 status = ipw_fw_dma_add_command_block( priv,
1899 src_phys + src_offset,
1900 dest_address + dest_offset,
1901 CB_MAX_LENGTH, 0, 0);
1902 if (status) {
1903 IPW_DEBUG_FW_INFO(": Failed\n");
1904 return -1;
1905 } else
1906 IPW_DEBUG_FW_INFO(": Added new cb\n");
1907
1908 src_offset += CB_MAX_LENGTH;
1909 dest_offset += CB_MAX_LENGTH;
1910 bytes_left -= CB_MAX_LENGTH;
1911 }
1912
1913 /* add the buffer tail */
1914 if (bytes_left > 0) {
1915 status = ipw_fw_dma_add_command_block(
1916 priv, src_phys + src_offset,
1917 dest_address + dest_offset,
1918 bytes_left, 0, 0);
1919 if (status) {
1920 IPW_DEBUG_FW_INFO(": Failed on the buffer tail\n");
1921 return -1;
1922 } else
1923 IPW_DEBUG_FW_INFO(": Adding new cb - the buffer tail\n");
1924 }
1925
1926
1927 IPW_DEBUG_FW("<< \n");
1928 return 0;
1929}
1930
1931static int ipw_fw_dma_wait(struct ipw_priv *priv)
1932{
1933 u32 current_index = 0;
1934 u32 watchdog = 0;
1935
1936 IPW_DEBUG_FW(">> : \n");
1937
1938 current_index = ipw_fw_dma_command_block_index(priv);
1939 IPW_DEBUG_FW_INFO("sram_desc.last_cb_index:0x%8X\n",
1940 (int) priv->sram_desc.last_cb_index);
1941
1942 while (current_index < priv->sram_desc.last_cb_index) {
1943 udelay(50);
1944 current_index = ipw_fw_dma_command_block_index(priv);
1945
1946 watchdog++;
1947
1948 if (watchdog > 400) {
1949 IPW_DEBUG_FW_INFO("Timeout\n");
1950 ipw_fw_dma_dump_command_block(priv);
1951 ipw_fw_dma_abort(priv);
1952 return -1;
1953 }
1954 }
1955
1956 ipw_fw_dma_abort(priv);
1957
1958 /*Disable the DMA in the CSR register*/
1959 ipw_set_bit(priv, CX2_RESET_REG,
1960 CX2_RESET_REG_MASTER_DISABLED | CX2_RESET_REG_STOP_MASTER);
1961
1962 IPW_DEBUG_FW("<< dmaWaitSync \n");
1963 return 0;
1964}
1965
1966static void ipw_remove_current_network(struct ipw_priv *priv)
1967{
1968 struct list_head *element, *safe;
1969 struct ieee80211_network *network = NULL;
1970 list_for_each_safe(element, safe, &priv->ieee->network_list) {
1971 network = list_entry(element, struct ieee80211_network, list);
1972 if (!memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
1973 list_del(element);
1974 list_add_tail(&network->list,
1975 &priv->ieee->network_free_list);
1976 }
1977 }
1978}
1979
1980/**
1981 * Check that card is still alive.
1982 * Reads debug register from domain0.
1983 * If card is present, pre-defined value should
1984 * be found there.
1985 *
1986 * @param priv
1987 * @return 1 if card is present, 0 otherwise
1988 */
1989static inline int ipw_alive(struct ipw_priv *priv)
1990{
1991 return ipw_read32(priv, 0x90) == 0xd55555d5;
1992}
1993
1994static inline int ipw_poll_bit(struct ipw_priv *priv, u32 addr, u32 mask,
1995 int timeout)
1996{
1997 int i = 0;
1998
1999 do {
2000 if ((ipw_read32(priv, addr) & mask) == mask)
2001 return i;
2002 mdelay(10);
2003 i += 10;
2004 } while (i < timeout);
2005
2006 return -ETIME;
2007}
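
ipw_poll_bit() returns roughly how many milliseconds it waited once the requested bits come up, or -ETIME on timeout, which is how ipw_stop_master() below both detects failure and logs the latency. A hedged standalone sketch of the same poll-with-timeout shape, with the hardware read stubbed out:

#include <stdio.h>

#define ETIME_DEMO 62	/* stand-in errno-style timeout code */

/* Stub "register" that becomes ready after a few polls (illustrative). */
static unsigned int fake_read(int *countdown)
{
	return (--(*countdown) <= 0) ? 0x1 : 0x0;
}

/* Same shape as ipw_poll_bit(): poll every 10 "ms" until mask is set. */
static int poll_bit(int *reg_state, unsigned int mask, int timeout_ms)
{
	int i = 0;

	do {
		if ((fake_read(reg_state) & mask) == mask)
			return i;	/* approximate elapsed time in ms */
		i += 10;
	} while (i < timeout_ms);

	return -ETIME_DEMO;
}

int main(void)
{
	int countdown = 3;
	int rc = poll_bit(&countdown, 0x1, 100);

	if (rc < 0)
		printf("timed out\n");
	else
		printf("bit set after ~%d ms\n", rc);
	return 0;
}
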
2008
2009/* These functions load the firmware and micro code for the operation of
2010 * the ipw hardware.  They assume the buffer has all the bits for the
2011 * image and that the caller is handling the memory allocation and clean up.
2012 */
2013
2014
2015static int ipw_stop_master(struct ipw_priv * priv)
2016{
2017 int rc;
2018
2019 IPW_DEBUG_TRACE(">> \n");
2020 /* stop master. typical delay - 0 */
2021 ipw_set_bit(priv, CX2_RESET_REG, CX2_RESET_REG_STOP_MASTER);
2022
2023 rc = ipw_poll_bit(priv, CX2_RESET_REG,
2024 CX2_RESET_REG_MASTER_DISABLED, 100);
2025 if (rc < 0) {
2026		IPW_ERROR("stop master failed within 100ms\n");
2027 return -1;
2028 }
2029
2030 IPW_DEBUG_INFO("stop master %dms\n", rc);
2031
2032 return rc;
2033}
2034
2035static void ipw_arc_release(struct ipw_priv *priv)
2036{
2037 IPW_DEBUG_TRACE(">> \n");
2038 mdelay(5);
2039
2040 ipw_clear_bit(priv, CX2_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);
2041
2042	/* no one knows the exact timing; add some delay to be safe */
2043 mdelay(5);
2044}
2045
2046struct fw_header {
2047 u32 version;
2048 u32 mode;
2049};
2050
2051struct fw_chunk {
2052 u32 address;
2053 u32 length;
2054};
2055
2056#define IPW_FW_MAJOR_VERSION 2
2057#define IPW_FW_MINOR_VERSION 2
2058
2059#define IPW_FW_MINOR(x) ((x & 0xff00) >> 8)
2060#define IPW_FW_MAJOR(x) (x & 0xff)
2061
2062#define IPW_FW_VERSION ((IPW_FW_MINOR_VERSION << 8) | \
2063 IPW_FW_MAJOR_VERSION)
2064
2065#define IPW_FW_PREFIX "ipw-" __stringify(IPW_FW_MAJOR_VERSION) \
2066"." __stringify(IPW_FW_MINOR_VERSION) "-"
2067
2068#if IPW_FW_MAJOR_VERSION >= 2 && IPW_FW_MINOR_VERSION > 0
2069#define IPW_FW_NAME(x) IPW_FW_PREFIX "" x ".fw"
2070#else
2071#define IPW_FW_NAME(x) "ipw2200_" x ".fw"
2072#endif
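
With the values above, IPW_FW_VERSION packs the minor number into the high byte and the major into the low byte, and IPW_FW_NAME("boot") should expand to "ipw-2.2-boot.fw" (the #else branch keeps the older "ipw2200_boot.fw" naming). A standalone sketch that reproduces the expansion so it can be checked at a glance (DEMO_* names are stand-ins, and DEMO_STR mimics the kernel's __stringify()):

#include <stdio.h>

#define DEMO_FW_MAJOR_VERSION 2
#define DEMO_FW_MINOR_VERSION 2

/* Two-step stringify, same idea as __stringify() in the kernel. */
#define DEMO_STR_(x) #x
#define DEMO_STR(x) DEMO_STR_(x)

#define DEMO_FW_VERSION ((DEMO_FW_MINOR_VERSION << 8) | DEMO_FW_MAJOR_VERSION)
#define DEMO_FW_PREFIX "ipw-" DEMO_STR(DEMO_FW_MAJOR_VERSION) \
	"." DEMO_STR(DEMO_FW_MINOR_VERSION) "-"
#define DEMO_FW_NAME(x) DEMO_FW_PREFIX "" x ".fw"

int main(void)
{
	printf("version word: 0x%04x\n", DEMO_FW_VERSION);	/* 0x0202 */
	printf("boot image  : %s\n", DEMO_FW_NAME("boot"));	/* ipw-2.2-boot.fw */
	return 0;
}
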
2073
2074static int ipw_load_ucode(struct ipw_priv *priv, u8 * data,
2075 size_t len)
2076{
2077 int rc = 0, i, addr;
2078 u8 cr = 0;
2079 u16 *image;
2080
2081 image = (u16 *)data;
2082
2083 IPW_DEBUG_TRACE(">> \n");
2084
2085 rc = ipw_stop_master(priv);
2086
2087 if (rc < 0)
2088 return rc;
2089
2090// spin_lock_irqsave(&priv->lock, flags);
2091
2092 for (addr = CX2_SHARED_LOWER_BOUND;
2093 addr < CX2_REGISTER_DOMAIN1_END; addr += 4) {
2094 ipw_write32(priv, addr, 0);
2095 }
2096
2097 /* no ucode (yet) */
2098 memset(&priv->dino_alive, 0, sizeof(priv->dino_alive));
2099 /* destroy DMA queues */
2100 /* reset sequence */
2101
2102 ipw_write_reg32(priv, CX2_MEM_HALT_AND_RESET ,CX2_BIT_HALT_RESET_ON);
2103 ipw_arc_release(priv);
2104 ipw_write_reg32(priv, CX2_MEM_HALT_AND_RESET, CX2_BIT_HALT_RESET_OFF);
2105 mdelay(1);
2106
2107 /* reset PHY */
2108 ipw_write_reg32(priv, CX2_INTERNAL_CMD_EVENT, CX2_BASEBAND_POWER_DOWN);
2109 mdelay(1);
2110
2111 ipw_write_reg32(priv, CX2_INTERNAL_CMD_EVENT, 0);
2112 mdelay(1);
2113
2114 /* enable ucode store */
2115 ipw_write_reg8(priv, DINO_CONTROL_REG, 0x0);
2116 ipw_write_reg8(priv, DINO_CONTROL_REG, DINO_ENABLE_CS);
2117 mdelay(1);
2118
2119 /* write ucode */
2120 /**
2121 * @bug
2122 * Do NOT set indirect address register once and then
2123 * store data to indirect data register in the loop.
2124	 * It seems very reasonable, but in this case DINO does not
2125	 * accept the ucode. It is essential to set the address each time.
2126 */
2127 /* load new ipw uCode */
2128 for (i = 0; i < len / 2; i++)
2129 ipw_write_reg16(priv, CX2_BASEBAND_CONTROL_STORE, image[i]);
2130
2131
2132 /* enable DINO */
2133 ipw_write_reg8(priv, CX2_BASEBAND_CONTROL_STATUS, 0);
2134 ipw_write_reg8(priv, CX2_BASEBAND_CONTROL_STATUS,
2135 DINO_ENABLE_SYSTEM );
2136
2137	/* this is where the igx / win driver deviates from the VAP driver. */
2138
2139 /* wait for alive response */
2140 for (i = 0; i < 100; i++) {
2141 /* poll for incoming data */
2142 cr = ipw_read_reg8(priv, CX2_BASEBAND_CONTROL_STATUS);
2143 if (cr & DINO_RXFIFO_DATA)
2144 break;
2145 mdelay(1);
2146 }
2147
2148 if (cr & DINO_RXFIFO_DATA) {
2149		/* the alive command response size is NOT a multiple of 4 */
2150 u32 response_buffer[(sizeof(priv->dino_alive) + 3) / 4];
2151
2152 for (i = 0; i < ARRAY_SIZE(response_buffer); i++)
2153 response_buffer[i] =
2154 ipw_read_reg32(priv,
2155 CX2_BASEBAND_RX_FIFO_READ);
2156 memcpy(&priv->dino_alive, response_buffer,
2157 sizeof(priv->dino_alive));
2158 if (priv->dino_alive.alive_command == 1
2159 && priv->dino_alive.ucode_valid == 1) {
2160 rc = 0;
2161 IPW_DEBUG_INFO(
2162 "Microcode OK, rev. %d (0x%x) dev. %d (0x%x) "
2163 "of %02d/%02d/%02d %02d:%02d\n",
2164 priv->dino_alive.software_revision,
2165 priv->dino_alive.software_revision,
2166 priv->dino_alive.device_identifier,
2167 priv->dino_alive.device_identifier,
2168 priv->dino_alive.time_stamp[0],
2169 priv->dino_alive.time_stamp[1],
2170 priv->dino_alive.time_stamp[2],
2171 priv->dino_alive.time_stamp[3],
2172 priv->dino_alive.time_stamp[4]);
2173 } else {
2174 IPW_DEBUG_INFO("Microcode is not alive\n");
2175 rc = -EINVAL;
2176 }
2177 } else {
2178 IPW_DEBUG_INFO("No alive response from DINO\n");
2179 rc = -ETIME;
2180 }
2181
2182	/* disable DINO, otherwise for some reason
2183	   the firmware has problems getting the alive response. */
2184 ipw_write_reg8(priv, CX2_BASEBAND_CONTROL_STATUS, 0);
2185
2186// spin_unlock_irqrestore(&priv->lock, flags);
2187
2188 return rc;
2189}
2190
2191static int ipw_load_firmware(struct ipw_priv *priv, u8 * data,
2192 size_t len)
2193{
2194 int rc = -1;
2195 int offset = 0;
2196 struct fw_chunk *chunk;
2197 dma_addr_t shared_phys;
2198 u8 *shared_virt;
2199
2200 IPW_DEBUG_TRACE("<< : \n");
2201 shared_virt = pci_alloc_consistent(priv->pci_dev, len, &shared_phys);
2202
2203 if (!shared_virt)
2204 return -ENOMEM;
2205
2206 memmove(shared_virt, data, len);
2207
2208 /* Start the Dma */
2209 rc = ipw_fw_dma_enable(priv);
2210
2211 if (priv->sram_desc.last_cb_index > 0) {
2212		/* the DMA is already ready; this would be a bug. */
2213 BUG();
2214 goto out;
2215 }
2216
2217 do {
2218 chunk = (struct fw_chunk *)(data + offset);
2219 offset += sizeof(struct fw_chunk);
2220 /* build DMA packet and queue up for sending */
2221		/* dma to chunk->address, the chunk->length bytes from data +
2222		 * offset */
2223 /* Dma loading */
2224 rc = ipw_fw_dma_add_buffer(priv, shared_phys + offset,
2225 chunk->address, chunk->length);
2226 if (rc) {
2227 IPW_DEBUG_INFO("dmaAddBuffer Failed\n");
2228 goto out;
2229 }
2230
2231 offset += chunk->length;
2232 } while (offset < len);
2233
2234 /* Run the DMA and wait for the answer*/
2235 rc = ipw_fw_dma_kick(priv);
2236 if (rc) {
2237 IPW_ERROR("dmaKick Failed\n");
2238 goto out;
2239 }
2240
2241 rc = ipw_fw_dma_wait(priv);
2242 if (rc) {
2243 IPW_ERROR("dmaWaitSync Failed\n");
2244 goto out;
2245 }
2246 out:
2247 pci_free_consistent( priv->pci_dev, len, shared_virt, shared_phys);
2248 return rc;
2249}
2250
2251/* stop nic */
2252static int ipw_stop_nic(struct ipw_priv *priv)
2253{
2254 int rc = 0;
2255
2256 /* stop*/
2257 ipw_write32(priv, CX2_RESET_REG, CX2_RESET_REG_STOP_MASTER);
2258
2259 rc = ipw_poll_bit(priv, CX2_RESET_REG,
2260 CX2_RESET_REG_MASTER_DISABLED, 500);
2261 if (rc < 0) {
2262 IPW_ERROR("wait for reg master disabled failed\n");
2263 return rc;
2264 }
2265
2266 ipw_set_bit(priv, CX2_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);
2267
2268 return rc;
2269}
2270
2271static void ipw_start_nic(struct ipw_priv *priv)
2272{
2273 IPW_DEBUG_TRACE(">>\n");
2274
2275 /* prvHwStartNic release ARC*/
2276 ipw_clear_bit(priv, CX2_RESET_REG,
2277 CX2_RESET_REG_MASTER_DISABLED |
2278 CX2_RESET_REG_STOP_MASTER |
2279 CBD_RESET_REG_PRINCETON_RESET);
2280
2281 /* enable power management */
2282 ipw_set_bit(priv, CX2_GP_CNTRL_RW, CX2_GP_CNTRL_BIT_HOST_ALLOWS_STANDBY);
2283
2284 IPW_DEBUG_TRACE("<<\n");
2285}
2286
2287static int ipw_init_nic(struct ipw_priv *priv)
2288{
2289 int rc;
2290
2291 IPW_DEBUG_TRACE(">>\n");
2292 /* reset */
2293 /*prvHwInitNic */
2294 /* set "initialization complete" bit to move adapter to D0 state */
2295 ipw_set_bit(priv, CX2_GP_CNTRL_RW, CX2_GP_CNTRL_BIT_INIT_DONE);
2296
2297 /* low-level PLL activation */
2298 ipw_write32(priv, CX2_READ_INT_REGISTER, CX2_BIT_INT_HOST_SRAM_READ_INT_REGISTER);
2299
2300 /* wait for clock stabilization */
2301 rc = ipw_poll_bit(priv, CX2_GP_CNTRL_RW,
2302 CX2_GP_CNTRL_BIT_CLOCK_READY, 250);
2303 if (rc < 0 )
2304		IPW_DEBUG_INFO("FAILED wait for clock stabilization\n");
2305
2306 /* assert SW reset */
2307 ipw_set_bit(priv, CX2_RESET_REG, CX2_RESET_REG_SW_RESET);
2308
2309 udelay(10);
2310
2311 /* set "initialization complete" bit to move adapter to D0 state */
2312 ipw_set_bit(priv, CX2_GP_CNTRL_RW, CX2_GP_CNTRL_BIT_INIT_DONE);
2313
2314	IPW_DEBUG_TRACE("<<\n");
2315 return 0;
2316}
2317
2318
2319/* Call this function from process context; it will sleep in request_firmware().
2320 * Probe is an OK place to call this from.
2321 */
2322static int ipw_reset_nic(struct ipw_priv *priv)
2323{
2324 int rc = 0;
2325
2326 IPW_DEBUG_TRACE(">>\n");
2327
2328 rc = ipw_init_nic(priv);
2329
2330 /* Clear the 'host command active' bit... */
2331 priv->status &= ~STATUS_HCMD_ACTIVE;
2332 wake_up_interruptible(&priv->wait_command_queue);
2333
2334 IPW_DEBUG_TRACE("<<\n");
2335 return rc;
2336}
2337
2338static int ipw_get_fw(struct ipw_priv *priv,
2339 const struct firmware **fw, const char *name)
2340{
2341 struct fw_header *header;
2342 int rc;
2343
2344 /* ask firmware_class module to get the boot firmware off disk */
2345 rc = request_firmware(fw, name, &priv->pci_dev->dev);
2346 if (rc < 0) {
2347 IPW_ERROR("%s load failed: Reason %d\n", name, rc);
2348 return rc;
2349 }
2350
2351 header = (struct fw_header *)(*fw)->data;
2352 if (IPW_FW_MAJOR(header->version) != IPW_FW_MAJOR_VERSION) {
2353 IPW_ERROR("'%s' firmware version not compatible (%d != %d)\n",
2354 name,
2355 IPW_FW_MAJOR(header->version), IPW_FW_MAJOR_VERSION);
2356 return -EINVAL;
2357 }
2358
2359 IPW_DEBUG_INFO("Loading firmware '%s' file v%d.%d (%zd bytes)\n",
2360 name,
2361 IPW_FW_MAJOR(header->version),
2362 IPW_FW_MINOR(header->version),
2363 (*fw)->size - sizeof(struct fw_header));
2364 return 0;
2365}
2366
2367#define CX2_RX_BUF_SIZE (3000)
2368
2369static inline void ipw_rx_queue_reset(struct ipw_priv *priv,
2370 struct ipw_rx_queue *rxq)
2371{
2372 unsigned long flags;
2373 int i;
2374
2375 spin_lock_irqsave(&rxq->lock, flags);
2376
2377 INIT_LIST_HEAD(&rxq->rx_free);
2378 INIT_LIST_HEAD(&rxq->rx_used);
2379
2380 /* Fill the rx_used queue with _all_ of the Rx buffers */
2381 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
2382 /* In the reset function, these buffers may have been allocated
2383 * to an SKB, so we need to unmap and free potential storage */
2384 if (rxq->pool[i].skb != NULL) {
2385 pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
2386 CX2_RX_BUF_SIZE,
2387 PCI_DMA_FROMDEVICE);
2388 dev_kfree_skb(rxq->pool[i].skb);
2389 }
2390 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
2391 }
2392
2393 /* Set us so that we have processed and used all buffers, but have
2394 * not restocked the Rx queue with fresh buffers */
2395 rxq->read = rxq->write = 0;
2396 rxq->processed = RX_QUEUE_SIZE - 1;
2397 rxq->free_count = 0;
2398 spin_unlock_irqrestore(&rxq->lock, flags);
2399}
2400
2401#ifdef CONFIG_PM
2402static int fw_loaded = 0;
2403static const struct firmware *bootfw = NULL;
2404static const struct firmware *firmware = NULL;
2405static const struct firmware *ucode = NULL;
2406#endif
2407
2408static int ipw_load(struct ipw_priv *priv)
2409{
2410#ifndef CONFIG_PM
2411 const struct firmware *bootfw = NULL;
2412 const struct firmware *firmware = NULL;
2413 const struct firmware *ucode = NULL;
2414#endif
2415 int rc = 0, retries = 3;
2416
2417#ifdef CONFIG_PM
2418 if (!fw_loaded) {
2419#endif
2420 rc = ipw_get_fw(priv, &bootfw, IPW_FW_NAME("boot"));
2421 if (rc)
2422 goto error;
2423
2424 switch (priv->ieee->iw_mode) {
2425 case IW_MODE_ADHOC:
2426 rc = ipw_get_fw(priv, &ucode,
2427 IPW_FW_NAME("ibss_ucode"));
2428 if (rc)
2429 goto error;
2430
2431 rc = ipw_get_fw(priv, &firmware, IPW_FW_NAME("ibss"));
2432 break;
2433
2434#ifdef CONFIG_IPW_PROMISC
2435 case IW_MODE_MONITOR:
2436 rc = ipw_get_fw(priv, &ucode,
2437 IPW_FW_NAME("ibss_ucode"));
2438 if (rc)
2439 goto error;
2440
2441 rc = ipw_get_fw(priv, &firmware, IPW_FW_NAME("sniffer"));
2442 break;
2443#endif
2444 case IW_MODE_INFRA:
2445 rc = ipw_get_fw(priv, &ucode,
2446 IPW_FW_NAME("bss_ucode"));
2447 if (rc)
2448 goto error;
2449
2450 rc = ipw_get_fw(priv, &firmware, IPW_FW_NAME("bss"));
2451 break;
2452
2453 default:
2454 rc = -EINVAL;
2455 }
2456
2457 if (rc)
2458 goto error;
2459
2460#ifdef CONFIG_PM
2461 fw_loaded = 1;
2462 }
2463#endif
2464
2465 if (!priv->rxq)
2466 priv->rxq = ipw_rx_queue_alloc(priv);
2467 else
2468 ipw_rx_queue_reset(priv, priv->rxq);
 2469 if (!priv->rxq) {
 2470 IPW_ERROR("Unable to initialize Rx queue\n");
 2471 rc = -ENOMEM;
 2472 goto error;
 2473 }
2473
2474 retry:
2475 /* Ensure interrupts are disabled */
2476 ipw_write32(priv, CX2_INTA_MASK_R, ~CX2_INTA_MASK_ALL);
2477 priv->status &= ~STATUS_INT_ENABLED;
2478
2479 /* ack pending interrupts */
2480 ipw_write32(priv, CX2_INTA_RW, CX2_INTA_MASK_ALL);
2481
2482 ipw_stop_nic(priv);
2483
2484 rc = ipw_reset_nic(priv);
2485 if (rc) {
2486 IPW_ERROR("Unable to reset NIC\n");
2487 goto error;
2488 }
2489
2490 ipw_zero_memory(priv, CX2_NIC_SRAM_LOWER_BOUND,
2491 CX2_NIC_SRAM_UPPER_BOUND - CX2_NIC_SRAM_LOWER_BOUND);
2492
2493 /* DMA the initial boot firmware into the device */
2494 rc = ipw_load_firmware(priv, bootfw->data + sizeof(struct fw_header),
2495 bootfw->size - sizeof(struct fw_header));
2496 if (rc < 0) {
2497 IPW_ERROR("Unable to load boot firmware\n");
2498 goto error;
2499 }
2500
2501 /* kick start the device */
2502 ipw_start_nic(priv);
2503
 2504 /* wait for the device to finish its initial startup sequence */
2505 rc = ipw_poll_bit(priv, CX2_INTA_RW,
2506 CX2_INTA_BIT_FW_INITIALIZATION_DONE, 500);
2507 if (rc < 0) {
2508 IPW_ERROR("device failed to boot initial fw image\n");
2509 goto error;
2510 }
2511 IPW_DEBUG_INFO("initial device response after %dms\n", rc);
2512
2513 /* ack fw init done interrupt */
2514 ipw_write32(priv, CX2_INTA_RW, CX2_INTA_BIT_FW_INITIALIZATION_DONE);
2515
2516 /* DMA the ucode into the device */
2517 rc = ipw_load_ucode(priv, ucode->data + sizeof(struct fw_header),
2518 ucode->size - sizeof(struct fw_header));
2519 if (rc < 0) {
2520 IPW_ERROR("Unable to load ucode\n");
2521 goto error;
2522 }
2523
2524 /* stop nic */
2525 ipw_stop_nic(priv);
2526
2527 /* DMA bss firmware into the device */
2528 rc = ipw_load_firmware(priv, firmware->data +
2529 sizeof(struct fw_header),
2530 firmware->size - sizeof(struct fw_header));
2531 if (rc < 0 ) {
2532 IPW_ERROR("Unable to load firmware\n");
2533 goto error;
2534 }
2535
2536 ipw_write32(priv, IPW_EEPROM_LOAD_DISABLE, 0);
2537
2538 rc = ipw_queue_reset(priv);
2539 if (rc) {
2540 IPW_ERROR("Unable to initialize queues\n");
2541 goto error;
2542 }
2543
2544 /* Ensure interrupts are disabled */
2545 ipw_write32(priv, CX2_INTA_MASK_R, ~CX2_INTA_MASK_ALL);
2546
2547 /* kick start the device */
2548 ipw_start_nic(priv);
2549
2550 if (ipw_read32(priv, CX2_INTA_RW) & CX2_INTA_BIT_PARITY_ERROR) {
2551 if (retries > 0) {
2552 IPW_WARNING("Parity error. Retrying init.\n");
2553 retries--;
2554 goto retry;
2555 }
2556
2557 IPW_ERROR("TODO: Handle parity error -- schedule restart?\n");
2558 rc = -EIO;
2559 goto error;
2560 }
2561
2562 /* wait for the device */
2563 rc = ipw_poll_bit(priv, CX2_INTA_RW,
2564 CX2_INTA_BIT_FW_INITIALIZATION_DONE, 500);
2565 if (rc < 0) {
2566 IPW_ERROR("device failed to start after 500ms\n");
2567 goto error;
2568 }
2569 IPW_DEBUG_INFO("device response after %dms\n", rc);
2570
2571 /* ack fw init done interrupt */
2572 ipw_write32(priv, CX2_INTA_RW, CX2_INTA_BIT_FW_INITIALIZATION_DONE);
2573
2574 /* read eeprom data and initialize the eeprom region of sram */
2575 priv->eeprom_delay = 1;
2576 ipw_eeprom_init_sram(priv);
2577
2578 /* enable interrupts */
2579 ipw_enable_interrupts(priv);
2580
2581 /* Ensure our queue has valid packets */
2582 ipw_rx_queue_replenish(priv);
2583
2584 ipw_write32(priv, CX2_RX_READ_INDEX, priv->rxq->read);
2585
2586 /* ack pending interrupts */
2587 ipw_write32(priv, CX2_INTA_RW, CX2_INTA_MASK_ALL);
2588
2589#ifndef CONFIG_PM
2590 release_firmware(bootfw);
2591 release_firmware(ucode);
2592 release_firmware(firmware);
2593#endif
2594 return 0;
2595
2596 error:
2597 if (priv->rxq) {
2598 ipw_rx_queue_free(priv, priv->rxq);
2599 priv->rxq = NULL;
2600 }
2601 ipw_tx_queue_free(priv);
2602 if (bootfw)
2603 release_firmware(bootfw);
2604 if (ucode)
2605 release_firmware(ucode);
2606 if (firmware)
2607 release_firmware(firmware);
2608#ifdef CONFIG_PM
2609 fw_loaded = 0;
2610 bootfw = ucode = firmware = NULL;
2611#endif
2612
2613 return rc;
2614}
2615
2616/**
2617 * DMA services
2618 *
2619 * Theory of operation
2620 *
 2621 * A queue is a circular buffer with 'Read' and 'Write' pointers.
 2622 * Two empty entries are always kept in the buffer to protect against overflow.
 2623 *
 2624 * For the Tx queues there are low-mark and high-mark limits. If, after queuing
 2625 * a packet for Tx, the free space drops below the low mark, the Tx queue is
 2626 * stopped. When packets are reclaimed (on the 'tx done' IRQ) and the free
 2627 * space rises above the high mark, the Tx queue is resumed.
2628 *
2629 * The IPW operates with six queues, one receive queue in the device's
2630 * sram, one transmit queue for sending commands to the device firmware,
2631 * and four transmit queues for data.
2632 *
2633 * The four transmit queues allow for performing quality of service (qos)
2634 * transmissions as per the 802.11 protocol. Currently Linux does not
2635 * provide a mechanism to the user for utilizing prioritized queues, so
2636 * we only utilize the first data transmit queue (queue1).
2637 */
2638
2639/**
 2640 * Return the number of free slots in a DMA queue, keeping a two-entry reserve
 2641 */
2642
2643static inline int ipw_queue_space(const struct clx2_queue *q)
2644{
2645 int s = q->last_used - q->first_empty;
2646 if (s <= 0)
2647 s += q->n_bd;
2648 s -= 2; /* keep some reserve to not confuse empty and full situations */
2649 if (s < 0)
2650 s = 0;
2651 return s;
2652}
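/* Illustration (not driver logic, numbers chosen for the example): with
 * n_bd = 64, first_empty = 10 and last_used = 5, s = 5 - 10 = -5, which
 * wraps to -5 + 64 = 59 and, minus the two-entry reserve, reports 57
 * free slots.  With first_empty = 10 and last_used = 12 the raw space
 * is 2, so after the reserve is subtracted the queue reports 0 free
 * slots and is treated as full. */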
2653
2654static inline int ipw_queue_inc_wrap(int index, int n_bd)
2655{
2656 return (++index == n_bd) ? 0 : index;
2657}
2658
2659/**
2660 * Initialize common DMA queue structure
2661 *
 2662 * @param q queue to init
 2663 * @param count Number of BDs to allocate. Should be a power of 2
 2664 * @param read Address of the 'read' index register
 2665 * (not offset within BAR, full address)
 2666 * @param write Address of the 'write' index register
 2667 * (not offset within BAR, full address)
 2668 * @param base Address of the 'base' register
 2669 * (not offset within BAR, full address)
 2670 * @param size Address of the 'size' register
 2671 * (not offset within BAR, full address)
2672 */
2673static void ipw_queue_init(struct ipw_priv *priv, struct clx2_queue *q,
2674 int count, u32 read, u32 write,
2675 u32 base, u32 size)
2676{
2677 q->n_bd = count;
2678
2679 q->low_mark = q->n_bd / 4;
2680 if (q->low_mark < 4)
2681 q->low_mark = 4;
2682
2683 q->high_mark = q->n_bd / 8;
2684 if (q->high_mark < 2)
2685 q->high_mark = 2;
2686
2687 q->first_empty = q->last_used = 0;
2688 q->reg_r = read;
2689 q->reg_w = write;
2690
2691 ipw_write32(priv, base, q->dma_addr);
2692 ipw_write32(priv, size, count);
2693 ipw_write32(priv, read, 0);
2694 ipw_write32(priv, write, 0);
2695
2696 _ipw_read32(priv, 0x90);
2697}
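/* Example of the resulting watermarks (illustrative, based on the queue
 * sizes used by ipw_queue_reset() below): for a 64-entry data queue,
 * low_mark = 64 / 4 = 16 and high_mark = 64 / 8 = 8; for the 8-entry
 * command queue both computed values fall below the clamps, giving
 * low_mark = 4 and high_mark = 2. */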
2698
2699static int ipw_queue_tx_init(struct ipw_priv *priv,
2700 struct clx2_tx_queue *q,
2701 int count, u32 read, u32 write,
2702 u32 base, u32 size)
2703{
2704 struct pci_dev *dev = priv->pci_dev;
2705
2706 q->txb = kmalloc(sizeof(q->txb[0]) * count, GFP_KERNEL);
2707 if (!q->txb) {
2708 IPW_ERROR("vmalloc for auxilary BD structures failed\n");
2709 return -ENOMEM;
2710 }
2711
2712 q->bd = pci_alloc_consistent(dev,sizeof(q->bd[0])*count, &q->q.dma_addr);
2713 if (!q->bd) {
2714 IPW_ERROR("pci_alloc_consistent(%zd) failed\n",
2715 sizeof(q->bd[0]) * count);
2716 kfree(q->txb);
2717 q->txb = NULL;
2718 return -ENOMEM;
2719 }
2720
2721 ipw_queue_init(priv, &q->q, count, read, write, base, size);
2722 return 0;
2723}
2724
2725/**
 2726 * Free one TFD, the one at index txq->q.last_used.
 2727 * Does NOT advance any indexes.
2728 *
2729 * @param dev
2730 * @param txq
2731 */
2732static void ipw_queue_tx_free_tfd(struct ipw_priv *priv,
2733 struct clx2_tx_queue *txq)
2734{
2735 struct tfd_frame *bd = &txq->bd[txq->q.last_used];
2736 struct pci_dev *dev = priv->pci_dev;
2737 int i;
2738
2739 /* classify bd */
2740 if (bd->control_flags.message_type == TX_HOST_COMMAND_TYPE)
2741 /* nothing to cleanup after for host commands */
2742 return;
2743
2744 /* sanity check */
2745 if (bd->u.data.num_chunks > NUM_TFD_CHUNKS) {
2746 IPW_ERROR("Too many chunks: %i\n", bd->u.data.num_chunks);
2747 /** @todo issue fatal error, it is quite serious situation */
2748 return;
2749 }
2750
2751 /* unmap chunks if any */
2752 for (i = 0; i < bd->u.data.num_chunks; i++) {
2753 pci_unmap_single(dev, bd->u.data.chunk_ptr[i],
2754 bd->u.data.chunk_len[i], PCI_DMA_TODEVICE);
2755 if (txq->txb[txq->q.last_used]) {
2756 ieee80211_txb_free(txq->txb[txq->q.last_used]);
2757 txq->txb[txq->q.last_used] = NULL;
2758 }
2759 }
2760}
2761
2762/**
2763 * Deallocate DMA queue.
2764 *
2765 * Empty queue by removing and destroying all BD's.
2766 * Free all buffers.
2767 *
2768 * @param dev
2769 * @param q
2770 */
2771static void ipw_queue_tx_free(struct ipw_priv *priv,
2772 struct clx2_tx_queue *txq)
2773{
2774 struct clx2_queue *q = &txq->q;
2775 struct pci_dev *dev = priv->pci_dev;
2776
2777 if (q->n_bd == 0)
2778 return;
2779
2780 /* first, empty all BD's */
2781 for (; q->first_empty != q->last_used;
2782 q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
2783 ipw_queue_tx_free_tfd(priv, txq);
2784 }
2785
2786 /* free buffers belonging to queue itself */
2787 pci_free_consistent(dev, sizeof(txq->bd[0])*q->n_bd, txq->bd,
2788 q->dma_addr);
2789 kfree(txq->txb);
2790
2791 /* 0 fill whole structure */
2792 memset(txq, 0, sizeof(*txq));
2793}
2794
2795
2796/**
2797 * Destroy all DMA queues and structures
2798 *
2799 * @param priv
2800 */
2801static void ipw_tx_queue_free(struct ipw_priv *priv)
2802{
2803 /* Tx CMD queue */
2804 ipw_queue_tx_free(priv, &priv->txq_cmd);
2805
2806 /* Tx queues */
2807 ipw_queue_tx_free(priv, &priv->txq[0]);
2808 ipw_queue_tx_free(priv, &priv->txq[1]);
2809 ipw_queue_tx_free(priv, &priv->txq[2]);
2810 ipw_queue_tx_free(priv, &priv->txq[3]);
2811}
2812
2813static inline void __maybe_wake_tx(struct ipw_priv *priv)
2814{
2815 if (netif_running(priv->net_dev)) {
2816 switch (priv->port_type) {
2817 case DCR_TYPE_MU_BSS:
2818 case DCR_TYPE_MU_IBSS:
2819 if (!(priv->status & STATUS_ASSOCIATED)) {
2820 return;
2821 }
2822 }
2823 netif_wake_queue(priv->net_dev);
2824 }
2825
2826}
2827
2828static inline void ipw_create_bssid(struct ipw_priv *priv, u8 *bssid)
2829{
2830 /* First 3 bytes are manufacturer */
2831 bssid[0] = priv->mac_addr[0];
2832 bssid[1] = priv->mac_addr[1];
2833 bssid[2] = priv->mac_addr[2];
2834
2835 /* Last bytes are random */
2836 get_random_bytes(&bssid[3], ETH_ALEN-3);
2837
2838 bssid[0] &= 0xfe; /* clear multicast bit */
2839 bssid[0] |= 0x02; /* set local assignment bit (IEEE802) */
2840}
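/* Illustration with a made-up adapter address: for a MAC of
 * 00:0e:35:12:34:56 the generated BSSID keeps the 00:0e:35 OUI, takes
 * three random bytes for the tail, clears the multicast bit and sets
 * the locally administered bit, e.g. 02:0e:35:a7:1c:9b. */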
2841
2842static inline u8 ipw_add_station(struct ipw_priv *priv, u8 *bssid)
2843{
2844 struct ipw_station_entry entry;
2845 int i;
2846
2847 for (i = 0; i < priv->num_stations; i++) {
2848 if (!memcmp(priv->stations[i], bssid, ETH_ALEN)) {
2849 /* Another node is active in network */
2850 priv->missed_adhoc_beacons = 0;
2851 if (!(priv->config & CFG_STATIC_CHANNEL))
2852 /* when other nodes drop out, we drop out */
2853 priv->config &= ~CFG_ADHOC_PERSIST;
2854
2855 return i;
2856 }
2857 }
2858
2859 if (i == MAX_STATIONS)
2860 return IPW_INVALID_STATION;
2861
2862 IPW_DEBUG_SCAN("Adding AdHoc station: " MAC_FMT "\n", MAC_ARG(bssid));
2863
2864 entry.reserved = 0;
2865 entry.support_mode = 0;
2866 memcpy(entry.mac_addr, bssid, ETH_ALEN);
2867 memcpy(priv->stations[i], bssid, ETH_ALEN);
2868 ipw_write_direct(priv, IPW_STATION_TABLE_LOWER + i * sizeof(entry),
2869 &entry,
2870 sizeof(entry));
2871 priv->num_stations++;
2872
2873 return i;
2874}
2875
2876static inline u8 ipw_find_station(struct ipw_priv *priv, u8 *bssid)
2877{
2878 int i;
2879
2880 for (i = 0; i < priv->num_stations; i++)
2881 if (!memcmp(priv->stations[i], bssid, ETH_ALEN))
2882 return i;
2883
2884 return IPW_INVALID_STATION;
2885}
2886
2887static void ipw_send_disassociate(struct ipw_priv *priv, int quiet)
2888{
2889 int err;
2890
2891 if (!(priv->status & (STATUS_ASSOCIATING | STATUS_ASSOCIATED))) {
2892 IPW_DEBUG_ASSOC("Disassociating while not associated.\n");
2893 return;
2894 }
2895
 2896 IPW_DEBUG_ASSOC("Disassociation attempt from " MAC_FMT " "
2897 "on channel %d.\n",
2898 MAC_ARG(priv->assoc_request.bssid),
2899 priv->assoc_request.channel);
2900
2901 priv->status &= ~(STATUS_ASSOCIATING | STATUS_ASSOCIATED);
2902 priv->status |= STATUS_DISASSOCIATING;
2903
2904 if (quiet)
2905 priv->assoc_request.assoc_type = HC_DISASSOC_QUIET;
2906 else
2907 priv->assoc_request.assoc_type = HC_DISASSOCIATE;
2908 err = ipw_send_associate(priv, &priv->assoc_request);
2909 if (err) {
2910 IPW_DEBUG_HC("Attempt to send [dis]associate command "
2911 "failed.\n");
2912 return;
2913 }
2914
2915}
2916
2917static void ipw_disassociate(void *data)
2918{
2919 ipw_send_disassociate(data, 0);
2920}
2921
2922static void notify_wx_assoc_event(struct ipw_priv *priv)
2923{
2924 union iwreq_data wrqu;
2925 wrqu.ap_addr.sa_family = ARPHRD_ETHER;
2926 if (priv->status & STATUS_ASSOCIATED)
2927 memcpy(wrqu.ap_addr.sa_data, priv->bssid, ETH_ALEN);
2928 else
2929 memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN);
2930 wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL);
2931}
2932
2933struct ipw_status_code {
2934 u16 status;
2935 const char *reason;
2936};
2937
2938static const struct ipw_status_code ipw_status_codes[] = {
2939 {0x00, "Successful"},
2940 {0x01, "Unspecified failure"},
2941 {0x0A, "Cannot support all requested capabilities in the "
2942 "Capability information field"},
2943 {0x0B, "Reassociation denied due to inability to confirm that "
2944 "association exists"},
2945 {0x0C, "Association denied due to reason outside the scope of this "
2946 "standard"},
2947 {0x0D, "Responding station does not support the specified authentication "
2948 "algorithm"},
2949 {0x0E, "Received an Authentication frame with authentication sequence "
2950 "transaction sequence number out of expected sequence"},
2951 {0x0F, "Authentication rejected because of challenge failure"},
2952 {0x10, "Authentication rejected due to timeout waiting for next "
2953 "frame in sequence"},
2954 {0x11, "Association denied because AP is unable to handle additional "
2955 "associated stations"},
2956 {0x12, "Association denied due to requesting station not supporting all "
2957 "of the datarates in the BSSBasicServiceSet Parameter"},
2958 {0x13, "Association denied due to requesting station not supporting "
2959 "short preamble operation"},
2960 {0x14, "Association denied due to requesting station not supporting "
2961 "PBCC encoding"},
2962 {0x15, "Association denied due to requesting station not supporting "
2963 "channel agility"},
2964 {0x19, "Association denied due to requesting station not supporting "
2965 "short slot operation"},
2966 {0x1A, "Association denied due to requesting station not supporting "
2967 "DSSS-OFDM operation"},
2968 {0x28, "Invalid Information Element"},
2969 {0x29, "Group Cipher is not valid"},
2970 {0x2A, "Pairwise Cipher is not valid"},
2971 {0x2B, "AKMP is not valid"},
2972 {0x2C, "Unsupported RSN IE version"},
2973 {0x2D, "Invalid RSN IE Capabilities"},
2974 {0x2E, "Cipher suite is rejected per security policy"},
2975};
2976
2977#ifdef CONFIG_IPW_DEBUG
2978static const char *ipw_get_status_code(u16 status)
2979{
2980 int i;
2981 for (i = 0; i < ARRAY_SIZE(ipw_status_codes); i++)
2982 if (ipw_status_codes[i].status == status)
2983 return ipw_status_codes[i].reason;
2984 return "Unknown status value.";
2985}
2986#endif
2987
2988static inline void average_init(struct average *avg)
2989{
2990 memset(avg, 0, sizeof(*avg));
2991}
2992
2993static inline void average_add(struct average *avg, s16 val)
2994{
2995 avg->sum -= avg->entries[avg->pos];
2996 avg->sum += val;
2997 avg->entries[avg->pos++] = val;
2998 if (unlikely(avg->pos == AVG_ENTRIES)) {
2999 avg->init = 1;
3000 avg->pos = 0;
3001 }
3002}
3003
3004static inline s16 average_value(struct average *avg)
3005{
 3006 if (unlikely(!avg->init)) {
3007 if (avg->pos)
3008 return avg->sum / avg->pos;
3009 return 0;
3010 }
3011
3012 return avg->sum / AVG_ENTRIES;
3013}
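/* Illustration (assuming, say, AVG_ENTRIES were 8): after three samples
 * of -60, -62 and -64, init is still 0 and average_value() returns
 * sum / pos = -186 / 3 = -62.  Once eight samples have been added the
 * ring wraps, init is set, and the average is always taken over the
 * full AVG_ENTRIES window. */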
3014
3015static void ipw_reset_stats(struct ipw_priv *priv)
3016{
3017 u32 len = sizeof(u32);
3018
3019 priv->quality = 0;
3020
3021 average_init(&priv->average_missed_beacons);
3022 average_init(&priv->average_rssi);
3023 average_init(&priv->average_noise);
3024
3025 priv->last_rate = 0;
3026 priv->last_missed_beacons = 0;
3027 priv->last_rx_packets = 0;
3028 priv->last_tx_packets = 0;
3029 priv->last_tx_failures = 0;
3030
3031 /* Firmware managed, reset only when NIC is restarted, so we have to
3032 * normalize on the current value */
3033 ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC,
3034 &priv->last_rx_err, &len);
3035 ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE,
3036 &priv->last_tx_failures, &len);
3037
3038 /* Driver managed, reset with each association */
3039 priv->missed_adhoc_beacons = 0;
3040 priv->missed_beacons = 0;
3041 priv->tx_packets = 0;
3042 priv->rx_packets = 0;
3043
3044}
3045
3046
3047static inline u32 ipw_get_max_rate(struct ipw_priv *priv)
3048{
3049 u32 i = 0x80000000;
3050 u32 mask = priv->rates_mask;
3051 /* If currently associated in B mode, restrict the maximum
3052 * rate match to B rates */
3053 if (priv->assoc_request.ieee_mode == IPW_B_MODE)
3054 mask &= IEEE80211_CCK_RATES_MASK;
3055
3056 /* TODO: Verify that the rate is supported by the current rates
3057 * list. */
3058
3059 while (i && !(mask & i)) i >>= 1;
3060 switch (i) {
3061 case IEEE80211_CCK_RATE_1MB_MASK: return 1000000;
3062 case IEEE80211_CCK_RATE_2MB_MASK: return 2000000;
3063 case IEEE80211_CCK_RATE_5MB_MASK: return 5500000;
3064 case IEEE80211_OFDM_RATE_6MB_MASK: return 6000000;
3065 case IEEE80211_OFDM_RATE_9MB_MASK: return 9000000;
3066 case IEEE80211_CCK_RATE_11MB_MASK: return 11000000;
3067 case IEEE80211_OFDM_RATE_12MB_MASK: return 12000000;
3068 case IEEE80211_OFDM_RATE_18MB_MASK: return 18000000;
3069 case IEEE80211_OFDM_RATE_24MB_MASK: return 24000000;
3070 case IEEE80211_OFDM_RATE_36MB_MASK: return 36000000;
3071 case IEEE80211_OFDM_RATE_48MB_MASK: return 48000000;
3072 case IEEE80211_OFDM_RATE_54MB_MASK: return 54000000;
3073 }
3074
3075 if (priv->ieee->mode == IEEE_B)
3076 return 11000000;
3077 else
3078 return 54000000;
3079}
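/* Illustration: if rates_mask has both the CCK 11Mb and the OFDM 54Mb
 * bits set while associated in B mode, the mask is first reduced to the
 * CCK rates and the loop walks i down from 0x80000000 until it reaches
 * the highest remaining CCK bit (11Mb here), so 11000000 is returned. */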
3080
3081static u32 ipw_get_current_rate(struct ipw_priv *priv)
3082{
3083 u32 rate, len = sizeof(rate);
3084 int err;
3085
3086 if (!(priv->status & STATUS_ASSOCIATED))
3087 return 0;
3088
3089 if (priv->tx_packets > IPW_REAL_RATE_RX_PACKET_THRESHOLD) {
3090 err = ipw_get_ordinal(priv, IPW_ORD_STAT_TX_CURR_RATE, &rate,
3091 &len);
3092 if (err) {
3093 IPW_DEBUG_INFO("failed querying ordinals.\n");
3094 return 0;
3095 }
3096 } else
3097 return ipw_get_max_rate(priv);
3098
3099 switch (rate) {
3100 case IPW_TX_RATE_1MB: return 1000000;
3101 case IPW_TX_RATE_2MB: return 2000000;
3102 case IPW_TX_RATE_5MB: return 5500000;
3103 case IPW_TX_RATE_6MB: return 6000000;
3104 case IPW_TX_RATE_9MB: return 9000000;
3105 case IPW_TX_RATE_11MB: return 11000000;
3106 case IPW_TX_RATE_12MB: return 12000000;
3107 case IPW_TX_RATE_18MB: return 18000000;
3108 case IPW_TX_RATE_24MB: return 24000000;
3109 case IPW_TX_RATE_36MB: return 36000000;
3110 case IPW_TX_RATE_48MB: return 48000000;
3111 case IPW_TX_RATE_54MB: return 54000000;
3112 }
3113
3114 return 0;
3115}
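/* Note: until tx_packets exceeds IPW_REAL_RATE_RX_PACKET_THRESHOLD the
 * firmware ordinal is not queried and the configured maximum rate is
 * reported instead, so early readings reflect configuration rather than
 * a measured Tx rate. */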
3116
3117#define PERFECT_RSSI (-50)
3118#define WORST_RSSI (-85)
3119#define IPW_STATS_INTERVAL (2 * HZ)
3120static void ipw_gather_stats(struct ipw_priv *priv)
3121{
3122 u32 rx_err, rx_err_delta, rx_packets_delta;
3123 u32 tx_failures, tx_failures_delta, tx_packets_delta;
3124 u32 missed_beacons_percent, missed_beacons_delta;
3125 u32 quality = 0;
3126 u32 len = sizeof(u32);
3127 s16 rssi;
3128 u32 beacon_quality, signal_quality, tx_quality, rx_quality,
3129 rate_quality;
3130
3131 if (!(priv->status & STATUS_ASSOCIATED)) {
3132 priv->quality = 0;
3133 return;
3134 }
3135
3136 /* Update the statistics */
3137 ipw_get_ordinal(priv, IPW_ORD_STAT_MISSED_BEACONS,
3138 &priv->missed_beacons, &len);
3139 missed_beacons_delta = priv->missed_beacons -
3140 priv->last_missed_beacons;
3141 priv->last_missed_beacons = priv->missed_beacons;
3142 if (priv->assoc_request.beacon_interval) {
3143 missed_beacons_percent = missed_beacons_delta *
3144 (HZ * priv->assoc_request.beacon_interval) /
3145 (IPW_STATS_INTERVAL * 10);
3146 } else {
3147 missed_beacons_percent = 0;
3148 }
3149 average_add(&priv->average_missed_beacons, missed_beacons_percent);
3150
3151 ipw_get_ordinal(priv, IPW_ORD_STAT_RX_ERR_CRC, &rx_err, &len);
3152 rx_err_delta = rx_err - priv->last_rx_err;
3153 priv->last_rx_err = rx_err;
3154
3155 ipw_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURE, &tx_failures, &len);
3156 tx_failures_delta = tx_failures - priv->last_tx_failures;
3157 priv->last_tx_failures = tx_failures;
3158
3159 rx_packets_delta = priv->rx_packets - priv->last_rx_packets;
3160 priv->last_rx_packets = priv->rx_packets;
3161
3162 tx_packets_delta = priv->tx_packets - priv->last_tx_packets;
3163 priv->last_tx_packets = priv->tx_packets;
3164
3165 /* Calculate quality based on the following:
3166 *
3167 * Missed beacon: 100% = 0, 0% = 70% missed
3168 * Rate: 60% = 1Mbs, 100% = Max
3169 * Rx and Tx errors represent a straight % of total Rx/Tx
 3170 * RSSI: 100% = > -50, 0% = < -85
3171 * Rx errors: 100% = 0, 0% = 50% missed
3172 *
3173 * The lowest computed quality is used.
3174 *
3175 */
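 /* Worked example with illustrative numbers: 5% missed beacons gives
 * beacon_quality = 100 - 5 = 95, rescaled to (95 - 5) * 100 / 95 = 94%.
 * An RSSI of -70 gives signal_quality = (-70 - -85) * 100 /
 * (-50 - -85) = 1500 / 35 = 42%.  The reported quality is the minimum
 * of the individual metrics, 42% here if the Tx, Rx and rate metrics
 * come out higher. */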
3176#define BEACON_THRESHOLD 5
3177 beacon_quality = 100 - missed_beacons_percent;
3178 if (beacon_quality < BEACON_THRESHOLD)
3179 beacon_quality = 0;
3180 else
3181 beacon_quality = (beacon_quality - BEACON_THRESHOLD) * 100 /
3182 (100 - BEACON_THRESHOLD);
3183 IPW_DEBUG_STATS("Missed beacon: %3d%% (%d%%)\n",
3184 beacon_quality, missed_beacons_percent);
3185
3186 priv->last_rate = ipw_get_current_rate(priv);
3187 rate_quality = priv->last_rate * 40 / priv->last_rate + 60;
3188 IPW_DEBUG_STATS("Rate quality : %3d%% (%dMbs)\n",
3189 rate_quality, priv->last_rate / 1000000);
3190
3191 if (rx_packets_delta > 100 &&
3192 rx_packets_delta + rx_err_delta)
3193 rx_quality = 100 - (rx_err_delta * 100) /
3194 (rx_packets_delta + rx_err_delta);
3195 else
3196 rx_quality = 100;
3197 IPW_DEBUG_STATS("Rx quality : %3d%% (%u errors, %u packets)\n",
3198 rx_quality, rx_err_delta, rx_packets_delta);
3199
3200 if (tx_packets_delta > 100 &&
3201 tx_packets_delta + tx_failures_delta)
3202 tx_quality = 100 - (tx_failures_delta * 100) /
3203 (tx_packets_delta + tx_failures_delta);
3204 else
3205 tx_quality = 100;
3206 IPW_DEBUG_STATS("Tx quality : %3d%% (%u errors, %u packets)\n",
3207 tx_quality, tx_failures_delta, tx_packets_delta);
3208
3209 rssi = average_value(&priv->average_rssi);
3210 if (rssi > PERFECT_RSSI)
3211 signal_quality = 100;
3212 else if (rssi < WORST_RSSI)
3213 signal_quality = 0;
3214 else
3215 signal_quality = (rssi - WORST_RSSI) * 100 /
3216 (PERFECT_RSSI - WORST_RSSI);
3217 IPW_DEBUG_STATS("Signal level : %3d%% (%d dBm)\n",
3218 signal_quality, rssi);
3219
3220 quality = min(beacon_quality,
3221 min(rate_quality,
3222 min(tx_quality, min(rx_quality, signal_quality))));
3223 if (quality == beacon_quality)
3224 IPW_DEBUG_STATS(
3225 "Quality (%d%%): Clamped to missed beacons.\n",
3226 quality);
3227 if (quality == rate_quality)
3228 IPW_DEBUG_STATS(
3229 "Quality (%d%%): Clamped to rate quality.\n",
3230 quality);
3231 if (quality == tx_quality)
3232 IPW_DEBUG_STATS(
3233 "Quality (%d%%): Clamped to Tx quality.\n",
3234 quality);
3235 if (quality == rx_quality)
3236 IPW_DEBUG_STATS(
3237 "Quality (%d%%): Clamped to Rx quality.\n",
3238 quality);
3239 if (quality == signal_quality)
3240 IPW_DEBUG_STATS(
3241 "Quality (%d%%): Clamped to signal quality.\n",
3242 quality);
3243
3244 priv->quality = quality;
3245
3246 queue_delayed_work(priv->workqueue, &priv->gather_stats,
3247 IPW_STATS_INTERVAL);
3248}
3249
3250/**
3251 * Handle host notification packet.
3252 * Called from interrupt routine
3253 */
3254static inline void ipw_rx_notification(struct ipw_priv* priv,
3255 struct ipw_rx_notification *notif)
3256{
3257 IPW_DEBUG_NOTIF("type = %i (%d bytes)\n",
3258 notif->subtype, notif->size);
3259
3260 switch (notif->subtype) {
3261 case HOST_NOTIFICATION_STATUS_ASSOCIATED: {
3262 struct notif_association *assoc = &notif->u.assoc;
3263
3264 switch (assoc->state) {
3265 case CMAS_ASSOCIATED: {
3266 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC,
3267 "associated: '%s' " MAC_FMT " \n",
3268 escape_essid(priv->essid, priv->essid_len),
3269 MAC_ARG(priv->bssid));
3270
3271 switch (priv->ieee->iw_mode) {
3272 case IW_MODE_INFRA:
3273 memcpy(priv->ieee->bssid, priv->bssid,
3274 ETH_ALEN);
3275 break;
3276
3277 case IW_MODE_ADHOC:
3278 memcpy(priv->ieee->bssid, priv->bssid,
3279 ETH_ALEN);
3280
3281 /* clear out the station table */
3282 priv->num_stations = 0;
3283
3284 IPW_DEBUG_ASSOC("queueing adhoc check\n");
3285 queue_delayed_work(priv->workqueue,
3286 &priv->adhoc_check,
3287 priv->assoc_request.beacon_interval);
3288 break;
3289 }
3290
3291 priv->status &= ~STATUS_ASSOCIATING;
3292 priv->status |= STATUS_ASSOCIATED;
3293
3294 netif_carrier_on(priv->net_dev);
3295 if (netif_queue_stopped(priv->net_dev)) {
3296 IPW_DEBUG_NOTIF("waking queue\n");
3297 netif_wake_queue(priv->net_dev);
3298 } else {
3299 IPW_DEBUG_NOTIF("starting queue\n");
3300 netif_start_queue(priv->net_dev);
3301 }
3302
3303 ipw_reset_stats(priv);
3304 /* Ensure the rate is updated immediately */
3305 priv->last_rate = ipw_get_current_rate(priv);
3306 schedule_work(&priv->gather_stats);
3307 notify_wx_assoc_event(priv);
3308
3309/* queue_delayed_work(priv->workqueue,
3310 &priv->request_scan,
3311 SCAN_ASSOCIATED_INTERVAL);
3312*/
3313 break;
3314 }
3315
3316 case CMAS_AUTHENTICATED: {
3317 if (priv->status & (STATUS_ASSOCIATED | STATUS_AUTH)) {
3318#ifdef CONFIG_IPW_DEBUG
3319 struct notif_authenticate *auth = &notif->u.auth;
3320 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC,
3321 "deauthenticated: '%s' " MAC_FMT ": (0x%04X) - %s \n",
3322 escape_essid(priv->essid, priv->essid_len),
3323 MAC_ARG(priv->bssid),
3324 ntohs(auth->status),
3325 ipw_get_status_code(ntohs(auth->status)));
3326#endif
3327
3328 priv->status &= ~(STATUS_ASSOCIATING |
3329 STATUS_AUTH |
3330 STATUS_ASSOCIATED);
3331
3332 netif_carrier_off(priv->net_dev);
3333 netif_stop_queue(priv->net_dev);
3334 queue_work(priv->workqueue, &priv->request_scan);
3335 notify_wx_assoc_event(priv);
3336 break;
3337 }
3338
3339 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC,
3340 "authenticated: '%s' " MAC_FMT "\n",
3341 escape_essid(priv->essid, priv->essid_len),
3342 MAC_ARG(priv->bssid));
3343 break;
3344 }
3345
3346 case CMAS_INIT: {
3347 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC,
3348 "disassociated: '%s' " MAC_FMT " \n",
3349 escape_essid(priv->essid, priv->essid_len),
3350 MAC_ARG(priv->bssid));
3351
3352 priv->status &= ~(
3353 STATUS_DISASSOCIATING |
3354 STATUS_ASSOCIATING |
3355 STATUS_ASSOCIATED |
3356 STATUS_AUTH);
3357
3358 netif_stop_queue(priv->net_dev);
3359 if (!(priv->status & STATUS_ROAMING)) {
3360 netif_carrier_off(priv->net_dev);
3361 notify_wx_assoc_event(priv);
3362
3363 /* Cancel any queued work ... */
3364 cancel_delayed_work(&priv->request_scan);
3365 cancel_delayed_work(&priv->adhoc_check);
3366
3367 /* Queue up another scan... */
3368 queue_work(priv->workqueue,
3369 &priv->request_scan);
3370
3371 cancel_delayed_work(&priv->gather_stats);
3372 } else {
3373 priv->status |= STATUS_ROAMING;
3374 queue_work(priv->workqueue,
3375 &priv->request_scan);
3376 }
3377
3378 ipw_reset_stats(priv);
3379 break;
3380 }
3381
3382 default:
3383 IPW_ERROR("assoc: unknown (%d)\n",
3384 assoc->state);
3385 break;
3386 }
3387
3388 break;
3389 }
3390
3391 case HOST_NOTIFICATION_STATUS_AUTHENTICATE: {
3392 struct notif_authenticate *auth = &notif->u.auth;
3393 switch (auth->state) {
3394 case CMAS_AUTHENTICATED:
3395 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
3396 "authenticated: '%s' " MAC_FMT " \n",
3397 escape_essid(priv->essid, priv->essid_len),
3398 MAC_ARG(priv->bssid));
3399 priv->status |= STATUS_AUTH;
3400 break;
3401
3402 case CMAS_INIT:
3403 if (priv->status & STATUS_AUTH) {
3404 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC,
3405 "authentication failed (0x%04X): %s\n",
3406 ntohs(auth->status),
3407 ipw_get_status_code(ntohs(auth->status)));
3408 }
3409 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC,
3410 "deauthenticated: '%s' " MAC_FMT "\n",
3411 escape_essid(priv->essid, priv->essid_len),
3412 MAC_ARG(priv->bssid));
3413
3414 priv->status &= ~(STATUS_ASSOCIATING |
3415 STATUS_AUTH |
3416 STATUS_ASSOCIATED);
3417
3418 netif_carrier_off(priv->net_dev);
3419 netif_stop_queue(priv->net_dev);
3420 queue_work(priv->workqueue, &priv->request_scan);
3421 notify_wx_assoc_event(priv);
3422 break;
3423
3424 case CMAS_TX_AUTH_SEQ_1:
3425 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC,
3426 "AUTH_SEQ_1\n");
3427 break;
3428 case CMAS_RX_AUTH_SEQ_2:
3429 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC,
3430 "AUTH_SEQ_2\n");
3431 break;
3432 case CMAS_AUTH_SEQ_1_PASS:
3433 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC,
3434 "AUTH_SEQ_1_PASS\n");
3435 break;
3436 case CMAS_AUTH_SEQ_1_FAIL:
3437 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC,
3438 "AUTH_SEQ_1_FAIL\n");
3439 break;
3440 case CMAS_TX_AUTH_SEQ_3:
3441 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC,
3442 "AUTH_SEQ_3\n");
3443 break;
3444 case CMAS_RX_AUTH_SEQ_4:
3445 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC,
3446 "RX_AUTH_SEQ_4\n");
3447 break;
3448 case CMAS_AUTH_SEQ_2_PASS:
3449 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC,
3450 "AUTH_SEQ_2_PASS\n");
3451 break;
3452 case CMAS_AUTH_SEQ_2_FAIL:
3453 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC,
3454 "AUT_SEQ_2_FAIL\n");
3455 break;
3456 case CMAS_TX_ASSOC:
3457 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC,
3458 "TX_ASSOC\n");
3459 break;
3460 case CMAS_RX_ASSOC_RESP:
3461 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC,
3462 "RX_ASSOC_RESP\n");
3463 break;
3464 case CMAS_ASSOCIATED:
3465 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC,
3466 "ASSOCIATED\n");
3467 break;
3468 default:
3469 IPW_DEBUG_NOTIF("auth: failure - %d\n", auth->state);
3470 break;
3471 }
3472 break;
3473 }
3474
3475 case HOST_NOTIFICATION_STATUS_SCAN_CHANNEL_RESULT: {
3476 struct notif_channel_result *x = &notif->u.channel_result;
3477
3478 if (notif->size == sizeof(*x)) {
3479 IPW_DEBUG_SCAN("Scan result for channel %d\n",
3480 x->channel_num);
3481 } else {
3482 IPW_DEBUG_SCAN("Scan result of wrong size %d "
3483 "(should be %zd)\n",
3484 notif->size, sizeof(*x));
3485 }
3486 break;
3487 }
3488
3489 case HOST_NOTIFICATION_STATUS_SCAN_COMPLETED: {
3490 struct notif_scan_complete* x = &notif->u.scan_complete;
3491 if (notif->size == sizeof(*x)) {
3492 IPW_DEBUG_SCAN("Scan completed: type %d, %d channels, "
3493 "%d status\n",
3494 x->scan_type,
3495 x->num_channels,
3496 x->status);
3497 } else {
3498 IPW_ERROR("Scan completed of wrong size %d "
3499 "(should be %zd)\n",
3500 notif->size, sizeof(*x));
3501 }
3502
3503 priv->status &= ~(STATUS_SCANNING | STATUS_SCAN_ABORTING);
3504
3505 cancel_delayed_work(&priv->scan_check);
3506
3507 if (!(priv->status & (STATUS_ASSOCIATED |
3508 STATUS_ASSOCIATING |
3509 STATUS_ROAMING |
3510 STATUS_DISASSOCIATING)))
3511 queue_work(priv->workqueue, &priv->associate);
3512 else if (priv->status & STATUS_ROAMING) {
3513 /* If a scan completed and we are in roam mode, then
3514 * the scan that completed was the one requested as a
3515 * result of entering roam... so, schedule the
3516 * roam work */
3517 queue_work(priv->workqueue, &priv->roam);
3518 } else if (priv->status & STATUS_SCAN_PENDING)
3519 queue_work(priv->workqueue, &priv->request_scan);
3520
3521 priv->ieee->scans++;
3522 break;
3523 }
3524
3525 case HOST_NOTIFICATION_STATUS_FRAG_LENGTH: {
3526 struct notif_frag_length *x = &notif->u.frag_len;
3527
3528 if (notif->size == sizeof(*x)) {
3529 IPW_ERROR("Frag length: %d\n", x->frag_length);
3530 } else {
3531 IPW_ERROR("Frag length of wrong size %d "
3532 "(should be %zd)\n",
3533 notif->size, sizeof(*x));
3534 }
3535 break;
3536 }
3537
3538 case HOST_NOTIFICATION_STATUS_LINK_DETERIORATION: {
3539 struct notif_link_deterioration *x =
3540 &notif->u.link_deterioration;
 3541 if (notif->size == sizeof(*x)) {
3542 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
3543 "link deterioration: '%s' " MAC_FMT " \n",
3544 escape_essid(priv->essid, priv->essid_len),
3545 MAC_ARG(priv->bssid));
3546 memcpy(&priv->last_link_deterioration, x, sizeof(*x));
3547 } else {
3548 IPW_ERROR("Link Deterioration of wrong size %d "
3549 "(should be %zd)\n",
3550 notif->size, sizeof(*x));
3551 }
3552 break;
3553 }
3554
3555 case HOST_NOTIFICATION_DINO_CONFIG_RESPONSE: {
3556 IPW_ERROR("Dino config\n");
3557 if (priv->hcmd && priv->hcmd->cmd == HOST_CMD_DINO_CONFIG) {
3558 /* TODO: Do anything special? */
3559 } else {
3560 IPW_ERROR("Unexpected DINO_CONFIG_RESPONSE\n");
3561 }
3562 break;
3563 }
3564
3565 case HOST_NOTIFICATION_STATUS_BEACON_STATE: {
3566 struct notif_beacon_state *x = &notif->u.beacon_state;
3567 if (notif->size != sizeof(*x)) {
3568 IPW_ERROR("Beacon state of wrong size %d (should "
3569 "be %zd)\n", notif->size, sizeof(*x));
3570 break;
3571 }
3572
3573 if (x->state == HOST_NOTIFICATION_STATUS_BEACON_MISSING) {
3574 if (priv->status & STATUS_SCANNING) {
3575 /* Stop scan to keep fw from getting
3576 * stuck... */
3577 queue_work(priv->workqueue,
3578 &priv->abort_scan);
3579 }
3580
3581 if (x->number > priv->missed_beacon_threshold &&
3582 priv->status & STATUS_ASSOCIATED) {
3583 IPW_DEBUG(IPW_DL_INFO | IPW_DL_NOTIF |
3584 IPW_DL_STATE,
3585 "Missed beacon: %d - disassociate\n",
3586 x->number);
3587 queue_work(priv->workqueue,
3588 &priv->disassociate);
3589 } else if (x->number > priv->roaming_threshold) {
3590 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
3591 "Missed beacon: %d - initiate "
3592 "roaming\n",
3593 x->number);
3594 queue_work(priv->workqueue,
3595 &priv->roam);
3596 } else {
3597 IPW_DEBUG_NOTIF("Missed beacon: %d\n",
3598 x->number);
3599 }
3600
3601 priv->notif_missed_beacons = x->number;
3602
3603 }
3604
3605
3606 break;
3607 }
3608
3609 case HOST_NOTIFICATION_STATUS_TGI_TX_KEY: {
3610 struct notif_tgi_tx_key *x = &notif->u.tgi_tx_key;
 3611 if (notif->size == sizeof(*x)) {
3612 IPW_ERROR("TGi Tx Key: state 0x%02x sec type "
3613 "0x%02x station %d\n",
 3614 x->key_state, x->security_type,
3615 x->station_index);
3616 break;
3617 }
3618
3619 IPW_ERROR("TGi Tx Key of wrong size %d (should be %zd)\n",
3620 notif->size, sizeof(*x));
3621 break;
3622 }
3623
3624 case HOST_NOTIFICATION_CALIB_KEEP_RESULTS: {
3625 struct notif_calibration *x = &notif->u.calibration;
3626
3627 if (notif->size == sizeof(*x)) {
3628 memcpy(&priv->calib, x, sizeof(*x));
3629 IPW_DEBUG_INFO("TODO: Calibration\n");
3630 break;
3631 }
3632
3633 IPW_ERROR("Calibration of wrong size %d (should be %zd)\n",
3634 notif->size, sizeof(*x));
3635 break;
3636 }
3637
3638 case HOST_NOTIFICATION_NOISE_STATS: {
3639 if (notif->size == sizeof(u32)) {
3640 priv->last_noise = (u8)(notif->u.noise.value & 0xff);
3641 average_add(&priv->average_noise, priv->last_noise);
3642 break;
3643 }
3644
3645 IPW_ERROR("Noise stat is wrong size %d (should be %zd)\n",
3646 notif->size, sizeof(u32));
3647 break;
3648 }
3649
3650 default:
3651 IPW_ERROR("Unknown notification: "
3652 "subtype=%d,flags=0x%2x,size=%d\n",
3653 notif->subtype, notif->flags, notif->size);
3654 }
3655}
3656
3657/**
 3658 * Destroys all DMA structures and initialises them again
3659 *
3660 * @param priv
3661 * @return error code
3662 */
3663static int ipw_queue_reset(struct ipw_priv *priv)
3664{
3665 int rc = 0;
3666 /** @todo customize queue sizes */
3667 int nTx = 64, nTxCmd = 8;
3668 ipw_tx_queue_free(priv);
3669 /* Tx CMD queue */
3670 rc = ipw_queue_tx_init(priv, &priv->txq_cmd, nTxCmd,
3671 CX2_TX_CMD_QUEUE_READ_INDEX,
3672 CX2_TX_CMD_QUEUE_WRITE_INDEX,
3673 CX2_TX_CMD_QUEUE_BD_BASE,
3674 CX2_TX_CMD_QUEUE_BD_SIZE);
3675 if (rc) {
3676 IPW_ERROR("Tx Cmd queue init failed\n");
3677 goto error;
3678 }
3679 /* Tx queue(s) */
3680 rc = ipw_queue_tx_init(priv, &priv->txq[0], nTx,
3681 CX2_TX_QUEUE_0_READ_INDEX,
3682 CX2_TX_QUEUE_0_WRITE_INDEX,
3683 CX2_TX_QUEUE_0_BD_BASE,
3684 CX2_TX_QUEUE_0_BD_SIZE);
3685 if (rc) {
3686 IPW_ERROR("Tx 0 queue init failed\n");
3687 goto error;
3688 }
3689 rc = ipw_queue_tx_init(priv, &priv->txq[1], nTx,
3690 CX2_TX_QUEUE_1_READ_INDEX,
3691 CX2_TX_QUEUE_1_WRITE_INDEX,
3692 CX2_TX_QUEUE_1_BD_BASE,
3693 CX2_TX_QUEUE_1_BD_SIZE);
3694 if (rc) {
3695 IPW_ERROR("Tx 1 queue init failed\n");
3696 goto error;
3697 }
3698 rc = ipw_queue_tx_init(priv, &priv->txq[2], nTx,
3699 CX2_TX_QUEUE_2_READ_INDEX,
3700 CX2_TX_QUEUE_2_WRITE_INDEX,
3701 CX2_TX_QUEUE_2_BD_BASE,
3702 CX2_TX_QUEUE_2_BD_SIZE);
3703 if (rc) {
3704 IPW_ERROR("Tx 2 queue init failed\n");
3705 goto error;
3706 }
3707 rc = ipw_queue_tx_init(priv, &priv->txq[3], nTx,
3708 CX2_TX_QUEUE_3_READ_INDEX,
3709 CX2_TX_QUEUE_3_WRITE_INDEX,
3710 CX2_TX_QUEUE_3_BD_BASE,
3711 CX2_TX_QUEUE_3_BD_SIZE);
3712 if (rc) {
3713 IPW_ERROR("Tx 3 queue init failed\n");
3714 goto error;
3715 }
3716 /* statistics */
3717 priv->rx_bufs_min = 0;
3718 priv->rx_pend_max = 0;
3719 return rc;
3720
3721 error:
3722 ipw_tx_queue_free(priv);
3723 return rc;
3724}
3725
3726/**
 3727 * Reclaim Tx queue entries no longer used by the NIC.
 3728 *
 3729 * When the FW advances the 'R' index, all entries between the old and
 3730 * new 'R' index need to be reclaimed. As a result, some free space
 3731 * forms. If there is enough free space (> low mark), wake the Tx queue.
 3732 *
 3733 * @note Need to protect against garbage in the 'R' index
 3734 * @param priv
 3735 * @param txq
 3736 * @param qindex
 3737 * @return Number of used entries remaining in the queue
3738 */
3739static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
3740 struct clx2_tx_queue *txq, int qindex)
3741{
3742 u32 hw_tail;
3743 int used;
3744 struct clx2_queue *q = &txq->q;
3745
3746 hw_tail = ipw_read32(priv, q->reg_r);
3747 if (hw_tail >= q->n_bd) {
3748 IPW_ERROR
3749 ("Read index for DMA queue (%d) is out of range [0-%d)\n",
3750 hw_tail, q->n_bd);
3751 goto done;
3752 }
3753 for (; q->last_used != hw_tail;
3754 q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
3755 ipw_queue_tx_free_tfd(priv, txq);
3756 priv->tx_packets++;
3757 }
3758 done:
3759 if (ipw_queue_space(q) > q->low_mark && qindex >= 0) {
3760 __maybe_wake_tx(priv);
3761 }
3762 used = q->first_empty - q->last_used;
3763 if (used < 0)
3764 used += q->n_bd;
3765
3766 return used;
3767}
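/* Illustration of the 'used' computation above: with n_bd = 64,
 * first_empty = 3 and last_used = 60 after reclaiming, used =
 * 3 - 60 = -57, wrapped to -57 + 64 = 7 entries still held by the NIC. */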
3768
3769static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf,
3770 int len, int sync)
3771{
3772 struct clx2_tx_queue *txq = &priv->txq_cmd;
3773 struct clx2_queue *q = &txq->q;
3774 struct tfd_frame *tfd;
3775
3776 if (ipw_queue_space(q) < (sync ? 1 : 2)) {
3777 IPW_ERROR("No space for Tx\n");
3778 return -EBUSY;
3779 }
3780
3781 tfd = &txq->bd[q->first_empty];
3782 txq->txb[q->first_empty] = NULL;
3783
3784 memset(tfd, 0, sizeof(*tfd));
3785 tfd->control_flags.message_type = TX_HOST_COMMAND_TYPE;
3786 tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK;
3787 priv->hcmd_seq++;
3788 tfd->u.cmd.index = hcmd;
3789 tfd->u.cmd.length = len;
3790 memcpy(tfd->u.cmd.payload, buf, len);
3791 q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
3792 ipw_write32(priv, q->reg_w, q->first_empty);
3793 _ipw_read32(priv, 0x90);
3794
3795 return 0;
3796}
3797
3798
3799
3800/*
3801 * Rx theory of operation
3802 *
3803 * The host allocates 32 DMA target addresses and passes the host address
3804 * to the firmware at register CX2_RFDS_TABLE_LOWER + N * RFD_SIZE where N is
3805 * 0 to 31
3806 *
3807 * Rx Queue Indexes
3808 * The host/firmware share two index registers for managing the Rx buffers.
3809 *
3810 * The READ index maps to the first position that the firmware may be writing
3811 * to -- the driver can read up to (but not including) this position and get
3812 * good data.
3813 * The READ index is managed by the firmware once the card is enabled.
3814 *
3815 * The WRITE index maps to the last position the driver has read from -- the
3816 * position preceding WRITE is the last slot the firmware can place a packet.
3817 *
3818 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
3819 * WRITE = READ.
3820 *
3821 * During initialization the host sets up the READ queue position to the first
3822 * INDEX position, and WRITE to the last (READ - 1 wrapped)
3823 *
3824 * When the firmware places a packet in a buffer it will advance the READ index
3825 * and fire the RX interrupt. The driver can then query the READ index and
3826 * process as many packets as possible, moving the WRITE index forward as it
3827 * resets the Rx queue buffers with new memory.
3828 *
3829 * The management in the driver is as follows:
3830 * + A list of pre-allocated SKBs is stored in ipw->rxq->rx_free. When
3831 * ipw->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
 3832 * to replenish the ipw->rxq->rx_free.
3833 * + In ipw_rx_queue_replenish (scheduled) if 'processed' != 'read' then the
3834 * ipw->rxq is replenished and the READ INDEX is updated (updating the
3835 * 'processed' and 'read' driver indexes as well)
3836 * + A received packet is processed and handed to the kernel network stack,
3837 * detached from the ipw->rxq. The driver 'processed' index is updated.
3838 * + The Host/Firmware ipw->rxq is replenished at tasklet time from the rx_free
3839 * list. If there are no allocated buffers in ipw->rxq->rx_free, the READ
3840 * INDEX is not incremented and ipw->status(RX_STALLED) is set. If there
3841 * were enough free buffers and RX_STALLED is set it is cleared.
3842 *
3843 *
3844 * Driver sequence:
3845 *
3846 * ipw_rx_queue_alloc() Allocates rx_free
3847 * ipw_rx_queue_replenish() Replenishes rx_free list from rx_used, and calls
3848 * ipw_rx_queue_restock
3849 * ipw_rx_queue_restock() Moves available buffers from rx_free into Rx
3850 * queue, updates firmware pointers, and updates
3851 * the WRITE index. If insufficient rx_free buffers
3852 * are available, schedules ipw_rx_queue_replenish
3853 *
3854 * -- enable interrupts --
3855 * ISR - ipw_rx() Detach ipw_rx_mem_buffers from pool up to the
3856 * READ INDEX, detaching the SKB from the pool.
3857 * Moves the packet buffer from queue to rx_used.
3858 * Calls ipw_rx_queue_restock to refill any empty
3859 * slots.
3860 * ...
3861 *
3862 */
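/* Illustration of the index convention above: immediately after
 * ipw_rx_queue_reset() the driver has read = write = 0 and
 * processed = RX_QUEUE_SIZE - 1, so the restock path sees
 * write != processed and can hand every available rx_free buffer to the
 * firmware, advancing 'write' one slot per buffer handed over. */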
3863
3864/*
3865 * If there are slots in the RX queue that need to be restocked,
3866 * and we have free pre-allocated buffers, fill the ranks as much
3867 * as we can pulling from rx_free.
3868 *
3869 * This moves the 'write' index forward to catch up with 'processed', and
3870 * also updates the memory address in the firmware to reference the new
3871 * target buffer.
3872 */
3873static void ipw_rx_queue_restock(struct ipw_priv *priv)
3874{
3875 struct ipw_rx_queue *rxq = priv->rxq;
3876 struct list_head *element;
3877 struct ipw_rx_mem_buffer *rxb;
3878 unsigned long flags;
3879 int write;
3880
3881 spin_lock_irqsave(&rxq->lock, flags);
3882 write = rxq->write;
3883 while ((rxq->write != rxq->processed) && (rxq->free_count)) {
3884 element = rxq->rx_free.next;
3885 rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
3886 list_del(element);
3887
3888 ipw_write32(priv, CX2_RFDS_TABLE_LOWER + rxq->write * RFD_SIZE,
3889 rxb->dma_addr);
3890 rxq->queue[rxq->write] = rxb;
3891 rxq->write = (rxq->write + 1) % RX_QUEUE_SIZE;
3892 rxq->free_count--;
3893 }
3894 spin_unlock_irqrestore(&rxq->lock, flags);
3895
3896 /* If the pre-allocated buffer pool is dropping low, schedule to
3897 * refill it */
3898 if (rxq->free_count <= RX_LOW_WATERMARK)
3899 queue_work(priv->workqueue, &priv->rx_replenish);
3900
3901 /* If we've added more space for the firmware to place data, tell it */
3902 if (write != rxq->write)
3903 ipw_write32(priv, CX2_RX_WRITE_INDEX, rxq->write);
3904}
3905
3906/*
 3907 * Move all used packets from rx_used to rx_free, allocating a new SKB for each.
3908 * Also restock the Rx queue via ipw_rx_queue_restock.
3909 *
 3910 * This is called as a scheduled work item (except during initialization).
3911 */
3912static void ipw_rx_queue_replenish(void *data)
3913{
3914 struct ipw_priv *priv = data;
3915 struct ipw_rx_queue *rxq = priv->rxq;
3916 struct list_head *element;
3917 struct ipw_rx_mem_buffer *rxb;
3918 unsigned long flags;
3919
3920 spin_lock_irqsave(&rxq->lock, flags);
3921 while (!list_empty(&rxq->rx_used)) {
3922 element = rxq->rx_used.next;
3923 rxb = list_entry(element, struct ipw_rx_mem_buffer, list);
3924 rxb->skb = alloc_skb(CX2_RX_BUF_SIZE, GFP_ATOMIC);
3925 if (!rxb->skb) {
3926 printk(KERN_CRIT "%s: Can not allocate SKB buffers.\n",
3927 priv->net_dev->name);
3928 /* We don't reschedule replenish work here -- we will
3929 * call the restock method and if it still needs
3930 * more buffers it will schedule replenish */
3931 break;
3932 }
3933 list_del(element);
3934
3935 rxb->rxb = (struct ipw_rx_buffer *)rxb->skb->data;
3936 rxb->dma_addr = pci_map_single(
3937 priv->pci_dev, rxb->skb->data, CX2_RX_BUF_SIZE,
3938 PCI_DMA_FROMDEVICE);
3939
3940 list_add_tail(&rxb->list, &rxq->rx_free);
3941 rxq->free_count++;
3942 }
3943 spin_unlock_irqrestore(&rxq->lock, flags);
3944
3945 ipw_rx_queue_restock(priv);
3946}
3947
3948/* Assumes that the skb field of the buffers in 'pool' is kept accurate.
 3949 * If an SKB has been detached, the pool entry needs to have its SKB set to NULL.
 3950 * This free routine walks the list of POOL entries and, if the SKB is
 3951 * non-NULL, it is unmapped and freed.
3952 */
3953static void ipw_rx_queue_free(struct ipw_priv *priv,
3954 struct ipw_rx_queue *rxq)
3955{
3956 int i;
3957
3958 if (!rxq)
3959 return;
3960
3961 for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
3962 if (rxq->pool[i].skb != NULL) {
3963 pci_unmap_single(priv->pci_dev, rxq->pool[i].dma_addr,
3964 CX2_RX_BUF_SIZE,
3965 PCI_DMA_FROMDEVICE);
3966 dev_kfree_skb(rxq->pool[i].skb);
3967 }
3968 }
3969
3970 kfree(rxq);
3971}
3972
3973static struct ipw_rx_queue *ipw_rx_queue_alloc(struct ipw_priv *priv)
3974{
3975 struct ipw_rx_queue *rxq;
3976 int i;
3977
 3978 rxq = kmalloc(sizeof(*rxq), GFP_KERNEL);
 3979 if (!rxq)
 3980 return NULL;
 3981 memset(rxq, 0, sizeof(*rxq));
3980 spin_lock_init(&rxq->lock);
3981 INIT_LIST_HEAD(&rxq->rx_free);
3982 INIT_LIST_HEAD(&rxq->rx_used);
3983
3984 /* Fill the rx_used queue with _all_ of the Rx buffers */
3985 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
3986 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
3987
3988 /* Set us so that we have processed and used all buffers, but have
3989 * not restocked the Rx queue with fresh buffers */
3990 rxq->read = rxq->write = 0;
3991 rxq->processed = RX_QUEUE_SIZE - 1;
3992 rxq->free_count = 0;
3993
3994 return rxq;
3995}
3996
3997static int ipw_is_rate_in_mask(struct ipw_priv *priv, int ieee_mode, u8 rate)
3998{
3999 rate &= ~IEEE80211_BASIC_RATE_MASK;
4000 if (ieee_mode == IEEE_A) {
4001 switch (rate) {
4002 case IEEE80211_OFDM_RATE_6MB:
4003 return priv->rates_mask & IEEE80211_OFDM_RATE_6MB_MASK ?
4004 1 : 0;
4005 case IEEE80211_OFDM_RATE_9MB:
4006 return priv->rates_mask & IEEE80211_OFDM_RATE_9MB_MASK ?
4007 1 : 0;
4008 case IEEE80211_OFDM_RATE_12MB:
4009 return priv->rates_mask & IEEE80211_OFDM_RATE_12MB_MASK ?
4010 1 : 0;
4011 case IEEE80211_OFDM_RATE_18MB:
4012 return priv->rates_mask & IEEE80211_OFDM_RATE_18MB_MASK ?
4013 1 : 0;
4014 case IEEE80211_OFDM_RATE_24MB:
4015 return priv->rates_mask & IEEE80211_OFDM_RATE_24MB_MASK ?
4016 1 : 0;
4017 case IEEE80211_OFDM_RATE_36MB:
4018 return priv->rates_mask & IEEE80211_OFDM_RATE_36MB_MASK ?
4019 1 : 0;
4020 case IEEE80211_OFDM_RATE_48MB:
4021 return priv->rates_mask & IEEE80211_OFDM_RATE_48MB_MASK ?
4022 1 : 0;
4023 case IEEE80211_OFDM_RATE_54MB:
4024 return priv->rates_mask & IEEE80211_OFDM_RATE_54MB_MASK ?
4025 1 : 0;
4026 default:
4027 return 0;
4028 }
4029 }
4030
4031 /* B and G mixed */
4032 switch (rate) {
4033 case IEEE80211_CCK_RATE_1MB:
4034 return priv->rates_mask & IEEE80211_CCK_RATE_1MB_MASK ? 1 : 0;
4035 case IEEE80211_CCK_RATE_2MB:
4036 return priv->rates_mask & IEEE80211_CCK_RATE_2MB_MASK ? 1 : 0;
4037 case IEEE80211_CCK_RATE_5MB:
4038 return priv->rates_mask & IEEE80211_CCK_RATE_5MB_MASK ? 1 : 0;
4039 case IEEE80211_CCK_RATE_11MB:
4040 return priv->rates_mask & IEEE80211_CCK_RATE_11MB_MASK ? 1 : 0;
4041 }
4042
4043 /* If we are limited to B modulations, bail at this point */
4044 if (ieee_mode == IEEE_B)
4045 return 0;
4046
4047 /* G */
4048 switch (rate) {
4049 case IEEE80211_OFDM_RATE_6MB:
4050 return priv->rates_mask & IEEE80211_OFDM_RATE_6MB_MASK ? 1 : 0;
4051 case IEEE80211_OFDM_RATE_9MB:
4052 return priv->rates_mask & IEEE80211_OFDM_RATE_9MB_MASK ? 1 : 0;
4053 case IEEE80211_OFDM_RATE_12MB:
4054 return priv->rates_mask & IEEE80211_OFDM_RATE_12MB_MASK ? 1 : 0;
4055 case IEEE80211_OFDM_RATE_18MB:
4056 return priv->rates_mask & IEEE80211_OFDM_RATE_18MB_MASK ? 1 : 0;
4057 case IEEE80211_OFDM_RATE_24MB:
4058 return priv->rates_mask & IEEE80211_OFDM_RATE_24MB_MASK ? 1 : 0;
4059 case IEEE80211_OFDM_RATE_36MB:
4060 return priv->rates_mask & IEEE80211_OFDM_RATE_36MB_MASK ? 1 : 0;
4061 case IEEE80211_OFDM_RATE_48MB:
4062 return priv->rates_mask & IEEE80211_OFDM_RATE_48MB_MASK ? 1 : 0;
4063 case IEEE80211_OFDM_RATE_54MB:
4064 return priv->rates_mask & IEEE80211_OFDM_RATE_54MB_MASK ? 1 : 0;
4065 }
4066
4067 return 0;
4068}
4069
4070static int ipw_compatible_rates(struct ipw_priv *priv,
4071 const struct ieee80211_network *network,
4072 struct ipw_supported_rates *rates)
4073{
4074 int num_rates, i;
4075
4076 memset(rates, 0, sizeof(*rates));
4077 num_rates = min(network->rates_len, (u8)IPW_MAX_RATES);
4078 rates->num_rates = 0;
4079 for (i = 0; i < num_rates; i++) {
4080 if (!ipw_is_rate_in_mask(priv, network->mode, network->rates[i])) {
4081 IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n",
4082 network->rates[i], priv->rates_mask);
4083 continue;
4084 }
4085
4086 rates->supported_rates[rates->num_rates++] = network->rates[i];
4087 }
4088
4089 num_rates = min(network->rates_ex_len, (u8)(IPW_MAX_RATES - num_rates));
4090 for (i = 0; i < num_rates; i++) {
4091 if (!ipw_is_rate_in_mask(priv, network->mode, network->rates_ex[i])) {
4092 IPW_DEBUG_SCAN("Rate %02X masked : 0x%08X\n",
4093 network->rates_ex[i], priv->rates_mask);
4094 continue;
4095 }
4096
4097 rates->supported_rates[rates->num_rates++] = network->rates_ex[i];
4098 }
4099
4100 return rates->num_rates;
4101}
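/* Example with hypothetical beacon contents: if a G network advertises
 * rates {1, 2, 5.5, 11, 6, 9, 12, 18} Mbps but rates_mask only has the
 * CCK bits set, ipw_compatible_rates() copies {1, 2, 5.5, 11} into
 * rates->supported_rates and returns 4; the OFDM rates are reported as
 * masked by IPW_DEBUG_SCAN and skipped. */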
4102
4103static inline void ipw_copy_rates(struct ipw_supported_rates *dest,
4104 const struct ipw_supported_rates *src)
4105{
4106 u8 i;
4107 for (i = 0; i < src->num_rates; i++)
4108 dest->supported_rates[i] = src->supported_rates[i];
4109 dest->num_rates = src->num_rates;
4110}
4111
4112/* TODO: Look at sniffed packets in the air to determine if the basic rate
 4113 * mask should ever be used -- right now all callers that add scan rates pass
 4114 * modulation = CCK, so BASIC_RATE_MASK is never set... */
4115static void ipw_add_cck_scan_rates(struct ipw_supported_rates *rates,
4116 u8 modulation, u32 rate_mask)
4117{
4118 u8 basic_mask = (IEEE80211_OFDM_MODULATION == modulation) ?
4119 IEEE80211_BASIC_RATE_MASK : 0;
4120
4121 if (rate_mask & IEEE80211_CCK_RATE_1MB_MASK)
4122 rates->supported_rates[rates->num_rates++] =
4123 IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_1MB;
4124
4125 if (rate_mask & IEEE80211_CCK_RATE_2MB_MASK)
4126 rates->supported_rates[rates->num_rates++] =
4127 IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_2MB;
4128
4129 if (rate_mask & IEEE80211_CCK_RATE_5MB_MASK)
4130 rates->supported_rates[rates->num_rates++] = basic_mask |
4131 IEEE80211_CCK_RATE_5MB;
4132
4133 if (rate_mask & IEEE80211_CCK_RATE_11MB_MASK)
4134 rates->supported_rates[rates->num_rates++] = basic_mask |
4135 IEEE80211_CCK_RATE_11MB;
4136}
4137
4138static void ipw_add_ofdm_scan_rates(struct ipw_supported_rates *rates,
4139 u8 modulation, u32 rate_mask)
4140{
4141 u8 basic_mask = (IEEE80211_OFDM_MODULATION == modulation) ?
4142 IEEE80211_BASIC_RATE_MASK : 0;
4143
4144 if (rate_mask & IEEE80211_OFDM_RATE_6MB_MASK)
4145 rates->supported_rates[rates->num_rates++] = basic_mask |
4146 IEEE80211_OFDM_RATE_6MB;
4147
4148 if (rate_mask & IEEE80211_OFDM_RATE_9MB_MASK)
4149 rates->supported_rates[rates->num_rates++] =
4150 IEEE80211_OFDM_RATE_9MB;
4151
4152 if (rate_mask & IEEE80211_OFDM_RATE_12MB_MASK)
4153 rates->supported_rates[rates->num_rates++] = basic_mask |
4154 IEEE80211_OFDM_RATE_12MB;
4155
4156 if (rate_mask & IEEE80211_OFDM_RATE_18MB_MASK)
4157 rates->supported_rates[rates->num_rates++] =
4158 IEEE80211_OFDM_RATE_18MB;
4159
4160 if (rate_mask & IEEE80211_OFDM_RATE_24MB_MASK)
4161 rates->supported_rates[rates->num_rates++] = basic_mask |
4162 IEEE80211_OFDM_RATE_24MB;
4163
4164 if (rate_mask & IEEE80211_OFDM_RATE_36MB_MASK)
4165 rates->supported_rates[rates->num_rates++] =
4166 IEEE80211_OFDM_RATE_36MB;
4167
4168 if (rate_mask & IEEE80211_OFDM_RATE_48MB_MASK)
4169 rates->supported_rates[rates->num_rates++] =
4170 IEEE80211_OFDM_RATE_48MB;
4171
4172 if (rate_mask & IEEE80211_OFDM_RATE_54MB_MASK)
4173 rates->supported_rates[rates->num_rates++] =
4174 IEEE80211_OFDM_RATE_54MB;
4175}
4176
4177struct ipw_network_match {
4178 struct ieee80211_network *network;
4179 struct ipw_supported_rates rates;
4180};
4181
4182static int ipw_best_network(
4183 struct ipw_priv *priv,
4184 struct ipw_network_match *match,
4185 struct ieee80211_network *network,
4186 int roaming)
4187{
4188 struct ipw_supported_rates rates;
4189
4190 /* Verify that this network's capability is compatible with the
4191 * current mode (AdHoc or Infrastructure) */
4192 if ((priv->ieee->iw_mode == IW_MODE_INFRA &&
4193 !(network->capability & WLAN_CAPABILITY_BSS)) ||
4194 (priv->ieee->iw_mode == IW_MODE_ADHOC &&
4195 !(network->capability & WLAN_CAPABILITY_IBSS))) {
4196 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded due to "
4197 "capability mismatch.\n",
4198 escape_essid(network->ssid, network->ssid_len),
4199 MAC_ARG(network->bssid));
4200 return 0;
4201 }
4202
4203 /* If we do not have an ESSID for this AP, we can not associate with
4204 * it */
4205 if (network->flags & NETWORK_EMPTY_ESSID) {
4206 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
4207 "because of hidden ESSID.\n",
4208 escape_essid(network->ssid, network->ssid_len),
4209 MAC_ARG(network->bssid));
4210 return 0;
4211 }
4212
4213 if (unlikely(roaming)) {
4214		/* If we are roaming, then check that this is a valid
4215		 * network to try to roam to */
4216 if ((network->ssid_len != match->network->ssid_len) ||
4217 memcmp(network->ssid, match->network->ssid,
4218 network->ssid_len)) {
4219			IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
4220 "because of non-network ESSID.\n",
4221 escape_essid(network->ssid,
4222 network->ssid_len),
4223 MAC_ARG(network->bssid));
4224 return 0;
4225 }
4226 } else {
4227 /* If an ESSID has been configured then compare the broadcast
4228 * ESSID to ours */
4229 if ((priv->config & CFG_STATIC_ESSID) &&
4230 ((network->ssid_len != priv->essid_len) ||
4231 memcmp(network->ssid, priv->essid,
4232 min(network->ssid_len, priv->essid_len)))) {
4233 char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
4234 strncpy(escaped, escape_essid(
4235 network->ssid, network->ssid_len),
4236 sizeof(escaped));
4237 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
4238 "because of ESSID mismatch: '%s'.\n",
4239 escaped, MAC_ARG(network->bssid),
4240 escape_essid(priv->essid, priv->essid_len));
4241 return 0;
4242 }
4243 }
4244
4245	/* If the current best match has a stronger signal (RSSI) than this
4246	 * network, don't bother testing everything else. */
4247 if (match->network && match->network->stats.rssi >
4248 network->stats.rssi) {
4249 char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
4250 strncpy(escaped,
4251 escape_essid(network->ssid, network->ssid_len),
4252 sizeof(escaped));
4253 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded because "
4254 "'%s (" MAC_FMT ")' has a stronger signal.\n",
4255 escaped, MAC_ARG(network->bssid),
4256 escape_essid(match->network->ssid,
4257 match->network->ssid_len),
4258 MAC_ARG(match->network->bssid));
4259 return 0;
4260 }
4261
4262	/* If this network has already had an association attempt within the
4263	 * last 5 seconds, do not try and associate again... */
4264 if (network->last_associate &&
4265 time_after(network->last_associate + (HZ * 5UL), jiffies)) {
4266 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
4267 "because of storming (%lu since last "
4268 "assoc attempt).\n",
4269 escape_essid(network->ssid, network->ssid_len),
4270 MAC_ARG(network->bssid),
4271 (jiffies - network->last_associate) / HZ);
4272 return 0;
4273 }
4274
4275	/* If the scan data we have for this network is too old, skip it... */
4276 if (priv->ieee->scan_age != 0 &&
4277 jiffies - network->last_scanned > priv->ieee->scan_age) {
4278 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
4279 "because of age: %lums.\n",
4280 escape_essid(network->ssid, network->ssid_len),
4281 MAC_ARG(network->bssid),
4282				1000 * (jiffies - network->last_scanned) / HZ);
4283 return 0;
4284 }
4285
4286 if ((priv->config & CFG_STATIC_CHANNEL) &&
4287 (network->channel != priv->channel)) {
4288 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
4289 "because of channel mismatch: %d != %d.\n",
4290 escape_essid(network->ssid, network->ssid_len),
4291 MAC_ARG(network->bssid),
4292 network->channel, priv->channel);
4293 return 0;
4294 }
4295
4296	/* Verify privacy compatibility */
4297 if (((priv->capability & CAP_PRIVACY_ON) ? 1 : 0) !=
4298 ((network->capability & WLAN_CAPABILITY_PRIVACY) ? 1 : 0)) {
4299 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
4300 "because of privacy mismatch: %s != %s.\n",
4301 escape_essid(network->ssid, network->ssid_len),
4302 MAC_ARG(network->bssid),
4303 priv->capability & CAP_PRIVACY_ON ? "on" :
4304 "off",
4305				network->capability &
4306				WLAN_CAPABILITY_PRIVACY ? "on" : "off");
4307 return 0;
4308 }
4309
4310 if ((priv->config & CFG_STATIC_BSSID) &&
4311 memcmp(network->bssid, priv->bssid, ETH_ALEN)) {
4312 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
4313 "because of BSSID mismatch: " MAC_FMT ".\n",
4314 escape_essid(network->ssid, network->ssid_len),
4315 MAC_ARG(network->bssid),
4316 MAC_ARG(priv->bssid));
4317 return 0;
4318 }
4319
4320 /* Filter out any incompatible freq / mode combinations */
4321 if (!ieee80211_is_valid_mode(priv->ieee, network->mode)) {
4322 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
4323 "because of invalid frequency/mode "
4324 "combination.\n",
4325 escape_essid(network->ssid, network->ssid_len),
4326 MAC_ARG(network->bssid));
4327 return 0;
4328 }
4329
4330 ipw_compatible_rates(priv, network, &rates);
4331 if (rates.num_rates == 0) {
4332 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' excluded "
4333 "because of no compatible rates.\n",
4334 escape_essid(network->ssid, network->ssid_len),
4335 MAC_ARG(network->bssid));
4336 return 0;
4337 }
4338
4339	/* TODO: Perform any further minimal comparative tests.  We do not
4340 * want to put too much policy logic here; intelligent scan selection
4341 * should occur within a generic IEEE 802.11 user space tool. */
4342
4343 /* Set up 'new' AP to this network */
4344 ipw_copy_rates(&match->rates, &rates);
4345 match->network = network;
4346
4347 IPW_DEBUG_ASSOC("Network '%s (" MAC_FMT ")' is a viable match.\n",
4348 escape_essid(network->ssid, network->ssid_len),
4349 MAC_ARG(network->bssid));
4350
4351 return 1;
4352}
4353
4354
4355static void ipw_adhoc_create(struct ipw_priv *priv,
4356 struct ieee80211_network *network)
4357{
4358 /*
4359 * For the purposes of scanning, we can set our wireless mode
4360 * to trigger scans across combinations of bands, but when it
4361	 * comes to creating a new ad-hoc network, we have to tell the FW
4362 * exactly which band to use.
4363 *
4364 * We also have the possibility of an invalid channel for the
4365	 * chosen band.  Attempting to create a new ad-hoc network
4366 * with an invalid channel for wireless mode will trigger a
4367 * FW fatal error.
4368 */
4369 network->mode = is_valid_channel(priv->ieee->mode, priv->channel);
4370 if (network->mode) {
4371 network->channel = priv->channel;
4372 } else {
4373 IPW_WARNING("Overriding invalid channel\n");
4374 if (priv->ieee->mode & IEEE_A) {
4375 network->mode = IEEE_A;
4376 priv->channel = band_a_active_channel[0];
4377 } else if (priv->ieee->mode & IEEE_G) {
4378 network->mode = IEEE_G;
4379 priv->channel = band_b_active_channel[0];
4380 } else {
4381 network->mode = IEEE_B;
4382 priv->channel = band_b_active_channel[0];
4383 }
4384 }
4385
4386 network->channel = priv->channel;
4387 priv->config |= CFG_ADHOC_PERSIST;
4388 ipw_create_bssid(priv, network->bssid);
4389 network->ssid_len = priv->essid_len;
4390 memcpy(network->ssid, priv->essid, priv->essid_len);
4391 memset(&network->stats, 0, sizeof(network->stats));
4392 network->capability = WLAN_CAPABILITY_IBSS;
4393 if (priv->capability & CAP_PRIVACY_ON)
4394 network->capability |= WLAN_CAPABILITY_PRIVACY;
4395 network->rates_len = min(priv->rates.num_rates, MAX_RATES_LENGTH);
4396 memcpy(network->rates, priv->rates.supported_rates,
4397 network->rates_len);
4398 network->rates_ex_len = priv->rates.num_rates - network->rates_len;
4399 memcpy(network->rates_ex,
4400 &priv->rates.supported_rates[network->rates_len],
4401 network->rates_ex_len);
4402 network->last_scanned = 0;
4403 network->flags = 0;
4404 network->last_associate = 0;
4405 network->time_stamp[0] = 0;
4406 network->time_stamp[1] = 0;
4407 network->beacon_interval = 100; /* Default */
4408 network->listen_interval = 10; /* Default */
4409 network->atim_window = 0; /* Default */
4410#ifdef CONFIG_IEEE80211_WPA
4411 network->wpa_ie_len = 0;
4412 network->rsn_ie_len = 0;
4413#endif /* CONFIG_IEEE80211_WPA */
4414}
4415
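/* Push all four WEP key slots to the firmware via IPW_CMD_WEP_KEY.  Slots
 * whose bit is not set in priv->sec.flags are sent with a zero key size;
 * the loop stops on the first command failure. */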
4416static void ipw_send_wep_keys(struct ipw_priv *priv)
4417{
4418 struct ipw_wep_key *key;
4419 int i;
4420 struct host_cmd cmd = {
4421 .cmd = IPW_CMD_WEP_KEY,
4422 .len = sizeof(*key)
4423 };
4424
4425 key = (struct ipw_wep_key *)&cmd.param;
4426 key->cmd_id = DINO_CMD_WEP_KEY;
4427 key->seq_num = 0;
4428
4429 for (i = 0; i < 4; i++) {
4430 key->key_index = i;
4431 if (!(priv->sec.flags & (1 << i))) {
4432 key->key_size = 0;
4433 } else {
4434 key->key_size = priv->sec.key_sizes[i];
4435 memcpy(key->key, priv->sec.keys[i], key->key_size);
4436 }
4437
4438 if (ipw_send_cmd(priv, &cmd)) {
4439 IPW_ERROR("failed to send WEP_KEY command\n");
4440 return;
4441 }
4442 }
4443}
4444
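/* Delayed-work callback for ad-hoc mode: count missed beacons and, once the
 * threshold is exceeded and CFG_ADHOC_PERSIST is not set, drop the current
 * network and disassociate; otherwise re-queue itself using the associated
 * beacon interval. */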
4445static void ipw_adhoc_check(void *data)
4446{
4447 struct ipw_priv *priv = data;
4448
4449 if (priv->missed_adhoc_beacons++ > priv->missed_beacon_threshold &&
4450 !(priv->config & CFG_ADHOC_PERSIST)) {
4451 IPW_DEBUG_SCAN("Disassociating due to missed beacons\n");
4452 ipw_remove_current_network(priv);
4453 ipw_disassociate(priv);
4454 return;
4455 }
4456
4457 queue_delayed_work(priv->workqueue, &priv->adhoc_check,
4458 priv->assoc_request.beacon_interval);
4459}
4460
4461#ifdef CONFIG_IPW_DEBUG
4462static void ipw_debug_config(struct ipw_priv *priv)
4463{
4464 IPW_DEBUG_INFO("Scan completed, no valid APs matched "
4465 "[CFG 0x%08X]\n", priv->config);
4466 if (priv->config & CFG_STATIC_CHANNEL)
4467 IPW_DEBUG_INFO("Channel locked to %d\n",
4468 priv->channel);
4469 else
4470 IPW_DEBUG_INFO("Channel unlocked.\n");
4471 if (priv->config & CFG_STATIC_ESSID)
4472 IPW_DEBUG_INFO("ESSID locked to '%s'\n",
4473 escape_essid(priv->essid,
4474 priv->essid_len));
4475 else
4476 IPW_DEBUG_INFO("ESSID unlocked.\n");
4477 if (priv->config & CFG_STATIC_BSSID)
4478		IPW_DEBUG_INFO("BSSID locked to " MAC_FMT "\n", MAC_ARG(priv->bssid));
4479 else
4480 IPW_DEBUG_INFO("BSSID unlocked.\n");
4481 if (priv->capability & CAP_PRIVACY_ON)
4482 IPW_DEBUG_INFO("PRIVACY on\n");
4483 else
4484 IPW_DEBUG_INFO("PRIVACY off\n");
4485 IPW_DEBUG_INFO("RATE MASK: 0x%08X\n", priv->rates_mask);
4486}
4487#else
4488#define ipw_debug_config(x) do {} while (0)
4489#endif
4490
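/* Translate priv->rates_mask into the per-band fixed-rate layout used by the
 * firmware (A-band rates are shifted down, and for 802.11g the 6/9/12Mb OFDM
 * bits are remapped down one bit position).  A mask that is invalid for the
 * current band disables the fixed rate (tx_rates = 0).  The result is
 * written to the address read from IPW_MEM_FIXED_OVERRIDE. */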
4491static inline void ipw_set_fixed_rate(struct ipw_priv *priv,
4492 struct ieee80211_network *network)
4493{
4494 /* TODO: Verify that this works... */
4495 struct ipw_fixed_rate fr = {
4496 .tx_rates = priv->rates_mask
4497 };
4498 u32 reg;
4499 u16 mask = 0;
4500
4501 /* Identify 'current FW band' and match it with the fixed
4502 * Tx rates */
4503
4504 switch (priv->ieee->freq_band) {
4505 case IEEE80211_52GHZ_BAND: /* A only */
4506 /* IEEE_A */
4507 if (priv->rates_mask & ~IEEE80211_OFDM_RATES_MASK) {
4508 /* Invalid fixed rate mask */
4509 fr.tx_rates = 0;
4510 break;
4511 }
4512
4513 fr.tx_rates >>= IEEE80211_OFDM_SHIFT_MASK_A;
4514 break;
4515
4516 default: /* 2.4Ghz or Mixed */
4517 /* IEEE_B */
4518 if (network->mode == IEEE_B) {
4519 if (fr.tx_rates & ~IEEE80211_CCK_RATES_MASK) {
4520 /* Invalid fixed rate mask */
4521 fr.tx_rates = 0;
4522 }
4523 break;
4524 }
4525
4526 /* IEEE_G */
4527 if (fr.tx_rates & ~(IEEE80211_CCK_RATES_MASK |
4528 IEEE80211_OFDM_RATES_MASK)) {
4529 /* Invalid fixed rate mask */
4530 fr.tx_rates = 0;
4531 break;
4532 }
4533
4534 if (IEEE80211_OFDM_RATE_6MB_MASK & fr.tx_rates) {
4535 mask |= (IEEE80211_OFDM_RATE_6MB_MASK >> 1);
4536 fr.tx_rates &= ~IEEE80211_OFDM_RATE_6MB_MASK;
4537 }
4538
4539 if (IEEE80211_OFDM_RATE_9MB_MASK & fr.tx_rates) {
4540 mask |= (IEEE80211_OFDM_RATE_9MB_MASK >> 1);
4541 fr.tx_rates &= ~IEEE80211_OFDM_RATE_9MB_MASK;
4542 }
4543
4544 if (IEEE80211_OFDM_RATE_12MB_MASK & fr.tx_rates) {
4545 mask |= (IEEE80211_OFDM_RATE_12MB_MASK >> 1);
4546 fr.tx_rates &= ~IEEE80211_OFDM_RATE_12MB_MASK;
4547 }
4548
4549 fr.tx_rates |= mask;
4550 break;
4551 }
4552
4553 reg = ipw_read32(priv, IPW_MEM_FIXED_OVERRIDE);
4554 ipw_write_reg32(priv, reg, *(u32*)&fr);
4555}
4556
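/* Program the firmware for an association (or reassociation when @roaming):
 * apply any fixed rate, adopt the network's ESSID unless one is statically
 * configured, load WEP keys when privacy is enabled, select a single 802.11
 * mode, then send the SSID, supported rates, system config and sensitivity
 * before issuing the associate command itself. */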
4557static int ipw_associate_network(struct ipw_priv *priv,
4558 struct ieee80211_network *network,
4559 struct ipw_supported_rates *rates,
4560 int roaming)
4561{
4562 int err;
4563
4564 if (priv->config & CFG_FIXED_RATE)
4565 ipw_set_fixed_rate(priv, network);
4566
4567 if (!(priv->config & CFG_STATIC_ESSID)) {
4568 priv->essid_len = min(network->ssid_len,
4569 (u8)IW_ESSID_MAX_SIZE);
4570 memcpy(priv->essid, network->ssid, priv->essid_len);
4571 }
4572
4573 network->last_associate = jiffies;
4574
4575 memset(&priv->assoc_request, 0, sizeof(priv->assoc_request));
4576 priv->assoc_request.channel = network->channel;
4577 if ((priv->capability & CAP_PRIVACY_ON) &&
4578 (priv->capability & CAP_SHARED_KEY)) {
4579 priv->assoc_request.auth_type = AUTH_SHARED_KEY;
4580 priv->assoc_request.auth_key = priv->sec.active_key;
4581 } else {
4582 priv->assoc_request.auth_type = AUTH_OPEN;
4583 priv->assoc_request.auth_key = 0;
4584 }
4585
4586 if (priv->capability & CAP_PRIVACY_ON)
4587 ipw_send_wep_keys(priv);
4588
4589 /*
4590 * It is valid for our ieee device to support multiple modes, but
4591 * when it comes to associating to a given network we have to choose
4592 * just one mode.
4593 */
4594 if (network->mode & priv->ieee->mode & IEEE_A)
4595 priv->assoc_request.ieee_mode = IPW_A_MODE;
4596 else if (network->mode & priv->ieee->mode & IEEE_G)
4597 priv->assoc_request.ieee_mode = IPW_G_MODE;
4598 else if (network->mode & priv->ieee->mode & IEEE_B)
4599 priv->assoc_request.ieee_mode = IPW_B_MODE;
4600
4601	IPW_DEBUG_ASSOC("%sssociation attempt: '%s', channel %d, "
4602 "802.11%c [%d], enc=%s%s%s%c%c\n",
4603 roaming ? "Rea" : "A",
4604 escape_essid(priv->essid, priv->essid_len),
4605 network->channel,
4606 ipw_modes[priv->assoc_request.ieee_mode],
4607 rates->num_rates,
4608 priv->capability & CAP_PRIVACY_ON ? "on " : "off",
4609 priv->capability & CAP_PRIVACY_ON ?
4610 (priv->capability & CAP_SHARED_KEY ? "(shared)" :
4611 "(open)") : "",
4612 priv->capability & CAP_PRIVACY_ON ? " key=" : "",
4613 priv->capability & CAP_PRIVACY_ON ?
4614 '1' + priv->sec.active_key : '.',
4615 priv->capability & CAP_PRIVACY_ON ?
4616 '.' : ' ');
4617
4618 priv->assoc_request.beacon_interval = network->beacon_interval;
4619 if ((priv->ieee->iw_mode == IW_MODE_ADHOC) &&
4620 (network->time_stamp[0] == 0) &&
4621 (network->time_stamp[1] == 0)) {
4622 priv->assoc_request.assoc_type = HC_IBSS_START;
4623 priv->assoc_request.assoc_tsf_msw = 0;
4624 priv->assoc_request.assoc_tsf_lsw = 0;
4625 } else {
4626 if (unlikely(roaming))
4627 priv->assoc_request.assoc_type = HC_REASSOCIATE;
4628 else
4629 priv->assoc_request.assoc_type = HC_ASSOCIATE;
4630 priv->assoc_request.assoc_tsf_msw = network->time_stamp[1];
4631 priv->assoc_request.assoc_tsf_lsw = network->time_stamp[0];
4632 }
4633
4634 memcpy(&priv->assoc_request.bssid, network->bssid, ETH_ALEN);
4635
4636 if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
4637 memset(&priv->assoc_request.dest, 0xFF, ETH_ALEN);
4638 priv->assoc_request.atim_window = network->atim_window;
4639 } else {
4640 memcpy(&priv->assoc_request.dest, network->bssid,
4641 ETH_ALEN);
4642 priv->assoc_request.atim_window = 0;
4643 }
4644
4645 priv->assoc_request.capability = network->capability;
4646 priv->assoc_request.listen_interval = network->listen_interval;
4647
4648 err = ipw_send_ssid(priv, priv->essid, priv->essid_len);
4649 if (err) {
4650 IPW_DEBUG_HC("Attempt to send SSID command failed.\n");
4651 return err;
4652 }
4653
4654 rates->ieee_mode = priv->assoc_request.ieee_mode;
4655 rates->purpose = IPW_RATE_CONNECT;
4656 ipw_send_supported_rates(priv, rates);
4657
4658 if (priv->assoc_request.ieee_mode == IPW_G_MODE)
4659 priv->sys_config.dot11g_auto_detection = 1;
4660 else
4661 priv->sys_config.dot11g_auto_detection = 0;
4662 err = ipw_send_system_config(priv, &priv->sys_config);
4663 if (err) {
4664 IPW_DEBUG_HC("Attempt to send sys config command failed.\n");
4665 return err;
4666 }
4667
4668 IPW_DEBUG_ASSOC("Association sensitivity: %d\n", network->stats.rssi);
4669 err = ipw_set_sensitivity(priv, network->stats.rssi);
4670 if (err) {
4671		IPW_DEBUG_HC("Attempt to send sensitivity command failed.\n");
4672 return err;
4673 }
4674
4675 /*
4676 * If preemption is enabled, it is possible for the association
4677 * to complete before we return from ipw_send_associate. Therefore
4678	 * we have to be sure to update our private data first.
4679 */
4680 priv->channel = network->channel;
4681 memcpy(priv->bssid, network->bssid, ETH_ALEN);
4682 priv->status |= STATUS_ASSOCIATING;
4683 priv->status &= ~STATUS_SECURITY_UPDATED;
4684
4685 priv->assoc_network = network;
4686
4687 err = ipw_send_associate(priv, &priv->assoc_request);
4688 if (err) {
4689 IPW_DEBUG_HC("Attempt to send associate command failed.\n");
4690 return err;
4691 }
4692
4693 IPW_DEBUG(IPW_DL_STATE, "associating: '%s' " MAC_FMT " \n",
4694 escape_essid(priv->essid, priv->essid_len),
4695 MAC_ARG(priv->bssid));
4696
4697 return 0;
4698}
4699
4700static void ipw_roam(void *data)
4701{
4702 struct ipw_priv *priv = data;
4703 struct ieee80211_network *network = NULL;
4704 struct ipw_network_match match = {
4705 .network = priv->assoc_network
4706 };
4707
4708 /* The roaming process is as follows:
4709 *
4710 * 1. Missed beacon threshold triggers the roaming process by
4711 * setting the status ROAM bit and requesting a scan.
4712 * 2. When the scan completes, it schedules the ROAM work
4713 * 3. The ROAM work looks at all of the known networks for one that
4714 * is a better network than the currently associated. If none
4715 * found, the ROAM process is over (ROAM bit cleared)
4716 * 4. If a better network is found, a disassociation request is
4717 * sent.
4718 * 5. When the disassociation completes, the roam work is again
4719 * scheduled. The second time through, the driver is no longer
4720 * associated, and the newly selected network is sent an
4721 * association request.
4722	 * 6. At this point, the roaming process is complete and the ROAM
4723 * status bit is cleared.
4724 */
4725
4726 /* If we are no longer associated, and the roaming bit is no longer
4727 * set, then we are not actively roaming, so just return */
4728 if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ROAMING)))
4729 return;
4730
4731 if (priv->status & STATUS_ASSOCIATED) {
4732 /* First pass through ROAM process -- look for a better
4733 * network */
4734 u8 rssi = priv->assoc_network->stats.rssi;
4735 priv->assoc_network->stats.rssi = -128;
4736 list_for_each_entry(network, &priv->ieee->network_list, list) {
4737 if (network != priv->assoc_network)
4738 ipw_best_network(priv, &match, network, 1);
4739 }
4740 priv->assoc_network->stats.rssi = rssi;
4741
4742 if (match.network == priv->assoc_network) {
4743 IPW_DEBUG_ASSOC("No better APs in this network to "
4744 "roam to.\n");
4745 priv->status &= ~STATUS_ROAMING;
4746 ipw_debug_config(priv);
4747 return;
4748 }
4749
4750 ipw_send_disassociate(priv, 1);
4751 priv->assoc_network = match.network;
4752
4753 return;
4754 }
4755
4756 /* Second pass through ROAM process -- request association */
4757 ipw_compatible_rates(priv, priv->assoc_network, &match.rates);
4758 ipw_associate_network(priv, priv->assoc_network, &match.rates, 1);
4759 priv->status &= ~STATUS_ROAMING;
4760}
4761
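/* Scan-complete work handler: run every known network through
 * ipw_best_network(); if nothing matched and we are in ad-hoc mode with
 * CFG_ADHOC_CREATE and a static ESSID, create our own IBSS.  Associate to
 * the winner, or schedule another scan if there is none. */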
4762static void ipw_associate(void *data)
4763{
4764 struct ipw_priv *priv = data;
4765
4766 struct ieee80211_network *network = NULL;
4767 struct ipw_network_match match = {
4768 .network = NULL
4769 };
4770 struct ipw_supported_rates *rates;
4771 struct list_head *element;
4772
4773 if (!(priv->config & CFG_ASSOCIATE) &&
4774 !(priv->config & (CFG_STATIC_ESSID |
4775 CFG_STATIC_CHANNEL |
4776 CFG_STATIC_BSSID))) {
4777 IPW_DEBUG_ASSOC("Not attempting association (associate=0)\n");
4778 return;
4779 }
4780
4781 list_for_each_entry(network, &priv->ieee->network_list, list)
4782 ipw_best_network(priv, &match, network, 0);
4783
4784 network = match.network;
4785 rates = &match.rates;
4786
4787 if (network == NULL &&
4788 priv->ieee->iw_mode == IW_MODE_ADHOC &&
4789 priv->config & CFG_ADHOC_CREATE &&
4790 priv->config & CFG_STATIC_ESSID &&
4791 !list_empty(&priv->ieee->network_free_list)) {
4792 element = priv->ieee->network_free_list.next;
4793 network = list_entry(element, struct ieee80211_network,
4794 list);
4795 ipw_adhoc_create(priv, network);
4796 rates = &priv->rates;
4797 list_del(element);
4798 list_add_tail(&network->list, &priv->ieee->network_list);
4799 }
4800
4801 /* If we reached the end of the list, then we don't have any valid
4802 * matching APs */
4803 if (!network) {
4804 ipw_debug_config(priv);
4805
4806 queue_delayed_work(priv->workqueue, &priv->request_scan,
4807 SCAN_INTERVAL);
4808
4809 return;
4810 }
4811
4812 ipw_associate_network(priv, network, rates, 0);
4813}
4814
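/* Strip the ipw RX header from the skb, size it to the reported frame
 * length and hand the 802.11 frame to ieee80211_rx().  On success the
 * ieee80211 layer owns the skb (rxb->skb is cleared); on failure the rx
 * error counters are bumped and the caller frees the skb. */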
4815static inline void ipw_handle_data_packet(struct ipw_priv *priv,
4816 struct ipw_rx_mem_buffer *rxb,
4817 struct ieee80211_rx_stats *stats)
4818{
4819 struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
4820
4821 /* We received data from the HW, so stop the watchdog */
4822 priv->net_dev->trans_start = jiffies;
4823
4824 /* We only process data packets if the
4825 * interface is open */
4826 if (unlikely((pkt->u.frame.length + IPW_RX_FRAME_SIZE) >
4827 skb_tailroom(rxb->skb))) {
4828 priv->ieee->stats.rx_errors++;
4829 priv->wstats.discard.misc++;
4830 IPW_DEBUG_DROP("Corruption detected! Oh no!\n");
4831 return;
4832 } else if (unlikely(!netif_running(priv->net_dev))) {
4833 priv->ieee->stats.rx_dropped++;
4834 priv->wstats.discard.misc++;
4835 IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
4836 return;
4837 }
4838
4839 /* Advance skb->data to the start of the actual payload */
4840 skb_reserve(rxb->skb, offsetof(struct ipw_rx_packet, u.frame.data));
4841
4842 /* Set the size of the skb to the size of the frame */
4843 skb_put(rxb->skb, pkt->u.frame.length);
4844
4845 IPW_DEBUG_RX("Rx packet of %d bytes.\n", rxb->skb->len);
4846
4847 if (!ieee80211_rx(priv->ieee, rxb->skb, stats))
4848 priv->ieee->stats.rx_errors++;
4849 else /* ieee80211_rx succeeded, so it now owns the SKB */
4850 rxb->skb = NULL;
4851}
4852
4853
4854/*
4855 * Main entry function for receiving a packet with 802.11 headers.  This
4856 * should be called whenever the FW has notified us that there is a new
4857 * skb in the receive queue.
4858 */
4859static void ipw_rx(struct ipw_priv *priv)
4860{
4861 struct ipw_rx_mem_buffer *rxb;
4862 struct ipw_rx_packet *pkt;
4863 struct ieee80211_hdr *header;
4864 u32 r, w, i;
4865 u8 network_packet;
4866
4867 r = ipw_read32(priv, CX2_RX_READ_INDEX);
4868 w = ipw_read32(priv, CX2_RX_WRITE_INDEX);
4869 i = (priv->rxq->processed + 1) % RX_QUEUE_SIZE;
4870
4871 while (i != r) {
4872 rxb = priv->rxq->queue[i];
4873#ifdef CONFIG_IPW_DEBUG
4874 if (unlikely(rxb == NULL)) {
4875 printk(KERN_CRIT "Queue not allocated!\n");
4876 break;
4877 }
4878#endif
4879 priv->rxq->queue[i] = NULL;
4880
4881 pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->dma_addr,
4882 CX2_RX_BUF_SIZE,
4883 PCI_DMA_FROMDEVICE);
4884
4885 pkt = (struct ipw_rx_packet *)rxb->skb->data;
4886 IPW_DEBUG_RX("Packet: type=%02X seq=%02X bits=%02X\n",
4887 pkt->header.message_type,
4888 pkt->header.rx_seq_num,
4889 pkt->header.control_bits);
4890
4891 switch (pkt->header.message_type) {
4892 case RX_FRAME_TYPE: /* 802.11 frame */ {
4893 struct ieee80211_rx_stats stats = {
4894 .rssi = pkt->u.frame.rssi_dbm -
4895 IPW_RSSI_TO_DBM,
4896 .signal = pkt->u.frame.signal,
4897 .rate = pkt->u.frame.rate,
4898 .mac_time = jiffies,
4899 .received_channel =
4900 pkt->u.frame.received_channel,
4901 .freq = (pkt->u.frame.control & (1<<0)) ?
4902 IEEE80211_24GHZ_BAND : IEEE80211_52GHZ_BAND,
4903 .len = pkt->u.frame.length,
4904 };
4905
4906 if (stats.rssi != 0)
4907 stats.mask |= IEEE80211_STATMASK_RSSI;
4908 if (stats.signal != 0)
4909 stats.mask |= IEEE80211_STATMASK_SIGNAL;
4910 if (stats.rate != 0)
4911 stats.mask |= IEEE80211_STATMASK_RATE;
4912
4913 priv->rx_packets++;
4914
4915#ifdef CONFIG_IPW_PROMISC
4916 if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
4917 ipw_handle_data_packet(priv, rxb, &stats);
4918 break;
4919 }
4920#endif
4921
4922 header = (struct ieee80211_hdr *)(rxb->skb->data +
4923 IPW_RX_FRAME_SIZE);
4924 /* TODO: Check Ad-Hoc dest/source and make sure
4925 * that we are actually parsing these packets
4926 * correctly -- we should probably use the
4927 * frame control of the packet and disregard
4928 * the current iw_mode */
4929 switch (priv->ieee->iw_mode) {
4930 case IW_MODE_ADHOC:
4931 network_packet =
4932 !memcmp(header->addr1,
4933 priv->net_dev->dev_addr,
4934 ETH_ALEN) ||
4935 !memcmp(header->addr3,
4936 priv->bssid, ETH_ALEN) ||
4937 is_broadcast_ether_addr(header->addr1) ||
4938 is_multicast_ether_addr(header->addr1);
4939 break;
4940
4941 case IW_MODE_INFRA:
4942 default:
4943 network_packet =
4944 !memcmp(header->addr3,
4945 priv->bssid, ETH_ALEN) ||
4946 !memcmp(header->addr1,
4947 priv->net_dev->dev_addr,
4948 ETH_ALEN) ||
4949 is_broadcast_ether_addr(header->addr1) ||
4950 is_multicast_ether_addr(header->addr1);
4951 break;
4952 }
4953
4954 if (network_packet && priv->assoc_network) {
4955 priv->assoc_network->stats.rssi = stats.rssi;
4956 average_add(&priv->average_rssi,
4957 stats.rssi);
4958 priv->last_rx_rssi = stats.rssi;
4959 }
4960
4961 IPW_DEBUG_RX("Frame: len=%u\n", pkt->u.frame.length);
4962
4963 if (pkt->u.frame.length < frame_hdr_len(header)) {
4964 IPW_DEBUG_DROP("Received packet is too small. "
4965 "Dropping.\n");
4966 priv->ieee->stats.rx_errors++;
4967 priv->wstats.discard.misc++;
4968 break;
4969 }
4970
4971 switch (WLAN_FC_GET_TYPE(header->frame_ctl)) {
4972 case IEEE80211_FTYPE_MGMT:
4973 ieee80211_rx_mgt(priv->ieee, header, &stats);
4974 if (priv->ieee->iw_mode == IW_MODE_ADHOC &&
4975 ((WLAN_FC_GET_STYPE(header->frame_ctl) ==
4976 IEEE80211_STYPE_PROBE_RESP) ||
4977 (WLAN_FC_GET_STYPE(header->frame_ctl) ==
4978 IEEE80211_STYPE_BEACON)) &&
4979 !memcmp(header->addr3, priv->bssid, ETH_ALEN))
4980 ipw_add_station(priv, header->addr2);
4981 break;
4982
4983 case IEEE80211_FTYPE_CTL:
4984 break;
4985
4986 case IEEE80211_FTYPE_DATA:
4987 if (network_packet)
4988 ipw_handle_data_packet(priv, rxb, &stats);
4989 else
4990 IPW_DEBUG_DROP("Dropping: " MAC_FMT
4991 ", " MAC_FMT ", " MAC_FMT "\n",
4992 MAC_ARG(header->addr1), MAC_ARG(header->addr2),
4993 MAC_ARG(header->addr3));
4994 break;
4995 }
4996 break;
4997 }
4998
4999 case RX_HOST_NOTIFICATION_TYPE: {
5000 IPW_DEBUG_RX("Notification: subtype=%02X flags=%02X size=%d\n",
5001 pkt->u.notification.subtype,
5002 pkt->u.notification.flags,
5003 pkt->u.notification.size);
5004 ipw_rx_notification(priv, &pkt->u.notification);
5005 break;
5006 }
5007
5008 default:
5009 IPW_DEBUG_RX("Bad Rx packet of type %d\n",
5010 pkt->header.message_type);
5011 break;
5012 }
5013
5014 /* For now we just don't re-use anything. We can tweak this
5015 * later to try and re-use notification packets and SKBs that
5016 * fail to Rx correctly */
5017 if (rxb->skb != NULL) {
5018 dev_kfree_skb_any(rxb->skb);
5019 rxb->skb = NULL;
5020 }
5021
5022 pci_unmap_single(priv->pci_dev, rxb->dma_addr,
5023 CX2_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
5024 list_add_tail(&rxb->list, &priv->rxq->rx_used);
5025
5026 i = (i + 1) % RX_QUEUE_SIZE;
5027 }
5028
5029 /* Backtrack one entry */
5030 priv->rxq->processed = (i ? i : RX_QUEUE_SIZE) - 1;
5031
5032 ipw_rx_queue_restock(priv);
5033}
5034
5035static void ipw_abort_scan(struct ipw_priv *priv)
5036{
5037 int err;
5038
5039 if (priv->status & STATUS_SCAN_ABORTING) {
5040 IPW_DEBUG_HC("Ignoring concurrent scan abort request.\n");
5041 return;
5042 }
5043 priv->status |= STATUS_SCAN_ABORTING;
5044
5045 err = ipw_send_scan_abort(priv);
5046 if (err)
5047 IPW_DEBUG_HC("Request to abort scan failed.\n");
5048}
5049
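/* Build and send an ipw_scan_request_ext to the firmware.  The scan is
 * directed at the configured ESSID when roaming (or on every other scan
 * while unassociated with a static ESSID), otherwise it is a broadcast
 * scan.  Per-band channel lists are assembled from band_a_active_channel[]
 * and band_b_active_channel[], skipping the channel we are currently
 * associated on. */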
5050static int ipw_request_scan(struct ipw_priv *priv)
5051{
5052 struct ipw_scan_request_ext scan;
5053 int channel_index = 0;
5054 int i, err, scan_type;
5055
5056 if (priv->status & STATUS_EXIT_PENDING) {
5057 IPW_DEBUG_SCAN("Aborting scan due to device shutdown\n");
5058 priv->status |= STATUS_SCAN_PENDING;
5059 return 0;
5060 }
5061
5062 if (priv->status & STATUS_SCANNING) {
5063 IPW_DEBUG_HC("Concurrent scan requested. Aborting first.\n");
5064 priv->status |= STATUS_SCAN_PENDING;
5065 ipw_abort_scan(priv);
5066 return 0;
5067 }
5068
5069 if (priv->status & STATUS_SCAN_ABORTING) {
5070 IPW_DEBUG_HC("Scan request while abort pending. Queuing.\n");
5071 priv->status |= STATUS_SCAN_PENDING;
5072 return 0;
5073 }
5074
5075 if (priv->status & STATUS_RF_KILL_MASK) {
5076 IPW_DEBUG_HC("Aborting scan due to RF Kill activation\n");
5077 priv->status |= STATUS_SCAN_PENDING;
5078 return 0;
5079 }
5080
5081 memset(&scan, 0, sizeof(scan));
5082
5083 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_SCAN] = 20;
5084 scan.dwell_time[IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN] = 20;
5085 scan.dwell_time[IPW_SCAN_PASSIVE_FULL_DWELL_SCAN] = 20;
5086
5087 scan.full_scan_index = ieee80211_get_scans(priv->ieee);
5088	/* If we are roaming, then make this a directed scan for the current
5089	 * network.  Otherwise, while unassociated with a static ESSID, make
5090	 * every other scan a directed scan for that ESSID. */
5091 if ((priv->status & STATUS_ROAMING) || (
5092 !(priv->status & STATUS_ASSOCIATED) &&
5093 (priv->config & CFG_STATIC_ESSID) &&
5094 (scan.full_scan_index % 2))) {
5095 err = ipw_send_ssid(priv, priv->essid, priv->essid_len);
5096 if (err) {
5097 IPW_DEBUG_HC("Attempt to send SSID command failed.\n");
5098 return err;
5099 }
5100
5101 scan_type = IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN;
5102 } else {
5103 scan_type = IPW_SCAN_ACTIVE_BROADCAST_SCAN;
5104 }
5105
5106 if (priv->ieee->freq_band & IEEE80211_52GHZ_BAND) {
5107 int start = channel_index;
5108 for (i = 0; i < MAX_A_CHANNELS; i++) {
5109 if (band_a_active_channel[i] == 0)
5110 break;
5111 if ((priv->status & STATUS_ASSOCIATED) &&
5112 band_a_active_channel[i] == priv->channel)
5113 continue;
5114 channel_index++;
5115 scan.channels_list[channel_index] =
5116 band_a_active_channel[i];
5117 ipw_set_scan_type(&scan, channel_index, scan_type);
5118 }
5119
5120 if (start != channel_index) {
5121 scan.channels_list[start] = (u8)(IPW_A_MODE << 6) |
5122 (channel_index - start);
5123 channel_index++;
5124 }
5125 }
5126
5127 if (priv->ieee->freq_band & IEEE80211_24GHZ_BAND) {
5128 int start = channel_index;
5129 for (i = 0; i < MAX_B_CHANNELS; i++) {
5130 if (band_b_active_channel[i] == 0)
5131 break;
5132 if ((priv->status & STATUS_ASSOCIATED) &&
5133 band_b_active_channel[i] == priv->channel)
5134 continue;
5135 channel_index++;
5136 scan.channels_list[channel_index] =
5137 band_b_active_channel[i];
5138 ipw_set_scan_type(&scan, channel_index, scan_type);
5139 }
5140
5141 if (start != channel_index) {
5142 scan.channels_list[start] = (u8)(IPW_B_MODE << 6) |
5143 (channel_index - start);
5144 }
5145 }
5146
5147 err = ipw_send_scan_request_ext(priv, &scan);
5148 if (err) {
5149 IPW_DEBUG_HC("Sending scan command failed: %08X\n",
5150 err);
5151 return -EIO;
5152 }
5153
5154 priv->status |= STATUS_SCANNING;
5155 priv->status &= ~STATUS_SCAN_PENDING;
5156
5157 return 0;
5158}
5159
5160/*
5161 * This file defines the Wireless Extension handlers. It does not
5162 * define any methods of hardware manipulation and relies on the
5163 * functions defined in ipw_main to provide the HW interaction.
5164 *
5165 * The exception to this is the use of the ipw_get_ordinal()
5166 * function used to poll the hardware vs. making unnecessary calls.
5167 *
5168 */
5169
5170static int ipw_wx_get_name(struct net_device *dev,
5171 struct iw_request_info *info,
5172 union iwreq_data *wrqu, char *extra)
5173{
5174 struct ipw_priv *priv = ieee80211_priv(dev);
5175 if (!(priv->status & STATUS_ASSOCIATED))
5176 strcpy(wrqu->name, "unassociated");
5177 else
5178 snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11%c",
5179 ipw_modes[priv->assoc_request.ieee_mode]);
5180 IPW_DEBUG_WX("Name: %s\n", wrqu->name);
5181 return 0;
5182}
5183
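/* channel == 0 selects ANY: clear CFG_STATIC_CHANNEL and try to associate
 * if we are idle.  A non-zero channel sets CFG_STATIC_CHANNEL and, if the
 * channel actually changed while associated or associating, forces a
 * disassociation so the new channel takes effect (otherwise an association
 * attempt is kicked off). */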
5184static int ipw_set_channel(struct ipw_priv *priv, u8 channel)
5185{
5186 if (channel == 0) {
5187 IPW_DEBUG_INFO("Setting channel to ANY (0)\n");
5188 priv->config &= ~CFG_STATIC_CHANNEL;
5189 if (!(priv->status & (STATUS_SCANNING | STATUS_ASSOCIATED |
5190 STATUS_ASSOCIATING))) {
5191 IPW_DEBUG_ASSOC("Attempting to associate with new "
5192 "parameters.\n");
5193 ipw_associate(priv);
5194 }
5195
5196 return 0;
5197 }
5198
5199 priv->config |= CFG_STATIC_CHANNEL;
5200
5201 if (priv->channel == channel) {
5202 IPW_DEBUG_INFO(
5203 "Request to set channel to current value (%d)\n",
5204 channel);
5205 return 0;
5206 }
5207
5208 IPW_DEBUG_INFO("Setting channel to %i\n", (int)channel);
5209 priv->channel = channel;
5210
5211 /* If we are currently associated, or trying to associate
5212 * then see if this is a new channel (causing us to disassociate) */
5213 if (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
5214 IPW_DEBUG_ASSOC("Disassociating due to channel change.\n");
5215 ipw_disassociate(priv);
5216 } else {
5217 ipw_associate(priv);
5218 }
5219
5220 return 0;
5221}
5222
5223static int ipw_wx_set_freq(struct net_device *dev,
5224 struct iw_request_info *info,
5225 union iwreq_data *wrqu, char *extra)
5226{
5227 struct ipw_priv *priv = ieee80211_priv(dev);
5228 struct iw_freq *fwrq = &wrqu->freq;
5229
5230 /* if setting by freq convert to channel */
5231 if (fwrq->e == 1) {
5232 if ((fwrq->m >= (int) 2.412e8 &&
5233 fwrq->m <= (int) 2.487e8)) {
5234 int f = fwrq->m / 100000;
5235 int c = 0;
5236
5237 while ((c < REG_MAX_CHANNEL) &&
5238 (f != ipw_frequencies[c]))
5239 c++;
5240
5241 /* hack to fall through */
5242 fwrq->e = 0;
5243 fwrq->m = c + 1;
5244 }
5245 }
5246
5247 if (fwrq->e > 0 || fwrq->m > 1000)
5248 return -EOPNOTSUPP;
5249
5250 IPW_DEBUG_WX("SET Freq/Channel -> %d \n", fwrq->m);
5251	return ipw_set_channel(priv, (u8)fwrq->m);
5254}
5255
5256
5257static int ipw_wx_get_freq(struct net_device *dev,
5258 struct iw_request_info *info,
5259 union iwreq_data *wrqu, char *extra)
5260{
5261 struct ipw_priv *priv = ieee80211_priv(dev);
5262
5263 wrqu->freq.e = 0;
5264
5265 /* If we are associated, trying to associate, or have a statically
5266 * configured CHANNEL then return that; otherwise return ANY */
5267 if (priv->config & CFG_STATIC_CHANNEL ||
5268 priv->status & (STATUS_ASSOCIATING | STATUS_ASSOCIATED))
5269 wrqu->freq.m = priv->channel;
5270 else
5271 wrqu->freq.m = 0;
5272
5273 IPW_DEBUG_WX("GET Freq/Channel -> %d \n", priv->channel);
5274 return 0;
5275}
5276
5277static int ipw_wx_set_mode(struct net_device *dev,
5278 struct iw_request_info *info,
5279 union iwreq_data *wrqu, char *extra)
5280{
5281 struct ipw_priv *priv = ieee80211_priv(dev);
5282 int err = 0;
5283
5284 IPW_DEBUG_WX("Set MODE: %d\n", wrqu->mode);
5285
5286 if (wrqu->mode == priv->ieee->iw_mode)
5287 return 0;
5288
5289 switch (wrqu->mode) {
5290#ifdef CONFIG_IPW_PROMISC
5291 case IW_MODE_MONITOR:
5292#endif
5293 case IW_MODE_ADHOC:
5294 case IW_MODE_INFRA:
5295 break;
5296 case IW_MODE_AUTO:
5297 wrqu->mode = IW_MODE_INFRA;
5298 break;
5299 default:
5300 return -EINVAL;
5301 }
5302
5303#ifdef CONFIG_IPW_PROMISC
5304 if (priv->ieee->iw_mode == IW_MODE_MONITOR)
5305 priv->net_dev->type = ARPHRD_ETHER;
5306
5307 if (wrqu->mode == IW_MODE_MONITOR)
5308 priv->net_dev->type = ARPHRD_IEEE80211;
5309#endif /* CONFIG_IPW_PROMISC */
5310
5311#ifdef CONFIG_PM
5312 /* Free the existing firmware and reset the fw_loaded
5313	 * flag so ipw_load() will bring in the new firmware */
5314 if (fw_loaded) {
5315 fw_loaded = 0;
5316 }
5317
5318 release_firmware(bootfw);
5319 release_firmware(ucode);
5320 release_firmware(firmware);
5321 bootfw = ucode = firmware = NULL;
5322#endif
5323
5324 priv->ieee->iw_mode = wrqu->mode;
5325 ipw_adapter_restart(priv);
5326
5327 return err;
5328}
5329
5330static int ipw_wx_get_mode(struct net_device *dev,
5331 struct iw_request_info *info,
5332 union iwreq_data *wrqu, char *extra)
5333{
5334 struct ipw_priv *priv = ieee80211_priv(dev);
5335
5336 wrqu->mode = priv->ieee->iw_mode;
5337 IPW_DEBUG_WX("Get MODE -> %d\n", wrqu->mode);
5338
5339 return 0;
5340}
5341
5342
5343#define DEFAULT_RTS_THRESHOLD 2304U
5344#define MIN_RTS_THRESHOLD 1U
5345#define MAX_RTS_THRESHOLD 2304U
5346#define DEFAULT_BEACON_INTERVAL 100U
5347#define DEFAULT_SHORT_RETRY_LIMIT 7U
5348#define DEFAULT_LONG_RETRY_LIMIT 4U
5349
5350/* Values are in microseconds */
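/* Indexed by (power save level - 1); reported by ipw_wx_get_powermode()
 * below when describing the selected power-save level. */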
5351static const s32 timeout_duration[] = {
5352 350000,
5353 250000,
5354 75000,
5355 37000,
5356 25000,
5357};
5358
5359static const s32 period_duration[] = {
5360 400000,
5361 700000,
5362 1000000,
5363 1000000,
5364 1000000
5365};
5366
5367static int ipw_wx_get_range(struct net_device *dev,
5368 struct iw_request_info *info,
5369 union iwreq_data *wrqu, char *extra)
5370{
5371 struct ipw_priv *priv = ieee80211_priv(dev);
5372 struct iw_range *range = (struct iw_range *)extra;
5373 u16 val;
5374 int i;
5375
5376 wrqu->data.length = sizeof(*range);
5377 memset(range, 0, sizeof(*range));
5378
5379 /* 54Mbs == ~27 Mb/s real (802.11g) */
5380 range->throughput = 27 * 1000 * 1000;
5381
5382 range->max_qual.qual = 100;
5383 /* TODO: Find real max RSSI and stick here */
5384 range->max_qual.level = 0;
5385 range->max_qual.noise = 0;
5386 range->max_qual.updated = 7; /* Updated all three */
5387
5388 range->avg_qual.qual = 70;
5389	/* TODO: Find real 'good' to 'bad' threshold value for RSSI */
5390 range->avg_qual.level = 0; /* FIXME to real average level */
5391 range->avg_qual.noise = 0;
5392 range->avg_qual.updated = 7; /* Updated all three */
5393
5394 range->num_bitrates = min(priv->rates.num_rates, (u8)IW_MAX_BITRATES);
5395
5396 for (i = 0; i < range->num_bitrates; i++)
5397 range->bitrate[i] = (priv->rates.supported_rates[i] & 0x7F) *
5398 500000;
5399
5400 range->max_rts = DEFAULT_RTS_THRESHOLD;
5401 range->min_frag = MIN_FRAG_THRESHOLD;
5402 range->max_frag = MAX_FRAG_THRESHOLD;
5403
5404 range->encoding_size[0] = 5;
5405 range->encoding_size[1] = 13;
5406 range->num_encoding_sizes = 2;
5407 range->max_encoding_tokens = WEP_KEYS;
5408
5409 /* Set the Wireless Extension versions */
5410 range->we_version_compiled = WIRELESS_EXT;
5411 range->we_version_source = 16;
5412
5413 range->num_channels = FREQ_COUNT;
5414
5415 val = 0;
5416 for (i = 0; i < FREQ_COUNT; i++) {
5417 range->freq[val].i = i + 1;
5418 range->freq[val].m = ipw_frequencies[i] * 100000;
5419 range->freq[val].e = 1;
5420 val++;
5421
5422 if (val == IW_MAX_FREQUENCIES)
5423 break;
5424 }
5425 range->num_frequency = val;
5426
5427 IPW_DEBUG_WX("GET Range\n");
5428 return 0;
5429}
5430
5431static int ipw_wx_set_wap(struct net_device *dev,
5432 struct iw_request_info *info,
5433 union iwreq_data *wrqu, char *extra)
5434{
5435 struct ipw_priv *priv = ieee80211_priv(dev);
5436
5437 static const unsigned char any[] = {
5438 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
5439 };
5440 static const unsigned char off[] = {
5441 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
5442 };
5443
5444 if (wrqu->ap_addr.sa_family != ARPHRD_ETHER)
5445 return -EINVAL;
5446
5447 if (!memcmp(any, wrqu->ap_addr.sa_data, ETH_ALEN) ||
5448 !memcmp(off, wrqu->ap_addr.sa_data, ETH_ALEN)) {
5449 /* we disable mandatory BSSID association */
5450 IPW_DEBUG_WX("Setting AP BSSID to ANY\n");
5451 priv->config &= ~CFG_STATIC_BSSID;
5452 if (!(priv->status & (STATUS_SCANNING | STATUS_ASSOCIATED |
5453 STATUS_ASSOCIATING))) {
5454 IPW_DEBUG_ASSOC("Attempting to associate with new "
5455 "parameters.\n");
5456 ipw_associate(priv);
5457 }
5458
5459 return 0;
5460 }
5461
5462 priv->config |= CFG_STATIC_BSSID;
5463 if (!memcmp(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN)) {
5464 IPW_DEBUG_WX("BSSID set to current BSSID.\n");
5465 return 0;
5466 }
5467
5468 IPW_DEBUG_WX("Setting mandatory BSSID to " MAC_FMT "\n",
5469 MAC_ARG(wrqu->ap_addr.sa_data));
5470
5471 memcpy(priv->bssid, wrqu->ap_addr.sa_data, ETH_ALEN);
5472
5473 /* If we are currently associated, or trying to associate
5474 * then see if this is a new BSSID (causing us to disassociate) */
5475 if (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
5476 IPW_DEBUG_ASSOC("Disassociating due to BSSID change.\n");
5477 ipw_disassociate(priv);
5478 } else {
5479 ipw_associate(priv);
5480 }
5481
5482 return 0;
5483}
5484
5485static int ipw_wx_get_wap(struct net_device *dev,
5486 struct iw_request_info *info,
5487 union iwreq_data *wrqu, char *extra)
5488{
5489 struct ipw_priv *priv = ieee80211_priv(dev);
5490 /* If we are associated, trying to associate, or have a statically
5491 * configured BSSID then return that; otherwise return ANY */
5492 if (priv->config & CFG_STATIC_BSSID ||
5493 priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
5494 wrqu->ap_addr.sa_family = ARPHRD_ETHER;
5495 memcpy(wrqu->ap_addr.sa_data, &priv->bssid, ETH_ALEN);
5496 } else
5497 memset(wrqu->ap_addr.sa_data, 0, ETH_ALEN);
5498
5499 IPW_DEBUG_WX("Getting WAP BSSID: " MAC_FMT "\n",
5500 MAC_ARG(wrqu->ap_addr.sa_data));
5501 return 0;
5502}
5503
5504static int ipw_wx_set_essid(struct net_device *dev,
5505 struct iw_request_info *info,
5506 union iwreq_data *wrqu, char *extra)
5507{
5508 struct ipw_priv *priv = ieee80211_priv(dev);
5509 char *essid = ""; /* ANY */
5510 int length = 0;
5511
5512 if (wrqu->essid.flags && wrqu->essid.length) {
5513 length = wrqu->essid.length - 1;
5514 essid = extra;
5515 }
5516 if (length == 0) {
5517 IPW_DEBUG_WX("Setting ESSID to ANY\n");
5518 priv->config &= ~CFG_STATIC_ESSID;
5519 if (!(priv->status & (STATUS_SCANNING | STATUS_ASSOCIATED |
5520 STATUS_ASSOCIATING))) {
5521 IPW_DEBUG_ASSOC("Attempting to associate with new "
5522 "parameters.\n");
5523 ipw_associate(priv);
5524 }
5525
5526 return 0;
5527 }
5528
5529 length = min(length, IW_ESSID_MAX_SIZE);
5530
5531 priv->config |= CFG_STATIC_ESSID;
5532
5533 if (priv->essid_len == length && !memcmp(priv->essid, extra, length)) {
5534 IPW_DEBUG_WX("ESSID set to current ESSID.\n");
5535 return 0;
5536 }
5537
5538 IPW_DEBUG_WX("Setting ESSID: '%s' (%d)\n", escape_essid(essid, length),
5539 length);
5540
5541 priv->essid_len = length;
5542 memcpy(priv->essid, essid, priv->essid_len);
5543
5544 /* If we are currently associated, or trying to associate
5545 * then see if this is a new ESSID (causing us to disassociate) */
5546 if (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
5547 IPW_DEBUG_ASSOC("Disassociating due to ESSID change.\n");
5548 ipw_disassociate(priv);
5549 } else {
5550 ipw_associate(priv);
5551 }
5552
5553 return 0;
5554}
5555
5556static int ipw_wx_get_essid(struct net_device *dev,
5557 struct iw_request_info *info,
5558 union iwreq_data *wrqu, char *extra)
5559{
5560 struct ipw_priv *priv = ieee80211_priv(dev);
5561
5562 /* If we are associated, trying to associate, or have a statically
5563 * configured ESSID then return that; otherwise return ANY */
5564 if (priv->config & CFG_STATIC_ESSID ||
5565 priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
5566 IPW_DEBUG_WX("Getting essid: '%s'\n",
5567 escape_essid(priv->essid, priv->essid_len));
5568 memcpy(extra, priv->essid, priv->essid_len);
5569 wrqu->essid.length = priv->essid_len;
5570 wrqu->essid.flags = 1; /* active */
5571 } else {
5572 IPW_DEBUG_WX("Getting essid: ANY\n");
5573 wrqu->essid.length = 0;
5574 wrqu->essid.flags = 0; /* active */
5575 }
5576
5577 return 0;
5578}
5579
5580static int ipw_wx_set_nick(struct net_device *dev,
5581 struct iw_request_info *info,
5582 union iwreq_data *wrqu, char *extra)
5583{
5584 struct ipw_priv *priv = ieee80211_priv(dev);
5585
5586 IPW_DEBUG_WX("Setting nick to '%s'\n", extra);
5587 if (wrqu->data.length > IW_ESSID_MAX_SIZE)
5588 return -E2BIG;
5589
5590 wrqu->data.length = min((size_t)wrqu->data.length, sizeof(priv->nick));
5591 memset(priv->nick, 0, sizeof(priv->nick));
5592 memcpy(priv->nick, extra, wrqu->data.length);
5593 IPW_DEBUG_TRACE("<<\n");
5594 return 0;
5595
5596}
5597
5598
5599static int ipw_wx_get_nick(struct net_device *dev,
5600 struct iw_request_info *info,
5601 union iwreq_data *wrqu, char *extra)
5602{
5603 struct ipw_priv *priv = ieee80211_priv(dev);
5604 IPW_DEBUG_WX("Getting nick\n");
5605 wrqu->data.length = strlen(priv->nick) + 1;
5606 memcpy(extra, priv->nick, wrqu->data.length);
5607 wrqu->data.flags = 1; /* active */
5608 return 0;
5609}
5610
5611
5612static int ipw_wx_set_rate(struct net_device *dev,
5613 struct iw_request_info *info,
5614 union iwreq_data *wrqu, char *extra)
5615{
5616 IPW_DEBUG_WX("0x%p, 0x%p, 0x%p\n", dev, info, wrqu);
5617 return -EOPNOTSUPP;
5618}
5619
5620static int ipw_wx_get_rate(struct net_device *dev,
5621 struct iw_request_info *info,
5622 union iwreq_data *wrqu, char *extra)
5623{
5624 struct ipw_priv * priv = ieee80211_priv(dev);
5625 wrqu->bitrate.value = priv->last_rate;
5626
5627 IPW_DEBUG_WX("GET Rate -> %d \n", wrqu->bitrate.value);
5628 return 0;
5629}
5630
5631
5632static int ipw_wx_set_rts(struct net_device *dev,
5633 struct iw_request_info *info,
5634 union iwreq_data *wrqu, char *extra)
5635{
5636 struct ipw_priv *priv = ieee80211_priv(dev);
5637
5638 if (wrqu->rts.disabled)
5639 priv->rts_threshold = DEFAULT_RTS_THRESHOLD;
5640 else {
5641 if (wrqu->rts.value < MIN_RTS_THRESHOLD ||
5642 wrqu->rts.value > MAX_RTS_THRESHOLD)
5643 return -EINVAL;
5644
5645 priv->rts_threshold = wrqu->rts.value;
5646 }
5647
5648 ipw_send_rts_threshold(priv, priv->rts_threshold);
5649 IPW_DEBUG_WX("SET RTS Threshold -> %d \n", priv->rts_threshold);
5650 return 0;
5651}
5652
5653static int ipw_wx_get_rts(struct net_device *dev,
5654 struct iw_request_info *info,
5655 union iwreq_data *wrqu, char *extra)
5656{
5657 struct ipw_priv *priv = ieee80211_priv(dev);
5658 wrqu->rts.value = priv->rts_threshold;
5659 wrqu->rts.fixed = 0; /* no auto select */
5660 wrqu->rts.disabled =
5661 (wrqu->rts.value == DEFAULT_RTS_THRESHOLD);
5662
5663 IPW_DEBUG_WX("GET RTS Threshold -> %d \n", wrqu->rts.value);
5664 return 0;
5665}
5666
5667
5668static int ipw_wx_set_txpow(struct net_device *dev,
5669 struct iw_request_info *info,
5670 union iwreq_data *wrqu, char *extra)
5671{
5672 struct ipw_priv *priv = ieee80211_priv(dev);
5673 struct ipw_tx_power tx_power;
5674 int i;
5675
5676 if (ipw_radio_kill_sw(priv, wrqu->power.disabled))
5677 return -EINPROGRESS;
5678
5679 if (wrqu->power.flags != IW_TXPOW_DBM)
5680 return -EINVAL;
5681
5682 if ((wrqu->power.value > 20) ||
5683 (wrqu->power.value < -12))
5684 return -EINVAL;
5685
5686 priv->tx_power = wrqu->power.value;
5687
5688 memset(&tx_power, 0, sizeof(tx_power));
5689
5690 /* configure device for 'G' band */
5691 tx_power.ieee_mode = IPW_G_MODE;
5692 tx_power.num_channels = 11;
5693 for (i = 0; i < 11; i++) {
5694 tx_power.channels_tx_power[i].channel_number = i + 1;
5695 tx_power.channels_tx_power[i].tx_power = priv->tx_power;
5696 }
5697 if (ipw_send_tx_power(priv, &tx_power))
5698 goto error;
5699
5700 /* configure device to also handle 'B' band */
5701 tx_power.ieee_mode = IPW_B_MODE;
5702 if (ipw_send_tx_power(priv, &tx_power))
5703 goto error;
5704
5705 return 0;
5706
5707 error:
5708 return -EIO;
5709}
5710
5711
5712static int ipw_wx_get_txpow(struct net_device *dev,
5713 struct iw_request_info *info,
5714 union iwreq_data *wrqu, char *extra)
5715{
5716 struct ipw_priv *priv = ieee80211_priv(dev);
5717
5718 wrqu->power.value = priv->tx_power;
5719 wrqu->power.fixed = 1;
5720 wrqu->power.flags = IW_TXPOW_DBM;
5721 wrqu->power.disabled = (priv->status & STATUS_RF_KILL_MASK) ? 1 : 0;
5722
5723 IPW_DEBUG_WX("GET TX Power -> %s %d \n",
5724 wrqu->power.disabled ? "ON" : "OFF",
5725 wrqu->power.value);
5726
5727 return 0;
5728}
5729
5730static int ipw_wx_set_frag(struct net_device *dev,
5731 struct iw_request_info *info,
5732 union iwreq_data *wrqu, char *extra)
5733{
5734 struct ipw_priv *priv = ieee80211_priv(dev);
5735
5736 if (wrqu->frag.disabled)
5737 priv->ieee->fts = DEFAULT_FTS;
5738 else {
5739 if (wrqu->frag.value < MIN_FRAG_THRESHOLD ||
5740 wrqu->frag.value > MAX_FRAG_THRESHOLD)
5741 return -EINVAL;
5742
5743 priv->ieee->fts = wrqu->frag.value & ~0x1;
5744 }
5745
5746 ipw_send_frag_threshold(priv, wrqu->frag.value);
5747 IPW_DEBUG_WX("SET Frag Threshold -> %d \n", wrqu->frag.value);
5748 return 0;
5749}
5750
5751static int ipw_wx_get_frag(struct net_device *dev,
5752 struct iw_request_info *info,
5753 union iwreq_data *wrqu, char *extra)
5754{
5755 struct ipw_priv *priv = ieee80211_priv(dev);
5756 wrqu->frag.value = priv->ieee->fts;
5757 wrqu->frag.fixed = 0; /* no auto select */
5758 wrqu->frag.disabled =
5759 (wrqu->frag.value == DEFAULT_FTS);
5760
5761 IPW_DEBUG_WX("GET Frag Threshold -> %d \n", wrqu->frag.value);
5762
5763 return 0;
5764}
5765
5766static int ipw_wx_set_retry(struct net_device *dev,
5767 struct iw_request_info *info,
5768 union iwreq_data *wrqu, char *extra)
5769{
5770 IPW_DEBUG_WX("0x%p, 0x%p, 0x%p\n", dev, info, wrqu);
5771 return -EOPNOTSUPP;
5772}
5773
5774
5775static int ipw_wx_get_retry(struct net_device *dev,
5776 struct iw_request_info *info,
5777 union iwreq_data *wrqu, char *extra)
5778{
5779 IPW_DEBUG_WX("0x%p, 0x%p, 0x%p\n", dev, info, wrqu);
5780 return -EOPNOTSUPP;
5781}
5782
5783
5784static int ipw_wx_set_scan(struct net_device *dev,
5785 struct iw_request_info *info,
5786 union iwreq_data *wrqu, char *extra)
5787{
5788 struct ipw_priv *priv = ieee80211_priv(dev);
5789 IPW_DEBUG_WX("Start scan\n");
5790 if (ipw_request_scan(priv))
5791 return -EIO;
5792 return 0;
5793}
5794
5795static int ipw_wx_get_scan(struct net_device *dev,
5796 struct iw_request_info *info,
5797 union iwreq_data *wrqu, char *extra)
5798{
5799 struct ipw_priv *priv = ieee80211_priv(dev);
5800 return ieee80211_wx_get_scan(priv->ieee, info, wrqu, extra);
5801}
5802
5803static int ipw_wx_set_encode(struct net_device *dev,
5804 struct iw_request_info *info,
5805 union iwreq_data *wrqu, char *key)
5806{
5807 struct ipw_priv *priv = ieee80211_priv(dev);
5808 return ieee80211_wx_set_encode(priv->ieee, info, wrqu, key);
5809}
5810
5811static int ipw_wx_get_encode(struct net_device *dev,
5812 struct iw_request_info *info,
5813 union iwreq_data *wrqu, char *key)
5814{
5815 struct ipw_priv *priv = ieee80211_priv(dev);
5816 return ieee80211_wx_get_encode(priv->ieee, info, wrqu, key);
5817}
5818
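/* SIOCSIWPOWER handler: power.disabled switches the firmware to CAM
 * (constantly awake) mode.  Only the ON/MODE/ALL_R flag variants are
 * accepted; when power save is enabled and the current level is AC, the
 * level defaults to BATTERY. */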
5819static int ipw_wx_set_power(struct net_device *dev,
5820 struct iw_request_info *info,
5821 union iwreq_data *wrqu, char *extra)
5822{
5823 struct ipw_priv *priv = ieee80211_priv(dev);
5824 int err;
5825
5826 if (wrqu->power.disabled) {
5827 priv->power_mode = IPW_POWER_LEVEL(priv->power_mode);
5828 err = ipw_send_power_mode(priv, IPW_POWER_MODE_CAM);
5829 if (err) {
5830 IPW_DEBUG_WX("failed setting power mode.\n");
5831 return err;
5832 }
5833
5834 IPW_DEBUG_WX("SET Power Management Mode -> off\n");
5835
5836 return 0;
5837 }
5838
5839 switch (wrqu->power.flags & IW_POWER_MODE) {
5840 case IW_POWER_ON: /* If not specified */
5841 case IW_POWER_MODE: /* If set all mask */
5842	case IW_POWER_ALL_R:	/* If explicitly stated all */
5843 break;
5844 default: /* Otherwise we don't support it */
5845 IPW_DEBUG_WX("SET PM Mode: %X not supported.\n",
5846 wrqu->power.flags);
5847 return -EOPNOTSUPP;
5848 }
5849
5850 /* If the user hasn't specified a power management mode yet, default
5851 * to BATTERY */
5852 if (IPW_POWER_LEVEL(priv->power_mode) == IPW_POWER_AC)
5853 priv->power_mode = IPW_POWER_ENABLED | IPW_POWER_BATTERY;
5854 else
5855 priv->power_mode = IPW_POWER_ENABLED | priv->power_mode;
5856 err = ipw_send_power_mode(priv, IPW_POWER_LEVEL(priv->power_mode));
5857 if (err) {
5858 IPW_DEBUG_WX("failed setting power mode.\n");
5859 return err;
5860 }
5861
5862 IPW_DEBUG_WX("SET Power Management Mode -> 0x%02X\n",
5863 priv->power_mode);
5864
5865 return 0;
5866}
5867
5868static int ipw_wx_get_power(struct net_device *dev,
5869 struct iw_request_info *info,
5870 union iwreq_data *wrqu, char *extra)
5871{
5872 struct ipw_priv *priv = ieee80211_priv(dev);
5873
5874 if (!(priv->power_mode & IPW_POWER_ENABLED)) {
5875 wrqu->power.disabled = 1;
5876 } else {
5877 wrqu->power.disabled = 0;
5878 }
5879
5880 IPW_DEBUG_WX("GET Power Management Mode -> %02X\n", priv->power_mode);
5881
5882 return 0;
5883}
5884
5885static int ipw_wx_set_powermode(struct net_device *dev,
5886 struct iw_request_info *info,
5887 union iwreq_data *wrqu, char *extra)
5888{
5889 struct ipw_priv *priv = ieee80211_priv(dev);
5890 int mode = *(int *)extra;
5891 int err;
5892
5893 if ((mode < 1) || (mode > IPW_POWER_LIMIT)) {
5894 mode = IPW_POWER_AC;
5895 priv->power_mode = mode;
5896 } else {
5897 priv->power_mode = IPW_POWER_ENABLED | mode;
5898 }
5899
5900 if (priv->power_mode != mode) {
5901 err = ipw_send_power_mode(priv, mode);
5902
5903 if (err) {
5904 IPW_DEBUG_WX("failed setting power mode.\n");
5905 return err;
5906 }
5907 }
5908
5909 return 0;
5910}
5911
5912#define MAX_WX_STRING 80
5913static int ipw_wx_get_powermode(struct net_device *dev,
5914 struct iw_request_info *info,
5915 union iwreq_data *wrqu, char *extra)
5916{
5917 struct ipw_priv *priv = ieee80211_priv(dev);
5918 int level = IPW_POWER_LEVEL(priv->power_mode);
5919 char *p = extra;
5920
5921 p += snprintf(p, MAX_WX_STRING, "Power save level: %d ", level);
5922
5923 switch (level) {
5924 case IPW_POWER_AC:
5925 p += snprintf(p, MAX_WX_STRING - (p - extra), "(AC)");
5926 break;
5927 case IPW_POWER_BATTERY:
5928 p += snprintf(p, MAX_WX_STRING - (p - extra), "(BATTERY)");
5929 break;
5930 default:
5931 p += snprintf(p, MAX_WX_STRING - (p - extra),
5932 "(Timeout %dms, Period %dms)",
5933 timeout_duration[level - 1] / 1000,
5934 period_duration[level - 1] / 1000);
5935 }
5936
5937 if (!(priv->power_mode & IPW_POWER_ENABLED))
5938 p += snprintf(p, MAX_WX_STRING - (p - extra)," OFF");
5939
5940 wrqu->data.length = p - extra + 1;
5941
5942 return 0;
5943}
5944
5945static int ipw_wx_set_wireless_mode(struct net_device *dev,
5946 struct iw_request_info *info,
5947 union iwreq_data *wrqu, char *extra)
5948{
5949 struct ipw_priv *priv = ieee80211_priv(dev);
5950 int mode = *(int *)extra;
5951 u8 band = 0, modulation = 0;
5952
5953 if (mode == 0 || mode & ~IEEE_MODE_MASK) {
5954 IPW_WARNING("Attempt to set invalid wireless mode: %d\n",
5955 mode);
5956 return -EINVAL;
5957 }
5958
5959 if (priv->adapter == IPW_2915ABG) {
5960 priv->ieee->abg_ture = 1;
5961 if (mode & IEEE_A) {
5962 band |= IEEE80211_52GHZ_BAND;
5963 modulation |= IEEE80211_OFDM_MODULATION;
5964 } else
5965 priv->ieee->abg_ture = 0;
5966 } else {
5967 if (mode & IEEE_A) {
5968 IPW_WARNING("Attempt to set 2200BG into "
5969 "802.11a mode\n");
5970 return -EINVAL;
5971 }
5972
5973 priv->ieee->abg_ture = 0;
5974 }
5975
5976 if (mode & IEEE_B) {
5977 band |= IEEE80211_24GHZ_BAND;
5978 modulation |= IEEE80211_CCK_MODULATION;
5979 } else
5980 priv->ieee->abg_ture = 0;
5981
5982 if (mode & IEEE_G) {
5983 band |= IEEE80211_24GHZ_BAND;
5984 modulation |= IEEE80211_OFDM_MODULATION;
5985 } else
5986 priv->ieee->abg_ture = 0;
5987
5988 priv->ieee->mode = mode;
5989 priv->ieee->freq_band = band;
5990 priv->ieee->modulation = modulation;
5991 init_supported_rates(priv, &priv->rates);
5992
5993 /* If we are currently associated, or trying to associate
5994 * then see if this is a new configuration (causing us to
5995 * disassociate) */
5996 if (priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) {
5997 /* The resulting association will trigger
5998 * the new rates to be sent to the device */
5999 IPW_DEBUG_ASSOC("Disassociating due to mode change.\n");
6000 ipw_disassociate(priv);
6001 } else
6002 ipw_send_supported_rates(priv, &priv->rates);
6003
6004 IPW_DEBUG_WX("PRIV SET MODE: %c%c%c\n",
6005 mode & IEEE_A ? 'a' : '.',
6006 mode & IEEE_B ? 'b' : '.',
6007 mode & IEEE_G ? 'g' : '.');
6008 return 0;
6009}
6010
6011static int ipw_wx_get_wireless_mode(struct net_device *dev,
6012 struct iw_request_info *info,
6013 union iwreq_data *wrqu, char *extra)
6014{
6015 struct ipw_priv *priv = ieee80211_priv(dev);
6016
6017 switch (priv->ieee->freq_band) {
6018 case IEEE80211_24GHZ_BAND:
6019 switch (priv->ieee->modulation) {
6020 case IEEE80211_CCK_MODULATION:
6021 strncpy(extra, "802.11b (2)", MAX_WX_STRING);
6022 break;
6023 case IEEE80211_OFDM_MODULATION:
6024 strncpy(extra, "802.11g (4)", MAX_WX_STRING);
6025 break;
6026 default:
6027 strncpy(extra, "802.11bg (6)", MAX_WX_STRING);
6028 break;
6029 }
6030 break;
6031
6032 case IEEE80211_52GHZ_BAND:
6033 strncpy(extra, "802.11a (1)", MAX_WX_STRING);
6034 break;
6035
6036 default: /* Mixed Band */
6037 switch (priv->ieee->modulation) {
6038 case IEEE80211_CCK_MODULATION:
6039 strncpy(extra, "802.11ab (3)", MAX_WX_STRING);
6040 break;
6041 case IEEE80211_OFDM_MODULATION:
6042 strncpy(extra, "802.11ag (5)", MAX_WX_STRING);
6043 break;
6044 default:
6045 strncpy(extra, "802.11abg (7)", MAX_WX_STRING);
6046 break;
6047 }
6048 break;
6049 }
6050
6051 IPW_DEBUG_WX("PRIV GET MODE: %s\n", extra);
6052
6053 wrqu->data.length = strlen(extra) + 1;
6054
6055 return 0;
6056}
6057
6058#ifdef CONFIG_IPW_PROMISC
6059static int ipw_wx_set_promisc(struct net_device *dev,
6060 struct iw_request_info *info,
6061 union iwreq_data *wrqu, char *extra)
6062{
6063 struct ipw_priv *priv = ieee80211_priv(dev);
6064 int *parms = (int *)extra;
6065 int enable = (parms[0] > 0);
6066
6067 IPW_DEBUG_WX("SET PROMISC: %d %d\n", enable, parms[1]);
6068 if (enable) {
6069 if (priv->ieee->iw_mode != IW_MODE_MONITOR) {
6070 priv->net_dev->type = ARPHRD_IEEE80211;
6071 ipw_adapter_restart(priv);
6072 }
6073
6074 ipw_set_channel(priv, parms[1]);
6075 } else {
6076 if (priv->ieee->iw_mode != IW_MODE_MONITOR)
6077 return 0;
6078 priv->net_dev->type = ARPHRD_ETHER;
6079 ipw_adapter_restart(priv);
6080 }
6081 return 0;
6082}
6083
6084
6085static int ipw_wx_reset(struct net_device *dev,
6086 struct iw_request_info *info,
6087 union iwreq_data *wrqu, char *extra)
6088{
6089 struct ipw_priv *priv = ieee80211_priv(dev);
6090 IPW_DEBUG_WX("RESET\n");
6091 ipw_adapter_restart(priv);
6092 return 0;
6093}
6094#endif /* CONFIG_IPW_PROMISC */
6095
6096/* Rebase the WE IOCTLs to zero for the handler array */
6097#define IW_IOCTL(x) [(x)-SIOCSIWCOMMIT]
6098static iw_handler ipw_wx_handlers[] =
6099{
6100 IW_IOCTL(SIOCGIWNAME) = ipw_wx_get_name,
6101 IW_IOCTL(SIOCSIWFREQ) = ipw_wx_set_freq,
6102 IW_IOCTL(SIOCGIWFREQ) = ipw_wx_get_freq,
6103 IW_IOCTL(SIOCSIWMODE) = ipw_wx_set_mode,
6104 IW_IOCTL(SIOCGIWMODE) = ipw_wx_get_mode,
6105 IW_IOCTL(SIOCGIWRANGE) = ipw_wx_get_range,
6106 IW_IOCTL(SIOCSIWAP) = ipw_wx_set_wap,
6107 IW_IOCTL(SIOCGIWAP) = ipw_wx_get_wap,
6108 IW_IOCTL(SIOCSIWSCAN) = ipw_wx_set_scan,
6109 IW_IOCTL(SIOCGIWSCAN) = ipw_wx_get_scan,
6110 IW_IOCTL(SIOCSIWESSID) = ipw_wx_set_essid,
6111 IW_IOCTL(SIOCGIWESSID) = ipw_wx_get_essid,
6112 IW_IOCTL(SIOCSIWNICKN) = ipw_wx_set_nick,
6113 IW_IOCTL(SIOCGIWNICKN) = ipw_wx_get_nick,
6114 IW_IOCTL(SIOCSIWRATE) = ipw_wx_set_rate,
6115 IW_IOCTL(SIOCGIWRATE) = ipw_wx_get_rate,
6116 IW_IOCTL(SIOCSIWRTS) = ipw_wx_set_rts,
6117 IW_IOCTL(SIOCGIWRTS) = ipw_wx_get_rts,
6118 IW_IOCTL(SIOCSIWFRAG) = ipw_wx_set_frag,
6119 IW_IOCTL(SIOCGIWFRAG) = ipw_wx_get_frag,
6120 IW_IOCTL(SIOCSIWTXPOW) = ipw_wx_set_txpow,
6121 IW_IOCTL(SIOCGIWTXPOW) = ipw_wx_get_txpow,
6122 IW_IOCTL(SIOCSIWRETRY) = ipw_wx_set_retry,
6123 IW_IOCTL(SIOCGIWRETRY) = ipw_wx_get_retry,
6124 IW_IOCTL(SIOCSIWENCODE) = ipw_wx_set_encode,
6125 IW_IOCTL(SIOCGIWENCODE) = ipw_wx_get_encode,
6126 IW_IOCTL(SIOCSIWPOWER) = ipw_wx_set_power,
6127 IW_IOCTL(SIOCGIWPOWER) = ipw_wx_get_power,
6128};
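/*
 * Editor's note: a worked expansion of the IW_IOCTL() helper above, using
 * ioctl numbers from linux/wireless.h (illustration only).  The entry
 *
 *     IW_IOCTL(SIOCGIWFREQ) = ipw_wx_get_freq,
 *
 * expands to the designated initializer
 *
 *     [SIOCGIWFREQ - SIOCSIWCOMMIT] = ipw_wx_get_freq,
 *
 * i.e. [0x8B05 - 0x8B00] = [5], so the wireless extensions core can index
 * this table with a zero-based offset instead of the raw 0x8Bxx request
 * number.
 */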
6129
6130#define IPW_PRIV_SET_POWER SIOCIWFIRSTPRIV
6131#define IPW_PRIV_GET_POWER SIOCIWFIRSTPRIV+1
6132#define IPW_PRIV_SET_MODE SIOCIWFIRSTPRIV+2
6133#define IPW_PRIV_GET_MODE SIOCIWFIRSTPRIV+3
6134#define IPW_PRIV_SET_PROMISC SIOCIWFIRSTPRIV+4
6135#define IPW_PRIV_RESET SIOCIWFIRSTPRIV+5
6136
6137
6138static struct iw_priv_args ipw_priv_args[] = {
6139 {
6140 .cmd = IPW_PRIV_SET_POWER,
6141 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
6142 .name = "set_power"
6143 },
6144 {
6145 .cmd = IPW_PRIV_GET_POWER,
6146 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
6147 .name = "get_power"
6148 },
6149 {
6150 .cmd = IPW_PRIV_SET_MODE,
6151 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
6152 .name = "set_mode"
6153 },
6154 {
6155 .cmd = IPW_PRIV_GET_MODE,
6156 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
6157 .name = "get_mode"
6158 },
6159#ifdef CONFIG_IPW_PROMISC
6160 {
6161 IPW_PRIV_SET_PROMISC,
6162 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2, 0, "monitor"
6163 },
6164 {
6165 IPW_PRIV_RESET,
6166 IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "reset"
6167 },
6168#endif /* CONFIG_IPW_PROMISC */
6169};
6170
6171static iw_handler ipw_priv_handler[] = {
6172 ipw_wx_set_powermode,
6173 ipw_wx_get_powermode,
6174 ipw_wx_set_wireless_mode,
6175 ipw_wx_get_wireless_mode,
6176#ifdef CONFIG_IPW_PROMISC
6177 ipw_wx_set_promisc,
6178 ipw_wx_reset,
6179#endif
6180};
6181
6182static struct iw_handler_def ipw_wx_handler_def =
6183{
6184 .standard = ipw_wx_handlers,
6185 .num_standard = ARRAY_SIZE(ipw_wx_handlers),
6186 .num_private = ARRAY_SIZE(ipw_priv_handler),
6187 .num_private_args = ARRAY_SIZE(ipw_priv_args),
6188 .private = ipw_priv_handler,
6189 .private_args = ipw_priv_args,
6190};
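/*
 * Editor's note: given the names and argument layouts declared in
 * ipw_priv_args above, the private handlers should be reachable from user
 * space roughly as follows (the interface name is only an example):
 *
 *   iwpriv eth1 set_power 5      one int   -> ipw_wx_set_powermode()
 *   iwpriv eth1 get_power        string    -> ipw_wx_get_powermode()
 *   iwpriv eth1 set_mode 6       one int   -> ipw_wx_set_wireless_mode()
 *   iwpriv eth1 get_mode         string    -> ipw_wx_get_wireless_mode()
 *   iwpriv eth1 monitor 2 6      two ints  -> ipw_wx_set_promisc()
 *   iwpriv eth1 reset            no args   -> ipw_wx_reset()
 *
 * The last two are only compiled in when CONFIG_IPW_PROMISC is set.
 */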
6191
6192
6193
6194
6195/*
6196 * Get wireless statistics.
6197 * Called by /proc/net/wireless
6198 * Also called by SIOCGIWSTATS
6199 */
6200static struct iw_statistics *ipw_get_wireless_stats(struct net_device * dev)
6201{
6202 struct ipw_priv *priv = ieee80211_priv(dev);
6203 struct iw_statistics *wstats;
6204
6205 wstats = &priv->wstats;
6206
6207 /* if hw is disabled, then ipw2100_get_ordinal() can't be called.
6208 * ipw2100_wx_wireless_stats seems to be called before fw is
6209 * initialized. STATUS_ASSOCIATED will only be set if the hw is up
6210	 * and associated; if not associated, the values are all meaningless
6211	 * anyway, so zero them all and mark them INVALID */
6212 if (!(priv->status & STATUS_ASSOCIATED)) {
6213 wstats->miss.beacon = 0;
6214 wstats->discard.retries = 0;
6215 wstats->qual.qual = 0;
6216 wstats->qual.level = 0;
6217 wstats->qual.noise = 0;
6218 wstats->qual.updated = 7;
6219 wstats->qual.updated |= IW_QUAL_NOISE_INVALID |
6220 IW_QUAL_QUAL_INVALID | IW_QUAL_LEVEL_INVALID;
6221 return wstats;
6222 }
6223
6224 wstats->qual.qual = priv->quality;
6225 wstats->qual.level = average_value(&priv->average_rssi);
6226 wstats->qual.noise = average_value(&priv->average_noise);
6227 wstats->qual.updated = IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED |
6228 IW_QUAL_NOISE_UPDATED;
6229
6230 wstats->miss.beacon = average_value(&priv->average_missed_beacons);
6231 wstats->discard.retries = priv->last_tx_failures;
6232 wstats->discard.code = priv->ieee->ieee_stats.rx_discards_undecryptable;
6233
6234/* if (ipw_get_ordinal(priv, IPW_ORD_STAT_TX_RETRY, &tx_retry, &len))
6235 goto fail_get_ordinal;
6236 wstats->discard.retries += tx_retry; */
6237
6238 return wstats;
6239}
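/*
 * Editor's note: the literal 7 stored in qual.updated above corresponds to
 * IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED | IW_QUAL_NOISE_UPDATED
 * (0x01 | 0x02 | 0x04, assuming the usual flag values from
 * linux/wireless.h).  The *_INVALID bits OR'd in afterwards tell user
 * space to ignore the zeroed qual/level/noise fields while the device is
 * not associated.
 */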
6240
6241
6242/* net device stuff */
6243
6244static inline void init_sys_config(struct ipw_sys_config *sys_config)
6245{
6246 memset(sys_config, 0, sizeof(struct ipw_sys_config));
6247 sys_config->bt_coexistence = 1; /* We may need to look into prvStaBtConfig */
6248 sys_config->answer_broadcast_ssid_probe = 0;
6249 sys_config->accept_all_data_frames = 0;
6250 sys_config->accept_non_directed_frames = 1;
6251 sys_config->exclude_unicast_unencrypted = 0;
6252 sys_config->disable_unicast_decryption = 1;
6253 sys_config->exclude_multicast_unencrypted = 0;
6254 sys_config->disable_multicast_decryption = 1;
6255 sys_config->antenna_diversity = CFG_SYS_ANTENNA_BOTH;
6256 sys_config->pass_crc_to_host = 0; /* TODO: See if 1 gives us FCS */
6257 sys_config->dot11g_auto_detection = 0;
6258 sys_config->enable_cts_to_self = 0;
6259 sys_config->bt_coexist_collision_thr = 0;
6260 sys_config->pass_noise_stats_to_host = 1;
6261}
6262
6263static int ipw_net_open(struct net_device *dev)
6264{
6265 struct ipw_priv *priv = ieee80211_priv(dev);
6266 IPW_DEBUG_INFO("dev->open\n");
6267 /* we should be verifying the device is ready to be opened */
6268 if (!(priv->status & STATUS_RF_KILL_MASK) &&
6269 (priv->status & STATUS_ASSOCIATED))
6270 netif_start_queue(dev);
6271 return 0;
6272}
6273
6274static int ipw_net_stop(struct net_device *dev)
6275{
6276 IPW_DEBUG_INFO("dev->close\n");
6277 netif_stop_queue(dev);
6278 return 0;
6279}
6280
6281/*
6282TODO:
6283
6284Modify to send one TFD per fragment instead of using chunking; otherwise
6285we need to heavily modify ieee80211_skb_to_txb().
6286*/
6287
6288static inline void ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb)
6289{
6290 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)
6291 txb->fragments[0]->data;
6292 int i = 0;
6293 struct tfd_frame *tfd;
6294 struct clx2_tx_queue *txq = &priv->txq[0];
6295 struct clx2_queue *q = &txq->q;
6296 u8 id, hdr_len, unicast;
6297 u16 remaining_bytes;
6298
6299 switch (priv->ieee->iw_mode) {
6300 case IW_MODE_ADHOC:
6301 hdr_len = IEEE80211_3ADDR_LEN;
6302 unicast = !is_broadcast_ether_addr(hdr->addr1) &&
6303 !is_multicast_ether_addr(hdr->addr1);
6304 id = ipw_find_station(priv, hdr->addr1);
6305 if (id == IPW_INVALID_STATION) {
6306 id = ipw_add_station(priv, hdr->addr1);
6307 if (id == IPW_INVALID_STATION) {
6308 IPW_WARNING("Attempt to send data to "
6309 "invalid cell: " MAC_FMT "\n",
6310 MAC_ARG(hdr->addr1));
6311 goto drop;
6312 }
6313 }
6314 break;
6315
6316 case IW_MODE_INFRA:
6317 default:
6318 unicast = !is_broadcast_ether_addr(hdr->addr3) &&
6319 !is_multicast_ether_addr(hdr->addr3);
6320 hdr_len = IEEE80211_3ADDR_LEN;
6321 id = 0;
6322 break;
6323 }
6324
6325 tfd = &txq->bd[q->first_empty];
6326 txq->txb[q->first_empty] = txb;
6327 memset(tfd, 0, sizeof(*tfd));
6328 tfd->u.data.station_number = id;
6329
6330 tfd->control_flags.message_type = TX_FRAME_TYPE;
6331 tfd->control_flags.control_bits = TFD_NEED_IRQ_MASK;
6332
6333 tfd->u.data.cmd_id = DINO_CMD_TX;
6334 tfd->u.data.len = txb->payload_size;
6335 remaining_bytes = txb->payload_size;
6336 if (unlikely(!unicast))
6337 tfd->u.data.tx_flags = DCT_FLAG_NO_WEP;
6338 else
6339 tfd->u.data.tx_flags = DCT_FLAG_NO_WEP | DCT_FLAG_ACK_REQD;
6340
6341 if (priv->assoc_request.ieee_mode == IPW_B_MODE)
6342 tfd->u.data.tx_flags_ext = DCT_FLAG_EXT_MODE_CCK;
6343 else
6344 tfd->u.data.tx_flags_ext = DCT_FLAG_EXT_MODE_OFDM;
6345
6346 if (priv->config & CFG_PREAMBLE)
6347 tfd->u.data.tx_flags |= DCT_FLAG_SHORT_PREMBL;
6348
6349 memcpy(&tfd->u.data.tfd.tfd_24.mchdr, hdr, hdr_len);
6350
6351 /* payload */
6352 tfd->u.data.num_chunks = min((u8)(NUM_TFD_CHUNKS - 2), txb->nr_frags);
6353 for (i = 0; i < tfd->u.data.num_chunks; i++) {
6354 IPW_DEBUG_TX("Dumping TX packet frag %i of %i (%d bytes):\n",
6355 i, tfd->u.data.num_chunks,
6356 txb->fragments[i]->len - hdr_len);
6357 printk_buf(IPW_DL_TX, txb->fragments[i]->data + hdr_len,
6358 txb->fragments[i]->len - hdr_len);
6359
6360 tfd->u.data.chunk_ptr[i] = pci_map_single(
6361 priv->pci_dev, txb->fragments[i]->data + hdr_len,
6362 txb->fragments[i]->len - hdr_len, PCI_DMA_TODEVICE);
6363 tfd->u.data.chunk_len[i] = txb->fragments[i]->len - hdr_len;
6364 }
6365
6366 if (i != txb->nr_frags) {
6367 struct sk_buff *skb;
6368 u16 remaining_bytes = 0;
6369 int j;
6370
6371 for (j = i; j < txb->nr_frags; j++)
6372 remaining_bytes += txb->fragments[j]->len - hdr_len;
6373
6374 printk(KERN_INFO "Trying to reallocate for %d bytes\n",
6375 remaining_bytes);
6376 skb = alloc_skb(remaining_bytes, GFP_ATOMIC);
6377 if (skb != NULL) {
6378 tfd->u.data.chunk_len[i] = remaining_bytes;
6379 for (j = i; j < txb->nr_frags; j++) {
6380 int size = txb->fragments[j]->len - hdr_len;
6381 printk(KERN_INFO "Adding frag %d %d...\n",
6382 j, size);
6383 memcpy(skb_put(skb, size),
6384 txb->fragments[j]->data + hdr_len,
6385 size);
6386 }
6387 dev_kfree_skb_any(txb->fragments[i]);
6388 txb->fragments[i] = skb;
6389 tfd->u.data.chunk_ptr[i] = pci_map_single(
6390 priv->pci_dev, skb->data,
6391 tfd->u.data.chunk_len[i], PCI_DMA_TODEVICE);
6392 tfd->u.data.num_chunks++;
6393 }
6394 }
6395
6396 /* kick DMA */
6397 q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
6398 ipw_write32(priv, q->reg_w, q->first_empty);
6399
6400 if (ipw_queue_space(q) < q->high_mark)
6401 netif_stop_queue(priv->net_dev);
6402
6403 return;
6404
6405 drop:
6406 IPW_DEBUG_DROP("Silently dropping Tx packet.\n");
6407 ieee80211_txb_free(txb);
6408}
6409
6410static int ipw_net_hard_start_xmit(struct ieee80211_txb *txb,
6411 struct net_device *dev)
6412{
6413 struct ipw_priv *priv = ieee80211_priv(dev);
6414 unsigned long flags;
6415
6416 IPW_DEBUG_TX("dev->xmit(%d bytes)\n", txb->payload_size);
6417
6418 spin_lock_irqsave(&priv->lock, flags);
6419
6420 if (!(priv->status & STATUS_ASSOCIATED)) {
6421 IPW_DEBUG_INFO("Tx attempt while not associated.\n");
6422 priv->ieee->stats.tx_carrier_errors++;
6423 netif_stop_queue(dev);
6424 goto fail_unlock;
6425 }
6426
6427 ipw_tx_skb(priv, txb);
6428
6429 spin_unlock_irqrestore(&priv->lock, flags);
6430 return 0;
6431
6432 fail_unlock:
6433 spin_unlock_irqrestore(&priv->lock, flags);
6434 return 1;
6435}
6436
6437static struct net_device_stats *ipw_net_get_stats(struct net_device *dev)
6438{
6439 struct ipw_priv *priv = ieee80211_priv(dev);
6440
6441 priv->ieee->stats.tx_packets = priv->tx_packets;
6442 priv->ieee->stats.rx_packets = priv->rx_packets;
6443 return &priv->ieee->stats;
6444}
6445
6446static void ipw_net_set_multicast_list(struct net_device *dev)
6447{
6448
6449}
6450
6451static int ipw_net_set_mac_address(struct net_device *dev, void *p)
6452{
6453 struct ipw_priv *priv = ieee80211_priv(dev);
6454 struct sockaddr *addr = p;
6455 if (!is_valid_ether_addr(addr->sa_data))
6456 return -EADDRNOTAVAIL;
6457 priv->config |= CFG_CUSTOM_MAC;
6458 memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN);
6459 printk(KERN_INFO "%s: Setting MAC to " MAC_FMT "\n",
6460 priv->net_dev->name, MAC_ARG(priv->mac_addr));
6461 ipw_adapter_restart(priv);
6462 return 0;
6463}
6464
6465static void ipw_ethtool_get_drvinfo(struct net_device *dev,
6466 struct ethtool_drvinfo *info)
6467{
6468 struct ipw_priv *p = ieee80211_priv(dev);
6469 char vers[64];
6470 char date[32];
6471 u32 len;
6472
6473 strcpy(info->driver, DRV_NAME);
6474 strcpy(info->version, DRV_VERSION);
6475
6476 len = sizeof(vers);
6477 ipw_get_ordinal(p, IPW_ORD_STAT_FW_VERSION, vers, &len);
6478 len = sizeof(date);
6479 ipw_get_ordinal(p, IPW_ORD_STAT_FW_DATE, date, &len);
6480
6481 snprintf(info->fw_version, sizeof(info->fw_version),"%s (%s)",
6482 vers, date);
6483 strcpy(info->bus_info, pci_name(p->pci_dev));
6484 info->eedump_len = CX2_EEPROM_IMAGE_SIZE;
6485}
6486
6487static u32 ipw_ethtool_get_link(struct net_device *dev)
6488{
6489 struct ipw_priv *priv = ieee80211_priv(dev);
6490 return (priv->status & STATUS_ASSOCIATED) != 0;
6491}
6492
6493static int ipw_ethtool_get_eeprom_len(struct net_device *dev)
6494{
6495 return CX2_EEPROM_IMAGE_SIZE;
6496}
6497
6498static int ipw_ethtool_get_eeprom(struct net_device *dev,
6499 struct ethtool_eeprom *eeprom, u8 *bytes)
6500{
6501 struct ipw_priv *p = ieee80211_priv(dev);
6502
6503 if (eeprom->offset + eeprom->len > CX2_EEPROM_IMAGE_SIZE)
6504 return -EINVAL;
6505
6506 memcpy(bytes, &((u8 *)p->eeprom)[eeprom->offset], eeprom->len);
6507 return 0;
6508}
6509
6510static int ipw_ethtool_set_eeprom(struct net_device *dev,
6511 struct ethtool_eeprom *eeprom, u8 *bytes)
6512{
6513 struct ipw_priv *p = ieee80211_priv(dev);
6514 int i;
6515
6516 if (eeprom->offset + eeprom->len > CX2_EEPROM_IMAGE_SIZE)
6517 return -EINVAL;
6518
6519 memcpy(&((u8 *)p->eeprom)[eeprom->offset], bytes, eeprom->len);
6520 for (i = IPW_EEPROM_DATA;
6521 i < IPW_EEPROM_DATA + CX2_EEPROM_IMAGE_SIZE;
6522 i++)
6523 ipw_write8(p, i, p->eeprom[i]);
6524
6525 return 0;
6526}
6527
6528static struct ethtool_ops ipw_ethtool_ops = {
6529 .get_link = ipw_ethtool_get_link,
6530 .get_drvinfo = ipw_ethtool_get_drvinfo,
6531 .get_eeprom_len = ipw_ethtool_get_eeprom_len,
6532 .get_eeprom = ipw_ethtool_get_eeprom,
6533 .set_eeprom = ipw_ethtool_set_eeprom,
6534};
6535
6536static irqreturn_t ipw_isr(int irq, void *data, struct pt_regs *regs)
6537{
6538 struct ipw_priv *priv = data;
6539 u32 inta, inta_mask;
6540
6541 if (!priv)
6542 return IRQ_NONE;
6543
6544 spin_lock(&priv->lock);
6545
6546 if (!(priv->status & STATUS_INT_ENABLED)) {
6547 /* Shared IRQ */
6548 goto none;
6549 }
6550
6551 inta = ipw_read32(priv, CX2_INTA_RW);
6552 inta_mask = ipw_read32(priv, CX2_INTA_MASK_R);
6553
6554 if (inta == 0xFFFFFFFF) {
6555 /* Hardware disappeared */
6556 IPW_WARNING("IRQ INTA == 0xFFFFFFFF\n");
6557 goto none;
6558 }
6559
6560 if (!(inta & (CX2_INTA_MASK_ALL & inta_mask))) {
6561 /* Shared interrupt */
6562 goto none;
6563 }
6564
6565 /* tell the device to stop sending interrupts */
6566 ipw_disable_interrupts(priv);
6567
6568 /* ack current interrupts */
6569 inta &= (CX2_INTA_MASK_ALL & inta_mask);
6570 ipw_write32(priv, CX2_INTA_RW, inta);
6571
6572 /* Cache INTA value for our tasklet */
6573 priv->isr_inta = inta;
6574
6575 tasklet_schedule(&priv->irq_tasklet);
6576
6577 spin_unlock(&priv->lock);
6578
6579 return IRQ_HANDLED;
6580 none:
6581 spin_unlock(&priv->lock);
6582 return IRQ_NONE;
6583}
6584
6585static void ipw_rf_kill(void *adapter)
6586{
6587 struct ipw_priv *priv = adapter;
6588 unsigned long flags;
6589
6590 spin_lock_irqsave(&priv->lock, flags);
6591
6592 if (rf_kill_active(priv)) {
6593 IPW_DEBUG_RF_KILL("RF Kill active, rescheduling GPIO check\n");
6594 if (priv->workqueue)
6595 queue_delayed_work(priv->workqueue,
6596 &priv->rf_kill, 2 * HZ);
6597 goto exit_unlock;
6598 }
6599
6600 /* RF Kill is now disabled, so bring the device back up */
6601
6602 if (!(priv->status & STATUS_RF_KILL_MASK)) {
6603 IPW_DEBUG_RF_KILL("HW RF Kill no longer active, restarting "
6604 "device\n");
6605
6606 /* we can not do an adapter restart while inside an irq lock */
6607 queue_work(priv->workqueue, &priv->adapter_restart);
6608 } else
6609 IPW_DEBUG_RF_KILL("HW RF Kill deactivated. SW RF Kill still "
6610 "enabled\n");
6611
6612 exit_unlock:
6613 spin_unlock_irqrestore(&priv->lock, flags);
6614}
6615
6616static int ipw_setup_deferred_work(struct ipw_priv *priv)
6617{
6618 int ret = 0;
6619
6620#ifdef CONFIG_SOFTWARE_SUSPEND2
6621 priv->workqueue = create_workqueue(DRV_NAME, 0);
6622#else
6623 priv->workqueue = create_workqueue(DRV_NAME);
6624#endif
6625 init_waitqueue_head(&priv->wait_command_queue);
6626
6627 INIT_WORK(&priv->adhoc_check, ipw_adhoc_check, priv);
6628 INIT_WORK(&priv->associate, ipw_associate, priv);
6629 INIT_WORK(&priv->disassociate, ipw_disassociate, priv);
6630 INIT_WORK(&priv->rx_replenish, ipw_rx_queue_replenish, priv);
6631 INIT_WORK(&priv->adapter_restart, ipw_adapter_restart, priv);
6632 INIT_WORK(&priv->rf_kill, ipw_rf_kill, priv);
6633 INIT_WORK(&priv->up, (void (*)(void *))ipw_up, priv);
6634 INIT_WORK(&priv->down, (void (*)(void *))ipw_down, priv);
6635 INIT_WORK(&priv->request_scan,
6636 (void (*)(void *))ipw_request_scan, priv);
6637 INIT_WORK(&priv->gather_stats,
6638 (void (*)(void *))ipw_gather_stats, priv);
6639 INIT_WORK(&priv->abort_scan, (void (*)(void *))ipw_abort_scan, priv);
6640 INIT_WORK(&priv->roam, ipw_roam, priv);
6641 INIT_WORK(&priv->scan_check, ipw_scan_check, priv);
6642
6643 tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
6644 ipw_irq_tasklet, (unsigned long)priv);
6645
6646 return ret;
6647}
6648
6649
6650static void shim__set_security(struct net_device *dev,
6651 struct ieee80211_security *sec)
6652{
6653 struct ipw_priv *priv = ieee80211_priv(dev);
6654 int i;
6655
6656 for (i = 0; i < 4; i++) {
6657 if (sec->flags & (1 << i)) {
6658 priv->sec.key_sizes[i] = sec->key_sizes[i];
6659 if (sec->key_sizes[i] == 0)
6660 priv->sec.flags &= ~(1 << i);
6661 else
6662 memcpy(priv->sec.keys[i], sec->keys[i],
6663 sec->key_sizes[i]);
6664 priv->sec.flags |= (1 << i);
6665 priv->status |= STATUS_SECURITY_UPDATED;
6666 }
6667 }
6668
6669 if ((sec->flags & SEC_ACTIVE_KEY) &&
6670 priv->sec.active_key != sec->active_key) {
6671 if (sec->active_key <= 3) {
6672 priv->sec.active_key = sec->active_key;
6673 priv->sec.flags |= SEC_ACTIVE_KEY;
6674 } else
6675 priv->sec.flags &= ~SEC_ACTIVE_KEY;
6676 priv->status |= STATUS_SECURITY_UPDATED;
6677 }
6678
6679 if ((sec->flags & SEC_AUTH_MODE) &&
6680 (priv->sec.auth_mode != sec->auth_mode)) {
6681 priv->sec.auth_mode = sec->auth_mode;
6682 priv->sec.flags |= SEC_AUTH_MODE;
6683 if (sec->auth_mode == WLAN_AUTH_SHARED_KEY)
6684 priv->capability |= CAP_SHARED_KEY;
6685 else
6686 priv->capability &= ~CAP_SHARED_KEY;
6687 priv->status |= STATUS_SECURITY_UPDATED;
6688 }
6689
6690 if (sec->flags & SEC_ENABLED &&
6691 priv->sec.enabled != sec->enabled) {
6692 priv->sec.flags |= SEC_ENABLED;
6693 priv->sec.enabled = sec->enabled;
6694 priv->status |= STATUS_SECURITY_UPDATED;
6695 if (sec->enabled)
6696 priv->capability |= CAP_PRIVACY_ON;
6697 else
6698 priv->capability &= ~CAP_PRIVACY_ON;
6699 }
6700
6701 if (sec->flags & SEC_LEVEL &&
6702 priv->sec.level != sec->level) {
6703 priv->sec.level = sec->level;
6704 priv->sec.flags |= SEC_LEVEL;
6705 priv->status |= STATUS_SECURITY_UPDATED;
6706 }
6707
6708 /* To match current functionality of ipw2100 (which works well w/
6709	 * various supplicants), we don't force a disassociate if the
6710 * privacy capability changes ... */
6711#if 0
6712 if ((priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)) &&
6713 (((priv->assoc_request.capability &
6714 WLAN_CAPABILITY_PRIVACY) && !sec->enabled) ||
6715 (!(priv->assoc_request.capability &
6716 WLAN_CAPABILITY_PRIVACY) && sec->enabled))) {
6717 IPW_DEBUG_ASSOC("Disassociating due to capability "
6718 "change.\n");
6719 ipw_disassociate(priv);
6720 }
6721#endif
6722}
6723
6724static int init_supported_rates(struct ipw_priv *priv,
6725 struct ipw_supported_rates *rates)
6726{
6727 /* TODO: Mask out rates based on priv->rates_mask */
6728
6729 memset(rates, 0, sizeof(*rates));
6730 /* configure supported rates */
6731 switch (priv->ieee->freq_band) {
6732 case IEEE80211_52GHZ_BAND:
6733 rates->ieee_mode = IPW_A_MODE;
6734 rates->purpose = IPW_RATE_CAPABILITIES;
6735 ipw_add_ofdm_scan_rates(rates, IEEE80211_CCK_MODULATION,
6736 IEEE80211_OFDM_DEFAULT_RATES_MASK);
6737 break;
6738
6739 default: /* Mixed or 2.4Ghz */
6740 rates->ieee_mode = IPW_G_MODE;
6741 rates->purpose = IPW_RATE_CAPABILITIES;
6742 ipw_add_cck_scan_rates(rates, IEEE80211_CCK_MODULATION,
6743 IEEE80211_CCK_DEFAULT_RATES_MASK);
6744 if (priv->ieee->modulation & IEEE80211_OFDM_MODULATION) {
6745 ipw_add_ofdm_scan_rates(rates, IEEE80211_CCK_MODULATION,
6746 IEEE80211_OFDM_DEFAULT_RATES_MASK);
6747 }
6748 break;
6749 }
6750
6751 return 0;
6752}
6753
6754static int ipw_config(struct ipw_priv *priv)
6755{
6756 int i;
6757 struct ipw_tx_power tx_power;
6758
6759 memset(&priv->sys_config, 0, sizeof(priv->sys_config));
6760 memset(&tx_power, 0, sizeof(tx_power));
6761
6762	 /* This is only called from ipw_up, which resets/reloads the firmware,
6763	 so we don't need to disable the card before we configure
6764	 it */
6765
6766 /* configure device for 'G' band */
6767 tx_power.ieee_mode = IPW_G_MODE;
6768 tx_power.num_channels = 11;
6769 for (i = 0; i < 11; i++) {
6770 tx_power.channels_tx_power[i].channel_number = i + 1;
6771 tx_power.channels_tx_power[i].tx_power = priv->tx_power;
6772 }
6773 if (ipw_send_tx_power(priv, &tx_power))
6774 goto error;
6775
6776 /* configure device to also handle 'B' band */
6777 tx_power.ieee_mode = IPW_B_MODE;
6778 if (ipw_send_tx_power(priv, &tx_power))
6779 goto error;
6780
6781 /* initialize adapter address */
6782 if (ipw_send_adapter_address(priv, priv->net_dev->dev_addr))
6783 goto error;
6784
6785 /* set basic system config settings */
6786 init_sys_config(&priv->sys_config);
6787 if (ipw_send_system_config(priv, &priv->sys_config))
6788 goto error;
6789
6790 init_supported_rates(priv, &priv->rates);
6791 if (ipw_send_supported_rates(priv, &priv->rates))
6792 goto error;
6793
6794 /* Set request-to-send threshold */
6795 if (priv->rts_threshold) {
6796 if (ipw_send_rts_threshold(priv, priv->rts_threshold))
6797 goto error;
6798 }
6799
6800 if (ipw_set_random_seed(priv))
6801 goto error;
6802
6803 /* final state transition to the RUN state */
6804 if (ipw_send_host_complete(priv))
6805 goto error;
6806
6807 /* If configured to try and auto-associate, kick off a scan */
6808 if ((priv->config & CFG_ASSOCIATE) && ipw_request_scan(priv))
6809 goto error;
6810
6811 return 0;
6812
6813 error:
6814 return -EIO;
6815}
6816
6817#define MAX_HW_RESTARTS 5
6818static int ipw_up(struct ipw_priv *priv)
6819{
6820 int rc, i;
6821
6822 if (priv->status & STATUS_EXIT_PENDING)
6823 return -EIO;
6824
6825 for (i = 0; i < MAX_HW_RESTARTS; i++ ) {
6826 /* Load the microcode, firmware, and eeprom.
6827 * Also start the clocks. */
6828 rc = ipw_load(priv);
6829 if (rc) {
6830 IPW_ERROR("Unable to load firmware: 0x%08X\n",
6831 rc);
6832 return rc;
6833 }
6834
6835 ipw_init_ordinals(priv);
6836 if (!(priv->config & CFG_CUSTOM_MAC))
6837 eeprom_parse_mac(priv, priv->mac_addr);
6838 memcpy(priv->net_dev->dev_addr, priv->mac_addr, ETH_ALEN);
6839
6840 if (priv->status & STATUS_RF_KILL_MASK)
6841 return 0;
6842
6843 rc = ipw_config(priv);
6844 if (!rc) {
6845 IPW_DEBUG_INFO("Configured device on count %i\n", i);
6846 priv->notif_missed_beacons = 0;
6847 netif_start_queue(priv->net_dev);
6848 return 0;
6849 } else {
6850 IPW_DEBUG_INFO("Device configuration failed: 0x%08X\n",
6851 rc);
6852 }
6853
6854 IPW_DEBUG_INFO("Failed to config device on retry %d of %d\n",
6855 i, MAX_HW_RESTARTS);
6856
6857 /* We had an error bringing up the hardware, so take it
6858 * all the way back down so we can try again */
6859 ipw_down(priv);
6860 }
6861
6862 /* tried to restart and config the device for as long as our
6863 * patience could withstand */
6864 IPW_ERROR("Unable to initialize device after %d attempts.\n",
6865 i);
6866 return -EIO;
6867}
6868
6869static void ipw_down(struct ipw_priv *priv)
6870{
6871 /* Attempt to disable the card */
6872#if 0
6873 ipw_send_card_disable(priv, 0);
6874#endif
6875
6876 /* tell the device to stop sending interrupts */
6877 ipw_disable_interrupts(priv);
6878
6879 /* Clear all bits but the RF Kill */
6880 priv->status &= STATUS_RF_KILL_MASK;
6881
6882 netif_carrier_off(priv->net_dev);
6883 netif_stop_queue(priv->net_dev);
6884
6885 ipw_stop_nic(priv);
6886}
6887
6888/* Called by register_netdev() */
6889static int ipw_net_init(struct net_device *dev)
6890{
6891 struct ipw_priv *priv = ieee80211_priv(dev);
6892
6893 if (priv->status & STATUS_RF_KILL_SW) {
6894 IPW_WARNING("Radio disabled by module parameter.\n");
6895 return 0;
6896 } else if (rf_kill_active(priv)) {
6897 IPW_WARNING("Radio Frequency Kill Switch is On:\n"
6898 "Kill switch must be turned off for "
6899 "wireless networking to work.\n");
6900 queue_delayed_work(priv->workqueue, &priv->rf_kill, 2 * HZ);
6901 return 0;
6902 }
6903
6904 if (ipw_up(priv))
6905 return -EIO;
6906
6907 return 0;
6908}
6909
6910/* PCI driver stuff */
6911static struct pci_device_id card_ids[] = {
6912 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2701, 0, 0, 0},
6913 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2702, 0, 0, 0},
6914 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2711, 0, 0, 0},
6915 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2712, 0, 0, 0},
6916 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2721, 0, 0, 0},
6917 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2722, 0, 0, 0},
6918 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2731, 0, 0, 0},
6919 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2732, 0, 0, 0},
6920 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2741, 0, 0, 0},
6921 {PCI_VENDOR_ID_INTEL, 0x1043, 0x103c, 0x2741, 0, 0, 0},
6922 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2742, 0, 0, 0},
6923 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2751, 0, 0, 0},
6924 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2752, 0, 0, 0},
6925 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2753, 0, 0, 0},
6926 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2754, 0, 0, 0},
6927 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2761, 0, 0, 0},
6928 {PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2762, 0, 0, 0},
6929 {PCI_VENDOR_ID_INTEL, 0x104f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
6930 {PCI_VENDOR_ID_INTEL, 0x4220, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* BG */
6931 {PCI_VENDOR_ID_INTEL, 0x4221, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* 2225BG */
6932 {PCI_VENDOR_ID_INTEL, 0x4223, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* ABG */
6933 {PCI_VENDOR_ID_INTEL, 0x4224, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, /* ABG */
6934
6935 /* required last entry */
6936 {0,}
6937};
6938
6939MODULE_DEVICE_TABLE(pci, card_ids);
6940
6941static struct attribute *ipw_sysfs_entries[] = {
6942 &dev_attr_rf_kill.attr,
6943 &dev_attr_direct_dword.attr,
6944 &dev_attr_indirect_byte.attr,
6945 &dev_attr_indirect_dword.attr,
6946 &dev_attr_mem_gpio_reg.attr,
6947 &dev_attr_command_event_reg.attr,
6948 &dev_attr_nic_type.attr,
6949 &dev_attr_status.attr,
6950 &dev_attr_cfg.attr,
6951 &dev_attr_dump_errors.attr,
6952 &dev_attr_dump_events.attr,
6953 &dev_attr_eeprom_delay.attr,
6954 &dev_attr_ucode_version.attr,
6955 &dev_attr_rtc.attr,
6956 NULL
6957};
6958
6959static struct attribute_group ipw_attribute_group = {
6960 .name = NULL, /* put in device directory */
6961 .attrs = ipw_sysfs_entries,
6962};
6963
6964static int ipw_pci_probe(struct pci_dev *pdev,
6965 const struct pci_device_id *ent)
6966{
6967 int err = 0;
6968 struct net_device *net_dev;
6969 void __iomem *base;
6970 u32 length, val;
6971 struct ipw_priv *priv;
6972 int band, modulation;
6973
6974 net_dev = alloc_ieee80211(sizeof(struct ipw_priv));
6975 if (net_dev == NULL) {
6976 err = -ENOMEM;
6977 goto out;
6978 }
6979
6980 priv = ieee80211_priv(net_dev);
6981 priv->ieee = netdev_priv(net_dev);
6982 priv->net_dev = net_dev;
6983 priv->pci_dev = pdev;
6984#ifdef CONFIG_IPW_DEBUG
6985 ipw_debug_level = debug;
6986#endif
6987 spin_lock_init(&priv->lock);
6988
6989 if (pci_enable_device(pdev)) {
6990 err = -ENODEV;
6991 goto out_free_ieee80211;
6992 }
6993
6994 pci_set_master(pdev);
6995
6996 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
6997 if (!err)
6998 err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
6999 if (err) {
7000 printk(KERN_WARNING DRV_NAME ": No suitable DMA available.\n");
7001 goto out_pci_disable_device;
7002 }
7003
7004 pci_set_drvdata(pdev, priv);
7005
7006 err = pci_request_regions(pdev, DRV_NAME);
7007 if (err)
7008 goto out_pci_disable_device;
7009
7010 /* We disable the RETRY_TIMEOUT register (0x41) to keep
7011 * PCI Tx retries from interfering with C3 CPU state */
7012 pci_read_config_dword(pdev, 0x40, &val);
7013 if ((val & 0x0000ff00) != 0)
7014 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
7015
7016 length = pci_resource_len(pdev, 0);
7017 priv->hw_len = length;
7018
7019 base = ioremap_nocache(pci_resource_start(pdev, 0), length);
7020 if (!base) {
7021 err = -ENODEV;
7022 goto out_pci_release_regions;
7023 }
7024
7025 priv->hw_base = base;
7026 IPW_DEBUG_INFO("pci_resource_len = 0x%08x\n", length);
7027 IPW_DEBUG_INFO("pci_resource_base = %p\n", base);
7028
7029 err = ipw_setup_deferred_work(priv);
7030 if (err) {
7031 IPW_ERROR("Unable to setup deferred work\n");
7032 goto out_iounmap;
7033 }
7034
7035 /* Initialize module parameter values here */
7036 if (ifname)
7037 strncpy(net_dev->name, ifname, IFNAMSIZ);
7038
7039 if (associate)
7040 priv->config |= CFG_ASSOCIATE;
7041 else
7042 IPW_DEBUG_INFO("Auto associate disabled.\n");
7043
7044 if (auto_create)
7045 priv->config |= CFG_ADHOC_CREATE;
7046 else
7047 IPW_DEBUG_INFO("Auto adhoc creation disabled.\n");
7048
7049 if (disable) {
7050 priv->status |= STATUS_RF_KILL_SW;
7051 IPW_DEBUG_INFO("Radio disabled.\n");
7052 }
7053
7054 if (channel != 0) {
7055 priv->config |= CFG_STATIC_CHANNEL;
7056 priv->channel = channel;
7057 IPW_DEBUG_INFO("Bind to static channel %d\n", channel);
7059 /* TODO: Validate that provided channel is in range */
7060 }
7061
7062 switch (mode) {
7063 case 1:
7064 priv->ieee->iw_mode = IW_MODE_ADHOC;
7065 break;
7066#ifdef CONFIG_IPW_PROMISC
7067 case 2:
7068 priv->ieee->iw_mode = IW_MODE_MONITOR;
7069 break;
7070#endif
7071 default:
7072 case 0:
7073 priv->ieee->iw_mode = IW_MODE_INFRA;
7074 break;
7075 }
7076
7077 if ((priv->pci_dev->device == 0x4223) ||
7078 (priv->pci_dev->device == 0x4224)) {
7079 printk(KERN_INFO DRV_NAME
7080 ": Detected Intel PRO/Wireless 2915ABG Network "
7081 "Connection\n");
7082 priv->ieee->abg_ture = 1;
7083 band = IEEE80211_52GHZ_BAND | IEEE80211_24GHZ_BAND;
7084 modulation = IEEE80211_OFDM_MODULATION |
7085 IEEE80211_CCK_MODULATION;
7086 priv->adapter = IPW_2915ABG;
7087 priv->ieee->mode = IEEE_A|IEEE_G|IEEE_B;
7088 } else {
7089 if (priv->pci_dev->device == 0x4221)
7090 printk(KERN_INFO DRV_NAME
7091 ": Detected Intel PRO/Wireless 2225BG Network "
7092 "Connection\n");
7093 else
7094 printk(KERN_INFO DRV_NAME
7095 ": Detected Intel PRO/Wireless 2200BG Network "
7096 "Connection\n");
7097
7098 priv->ieee->abg_ture = 0;
7099 band = IEEE80211_24GHZ_BAND;
7100 modulation = IEEE80211_OFDM_MODULATION |
7101 IEEE80211_CCK_MODULATION;
7102 priv->adapter = IPW_2200BG;
7103 priv->ieee->mode = IEEE_G|IEEE_B;
7104 }
7105
7106 priv->ieee->freq_band = band;
7107 priv->ieee->modulation = modulation;
7108
7109 priv->rates_mask = IEEE80211_DEFAULT_RATES_MASK;
7110
7111 priv->missed_beacon_threshold = IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT;
7112 priv->roaming_threshold = IPW_MB_ROAMING_THRESHOLD_DEFAULT;
7113
7114 priv->rts_threshold = DEFAULT_RTS_THRESHOLD;
7115
7116 /* If power management is turned on, default to AC mode */
7117 priv->power_mode = IPW_POWER_AC;
7118 priv->tx_power = IPW_DEFAULT_TX_POWER;
7119
7120 err = request_irq(pdev->irq, ipw_isr, SA_SHIRQ, DRV_NAME,
7121 priv);
7122 if (err) {
7123 IPW_ERROR("Error allocating IRQ %d\n", pdev->irq);
7124 goto out_destroy_workqueue;
7125 }
7126
7127 SET_MODULE_OWNER(net_dev);
7128 SET_NETDEV_DEV(net_dev, &pdev->dev);
7129
7130 priv->ieee->hard_start_xmit = ipw_net_hard_start_xmit;
7131 priv->ieee->set_security = shim__set_security;
7132
7133 net_dev->open = ipw_net_open;
7134 net_dev->stop = ipw_net_stop;
7135 net_dev->init = ipw_net_init;
7136 net_dev->get_stats = ipw_net_get_stats;
7137 net_dev->set_multicast_list = ipw_net_set_multicast_list;
7138 net_dev->set_mac_address = ipw_net_set_mac_address;
7139 net_dev->get_wireless_stats = ipw_get_wireless_stats;
7140 net_dev->wireless_handlers = &ipw_wx_handler_def;
7141 net_dev->ethtool_ops = &ipw_ethtool_ops;
7142 net_dev->irq = pdev->irq;
7143 net_dev->base_addr = (unsigned long )priv->hw_base;
7144 net_dev->mem_start = pci_resource_start(pdev, 0);
7145 net_dev->mem_end = net_dev->mem_start + pci_resource_len(pdev, 0) - 1;
7146
7147 err = sysfs_create_group(&pdev->dev.kobj, &ipw_attribute_group);
7148 if (err) {
7149 IPW_ERROR("failed to create sysfs device attributes\n");
7150 goto out_release_irq;
7151 }
7152
7153 err = register_netdev(net_dev);
7154 if (err) {
7155 IPW_ERROR("failed to register network device\n");
7156 goto out_remove_group;
7157 }
7158
7159 return 0;
7160
7161 out_remove_group:
7162 sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
7163 out_release_irq:
7164 free_irq(pdev->irq, priv);
7165 out_destroy_workqueue:
7166 destroy_workqueue(priv->workqueue);
7167 priv->workqueue = NULL;
7168 out_iounmap:
7169 iounmap(priv->hw_base);
7170 out_pci_release_regions:
7171 pci_release_regions(pdev);
7172 out_pci_disable_device:
7173 pci_disable_device(pdev);
7174 pci_set_drvdata(pdev, NULL);
7175 out_free_ieee80211:
7176 free_ieee80211(priv->net_dev);
7177 out:
7178 return err;
7179}
7180
7181static void ipw_pci_remove(struct pci_dev *pdev)
7182{
7183 struct ipw_priv *priv = pci_get_drvdata(pdev);
7184 if (!priv)
7185 return;
7186
7187 priv->status |= STATUS_EXIT_PENDING;
7188
7189 sysfs_remove_group(&pdev->dev.kobj, &ipw_attribute_group);
7190
7191 ipw_down(priv);
7192
7193 unregister_netdev(priv->net_dev);
7194
7195 if (priv->rxq) {
7196 ipw_rx_queue_free(priv, priv->rxq);
7197 priv->rxq = NULL;
7198 }
7199 ipw_tx_queue_free(priv);
7200
7201 /* ipw_down will ensure that there is no more pending work
7202	 * in the workqueue, so we can safely remove it now. */
7203 if (priv->workqueue) {
7204 cancel_delayed_work(&priv->adhoc_check);
7205 cancel_delayed_work(&priv->gather_stats);
7206 cancel_delayed_work(&priv->request_scan);
7207 cancel_delayed_work(&priv->rf_kill);
7208 cancel_delayed_work(&priv->scan_check);
7209 destroy_workqueue(priv->workqueue);
7210 priv->workqueue = NULL;
7211 }
7212
7213 free_irq(pdev->irq, priv);
7214 iounmap(priv->hw_base);
7215 pci_release_regions(pdev);
7216 pci_disable_device(pdev);
7217 pci_set_drvdata(pdev, NULL);
7218 free_ieee80211(priv->net_dev);
7219
7220#ifdef CONFIG_PM
7221 if (fw_loaded) {
7222 release_firmware(bootfw);
7223 release_firmware(ucode);
7224 release_firmware(firmware);
7225 fw_loaded = 0;
7226 }
7227#endif
7228}
7229
7230
7231#ifdef CONFIG_PM
7232static int ipw_pci_suspend(struct pci_dev *pdev, u32 state)
7233{
7234 struct ipw_priv *priv = pci_get_drvdata(pdev);
7235 struct net_device *dev = priv->net_dev;
7236
7237 printk(KERN_INFO "%s: Going into suspend...\n", dev->name);
7238
7239 /* Take down the device; powers it off, etc. */
7240 ipw_down(priv);
7241
7242 /* Remove the PRESENT state of the device */
7243 netif_device_detach(dev);
7244
7245#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10)
7246 pci_save_state(pdev, priv->pm_state);
7247#else
7248 pci_save_state(pdev);
7249#endif
7250 pci_disable_device(pdev);
7251 pci_set_power_state(pdev, state);
7252
7253 return 0;
7254}
7255
7256static int ipw_pci_resume(struct pci_dev *pdev)
7257{
7258 struct ipw_priv *priv = pci_get_drvdata(pdev);
7259 struct net_device *dev = priv->net_dev;
7260 u32 val;
7261
7262 printk(KERN_INFO "%s: Coming out of suspend...\n", dev->name);
7263
7264 pci_set_power_state(pdev, 0);
7265 pci_enable_device(pdev);
7266#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10)
7267 pci_restore_state(pdev, priv->pm_state);
7268#else
7269 pci_restore_state(pdev);
7270#endif
7271 /*
7272 * Suspend/Resume resets the PCI configuration space, so we have to
7273 * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries
7274 * from interfering with C3 CPU state. pci_restore_state won't help
7275	 * here since it only restores the first 64 bytes of the PCI config header.
7276 */
7277 pci_read_config_dword(pdev, 0x40, &val);
7278 if ((val & 0x0000ff00) != 0)
7279 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
7280
7281 /* Set the device back into the PRESENT state; this will also wake
7282	 * the queue if needed */
7283 netif_device_attach(dev);
7284
7285 /* Bring the device back up */
7286 queue_work(priv->workqueue, &priv->up);
7287
7288 return 0;
7289}
7290#endif
7291
7292/* driver initialization stuff */
7293static struct pci_driver ipw_driver = {
7294 .name = DRV_NAME,
7295 .id_table = card_ids,
7296 .probe = ipw_pci_probe,
7297 .remove = __devexit_p(ipw_pci_remove),
7298#ifdef CONFIG_PM
7299 .suspend = ipw_pci_suspend,
7300 .resume = ipw_pci_resume,
7301#endif
7302};
7303
7304static int __init ipw_init(void)
7305{
7306 int ret;
7307
7308 printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION ", " DRV_VERSION "\n");
7309 printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n");
7310
7311 ret = pci_module_init(&ipw_driver);
7312 if (ret) {
7313 IPW_ERROR("Unable to initialize PCI module\n");
7314 return ret;
7315 }
7316
7317 ret = driver_create_file(&ipw_driver.driver,
7318 &driver_attr_debug_level);
7319 if (ret) {
7320 IPW_ERROR("Unable to create driver sysfs file\n");
7321 pci_unregister_driver(&ipw_driver);
7322 return ret;
7323 }
7324
7325 return ret;
7326}
7327
7328static void __exit ipw_exit(void)
7329{
7330 driver_remove_file(&ipw_driver.driver, &driver_attr_debug_level);
7331 pci_unregister_driver(&ipw_driver);
7332}
7333
7334module_param(disable, int, 0444);
7335MODULE_PARM_DESC(disable, "manually disable the radio (default 0 [radio on])");
7336
7337module_param(associate, int, 0444);
7338MODULE_PARM_DESC(associate, "auto associate when scanning (default on)");
7339
7340module_param(auto_create, int, 0444);
7341MODULE_PARM_DESC(auto_create, "auto create adhoc network (default on)");
7342
7343module_param(debug, int, 0444);
7344MODULE_PARM_DESC(debug, "debug output mask");
7345
7346module_param(channel, int, 0444);
7347MODULE_PARM_DESC(channel, "channel to limit associate to (default 0 [ANY])");
7348
7349module_param(ifname, charp, 0444);
7350MODULE_PARM_DESC(ifname, "network device name (default eth%d)");
7351
7352#ifdef CONFIG_IPW_PROMISC
7353module_param(mode, int, 0444);
7354MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS,2=Monitor)");
7355#else
7356module_param(mode, int, 0444);
7357MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS)");
7358#endif
7359
7360module_exit(ipw_exit);
7361module_init(ipw_init);
diff --git a/drivers/net/wireless/ipw2200.h b/drivers/net/wireless/ipw2200.h
new file mode 100644
index 000000000000..4e8b75e7962a
--- /dev/null
+++ b/drivers/net/wireless/ipw2200.h
@@ -0,0 +1,1770 @@
1/******************************************************************************
2
3 Copyright(c) 2003 - 2004 Intel Corporation. All rights reserved.
4
5 This program is free software; you can redistribute it and/or modify it
6 under the terms of version 2 of the GNU General Public License as
7 published by the Free Software Foundation.
8
9 This program is distributed in the hope that it will be useful, but WITHOUT
10 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 more details.
13
14 You should have received a copy of the GNU General Public License along with
15 this program; if not, write to the Free Software Foundation, Inc., 59
16 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17
18 The full GNU General Public License is included in this distribution in the
19 file called LICENSE.
20
21 Contact Information:
22 James P. Ketrenos <ipw2100-admin@linux.intel.com>
23 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24
25******************************************************************************/
26
27#ifndef __ipw2200_h__
28#define __ipw2200_h__
29
30#define WEXT_USECHANNELS 1
31
32#include <linux/module.h>
33#include <linux/moduleparam.h>
34#include <linux/config.h>
35#include <linux/init.h>
36
37#include <linux/version.h>
38#include <linux/pci.h>
39#include <linux/netdevice.h>
40#include <linux/ethtool.h>
41#include <linux/skbuff.h>
42#include <linux/etherdevice.h>
43#include <linux/delay.h>
44#include <linux/random.h>
45
46#include <linux/firmware.h>
47#include <linux/wireless.h>
48#include <asm/io.h>
49
50#include <net/ieee80211.h>
51
52#define DRV_NAME "ipw2200"
53
54#include <linux/workqueue.h>
55
56#ifndef IRQ_NONE
57typedef void irqreturn_t;
58#define IRQ_NONE
59#define IRQ_HANDLED
60#define IRQ_RETVAL(x)
61#endif
62
63#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9) )
64#define __iomem
65#endif
66
67#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,5) )
68#define pci_dma_sync_single_for_cpu pci_dma_sync_single
69#define pci_dma_sync_single_for_device pci_dma_sync_single
70#endif
71
72#ifndef HAVE_FREE_NETDEV
73#define free_netdev(x) kfree(x)
74#endif
75
76/* Authentication and Association States */
77enum connection_manager_assoc_states
78{
79 CMAS_INIT = 0,
80 CMAS_TX_AUTH_SEQ_1,
81 CMAS_RX_AUTH_SEQ_2,
82 CMAS_AUTH_SEQ_1_PASS,
83 CMAS_AUTH_SEQ_1_FAIL,
84 CMAS_TX_AUTH_SEQ_3,
85 CMAS_RX_AUTH_SEQ_4,
86 CMAS_AUTH_SEQ_2_PASS,
87 CMAS_AUTH_SEQ_2_FAIL,
88 CMAS_AUTHENTICATED,
89 CMAS_TX_ASSOC,
90 CMAS_RX_ASSOC_RESP,
91 CMAS_ASSOCIATED,
92 CMAS_LAST
93};
94
95
96#define IPW_NORMAL 0
97#define IPW_NOWAIT 0
98#define IPW_WAIT (1<<0)
99#define IPW_QUIET (1<<1)
100#define IPW_ROAMING (1<<2)
101
102#define IPW_POWER_MODE_CAM 0x00 //(always on)
103#define IPW_POWER_INDEX_1 0x01
104#define IPW_POWER_INDEX_2 0x02
105#define IPW_POWER_INDEX_3 0x03
106#define IPW_POWER_INDEX_4 0x04
107#define IPW_POWER_INDEX_5 0x05
108#define IPW_POWER_AC 0x06
109#define IPW_POWER_BATTERY 0x07
110#define IPW_POWER_LIMIT 0x07
111#define IPW_POWER_MASK 0x0F
112#define IPW_POWER_ENABLED 0x10
113#define IPW_POWER_LEVEL(x) ((x) & IPW_POWER_MASK)
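/*
 * Editor's note: a sketch of how the driver appears to use these bits (see
 * ipw_wx_set_powermode()/ipw_wx_get_powermode() in ipw2200.c); shown for
 * illustration only:
 *
 *   priv->power_mode = IPW_POWER_ENABLED | IPW_POWER_BATTERY;   (0x17)
 *   IPW_POWER_LEVEL(priv->power_mode)     -> 0x07, the battery profile
 *   priv->power_mode & IPW_POWER_ENABLED  -> non-zero, power management on
 *
 * Clearing IPW_POWER_ENABLED (the " OFF" case reported by get_power) keeps
 * the selected level but leaves power management disabled.
 */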
114
115#define IPW_CMD_HOST_COMPLETE 2
116#define IPW_CMD_POWER_DOWN 4
117#define IPW_CMD_SYSTEM_CONFIG 6
118#define IPW_CMD_MULTICAST_ADDRESS 7
119#define IPW_CMD_SSID 8
120#define IPW_CMD_ADAPTER_ADDRESS 11
121#define IPW_CMD_PORT_TYPE 12
122#define IPW_CMD_RTS_THRESHOLD 15
123#define IPW_CMD_FRAG_THRESHOLD 16
124#define IPW_CMD_POWER_MODE 17
125#define IPW_CMD_WEP_KEY 18
126#define IPW_CMD_TGI_TX_KEY 19
127#define IPW_CMD_SCAN_REQUEST 20
128#define IPW_CMD_ASSOCIATE 21
129#define IPW_CMD_SUPPORTED_RATES 22
130#define IPW_CMD_SCAN_ABORT 23
131#define IPW_CMD_TX_FLUSH 24
132#define IPW_CMD_QOS_PARAMETERS 25
133#define IPW_CMD_SCAN_REQUEST_EXT 26
134#define IPW_CMD_DINO_CONFIG 30
135#define IPW_CMD_RSN_CAPABILITIES 31
136#define IPW_CMD_RX_KEY 32
137#define IPW_CMD_CARD_DISABLE 33
138#define IPW_CMD_SEED_NUMBER 34
139#define IPW_CMD_TX_POWER 35
140#define IPW_CMD_COUNTRY_INFO 36
141#define IPW_CMD_AIRONET_INFO 37
142#define IPW_CMD_AP_TX_POWER 38
143#define IPW_CMD_CCKM_INFO 39
144#define IPW_CMD_CCX_VER_INFO 40
145#define IPW_CMD_SET_CALIBRATION 41
146#define IPW_CMD_SENSITIVITY_CALIB 42
147#define IPW_CMD_RETRY_LIMIT 51
148#define IPW_CMD_IPW_PRE_POWER_DOWN 58
149#define IPW_CMD_VAP_BEACON_TEMPLATE 60
150#define IPW_CMD_VAP_DTIM_PERIOD 61
151#define IPW_CMD_EXT_SUPPORTED_RATES 62
152#define IPW_CMD_VAP_LOCAL_TX_PWR_CONSTRAINT 63
153#define IPW_CMD_VAP_QUIET_INTERVALS 64
154#define IPW_CMD_VAP_CHANNEL_SWITCH 65
155#define IPW_CMD_VAP_MANDATORY_CHANNELS 66
156#define IPW_CMD_VAP_CELL_PWR_LIMIT 67
157#define IPW_CMD_VAP_CF_PARAM_SET 68
158#define IPW_CMD_VAP_SET_BEACONING_STATE 69
159#define IPW_CMD_MEASUREMENT 80
160#define IPW_CMD_POWER_CAPABILITY 81
161#define IPW_CMD_SUPPORTED_CHANNELS 82
162#define IPW_CMD_TPC_REPORT 83
163#define IPW_CMD_WME_INFO 84
164#define IPW_CMD_PRODUCTION_COMMAND 85
165#define IPW_CMD_LINKSYS_EOU_INFO 90
166
167#define RFD_SIZE 4
168#define NUM_TFD_CHUNKS 6
169
170#define TX_QUEUE_SIZE 32
171#define RX_QUEUE_SIZE 32
172
173#define DINO_CMD_WEP_KEY 0x08
174#define DINO_CMD_TX 0x0B
175#define DCT_ANTENNA_A 0x01
176#define DCT_ANTENNA_B 0x02
177
178#define IPW_A_MODE 0
179#define IPW_B_MODE 1
180#define IPW_G_MODE 2
181
182/*
183 * TX Queue Flag Definitions
184 */
185
186/* abort attempt if mgmt frame is rx'd */
187#define DCT_FLAG_ABORT_MGMT 0x01
188
189/* require CTS */
190#define DCT_FLAG_CTS_REQUIRED 0x02
191
192/* use short preamble */
193#define DCT_FLAG_SHORT_PREMBL 0x04
194
195/* RTS/CTS first */
196#define DCT_FLAG_RTS_REQD 0x08
197
198/* dont calculate duration field */
199#define DCT_FLAG_DUR_SET 0x10
200
201/* even if MAC WEP set (allows pre-encrypt) */
202#define DCT_FLAG_NO_WEP 0x20
203
204/* overwrite TSF field */
205#define DCT_FLAG_TSF_REQD 0x40
206
207/* ACK rx is expected to follow */
208#define DCT_FLAG_ACK_REQD 0x80
209
210#define DCT_FLAG_EXT_MODE_CCK 0x01
211#define DCT_FLAG_EXT_MODE_OFDM 0x00
212
213
214#define TX_RX_TYPE_MASK 0xFF
215#define TX_FRAME_TYPE 0x00
216#define TX_HOST_COMMAND_TYPE 0x01
217#define RX_FRAME_TYPE 0x09
218#define RX_HOST_NOTIFICATION_TYPE 0x03
219#define RX_HOST_CMD_RESPONSE_TYPE 0x04
220#define RX_TX_FRAME_RESPONSE_TYPE 0x05
221#define TFD_NEED_IRQ_MASK 0x04
222
223#define HOST_CMD_DINO_CONFIG 30
224
225#define HOST_NOTIFICATION_STATUS_ASSOCIATED 10
226#define HOST_NOTIFICATION_STATUS_AUTHENTICATE 11
227#define HOST_NOTIFICATION_STATUS_SCAN_CHANNEL_RESULT 12
228#define HOST_NOTIFICATION_STATUS_SCAN_COMPLETED 13
229#define HOST_NOTIFICATION_STATUS_FRAG_LENGTH 14
230#define HOST_NOTIFICATION_STATUS_LINK_DETERIORATION 15
231#define HOST_NOTIFICATION_DINO_CONFIG_RESPONSE 16
232#define HOST_NOTIFICATION_STATUS_BEACON_STATE 17
233#define HOST_NOTIFICATION_STATUS_TGI_TX_KEY 18
234#define HOST_NOTIFICATION_TX_STATUS 19
235#define HOST_NOTIFICATION_CALIB_KEEP_RESULTS 20
236#define HOST_NOTIFICATION_MEASUREMENT_STARTED 21
237#define HOST_NOTIFICATION_MEASUREMENT_ENDED 22
238#define HOST_NOTIFICATION_CHANNEL_SWITCHED 23
239#define HOST_NOTIFICATION_RX_DURING_QUIET_PERIOD 24
240#define HOST_NOTIFICATION_NOISE_STATS 25
241#define HOST_NOTIFICATION_S36_MEASUREMENT_ACCEPTED 30
242#define HOST_NOTIFICATION_S36_MEASUREMENT_REFUSED 31
243
244#define HOST_NOTIFICATION_STATUS_BEACON_MISSING 1
245#define IPW_MB_DISASSOCIATE_THRESHOLD_DEFAULT 24
246#define IPW_MB_ROAMING_THRESHOLD_DEFAULT 8
247#define IPW_REAL_RATE_RX_PACKET_THRESHOLD 300
248
249#define MACADRR_BYTE_LEN 6
250
251#define DCR_TYPE_AP 0x01
252#define DCR_TYPE_WLAP 0x02
253#define DCR_TYPE_MU_ESS 0x03
254#define DCR_TYPE_MU_IBSS 0x04
255#define DCR_TYPE_MU_PIBSS 0x05
256#define DCR_TYPE_SNIFFER 0x06
257#define DCR_TYPE_MU_BSS DCR_TYPE_MU_ESS
258
259/**
260 * Generic queue structure
261 *
262 * Contains common data for Rx and Tx queues
263 */
264struct clx2_queue {
265 int n_bd; /**< number of BDs in this queue */
266 int first_empty; /**< 1-st empty entry (index) */
267 int last_used; /**< last used entry (index) */
268 u32 reg_w; /**< 'write' reg (queue head), addr in domain 1 */
269 u32 reg_r; /**< 'read' reg (queue tail), addr in domain 1 */
270 dma_addr_t dma_addr; /**< physical addr for BD's */
271 int low_mark; /**< low watermark, resume queue if free space more than this */
272 int high_mark; /**< high watermark, stop queue if free space less than this */
273} __attribute__ ((packed));
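/* Editor's note: first_empty/last_used implement a conventional ring
 * buffer; ipw_tx_skb() advances first_empty with ipw_queue_inc_wrap() and
 * throttles the net queue against high_mark.  The helpers below are only a
 * sketch of the arithmetic those call sites imply -- the real
 * ipw_queue_inc_wrap()/ipw_queue_space() live elsewhere in ipw2200.c and
 * may differ in detail. */
static inline int example_queue_inc_wrap(int index, int n_bd)
{
	/* step to the next BD slot, wrapping at the end of the ring */
	return (index == n_bd - 1) ? 0 : index + 1;
}

static inline int example_queue_space(const struct clx2_queue *q)
{
	int s = q->last_used - q->first_empty;

	if (s <= 0)
		s += q->n_bd;	/* wrapped: count the long way around */
	s -= 2;			/* keep a gap so "full" != "empty" */
	return s < 0 ? 0 : s;
}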
274
275struct machdr32
276{
277 u16 frame_ctl;
278 u16 duration; // watch out for endians!
279 u8 addr1[ MACADRR_BYTE_LEN ];
280 u8 addr2[ MACADRR_BYTE_LEN ];
281 u8 addr3[ MACADRR_BYTE_LEN ];
282 u16 seq_ctrl; // more endians!
283 u8 addr4[ MACADRR_BYTE_LEN ];
284 u16 qos_ctrl;
285} __attribute__ ((packed)) ;
286
287struct machdr30
288{
289 u16 frame_ctl;
290 u16 duration; // watch out for endians!
291 u8 addr1[ MACADRR_BYTE_LEN ];
292 u8 addr2[ MACADRR_BYTE_LEN ];
293 u8 addr3[ MACADRR_BYTE_LEN ];
294 u16 seq_ctrl; // more endians!
295 u8 addr4[ MACADRR_BYTE_LEN ];
296} __attribute__ ((packed)) ;
297
298struct machdr26
299{
300 u16 frame_ctl;
301 u16 duration; // watch out for endians!
302 u8 addr1[ MACADRR_BYTE_LEN ];
303 u8 addr2[ MACADRR_BYTE_LEN ];
304 u8 addr3[ MACADRR_BYTE_LEN ];
305 u16 seq_ctrl; // more endians!
306 u16 qos_ctrl;
307} __attribute__ ((packed)) ;
308
309struct machdr24
310{
311 u16 frame_ctl;
312 u16 duration; // watch out for endians!
313 u8 addr1[ MACADRR_BYTE_LEN ];
314 u8 addr2[ MACADRR_BYTE_LEN ];
315 u8 addr3[ MACADRR_BYTE_LEN ];
316 u16 seq_ctrl; // more endians!
317} __attribute__ ((packed)) ;
318
319// TX TFD with 32 byte MAC Header
320struct tx_tfd_32
321{
322 struct machdr32 mchdr; // 32
323 u32 uivplaceholder[2]; // 8
324} __attribute__ ((packed)) ;
325
326// TX TFD with 30 byte MAC Header
327struct tx_tfd_30
328{
329 struct machdr30 mchdr; // 30
330 u8 reserved[2]; // 2
331 u32 uivplaceholder[2]; // 8
332} __attribute__ ((packed)) ;
333
334// tx tfd with 26 byte mac header
335struct tx_tfd_26
336{
337 struct machdr26 mchdr; // 26
338 u8 reserved1[2]; // 2
339 u32 uivplaceholder[2]; // 8
340 u8 reserved2[4]; // 4
341} __attribute__ ((packed)) ;
342
343// tx tfd with 24 byte mac header
344struct tx_tfd_24
345{
346 struct machdr24 mchdr; // 24
347 u32 uivplaceholder[2]; // 8
348 u8 reserved[8]; // 8
349} __attribute__ ((packed)) ;
350
351
352#define DCT_WEP_KEY_FIELD_LENGTH 16
353
354struct tfd_command
355{
356 u8 index;
357 u8 length;
358 u16 reserved;
359 u8 payload[0];
360} __attribute__ ((packed)) ;
361
362struct tfd_data {
363 /* Header */
364 u32 work_area_ptr;
365 u8 station_number; /* 0 for BSS */
366 u8 reserved1;
367 u16 reserved2;
368
369 /* Tx Parameters */
370 u8 cmd_id;
371 u8 seq_num;
372 u16 len;
373 u8 priority;
374 u8 tx_flags;
375 u8 tx_flags_ext;
376 u8 key_index;
377 u8 wepkey[DCT_WEP_KEY_FIELD_LENGTH];
378 u8 rate;
379 u8 antenna;
380 u16 next_packet_duration;
381 u16 next_frag_len;
382	 u16 back_off_counter;	/* txop */
383 u8 retrylimit;
384 u16 cwcurrent;
385 u8 reserved3;
386
387 /* 802.11 MAC Header */
388 union
389 {
390 struct tx_tfd_24 tfd_24;
391 struct tx_tfd_26 tfd_26;
392 struct tx_tfd_30 tfd_30;
393 struct tx_tfd_32 tfd_32;
394 } tfd;
395
396 /* Payload DMA info */
397 u32 num_chunks;
398 u32 chunk_ptr[NUM_TFD_CHUNKS];
399 u16 chunk_len[NUM_TFD_CHUNKS];
400} __attribute__ ((packed));
401
402struct txrx_control_flags
403{
404 u8 message_type;
405 u8 rx_seq_num;
406 u8 control_bits;
407 u8 reserved;
408} __attribute__ ((packed));
409
410#define TFD_SIZE 128
411#define TFD_CMD_IMMEDIATE_PAYLOAD_LENGTH (TFD_SIZE - sizeof(struct txrx_control_flags))
412
413struct tfd_frame
414{
415 struct txrx_control_flags control_flags;
416 union {
417 struct tfd_data data;
418 struct tfd_command cmd;
419 u8 raw[TFD_CMD_IMMEDIATE_PAYLOAD_LENGTH];
420 } u;
421} __attribute__ ((packed)) ;
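/* Editor's note: the raw[] member pads the union so that every transmit
 * frame descriptor occupies exactly TFD_SIZE (128) bytes, which the bd[]
 * ring in clx2_tx_queue below relies on.  An optional, self-contained
 * compile-time check (illustrative only, not part of the original patch): */
typedef char example_tfd_frame_is_tfd_size
		[(sizeof(struct tfd_frame) == TFD_SIZE) ? 1 : -1];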
422
423typedef void destructor_func(const void*);
424
425/**
426 * Tx Queue for DMA. Queue consists of circular buffer of
427 * BD's and required locking structures.
428 */
429struct clx2_tx_queue {
430 struct clx2_queue q;
431 struct tfd_frame* bd;
432 struct ieee80211_txb **txb;
433};
434
435/*
436 * RX related structures and functions
437 */
438#define RX_FREE_BUFFERS 32
439#define RX_LOW_WATERMARK 8
440
441#define SUP_RATE_11A_MAX_NUM_CHANNELS (8)
442#define SUP_RATE_11B_MAX_NUM_CHANNELS (4)
443#define SUP_RATE_11G_MAX_NUM_CHANNELS (12)
444
445// Used to pass the number of successes and failures per rate to the driver
446struct rate_histogram
447{
448 union {
449 u32 a[SUP_RATE_11A_MAX_NUM_CHANNELS];
450 u32 b[SUP_RATE_11B_MAX_NUM_CHANNELS];
451 u32 g[SUP_RATE_11G_MAX_NUM_CHANNELS];
452 } success;
453 union {
454 u32 a[SUP_RATE_11A_MAX_NUM_CHANNELS];
455 u32 b[SUP_RATE_11B_MAX_NUM_CHANNELS];
456 u32 g[SUP_RATE_11G_MAX_NUM_CHANNELS];
457 } failed;
458} __attribute__ ((packed));
459
460/* statistics command response */
461struct ipw_cmd_stats {
462 u8 cmd_id;
463 u8 seq_num;
464 u16 good_sfd;
465 u16 bad_plcp;
466 u16 wrong_bssid;
467 u16 valid_mpdu;
468 u16 bad_mac_header;
469 u16 reserved_frame_types;
470 u16 rx_ina;
471 u16 bad_crc32;
472 u16 invalid_cts;
473 u16 invalid_acks;
474 u16 long_distance_ina_fina;
475 u16 dsp_silence_unreachable;
476 u16 accumulated_rssi;
477 u16 rx_ovfl_frame_tossed;
478 u16 rssi_silence_threshold;
479 u16 rx_ovfl_frame_supplied;
480 u16 last_rx_frame_signal;
481 u16 last_rx_frame_noise;
482 u16 rx_autodetec_no_ofdm;
483 u16 rx_autodetec_no_barker;
484 u16 reserved;
485} __attribute__ ((packed));
486
487struct notif_channel_result {
488 u8 channel_num;
489 struct ipw_cmd_stats stats;
490 u8 uReserved;
491} __attribute__ ((packed));
492
493struct notif_scan_complete {
494 u8 scan_type;
495 u8 num_channels;
496 u8 status;
497 u8 reserved;
498} __attribute__ ((packed));
499
500struct notif_frag_length {
501 u16 frag_length;
502 u16 reserved;
503} __attribute__ ((packed));
504
505struct notif_beacon_state {
506 u32 state;
507 u32 number;
508} __attribute__ ((packed));
509
510struct notif_tgi_tx_key {
511 u8 key_state;
512 u8 security_type;
513 u8 station_index;
514 u8 reserved;
515} __attribute__ ((packed));
516
517struct notif_link_deterioration {
518 struct ipw_cmd_stats stats;
519 u8 rate;
520 u8 modulation;
521 struct rate_histogram histogram;
522 u8 reserved1;
523 u16 reserved2;
524} __attribute__ ((packed));
525
526struct notif_association {
527 u8 state;
528} __attribute__ ((packed));
529
530struct notif_authenticate {
531 u8 state;
532 struct machdr24 addr;
533 u16 status;
534} __attribute__ ((packed));
535
536struct temperature
537{
538 s32 measured;
539 s32 active;
540} __attribute__ ((packed));
541
542struct notif_calibration {
543 u8 data[104];
544} __attribute__ ((packed));
545
546struct notif_noise {
547 u32 value;
548} __attribute__ ((packed));
549
550struct ipw_rx_notification {
551 u8 reserved[8];
552 u8 subtype;
553 u8 flags;
554 u16 size;
555 union {
556 struct notif_association assoc;
557 struct notif_authenticate auth;
558 struct notif_channel_result channel_result;
559 struct notif_scan_complete scan_complete;
560 struct notif_frag_length frag_len;
561 struct notif_beacon_state beacon_state;
562 struct notif_tgi_tx_key tgi_tx_key;
563 struct notif_link_deterioration link_deterioration;
564 struct notif_calibration calibration;
565 struct notif_noise noise;
566 u8 raw[0];
567 } u;
568} __attribute__ ((packed));
569
570struct ipw_rx_frame {
571 u32 reserved1;
572 u8 parent_tsf[4]; // fw_use[0] is boolean for OUR_TSF_IS_GREATER
573 u8 received_channel; // The channel that this frame was received on.
574 // Note that for .11b this does not have to be
575	 // the same as the channel it was sent on.
576 // Filled by LMAC
577 u8 frameStatus;
578 u8 rate;
579 u8 rssi;
580 u8 agc;
581 u8 rssi_dbm;
582 u16 signal;
583 u16 noise;
584 u8 antennaAndPhy;
585 u8 control; // control bit should be on in bg
586 u8 rtscts_rate; // rate of rts or cts (in rts cts sequence rate
587 // is identical)
588 u8 rtscts_seen; // 0x1 RTS seen ; 0x2 CTS seen
589 u16 length;
590 u8 data[0];
591} __attribute__ ((packed));
592
593struct ipw_rx_header {
594 u8 message_type;
595 u8 rx_seq_num;
596 u8 control_bits;
597 u8 reserved;
598} __attribute__ ((packed));
599
600struct ipw_rx_packet
601{
602 struct ipw_rx_header header;
603 union {
604 struct ipw_rx_frame frame;
605 struct ipw_rx_notification notification;
606 } u;
607} __attribute__ ((packed));
608
609#define IPW_RX_NOTIFICATION_SIZE (sizeof(struct ipw_rx_header) + 12)
610#define IPW_RX_FRAME_SIZE (sizeof(struct ipw_rx_header) + \
611	 sizeof(struct ipw_rx_frame))
612
613struct ipw_rx_mem_buffer {
614 dma_addr_t dma_addr;
615 struct ipw_rx_buffer *rxb;
616 struct sk_buff *skb;
617 struct list_head list;
618}; /* Not transferred over network, so not __attribute__ ((packed)) */
619
620struct ipw_rx_queue {
621 struct ipw_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS];
622 struct ipw_rx_mem_buffer *queue[RX_QUEUE_SIZE];
623 u32 processed; /* Internal index to last handled Rx packet */
624 u32 read; /* Shared index to newest available Rx buffer */
625 u32 write; /* Shared index to oldest written Rx packet */
626 u32 free_count;/* Number of pre-allocated buffers in rx_free */
627 /* Each of these lists is used as a FIFO for ipw_rx_mem_buffers */
628	struct list_head rx_free;	/* Each entry owns an SKB */
629 struct list_head rx_used; /* No SKB allocated */
630 spinlock_t lock;
631}; /* Not transferred over network, so not __attribute__ ((packed)) */
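/*
 * Editor's sketch (not part of the original header): the rx_free/rx_used
 * lists above are used as FIFOs, so a replenish step typically moves a
 * buffer from rx_used to rx_free once an skb has been attached to it.
 * The helper name and exact locking are assumptions for illustration.
 */
static inline void example_rx_queue_restock(struct ipw_rx_queue *rxq,
					    struct ipw_rx_mem_buffer *rxb)
{
	unsigned long flags;

	spin_lock_irqsave(&rxq->lock, flags);
	list_move_tail(&rxb->list, &rxq->rx_free);	/* rxb->skb now valid */
	rxq->free_count++;
	spin_unlock_irqrestore(&rxq->lock, flags);
}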
632
633
634struct alive_command_responce {
635 u8 alive_command;
636 u8 sequence_number;
637 u16 software_revision;
638 u8 device_identifier;
639 u8 reserved1[5];
640 u16 reserved2;
641 u16 reserved3;
642 u16 clock_settle_time;
643 u16 powerup_settle_time;
644 u16 reserved4;
645 u8 time_stamp[5]; /* month, day, year, hours, minutes */
646 u8 ucode_valid;
647} __attribute__ ((packed));
648
649#define IPW_MAX_RATES 12
650
651struct ipw_rates {
652 u8 num_rates;
653 u8 rates[IPW_MAX_RATES];
654} __attribute__ ((packed));
655
656struct command_block
657{
658 unsigned int control;
659 u32 source_addr;
660 u32 dest_addr;
661 unsigned int status;
662} __attribute__ ((packed));
663
664#define CB_NUMBER_OF_ELEMENTS_SMALL 64
665struct fw_image_desc
666{
667 unsigned long last_cb_index;
668 unsigned long current_cb_index;
669 struct command_block cb_list[CB_NUMBER_OF_ELEMENTS_SMALL];
670 void * v_addr;
671 unsigned long p_addr;
672 unsigned long len;
673};
674
675struct ipw_sys_config
676{
677 u8 bt_coexistence;
678 u8 reserved1;
679 u8 answer_broadcast_ssid_probe;
680 u8 accept_all_data_frames;
681 u8 accept_non_directed_frames;
682 u8 exclude_unicast_unencrypted;
683 u8 disable_unicast_decryption;
684 u8 exclude_multicast_unencrypted;
685 u8 disable_multicast_decryption;
686 u8 antenna_diversity;
687 u8 pass_crc_to_host;
688 u8 dot11g_auto_detection;
689 u8 enable_cts_to_self;
690 u8 enable_multicast_filtering;
691 u8 bt_coexist_collision_thr;
692 u8 reserved2;
693 u8 accept_all_mgmt_bcpr;
694 u8 accept_all_mgtm_frames;
695 u8 pass_noise_stats_to_host;
696 u8 reserved3;
697} __attribute__ ((packed));
698
699struct ipw_multicast_addr
700{
701 u8 num_of_multicast_addresses;
702 u8 reserved[3];
703 u8 mac1[6];
704 u8 mac2[6];
705 u8 mac3[6];
706 u8 mac4[6];
707} __attribute__ ((packed));
708
709struct ipw_wep_key
710{
711 u8 cmd_id;
712 u8 seq_num;
713 u8 key_index;
714 u8 key_size;
715 u8 key[16];
716} __attribute__ ((packed));
717
718struct ipw_tgi_tx_key
719{
720 u8 key_id;
721 u8 security_type;
722 u8 station_index;
723 u8 flags;
724 u8 key[16];
725 u32 tx_counter[2];
726} __attribute__ ((packed));
727
728#define IPW_SCAN_CHANNELS 54
729
730struct ipw_scan_request
731{
732 u8 scan_type;
733 u16 dwell_time;
734 u8 channels_list[IPW_SCAN_CHANNELS];
735 u8 channels_reserved[3];
736} __attribute__ ((packed));
737
738enum {
739 IPW_SCAN_PASSIVE_TILL_FIRST_BEACON_SCAN = 0,
740 IPW_SCAN_PASSIVE_FULL_DWELL_SCAN,
741 IPW_SCAN_ACTIVE_DIRECT_SCAN,
742 IPW_SCAN_ACTIVE_BROADCAST_SCAN,
743 IPW_SCAN_ACTIVE_BROADCAST_AND_DIRECT_SCAN,
744 IPW_SCAN_TYPES
745};
746
747struct ipw_scan_request_ext
748{
749 u32 full_scan_index;
750 u8 channels_list[IPW_SCAN_CHANNELS];
751 u8 scan_type[IPW_SCAN_CHANNELS / 2];
752 u8 reserved;
753 u16 dwell_time[IPW_SCAN_TYPES];
754} __attribute__ ((packed));
755
756extern inline u8 ipw_get_scan_type(struct ipw_scan_request_ext *scan, u8 index)
757{
758 if (index % 2)
759 return scan->scan_type[index / 2] & 0x0F;
760 else
761 return (scan->scan_type[index / 2] & 0xF0) >> 4;
762}
763
764extern inline void ipw_set_scan_type(struct ipw_scan_request_ext *scan,
765 u8 index, u8 scan_type)
766{
767 if (index % 2)
768 scan->scan_type[index / 2] =
769 (scan->scan_type[index / 2] & 0xF0) |
770 (scan_type & 0x0F);
771 else
772 scan->scan_type[index / 2] =
773 (scan->scan_type[index / 2] & 0x0F) |
774 ((scan_type & 0x0F) << 4);
775}
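/*
 * Editor's sketch: using the accessors above to program an extended scan
 * request.  The helper name is hypothetical; it simply shows that scan_type
 * packs two 4-bit entries per byte, one entry per channel index.
 */
static inline void example_request_broadcast_scan(struct ipw_scan_request_ext *scan)
{
	u8 i;

	for (i = 0; i < IPW_SCAN_CHANNELS; i++)
		ipw_set_scan_type(scan, i, IPW_SCAN_ACTIVE_BROADCAST_SCAN);
}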
776
777struct ipw_associate
778{
779 u8 channel;
780 u8 auth_type:4,
781 auth_key:4;
782 u8 assoc_type;
783 u8 reserved;
784 u16 policy_support;
785 u8 preamble_length;
786 u8 ieee_mode;
787 u8 bssid[ETH_ALEN];
788 u32 assoc_tsf_msw;
789 u32 assoc_tsf_lsw;
790 u16 capability;
791 u16 listen_interval;
792 u16 beacon_interval;
793 u8 dest[ETH_ALEN];
794 u16 atim_window;
795 u8 smr;
796 u8 reserved1;
797 u16 reserved2;
798} __attribute__ ((packed));
799
800struct ipw_supported_rates
801{
802 u8 ieee_mode;
803 u8 num_rates;
804 u8 purpose;
805 u8 reserved;
806 u8 supported_rates[IPW_MAX_RATES];
807} __attribute__ ((packed));
808
809struct ipw_rts_threshold
810{
811 u16 rts_threshold;
812 u16 reserved;
813} __attribute__ ((packed));
814
815struct ipw_frag_threshold
816{
817 u16 frag_threshold;
818 u16 reserved;
819} __attribute__ ((packed));
820
821struct ipw_retry_limit
822{
823 u8 short_retry_limit;
824 u8 long_retry_limit;
825 u16 reserved;
826} __attribute__ ((packed));
827
828struct ipw_dino_config
829{
830 u32 dino_config_addr;
831 u16 dino_config_size;
832 u8 dino_response;
833 u8 reserved;
834} __attribute__ ((packed));
835
836struct ipw_aironet_info
837{
838 u8 id;
839 u8 length;
840 u16 reserved;
841} __attribute__ ((packed));
842
843struct ipw_rx_key
844{
845 u8 station_index;
846 u8 key_type;
847 u8 key_id;
848 u8 key_flag;
849 u8 key[16];
850 u8 station_address[6];
851 u8 key_index;
852 u8 reserved;
853} __attribute__ ((packed));
854
855struct ipw_country_channel_info
856{
857 u8 first_channel;
858 u8 no_channels;
859 s8 max_tx_power;
860} __attribute__ ((packed));
861
862struct ipw_country_info
863{
864 u8 id;
865 u8 length;
866 u8 country_str[3];
867 struct ipw_country_channel_info groups[7];
868} __attribute__ ((packed));
869
870struct ipw_channel_tx_power
871{
872 u8 channel_number;
873 s8 tx_power;
874} __attribute__ ((packed));
875
876#define SCAN_ASSOCIATED_INTERVAL (HZ)
877#define SCAN_INTERVAL (HZ / 10)
878#define MAX_A_CHANNELS 37
879#define MAX_B_CHANNELS 14
880
881struct ipw_tx_power
882{
883 u8 num_channels;
884 u8 ieee_mode;
885 struct ipw_channel_tx_power channels_tx_power[MAX_A_CHANNELS];
886} __attribute__ ((packed));
887
888struct ipw_qos_parameters
889{
890 u16 cw_min[4];
891 u16 cw_max[4];
892 u8 aifs[4];
893 u8 flag[4];
894 u16 tx_op_limit[4];
895} __attribute__ ((packed));
896
897struct ipw_rsn_capabilities
898{
899 u8 id;
900 u8 length;
901 u16 version;
902} __attribute__ ((packed));
903
904struct ipw_sensitivity_calib
905{
906 u16 beacon_rssi_raw;
907 u16 reserved;
908} __attribute__ ((packed));
909
910/**
911 * Host command structure.
912 *
913 * On input, the following fields should be filled:
914 * - cmd
915 * - len
916 * - status_len
917 * - param (if needed)
918 *
919 * On output,
 920 * - \a status contains the returned status;
 921 * - \a param is filled with status parameters.
922 */
923struct ipw_cmd {
924 u32 cmd; /**< Host command */
925 u32 status; /**< Status */
926 u32 status_len; /**< How many 32 bit parameters in the status */
927 u32 len; /**< incoming parameters length, bytes */
928 /**
929 * command parameters.
930 * There should be enough space for incoming and
 931	 * outgoing parameters.
 932	 * Incoming parameters are listed first, followed by outgoing params.
933 * nParams=(len+3)/4+status_len
934 */
935 u32 param[0];
936} __attribute__ ((packed));
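/*
 * Editor's sketch (assumed helper, not from this patch): allocating a host
 * command buffer using the sizing rule documented above,
 * nParams = (len + 3) / 4 + status_len.
 */
static inline struct ipw_cmd *example_alloc_host_cmd(u32 cmd, u32 len,
						     u32 status_len)
{
	u32 nparams = (len + 3) / 4 + status_len;
	struct ipw_cmd *c = kmalloc(sizeof(*c) + nparams * sizeof(u32),
				    GFP_KERNEL);

	if (!c)
		return NULL;
	memset(c, 0, sizeof(*c) + nparams * sizeof(u32));
	c->cmd = cmd;
	c->len = len;
	c->status_len = status_len;
	return c;
}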
937
938#define STATUS_HCMD_ACTIVE (1<<0) /**< host command in progress */
939
940#define STATUS_INT_ENABLED (1<<1)
941#define STATUS_RF_KILL_HW (1<<2)
942#define STATUS_RF_KILL_SW (1<<3)
943#define STATUS_RF_KILL_MASK (STATUS_RF_KILL_HW | STATUS_RF_KILL_SW)
944
945#define STATUS_INIT (1<<5)
946#define STATUS_AUTH (1<<6)
947#define STATUS_ASSOCIATED (1<<7)
948#define STATUS_STATE_MASK (STATUS_INIT | STATUS_AUTH | STATUS_ASSOCIATED)
949
950#define STATUS_ASSOCIATING (1<<8)
951#define STATUS_DISASSOCIATING (1<<9)
952#define STATUS_ROAMING (1<<10)
953#define STATUS_EXIT_PENDING (1<<11)
954#define STATUS_DISASSOC_PENDING (1<<12)
955#define STATUS_STATE_PENDING (1<<13)
956
957#define STATUS_SCAN_PENDING (1<<20)
958#define STATUS_SCANNING (1<<21)
959#define STATUS_SCAN_ABORTING (1<<22)
960
961#define STATUS_INDIRECT_BYTE (1<<28) /* sysfs entry configured for access */
962#define STATUS_INDIRECT_DWORD (1<<29) /* sysfs entry configured for access */
963#define STATUS_DIRECT_DWORD (1<<30) /* sysfs entry configured for access */
964
965#define STATUS_SECURITY_UPDATED (1<<31) /* Security sync needed */
966
967#define CFG_STATIC_CHANNEL (1<<0) /* Restrict assoc. to single channel */
968#define CFG_STATIC_ESSID (1<<1) /* Restrict assoc. to single SSID */
969#define CFG_STATIC_BSSID (1<<2) /* Restrict assoc. to single BSSID */
970#define CFG_CUSTOM_MAC (1<<3)
971#define CFG_PREAMBLE (1<<4)
972#define CFG_ADHOC_PERSIST (1<<5)
973#define CFG_ASSOCIATE (1<<6)
974#define CFG_FIXED_RATE (1<<7)
975#define CFG_ADHOC_CREATE (1<<8)
976
977#define CAP_SHARED_KEY (1<<0) /* Off = OPEN */
978#define CAP_PRIVACY_ON (1<<1) /* Off = No privacy */
979
980#define MAX_STATIONS 32
981#define IPW_INVALID_STATION (0xff)
982
983struct ipw_station_entry {
984 u8 mac_addr[ETH_ALEN];
985 u8 reserved;
986 u8 support_mode;
987};
988
989#define AVG_ENTRIES 8
990struct average {
991 s16 entries[AVG_ENTRIES];
992 u8 pos;
993 u8 init;
994 s32 sum;
995};
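/*
 * Editor's sketch (assumed helpers): how the ring buffer above can maintain
 * a running average; 'init' flips once the ring has wrapped at least once.
 */
static inline void example_average_add(struct average *avg, s16 val)
{
	avg->sum -= avg->entries[avg->pos];
	avg->sum += val;
	avg->entries[avg->pos++] = val;
	if (avg->pos == AVG_ENTRIES) {
		avg->pos = 0;
		avg->init = 1;
	}
}

static inline s32 example_average_value(const struct average *avg)
{
	if (!avg->init)
		return avg->pos ? avg->sum / avg->pos : 0;
	return avg->sum / AVG_ENTRIES;
}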
996
997struct ipw_priv {
998 /* ieee device used by generic ieee processing code */
999 struct ieee80211_device *ieee;
1000 struct ieee80211_security sec;
1001
1002 /* spinlock */
1003 spinlock_t lock;
1004
1005 /* basic pci-network driver stuff */
1006 struct pci_dev *pci_dev;
1007 struct net_device *net_dev;
1008
1009 /* pci hardware address support */
1010 void __iomem *hw_base;
1011 unsigned long hw_len;
1012
1013 struct fw_image_desc sram_desc;
1014
1015 /* result of ucode download */
1016 struct alive_command_responce dino_alive;
1017
1018 wait_queue_head_t wait_command_queue;
1019 wait_queue_head_t wait_state;
1020
1021 /* Rx and Tx DMA processing queues */
1022 struct ipw_rx_queue *rxq;
1023 struct clx2_tx_queue txq_cmd;
1024 struct clx2_tx_queue txq[4];
1025 u32 status;
1026 u32 config;
1027 u32 capability;
1028
1029 u8 last_rx_rssi;
1030 u8 last_noise;
1031 struct average average_missed_beacons;
1032 struct average average_rssi;
1033 struct average average_noise;
1034 u32 port_type;
1035 int rx_bufs_min; /**< minimum number of bufs in Rx queue */
1036 int rx_pend_max; /**< maximum pending buffers for one IRQ */
1037 u32 hcmd_seq; /**< sequence number for hcmd */
1038 u32 missed_beacon_threshold;
1039 u32 roaming_threshold;
1040
1041 struct ipw_associate assoc_request;
1042 struct ieee80211_network *assoc_network;
1043
1044 unsigned long ts_scan_abort;
1045 struct ipw_supported_rates rates;
1046 struct ipw_rates phy[3]; /**< PHY restrictions, per band */
1047 struct ipw_rates supp; /**< software defined */
1048 struct ipw_rates extended; /**< use for corresp. IE, AP only */
1049
1050	struct notif_link_deterioration last_link_deterioration; /**< for statistics */
1051 struct ipw_cmd* hcmd; /**< host command currently executed */
1052
1053 wait_queue_head_t hcmd_wq; /**< host command waits for execution */
1054 u32 tsf_bcn[2]; /**< TSF from latest beacon */
1055
1056 struct notif_calibration calib; /**< last calibration */
1057
1058 /* ordinal interface with firmware */
1059 u32 table0_addr;
1060 u32 table0_len;
1061 u32 table1_addr;
1062 u32 table1_len;
1063 u32 table2_addr;
1064 u32 table2_len;
1065
1066 /* context information */
1067 u8 essid[IW_ESSID_MAX_SIZE];
1068 u8 essid_len;
1069 u8 nick[IW_ESSID_MAX_SIZE];
1070 u16 rates_mask;
1071 u8 channel;
1072 struct ipw_sys_config sys_config;
1073 u32 power_mode;
1074 u8 bssid[ETH_ALEN];
1075 u16 rts_threshold;
1076 u8 mac_addr[ETH_ALEN];
1077 u8 num_stations;
1078 u8 stations[MAX_STATIONS][ETH_ALEN];
1079
1080 u32 notif_missed_beacons;
1081
1082 /* Statistics and counters normalized with each association */
1083 u32 last_missed_beacons;
1084 u32 last_tx_packets;
1085 u32 last_rx_packets;
1086 u32 last_tx_failures;
1087 u32 last_rx_err;
1088 u32 last_rate;
1089
1090 u32 missed_adhoc_beacons;
1091 u32 missed_beacons;
1092 u32 rx_packets;
1093 u32 tx_packets;
1094 u32 quality;
1095
1096 /* eeprom */
1097 u8 eeprom[0x100]; /* 256 bytes of eeprom */
1098 int eeprom_delay;
1099
1100 struct iw_statistics wstats;
1101
1102 struct workqueue_struct *workqueue;
1103
1104 struct work_struct adhoc_check;
1105 struct work_struct associate;
1106 struct work_struct disassociate;
1107 struct work_struct rx_replenish;
1108 struct work_struct request_scan;
1109 struct work_struct adapter_restart;
1110 struct work_struct rf_kill;
1111 struct work_struct up;
1112 struct work_struct down;
1113 struct work_struct gather_stats;
1114 struct work_struct abort_scan;
1115 struct work_struct roam;
1116 struct work_struct scan_check;
1117
1118 struct tasklet_struct irq_tasklet;
1119
1120
1121#define IPW_2200BG 1
1122#define IPW_2915ABG 2
1123 u8 adapter;
1124
1125#define IPW_DEFAULT_TX_POWER 0x14
1126 u8 tx_power;
1127
1128#ifdef CONFIG_PM
1129 u32 pm_state[16];
1130#endif
1131
1132 /* network state */
1133
1134 /* Used to pass the current INTA value from ISR to Tasklet */
1135 u32 isr_inta;
1136
1137 /* debugging info */
1138 u32 indirect_dword;
1139 u32 direct_dword;
1140 u32 indirect_byte;
1141};			/* ipw_priv */
1142
1143
1144/* debug macros */
1145
1146#ifdef CONFIG_IPW_DEBUG
1147#define IPW_DEBUG(level, fmt, args...) \
1148do { if (ipw_debug_level & (level)) \
1149 printk(KERN_DEBUG DRV_NAME": %c %s " fmt, \
1150 in_interrupt() ? 'I' : 'U', __FUNCTION__ , ## args); } while (0)
1151#else
1152#define IPW_DEBUG(level, fmt, args...) do {} while (0)
1153#endif /* CONFIG_IPW_DEBUG */
1154
1155/*
 1156 * To use the debug system:
1157 *
1158 * If you are defining a new debug classification, simply add it to the #define
1159 * list here in the form of:
1160 *
1161 * #define IPW_DL_xxxx VALUE
1162 *
 1163 * shifting the value left one bit from the previous entry.  xxxx should be
1164 * the name of the classification (for example, WEP)
1165 *
 1166 * You then need to either add an IPW_xxxx_DEBUG() macro definition for your
1167 * classification, or use IPW_DEBUG(IPW_DL_xxxx, ...) whenever you want
1168 * to send output to that classification.
1169 *
1170 * To add your debug level to the list of levels seen when you perform
1171 *
1172 * % cat /proc/net/ipw/debug_level
1173 *
1174 * you simply need to add your entry to the ipw_debug_levels array.
1175 *
 1176 * If you do not see debug_level in /proc/net/ipw, then you do not have
 1177 * CONFIG_IPW_DEBUG defined in your kernel configuration.
1178 *
1179 */
1180
1181#define IPW_DL_ERROR (1<<0)
1182#define IPW_DL_WARNING (1<<1)
1183#define IPW_DL_INFO (1<<2)
1184#define IPW_DL_WX (1<<3)
1185#define IPW_DL_HOST_COMMAND (1<<5)
1186#define IPW_DL_STATE (1<<6)
1187
1188#define IPW_DL_NOTIF (1<<10)
1189#define IPW_DL_SCAN (1<<11)
1190#define IPW_DL_ASSOC (1<<12)
1191#define IPW_DL_DROP (1<<13)
1192#define IPW_DL_IOCTL (1<<14)
1193
1194#define IPW_DL_MANAGE (1<<15)
1195#define IPW_DL_FW (1<<16)
1196#define IPW_DL_RF_KILL (1<<17)
1197#define IPW_DL_FW_ERRORS (1<<18)
1198
1199
1200#define IPW_DL_ORD (1<<20)
1201
1202#define IPW_DL_FRAG (1<<21)
1203#define IPW_DL_WEP (1<<22)
1204#define IPW_DL_TX (1<<23)
1205#define IPW_DL_RX (1<<24)
1206#define IPW_DL_ISR (1<<25)
1207#define IPW_DL_FW_INFO (1<<26)
1208#define IPW_DL_IO (1<<27)
1209#define IPW_DL_TRACE (1<<28)
1210
1211#define IPW_DL_STATS (1<<29)
1212
1213
1214#define IPW_ERROR(f, a...) printk(KERN_ERR DRV_NAME ": " f, ## a)
1215#define IPW_WARNING(f, a...) printk(KERN_WARNING DRV_NAME ": " f, ## a)
1216#define IPW_DEBUG_INFO(f, a...) IPW_DEBUG(IPW_DL_INFO, f, ## a)
1217
1218#define IPW_DEBUG_WX(f, a...) IPW_DEBUG(IPW_DL_WX, f, ## a)
1219#define IPW_DEBUG_SCAN(f, a...) IPW_DEBUG(IPW_DL_SCAN, f, ## a)
1220#define IPW_DEBUG_STATUS(f, a...) IPW_DEBUG(IPW_DL_STATUS, f, ## a)
1221#define IPW_DEBUG_TRACE(f, a...) IPW_DEBUG(IPW_DL_TRACE, f, ## a)
1222#define IPW_DEBUG_RX(f, a...) IPW_DEBUG(IPW_DL_RX, f, ## a)
1223#define IPW_DEBUG_TX(f, a...) IPW_DEBUG(IPW_DL_TX, f, ## a)
1224#define IPW_DEBUG_ISR(f, a...) IPW_DEBUG(IPW_DL_ISR, f, ## a)
1225#define IPW_DEBUG_MANAGEMENT(f, a...) IPW_DEBUG(IPW_DL_MANAGE, f, ## a)
1226#define IPW_DEBUG_WEP(f, a...) IPW_DEBUG(IPW_DL_WEP, f, ## a)
1227#define IPW_DEBUG_HC(f, a...) IPW_DEBUG(IPW_DL_HOST_COMMAND, f, ## a)
1228#define IPW_DEBUG_FRAG(f, a...) IPW_DEBUG(IPW_DL_FRAG, f, ## a)
1229#define IPW_DEBUG_FW(f, a...) IPW_DEBUG(IPW_DL_FW, f, ## a)
1230#define IPW_DEBUG_RF_KILL(f, a...) IPW_DEBUG(IPW_DL_RF_KILL, f, ## a)
1231#define IPW_DEBUG_DROP(f, a...) IPW_DEBUG(IPW_DL_DROP, f, ## a)
1232#define IPW_DEBUG_IO(f, a...) IPW_DEBUG(IPW_DL_IO, f, ## a)
1233#define IPW_DEBUG_ORD(f, a...) IPW_DEBUG(IPW_DL_ORD, f, ## a)
1234#define IPW_DEBUG_FW_INFO(f, a...) IPW_DEBUG(IPW_DL_FW_INFO, f, ## a)
1235#define IPW_DEBUG_NOTIF(f, a...) IPW_DEBUG(IPW_DL_NOTIF, f, ## a)
1236#define IPW_DEBUG_STATE(f, a...) IPW_DEBUG(IPW_DL_STATE | IPW_DL_ASSOC | IPW_DL_INFO, f, ## a)
1237#define IPW_DEBUG_ASSOC(f, a...) IPW_DEBUG(IPW_DL_ASSOC | IPW_DL_INFO, f, ## a)
1238#define IPW_DEBUG_STATS(f, a...) IPW_DEBUG(IPW_DL_STATS, f, ## a)
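/*
 * Editor's example of the recipe described above: a hypothetical "QOS"
 * classification takes the next free bit and gets a matching macro.  It is
 * not part of the driver.
 */
#define IPW_DL_QOS             (1<<30)
#define IPW_DEBUG_QOS(f, a...) IPW_DEBUG(IPW_DL_QOS, f, ## a)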
1239
1240#include <linux/ctype.h>
1241
1242/*
1243* Register bit definitions
1244*/
1245
1246/* Dino control registers bits */
1247
1248#define DINO_ENABLE_SYSTEM 0x80
1249#define DINO_ENABLE_CS 0x40
1250#define DINO_RXFIFO_DATA 0x01
1251#define DINO_CONTROL_REG 0x00200000
1252
1253#define CX2_INTA_RW 0x00000008
1254#define CX2_INTA_MASK_R 0x0000000C
1255#define CX2_INDIRECT_ADDR 0x00000010
1256#define CX2_INDIRECT_DATA 0x00000014
1257#define CX2_AUTOINC_ADDR 0x00000018
1258#define CX2_AUTOINC_DATA 0x0000001C
1259#define CX2_RESET_REG 0x00000020
1260#define CX2_GP_CNTRL_RW 0x00000024
1261
1262#define CX2_READ_INT_REGISTER 0xFF4
1263
1264#define CX2_GP_CNTRL_BIT_INIT_DONE 0x00000004
1265
1266#define CX2_REGISTER_DOMAIN1_END 0x00001000
1267#define CX2_SRAM_READ_INT_REGISTER 0x00000ff4
1268
1269#define CX2_SHARED_LOWER_BOUND 0x00000200
1270#define CX2_INTERRUPT_AREA_LOWER_BOUND 0x00000f80
1271
1272#define CX2_NIC_SRAM_LOWER_BOUND 0x00000000
1273#define CX2_NIC_SRAM_UPPER_BOUND 0x00030000
1274
1275#define CX2_BIT_INT_HOST_SRAM_READ_INT_REGISTER (1 << 29)
1276#define CX2_GP_CNTRL_BIT_CLOCK_READY 0x00000001
1277#define CX2_GP_CNTRL_BIT_HOST_ALLOWS_STANDBY 0x00000002
1278
1279/*
1280 * RESET Register Bit Indexes
1281 */
1282#define CBD_RESET_REG_PRINCETON_RESET 0x00000001 /* Bit 0 (LSB) */
1283#define CX2_RESET_REG_SW_RESET 0x00000080 /* Bit 7 */
1284#define CX2_RESET_REG_MASTER_DISABLED 0x00000100 /* Bit 8 */
1285#define CX2_RESET_REG_STOP_MASTER 0x00000200 /* Bit 9 */
1286#define CX2_ARC_KESHET_CONFIG 0x08000000 /* Bit 27 */
1287#define CX2_START_STANDBY 0x00000004 /* Bit 2 */
1288
1289#define CX2_CSR_CIS_UPPER_BOUND 0x00000200
1290#define CX2_DOMAIN_0_END 0x1000
1291#define CLX_MEM_BAR_SIZE 0x1000
1292
1293#define CX2_BASEBAND_CONTROL_STATUS 0X00200000
1294#define CX2_BASEBAND_TX_FIFO_WRITE 0X00200004
1295#define CX2_BASEBAND_RX_FIFO_READ 0X00200004
1296#define CX2_BASEBAND_CONTROL_STORE 0X00200010
1297
1298#define CX2_INTERNAL_CMD_EVENT 0X00300004
1299#define CX2_BASEBAND_POWER_DOWN 0x00000001
1300
1301#define CX2_MEM_HALT_AND_RESET 0x003000e0
1302
1303/* defgroup bits_halt_reset MEM_HALT_AND_RESET register bits */
1304#define CX2_BIT_HALT_RESET_ON 0x80000000
1305#define CX2_BIT_HALT_RESET_OFF 0x00000000
1306
1307#define CB_LAST_VALID 0x20000000
1308#define CB_INT_ENABLED 0x40000000
1309#define CB_VALID 0x80000000
1310#define CB_SRC_LE 0x08000000
1311#define CB_DEST_LE 0x04000000
1312#define CB_SRC_AUTOINC 0x00800000
1313#define CB_SRC_IO_GATED 0x00400000
1314#define CB_DEST_AUTOINC 0x00080000
1315#define CB_SRC_SIZE_LONG 0x00200000
1316#define CB_DEST_SIZE_LONG 0x00020000
1317
1318
1319/* DMA DEFINES */
1320
1321#define DMA_CONTROL_SMALL_CB_CONST_VALUE 0x00540000
1322#define DMA_CB_STOP_AND_ABORT 0x00000C00
1323#define DMA_CB_START 0x00000100
1324
1325
1326#define CX2_SHARED_SRAM_SIZE 0x00030000
1327#define CX2_SHARED_SRAM_DMA_CONTROL 0x00027000
1328#define CB_MAX_LENGTH 0x1FFF
1329
1330#define CX2_HOST_EEPROM_DATA_SRAM_SIZE 0xA18
1331#define CX2_EEPROM_IMAGE_SIZE 0x100
1332
1333
1334/* DMA defs */
1335#define CX2_DMA_I_CURRENT_CB 0x003000D0
1336#define CX2_DMA_O_CURRENT_CB 0x003000D4
1337#define CX2_DMA_I_DMA_CONTROL 0x003000A4
1338#define CX2_DMA_I_CB_BASE 0x003000A0
1339
1340#define CX2_TX_CMD_QUEUE_BD_BASE (0x00000200)
1341#define CX2_TX_CMD_QUEUE_BD_SIZE (0x00000204)
1342#define CX2_TX_QUEUE_0_BD_BASE (0x00000208)
1343#define CX2_TX_QUEUE_0_BD_SIZE (0x0000020C)
1344#define CX2_TX_QUEUE_1_BD_BASE (0x00000210)
1345#define CX2_TX_QUEUE_1_BD_SIZE (0x00000214)
1346#define CX2_TX_QUEUE_2_BD_BASE (0x00000218)
1347#define CX2_TX_QUEUE_2_BD_SIZE (0x0000021C)
1348#define CX2_TX_QUEUE_3_BD_BASE (0x00000220)
1349#define CX2_TX_QUEUE_3_BD_SIZE (0x00000224)
1350#define CX2_RX_BD_BASE (0x00000240)
1351#define CX2_RX_BD_SIZE (0x00000244)
1352#define CX2_RFDS_TABLE_LOWER (0x00000500)
1353
1354#define CX2_TX_CMD_QUEUE_READ_INDEX (0x00000280)
1355#define CX2_TX_QUEUE_0_READ_INDEX (0x00000284)
1356#define CX2_TX_QUEUE_1_READ_INDEX (0x00000288)
1357#define CX2_TX_QUEUE_2_READ_INDEX (0x0000028C)
1358#define CX2_TX_QUEUE_3_READ_INDEX (0x00000290)
1359#define CX2_RX_READ_INDEX (0x000002A0)
1360
1361#define CX2_TX_CMD_QUEUE_WRITE_INDEX (0x00000F80)
1362#define CX2_TX_QUEUE_0_WRITE_INDEX (0x00000F84)
1363#define CX2_TX_QUEUE_1_WRITE_INDEX (0x00000F88)
1364#define CX2_TX_QUEUE_2_WRITE_INDEX (0x00000F8C)
1365#define CX2_TX_QUEUE_3_WRITE_INDEX (0x00000F90)
1366#define CX2_RX_WRITE_INDEX (0x00000FA0)
1367
1368/*
1369 * EEPROM Related Definitions
1370 */
1371
1372#define IPW_EEPROM_DATA_SRAM_ADDRESS (CX2_SHARED_LOWER_BOUND + 0x814)
1373#define IPW_EEPROM_DATA_SRAM_SIZE (CX2_SHARED_LOWER_BOUND + 0x818)
1374#define IPW_EEPROM_LOAD_DISABLE (CX2_SHARED_LOWER_BOUND + 0x81C)
1375#define IPW_EEPROM_DATA (CX2_SHARED_LOWER_BOUND + 0x820)
1376#define IPW_EEPROM_UPPER_ADDRESS (CX2_SHARED_LOWER_BOUND + 0x9E0)
1377
1378#define IPW_STATION_TABLE_LOWER (CX2_SHARED_LOWER_BOUND + 0xA0C)
1379#define IPW_STATION_TABLE_UPPER (CX2_SHARED_LOWER_BOUND + 0xB0C)
1380#define IPW_REQUEST_ATIM (CX2_SHARED_LOWER_BOUND + 0xB0C)
1381#define IPW_ATIM_SENT (CX2_SHARED_LOWER_BOUND + 0xB10)
1382#define IPW_WHO_IS_AWAKE (CX2_SHARED_LOWER_BOUND + 0xB14)
1383#define IPW_DURING_ATIM_WINDOW (CX2_SHARED_LOWER_BOUND + 0xB18)
1384
1385
1386#define MSB 1
1387#define LSB 0
1388#define WORD_TO_BYTE(_word) ((_word) * sizeof(u16))
1389
1390#define GET_EEPROM_ADDR(_wordoffset,_byteoffset) \
1391 ( WORD_TO_BYTE(_wordoffset) + (_byteoffset) )
1392
1393/* EEPROM access by BYTE */
1394#define EEPROM_PME_CAPABILITY (GET_EEPROM_ADDR(0x09,MSB)) /* 1 byte */
1395#define EEPROM_MAC_ADDRESS (GET_EEPROM_ADDR(0x21,LSB)) /* 6 byte */
1396#define EEPROM_VERSION (GET_EEPROM_ADDR(0x24,MSB)) /* 1 byte */
1397#define EEPROM_NIC_TYPE (GET_EEPROM_ADDR(0x25,LSB)) /* 1 byte */
1398#define EEPROM_SKU_CAPABILITY (GET_EEPROM_ADDR(0x25,MSB)) /* 1 byte */
1399#define EEPROM_COUNTRY_CODE (GET_EEPROM_ADDR(0x26,LSB)) /* 3 bytes */
1400#define EEPROM_IBSS_CHANNELS_BG (GET_EEPROM_ADDR(0x28,LSB)) /* 2 bytes */
1401#define EEPROM_IBSS_CHANNELS_A (GET_EEPROM_ADDR(0x29,MSB)) /* 5 bytes */
1402#define EEPROM_BSS_CHANNELS_BG (GET_EEPROM_ADDR(0x2c,LSB)) /* 2 bytes */
1403#define EEPROM_HW_VERSION (GET_EEPROM_ADDR(0x72,LSB)) /* 2 bytes */
1404
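/*
 * Editor's sketch: reading the adapter MAC from the EEPROM image cached in
 * struct ipw_priv (see the eeprom[] member above).  The helper name is
 * illustrative only.
 */
static inline void example_eeprom_mac(const struct ipw_priv *priv, u8 *mac)
{
	memcpy(mac, &priv->eeprom[EEPROM_MAC_ADDRESS], ETH_ALEN);
}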
1405/* NIC type as found in the one byte EEPROM_NIC_TYPE offset*/
1406#define EEPROM_NIC_TYPE_STANDARD 0
1407#define EEPROM_NIC_TYPE_DELL 1
1408#define EEPROM_NIC_TYPE_FUJITSU 2
1409#define EEPROM_NIC_TYPE_IBM 3
1410#define EEPROM_NIC_TYPE_HP 4
1411
1412#define FW_MEM_REG_LOWER_BOUND 0x00300000
1413#define FW_MEM_REG_EEPROM_ACCESS (FW_MEM_REG_LOWER_BOUND + 0x40)
1414
1415#define EEPROM_BIT_SK (1<<0)
1416#define EEPROM_BIT_CS (1<<1)
1417#define EEPROM_BIT_DI (1<<2)
1418#define EEPROM_BIT_DO (1<<4)
1419
1420#define EEPROM_CMD_READ 0x2
1421
1422/* Interrupts masks */
1423#define CX2_INTA_NONE 0x00000000
1424
1425#define CX2_INTA_BIT_RX_TRANSFER 0x00000002
1426#define CX2_INTA_BIT_STATUS_CHANGE 0x00000010
1427#define CX2_INTA_BIT_BEACON_PERIOD_EXPIRED 0x00000020
1428
1429/* INTA bits for CF */
1430#define CX2_INTA_BIT_TX_CMD_QUEUE 0x00000800
1431#define CX2_INTA_BIT_TX_QUEUE_1 0x00001000
1432#define CX2_INTA_BIT_TX_QUEUE_2 0x00002000
1433#define CX2_INTA_BIT_TX_QUEUE_3 0x00004000
1434#define CX2_INTA_BIT_TX_QUEUE_4 0x00008000
1435
1436#define CX2_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE 0x00010000
1437
1438#define CX2_INTA_BIT_PREPARE_FOR_POWER_DOWN 0x00100000
1439#define CX2_INTA_BIT_POWER_DOWN 0x00200000
1440
1441#define CX2_INTA_BIT_FW_INITIALIZATION_DONE 0x01000000
1442#define CX2_INTA_BIT_FW_CARD_DISABLE_PHY_OFF_DONE 0x02000000
1443#define CX2_INTA_BIT_RF_KILL_DONE 0x04000000
1444#define CX2_INTA_BIT_FATAL_ERROR 0x40000000
1445#define CX2_INTA_BIT_PARITY_ERROR 0x80000000
1446
1447/* Interrupts enabled at init time. */
1448#define CX2_INTA_MASK_ALL \
1449 (CX2_INTA_BIT_TX_QUEUE_1 | \
1450 CX2_INTA_BIT_TX_QUEUE_2 | \
1451 CX2_INTA_BIT_TX_QUEUE_3 | \
1452 CX2_INTA_BIT_TX_QUEUE_4 | \
1453 CX2_INTA_BIT_TX_CMD_QUEUE | \
1454 CX2_INTA_BIT_RX_TRANSFER | \
1455 CX2_INTA_BIT_FATAL_ERROR | \
1456 CX2_INTA_BIT_PARITY_ERROR | \
1457 CX2_INTA_BIT_STATUS_CHANGE | \
1458 CX2_INTA_BIT_FW_INITIALIZATION_DONE | \
1459 CX2_INTA_BIT_BEACON_PERIOD_EXPIRED | \
1460 CX2_INTA_BIT_SLAVE_MODE_HOST_CMD_DONE | \
1461 CX2_INTA_BIT_PREPARE_FOR_POWER_DOWN | \
1462 CX2_INTA_BIT_POWER_DOWN | \
1463 CX2_INTA_BIT_RF_KILL_DONE )
1464
1465#define IPWSTATUS_ERROR_LOG (CX2_SHARED_LOWER_BOUND + 0x410)
1466#define IPW_EVENT_LOG (CX2_SHARED_LOWER_BOUND + 0x414)
1467
1468/* FW event log definitions */
1469#define EVENT_ELEM_SIZE (3 * sizeof(u32))
1470#define EVENT_START_OFFSET (1 * sizeof(u32) + 2 * sizeof(u16))
1471
1472/* FW error log definitions */
1473#define ERROR_ELEM_SIZE (7 * sizeof(u32))
1474#define ERROR_START_OFFSET (1 * sizeof(u32))
1475
1476enum {
1477 IPW_FW_ERROR_OK = 0,
1478 IPW_FW_ERROR_FAIL,
1479 IPW_FW_ERROR_MEMORY_UNDERFLOW,
1480 IPW_FW_ERROR_MEMORY_OVERFLOW,
1481 IPW_FW_ERROR_BAD_PARAM,
1482 IPW_FW_ERROR_BAD_CHECKSUM,
1483 IPW_FW_ERROR_NMI_INTERRUPT,
1484 IPW_FW_ERROR_BAD_DATABASE,
1485 IPW_FW_ERROR_ALLOC_FAIL,
1486 IPW_FW_ERROR_DMA_UNDERRUN,
1487 IPW_FW_ERROR_DMA_STATUS,
1488 IPW_FW_ERROR_DINOSTATUS_ERROR,
1489 IPW_FW_ERROR_EEPROMSTATUS_ERROR,
1490 IPW_FW_ERROR_SYSASSERT,
1491 IPW_FW_ERROR_FATAL_ERROR
1492};
1493
1494#define AUTH_OPEN 0
1495#define AUTH_SHARED_KEY 1
1496#define AUTH_IGNORE 3
1497
1498#define HC_ASSOCIATE 0
1499#define HC_REASSOCIATE 1
1500#define HC_DISASSOCIATE 2
1501#define HC_IBSS_START 3
1502#define HC_IBSS_RECONF 4
1503#define HC_DISASSOC_QUIET 5
1504
1505#define IPW_RATE_CAPABILITIES 1
1506#define IPW_RATE_CONNECT 0
1507
1508
1509/*
1510 * Rate values and masks
1511 */
1512#define IPW_TX_RATE_1MB 0x0A
1513#define IPW_TX_RATE_2MB 0x14
1514#define IPW_TX_RATE_5MB 0x37
1515#define IPW_TX_RATE_6MB 0x0D
1516#define IPW_TX_RATE_9MB 0x0F
1517#define IPW_TX_RATE_11MB 0x6E
1518#define IPW_TX_RATE_12MB 0x05
1519#define IPW_TX_RATE_18MB 0x07
1520#define IPW_TX_RATE_24MB 0x09
1521#define IPW_TX_RATE_36MB 0x0B
1522#define IPW_TX_RATE_48MB 0x01
1523#define IPW_TX_RATE_54MB 0x03
1524
1525#define IPW_ORD_TABLE_ID_MASK 0x0000FF00
1526#define IPW_ORD_TABLE_VALUE_MASK 0x000000FF
1527
1528#define IPW_ORD_TABLE_0_MASK 0x0000F000
1529#define IPW_ORD_TABLE_1_MASK 0x0000F100
1530#define IPW_ORD_TABLE_2_MASK 0x0000F200
1531#define IPW_ORD_TABLE_3_MASK 0x0000F300
1532#define IPW_ORD_TABLE_4_MASK 0x0000F400
1533#define IPW_ORD_TABLE_5_MASK 0x0000F500
1534#define IPW_ORD_TABLE_6_MASK 0x0000F600
1535#define IPW_ORD_TABLE_7_MASK 0x0000F700
1536
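/*
 * Editor's sketch (assumed helpers): an ordinal encodes its table in bits
 * 8-15 and its entry index in bits 0-7, per the masks above.
 */
static inline u32 example_ord_table(u32 ord)
{
	return ord & IPW_ORD_TABLE_ID_MASK;
}

static inline u32 example_ord_index(u32 ord)
{
	return ord & IPW_ORD_TABLE_VALUE_MASK;
}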
1537/*
1538 * Table 0 Entries (all entries are 32 bits)
1539 */
1540enum {
1541 IPW_ORD_STAT_TX_CURR_RATE = IPW_ORD_TABLE_0_MASK + 1,
1542 IPW_ORD_STAT_FRAG_TRESHOLD,
1543 IPW_ORD_STAT_RTS_THRESHOLD,
1544 IPW_ORD_STAT_TX_HOST_REQUESTS,
1545 IPW_ORD_STAT_TX_HOST_COMPLETE,
1546 IPW_ORD_STAT_TX_DIR_DATA,
1547 IPW_ORD_STAT_TX_DIR_DATA_B_1,
1548 IPW_ORD_STAT_TX_DIR_DATA_B_2,
1549 IPW_ORD_STAT_TX_DIR_DATA_B_5_5,
1550 IPW_ORD_STAT_TX_DIR_DATA_B_11,
1551 /* Hole */
1552
1553
1554
1555
1556
1557
1558
1559 IPW_ORD_STAT_TX_DIR_DATA_G_1 = IPW_ORD_TABLE_0_MASK + 19,
1560 IPW_ORD_STAT_TX_DIR_DATA_G_2,
1561 IPW_ORD_STAT_TX_DIR_DATA_G_5_5,
1562 IPW_ORD_STAT_TX_DIR_DATA_G_6,
1563 IPW_ORD_STAT_TX_DIR_DATA_G_9,
1564 IPW_ORD_STAT_TX_DIR_DATA_G_11,
1565 IPW_ORD_STAT_TX_DIR_DATA_G_12,
1566 IPW_ORD_STAT_TX_DIR_DATA_G_18,
1567 IPW_ORD_STAT_TX_DIR_DATA_G_24,
1568 IPW_ORD_STAT_TX_DIR_DATA_G_36,
1569 IPW_ORD_STAT_TX_DIR_DATA_G_48,
1570 IPW_ORD_STAT_TX_DIR_DATA_G_54,
1571 IPW_ORD_STAT_TX_NON_DIR_DATA,
1572 IPW_ORD_STAT_TX_NON_DIR_DATA_B_1,
1573 IPW_ORD_STAT_TX_NON_DIR_DATA_B_2,
1574 IPW_ORD_STAT_TX_NON_DIR_DATA_B_5_5,
1575 IPW_ORD_STAT_TX_NON_DIR_DATA_B_11,
1576 /* Hole */
1577
1578
1579
1580
1581
1582
1583
1584 IPW_ORD_STAT_TX_NON_DIR_DATA_G_1 = IPW_ORD_TABLE_0_MASK + 44,
1585 IPW_ORD_STAT_TX_NON_DIR_DATA_G_2,
1586 IPW_ORD_STAT_TX_NON_DIR_DATA_G_5_5,
1587 IPW_ORD_STAT_TX_NON_DIR_DATA_G_6,
1588 IPW_ORD_STAT_TX_NON_DIR_DATA_G_9,
1589 IPW_ORD_STAT_TX_NON_DIR_DATA_G_11,
1590 IPW_ORD_STAT_TX_NON_DIR_DATA_G_12,
1591 IPW_ORD_STAT_TX_NON_DIR_DATA_G_18,
1592 IPW_ORD_STAT_TX_NON_DIR_DATA_G_24,
1593 IPW_ORD_STAT_TX_NON_DIR_DATA_G_36,
1594 IPW_ORD_STAT_TX_NON_DIR_DATA_G_48,
1595 IPW_ORD_STAT_TX_NON_DIR_DATA_G_54,
1596 IPW_ORD_STAT_TX_RETRY,
1597 IPW_ORD_STAT_TX_FAILURE,
1598 IPW_ORD_STAT_RX_ERR_CRC,
1599 IPW_ORD_STAT_RX_ERR_ICV,
1600 IPW_ORD_STAT_RX_NO_BUFFER,
1601 IPW_ORD_STAT_FULL_SCANS,
1602 IPW_ORD_STAT_PARTIAL_SCANS,
1603 IPW_ORD_STAT_TGH_ABORTED_SCANS,
1604 IPW_ORD_STAT_TX_TOTAL_BYTES,
1605 IPW_ORD_STAT_CURR_RSSI_RAW,
1606 IPW_ORD_STAT_RX_BEACON,
1607 IPW_ORD_STAT_MISSED_BEACONS,
1608 IPW_ORD_TABLE_0_LAST
1609};
1610
1611#define IPW_RSSI_TO_DBM 112
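/*
 * Editor's sketch: the raw RSSI ordinal appears to be treated as
 * (signal in dBm + IPW_RSSI_TO_DBM), so conversion is a subtraction.
 * The helper is an assumption for illustration, not driver code.
 */
static inline s8 example_rssi_to_dbm(u8 raw_rssi)
{
	return (s8)(raw_rssi - IPW_RSSI_TO_DBM);
}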
1612
1613/* Table 1 Entries
1614 */
1615enum {
1616 IPW_ORD_TABLE_1_LAST = IPW_ORD_TABLE_1_MASK | 1,
1617};
1618
1619/*
1620 * Table 2 Entries
1621 *
1622 * FW_VERSION: 16 byte string
1623 * FW_DATE: 16 byte string (only 14 bytes used)
1624 * UCODE_VERSION: 4 byte version code
 1625 * UCODE_DATE: 5 byte date code
 1626 * ADAPTER_MAC: 6 byte MAC address
1627 * RTC: 4 byte clock
1628 */
1629enum {
1630 IPW_ORD_STAT_FW_VERSION = IPW_ORD_TABLE_2_MASK | 1,
1631 IPW_ORD_STAT_FW_DATE,
1632 IPW_ORD_STAT_UCODE_VERSION,
1633 IPW_ORD_STAT_UCODE_DATE,
1634 IPW_ORD_STAT_ADAPTER_MAC,
1635 IPW_ORD_STAT_RTC,
1636 IPW_ORD_TABLE_2_LAST
1637};
1638
1639/* Table 3 */
1640enum {
1641 IPW_ORD_STAT_TX_PACKET = IPW_ORD_TABLE_3_MASK | 0,
1642 IPW_ORD_STAT_TX_PACKET_FAILURE,
1643 IPW_ORD_STAT_TX_PACKET_SUCCESS,
1644 IPW_ORD_STAT_TX_PACKET_ABORTED,
1645 IPW_ORD_TABLE_3_LAST
1646};
1647
1648/* Table 4 */
1649enum {
1650 IPW_ORD_TABLE_4_LAST = IPW_ORD_TABLE_4_MASK
1651};
1652
1653/* Table 5 */
1654enum {
1655 IPW_ORD_STAT_AVAILABLE_AP_COUNT = IPW_ORD_TABLE_5_MASK,
1656 IPW_ORD_STAT_AP_ASSNS,
1657 IPW_ORD_STAT_ROAM,
1658 IPW_ORD_STAT_ROAM_CAUSE_MISSED_BEACONS,
1659 IPW_ORD_STAT_ROAM_CAUSE_UNASSOC,
1660 IPW_ORD_STAT_ROAM_CAUSE_RSSI,
1661 IPW_ORD_STAT_ROAM_CAUSE_LINK_QUALITY,
1662 IPW_ORD_STAT_ROAM_CAUSE_AP_LOAD_BALANCE,
1663 IPW_ORD_STAT_ROAM_CAUSE_AP_NO_TX,
1664 IPW_ORD_STAT_LINK_UP,
1665 IPW_ORD_STAT_LINK_DOWN,
1666 IPW_ORD_ANTENNA_DIVERSITY,
1667 IPW_ORD_CURR_FREQ,
1668 IPW_ORD_TABLE_5_LAST
1669};
1670
1671/* Table 6 */
1672enum {
1673 IPW_ORD_COUNTRY_CODE = IPW_ORD_TABLE_6_MASK,
1674 IPW_ORD_CURR_BSSID,
1675 IPW_ORD_CURR_SSID,
1676 IPW_ORD_TABLE_6_LAST
1677};
1678
1679/* Table 7 */
1680enum {
1681 IPW_ORD_STAT_PERCENT_MISSED_BEACONS = IPW_ORD_TABLE_7_MASK,
1682 IPW_ORD_STAT_PERCENT_TX_RETRIES,
1683 IPW_ORD_STAT_PERCENT_LINK_QUALITY,
1684 IPW_ORD_STAT_CURR_RSSI_DBM,
1685 IPW_ORD_TABLE_7_LAST
1686};
1687
1688#define IPW_ORDINALS_TABLE_LOWER (CX2_SHARED_LOWER_BOUND + 0x500)
1689#define IPW_ORDINALS_TABLE_0 (CX2_SHARED_LOWER_BOUND + 0x180)
1690#define IPW_ORDINALS_TABLE_1 (CX2_SHARED_LOWER_BOUND + 0x184)
1691#define IPW_ORDINALS_TABLE_2 (CX2_SHARED_LOWER_BOUND + 0x188)
1692#define IPW_MEM_FIXED_OVERRIDE (CX2_SHARED_LOWER_BOUND + 0x41C)
1693
1694struct ipw_fixed_rate {
1695 u16 tx_rates;
1696 u16 reserved;
1697} __attribute__ ((packed));
1698
1699#define CX2_INDIRECT_ADDR_MASK (~0x3ul)
1700
1701struct host_cmd {
1702 u8 cmd;
1703 u8 len;
1704 u16 reserved;
1705 u32 param[TFD_CMD_IMMEDIATE_PAYLOAD_LENGTH];
1706} __attribute__ ((packed));
1707
1708#define CFG_BT_COEXISTENCE_MIN 0x00
1709#define CFG_BT_COEXISTENCE_DEFER 0x02
1710#define CFG_BT_COEXISTENCE_KILL 0x04
1711#define CFG_BT_COEXISTENCE_WME_OVER_BT 0x08
1712#define CFG_BT_COEXISTENCE_OOB 0x10
1713#define CFG_BT_COEXISTENCE_MAX 0xFF
1714#define CFG_BT_COEXISTENCE_DEF 0x80 /* read Bt from EEPROM*/
1715
1716#define CFG_CTS_TO_ITSELF_ENABLED_MIN 0x0
1717#define CFG_CTS_TO_ITSELF_ENABLED_MAX 0x1
1718#define CFG_CTS_TO_ITSELF_ENABLED_DEF CFG_CTS_TO_ITSELF_ENABLED_MIN
1719
1720#define CFG_SYS_ANTENNA_BOTH 0x000
1721#define CFG_SYS_ANTENNA_A 0x001
1722#define CFG_SYS_ANTENNA_B 0x003
1723
1724/*
 1725 * The definitions below were lifted from the ipw2100 driver, which only
1726 * supports 'b' mode, so I'm sure these are not exactly correct.
1727 *
1728 * Somebody fix these!!
1729 */
1730#define REG_MIN_CHANNEL 0
1731#define REG_MAX_CHANNEL 14
1732
1733#define REG_CHANNEL_MASK 0x00003FFF
1734#define IPW_IBSS_11B_DEFAULT_MASK 0x87ff
1735
1736static const long ipw_frequencies[] = {
1737 2412, 2417, 2422, 2427,
1738 2432, 2437, 2442, 2447,
1739 2452, 2457, 2462, 2467,
1740 2472, 2484
1741};
1742
1743#define FREQ_COUNT ARRAY_SIZE(ipw_frequencies)
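/*
 * Editor's sketch (hypothetical helper): mapping a 1-based 2.4 GHz channel
 * number to its centre frequency in MHz via the table above.
 */
static inline long example_channel_to_freq(int channel)
{
	if (channel < 1 || channel > FREQ_COUNT)
		return 0;
	return ipw_frequencies[channel - 1];
}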
1744
1745#define IPW_MAX_CONFIG_RETRIES 10
1746
1747static inline u32 frame_hdr_len(struct ieee80211_hdr *hdr)
1748{
1749 u32 retval;
1750 u16 fc;
1751
1752 retval = sizeof(struct ieee80211_hdr);
1753 fc = le16_to_cpu(hdr->frame_ctl);
1754
1755 /*
1756 * Function ToDS FromDS
1757 * IBSS 0 0
1758 * To AP 1 0
1759 * From AP 0 1
1760 * WDS (bridge) 1 1
1761 *
1762 * Only WDS frames use Address4 among them. --YZ
1763 */
1764 if (!(fc & IEEE80211_FCTL_TODS) || !(fc & IEEE80211_FCTL_FROMDS))
1765 retval -= ETH_ALEN;
1766
1767 return retval;
1768}
1769
1770#endif /* __ipw2200_h__ */
diff --git a/drivers/net/wireless/orinoco.c b/drivers/net/wireless/orinoco.c
index aabcdc2be05e..4d0b5a336bd7 100644
--- a/drivers/net/wireless/orinoco.c
+++ b/drivers/net/wireless/orinoco.c
@@ -94,6 +94,8 @@
94#include <net/iw_handler.h> 94#include <net/iw_handler.h>
95#include <net/ieee80211.h> 95#include <net/ieee80211.h>
96 96
97#include <net/ieee80211.h>
98
97#include <asm/uaccess.h> 99#include <asm/uaccess.h>
98#include <asm/io.h> 100#include <asm/io.h>
99#include <asm/system.h> 101#include <asm/system.h>
@@ -101,7 +103,6 @@
101#include "hermes.h" 103#include "hermes.h"
102#include "hermes_rid.h" 104#include "hermes_rid.h"
103#include "orinoco.h" 105#include "orinoco.h"
104#include "ieee802_11.h"
105 106
106/********************************************************************/ 107/********************************************************************/
107/* Module information */ 108/* Module information */
@@ -150,7 +151,7 @@ static const u8 encaps_hdr[] = {0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00};
150#define ENCAPS_OVERHEAD (sizeof(encaps_hdr) + 2) 151#define ENCAPS_OVERHEAD (sizeof(encaps_hdr) + 2)
151 152
152#define ORINOCO_MIN_MTU 256 153#define ORINOCO_MIN_MTU 256
153#define ORINOCO_MAX_MTU (IEEE802_11_DATA_LEN - ENCAPS_OVERHEAD) 154#define ORINOCO_MAX_MTU (IEEE80211_DATA_LEN - ENCAPS_OVERHEAD)
154 155
155#define SYMBOL_MAX_VER_LEN (14) 156#define SYMBOL_MAX_VER_LEN (14)
156#define USER_BAP 0 157#define USER_BAP 0
@@ -442,7 +443,7 @@ static int orinoco_change_mtu(struct net_device *dev, int new_mtu)
442 if ( (new_mtu < ORINOCO_MIN_MTU) || (new_mtu > ORINOCO_MAX_MTU) ) 443 if ( (new_mtu < ORINOCO_MIN_MTU) || (new_mtu > ORINOCO_MAX_MTU) )
443 return -EINVAL; 444 return -EINVAL;
444 445
445 if ( (new_mtu + ENCAPS_OVERHEAD + IEEE802_11_HLEN) > 446 if ( (new_mtu + ENCAPS_OVERHEAD + IEEE80211_HLEN) >
446 (priv->nicbuf_size - ETH_HLEN) ) 447 (priv->nicbuf_size - ETH_HLEN) )
447 return -EINVAL; 448 return -EINVAL;
448 449
@@ -918,7 +919,7 @@ static void __orinoco_ev_rx(struct net_device *dev, hermes_t *hw)
918 data. */ 919 data. */
919 return; 920 return;
920 } 921 }
921 if (length > IEEE802_11_DATA_LEN) { 922 if (length > IEEE80211_DATA_LEN) {
922 printk(KERN_WARNING "%s: Oversized frame received (%d bytes)\n", 923 printk(KERN_WARNING "%s: Oversized frame received (%d bytes)\n",
923 dev->name, length); 924 dev->name, length);
924 stats->rx_length_errors++; 925 stats->rx_length_errors++;
@@ -2272,7 +2273,7 @@ static int orinoco_init(struct net_device *dev)
2272 2273
2273 /* No need to lock, the hw_unavailable flag is already set in 2274 /* No need to lock, the hw_unavailable flag is already set in
2274 * alloc_orinocodev() */ 2275 * alloc_orinocodev() */
2275 priv->nicbuf_size = IEEE802_11_FRAME_LEN + ETH_HLEN; 2276 priv->nicbuf_size = IEEE80211_FRAME_LEN + ETH_HLEN;
2276 2277
2277 /* Initialize the firmware */ 2278 /* Initialize the firmware */
2278 err = orinoco_reinit_firmware(dev); 2279 err = orinoco_reinit_firmware(dev);
diff --git a/drivers/net/wireless/wl3501.h b/drivers/net/wireless/wl3501.h
index 8636d9306785..b5719437e981 100644
--- a/drivers/net/wireless/wl3501.h
+++ b/drivers/net/wireless/wl3501.h
@@ -2,7 +2,7 @@
2#define __WL3501_H__ 2#define __WL3501_H__
3 3
4#include <linux/spinlock.h> 4#include <linux/spinlock.h>
5#include "ieee802_11.h" 5#include <net/ieee80211.h>
6 6
7/* define for WLA 2.0 */ 7/* define for WLA 2.0 */
8#define WL3501_BLKSZ 256 8#define WL3501_BLKSZ 256
@@ -548,7 +548,7 @@ struct wl3501_80211_tx_plcp_hdr {
548 548
549struct wl3501_80211_tx_hdr { 549struct wl3501_80211_tx_hdr {
550 struct wl3501_80211_tx_plcp_hdr pclp_hdr; 550 struct wl3501_80211_tx_plcp_hdr pclp_hdr;
551 struct ieee802_11_hdr mac_hdr; 551 struct ieee80211_hdr mac_hdr;
552} __attribute__ ((packed)); 552} __attribute__ ((packed));
553 553
554/* 554/*
diff --git a/drivers/usb/net/Makefile b/drivers/usb/net/Makefile
index 16f352195512..fe3fd4115e1e 100644
--- a/drivers/usb/net/Makefile
+++ b/drivers/usb/net/Makefile
@@ -8,5 +8,3 @@ obj-$(CONFIG_USB_PEGASUS) += pegasus.o
8obj-$(CONFIG_USB_RTL8150) += rtl8150.o 8obj-$(CONFIG_USB_RTL8150) += rtl8150.o
9obj-$(CONFIG_USB_USBNET) += usbnet.o 9obj-$(CONFIG_USB_USBNET) += usbnet.o
10obj-$(CONFIG_USB_ZD1201) += zd1201.o 10obj-$(CONFIG_USB_ZD1201) += zd1201.o
11
12CFLAGS_zd1201.o = -Idrivers/net/wireless/
diff --git a/drivers/usb/net/zd1201.c b/drivers/usb/net/zd1201.c
index 3b387b005739..72b06129e20a 100644
--- a/drivers/usb/net/zd1201.c
+++ b/drivers/usb/net/zd1201.c
@@ -21,7 +21,7 @@
21#include <linux/string.h> 21#include <linux/string.h>
22#include <linux/if_arp.h> 22#include <linux/if_arp.h>
23#include <linux/firmware.h> 23#include <linux/firmware.h>
24#include <ieee802_11.h> 24#include <net/ieee80211.h>
25#include "zd1201.h" 25#include "zd1201.h"
26 26
27static struct usb_device_id zd1201_table[] = { 27static struct usb_device_id zd1201_table[] = {
@@ -337,25 +337,25 @@ static void zd1201_usbrx(struct urb *urb, struct pt_regs *regs)
337 goto resubmit; 337 goto resubmit;
338 } 338 }
339 339
340 if ((seq & IEEE802_11_SCTL_FRAG) || 340 if ((seq & IEEE80211_SCTL_FRAG) ||
341 (fc & IEEE802_11_FCTL_MOREFRAGS)) { 341 (fc & IEEE80211_FCTL_MOREFRAGS)) {
342 struct zd1201_frag *frag = NULL; 342 struct zd1201_frag *frag = NULL;
343 char *ptr; 343 char *ptr;
344 344
345 if (datalen<14) 345 if (datalen<14)
346 goto resubmit; 346 goto resubmit;
347 if ((seq & IEEE802_11_SCTL_FRAG) == 0) { 347 if ((seq & IEEE80211_SCTL_FRAG) == 0) {
348 frag = kmalloc(sizeof(struct zd1201_frag*), 348 frag = kmalloc(sizeof(struct zd1201_frag*),
349 GFP_ATOMIC); 349 GFP_ATOMIC);
350 if (!frag) 350 if (!frag)
351 goto resubmit; 351 goto resubmit;
352 skb = dev_alloc_skb(IEEE802_11_DATA_LEN +14+2); 352 skb = dev_alloc_skb(IEEE80211_DATA_LEN +14+2);
353 if (!skb) { 353 if (!skb) {
354 kfree(frag); 354 kfree(frag);
355 goto resubmit; 355 goto resubmit;
356 } 356 }
357 frag->skb = skb; 357 frag->skb = skb;
358 frag->seq = seq & IEEE802_11_SCTL_SEQ; 358 frag->seq = seq & IEEE80211_SCTL_SEQ;
359 skb_reserve(skb, 2); 359 skb_reserve(skb, 2);
360 memcpy(skb_put(skb, 12), &data[datalen-14], 12); 360 memcpy(skb_put(skb, 12), &data[datalen-14], 12);
361 memcpy(skb_put(skb, 2), &data[6], 2); 361 memcpy(skb_put(skb, 2), &data[6], 2);
@@ -364,7 +364,7 @@ static void zd1201_usbrx(struct urb *urb, struct pt_regs *regs)
364 goto resubmit; 364 goto resubmit;
365 } 365 }
366 hlist_for_each_entry(frag, node, &zd->fraglist, fnode) 366 hlist_for_each_entry(frag, node, &zd->fraglist, fnode)
367 if(frag->seq == (seq&IEEE802_11_SCTL_SEQ)) 367 if(frag->seq == (seq&IEEE80211_SCTL_SEQ))
368 break; 368 break;
369 if (!frag) 369 if (!frag)
370 goto resubmit; 370 goto resubmit;
@@ -372,7 +372,7 @@ static void zd1201_usbrx(struct urb *urb, struct pt_regs *regs)
372 ptr = skb_put(skb, len); 372 ptr = skb_put(skb, len);
373 if (ptr) 373 if (ptr)
374 memcpy(ptr, data+8, len); 374 memcpy(ptr, data+8, len);
375 if (fc & IEEE802_11_FCTL_MOREFRAGS) 375 if (fc & IEEE80211_FCTL_MOREFRAGS)
376 goto resubmit; 376 goto resubmit;
377 hlist_del_init(&frag->fnode); 377 hlist_del_init(&frag->fnode);
378 kfree(frag); 378 kfree(frag);
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index a1478258d002..8a2df4dfbc59 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -65,7 +65,7 @@ static inline int is_zero_ether_addr(const u8 *addr)
65 */ 65 */
66static inline int is_multicast_ether_addr(const u8 *addr) 66static inline int is_multicast_ether_addr(const u8 *addr)
67{ 67{
68 return addr[0] & 0x01; 68 return ((addr[0] != 0xff) && (0x01 & addr[0]));
69} 69}
70 70
71/** 71/**
diff --git a/include/net/ieee80211.h b/include/net/ieee80211.h
index 7fe57f957a51..065b702df563 100644
--- a/include/net/ieee80211.h
+++ b/include/net/ieee80211.h
@@ -20,7 +20,6 @@
20 */ 20 */
21#ifndef IEEE80211_H 21#ifndef IEEE80211_H
22#define IEEE80211_H 22#define IEEE80211_H
23
24#include <linux/if_ether.h> /* ETH_ALEN */ 23#include <linux/if_ether.h> /* ETH_ALEN */
25#include <linux/kernel.h> /* ARRAY_SIZE */ 24#include <linux/kernel.h> /* ARRAY_SIZE */
26 25
@@ -94,6 +93,8 @@ struct eapol {
94 u16 length; 93 u16 length;
95} __attribute__ ((packed)); 94} __attribute__ ((packed));
96 95
96#define IEEE80211_1ADDR_LEN 10
97#define IEEE80211_2ADDR_LEN 16
97#define IEEE80211_3ADDR_LEN 24 98#define IEEE80211_3ADDR_LEN 24
98#define IEEE80211_4ADDR_LEN 30 99#define IEEE80211_4ADDR_LEN 30
99#define IEEE80211_FCS_LEN 4 100#define IEEE80211_FCS_LEN 4
@@ -300,23 +301,6 @@ struct ieee80211_snap_hdr {
300#define WLAN_REASON_STA_REQ_ASSOC_WITHOUT_AUTH 9 301#define WLAN_REASON_STA_REQ_ASSOC_WITHOUT_AUTH 9
301 302
302 303
303/* Information Element IDs */
304#define WLAN_EID_SSID 0
305#define WLAN_EID_SUPP_RATES 1
306#define WLAN_EID_FH_PARAMS 2
307#define WLAN_EID_DS_PARAMS 3
308#define WLAN_EID_CF_PARAMS 4
309#define WLAN_EID_TIM 5
310#define WLAN_EID_IBSS_PARAMS 6
311#define WLAN_EID_CHALLENGE 16
312#define WLAN_EID_RSN 48
313#define WLAN_EID_GENERIC 221
314
315#define IEEE80211_MGMT_HDR_LEN 24
316#define IEEE80211_DATA_HDR3_LEN 24
317#define IEEE80211_DATA_HDR4_LEN 30
318
319
320#define IEEE80211_STATMASK_SIGNAL (1<<0) 304#define IEEE80211_STATMASK_SIGNAL (1<<0)
321#define IEEE80211_STATMASK_RSSI (1<<1) 305#define IEEE80211_STATMASK_RSSI (1<<1)
322#define IEEE80211_STATMASK_NOISE (1<<2) 306#define IEEE80211_STATMASK_NOISE (1<<2)
@@ -441,6 +425,8 @@ struct ieee80211_stats {
441 425
442struct ieee80211_device; 426struct ieee80211_device;
443 427
428#include "ieee80211_crypt.h"
429
444#define SEC_KEY_1 (1<<0) 430#define SEC_KEY_1 (1<<0)
445#define SEC_KEY_2 (1<<1) 431#define SEC_KEY_2 (1<<1)
446#define SEC_KEY_3 (1<<2) 432#define SEC_KEY_3 (1<<2)
@@ -488,15 +474,6 @@ Total: 28-2340 bytes
488 474
489*/ 475*/
490 476
491struct ieee80211_header_data {
492 u16 frame_ctl;
493 u16 duration_id;
494 u8 addr1[6];
495 u8 addr2[6];
496 u8 addr3[6];
497 u16 seq_ctrl;
498};
499
500#define BEACON_PROBE_SSID_ID_POSITION 12 477#define BEACON_PROBE_SSID_ID_POSITION 12
501 478
502/* Management Frame Information Element Types */ 479/* Management Frame Information Element Types */
@@ -541,7 +518,7 @@ struct ieee80211_info_element {
541*/ 518*/
542 519
543struct ieee80211_authentication { 520struct ieee80211_authentication {
544 struct ieee80211_header_data header; 521 struct ieee80211_hdr_3addr header;
545 u16 algorithm; 522 u16 algorithm;
546 u16 transaction; 523 u16 transaction;
547 u16 status; 524 u16 status;
@@ -550,7 +527,7 @@ struct ieee80211_authentication {
550 527
551 528
552struct ieee80211_probe_response { 529struct ieee80211_probe_response {
553 struct ieee80211_header_data header; 530 struct ieee80211_hdr_3addr header;
554 u32 time_stamp[2]; 531 u32 time_stamp[2];
555 u16 beacon_interval; 532 u16 beacon_interval;
556 u16 capability; 533 u16 capability;
@@ -648,12 +625,6 @@ enum ieee80211_state {
648#define MAC_ARG(x) ((u8*)(x))[0],((u8*)(x))[1],((u8*)(x))[2],((u8*)(x))[3],((u8*)(x))[4],((u8*)(x))[5] 625#define MAC_ARG(x) ((u8*)(x))[0],((u8*)(x))[1],((u8*)(x))[2],((u8*)(x))[3],((u8*)(x))[4],((u8*)(x))[5]
649 626
650 627
651extern inline int is_broadcast_ether_addr(const u8 *addr)
652{
653 return ((addr[0] == 0xff) && (addr[1] == 0xff) && (addr[2] == 0xff) && \
654 (addr[3] == 0xff) && (addr[4] == 0xff) && (addr[5] == 0xff));
655}
656
657#define CFG_IEEE80211_RESERVE_FCS (1<<0) 628#define CFG_IEEE80211_RESERVE_FCS (1<<0)
658#define CFG_IEEE80211_COMPUTE_FCS (1<<1) 629#define CFG_IEEE80211_COMPUTE_FCS (1<<1)
659 630
@@ -787,21 +758,21 @@ extern inline int ieee80211_is_valid_mode(struct ieee80211_device *ieee, int mod
787 758
788extern inline int ieee80211_get_hdrlen(u16 fc) 759extern inline int ieee80211_get_hdrlen(u16 fc)
789{ 760{
790 int hdrlen = 24; 761 int hdrlen = IEEE80211_3ADDR_LEN;
791 762
792 switch (WLAN_FC_GET_TYPE(fc)) { 763 switch (WLAN_FC_GET_TYPE(fc)) {
793 case IEEE80211_FTYPE_DATA: 764 case IEEE80211_FTYPE_DATA:
794 if ((fc & IEEE80211_FCTL_FROMDS) && (fc & IEEE80211_FCTL_TODS)) 765 if ((fc & IEEE80211_FCTL_FROMDS) && (fc & IEEE80211_FCTL_TODS))
795 hdrlen = 30; /* Addr4 */ 766 hdrlen = IEEE80211_4ADDR_LEN;
796 break; 767 break;
797 case IEEE80211_FTYPE_CTL: 768 case IEEE80211_FTYPE_CTL:
798 switch (WLAN_FC_GET_STYPE(fc)) { 769 switch (WLAN_FC_GET_STYPE(fc)) {
799 case IEEE80211_STYPE_CTS: 770 case IEEE80211_STYPE_CTS:
800 case IEEE80211_STYPE_ACK: 771 case IEEE80211_STYPE_ACK:
801 hdrlen = 10; 772 hdrlen = IEEE80211_1ADDR_LEN;
802 break; 773 break;
803 default: 774 default:
804 hdrlen = 16; 775 hdrlen = IEEE80211_2ADDR_LEN;
805 break; 776 break;
806 } 777 }
807 break; 778 break;
@@ -878,5 +849,4 @@ static inline const char *escape_essid(const char *essid, u8 essid_len) {
878 *d = '\0'; 849 *d = '\0';
879 return escaped; 850 return escaped;
880} 851}
881
882#endif /* IEEE80211_H */ 852#endif /* IEEE80211_H */
diff --git a/include/net/ieee80211_crypt.h b/include/net/ieee80211_crypt.h
new file mode 100644
index 000000000000..b58a3bcc0dc0
--- /dev/null
+++ b/include/net/ieee80211_crypt.h
@@ -0,0 +1,86 @@
1/*
2 * Original code based on Host AP (software wireless LAN access point) driver
3 * for Intersil Prism2/2.5/3.
4 *
5 * Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen
6 * <jkmaline@cc.hut.fi>
7 * Copyright (c) 2002-2003, Jouni Malinen <jkmaline@cc.hut.fi>
8 *
9 * Adaptation to a generic IEEE 802.11 stack by James Ketrenos
10 * <jketreno@linux.intel.com>
11 *
12 * Copyright (c) 2004, Intel Corporation
13 *
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License version 2 as
16 * published by the Free Software Foundation. See README and COPYING for
17 * more details.
18 */
19
20/*
21 * This file defines the interface to the ieee80211 crypto module.
22 */
23#ifndef IEEE80211_CRYPT_H
24#define IEEE80211_CRYPT_H
25
26#include <linux/skbuff.h>
27
28struct ieee80211_crypto_ops {
29 const char *name;
30
31 /* init new crypto context (e.g., allocate private data space,
32 * select IV, etc.); returns NULL on failure or pointer to allocated
33 * private data on success */
34 void * (*init)(int keyidx);
35
36 /* deinitialize crypto context and free allocated private data */
37 void (*deinit)(void *priv);
38
39 /* encrypt/decrypt return < 0 on error or >= 0 on success. The return
40 * value from decrypt_mpdu is passed as the keyidx value for
41 * decrypt_msdu. skb must have enough head and tail room for the
42 * encryption; if not, an error will be returned; these functions are
43 * called for all MPDUs (i.e., fragments).
44 */
45 int (*encrypt_mpdu)(struct sk_buff *skb, int hdr_len, void *priv);
46 int (*decrypt_mpdu)(struct sk_buff *skb, int hdr_len, void *priv);
47
48 /* These functions are called for full MSDUs, i.e. full frames.
49 * These can be NULL if full MSDU operations are not needed. */
50 int (*encrypt_msdu)(struct sk_buff *skb, int hdr_len, void *priv);
51 int (*decrypt_msdu)(struct sk_buff *skb, int keyidx, int hdr_len,
52 void *priv);
53
54 int (*set_key)(void *key, int len, u8 *seq, void *priv);
55 int (*get_key)(void *key, int len, u8 *seq, void *priv);
56
57 /* procfs handler for printing out key information and possible
58 * statistics */
59 char * (*print_stats)(char *p, void *priv);
60
61 /* maximum number of bytes added by encryption; encrypt buf is
62 * allocated with extra_prefix_len bytes, copy of in_buf, and
63 * extra_postfix_len; encrypt need not use all this space, but
64 * the result must start at the beginning of the buffer and correct
65 * length must be returned */
66 int extra_prefix_len, extra_postfix_len;
67
68 struct module *owner;
69};
70
71struct ieee80211_crypt_data {
72 struct list_head list; /* delayed deletion list */
73 struct ieee80211_crypto_ops *ops;
74 void *priv;
75 atomic_t refcnt;
76};
77
78int ieee80211_register_crypto_ops(struct ieee80211_crypto_ops *ops);
79int ieee80211_unregister_crypto_ops(struct ieee80211_crypto_ops *ops);
80struct ieee80211_crypto_ops * ieee80211_get_crypto_ops(const char *name);
81void ieee80211_crypt_deinit_entries(struct ieee80211_device *, int);
82void ieee80211_crypt_deinit_handler(unsigned long);
83void ieee80211_crypt_delayed_deinit(struct ieee80211_device *ieee,
84 struct ieee80211_crypt_data **crypt);
85
86#endif
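/*
 * Editor's usage sketch (assumed caller, not part of this patch): a driver
 * would look up an algorithm with ieee80211_get_crypto_ops() and call its
 * encrypt_mpdu hook while holding a reference on the crypt context.
 */
static int example_encrypt_mpdu(struct sk_buff *skb, int hdr_len,
				struct ieee80211_crypt_data *crypt)
{
	int res;

	if (!crypt || !crypt->ops || !crypt->ops->encrypt_mpdu)
		return -1;

	atomic_inc(&crypt->refcnt);
	res = crypt->ops->encrypt_mpdu(skb, hdr_len, crypt->priv);
	atomic_dec(&crypt->refcnt);

	return res;
}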
diff --git a/net/Kconfig b/net/Kconfig
index 9251b28e8d5d..8a12ea8f0c05 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -640,6 +640,8 @@ source "net/irda/Kconfig"
640 640
641source "net/bluetooth/Kconfig" 641source "net/bluetooth/Kconfig"
642 642
643source "net/ieee80211/Kconfig"
644
643source "drivers/net/Kconfig" 645source "drivers/net/Kconfig"
644 646
645endmenu 647endmenu
diff --git a/net/Makefile b/net/Makefile
index 8e2bdc025ab8..83bc52d87bae 100644
--- a/net/Makefile
+++ b/net/Makefile
@@ -42,6 +42,7 @@ obj-$(CONFIG_DECNET) += decnet/
42obj-$(CONFIG_ECONET) += econet/ 42obj-$(CONFIG_ECONET) += econet/
43obj-$(CONFIG_VLAN_8021Q) += 8021q/ 43obj-$(CONFIG_VLAN_8021Q) += 8021q/
44obj-$(CONFIG_IP_SCTP) += sctp/ 44obj-$(CONFIG_IP_SCTP) += sctp/
45obj-$(CONFIG_IEEE80211) += ieee80211/
45 46
46ifeq ($(CONFIG_NET),y) 47ifeq ($(CONFIG_NET),y)
47obj-$(CONFIG_SYSCTL) += sysctl_net.o 48obj-$(CONFIG_SYSCTL) += sysctl_net.o
diff --git a/net/ieee80211/Kconfig b/net/ieee80211/Kconfig
new file mode 100644
index 000000000000..961c711eda97
--- /dev/null
+++ b/net/ieee80211/Kconfig
@@ -0,0 +1,69 @@
1config IEEE80211
2 tristate "Generic IEEE 802.11 Networking Stack"
3 select NET_RADIO
4 ---help---
5 This option enables the hardware independent IEEE 802.11
6 networking stack.
7
8config IEEE80211_DEBUG
9 bool "Enable full debugging output"
10 depends on IEEE80211
11 ---help---
12 This option will enable debug tracing output for the
13 ieee80211 network stack.
14
15 This will result in the kernel module being ~70k larger. You
16 can control which debug output is sent to the kernel log by
17 setting the value in
18
19 /proc/net/ieee80211/debug_level
20
21 For example:
22
23	  % echo 0x00000FF0 > /proc/net/ieee80211/debug_level
24
25 For a list of values you can assign to debug_level, you
26 can look at the bit mask values in <net/ieee80211.h>
27
28 If you are not trying to debug or develop the ieee80211
29 subsystem, you most likely want to say N here.
30
31config IEEE80211_CRYPT_WEP
32 tristate "IEEE 802.11 WEP encryption (802.1x)"
33 depends on IEEE80211
34 select CRYPTO
35 select CRYPTO_ARC4
36 select CRC32
37 ---help---
38 Include software based cipher suites in support of IEEE
39 802.11's WEP. This is needed for WEP as well as 802.1x.
40
41	  This can be compiled as a module and will be called
42 "ieee80211_crypt_wep".
43
44config IEEE80211_CRYPT_CCMP
45 tristate "IEEE 802.11i CCMP support"
46 depends on IEEE80211
47 select CRYPTO
48 select CRYPTO_AES
49 ---help---
50 Include software based cipher suites in support of IEEE 802.11i
51 (aka TGi, WPA, WPA2, WPA-PSK, etc.) for use with CCMP enabled
52 networks.
53
54	  This can be compiled as a module and will be called
55 "ieee80211_crypt_ccmp".
56
57config IEEE80211_CRYPT_TKIP
58 tristate "IEEE 802.11i TKIP encryption"
59 depends on IEEE80211
60 select CRYPTO
61 select CRYPTO_MICHAEL_MIC
62 ---help---
63 Include software based cipher suites in support of IEEE 802.11i
64 (aka TGi, WPA, WPA2, WPA-PSK, etc.) for use with TKIP enabled
65 networks.
66
67	  This can be compiled as a module and will be called
68 "ieee80211_crypt_tkip".
69
diff --git a/net/ieee80211/Makefile b/net/ieee80211/Makefile
new file mode 100644
index 000000000000..29be9cae1def
--- /dev/null
+++ b/net/ieee80211/Makefile
@@ -0,0 +1,11 @@
1obj-$(CONFIG_IEEE80211) += ieee80211.o
2obj-$(CONFIG_IEEE80211) += ieee80211_crypt.o
3obj-$(CONFIG_IEEE80211_CRYPT_WEP) += ieee80211_crypt_wep.o
4obj-$(CONFIG_IEEE80211_CRYPT_CCMP) += ieee80211_crypt_ccmp.o
5obj-$(CONFIG_IEEE80211_CRYPT_TKIP) += ieee80211_crypt_tkip.o
6ieee80211-objs := \
7 ieee80211_module.o \
8 ieee80211_tx.o \
9 ieee80211_rx.o \
10 ieee80211_wx.o
11
diff --git a/net/ieee80211/ieee80211_crypt.c b/net/ieee80211/ieee80211_crypt.c
new file mode 100644
index 000000000000..05a6f2f298db
--- /dev/null
+++ b/net/ieee80211/ieee80211_crypt.c
@@ -0,0 +1,259 @@
1/*
2 * Host AP crypto routines
3 *
4 * Copyright (c) 2002-2003, Jouni Malinen <jkmaline@cc.hut.fi>
5 * Portions Copyright (C) 2004, Intel Corporation <jketreno@linux.intel.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation. See README and COPYING for
10 * more details.
11 *
12 */
13
14#include <linux/config.h>
15#include <linux/version.h>
16#include <linux/module.h>
17#include <linux/init.h>
18#include <linux/slab.h>
19#include <asm/string.h>
20#include <asm/errno.h>
21
22#include <net/ieee80211.h>
23
24MODULE_AUTHOR("Jouni Malinen");
25MODULE_DESCRIPTION("HostAP crypto");
26MODULE_LICENSE("GPL");
27
28struct ieee80211_crypto_alg {
29 struct list_head list;
30 struct ieee80211_crypto_ops *ops;
31};
32
33
34struct ieee80211_crypto {
35 struct list_head algs;
36 spinlock_t lock;
37};
38
39static struct ieee80211_crypto *hcrypt;
40
41void ieee80211_crypt_deinit_entries(struct ieee80211_device *ieee,
42 int force)
43{
44 struct list_head *ptr, *n;
45 struct ieee80211_crypt_data *entry;
46
47 for (ptr = ieee->crypt_deinit_list.next, n = ptr->next;
48 ptr != &ieee->crypt_deinit_list; ptr = n, n = ptr->next) {
49 entry = list_entry(ptr, struct ieee80211_crypt_data, list);
50
51 if (atomic_read(&entry->refcnt) != 0 && !force)
52 continue;
53
54 list_del(ptr);
55
56 if (entry->ops) {
57 entry->ops->deinit(entry->priv);
58 module_put(entry->ops->owner);
59 }
60 kfree(entry);
61 }
62}
63
64void ieee80211_crypt_deinit_handler(unsigned long data)
65{
66 struct ieee80211_device *ieee = (struct ieee80211_device *)data;
67 unsigned long flags;
68
69 spin_lock_irqsave(&ieee->lock, flags);
70 ieee80211_crypt_deinit_entries(ieee, 0);
71 if (!list_empty(&ieee->crypt_deinit_list)) {
72 printk(KERN_DEBUG "%s: entries remaining in delayed crypt "
73 "deletion list\n", ieee->dev->name);
74 ieee->crypt_deinit_timer.expires = jiffies + HZ;
75 add_timer(&ieee->crypt_deinit_timer);
76 }
77 spin_unlock_irqrestore(&ieee->lock, flags);
78
79}
80
81void ieee80211_crypt_delayed_deinit(struct ieee80211_device *ieee,
82 struct ieee80211_crypt_data **crypt)
83{
84 struct ieee80211_crypt_data *tmp;
85 unsigned long flags;
86
87 if (*crypt == NULL)
88 return;
89
90 tmp = *crypt;
91 *crypt = NULL;
92
93 /* must not run ops->deinit() while there may be pending encrypt or
94 * decrypt operations. Use a list of delayed deinits to avoid needing
95 * locking. */
96
97 spin_lock_irqsave(&ieee->lock, flags);
98 list_add(&tmp->list, &ieee->crypt_deinit_list);
99 if (!timer_pending(&ieee->crypt_deinit_timer)) {
100 ieee->crypt_deinit_timer.expires = jiffies + HZ;
101 add_timer(&ieee->crypt_deinit_timer);
102 }
103 spin_unlock_irqrestore(&ieee->lock, flags);
104}
105
106int ieee80211_register_crypto_ops(struct ieee80211_crypto_ops *ops)
107{
108 unsigned long flags;
109 struct ieee80211_crypto_alg *alg;
110
111 if (hcrypt == NULL)
112 return -1;
113
114 alg = kmalloc(sizeof(*alg), GFP_KERNEL);
115 if (alg == NULL)
116 return -ENOMEM;
117
118 memset(alg, 0, sizeof(*alg));
119 alg->ops = ops;
120
121 spin_lock_irqsave(&hcrypt->lock, flags);
122 list_add(&alg->list, &hcrypt->algs);
123 spin_unlock_irqrestore(&hcrypt->lock, flags);
124
125 printk(KERN_DEBUG "ieee80211_crypt: registered algorithm '%s'\n",
126 ops->name);
127
128 return 0;
129}
130
131int ieee80211_unregister_crypto_ops(struct ieee80211_crypto_ops *ops)
132{
133 unsigned long flags;
134 struct list_head *ptr;
135 struct ieee80211_crypto_alg *del_alg = NULL;
136
137 if (hcrypt == NULL)
138 return -1;
139
140 spin_lock_irqsave(&hcrypt->lock, flags);
141 for (ptr = hcrypt->algs.next; ptr != &hcrypt->algs; ptr = ptr->next) {
142 struct ieee80211_crypto_alg *alg =
143 (struct ieee80211_crypto_alg *) ptr;
144 if (alg->ops == ops) {
145 list_del(&alg->list);
146 del_alg = alg;
147 break;
148 }
149 }
150 spin_unlock_irqrestore(&hcrypt->lock, flags);
151
152 if (del_alg) {
153 printk(KERN_DEBUG "ieee80211_crypt: unregistered algorithm "
154 "'%s'\n", ops->name);
155 kfree(del_alg);
156 }
157
158 return del_alg ? 0 : -1;
159}
160
161
162struct ieee80211_crypto_ops * ieee80211_get_crypto_ops(const char *name)
163{
164 unsigned long flags;
165 struct list_head *ptr;
166 struct ieee80211_crypto_alg *found_alg = NULL;
167
168 if (hcrypt == NULL)
169 return NULL;
170
171 spin_lock_irqsave(&hcrypt->lock, flags);
172 for (ptr = hcrypt->algs.next; ptr != &hcrypt->algs; ptr = ptr->next) {
173 struct ieee80211_crypto_alg *alg =
174 (struct ieee80211_crypto_alg *) ptr;
175 if (strcmp(alg->ops->name, name) == 0) {
176 found_alg = alg;
177 break;
178 }
179 }
180 spin_unlock_irqrestore(&hcrypt->lock, flags);
181
182 if (found_alg)
183 return found_alg->ops;
184 else
185 return NULL;
186}
187
188
189static void * ieee80211_crypt_null_init(int keyidx) { return (void *) 1; }
190static void ieee80211_crypt_null_deinit(void *priv) {}
191
192static struct ieee80211_crypto_ops ieee80211_crypt_null = {
193 .name = "NULL",
194 .init = ieee80211_crypt_null_init,
195 .deinit = ieee80211_crypt_null_deinit,
196 .encrypt_mpdu = NULL,
197 .decrypt_mpdu = NULL,
198 .encrypt_msdu = NULL,
199 .decrypt_msdu = NULL,
200 .set_key = NULL,
201 .get_key = NULL,
202 .extra_prefix_len = 0,
203 .extra_postfix_len = 0,
204 .owner = THIS_MODULE,
205};
206
207
208static int __init ieee80211_crypto_init(void)
209{
210 int ret = -ENOMEM;
211
212 hcrypt = kmalloc(sizeof(*hcrypt), GFP_KERNEL);
213 if (!hcrypt)
214 goto out;
215
216 memset(hcrypt, 0, sizeof(*hcrypt));
217 INIT_LIST_HEAD(&hcrypt->algs);
218 spin_lock_init(&hcrypt->lock);
219
220 ret = ieee80211_register_crypto_ops(&ieee80211_crypt_null);
221 if (ret < 0) {
222 kfree(hcrypt);
223 hcrypt = NULL;
224 }
225out:
226 return ret;
227}
228
229
230static void __exit ieee80211_crypto_deinit(void)
231{
232 struct list_head *ptr, *n;
233
234 if (hcrypt == NULL)
235 return;
236
237 for (ptr = hcrypt->algs.next, n = ptr->next; ptr != &hcrypt->algs;
238 ptr = n, n = ptr->next) {
239 struct ieee80211_crypto_alg *alg =
240 (struct ieee80211_crypto_alg *) ptr;
241 list_del(ptr);
242 printk(KERN_DEBUG "ieee80211_crypt: unregistered algorithm "
243 "'%s' (deinit)\n", alg->ops->name);
244 kfree(alg);
245 }
246
247 kfree(hcrypt);
248}
249
250EXPORT_SYMBOL(ieee80211_crypt_deinit_entries);
251EXPORT_SYMBOL(ieee80211_crypt_deinit_handler);
252EXPORT_SYMBOL(ieee80211_crypt_delayed_deinit);
253
254EXPORT_SYMBOL(ieee80211_register_crypto_ops);
255EXPORT_SYMBOL(ieee80211_unregister_crypto_ops);
256EXPORT_SYMBOL(ieee80211_get_crypto_ops);
257
258module_init(ieee80211_crypto_init);
259module_exit(ieee80211_crypto_deinit);
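
The registry above is consumed by drivers roughly as follows: look the cipher up by name, allocate a per-key context through ops->init(), install key material with ops->set_key(), and retire the old context through the delayed-deinit list so pending encrypt/decrypt calls are not torn down underneath. A minimal sketch, assuming a hypothetical helper name and key source; only the ieee80211_* symbols come from the code above, the cipher module named in the Kconfig help has to be loaded for the lookup to succeed, and a real driver would also hold ieee->lock around the swap:

#include <linux/module.h>
#include <linux/slab.h>
#include <asm/string.h>
#include <asm/errno.h>
#include <net/ieee80211.h>

/* Hypothetical: install a TKIP key on key index 0. */
static int example_install_tkip_key(struct ieee80211_device *ieee,
				    u8 *key, int key_len)
{
	struct ieee80211_crypto_ops *ops;
	struct ieee80211_crypt_data *new_crypt;

	ops = ieee80211_get_crypto_ops("TKIP");
	if (ops == NULL || !try_module_get(ops->owner))
		return -ENOENT;	/* ieee80211_crypt_tkip not loaded */

	new_crypt = kmalloc(sizeof(*new_crypt), GFP_KERNEL);
	if (new_crypt == NULL) {
		module_put(ops->owner);
		return -ENOMEM;
	}
	memset(new_crypt, 0, sizeof(*new_crypt));
	new_crypt->ops = ops;
	new_crypt->priv = ops->init(0);
	if (new_crypt->priv == NULL) {
		kfree(new_crypt);
		module_put(ops->owner);
		return -ENOMEM;
	}
	ops->set_key(key, key_len, NULL, new_crypt->priv);

	/* The old context is freed later by the crypt_deinit timer, once
	 * no encrypt/decrypt operation can still reference it. */
	ieee80211_crypt_delayed_deinit(ieee, &ieee->crypt[0]);
	ieee->crypt[0] = new_crypt;
	return 0;
}
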
diff --git a/net/ieee80211/ieee80211_crypt_ccmp.c b/net/ieee80211/ieee80211_crypt_ccmp.c
new file mode 100644
index 000000000000..11d15573b26a
--- /dev/null
+++ b/net/ieee80211/ieee80211_crypt_ccmp.c
@@ -0,0 +1,470 @@
1/*
2 * Host AP crypt: host-based CCMP encryption implementation for Host AP driver
3 *
4 * Copyright (c) 2003-2004, Jouni Malinen <jkmaline@cc.hut.fi>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation. See README and COPYING for
9 * more details.
10 */
11
12#include <linux/config.h>
13#include <linux/version.h>
14#include <linux/module.h>
15#include <linux/init.h>
16#include <linux/slab.h>
17#include <linux/random.h>
18#include <linux/skbuff.h>
19#include <linux/netdevice.h>
20#include <linux/if_ether.h>
21#include <linux/if_arp.h>
22#include <asm/string.h>
23#include <linux/wireless.h>
24
25#include <net/ieee80211.h>
26
27
28#include <linux/crypto.h>
29#include <asm/scatterlist.h>
30
31MODULE_AUTHOR("Jouni Malinen");
32MODULE_DESCRIPTION("Host AP crypt: CCMP");
33MODULE_LICENSE("GPL");
34
35#define AES_BLOCK_LEN 16
36#define CCMP_HDR_LEN 8
37#define CCMP_MIC_LEN 8
38#define CCMP_TK_LEN 16
39#define CCMP_PN_LEN 6
40
41struct ieee80211_ccmp_data {
42 u8 key[CCMP_TK_LEN];
43 int key_set;
44
45 u8 tx_pn[CCMP_PN_LEN];
46 u8 rx_pn[CCMP_PN_LEN];
47
48 u32 dot11RSNAStatsCCMPFormatErrors;
49 u32 dot11RSNAStatsCCMPReplays;
50 u32 dot11RSNAStatsCCMPDecryptErrors;
51
52 int key_idx;
53
54 struct crypto_tfm *tfm;
55
56 /* scratch buffers for virt_to_page() (crypto API) */
57 u8 tx_b0[AES_BLOCK_LEN], tx_b[AES_BLOCK_LEN],
58 tx_e[AES_BLOCK_LEN], tx_s0[AES_BLOCK_LEN];
59 u8 rx_b0[AES_BLOCK_LEN], rx_b[AES_BLOCK_LEN], rx_a[AES_BLOCK_LEN];
60};
61
62static void ieee80211_ccmp_aes_encrypt(struct crypto_tfm *tfm,
63 const u8 pt[16], u8 ct[16])
64{
65 struct scatterlist src, dst;
66
67 src.page = virt_to_page(pt);
68 src.offset = offset_in_page(pt);
69 src.length = AES_BLOCK_LEN;
70
71 dst.page = virt_to_page(ct);
72 dst.offset = offset_in_page(ct);
73 dst.length = AES_BLOCK_LEN;
74
75 crypto_cipher_encrypt(tfm, &dst, &src, AES_BLOCK_LEN);
76}
77
78static void * ieee80211_ccmp_init(int key_idx)
79{
80 struct ieee80211_ccmp_data *priv;
81
82 priv = kmalloc(sizeof(*priv), GFP_ATOMIC);
83 if (priv == NULL)
84 goto fail;
85 memset(priv, 0, sizeof(*priv));
86 priv->key_idx = key_idx;
87
88 priv->tfm = crypto_alloc_tfm("aes", 0);
89 if (priv->tfm == NULL) {
90 printk(KERN_DEBUG "ieee80211_crypt_ccmp: could not allocate "
91 "crypto API aes\n");
92 goto fail;
93 }
94
95 return priv;
96
97fail:
98 if (priv) {
99 if (priv->tfm)
100 crypto_free_tfm(priv->tfm);
101 kfree(priv);
102 }
103
104 return NULL;
105}
106
107
108static void ieee80211_ccmp_deinit(void *priv)
109{
110 struct ieee80211_ccmp_data *_priv = priv;
111 if (_priv && _priv->tfm)
112 crypto_free_tfm(_priv->tfm);
113 kfree(priv);
114}
115
116
117static inline void xor_block(u8 *b, u8 *a, size_t len)
118{
119 int i;
120 for (i = 0; i < len; i++)
121 b[i] ^= a[i];
122}
123
124
125static void ccmp_init_blocks(struct crypto_tfm *tfm,
126 struct ieee80211_hdr *hdr,
127 u8 *pn, size_t dlen, u8 *b0, u8 *auth,
128 u8 *s0)
129{
130 u8 *pos, qc = 0;
131 size_t aad_len;
132 u16 fc;
133 int a4_included, qc_included;
134 u8 aad[2 * AES_BLOCK_LEN];
135
136 fc = le16_to_cpu(hdr->frame_ctl);
137 a4_included = ((fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) ==
138 (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS));
139 qc_included = ((WLAN_FC_GET_TYPE(fc) == IEEE80211_FTYPE_DATA) &&
140 (WLAN_FC_GET_STYPE(fc) & 0x08));
141 aad_len = 22;
142 if (a4_included)
143 aad_len += 6;
144 if (qc_included) {
145 pos = (u8 *) &hdr->addr4;
146 if (a4_included)
147 pos += 6;
148 qc = *pos & 0x0f;
149 aad_len += 2;
150 }
151
152 /* CCM Initial Block:
153 * Flag (Include authentication header, M=3 (8-octet MIC),
154 * L=1 (2-octet Dlen))
155 * Nonce: 0x00 | A2 | PN
156 * Dlen */
157 b0[0] = 0x59;
158 b0[1] = qc;
159 memcpy(b0 + 2, hdr->addr2, ETH_ALEN);
160 memcpy(b0 + 8, pn, CCMP_PN_LEN);
161 b0[14] = (dlen >> 8) & 0xff;
162 b0[15] = dlen & 0xff;
163
164 /* AAD:
165 * FC with bits 4..6 and 11..13 masked to zero; 14 is always one
166 * A1 | A2 | A3
167 * SC with bits 4..15 (seq#) masked to zero
168 * A4 (if present)
169 * QC (if present)
170 */
171 pos = (u8 *) hdr;
172 aad[0] = 0; /* aad_len >> 8 */
173 aad[1] = aad_len & 0xff;
174 aad[2] = pos[0] & 0x8f;
175 aad[3] = pos[1] & 0xc7;
176 memcpy(aad + 4, hdr->addr1, 3 * ETH_ALEN);
177 pos = (u8 *) &hdr->seq_ctl;
178 aad[22] = pos[0] & 0x0f;
179 aad[23] = 0; /* all bits masked */
180 memset(aad + 24, 0, 8);
181 if (a4_included)
182 memcpy(aad + 24, hdr->addr4, ETH_ALEN);
183 if (qc_included) {
184 aad[a4_included ? 30 : 24] = qc;
185 /* rest of QC masked */
186 }
187
188 /* Start with the first block and AAD */
189 ieee80211_ccmp_aes_encrypt(tfm, b0, auth);
190 xor_block(auth, aad, AES_BLOCK_LEN);
191 ieee80211_ccmp_aes_encrypt(tfm, auth, auth);
192 xor_block(auth, &aad[AES_BLOCK_LEN], AES_BLOCK_LEN);
193 ieee80211_ccmp_aes_encrypt(tfm, auth, auth);
194 b0[0] &= 0x07;
195 b0[14] = b0[15] = 0;
196 ieee80211_ccmp_aes_encrypt(tfm, b0, s0);
197}
198
199
200static int ieee80211_ccmp_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
201{
202 struct ieee80211_ccmp_data *key = priv;
203 int data_len, i, blocks, last, len;
204 u8 *pos, *mic;
205 struct ieee80211_hdr *hdr;
206 u8 *b0 = key->tx_b0;
207 u8 *b = key->tx_b;
208 u8 *e = key->tx_e;
209 u8 *s0 = key->tx_s0;
210
211 if (skb_headroom(skb) < CCMP_HDR_LEN ||
212 skb_tailroom(skb) < CCMP_MIC_LEN ||
213 skb->len < hdr_len)
214 return -1;
215
216 data_len = skb->len - hdr_len;
217 pos = skb_push(skb, CCMP_HDR_LEN);
218 memmove(pos, pos + CCMP_HDR_LEN, hdr_len);
219 pos += hdr_len;
220 mic = skb_put(skb, CCMP_MIC_LEN);
221
222 i = CCMP_PN_LEN - 1;
223 while (i >= 0) {
224 key->tx_pn[i]++;
225 if (key->tx_pn[i] != 0)
226 break;
227 i--;
228 }
229
230 *pos++ = key->tx_pn[5];
231 *pos++ = key->tx_pn[4];
232 *pos++ = 0;
233 *pos++ = (key->key_idx << 6) | (1 << 5) /* Ext IV included */;
234 *pos++ = key->tx_pn[3];
235 *pos++ = key->tx_pn[2];
236 *pos++ = key->tx_pn[1];
237 *pos++ = key->tx_pn[0];
238
239 hdr = (struct ieee80211_hdr *) skb->data;
240 ccmp_init_blocks(key->tfm, hdr, key->tx_pn, data_len, b0, b, s0);
241
242 blocks = (data_len + AES_BLOCK_LEN - 1) / AES_BLOCK_LEN;
243 last = data_len % AES_BLOCK_LEN;
244
245 for (i = 1; i <= blocks; i++) {
246 len = (i == blocks && last) ? last : AES_BLOCK_LEN;
247 /* Authentication */
248 xor_block(b, pos, len);
249 ieee80211_ccmp_aes_encrypt(key->tfm, b, b);
250 /* Encryption, with counter */
251 b0[14] = (i >> 8) & 0xff;
252 b0[15] = i & 0xff;
253 ieee80211_ccmp_aes_encrypt(key->tfm, b0, e);
254 xor_block(pos, e, len);
255 pos += len;
256 }
257
258 for (i = 0; i < CCMP_MIC_LEN; i++)
259 mic[i] = b[i] ^ s0[i];
260
261 return 0;
262}
263
264
265static int ieee80211_ccmp_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
266{
267 struct ieee80211_ccmp_data *key = priv;
268 u8 keyidx, *pos;
269 struct ieee80211_hdr *hdr;
270 u8 *b0 = key->rx_b0;
271 u8 *b = key->rx_b;
272 u8 *a = key->rx_a;
273 u8 pn[6];
274 int i, blocks, last, len;
275 size_t data_len = skb->len - hdr_len - CCMP_HDR_LEN - CCMP_MIC_LEN;
276 u8 *mic = skb->data + skb->len - CCMP_MIC_LEN;
277
278 if (skb->len < hdr_len + CCMP_HDR_LEN + CCMP_MIC_LEN) {
279 key->dot11RSNAStatsCCMPFormatErrors++;
280 return -1;
281 }
282
283 hdr = (struct ieee80211_hdr *) skb->data;
284 pos = skb->data + hdr_len;
285 keyidx = pos[3];
286 if (!(keyidx & (1 << 5))) {
287 if (net_ratelimit()) {
288 printk(KERN_DEBUG "CCMP: received packet without ExtIV"
289 " flag from " MAC_FMT "\n", MAC_ARG(hdr->addr2));
290 }
291 key->dot11RSNAStatsCCMPFormatErrors++;
292 return -2;
293 }
294 keyidx >>= 6;
295 if (key->key_idx != keyidx) {
296 printk(KERN_DEBUG "CCMP: RX tkey->key_idx=%d frame "
297 "keyidx=%d priv=%p\n", key->key_idx, keyidx, priv);
298 return -6;
299 }
300 if (!key->key_set) {
301 if (net_ratelimit()) {
302 printk(KERN_DEBUG "CCMP: received packet from " MAC_FMT
303 " with keyid=%d that does not have a configured"
304 " key\n", MAC_ARG(hdr->addr2), keyidx);
305 }
306 return -3;
307 }
308
309 pn[0] = pos[7];
310 pn[1] = pos[6];
311 pn[2] = pos[5];
312 pn[3] = pos[4];
313 pn[4] = pos[1];
314 pn[5] = pos[0];
315 pos += 8;
316
317 if (memcmp(pn, key->rx_pn, CCMP_PN_LEN) <= 0) {
318 if (net_ratelimit()) {
319 printk(KERN_DEBUG "CCMP: replay detected: STA=" MAC_FMT
320 " previous PN %02x%02x%02x%02x%02x%02x "
321 "received PN %02x%02x%02x%02x%02x%02x\n",
322 MAC_ARG(hdr->addr2), MAC_ARG(key->rx_pn),
323 MAC_ARG(pn));
324 }
325 key->dot11RSNAStatsCCMPReplays++;
326 return -4;
327 }
328
329 ccmp_init_blocks(key->tfm, hdr, pn, data_len, b0, a, b);
330 xor_block(mic, b, CCMP_MIC_LEN);
331
332 blocks = (data_len + AES_BLOCK_LEN - 1) / AES_BLOCK_LEN;
333 last = data_len % AES_BLOCK_LEN;
334
335 for (i = 1; i <= blocks; i++) {
336 len = (i == blocks && last) ? last : AES_BLOCK_LEN;
337 /* Decrypt, with counter */
338 b0[14] = (i >> 8) & 0xff;
339 b0[15] = i & 0xff;
340 ieee80211_ccmp_aes_encrypt(key->tfm, b0, b);
341 xor_block(pos, b, len);
342 /* Authentication */
343 xor_block(a, pos, len);
344 ieee80211_ccmp_aes_encrypt(key->tfm, a, a);
345 pos += len;
346 }
347
348 if (memcmp(mic, a, CCMP_MIC_LEN) != 0) {
349 if (net_ratelimit()) {
350 printk(KERN_DEBUG "CCMP: decrypt failed: STA="
351 MAC_FMT "\n", MAC_ARG(hdr->addr2));
352 }
353 key->dot11RSNAStatsCCMPDecryptErrors++;
354 return -5;
355 }
356
357 memcpy(key->rx_pn, pn, CCMP_PN_LEN);
358
359 /* Remove hdr and MIC */
360 memmove(skb->data + CCMP_HDR_LEN, skb->data, hdr_len);
361 skb_pull(skb, CCMP_HDR_LEN);
362 skb_trim(skb, skb->len - CCMP_MIC_LEN);
363
364 return keyidx;
365}
366
367
368static int ieee80211_ccmp_set_key(void *key, int len, u8 *seq, void *priv)
369{
370 struct ieee80211_ccmp_data *data = priv;
371 int keyidx;
372 struct crypto_tfm *tfm = data->tfm;
373
374 keyidx = data->key_idx;
375 memset(data, 0, sizeof(*data));
376 data->key_idx = keyidx;
377 data->tfm = tfm;
378 if (len == CCMP_TK_LEN) {
379 memcpy(data->key, key, CCMP_TK_LEN);
380 data->key_set = 1;
381 if (seq) {
382 data->rx_pn[0] = seq[5];
383 data->rx_pn[1] = seq[4];
384 data->rx_pn[2] = seq[3];
385 data->rx_pn[3] = seq[2];
386 data->rx_pn[4] = seq[1];
387 data->rx_pn[5] = seq[0];
388 }
389 crypto_cipher_setkey(data->tfm, data->key, CCMP_TK_LEN);
390 } else if (len == 0)
391 data->key_set = 0;
392 else
393 return -1;
394
395 return 0;
396}
397
398
399static int ieee80211_ccmp_get_key(void *key, int len, u8 *seq, void *priv)
400{
401 struct ieee80211_ccmp_data *data = priv;
402
403 if (len < CCMP_TK_LEN)
404 return -1;
405
406 if (!data->key_set)
407 return 0;
408 memcpy(key, data->key, CCMP_TK_LEN);
409
410 if (seq) {
411 seq[0] = data->tx_pn[5];
412 seq[1] = data->tx_pn[4];
413 seq[2] = data->tx_pn[3];
414 seq[3] = data->tx_pn[2];
415 seq[4] = data->tx_pn[1];
416 seq[5] = data->tx_pn[0];
417 }
418
419 return CCMP_TK_LEN;
420}
421
422
423static char * ieee80211_ccmp_print_stats(char *p, void *priv)
424{
425 struct ieee80211_ccmp_data *ccmp = priv;
426 p += sprintf(p, "key[%d] alg=CCMP key_set=%d "
427 "tx_pn=%02x%02x%02x%02x%02x%02x "
428 "rx_pn=%02x%02x%02x%02x%02x%02x "
429 "format_errors=%d replays=%d decrypt_errors=%d\n",
430 ccmp->key_idx, ccmp->key_set,
431 MAC_ARG(ccmp->tx_pn), MAC_ARG(ccmp->rx_pn),
432 ccmp->dot11RSNAStatsCCMPFormatErrors,
433 ccmp->dot11RSNAStatsCCMPReplays,
434 ccmp->dot11RSNAStatsCCMPDecryptErrors);
435
436 return p;
437}
438
439
440static struct ieee80211_crypto_ops ieee80211_crypt_ccmp = {
441 .name = "CCMP",
442 .init = ieee80211_ccmp_init,
443 .deinit = ieee80211_ccmp_deinit,
444 .encrypt_mpdu = ieee80211_ccmp_encrypt,
445 .decrypt_mpdu = ieee80211_ccmp_decrypt,
446 .encrypt_msdu = NULL,
447 .decrypt_msdu = NULL,
448 .set_key = ieee80211_ccmp_set_key,
449 .get_key = ieee80211_ccmp_get_key,
450 .print_stats = ieee80211_ccmp_print_stats,
451 .extra_prefix_len = CCMP_HDR_LEN,
452 .extra_postfix_len = CCMP_MIC_LEN,
453 .owner = THIS_MODULE,
454};
455
456
457static int __init ieee80211_crypto_ccmp_init(void)
458{
459 return ieee80211_register_crypto_ops(&ieee80211_crypt_ccmp);
460}
461
462
463static void __exit ieee80211_crypto_ccmp_exit(void)
464{
465 ieee80211_unregister_crypto_ops(&ieee80211_crypt_ccmp);
466}
467
468
469module_init(ieee80211_crypto_ccmp_init);
470module_exit(ieee80211_crypto_ccmp_exit);
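
For reference, the 8-byte header that ieee80211_ccmp_encrypt() prepends carries the 48-bit packet number split around a reserved byte and the key-id/ExtIV byte: the two least-significant PN bytes first, then the flags, then the remaining four bytes in order of increasing significance. A stand-alone sketch of just that layout (the function and test values are made up; the byte order mirrors the encrypt path above):

#include <stdint.h>
#include <stdio.h>

/* pn[] uses the same convention as the driver's tx_pn[]: pn[0] is the
 * most significant byte, pn[5] the least significant (incremented first). */
static void ccmp_build_hdr(uint8_t hdr[8], const uint8_t pn[6], int key_idx)
{
	hdr[0] = pn[5];
	hdr[1] = pn[4];
	hdr[2] = 0;				/* reserved */
	hdr[3] = (key_idx << 6) | (1 << 5);	/* key id + ExtIV flag */
	hdr[4] = pn[3];
	hdr[5] = pn[2];
	hdr[6] = pn[1];
	hdr[7] = pn[0];
}

int main(void)
{
	uint8_t pn[6] = { 0, 0, 0, 0, 0x01, 0x02 };	/* PN = 0x000000000102 */
	uint8_t hdr[8];
	int i;

	ccmp_build_hdr(hdr, pn, 2);
	for (i = 0; i < 8; i++)
		printf("%02x ", hdr[i]);	/* 02 01 00 a0 00 00 00 00 */
	printf("\n");
	return 0;
}
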
diff --git a/net/ieee80211/ieee80211_crypt_tkip.c b/net/ieee80211/ieee80211_crypt_tkip.c
new file mode 100644
index 000000000000..f91d92c6df25
--- /dev/null
+++ b/net/ieee80211/ieee80211_crypt_tkip.c
@@ -0,0 +1,708 @@
1/*
2 * Host AP crypt: host-based TKIP encryption implementation for Host AP driver
3 *
4 * Copyright (c) 2003-2004, Jouni Malinen <jkmaline@cc.hut.fi>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation. See README and COPYING for
9 * more details.
10 */
11
12#include <linux/config.h>
13#include <linux/version.h>
14#include <linux/module.h>
15#include <linux/init.h>
16#include <linux/slab.h>
17#include <linux/random.h>
18#include <linux/skbuff.h>
19#include <linux/netdevice.h>
20#include <linux/if_ether.h>
21#include <linux/if_arp.h>
22#include <asm/string.h>
23
24#include <net/ieee80211.h>
25
26
27#include <linux/crypto.h>
28#include <asm/scatterlist.h>
29#include <linux/crc32.h>
30
31MODULE_AUTHOR("Jouni Malinen");
32MODULE_DESCRIPTION("Host AP crypt: TKIP");
33MODULE_LICENSE("GPL");
34
35struct ieee80211_tkip_data {
36#define TKIP_KEY_LEN 32
37 u8 key[TKIP_KEY_LEN];
38 int key_set;
39
40 u32 tx_iv32;
41 u16 tx_iv16;
42 u16 tx_ttak[5];
43 int tx_phase1_done;
44
45 u32 rx_iv32;
46 u16 rx_iv16;
47 u16 rx_ttak[5];
48 int rx_phase1_done;
49 u32 rx_iv32_new;
50 u16 rx_iv16_new;
51
52 u32 dot11RSNAStatsTKIPReplays;
53 u32 dot11RSNAStatsTKIPICVErrors;
54 u32 dot11RSNAStatsTKIPLocalMICFailures;
55
56 int key_idx;
57
58 struct crypto_tfm *tfm_arc4;
59 struct crypto_tfm *tfm_michael;
60
61 /* scratch buffers for virt_to_page() (crypto API) */
62 u8 rx_hdr[16], tx_hdr[16];
63};
64
65static void * ieee80211_tkip_init(int key_idx)
66{
67 struct ieee80211_tkip_data *priv;
68
69 priv = kmalloc(sizeof(*priv), GFP_ATOMIC);
70 if (priv == NULL)
71 goto fail;
72 memset(priv, 0, sizeof(*priv));
73 priv->key_idx = key_idx;
74
75 priv->tfm_arc4 = crypto_alloc_tfm("arc4", 0);
76 if (priv->tfm_arc4 == NULL) {
77 printk(KERN_DEBUG "ieee80211_crypt_tkip: could not allocate "
78 "crypto API arc4\n");
79 goto fail;
80 }
81
82 priv->tfm_michael = crypto_alloc_tfm("michael_mic", 0);
83 if (priv->tfm_michael == NULL) {
84 printk(KERN_DEBUG "ieee80211_crypt_tkip: could not allocate "
85 "crypto API michael_mic\n");
86 goto fail;
87 }
88
89 return priv;
90
91fail:
92 if (priv) {
93 if (priv->tfm_michael)
94 crypto_free_tfm(priv->tfm_michael);
95 if (priv->tfm_arc4)
96 crypto_free_tfm(priv->tfm_arc4);
97 kfree(priv);
98 }
99
100 return NULL;
101}
102
103
104static void ieee80211_tkip_deinit(void *priv)
105{
106 struct ieee80211_tkip_data *_priv = priv;
107 if (_priv && _priv->tfm_michael)
108 crypto_free_tfm(_priv->tfm_michael);
109 if (_priv && _priv->tfm_arc4)
110 crypto_free_tfm(_priv->tfm_arc4);
111 kfree(priv);
112}
113
114
115static inline u16 RotR1(u16 val)
116{
117 return (val >> 1) | (val << 15);
118}
119
120
121static inline u8 Lo8(u16 val)
122{
123 return val & 0xff;
124}
125
126
127static inline u8 Hi8(u16 val)
128{
129 return val >> 8;
130}
131
132
133static inline u16 Lo16(u32 val)
134{
135 return val & 0xffff;
136}
137
138
139static inline u16 Hi16(u32 val)
140{
141 return val >> 16;
142}
143
144
145static inline u16 Mk16(u8 hi, u8 lo)
146{
147 return lo | (((u16) hi) << 8);
148}
149
150
151static inline u16 Mk16_le(u16 *v)
152{
153 return le16_to_cpu(*v);
154}
155
156
157static const u16 Sbox[256] =
158{
159 0xC6A5, 0xF884, 0xEE99, 0xF68D, 0xFF0D, 0xD6BD, 0xDEB1, 0x9154,
160 0x6050, 0x0203, 0xCEA9, 0x567D, 0xE719, 0xB562, 0x4DE6, 0xEC9A,
161 0x8F45, 0x1F9D, 0x8940, 0xFA87, 0xEF15, 0xB2EB, 0x8EC9, 0xFB0B,
162 0x41EC, 0xB367, 0x5FFD, 0x45EA, 0x23BF, 0x53F7, 0xE496, 0x9B5B,
163 0x75C2, 0xE11C, 0x3DAE, 0x4C6A, 0x6C5A, 0x7E41, 0xF502, 0x834F,
164 0x685C, 0x51F4, 0xD134, 0xF908, 0xE293, 0xAB73, 0x6253, 0x2A3F,
165 0x080C, 0x9552, 0x4665, 0x9D5E, 0x3028, 0x37A1, 0x0A0F, 0x2FB5,
166 0x0E09, 0x2436, 0x1B9B, 0xDF3D, 0xCD26, 0x4E69, 0x7FCD, 0xEA9F,
167 0x121B, 0x1D9E, 0x5874, 0x342E, 0x362D, 0xDCB2, 0xB4EE, 0x5BFB,
168 0xA4F6, 0x764D, 0xB761, 0x7DCE, 0x527B, 0xDD3E, 0x5E71, 0x1397,
169 0xA6F5, 0xB968, 0x0000, 0xC12C, 0x4060, 0xE31F, 0x79C8, 0xB6ED,
170 0xD4BE, 0x8D46, 0x67D9, 0x724B, 0x94DE, 0x98D4, 0xB0E8, 0x854A,
171 0xBB6B, 0xC52A, 0x4FE5, 0xED16, 0x86C5, 0x9AD7, 0x6655, 0x1194,
172 0x8ACF, 0xE910, 0x0406, 0xFE81, 0xA0F0, 0x7844, 0x25BA, 0x4BE3,
173 0xA2F3, 0x5DFE, 0x80C0, 0x058A, 0x3FAD, 0x21BC, 0x7048, 0xF104,
174 0x63DF, 0x77C1, 0xAF75, 0x4263, 0x2030, 0xE51A, 0xFD0E, 0xBF6D,
175 0x814C, 0x1814, 0x2635, 0xC32F, 0xBEE1, 0x35A2, 0x88CC, 0x2E39,
176 0x9357, 0x55F2, 0xFC82, 0x7A47, 0xC8AC, 0xBAE7, 0x322B, 0xE695,
177 0xC0A0, 0x1998, 0x9ED1, 0xA37F, 0x4466, 0x547E, 0x3BAB, 0x0B83,
178 0x8CCA, 0xC729, 0x6BD3, 0x283C, 0xA779, 0xBCE2, 0x161D, 0xAD76,
179 0xDB3B, 0x6456, 0x744E, 0x141E, 0x92DB, 0x0C0A, 0x486C, 0xB8E4,
180 0x9F5D, 0xBD6E, 0x43EF, 0xC4A6, 0x39A8, 0x31A4, 0xD337, 0xF28B,
181 0xD532, 0x8B43, 0x6E59, 0xDAB7, 0x018C, 0xB164, 0x9CD2, 0x49E0,
182 0xD8B4, 0xACFA, 0xF307, 0xCF25, 0xCAAF, 0xF48E, 0x47E9, 0x1018,
183 0x6FD5, 0xF088, 0x4A6F, 0x5C72, 0x3824, 0x57F1, 0x73C7, 0x9751,
184 0xCB23, 0xA17C, 0xE89C, 0x3E21, 0x96DD, 0x61DC, 0x0D86, 0x0F85,
185 0xE090, 0x7C42, 0x71C4, 0xCCAA, 0x90D8, 0x0605, 0xF701, 0x1C12,
186 0xC2A3, 0x6A5F, 0xAEF9, 0x69D0, 0x1791, 0x9958, 0x3A27, 0x27B9,
187 0xD938, 0xEB13, 0x2BB3, 0x2233, 0xD2BB, 0xA970, 0x0789, 0x33A7,
188 0x2DB6, 0x3C22, 0x1592, 0xC920, 0x8749, 0xAAFF, 0x5078, 0xA57A,
189 0x038F, 0x59F8, 0x0980, 0x1A17, 0x65DA, 0xD731, 0x84C6, 0xD0B8,
190 0x82C3, 0x29B0, 0x5A77, 0x1E11, 0x7BCB, 0xA8FC, 0x6DD6, 0x2C3A,
191};
192
193
194static inline u16 _S_(u16 v)
195{
196 u16 t = Sbox[Hi8(v)];
197 return Sbox[Lo8(v)] ^ ((t << 8) | (t >> 8));
198}
199
200
201#define PHASE1_LOOP_COUNT 8
202
203static void tkip_mixing_phase1(u16 *TTAK, const u8 *TK, const u8 *TA, u32 IV32)
204{
205 int i, j;
206
207 /* Initialize the 80-bit TTAK from TSC (IV32) and TA[0..5] */
208 TTAK[0] = Lo16(IV32);
209 TTAK[1] = Hi16(IV32);
210 TTAK[2] = Mk16(TA[1], TA[0]);
211 TTAK[3] = Mk16(TA[3], TA[2]);
212 TTAK[4] = Mk16(TA[5], TA[4]);
213
214 for (i = 0; i < PHASE1_LOOP_COUNT; i++) {
215 j = 2 * (i & 1);
216 TTAK[0] += _S_(TTAK[4] ^ Mk16(TK[1 + j], TK[0 + j]));
217 TTAK[1] += _S_(TTAK[0] ^ Mk16(TK[5 + j], TK[4 + j]));
218 TTAK[2] += _S_(TTAK[1] ^ Mk16(TK[9 + j], TK[8 + j]));
219 TTAK[3] += _S_(TTAK[2] ^ Mk16(TK[13 + j], TK[12 + j]));
220 TTAK[4] += _S_(TTAK[3] ^ Mk16(TK[1 + j], TK[0 + j])) + i;
221 }
222}
223
224
225static void tkip_mixing_phase2(u8 *WEPSeed, const u8 *TK, const u16 *TTAK,
226 u16 IV16)
227{
228 /* Make temporary area overlap WEP seed so that the final copy can be
229 * avoided on little endian hosts. */
230 u16 *PPK = (u16 *) &WEPSeed[4];
231
232 /* Step 1 - make copy of TTAK and bring in TSC */
233 PPK[0] = TTAK[0];
234 PPK[1] = TTAK[1];
235 PPK[2] = TTAK[2];
236 PPK[3] = TTAK[3];
237 PPK[4] = TTAK[4];
238 PPK[5] = TTAK[4] + IV16;
239
240 /* Step 2 - 96-bit bijective mixing using S-box */
241 PPK[0] += _S_(PPK[5] ^ Mk16_le((u16 *) &TK[0]));
242 PPK[1] += _S_(PPK[0] ^ Mk16_le((u16 *) &TK[2]));
243 PPK[2] += _S_(PPK[1] ^ Mk16_le((u16 *) &TK[4]));
244 PPK[3] += _S_(PPK[2] ^ Mk16_le((u16 *) &TK[6]));
245 PPK[4] += _S_(PPK[3] ^ Mk16_le((u16 *) &TK[8]));
246 PPK[5] += _S_(PPK[4] ^ Mk16_le((u16 *) &TK[10]));
247
248 PPK[0] += RotR1(PPK[5] ^ Mk16_le((u16 *) &TK[12]));
249 PPK[1] += RotR1(PPK[0] ^ Mk16_le((u16 *) &TK[14]));
250 PPK[2] += RotR1(PPK[1]);
251 PPK[3] += RotR1(PPK[2]);
252 PPK[4] += RotR1(PPK[3]);
253 PPK[5] += RotR1(PPK[4]);
254
255 /* Step 3 - bring in last of TK bits, assign 24-bit WEP IV value
256 * WEPSeed[0..2] is transmitted as WEP IV */
257 WEPSeed[0] = Hi8(IV16);
258 WEPSeed[1] = (Hi8(IV16) | 0x20) & 0x7F;
259 WEPSeed[2] = Lo8(IV16);
260 WEPSeed[3] = Lo8((PPK[5] ^ Mk16_le((u16 *) &TK[0])) >> 1);
261
262#ifdef __BIG_ENDIAN
263 {
264 int i;
265 for (i = 0; i < 6; i++)
266 PPK[i] = (PPK[i] << 8) | (PPK[i] >> 8);
267 }
268#endif
269}
270
271static int ieee80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
272{
273 struct ieee80211_tkip_data *tkey = priv;
274 int len;
275 u8 rc4key[16], *pos, *icv;
276 struct ieee80211_hdr *hdr;
277 u32 crc;
278 struct scatterlist sg;
279
280 if (skb_headroom(skb) < 8 || skb_tailroom(skb) < 4 ||
281 skb->len < hdr_len)
282 return -1;
283
284 hdr = (struct ieee80211_hdr *) skb->data;
285 if (!tkey->tx_phase1_done) {
286 tkip_mixing_phase1(tkey->tx_ttak, tkey->key, hdr->addr2,
287 tkey->tx_iv32);
288 tkey->tx_phase1_done = 1;
289 }
290 tkip_mixing_phase2(rc4key, tkey->key, tkey->tx_ttak, tkey->tx_iv16);
291
292 len = skb->len - hdr_len;
293 pos = skb_push(skb, 8);
294 memmove(pos, pos + 8, hdr_len);
295 pos += hdr_len;
296 icv = skb_put(skb, 4);
297
298 *pos++ = rc4key[0];
299 *pos++ = rc4key[1];
300 *pos++ = rc4key[2];
301 *pos++ = (tkey->key_idx << 6) | (1 << 5) /* Ext IV included */;
302 *pos++ = tkey->tx_iv32 & 0xff;
303 *pos++ = (tkey->tx_iv32 >> 8) & 0xff;
304 *pos++ = (tkey->tx_iv32 >> 16) & 0xff;
305 *pos++ = (tkey->tx_iv32 >> 24) & 0xff;
306
307 crc = ~crc32_le(~0, pos, len);
308 icv[0] = crc;
309 icv[1] = crc >> 8;
310 icv[2] = crc >> 16;
311 icv[3] = crc >> 24;
312
313 crypto_cipher_setkey(tkey->tfm_arc4, rc4key, 16);
314 sg.page = virt_to_page(pos);
315 sg.offset = offset_in_page(pos);
316 sg.length = len + 4;
317 crypto_cipher_encrypt(tkey->tfm_arc4, &sg, &sg, len + 4);
318
319 tkey->tx_iv16++;
320 if (tkey->tx_iv16 == 0) {
321 tkey->tx_phase1_done = 0;
322 tkey->tx_iv32++;
323 }
324
325 return 0;
326}
327
328static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
329{
330 struct ieee80211_tkip_data *tkey = priv;
331 u8 rc4key[16];
332 u8 keyidx, *pos;
333 u32 iv32;
334 u16 iv16;
335 struct ieee80211_hdr *hdr;
336 u8 icv[4];
337 u32 crc;
338 struct scatterlist sg;
339 int plen;
340
341 if (skb->len < hdr_len + 8 + 4)
342 return -1;
343
344 hdr = (struct ieee80211_hdr *) skb->data;
345 pos = skb->data + hdr_len;
346 keyidx = pos[3];
347 if (!(keyidx & (1 << 5))) {
348 if (net_ratelimit()) {
349 printk(KERN_DEBUG "TKIP: received packet without ExtIV"
350 " flag from " MAC_FMT "\n", MAC_ARG(hdr->addr2));
351 }
352 return -2;
353 }
354 keyidx >>= 6;
355 if (tkey->key_idx != keyidx) {
356 printk(KERN_DEBUG "TKIP: RX tkey->key_idx=%d frame "
357 "keyidx=%d priv=%p\n", tkey->key_idx, keyidx, priv);
358 return -6;
359 }
360 if (!tkey->key_set) {
361 if (net_ratelimit()) {
362 printk(KERN_DEBUG "TKIP: received packet from " MAC_FMT
363 " with keyid=%d that does not have a configured"
364 " key\n", MAC_ARG(hdr->addr2), keyidx);
365 }
366 return -3;
367 }
368 iv16 = (pos[0] << 8) | pos[2];
369 iv32 = pos[4] | (pos[5] << 8) | (pos[6] << 16) | (pos[7] << 24);
370 pos += 8;
371
372 if (iv32 < tkey->rx_iv32 ||
373 (iv32 == tkey->rx_iv32 && iv16 <= tkey->rx_iv16)) {
374 if (net_ratelimit()) {
375 printk(KERN_DEBUG "TKIP: replay detected: STA=" MAC_FMT
376 " previous TSC %08x%04x received TSC "
377 "%08x%04x\n", MAC_ARG(hdr->addr2),
378 tkey->rx_iv32, tkey->rx_iv16, iv32, iv16);
379 }
380 tkey->dot11RSNAStatsTKIPReplays++;
381 return -4;
382 }
383
384 if (iv32 != tkey->rx_iv32 || !tkey->rx_phase1_done) {
385 tkip_mixing_phase1(tkey->rx_ttak, tkey->key, hdr->addr2, iv32);
386 tkey->rx_phase1_done = 1;
387 }
388 tkip_mixing_phase2(rc4key, tkey->key, tkey->rx_ttak, iv16);
389
390 plen = skb->len - hdr_len - 12;
391
392 crypto_cipher_setkey(tkey->tfm_arc4, rc4key, 16);
393 sg.page = virt_to_page(pos);
394 sg.offset = offset_in_page(pos);
395 sg.length = plen + 4;
396 crypto_cipher_decrypt(tkey->tfm_arc4, &sg, &sg, plen + 4);
397
398 crc = ~crc32_le(~0, pos, plen);
399 icv[0] = crc;
400 icv[1] = crc >> 8;
401 icv[2] = crc >> 16;
402 icv[3] = crc >> 24;
403 if (memcmp(icv, pos + plen, 4) != 0) {
404 if (iv32 != tkey->rx_iv32) {
405 /* Previously cached Phase1 result was already lost, so
406 * it needs to be recalculated for the next packet. */
407 tkey->rx_phase1_done = 0;
408 }
409 if (net_ratelimit()) {
410 printk(KERN_DEBUG "TKIP: ICV error detected: STA="
411 MAC_FMT "\n", MAC_ARG(hdr->addr2));
412 }
413 tkey->dot11RSNAStatsTKIPICVErrors++;
414 return -5;
415 }
416
417 /* Update real counters only after Michael MIC verification has
418 * completed */
419 tkey->rx_iv32_new = iv32;
420 tkey->rx_iv16_new = iv16;
421
422 /* Remove IV and ICV */
423 memmove(skb->data + 8, skb->data, hdr_len);
424 skb_pull(skb, 8);
425 skb_trim(skb, skb->len - 4);
426
427 return keyidx;
428}
429
430
431static int michael_mic(struct ieee80211_tkip_data *tkey, u8 *key, u8 *hdr,
432 u8 *data, size_t data_len, u8 *mic)
433{
434 struct scatterlist sg[2];
435
436 if (tkey->tfm_michael == NULL) {
437 printk(KERN_WARNING "michael_mic: tfm_michael == NULL\n");
438 return -1;
439 }
440 sg[0].page = virt_to_page(hdr);
441 sg[0].offset = offset_in_page(hdr);
442 sg[0].length = 16;
443
444 sg[1].page = virt_to_page(data);
445 sg[1].offset = offset_in_page(data);
446 sg[1].length = data_len;
447
448 crypto_digest_init(tkey->tfm_michael);
449 crypto_digest_setkey(tkey->tfm_michael, key, 8);
450 crypto_digest_update(tkey->tfm_michael, sg, 2);
451 crypto_digest_final(tkey->tfm_michael, mic);
452
453 return 0;
454}
455
456static void michael_mic_hdr(struct sk_buff *skb, u8 *hdr)
457{
458 struct ieee80211_hdr *hdr11;
459
460 hdr11 = (struct ieee80211_hdr *) skb->data;
461 switch (le16_to_cpu(hdr11->frame_ctl) &
462 (IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS)) {
463 case IEEE80211_FCTL_TODS:
464 memcpy(hdr, hdr11->addr3, ETH_ALEN); /* DA */
465 memcpy(hdr + ETH_ALEN, hdr11->addr2, ETH_ALEN); /* SA */
466 break;
467 case IEEE80211_FCTL_FROMDS:
468 memcpy(hdr, hdr11->addr1, ETH_ALEN); /* DA */
469 memcpy(hdr + ETH_ALEN, hdr11->addr3, ETH_ALEN); /* SA */
470 break;
471 case IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS:
472 memcpy(hdr, hdr11->addr3, ETH_ALEN); /* DA */
473 memcpy(hdr + ETH_ALEN, hdr11->addr4, ETH_ALEN); /* SA */
474 break;
475 case 0:
476 memcpy(hdr, hdr11->addr1, ETH_ALEN); /* DA */
477 memcpy(hdr + ETH_ALEN, hdr11->addr2, ETH_ALEN); /* SA */
478 break;
479 }
480
481 hdr[12] = 0; /* priority */
482 hdr[13] = hdr[14] = hdr[15] = 0; /* reserved */
483}
484
485
486static int ieee80211_michael_mic_add(struct sk_buff *skb, int hdr_len, void *priv)
487{
488 struct ieee80211_tkip_data *tkey = priv;
489 u8 *pos;
490
491 if (skb_tailroom(skb) < 8 || skb->len < hdr_len) {
492 printk(KERN_DEBUG "Invalid packet for Michael MIC add "
493 "(tailroom=%d hdr_len=%d skb->len=%d)\n",
494 skb_tailroom(skb), hdr_len, skb->len);
495 return -1;
496 }
497
498 michael_mic_hdr(skb, tkey->tx_hdr);
499 pos = skb_put(skb, 8);
500 if (michael_mic(tkey, &tkey->key[16], tkey->tx_hdr,
501 skb->data + hdr_len, skb->len - 8 - hdr_len, pos))
502 return -1;
503
504 return 0;
505}
506
507
508#if WIRELESS_EXT >= 18
509static void ieee80211_michael_mic_failure(struct net_device *dev,
510 struct ieee80211_hdr *hdr,
511 int keyidx)
512{
513 union iwreq_data wrqu;
514 struct iw_michaelmicfailure ev;
515
516 /* TODO: needed parameters: count, keyid, key type, TSC */
517 memset(&ev, 0, sizeof(ev));
518 ev.flags = keyidx & IW_MICFAILURE_KEY_ID;
519 if (hdr->addr1[0] & 0x01)
520 ev.flags |= IW_MICFAILURE_GROUP;
521 else
522 ev.flags |= IW_MICFAILURE_PAIRWISE;
523 ev.src_addr.sa_family = ARPHRD_ETHER;
524 memcpy(ev.src_addr.sa_data, hdr->addr2, ETH_ALEN);
525 memset(&wrqu, 0, sizeof(wrqu));
526 wrqu.data.length = sizeof(ev);
527 wireless_send_event(dev, IWEVMICHAELMICFAILURE, &wrqu, (char *) &ev);
528}
529#elif WIRELESS_EXT >= 15
530static void ieee80211_michael_mic_failure(struct net_device *dev,
531 struct ieee80211_hdr *hdr,
532 int keyidx)
533{
534 union iwreq_data wrqu;
535 char buf[128];
536
537 /* TODO: needed parameters: count, keyid, key type, TSC */
538 sprintf(buf, "MLME-MICHAELMICFAILURE.indication(keyid=%d %scast addr="
539 MAC_FMT ")", keyidx, hdr->addr1[0] & 0x01 ? "broad" : "uni",
540 MAC_ARG(hdr->addr2));
541 memset(&wrqu, 0, sizeof(wrqu));
542 wrqu.data.length = strlen(buf);
543 wireless_send_event(dev, IWEVCUSTOM, &wrqu, buf);
544}
545#else /* WIRELESS_EXT >= 15 */
546static inline void ieee80211_michael_mic_failure(struct net_device *dev,
547 struct ieee80211_hdr *hdr,
548 int keyidx)
549{
550}
551#endif /* WIRELESS_EXT >= 15 */
552
553
554static int ieee80211_michael_mic_verify(struct sk_buff *skb, int keyidx,
555 int hdr_len, void *priv)
556{
557 struct ieee80211_tkip_data *tkey = priv;
558 u8 mic[8];
559
560 if (!tkey->key_set)
561 return -1;
562
563 michael_mic_hdr(skb, tkey->rx_hdr);
564 if (michael_mic(tkey, &tkey->key[24], tkey->rx_hdr,
565 skb->data + hdr_len, skb->len - 8 - hdr_len, mic))
566 return -1;
567 if (memcmp(mic, skb->data + skb->len - 8, 8) != 0) {
568 struct ieee80211_hdr *hdr;
569 hdr = (struct ieee80211_hdr *) skb->data;
570 printk(KERN_DEBUG "%s: Michael MIC verification failed for "
571 "MSDU from " MAC_FMT " keyidx=%d\n",
572 skb->dev ? skb->dev->name : "N/A", MAC_ARG(hdr->addr2),
573 keyidx);
574 if (skb->dev)
575 ieee80211_michael_mic_failure(skb->dev, hdr, keyidx);
576 tkey->dot11RSNAStatsTKIPLocalMICFailures++;
577 return -1;
578 }
579
580 /* Update TSC counters for RX now that the packet verification has
581 * completed. */
582 tkey->rx_iv32 = tkey->rx_iv32_new;
583 tkey->rx_iv16 = tkey->rx_iv16_new;
584
585 skb_trim(skb, skb->len - 8);
586
587 return 0;
588}
589
590
591static int ieee80211_tkip_set_key(void *key, int len, u8 *seq, void *priv)
592{
593 struct ieee80211_tkip_data *tkey = priv;
594 int keyidx;
595 struct crypto_tfm *tfm = tkey->tfm_michael;
596 struct crypto_tfm *tfm2 = tkey->tfm_arc4;
597
598 keyidx = tkey->key_idx;
599 memset(tkey, 0, sizeof(*tkey));
600 tkey->key_idx = keyidx;
601 tkey->tfm_michael = tfm;
602 tkey->tfm_arc4 = tfm2;
603 if (len == TKIP_KEY_LEN) {
604 memcpy(tkey->key, key, TKIP_KEY_LEN);
605 tkey->key_set = 1;
606 tkey->tx_iv16 = 1; /* TSC is initialized to 1 */
607 if (seq) {
608 tkey->rx_iv32 = (seq[5] << 24) | (seq[4] << 16) |
609 (seq[3] << 8) | seq[2];
610 tkey->rx_iv16 = (seq[1] << 8) | seq[0];
611 }
612 } else if (len == 0)
613 tkey->key_set = 0;
614 else
615 return -1;
616
617 return 0;
618}
619
620
621static int ieee80211_tkip_get_key(void *key, int len, u8 *seq, void *priv)
622{
623 struct ieee80211_tkip_data *tkey = priv;
624
625 if (len < TKIP_KEY_LEN)
626 return -1;
627
628 if (!tkey->key_set)
629 return 0;
630 memcpy(key, tkey->key, TKIP_KEY_LEN);
631
632 if (seq) {
633 /* Return the sequence number of the last transmitted frame. */
634 u16 iv16 = tkey->tx_iv16;
635 u32 iv32 = tkey->tx_iv32;
636 if (iv16 == 0)
637 iv32--;
638 iv16--;
639			seq[0] = iv16;
640			seq[1] = iv16 >> 8;
641			seq[2] = iv32;
642			seq[3] = iv32 >> 8;
643			seq[4] = iv32 >> 16;
644			seq[5] = iv32 >> 24;
645 }
646
647 return TKIP_KEY_LEN;
648}
649
650
651static char * ieee80211_tkip_print_stats(char *p, void *priv)
652{
653 struct ieee80211_tkip_data *tkip = priv;
654 p += sprintf(p, "key[%d] alg=TKIP key_set=%d "
655 "tx_pn=%02x%02x%02x%02x%02x%02x "
656 "rx_pn=%02x%02x%02x%02x%02x%02x "
657 "replays=%d icv_errors=%d local_mic_failures=%d\n",
658 tkip->key_idx, tkip->key_set,
659 (tkip->tx_iv32 >> 24) & 0xff,
660 (tkip->tx_iv32 >> 16) & 0xff,
661 (tkip->tx_iv32 >> 8) & 0xff,
662 tkip->tx_iv32 & 0xff,
663 (tkip->tx_iv16 >> 8) & 0xff,
664 tkip->tx_iv16 & 0xff,
665 (tkip->rx_iv32 >> 24) & 0xff,
666 (tkip->rx_iv32 >> 16) & 0xff,
667 (tkip->rx_iv32 >> 8) & 0xff,
668 tkip->rx_iv32 & 0xff,
669 (tkip->rx_iv16 >> 8) & 0xff,
670 tkip->rx_iv16 & 0xff,
671 tkip->dot11RSNAStatsTKIPReplays,
672 tkip->dot11RSNAStatsTKIPICVErrors,
673 tkip->dot11RSNAStatsTKIPLocalMICFailures);
674 return p;
675}
676
677
678static struct ieee80211_crypto_ops ieee80211_crypt_tkip = {
679 .name = "TKIP",
680 .init = ieee80211_tkip_init,
681 .deinit = ieee80211_tkip_deinit,
682 .encrypt_mpdu = ieee80211_tkip_encrypt,
683 .decrypt_mpdu = ieee80211_tkip_decrypt,
684 .encrypt_msdu = ieee80211_michael_mic_add,
685 .decrypt_msdu = ieee80211_michael_mic_verify,
686 .set_key = ieee80211_tkip_set_key,
687 .get_key = ieee80211_tkip_get_key,
688 .print_stats = ieee80211_tkip_print_stats,
689 .extra_prefix_len = 4 + 4, /* IV + ExtIV */
690 .extra_postfix_len = 8 + 4, /* MIC + ICV */
691 .owner = THIS_MODULE,
692};
693
694
695static int __init ieee80211_crypto_tkip_init(void)
696{
697 return ieee80211_register_crypto_ops(&ieee80211_crypt_tkip);
698}
699
700
701static void __exit ieee80211_crypto_tkip_exit(void)
702{
703 ieee80211_unregister_crypto_ops(&ieee80211_crypt_tkip);
704}
705
706
707module_init(ieee80211_crypto_tkip_init);
708module_exit(ieee80211_crypto_tkip_exit);
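
On the wire, ieee80211_tkip_encrypt() prefixes the payload with 8 bytes: the first three come out of tkip_mixing_phase2() (the high byte of IV16, a dummy byte chosen to avoid weak RC4 IVs, the low byte of IV16), the fourth is the key-id/ExtIV byte, and the last four carry IV32 in little-endian order. A self-contained sketch of just that encoding (function name invented; the logic is copied from the code above):

#include <stdint.h>
#include <stdio.h>

static void tkip_build_iv(uint8_t iv[8], uint16_t iv16, uint32_t iv32,
			  int key_idx)
{
	iv[0] = iv16 >> 8;			/* Hi8(IV16) */
	iv[1] = ((iv16 >> 8) | 0x20) & 0x7f;	/* dummy byte, avoids weak RC4 IVs */
	iv[2] = iv16 & 0xff;			/* Lo8(IV16) */
	iv[3] = (key_idx << 6) | (1 << 5);	/* key id + ExtIV flag */
	iv[4] = iv32 & 0xff;			/* IV32, little endian */
	iv[5] = (iv32 >> 8) & 0xff;
	iv[6] = (iv32 >> 16) & 0xff;
	iv[7] = (iv32 >> 24) & 0xff;
}

int main(void)
{
	uint8_t iv[8];
	int i;

	tkip_build_iv(iv, 0x0001, 0, 0);	/* TSC starts at 1, key index 0 */
	for (i = 0; i < 8; i++)
		printf("%02x ", iv[i]);		/* 00 20 01 20 00 00 00 00 */
	printf("\n");
	return 0;
}
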
diff --git a/net/ieee80211/ieee80211_crypt_wep.c b/net/ieee80211/ieee80211_crypt_wep.c
new file mode 100644
index 000000000000..bec1d3470d39
--- /dev/null
+++ b/net/ieee80211/ieee80211_crypt_wep.c
@@ -0,0 +1,272 @@
1/*
2 * Host AP crypt: host-based WEP encryption implementation for Host AP driver
3 *
4 * Copyright (c) 2002-2004, Jouni Malinen <jkmaline@cc.hut.fi>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation. See README and COPYING for
9 * more details.
10 */
11
12#include <linux/config.h>
13#include <linux/version.h>
14#include <linux/module.h>
15#include <linux/init.h>
16#include <linux/slab.h>
17#include <linux/random.h>
18#include <linux/skbuff.h>
19#include <asm/string.h>
20
21#include <net/ieee80211.h>
22
23
24#include <linux/crypto.h>
25#include <asm/scatterlist.h>
26#include <linux/crc32.h>
27
28MODULE_AUTHOR("Jouni Malinen");
29MODULE_DESCRIPTION("Host AP crypt: WEP");
30MODULE_LICENSE("GPL");
31
32
33struct prism2_wep_data {
34 u32 iv;
35#define WEP_KEY_LEN 13
36 u8 key[WEP_KEY_LEN + 1];
37 u8 key_len;
38 u8 key_idx;
39 struct crypto_tfm *tfm;
40};
41
42
43static void * prism2_wep_init(int keyidx)
44{
45 struct prism2_wep_data *priv;
46
47 priv = kmalloc(sizeof(*priv), GFP_ATOMIC);
48 if (priv == NULL)
49 goto fail;
50 memset(priv, 0, sizeof(*priv));
51 priv->key_idx = keyidx;
52
53 priv->tfm = crypto_alloc_tfm("arc4", 0);
54 if (priv->tfm == NULL) {
55 printk(KERN_DEBUG "ieee80211_crypt_wep: could not allocate "
56 "crypto API arc4\n");
57 goto fail;
58 }
59
60 /* start WEP IV from a random value */
61 get_random_bytes(&priv->iv, 4);
62
63 return priv;
64
65fail:
66 if (priv) {
67 if (priv->tfm)
68 crypto_free_tfm(priv->tfm);
69 kfree(priv);
70 }
71 return NULL;
72}
73
74
75static void prism2_wep_deinit(void *priv)
76{
77 struct prism2_wep_data *_priv = priv;
78 if (_priv && _priv->tfm)
79 crypto_free_tfm(_priv->tfm);
80 kfree(priv);
81}
82
83
84/* Perform WEP encryption on given skb that has at least 4 bytes of headroom
85 * for IV and 4 bytes of tailroom for ICV. Both IV and ICV will be transmitted,
86 * so the payload length increases with 8 bytes.
87 *
88 * WEP frame payload: IV + TX key idx, RC4(data), ICV = RC4(CRC32(data))
89 */
90static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
91{
92 struct prism2_wep_data *wep = priv;
93 u32 crc, klen, len;
94 u8 key[WEP_KEY_LEN + 3];
95 u8 *pos, *icv;
96 struct scatterlist sg;
97
98 if (skb_headroom(skb) < 4 || skb_tailroom(skb) < 4 ||
99 skb->len < hdr_len)
100 return -1;
101
102 len = skb->len - hdr_len;
103 pos = skb_push(skb, 4);
104 memmove(pos, pos + 4, hdr_len);
105 pos += hdr_len;
106
107 klen = 3 + wep->key_len;
108
109 wep->iv++;
110
111 /* Fluhrer, Mantin, and Shamir have reported weaknesses in the key
112 * scheduling algorithm of RC4. At least IVs (KeyByte + 3, 0xff, N)
113 * can be used to speedup attacks, so avoid using them. */
114 if ((wep->iv & 0xff00) == 0xff00) {
115 u8 B = (wep->iv >> 16) & 0xff;
116 if (B >= 3 && B < klen)
117 wep->iv += 0x0100;
118 }
119
120 /* Prepend 24-bit IV to RC4 key and TX frame */
121 *pos++ = key[0] = (wep->iv >> 16) & 0xff;
122 *pos++ = key[1] = (wep->iv >> 8) & 0xff;
123 *pos++ = key[2] = wep->iv & 0xff;
124 *pos++ = wep->key_idx << 6;
125
126 /* Copy rest of the WEP key (the secret part) */
127 memcpy(key + 3, wep->key, wep->key_len);
128
129 /* Append little-endian CRC32 and encrypt it to produce ICV */
130 crc = ~crc32_le(~0, pos, len);
131 icv = skb_put(skb, 4);
132 icv[0] = crc;
133 icv[1] = crc >> 8;
134 icv[2] = crc >> 16;
135 icv[3] = crc >> 24;
136
137 crypto_cipher_setkey(wep->tfm, key, klen);
138 sg.page = virt_to_page(pos);
139 sg.offset = offset_in_page(pos);
140 sg.length = len + 4;
141 crypto_cipher_encrypt(wep->tfm, &sg, &sg, len + 4);
142
143 return 0;
144}
145
146
147/* Perform WEP decryption on given buffer. Buffer includes whole WEP part of
148 * the frame: IV (4 bytes), encrypted payload (including SNAP header),
149 * ICV (4 bytes). len includes both IV and ICV.
150 *
151 * Returns 0 if the frame was decrypted successfully and the ICV was correct,
152 * and a negative value on failure. If the frame is OK, the IV and ICV are removed.
153 */
154static int prism2_wep_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
155{
156 struct prism2_wep_data *wep = priv;
157 u32 crc, klen, plen;
158 u8 key[WEP_KEY_LEN + 3];
159 u8 keyidx, *pos, icv[4];
160 struct scatterlist sg;
161
162 if (skb->len < hdr_len + 8)
163 return -1;
164
165 pos = skb->data + hdr_len;
166 key[0] = *pos++;
167 key[1] = *pos++;
168 key[2] = *pos++;
169 keyidx = *pos++ >> 6;
170 if (keyidx != wep->key_idx)
171 return -1;
172
173 klen = 3 + wep->key_len;
174
175 /* Copy rest of the WEP key (the secret part) */
176 memcpy(key + 3, wep->key, wep->key_len);
177
178 /* Apply RC4 to data and compute CRC32 over decrypted data */
179 plen = skb->len - hdr_len - 8;
180
181 crypto_cipher_setkey(wep->tfm, key, klen);
182 sg.page = virt_to_page(pos);
183 sg.offset = offset_in_page(pos);
184 sg.length = plen + 4;
185 crypto_cipher_decrypt(wep->tfm, &sg, &sg, plen + 4);
186
187 crc = ~crc32_le(~0, pos, plen);
188 icv[0] = crc;
189 icv[1] = crc >> 8;
190 icv[2] = crc >> 16;
191 icv[3] = crc >> 24;
192 if (memcmp(icv, pos + plen, 4) != 0) {
193 /* ICV mismatch - drop frame */
194 return -2;
195 }
196
197 /* Remove IV and ICV */
198 memmove(skb->data + 4, skb->data, hdr_len);
199 skb_pull(skb, 4);
200 skb_trim(skb, skb->len - 4);
201
202 return 0;
203}
204
205
206static int prism2_wep_set_key(void *key, int len, u8 *seq, void *priv)
207{
208 struct prism2_wep_data *wep = priv;
209
210 if (len < 0 || len > WEP_KEY_LEN)
211 return -1;
212
213 memcpy(wep->key, key, len);
214 wep->key_len = len;
215
216 return 0;
217}
218
219
220static int prism2_wep_get_key(void *key, int len, u8 *seq, void *priv)
221{
222 struct prism2_wep_data *wep = priv;
223
224 if (len < wep->key_len)
225 return -1;
226
227 memcpy(key, wep->key, wep->key_len);
228
229 return wep->key_len;
230}
231
232
233static char * prism2_wep_print_stats(char *p, void *priv)
234{
235 struct prism2_wep_data *wep = priv;
236 p += sprintf(p, "key[%d] alg=WEP len=%d\n",
237 wep->key_idx, wep->key_len);
238 return p;
239}
240
241
242static struct ieee80211_crypto_ops ieee80211_crypt_wep = {
243 .name = "WEP",
244 .init = prism2_wep_init,
245 .deinit = prism2_wep_deinit,
246 .encrypt_mpdu = prism2_wep_encrypt,
247 .decrypt_mpdu = prism2_wep_decrypt,
248 .encrypt_msdu = NULL,
249 .decrypt_msdu = NULL,
250 .set_key = prism2_wep_set_key,
251 .get_key = prism2_wep_get_key,
252 .print_stats = prism2_wep_print_stats,
253 .extra_prefix_len = 4, /* IV */
254 .extra_postfix_len = 4, /* ICV */
255 .owner = THIS_MODULE,
256};
257
258
259static int __init ieee80211_crypto_wep_init(void)
260{
261 return ieee80211_register_crypto_ops(&ieee80211_crypt_wep);
262}
263
264
265static void __exit ieee80211_crypto_wep_exit(void)
266{
267 ieee80211_unregister_crypto_ops(&ieee80211_crypt_wep);
268}
269
270
271module_init(ieee80211_crypto_wep_init);
272module_exit(ieee80211_crypto_wep_exit);
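
The ICV that both paths above compute is the plain reflected IEEE CRC-32 of the payload, appended little-endian and then run through RC4 together with the data; the kernel expresses it as ~crc32_le(~0, buf, len). A self-contained user-space equivalent of just the checksum step, with a bitwise loop standing in for the crc32_le() library call:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Reflected CRC-32 (polynomial 0xedb88320), init and final XOR 0xffffffff:
 * the same value the kernel gets from ~crc32_le(~0, buf, len). */
static uint32_t wep_icv(const uint8_t *buf, size_t len)
{
	uint32_t crc = 0xffffffffu;
	size_t i;
	int bit;

	for (i = 0; i < len; i++) {
		crc ^= buf[i];
		for (bit = 0; bit < 8; bit++)
			crc = (crc >> 1) ^ (crc & 1 ? 0xedb88320u : 0);
	}
	return ~crc;
}

int main(void)
{
	const uint8_t payload[] = "123456789";
	uint32_t crc = wep_icv(payload, strlen((const char *) payload));
	uint8_t icv[4];

	icv[0] = crc;		/* appended little-endian, as in the driver */
	icv[1] = crc >> 8;
	icv[2] = crc >> 16;
	icv[3] = crc >> 24;
	/* standard CRC-32 check value: crc = cbf43926 */
	printf("crc=%08x icv=%02x %02x %02x %02x\n", (unsigned) crc,
	       icv[0], icv[1], icv[2], icv[3]);
	return 0;
}
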
diff --git a/net/ieee80211/ieee80211_module.c b/net/ieee80211/ieee80211_module.c
new file mode 100644
index 000000000000..e0802b8755a0
--- /dev/null
+++ b/net/ieee80211/ieee80211_module.c
@@ -0,0 +1,273 @@
1/*******************************************************************************
2
3 Copyright(c) 2004 Intel Corporation. All rights reserved.
4
5 Portions of this file are based on the WEP enablement code provided by the
6 Host AP project hostap-drivers v0.1.3
7 Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen
8 <jkmaline@cc.hut.fi>
9 Copyright (c) 2002-2003, Jouni Malinen <jkmaline@cc.hut.fi>
10
11 This program is free software; you can redistribute it and/or modify it
12 under the terms of version 2 of the GNU General Public License as
13 published by the Free Software Foundation.
14
15 This program is distributed in the hope that it will be useful, but WITHOUT
16 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
18 more details.
19
20 You should have received a copy of the GNU General Public License along with
21 this program; if not, write to the Free Software Foundation, Inc., 59
22 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
23
24 The full GNU General Public License is included in this distribution in the
25 file called LICENSE.
26
27 Contact Information:
28 James P. Ketrenos <ipw2100-admin@linux.intel.com>
29 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30
31*******************************************************************************/
32
33#include <linux/compiler.h>
34#include <linux/config.h>
35#include <linux/errno.h>
36#include <linux/if_arp.h>
37#include <linux/in6.h>
38#include <linux/in.h>
39#include <linux/ip.h>
40#include <linux/kernel.h>
41#include <linux/module.h>
42#include <linux/netdevice.h>
43#include <linux/proc_fs.h>
44#include <linux/skbuff.h>
45#include <linux/slab.h>
46#include <linux/tcp.h>
47#include <linux/types.h>
48#include <linux/version.h>
49#include <linux/wireless.h>
50#include <linux/etherdevice.h>
51#include <asm/uaccess.h>
52#include <net/arp.h>
53
54#include <net/ieee80211.h>
55
56MODULE_DESCRIPTION("802.11 data/management/control stack");
57MODULE_AUTHOR("Copyright (C) 2004 Intel Corporation <jketreno@linux.intel.com>");
58MODULE_LICENSE("GPL");
59
60#define DRV_NAME "ieee80211"
61
62static inline int ieee80211_networks_allocate(struct ieee80211_device *ieee)
63{
64 if (ieee->networks)
65 return 0;
66
67 ieee->networks = kmalloc(
68 MAX_NETWORK_COUNT * sizeof(struct ieee80211_network),
69 GFP_KERNEL);
70 if (!ieee->networks) {
71 printk(KERN_WARNING "%s: Out of memory allocating beacons\n",
72 ieee->dev->name);
73 return -ENOMEM;
74 }
75
76 memset(ieee->networks, 0,
77 MAX_NETWORK_COUNT * sizeof(struct ieee80211_network));
78
79 return 0;
80}
81
82static inline void ieee80211_networks_free(struct ieee80211_device *ieee)
83{
84 if (!ieee->networks)
85 return;
86 kfree(ieee->networks);
87 ieee->networks = NULL;
88}
89
90static inline void ieee80211_networks_initialize(struct ieee80211_device *ieee)
91{
92 int i;
93
94 INIT_LIST_HEAD(&ieee->network_free_list);
95 INIT_LIST_HEAD(&ieee->network_list);
96 for (i = 0; i < MAX_NETWORK_COUNT; i++)
97 list_add_tail(&ieee->networks[i].list, &ieee->network_free_list);
98}
99
100
101struct net_device *alloc_ieee80211(int sizeof_priv)
102{
103 struct ieee80211_device *ieee;
104 struct net_device *dev;
105 int err;
106
107 IEEE80211_DEBUG_INFO("Initializing...\n");
108
109 dev = alloc_etherdev(sizeof(struct ieee80211_device) + sizeof_priv);
110 if (!dev) {
111		IEEE80211_ERROR("Unable to allocate network device.\n");
112 goto failed;
113 }
114 ieee = netdev_priv(dev);
115 dev->hard_start_xmit = ieee80211_xmit;
116
117 ieee->dev = dev;
118
119 err = ieee80211_networks_allocate(ieee);
120 if (err) {
121 IEEE80211_ERROR("Unable to allocate beacon storage: %d\n",
122 err);
123 goto failed;
124 }
125 ieee80211_networks_initialize(ieee);
126
127 /* Default fragmentation threshold is maximum payload size */
128 ieee->fts = DEFAULT_FTS;
129 ieee->scan_age = DEFAULT_MAX_SCAN_AGE;
130 ieee->open_wep = 1;
131
132 /* Default to enabling full open WEP with host based encrypt/decrypt */
133 ieee->host_encrypt = 1;
134 ieee->host_decrypt = 1;
135 ieee->ieee802_1x = 1; /* Default to supporting 802.1x */
136
137 INIT_LIST_HEAD(&ieee->crypt_deinit_list);
138 init_timer(&ieee->crypt_deinit_timer);
139 ieee->crypt_deinit_timer.data = (unsigned long)ieee;
140 ieee->crypt_deinit_timer.function = ieee80211_crypt_deinit_handler;
141
142 spin_lock_init(&ieee->lock);
143
144 ieee->wpa_enabled = 0;
145 ieee->tkip_countermeasures = 0;
146 ieee->drop_unencrypted = 0;
147 ieee->privacy_invoked = 0;
148 ieee->ieee802_1x = 1;
149
150 return dev;
151
152 failed:
153 if (dev)
154 free_netdev(dev);
155 return NULL;
156}
157
158
159void free_ieee80211(struct net_device *dev)
160{
161 struct ieee80211_device *ieee = netdev_priv(dev);
162
163 int i;
164
165 del_timer_sync(&ieee->crypt_deinit_timer);
166 ieee80211_crypt_deinit_entries(ieee, 1);
167
168 for (i = 0; i < WEP_KEYS; i++) {
169 struct ieee80211_crypt_data *crypt = ieee->crypt[i];
170 if (crypt) {
171 if (crypt->ops) {
172 crypt->ops->deinit(crypt->priv);
173 module_put(crypt->ops->owner);
174 }
175 kfree(crypt);
176 ieee->crypt[i] = NULL;
177 }
178 }
179
180 ieee80211_networks_free(ieee);
181 free_netdev(dev);
182}
183
184#ifdef CONFIG_IEEE80211_DEBUG
185
186static int debug = 0;
187u32 ieee80211_debug_level = 0;
188struct proc_dir_entry *ieee80211_proc = NULL;
189
190static int show_debug_level(char *page, char **start, off_t offset,
191 int count, int *eof, void *data)
192{
193 return snprintf(page, count, "0x%08X\n", ieee80211_debug_level);
194}
195
196static int store_debug_level(struct file *file, const char __user *buffer,
197 unsigned long count, void *data)
198{
199 char buf[] = "0x00000000";
200 char *p = (char *)buf;
201 unsigned long val;
202
203 if (count > sizeof(buf) - 1)
204 count = sizeof(buf) - 1;
205
206 if (copy_from_user(buf, buffer, count))
207 return count;
208 buf[count] = 0;
209	/*
210	 * Accept the value in either hex ("0x..." / "x...") or
211	 * decimal form.
212	 */
213 if (p[1] == 'x' || p[1] == 'X' || p[0] == 'x' || p[0] == 'X') {
214 p++;
215 if (p[0] == 'x' || p[0] == 'X')
216 p++;
217 val = simple_strtoul(p, &p, 16);
218 } else
219 val = simple_strtoul(p, &p, 10);
220 if (p == buf)
221 printk(KERN_INFO DRV_NAME
222 ": %s is not in hex or decimal form.\n", buf);
223 else
224 ieee80211_debug_level = val;
225
226 return strlen(buf);
227}
228
229static int __init ieee80211_init(void)
230{
231 struct proc_dir_entry *e;
232
233 ieee80211_debug_level = debug;
234 ieee80211_proc = create_proc_entry(DRV_NAME, S_IFDIR, proc_net);
235 if (ieee80211_proc == NULL) {
236 IEEE80211_ERROR("Unable to create " DRV_NAME
237 " proc directory\n");
238 return -EIO;
239 }
240 e = create_proc_entry("debug_level", S_IFREG | S_IRUGO | S_IWUSR,
241 ieee80211_proc);
242 if (!e) {
243 remove_proc_entry(DRV_NAME, proc_net);
244 ieee80211_proc = NULL;
245 return -EIO;
246 }
247 e->read_proc = show_debug_level;
248 e->write_proc = store_debug_level;
249 e->data = NULL;
250
251 return 0;
252}
253
254static void __exit ieee80211_exit(void)
255{
256 if (ieee80211_proc) {
257 remove_proc_entry("debug_level", ieee80211_proc);
258 remove_proc_entry(DRV_NAME, proc_net);
259 ieee80211_proc = NULL;
260 }
261}
262
263#include <linux/moduleparam.h>
264module_param(debug, int, 0444);
265MODULE_PARM_DESC(debug, "debug output mask");
266
267
268module_exit(ieee80211_exit);
269module_init(ieee80211_init);
270#endif
271
272EXPORT_SYMBOL(alloc_ieee80211);
273EXPORT_SYMBOL(free_ieee80211);
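
From a driver's point of view the exported pair above is used as follows: allocate the net_device with room for driver state behind struct ieee80211_device, adjust the defaults, register, and hand everything back through free_ieee80211(), which also stops the crypt deinit timer and drains its list. A compressed, hypothetical probe/remove sketch; the example_* names and the private structure are invented:

#include <linux/netdevice.h>
#include <net/ieee80211.h>

/* Hypothetical driver state, placed after struct ieee80211_device by
 * alloc_ieee80211(sizeof(struct example_priv)). */
struct example_priv {
	int radio_on;
};

static struct net_device *example_probe(void)
{
	struct net_device *dev;
	struct ieee80211_device *ieee;

	dev = alloc_ieee80211(sizeof(struct example_priv));
	if (!dev)
		return NULL;

	ieee = netdev_priv(dev);
	ieee->host_encrypt = 1;		/* use the software ciphers above */
	ieee->host_decrypt = 1;

	if (register_netdev(dev)) {
		free_ieee80211(dev);
		return NULL;
	}
	return dev;
}

static void example_remove(struct net_device *dev)
{
	unregister_netdev(dev);
	free_ieee80211(dev);		/* stops the crypt_deinit timer, too */
}
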
diff --git a/net/ieee80211/ieee80211_rx.c b/net/ieee80211/ieee80211_rx.c
new file mode 100644
index 000000000000..0dd102993ef4
--- /dev/null
+++ b/net/ieee80211/ieee80211_rx.c
@@ -0,0 +1,1205 @@
1/*
2 * Original code based Host AP (software wireless LAN access point) driver
3 * for Intersil Prism2/2.5/3 - hostap.o module, common routines
4 *
5 * Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen
6 * <jkmaline@cc.hut.fi>
7 * Copyright (c) 2002-2003, Jouni Malinen <jkmaline@cc.hut.fi>
8 * Copyright (c) 2004, Intel Corporation
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation. See README and COPYING for
13 * more details.
14 */
15
16#include <linux/compiler.h>
17#include <linux/config.h>
18#include <linux/errno.h>
19#include <linux/if_arp.h>
20#include <linux/in6.h>
21#include <linux/in.h>
22#include <linux/ip.h>
23#include <linux/kernel.h>
24#include <linux/module.h>
25#include <linux/netdevice.h>
26#include <linux/proc_fs.h>
27#include <linux/skbuff.h>
28#include <linux/slab.h>
29#include <linux/tcp.h>
30#include <linux/types.h>
31#include <linux/version.h>
32#include <linux/wireless.h>
33#include <linux/etherdevice.h>
34#include <asm/uaccess.h>
35#include <linux/ctype.h>
36
37#include <net/ieee80211.h>
38
39static inline void ieee80211_monitor_rx(struct ieee80211_device *ieee,
40 struct sk_buff *skb,
41 struct ieee80211_rx_stats *rx_stats)
42{
43 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
44 u16 fc = le16_to_cpu(hdr->frame_ctl);
45
46 skb->dev = ieee->dev;
47 skb->mac.raw = skb->data;
48 skb_pull(skb, ieee80211_get_hdrlen(fc));
49 skb->pkt_type = PACKET_OTHERHOST;
50 skb->protocol = __constant_htons(ETH_P_80211_RAW);
51 memset(skb->cb, 0, sizeof(skb->cb));
52 netif_rx(skb);
53}
54
55
56/* Called only as a tasklet (software IRQ) */
57static struct ieee80211_frag_entry *
58ieee80211_frag_cache_find(struct ieee80211_device *ieee, unsigned int seq,
59 unsigned int frag, u8 *src, u8 *dst)
60{
61 struct ieee80211_frag_entry *entry;
62 int i;
63
64 for (i = 0; i < IEEE80211_FRAG_CACHE_LEN; i++) {
65 entry = &ieee->frag_cache[i];
66 if (entry->skb != NULL &&
67 time_after(jiffies, entry->first_frag_time + 2 * HZ)) {
68 IEEE80211_DEBUG_FRAG(
69 "expiring fragment cache entry "
70 "seq=%u last_frag=%u\n",
71 entry->seq, entry->last_frag);
72 dev_kfree_skb_any(entry->skb);
73 entry->skb = NULL;
74 }
75
76 if (entry->skb != NULL && entry->seq == seq &&
77 (entry->last_frag + 1 == frag || frag == -1) &&
78 memcmp(entry->src_addr, src, ETH_ALEN) == 0 &&
79 memcmp(entry->dst_addr, dst, ETH_ALEN) == 0)
80 return entry;
81 }
82
83 return NULL;
84}
85
86/* Called only as a tasklet (software IRQ) */
87static struct sk_buff *
88ieee80211_frag_cache_get(struct ieee80211_device *ieee,
89 struct ieee80211_hdr *hdr)
90{
91 struct sk_buff *skb = NULL;
92 u16 sc;
93 unsigned int frag, seq;
94 struct ieee80211_frag_entry *entry;
95
96 sc = le16_to_cpu(hdr->seq_ctl);
97 frag = WLAN_GET_SEQ_FRAG(sc);
98 seq = WLAN_GET_SEQ_SEQ(sc);
99
100 if (frag == 0) {
101 /* Reserve enough space to fit maximum frame length */
102 skb = dev_alloc_skb(ieee->dev->mtu +
103 sizeof(struct ieee80211_hdr) +
104 8 /* LLC */ +
105 2 /* alignment */ +
106 8 /* WEP */ + ETH_ALEN /* WDS */);
107 if (skb == NULL)
108 return NULL;
109
110 entry = &ieee->frag_cache[ieee->frag_next_idx];
111 ieee->frag_next_idx++;
112 if (ieee->frag_next_idx >= IEEE80211_FRAG_CACHE_LEN)
113 ieee->frag_next_idx = 0;
114
115 if (entry->skb != NULL)
116 dev_kfree_skb_any(entry->skb);
117
118 entry->first_frag_time = jiffies;
119 entry->seq = seq;
120 entry->last_frag = frag;
121 entry->skb = skb;
122 memcpy(entry->src_addr, hdr->addr2, ETH_ALEN);
123 memcpy(entry->dst_addr, hdr->addr1, ETH_ALEN);
124 } else {
125 /* received a fragment of a frame for which the head fragment
126 * should have already been received */
127 entry = ieee80211_frag_cache_find(ieee, seq, frag, hdr->addr2,
128 hdr->addr1);
129 if (entry != NULL) {
130 entry->last_frag = frag;
131 skb = entry->skb;
132 }
133 }
134
135 return skb;
136}
137
138
139/* Called only as a tasklet (software IRQ) */
140static int ieee80211_frag_cache_invalidate(struct ieee80211_device *ieee,
141 struct ieee80211_hdr *hdr)
142{
143 u16 sc;
144 unsigned int seq;
145 struct ieee80211_frag_entry *entry;
146
147 sc = le16_to_cpu(hdr->seq_ctl);
148 seq = WLAN_GET_SEQ_SEQ(sc);
149
150 entry = ieee80211_frag_cache_find(ieee, seq, -1, hdr->addr2,
151 hdr->addr1);
152
153 if (entry == NULL) {
154 IEEE80211_DEBUG_FRAG(
155 "could not invalidate fragment cache "
156 "entry (seq=%u)\n", seq);
157 return -1;
158 }
159
160 entry->skb = NULL;
161 return 0;
162}
163
164
165#ifdef NOT_YET
166/* ieee80211_rx_frame_mgmt
167 *
168 * Responsible for handling management frames
169 *
170 * Called by ieee80211_rx */
171static inline int
172ieee80211_rx_frame_mgmt(struct ieee80211_device *ieee, struct sk_buff *skb,
173 struct ieee80211_rx_stats *rx_stats, u16 type,
174 u16 stype)
175{
176 if (ieee->iw_mode == IW_MODE_MASTER) {
177		printk(KERN_DEBUG "%s: Master mode not yet supported.\n",
178 ieee->dev->name);
179 return 0;
180/*
181 hostap_update_sta_ps(ieee, (struct hostap_ieee80211_hdr *)
182 skb->data);*/
183 }
184
185 if (ieee->hostapd && type == WLAN_FC_TYPE_MGMT) {
186 if (stype == WLAN_FC_STYPE_BEACON &&
187 ieee->iw_mode == IW_MODE_MASTER) {
188 struct sk_buff *skb2;
189 /* Process beacon frames also in kernel driver to
190 * update STA(AP) table statistics */
191 skb2 = skb_clone(skb, GFP_ATOMIC);
192 if (skb2)
193 hostap_rx(skb2->dev, skb2, rx_stats);
194 }
195
196 /* send management frames to the user space daemon for
197 * processing */
198 ieee->apdevstats.rx_packets++;
199 ieee->apdevstats.rx_bytes += skb->len;
200 prism2_rx_80211(ieee->apdev, skb, rx_stats, PRISM2_RX_MGMT);
201 return 0;
202 }
203
204 if (ieee->iw_mode == IW_MODE_MASTER) {
205 if (type != WLAN_FC_TYPE_MGMT && type != WLAN_FC_TYPE_CTRL) {
206 printk(KERN_DEBUG "%s: unknown management frame "
207 "(type=0x%02x, stype=0x%02x) dropped\n",
208 skb->dev->name, type, stype);
209 return -1;
210 }
211
212 hostap_rx(skb->dev, skb, rx_stats);
213 return 0;
214 }
215
216 printk(KERN_DEBUG "%s: hostap_rx_frame_mgmt: management frame "
217 "received in non-Host AP mode\n", skb->dev->name);
218 return -1;
219}
220#endif
221
222
223/* See IEEE 802.1H for LLC/SNAP encapsulation/decapsulation */
224/* Ethernet-II snap header (RFC1042 for most EtherTypes) */
225static unsigned char rfc1042_header[] =
226{ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
227/* Bridge-Tunnel header (for EtherTypes ETH_P_AARP and ETH_P_IPX) */
228static unsigned char bridge_tunnel_header[] =
229{ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 };
230/* No encapsulation header if EtherType < 0x600 (=length) */
231
232/* Called by ieee80211_rx_frame_decrypt */
233static int ieee80211_is_eapol_frame(struct ieee80211_device *ieee,
234 struct sk_buff *skb)
235{
236 struct net_device *dev = ieee->dev;
237 u16 fc, ethertype;
238 struct ieee80211_hdr *hdr;
239 u8 *pos;
240
241 if (skb->len < 24)
242 return 0;
243
244 hdr = (struct ieee80211_hdr *) skb->data;
245 fc = le16_to_cpu(hdr->frame_ctl);
246
247	/* check that the frame is a unicast frame to us */
248 if ((fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) ==
249 IEEE80211_FCTL_TODS &&
250 memcmp(hdr->addr1, dev->dev_addr, ETH_ALEN) == 0 &&
251 memcmp(hdr->addr3, dev->dev_addr, ETH_ALEN) == 0) {
252 /* ToDS frame with own addr BSSID and DA */
253 } else if ((fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) ==
254 IEEE80211_FCTL_FROMDS &&
255 memcmp(hdr->addr1, dev->dev_addr, ETH_ALEN) == 0) {
256 /* FromDS frame with own addr as DA */
257 } else
258 return 0;
259
260 if (skb->len < 24 + 8)
261 return 0;
262
263 /* check for port access entity Ethernet type */
264 pos = skb->data + 24;
265 ethertype = (pos[6] << 8) | pos[7];
266 if (ethertype == ETH_P_PAE)
267 return 1;
268
269 return 0;
270}
271
272/* Called only as a tasklet (software IRQ), by ieee80211_rx */
273static inline int
274ieee80211_rx_frame_decrypt(struct ieee80211_device* ieee, struct sk_buff *skb,
275 struct ieee80211_crypt_data *crypt)
276{
277 struct ieee80211_hdr *hdr;
278 int res, hdrlen;
279
280 if (crypt == NULL || crypt->ops->decrypt_mpdu == NULL)
281 return 0;
282
283 hdr = (struct ieee80211_hdr *) skb->data;
284 hdrlen = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
285
286#ifdef CONFIG_IEEE80211_CRYPT_TKIP
287 if (ieee->tkip_countermeasures &&
288 strcmp(crypt->ops->name, "TKIP") == 0) {
289 if (net_ratelimit()) {
290 printk(KERN_DEBUG "%s: TKIP countermeasures: dropped "
291 "received packet from " MAC_FMT "\n",
292 ieee->dev->name, MAC_ARG(hdr->addr2));
293 }
294 return -1;
295 }
296#endif
297
298 atomic_inc(&crypt->refcnt);
299 res = crypt->ops->decrypt_mpdu(skb, hdrlen, crypt->priv);
300 atomic_dec(&crypt->refcnt);
301 if (res < 0) {
302 IEEE80211_DEBUG_DROP(
303 "decryption failed (SA=" MAC_FMT
304 ") res=%d\n", MAC_ARG(hdr->addr2), res);
305 if (res == -2)
306 IEEE80211_DEBUG_DROP("Decryption failed ICV "
307 "mismatch (key %d)\n",
308 skb->data[hdrlen + 3] >> 6);
309 ieee->ieee_stats.rx_discards_undecryptable++;
310 return -1;
311 }
312
313 return res;
314}
315
316
317/* Called only as a tasklet (software IRQ), by ieee80211_rx */
318static inline int
319ieee80211_rx_frame_decrypt_msdu(struct ieee80211_device* ieee, struct sk_buff *skb,
320 int keyidx, struct ieee80211_crypt_data *crypt)
321{
322 struct ieee80211_hdr *hdr;
323 int res, hdrlen;
324
325 if (crypt == NULL || crypt->ops->decrypt_msdu == NULL)
326 return 0;
327
328 hdr = (struct ieee80211_hdr *) skb->data;
329 hdrlen = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
330
331 atomic_inc(&crypt->refcnt);
332 res = crypt->ops->decrypt_msdu(skb, keyidx, hdrlen, crypt->priv);
333 atomic_dec(&crypt->refcnt);
334 if (res < 0) {
335 printk(KERN_DEBUG "%s: MSDU decryption/MIC verification failed"
336 " (SA=" MAC_FMT " keyidx=%d)\n",
337 ieee->dev->name, MAC_ARG(hdr->addr2), keyidx);
338 return -1;
339 }
340
341 return 0;
342}
343
344
345/* All received frames are sent to this function. @skb contains the frame in
346 * IEEE 802.11 format, i.e., in the format it was sent over air.
347 * This function is called only as a tasklet (software IRQ). */
348int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
349 struct ieee80211_rx_stats *rx_stats)
350{
351 struct net_device *dev = ieee->dev;
352 struct ieee80211_hdr *hdr;
353 size_t hdrlen;
354 u16 fc, type, stype, sc;
355 struct net_device_stats *stats;
356 unsigned int frag;
357 u8 *payload;
358 u16 ethertype;
359#ifdef NOT_YET
360 struct net_device *wds = NULL;
361 struct sk_buff *skb2 = NULL;
363 int frame_authorized = 0;
364 int from_assoc_ap = 0;
365 void *sta = NULL;
366#endif
367 u8 dst[ETH_ALEN];
368 u8 src[ETH_ALEN];
369 struct ieee80211_crypt_data *crypt = NULL;
370 int keyidx = 0;
371
372 hdr = (struct ieee80211_hdr *)skb->data;
373 stats = &ieee->stats;
374
375 if (skb->len < 10) {
376 printk(KERN_INFO "%s: SKB length < 10\n",
377 dev->name);
378 goto rx_dropped;
379 }
380
381 fc = le16_to_cpu(hdr->frame_ctl);
382 type = WLAN_FC_GET_TYPE(fc);
383 stype = WLAN_FC_GET_STYPE(fc);
384 sc = le16_to_cpu(hdr->seq_ctl);
385 frag = WLAN_GET_SEQ_FRAG(sc);
386 hdrlen = ieee80211_get_hdrlen(fc);
387
388#ifdef NOT_YET
389#if WIRELESS_EXT > 15
390 /* Put this code here so that we avoid duplicating it in all
391 * Rx paths. - Jean II */
392#ifdef IW_WIRELESS_SPY /* defined in iw_handler.h */
393 /* If spy monitoring on */
394 if (iface->spy_data.spy_number > 0) {
395 struct iw_quality wstats;
396 wstats.level = rx_stats->signal;
397 wstats.noise = rx_stats->noise;
398 wstats.updated = 6; /* No qual value */
399 /* Update spy records */
400 wireless_spy_update(dev, hdr->addr2, &wstats);
401 }
402#endif /* IW_WIRELESS_SPY */
403#endif /* WIRELESS_EXT > 15 */
404 hostap_update_rx_stats(local->ap, hdr, rx_stats);
405#endif
406
407#if WIRELESS_EXT > 15
408 if (ieee->iw_mode == IW_MODE_MONITOR) {
409 ieee80211_monitor_rx(ieee, skb, rx_stats);
410 stats->rx_packets++;
411 stats->rx_bytes += skb->len;
412 return 1;
413 }
414#endif
415
416 if (ieee->host_decrypt) {
417 int idx = 0;
418 if (skb->len >= hdrlen + 3)
419 idx = skb->data[hdrlen + 3] >> 6;
420 crypt = ieee->crypt[idx];
421#ifdef NOT_YET
422 sta = NULL;
423
424 /* Use station specific key to override default keys if the
425 * receiver address is a unicast address ("individual RA"). If
426 * bcrx_sta_key parameter is set, station specific key is used
427 * even with broad/multicast targets (this is against IEEE
428 * 802.11, but makes it easier to use different keys with
429 * stations that do not support WEP key mapping). */
430
431 if (!(hdr->addr1[0] & 0x01) || local->bcrx_sta_key)
432 (void) hostap_handle_sta_crypto(local, hdr, &crypt,
433 &sta);
434#endif
435
436		/* allow NULL decrypt to indicate a station-specific override
437 * for default encryption */
438 if (crypt && (crypt->ops == NULL ||
439 crypt->ops->decrypt_mpdu == NULL))
440 crypt = NULL;
441
442 if (!crypt && (fc & IEEE80211_FCTL_WEP)) {
443			/* This seems to be triggered by some (multicast?)
444			 * frames from a BSS other than the current one, so just
445			 * drop the frames silently instead of filling the system
446			 * log with these reports. */
447 IEEE80211_DEBUG_DROP("Decryption failed (not set)"
448 " (SA=" MAC_FMT ")\n",
449 MAC_ARG(hdr->addr2));
450 ieee->ieee_stats.rx_discards_undecryptable++;
451 goto rx_dropped;
452 }
453 }
454
455#ifdef NOT_YET
456 if (type != WLAN_FC_TYPE_DATA) {
457 if (type == WLAN_FC_TYPE_MGMT && stype == WLAN_FC_STYPE_AUTH &&
458 fc & IEEE80211_FCTL_WEP && ieee->host_decrypt &&
459 (keyidx = hostap_rx_frame_decrypt(ieee, skb, crypt)) < 0)
460 {
461 printk(KERN_DEBUG "%s: failed to decrypt mgmt::auth "
462 "from " MAC_FMT "\n", dev->name,
463 MAC_ARG(hdr->addr2));
464 /* TODO: could inform hostapd about this so that it
465 * could send auth failure report */
466 goto rx_dropped;
467 }
468
469 if (ieee80211_rx_frame_mgmt(ieee, skb, rx_stats, type, stype))
470 goto rx_dropped;
471 else
472 goto rx_exit;
473 }
474#endif
475
476 /* Data frame - extract src/dst addresses */
477 if (skb->len < IEEE80211_3ADDR_LEN)
478 goto rx_dropped;
479
480 switch (fc & (IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS)) {
481 case IEEE80211_FCTL_FROMDS:
482 memcpy(dst, hdr->addr1, ETH_ALEN);
483 memcpy(src, hdr->addr3, ETH_ALEN);
484 break;
485 case IEEE80211_FCTL_TODS:
486 memcpy(dst, hdr->addr3, ETH_ALEN);
487 memcpy(src, hdr->addr2, ETH_ALEN);
488 break;
489 case IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS:
490 if (skb->len < IEEE80211_4ADDR_LEN)
491 goto rx_dropped;
492 memcpy(dst, hdr->addr3, ETH_ALEN);
493 memcpy(src, hdr->addr4, ETH_ALEN);
494 break;
495 case 0:
496 memcpy(dst, hdr->addr1, ETH_ALEN);
497 memcpy(src, hdr->addr2, ETH_ALEN);
498 break;
499 }
500
501#ifdef NOT_YET
502 if (hostap_rx_frame_wds(ieee, hdr, fc, &wds))
503 goto rx_dropped;
504 if (wds) {
505 skb->dev = dev = wds;
506 stats = hostap_get_stats(dev);
507 }
508
509 if (ieee->iw_mode == IW_MODE_MASTER && !wds &&
510 (fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) == IEEE80211_FCTL_FROMDS &&
511 ieee->stadev &&
512 memcmp(hdr->addr2, ieee->assoc_ap_addr, ETH_ALEN) == 0) {
513 /* Frame from BSSID of the AP for which we are a client */
514 skb->dev = dev = ieee->stadev;
515 stats = hostap_get_stats(dev);
516 from_assoc_ap = 1;
517 }
518#endif
519
520 dev->last_rx = jiffies;
521
522#ifdef NOT_YET
523 if ((ieee->iw_mode == IW_MODE_MASTER ||
524 ieee->iw_mode == IW_MODE_REPEAT) &&
525 !from_assoc_ap) {
526 switch (hostap_handle_sta_rx(ieee, dev, skb, rx_stats,
527 wds != NULL)) {
528 case AP_RX_CONTINUE_NOT_AUTHORIZED:
529 frame_authorized = 0;
530 break;
531 case AP_RX_CONTINUE:
532 frame_authorized = 1;
533 break;
534 case AP_RX_DROP:
535 goto rx_dropped;
536 case AP_RX_EXIT:
537 goto rx_exit;
538 }
539 }
540#endif
541
542 /* Nullfunc frames may have PS-bit set, so they must be passed to
543 * hostap_handle_sta_rx() before being dropped here. */
544 if (stype != IEEE80211_STYPE_DATA &&
545 stype != IEEE80211_STYPE_DATA_CFACK &&
546 stype != IEEE80211_STYPE_DATA_CFPOLL &&
547 stype != IEEE80211_STYPE_DATA_CFACKPOLL) {
548 if (stype != IEEE80211_STYPE_NULLFUNC)
549 IEEE80211_DEBUG_DROP(
550 "RX: dropped data frame "
551 "with no data (type=0x%02x, "
552 "subtype=0x%02x, len=%d)\n",
553 type, stype, skb->len);
554 goto rx_dropped;
555 }
556
557 /* skb: hdr + (possibly fragmented, possibly encrypted) payload */
558
559 if (ieee->host_decrypt && (fc & IEEE80211_FCTL_WEP) &&
560 (keyidx = ieee80211_rx_frame_decrypt(ieee, skb, crypt)) < 0)
561 goto rx_dropped;
562
563 hdr = (struct ieee80211_hdr *) skb->data;
564
565 /* skb: hdr + (possibly fragmented) plaintext payload */
566 // PR: FIXME: hostap has additional conditions in the "if" below:
567 // ieee->host_decrypt && (fc & IEEE80211_FCTL_WEP) &&
568 if ((frag != 0 || (fc & IEEE80211_FCTL_MOREFRAGS))) {
569 int flen;
570 struct sk_buff *frag_skb = ieee80211_frag_cache_get(ieee, hdr);
571 IEEE80211_DEBUG_FRAG("Rx Fragment received (%u)\n", frag);
572
573 if (!frag_skb) {
574 IEEE80211_DEBUG(IEEE80211_DL_RX | IEEE80211_DL_FRAG,
575 "Rx cannot get skb from fragment "
576 "cache (morefrag=%d seq=%u frag=%u)\n",
577 (fc & IEEE80211_FCTL_MOREFRAGS) != 0,
578 WLAN_GET_SEQ_SEQ(sc), frag);
579 goto rx_dropped;
580 }
581
582 flen = skb->len;
583 if (frag != 0)
584 flen -= hdrlen;
585
586 if (frag_skb->tail + flen > frag_skb->end) {
587 printk(KERN_WARNING "%s: host decrypted and "
588 "reassembled frame did not fit skb\n",
589 dev->name);
590 ieee80211_frag_cache_invalidate(ieee, hdr);
591 goto rx_dropped;
592 }
593
594 if (frag == 0) {
595 /* copy first fragment (including full headers) into
596 * beginning of the fragment cache skb */
597 memcpy(skb_put(frag_skb, flen), skb->data, flen);
598 } else {
599 /* append frame payload to the end of the fragment
600 * cache skb */
601 memcpy(skb_put(frag_skb, flen), skb->data + hdrlen,
602 flen);
603 }
604 dev_kfree_skb_any(skb);
605 skb = NULL;
606
607 if (fc & IEEE80211_FCTL_MOREFRAGS) {
608 /* more fragments expected - leave the skb in fragment
609 * cache for now; it will be delivered to upper layers
610 * after all fragments have been received */
611 goto rx_exit;
612 }
613
614 /* this was the last fragment and the frame will be
615 * delivered, so remove skb from fragment cache */
616 skb = frag_skb;
617 hdr = (struct ieee80211_hdr *) skb->data;
618 ieee80211_frag_cache_invalidate(ieee, hdr);
619 }
620
621	/* skb: hdr + (possibly reassembled) full MSDU payload; possibly still
622 * encrypted/authenticated */
623 if (ieee->host_decrypt && (fc & IEEE80211_FCTL_WEP) &&
624 ieee80211_rx_frame_decrypt_msdu(ieee, skb, keyidx, crypt))
625 goto rx_dropped;
626
627 hdr = (struct ieee80211_hdr *) skb->data;
628 if (crypt && !(fc & IEEE80211_FCTL_WEP) && !ieee->open_wep) {
629 if (/*ieee->ieee802_1x &&*/
630 ieee80211_is_eapol_frame(ieee, skb)) {
631#ifdef CONFIG_IEEE80211_DEBUG
632 /* pass unencrypted EAPOL frames even if encryption is
633 * configured */
634 struct eapol *eap = (struct eapol *)(skb->data +
635 24);
636 IEEE80211_DEBUG_EAP("RX: IEEE 802.1X EAPOL frame: %s\n",
637 eap_get_type(eap->type));
638#endif
639 } else {
640 IEEE80211_DEBUG_DROP(
641 "encryption configured, but RX "
642 "frame not encrypted (SA=" MAC_FMT ")\n",
643 MAC_ARG(hdr->addr2));
644 goto rx_dropped;
645 }
646 }
647
648#ifdef CONFIG_IEEE80211_DEBUG
649 if (crypt && !(fc & IEEE80211_FCTL_WEP) &&
650 ieee80211_is_eapol_frame(ieee, skb)) {
651 struct eapol *eap = (struct eapol *)(skb->data +
652 24);
653 IEEE80211_DEBUG_EAP("RX: IEEE 802.1X EAPOL frame: %s\n",
654 eap_get_type(eap->type));
655 }
656#endif
657
658 if (crypt && !(fc & IEEE80211_FCTL_WEP) && !ieee->open_wep &&
659 !ieee80211_is_eapol_frame(ieee, skb)) {
660 IEEE80211_DEBUG_DROP(
661 "dropped unencrypted RX data "
662 "frame from " MAC_FMT
663 " (drop_unencrypted=1)\n",
664 MAC_ARG(hdr->addr2));
665 goto rx_dropped;
666 }
667
668	/* skb: hdr + (possibly reassembled) full plaintext payload */
669
670 payload = skb->data + hdrlen;
671 ethertype = (payload[6] << 8) | payload[7];
672
673#ifdef NOT_YET
674 /* If IEEE 802.1X is used, check whether the port is authorized to send
675 * the received frame. */
676 if (ieee->ieee802_1x && ieee->iw_mode == IW_MODE_MASTER) {
677 if (ethertype == ETH_P_PAE) {
678 printk(KERN_DEBUG "%s: RX: IEEE 802.1X frame\n",
679 dev->name);
680 if (ieee->hostapd && ieee->apdev) {
681 /* Send IEEE 802.1X frames to the user
682 * space daemon for processing */
683 prism2_rx_80211(ieee->apdev, skb, rx_stats,
684 PRISM2_RX_MGMT);
685 ieee->apdevstats.rx_packets++;
686 ieee->apdevstats.rx_bytes += skb->len;
687 goto rx_exit;
688 }
689 } else if (!frame_authorized) {
690 printk(KERN_DEBUG "%s: dropped frame from "
691 "unauthorized port (IEEE 802.1X): "
692 "ethertype=0x%04x\n",
693 dev->name, ethertype);
694 goto rx_dropped;
695 }
696 }
697#endif
698
699 /* convert hdr + possible LLC headers into Ethernet header */
700 if (skb->len - hdrlen >= 8 &&
701 ((memcmp(payload, rfc1042_header, SNAP_SIZE) == 0 &&
702 ethertype != ETH_P_AARP && ethertype != ETH_P_IPX) ||
703 memcmp(payload, bridge_tunnel_header, SNAP_SIZE) == 0)) {
704 /* remove RFC1042 or Bridge-Tunnel encapsulation and
705 * replace EtherType */
706 skb_pull(skb, hdrlen + SNAP_SIZE);
707 memcpy(skb_push(skb, ETH_ALEN), src, ETH_ALEN);
708 memcpy(skb_push(skb, ETH_ALEN), dst, ETH_ALEN);
709 } else {
710 u16 len;
711 /* Leave Ethernet header part of hdr and full payload */
712 skb_pull(skb, hdrlen);
713 len = htons(skb->len);
714 memcpy(skb_push(skb, 2), &len, 2);
715 memcpy(skb_push(skb, ETH_ALEN), src, ETH_ALEN);
716 memcpy(skb_push(skb, ETH_ALEN), dst, ETH_ALEN);
717 }
718
719#ifdef NOT_YET
720 if (wds && ((fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) ==
721 IEEE80211_FCTL_TODS) &&
722 skb->len >= ETH_HLEN + ETH_ALEN) {
723 /* Non-standard frame: get addr4 from its bogus location after
724 * the payload */
725 memcpy(skb->data + ETH_ALEN,
726 skb->data + skb->len - ETH_ALEN, ETH_ALEN);
727 skb_trim(skb, skb->len - ETH_ALEN);
728 }
729#endif
730
731 stats->rx_packets++;
732 stats->rx_bytes += skb->len;
733
734#ifdef NOT_YET
735 if (ieee->iw_mode == IW_MODE_MASTER && !wds &&
736 ieee->ap->bridge_packets) {
737 if (dst[0] & 0x01) {
738 /* copy multicast frame both to the higher layers and
739 * to the wireless media */
740 ieee->ap->bridged_multicast++;
741 skb2 = skb_clone(skb, GFP_ATOMIC);
742 if (skb2 == NULL)
743 printk(KERN_DEBUG "%s: skb_clone failed for "
744 "multicast frame\n", dev->name);
745 } else if (hostap_is_sta_assoc(ieee->ap, dst)) {
746 /* send frame directly to the associated STA using
747 * wireless media and not passing to higher layers */
748 ieee->ap->bridged_unicast++;
749 skb2 = skb;
750 skb = NULL;
751 }
752 }
753
754 if (skb2 != NULL) {
755 /* send to wireless media */
756 skb2->protocol = __constant_htons(ETH_P_802_3);
757 skb2->mac.raw = skb2->nh.raw = skb2->data;
758 /* skb2->nh.raw = skb2->data + ETH_HLEN; */
759 skb2->dev = dev;
760 dev_queue_xmit(skb2);
761 }
762
763#endif
764
765 if (skb) {
766 skb->protocol = eth_type_trans(skb, dev);
767 memset(skb->cb, 0, sizeof(skb->cb));
768 skb->dev = dev;
769 skb->ip_summed = CHECKSUM_NONE; /* 802.11 crc not sufficient */
770 netif_rx(skb);
771 }
772
773 rx_exit:
774#ifdef NOT_YET
775 if (sta)
776 hostap_handle_sta_release(sta);
777#endif
778 return 1;
779
780 rx_dropped:
781 stats->rx_dropped++;
782
783	/* Returning 0 indicates to the caller that we have not handled the SKB --
784	 * it is still allocated and can be reused by the underlying hardware
785	 * as a DMA target */
786 return 0;
787}
788
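/* Illustration (editor's sketch, not part of this patch): a minimal example of
 * how a driver's receive tasklet is expected to use the return value
 * documented above. "example_driver_rx" and "example_requeue_rx_buffer" are
 * hypothetical names used only for this sketch. */
#if 0
static void example_driver_rx(struct ieee80211_device *ieee,
			      struct sk_buff *skb,
			      struct ieee80211_rx_stats *stats)
{
	if (ieee80211_rx(ieee, skb, stats))
		return;		/* skb was consumed by the 802.11 stack */

	/* Return value 0: the stack did not take ownership, so the skb is
	 * still ours and can be handed back to the hardware as an RX/DMA
	 * buffer instead of being reallocated. */
	example_requeue_rx_buffer(skb);
}
#endif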
789#define MGMT_FRAME_FIXED_PART_LENGTH 0x24
790
791static inline int ieee80211_is_ofdm_rate(u8 rate)
792{
793 switch (rate & ~IEEE80211_BASIC_RATE_MASK) {
794 case IEEE80211_OFDM_RATE_6MB:
795 case IEEE80211_OFDM_RATE_9MB:
796 case IEEE80211_OFDM_RATE_12MB:
797 case IEEE80211_OFDM_RATE_18MB:
798 case IEEE80211_OFDM_RATE_24MB:
799 case IEEE80211_OFDM_RATE_36MB:
800 case IEEE80211_OFDM_RATE_48MB:
801 case IEEE80211_OFDM_RATE_54MB:
802 return 1;
803 }
804 return 0;
805}
806
807
808static inline int ieee80211_network_init(
809 struct ieee80211_device *ieee,
810 struct ieee80211_probe_response *beacon,
811 struct ieee80211_network *network,
812 struct ieee80211_rx_stats *stats)
813{
814#ifdef CONFIG_IEEE80211_DEBUG
815 char rates_str[64];
816 char *p;
817#endif
818 struct ieee80211_info_element *info_element;
819 u16 left;
820 u8 i;
821
822 /* Pull out fixed field data */
823 memcpy(network->bssid, beacon->header.addr3, ETH_ALEN);
824 network->capability = beacon->capability;
825 network->last_scanned = jiffies;
826 network->time_stamp[0] = beacon->time_stamp[0];
827 network->time_stamp[1] = beacon->time_stamp[1];
828 network->beacon_interval = beacon->beacon_interval;
829 /* Where to pull this? beacon->listen_interval;*/
830 network->listen_interval = 0x0A;
831 network->rates_len = network->rates_ex_len = 0;
832 network->last_associate = 0;
833 network->ssid_len = 0;
834 network->flags = 0;
835 network->atim_window = 0;
836
837 if (stats->freq == IEEE80211_52GHZ_BAND) {
838 /* for A band (No DS info) */
839 network->channel = stats->received_channel;
840 } else
841 network->flags |= NETWORK_HAS_CCK;
842
843 network->wpa_ie_len = 0;
844 network->rsn_ie_len = 0;
845
846 info_element = &beacon->info_element;
847 left = stats->len - ((void *)info_element - (void *)beacon);
848 while (left >= sizeof(struct ieee80211_info_element_hdr)) {
849 if (sizeof(struct ieee80211_info_element_hdr) + info_element->len > left) {
850 IEEE80211_DEBUG_SCAN("SCAN: parse failed: info_element->len + 2 > left : info_element->len+2=%d left=%d.\n",
851				     info_element->len + sizeof(struct ieee80211_info_element_hdr),
852 left);
853 return 1;
854 }
855
856 switch (info_element->id) {
857 case MFIE_TYPE_SSID:
858 if (ieee80211_is_empty_essid(info_element->data,
859 info_element->len)) {
860 network->flags |= NETWORK_EMPTY_ESSID;
861 break;
862 }
863
864 network->ssid_len = min(info_element->len,
865 (u8)IW_ESSID_MAX_SIZE);
866 memcpy(network->ssid, info_element->data, network->ssid_len);
867 if (network->ssid_len < IW_ESSID_MAX_SIZE)
868 memset(network->ssid + network->ssid_len, 0,
869 IW_ESSID_MAX_SIZE - network->ssid_len);
870
871 IEEE80211_DEBUG_SCAN("MFIE_TYPE_SSID: '%s' len=%d.\n",
872 network->ssid, network->ssid_len);
873 break;
874
875 case MFIE_TYPE_RATES:
876#ifdef CONFIG_IEEE80211_DEBUG
877 p = rates_str;
878#endif
879 network->rates_len = min(info_element->len, MAX_RATES_LENGTH);
880 for (i = 0; i < network->rates_len; i++) {
881 network->rates[i] = info_element->data[i];
882#ifdef CONFIG_IEEE80211_DEBUG
883 p += snprintf(p, sizeof(rates_str) - (p - rates_str), "%02X ", network->rates[i]);
884#endif
885 if (ieee80211_is_ofdm_rate(info_element->data[i])) {
886 network->flags |= NETWORK_HAS_OFDM;
887 if (info_element->data[i] &
888 IEEE80211_BASIC_RATE_MASK)
889 network->flags &=
890 ~NETWORK_HAS_CCK;
891 }
892 }
893
894 IEEE80211_DEBUG_SCAN("MFIE_TYPE_RATES: '%s' (%d)\n",
895 rates_str, network->rates_len);
896 break;
897
898 case MFIE_TYPE_RATES_EX:
899#ifdef CONFIG_IEEE80211_DEBUG
900 p = rates_str;
901#endif
902 network->rates_ex_len = min(info_element->len, MAX_RATES_EX_LENGTH);
903 for (i = 0; i < network->rates_ex_len; i++) {
904 network->rates_ex[i] = info_element->data[i];
905#ifdef CONFIG_IEEE80211_DEBUG
906				p += snprintf(p, sizeof(rates_str) - (p - rates_str), "%02X ", network->rates_ex[i]);
907#endif
908 if (ieee80211_is_ofdm_rate(info_element->data[i])) {
909 network->flags |= NETWORK_HAS_OFDM;
910 if (info_element->data[i] &
911 IEEE80211_BASIC_RATE_MASK)
912 network->flags &=
913 ~NETWORK_HAS_CCK;
914 }
915 }
916
917 IEEE80211_DEBUG_SCAN("MFIE_TYPE_RATES_EX: '%s' (%d)\n",
918 rates_str, network->rates_ex_len);
919 break;
920
921 case MFIE_TYPE_DS_SET:
922 IEEE80211_DEBUG_SCAN("MFIE_TYPE_DS_SET: %d\n",
923 info_element->data[0]);
924 if (stats->freq == IEEE80211_24GHZ_BAND)
925 network->channel = info_element->data[0];
926 break;
927
928 case MFIE_TYPE_FH_SET:
929 IEEE80211_DEBUG_SCAN("MFIE_TYPE_FH_SET: ignored\n");
930 break;
931
932 case MFIE_TYPE_CF_SET:
933 IEEE80211_DEBUG_SCAN("MFIE_TYPE_CF_SET: ignored\n");
934 break;
935
936 case MFIE_TYPE_TIM:
937 IEEE80211_DEBUG_SCAN("MFIE_TYPE_TIM: ignored\n");
938 break;
939
940 case MFIE_TYPE_IBSS_SET:
941 IEEE80211_DEBUG_SCAN("MFIE_TYPE_IBSS_SET: ignored\n");
942 break;
943
944 case MFIE_TYPE_CHALLENGE:
945 IEEE80211_DEBUG_SCAN("MFIE_TYPE_CHALLENGE: ignored\n");
946 break;
947
948 case MFIE_TYPE_GENERIC:
949 IEEE80211_DEBUG_SCAN("MFIE_TYPE_GENERIC: %d bytes\n",
950 info_element->len);
951 if (info_element->len >= 4 &&
952 info_element->data[0] == 0x00 &&
953 info_element->data[1] == 0x50 &&
954 info_element->data[2] == 0xf2 &&
955 info_element->data[3] == 0x01) {
956 network->wpa_ie_len = min(info_element->len + 2,
957 MAX_WPA_IE_LEN);
958 memcpy(network->wpa_ie, info_element,
959 network->wpa_ie_len);
960 }
961 break;
962
963 case MFIE_TYPE_RSN:
964 IEEE80211_DEBUG_SCAN("MFIE_TYPE_RSN: %d bytes\n",
965 info_element->len);
966 network->rsn_ie_len = min(info_element->len + 2,
967 MAX_WPA_IE_LEN);
968 memcpy(network->rsn_ie, info_element,
969 network->rsn_ie_len);
970 break;
971
972 default:
973 IEEE80211_DEBUG_SCAN("unsupported IE %d\n",
974 info_element->id);
975 break;
976 }
977
978 left -= sizeof(struct ieee80211_info_element_hdr) +
979 info_element->len;
980 info_element = (struct ieee80211_info_element *)
981 &info_element->data[info_element->len];
982 }
983
984 network->mode = 0;
985 if (stats->freq == IEEE80211_52GHZ_BAND)
986 network->mode = IEEE_A;
987 else {
988 if (network->flags & NETWORK_HAS_OFDM)
989 network->mode |= IEEE_G;
990 if (network->flags & NETWORK_HAS_CCK)
991 network->mode |= IEEE_B;
992 }
993
994 if (network->mode == 0) {
995 IEEE80211_DEBUG_SCAN("Filtered out '%s (" MAC_FMT ")' "
996 "network.\n",
997 escape_essid(network->ssid,
998 network->ssid_len),
999 MAC_ARG(network->bssid));
1000 return 1;
1001 }
1002
1003 if (ieee80211_is_empty_essid(network->ssid, network->ssid_len))
1004 network->flags |= NETWORK_EMPTY_ESSID;
1005
1006 memcpy(&network->stats, stats, sizeof(network->stats));
1007
1008 return 0;
1009}
1010
1011static inline int is_same_network(struct ieee80211_network *src,
1012 struct ieee80211_network *dst)
1013{
1014 /* A network is only a duplicate if the channel, BSSID, and ESSID
1015 * all match. We treat all <hidden> with the same BSSID and channel
1016 * as one network */
1017 return ((src->ssid_len == dst->ssid_len) &&
1018 (src->channel == dst->channel) &&
1019 !memcmp(src->bssid, dst->bssid, ETH_ALEN) &&
1020 !memcmp(src->ssid, dst->ssid, src->ssid_len));
1021}
1022
1023static inline void update_network(struct ieee80211_network *dst,
1024 struct ieee80211_network *src)
1025{
1026 memcpy(&dst->stats, &src->stats, sizeof(struct ieee80211_rx_stats));
1027 dst->capability = src->capability;
1028 memcpy(dst->rates, src->rates, src->rates_len);
1029 dst->rates_len = src->rates_len;
1030 memcpy(dst->rates_ex, src->rates_ex, src->rates_ex_len);
1031 dst->rates_ex_len = src->rates_ex_len;
1032
1033 dst->mode = src->mode;
1034 dst->flags = src->flags;
1035 dst->time_stamp[0] = src->time_stamp[0];
1036 dst->time_stamp[1] = src->time_stamp[1];
1037
1038 dst->beacon_interval = src->beacon_interval;
1039 dst->listen_interval = src->listen_interval;
1040 dst->atim_window = src->atim_window;
1041
1042 memcpy(dst->wpa_ie, src->wpa_ie, src->wpa_ie_len);
1043 dst->wpa_ie_len = src->wpa_ie_len;
1044 memcpy(dst->rsn_ie, src->rsn_ie, src->rsn_ie_len);
1045 dst->rsn_ie_len = src->rsn_ie_len;
1046
1047 dst->last_scanned = jiffies;
1048 /* dst->last_associate is not overwritten */
1049}
1050
1051static inline void ieee80211_process_probe_response(
1052 struct ieee80211_device *ieee,
1053 struct ieee80211_probe_response *beacon,
1054 struct ieee80211_rx_stats *stats)
1055{
1056 struct ieee80211_network network;
1057 struct ieee80211_network *target;
1058 struct ieee80211_network *oldest = NULL;
1059#ifdef CONFIG_IEEE80211_DEBUG
1060 struct ieee80211_info_element *info_element = &beacon->info_element;
1061#endif
1062 unsigned long flags;
1063
1064 IEEE80211_DEBUG_SCAN(
1065 "'%s' (" MAC_FMT "): %c%c%c%c %c%c%c%c-%c%c%c%c %c%c%c%c\n",
1066 escape_essid(info_element->data, info_element->len),
1067 MAC_ARG(beacon->header.addr3),
1068 (beacon->capability & (1<<0xf)) ? '1' : '0',
1069 (beacon->capability & (1<<0xe)) ? '1' : '0',
1070 (beacon->capability & (1<<0xd)) ? '1' : '0',
1071 (beacon->capability & (1<<0xc)) ? '1' : '0',
1072 (beacon->capability & (1<<0xb)) ? '1' : '0',
1073 (beacon->capability & (1<<0xa)) ? '1' : '0',
1074 (beacon->capability & (1<<0x9)) ? '1' : '0',
1075 (beacon->capability & (1<<0x8)) ? '1' : '0',
1076 (beacon->capability & (1<<0x7)) ? '1' : '0',
1077 (beacon->capability & (1<<0x6)) ? '1' : '0',
1078 (beacon->capability & (1<<0x5)) ? '1' : '0',
1079 (beacon->capability & (1<<0x4)) ? '1' : '0',
1080 (beacon->capability & (1<<0x3)) ? '1' : '0',
1081 (beacon->capability & (1<<0x2)) ? '1' : '0',
1082 (beacon->capability & (1<<0x1)) ? '1' : '0',
1083 (beacon->capability & (1<<0x0)) ? '1' : '0');
1084
1085 if (ieee80211_network_init(ieee, beacon, &network, stats)) {
1086 IEEE80211_DEBUG_SCAN("Dropped '%s' (" MAC_FMT ") via %s.\n",
1087 escape_essid(info_element->data,
1088 info_element->len),
1089 MAC_ARG(beacon->header.addr3),
1090 WLAN_FC_GET_STYPE(beacon->header.frame_ctl) ==
1091 IEEE80211_STYPE_PROBE_RESP ?
1092 "PROBE RESPONSE" : "BEACON");
1093 return;
1094 }
1095
1096 /* The network parsed correctly -- so now we scan our known networks
1097 * to see if we can find it in our list.
1098 *
1099	 * NOTE: This search is definitely not optimized. Once it's doing
1100 * the "right thing" we'll optimize it for efficiency if
1101 * necessary */
1102
1103 /* Search for this entry in the list and update it if it is
1104 * already there. */
1105
1106 spin_lock_irqsave(&ieee->lock, flags);
1107
1108 list_for_each_entry(target, &ieee->network_list, list) {
1109 if (is_same_network(target, &network))
1110 break;
1111
1112 if ((oldest == NULL) ||
1113 (target->last_scanned < oldest->last_scanned))
1114 oldest = target;
1115 }
1116
1117 /* If we didn't find a match, then get a new network slot to initialize
1118 * with this beacon's information */
1119 if (&target->list == &ieee->network_list) {
1120 if (list_empty(&ieee->network_free_list)) {
1121 /* If there are no more slots, expire the oldest */
1122 list_del(&oldest->list);
1123 target = oldest;
1124 IEEE80211_DEBUG_SCAN("Expired '%s' (" MAC_FMT ") from "
1125 "network list.\n",
1126 escape_essid(target->ssid,
1127 target->ssid_len),
1128 MAC_ARG(target->bssid));
1129 } else {
1130 /* Otherwise just pull from the free list */
1131 target = list_entry(ieee->network_free_list.next,
1132 struct ieee80211_network, list);
1133 list_del(ieee->network_free_list.next);
1134 }
1135
1136
1137#ifdef CONFIG_IEEE80211_DEBUG
1138 IEEE80211_DEBUG_SCAN("Adding '%s' (" MAC_FMT ") via %s.\n",
1139 escape_essid(network.ssid,
1140 network.ssid_len),
1141 MAC_ARG(network.bssid),
1142 WLAN_FC_GET_STYPE(beacon->header.frame_ctl) ==
1143 IEEE80211_STYPE_PROBE_RESP ?
1144 "PROBE RESPONSE" : "BEACON");
1145#endif
1146 memcpy(target, &network, sizeof(*target));
1147 list_add_tail(&target->list, &ieee->network_list);
1148 } else {
1149 IEEE80211_DEBUG_SCAN("Updating '%s' (" MAC_FMT ") via %s.\n",
1150 escape_essid(target->ssid,
1151 target->ssid_len),
1152 MAC_ARG(target->bssid),
1153 WLAN_FC_GET_STYPE(beacon->header.frame_ctl) ==
1154 IEEE80211_STYPE_PROBE_RESP ?
1155 "PROBE RESPONSE" : "BEACON");
1156 update_network(target, &network);
1157 }
1158
1159 spin_unlock_irqrestore(&ieee->lock, flags);
1160}
1161
1162void ieee80211_rx_mgt(struct ieee80211_device *ieee,
1163 struct ieee80211_hdr *header,
1164 struct ieee80211_rx_stats *stats)
1165{
1166 switch (WLAN_FC_GET_STYPE(header->frame_ctl)) {
1167 case IEEE80211_STYPE_ASSOC_RESP:
1168 IEEE80211_DEBUG_MGMT("received ASSOCIATION RESPONSE (%d)\n",
1169 WLAN_FC_GET_STYPE(header->frame_ctl));
1170 break;
1171
1172 case IEEE80211_STYPE_REASSOC_RESP:
1173 IEEE80211_DEBUG_MGMT("received REASSOCIATION RESPONSE (%d)\n",
1174 WLAN_FC_GET_STYPE(header->frame_ctl));
1175 break;
1176
1177 case IEEE80211_STYPE_PROBE_RESP:
1178 IEEE80211_DEBUG_MGMT("received PROBE RESPONSE (%d)\n",
1179 WLAN_FC_GET_STYPE(header->frame_ctl));
1180 IEEE80211_DEBUG_SCAN("Probe response\n");
1181 ieee80211_process_probe_response(
1182 ieee, (struct ieee80211_probe_response *)header, stats);
1183 break;
1184
1185 case IEEE80211_STYPE_BEACON:
1186 IEEE80211_DEBUG_MGMT("received BEACON (%d)\n",
1187 WLAN_FC_GET_STYPE(header->frame_ctl));
1188 IEEE80211_DEBUG_SCAN("Beacon\n");
1189 ieee80211_process_probe_response(
1190 ieee, (struct ieee80211_probe_response *)header, stats);
1191 break;
1192
1193 default:
1194 IEEE80211_DEBUG_MGMT("received UNKNOWN (%d)\n",
1195 WLAN_FC_GET_STYPE(header->frame_ctl));
1196 IEEE80211_WARNING("%s: Unknown management packet: %d\n",
1197 ieee->dev->name,
1198 WLAN_FC_GET_STYPE(header->frame_ctl));
1199 break;
1200 }
1201}
1202
1203
1204EXPORT_SYMBOL(ieee80211_rx_mgt);
1205EXPORT_SYMBOL(ieee80211_rx);
diff --git a/net/ieee80211/ieee80211_tx.c b/net/ieee80211/ieee80211_tx.c
new file mode 100644
index 000000000000..d1049edcd14d
--- /dev/null
+++ b/net/ieee80211/ieee80211_tx.c
@@ -0,0 +1,447 @@
1/******************************************************************************
2
3 Copyright(c) 2003 - 2004 Intel Corporation. All rights reserved.
4
5 This program is free software; you can redistribute it and/or modify it
6 under the terms of version 2 of the GNU General Public License as
7 published by the Free Software Foundation.
8
9 This program is distributed in the hope that it will be useful, but WITHOUT
10 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 more details.
13
14 You should have received a copy of the GNU General Public License along with
15 this program; if not, write to the Free Software Foundation, Inc., 59
16 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17
18 The full GNU General Public License is included in this distribution in the
19 file called LICENSE.
20
21 Contact Information:
22 James P. Ketrenos <ipw2100-admin@linux.intel.com>
23 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24
25******************************************************************************/
26#include <linux/compiler.h>
27#include <linux/config.h>
28#include <linux/errno.h>
29#include <linux/if_arp.h>
30#include <linux/in6.h>
31#include <linux/in.h>
32#include <linux/ip.h>
33#include <linux/kernel.h>
34#include <linux/module.h>
35#include <linux/netdevice.h>
36#include <linux/proc_fs.h>
37#include <linux/skbuff.h>
38#include <linux/slab.h>
39#include <linux/tcp.h>
40#include <linux/types.h>
41#include <linux/version.h>
42#include <linux/wireless.h>
43#include <linux/etherdevice.h>
44#include <asm/uaccess.h>
45
46#include <net/ieee80211.h>
47
48
49/*
50
51
52802.11 Data Frame
53
54 ,-------------------------------------------------------------------.
55Bytes | 2 | 2 | 6 | 6 | 6 | 2 | 0..2312 | 4 |
56 |------|------|---------|---------|---------|------|---------|------|
57Desc. | ctrl | dura | DA/RA | TA | SA | Sequ | Frame | fcs |
58 | | tion | (BSSID) | | | ence | data | |
59 `--------------------------------------------------| |------'
60Total: 28 non-data bytes `----.----'
61 |
62 .- 'Frame data' expands to <---------------------------'
63 |
64 V
65 ,---------------------------------------------------.
66Bytes | 1 | 1 | 1 | 3 | 2 | 0-2304 |
67 |------|------|---------|----------|------|---------|
68Desc. | SNAP | SNAP | Control |Eth Tunnel| Type | IP |
69 | DSAP | SSAP | | | | Packet |
70 | 0xAA | 0xAA |0x03 (UI)|0x00-00-F8| | |
71 `-----------------------------------------| |
72Total: 8 non-data bytes `----.----'
73 |
74 .- 'IP Packet' expands, if WEP enabled, to <--'
75 |
76 V
77 ,-----------------------.
78Bytes | 4 | 0-2296 | 4 |
79 |-----|-----------|-----|
80Desc. | IV | Encrypted | ICV |
81 | | IP Packet | |
82 `-----------------------'
83Total: 8 non-data bytes
84
85
86802.3 Ethernet Data Frame
87
88 ,-----------------------------------------.
89Bytes | 6 | 6 | 2 | Variable | 4 |
90 |-------|-------|------|-----------|------|
91Desc. | Dest. | Source| Type | IP Packet | fcs |
92 | MAC | MAC | | | |
93 `-----------------------------------------'
94Total: 18 non-data bytes
95
96In the event that fragmentation is required, the incoming payload is split into
97N parts of size ieee->fts. The first fragment contains the SNAP header and the
98remaining packets are just data.
99
100If encryption is enabled, each fragment payload size is reduced by enough space
101to add the prefix and postfix (IV and ICV totalling 8 bytes in the case of WEP).
102So if you have 1500 bytes of payload with ieee->fts set to 500, then without
103encryption it will take 3 frames. With WEP it will take 4 frames, as the
104payload of each frame is reduced to 492 bytes.
105
106* SKB visualization
107*
108* ,- skb->data
109* |
110* | ETHERNET HEADER ,-<-- PAYLOAD
111* | | 14 bytes from skb->data
112* | 2 bytes for Type --> ,T. | (sizeof ethhdr)
113* | | | |
114* |,-Dest.--. ,--Src.---. | | |
115* | 6 bytes| | 6 bytes | | | |
116* v | | | | | |
117* 0 | v 1 | v | v 2
118* 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
119* ^ | ^ | ^ |
120* | | | | | |
121* | | | | `T' <---- 2 bytes for Type
122* | | | |
123* | | '---SNAP--' <-------- 6 bytes for SNAP
124* | |
125* `-IV--' <-------------------- 4 bytes for IV (WEP)
126*
127* SNAP HEADER
128*
129*/
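/* Worked example (editor's sketch, not part of the original patch) of the
 * fragment-count arithmetic described in the comment above. The helper name
 * and parameters are hypothetical; the real computation is performed in
 * ieee80211_xmit() below. */
#if 0
static int example_nr_frags(int payload_bytes, int frag_payload, int crypt_overhead)
{
	int per_frag = frag_payload - crypt_overhead;	/* usable payload per fragment */

	return (payload_bytes + per_frag - 1) / per_frag;	/* round up */
}
/* example_nr_frags(1500, 500, 0) == 3  (no encryption)
 * example_nr_frags(1500, 500, 8) == 4  (WEP: 4-byte IV + 4-byte ICV) */
#endif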
130
131static u8 P802_1H_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0xf8 };
132static u8 RFC1042_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0x00 };
133
134static inline int ieee80211_put_snap(u8 *data, u16 h_proto)
135{
136 struct ieee80211_snap_hdr *snap;
137 u8 *oui;
138
139 snap = (struct ieee80211_snap_hdr *)data;
140 snap->dsap = 0xaa;
141 snap->ssap = 0xaa;
142 snap->ctrl = 0x03;
143
144 if (h_proto == 0x8137 || h_proto == 0x80f3)
145 oui = P802_1H_OUI;
146 else
147 oui = RFC1042_OUI;
148 snap->oui[0] = oui[0];
149 snap->oui[1] = oui[1];
150 snap->oui[2] = oui[2];
151
152 *(u16 *)(data + SNAP_SIZE) = htons(h_proto);
153
154 return SNAP_SIZE + sizeof(u16);
155}
156
157static inline int ieee80211_encrypt_fragment(
158 struct ieee80211_device *ieee,
159 struct sk_buff *frag,
160 int hdr_len)
161{
162 struct ieee80211_crypt_data* crypt = ieee->crypt[ieee->tx_keyidx];
163 int res;
164
165#ifdef CONFIG_IEEE80211_CRYPT_TKIP
166 struct ieee80211_hdr *header;
167
168 if (ieee->tkip_countermeasures &&
169 crypt && crypt->ops && strcmp(crypt->ops->name, "TKIP") == 0) {
170 header = (struct ieee80211_hdr *) frag->data;
171 if (net_ratelimit()) {
172 printk(KERN_DEBUG "%s: TKIP countermeasures: dropped "
173 "TX packet to " MAC_FMT "\n",
174 ieee->dev->name, MAC_ARG(header->addr1));
175 }
176 return -1;
177 }
178#endif
179 /* To encrypt, frame format is:
180 * IV (4 bytes), clear payload (including SNAP), ICV (4 bytes) */
181
182 // PR: FIXME: Copied from hostap. Check fragmentation/MSDU/MPDU encryption.
183 /* Host-based IEEE 802.11 fragmentation for TX is not yet supported, so
184 * call both MSDU and MPDU encryption functions from here. */
185 atomic_inc(&crypt->refcnt);
186 res = 0;
187 if (crypt->ops->encrypt_msdu)
188 res = crypt->ops->encrypt_msdu(frag, hdr_len, crypt->priv);
189 if (res == 0 && crypt->ops->encrypt_mpdu)
190 res = crypt->ops->encrypt_mpdu(frag, hdr_len, crypt->priv);
191
192 atomic_dec(&crypt->refcnt);
193 if (res < 0) {
194 printk(KERN_INFO "%s: Encryption failed: len=%d.\n",
195 ieee->dev->name, frag->len);
196 ieee->ieee_stats.tx_discards++;
197 return -1;
198 }
199
200 return 0;
201}
202
203
204void ieee80211_txb_free(struct ieee80211_txb *txb) {
205 int i;
206 if (unlikely(!txb))
207 return;
208 for (i = 0; i < txb->nr_frags; i++)
209 if (txb->fragments[i])
210 dev_kfree_skb_any(txb->fragments[i]);
211 kfree(txb);
212}
213
214static struct ieee80211_txb *ieee80211_alloc_txb(int nr_frags, int txb_size,
215 int gfp_mask)
216{
217 struct ieee80211_txb *txb;
218 int i;
219 txb = kmalloc(
220 sizeof(struct ieee80211_txb) + (sizeof(u8*) * nr_frags),
221 gfp_mask);
222 if (!txb)
223 return NULL;
224
225 memset(txb, 0, sizeof(struct ieee80211_txb));
226 txb->nr_frags = nr_frags;
227 txb->frag_size = txb_size;
228
229 for (i = 0; i < nr_frags; i++) {
230 txb->fragments[i] = dev_alloc_skb(txb_size);
231 if (unlikely(!txb->fragments[i])) {
232 i--;
233 break;
234 }
235 }
236 if (unlikely(i != nr_frags)) {
237 while (i >= 0)
238 dev_kfree_skb_any(txb->fragments[i--]);
239 kfree(txb);
240 return NULL;
241 }
242 return txb;
243}
244
245/* The incoming skb is fragmented into a TXB and handed to the driver via
 * ieee->hard_start_xmit(). */
246int ieee80211_xmit(struct sk_buff *skb,
247 struct net_device *dev)
248{
249 struct ieee80211_device *ieee = netdev_priv(dev);
250 struct ieee80211_txb *txb = NULL;
251 struct ieee80211_hdr *frag_hdr;
252 int i, bytes_per_frag, nr_frags, bytes_last_frag, frag_size;
253 unsigned long flags;
254 struct net_device_stats *stats = &ieee->stats;
255 int ether_type, encrypt;
256 int bytes, fc, hdr_len;
257 struct sk_buff *skb_frag;
258 struct ieee80211_hdr header = { /* Ensure zero initialized */
259 .duration_id = 0,
260 .seq_ctl = 0
261 };
262 u8 dest[ETH_ALEN], src[ETH_ALEN];
263
264 struct ieee80211_crypt_data* crypt;
265
266 spin_lock_irqsave(&ieee->lock, flags);
267
268	/* If there is no driver handler to take the TXB, don't bother
269 * creating it... */
270 if (!ieee->hard_start_xmit) {
271 printk(KERN_WARNING "%s: No xmit handler.\n",
272 ieee->dev->name);
273 goto success;
274 }
275
276 if (unlikely(skb->len < SNAP_SIZE + sizeof(u16))) {
277 printk(KERN_WARNING "%s: skb too small (%d).\n",
278 ieee->dev->name, skb->len);
279 goto success;
280 }
281
282 ether_type = ntohs(((struct ethhdr *)skb->data)->h_proto);
283
284 crypt = ieee->crypt[ieee->tx_keyidx];
285
286 encrypt = !(ether_type == ETH_P_PAE && ieee->ieee802_1x) &&
287 ieee->host_encrypt && crypt && crypt->ops;
288
289 if (!encrypt && ieee->ieee802_1x &&
290 ieee->drop_unencrypted && ether_type != ETH_P_PAE) {
291 stats->tx_dropped++;
292 goto success;
293 }
294
295#ifdef CONFIG_IEEE80211_DEBUG
296 if (crypt && !encrypt && ether_type == ETH_P_PAE) {
297 struct eapol *eap = (struct eapol *)(skb->data +
298 sizeof(struct ethhdr) - SNAP_SIZE - sizeof(u16));
299		IEEE80211_DEBUG_EAP("TX: IEEE 802.1X EAPOL frame: %s\n",
300 eap_get_type(eap->type));
301 }
302#endif
303
304 /* Save source and destination addresses */
305 memcpy(&dest, skb->data, ETH_ALEN);
306 memcpy(&src, skb->data+ETH_ALEN, ETH_ALEN);
307
308 /* Advance the SKB to the start of the payload */
309 skb_pull(skb, sizeof(struct ethhdr));
310
311 /* Determine total amount of storage required for TXB packets */
312 bytes = skb->len + SNAP_SIZE + sizeof(u16);
313
314 if (encrypt)
315 fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA |
316 IEEE80211_FCTL_WEP;
317 else
318 fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA;
319
320 if (ieee->iw_mode == IW_MODE_INFRA) {
321 fc |= IEEE80211_FCTL_TODS;
322 /* To DS: Addr1 = BSSID, Addr2 = SA,
323 Addr3 = DA */
324 memcpy(&header.addr1, ieee->bssid, ETH_ALEN);
325 memcpy(&header.addr2, &src, ETH_ALEN);
326 memcpy(&header.addr3, &dest, ETH_ALEN);
327 } else if (ieee->iw_mode == IW_MODE_ADHOC) {
328 /* not From/To DS: Addr1 = DA, Addr2 = SA,
329 Addr3 = BSSID */
330 memcpy(&header.addr1, dest, ETH_ALEN);
331 memcpy(&header.addr2, src, ETH_ALEN);
332 memcpy(&header.addr3, ieee->bssid, ETH_ALEN);
333 }
334 header.frame_ctl = cpu_to_le16(fc);
335 hdr_len = IEEE80211_3ADDR_LEN;
336
337 /* Determine fragmentation size based on destination (multicast
338 * and broadcast are not fragmented) */
339 if (is_multicast_ether_addr(dest) ||
340 is_broadcast_ether_addr(dest))
341 frag_size = MAX_FRAG_THRESHOLD;
342 else
343 frag_size = ieee->fts;
344
345	/* Determine amount of payload per fragment. Regardless of whether
346 * this stack is providing the full 802.11 header, one will
347 * eventually be affixed to this fragment -- so we must account for
348 * it when determining the amount of payload space. */
349 bytes_per_frag = frag_size - IEEE80211_3ADDR_LEN;
350 if (ieee->config &
351 (CFG_IEEE80211_COMPUTE_FCS | CFG_IEEE80211_RESERVE_FCS))
352 bytes_per_frag -= IEEE80211_FCS_LEN;
353
354	/* Each fragment may need to have room for encryption pre/postfix */
355 if (encrypt)
356 bytes_per_frag -= crypt->ops->extra_prefix_len +
357 crypt->ops->extra_postfix_len;
358
359	/* Number of fragments is the total payload size divided by the
360	 * payload available per fragment */
361 nr_frags = bytes / bytes_per_frag;
362 bytes_last_frag = bytes % bytes_per_frag;
363 if (bytes_last_frag)
364 nr_frags++;
365 else
366 bytes_last_frag = bytes_per_frag;
367
368 /* When we allocate the TXB we allocate enough space for the reserve
369 * and full fragment bytes (bytes_per_frag doesn't include prefix,
370 * postfix, header, FCS, etc.) */
371 txb = ieee80211_alloc_txb(nr_frags, frag_size, GFP_ATOMIC);
372 if (unlikely(!txb)) {
373 printk(KERN_WARNING "%s: Could not allocate TXB\n",
374 ieee->dev->name);
375 goto failed;
376 }
377 txb->encrypted = encrypt;
378 txb->payload_size = bytes;
379
380 for (i = 0; i < nr_frags; i++) {
381 skb_frag = txb->fragments[i];
382
383 if (encrypt)
384 skb_reserve(skb_frag, crypt->ops->extra_prefix_len);
385
386 frag_hdr = (struct ieee80211_hdr *)skb_put(skb_frag, hdr_len);
387 memcpy(frag_hdr, &header, hdr_len);
388
389 /* If this is not the last fragment, then add the MOREFRAGS
390 * bit to the frame control */
391 if (i != nr_frags - 1) {
392 frag_hdr->frame_ctl = cpu_to_le16(
393 fc | IEEE80211_FCTL_MOREFRAGS);
394 bytes = bytes_per_frag;
395 } else {
396 /* The last fragment takes the remaining length */
397 bytes = bytes_last_frag;
398 }
399
400 /* Put a SNAP header on the first fragment */
401 if (i == 0) {
402 ieee80211_put_snap(
403 skb_put(skb_frag, SNAP_SIZE + sizeof(u16)),
404 ether_type);
405 bytes -= SNAP_SIZE + sizeof(u16);
406 }
407
408 memcpy(skb_put(skb_frag, bytes), skb->data, bytes);
409
410 /* Advance the SKB... */
411 skb_pull(skb, bytes);
412
413 /* Encryption routine will move the header forward in order
414 * to insert the IV between the header and the payload */
415 if (encrypt)
416 ieee80211_encrypt_fragment(ieee, skb_frag, hdr_len);
417 if (ieee->config &
418 (CFG_IEEE80211_COMPUTE_FCS | CFG_IEEE80211_RESERVE_FCS))
419 skb_put(skb_frag, 4);
420 }
421
422
423 success:
424 spin_unlock_irqrestore(&ieee->lock, flags);
425
426 dev_kfree_skb_any(skb);
427
428 if (txb) {
429 if ((*ieee->hard_start_xmit)(txb, dev) == 0) {
430 stats->tx_packets++;
431 stats->tx_bytes += txb->payload_size;
432 return 0;
433 }
434 ieee80211_txb_free(txb);
435 }
436
437 return 0;
438
439 failed:
440 spin_unlock_irqrestore(&ieee->lock, flags);
441 netif_stop_queue(dev);
442 stats->tx_errors++;
443 return 1;
444
445}
446
447EXPORT_SYMBOL(ieee80211_txb_free);
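/* Illustration (editor's sketch, not part of this patch): a minimal
 * hard_start_xmit handler consuming the TXB built by ieee80211_xmit() above.
 * Per the code above, returning 0 means the driver has taken ownership of the
 * TXB (and frees it later with ieee80211_txb_free()); a non-zero return makes
 * the stack free it immediately. "example_queue_frag_to_hw" is hypothetical. */
#if 0
static int example_hard_start_xmit(struct ieee80211_txb *txb,
				   struct net_device *dev)
{
	int i;

	/* Each fragment is already a complete 802.11 frame (header, SNAP,
	 * and, if enabled, the encryption prefix/postfix). */
	for (i = 0; i < txb->nr_frags; i++)
		example_queue_frag_to_hw(dev, txb->fragments[i]);

	return 0;	/* accepted; driver will call ieee80211_txb_free() */
}
#endif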
diff --git a/net/ieee80211/ieee80211_wx.c b/net/ieee80211/ieee80211_wx.c
new file mode 100644
index 000000000000..25a868c2e2f7
--- /dev/null
+++ b/net/ieee80211/ieee80211_wx.c
@@ -0,0 +1,471 @@
1/******************************************************************************
2
3 Copyright(c) 2004 Intel Corporation. All rights reserved.
4
5 Portions of this file are based on the WEP enablement code provided by the
6 Host AP project hostap-drivers v0.1.3
7 Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen
8 <jkmaline@cc.hut.fi>
9 Copyright (c) 2002-2003, Jouni Malinen <jkmaline@cc.hut.fi>
10
11 This program is free software; you can redistribute it and/or modify it
12 under the terms of version 2 of the GNU General Public License as
13 published by the Free Software Foundation.
14
15 This program is distributed in the hope that it will be useful, but WITHOUT
16 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
18 more details.
19
20 You should have received a copy of the GNU General Public License along with
21 this program; if not, write to the Free Software Foundation, Inc., 59
22 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
23
24 The full GNU General Public License is included in this distribution in the
25 file called LICENSE.
26
27 Contact Information:
28 James P. Ketrenos <ipw2100-admin@linux.intel.com>
29 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30
31******************************************************************************/
32#include <linux/wireless.h>
33#include <linux/version.h>
34#include <linux/kmod.h>
35#include <linux/module.h>
36
37#include <net/ieee80211.h>
38static const char *ieee80211_modes[] = {
39 "?", "a", "b", "ab", "g", "ag", "bg", "abg"
40};
41
42#define MAX_CUSTOM_LEN 64
43static inline char *ipw2100_translate_scan(struct ieee80211_device *ieee,
44 char *start, char *stop,
45 struct ieee80211_network *network)
46{
47 char custom[MAX_CUSTOM_LEN];
48 char *p;
49 struct iw_event iwe;
50 int i, j;
51 u8 max_rate, rate;
52
53 /* First entry *MUST* be the AP MAC address */
54 iwe.cmd = SIOCGIWAP;
55 iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
56 memcpy(iwe.u.ap_addr.sa_data, network->bssid, ETH_ALEN);
57 start = iwe_stream_add_event(start, stop, &iwe, IW_EV_ADDR_LEN);
58
59 /* Remaining entries will be displayed in the order we provide them */
60
61 /* Add the ESSID */
62 iwe.cmd = SIOCGIWESSID;
63 iwe.u.data.flags = 1;
64 if (network->flags & NETWORK_EMPTY_ESSID) {
65 iwe.u.data.length = sizeof("<hidden>");
66 start = iwe_stream_add_point(start, stop, &iwe, "<hidden>");
67 } else {
68 iwe.u.data.length = min(network->ssid_len, (u8)32);
69 start = iwe_stream_add_point(start, stop, &iwe, network->ssid);
70 }
71
72 /* Add the protocol name */
73 iwe.cmd = SIOCGIWNAME;
74 snprintf(iwe.u.name, IFNAMSIZ, "IEEE 802.11%s", ieee80211_modes[network->mode]);
75 start = iwe_stream_add_event(start, stop, &iwe, IW_EV_CHAR_LEN);
76
77 /* Add mode */
78 iwe.cmd = SIOCGIWMODE;
79 if (network->capability &
80 (WLAN_CAPABILITY_BSS | WLAN_CAPABILITY_IBSS)) {
81 if (network->capability & WLAN_CAPABILITY_BSS)
82 iwe.u.mode = IW_MODE_MASTER;
83 else
84 iwe.u.mode = IW_MODE_ADHOC;
85
86 start = iwe_stream_add_event(start, stop, &iwe,
87 IW_EV_UINT_LEN);
88 }
89
90 /* Add frequency/channel */
91 iwe.cmd = SIOCGIWFREQ;
92/* iwe.u.freq.m = ieee80211_frequency(network->channel, network->mode);
93 iwe.u.freq.e = 3; */
94 iwe.u.freq.m = network->channel;
95 iwe.u.freq.e = 0;
96 iwe.u.freq.i = 0;
97 start = iwe_stream_add_event(start, stop, &iwe, IW_EV_FREQ_LEN);
98
99 /* Add encryption capability */
100 iwe.cmd = SIOCGIWENCODE;
101 if (network->capability & WLAN_CAPABILITY_PRIVACY)
102 iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
103 else
104 iwe.u.data.flags = IW_ENCODE_DISABLED;
105 iwe.u.data.length = 0;
106 start = iwe_stream_add_point(start, stop, &iwe, network->ssid);
107
108 /* Add basic and extended rates */
109 max_rate = 0;
110 p = custom;
111 p += snprintf(p, MAX_CUSTOM_LEN - (p - custom), " Rates (Mb/s): ");
112 for (i = 0, j = 0; i < network->rates_len; ) {
113 if (j < network->rates_ex_len &&
114 ((network->rates_ex[j] & 0x7F) <
115 (network->rates[i] & 0x7F)))
116 rate = network->rates_ex[j++] & 0x7F;
117 else
118 rate = network->rates[i++] & 0x7F;
119 if (rate > max_rate)
120 max_rate = rate;
121 p += snprintf(p, MAX_CUSTOM_LEN - (p - custom),
122 "%d%s ", rate >> 1, (rate & 1) ? ".5" : "");
123 }
124 for (; j < network->rates_ex_len; j++) {
125 rate = network->rates_ex[j] & 0x7F;
126 p += snprintf(p, MAX_CUSTOM_LEN - (p - custom),
127 "%d%s ", rate >> 1, (rate & 1) ? ".5" : "");
128 if (rate > max_rate)
129 max_rate = rate;
130 }
131
132 iwe.cmd = SIOCGIWRATE;
133 iwe.u.bitrate.fixed = iwe.u.bitrate.disabled = 0;
134 iwe.u.bitrate.value = max_rate * 500000;
135 start = iwe_stream_add_event(start, stop, &iwe,
136 IW_EV_PARAM_LEN);
137
138 iwe.cmd = IWEVCUSTOM;
139 iwe.u.data.length = p - custom;
140 if (iwe.u.data.length)
141 start = iwe_stream_add_point(start, stop, &iwe, custom);
142
143 /* Add quality statistics */
144 /* TODO: Fix these values... */
145 iwe.cmd = IWEVQUAL;
146 iwe.u.qual.qual = network->stats.signal;
147 iwe.u.qual.level = network->stats.rssi;
148 iwe.u.qual.noise = network->stats.noise;
149 iwe.u.qual.updated = network->stats.mask & IEEE80211_STATMASK_WEMASK;
150 if (!(network->stats.mask & IEEE80211_STATMASK_RSSI))
151 iwe.u.qual.updated |= IW_QUAL_LEVEL_INVALID;
152 if (!(network->stats.mask & IEEE80211_STATMASK_NOISE))
153 iwe.u.qual.updated |= IW_QUAL_NOISE_INVALID;
154 if (!(network->stats.mask & IEEE80211_STATMASK_SIGNAL))
155 iwe.u.qual.updated |= IW_QUAL_QUAL_INVALID;
156
157 start = iwe_stream_add_event(start, stop, &iwe, IW_EV_QUAL_LEN);
158
159 iwe.cmd = IWEVCUSTOM;
160 p = custom;
161
162 iwe.u.data.length = p - custom;
163 if (iwe.u.data.length)
164 start = iwe_stream_add_point(start, stop, &iwe, custom);
165
166 if (ieee->wpa_enabled && network->wpa_ie_len){
167 char buf[MAX_WPA_IE_LEN * 2 + 30];
168
169 u8 *p = buf;
170 p += sprintf(p, "wpa_ie=");
171 for (i = 0; i < network->wpa_ie_len; i++) {
172 p += sprintf(p, "%02x", network->wpa_ie[i]);
173 }
174
175 memset(&iwe, 0, sizeof(iwe));
176 iwe.cmd = IWEVCUSTOM;
177 iwe.u.data.length = strlen(buf);
178 start = iwe_stream_add_point(start, stop, &iwe, buf);
179 }
180
181 if (ieee->wpa_enabled && network->rsn_ie_len){
182 char buf[MAX_WPA_IE_LEN * 2 + 30];
183
184 u8 *p = buf;
185 p += sprintf(p, "rsn_ie=");
186 for (i = 0; i < network->rsn_ie_len; i++) {
187 p += sprintf(p, "%02x", network->rsn_ie[i]);
188 }
189
190 memset(&iwe, 0, sizeof(iwe));
191 iwe.cmd = IWEVCUSTOM;
192 iwe.u.data.length = strlen(buf);
193 start = iwe_stream_add_point(start, stop, &iwe, buf);
194 }
195
196 /* Add EXTRA: Age to display seconds since last beacon/probe response
197 * for given network. */
198 iwe.cmd = IWEVCUSTOM;
199 p = custom;
200 p += snprintf(p, MAX_CUSTOM_LEN - (p - custom),
201 " Last beacon: %lums ago", (jiffies - network->last_scanned) / (HZ / 100));
202 iwe.u.data.length = p - custom;
203 if (iwe.u.data.length)
204 start = iwe_stream_add_point(start, stop, &iwe, custom);
205
206
207 return start;
208}
209
210int ieee80211_wx_get_scan(struct ieee80211_device *ieee,
211 struct iw_request_info *info,
212 union iwreq_data *wrqu, char *extra)
213{
214 struct ieee80211_network *network;
215 unsigned long flags;
216
217 char *ev = extra;
218 char *stop = ev + IW_SCAN_MAX_DATA;
219 int i = 0;
220
221 IEEE80211_DEBUG_WX("Getting scan\n");
222
223 spin_lock_irqsave(&ieee->lock, flags);
224
225 list_for_each_entry(network, &ieee->network_list, list) {
226 i++;
227 if (ieee->scan_age == 0 ||
228 time_after(network->last_scanned + ieee->scan_age, jiffies))
229 ev = ipw2100_translate_scan(ieee, ev, stop, network);
230 else
231 IEEE80211_DEBUG_SCAN(
232 "Not showing network '%s ("
233 MAC_FMT ")' due to age (%lums).\n",
234 escape_essid(network->ssid,
235 network->ssid_len),
236 MAC_ARG(network->bssid),
237 (jiffies - network->last_scanned) / (HZ / 100));
238 }
239
240 spin_unlock_irqrestore(&ieee->lock, flags);
241
242 wrqu->data.length = ev - extra;
243 wrqu->data.flags = 0;
244
245 IEEE80211_DEBUG_WX("exit: %d networks returned.\n", i);
246
247 return 0;
248}
249
250int ieee80211_wx_set_encode(struct ieee80211_device *ieee,
251 struct iw_request_info *info,
252 union iwreq_data *wrqu, char *keybuf)
253{
254 struct iw_point *erq = &(wrqu->encoding);
255 struct net_device *dev = ieee->dev;
256 struct ieee80211_security sec = {
257 .flags = 0
258 };
259 int i, key, key_provided, len;
260 struct ieee80211_crypt_data **crypt;
261
262 IEEE80211_DEBUG_WX("SET_ENCODE\n");
263
264 key = erq->flags & IW_ENCODE_INDEX;
265 if (key) {
266 if (key > WEP_KEYS)
267 return -EINVAL;
268 key--;
269 key_provided = 1;
270 } else {
271 key_provided = 0;
272 key = ieee->tx_keyidx;
273 }
274
275 IEEE80211_DEBUG_WX("Key: %d [%s]\n", key, key_provided ?
276 "provided" : "default");
277
278 crypt = &ieee->crypt[key];
279
280 if (erq->flags & IW_ENCODE_DISABLED) {
281 if (key_provided && *crypt) {
282 IEEE80211_DEBUG_WX("Disabling encryption on key %d.\n",
283 key);
284 ieee80211_crypt_delayed_deinit(ieee, crypt);
285 } else
286 IEEE80211_DEBUG_WX("Disabling encryption.\n");
287
288 /* Check all the keys to see if any are still configured,
289 * and if no key index was provided, de-init them all */
290 for (i = 0; i < WEP_KEYS; i++) {
291 if (ieee->crypt[i] != NULL) {
292 if (key_provided)
293 break;
294 ieee80211_crypt_delayed_deinit(
295 ieee, &ieee->crypt[i]);
296 }
297 }
298
299 if (i == WEP_KEYS) {
300 sec.enabled = 0;
301 sec.level = SEC_LEVEL_0;
302 sec.flags |= SEC_ENABLED | SEC_LEVEL;
303 }
304
305 goto done;
306 }
307
308
309
310 sec.enabled = 1;
311 sec.flags |= SEC_ENABLED;
312
313 if (*crypt != NULL && (*crypt)->ops != NULL &&
314 strcmp((*crypt)->ops->name, "WEP") != 0) {
315 /* changing to use WEP; deinit previously used algorithm
316 * on this key */
317 ieee80211_crypt_delayed_deinit(ieee, crypt);
318 }
319
320 if (*crypt == NULL) {
321 struct ieee80211_crypt_data *new_crypt;
322
323 /* take WEP into use */
324 new_crypt = kmalloc(sizeof(struct ieee80211_crypt_data),
325 GFP_KERNEL);
326 if (new_crypt == NULL)
327 return -ENOMEM;
328 memset(new_crypt, 0, sizeof(struct ieee80211_crypt_data));
329 new_crypt->ops = ieee80211_get_crypto_ops("WEP");
330 if (!new_crypt->ops) {
331 request_module("ieee80211_crypt_wep");
332 new_crypt->ops = ieee80211_get_crypto_ops("WEP");
333 }
334
335 if (new_crypt->ops && try_module_get(new_crypt->ops->owner))
336 new_crypt->priv = new_crypt->ops->init(key);
337
338 if (!new_crypt->ops || !new_crypt->priv) {
339 kfree(new_crypt);
340 new_crypt = NULL;
341
342 printk(KERN_WARNING "%s: could not initialize WEP: "
343 "load module ieee80211_crypt_wep\n",
344 dev->name);
345 return -EOPNOTSUPP;
346 }
347 *crypt = new_crypt;
348 }
349
350 /* If a new key was provided, set it up */
351 if (erq->length > 0) {
352 len = erq->length <= 5 ? 5 : 13;
353 memcpy(sec.keys[key], keybuf, erq->length);
354 if (len > erq->length)
355 memset(sec.keys[key] + erq->length, 0,
356 len - erq->length);
357 IEEE80211_DEBUG_WX("Setting key %d to '%s' (%d:%d bytes)\n",
358 key, escape_essid(sec.keys[key], len),
359 erq->length, len);
360 sec.key_sizes[key] = len;
361 (*crypt)->ops->set_key(sec.keys[key], len, NULL,
362 (*crypt)->priv);
363 sec.flags |= (1 << key);
364 /* This ensures a key will be activated if no key is
365		 * explicitly set */
366 if (key == sec.active_key)
367 sec.flags |= SEC_ACTIVE_KEY;
368 } else {
369 len = (*crypt)->ops->get_key(sec.keys[key], WEP_KEY_LEN,
370 NULL, (*crypt)->priv);
371 if (len == 0) {
372 /* Set a default key of all 0 */
373 IEEE80211_DEBUG_WX("Setting key %d to all zero.\n",
374 key);
375 memset(sec.keys[key], 0, 13);
376 (*crypt)->ops->set_key(sec.keys[key], 13, NULL,
377 (*crypt)->priv);
378 sec.key_sizes[key] = 13;
379 sec.flags |= (1 << key);
380 }
381
382 /* No key data - just set the default TX key index */
383 if (key_provided) {
384 IEEE80211_DEBUG_WX(
385 "Setting key %d to default Tx key.\n", key);
386 ieee->tx_keyidx = key;
387 sec.active_key = key;
388 sec.flags |= SEC_ACTIVE_KEY;
389 }
390 }
391
392 done:
393 ieee->open_wep = !(erq->flags & IW_ENCODE_RESTRICTED);
394 sec.auth_mode = ieee->open_wep ? WLAN_AUTH_OPEN : WLAN_AUTH_SHARED_KEY;
395 sec.flags |= SEC_AUTH_MODE;
396 IEEE80211_DEBUG_WX("Auth: %s\n", sec.auth_mode == WLAN_AUTH_OPEN ?
397 "OPEN" : "SHARED KEY");
398
399 /* For now we just support WEP, so only set that security level...
400 * TODO: When WPA is added this is one place that needs to change */
401 sec.flags |= SEC_LEVEL;
402 sec.level = SEC_LEVEL_1; /* 40 and 104 bit WEP */
403
404 if (ieee->set_security)
405 ieee->set_security(dev, &sec);
406
407	/* Do not reset the port if the card is in Managed mode, since resetting
408	 * will generate a new IEEE 802.11 authentication, which may end up
409	 * looping with IEEE 802.1X.  If your hardware requires a reset after WEP
410	 * configuration (for example, Prism2), implement reset_port in the
411	 * callback structure used to initialize the 802.11 stack. */
412 if (ieee->reset_on_keychange &&
413 ieee->iw_mode != IW_MODE_INFRA &&
414 ieee->reset_port && ieee->reset_port(dev)) {
415 printk(KERN_DEBUG "%s: reset_port failed\n", dev->name);
416 return -EINVAL;
417 }
418 return 0;
419}
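
As the comment before the return path explains, set_encode only calls
reset_port() when the driver has set reset_on_keychange and the interface is
not in Managed mode. A hedged sketch of how a Prism2-class driver might wire up
these hooks during initialization (all foo_ names are illustrative; only the
set_security, reset_on_keychange and reset_port members come from this patch):

    static int foo_reset_port(struct net_device *dev)
    {
            /* Hypothetical hardware-specific port reset; return 0 on
             * success so set_encode does not fail with -EINVAL. */
            return foo_hw_reset(netdev_priv(dev));
    }

    /* ... during driver initialization, once priv->ieee is set up: */
    priv->ieee->set_security = foo_set_security;
    priv->ieee->reset_on_keychange = 1;
    priv->ieee->reset_port = foo_reset_port;
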
420
421int ieee80211_wx_get_encode(struct ieee80211_device *ieee,
422 struct iw_request_info *info,
423 union iwreq_data *wrqu, char *keybuf)
424{
425 struct iw_point *erq = &(wrqu->encoding);
426 int len, key;
427 struct ieee80211_crypt_data *crypt;
428
429 IEEE80211_DEBUG_WX("GET_ENCODE\n");
430
431 key = erq->flags & IW_ENCODE_INDEX;
432 if (key) {
433 if (key > WEP_KEYS)
434 return -EINVAL;
435 key--;
436 } else
437 key = ieee->tx_keyidx;
438
439 crypt = ieee->crypt[key];
440 erq->flags = key + 1;
441
442 if (crypt == NULL || crypt->ops == NULL) {
443 erq->length = 0;
444 erq->flags |= IW_ENCODE_DISABLED;
445 return 0;
446 }
447
448 if (strcmp(crypt->ops->name, "WEP") != 0) {
449 /* only WEP is supported with wireless extensions, so just
450 * report that encryption is used */
451 erq->length = 0;
452 erq->flags |= IW_ENCODE_ENABLED;
453 return 0;
454 }
455
456 len = crypt->ops->get_key(keybuf, WEP_KEY_LEN, NULL, crypt->priv);
457 erq->length = (len >= 0 ? len : 0);
458
459 erq->flags |= IW_ENCODE_ENABLED;
460
461 if (ieee->open_wep)
462 erq->flags |= IW_ENCODE_OPEN;
463 else
464 erq->flags |= IW_ENCODE_RESTRICTED;
465
466 return 0;
467}
468
469EXPORT_SYMBOL(ieee80211_wx_get_scan);
470EXPORT_SYMBOL(ieee80211_wx_set_encode);
471EXPORT_SYMBOL(ieee80211_wx_get_encode);
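
The three exports above are the Wireless Extensions surface that drivers hook
into their handler tables. Continuing the hypothetical foo driver, with
foo_wx_set_encode and foo_wx_get_encode wrappers written exactly like
foo_wx_get_scan earlier, registration might look like this (a sketch, not taken
from this patch):

    static iw_handler foo_wx_handlers[] = {
            /* ... other standard handlers elided ... */
            [SIOCGIWSCAN   - SIOCIWFIRST] = foo_wx_get_scan,
            [SIOCSIWENCODE - SIOCIWFIRST] = foo_wx_set_encode,
            [SIOCGIWENCODE - SIOCIWFIRST] = foo_wx_get_encode,
    };

    static struct iw_handler_def foo_wx_handler_def = {
            .standard     = foo_wx_handlers,
            .num_standard = ARRAY_SIZE(foo_wx_handlers),
    };

    /* In foo's probe/init path: */
    dev->wireless_handlers = &foo_wx_handler_def;
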