 Documentation/networking/LICENSE.qla3xxx        |   46
 MAINTAINERS                                     |   27
 drivers/isdn/i4l/Kconfig                        |    1
 drivers/net/8390.c                              |   10
 drivers/net/Kconfig                             |   17
 drivers/net/Makefile                            |   11
 drivers/net/forcedeth.c                         |  250
 drivers/net/qla3xxx.c                           | 3537
 drivers/net/qla3xxx.h                           | 1194
 drivers/net/sky2.c                              |    1
 drivers/net/slhc.c                              |   28
 drivers/net/tulip/uli526x.c                     |   12
 drivers/net/wireless/airo.c                     |   12
 drivers/net/wireless/bcm43xx/bcm43xx.h          |   64
 drivers/net/wireless/bcm43xx/bcm43xx_debugfs.c  |   34
 drivers/net/wireless/bcm43xx/bcm43xx_leds.c     |   10
 drivers/net/wireless/bcm43xx/bcm43xx_main.c     |   64
 drivers/net/wireless/bcm43xx/bcm43xx_phy.c      |   33
 drivers/net/wireless/bcm43xx/bcm43xx_pio.c      |    4
 drivers/net/wireless/bcm43xx/bcm43xx_sysfs.c    |   34
 drivers/net/wireless/bcm43xx/bcm43xx_wx.c       |  162
 drivers/net/wireless/ipw2200.c                  |   29
 drivers/net/wireless/prism54/isl_ioctl.c        |  573
 drivers/net/wireless/prism54/isl_ioctl.h        |    6
 drivers/net/wireless/prism54/islpci_dev.c       |    4
 drivers/net/wireless/prism54/islpci_dev.h       |    2
 drivers/net/wireless/zd1211rw/zd_netdev.c       |   17
 drivers/net/wireless/zd1211rw/zd_usb.c          |    2
 include/net/ieee80211.h                         |    9
 include/net/ieee80211softmac.h                  |   60
 net/ieee80211/ieee80211_rx.c                    |   39
 net/ieee80211/softmac/ieee80211softmac_assoc.c  |   21
 net/ieee80211/softmac/ieee80211softmac_io.c     |   14
 net/ieee80211/softmac/ieee80211softmac_module.c |   90
 net/ieee80211/softmac/ieee80211softmac_priv.h   |    8
 35 files changed, 5936 insertions(+), 489 deletions(-)
diff --git a/Documentation/networking/LICENSE.qla3xxx b/Documentation/networking/LICENSE.qla3xxx
new file mode 100644
index 000000000000..2f2077e34d81
--- /dev/null
+++ b/Documentation/networking/LICENSE.qla3xxx
@@ -0,0 +1,46 @@
+Copyright (c) 2003-2006 QLogic Corporation
+QLogic Linux Networking HBA Driver
+
+This program includes a device driver for Linux 2.6 that may be
+distributed with QLogic hardware specific firmware binary file.
+You may modify and redistribute the device driver code under the
+GNU General Public License as published by the Free Software
+Foundation (version 2 or a later version).
+
+You may redistribute the hardware specific firmware binary file
+under the following terms:
+
+	1. Redistribution of source code (only if applicable),
+	   must retain the above copyright notice, this list of
+	   conditions and the following disclaimer.
+
+	2. Redistribution in binary form must reproduce the above
+	   copyright notice, this list of conditions and the
+	   following disclaimer in the documentation and/or other
+	   materials provided with the distribution.
+
+	3. The name of QLogic Corporation may not be used to
+	   endorse or promote products derived from this software
+	   without specific prior written permission
+
+REGARDLESS OF WHAT LICENSING MECHANISM IS USED OR APPLICABLE,
+THIS PROGRAM IS PROVIDED BY QLOGIC CORPORATION "AS IS'' AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR
+BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+USER ACKNOWLEDGES AND AGREES THAT USE OF THIS PROGRAM WILL NOT
+CREATE OR GIVE GROUNDS FOR A LICENSE BY IMPLICATION, ESTOPPEL, OR
+OTHERWISE IN ANY INTELLECTUAL PROPERTY RIGHTS (PATENT, COPYRIGHT,
+TRADE SECRET, MASK WORK, OR OTHER PROPRIETARY RIGHT) EMBODIED IN
+ANY OTHER QLOGIC HARDWARE OR SOFTWARE EITHER SOLELY OR IN
+COMBINATION WITH THIS PROGRAM.
+
diff --git a/MAINTAINERS b/MAINTAINERS
index 32aa30d1504a..fb20abdb805d 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2347,6 +2347,12 @@ M: linux-driver@qlogic.com
 L:	linux-scsi@vger.kernel.org
 S:	Supported
 
+QLOGIC QLA3XXX NETWORK DRIVER
+P:	Ron Mercer
+M:	linux-driver@qlogic.com
+L:	netdev@vger.kernel.org
+S:	Supported
+
 QNX4 FILESYSTEM
 P:	Anders Larsen
 M:	al@alarsen.net
@@ -2597,6 +2603,18 @@ P: Nicolas Pitre
 M:	nico@cam.org
 S:	Maintained
 
+SOFTMAC LAYER (IEEE 802.11)
+P:	Johannes Berg
+M:	johannes@sipsolutions.net
+P:	Joe Jezak
+M:	josejx@gentoo.org
+P:	Daniel Drake
+M:	dsd@gentoo.org
+W:	http://softmac.sipsolutions.net/
+L:	softmac-dev@sipsolutions.net
+L:	netdev@vger.kernel.org
+S:	Maintained
+
 SOFTWARE RAID (Multiple Disks) SUPPORT
 P:	Ingo Molnar
 M:	mingo@redhat.com
@@ -3305,6 +3323,15 @@ W: http://www.qsl.net/dl1bke/
 L:	linux-hams@vger.kernel.org
 S:	Maintained
 
+ZD1211RW WIRELESS DRIVER
+P:	Daniel Drake
+M:	dsd@gentoo.org
+P:	Ulrich Kunitz
+M:	kune@deine-taler.de
+W:	http://zd1211.ath.cx/wiki/DriverRewrite
+L:	zd1211-devs@lists.sourceforge.net (subscribers-only)
+S:	Maintained
+
 ZF MACHZ WATCHDOG
 P:	Fernando Fuganti
 M:	fuganti@netbank.com.br
diff --git a/drivers/isdn/i4l/Kconfig b/drivers/isdn/i4l/Kconfig
index a4f7288a1fc8..3ef567b99c74 100644
--- a/drivers/isdn/i4l/Kconfig
+++ b/drivers/isdn/i4l/Kconfig
@@ -5,6 +5,7 @@
 config ISDN_PPP
 	bool "Support synchronous PPP"
 	depends on INET
+	select SLHC
 	help
 	  Over digital connections such as ISDN, there is no need to
 	  synchronize sender and recipient's clocks with start and stop bits
diff --git a/drivers/net/8390.c b/drivers/net/8390.c
index d2935ae39814..3eb7048684a6 100644
--- a/drivers/net/8390.c
+++ b/drivers/net/8390.c
@@ -299,7 +299,7 @@ static int ei_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	 * Slow phase with lock held.
 	 */
 
-	disable_irq_nosync(dev->irq);
+	disable_irq_nosync_lockdep(dev->irq);
 
 	spin_lock(&ei_local->page_lock);
 
@@ -338,7 +338,7 @@ static int ei_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		netif_stop_queue(dev);
 		outb_p(ENISR_ALL, e8390_base + EN0_IMR);
 		spin_unlock(&ei_local->page_lock);
-		enable_irq(dev->irq);
+		enable_irq_lockdep(dev->irq);
 		ei_local->stat.tx_errors++;
 		return 1;
 	}
@@ -379,7 +379,7 @@ static int ei_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	outb_p(ENISR_ALL, e8390_base + EN0_IMR);
 
 	spin_unlock(&ei_local->page_lock);
-	enable_irq(dev->irq);
+	enable_irq_lockdep(dev->irq);
 
 	dev_kfree_skb (skb);
 	ei_local->stat.tx_bytes += send_length;
@@ -505,9 +505,9 @@ irqreturn_t ei_interrupt(int irq, void *dev_id, struct pt_regs * regs)
 #ifdef CONFIG_NET_POLL_CONTROLLER
 void ei_poll(struct net_device *dev)
 {
-	disable_irq(dev->irq);
+	disable_irq_lockdep(dev->irq);
 	ei_interrupt(dev->irq, dev, NULL);
-	enable_irq(dev->irq);
+	enable_irq_lockdep(dev->irq);
 }
 #endif
 
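
The 8390.c hunks above switch the transmit and netpoll paths from plain disable_irq()/enable_irq() to their _lockdep variants, so lockdep does not misread the deliberate hard-IRQ state change around the page lock as a violation. A condensed sketch of the resulting pattern follows (illustrative only; my_priv and my_start_xmit are hypothetical names, while the IRQ helpers are the real ones used in the hunks):

#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>

/* Hypothetical private data; ei_local->page_lock plays this role above. */
struct my_priv {
	spinlock_t page_lock;
};

static int my_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct my_priv *priv = netdev_priv(dev);

	/* Mask our IRQ without waiting for a running handler, telling
	 * lockdep the hard-IRQ state change is intentional. */
	disable_irq_nosync_lockdep(dev->irq);
	spin_lock(&priv->page_lock);

	/* ... program the transmit registers, as ei_start_xmit() does ... */

	spin_unlock(&priv->page_lock);
	enable_irq_lockdep(dev->irq);
	dev_kfree_skb(skb);
	return 0;
}
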
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 39189903e355..3a0d80b28503 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2249,6 +2249,15 @@ config MV643XX_ETH_2
 	  This enables support for Port 2 of the Marvell MV643XX Gigabit
 	  Ethernet.
 
+config QLA3XXX
+	tristate "QLogic QLA3XXX Network Driver Support"
+	depends on PCI
+	help
+	  This driver supports QLogic ISP3XXX gigabit Ethernet cards.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called qla3xxx.
+
 endmenu
 
 #
@@ -2509,6 +2518,7 @@ config PLIP
 
 config PPP
 	tristate "PPP (point-to-point protocol) support"
+	select SLHC
 	---help---
 	  PPP (Point to Point Protocol) is a newer and better SLIP. It serves
 	  the same purpose: sending Internet traffic over telephone (and other
@@ -2689,6 +2699,7 @@ config SLIP
 config SLIP_COMPRESSED
 	bool "CSLIP compressed headers"
 	depends on SLIP
+	select SLHC
 	---help---
 	  This protocol is faster than SLIP because it uses compression on the
 	  TCP/IP headers (not on the data itself), but it has to be supported
@@ -2701,6 +2712,12 @@ config SLIP_COMPRESSED
 	  <http://www.tldp.org/docs.html#howto>, explains how to configure
 	  CSLIP. This won't enlarge your kernel.
 
+config SLHC
+	tristate
+	help
+	  This option enables Van Jacobson serial line header compression
+	  routines.
+
 config SLIP_SMART
 	bool "Keepalive and linefill"
 	depends on SLIP
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index c91e95126f78..5e91c3562ad2 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -2,10 +2,6 @@
 # Makefile for the Linux network (ethercard) device drivers.
 #
 
-ifeq ($(CONFIG_ISDN_PPP),y)
-  obj-$(CONFIG_ISDN) += slhc.o
-endif
-
 obj-$(CONFIG_E1000) += e1000/
 obj-$(CONFIG_IBM_EMAC) += ibm_emac/
 obj-$(CONFIG_IXGB) += ixgb/
@@ -110,8 +106,9 @@ obj-$(CONFIG_FORCEDETH) += forcedeth.o
 obj-$(CONFIG_NE_H8300) += ne-h8300.o 8390.o
 
 obj-$(CONFIG_MV643XX_ETH) += mv643xx_eth.o
+obj-$(CONFIG_QLA3XXX) += qla3xxx.o
 
-obj-$(CONFIG_PPP) += ppp_generic.o slhc.o
+obj-$(CONFIG_PPP) += ppp_generic.o
 obj-$(CONFIG_PPP_ASYNC) += ppp_async.o
 obj-$(CONFIG_PPP_SYNC_TTY) += ppp_synctty.o
 obj-$(CONFIG_PPP_DEFLATE) += ppp_deflate.o
@@ -120,9 +117,7 @@ obj-$(CONFIG_PPP_MPPE) += ppp_mppe.o
 obj-$(CONFIG_PPPOE) += pppox.o pppoe.o
 
 obj-$(CONFIG_SLIP) += slip.o
-ifeq ($(CONFIG_SLIP_COMPRESSED),y)
-  obj-$(CONFIG_SLIP) += slhc.o
-endif
+obj-$(CONFIG_SLHC) += slhc.o
 
 obj-$(CONFIG_DUMMY) += dummy.o
 obj-$(CONFIG_IFB) += ifb.o
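
Taken together, the Kconfig and Makefile hunks above replace the two ad-hoc ifeq blocks with a single hidden CONFIG_SLHC symbol: ISDN_PPP, PPP and SLIP_COMPRESSED each select SLHC, and slhc.o is built exactly once via obj-$(CONFIG_SLHC). For orientation, here is a minimal sketch of how a consumer of the SLHC code allocates the Van Jacobson compression state it provides (assuming the 2.6-era interface declared in <net/slhc_vj.h>; the vj_* wrappers and VJ_SLOTS are hypothetical):

#include <linux/kernel.h>
#include <net/slhc_vj.h>	/* struct slcompress, slhc_init(), slhc_free() */

#define VJ_SLOTS 16		/* one VJ state slot per compressed connection */

/* Allocate per-link VJ compression state, as a SLHC consumer would. */
static struct slcompress *vj_alloc(void)
{
	/* rx slots, tx slots; NULL on allocation failure in this era. */
	return slhc_init(VJ_SLOTS, VJ_SLOTS);
}

static void vj_release(struct slcompress *comp)
{
	if (comp)
		slhc_free(comp);
}
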
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 11b8f1b43dd5..6fc6d1b05f1e 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -381,21 +381,21 @@ enum {
 
 /* Big endian: should work, but is untested */
 struct ring_desc {
-	u32 PacketBuffer;
-	u32 FlagLen;
+	__le32 buf;
+	__le32 flaglen;
 };
 
 struct ring_desc_ex {
-	u32 PacketBufferHigh;
-	u32 PacketBufferLow;
-	u32 TxVlan;
-	u32 FlagLen;
+	__le32 bufhigh;
+	__le32 buflow;
+	__le32 txvlan;
+	__le32 flaglen;
 };
 
-typedef union _ring_type {
+union ring_type {
 	struct ring_desc* orig;
 	struct ring_desc_ex* ex;
-} ring_type;
+};
 
 #define FLAG_MASK_V1 0xffff0000
 #define FLAG_MASK_V2 0xffffc000
@@ -653,8 +653,8 @@ static const struct nv_ethtool_str nv_etests_str[] = {
 };
 
 struct register_test {
-	u32 reg;
-	u32 mask;
+	__le32 reg;
+	__le32 mask;
 };
 
 static const struct register_test nv_registers_test[] = {
@@ -713,7 +713,7 @@ struct fe_priv {
 	/* rx specific fields.
 	 * Locking: Within irq hander or disable_irq+spin_lock(&np->lock);
 	 */
-	ring_type rx_ring;
+	union ring_type rx_ring;
 	unsigned int cur_rx, refill_rx;
 	struct sk_buff **rx_skbuff;
 	dma_addr_t *rx_dma;
@@ -733,7 +733,7 @@ struct fe_priv {
 	/*
 	 * tx specific fields.
 	 */
-	ring_type tx_ring;
+	union ring_type tx_ring;
 	unsigned int next_tx, nic_tx;
 	struct sk_buff **tx_skbuff;
 	dma_addr_t *tx_dma;
@@ -826,13 +826,13 @@ static inline void pci_push(u8 __iomem *base)
 
 static inline u32 nv_descr_getlength(struct ring_desc *prd, u32 v)
 {
-	return le32_to_cpu(prd->FlagLen)
+	return le32_to_cpu(prd->flaglen)
 		& ((v == DESC_VER_1) ? LEN_MASK_V1 : LEN_MASK_V2);
 }
 
 static inline u32 nv_descr_getlength_ex(struct ring_desc_ex *prd, u32 v)
 {
-	return le32_to_cpu(prd->FlagLen) & LEN_MASK_V2;
+	return le32_to_cpu(prd->flaglen) & LEN_MASK_V2;
 }
 
 static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target,
@@ -885,7 +885,7 @@ static void free_rings(struct net_device *dev)
 	struct fe_priv *np = get_nvpriv(dev);
 
 	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
-		if(np->rx_ring.orig)
+		if (np->rx_ring.orig)
 			pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
 					    np->rx_ring.orig, np->ring_addr);
 	} else {
@@ -1258,14 +1258,14 @@ static int nv_alloc_rx(struct net_device *dev)
 		np->rx_dma[nr] = pci_map_single(np->pci_dev, skb->data,
 					skb->end-skb->data, PCI_DMA_FROMDEVICE);
 		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
-			np->rx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->rx_dma[nr]);
+			np->rx_ring.orig[nr].buf = cpu_to_le32(np->rx_dma[nr]);
 			wmb();
-			np->rx_ring.orig[nr].FlagLen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);
+			np->rx_ring.orig[nr].flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX_AVAIL);
 		} else {
-			np->rx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->rx_dma[nr]) >> 32;
-			np->rx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->rx_dma[nr]) & 0x0FFFFFFFF;
+			np->rx_ring.ex[nr].bufhigh = cpu_to_le64(np->rx_dma[nr]) >> 32;
+			np->rx_ring.ex[nr].buflow = cpu_to_le64(np->rx_dma[nr]) & 0x0FFFFFFFF;
 			wmb();
-			np->rx_ring.ex[nr].FlagLen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL);
+			np->rx_ring.ex[nr].flaglen = cpu_to_le32(np->rx_buf_sz | NV_RX2_AVAIL);
 		}
 		dprintk(KERN_DEBUG "%s: nv_alloc_rx: Packet %d marked as Available\n",
 			dev->name, refill_rx);
@@ -1315,9 +1315,9 @@ static void nv_init_rx(struct net_device *dev)
 	np->refill_rx = 0;
 	for (i = 0; i < np->rx_ring_size; i++)
 		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
-			np->rx_ring.orig[i].FlagLen = 0;
+			np->rx_ring.orig[i].flaglen = 0;
 		else
-			np->rx_ring.ex[i].FlagLen = 0;
+			np->rx_ring.ex[i].flaglen = 0;
 }
 
 static void nv_init_tx(struct net_device *dev)
@@ -1328,9 +1328,9 @@ static void nv_init_tx(struct net_device *dev)
 	np->next_tx = np->nic_tx = 0;
 	for (i = 0; i < np->tx_ring_size; i++) {
 		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
-			np->tx_ring.orig[i].FlagLen = 0;
+			np->tx_ring.orig[i].flaglen = 0;
 		else
-			np->tx_ring.ex[i].FlagLen = 0;
+			np->tx_ring.ex[i].flaglen = 0;
 		np->tx_skbuff[i] = NULL;
 		np->tx_dma[i] = 0;
 	}
@@ -1373,9 +1373,9 @@ static void nv_drain_tx(struct net_device *dev)
 
 	for (i = 0; i < np->tx_ring_size; i++) {
 		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
-			np->tx_ring.orig[i].FlagLen = 0;
+			np->tx_ring.orig[i].flaglen = 0;
 		else
-			np->tx_ring.ex[i].FlagLen = 0;
+			np->tx_ring.ex[i].flaglen = 0;
 		if (nv_release_txskb(dev, i))
 			np->stats.tx_dropped++;
 	}
@@ -1387,9 +1387,9 @@ static void nv_drain_rx(struct net_device *dev)
 	int i;
 	for (i = 0; i < np->rx_ring_size; i++) {
 		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
-			np->rx_ring.orig[i].FlagLen = 0;
+			np->rx_ring.orig[i].flaglen = 0;
 		else
-			np->rx_ring.ex[i].FlagLen = 0;
+			np->rx_ring.ex[i].flaglen = 0;
 		wmb();
 		if (np->rx_skbuff[i]) {
 			pci_unmap_single(np->pci_dev, np->rx_dma[i],
@@ -1450,17 +1450,17 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		np->tx_dma_len[nr] = bcnt;
 
 		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
-			np->tx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->tx_dma[nr]);
-			np->tx_ring.orig[nr].FlagLen = cpu_to_le32((bcnt-1) | tx_flags);
+			np->tx_ring.orig[nr].buf = cpu_to_le32(np->tx_dma[nr]);
+			np->tx_ring.orig[nr].flaglen = cpu_to_le32((bcnt-1) | tx_flags);
 		} else {
-			np->tx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->tx_dma[nr]) >> 32;
-			np->tx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF;
-			np->tx_ring.ex[nr].FlagLen = cpu_to_le32((bcnt-1) | tx_flags);
+			np->tx_ring.ex[nr].bufhigh = cpu_to_le64(np->tx_dma[nr]) >> 32;
+			np->tx_ring.ex[nr].buflow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF;
+			np->tx_ring.ex[nr].flaglen = cpu_to_le32((bcnt-1) | tx_flags);
 		}
 		tx_flags = np->tx_flags;
 		offset += bcnt;
 		size -= bcnt;
-	} while(size);
+	} while (size);
 
 	/* setup the fragments */
 	for (i = 0; i < fragments; i++) {
@@ -1477,12 +1477,12 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 			np->tx_dma_len[nr] = bcnt;
 
 			if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
-				np->tx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->tx_dma[nr]);
-				np->tx_ring.orig[nr].FlagLen = cpu_to_le32((bcnt-1) | tx_flags);
+				np->tx_ring.orig[nr].buf = cpu_to_le32(np->tx_dma[nr]);
+				np->tx_ring.orig[nr].flaglen = cpu_to_le32((bcnt-1) | tx_flags);
 			} else {
-				np->tx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->tx_dma[nr]) >> 32;
-				np->tx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF;
-				np->tx_ring.ex[nr].FlagLen = cpu_to_le32((bcnt-1) | tx_flags);
+				np->tx_ring.ex[nr].bufhigh = cpu_to_le64(np->tx_dma[nr]) >> 32;
+				np->tx_ring.ex[nr].buflow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF;
+				np->tx_ring.ex[nr].flaglen = cpu_to_le32((bcnt-1) | tx_flags);
 			}
 			offset += bcnt;
 			size -= bcnt;
@@ -1491,9 +1491,9 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	/* set last fragment flag */
 	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
-		np->tx_ring.orig[nr].FlagLen |= cpu_to_le32(tx_flags_extra);
+		np->tx_ring.orig[nr].flaglen |= cpu_to_le32(tx_flags_extra);
 	} else {
-		np->tx_ring.ex[nr].FlagLen |= cpu_to_le32(tx_flags_extra);
+		np->tx_ring.ex[nr].flaglen |= cpu_to_le32(tx_flags_extra);
 	}
 
 	np->tx_skbuff[nr] = skb;
@@ -1512,10 +1512,10 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	/* set tx flags */
 	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
-		np->tx_ring.orig[start_nr].FlagLen |= cpu_to_le32(tx_flags | tx_flags_extra);
+		np->tx_ring.orig[start_nr].flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
 	} else {
-		np->tx_ring.ex[start_nr].TxVlan = cpu_to_le32(tx_flags_vlan);
-		np->tx_ring.ex[start_nr].FlagLen |= cpu_to_le32(tx_flags | tx_flags_extra);
+		np->tx_ring.ex[start_nr].txvlan = cpu_to_le32(tx_flags_vlan);
+		np->tx_ring.ex[start_nr].flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
 	}
 
 	dprintk(KERN_DEBUG "%s: nv_start_xmit: packet %d (entries %d) queued for transmission. tx_flags_extra: %x\n",
@@ -1547,7 +1547,7 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 static void nv_tx_done(struct net_device *dev)
 {
 	struct fe_priv *np = netdev_priv(dev);
-	u32 Flags;
+	u32 flags;
 	unsigned int i;
 	struct sk_buff *skb;
 
@@ -1555,22 +1555,22 @@ static void nv_tx_done(struct net_device *dev)
 		i = np->nic_tx % np->tx_ring_size;
 
 		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
-			Flags = le32_to_cpu(np->tx_ring.orig[i].FlagLen);
+			flags = le32_to_cpu(np->tx_ring.orig[i].flaglen);
 		else
-			Flags = le32_to_cpu(np->tx_ring.ex[i].FlagLen);
+			flags = le32_to_cpu(np->tx_ring.ex[i].flaglen);
 
-		dprintk(KERN_DEBUG "%s: nv_tx_done: looking at packet %d, Flags 0x%x.\n",
-					dev->name, np->nic_tx, Flags);
-		if (Flags & NV_TX_VALID)
+		dprintk(KERN_DEBUG "%s: nv_tx_done: looking at packet %d, flags 0x%x.\n",
+					dev->name, np->nic_tx, flags);
+		if (flags & NV_TX_VALID)
 			break;
 		if (np->desc_ver == DESC_VER_1) {
-			if (Flags & NV_TX_LASTPACKET) {
+			if (flags & NV_TX_LASTPACKET) {
 				skb = np->tx_skbuff[i];
-				if (Flags & (NV_TX_RETRYERROR|NV_TX_CARRIERLOST|NV_TX_LATECOLLISION|
+				if (flags & (NV_TX_RETRYERROR|NV_TX_CARRIERLOST|NV_TX_LATECOLLISION|
 					     NV_TX_UNDERFLOW|NV_TX_ERROR)) {
-					if (Flags & NV_TX_UNDERFLOW)
+					if (flags & NV_TX_UNDERFLOW)
 						np->stats.tx_fifo_errors++;
-					if (Flags & NV_TX_CARRIERLOST)
+					if (flags & NV_TX_CARRIERLOST)
 						np->stats.tx_carrier_errors++;
 					np->stats.tx_errors++;
 				} else {
@@ -1579,13 +1579,13 @@ static void nv_tx_done(struct net_device *dev)
 				}
 			}
 		} else {
-			if (Flags & NV_TX2_LASTPACKET) {
+			if (flags & NV_TX2_LASTPACKET) {
 				skb = np->tx_skbuff[i];
-				if (Flags & (NV_TX2_RETRYERROR|NV_TX2_CARRIERLOST|NV_TX2_LATECOLLISION|
+				if (flags & (NV_TX2_RETRYERROR|NV_TX2_CARRIERLOST|NV_TX2_LATECOLLISION|
 					     NV_TX2_UNDERFLOW|NV_TX2_ERROR)) {
-					if (Flags & NV_TX2_UNDERFLOW)
+					if (flags & NV_TX2_UNDERFLOW)
 						np->stats.tx_fifo_errors++;
-					if (Flags & NV_TX2_CARRIERLOST)
+					if (flags & NV_TX2_CARRIERLOST)
 						np->stats.tx_carrier_errors++;
 					np->stats.tx_errors++;
 				} else {
@@ -1638,29 +1638,29 @@ static void nv_tx_timeout(struct net_device *dev)
 		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
 			printk(KERN_INFO "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n",
 			       i,
-			       le32_to_cpu(np->tx_ring.orig[i].PacketBuffer),
-			       le32_to_cpu(np->tx_ring.orig[i].FlagLen),
-			       le32_to_cpu(np->tx_ring.orig[i+1].PacketBuffer),
-			       le32_to_cpu(np->tx_ring.orig[i+1].FlagLen),
-			       le32_to_cpu(np->tx_ring.orig[i+2].PacketBuffer),
-			       le32_to_cpu(np->tx_ring.orig[i+2].FlagLen),
-			       le32_to_cpu(np->tx_ring.orig[i+3].PacketBuffer),
-			       le32_to_cpu(np->tx_ring.orig[i+3].FlagLen));
+			       le32_to_cpu(np->tx_ring.orig[i].buf),
+			       le32_to_cpu(np->tx_ring.orig[i].flaglen),
+			       le32_to_cpu(np->tx_ring.orig[i+1].buf),
+			       le32_to_cpu(np->tx_ring.orig[i+1].flaglen),
+			       le32_to_cpu(np->tx_ring.orig[i+2].buf),
+			       le32_to_cpu(np->tx_ring.orig[i+2].flaglen),
+			       le32_to_cpu(np->tx_ring.orig[i+3].buf),
+			       le32_to_cpu(np->tx_ring.orig[i+3].flaglen));
 		} else {
 			printk(KERN_INFO "%03x: %08x %08x %08x // %08x %08x %08x // %08x %08x %08x // %08x %08x %08x\n",
 			       i,
-			       le32_to_cpu(np->tx_ring.ex[i].PacketBufferHigh),
-			       le32_to_cpu(np->tx_ring.ex[i].PacketBufferLow),
-			       le32_to_cpu(np->tx_ring.ex[i].FlagLen),
-			       le32_to_cpu(np->tx_ring.ex[i+1].PacketBufferHigh),
-			       le32_to_cpu(np->tx_ring.ex[i+1].PacketBufferLow),
-			       le32_to_cpu(np->tx_ring.ex[i+1].FlagLen),
-			       le32_to_cpu(np->tx_ring.ex[i+2].PacketBufferHigh),
-			       le32_to_cpu(np->tx_ring.ex[i+2].PacketBufferLow),
-			       le32_to_cpu(np->tx_ring.ex[i+2].FlagLen),
-			       le32_to_cpu(np->tx_ring.ex[i+3].PacketBufferHigh),
-			       le32_to_cpu(np->tx_ring.ex[i+3].PacketBufferLow),
-			       le32_to_cpu(np->tx_ring.ex[i+3].FlagLen));
+			       le32_to_cpu(np->tx_ring.ex[i].bufhigh),
+			       le32_to_cpu(np->tx_ring.ex[i].buflow),
+			       le32_to_cpu(np->tx_ring.ex[i].flaglen),
+			       le32_to_cpu(np->tx_ring.ex[i+1].bufhigh),
+			       le32_to_cpu(np->tx_ring.ex[i+1].buflow),
+			       le32_to_cpu(np->tx_ring.ex[i+1].flaglen),
+			       le32_to_cpu(np->tx_ring.ex[i+2].bufhigh),
+			       le32_to_cpu(np->tx_ring.ex[i+2].buflow),
+			       le32_to_cpu(np->tx_ring.ex[i+2].flaglen),
+			       le32_to_cpu(np->tx_ring.ex[i+3].bufhigh),
+			       le32_to_cpu(np->tx_ring.ex[i+3].buflow),
+			       le32_to_cpu(np->tx_ring.ex[i+3].flaglen));
 		}
 	}
 }
@@ -1697,7 +1697,7 @@ static int nv_getlen(struct net_device *dev, void *packet, int datalen)
 	int protolen;	/* length as stored in the proto field */
 
 	/* 1) calculate len according to header */
-	if ( ((struct vlan_ethhdr *)packet)->h_vlan_proto == __constant_htons(ETH_P_8021Q)) {
+	if ( ((struct vlan_ethhdr *)packet)->h_vlan_proto == htons(ETH_P_8021Q)) {
 		protolen = ntohs( ((struct vlan_ethhdr *)packet)->h_vlan_encapsulated_proto );
 		hdrlen = VLAN_HLEN;
 	} else {
@@ -1743,7 +1743,7 @@ static int nv_getlen(struct net_device *dev, void *packet, int datalen)
 static void nv_rx_process(struct net_device *dev)
 {
 	struct fe_priv *np = netdev_priv(dev);
-	u32 Flags;
+	u32 flags;
 	u32 vlanflags = 0;
 
 	for (;;) {
@@ -1755,18 +1755,18 @@ static void nv_rx_process(struct net_device *dev)
 
 		i = np->cur_rx % np->rx_ring_size;
 		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
-			Flags = le32_to_cpu(np->rx_ring.orig[i].FlagLen);
+			flags = le32_to_cpu(np->rx_ring.orig[i].flaglen);
 			len = nv_descr_getlength(&np->rx_ring.orig[i], np->desc_ver);
 		} else {
-			Flags = le32_to_cpu(np->rx_ring.ex[i].FlagLen);
+			flags = le32_to_cpu(np->rx_ring.ex[i].flaglen);
 			len = nv_descr_getlength_ex(&np->rx_ring.ex[i], np->desc_ver);
-			vlanflags = le32_to_cpu(np->rx_ring.ex[i].PacketBufferLow);
+			vlanflags = le32_to_cpu(np->rx_ring.ex[i].buflow);
 		}
 
-		dprintk(KERN_DEBUG "%s: nv_rx_process: looking at packet %d, Flags 0x%x.\n",
-					dev->name, np->cur_rx, Flags);
+		dprintk(KERN_DEBUG "%s: nv_rx_process: looking at packet %d, flags 0x%x.\n",
+					dev->name, np->cur_rx, flags);
 
-		if (Flags & NV_RX_AVAIL)
+		if (flags & NV_RX_AVAIL)
 			break;	/* still owned by hardware, */
 
 		/*
@@ -1780,7 +1780,7 @@ static void nv_rx_process(struct net_device *dev)
 
 		{
 			int j;
-			dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).",Flags);
+			dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).",flags);
 			for (j=0; j<64; j++) {
 				if ((j%16) == 0)
 					dprintk("\n%03x:", j);
@@ -1790,30 +1790,30 @@ static void nv_rx_process(struct net_device *dev)
 		}
 		/* look at what we actually got: */
 		if (np->desc_ver == DESC_VER_1) {
-			if (!(Flags & NV_RX_DESCRIPTORVALID))
+			if (!(flags & NV_RX_DESCRIPTORVALID))
 				goto next_pkt;
 
-			if (Flags & NV_RX_ERROR) {
-				if (Flags & NV_RX_MISSEDFRAME) {
+			if (flags & NV_RX_ERROR) {
+				if (flags & NV_RX_MISSEDFRAME) {
 					np->stats.rx_missed_errors++;
 					np->stats.rx_errors++;
 					goto next_pkt;
 				}
-				if (Flags & (NV_RX_ERROR1|NV_RX_ERROR2|NV_RX_ERROR3)) {
+				if (flags & (NV_RX_ERROR1|NV_RX_ERROR2|NV_RX_ERROR3)) {
 					np->stats.rx_errors++;
 					goto next_pkt;
 				}
-				if (Flags & NV_RX_CRCERR) {
+				if (flags & NV_RX_CRCERR) {
 					np->stats.rx_crc_errors++;
 					np->stats.rx_errors++;
 					goto next_pkt;
 				}
-				if (Flags & NV_RX_OVERFLOW) {
+				if (flags & NV_RX_OVERFLOW) {
 					np->stats.rx_over_errors++;
 					np->stats.rx_errors++;
 					goto next_pkt;
 				}
-				if (Flags & NV_RX_ERROR4) {
+				if (flags & NV_RX_ERROR4) {
 					len = nv_getlen(dev, np->rx_skbuff[i]->data, len);
 					if (len < 0) {
 						np->stats.rx_errors++;
@@ -1821,32 +1821,32 @@ static void nv_rx_process(struct net_device *dev)
 					}
 				}
 				/* framing errors are soft errors. */
-				if (Flags & NV_RX_FRAMINGERR) {
-					if (Flags & NV_RX_SUBSTRACT1) {
+				if (flags & NV_RX_FRAMINGERR) {
+					if (flags & NV_RX_SUBSTRACT1) {
 						len--;
 					}
 				}
 			}
 		} else {
-			if (!(Flags & NV_RX2_DESCRIPTORVALID))
+			if (!(flags & NV_RX2_DESCRIPTORVALID))
 				goto next_pkt;
 
-			if (Flags & NV_RX2_ERROR) {
-				if (Flags & (NV_RX2_ERROR1|NV_RX2_ERROR2|NV_RX2_ERROR3)) {
+			if (flags & NV_RX2_ERROR) {
+				if (flags & (NV_RX2_ERROR1|NV_RX2_ERROR2|NV_RX2_ERROR3)) {
 					np->stats.rx_errors++;
 					goto next_pkt;
 				}
-				if (Flags & NV_RX2_CRCERR) {
+				if (flags & NV_RX2_CRCERR) {
 					np->stats.rx_crc_errors++;
 					np->stats.rx_errors++;
 					goto next_pkt;
 				}
-				if (Flags & NV_RX2_OVERFLOW) {
+				if (flags & NV_RX2_OVERFLOW) {
 					np->stats.rx_over_errors++;
 					np->stats.rx_errors++;
 					goto next_pkt;
 				}
-				if (Flags & NV_RX2_ERROR4) {
+				if (flags & NV_RX2_ERROR4) {
 					len = nv_getlen(dev, np->rx_skbuff[i]->data, len);
 					if (len < 0) {
 						np->stats.rx_errors++;
@@ -1854,17 +1854,17 @@ static void nv_rx_process(struct net_device *dev)
 					}
 				}
 				/* framing errors are soft errors */
-				if (Flags & NV_RX2_FRAMINGERR) {
-					if (Flags & NV_RX2_SUBSTRACT1) {
+				if (flags & NV_RX2_FRAMINGERR) {
+					if (flags & NV_RX2_SUBSTRACT1) {
 						len--;
 					}
 				}
 			}
 			if (np->txrxctl_bits & NVREG_TXRXCTL_RXCHECK) {
-				Flags &= NV_RX2_CHECKSUMMASK;
-				if (Flags == NV_RX2_CHECKSUMOK1 ||
-				    Flags == NV_RX2_CHECKSUMOK2 ||
-				    Flags == NV_RX2_CHECKSUMOK3) {
+				flags &= NV_RX2_CHECKSUMMASK;
+				if (flags == NV_RX2_CHECKSUMOK1 ||
+				    flags == NV_RX2_CHECKSUMOK2 ||
+				    flags == NV_RX2_CHECKSUMOK3) {
 					dprintk(KERN_DEBUG "%s: hw checksum hit!.\n", dev->name);
 					np->rx_skbuff[i]->ip_summed = CHECKSUM_UNNECESSARY;
 				} else {
@@ -1990,7 +1990,7 @@ static int nv_set_mac_address(struct net_device *dev, void *addr)
 	struct fe_priv *np = netdev_priv(dev);
 	struct sockaddr *macaddr = (struct sockaddr*)addr;
 
-	if(!is_valid_ether_addr(macaddr->sa_data))
+	if (!is_valid_ether_addr(macaddr->sa_data))
 		return -EADDRNOTAVAIL;
 
 	/* synchronized against open : rtnl_lock() held by caller */
@@ -2283,20 +2283,20 @@ set_speed:
 	lpa_pause = lpa & (LPA_PAUSE_CAP| LPA_PAUSE_ASYM);
 
 	switch (adv_pause) {
-	case (ADVERTISE_PAUSE_CAP):
+	case ADVERTISE_PAUSE_CAP:
 		if (lpa_pause & LPA_PAUSE_CAP) {
 			pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
 			if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
 				pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
 		}
 		break;
-	case (ADVERTISE_PAUSE_ASYM):
+	case ADVERTISE_PAUSE_ASYM:
 		if (lpa_pause == (LPA_PAUSE_CAP| LPA_PAUSE_ASYM))
 		{
 			pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
 		}
 		break;
-	case (ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM):
+	case ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM:
 		if (lpa_pause & LPA_PAUSE_CAP)
 		{
 			pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
@@ -3245,7 +3245,7 @@ static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ri
 	if (!rxtx_ring || !rx_skbuff || !rx_dma || !tx_skbuff || !tx_dma || !tx_dma_len) {
 		/* fall back to old rings */
 		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
-			if(rxtx_ring)
+			if (rxtx_ring)
 				pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending),
 						    rxtx_ring, ring_addr);
 		} else {
@@ -3481,7 +3481,7 @@ static int nv_get_stats_count(struct net_device *dev)
 	struct fe_priv *np = netdev_priv(dev);
 
 	if (np->driver_data & DEV_HAS_STATISTICS)
-		return (sizeof(struct nv_ethtool_stats)/sizeof(u64));
+		return sizeof(struct nv_ethtool_stats)/sizeof(u64);
 	else
 		return 0;
 }
@@ -3619,7 +3619,7 @@ static int nv_loopback_test(struct net_device *dev)
 	struct sk_buff *tx_skb, *rx_skb;
 	dma_addr_t test_dma_addr;
 	u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
-	u32 Flags;
+	u32 flags;
 	int len, i, pkt_len;
 	u8 *pkt_data;
 	u32 filter_flags = 0;
@@ -3663,12 +3663,12 @@ static int nv_loopback_test(struct net_device *dev)
 				       tx_skb->end-tx_skb->data, PCI_DMA_FROMDEVICE);
 
 	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
-		np->tx_ring.orig[0].PacketBuffer = cpu_to_le32(test_dma_addr);
-		np->tx_ring.orig[0].FlagLen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
+		np->tx_ring.orig[0].buf = cpu_to_le32(test_dma_addr);
+		np->tx_ring.orig[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
 	} else {
-		np->tx_ring.ex[0].PacketBufferHigh = cpu_to_le64(test_dma_addr) >> 32;
-		np->tx_ring.ex[0].PacketBufferLow = cpu_to_le64(test_dma_addr) & 0x0FFFFFFFF;
-		np->tx_ring.ex[0].FlagLen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
+		np->tx_ring.ex[0].bufhigh = cpu_to_le64(test_dma_addr) >> 32;
+		np->tx_ring.ex[0].buflow = cpu_to_le64(test_dma_addr) & 0x0FFFFFFFF;
+		np->tx_ring.ex[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
 	}
 	writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
 	pci_push(get_hwbase(dev));
@@ -3677,21 +3677,21 @@ static int nv_loopback_test(struct net_device *dev)
 
 	/* check for rx of the packet */
 	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
-		Flags = le32_to_cpu(np->rx_ring.orig[0].FlagLen);
+		flags = le32_to_cpu(np->rx_ring.orig[0].flaglen);
 		len = nv_descr_getlength(&np->rx_ring.orig[0], np->desc_ver);
 
 	} else {
-		Flags = le32_to_cpu(np->rx_ring.ex[0].FlagLen);
+		flags = le32_to_cpu(np->rx_ring.ex[0].flaglen);
 		len = nv_descr_getlength_ex(&np->rx_ring.ex[0], np->desc_ver);
 	}
 
-	if (Flags & NV_RX_AVAIL) {
+	if (flags & NV_RX_AVAIL) {
 		ret = 0;
 	} else if (np->desc_ver == DESC_VER_1) {
-		if (Flags & NV_RX_ERROR)
+		if (flags & NV_RX_ERROR)
 			ret = 0;
 	} else {
-		if (Flags & NV_RX2_ERROR) {
+		if (flags & NV_RX2_ERROR) {
 			ret = 0;
 		}
 	}
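
Most of the forcedeth churn above is mechanical: the descriptor fields move from CamelCase u32 to lowercase __le32, which makes the on-wire byte order part of the type so that sparse (make C=1) can flag any access that bypasses the conversion helpers. A minimal sketch of the convention (the demo_* names are hypothetical):

#include <linux/types.h>
#include <asm/byteorder.h>

/* Descriptor memory shared with the NIC is little-endian regardless of
 * the host CPU, so its fields are __le32 rather than plain u32. */
struct demo_desc {
	__le32 buf;
	__le32 flaglen;
};

static void demo_fill(struct demo_desc *d, u32 dma_addr, u32 flaglen)
{
	d->buf = cpu_to_le32(dma_addr);		/* CPU -> device order */
	d->flaglen = cpu_to_le32(flaglen);
}

static u32 demo_read_flags(const struct demo_desc *d)
{
	return le32_to_cpu(d->flaglen);		/* device -> CPU order */
}
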
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c
new file mode 100644
index 000000000000..c729aeeb4696
--- /dev/null
+++ b/drivers/net/qla3xxx.c
@@ -0,0 +1,3537 @@
+/*
+ * QLogic QLA3xxx NIC HBA Driver
+ * Copyright (c) 2003-2006 QLogic Corporation
+ *
+ * See LICENSE.qla3xxx for copyright and licensing details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/dmapool.h>
+#include <linux/mempool.h>
+#include <linux/spinlock.h>
+#include <linux/kthread.h>
+#include <linux/interrupt.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/ip.h>
+#include <linux/if_arp.h>
+#include <linux/if_ether.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/skbuff.h>
+#include <linux/rtnetlink.h>
+#include <linux/if_vlan.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/mm.h>
+
+#include "qla3xxx.h"
+
+#define DRV_NAME	"qla3xxx"
+#define DRV_STRING	"QLogic ISP3XXX Network Driver"
+#define DRV_VERSION	"v2.02.00-k36"
+#define PFX		DRV_NAME " "
+
+static const char ql3xxx_driver_name[] = DRV_NAME;
+static const char ql3xxx_driver_version[] = DRV_VERSION;
+
+MODULE_AUTHOR("QLogic Corporation");
+MODULE_DESCRIPTION("QLogic ISP3XXX Network Driver " DRV_VERSION " ");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+static const u32 default_msg
+    = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
+    | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN;
+
+static int debug = -1;		/* defaults above */
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
+
+static int msi;
+module_param(msi, int, 0);
+MODULE_PARM_DESC(msi, "Turn on Message Signaled Interrupts.");
+
+static struct pci_device_id ql3xxx_pci_tbl[] __devinitdata = {
+	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3022_DEVICE_ID)},
+	/* required last entry */
+	{0,}
+};
+
+MODULE_DEVICE_TABLE(pci, ql3xxx_pci_tbl);
+
+/*
+ * Caller must take hw_lock.
+ */
+static int ql_sem_spinlock(struct ql3_adapter *qdev,
+			   u32 sem_mask, u32 sem_bits)
+{
+	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
+	u32 value;
+	unsigned int seconds = 3;
+
+	do {
+		writel((sem_mask | sem_bits),
+		       &port_regs->CommonRegs.semaphoreReg);
+		value = readl(&port_regs->CommonRegs.semaphoreReg);
+		if ((value & (sem_mask >> 16)) == sem_bits)
+			return 0;
+		ssleep(1);
+	} while(--seconds);
+	return -1;
+}
+
+static void ql_sem_unlock(struct ql3_adapter *qdev, u32 sem_mask)
+{
+	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
+	writel(sem_mask, &port_regs->CommonRegs.semaphoreReg);
+	readl(&port_regs->CommonRegs.semaphoreReg);
+}
+
+static int ql_sem_lock(struct ql3_adapter *qdev, u32 sem_mask, u32 sem_bits)
+{
+	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
+	u32 value;
+
+	writel((sem_mask | sem_bits), &port_regs->CommonRegs.semaphoreReg);
+	value = readl(&port_regs->CommonRegs.semaphoreReg);
+	return ((value & (sem_mask >> 16)) == sem_bits);
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static int ql_wait_for_drvr_lock(struct ql3_adapter *qdev)
+{
+	int i = 0;
+
+	while (1) {
+		if (!ql_sem_lock(qdev,
+				 QL_DRVR_SEM_MASK,
+				 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index)
+				  * 2) << 1)) {
+			if (i < 10) {
+				ssleep(1);
+				i++;
+			} else {
+				printk(KERN_ERR PFX "%s: Timed out waiting for "
+				       "driver lock...\n",
+				       qdev->ndev->name);
+				return 0;
+			}
+		} else {
+			printk(KERN_DEBUG PFX
+			       "%s: driver lock acquired.\n",
+			       qdev->ndev->name);
+			return 1;
+		}
+	}
+}
+
+static void ql_set_register_page(struct ql3_adapter *qdev, u32 page)
+{
+	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
+
+	writel(((ISP_CONTROL_NP_MASK << 16) | page),
+	       &port_regs->CommonRegs.ispControlStatus);
+	readl(&port_regs->CommonRegs.ispControlStatus);
+	qdev->current_page = page;
+}
+
+static u32 ql_read_common_reg_l(struct ql3_adapter *qdev,
+				u32 __iomem * reg)
+{
+	u32 value;
+	unsigned long hw_flags;
+
+	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+	value = readl(reg);
+	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+
+	return value;
+}
+
+static u32 ql_read_common_reg(struct ql3_adapter *qdev,
+			      u32 __iomem * reg)
+{
+	return readl(reg);
+}
+
+static u32 ql_read_page0_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg)
+{
+	u32 value;
+	unsigned long hw_flags;
+
+	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+
+	if (qdev->current_page != 0)
+		ql_set_register_page(qdev,0);
+	value = readl(reg);
+
+	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+	return value;
+}
+
+static u32 ql_read_page0_reg(struct ql3_adapter *qdev, u32 __iomem *reg)
+{
+	if (qdev->current_page != 0)
+		ql_set_register_page(qdev,0);
+	return readl(reg);
+}
+
+static void ql_write_common_reg_l(struct ql3_adapter *qdev,
+				  u32 * reg, u32 value)
+{
+	unsigned long hw_flags;
+
+	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+	writel(value, (u32 *) reg);
+	readl(reg);
+	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+	return;
+}
+
+static void ql_write_common_reg(struct ql3_adapter *qdev,
+				u32 * reg, u32 value)
+{
+	writel(value, (u32 *) reg);
+	readl(reg);
+	return;
+}
+
+static void ql_write_page0_reg(struct ql3_adapter *qdev,
+			       u32 * reg, u32 value)
+{
+	if (qdev->current_page != 0)
+		ql_set_register_page(qdev,0);
+	writel(value, (u32 *) reg);
+	readl(reg);
+	return;
+}
+
+/*
+ * Caller holds hw_lock. Only called during init.
+ */
+static void ql_write_page1_reg(struct ql3_adapter *qdev,
+			       u32 * reg, u32 value)
+{
+	if (qdev->current_page != 1)
+		ql_set_register_page(qdev,1);
+	writel(value, (u32 *) reg);
+	readl(reg);
+	return;
+}
+
+/*
+ * Caller holds hw_lock. Only called during init.
+ */
+static void ql_write_page2_reg(struct ql3_adapter *qdev,
+			       u32 * reg, u32 value)
+{
+	if (qdev->current_page != 2)
+		ql_set_register_page(qdev,2);
+	writel(value, (u32 *) reg);
+	readl(reg);
+	return;
+}
+
+static void ql_disable_interrupts(struct ql3_adapter *qdev)
+{
+	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
+
+	ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg,
+			      (ISP_IMR_ENABLE_INT << 16));
+
+}
+
+static void ql_enable_interrupts(struct ql3_adapter *qdev)
+{
+	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
+
+	ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg,
+			      ((0xff << 16) | ISP_IMR_ENABLE_INT));
+
+}
+
+static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev,
+					    struct ql_rcv_buf_cb *lrg_buf_cb)
+{
+	u64 map;
+	lrg_buf_cb->next = NULL;
+
+	if (qdev->lrg_buf_free_tail == NULL) {	/* The list is empty */
+		qdev->lrg_buf_free_head = qdev->lrg_buf_free_tail = lrg_buf_cb;
+	} else {
+		qdev->lrg_buf_free_tail->next = lrg_buf_cb;
+		qdev->lrg_buf_free_tail = lrg_buf_cb;
+	}
+
+	if (!lrg_buf_cb->skb) {
+		lrg_buf_cb->skb = dev_alloc_skb(qdev->lrg_buffer_len);
+		if (unlikely(!lrg_buf_cb->skb)) {
+			printk(KERN_ERR PFX "%s: failed dev_alloc_skb().\n",
+			       qdev->ndev->name);
+			qdev->lrg_buf_skb_check++;
+		} else {
+			/*
+			 * We save some space to copy the ethhdr from first
+			 * buffer
+			 */
+			skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE);
+			map = pci_map_single(qdev->pdev,
+					     lrg_buf_cb->skb->data,
+					     qdev->lrg_buffer_len -
+					     QL_HEADER_SPACE,
+					     PCI_DMA_FROMDEVICE);
+			lrg_buf_cb->buf_phy_addr_low =
+			    cpu_to_le32(LS_64BITS(map));
+			lrg_buf_cb->buf_phy_addr_high =
+			    cpu_to_le32(MS_64BITS(map));
+			pci_unmap_addr_set(lrg_buf_cb, mapaddr, map);
+			pci_unmap_len_set(lrg_buf_cb, maplen,
+					  qdev->lrg_buffer_len -
+					  QL_HEADER_SPACE);
+		}
+	}
+
+	qdev->lrg_buf_free_count++;
+}
+
+static struct ql_rcv_buf_cb *ql_get_from_lrg_buf_free_list(struct ql3_adapter
+							   *qdev)
+{
+	struct ql_rcv_buf_cb *lrg_buf_cb;
+
+	if ((lrg_buf_cb = qdev->lrg_buf_free_head) != NULL) {
+		if ((qdev->lrg_buf_free_head = lrg_buf_cb->next) == NULL)
+			qdev->lrg_buf_free_tail = NULL;
+		qdev->lrg_buf_free_count--;
+	}
+
+	return lrg_buf_cb;
+}
+
+static u32 addrBits = EEPROM_NO_ADDR_BITS;
+static u32 dataBits = EEPROM_NO_DATA_BITS;
+
+static void fm93c56a_deselect(struct ql3_adapter *qdev);
+static void eeprom_readword(struct ql3_adapter *qdev, u32 eepromAddr,
+			    unsigned short *value);
+
+/*
+ * Caller holds hw_lock.
+ */
+static void fm93c56a_select(struct ql3_adapter *qdev)
+{
+	struct ql3xxx_port_registers __iomem *port_regs =
+	    qdev->mem_map_registers;
+
+	qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_1;
+	ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
+			    ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
+	ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
+			    ((ISP_NVRAM_MASK << 16) | qdev->eeprom_cmd_data));
+}
+
+/*
+ * Caller holds hw_lock.
+ */
+static void fm93c56a_cmd(struct ql3_adapter *qdev, u32 cmd, u32 eepromAddr)
+{
+	int i;
+	u32 mask;
+	u32 dataBit;
+	u32 previousBit;
+	struct ql3xxx_port_registers __iomem *port_regs =
+	    qdev->mem_map_registers;
+
+	/* Clock in a zero, then do the start bit */
+	ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
+			    ISP_NVRAM_MASK | qdev->eeprom_cmd_data |
+			    AUBURN_EEPROM_DO_1);
+	ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
+			    ISP_NVRAM_MASK | qdev->
+			    eeprom_cmd_data | AUBURN_EEPROM_DO_1 |
+			    AUBURN_EEPROM_CLK_RISE);
+	ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
+			    ISP_NVRAM_MASK | qdev->
+			    eeprom_cmd_data | AUBURN_EEPROM_DO_1 |
+			    AUBURN_EEPROM_CLK_FALL);
+
+	mask = 1 << (FM93C56A_CMD_BITS - 1);
+	/* Force the previous data bit to be different */
+	previousBit = 0xffff;
+	for (i = 0; i < FM93C56A_CMD_BITS; i++) {
+		dataBit =
+		    (cmd & mask) ? AUBURN_EEPROM_DO_1 : AUBURN_EEPROM_DO_0;
+		if (previousBit != dataBit) {
+			/*
+			 * If the bit changed, then change the DO state to
+			 * match
+			 */
+			ql_write_common_reg(qdev,
+					    &port_regs->CommonRegs.
+					    serialPortInterfaceReg,
+					    ISP_NVRAM_MASK | qdev->
+					    eeprom_cmd_data | dataBit);
+			previousBit = dataBit;
+		}
+		ql_write_common_reg(qdev,
+				    &port_regs->CommonRegs.
+				    serialPortInterfaceReg,
+				    ISP_NVRAM_MASK | qdev->
+				    eeprom_cmd_data | dataBit |
+				    AUBURN_EEPROM_CLK_RISE);
+		ql_write_common_reg(qdev,
+				    &port_regs->CommonRegs.
+				    serialPortInterfaceReg,
+				    ISP_NVRAM_MASK | qdev->
+				    eeprom_cmd_data | dataBit |
+				    AUBURN_EEPROM_CLK_FALL);
+		cmd = cmd << 1;
+	}
+
+	mask = 1 << (addrBits - 1);
+	/* Force the previous data bit to be different */
+	previousBit = 0xffff;
+	for (i = 0; i < addrBits; i++) {
+		dataBit =
+		    (eepromAddr & mask) ? AUBURN_EEPROM_DO_1 :
+		    AUBURN_EEPROM_DO_0;
+		if (previousBit != dataBit) {
+			/*
+			 * If the bit changed, then change the DO state to
+			 * match
+			 */
+			ql_write_common_reg(qdev,
+					    &port_regs->CommonRegs.
+					    serialPortInterfaceReg,
+					    ISP_NVRAM_MASK | qdev->
+					    eeprom_cmd_data | dataBit);
+			previousBit = dataBit;
+		}
+		ql_write_common_reg(qdev,
+				    &port_regs->CommonRegs.
+				    serialPortInterfaceReg,
+				    ISP_NVRAM_MASK | qdev->
+				    eeprom_cmd_data | dataBit |
+				    AUBURN_EEPROM_CLK_RISE);
+		ql_write_common_reg(qdev,
+				    &port_regs->CommonRegs.
+				    serialPortInterfaceReg,
+				    ISP_NVRAM_MASK | qdev->
+				    eeprom_cmd_data | dataBit |
+				    AUBURN_EEPROM_CLK_FALL);
+		eepromAddr = eepromAddr << 1;
+	}
+}
+
438/*
439 * Caller holds hw_lock.
440 */
441static void fm93c56a_deselect(struct ql3_adapter *qdev)
442{
443 struct ql3xxx_port_registers __iomem *port_regs =
444 qdev->mem_map_registers;
445 qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_0;
446 ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
447 ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
448}
449
450/*
451 * Caller holds hw_lock.
452 */
453static void fm93c56a_datain(struct ql3_adapter *qdev, unsigned short *value)
454{
455 int i;
456 u32 data = 0;
457 u32 dataBit;
458 struct ql3xxx_port_registers __iomem *port_regs =
459 qdev->mem_map_registers;
460
461 /* Read the data bits */
462 /* The first bit is a dummy. Clock right over it. */
463 for (i = 0; i < dataBits; i++) {
464 ql_write_common_reg(qdev,
465 &port_regs->CommonRegs.serialPortInterfaceReg,
466 ISP_NVRAM_MASK |
467 qdev->eeprom_cmd_data |
468 AUBURN_EEPROM_CLK_RISE);
469 ql_write_common_reg(qdev,
470 &port_regs->CommonRegs.serialPortInterfaceReg,
471 ISP_NVRAM_MASK |
472 qdev->eeprom_cmd_data |
473 AUBURN_EEPROM_CLK_FALL);
474 dataBit =
475 (ql_read_common_reg(qdev,
476 &port_regs->CommonRegs.serialPortInterfaceReg) &
477 AUBURN_EEPROM_DI_1)
478 ? 1 : 0;
479 data = (data << 1) | dataBit;
480 }
481 *value = (u16) data;
482}
483
484/*
485 * Caller holds hw_lock.
486 */
487static void eeprom_readword(struct ql3_adapter *qdev,
488 u32 eepromAddr, unsigned short *value)
489{
490 fm93c56a_select(qdev);
491 fm93c56a_cmd(qdev, (int)FM93C56A_READ, eepromAddr);
492 fm93c56a_datain(qdev, value);
493 fm93c56a_deselect(qdev);
494}
495
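/*
 * The EEPROM contents are clocked in as 16-bit words, so on big-endian
 * hosts each byte pair of a stored MAC address ends up swapped.
 */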
496static void ql_swap_mac_addr(u8 *macAddress)
497{
498#ifdef __BIG_ENDIAN
499 u8 temp;
500 temp = macAddress[0];
501 macAddress[0] = macAddress[1];
502 macAddress[1] = temp;
503 temp = macAddress[2];
504 macAddress[2] = macAddress[3];
505 macAddress[3] = temp;
506 temp = macAddress[4];
507 macAddress[4] = macAddress[5];
508 macAddress[5] = temp;
509#endif
510}
511
512static int ql_get_nvram_params(struct ql3_adapter *qdev)
513{
514 u16 *pEEPROMData;
515 u16 checksum = 0;
516 u32 index;
517 unsigned long hw_flags;
518
519 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
520
521 pEEPROMData = (u16 *)&qdev->nvram_data;
522 qdev->eeprom_cmd_data = 0;
523 if (ql_sem_spinlock(qdev, QL_NVRAM_SEM_MASK,
524 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
525 2) << 10)) {
526 printk(KERN_ERR PFX "%s: Failed ql_sem_spinlock().\n",
527 __func__);
528 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
529 return -1;
530 }
531
532 for (index = 0; index < EEPROM_SIZE; index++) {
533 eeprom_readword(qdev, index, pEEPROMData);
534 checksum += *pEEPROMData;
535 pEEPROMData++;
536 }
537 ql_sem_unlock(qdev, QL_NVRAM_SEM_MASK);
538
539 if (checksum != 0) {
540 printk(KERN_ERR PFX "%s: checksum should be zero, is %x!!\n",
541 qdev->ndev->name, checksum);
542 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
543 return -1;
544 }
545
546 /*
547 * We have a problem with endianness for the MAC addresses
548 * and the two 8-bit values, version and numPorts. We
549 * have to swap them on big endian systems.
550 */
551 ql_swap_mac_addr(qdev->nvram_data.funcCfg_fn0.macAddress);
552 ql_swap_mac_addr(qdev->nvram_data.funcCfg_fn1.macAddress);
553 ql_swap_mac_addr(qdev->nvram_data.funcCfg_fn2.macAddress);
554 ql_swap_mac_addr(qdev->nvram_data.funcCfg_fn3.macAddress);
555 pEEPROMData = (u16 *)&qdev->nvram_data.version;
556 *pEEPROMData = le16_to_cpu(*pEEPROMData);
557
558 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
559 return checksum;
560}
561
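/* MII management (MDIO) addresses of the PHY on each MAC port. */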
562static const u32 PHYAddr[2] = {
563 PORT0_PHY_ADDRESS, PORT1_PHY_ADDRESS
564};
565
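/* Spin until the MII management interface is idle (at most ~10 ms). */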
566static int ql_wait_for_mii_ready(struct ql3_adapter *qdev)
567{
568 struct ql3xxx_port_registers __iomem *port_regs =
569 qdev->mem_map_registers;
570 u32 temp;
571 int count = 1000;
572
573 while (count) {
574 temp = ql_read_page0_reg(qdev, &port_regs->macMIIStatusReg);
575 if (!(temp & MAC_MII_STATUS_BSY))
576 return 0;
577 udelay(10);
578 count--;
579 }
580 return -1;
581}
582
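/*
 * In scan mode the MAC continuously polls the PHY's scan register, so
 * scanning must be paused around any individual MII access.
 */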
583static void ql_mii_enable_scan_mode(struct ql3_adapter *qdev)
584{
585 struct ql3xxx_port_registers __iomem *port_regs =
586 qdev->mem_map_registers;
587 u32 scanControl;
588
589 if (qdev->numPorts > 1) {
590 /* Auto scan will cycle through multiple ports */
591 scanControl = MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC;
592 } else {
593 scanControl = MAC_MII_CONTROL_SC;
594 }
595
596 /*
597 * Scan register 1 of the PHY/PETBI and set up to scan
598 * both devices. The autoscan starts from the first
599 * register and completes the last one before rolling
600 * over to the first.
601 */
602 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
603 PHYAddr[0] | MII_SCAN_REGISTER);
604
605 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
606 (scanControl) |
607 ((MAC_MII_CONTROL_SC | MAC_MII_CONTROL_AS) << 16));
608}
609
610static u8 ql_mii_disable_scan_mode(struct ql3_adapter *qdev)
611{
612 u8 ret;
613 struct ql3xxx_port_registers __iomem *port_regs =
614 qdev->mem_map_registers;
615
616 /* See if scan mode is enabled before we turn it off */
617 if (ql_read_page0_reg(qdev, &port_regs->macMIIMgmtControlReg) &
618 (MAC_MII_CONTROL_AS | MAC_MII_CONTROL_SC)) {
619 /* Scan is enabled */
620 ret = 1;
621 } else {
622 /* Scan is disabled */
623 ret = 0;
624 }
625
626 /*
627 * When disabling scan mode you must first change the MII register
628 * address
629 */
630 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
631 PHYAddr[0] | MII_SCAN_REGISTER);
632
633 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
634 ((MAC_MII_CONTROL_SC | MAC_MII_CONTROL_AS |
635 MAC_MII_CONTROL_RC) << 16));
636
637 return ret;
638}
639
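/*
 * The *_ex MII accessors take an explicit mac_index so either port's
 * PHY can be reached; the plain variants further down use the
 * adapter's own qdev->PHYAddr.
 */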
640static int ql_mii_write_reg_ex(struct ql3_adapter *qdev,
641 u16 regAddr, u16 value, u32 mac_index)
642{
643 struct ql3xxx_port_registers __iomem *port_regs =
644 qdev->mem_map_registers;
645 u8 scanWasEnabled;
646
647 scanWasEnabled = ql_mii_disable_scan_mode(qdev);
648
649 if (ql_wait_for_mii_ready(qdev)) {
650 if (netif_msg_link(qdev))
651 printk(KERN_WARNING PFX
652 "%s Timed out waiting for management port to "
653 "get free before issuing command.\n",
654 qdev->ndev->name);
655 return -1;
656 }
657
658 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
659 PHYAddr[mac_index] | regAddr);
660
661 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value);
662
663 /* Wait for write to complete 9/10/04 SJP */
664 if (ql_wait_for_mii_ready(qdev)) {
665 if (netif_msg_link(qdev))
666 printk(KERN_WARNING PFX
667 "%s: Timed out waiting for management port to"
668 "get free before issuing command.\n",
669 qdev->ndev->name);
670 return -1;
671 }
672
673 if (scanWasEnabled)
674 ql_mii_enable_scan_mode(qdev);
675
676 return 0;
677}
678
679static int ql_mii_read_reg_ex(struct ql3_adapter *qdev, u16 regAddr,
680 u16 * value, u32 mac_index)
681{
682 struct ql3xxx_port_registers __iomem *port_regs =
683 qdev->mem_map_registers;
684 u8 scanWasEnabled;
685 u32 temp;
686
687 scanWasEnabled = ql_mii_disable_scan_mode(qdev);
688
689 if (ql_wait_for_mii_ready(qdev)) {
690 if (netif_msg_link(qdev))
691 printk(KERN_WARNING PFX
692 "%s: Timed out waiting for management port to "
693 "get free before issuing command.\n",
694 qdev->ndev->name);
695 return -1;
696 }
697
698 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
699 PHYAddr[mac_index] | regAddr);
700
701 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
702 (MAC_MII_CONTROL_RC << 16));
703
704 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
705 (MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC);
706
707 /* Wait for the read to complete */
708 if (ql_wait_for_mii_ready(qdev)) {
709 if (netif_msg_link(qdev))
710 printk(KERN_WARNING PFX
711 "%s: Timed out waiting for management port to "
712 "get free after issuing command.\n",
713 qdev->ndev->name);
714 return -1;
715 }
716
717 temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg);
718 *value = (u16) temp;
719
720 if (scanWasEnabled)
721 ql_mii_enable_scan_mode(qdev);
722
723 return 0;
724}
725
726static int ql_mii_write_reg(struct ql3_adapter *qdev, u16 regAddr, u16 value)
727{
728 struct ql3xxx_port_registers __iomem *port_regs =
729 qdev->mem_map_registers;
730
731 ql_mii_disable_scan_mode(qdev);
732
733 if (ql_wait_for_mii_ready(qdev)) {
734 if (netif_msg_link(qdev))
735 printk(KERN_WARNING PFX
736 "%s: Timed out waiting for management port to "
737 "get free before issuing command.\n",
738 qdev->ndev->name);
739 return -1;
740 }
741
742 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
743 qdev->PHYAddr | regAddr);
744
745 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value);
746
747 /* Wait for write to complete. */
748 if (ql_wait_for_mii_ready(qdev)) {
749 if (netif_msg_link(qdev))
750 printk(KERN_WARNING PFX
751 "%s: Timed out waiting for management port to "
752 "get free before issuing command.\n",
753 qdev->ndev->name);
754 return -1;
755 }
756
757 ql_mii_enable_scan_mode(qdev);
758
759 return 0;
760}
761
762static int ql_mii_read_reg(struct ql3_adapter *qdev, u16 regAddr, u16 *value)
763{
764 u32 temp;
765 struct ql3xxx_port_registers __iomem *port_regs =
766 qdev->mem_map_registers;
767
768 ql_mii_disable_scan_mode(qdev);
769
770 if (ql_wait_for_mii_ready(qdev)) {
771 if (netif_msg_link(qdev))
772 printk(KERN_WARNING PFX
773 "%s: Timed out waiting for management port to "
774 "get free before issuing command.\n",
775 qdev->ndev->name);
776 return -1;
777 }
778
779 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtAddrReg,
780 qdev->PHYAddr | regAddr);
781
782 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
783 (MAC_MII_CONTROL_RC << 16));
784
785 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
786 (MAC_MII_CONTROL_RC << 16) | MAC_MII_CONTROL_RC);
787
788 /* Wait for the read to complete */
789 if (ql_wait_for_mii_ready(qdev)) {
790 if (netif_msg_link(qdev))
791 printk(KERN_WARNING PFX
792 "%s: Timed out waiting for management port to "
793 "get free before issuing command.\n",
794 qdev->ndev->name);
795 return -1;
796 }
797
798 temp = ql_read_page0_reg(qdev, &port_regs->macMIIMgmtDataReg);
799 *value = (u16) temp;
800
801 ql_mii_enable_scan_mode(qdev);
802
803 return 0;
804}
805
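/* PETBI (ten-bit interface) management, used on the fiber ports. */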
806static void ql_petbi_reset(struct ql3_adapter *qdev)
807{
808 ql_mii_write_reg(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET);
809}
810
811static void ql_petbi_start_neg(struct ql3_adapter *qdev)
812{
813 u16 reg;
814
815 /* Enable Auto-negotiation sense */
816 ql_mii_read_reg(qdev, PETBI_TBI_CTRL, &reg);
817 reg |= PETBI_TBI_AUTO_SENSE;
818 ql_mii_write_reg(qdev, PETBI_TBI_CTRL, reg);
819
820 ql_mii_write_reg(qdev, PETBI_NEG_ADVER,
821 PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX);
822
823 ql_mii_write_reg(qdev, PETBI_CONTROL_REG,
824 PETBI_CTRL_AUTO_NEG | PETBI_CTRL_RESTART_NEG |
825 PETBI_CTRL_FULL_DUPLEX | PETBI_CTRL_SPEED_1000);
826
827}
828
829static void ql_petbi_reset_ex(struct ql3_adapter *qdev, u32 mac_index)
830{
831 ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG, PETBI_CTRL_SOFT_RESET,
832 mac_index);
833}
834
835static void ql_petbi_start_neg_ex(struct ql3_adapter *qdev, u32 mac_index)
836{
837 u16 reg;
838
839 /* Enable Auto-negotiation sense */
840 ql_mii_read_reg_ex(qdev, PETBI_TBI_CTRL, &reg, mac_index);
841 reg |= PETBI_TBI_AUTO_SENSE;
842 ql_mii_write_reg_ex(qdev, PETBI_TBI_CTRL, reg, mac_index);
843
844 ql_mii_write_reg_ex(qdev, PETBI_NEG_ADVER,
845 PETBI_NEG_PAUSE | PETBI_NEG_DUPLEX, mac_index);
846
847 ql_mii_write_reg_ex(qdev, PETBI_CONTROL_REG,
848 PETBI_CTRL_AUTO_NEG | PETBI_CTRL_RESTART_NEG |
849 PETBI_CTRL_FULL_DUPLEX | PETBI_CTRL_SPEED_1000,
850 mac_index);
851}
852
853static void ql_petbi_init(struct ql3_adapter *qdev)
854{
855 ql_petbi_reset(qdev);
856 ql_petbi_start_neg(qdev);
857}
858
859static void ql_petbi_init_ex(struct ql3_adapter *qdev, u32 mac_index)
860{
861 ql_petbi_reset_ex(qdev, mac_index);
862 ql_petbi_start_neg_ex(qdev, mac_index);
863}
864
865static int ql_is_petbi_neg_pause(struct ql3_adapter *qdev)
866{
867 u16 reg;
868
869 if (ql_mii_read_reg(qdev, PETBI_NEG_PARTNER, &reg) < 0)
870 return 0;
871
872 return (reg & PETBI_NEG_PAUSE_MASK) == PETBI_NEG_PAUSE;
873}
874
875static int ql_phy_get_speed(struct ql3_adapter *qdev)
876{
877 u16 reg;
878
879 if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0)
880 return 0;
881
882 reg = (((reg & 0x18) >> 3) & 3);
883
884 if (reg == 2)
885 return SPEED_1000;
886 else if (reg == 1)
887 return SPEED_100;
888 else if (reg == 0)
889 return SPEED_10;
890 else
891 return -1;
892}
893
894static int ql_is_full_dup(struct ql3_adapter *qdev)
895{
896 u16 reg;
897
898 if (ql_mii_read_reg(qdev, AUX_CONTROL_STATUS, &reg) < 0)
899 return 0;
900
901 return (reg & PHY_AUX_DUPLEX_STAT) != 0;
902}
903
904static int ql_is_phy_neg_pause(struct ql3_adapter *qdev)
905{
906 u16 reg;
907
908 if (ql_mii_read_reg(qdev, PHY_NEG_PARTNER, &reg) < 0)
909 return 0;
910
911 return (reg & PHY_NEG_PAUSE) != 0;
912}
913
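/*
 * The MAC configuration registers take a write mask in the upper 16
 * bits: writing (bit | (bit << 16)) sets the bit and (bit << 16) alone
 * clears it.
 */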
914/*
915 * Caller holds hw_lock.
916 */
917static void ql_mac_enable(struct ql3_adapter *qdev, u32 enable)
918{
919 struct ql3xxx_port_registers __iomem *port_regs =
920 qdev->mem_map_registers;
921 u32 value;
922
923 if (enable)
924 value = (MAC_CONFIG_REG_PE | (MAC_CONFIG_REG_PE << 16));
925 else
926 value = (MAC_CONFIG_REG_PE << 16);
927
928 if (qdev->mac_index)
929 ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
930 else
931 ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
932}
933
934/*
935 * Caller holds hw_lock.
936 */
937static void ql_mac_cfg_soft_reset(struct ql3_adapter *qdev, u32 enable)
938{
939 struct ql3xxx_port_registers __iomem *port_regs =
940 qdev->mem_map_registers;
941 u32 value;
942
943 if (enable)
944 value = (MAC_CONFIG_REG_SR | (MAC_CONFIG_REG_SR << 16));
945 else
946 value = (MAC_CONFIG_REG_SR << 16);
947
948 if (qdev->mac_index)
949 ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
950 else
951 ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
952}
953
954/*
955 * Caller holds hw_lock.
956 */
957static void ql_mac_cfg_gig(struct ql3_adapter *qdev, u32 enable)
958{
959 struct ql3xxx_port_registers __iomem *port_regs =
960 qdev->mem_map_registers;
961 u32 value;
962
963 if (enable)
964 value = (MAC_CONFIG_REG_GM | (MAC_CONFIG_REG_GM << 16));
965 else
966 value = (MAC_CONFIG_REG_GM << 16);
967
968 if (qdev->mac_index)
969 ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
970 else
971 ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
972}
973
974/*
975 * Caller holds hw_lock.
976 */
977static void ql_mac_cfg_full_dup(struct ql3_adapter *qdev, u32 enable)
978{
979 struct ql3xxx_port_registers __iomem *port_regs =
980 qdev->mem_map_registers;
981 u32 value;
982
983 if (enable)
984 value = (MAC_CONFIG_REG_FD | (MAC_CONFIG_REG_FD << 16));
985 else
986 value = (MAC_CONFIG_REG_FD << 16);
987
988 if (qdev->mac_index)
989 ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
990 else
991 ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
992}
993
994/*
995 * Caller holds hw_lock.
996 */
997static void ql_mac_cfg_pause(struct ql3_adapter *qdev, u32 enable)
998{
999 struct ql3xxx_port_registers __iomem *port_regs =
1000 qdev->mem_map_registers;
1001 u32 value;
1002
1003 if (enable)
1004 value =
1005 ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) |
1006 ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) << 16));
1007 else
1008 value = ((MAC_CONFIG_REG_TF | MAC_CONFIG_REG_RF) << 16);
1009
1010 if (qdev->mac_index)
1011 ql_write_page0_reg(qdev, &port_regs->mac1ConfigReg, value);
1012 else
1013 ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
1014}
1015
1016/*
1017 * Caller holds hw_lock.
1018 */
1019static int ql_is_fiber(struct ql3_adapter *qdev)
1020{
1021 struct ql3xxx_port_registers __iomem *port_regs =
1022 qdev->mem_map_registers;
1023 u32 bitToCheck = 0;
1024 u32 temp;
1025
1026 switch (qdev->mac_index) {
1027 case 0:
1028 bitToCheck = PORT_STATUS_SM0;
1029 break;
1030 case 1:
1031 bitToCheck = PORT_STATUS_SM1;
1032 break;
1033 }
1034
1035 temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
1036 return (temp & bitToCheck) != 0;
1037}
1038
1039static int ql_is_auto_cfg(struct ql3_adapter *qdev)
1040{
1041 u16 reg;
1042 ql_mii_read_reg(qdev, 0x00, &reg);
1043 return (reg & 0x1000) != 0;
1044}
1045
1046/*
1047 * Caller holds hw_lock.
1048 */
1049static int ql_is_auto_neg_complete(struct ql3_adapter *qdev)
1050{
1051 struct ql3xxx_port_registers __iomem *port_regs =
1052 qdev->mem_map_registers;
1053 u32 bitToCheck = 0;
1054 u32 temp;
1055
1056 switch (qdev->mac_index) {
1057 case 0:
1058 bitToCheck = PORT_STATUS_AC0;
1059 break;
1060 case 1:
1061 bitToCheck = PORT_STATUS_AC1;
1062 break;
1063 }
1064
1065 temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
1066 if (temp & bitToCheck) {
1067 if (netif_msg_link(qdev))
1068 printk(KERN_INFO PFX
1069 "%s: Auto-Negotiate complete.\n",
1070 qdev->ndev->name);
1071 return 1;
1072 } else {
1073 if (netif_msg_link(qdev))
1074 printk(KERN_WARNING PFX
1075 "%s: Auto-Negotiate incomplete.\n",
1076 qdev->ndev->name);
1077 return 0;
1078 }
1079}
1080
1081/*
1082 * ql_is_neg_pause() returns 1 if pause was negotiated to be on
1083 */
1084static int ql_is_neg_pause(struct ql3_adapter *qdev)
1085{
1086 if (ql_is_fiber(qdev))
1087 return ql_is_petbi_neg_pause(qdev);
1088 else
1089 return ql_is_phy_neg_pause(qdev);
1090}
1091
1092static int ql_auto_neg_error(struct ql3_adapter *qdev)
1093{
1094 struct ql3xxx_port_registers __iomem *port_regs =
1095 qdev->mem_map_registers;
1096 u32 bitToCheck = 0;
1097 u32 temp;
1098
1099 switch (qdev->mac_index) {
1100 case 0:
1101 bitToCheck = PORT_STATUS_AE0;
1102 break;
1103 case 1:
1104 bitToCheck = PORT_STATUS_AE1;
1105 break;
1106 }
1107 temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
1108 return (temp & bitToCheck) != 0;
1109}
1110
1111static u32 ql_get_link_speed(struct ql3_adapter *qdev)
1112{
1113 if (ql_is_fiber(qdev))
1114 return SPEED_1000;
1115 else
1116 return ql_phy_get_speed(qdev);
1117}
1118
1119static int ql_is_link_full_dup(struct ql3_adapter *qdev)
1120{
1121 if (ql_is_fiber(qdev))
1122 return 1;
1123 else
1124 return ql_is_full_dup(qdev);
1125}
1126
1127/*
1128 * Caller holds hw_lock.
1129 */
1130static int ql_link_down_detect(struct ql3_adapter *qdev)
1131{
1132 struct ql3xxx_port_registers __iomem *port_regs =
1133 qdev->mem_map_registers;
1134 u32 bitToCheck = 0;
1135 u32 temp;
1136
1137 switch (qdev->mac_index) {
1138 case 0:
1139 bitToCheck = ISP_CONTROL_LINK_DN_0;
1140 break;
1141 case 1:
1142 bitToCheck = ISP_CONTROL_LINK_DN_1;
1143 break;
1144 }
1145
1146 temp =
1147 ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus);
1148 return (temp & bitToCheck) != 0;
1149}
1150
1151/*
1152 * Caller holds hw_lock.
1153 */
1154static int ql_link_down_detect_clear(struct ql3_adapter *qdev)
1155{
1156 struct ql3xxx_port_registers __iomem *port_regs =
1157 qdev->mem_map_registers;
1158
1159 switch (qdev->mac_index) {
1160 case 0:
1161 ql_write_common_reg(qdev,
1162 &port_regs->CommonRegs.ispControlStatus,
1163 (ISP_CONTROL_LINK_DN_0) |
1164 (ISP_CONTROL_LINK_DN_0 << 16));
1165 break;
1166
1167 case 1:
1168 ql_write_common_reg(qdev,
1169 &port_regs->CommonRegs.ispControlStatus,
1170 (ISP_CONTROL_LINK_DN_1) |
1171 (ISP_CONTROL_LINK_DN_1 << 16));
1172 break;
1173
1174 default:
1175 return 1;
1176 }
1177
1178 return 0;
1179}
1180
1181/*
1182 * Caller holds hw_lock.
1183 */
1184static int ql_this_adapter_controls_port(struct ql3_adapter *qdev,
1185 u32 mac_index)
1186{
1187 struct ql3xxx_port_registers __iomem *port_regs =
1188 qdev->mem_map_registers;
1189 u32 bitToCheck = 0;
1190 u32 temp;
1191
1192 switch (mac_index) {
1193 case 0:
1194 bitToCheck = PORT_STATUS_F1_ENABLED;
1195 break;
1196 case 1:
1197 bitToCheck = PORT_STATUS_F3_ENABLED;
1198 break;
1199 default:
1200 break;
1201 }
1202
1203 temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
1204 if (temp & bitToCheck) {
1205 if (netif_msg_link(qdev))
1206 printk(KERN_DEBUG PFX
1207 "%s: is not link master.\n", qdev->ndev->name);
1208 return 0;
1209 } else {
1210 if (netif_msg_link(qdev))
1211 printk(KERN_DEBUG PFX
1212 "%s: is link master.\n", qdev->ndev->name);
1213 return 1;
1214 }
1215}
1216
1217static void ql_phy_reset_ex(struct ql3_adapter *qdev, u32 mac_index)
1218{
1219 ql_mii_write_reg_ex(qdev, CONTROL_REG, PHY_CTRL_SOFT_RESET, mac_index);
1220}
1221
1222static void ql_phy_start_neg_ex(struct ql3_adapter *qdev, u32 mac_index)
1223{
1224 u16 reg;
1225
1226 ql_mii_write_reg_ex(qdev, PHY_NEG_ADVER,
1227 PHY_NEG_PAUSE | PHY_NEG_ADV_SPEED | 1, mac_index);
1228
1229 ql_mii_read_reg_ex(qdev, CONTROL_REG, &reg, mac_index);
1230 ql_mii_write_reg_ex(qdev, CONTROL_REG, reg | PHY_CTRL_RESTART_NEG,
1231 mac_index);
1232}
1233
1234static void ql_phy_init_ex(struct ql3_adapter *qdev, u32 mac_index)
1235{
1236 ql_phy_reset_ex(qdev, mac_index);
1237 ql_phy_start_neg_ex(qdev, mac_index);
1238}
1239
1240/*
1241 * Caller holds hw_lock.
1242 */
1243static u32 ql_get_link_state(struct ql3_adapter *qdev)
1244{
1245 struct ql3xxx_port_registers __iomem *port_regs =
1246 qdev->mem_map_registers;
1247 u32 bitToCheck = 0;
1248 u32 temp, linkState;
1249
1250 switch (qdev->mac_index) {
1251 case 0:
1252 bitToCheck = PORT_STATUS_UP0;
1253 break;
1254 case 1:
1255 bitToCheck = PORT_STATUS_UP1;
1256 break;
1257 }
1258 temp = ql_read_page0_reg(qdev, &port_regs->portStatus);
1259 if (temp & bitToCheck) {
1260 linkState = LS_UP;
1261 } else {
1262 linkState = LS_DOWN;
1263 if (netif_msg_link(qdev))
1264 printk(KERN_WARNING PFX
1265 "%s: Link is down.\n", qdev->ndev->name);
1266 }
1267 return linkState;
1268}
1269
1270static int ql_port_start(struct ql3_adapter *qdev)
1271{
1272 if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
1273 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
1274 2) << 7))
1275 return -1;
1276
1277 if (ql_is_fiber(qdev)) {
1278 ql_petbi_init(qdev);
1279 } else {
1280 /* Copper port */
1281 ql_phy_init_ex(qdev, qdev->mac_index);
1282 }
1283
1284 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
1285 return 0;
1286}
1287
1288static int ql_finish_auto_neg(struct ql3_adapter *qdev)
1289{
1290
1291 if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
1292 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
1293 2) << 7))
1294 return -1;
1295
1296 if (!ql_auto_neg_error(qdev)) {
1297 if (test_bit(QL_LINK_MASTER, &qdev->flags)) {
1298 /* configure the MAC */
1299 if (netif_msg_link(qdev))
1300 printk(KERN_DEBUG PFX
1301 "%s: Configuring link.\n",
1302 qdev->ndev->name);
1303 ql_mac_cfg_soft_reset(qdev, 1);
1304 ql_mac_cfg_gig(qdev,
1305 ql_get_link_speed(qdev) ==
1306 SPEED_1000);
1307 ql_mac_cfg_full_dup(qdev,
1308 ql_is_link_full_dup(qdev));
1309 ql_mac_cfg_pause(qdev,
1310 ql_is_neg_pause(qdev));
1315 ql_mac_cfg_soft_reset(qdev, 0);
1316
1317 /* enable the MAC */
1318 if (netif_msg_link(qdev))
1319 printk(KERN_DEBUG PFX
1320 "%s: Enabling mac.\n",
1321 qdev->ndev->
1322 name);
1323 ql_mac_enable(qdev, 1);
1324 }
1325
1326 if (netif_msg_link(qdev))
1327 printk(KERN_DEBUG PFX
1328 "%s: Change port_link_state LS_DOWN to LS_UP.\n",
1329 qdev->ndev->name);
1330 qdev->port_link_state = LS_UP;
1331 netif_start_queue(qdev->ndev);
1332 netif_carrier_on(qdev->ndev);
1333 if (netif_msg_link(qdev))
1334 printk(KERN_INFO PFX
1335 "%s: Link is up at %d Mbps, %s duplex.\n",
1336 qdev->ndev->name,
1337 ql_get_link_speed(qdev),
1338 ql_is_link_full_dup(qdev)
1339 ? "full" : "half");
1340
1341 } else { /* Remote error detected */
1342
1343 if (test_bit(QL_LINK_MASTER, &qdev->flags)) {
1344 if (netif_msg_link(qdev))
1345 printk(KERN_DEBUG PFX
1346 "%s: Remote error detected. "
1347 "Calling ql_port_start().\n",
1348 qdev->ndev->name);
1350 /*
1351 * ql_port_start() is shared code and needs
1352 * to lock the PHY on its own.
1353 */
1354 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
1355 if (ql_port_start(qdev)) /* Restart port */
1356 return -1;
1357 else
1358 return 0;
1359 }
1360 }
1361 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
1362 return 0;
1363}
1364
1365static void ql_link_state_machine(struct ql3_adapter *qdev)
1366{
1367 u32 curr_link_state;
1368 unsigned long hw_flags;
1369
1370 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
1371
1372 curr_link_state = ql_get_link_state(qdev);
1373
1374 if (test_bit(QL_RESET_ACTIVE, &qdev->flags)) {
1375 if (netif_msg_link(qdev))
1376 printk(KERN_INFO PFX
1377 "%s: Reset in progress, skip processing link "
1378 "state.\n", qdev->ndev->name);
1379 /* Release hw_lock on this early-exit path too. */
1380 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
 return;
 }
1381
1382 switch (qdev->port_link_state) {
1383 default:
1384 if (test_bit(QL_LINK_MASTER,&qdev->flags)) {
1385 ql_port_start(qdev);
1386 }
1387 qdev->port_link_state = LS_DOWN;
1388 /* Fall Through */
1389
1390 case LS_DOWN:
1391 if (netif_msg_link(qdev))
1392 printk(KERN_DEBUG PFX
1393 "%s: port_link_state = LS_DOWN.\n",
1394 qdev->ndev->name);
1395 if (curr_link_state == LS_UP) {
1396 if (netif_msg_link(qdev))
1397 printk(KERN_DEBUG PFX
1398 "%s: curr_link_state = LS_UP.\n",
1399 qdev->ndev->name);
1400 if (ql_is_auto_neg_complete(qdev))
1401 ql_finish_auto_neg(qdev);
1402
1403 if (qdev->port_link_state == LS_UP)
1404 ql_link_down_detect_clear(qdev);
1405
1406 }
1407 break;
1408
1409 case LS_UP:
1410 /*
1411 * See if the link is currently down or went down and came
1412 * back up
1413 */
1414 if ((curr_link_state == LS_DOWN) || ql_link_down_detect(qdev)) {
1415 if (netif_msg_link(qdev))
1416 printk(KERN_INFO PFX "%s: Link is down.\n",
1417 qdev->ndev->name);
1418 qdev->port_link_state = LS_DOWN;
1419 }
1420 break;
1421 }
1422 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1423}
1424
1425/*
1426 * Caller must take hw_lock and QL_PHY_GIO_SEM.
1427 */
1428static void ql_get_phy_owner(struct ql3_adapter *qdev)
1429{
1430 if (ql_this_adapter_controls_port(qdev, qdev->mac_index))
1431 set_bit(QL_LINK_MASTER,&qdev->flags);
1432 else
1433 clear_bit(QL_LINK_MASTER,&qdev->flags);
1434}
1435
1436/*
1437 * Caller must take hw_lock and QL_PHY_GIO_SEM.
1438 */
1439static void ql_init_scan_mode(struct ql3_adapter *qdev)
1440{
1441 ql_mii_enable_scan_mode(qdev);
1442
1443 if (test_bit(QL_LINK_OPTICAL,&qdev->flags)) {
1444 if (ql_this_adapter_controls_port(qdev, qdev->mac_index))
1445 ql_petbi_init_ex(qdev, qdev->mac_index);
1446 } else {
1447 if (ql_this_adapter_controls_port(qdev, qdev->mac_index))
1448 ql_phy_init_ex(qdev, qdev->mac_index);
1449 }
1450}
1451
1452/*
1453 * MII_Setup needs to be called before taking the PHY out of reset so that the
1454 * management interface clock speed can be set properly. It would be better if
1455 * we had a way to disable MDC until after the PHY is out of reset, but we
1456 * don't have that capability.
1457 */
1458static int ql_mii_setup(struct ql3_adapter *qdev)
1459{
1460 u32 reg;
1461 struct ql3xxx_port_registers __iomem *port_regs =
1462 qdev->mem_map_registers;
1463
1464 if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
1465 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
1466 2) << 7))
1467 return -1;
1468
1469 /* Divide 125MHz clock by 28 to meet PHY timing requirements */
1470 reg = MAC_MII_CONTROL_CLK_SEL_DIV28;
1471
1472 ql_write_page0_reg(qdev, &port_regs->macMIIMgmtControlReg,
1473 reg | ((MAC_MII_CONTROL_CLK_SEL_MASK) << 16));
1474
1475 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
1476 return 0;
1477}
1478
1479static u32 ql_supported_modes(struct ql3_adapter *qdev)
1480{
1481 u32 supported;
1482
1483 if (test_bit(QL_LINK_OPTICAL,&qdev->flags)) {
1484 supported = SUPPORTED_1000baseT_Full | SUPPORTED_FIBRE
1485 | SUPPORTED_Autoneg;
1486 } else {
1487 supported = SUPPORTED_10baseT_Half
1488 | SUPPORTED_10baseT_Full
1489 | SUPPORTED_100baseT_Half
1490 | SUPPORTED_100baseT_Full
1491 | SUPPORTED_1000baseT_Half
1492 | SUPPORTED_1000baseT_Full
1493 | SUPPORTED_Autoneg | SUPPORTED_TP;
1494 }
1495
1496 return supported;
1497}
1498
1499static int ql_get_auto_cfg_status(struct ql3_adapter *qdev)
1500{
1501 int status;
1502 unsigned long hw_flags;
1503 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
1504 if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
1505 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
1506 2) << 7)) {
1507 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
 return 0; /* don't leak hw_lock on the failure path */
 }
1508 status = ql_is_auto_cfg(qdev);
1509 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
1510 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1511 return status;
1512}
1513
1514static u32 ql_get_speed(struct ql3_adapter *qdev)
1515{
1516 u32 status;
1517 unsigned long hw_flags;
1518 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
1519 if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
1520 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
1521 2) << 7)) {
1522 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
 return 0; /* don't leak hw_lock on the failure path */
 }
1523 status = ql_get_link_speed(qdev);
1524 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
1525 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1526 return status;
1527}
1528
1529static int ql_get_full_dup(struct ql3_adapter *qdev)
1530{
1531 int status;
1532 unsigned long hw_flags;
1533 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
1534 if (ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
1535 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
1536 2) << 7)) {
1537 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
 return 0; /* don't leak hw_lock on the failure path */
 }
1538 status = ql_is_link_full_dup(qdev);
1539 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
1540 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1541 return status;
1542}
1543
1544
1545static int ql_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
1546{
1547 struct ql3_adapter *qdev = netdev_priv(ndev);
1548
1549 ecmd->transceiver = XCVR_INTERNAL;
1550 ecmd->supported = ql_supported_modes(qdev);
1551
1552 if (test_bit(QL_LINK_OPTICAL,&qdev->flags)) {
1553 ecmd->port = PORT_FIBRE;
1554 } else {
1555 ecmd->port = PORT_TP;
1556 ecmd->phy_address = qdev->PHYAddr;
1557 }
1558 ecmd->advertising = ql_supported_modes(qdev);
1559 ecmd->autoneg = ql_get_auto_cfg_status(qdev);
1560 ecmd->speed = ql_get_speed(qdev);
1561 ecmd->duplex = ql_get_full_dup(qdev);
1562 return 0;
1563}
1564
1565static void ql_get_drvinfo(struct net_device *ndev,
1566 struct ethtool_drvinfo *drvinfo)
1567{
1568 struct ql3_adapter *qdev = netdev_priv(ndev);
1569 strncpy(drvinfo->driver, ql3xxx_driver_name, 32);
1570 strncpy(drvinfo->version, ql3xxx_driver_version, 32);
1571 strncpy(drvinfo->fw_version, "N/A", 32);
1572 strncpy(drvinfo->bus_info, pci_name(qdev->pdev), 32);
1573 drvinfo->n_stats = 0;
1574 drvinfo->testinfo_len = 0;
1575 drvinfo->regdump_len = 0;
1576 drvinfo->eedump_len = 0;
1577}
1578
1579static u32 ql_get_msglevel(struct net_device *ndev)
1580{
1581 struct ql3_adapter *qdev = netdev_priv(ndev);
1582 return qdev->msg_enable;
1583}
1584
1585static void ql_set_msglevel(struct net_device *ndev, u32 value)
1586{
1587 struct ql3_adapter *qdev = netdev_priv(ndev);
1588 qdev->msg_enable = value;
1589}
1590
1591static struct ethtool_ops ql3xxx_ethtool_ops = {
1592 .get_settings = ql_get_settings,
1593 .get_drvinfo = ql_get_drvinfo,
1594 .get_perm_addr = ethtool_op_get_perm_addr,
1595 .get_link = ethtool_op_get_link,
1596 .get_msglevel = ql_get_msglevel,
1597 .set_msglevel = ql_set_msglevel,
1598};
1599
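/*
 * Walk the free list and attach an skb to any large-buffer entry that
 * is still missing one. Returns 1 once lrg_buf_skb_check reaches zero,
 * 0 if an allocation fails before then.
 */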
1600static int ql_populate_free_queue(struct ql3_adapter *qdev)
1601{
1602 struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head;
1603 u64 map;
1604
1605 while (lrg_buf_cb) {
1606 if (!lrg_buf_cb->skb) {
1607 lrg_buf_cb->skb = dev_alloc_skb(qdev->lrg_buffer_len);
1608 if (unlikely(!lrg_buf_cb->skb)) {
1609 printk(KERN_DEBUG PFX
1610 "%s: Failed dev_alloc_skb().\n",
1611 qdev->ndev->name);
1612 break;
1613 } else {
1614 /*
1615 * We save some space to copy the ethhdr from
1616 * first buffer
1617 */
1618 skb_reserve(lrg_buf_cb->skb, QL_HEADER_SPACE);
1619 map = pci_map_single(qdev->pdev,
1620 lrg_buf_cb->skb->data,
1621 qdev->lrg_buffer_len -
1622 QL_HEADER_SPACE,
1623 PCI_DMA_FROMDEVICE);
1624 lrg_buf_cb->buf_phy_addr_low =
1625 cpu_to_le32(LS_64BITS(map));
1626 lrg_buf_cb->buf_phy_addr_high =
1627 cpu_to_le32(MS_64BITS(map));
1628 pci_unmap_addr_set(lrg_buf_cb, mapaddr, map);
1629 pci_unmap_len_set(lrg_buf_cb, maplen,
1630 qdev->lrg_buffer_len -
1631 QL_HEADER_SPACE);
1632 --qdev->lrg_buf_skb_check;
1633 if (!qdev->lrg_buf_skb_check)
1634 return 1;
1635 }
1636 }
1637 lrg_buf_cb = lrg_buf_cb->next;
1638 }
1639 return 0;
1640}
1641
1642/*
1643 * Caller holds hw_lock.
1644 */
1645static void ql_update_lrg_bufq_prod_index(struct ql3_adapter *qdev)
1646{
1647 struct bufq_addr_element *lrg_buf_q_ele;
1648 int i;
1649 struct ql_rcv_buf_cb *lrg_buf_cb;
1650 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
1651
1652 if ((qdev->lrg_buf_free_count >= 8)
1653 && (qdev->lrg_buf_release_cnt >= 16)) {
1654
1655 if (qdev->lrg_buf_skb_check)
1656 if (!ql_populate_free_queue(qdev))
1657 return;
1658
1659 lrg_buf_q_ele = qdev->lrg_buf_next_free;
1660
1661 while ((qdev->lrg_buf_release_cnt >= 16)
1662 && (qdev->lrg_buf_free_count >= 8)) {
1663
1664 for (i = 0; i < 8; i++) {
1665 lrg_buf_cb =
1666 ql_get_from_lrg_buf_free_list(qdev);
1667 lrg_buf_q_ele->addr_high =
1668 lrg_buf_cb->buf_phy_addr_high;
1669 lrg_buf_q_ele->addr_low =
1670 lrg_buf_cb->buf_phy_addr_low;
1671 lrg_buf_q_ele++;
1672
1673 qdev->lrg_buf_release_cnt--;
1674 }
1675
1676 qdev->lrg_buf_q_producer_index++;
1677
1678 if (qdev->lrg_buf_q_producer_index == NUM_LBUFQ_ENTRIES)
1679 qdev->lrg_buf_q_producer_index = 0;
1680
1681 if (qdev->lrg_buf_q_producer_index ==
1682 (NUM_LBUFQ_ENTRIES - 1)) {
1683 lrg_buf_q_ele = qdev->lrg_buf_q_virt_addr;
1684 }
1685 }
1686
1687 qdev->lrg_buf_next_free = lrg_buf_q_ele;
1688
1689 ql_write_common_reg(qdev,
1690 (u32 *) & port_regs->CommonRegs.
1691 rxLargeQProducerIndex,
1692 qdev->lrg_buf_q_producer_index);
1693 }
1694}
1695
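/*
 * Reclaim a completed transmit: update the stats, unmap and free the
 * skb, and return the slot to the free count.
 */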
1696static void ql_process_mac_tx_intr(struct ql3_adapter *qdev,
1697 struct ob_mac_iocb_rsp *mac_rsp)
1698{
1699 struct ql_tx_buf_cb *tx_cb;
1700
1701 tx_cb = &qdev->tx_buf[mac_rsp->transaction_id];
1702 pci_unmap_single(qdev->pdev,
1703 pci_unmap_addr(tx_cb, mapaddr),
1704 pci_unmap_len(tx_cb, maplen), PCI_DMA_TODEVICE);
1705 qdev->stats.tx_packets++;
1706 qdev->stats.tx_bytes += tx_cb->skb->len;
1707 dev_kfree_skb_irq(tx_cb->skb); /* read skb->len before freeing */
1708 tx_cb->skb = NULL;
1709 atomic_inc(&qdev->tx_count);
1710}
1711
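/*
 * An inbound completion references two large buffers through the
 * address list in a small buffer; only the second one carries the
 * frame that is passed up the stack, and both are then recycled.
 */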
1712static void ql_process_mac_rx_intr(struct ql3_adapter *qdev,
1713 struct ib_mac_iocb_rsp *ib_mac_rsp_ptr)
1714{
1715 long int offset;
1716 u32 lrg_buf_phy_addr_low = 0;
1717 struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL;
1718 struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL;
1719 u32 *curr_ial_ptr;
1720 struct sk_buff *skb;
1721 u16 length = le16_to_cpu(ib_mac_rsp_ptr->length);
1722
1723 /*
1724 * Get the inbound address list (small buffer).
1725 */
1726 offset = qdev->small_buf_index * QL_SMALL_BUFFER_SIZE;
1727 if (++qdev->small_buf_index == NUM_SMALL_BUFFERS)
1728 qdev->small_buf_index = 0;
1729
1730 curr_ial_ptr = (u32 *) (qdev->small_buf_virt_addr + offset);
1731 qdev->last_rsp_offset = qdev->small_buf_phy_addr_low + offset;
1732 qdev->small_buf_release_cnt++;
1733
1734 /* start of first buffer */
1735 lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr);
1736 lrg_buf_cb1 = &qdev->lrg_buf[qdev->lrg_buf_index];
1737 qdev->lrg_buf_release_cnt++;
1738 if (++qdev->lrg_buf_index == NUM_LARGE_BUFFERS)
1739 qdev->lrg_buf_index = 0;
1740 curr_ial_ptr++; /* 64-bit pointers require two incs. */
1741 curr_ial_ptr++;
1742
1743 /* start of second buffer */
1744 lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr);
1745 lrg_buf_cb2 = &qdev->lrg_buf[qdev->lrg_buf_index];
1746
1747 /*
1748 * Second buffer gets sent up the stack.
1749 */
1750 qdev->lrg_buf_release_cnt++;
1751 if (++qdev->lrg_buf_index == NUM_LARGE_BUFFERS)
1752 qdev->lrg_buf_index = 0;
1753 skb = lrg_buf_cb2->skb;
1754
1755 qdev->stats.rx_packets++;
1756 qdev->stats.rx_bytes += length;
1757
1758 skb_put(skb, length);
1759 pci_unmap_single(qdev->pdev,
1760 pci_unmap_addr(lrg_buf_cb2, mapaddr),
1761 pci_unmap_len(lrg_buf_cb2, maplen),
1762 PCI_DMA_FROMDEVICE);
1763 prefetch(skb->data);
1764 skb->dev = qdev->ndev;
1765 skb->ip_summed = CHECKSUM_NONE;
1766 skb->protocol = eth_type_trans(skb, qdev->ndev);
1767
1768 netif_receive_skb(skb);
1769 qdev->ndev->last_rx = jiffies;
1770 lrg_buf_cb2->skb = NULL;
1771
1772 ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
1773 ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2);
1774}
1775
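/*
 * IP-assist completion: the hardware splits the Ethernet header into
 * the first large buffer, so it is copied back in front of the payload
 * in the second buffer before the frame is passed up.
 */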
1776static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
1777 struct ib_ip_iocb_rsp *ib_ip_rsp_ptr)
1778{
1779 long int offset;
1780 u32 lrg_buf_phy_addr_low = 0;
1781 struct ql_rcv_buf_cb *lrg_buf_cb1 = NULL;
1782 struct ql_rcv_buf_cb *lrg_buf_cb2 = NULL;
1783 u32 *curr_ial_ptr;
1784 struct sk_buff *skb1, *skb2;
1785 struct net_device *ndev = qdev->ndev;
1786 u16 length = le16_to_cpu(ib_ip_rsp_ptr->length);
1787 u16 size = 0;
1788
1789 /*
1790 * Get the inbound address list (small buffer).
1791 */
1792
1793 offset = qdev->small_buf_index * QL_SMALL_BUFFER_SIZE;
1794 if (++qdev->small_buf_index == NUM_SMALL_BUFFERS)
1795 qdev->small_buf_index = 0;
1796 curr_ial_ptr = (u32 *) (qdev->small_buf_virt_addr + offset);
1797 qdev->last_rsp_offset = qdev->small_buf_phy_addr_low + offset;
1798 qdev->small_buf_release_cnt++;
1799
1800 /* start of first buffer */
1801 lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr);
1802 lrg_buf_cb1 = &qdev->lrg_buf[qdev->lrg_buf_index];
1803
1804 qdev->lrg_buf_release_cnt++;
1805 if (++qdev->lrg_buf_index == NUM_LARGE_BUFFERS)
1806 qdev->lrg_buf_index = 0;
1807 skb1 = lrg_buf_cb1->skb;
1808 curr_ial_ptr++; /* 64-bit pointers require two incs. */
1809 curr_ial_ptr++;
1810
1811 /* start of second buffer */
1812 lrg_buf_phy_addr_low = le32_to_cpu(*curr_ial_ptr);
1813 lrg_buf_cb2 = &qdev->lrg_buf[qdev->lrg_buf_index];
1814 skb2 = lrg_buf_cb2->skb;
1815 qdev->lrg_buf_release_cnt++;
1816 if (++qdev->lrg_buf_index == NUM_LARGE_BUFFERS)
1817 qdev->lrg_buf_index = 0;
1818
1819 qdev->stats.rx_packets++;
1820 qdev->stats.rx_bytes += length;
1821
1822 /*
1823 * Copy the ethhdr from first buffer to second. This
1824 * is necessary for IP completions.
1825 */
1826 if (*((u16 *) skb1->data) != 0xFFFF)
1827 size = VLAN_ETH_HLEN;
1828 else
1829 size = ETH_HLEN;
1830
1831 skb_put(skb2, length); /* Just the second buffer length here. */
1832 pci_unmap_single(qdev->pdev,
1833 pci_unmap_addr(lrg_buf_cb2, mapaddr),
1834 pci_unmap_len(lrg_buf_cb2, maplen),
1835 PCI_DMA_FROMDEVICE);
1836 prefetch(skb2->data);
1837
1838 memcpy(skb_push(skb2, size), skb1->data + VLAN_ID_LEN, size);
1839 skb2->dev = qdev->ndev;
1840 skb2->ip_summed = CHECKSUM_NONE;
1841 skb2->protocol = eth_type_trans(skb2, qdev->ndev);
1842
1843 netif_receive_skb(skb2);
1844 ndev->last_rx = jiffies;
1845 lrg_buf_cb2->skb = NULL;
1846
1847 ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb1);
1848 ql_release_to_lrg_buf_free_list(qdev, lrg_buf_cb2);
1849}
1850
1851static int ql_tx_rx_clean(struct ql3_adapter *qdev,
1852 int *tx_cleaned, int *rx_cleaned, int work_to_do)
1853{
1854 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
1855 struct net_rsp_iocb *net_rsp;
1856 struct net_device *ndev = qdev->ndev;
1857 unsigned long hw_flags;
1858
1859 /* While there are entries in the completion queue. */
1860 while ((cpu_to_le32(*(qdev->prsp_producer_index)) !=
1861 qdev->rsp_consumer_index) && (*rx_cleaned < work_to_do)) {
1862
1863 net_rsp = qdev->rsp_current;
1864 switch (net_rsp->opcode) {
1865
1866 case OPCODE_OB_MAC_IOCB_FN0:
1867 case OPCODE_OB_MAC_IOCB_FN2:
1868 ql_process_mac_tx_intr(qdev, (struct ob_mac_iocb_rsp *)
1869 net_rsp);
1870 (*tx_cleaned)++;
1871 break;
1872
1873 case OPCODE_IB_MAC_IOCB:
1874 ql_process_mac_rx_intr(qdev, (struct ib_mac_iocb_rsp *)
1875 net_rsp);
1876 (*rx_cleaned)++;
1877 break;
1878
1879 case OPCODE_IB_IP_IOCB:
1880 ql_process_macip_rx_intr(qdev, (struct ib_ip_iocb_rsp *)
1881 net_rsp);
1882 (*rx_cleaned)++;
1883 break;
1884 default:
1885 {
1886 u32 *tmp = (u32 *) net_rsp;
1887 printk(KERN_ERR PFX
1888 "%s: Hit default case, not "
1889 "handled!\n"
1890 " dropping the packet, opcode = "
1891 "%x.\n",
1892 ndev->name, net_rsp->opcode);
1893 printk(KERN_ERR PFX
1894 "0x%08lx 0x%08lx 0x%08lx 0x%08lx \n",
1895 (unsigned long int)tmp[0],
1896 (unsigned long int)tmp[1],
1897 (unsigned long int)tmp[2],
1898 (unsigned long int)tmp[3]);
1899 }
1900 }
1901
1902 qdev->rsp_consumer_index++;
1903
1904 if (qdev->rsp_consumer_index == NUM_RSP_Q_ENTRIES) {
1905 qdev->rsp_consumer_index = 0;
1906 qdev->rsp_current = qdev->rsp_q_virt_addr;
1907 } else {
1908 qdev->rsp_current++;
1909 }
1910 }
1911
1912 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
1913
1914 ql_update_lrg_bufq_prod_index(qdev);
1915
1916 if (qdev->small_buf_release_cnt >= 16) {
1917 while (qdev->small_buf_release_cnt >= 16) {
1918 qdev->small_buf_q_producer_index++;
1919
1920 if (qdev->small_buf_q_producer_index ==
1921 NUM_SBUFQ_ENTRIES)
1922 qdev->small_buf_q_producer_index = 0;
1923 qdev->small_buf_release_cnt -= 8;
1924 }
1925
1926 ql_write_common_reg(qdev,
1927 (u32 *) & port_regs->CommonRegs.
1928 rxSmallQProducerIndex,
1929 qdev->small_buf_q_producer_index);
1930 }
1931
1932 ql_write_common_reg(qdev,
1933 (u32 *) & port_regs->CommonRegs.rspQConsumerIndex,
1934 qdev->rsp_consumer_index);
1935 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
1936
1937 if (unlikely(netif_queue_stopped(qdev->ndev))) {
1938 if (netif_queue_stopped(qdev->ndev) &&
1939 (atomic_read(&qdev->tx_count) > (NUM_REQ_Q_ENTRIES / 4)))
1940 netif_wake_queue(qdev->ndev);
1941 }
1942
1943 return *tx_cleaned + *rx_cleaned;
1944}
1945
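/*
 * NAPI poll routine: service completions up to the allotted quota and
 * re-enable interrupts once the queues are drained.
 */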
1946static int ql_poll(struct net_device *ndev, int *budget)
1947{
1948 struct ql3_adapter *qdev = netdev_priv(ndev);
1949 int work_to_do = min(*budget, ndev->quota);
1950 int rx_cleaned = 0, tx_cleaned = 0;
1951
1952 if (!netif_carrier_ok(ndev))
1953 goto quit_polling;
1954
1955 ql_tx_rx_clean(qdev, &tx_cleaned, &rx_cleaned, work_to_do);
1956 *budget -= rx_cleaned;
1957 ndev->quota -= rx_cleaned;
1958
1959 if ((!tx_cleaned && !rx_cleaned) || !netif_running(ndev)) {
1960quit_polling:
1961 netif_rx_complete(ndev);
1962 ql_enable_interrupts(qdev);
1963 return 0;
1964 }
1965 return 1;
1966}
1967
1968static irqreturn_t ql3xxx_isr(int irq, void *dev_id, struct pt_regs *regs)
1969{
1970
1971 struct net_device *ndev = dev_id;
1972 struct ql3_adapter *qdev = netdev_priv(ndev);
1973 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
1974 u32 value;
1975 int handled = 1;
1976 u32 var;
1977
1980 value =
1981 ql_read_common_reg_l(qdev, &port_regs->CommonRegs.ispControlStatus);
1982
1983 if (value & (ISP_CONTROL_FE | ISP_CONTROL_RI)) {
1984 spin_lock(&qdev->adapter_lock);
1985 netif_stop_queue(qdev->ndev);
1986 netif_carrier_off(qdev->ndev);
1987 ql_disable_interrupts(qdev);
1988 qdev->port_link_state = LS_DOWN;
1989 set_bit(QL_RESET_ACTIVE, &qdev->flags);
1990
1991 if (value & ISP_CONTROL_FE) {
1992 /*
1993 * Chip Fatal Error.
1994 */
1995 var =
1996 ql_read_page0_reg_l(qdev,
1997 &port_regs->PortFatalErrStatus);
1998 printk(KERN_WARNING PFX
1999 "%s: Resetting chip. PortFatalErrStatus "
2000 "register = 0x%x\n", ndev->name, var);
2001 set_bit(QL_RESET_START, &qdev->flags);
2002 } else {
2003 /*
2004 * Soft Reset Requested.
2005 */
2006 set_bit(QL_RESET_PER_SCSI, &qdev->flags);
2007 printk(KERN_ERR PFX
2008 "%s: Another function issued a reset to the "
2009 "chip. ISR value = %x.\n", ndev->name, value);
2010 }
2011 queue_work(qdev->workqueue, &qdev->reset_work);
2012 spin_unlock(&qdev->adapter_lock);
2013 } else if (value & ISP_IMR_DISABLE_CMPL_INT) {
2014 ql_disable_interrupts(qdev);
2015 if (likely(netif_rx_schedule_prep(ndev)))
2016 __netif_rx_schedule(ndev);
2017 else
2018 ql_enable_interrupts(qdev);
2019 } else {
2020 return IRQ_NONE;
2021 }
2022
2023 return IRQ_RETVAL(handled);
2024}
2025
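/*
 * Hard-start transmit: claim the next request-queue slot, build a
 * single-fragment MAC IOCB for the skb, DMA-map it, and ring the
 * request-queue producer index.
 */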
2026static int ql3xxx_send(struct sk_buff *skb, struct net_device *ndev)
2027{
2028 struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
2029 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
2030 struct ql_tx_buf_cb *tx_cb;
2031 struct ob_mac_iocb_req *mac_iocb_ptr;
2032 u64 map;
2033
2034 if (unlikely(atomic_read(&qdev->tx_count) < 2)) {
2035 if (!netif_queue_stopped(ndev))
2036 netif_stop_queue(ndev);
2037 return NETDEV_TX_BUSY;
2038 }
2039 tx_cb = &qdev->tx_buf[qdev->req_producer_index] ;
2040 mac_iocb_ptr = tx_cb->queue_entry;
2041 memset((void *)mac_iocb_ptr, 0, sizeof(struct ob_mac_iocb_req));
2042 mac_iocb_ptr->opcode = qdev->mac_ob_opcode;
2043 mac_iocb_ptr->flags |= qdev->mb_bit_mask;
2044 mac_iocb_ptr->transaction_id = qdev->req_producer_index;
2045 mac_iocb_ptr->data_len = cpu_to_le16((u16) skb->len);
2046 tx_cb->skb = skb;
2047 map = pci_map_single(qdev->pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
2048 mac_iocb_ptr->buf_addr0_low = cpu_to_le32(LS_64BITS(map));
2049 mac_iocb_ptr->buf_addr0_high = cpu_to_le32(MS_64BITS(map));
2050 mac_iocb_ptr->buf_0_len = cpu_to_le32(skb->len | OB_MAC_IOCB_REQ_E);
2051 pci_unmap_addr_set(tx_cb, mapaddr, map);
2052 pci_unmap_len_set(tx_cb, maplen, skb->len);
2053 atomic_dec(&qdev->tx_count);
2054
2055 qdev->req_producer_index++;
2056 if (qdev->req_producer_index == NUM_REQ_Q_ENTRIES)
2057 qdev->req_producer_index = 0;
2058 wmb();
2059 ql_write_common_reg_l(qdev,
2060 (u32 *) & port_regs->CommonRegs.reqQProducerIndex,
2061 qdev->req_producer_index);
2062
2063 ndev->trans_start = jiffies;
2064 if (netif_msg_tx_queued(qdev))
2065 printk(KERN_DEBUG PFX "%s: tx queued, slot %d, len %d\n",
2066 ndev->name, qdev->req_producer_index, skb->len);
2067
2068 return NETDEV_TX_OK;
2069}
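
/*
 * The request and response rings must be aligned to their own size;
 * the LS_64BITS() & (size - 1) checks below treat a misaligned
 * allocation as a failure.
 */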
2070static int ql_alloc_net_req_rsp_queues(struct ql3_adapter *qdev)
2071{
2072 qdev->req_q_size =
2073 (u32) (NUM_REQ_Q_ENTRIES * sizeof(struct ob_mac_iocb_req));
2074
2075 qdev->req_q_virt_addr =
2076 pci_alloc_consistent(qdev->pdev,
2077 (size_t) qdev->req_q_size,
2078 &qdev->req_q_phy_addr);
2079
2080 if ((qdev->req_q_virt_addr == NULL) ||
2081 LS_64BITS(qdev->req_q_phy_addr) & (qdev->req_q_size - 1)) {
2082 printk(KERN_ERR PFX "%s: reqQ failed.\n",
2083 qdev->ndev->name);
2084 return -ENOMEM;
2085 }
2086
2087 qdev->rsp_q_size = NUM_RSP_Q_ENTRIES * sizeof(struct net_rsp_iocb);
2088
2089 qdev->rsp_q_virt_addr =
2090 pci_alloc_consistent(qdev->pdev,
2091 (size_t) qdev->rsp_q_size,
2092 &qdev->rsp_q_phy_addr);
2093
2094 if ((qdev->rsp_q_virt_addr == NULL) ||
2095 LS_64BITS(qdev->rsp_q_phy_addr) & (qdev->rsp_q_size - 1)) {
2096 printk(KERN_ERR PFX
2097 "%s: rspQ allocation failed\n",
2098 qdev->ndev->name);
2099 pci_free_consistent(qdev->pdev, (size_t) qdev->req_q_size,
2100 qdev->req_q_virt_addr,
2101 qdev->req_q_phy_addr);
2102 return -ENOMEM;
2103 }
2104
2105 set_bit(QL_ALLOC_REQ_RSP_Q_DONE,&qdev->flags);
2106
2107 return 0;
2108}
2109
2110static void ql_free_net_req_rsp_queues(struct ql3_adapter *qdev)
2111{
2112 if (!test_bit(QL_ALLOC_REQ_RSP_Q_DONE,&qdev->flags)) {
2113 printk(KERN_INFO PFX
2114 "%s: Already done.\n", qdev->ndev->name);
2115 return;
2116 }
2117
2118 pci_free_consistent(qdev->pdev,
2119 qdev->req_q_size,
2120 qdev->req_q_virt_addr, qdev->req_q_phy_addr);
2121
2122 qdev->req_q_virt_addr = NULL;
2123
2124 pci_free_consistent(qdev->pdev,
2125 qdev->rsp_q_size,
2126 qdev->rsp_q_virt_addr, qdev->rsp_q_phy_addr);
2127
2128 qdev->rsp_q_virt_addr = NULL;
2129
2130 clear_bit(QL_ALLOC_REQ_RSP_Q_DONE,&qdev->flags);
2131}
2132
2133static int ql_alloc_buffer_queues(struct ql3_adapter *qdev)
2134{
2135 /* Create Large Buffer Queue */
2136 qdev->lrg_buf_q_size =
2137 NUM_LBUFQ_ENTRIES * sizeof(struct lrg_buf_q_entry);
2138 if (qdev->lrg_buf_q_size < PAGE_SIZE)
2139 qdev->lrg_buf_q_alloc_size = PAGE_SIZE;
2140 else
2141 qdev->lrg_buf_q_alloc_size = qdev->lrg_buf_q_size * 2;
2142
2143 qdev->lrg_buf_q_alloc_virt_addr =
2144 pci_alloc_consistent(qdev->pdev,
2145 qdev->lrg_buf_q_alloc_size,
2146 &qdev->lrg_buf_q_alloc_phy_addr);
2147
2148 if (qdev->lrg_buf_q_alloc_virt_addr == NULL) {
2149 printk(KERN_ERR PFX
2150 "%s: lBufQ failed\n", qdev->ndev->name);
2151 return -ENOMEM;
2152 }
2153 qdev->lrg_buf_q_virt_addr = qdev->lrg_buf_q_alloc_virt_addr;
2154 qdev->lrg_buf_q_phy_addr = qdev->lrg_buf_q_alloc_phy_addr;
2155
2156 /* Create Small Buffer Queue */
2157 qdev->small_buf_q_size =
2158 NUM_SBUFQ_ENTRIES * sizeof(struct lrg_buf_q_entry);
2159 if (qdev->small_buf_q_size < PAGE_SIZE)
2160 qdev->small_buf_q_alloc_size = PAGE_SIZE;
2161 else
2162 qdev->small_buf_q_alloc_size = qdev->small_buf_q_size * 2;
2163
2164 qdev->small_buf_q_alloc_virt_addr =
2165 pci_alloc_consistent(qdev->pdev,
2166 qdev->small_buf_q_alloc_size,
2167 &qdev->small_buf_q_alloc_phy_addr);
2168
2169 if (qdev->small_buf_q_alloc_virt_addr == NULL) {
2170 printk(KERN_ERR PFX
2171 "%s: Small Buffer Queue allocation failed.\n",
2172 qdev->ndev->name);
2173 pci_free_consistent(qdev->pdev, qdev->lrg_buf_q_alloc_size,
2174 qdev->lrg_buf_q_alloc_virt_addr,
2175 qdev->lrg_buf_q_alloc_phy_addr);
2176 return -ENOMEM;
2177 }
2178
2179 qdev->small_buf_q_virt_addr = qdev->small_buf_q_alloc_virt_addr;
2180 qdev->small_buf_q_phy_addr = qdev->small_buf_q_alloc_phy_addr;
2181 set_bit(QL_ALLOC_BUFQS_DONE,&qdev->flags);
2182 return 0;
2183}
2184
2185static void ql_free_buffer_queues(struct ql3_adapter *qdev)
2186{
2187 if (!test_bit(QL_ALLOC_BUFQS_DONE,&qdev->flags)) {
2188 printk(KERN_INFO PFX
2189 "%s: Already done.\n", qdev->ndev->name);
2190 return;
2191 }
2192
2193 pci_free_consistent(qdev->pdev,
2194 qdev->lrg_buf_q_alloc_size,
2195 qdev->lrg_buf_q_alloc_virt_addr,
2196 qdev->lrg_buf_q_alloc_phy_addr);
2197
2198 qdev->lrg_buf_q_virt_addr = NULL;
2199
2200 pci_free_consistent(qdev->pdev,
2201 qdev->small_buf_q_alloc_size,
2202 qdev->small_buf_q_alloc_virt_addr,
2203 qdev->small_buf_q_alloc_phy_addr);
2204
2205 qdev->small_buf_q_virt_addr = NULL;
2206
2207 clear_bit(QL_ALLOC_BUFQS_DONE,&qdev->flags);
2208}
2209
2210static int ql_alloc_small_buffers(struct ql3_adapter *qdev)
2211{
2212 int i;
2213 struct bufq_addr_element *small_buf_q_entry;
2214
2215 /* Currently we allocate one chunk of memory and use it for small buffers. */
2216 qdev->small_buf_total_size =
2217 (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES *
2218 QL_SMALL_BUFFER_SIZE);
2219
2220 qdev->small_buf_virt_addr =
2221 pci_alloc_consistent(qdev->pdev,
2222 qdev->small_buf_total_size,
2223 &qdev->small_buf_phy_addr);
2224
2225 if (qdev->small_buf_virt_addr == NULL) {
2226 printk(KERN_ERR PFX
2227 "%s: Failed to get small buffer memory.\n",
2228 qdev->ndev->name);
2229 return -ENOMEM;
2230 }
2231
2232 qdev->small_buf_phy_addr_low = LS_64BITS(qdev->small_buf_phy_addr);
2233 qdev->small_buf_phy_addr_high = MS_64BITS(qdev->small_buf_phy_addr);
2234
2235 small_buf_q_entry = qdev->small_buf_q_virt_addr;
2236
2237 qdev->last_rsp_offset = qdev->small_buf_phy_addr_low;
2238
2239 /* Initialize the small buffer queue. */
2240 for (i = 0; i < (QL_ADDR_ELE_PER_BUFQ_ENTRY * NUM_SBUFQ_ENTRIES); i++) {
2241 small_buf_q_entry->addr_high =
2242 cpu_to_le32(qdev->small_buf_phy_addr_high);
2243 small_buf_q_entry->addr_low =
2244 cpu_to_le32(qdev->small_buf_phy_addr_low +
2245 (i * QL_SMALL_BUFFER_SIZE));
2246 small_buf_q_entry++;
2247 }
2248 qdev->small_buf_index = 0;
2249 set_bit(QL_ALLOC_SMALL_BUF_DONE,&qdev->flags);
2250 return 0;
2251}
2252
2253static void ql_free_small_buffers(struct ql3_adapter *qdev)
2254{
2255 if (!test_bit(QL_ALLOC_SMALL_BUF_DONE,&qdev->flags)) {
2256 printk(KERN_INFO PFX
2257 "%s: Already done.\n", qdev->ndev->name);
2258 return;
2259 }
2260 if (qdev->small_buf_virt_addr != NULL) {
2261 pci_free_consistent(qdev->pdev,
2262 qdev->small_buf_total_size,
2263 qdev->small_buf_virt_addr,
2264 qdev->small_buf_phy_addr);
2265
2266 qdev->small_buf_virt_addr = NULL;
2267 }
2268}
2269
2270static void ql_free_large_buffers(struct ql3_adapter *qdev)
2271{
2272 int i = 0;
2273 struct ql_rcv_buf_cb *lrg_buf_cb;
2274
2275 for (i = 0; i < NUM_LARGE_BUFFERS; i++) {
2276 lrg_buf_cb = &qdev->lrg_buf[i];
2277 if (lrg_buf_cb->skb) {
2278 dev_kfree_skb(lrg_buf_cb->skb);
2279 pci_unmap_single(qdev->pdev,
2280 pci_unmap_addr(lrg_buf_cb, mapaddr),
2281 pci_unmap_len(lrg_buf_cb, maplen),
2282 PCI_DMA_FROMDEVICE);
2283 memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb));
2284 } else {
2285 break;
2286 }
2287 }
2288}
2289
2290static void ql_init_large_buffers(struct ql3_adapter *qdev)
2291{
2292 int i;
2293 struct ql_rcv_buf_cb *lrg_buf_cb;
2294 struct bufq_addr_element *buf_addr_ele = qdev->lrg_buf_q_virt_addr;
2295
2296 for (i = 0; i < NUM_LARGE_BUFFERS; i++) {
2297 lrg_buf_cb = &qdev->lrg_buf[i];
2298 buf_addr_ele->addr_high = lrg_buf_cb->buf_phy_addr_high;
2299 buf_addr_ele->addr_low = lrg_buf_cb->buf_phy_addr_low;
2300 buf_addr_ele++;
2301 }
2302 qdev->lrg_buf_index = 0;
2303 qdev->lrg_buf_skb_check = 0;
2304}
2305
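/*
 * Allocate and DMA-map one receive skb per large-buffer entry,
 * recording each bus address as the high/low halves the hardware
 * expects in the buffer queue.
 */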
2306static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
2307{
2308 int i;
2309 struct ql_rcv_buf_cb *lrg_buf_cb;
2310 struct sk_buff *skb;
2311 u64 map;
2312
2313 for (i = 0; i < NUM_LARGE_BUFFERS; i++) {
2314 skb = dev_alloc_skb(qdev->lrg_buffer_len);
2315 if (unlikely(!skb)) {
2316 /* Better luck next round */
2317 printk(KERN_ERR PFX
2318 "%s: large buff alloc failed, "
2319 "for %d bytes at index %d.\n",
2320 qdev->ndev->name,
2321 qdev->lrg_buffer_len * 2, i);
2322 ql_free_large_buffers(qdev);
2323 return -ENOMEM;
2324 } else {
2325
2326 lrg_buf_cb = &qdev->lrg_buf[i];
2327 memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb));
2328 lrg_buf_cb->index = i;
2329 lrg_buf_cb->skb = skb;
2330 /*
2331 * We save some space to copy the ethhdr from first
2332 * buffer
2333 */
2334 skb_reserve(skb, QL_HEADER_SPACE);
2335 map = pci_map_single(qdev->pdev,
2336 skb->data,
2337 qdev->lrg_buffer_len -
2338 QL_HEADER_SPACE,
2339 PCI_DMA_FROMDEVICE);
2340 pci_unmap_addr_set(lrg_buf_cb, mapaddr, map);
2341 pci_unmap_len_set(lrg_buf_cb, maplen,
2342 qdev->lrg_buffer_len -
2343 QL_HEADER_SPACE);
2344 lrg_buf_cb->buf_phy_addr_low =
2345 cpu_to_le32(LS_64BITS(map));
2346 lrg_buf_cb->buf_phy_addr_high =
2347 cpu_to_le32(MS_64BITS(map));
2348 }
2349 }
2350 return 0;
2351}
2352
2353static void ql_create_send_free_list(struct ql3_adapter *qdev)
2354{
2355 struct ql_tx_buf_cb *tx_cb;
2356 int i;
2357 struct ob_mac_iocb_req *req_q_curr =
2358 qdev->req_q_virt_addr;
2359
2360 /* Create free list of transmit buffers */
2361 for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
2362 tx_cb = &qdev->tx_buf[i];
2363 tx_cb->skb = NULL;
2364 tx_cb->queue_entry = req_q_curr;
2365 req_q_curr++;
2366 }
2367}
2368
2369static int ql_alloc_mem_resources(struct ql3_adapter *qdev)
2370{
2371 if (qdev->ndev->mtu == NORMAL_MTU_SIZE)
2372 qdev->lrg_buffer_len = NORMAL_MTU_SIZE;
2373 else if (qdev->ndev->mtu == JUMBO_MTU_SIZE) {
2374 qdev->lrg_buffer_len = JUMBO_MTU_SIZE;
2375 } else {
2376 printk(KERN_ERR PFX
2377 "%s: Invalid mtu size. Only 1500 and 9000 are accepted.\n",
2378 qdev->ndev->name);
2379 return -ENOMEM;
2380 }
2381 qdev->lrg_buffer_len += VLAN_ETH_HLEN + VLAN_ID_LEN + QL_HEADER_SPACE;
2382 qdev->max_frame_size =
2383 (qdev->lrg_buffer_len - QL_HEADER_SPACE) + ETHERNET_CRC_SIZE;
2384
2385 /*
2386 * First allocate a page of shared memory and use it for shadow
2387 * locations of Network Request Queue Consumer Address Register and
2388 * Network Completion Queue Producer Index Register
2389 */
2390 qdev->shadow_reg_virt_addr =
2391 pci_alloc_consistent(qdev->pdev,
2392 PAGE_SIZE, &qdev->shadow_reg_phy_addr);
2393
2394 if (qdev->shadow_reg_virt_addr != NULL) {
2395 qdev->preq_consumer_index = (u16 *) qdev->shadow_reg_virt_addr;
2396 qdev->req_consumer_index_phy_addr_high =
2397 MS_64BITS(qdev->shadow_reg_phy_addr);
2398 qdev->req_consumer_index_phy_addr_low =
2399 LS_64BITS(qdev->shadow_reg_phy_addr);
2400
2401 qdev->prsp_producer_index =
2402 (u32 *) (((u8 *) qdev->preq_consumer_index) + 8);
2403 qdev->rsp_producer_index_phy_addr_high =
2404 qdev->req_consumer_index_phy_addr_high;
2405 qdev->rsp_producer_index_phy_addr_low =
2406 qdev->req_consumer_index_phy_addr_low + 8;
2407 } else {
2408 printk(KERN_ERR PFX
2409 "%s: shadowReg Alloc failed.\n", qdev->ndev->name);
2410 return -ENOMEM;
2411 }
2412
2413 if (ql_alloc_net_req_rsp_queues(qdev) != 0) {
2414 printk(KERN_ERR PFX
2415 "%s: ql_alloc_net_req_rsp_queues failed.\n",
2416 qdev->ndev->name);
2417 goto err_req_rsp;
2418 }
2419
2420 if (ql_alloc_buffer_queues(qdev) != 0) {
2421 printk(KERN_ERR PFX
2422 "%s: ql_alloc_buffer_queues failed.\n",
2423 qdev->ndev->name);
2424 goto err_buffer_queues;
2425 }
2426
2427 if (ql_alloc_small_buffers(qdev) != 0) {
2428 printk(KERN_ERR PFX
2429 "%s: ql_alloc_small_buffers failed\n", qdev->ndev->name);
2430 goto err_small_buffers;
2431 }
2432
2433 if (ql_alloc_large_buffers(qdev) != 0) {
2434 printk(KERN_ERR PFX
2435 "%s: ql_alloc_large_buffers failed\n", qdev->ndev->name);
2436 goto err_small_buffers;
2437 }
2438
2439 /* Initialize the large buffer queue. */
2440 ql_init_large_buffers(qdev);
2441 ql_create_send_free_list(qdev);
2442
2443 qdev->rsp_current = qdev->rsp_q_virt_addr;
2444
2445 return 0;
2446
2447err_small_buffers:
2448 ql_free_buffer_queues(qdev);
2449err_buffer_queues:
2450 ql_free_net_req_rsp_queues(qdev);
2451err_req_rsp:
2452 pci_free_consistent(qdev->pdev,
2453 PAGE_SIZE,
2454 qdev->shadow_reg_virt_addr,
2455 qdev->shadow_reg_phy_addr);
2456
2457 return -ENOMEM;
2458}
2459
2460static void ql_free_mem_resources(struct ql3_adapter *qdev)
2461{
2462 ql_free_large_buffers(qdev);
2463 ql_free_small_buffers(qdev);
2464 ql_free_buffer_queues(qdev);
2465 ql_free_net_req_rsp_queues(qdev);
2466 if (qdev->shadow_reg_virt_addr != NULL) {
2467 pci_free_consistent(qdev->pdev,
2468 PAGE_SIZE,
2469 qdev->shadow_reg_virt_addr,
2470 qdev->shadow_reg_phy_addr);
2471 qdev->shadow_reg_virt_addr = NULL;
2472 }
2473}
2474
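/*
 * Program the chip's local-RAM layout (buflet pool, IP/TCP hash
 * tables, NCB and DRB tables) from the values read out of NVRAM.
 */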
2475static int ql_init_misc_registers(struct ql3_adapter *qdev)
2476{
2477 struct ql3xxx_local_ram_registers *local_ram =
2478 (struct ql3xxx_local_ram_registers *)qdev->mem_map_registers;
2479
2480 if(ql_sem_spinlock(qdev, QL_DDR_RAM_SEM_MASK,
2481 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
2482 2) << 4))
2483 return -1;
2484
2485 ql_write_page2_reg(qdev,
2486 &local_ram->bufletSize, qdev->nvram_data.bufletSize);
2487
2488 ql_write_page2_reg(qdev,
2489 &local_ram->maxBufletCount,
2490 qdev->nvram_data.bufletCount);
2491
2492 ql_write_page2_reg(qdev,
2493 &local_ram->freeBufletThresholdLow,
2494 (qdev->nvram_data.tcpWindowThreshold25 << 16) |
2495 (qdev->nvram_data.tcpWindowThreshold0));
2496
2497 ql_write_page2_reg(qdev,
2498 &local_ram->freeBufletThresholdHigh,
2499 qdev->nvram_data.tcpWindowThreshold50);
2500
2501 ql_write_page2_reg(qdev,
2502 &local_ram->ipHashTableBase,
2503 (qdev->nvram_data.ipHashTableBaseHi << 16) |
2504 qdev->nvram_data.ipHashTableBaseLo);
2505 ql_write_page2_reg(qdev,
2506 &local_ram->ipHashTableCount,
2507 qdev->nvram_data.ipHashTableSize);
2508 ql_write_page2_reg(qdev,
2509 &local_ram->tcpHashTableBase,
2510 (qdev->nvram_data.tcpHashTableBaseHi << 16) |
2511 qdev->nvram_data.tcpHashTableBaseLo);
2512 ql_write_page2_reg(qdev,
2513 &local_ram->tcpHashTableCount,
2514 qdev->nvram_data.tcpHashTableSize);
2515 ql_write_page2_reg(qdev,
2516 &local_ram->ncbBase,
2517 (qdev->nvram_data.ncbTableBaseHi << 16) |
2518 qdev->nvram_data.ncbTableBaseLo);
2519 ql_write_page2_reg(qdev,
2520 &local_ram->maxNcbCount,
2521 qdev->nvram_data.ncbTableSize);
2522 ql_write_page2_reg(qdev,
2523 &local_ram->drbBase,
2524 (qdev->nvram_data.drbTableBaseHi << 16) |
2525 qdev->nvram_data.drbTableBaseLo);
2526 ql_write_page2_reg(qdev,
2527 &local_ram->maxDrbCount,
2528 qdev->nvram_data.drbTableSize);
2529 ql_sem_unlock(qdev, QL_DDR_RAM_SEM_MASK);
2530 return 0;
2531}
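/*
 * Worked expansion of the semaphore argument above, with
 * QL_RESOURCE_BITS_BASE_CODE = 0x4 from qla3xxx.h:
 *
 *   mac_index 0: (0x4 | 0 * 2) << 4 = 0x40
 *   mac_index 1: (0x4 | 1 * 2) << 4 = 0x60
 *
 * so each function presents a distinct owner code in the 3-bit field
 * covered by QL_DDR_RAM_SEM_MASK (0x7 << 20).
 */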
2532
2533static int ql_adapter_initialize(struct ql3_adapter *qdev)
2534{
2535 u32 value;
2536 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
2537 struct ql3xxx_host_memory_registers __iomem *hmem_regs =
2538 (struct ql3xxx_host_memory_registers *)port_regs;
2539 u32 delay = 10;
2540 int status = 0;
2541
2542 if(ql_mii_setup(qdev))
2543 return -1;
2544
2545 /* Bring the PHY out of reset */
2546 ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
2547 (ISP_SERIAL_PORT_IF_WE |
2548 (ISP_SERIAL_PORT_IF_WE << 16)));
2549
2550 qdev->port_link_state = LS_DOWN;
2551 netif_carrier_off(qdev->ndev);
2552
2553 /* V2 chip fix for ARS-39168. */
2554 ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
2555 (ISP_SERIAL_PORT_IF_SDE |
2556 (ISP_SERIAL_PORT_IF_SDE << 16)));
2557
2558 /* Request Queue Registers */
2559 *((u32 *) (qdev->preq_consumer_index)) = 0;
2560 atomic_set(&qdev->tx_count,NUM_REQ_Q_ENTRIES);
2561 qdev->req_producer_index = 0;
2562
2563 ql_write_page1_reg(qdev,
2564 &hmem_regs->reqConsumerIndexAddrHigh,
2565 qdev->req_consumer_index_phy_addr_high);
2566 ql_write_page1_reg(qdev,
2567 &hmem_regs->reqConsumerIndexAddrLow,
2568 qdev->req_consumer_index_phy_addr_low);
2569
2570 ql_write_page1_reg(qdev,
2571 &hmem_regs->reqBaseAddrHigh,
2572 MS_64BITS(qdev->req_q_phy_addr));
2573 ql_write_page1_reg(qdev,
2574 &hmem_regs->reqBaseAddrLow,
2575 LS_64BITS(qdev->req_q_phy_addr));
2576 ql_write_page1_reg(qdev, &hmem_regs->reqLength, NUM_REQ_Q_ENTRIES);
2577
2578 /* Response Queue Registers */
2579 *((u16 *) (qdev->prsp_producer_index)) = 0;
2580 qdev->rsp_consumer_index = 0;
2581 qdev->rsp_current = qdev->rsp_q_virt_addr;
2582
2583 ql_write_page1_reg(qdev,
2584 &hmem_regs->rspProducerIndexAddrHigh,
2585 qdev->rsp_producer_index_phy_addr_high);
2586
2587 ql_write_page1_reg(qdev,
2588 &hmem_regs->rspProducerIndexAddrLow,
2589 qdev->rsp_producer_index_phy_addr_low);
2590
2591 ql_write_page1_reg(qdev,
2592 &hmem_regs->rspBaseAddrHigh,
2593 MS_64BITS(qdev->rsp_q_phy_addr));
2594
2595 ql_write_page1_reg(qdev,
2596 &hmem_regs->rspBaseAddrLow,
2597 LS_64BITS(qdev->rsp_q_phy_addr));
2598
2599 ql_write_page1_reg(qdev, &hmem_regs->rspLength, NUM_RSP_Q_ENTRIES);
2600
2601 /* Large Buffer Queue */
2602 ql_write_page1_reg(qdev,
2603 &hmem_regs->rxLargeQBaseAddrHigh,
2604 MS_64BITS(qdev->lrg_buf_q_phy_addr));
2605
2606 ql_write_page1_reg(qdev,
2607 &hmem_regs->rxLargeQBaseAddrLow,
2608 LS_64BITS(qdev->lrg_buf_q_phy_addr));
2609
2610 ql_write_page1_reg(qdev, &hmem_regs->rxLargeQLength, NUM_LBUFQ_ENTRIES);
2611
2612 ql_write_page1_reg(qdev,
2613 &hmem_regs->rxLargeBufferLength,
2614 qdev->lrg_buffer_len);
2615
2616 /* Small Buffer Queue */
2617 ql_write_page1_reg(qdev,
2618 &hmem_regs->rxSmallQBaseAddrHigh,
2619 MS_64BITS(qdev->small_buf_q_phy_addr));
2620
2621 ql_write_page1_reg(qdev,
2622 &hmem_regs->rxSmallQBaseAddrLow,
2623 LS_64BITS(qdev->small_buf_q_phy_addr));
2624
2625 ql_write_page1_reg(qdev, &hmem_regs->rxSmallQLength, NUM_SBUFQ_ENTRIES);
2626 ql_write_page1_reg(qdev,
2627 &hmem_regs->rxSmallBufferLength,
2628 QL_SMALL_BUFFER_SIZE);
2629
2630 qdev->small_buf_q_producer_index = NUM_SBUFQ_ENTRIES - 1;
2631 qdev->small_buf_release_cnt = 8;
2632 qdev->lrg_buf_q_producer_index = NUM_LBUFQ_ENTRIES - 1;
2633 qdev->lrg_buf_release_cnt = 8;
2634 qdev->lrg_buf_next_free =
2635 (struct bufq_addr_element *)qdev->lrg_buf_q_virt_addr;
2636 qdev->small_buf_index = 0;
2637 qdev->lrg_buf_index = 0;
2638 qdev->lrg_buf_free_count = 0;
2639 qdev->lrg_buf_free_head = NULL;
2640 qdev->lrg_buf_free_tail = NULL;
2641
2642 ql_write_common_reg(qdev,
2643 (u32 *) & port_regs->CommonRegs.
2644 rxSmallQProducerIndex,
2645 qdev->small_buf_q_producer_index);
2646 ql_write_common_reg(qdev,
2647 (u32 *) & port_regs->CommonRegs.
2648 rxLargeQProducerIndex,
2649 qdev->lrg_buf_q_producer_index);
2650
2651 /*
2652 * Find out if the chip has already been initialized. If it has, then
2653 * we skip some of the initialization.
2654 */
2655 clear_bit(QL_LINK_MASTER, &qdev->flags);
2656 value = ql_read_page0_reg(qdev, &port_regs->portStatus);
2657 if ((value & PORT_STATUS_IC) == 0) {
2658
2659 /* Chip has not been configured yet, so let it rip. */
2660 if(ql_init_misc_registers(qdev)) {
2661 status = -1;
2662 goto out;
2663 }
2664
2665 if (qdev->mac_index)
2666 ql_write_page0_reg(qdev,
2667 &port_regs->mac1MaxFrameLengthReg,
2668 qdev->max_frame_size);
2669 else
2670 ql_write_page0_reg(qdev,
2671 &port_regs->mac0MaxFrameLengthReg,
2672 qdev->max_frame_size);
2673
2674 value = qdev->nvram_data.tcpMaxWindowSize;
2675 ql_write_page0_reg(qdev, &port_regs->tcpMaxWindow, value);
2676
2677 value = (0xFFFF << 16) | qdev->nvram_data.extHwConfig;
2678
2679 if(ql_sem_spinlock(qdev, QL_FLASH_SEM_MASK,
2680 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index)
2681 * 2) << 13)) {
2682 status = -1;
2683 goto out;
2684 }
2685 ql_write_page0_reg(qdev, &port_regs->ExternalHWConfig, value);
2686 ql_write_page0_reg(qdev, &port_regs->InternalChipConfig,
2687 (((INTERNAL_CHIP_SD | INTERNAL_CHIP_WE) <<
2688 16) | (INTERNAL_CHIP_SD |
2689 INTERNAL_CHIP_WE)));
2690 ql_sem_unlock(qdev, QL_FLASH_SEM_MASK);
2691 }
2692
2693
2694 if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
2695 (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
2696 2) << 7)) {
2697 status = -1;
2698 goto out;
2699 }
2700
2701 ql_init_scan_mode(qdev);
2702 ql_get_phy_owner(qdev);
2703
2704 /* Load the MAC Configuration */
2705
2706 /* Program lower 32 bits of the MAC address */
2707 ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
2708 (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16));
2709 ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
2710 ((qdev->ndev->dev_addr[2] << 24)
2711 | (qdev->ndev->dev_addr[3] << 16)
2712 | (qdev->ndev->dev_addr[4] << 8)
2713 | qdev->ndev->dev_addr[5]));
2714
2715 /* Program top 16 bits of the MAC address */
2716 ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
2717 ((MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1));
2718 ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
2719 ((qdev->ndev->dev_addr[0] << 8)
2720 | qdev->ndev->dev_addr[1]));
2721
2722 /* Enable Primary MAC */
2723 ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
2724 ((MAC_ADDR_INDIRECT_PTR_REG_PE << 16) |
2725 MAC_ADDR_INDIRECT_PTR_REG_PE));
2726
2727 /* Clear Primary and Secondary IP addresses */
2728 ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg,
2729 ((IP_ADDR_INDEX_REG_MASK << 16) |
2730 (qdev->mac_index << 2)));
2731 ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0);
2732
2733 ql_write_page0_reg(qdev, &port_regs->ipAddrIndexReg,
2734 ((IP_ADDR_INDEX_REG_MASK << 16) |
2735 ((qdev->mac_index << 2) + 1)));
2736 ql_write_page0_reg(qdev, &port_regs->ipAddrDataReg, 0);
2737
2738 ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
2739
2740 /* Indicate Configuration Complete */
2741 ql_write_page0_reg(qdev,
2742 &port_regs->portControl,
2743 ((PORT_CONTROL_CC << 16) | PORT_CONTROL_CC));
2744
2745 do {
2746 value = ql_read_page0_reg(qdev, &port_regs->portStatus);
2747 if (value & PORT_STATUS_IC)
2748 break;
2749 msleep(500);
2750 } while (--delay);
2751
2752 if (delay == 0) {
2753 printk(KERN_ERR PFX
2754 "%s: Hw Initialization timeout.\n", qdev->ndev->name);
2755 status = -1;
2756 goto out;
2757 }
2758
2759 /* Enable Ethernet Function */
2760 value =
2761 (PORT_CONTROL_EF | PORT_CONTROL_ET | PORT_CONTROL_EI |
2762 PORT_CONTROL_HH);
2763 ql_write_page0_reg(qdev, &port_regs->portControl,
2764 ((value << 16) | value));
2765
2766out:
2767 return status;
2768}
2769
2770/*
2771 * Caller holds hw_lock.
2772 */
2773static int ql_adapter_reset(struct ql3_adapter *qdev)
2774{
2775 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
2776 int status = 0;
2777 u16 value;
2778 int max_wait_time;
2779
2780 set_bit(QL_RESET_ACTIVE, &qdev->flags);
2781 clear_bit(QL_RESET_DONE, &qdev->flags);
2782
2783 /*
2784 * Issue soft reset to chip.
2785 */
2786 printk(KERN_DEBUG PFX
2787 "%s: Issue soft reset to chip.\n",
2788 qdev->ndev->name);
2789 ql_write_common_reg(qdev,
2790 (u32 *) & port_regs->CommonRegs.ispControlStatus,
2791 ((ISP_CONTROL_SR << 16) | ISP_CONTROL_SR));
2792
2793 /* Wait up to 5 seconds for the reset to complete. */
2794 printk(KERN_DEBUG PFX
2795 "%s: Waiting for reset to complete.\n",
2796 qdev->ndev->name);
2797
2798 /* Wait until the firmware tells us the Soft Reset is done */
2799 max_wait_time = 5;
2800 do {
2801 value =
2802 ql_read_common_reg(qdev,
2803 &port_regs->CommonRegs.ispControlStatus);
2804 if ((value & ISP_CONTROL_SR) == 0)
2805 break;
2806
2807 ssleep(1);
2808 } while ((--max_wait_time));
2809
2810 /*
2811 * Also, make sure that the Network Reset Interrupt bit has been
2812 * cleared after the soft reset has taken place.
2813 */
2814 value =
2815 ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus);
2816 if (value & ISP_CONTROL_RI) {
2817 printk(KERN_DEBUG PFX
2818 "ql_adapter_reset: clearing RI after reset.\n");
2819 ql_write_common_reg(qdev,
2820 (u32 *) & port_regs->CommonRegs.
2821 ispControlStatus,
2822 ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI));
2823 }
2824
2825 if (max_wait_time == 0) {
2826 /* Issue Force Soft Reset */
2827 ql_write_common_reg(qdev,
2828 (u32 *) & port_regs->CommonRegs.
2829 ispControlStatus,
2830 ((ISP_CONTROL_FSR << 16) |
2831 ISP_CONTROL_FSR));
2832 /*
2833 * Wait until the firmware tells us the Force Soft Reset is
2834 * done
2835 */
2836 max_wait_time = 5;
2837 do {
2838 value =
2839 ql_read_common_reg(qdev,
2840 &port_regs->CommonRegs.
2841 ispControlStatus);
2842 if ((value & ISP_CONTROL_FSR) == 0) {
2843 break;
2844 }
2845 ssleep(1);
2846 } while ((--max_wait_time));
2847 }
2848 if (max_wait_time == 0)
2849 status = 1;
2850
2851 clear_bit(QL_RESET_ACTIVE, &qdev->flags);
2852 set_bit(QL_RESET_DONE, &qdev->flags);
2853 return status;
2854}
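/*
 * A note on the ((bit << 16) | bit) writes above and throughout this
 * driver: the upper 16 bits of these registers evidently act as a
 * per-bit write enable, so only the named bits are modified and no
 * read-modify-write is needed.  An illustrative helper (hypothetical,
 * not part of the driver):
 */
static inline u32 ql_masked_write_sketch(u16 mask, u16 value)
{
	/* Only the bits set in 'mask' are updated; 'value' supplies them. */
	return ((u32)mask << 16) | value;
}
/* e.g. the soft reset above: ql_masked_write_sketch(ISP_CONTROL_SR, ISP_CONTROL_SR) */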
2855
2856static void ql_set_mac_info(struct ql3_adapter *qdev)
2857{
2858 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
2859 u32 value, port_status;
2860 u8 func_number;
2861
2862 /* Get the function number */
2863 value =
2864 ql_read_common_reg_l(qdev, &port_regs->CommonRegs.ispControlStatus);
2865 func_number = (u8) ((value >> 4) & OPCODE_FUNC_ID_MASK);
2866 port_status = ql_read_page0_reg(qdev, &port_regs->portStatus);
2867 switch (value & ISP_CONTROL_FN_MASK) {
2868 case ISP_CONTROL_FN0_NET:
2869 qdev->mac_index = 0;
2870 qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number;
2871 qdev->tcp_ob_opcode = OUTBOUND_TCP_IOCB | func_number;
2872 qdev->update_ob_opcode = UPDATE_NCB_IOCB | func_number;
2873 qdev->mb_bit_mask = FN0_MA_BITS_MASK;
2874 qdev->PHYAddr = PORT0_PHY_ADDRESS;
2875 if (port_status & PORT_STATUS_SM0)
2876 set_bit(QL_LINK_OPTICAL,&qdev->flags);
2877 else
2878 clear_bit(QL_LINK_OPTICAL,&qdev->flags);
2879 break;
2880
2881 case ISP_CONTROL_FN1_NET:
2882 qdev->mac_index = 1;
2883 qdev->mac_ob_opcode = OUTBOUND_MAC_IOCB | func_number;
2884 qdev->tcp_ob_opcode = OUTBOUND_TCP_IOCB | func_number;
2885 qdev->update_ob_opcode = UPDATE_NCB_IOCB | func_number;
2886 qdev->mb_bit_mask = FN1_MA_BITS_MASK;
2887 qdev->PHYAddr = PORT1_PHY_ADDRESS;
2888 if (port_status & PORT_STATUS_SM1)
2889 set_bit(QL_LINK_OPTICAL,&qdev->flags);
2890 else
2891 clear_bit(QL_LINK_OPTICAL,&qdev->flags);
2892 break;
2893
2894 case ISP_CONTROL_FN0_SCSI:
2895 case ISP_CONTROL_FN1_SCSI:
2896 default:
2897 printk(KERN_DEBUG PFX
2898 "%s: Invalid function number, ispControlStatus = 0x%x\n",
2899 qdev->ndev->name,value);
2900 break;
2901 }
2902 qdev->numPorts = qdev->nvram_data.numPorts;
2903}
2904
2905static void ql_display_dev_info(struct net_device *ndev)
2906{
2907 struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
2908 struct pci_dev *pdev = qdev->pdev;
2909
2910 printk(KERN_INFO PFX
2911 "\n%s Adapter %d RevisionID %d found on PCI slot %d.\n",
2912 DRV_NAME, qdev->index, qdev->chip_rev_id, qdev->pci_slot);
2913 printk(KERN_INFO PFX
2914 "%s Interface.\n",
2915 test_bit(QL_LINK_OPTICAL,&qdev->flags) ? "OPTICAL" : "COPPER");
2916
2917 /*
2918 * Print PCI bus width/type.
2919 */
2920 printk(KERN_INFO PFX
2921 "Bus interface is %s %s.\n",
2922 ((qdev->pci_width == 64) ? "64-bit" : "32-bit"),
2923 ((qdev->pci_x) ? "PCI-X" : "PCI"));
2924
2925 printk(KERN_INFO PFX
2926 "mem IO base address adjusted = 0x%p\n",
2927 qdev->mem_map_registers);
2928 printk(KERN_INFO PFX "Interrupt number = %d\n", pdev->irq);
2929
2930 if (netif_msg_probe(qdev))
2931 printk(KERN_INFO PFX
2932 "%s: MAC address %02x:%02x:%02x:%02x:%02x:%02x\n",
2933 ndev->name, ndev->dev_addr[0], ndev->dev_addr[1],
2934 ndev->dev_addr[2], ndev->dev_addr[3], ndev->dev_addr[4],
2935 ndev->dev_addr[5]);
2936}
2937
2938static int ql_adapter_down(struct ql3_adapter *qdev, int do_reset)
2939{
2940 struct net_device *ndev = qdev->ndev;
2941 int retval = 0;
2942
2943 netif_stop_queue(ndev);
2944 netif_carrier_off(ndev);
2945
2946 clear_bit(QL_ADAPTER_UP,&qdev->flags);
2947 clear_bit(QL_LINK_MASTER,&qdev->flags);
2948
2949 ql_disable_interrupts(qdev);
2950
2951 free_irq(qdev->pdev->irq, ndev);
2952
2953 if (qdev->msi && test_bit(QL_MSI_ENABLED,&qdev->flags)) {
2954 printk(KERN_INFO PFX
2955 "%s: calling pci_disable_msi().\n", qdev->ndev->name);
2956 clear_bit(QL_MSI_ENABLED,&qdev->flags);
2957 pci_disable_msi(qdev->pdev);
2958 }
2959
2960 del_timer_sync(&qdev->adapter_timer);
2961
2962 netif_poll_disable(ndev);
2963
2964 if (do_reset) {
2965 int soft_reset;
2966 unsigned long hw_flags;
2967
2968 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
2969 if (ql_wait_for_drvr_lock(qdev)) {
2970 if ((soft_reset = ql_adapter_reset(qdev))) {
2971 printk(KERN_ERR PFX
2972 "%s: ql_adapter_reset(%d) FAILED!\n",
2973 ndev->name, qdev->index);
2974 }
2975 printk(KERN_ERR PFX
2976 "%s: Releaseing driver lock via chip reset.\n",ndev->name);
2977 } else {
2978 printk(KERN_ERR PFX
2979 "%s: Could not acquire driver lock to do "
2980 "reset!\n", ndev->name);
2981 retval = -1;
2982 }
2983 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
2984 }
2985 ql_free_mem_resources(qdev);
2986 return retval;
2987}
2988
2989static int ql_adapter_up(struct ql3_adapter *qdev)
2990{
2991 struct net_device *ndev = qdev->ndev;
2992 int err;
2993 unsigned long irq_flags = SA_SAMPLE_RANDOM | SA_SHIRQ;
2994 unsigned long hw_flags;
2995
2996 if (ql_alloc_mem_resources(qdev)) {
2997 printk(KERN_ERR PFX
2998 "%s Unable to allocate buffers.\n", ndev->name);
2999 return -ENOMEM;
3000 }
3001
3002 if (qdev->msi) {
3003 if (pci_enable_msi(qdev->pdev)) {
3004 printk(KERN_ERR PFX
3005 "%s: User requested MSI, but MSI failed to "
3006 "initialize. Continuing without MSI.\n",
3007 qdev->ndev->name);
3008 qdev->msi = 0;
3009 } else {
3010 printk(KERN_INFO PFX "%s: MSI Enabled...\n", qdev->ndev->name);
3011 set_bit(QL_MSI_ENABLED,&qdev->flags);
3012 irq_flags &= ~SA_SHIRQ;
3013 }
3014 }
3015
3016 if ((err = request_irq(qdev->pdev->irq,
3017 ql3xxx_isr,
3018 irq_flags, ndev->name, ndev))) {
3019 printk(KERN_ERR PFX
3020 "%s: Failed to reserve interrupt %d already in use.\n",
3021 ndev->name, qdev->pdev->irq);
3022 goto err_irq;
3023 }
3024
3025 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
3026
3027 if ((err = ql_wait_for_drvr_lock(qdev))) {
3028 if ((err = ql_adapter_initialize(qdev))) {
3029 printk(KERN_ERR PFX
3030 "%s: Unable to initialize adapter.\n",
3031 ndev->name);
3032 goto err_init;
3033 }
3034 printk(KERN_ERR PFX
3035 "%s: Releaseing driver lock.\n",ndev->name);
3036 ql_sem_unlock(qdev, QL_DRVR_SEM_MASK);
3037 } else {
3038 printk(KERN_ERR PFX
3039 "%s: Could not aquire driver lock.\n",
3040 ndev->name);
3041 goto err_lock;
3042 }
3043
3044 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
3045
3046 set_bit(QL_ADAPTER_UP,&qdev->flags);
3047
3048 mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);
3049
3050 netif_poll_enable(ndev);
3051 ql_enable_interrupts(qdev);
3052 return 0;
3053
3054err_init:
3055 ql_sem_unlock(qdev, QL_DRVR_SEM_MASK);
3056err_lock:
3057 free_irq(qdev->pdev->irq, ndev);
3058err_irq:
3059 if (qdev->msi && test_bit(QL_MSI_ENABLED,&qdev->flags)) {
3060 printk(KERN_INFO PFX
3061 "%s: calling pci_disable_msi().\n",
3062 qdev->ndev->name);
3063 clear_bit(QL_MSI_ENABLED,&qdev->flags);
3064 pci_disable_msi(qdev->pdev);
3065 }
3066 return err;
3067}
3068
3069static int ql_cycle_adapter(struct ql3_adapter *qdev, int reset)
3070{
3071 if( ql_adapter_down(qdev,reset) || ql_adapter_up(qdev)) {
3072 printk(KERN_ERR PFX
3073 "%s: Driver up/down cycle failed, "
3074 "closing device\n",qdev->ndev->name);
3075 dev_close(qdev->ndev);
3076 return -1;
3077 }
3078 return 0;
3079}
3080
3081static int ql3xxx_close(struct net_device *ndev)
3082{
3083 struct ql3_adapter *qdev = netdev_priv(ndev);
3084
3085 /*
3086 * Wait for device to recover from a reset.
3087 * (Rarely happens, but possible.)
3088 */
3089 while (!test_bit(QL_ADAPTER_UP,&qdev->flags))
3090 msleep(50);
3091
3092 ql_adapter_down(qdev,QL_DO_RESET);
3093 return 0;
3094}
3095
3096static int ql3xxx_open(struct net_device *ndev)
3097{
3098 struct ql3_adapter *qdev = netdev_priv(ndev);
3099 return (ql_adapter_up(qdev));
3100}
3101
3102static struct net_device_stats *ql3xxx_get_stats(struct net_device *dev)
3103{
3104 struct ql3_adapter *qdev = netdev_priv(dev);
3105 return &qdev->stats;
3106}
3107
3108static int ql3xxx_change_mtu(struct net_device *ndev, int new_mtu)
3109{
3110 struct ql3_adapter *qdev = netdev_priv(ndev);
3111 printk(KERN_ERR PFX "%s: new mtu size = %d.\n", ndev->name, new_mtu);
3112 if (new_mtu != NORMAL_MTU_SIZE && new_mtu != JUMBO_MTU_SIZE) {
3113 printk(KERN_ERR PFX
3114 "%s: mtu size of %d is not valid. Use exactly %d or "
3115 "%d.\n", ndev->name, new_mtu, NORMAL_MTU_SIZE,
3116 JUMBO_MTU_SIZE);
3117 return -EINVAL;
3118 }
3119
3120 if (!netif_running(ndev)) {
3121 ndev->mtu = new_mtu;
3122 return 0;
3123 }
3124
3125 ndev->mtu = new_mtu;
3126 return ql_cycle_adapter(qdev,QL_DO_RESET);
3127}
3128
3129static void ql3xxx_set_multicast_list(struct net_device *ndev)
3130{
3131 /*
3132 * Multicast is not supported (IFF_MULTICAST is cleared at probe), so there is nothing to do here.
3133 */
3134 return;
3135}
3136
3137static int ql3xxx_set_mac_address(struct net_device *ndev, void *p)
3138{
3139 struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
3140 struct ql3xxx_port_registers __iomem *port_regs =
3141 qdev->mem_map_registers;
3142 struct sockaddr *addr = p;
3143 unsigned long hw_flags;
3144
3145 if (netif_running(ndev))
3146 return -EBUSY;
3147
3148 if (!is_valid_ether_addr(addr->sa_data))
3149 return -EADDRNOTAVAIL;
3150
3151 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
3152
3153 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
3154 /* Program lower 32 bits of the MAC address */
3155 ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
3156 (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16));
3157 ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
3158 ((ndev->dev_addr[2] << 24) | (ndev->
3159 dev_addr[3] << 16) |
3160 (ndev->dev_addr[4] << 8) | ndev->dev_addr[5]));
3161
3162 /* Program top 16 bits of the MAC address */
3163 ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
3164 ((MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1));
3165 ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
3166 ((ndev->dev_addr[0] << 8) | ndev->dev_addr[1]));
3167 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
3168
3169 return 0;
3170}
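/*
 * Worked encoding for a hypothetical address 00:c0:dd:01:02:03 using
 * the two-step indirect interface above:
 *
 *   slot 0 (lower 32 bits): (0xdd << 24) | (0x01 << 16) | (0x02 << 8) | 0x03
 *                           = 0xdd010203
 *   slot 1 (upper 16 bits): (0x00 << 8) | 0xc0 = 0x00c0
 *
 * dev_addr[0] is the most significant byte of the address on the wire.
 */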
3171
3172static void ql3xxx_tx_timeout(struct net_device *ndev)
3173{
3174 struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
3175
3176 printk(KERN_ERR PFX "%s: Resetting...\n", ndev->name);
3177 /*
3178 * Stop the queues, we've got a problem.
3179 */
3180 netif_stop_queue(ndev);
3181
3182 /*
3183 * Wake up the worker to process this event.
3184 */
3185 queue_work(qdev->workqueue, &qdev->tx_timeout_work);
3186}
3187
3188static void ql_reset_work(struct ql3_adapter *qdev)
3189{
3190 struct net_device *ndev = qdev->ndev;
3191 u32 value;
3192 struct ql_tx_buf_cb *tx_cb;
3193 int max_wait_time, i;
3194 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
3195 unsigned long hw_flags;
3196
3197 if (test_bit(QL_RESET_PER_SCSI, &qdev->flags) || test_bit(QL_RESET_START, &qdev->flags)) {
3198 clear_bit(QL_LINK_MASTER,&qdev->flags);
3199
3200 /*
3201 * Loop through the active list and return the skb.
3202 */
3203 for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
3204 tx_cb = &qdev->tx_buf[i];
3205 if (tx_cb->skb) {
3206
3207 printk(KERN_DEBUG PFX
3208 "%s: Freeing lost SKB.\n",
3209 qdev->ndev->name);
3210 pci_unmap_single(qdev->pdev,
3211 pci_unmap_addr(tx_cb, mapaddr),
3212 pci_unmap_len(tx_cb, maplen), PCI_DMA_TODEVICE);
3213 dev_kfree_skb(tx_cb->skb);
3214 tx_cb->skb = NULL;
3215 }
3216 }
3217
3218 printk(KERN_ERR PFX
3219 "%s: Clearing NRI after reset.\n", qdev->ndev->name);
3220 spin_lock_irqsave(&qdev->hw_lock, hw_flags);
3221 ql_write_common_reg(qdev,
3222 &port_regs->CommonRegs.
3223 ispControlStatus,
3224 ((ISP_CONTROL_RI << 16) | ISP_CONTROL_RI));
3225 /*
3226 * Wait for the Soft Reset to complete.
3227 */
3228 max_wait_time = 10;
3229 do {
3230 value = ql_read_common_reg(qdev,
3231 &port_regs->CommonRegs.ispControlStatus);
3234 if ((value & ISP_CONTROL_SR) == 0) {
3235 printk(KERN_DEBUG PFX
3236 "%s: reset completed.\n",
3237 qdev->ndev->name);
3238 break;
3239 }
3240
3241 if (value & ISP_CONTROL_RI) {
3242 printk(KERN_DEBUG PFX
3243 "%s: clearing NRI after reset.\n",
3244 qdev->ndev->name);
3245 ql_write_common_reg(qdev,
3246 (u32 *) &
3247 port_regs->
3248 CommonRegs.
3249 ispControlStatus,
3250 ((ISP_CONTROL_RI <<
3251 16) | ISP_CONTROL_RI));
3252 }
3253
3254 ssleep(1);
3255 } while (--max_wait_time);
3256 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
3257
3258 if (value & ISP_CONTROL_SR) {
3259
3260 /*
3261 * Set the reset flags and clear the board again.
3262 * Nothing else to do...
3263 */
3264 printk(KERN_ERR PFX
3265 "%s: Timed out waiting for reset to "
3266 "complete.\n", ndev->name);
3267 printk(KERN_ERR PFX
3268 "%s: Do a reset.\n", ndev->name);
3269 clear_bit(QL_RESET_PER_SCSI,&qdev->flags);
3270 clear_bit(QL_RESET_START,&qdev->flags);
3271 ql_cycle_adapter(qdev,QL_DO_RESET);
3272 return;
3273 }
3274
3275 clear_bit(QL_RESET_ACTIVE,&qdev->flags);
3276 clear_bit(QL_RESET_PER_SCSI,&qdev->flags);
3277 clear_bit(QL_RESET_START,&qdev->flags);
3278 ql_cycle_adapter(qdev,QL_NO_RESET);
3279 }
3280}
3281
3282static void ql_tx_timeout_work(struct ql3_adapter *qdev)
3283{
3284 ql_cycle_adapter(qdev,QL_DO_RESET);
3285}
3286
3287static void ql_get_board_info(struct ql3_adapter *qdev)
3288{
3289 struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
3290 u32 value;
3291
3292 value = ql_read_page0_reg_l(qdev, &port_regs->portStatus);
3293
3294 qdev->chip_rev_id = ((value & PORT_STATUS_REV_ID_MASK) >> 12);
3295 if (value & PORT_STATUS_64)
3296 qdev->pci_width = 64;
3297 else
3298 qdev->pci_width = 32;
3299 if (value & PORT_STATUS_X)
3300 qdev->pci_x = 1;
3301 else
3302 qdev->pci_x = 0;
3303 qdev->pci_slot = (u8) PCI_SLOT(qdev->pdev->devfn);
3304}
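/*
 * Example decode of the fields above (hypothetical portStatus 0x3208):
 *   chip_rev_id = (0x3208 & PORT_STATUS_REV_ID_MASK) >> 12 = 3
 *   PORT_STATUS_64 (0x8000) clear -> pci_width = 32
 *   PORT_STATUS_X  (0x0008) set   -> pci_x = 1
 */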
3305
3306static void ql3xxx_timer(unsigned long ptr)
3307{
3308 struct ql3_adapter *qdev = (struct ql3_adapter *)ptr;
3309
3310 if (test_bit(QL_RESET_ACTIVE,&qdev->flags)) {
3311 printk(KERN_DEBUG PFX
3312 "%s: Reset in progress.\n",
3313 qdev->ndev->name);
3314 goto end;
3315 }
3316
3317 ql_link_state_machine(qdev);
3318
3319 /* Restart timer on 1 second interval. */
3320end:
3321 mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);
3322}
3323
3324static int __devinit ql3xxx_probe(struct pci_dev *pdev,
3325 const struct pci_device_id *pci_entry)
3326{
3327 struct net_device *ndev = NULL;
3328 struct ql3_adapter *qdev = NULL;
3329 static int cards_found = 0;
3330 int pci_using_dac, err;
3331
3332 err = pci_enable_device(pdev);
3333 if (err) {
3334 printk(KERN_ERR PFX "%s cannot enable PCI device\n",
3335 pci_name(pdev));
3336 goto err_out;
3337 }
3338
3339 err = pci_request_regions(pdev, DRV_NAME);
3340 if (err) {
3341 printk(KERN_ERR PFX "%s cannot obtain PCI resources\n",
3342 pci_name(pdev));
3343 goto err_out_disable_pdev;
3344 }
3345
3346 pci_set_master(pdev);
3347
3348 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
3349 pci_using_dac = 1;
3350 err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
3351 } else if (!(err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) {
3352 pci_using_dac = 0;
3353 err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
3354 }
3355
3356 if (err) {
3357 printk(KERN_ERR PFX "%s no usable DMA configuration\n",
3358 pci_name(pdev));
3359 goto err_out_free_regions;
3360 }
3361
3362 ndev = alloc_etherdev(sizeof(struct ql3_adapter));
3363 if (!ndev) {
3364 err = -ENOMEM;
goto err_out_free_regions;
}
3365
3366 SET_MODULE_OWNER(ndev);
3367 SET_NETDEV_DEV(ndev, &pdev->dev);
3368
3369 ndev->features = NETIF_F_LLTX;
3370 if (pci_using_dac)
3371 ndev->features |= NETIF_F_HIGHDMA;
3372
3373 pci_set_drvdata(pdev, ndev);
3374
3375 qdev = netdev_priv(ndev);
3376 qdev->index = cards_found;
3377 qdev->ndev = ndev;
3378 qdev->pdev = pdev;
3379 qdev->port_link_state = LS_DOWN;
3380 if (msi)
3381 qdev->msi = 1;
3382
3383 qdev->msg_enable = netif_msg_init(debug, default_msg);
3384
3385 qdev->mem_map_registers =
3386 ioremap_nocache(pci_resource_start(pdev, 1),
3387 pci_resource_len(qdev->pdev, 1));
3388 if (!qdev->mem_map_registers) {
3389 printk(KERN_ERR PFX "%s: cannot map device registers\n",
3390 pci_name(pdev));
3391 err = -EIO;
goto err_out_free_ndev;
3392 }
3393
3394 spin_lock_init(&qdev->adapter_lock);
3395 spin_lock_init(&qdev->hw_lock);
3396
3397 /* Set driver entry points */
3398 ndev->open = ql3xxx_open;
3399 ndev->hard_start_xmit = ql3xxx_send;
3400 ndev->stop = ql3xxx_close;
3401 ndev->get_stats = ql3xxx_get_stats;
3402 ndev->change_mtu = ql3xxx_change_mtu;
3403 ndev->set_multicast_list = ql3xxx_set_multicast_list;
3404 SET_ETHTOOL_OPS(ndev, &ql3xxx_ethtool_ops);
3405 ndev->set_mac_address = ql3xxx_set_mac_address;
3406 ndev->tx_timeout = ql3xxx_tx_timeout;
3407 ndev->watchdog_timeo = 5 * HZ;
3408
3409 ndev->poll = &ql_poll;
3410 ndev->weight = 64;
3411
3412 ndev->irq = pdev->irq;
3413
3414 /* make sure the EEPROM is good */
3415 if (ql_get_nvram_params(qdev)) {
3416 printk(KERN_ALERT PFX
3417 "ql3xxx_probe: Adapter #%d, Invalid NVRAM parameters.\n",
3418 qdev->index);
3419 err = -EIO;
goto err_out_iounmap;
3420 }
3421
3422 ql_set_mac_info(qdev);
3423
3424 /* Validate and set parameters */
3425 if (qdev->mac_index) {
3426 memcpy(ndev->dev_addr, &qdev->nvram_data.funcCfg_fn2.macAddress,
3427 ETH_ALEN);
3428 } else {
3429 memcpy(ndev->dev_addr, &qdev->nvram_data.funcCfg_fn0.macAddress,
3430 ETH_ALEN);
3431 }
3432 memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
3433
3434 ndev->tx_queue_len = NUM_REQ_Q_ENTRIES;
3435
3436 /* Turn off support for multicasting */
3437 ndev->flags &= ~IFF_MULTICAST;
3438
3439 /* Record PCI bus information. */
3440 ql_get_board_info(qdev);
3441
3442 /*
3443 * Set the Maximum Memory Read Byte Count value. We do this to handle
3444 * jumbo frames.
3445 */
3446 if (qdev->pci_x) {
3447 pci_write_config_word(pdev, (int)0x4e, (u16) 0x0036);
3448 }
3449
3450 err = register_netdev(ndev);
3451 if (err) {
3452 printk(KERN_ERR PFX "%s: cannot register net device\n",
3453 pci_name(pdev));
3454 goto err_out_iounmap;
3455 }
3456
3457 /* we're going to reset, so assume we have no link for now */
3458
3459 netif_carrier_off(ndev);
3460 netif_stop_queue(ndev);
3461
3462 qdev->workqueue = create_singlethread_workqueue(ndev->name);
3463 INIT_WORK(&qdev->reset_work, (void (*)(void *))ql_reset_work, qdev);
3464 INIT_WORK(&qdev->tx_timeout_work,
3465 (void (*)(void *))ql_tx_timeout_work, qdev);
3466
3467 init_timer(&qdev->adapter_timer);
3468 qdev->adapter_timer.function = ql3xxx_timer;
3469 qdev->adapter_timer.expires = jiffies + HZ * 2; /* two second delay */
3470 qdev->adapter_timer.data = (unsigned long)qdev;
3471
3472 if(!cards_found) {
3473 printk(KERN_ALERT PFX "%s\n", DRV_STRING);
3474 printk(KERN_ALERT PFX "Driver name: %s, Version: %s.\n",
3475 DRV_NAME, DRV_VERSION);
3476 }
3477 ql_display_dev_info(ndev);
3478
3479 cards_found++;
3480 return 0;
3481
3482err_out_iounmap:
3483 iounmap(qdev->mem_map_registers);
3484err_out_free_ndev:
3485 free_netdev(ndev);
3486err_out_free_regions:
3487 pci_release_regions(pdev);
3488err_out_disable_pdev:
3489 pci_disable_device(pdev);
3490 pci_set_drvdata(pdev, NULL);
3491err_out:
3492 return err;
3493}
3494
3495static void __devexit ql3xxx_remove(struct pci_dev *pdev)
3496{
3497 struct net_device *ndev = pci_get_drvdata(pdev);
3498 struct ql3_adapter *qdev = netdev_priv(ndev);
3499
3500 unregister_netdev(ndev);
3502
3503 ql_disable_interrupts(qdev);
3504
3505 if (qdev->workqueue) {
3506 cancel_delayed_work(&qdev->reset_work);
3507 cancel_delayed_work(&qdev->tx_timeout_work);
3508 destroy_workqueue(qdev->workqueue);
3509 qdev->workqueue = NULL;
3510 }
3511
3512 iounmap(qdev->mem_map_registers);
3513 pci_release_regions(pdev);
3514 pci_set_drvdata(pdev, NULL);
3515 free_netdev(ndev);
3516}
3517
3518static struct pci_driver ql3xxx_driver = {
3519
3520 .name = DRV_NAME,
3521 .id_table = ql3xxx_pci_tbl,
3522 .probe = ql3xxx_probe,
3523 .remove = __devexit_p(ql3xxx_remove),
3524};
3525
3526static int __init ql3xxx_init_module(void)
3527{
3528 return pci_register_driver(&ql3xxx_driver);
3529}
3530
3531static void __exit ql3xxx_exit(void)
3532{
3533 pci_unregister_driver(&ql3xxx_driver);
3534}
3535
3536module_init(ql3xxx_init_module);
3537module_exit(ql3xxx_exit);
diff --git a/drivers/net/qla3xxx.h b/drivers/net/qla3xxx.h
new file mode 100644
index 000000000000..9492cee6b083
--- /dev/null
+++ b/drivers/net/qla3xxx.h
@@ -0,0 +1,1194 @@
1/*
2 * QLogic QLA3xxx NIC HBA Driver
3 * Copyright (c) 2003-2006 QLogic Corporation
4 *
5 * See LICENSE.qla3xxx for copyright and licensing details.
6 */
7#ifndef _QLA3XXX_H_
8#define _QLA3XXX_H_
9
10/*
11 * IOCB Definitions...
12 */
13#pragma pack(1)
14
15#define OPCODE_OB_MAC_IOCB_FN0 0x01
16#define OPCODE_OB_MAC_IOCB_FN2 0x21
17#define OPCODE_OB_TCP_IOCB_FN0 0x03
18#define OPCODE_OB_TCP_IOCB_FN2 0x23
19#define OPCODE_UPDATE_NCB_IOCB_FN0 0x00
20#define OPCODE_UPDATE_NCB_IOCB_FN2 0x20
21
22#define OPCODE_UPDATE_NCB_IOCB 0xF0
23#define OPCODE_IB_MAC_IOCB 0xF9
24#define OPCODE_IB_IP_IOCB 0xFA
25#define OPCODE_IB_TCP_IOCB 0xFB
26#define OPCODE_DUMP_PROTO_IOCB 0xFE
27#define OPCODE_BUFFER_ALERT_IOCB 0xFB
28
29#define OPCODE_FUNC_ID_MASK 0x30
30#define OUTBOUND_MAC_IOCB 0x01 /* plus function bits */
31#define OUTBOUND_TCP_IOCB 0x03 /* plus function bits */
32#define UPDATE_NCB_IOCB 0x00 /* plus function bits */
33
34#define FN0_MA_BITS_MASK 0x00
35#define FN1_MA_BITS_MASK 0x80
36
37struct ob_mac_iocb_req {
38 u8 opcode;
39 u8 flags;
40#define OB_MAC_IOCB_REQ_MA 0xC0
41#define OB_MAC_IOCB_REQ_F 0x20
42#define OB_MAC_IOCB_REQ_X 0x10
43#define OB_MAC_IOCB_REQ_D 0x02
44#define OB_MAC_IOCB_REQ_I 0x01
45 __le16 reserved0;
46
47 __le32 transaction_id;
48 __le16 data_len;
49 __le16 reserved1;
50 __le32 reserved2;
51 __le32 reserved3;
52 __le32 buf_addr0_low;
53 __le32 buf_addr0_high;
54 __le32 buf_0_len;
55 __le32 buf_addr1_low;
56 __le32 buf_addr1_high;
57 __le32 buf_1_len;
58 __le32 buf_addr2_low;
59 __le32 buf_addr2_high;
60 __le32 buf_2_len;
61 __le32 reserved4;
62 __le32 reserved5;
63};
64/*
65 * The following constants define control bits for buffer
66 * length fields for all IOCBs.
67 */
68#define OB_MAC_IOCB_REQ_E 0x80000000 /* Last valid buffer in list. */
69#define OB_MAC_IOCB_REQ_C 0x40000000 /* points to an OAL. (continuation) */
70#define OB_MAC_IOCB_REQ_L 0x20000000 /* Auburn local address pointer. */
71#define OB_MAC_IOCB_REQ_R 0x10000000 /* 32-bit address pointer. */
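/*
 * Worked example of this encoding (values illustrative): a final,
 * 1514-byte buffer in an outbound MAC IOCB would carry
 *
 *   buf_0_len = OB_MAC_IOCB_REQ_E | 1514 = 0x800005ea
 *
 * with the control bits in the top nibble of each 32-bit length word
 * and the byte count in the low bits.
 */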
72
73struct ob_mac_iocb_rsp {
74 u8 opcode;
75 u8 flags;
76#define OB_MAC_IOCB_RSP_P 0x08
77#define OB_MAC_IOCB_RSP_S 0x02
78#define OB_MAC_IOCB_RSP_I 0x01
79
80 __le16 reserved0;
81 __le32 transaction_id;
82 __le32 reserved1;
83 __le32 reserved2;
84};
85
86struct ib_mac_iocb_rsp {
87 u8 opcode;
88 u8 flags;
89#define IB_MAC_IOCB_RSP_S 0x80
90#define IB_MAC_IOCB_RSP_H1 0x40
91#define IB_MAC_IOCB_RSP_H0 0x20
92#define IB_MAC_IOCB_RSP_B 0x10
93#define IB_MAC_IOCB_RSP_M 0x08
94#define IB_MAC_IOCB_RSP_MA 0x07
95
96 __le16 length;
97 __le32 reserved;
98 __le32 ial_low;
99 __le32 ial_high;
100
101};
102
103struct ob_ip_iocb_req {
104 u8 opcode;
105 __le16 flags;
106#define OB_IP_IOCB_REQ_O 0x100
107#define OB_IP_IOCB_REQ_H 0x008
108#define OB_IP_IOCB_REQ_U 0x004
109#define OB_IP_IOCB_REQ_D 0x002
110#define OB_IP_IOCB_REQ_I 0x001
111
112 u8 reserved0;
113
114 __le32 transaction_id;
115 __le16 data_len;
116 __le16 reserved1;
117 __le32 hncb_ptr_low;
118 __le32 hncb_ptr_high;
119 __le32 buf_addr0_low;
120 __le32 buf_addr0_high;
121 __le32 buf_0_len;
122 __le32 buf_addr1_low;
123 __le32 buf_addr1_high;
124 __le32 buf_1_len;
125 __le32 buf_addr2_low;
126 __le32 buf_addr2_high;
127 __le32 buf_2_len;
128 __le32 reserved2;
129 __le32 reserved3;
130};
131
132/* defines for BufferLength fields above */
133#define OB_IP_IOCB_REQ_E 0x80000000
134#define OB_IP_IOCB_REQ_C 0x40000000
135#define OB_IP_IOCB_REQ_L 0x20000000
136#define OB_IP_IOCB_REQ_R 0x10000000
137
138struct ob_ip_iocb_rsp {
139 u8 opcode;
140 u8 flags;
141#define OB_MAC_IOCB_RSP_E 0x08
142#define OB_MAC_IOCB_RSP_L 0x04
143#define OB_MAC_IOCB_RSP_S 0x02
144#define OB_MAC_IOCB_RSP_I 0x01
145
146 __le16 reserved0;
147 __le32 transaction_id;
148 __le32 reserved1;
149 __le32 reserved2;
150};
151
152struct ob_tcp_iocb_req {
153 u8 opcode;
154
155 u8 flags0;
156#define OB_TCP_IOCB_REQ_P 0x80
157#define OB_TCP_IOCB_REQ_CI 0x20
158#define OB_TCP_IOCB_REQ_H 0x10
159#define OB_TCP_IOCB_REQ_LN 0x08
160#define OB_TCP_IOCB_REQ_K 0x04
161#define OB_TCP_IOCB_REQ_D 0x02
162#define OB_TCP_IOCB_REQ_I 0x01
163
164 u8 flags1;
165#define OB_TCP_IOCB_REQ_OSM 0x40
166#define OB_TCP_IOCB_REQ_URG 0x20
167#define OB_TCP_IOCB_REQ_ACK 0x10
168#define OB_TCP_IOCB_REQ_PSH 0x08
169#define OB_TCP_IOCB_REQ_RST 0x04
170#define OB_TCP_IOCB_REQ_SYN 0x02
171#define OB_TCP_IOCB_REQ_FIN 0x01
172
173 u8 options_len;
174#define OB_TCP_IOCB_REQ_OMASK 0xF0
175#define OB_TCP_IOCB_REQ_SHIFT 4
176
177 __le32 transaction_id;
178 __le32 data_len;
179 __le32 hncb_ptr_low;
180 __le32 hncb_ptr_high;
181 __le32 buf_addr0_low;
182 __le32 buf_addr0_high;
183 __le32 buf_0_len;
184 __le32 buf_addr1_low;
185 __le32 buf_addr1_high;
186 __le32 buf_1_len;
187 __le32 buf_addr2_low;
188 __le32 buf_addr2_high;
189 __le32 buf_2_len;
190 __le32 time_stamp;
191 __le32 reserved1;
192};
193
194struct ob_tcp_iocb_rsp {
195 u8 opcode;
196
197 u8 flags0;
198#define OB_TCP_IOCB_RSP_C 0x20
199#define OB_TCP_IOCB_RSP_H 0x10
200#define OB_TCP_IOCB_RSP_LN 0x08
201#define OB_TCP_IOCB_RSP_K 0x04
202#define OB_TCP_IOCB_RSP_D 0x02
203#define OB_TCP_IOCB_RSP_I 0x01
204
205 u8 flags1;
206#define OB_TCP_IOCB_RSP_E 0x10
207#define OB_TCP_IOCB_RSP_W 0x08
208#define OB_TCP_IOCB_RSP_P 0x04
209#define OB_TCP_IOCB_RSP_T 0x02
210#define OB_TCP_IOCB_RSP_F 0x01
211
212 u8 state;
213#define OB_TCP_IOCB_RSP_SMASK 0xF0
214#define OB_TCP_IOCB_RSP_SHIFT 4
215
216 __le32 transaction_id;
217 __le32 local_ncb_ptr;
218 __le32 reserved0;
219};
220
221struct ib_ip_iocb_rsp {
222 u8 opcode;
223 u8 flags;
224#define IB_IP_IOCB_RSP_S 0x80
225#define IB_IP_IOCB_RSP_H1 0x40
226#define IB_IP_IOCB_RSP_H0 0x20
227#define IB_IP_IOCB_RSP_B 0x10
228#define IB_IP_IOCB_RSP_M 0x08
229#define IB_IP_IOCB_RSP_MA 0x07
230
231 __le16 length;
232 __le16 checksum;
233 __le16 reserved;
234#define IB_IP_IOCB_RSP_R 0x01
235 __le32 ial_low;
236 __le32 ial_high;
237};
238
239struct ib_tcp_iocb_rsp {
240 u8 opcode;
241 u8 flags;
242#define IB_TCP_IOCB_RSP_P 0x80
243#define IB_TCP_IOCB_RSP_T 0x40
244#define IB_TCP_IOCB_RSP_D 0x20
245#define IB_TCP_IOCB_RSP_N 0x10
246#define IB_TCP_IOCB_RSP_IP 0x03
247#define IB_TCP_FLAG_MASK 0xf0
248#define IB_TCP_FLAG_IOCB_SYN 0x00
249
250#define TCP_IB_RSP_FLAGS(x) ((x)->flags & ~IB_TCP_FLAG_MASK)
251
252 __le16 length;
253 __le32 hncb_ref_num;
254 __le32 ial_low;
255 __le32 ial_high;
256};
257
258struct net_rsp_iocb {
259 u8 opcode;
260 u8 flags;
261 __le16 reserved0;
262 __le32 reserved[3];
263};
264#pragma pack()
265
266/*
267 * Register Definitions...
268 */
269#define PORT0_PHY_ADDRESS 0x1e00
270#define PORT1_PHY_ADDRESS 0x1f00
271
272#define ETHERNET_CRC_SIZE 4
273
274#define MII_SCAN_REGISTER 0x00000001
275
276/* 32-bit ispControlStatus */
277enum {
278 ISP_CONTROL_NP_MASK = 0x0003,
279 ISP_CONTROL_NP_PCSR = 0x0000,
280 ISP_CONTROL_NP_HMCR = 0x0001,
281 ISP_CONTROL_NP_LRAMCR = 0x0002,
282 ISP_CONTROL_NP_PSR = 0x0003,
283 ISP_CONTROL_RI = 0x0008,
284 ISP_CONTROL_CI = 0x0010,
285 ISP_CONTROL_PI = 0x0020,
286 ISP_CONTROL_IN = 0x0040,
287 ISP_CONTROL_BE = 0x0080,
288 ISP_CONTROL_FN_MASK = 0x0700,
289 ISP_CONTROL_FN0_NET = 0x0400,
290 ISP_CONTROL_FN0_SCSI = 0x0500,
291 ISP_CONTROL_FN1_NET = 0x0600,
292 ISP_CONTROL_FN1_SCSI = 0x0700,
293 ISP_CONTROL_LINK_DN_0 = 0x0800,
294 ISP_CONTROL_LINK_DN_1 = 0x1000,
295 ISP_CONTROL_FSR = 0x2000,
296 ISP_CONTROL_FE = 0x4000,
297 ISP_CONTROL_SR = 0x8000,
298};
299
300/* 32-bit ispInterruptMaskReg */
301enum {
302 ISP_IMR_ENABLE_INT = 0x0004,
303 ISP_IMR_DISABLE_RESET_INT = 0x0008,
304 ISP_IMR_DISABLE_CMPL_INT = 0x0010,
305 ISP_IMR_DISABLE_PROC_INT = 0x0020,
306};
307
308/* 32-bit serialPortInterfaceReg */
309enum {
310 ISP_SERIAL_PORT_IF_CLK = 0x0001,
311 ISP_SERIAL_PORT_IF_CS = 0x0002,
312 ISP_SERIAL_PORT_IF_D0 = 0x0004,
313 ISP_SERIAL_PORT_IF_DI = 0x0008,
314 ISP_NVRAM_MASK = (0x000F << 16),
315 ISP_SERIAL_PORT_IF_WE = 0x0010,
316 ISP_SERIAL_PORT_IF_NVR_MASK = 0x001F,
317 ISP_SERIAL_PORT_IF_SCI = 0x0400,
318 ISP_SERIAL_PORT_IF_SC0 = 0x0800,
319 ISP_SERIAL_PORT_IF_SCE = 0x1000,
320 ISP_SERIAL_PORT_IF_SDI = 0x2000,
321 ISP_SERIAL_PORT_IF_SDO = 0x4000,
322 ISP_SERIAL_PORT_IF_SDE = 0x8000,
323 ISP_SERIAL_PORT_IF_I2C_MASK = 0xFC00,
324};
325
326/* semaphoreReg */
327enum {
328 QL_RESOURCE_MASK_BASE_CODE = 0x7,
329 QL_RESOURCE_BITS_BASE_CODE = 0x4,
330 QL_DRVR_SEM_BITS = (QL_RESOURCE_BITS_BASE_CODE << 1),
331 QL_DDR_RAM_SEM_BITS = (QL_RESOURCE_BITS_BASE_CODE << 4),
332 QL_PHY_GIO_SEM_BITS = (QL_RESOURCE_BITS_BASE_CODE << 7),
333 QL_NVRAM_SEM_BITS = (QL_RESOURCE_BITS_BASE_CODE << 10),
334 QL_FLASH_SEM_BITS = (QL_RESOURCE_BITS_BASE_CODE << 13),
335 QL_DRVR_SEM_MASK = (QL_RESOURCE_MASK_BASE_CODE << (1 + 16)),
336 QL_DDR_RAM_SEM_MASK = (QL_RESOURCE_MASK_BASE_CODE << (4 + 16)),
337 QL_PHY_GIO_SEM_MASK = (QL_RESOURCE_MASK_BASE_CODE << (7 + 16)),
338 QL_NVRAM_SEM_MASK = (QL_RESOURCE_MASK_BASE_CODE << (10 + 16)),
339 QL_FLASH_SEM_MASK = (QL_RESOURCE_MASK_BASE_CODE << (13 + 16)),
340};
341
342 /*
343 * QL3XXX memory-mapped registers
344 * QL3XXX has 4 "pages" of registers, each page occupying
345 * 256 bytes. Each page has a "common" area at the start and then
346 * page-specific registers after that.
347 */
348struct ql3xxx_common_registers {
349 u32 MB0; /* Offset 0x00 */
350 u32 MB1; /* Offset 0x04 */
351 u32 MB2; /* Offset 0x08 */
352 u32 MB3; /* Offset 0x0c */
353 u32 MB4; /* Offset 0x10 */
354 u32 MB5; /* Offset 0x14 */
355 u32 MB6; /* Offset 0x18 */
356 u32 MB7; /* Offset 0x1c */
357 u32 flashBiosAddr;
358 u32 flashBiosData;
359 u32 ispControlStatus;
360 u32 ispInterruptMaskReg;
361 u32 serialPortInterfaceReg;
362 u32 semaphoreReg;
363 u32 reqQProducerIndex;
364 u32 rspQConsumerIndex;
365
366 u32 rxLargeQProducerIndex;
367 u32 rxSmallQProducerIndex;
368 u32 arcMadiCommand;
369 u32 arcMadiData;
370};
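/*
 * This common block is 20 32-bit registers (80 bytes) and leads every
 * 256-byte page, so common registers are reachable regardless of the
 * currently selected page, while the page-specific registers that
 * follow require their page to be selected.  A usage sketch, as in
 * ql_adapter_initialize() (the cast is the driver's own idiom):
 *
 *	struct ql3xxx_host_memory_registers __iomem *hmem =
 *		(struct ql3xxx_host_memory_registers *)port_regs;
 *	ql_write_page1_reg(qdev, &hmem->reqLength, NUM_REQ_Q_ENTRIES);
 */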
371
372enum {
373 EXT_HW_CONFIG_SP_MASK = 0x0006,
374 EXT_HW_CONFIG_SP_NONE = 0x0000,
375 EXT_HW_CONFIG_SP_BYTE_PARITY = 0x0002,
376 EXT_HW_CONFIG_SP_ECC = 0x0004,
377 EXT_HW_CONFIG_SP_ECCx = 0x0006,
378 EXT_HW_CONFIG_SIZE_MASK = 0x0060,
379 EXT_HW_CONFIG_SIZE_128M = 0x0000,
380 EXT_HW_CONFIG_SIZE_256M = 0x0020,
381 EXT_HW_CONFIG_SIZE_512M = 0x0040,
382 EXT_HW_CONFIG_SIZE_INVALID = 0x0060,
383 EXT_HW_CONFIG_PD = 0x0080,
384 EXT_HW_CONFIG_FW = 0x0200,
385 EXT_HW_CONFIG_US = 0x0400,
386 EXT_HW_CONFIG_DCS_MASK = 0x1800,
387 EXT_HW_CONFIG_DCS_9MA = 0x0000,
388 EXT_HW_CONFIG_DCS_15MA = 0x0800,
389 EXT_HW_CONFIG_DCS_18MA = 0x1000,
390 EXT_HW_CONFIG_DCS_24MA = 0x1800,
391 EXT_HW_CONFIG_DDS_MASK = 0x6000,
392 EXT_HW_CONFIG_DDS_9MA = 0x0000,
393 EXT_HW_CONFIG_DDS_15MA = 0x2000,
394 EXT_HW_CONFIG_DDS_18MA = 0x4000,
395 EXT_HW_CONFIG_DDS_24MA = 0x6000,
396};
397
398/* InternalChipConfig */
399enum {
400 INTERNAL_CHIP_DM = 0x0001,
401 INTERNAL_CHIP_SD = 0x0002,
402 INTERNAL_CHIP_RAP_MASK = 0x000C,
403 INTERNAL_CHIP_RAP_RR = 0x0000,
404 INTERNAL_CHIP_RAP_NRM = 0x0004,
405 INTERNAL_CHIP_RAP_ERM = 0x0008,
406 INTERNAL_CHIP_RAP_ERMx = 0x000C,
407 INTERNAL_CHIP_WE = 0x0010,
408 INTERNAL_CHIP_EF = 0x0020,
409 INTERNAL_CHIP_FR = 0x0040,
410 INTERNAL_CHIP_FW = 0x0080,
411 INTERNAL_CHIP_FI = 0x0100,
412 INTERNAL_CHIP_FT = 0x0200,
413};
414
415/* portControl */
416enum {
417 PORT_CONTROL_DS = 0x0001,
418 PORT_CONTROL_HH = 0x0002,
419 PORT_CONTROL_EI = 0x0004,
420 PORT_CONTROL_ET = 0x0008,
421 PORT_CONTROL_EF = 0x0010,
422 PORT_CONTROL_DRM = 0x0020,
423 PORT_CONTROL_RLB = 0x0040,
424 PORT_CONTROL_RCB = 0x0080,
425 PORT_CONTROL_MAC = 0x0100,
426 PORT_CONTROL_IPV = 0x0200,
427 PORT_CONTROL_IFP = 0x0400,
428 PORT_CONTROL_ITP = 0x0800,
429 PORT_CONTROL_FI = 0x1000,
430 PORT_CONTROL_DFP = 0x2000,
431 PORT_CONTROL_OI = 0x4000,
432 PORT_CONTROL_CC = 0x8000,
433};
434
435/* portStatus */
436enum {
437 PORT_STATUS_SM0 = 0x0001,
438 PORT_STATUS_SM1 = 0x0002,
439 PORT_STATUS_X = 0x0008,
440 PORT_STATUS_DL = 0x0080,
441 PORT_STATUS_IC = 0x0200,
442 PORT_STATUS_MRC = 0x0400,
443 PORT_STATUS_NL = 0x0800,
444 PORT_STATUS_REV_ID_MASK = 0x7000,
445 PORT_STATUS_REV_ID_1 = 0x1000,
446 PORT_STATUS_REV_ID_2 = 0x2000,
447 PORT_STATUS_REV_ID_3 = 0x3000,
448 PORT_STATUS_64 = 0x8000,
449 PORT_STATUS_UP0 = 0x10000,
450 PORT_STATUS_AC0 = 0x20000,
451 PORT_STATUS_AE0 = 0x40000,
452 PORT_STATUS_UP1 = 0x100000,
453 PORT_STATUS_AC1 = 0x200000,
454 PORT_STATUS_AE1 = 0x400000,
455 PORT_STATUS_F0_ENABLED = 0x1000000,
456 PORT_STATUS_F1_ENABLED = 0x2000000,
457 PORT_STATUS_F2_ENABLED = 0x4000000,
458 PORT_STATUS_F3_ENABLED = 0x8000000,
459};
460
461/* macAddrIndirectPtrReg */
462enum {
463 MAC_ADDR_INDIRECT_PTR_REG_RP_MASK = 0x0003,
464 MAC_ADDR_INDIRECT_PTR_REG_RP_PRI_LWR = 0x0000,
465 MAC_ADDR_INDIRECT_PTR_REG_RP_PRI_UPR = 0x0001,
466 MAC_ADDR_INDIRECT_PTR_REG_RP_SEC_LWR = 0x0002,
467 MAC_ADDR_INDIRECT_PTR_REG_RP_SEC_UPR = 0x0003,
468 MAC_ADDR_INDIRECT_PTR_REG_PR = 0x0008,
469 MAC_ADDR_INDIRECT_PTR_REG_SS = 0x0010,
470 MAC_ADDR_INDIRECT_PTR_REG_SE = 0x0020,
471 MAC_ADDR_INDIRECT_PTR_REG_SP = 0x0040,
472 MAC_ADDR_INDIRECT_PTR_REG_PE = 0x0080,
473};
474
475/* macMIIMgmtControlReg */
476enum {
477 MAC_MII_CONTROL_RC = 0x0001,
478 MAC_MII_CONTROL_SC = 0x0002,
479 MAC_MII_CONTROL_AS = 0x0004,
480 MAC_MII_CONTROL_NP = 0x0008,
481 MAC_MII_CONTROL_CLK_SEL_MASK = 0x0070,
482 MAC_MII_CONTROL_CLK_SEL_DIV2 = 0x0000,
483 MAC_MII_CONTROL_CLK_SEL_DIV4 = 0x0010,
484 MAC_MII_CONTROL_CLK_SEL_DIV6 = 0x0020,
485 MAC_MII_CONTROL_CLK_SEL_DIV8 = 0x0030,
486 MAC_MII_CONTROL_CLK_SEL_DIV10 = 0x0040,
487 MAC_MII_CONTROL_CLK_SEL_DIV14 = 0x0050,
488 MAC_MII_CONTROL_CLK_SEL_DIV20 = 0x0060,
489 MAC_MII_CONTROL_CLK_SEL_DIV28 = 0x0070,
490 MAC_MII_CONTROL_RM = 0x8000,
491};
492
493/* macMIIStatusReg */
494enum {
495 MAC_MII_STATUS_BSY = 0x0001,
496 MAC_MII_STATUS_SC = 0x0002,
497 MAC_MII_STATUS_NV = 0x0004,
498};
499
500enum {
501 MAC_CONFIG_REG_PE = 0x0001,
502 MAC_CONFIG_REG_TF = 0x0002,
503 MAC_CONFIG_REG_RF = 0x0004,
504 MAC_CONFIG_REG_FD = 0x0008,
505 MAC_CONFIG_REG_GM = 0x0010,
506 MAC_CONFIG_REG_LB = 0x0020,
507 MAC_CONFIG_REG_SR = 0x8000,
508};
509
510enum {
511 MAC_HALF_DUPLEX_REG_ED = 0x10000,
512 MAC_HALF_DUPLEX_REG_NB = 0x20000,
513 MAC_HALF_DUPLEX_REG_BNB = 0x40000,
514 MAC_HALF_DUPLEX_REG_ALT = 0x80000,
515};
516
517enum {
518 IP_ADDR_INDEX_REG_MASK = 0x000f,
519 IP_ADDR_INDEX_REG_FUNC_0_PRI = 0x0000,
520 IP_ADDR_INDEX_REG_FUNC_0_SEC = 0x0001,
521 IP_ADDR_INDEX_REG_FUNC_1_PRI = 0x0002,
522 IP_ADDR_INDEX_REG_FUNC_1_SEC = 0x0003,
523 IP_ADDR_INDEX_REG_FUNC_2_PRI = 0x0004,
524 IP_ADDR_INDEX_REG_FUNC_2_SEC = 0x0005,
525 IP_ADDR_INDEX_REG_FUNC_3_PRI = 0x0006,
526 IP_ADDR_INDEX_REG_FUNC_3_SEC = 0x0007,
527};
528
529enum {
530 PROBE_MUX_ADDR_REG_MUX_SEL_MASK = 0x003f,
531 PROBE_MUX_ADDR_REG_SYSCLK = 0x0000,
532 PROBE_MUX_ADDR_REG_PCICLK = 0x0040,
533 PROBE_MUX_ADDR_REG_NRXCLK = 0x0080,
534 PROBE_MUX_ADDR_REG_CPUCLK = 0x00C0,
535 PROBE_MUX_ADDR_REG_MODULE_SEL_MASK = 0x3f00,
536 PROBE_MUX_ADDR_REG_UP = 0x4000,
537 PROBE_MUX_ADDR_REG_RE = 0x8000,
538};
539
540enum {
541 STATISTICS_INDEX_REG_MASK = 0x01ff,
542 STATISTICS_INDEX_REG_MAC0_TX_FRAME = 0x0000,
543 STATISTICS_INDEX_REG_MAC0_TX_BYTES = 0x0001,
544 STATISTICS_INDEX_REG_MAC0_TX_STAT1 = 0x0002,
545 STATISTICS_INDEX_REG_MAC0_TX_STAT2 = 0x0003,
546 STATISTICS_INDEX_REG_MAC0_TX_STAT3 = 0x0004,
547 STATISTICS_INDEX_REG_MAC0_TX_STAT4 = 0x0005,
548 STATISTICS_INDEX_REG_MAC0_TX_STAT5 = 0x0006,
549 STATISTICS_INDEX_REG_MAC0_RX_FRAME = 0x0007,
550 STATISTICS_INDEX_REG_MAC0_RX_BYTES = 0x0008,
551 STATISTICS_INDEX_REG_MAC0_RX_STAT1 = 0x0009,
552 STATISTICS_INDEX_REG_MAC0_RX_STAT2 = 0x000a,
553 STATISTICS_INDEX_REG_MAC0_RX_STAT3 = 0x000b,
554 STATISTICS_INDEX_REG_MAC0_RX_ERR_CRC = 0x000c,
555 STATISTICS_INDEX_REG_MAC0_RX_ERR_ENC = 0x000d,
556 STATISTICS_INDEX_REG_MAC0_RX_ERR_LEN = 0x000e,
557 STATISTICS_INDEX_REG_MAC0_RX_STAT4 = 0x000f,
558 STATISTICS_INDEX_REG_MAC1_TX_FRAME = 0x0010,
559 STATISTICS_INDEX_REG_MAC1_TX_BYTES = 0x0011,
560 STATISTICS_INDEX_REG_MAC1_TX_STAT1 = 0x0012,
561 STATISTICS_INDEX_REG_MAC1_TX_STAT2 = 0x0013,
562 STATISTICS_INDEX_REG_MAC1_TX_STAT3 = 0x0014,
563 STATISTICS_INDEX_REG_MAC1_TX_STAT4 = 0x0015,
564 STATISTICS_INDEX_REG_MAC1_TX_STAT5 = 0x0016,
565 STATISTICS_INDEX_REG_MAC1_RX_FRAME = 0x0017,
566 STATISTICS_INDEX_REG_MAC1_RX_BYTES = 0x0018,
567 STATISTICS_INDEX_REG_MAC1_RX_STAT1 = 0x0019,
568 STATISTICS_INDEX_REG_MAC1_RX_STAT2 = 0x001a,
569 STATISTICS_INDEX_REG_MAC1_RX_STAT3 = 0x001b,
570 STATISTICS_INDEX_REG_MAC1_RX_ERR_CRC = 0x001c,
571 STATISTICS_INDEX_REG_MAC1_RX_ERR_ENC = 0x001d,
572 STATISTICS_INDEX_REG_MAC1_RX_ERR_LEN = 0x001e,
573 STATISTICS_INDEX_REG_MAC1_RX_STAT4 = 0x001f,
574 STATISTICS_INDEX_REG_IP_TX_PKTS = 0x0020,
575 STATISTICS_INDEX_REG_IP_TX_BYTES = 0x0021,
576 STATISTICS_INDEX_REG_IP_TX_FRAG = 0x0022,
577 STATISTICS_INDEX_REG_IP_RX_PKTS = 0x0023,
578 STATISTICS_INDEX_REG_IP_RX_BYTES = 0x0024,
579 STATISTICS_INDEX_REG_IP_RX_FRAG = 0x0025,
580 STATISTICS_INDEX_REG_IP_DGRM_REASSEMBLY = 0x0026,
581 STATISTICS_INDEX_REG_IP_V6_RX_PKTS = 0x0027,
582 STATISTICS_INDEX_REG_IP_RX_PKTERR = 0x0028,
583 STATISTICS_INDEX_REG_IP_REASSEMBLY_ERR = 0x0029,
584 STATISTICS_INDEX_REG_TCP_TX_SEG = 0x0030,
585 STATISTICS_INDEX_REG_TCP_TX_BYTES = 0x0031,
586 STATISTICS_INDEX_REG_TCP_RX_SEG = 0x0032,
587 STATISTICS_INDEX_REG_TCP_RX_BYTES = 0x0033,
588 STATISTICS_INDEX_REG_TCP_TIMER_EXP = 0x0034,
589 STATISTICS_INDEX_REG_TCP_RX_ACK = 0x0035,
590 STATISTICS_INDEX_REG_TCP_TX_ACK = 0x0036,
591 STATISTICS_INDEX_REG_TCP_RX_ERR = 0x0037,
592 STATISTICS_INDEX_REG_TCP_RX_WIN_PROBE = 0x0038,
593 STATISTICS_INDEX_REG_TCP_ECC_ERR_CORR = 0x003f,
594};
595
596enum {
597 PORT_FATAL_ERROR_STATUS_OFB_RE_MAC0 = 0x00000001,
598 PORT_FATAL_ERROR_STATUS_OFB_RE_MAC1 = 0x00000002,
599 PORT_FATAL_ERROR_STATUS_OFB_WE = 0x00000004,
600 PORT_FATAL_ERROR_STATUS_IFB_RE = 0x00000008,
601 PORT_FATAL_ERROR_STATUS_IFB_WE_MAC0 = 0x00000010,
602 PORT_FATAL_ERROR_STATUS_IFB_WE_MAC1 = 0x00000020,
603 PORT_FATAL_ERROR_STATUS_ODE_RE = 0x00000040,
604 PORT_FATAL_ERROR_STATUS_ODE_WE = 0x00000080,
605 PORT_FATAL_ERROR_STATUS_IDE_RE = 0x00000100,
606 PORT_FATAL_ERROR_STATUS_IDE_WE = 0x00000200,
607 PORT_FATAL_ERROR_STATUS_SDE_RE = 0x00000400,
608 PORT_FATAL_ERROR_STATUS_SDE_WE = 0x00000800,
609 PORT_FATAL_ERROR_STATUS_BLE = 0x00001000,
610 PORT_FATAL_ERROR_STATUS_SPE = 0x00002000,
611 PORT_FATAL_ERROR_STATUS_EP0 = 0x00004000,
612 PORT_FATAL_ERROR_STATUS_EP1 = 0x00008000,
613 PORT_FATAL_ERROR_STATUS_ICE = 0x00010000,
614 PORT_FATAL_ERROR_STATUS_ILE = 0x00020000,
615 PORT_FATAL_ERROR_STATUS_OPE = 0x00040000,
616 PORT_FATAL_ERROR_STATUS_TA = 0x00080000,
617 PORT_FATAL_ERROR_STATUS_MA = 0x00100000,
618 PORT_FATAL_ERROR_STATUS_SCE = 0x00200000,
619 PORT_FATAL_ERROR_STATUS_RPE = 0x00400000,
620 PORT_FATAL_ERROR_STATUS_MPE = 0x00800000,
621 PORT_FATAL_ERROR_STATUS_OCE = 0x01000000,
622};
623
624/*
625 * port control and status page - page 0
626 */
627
628struct ql3xxx_port_registers {
629 struct ql3xxx_common_registers CommonRegs;
630
631 u32 ExternalHWConfig;
632 u32 InternalChipConfig;
633 u32 portControl;
634 u32 portStatus;
635 u32 macAddrIndirectPtrReg;
636 u32 macAddrDataReg;
637 u32 macMIIMgmtControlReg;
638 u32 macMIIMgmtAddrReg;
639 u32 macMIIMgmtDataReg;
640 u32 macMIIStatusReg;
641 u32 mac0ConfigReg;
642 u32 mac0IpgIfgReg;
643 u32 mac0HalfDuplexReg;
644 u32 mac0MaxFrameLengthReg;
645 u32 mac0PauseThresholdReg;
646 u32 mac1ConfigReg;
647 u32 mac1IpgIfgReg;
648 u32 mac1HalfDuplexReg;
649 u32 mac1MaxFrameLengthReg;
650 u32 mac1PauseThresholdReg;
651 u32 ipAddrIndexReg;
652 u32 ipAddrDataReg;
653 u32 ipReassemblyTimeout;
654 u32 tcpMaxWindow;
655 u32 currentTcpTimestamp[2];
656 u32 internalRamRWAddrReg;
657 u32 internalRamWDataReg;
658 u32 reclaimedBufferAddrRegLow;
659 u32 reclaimedBufferAddrRegHigh;
660 u32 reserved[2];
661 u32 fpgaRevID;
662 u32 localRamAddr;
663 u32 localRamDataAutoIncr;
664 u32 localRamDataNonIncr;
665 u32 gpOutput;
666 u32 gpInput;
667 u32 probeMuxAddr;
668 u32 probeMuxData;
669 u32 statisticsIndexReg;
670 u32 statisticsReadDataRegAutoIncr;
671 u32 statisticsReadDataRegNoIncr;
672 u32 PortFatalErrStatus;
673};
674
675/*
676 * port host memory config page - page 1
677 */
678struct ql3xxx_host_memory_registers {
679 struct ql3xxx_common_registers CommonRegs;
680
681 u32 reserved[12];
682
683 /* Network Request Queue */
684 u32 reqConsumerIndex;
685 u32 reqConsumerIndexAddrLow;
686 u32 reqConsumerIndexAddrHigh;
687 u32 reqBaseAddrLow;
688 u32 reqBaseAddrHigh;
689 u32 reqLength;
690
691 /* Network Completion Queue */
692 u32 rspProducerIndex;
693 u32 rspProducerIndexAddrLow;
694 u32 rspProducerIndexAddrHigh;
695 u32 rspBaseAddrLow;
696 u32 rspBaseAddrHigh;
697 u32 rspLength;
698
699 /* RX Large Buffer Queue */
700 u32 rxLargeQConsumerIndex;
701 u32 rxLargeQBaseAddrLow;
702 u32 rxLargeQBaseAddrHigh;
703 u32 rxLargeQLength;
704 u32 rxLargeBufferLength;
705
706 /* RX Small Buffer Queue */
707 u32 rxSmallQConsumerIndex;
708 u32 rxSmallQBaseAddrLow;
709 u32 rxSmallQBaseAddrHigh;
710 u32 rxSmallQLength;
711 u32 rxSmallBufferLength;
712
713};
714
715/*
716 * port local RAM page - page 2
717 */
718struct ql3xxx_local_ram_registers {
719 struct ql3xxx_common_registers CommonRegs;
720 u32 bufletSize;
721 u32 maxBufletCount;
722 u32 currentBufletCount;
723 u32 reserved;
724 u32 freeBufletThresholdLow;
725 u32 freeBufletThresholdHigh;
726 u32 ipHashTableBase;
727 u32 ipHashTableCount;
728 u32 tcpHashTableBase;
729 u32 tcpHashTableCount;
730 u32 ncbBase;
731 u32 maxNcbCount;
732 u32 currentNcbCount;
733 u32 drbBase;
734 u32 maxDrbCount;
735 u32 currentDrbCount;
736};
737
738/*
739 * Macros to split a 64-bit DMA address into two 32-bit register values
740 */
741
742#define LS_64BITS(x) (u32)(0xffffffff & ((u64)x))
743#define MS_64BITS(x) (u32)(0xffffffff & (((u64)x)>>16>>16) )
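/*
 * Worked example (hypothetical bus address x = 0x0000000123456789ULL):
 *   LS_64BITS(x) = 0x23456789, MS_64BITS(x) = 0x00000001
 * The two 16-bit shifts in MS_64BITS() are a portability idiom; a plain
 * ">> 32" can draw shift-count warnings where dma_addr_t is 32 bits.
 */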
744
745/*
746 * PHY/MII and PETBI register definitions
747 */
748
749enum {
750 CONTROL_REG = 0,
751 STATUS_REG = 1,
752 PHY_STAT_LINK_UP = 0x0004,
753 PHY_CTRL_LOOPBACK = 0x4000,
754
755 PETBI_CONTROL_REG = 0x00,
756 PETBI_CTRL_SOFT_RESET = 0x8000,
757 PETBI_CTRL_AUTO_NEG = 0x1000,
758 PETBI_CTRL_RESTART_NEG = 0x0200,
759 PETBI_CTRL_FULL_DUPLEX = 0x0100,
760 PETBI_CTRL_SPEED_1000 = 0x0040,
761
762 PETBI_STATUS_REG = 0x01,
763 PETBI_STAT_NEG_DONE = 0x0020,
764 PETBI_STAT_LINK_UP = 0x0004,
765
766 PETBI_NEG_ADVER = 0x04,
767 PETBI_NEG_PAUSE = 0x0080,
768 PETBI_NEG_PAUSE_MASK = 0x0180,
769 PETBI_NEG_DUPLEX = 0x0020,
770 PETBI_NEG_DUPLEX_MASK = 0x0060,
771
772 PETBI_NEG_PARTNER = 0x05,
773 PETBI_NEG_ERROR_MASK = 0x3000,
774
775 PETBI_EXPANSION_REG = 0x06,
776 PETBI_EXP_PAGE_RX = 0x0002,
777
778 PETBI_TBI_CTRL = 0x11,
779 PETBI_TBI_RESET = 0x8000,
780 PETBI_TBI_AUTO_SENSE = 0x0100,
781 PETBI_TBI_SERDES_MODE = 0x0010,
782 PETBI_TBI_SERDES_WRAP = 0x0002,
783
784 AUX_CONTROL_STATUS = 0x1c,
785 PHY_AUX_NEG_DONE = 0x8000,
786 PHY_NEG_PARTNER = 5,
787 PHY_AUX_DUPLEX_STAT = 0x0020,
788 PHY_AUX_SPEED_STAT = 0x0018,
789 PHY_AUX_NO_HW_STRAP = 0x0004,
790 PHY_AUX_RESET_STICK = 0x0002,
791 PHY_NEG_PAUSE = 0x0400,
792 PHY_CTRL_SOFT_RESET = 0x8000,
793 PHY_NEG_ADVER = 4,
794 PHY_NEG_ADV_SPEED = 0x01e0,
795 PHY_CTRL_RESTART_NEG = 0x0200,
796};
797enum {
798/* FM93C56A EEPROM definitions */
799 FM93C56A_START = 0x1,
800/* Commands */
801 FM93C56A_READ = 0x2,
802 FM93C56A_WEN = 0x0,
803 FM93C56A_WRITE = 0x1,
804 FM93C56A_WRITE_ALL = 0x0,
805 FM93C56A_WDS = 0x0,
806 FM93C56A_ERASE = 0x3,
807 FM93C56A_ERASE_ALL = 0x0,
808/* Command Extentions */
809 FM93C56A_WEN_EXT = 0x3,
810 FM93C56A_WRITE_ALL_EXT = 0x1,
811 FM93C56A_WDS_EXT = 0x0,
812 FM93C56A_ERASE_ALL_EXT = 0x2,
813/* Special Bits */
814 FM93C56A_READ_DUMMY_BITS = 1,
815 FM93C56A_READY = 0,
816 FM93C56A_BUSY = 1,
817 FM93C56A_CMD_BITS = 2,
818/* FM93C56A/66A/86A EEPROM sizes */
819 FM93C56A_SIZE_8 = 0x100,
820 FM93C56A_SIZE_16 = 0x80,
821 FM93C66A_SIZE_8 = 0x200,
822 FM93C66A_SIZE_16 = 0x100,
823 FM93C86A_SIZE_16 = 0x400,
824/* Address Bits */
825 FM93C56A_NO_ADDR_BITS_16 = 8,
826 FM93C56A_NO_ADDR_BITS_8 = 9,
827 FM93C86A_NO_ADDR_BITS_16 = 10,
828/* Data Bits */
829 FM93C56A_DATA_BITS_16 = 16,
830 FM93C56A_DATA_BITS_8 = 8,
831};
832enum {
833/* Auburn Bits */
834 AUBURN_EEPROM_DI = 0x8,
835 AUBURN_EEPROM_DI_0 = 0x0,
836 AUBURN_EEPROM_DI_1 = 0x8,
837 AUBURN_EEPROM_DO = 0x4,
838 AUBURN_EEPROM_DO_0 = 0x0,
839 AUBURN_EEPROM_DO_1 = 0x4,
840 AUBURN_EEPROM_CS = 0x2,
841 AUBURN_EEPROM_CS_0 = 0x0,
842 AUBURN_EEPROM_CS_1 = 0x2,
843 AUBURN_EEPROM_CLK_RISE = 0x1,
844 AUBURN_EEPROM_CLK_FALL = 0x0,
845};
846enum {EEPROM_SIZE = FM93C86A_SIZE_16,
847 EEPROM_NO_ADDR_BITS = FM93C86A_NO_ADDR_BITS_16,
848 EEPROM_NO_DATA_BITS = FM93C56A_DATA_BITS_16,
849};
850
851/*
852 * MAC Config data structure
853 */
854 struct eeprom_port_cfg {
855 u16 etherMtu_mac;
856 u16 pauseThreshold_mac;
857 u16 resumeThreshold_mac;
858 u16 portConfiguration;
859#define PORT_CONFIG_AUTO_NEG_ENABLED 0x8000
860#define PORT_CONFIG_SYM_PAUSE_ENABLED 0x4000
861#define PORT_CONFIG_FULL_DUPLEX_ENABLED 0x2000
862#define PORT_CONFIG_HALF_DUPLEX_ENABLED 0x1000
863#define PORT_CONFIG_1000MB_SPEED 0x0400
864#define PORT_CONFIG_100MB_SPEED 0x0200
865#define PORT_CONFIG_10MB_SPEED 0x0100
866#define PORT_CONFIG_LINK_SPEED_MASK 0x0F00
867 u16 reserved[12];
868
869};
870
871/*
872 * BIOS data structure
873 */
874struct eeprom_bios_cfg {
875 u16 SpinDlyEn:1, disBios:1, EnMemMap:1, EnSelectBoot:1, Reserved:12;
876
877 u8 bootID0:7, boodID0Valid:1;
878 u8 bootLun0[8];
879
880 u8 bootID1:7, boodID1Valid:1;
881 u8 bootLun1[8];
882
883 u16 MaxLunsTrgt;
884 u8 reserved[10];
885};
886
887/*
888 * Function Specific Data structure
889 */
890struct eeprom_function_cfg {
891 u8 reserved[30];
892 u8 macAddress[6];
893 u8 macAddressSecondary[6];
894
895 u16 subsysVendorId;
896 u16 subsysDeviceId;
897};
898
899/*
900 * EEPROM format
901 */
902struct eeprom_data {
903 u8 asicId[4];
904 u8 version;
905 u8 numPorts;
906 u16 boardId;
907
908#define EEPROM_BOARDID_STR_SIZE 16
909#define EEPROM_SERIAL_NUM_SIZE 16
910
911 u8 boardIdStr[16];
912 u8 serialNumber[16];
913 u16 extHwConfig;
914 struct eeprom_port_cfg macCfg_port0;
915 struct eeprom_port_cfg macCfg_port1;
916 u16 bufletSize;
917 u16 bufletCount;
918 u16 tcpWindowThreshold50;
919 u16 tcpWindowThreshold25;
920 u16 tcpWindowThreshold0;
921 u16 ipHashTableBaseHi;
922 u16 ipHashTableBaseLo;
923 u16 ipHashTableSize;
924 u16 tcpHashTableBaseHi;
925 u16 tcpHashTableBaseLo;
926 u16 tcpHashTableSize;
927 u16 ncbTableBaseHi;
928 u16 ncbTableBaseLo;
929 u16 ncbTableSize;
930 u16 drbTableBaseHi;
931 u16 drbTableBaseLo;
932 u16 drbTableSize;
933 u16 reserved_142[4];
934 u16 ipReassemblyTimeout;
935 u16 tcpMaxWindowSize;
936 u16 ipSecurity;
937#define IPSEC_CONFIG_PRESENT 0x0001
938 u8 reserved_156[294];
939 u16 qDebug[8];
940 struct eeprom_function_cfg funcCfg_fn0;
941 u16 reserved_510;
942 u8 oemSpace[432];
943 struct eeprom_bios_cfg biosCfg_fn1;
944 struct eeprom_function_cfg funcCfg_fn1;
945 u16 reserved_1022;
946 u8 reserved_1024[464];
947 struct eeprom_function_cfg funcCfg_fn2;
948 u16 reserved_1534;
949 u8 reserved_1536[432];
950 struct eeprom_bios_cfg biosCfg_fn3;
951 struct eeprom_function_cfg funcCfg_fn3;
952 u16 checksum;
953};
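
struct eeprom_data ends in a u16 checksum; judging by the offset-named reserved fields, the image appears to span 2 KB. A common NVRAM convention — assumed here, not confirmed by this patch — is that all 16-bit words, checksum included, sum to zero:

	/* Assumption: an intact image sums to zero over its u16 words.
	 * The driver's real validation routine may differ. */
	static int eeprom_data_is_valid(const u16 *nv)
	{
		u16 sum = 0;
		unsigned int i;

		for (i = 0; i < sizeof(struct eeprom_data) / sizeof(u16); i++)
			sum += nv[i];
		return sum == 0;
	}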
954
955/*
956 * General definitions...
957 */
958
959/*
 960 * Below are a number of compiler switches for controlling driver behavior.
 961 * Some are not supported under certain conditions and are noted as such.
962 */
963
964#define QL3XXX_VENDOR_ID 0x1077
965#define QL3022_DEVICE_ID 0x3022
966
967/* MTU & Frame Size stuff */
968#define NORMAL_MTU_SIZE ETH_DATA_LEN
969#define JUMBO_MTU_SIZE 9000
970#define VLAN_ID_LEN 2
971
972/* Request Queue Related Definitions */
973#define NUM_REQ_Q_ENTRIES 256 /* so that 64 * 64 = 4096 (1 page) */
974
975/* Response Queue Related Definitions */
976#define NUM_RSP_Q_ENTRIES 256 /* so that 256 * 16 = 4096 (1 page) */
977
978/* Transmit and Receive Buffers */
979#define NUM_LBUFQ_ENTRIES 128
980#define NUM_SBUFQ_ENTRIES 64
981#define QL_SMALL_BUFFER_SIZE 32
982#define QL_ADDR_ELE_PER_BUFQ_ENTRY \
983(sizeof(struct lrg_buf_q_entry) / sizeof(struct bufq_addr_element))
 984	/* Each send has at least one control block. This is how many we keep. */
 985#define NUM_SMALL_BUFFERS	(NUM_SBUFQ_ENTRIES * QL_ADDR_ELE_PER_BUFQ_ENTRY)
 986#define NUM_LARGE_BUFFERS	(NUM_LBUFQ_ENTRIES * QL_ADDR_ELE_PER_BUFQ_ENTRY)
987#define QL_HEADER_SPACE 32 /* make header space at top of skb. */
988/*
989 * Large & Small Buffers for Receives
990 */
991struct lrg_buf_q_entry {
992
993 u32 addr0_lower;
994#define IAL_LAST_ENTRY 0x00000001
995#define IAL_CONT_ENTRY 0x00000002
996#define IAL_FLAG_MASK 0x00000003
997 u32 addr0_upper;
998 u32 addr1_lower;
999 u32 addr1_upper;
1000 u32 addr2_lower;
1001 u32 addr2_upper;
1002 u32 addr3_lower;
1003 u32 addr3_upper;
1004 u32 addr4_lower;
1005 u32 addr4_upper;
1006 u32 addr5_lower;
1007 u32 addr5_upper;
1008 u32 addr6_lower;
1009 u32 addr6_upper;
1010 u32 addr7_lower;
1011 u32 addr7_upper;
1012
1013};
1014
1015struct bufq_addr_element {
1016 u32 addr_low;
1017 u32 addr_high;
1018};
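
Given the two structures above, QL_ADDR_ELE_PER_BUFQ_ENTRY works out to 8 (a 64-byte queue entry holding eight 8-byte address pairs), which is where NUM_LARGE_BUFFERS = 128 * 8 and NUM_SMALL_BUFFERS = 64 * 8 come from. A sketch of filling one element follows; the helper name and the placement of the IAL_* flags on the final element are assumptions:

	/* Sketch: split a DMA address into the low/high pair. The IAL flag
	 * bits live in the low word per the defines above. A real
	 * implementation would also convert to the device's little-endian
	 * byte order. */
	static void bufq_set_element(struct bufq_addr_element *e,
				     dma_addr_t map, int is_last)
	{
		u32 flags = is_last ? IAL_LAST_ENTRY : IAL_CONT_ENTRY;

		e->addr_low  = (u32)map | flags;
		e->addr_high = (u32)((u64)map >> 32);
	}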
1019
1020#define QL_NO_RESET 0
1021#define QL_DO_RESET 1
1022
1023enum link_state_t {
1024 LS_UNKNOWN = 0,
1025 LS_DOWN,
1026 LS_DEGRADE,
1027 LS_RECOVER,
1028 LS_UP,
1029};
1030
1031struct ql_rcv_buf_cb {
1032 struct ql_rcv_buf_cb *next;
1033 struct sk_buff *skb;
1034 DECLARE_PCI_UNMAP_ADDR(mapaddr);
1035 DECLARE_PCI_UNMAP_LEN(maplen);
1036 __le32 buf_phy_addr_low;
1037 __le32 buf_phy_addr_high;
1038 int index;
1039};
1040
1041struct ql_tx_buf_cb {
1042 struct sk_buff *skb;
1043	struct ob_mac_iocb_req *queue_entry;
1044 DECLARE_PCI_UNMAP_ADDR(mapaddr);
1045 DECLARE_PCI_UNMAP_LEN(maplen);
1046};
1047
1048/* definitions for type field */
1049#define QL_BUF_TYPE_MACIOCB 0x01
1050#define QL_BUF_TYPE_IPIOCB 0x02
1051#define QL_BUF_TYPE_TCPIOCB 0x03
1052
1053/* qdev->flags definitions. */
1054enum { QL_RESET_DONE = 1, /* Reset finished. */
1055 QL_RESET_ACTIVE = 2, /* Waiting for reset to finish. */
1056 QL_RESET_START = 3, /* Please reset the chip. */
1057 QL_RESET_PER_SCSI = 4, /* SCSI driver requests reset. */
1058 QL_TX_TIMEOUT = 5, /* Timeout in progress. */
1059 QL_LINK_MASTER = 6, /* This driver controls the link. */
1060 QL_ADAPTER_UP = 7, /* Adapter has been brought up. */
1061 QL_THREAD_UP = 8, /* This flag is available. */
1062 QL_LINK_UP = 9, /* Link Status. */
1063 QL_ALLOC_REQ_RSP_Q_DONE = 10,
1064 QL_ALLOC_BUFQS_DONE = 11,
1065 QL_ALLOC_SMALL_BUF_DONE = 12,
1066 QL_LINK_OPTICAL = 13,
1067 QL_MSI_ENABLED = 14,
1068};
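
The enum values above are bit numbers for the adapter's unsigned long flags word (see struct ql3_adapter below), so they pair with the kernel's atomic bitops. A representative use — the surrounding reset logic is invented for illustration:

	if (test_bit(QL_RESET_START, &qdev->flags)) {
		/* hand the reset off to process context */
		clear_bit(QL_RESET_START, &qdev->flags);
		set_bit(QL_RESET_ACTIVE, &qdev->flags);
		queue_work(qdev->workqueue, &qdev->reset_work);
	}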
1069
1070/*
1071 * ql3_adapter - The main Adapter structure definition.
1072 * This structure has all fields relevant to the hardware.
1073 */
1074
1075struct ql3_adapter {
1076 u32 reserved_00;
1077 unsigned long flags;
1078
1079 /* PCI Configuration information for this device */
1080 struct pci_dev *pdev;
1081 struct net_device *ndev; /* Parent NET device */
1082
1083 /* Hardware information */
1084 u8 chip_rev_id;
1085 u8 pci_slot;
1086 u8 pci_width;
1087 u8 pci_x;
1088 u32 msi;
1089 int index;
1090 struct timer_list adapter_timer; /* timer used for various functions */
1091
1092 spinlock_t adapter_lock;
1093 spinlock_t hw_lock;
1094
1095 /* PCI Bus Relative Register Addresses */
1096 u8 *mmap_virt_base; /* stores return value from ioremap() */
1097 struct ql3xxx_port_registers __iomem *mem_map_registers;
1098 u32 current_page; /* tracks current register page */
1099
1100 u32 msg_enable;
1101 u8 reserved_01[2];
1102 u8 reserved_02[2];
1103
1104 /* Page for Shadow Registers */
1105 void *shadow_reg_virt_addr;
1106 dma_addr_t shadow_reg_phy_addr;
1107
1108 /* Net Request Queue */
1109 u32 req_q_size;
1110 u32 reserved_03;
1111 struct ob_mac_iocb_req *req_q_virt_addr;
1112 dma_addr_t req_q_phy_addr;
1113 u16 req_producer_index;
1114 u16 reserved_04;
1115 u16 *preq_consumer_index;
1116 u32 req_consumer_index_phy_addr_high;
1117 u32 req_consumer_index_phy_addr_low;
1118 atomic_t tx_count;
1119 struct ql_tx_buf_cb tx_buf[NUM_REQ_Q_ENTRIES];
1120
1121 /* Net Response Queue */
1122 u32 rsp_q_size;
1123 u32 eeprom_cmd_data;
1124 struct net_rsp_iocb *rsp_q_virt_addr;
1125 dma_addr_t rsp_q_phy_addr;
1126 struct net_rsp_iocb *rsp_current;
1127 u16 rsp_consumer_index;
1128 u16 reserved_06;
1129 u32 *prsp_producer_index;
1130 u32 rsp_producer_index_phy_addr_high;
1131 u32 rsp_producer_index_phy_addr_low;
1132
1133 /* Large Buffer Queue */
1134 u32 lrg_buf_q_alloc_size;
1135 u32 lrg_buf_q_size;
1136 void *lrg_buf_q_alloc_virt_addr;
1137 void *lrg_buf_q_virt_addr;
1138 dma_addr_t lrg_buf_q_alloc_phy_addr;
1139 dma_addr_t lrg_buf_q_phy_addr;
1140 u32 lrg_buf_q_producer_index;
1141 u32 lrg_buf_release_cnt;
1142 struct bufq_addr_element *lrg_buf_next_free;
1143
1144 /* Large (Receive) Buffers */
1145 struct ql_rcv_buf_cb lrg_buf[NUM_LARGE_BUFFERS];
1146 struct ql_rcv_buf_cb *lrg_buf_free_head;
1147 struct ql_rcv_buf_cb *lrg_buf_free_tail;
1148 u32 lrg_buf_free_count;
1149 u32 lrg_buffer_len;
1150 u32 lrg_buf_index;
1151 u32 lrg_buf_skb_check;
1152
1153 /* Small Buffer Queue */
1154 u32 small_buf_q_alloc_size;
1155 u32 small_buf_q_size;
1156 u32 small_buf_q_producer_index;
1157 void *small_buf_q_alloc_virt_addr;
1158 void *small_buf_q_virt_addr;
1159 dma_addr_t small_buf_q_alloc_phy_addr;
1160 dma_addr_t small_buf_q_phy_addr;
1161 u32 small_buf_index;
1162
1163 /* Small (Receive) Buffers */
1164 void *small_buf_virt_addr;
1165 dma_addr_t small_buf_phy_addr;
1166 u32 small_buf_phy_addr_low;
1167 u32 small_buf_phy_addr_high;
1168 u32 small_buf_release_cnt;
1169 u32 small_buf_total_size;
1170
1171 /* ISR related, saves status for DPC. */
1172 u32 control_status;
1173
1174 struct eeprom_data nvram_data;
1175 struct timer_list ioctl_timer;
1176 u32 port_link_state;
1177 u32 last_rsp_offset;
1178
1179 /* 4022 specific */
1180 u32 mac_index; /* Driver's MAC number can be 0 or 1 for first and second networking functions respectively */
1181 u32 PHYAddr; /* Address of PHY 0x1e00 Port 0 and 0x1f00 Port 1 */
1182 u32 mac_ob_opcode; /* Opcode to use on mac transmission */
1183 u32 tcp_ob_opcode; /* Opcode to use on tcp transmission */
1184 u32 update_ob_opcode; /* Opcode to use for updating NCB */
1185 u32 mb_bit_mask; /* MA Bits mask to use on transmission */
1186 u32 numPorts;
1187 struct net_device_stats stats;
1188 struct workqueue_struct *workqueue;
1189 struct work_struct reset_work;
1190 struct work_struct tx_timeout_work;
1191 u32 max_frame_size;
1192};
1193
1194#endif /* _QLA3XXX_H_ */
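
With the header in place, the usual way a handler recovers the adapter state is through the net_device private area — a minimal sketch, assuming the conventional alloc_etherdev(sizeof(struct ql3_adapter)) pairing:

	struct ql3_adapter *qdev = netdev_priv(ndev);
	unsigned long hw_flags;

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	/* ... access qdev->mem_map_registers ... */
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);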
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index de91609ca112..8f8799c3f9d1 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -121,6 +121,7 @@ static const struct pci_device_id sky2_id_table[] = {
121 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4361) }, 121 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4361) },
122 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4362) }, 122 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4362) },
123 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4363) }, 123 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4363) },
124 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4364) },
124 { 0 } 125 { 0 }
125}; 126};
126 127
diff --git a/drivers/net/slhc.c b/drivers/net/slhc.c
index 3a1b7131681c..9a540e2092b9 100644
--- a/drivers/net/slhc.c
+++ b/drivers/net/slhc.c
@@ -94,27 +94,23 @@ slhc_init(int rslots, int tslots)
94 register struct cstate *ts; 94 register struct cstate *ts;
95 struct slcompress *comp; 95 struct slcompress *comp;
96 96
97 comp = (struct slcompress *)kmalloc(sizeof(struct slcompress), 97 comp = kzalloc(sizeof(struct slcompress), GFP_KERNEL);
98 GFP_KERNEL);
99 if (! comp) 98 if (! comp)
100 goto out_fail; 99 goto out_fail;
101 memset(comp, 0, sizeof(struct slcompress));
102 100
103 if ( rslots > 0 && rslots < 256 ) { 101 if ( rslots > 0 && rslots < 256 ) {
104 size_t rsize = rslots * sizeof(struct cstate); 102 size_t rsize = rslots * sizeof(struct cstate);
105 comp->rstate = (struct cstate *) kmalloc(rsize, GFP_KERNEL); 103 comp->rstate = kzalloc(rsize, GFP_KERNEL);
106 if (! comp->rstate) 104 if (! comp->rstate)
107 goto out_free; 105 goto out_free;
108 memset(comp->rstate, 0, rsize);
109 comp->rslot_limit = rslots - 1; 106 comp->rslot_limit = rslots - 1;
110 } 107 }
111 108
112 if ( tslots > 0 && tslots < 256 ) { 109 if ( tslots > 0 && tslots < 256 ) {
113 size_t tsize = tslots * sizeof(struct cstate); 110 size_t tsize = tslots * sizeof(struct cstate);
114 comp->tstate = (struct cstate *) kmalloc(tsize, GFP_KERNEL); 111 comp->tstate = kzalloc(tsize, GFP_KERNEL);
115 if (! comp->tstate) 112 if (! comp->tstate)
116 goto out_free2; 113 goto out_free2;
117 memset(comp->tstate, 0, tsize);
118 comp->tslot_limit = tslots - 1; 114 comp->tslot_limit = tslots - 1;
119 } 115 }
120 116
@@ -141,9 +137,9 @@ slhc_init(int rslots, int tslots)
141 return comp; 137 return comp;
142 138
143out_free2: 139out_free2:
144 kfree((unsigned char *)comp->rstate); 140 kfree(comp->rstate);
145out_free: 141out_free:
146 kfree((unsigned char *)comp); 142 kfree(comp);
147out_fail: 143out_fail:
148 return NULL; 144 return NULL;
149} 145}
@@ -700,20 +696,6 @@ EXPORT_SYMBOL(slhc_compress);
700EXPORT_SYMBOL(slhc_uncompress); 696EXPORT_SYMBOL(slhc_uncompress);
701EXPORT_SYMBOL(slhc_toss); 697EXPORT_SYMBOL(slhc_toss);
702 698
703#ifdef MODULE
704
705int init_module(void)
706{
707 printk(KERN_INFO "CSLIP: code copyright 1989 Regents of the University of California\n");
708 return 0;
709}
710
711void cleanup_module(void)
712{
713 return;
714}
715
716#endif /* MODULE */
717#else /* CONFIG_INET */ 699#else /* CONFIG_INET */
718 700
719 701
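
The slhc hunk above is a mechanical kmalloc-plus-memset to kzalloc conversion; the pattern in isolation:

	struct slcompress *comp;

	/* before: allocate, then zero by hand (the cast is unnecessary in C) */
	comp = (struct slcompress *)kmalloc(sizeof(struct slcompress), GFP_KERNEL);
	if (comp)
		memset(comp, 0, sizeof(struct slcompress));

	/* after: one call that returns zeroed memory */
	comp = kzalloc(sizeof(struct slcompress), GFP_KERNEL);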
diff --git a/drivers/net/tulip/uli526x.c b/drivers/net/tulip/uli526x.c
index fd64b2b3e99c..c4c720e2d4c3 100644
--- a/drivers/net/tulip/uli526x.c
+++ b/drivers/net/tulip/uli526x.c
@@ -1702,7 +1702,6 @@ MODULE_PARM_DESC(mode, "ULi M5261/M5263: Bit 0: 10/100Mbps, bit 2: duplex, bit 8
1702 1702
1703static int __init uli526x_init_module(void) 1703static int __init uli526x_init_module(void)
1704{ 1704{
1705 int rc;
1706 1705
1707 printk(version); 1706 printk(version);
1708 printed_version = 1; 1707 printed_version = 1;
@@ -1714,22 +1713,19 @@ static int __init uli526x_init_module(void)
1714 if (cr6set) 1713 if (cr6set)
1715 uli526x_cr6_user_set = cr6set; 1714 uli526x_cr6_user_set = cr6set;
1716 1715
1717 switch(mode) { 1716 switch (mode) {
1718 case ULI526X_10MHF: 1717 case ULI526X_10MHF:
1719 case ULI526X_100MHF: 1718 case ULI526X_100MHF:
1720 case ULI526X_10MFD: 1719 case ULI526X_10MFD:
1721 case ULI526X_100MFD: 1720 case ULI526X_100MFD:
1722 uli526x_media_mode = mode; 1721 uli526x_media_mode = mode;
1723 break; 1722 break;
1724 default:uli526x_media_mode = ULI526X_AUTO; 1723 default:
1724 uli526x_media_mode = ULI526X_AUTO;
1725 break; 1725 break;
1726 } 1726 }
1727 1727
1728 rc = pci_module_init(&uli526x_driver); 1728 return pci_register_driver(&uli526x_driver);
1729 if (rc < 0)
1730 return rc;
1731
1732 return 0;
1733} 1729}
1734 1730
1735 1731
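
The uli526x change retires pci_module_init() in favor of returning pci_register_driver() directly, which also drops the redundant rc bookkeeping; in isolation:

	static int __init uli526x_init_module(void)
	{
		/* ... module parameter handling as above ... */
		return pci_register_driver(&uli526x_driver);
	}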
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index a4dd13942714..16befbcea58c 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -3950,13 +3950,11 @@ static u16 issuecommand(struct airo_info *ai, Cmd *pCmd, Resp *pRsp) {
3950 pRsp->rsp0 = IN4500(ai, RESP0); 3950 pRsp->rsp0 = IN4500(ai, RESP0);
3951 pRsp->rsp1 = IN4500(ai, RESP1); 3951 pRsp->rsp1 = IN4500(ai, RESP1);
3952 pRsp->rsp2 = IN4500(ai, RESP2); 3952 pRsp->rsp2 = IN4500(ai, RESP2);
3953 if ((pRsp->status & 0xff00)!=0 && pCmd->cmd != CMD_SOFTRESET) { 3953 if ((pRsp->status & 0xff00)!=0 && pCmd->cmd != CMD_SOFTRESET)
3954 airo_print_err(ai->dev->name, "cmd= %x\n", pCmd->cmd); 3954 airo_print_err(ai->dev->name,
3955 airo_print_err(ai->dev->name, "status= %x\n", pRsp->status); 3955 "cmd:%x status:%x rsp0:%x rsp1:%x rsp2:%x",
3956 airo_print_err(ai->dev->name, "Rsp0= %x\n", pRsp->rsp0); 3956 pCmd->cmd, pRsp->status, pRsp->rsp0, pRsp->rsp1,
3957 airo_print_err(ai->dev->name, "Rsp1= %x\n", pRsp->rsp1); 3957 pRsp->rsp2);
3958 airo_print_err(ai->dev->name, "Rsp2= %x\n", pRsp->rsp2);
3959 }
3960 3958
3961 // clear stuck command busy if necessary 3959 // clear stuck command busy if necessary
3962 if (IN4500(ai, COMMAND) & COMMAND_BUSY) { 3960 if (IN4500(ai, COMMAND) & COMMAND_BUSY) {
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx.h b/drivers/net/wireless/bcm43xx/bcm43xx.h
index 17a56828e232..ee6571ed706d 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx.h
+++ b/drivers/net/wireless/bcm43xx/bcm43xx.h
@@ -649,6 +649,19 @@ enum {
649#define bcm43xx_status(bcm) atomic_read(&(bcm)->init_status) 649#define bcm43xx_status(bcm) atomic_read(&(bcm)->init_status)
650#define bcm43xx_set_status(bcm, stat) atomic_set(&(bcm)->init_status, (stat)) 650#define bcm43xx_set_status(bcm, stat) atomic_set(&(bcm)->init_status, (stat))
651 651
652/* *** THEORY OF LOCKING ***
653 *
654 * We have two different locks in the bcm43xx driver.
655 * => bcm->mutex: General sleeping mutex. Protects struct bcm43xx_private
656 * and the device registers. This mutex does _not_ protect
657 * against concurrency from the IRQ handler.
658 * => bcm->irq_lock: IRQ spinlock. Protects against IRQ handler concurrency.
659 *
660 * Please note that, if you only take the irq_lock, you are not protected
661 * against concurrency from the periodic work handlers.
662 * Most times you want to take _both_ locks.
663 */
664
652struct bcm43xx_private { 665struct bcm43xx_private {
653 struct ieee80211_device *ieee; 666 struct ieee80211_device *ieee;
654 struct ieee80211softmac_device *softmac; 667 struct ieee80211softmac_device *softmac;
@@ -659,7 +672,6 @@ struct bcm43xx_private {
659 672
660 void __iomem *mmio_addr; 673 void __iomem *mmio_addr;
661 674
662 /* Locking, see "theory of locking" text below. */
663 spinlock_t irq_lock; 675 spinlock_t irq_lock;
664 struct mutex mutex; 676 struct mutex mutex;
665 677
@@ -691,6 +703,7 @@ struct bcm43xx_private {
691 struct bcm43xx_sprominfo sprom; 703 struct bcm43xx_sprominfo sprom;
692#define BCM43xx_NR_LEDS 4 704#define BCM43xx_NR_LEDS 4
693 struct bcm43xx_led leds[BCM43xx_NR_LEDS]; 705 struct bcm43xx_led leds[BCM43xx_NR_LEDS];
706 spinlock_t leds_lock;
694 707
695 /* The currently active core. */ 708 /* The currently active core. */
696 struct bcm43xx_coreinfo *current_core; 709 struct bcm43xx_coreinfo *current_core;
@@ -763,55 +776,6 @@ struct bcm43xx_private {
763}; 776};
764 777
765 778
766/* *** THEORY OF LOCKING ***
767 *
768 * We have two different locks in the bcm43xx driver.
769 * => bcm->mutex: General sleeping mutex. Protects struct bcm43xx_private
770 * and the device registers.
771 * => bcm->irq_lock: IRQ spinlock. Protects against IRQ handler concurrency.
772 *
773 * We have three types of helper function pairs to utilize these locks.
774 * (Always use the helper functions.)
775 * 1) bcm43xx_{un}lock_noirq():
776 * Takes bcm->mutex. Does _not_ protect against IRQ concurrency,
777 * so it is almost always unsafe, if device IRQs are enabled.
778 * So only use this, if device IRQs are masked.
779 * Locking may sleep.
780 * You can sleep within the critical section.
781 * 2) bcm43xx_{un}lock_irqonly():
782 * Takes bcm->irq_lock. Does _not_ protect against
783 * bcm43xx_lock_noirq() critical sections.
784 * Does only protect against the IRQ handler path and other
785 * irqonly() critical sections.
786 * Locking does not sleep.
787 * You must not sleep within the critical section.
788 * 3) bcm43xx_{un}lock_irqsafe():
789 * This is the cummulative lock and takes both, mutex and irq_lock.
790 * Protects against noirq() and irqonly() critical sections (and
791 * the IRQ handler path).
792 * Locking may sleep.
793 * You must not sleep within the critical section.
794 */
795
796/* Lock type 1 */
797#define bcm43xx_lock_noirq(bcm) mutex_lock(&(bcm)->mutex)
798#define bcm43xx_unlock_noirq(bcm) mutex_unlock(&(bcm)->mutex)
799/* Lock type 2 */
800#define bcm43xx_lock_irqonly(bcm, flags) \
801 spin_lock_irqsave(&(bcm)->irq_lock, flags)
802#define bcm43xx_unlock_irqonly(bcm, flags) \
803 spin_unlock_irqrestore(&(bcm)->irq_lock, flags)
804/* Lock type 3 */
805#define bcm43xx_lock_irqsafe(bcm, flags) do { \
806 bcm43xx_lock_noirq(bcm); \
807 bcm43xx_lock_irqonly(bcm, flags); \
808 } while (0)
809#define bcm43xx_unlock_irqsafe(bcm, flags) do { \
810 bcm43xx_unlock_irqonly(bcm, flags); \
811 bcm43xx_unlock_noirq(bcm); \
812 } while (0)
813
814
815static inline 779static inline
816struct bcm43xx_private * bcm43xx_priv(struct net_device *dev) 780struct bcm43xx_private * bcm43xx_priv(struct net_device *dev)
817{ 781{
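
With the bcm43xx_{un}lock_* helper macros gone, callers open-code the two-lock discipline the relocated "theory of locking" comment describes; the shape used throughout the rest of this patch:

	unsigned long flags;

	mutex_lock(&bcm->mutex);		  /* serialize the sleeping paths */
	spin_lock_irqsave(&bcm->irq_lock, flags); /* and fence off the IRQ handler */
	/* ... access bcm43xx_private / device registers; no sleeping here ... */
	spin_unlock_irqrestore(&bcm->irq_lock, flags);
	mutex_unlock(&bcm->mutex);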
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_debugfs.c b/drivers/net/wireless/bcm43xx/bcm43xx_debugfs.c
index ce2e40b29b4f..2600ee4b803a 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_debugfs.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_debugfs.c
@@ -77,7 +77,8 @@ static ssize_t devinfo_read_file(struct file *file, char __user *userbuf,
77 77
78 down(&big_buffer_sem); 78 down(&big_buffer_sem);
79 79
80 bcm43xx_lock_irqsafe(bcm, flags); 80 mutex_lock(&bcm->mutex);
81 spin_lock_irqsave(&bcm->irq_lock, flags);
81 if (bcm43xx_status(bcm) != BCM43xx_STAT_INITIALIZED) { 82 if (bcm43xx_status(bcm) != BCM43xx_STAT_INITIALIZED) {
82 fappend("Board not initialized.\n"); 83 fappend("Board not initialized.\n");
83 goto out; 84 goto out;
@@ -121,7 +122,8 @@ static ssize_t devinfo_read_file(struct file *file, char __user *userbuf,
121 fappend("\n"); 122 fappend("\n");
122 123
123out: 124out:
124 bcm43xx_unlock_irqsafe(bcm, flags); 125 spin_unlock_irqrestore(&bcm->irq_lock, flags);
126 mutex_unlock(&bcm->mutex);
125 res = simple_read_from_buffer(userbuf, count, ppos, buf, pos); 127 res = simple_read_from_buffer(userbuf, count, ppos, buf, pos);
126 up(&big_buffer_sem); 128 up(&big_buffer_sem);
127 return res; 129 return res;
@@ -159,7 +161,8 @@ static ssize_t spromdump_read_file(struct file *file, char __user *userbuf,
159 unsigned long flags; 161 unsigned long flags;
160 162
161 down(&big_buffer_sem); 163 down(&big_buffer_sem);
162 bcm43xx_lock_irqsafe(bcm, flags); 164 mutex_lock(&bcm->mutex);
165 spin_lock_irqsave(&bcm->irq_lock, flags);
163 if (bcm43xx_status(bcm) != BCM43xx_STAT_INITIALIZED) { 166 if (bcm43xx_status(bcm) != BCM43xx_STAT_INITIALIZED) {
164 fappend("Board not initialized.\n"); 167 fappend("Board not initialized.\n");
165 goto out; 168 goto out;
@@ -169,7 +172,8 @@ static ssize_t spromdump_read_file(struct file *file, char __user *userbuf,
169 fappend("boardflags: 0x%04x\n", bcm->sprom.boardflags); 172 fappend("boardflags: 0x%04x\n", bcm->sprom.boardflags);
170 173
171out: 174out:
172 bcm43xx_unlock_irqsafe(bcm, flags); 175 spin_unlock_irqrestore(&bcm->irq_lock, flags);
176 mutex_unlock(&bcm->mutex);
173 res = simple_read_from_buffer(userbuf, count, ppos, buf, pos); 177 res = simple_read_from_buffer(userbuf, count, ppos, buf, pos);
174 up(&big_buffer_sem); 178 up(&big_buffer_sem);
175 return res; 179 return res;
@@ -188,7 +192,8 @@ static ssize_t tsf_read_file(struct file *file, char __user *userbuf,
188 u64 tsf; 192 u64 tsf;
189 193
190 down(&big_buffer_sem); 194 down(&big_buffer_sem);
191 bcm43xx_lock_irqsafe(bcm, flags); 195 mutex_lock(&bcm->mutex);
196 spin_lock_irqsave(&bcm->irq_lock, flags);
192 if (bcm43xx_status(bcm) != BCM43xx_STAT_INITIALIZED) { 197 if (bcm43xx_status(bcm) != BCM43xx_STAT_INITIALIZED) {
193 fappend("Board not initialized.\n"); 198 fappend("Board not initialized.\n");
194 goto out; 199 goto out;
@@ -199,7 +204,8 @@ static ssize_t tsf_read_file(struct file *file, char __user *userbuf,
199 (unsigned int)(tsf & 0xFFFFFFFFULL)); 204 (unsigned int)(tsf & 0xFFFFFFFFULL));
200 205
201out: 206out:
202 bcm43xx_unlock_irqsafe(bcm, flags); 207 spin_unlock_irqrestore(&bcm->irq_lock, flags);
208 mutex_unlock(&bcm->mutex);
203 res = simple_read_from_buffer(userbuf, count, ppos, buf, pos); 209 res = simple_read_from_buffer(userbuf, count, ppos, buf, pos);
204 up(&big_buffer_sem); 210 up(&big_buffer_sem);
205 return res; 211 return res;
@@ -221,7 +227,8 @@ static ssize_t tsf_write_file(struct file *file, const char __user *user_buf,
221 res = -EFAULT; 227 res = -EFAULT;
222 goto out_up; 228 goto out_up;
223 } 229 }
224 bcm43xx_lock_irqsafe(bcm, flags); 230 mutex_lock(&bcm->mutex);
231 spin_lock_irqsave(&bcm->irq_lock, flags);
225 if (bcm43xx_status(bcm) != BCM43xx_STAT_INITIALIZED) { 232 if (bcm43xx_status(bcm) != BCM43xx_STAT_INITIALIZED) {
226 printk(KERN_INFO PFX "debugfs: Board not initialized.\n"); 233 printk(KERN_INFO PFX "debugfs: Board not initialized.\n");
227 res = -EFAULT; 234 res = -EFAULT;
@@ -237,7 +244,8 @@ static ssize_t tsf_write_file(struct file *file, const char __user *user_buf,
237 res = buf_size; 244 res = buf_size;
238 245
239out_unlock: 246out_unlock:
240 bcm43xx_unlock_irqsafe(bcm, flags); 247 spin_unlock_irqrestore(&bcm->irq_lock, flags);
248 mutex_unlock(&bcm->mutex);
241out_up: 249out_up:
242 up(&big_buffer_sem); 250 up(&big_buffer_sem);
243 return res; 251 return res;
@@ -258,7 +266,8 @@ static ssize_t txstat_read_file(struct file *file, char __user *userbuf,
258 int i, cnt, j = 0; 266 int i, cnt, j = 0;
259 267
260 down(&big_buffer_sem); 268 down(&big_buffer_sem);
261 bcm43xx_lock_irqsafe(bcm, flags); 269 mutex_lock(&bcm->mutex);
270 spin_lock_irqsave(&bcm->irq_lock, flags);
262 271
263 fappend("Last %d logged xmitstatus blobs (Latest first):\n\n", 272 fappend("Last %d logged xmitstatus blobs (Latest first):\n\n",
264 BCM43xx_NR_LOGGED_XMITSTATUS); 273 BCM43xx_NR_LOGGED_XMITSTATUS);
@@ -294,14 +303,15 @@ static ssize_t txstat_read_file(struct file *file, char __user *userbuf,
294 i = BCM43xx_NR_LOGGED_XMITSTATUS - 1; 303 i = BCM43xx_NR_LOGGED_XMITSTATUS - 1;
295 } 304 }
296 305
297 bcm43xx_unlock_irqsafe(bcm, flags); 306 spin_unlock_irqrestore(&bcm->irq_lock, flags);
298 res = simple_read_from_buffer(userbuf, count, ppos, buf, pos); 307 res = simple_read_from_buffer(userbuf, count, ppos, buf, pos);
299 bcm43xx_lock_irqsafe(bcm, flags); 308 spin_lock_irqsave(&bcm->irq_lock, flags);
300 if (*ppos == pos) { 309 if (*ppos == pos) {
301 /* Done. Drop the copied data. */ 310 /* Done. Drop the copied data. */
302 e->xmitstatus_printing = 0; 311 e->xmitstatus_printing = 0;
303 } 312 }
304 bcm43xx_unlock_irqsafe(bcm, flags); 313 spin_unlock_irqrestore(&bcm->irq_lock, flags);
314 mutex_unlock(&bcm->mutex);
305 up(&big_buffer_sem); 315 up(&big_buffer_sem);
306 return res; 316 return res;
307} 317}
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_leds.c b/drivers/net/wireless/bcm43xx/bcm43xx_leds.c
index ec80692d638a..c3f90c8563d9 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_leds.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_leds.c
@@ -51,12 +51,12 @@ static void bcm43xx_led_blink(unsigned long d)
51 struct bcm43xx_private *bcm = led->bcm; 51 struct bcm43xx_private *bcm = led->bcm;
52 unsigned long flags; 52 unsigned long flags;
53 53
54 bcm43xx_lock_irqonly(bcm, flags); 54 spin_lock_irqsave(&bcm->leds_lock, flags);
55 if (led->blink_interval) { 55 if (led->blink_interval) {
56 bcm43xx_led_changestate(led); 56 bcm43xx_led_changestate(led);
57 mod_timer(&led->blink_timer, jiffies + led->blink_interval); 57 mod_timer(&led->blink_timer, jiffies + led->blink_interval);
58 } 58 }
59 bcm43xx_unlock_irqonly(bcm, flags); 59 spin_unlock_irqrestore(&bcm->leds_lock, flags);
60} 60}
61 61
62static void bcm43xx_led_blink_start(struct bcm43xx_led *led, 62static void bcm43xx_led_blink_start(struct bcm43xx_led *led,
@@ -177,7 +177,9 @@ void bcm43xx_leds_update(struct bcm43xx_private *bcm, int activity)
177 int i, turn_on; 177 int i, turn_on;
178 unsigned long interval = 0; 178 unsigned long interval = 0;
179 u16 ledctl; 179 u16 ledctl;
180 unsigned long flags;
180 181
182 spin_lock_irqsave(&bcm->leds_lock, flags);
181 ledctl = bcm43xx_read16(bcm, BCM43xx_MMIO_GPIO_CONTROL); 183 ledctl = bcm43xx_read16(bcm, BCM43xx_MMIO_GPIO_CONTROL);
182 for (i = 0; i < BCM43xx_NR_LEDS; i++) { 184 for (i = 0; i < BCM43xx_NR_LEDS; i++) {
183 led = &(bcm->leds[i]); 185 led = &(bcm->leds[i]);
@@ -266,6 +268,7 @@ void bcm43xx_leds_update(struct bcm43xx_private *bcm, int activity)
266 ledctl &= ~(1 << i); 268 ledctl &= ~(1 << i);
267 } 269 }
268 bcm43xx_write16(bcm, BCM43xx_MMIO_GPIO_CONTROL, ledctl); 270 bcm43xx_write16(bcm, BCM43xx_MMIO_GPIO_CONTROL, ledctl);
271 spin_unlock_irqrestore(&bcm->leds_lock, flags);
269} 272}
270 273
271void bcm43xx_leds_switch_all(struct bcm43xx_private *bcm, int on) 274void bcm43xx_leds_switch_all(struct bcm43xx_private *bcm, int on)
@@ -274,7 +277,9 @@ void bcm43xx_leds_switch_all(struct bcm43xx_private *bcm, int on)
274 u16 ledctl; 277 u16 ledctl;
275 int i; 278 int i;
276 int bit_on; 279 int bit_on;
280 unsigned long flags;
277 281
282 spin_lock_irqsave(&bcm->leds_lock, flags);
278 ledctl = bcm43xx_read16(bcm, BCM43xx_MMIO_GPIO_CONTROL); 283 ledctl = bcm43xx_read16(bcm, BCM43xx_MMIO_GPIO_CONTROL);
279 for (i = 0; i < BCM43xx_NR_LEDS; i++) { 284 for (i = 0; i < BCM43xx_NR_LEDS; i++) {
280 led = &(bcm->leds[i]); 285 led = &(bcm->leds[i]);
@@ -290,4 +295,5 @@ void bcm43xx_leds_switch_all(struct bcm43xx_private *bcm, int on)
290 ledctl &= ~(1 << i); 295 ledctl &= ~(1 << i);
291 } 296 }
292 bcm43xx_write16(bcm, BCM43xx_MMIO_GPIO_CONTROL, ledctl); 297 bcm43xx_write16(bcm, BCM43xx_MMIO_GPIO_CONTROL, ledctl);
298 spin_unlock_irqrestore(&bcm->leds_lock, flags);
293} 299}
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_main.c b/drivers/net/wireless/bcm43xx/bcm43xx_main.c
index df317c1e12a8..ab3a0ee9fac8 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_main.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_main.c
@@ -514,13 +514,13 @@ static int bcm43xx_disable_interrupts_sync(struct bcm43xx_private *bcm, u32 *old
514 unsigned long flags; 514 unsigned long flags;
515 u32 old; 515 u32 old;
516 516
517 bcm43xx_lock_irqonly(bcm, flags); 517 spin_lock_irqsave(&bcm->irq_lock, flags);
518 if (unlikely(bcm43xx_status(bcm) != BCM43xx_STAT_INITIALIZED)) { 518 if (unlikely(bcm43xx_status(bcm) != BCM43xx_STAT_INITIALIZED)) {
519 bcm43xx_unlock_irqonly(bcm, flags); 519 spin_unlock_irqrestore(&bcm->irq_lock, flags);
520 return -EBUSY; 520 return -EBUSY;
521 } 521 }
522 old = bcm43xx_interrupt_disable(bcm, BCM43xx_IRQ_ALL); 522 old = bcm43xx_interrupt_disable(bcm, BCM43xx_IRQ_ALL);
523 bcm43xx_unlock_irqonly(bcm, flags); 523 spin_unlock_irqrestore(&bcm->irq_lock, flags);
524 bcm43xx_synchronize_irq(bcm); 524 bcm43xx_synchronize_irq(bcm);
525 525
526 if (oldstate) 526 if (oldstate)
@@ -1720,7 +1720,7 @@ static void bcm43xx_interrupt_tasklet(struct bcm43xx_private *bcm)
1720# define bcmirq_handled(irq) do { /* nothing */ } while (0) 1720# define bcmirq_handled(irq) do { /* nothing */ } while (0)
1721#endif /* CONFIG_BCM43XX_DEBUG*/ 1721#endif /* CONFIG_BCM43XX_DEBUG*/
1722 1722
1723 bcm43xx_lock_irqonly(bcm, flags); 1723 spin_lock_irqsave(&bcm->irq_lock, flags);
1724 reason = bcm->irq_reason; 1724 reason = bcm->irq_reason;
1725 dma_reason[0] = bcm->dma_reason[0]; 1725 dma_reason[0] = bcm->dma_reason[0];
1726 dma_reason[1] = bcm->dma_reason[1]; 1726 dma_reason[1] = bcm->dma_reason[1];
@@ -1746,7 +1746,7 @@ static void bcm43xx_interrupt_tasklet(struct bcm43xx_private *bcm)
1746 dma_reason[2], dma_reason[3]); 1746 dma_reason[2], dma_reason[3]);
1747 bcm43xx_controller_restart(bcm, "DMA error"); 1747 bcm43xx_controller_restart(bcm, "DMA error");
1748 mmiowb(); 1748 mmiowb();
1749 bcm43xx_unlock_irqonly(bcm, flags); 1749 spin_unlock_irqrestore(&bcm->irq_lock, flags);
1750 return; 1750 return;
1751 } 1751 }
1752 if (unlikely((dma_reason[0] & BCM43xx_DMAIRQ_NONFATALMASK) | 1752 if (unlikely((dma_reason[0] & BCM43xx_DMAIRQ_NONFATALMASK) |
@@ -1834,7 +1834,7 @@ static void bcm43xx_interrupt_tasklet(struct bcm43xx_private *bcm)
1834 bcm43xx_leds_update(bcm, activity); 1834 bcm43xx_leds_update(bcm, activity);
1835 bcm43xx_interrupt_enable(bcm, bcm->irq_savedstate); 1835 bcm43xx_interrupt_enable(bcm, bcm->irq_savedstate);
1836 mmiowb(); 1836 mmiowb();
1837 bcm43xx_unlock_irqonly(bcm, flags); 1837 spin_unlock_irqrestore(&bcm->irq_lock, flags);
1838} 1838}
1839 1839
1840static void pio_irq_workaround(struct bcm43xx_private *bcm, 1840static void pio_irq_workaround(struct bcm43xx_private *bcm,
@@ -3182,25 +3182,26 @@ static void bcm43xx_periodic_work_handler(void *d)
3182 /* Periodic work will take a long time, so we want it to 3182 /* Periodic work will take a long time, so we want it to
3183 * be preemptible.					3183 * be preemptible.
3184 */ 3184 */
3185 bcm43xx_lock_irqonly(bcm, flags);
3186 netif_stop_queue(bcm->net_dev); 3185 netif_stop_queue(bcm->net_dev);
3186 spin_lock_irqsave(&bcm->irq_lock, flags);
3187 if (bcm43xx_using_pio(bcm)) 3187 if (bcm43xx_using_pio(bcm))
3188 bcm43xx_pio_freeze_txqueues(bcm); 3188 bcm43xx_pio_freeze_txqueues(bcm);
3189 savedirqs = bcm43xx_interrupt_disable(bcm, BCM43xx_IRQ_ALL); 3189 savedirqs = bcm43xx_interrupt_disable(bcm, BCM43xx_IRQ_ALL);
3190 bcm43xx_unlock_irqonly(bcm, flags); 3190 spin_unlock_irqrestore(&bcm->irq_lock, flags);
3191 bcm43xx_lock_noirq(bcm); 3191 mutex_lock(&bcm->mutex);
3192 bcm43xx_synchronize_irq(bcm); 3192 bcm43xx_synchronize_irq(bcm);
3193 } else { 3193 } else {
3194 /* Periodic work should take a short time, so we want low	3194 /* Periodic work should take a short time, so we want low
3195 * locking overhead. 3195 * locking overhead.
3196 */ 3196 */
3197 bcm43xx_lock_irqsafe(bcm, flags); 3197 mutex_lock(&bcm->mutex);
3198 spin_lock_irqsave(&bcm->irq_lock, flags);
3198 } 3199 }
3199 3200
3200 do_periodic_work(bcm); 3201 do_periodic_work(bcm);
3201 3202
3202 if (badness > BADNESS_LIMIT) { 3203 if (badness > BADNESS_LIMIT) {
3203 bcm43xx_lock_irqonly(bcm, flags); 3204 spin_lock_irqsave(&bcm->irq_lock, flags);
3204 if (likely(bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED)) { 3205 if (likely(bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED)) {
3205 tasklet_enable(&bcm->isr_tasklet); 3206 tasklet_enable(&bcm->isr_tasklet);
3206 bcm43xx_interrupt_enable(bcm, savedirqs); 3207 bcm43xx_interrupt_enable(bcm, savedirqs);
@@ -3208,13 +3209,10 @@ static void bcm43xx_periodic_work_handler(void *d)
3208 bcm43xx_pio_thaw_txqueues(bcm); 3209 bcm43xx_pio_thaw_txqueues(bcm);
3209 } 3210 }
3210 netif_wake_queue(bcm->net_dev); 3211 netif_wake_queue(bcm->net_dev);
3211 mmiowb();
3212 bcm43xx_unlock_irqonly(bcm, flags);
3213 bcm43xx_unlock_noirq(bcm);
3214 } else {
3215 mmiowb();
3216 bcm43xx_unlock_irqsafe(bcm, flags);
3217 } 3212 }
3213 mmiowb();
3214 spin_unlock_irqrestore(&bcm->irq_lock, flags);
3215 mutex_unlock(&bcm->mutex);
3218} 3216}
3219 3217
3220static void bcm43xx_periodic_tasks_delete(struct bcm43xx_private *bcm) 3218static void bcm43xx_periodic_tasks_delete(struct bcm43xx_private *bcm)
@@ -3276,7 +3274,7 @@ static void bcm43xx_free_board(struct bcm43xx_private *bcm)
3276{ 3274{
3277 int i, err; 3275 int i, err;
3278 3276
3279 bcm43xx_lock_noirq(bcm); 3277 mutex_lock(&bcm->mutex);
3280 bcm43xx_sysfs_unregister(bcm); 3278 bcm43xx_sysfs_unregister(bcm);
3281 bcm43xx_periodic_tasks_delete(bcm); 3279 bcm43xx_periodic_tasks_delete(bcm);
3282 3280
@@ -3297,7 +3295,7 @@ static void bcm43xx_free_board(struct bcm43xx_private *bcm)
3297 bcm43xx_pctl_set_crystal(bcm, 0); 3295 bcm43xx_pctl_set_crystal(bcm, 0);
3298 3296
3299 bcm43xx_set_status(bcm, BCM43xx_STAT_UNINIT); 3297 bcm43xx_set_status(bcm, BCM43xx_STAT_UNINIT);
3300 bcm43xx_unlock_noirq(bcm); 3298 mutex_unlock(&bcm->mutex);
3301} 3299}
3302 3300
3303static int bcm43xx_init_board(struct bcm43xx_private *bcm) 3301static int bcm43xx_init_board(struct bcm43xx_private *bcm)
@@ -3307,7 +3305,7 @@ static int bcm43xx_init_board(struct bcm43xx_private *bcm)
3307 3305
3308 might_sleep(); 3306 might_sleep();
3309 3307
3310 bcm43xx_lock_noirq(bcm); 3308 mutex_lock(&bcm->mutex);
3311 bcm43xx_set_status(bcm, BCM43xx_STAT_INITIALIZING); 3309 bcm43xx_set_status(bcm, BCM43xx_STAT_INITIALIZING);
3312 3310
3313 err = bcm43xx_pctl_set_crystal(bcm, 1); 3311 err = bcm43xx_pctl_set_crystal(bcm, 1);
@@ -3389,7 +3387,7 @@ static int bcm43xx_init_board(struct bcm43xx_private *bcm)
3389 3387
3390 assert(err == 0); 3388 assert(err == 0);
3391out: 3389out:
3392 bcm43xx_unlock_noirq(bcm); 3390 mutex_unlock(&bcm->mutex);
3393 3391
3394 return err; 3392 return err;
3395 3393
@@ -3647,7 +3645,8 @@ static void bcm43xx_ieee80211_set_chan(struct net_device *net_dev,
3647 struct bcm43xx_radioinfo *radio; 3645 struct bcm43xx_radioinfo *radio;
3648 unsigned long flags; 3646 unsigned long flags;
3649 3647
3650 bcm43xx_lock_irqsafe(bcm, flags); 3648 mutex_lock(&bcm->mutex);
3649 spin_lock_irqsave(&bcm->irq_lock, flags);
3651 if (bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED) { 3650 if (bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED) {
3652 bcm43xx_mac_suspend(bcm); 3651 bcm43xx_mac_suspend(bcm);
3653 bcm43xx_radio_selectchannel(bcm, channel, 0); 3652 bcm43xx_radio_selectchannel(bcm, channel, 0);
@@ -3656,7 +3655,8 @@ static void bcm43xx_ieee80211_set_chan(struct net_device *net_dev,
3656 radio = bcm43xx_current_radio(bcm); 3655 radio = bcm43xx_current_radio(bcm);
3657 radio->initial_channel = channel; 3656 radio->initial_channel = channel;
3658 } 3657 }
3659 bcm43xx_unlock_irqsafe(bcm, flags); 3658 spin_unlock_irqrestore(&bcm->irq_lock, flags);
3659 mutex_unlock(&bcm->mutex);
3660} 3660}
3661 3661
3662/* set_security() callback in struct ieee80211_device */ 3662/* set_security() callback in struct ieee80211_device */
@@ -3670,7 +3670,8 @@ static void bcm43xx_ieee80211_set_security(struct net_device *net_dev,
3670 3670
3671 dprintk(KERN_INFO PFX "set security called"); 3671 dprintk(KERN_INFO PFX "set security called");
3672 3672
3673 bcm43xx_lock_irqsafe(bcm, flags); 3673 mutex_lock(&bcm->mutex);
3674 spin_lock_irqsave(&bcm->irq_lock, flags);
3674 3675
3675 for (keyidx = 0; keyidx<WEP_KEYS; keyidx++) 3676 for (keyidx = 0; keyidx<WEP_KEYS; keyidx++)
3676 if (sec->flags & (1<<keyidx)) { 3677 if (sec->flags & (1<<keyidx)) {
@@ -3739,7 +3740,8 @@ static void bcm43xx_ieee80211_set_security(struct net_device *net_dev,
3739 } else 3740 } else
3740 bcm43xx_clear_keys(bcm); 3741 bcm43xx_clear_keys(bcm);
3741 } 3742 }
3742 bcm43xx_unlock_irqsafe(bcm, flags); 3743 spin_unlock_irqrestore(&bcm->irq_lock, flags);
3744 mutex_unlock(&bcm->mutex);
3743} 3745}
3744 3746
3745/* hard_start_xmit() callback in struct ieee80211_device */ 3747/* hard_start_xmit() callback in struct ieee80211_device */
@@ -3751,10 +3753,10 @@ static int bcm43xx_ieee80211_hard_start_xmit(struct ieee80211_txb *txb,
3751 int err = -ENODEV; 3753 int err = -ENODEV;
3752 unsigned long flags; 3754 unsigned long flags;
3753 3755
3754 bcm43xx_lock_irqonly(bcm, flags); 3756 spin_lock_irqsave(&bcm->irq_lock, flags);
3755 if (likely(bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED)) 3757 if (likely(bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED))
3756 err = bcm43xx_tx(bcm, txb); 3758 err = bcm43xx_tx(bcm, txb);
3757 bcm43xx_unlock_irqonly(bcm, flags); 3759 spin_unlock_irqrestore(&bcm->irq_lock, flags);
3758 3760
3759 return err; 3761 return err;
3760} 3762}
@@ -3769,9 +3771,9 @@ static void bcm43xx_net_tx_timeout(struct net_device *net_dev)
3769 struct bcm43xx_private *bcm = bcm43xx_priv(net_dev); 3771 struct bcm43xx_private *bcm = bcm43xx_priv(net_dev);
3770 unsigned long flags; 3772 unsigned long flags;
3771 3773
3772 bcm43xx_lock_irqonly(bcm, flags); 3774 spin_lock_irqsave(&bcm->irq_lock, flags);
3773 bcm43xx_controller_restart(bcm, "TX timeout"); 3775 bcm43xx_controller_restart(bcm, "TX timeout");
3774 bcm43xx_unlock_irqonly(bcm, flags); 3776 spin_unlock_irqrestore(&bcm->irq_lock, flags);
3775} 3777}
3776 3778
3777#ifdef CONFIG_NET_POLL_CONTROLLER 3779#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -3822,6 +3824,7 @@ static int bcm43xx_init_private(struct bcm43xx_private *bcm,
3822 bcm->net_dev = net_dev; 3824 bcm->net_dev = net_dev;
3823 bcm->bad_frames_preempt = modparam_bad_frames_preempt; 3825 bcm->bad_frames_preempt = modparam_bad_frames_preempt;
3824 spin_lock_init(&bcm->irq_lock); 3826 spin_lock_init(&bcm->irq_lock);
3827 spin_lock_init(&bcm->leds_lock);
3825 mutex_init(&bcm->mutex); 3828 mutex_init(&bcm->mutex);
3826 tasklet_init(&bcm->isr_tasklet, 3829 tasklet_init(&bcm->isr_tasklet,
3827 (void (*)(unsigned long))bcm43xx_interrupt_tasklet, 3830 (void (*)(unsigned long))bcm43xx_interrupt_tasklet,
@@ -4002,16 +4005,13 @@ static int bcm43xx_suspend(struct pci_dev *pdev, pm_message_t state)
4002{ 4005{
4003 struct net_device *net_dev = pci_get_drvdata(pdev); 4006 struct net_device *net_dev = pci_get_drvdata(pdev);
4004 struct bcm43xx_private *bcm = bcm43xx_priv(net_dev); 4007 struct bcm43xx_private *bcm = bcm43xx_priv(net_dev);
4005 unsigned long flags;
4006 int try_to_shutdown = 0, err; 4008 int try_to_shutdown = 0, err;
4007 4009
4008 dprintk(KERN_INFO PFX "Suspending...\n"); 4010 dprintk(KERN_INFO PFX "Suspending...\n");
4009 4011
4010 bcm43xx_lock_irqsafe(bcm, flags);
4011 bcm->was_initialized = (bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED); 4012 bcm->was_initialized = (bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED);
4012 if (bcm->was_initialized) 4013 if (bcm->was_initialized)
4013 try_to_shutdown = 1; 4014 try_to_shutdown = 1;
4014 bcm43xx_unlock_irqsafe(bcm, flags);
4015 4015
4016 netif_device_detach(net_dev); 4016 netif_device_detach(net_dev);
4017 if (try_to_shutdown) { 4017 if (try_to_shutdown) {
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_phy.c b/drivers/net/wireless/bcm43xx/bcm43xx_phy.c
index f8200deecc8a..eafd0f662686 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_phy.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_phy.c
@@ -81,6 +81,16 @@ static const s8 bcm43xx_tssi2dbm_g_table[] = {
81static void bcm43xx_phy_initg(struct bcm43xx_private *bcm); 81static void bcm43xx_phy_initg(struct bcm43xx_private *bcm);
82 82
83 83
84static inline
85void bcm43xx_voluntary_preempt(void)
86{
87 assert(!in_atomic() && !in_irq() &&
88 !in_interrupt() && !irqs_disabled());
89#ifndef CONFIG_PREEMPT
90 cond_resched();
91#endif /* CONFIG_PREEMPT */
92}
93
84void bcm43xx_raw_phy_lock(struct bcm43xx_private *bcm) 94void bcm43xx_raw_phy_lock(struct bcm43xx_private *bcm)
85{ 95{
86 struct bcm43xx_phyinfo *phy = bcm43xx_current_phy(bcm); 96 struct bcm43xx_phyinfo *phy = bcm43xx_current_phy(bcm);
@@ -133,22 +143,14 @@ void bcm43xx_phy_write(struct bcm43xx_private *bcm, u16 offset, u16 val)
133void bcm43xx_phy_calibrate(struct bcm43xx_private *bcm) 143void bcm43xx_phy_calibrate(struct bcm43xx_private *bcm)
134{ 144{
135 struct bcm43xx_phyinfo *phy = bcm43xx_current_phy(bcm); 145 struct bcm43xx_phyinfo *phy = bcm43xx_current_phy(bcm);
136 unsigned long flags;
137 146
138 bcm43xx_read32(bcm, BCM43xx_MMIO_STATUS_BITFIELD); /* Dummy read. */ 147 bcm43xx_read32(bcm, BCM43xx_MMIO_STATUS_BITFIELD); /* Dummy read. */
139 if (phy->calibrated) 148 if (phy->calibrated)
140 return; 149 return;
141 if (phy->type == BCM43xx_PHYTYPE_G && phy->rev == 1) { 150 if (phy->type == BCM43xx_PHYTYPE_G && phy->rev == 1) {
142 /* We do not want to be preempted while calibrating
143 * the hardware.
144 */
145 local_irq_save(flags);
146
147 bcm43xx_wireless_core_reset(bcm, 0); 151 bcm43xx_wireless_core_reset(bcm, 0);
148 bcm43xx_phy_initg(bcm); 152 bcm43xx_phy_initg(bcm);
149 bcm43xx_wireless_core_reset(bcm, 1); 153 bcm43xx_wireless_core_reset(bcm, 1);
150
151 local_irq_restore(flags);
152 } 154 }
153 phy->calibrated = 1; 155 phy->calibrated = 1;
154} 156}
@@ -1299,7 +1301,9 @@ static u16 bcm43xx_phy_lo_b_r15_loop(struct bcm43xx_private *bcm)
1299{ 1301{
1300 int i; 1302 int i;
1301 u16 ret = 0; 1303 u16 ret = 0;
1304 unsigned long flags;
1302 1305
1306 local_irq_save(flags);
1303 for (i = 0; i < 10; i++){ 1307 for (i = 0; i < 10; i++){
1304 bcm43xx_phy_write(bcm, 0x0015, 0xAFA0); 1308 bcm43xx_phy_write(bcm, 0x0015, 0xAFA0);
1305 udelay(1); 1309 udelay(1);
@@ -1309,6 +1313,8 @@ static u16 bcm43xx_phy_lo_b_r15_loop(struct bcm43xx_private *bcm)
1309 udelay(40); 1313 udelay(40);
1310 ret += bcm43xx_phy_read(bcm, 0x002C); 1314 ret += bcm43xx_phy_read(bcm, 0x002C);
1311 } 1315 }
1316 local_irq_restore(flags);
1317 bcm43xx_voluntary_preempt();
1312 1318
1313 return ret; 1319 return ret;
1314} 1320}
@@ -1435,6 +1441,7 @@ u16 bcm43xx_phy_lo_g_deviation_subval(struct bcm43xx_private *bcm, u16 control)
1435 } 1441 }
1436 ret = bcm43xx_phy_read(bcm, 0x002D); 1442 ret = bcm43xx_phy_read(bcm, 0x002D);
1437 local_irq_restore(flags); 1443 local_irq_restore(flags);
1444 bcm43xx_voluntary_preempt();
1438 1445
1439 return ret; 1446 return ret;
1440} 1447}
@@ -1760,6 +1767,7 @@ void bcm43xx_phy_lo_g_measure(struct bcm43xx_private *bcm)
1760 bcm43xx_radio_write16(bcm, 0x43, i); 1767 bcm43xx_radio_write16(bcm, 0x43, i);
1761 bcm43xx_radio_write16(bcm, 0x52, radio->txctl2); 1768 bcm43xx_radio_write16(bcm, 0x52, radio->txctl2);
1762 udelay(10); 1769 udelay(10);
1770 bcm43xx_voluntary_preempt();
1763 1771
1764 bcm43xx_phy_set_baseband_attenuation(bcm, j * 2); 1772 bcm43xx_phy_set_baseband_attenuation(bcm, j * 2);
1765 1773
@@ -1803,6 +1811,7 @@ void bcm43xx_phy_lo_g_measure(struct bcm43xx_private *bcm)
1803 radio->txctl2 1811 radio->txctl2
1804 | (3/*txctl1*/ << 4));//FIXME: shouldn't txctl1 be zero here and 3 in the loop above? 1812 | (3/*txctl1*/ << 4));//FIXME: shouldn't txctl1 be zero here and 3 in the loop above?
1805 udelay(10); 1813 udelay(10);
1814 bcm43xx_voluntary_preempt();
1806 1815
1807 bcm43xx_phy_set_baseband_attenuation(bcm, j * 2); 1816 bcm43xx_phy_set_baseband_attenuation(bcm, j * 2);
1808 1817
@@ -1824,6 +1833,7 @@ void bcm43xx_phy_lo_g_measure(struct bcm43xx_private *bcm)
1824 bcm43xx_phy_write(bcm, 0x0812, (r27 << 8) | 0xA2); 1833 bcm43xx_phy_write(bcm, 0x0812, (r27 << 8) | 0xA2);
1825 udelay(2); 1834 udelay(2);
1826 bcm43xx_phy_write(bcm, 0x0812, (r27 << 8) | 0xA3); 1835 bcm43xx_phy_write(bcm, 0x0812, (r27 << 8) | 0xA3);
1836 bcm43xx_voluntary_preempt();
1827 } else 1837 } else
1828 bcm43xx_phy_write(bcm, 0x0015, r27 | 0xEFA0); 1838 bcm43xx_phy_write(bcm, 0x0015, r27 | 0xEFA0);
1829 bcm43xx_phy_lo_adjust(bcm, is_initializing); 1839 bcm43xx_phy_lo_adjust(bcm, is_initializing);
@@ -2188,12 +2198,6 @@ int bcm43xx_phy_init(struct bcm43xx_private *bcm)
2188{ 2198{
2189 struct bcm43xx_phyinfo *phy = bcm43xx_current_phy(bcm); 2199 struct bcm43xx_phyinfo *phy = bcm43xx_current_phy(bcm);
2190 int err = -ENODEV; 2200 int err = -ENODEV;
2191 unsigned long flags;
2192
2193 /* We do not want to be preempted while calibrating
2194 * the hardware.
2195 */
2196 local_irq_save(flags);
2197 2201
2198 switch (phy->type) { 2202 switch (phy->type) {
2199 case BCM43xx_PHYTYPE_A: 2203 case BCM43xx_PHYTYPE_A:
@@ -2227,7 +2231,6 @@ int bcm43xx_phy_init(struct bcm43xx_private *bcm)
2227 err = 0; 2231 err = 0;
2228 break; 2232 break;
2229 } 2233 }
2230 local_irq_restore(flags);
2231 if (err) 2234 if (err)
2232 printk(KERN_WARNING PFX "Unknown PHYTYPE found!\n"); 2235 printk(KERN_WARNING PFX "Unknown PHYTYPE found!\n");
2233 2236
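
The phy.c changes drop the blanket local_irq_save() around whole calibration routines and instead disable IRQs only around the timing-sensitive register sequences, following each with the new bcm43xx_voluntary_preempt() so non-CONFIG_PREEMPT kernels get a reschedule point. The resulting shape, sketched:

	unsigned long flags;

	local_irq_save(flags);
	/* timing-sensitive PHY/radio register accesses, udelay()s, etc. */
	local_irq_restore(flags);
	bcm43xx_voluntary_preempt();	/* asserts we are no longer atomic */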
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_pio.c b/drivers/net/wireless/bcm43xx/bcm43xx_pio.c
index 574085c46152..c60c1743ea06 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_pio.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_pio.c
@@ -262,7 +262,7 @@ static void tx_tasklet(unsigned long d)
262 int err; 262 int err;
263 u16 txctl; 263 u16 txctl;
264 264
265 bcm43xx_lock_irqonly(bcm, flags); 265 spin_lock_irqsave(&bcm->irq_lock, flags);
266 266
267 if (queue->tx_frozen) 267 if (queue->tx_frozen)
268 goto out_unlock; 268 goto out_unlock;
@@ -300,7 +300,7 @@ static void tx_tasklet(unsigned long d)
300 continue; 300 continue;
301 } 301 }
302out_unlock: 302out_unlock:
303 bcm43xx_unlock_irqonly(bcm, flags); 303 spin_unlock_irqrestore(&bcm->irq_lock, flags);
304} 304}
305 305
306static void setup_txqueues(struct bcm43xx_pioqueue *queue) 306static void setup_txqueues(struct bcm43xx_pioqueue *queue)
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_sysfs.c b/drivers/net/wireless/bcm43xx/bcm43xx_sysfs.c
index 6a23bdc75412..cc1ff3c6f140 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_sysfs.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_sysfs.c
@@ -120,12 +120,14 @@ static ssize_t bcm43xx_attr_sprom_show(struct device *dev,
120 GFP_KERNEL); 120 GFP_KERNEL);
121 if (!sprom) 121 if (!sprom)
122 return -ENOMEM; 122 return -ENOMEM;
123 bcm43xx_lock_irqsafe(bcm, flags); 123 mutex_lock(&bcm->mutex);
124 spin_lock_irqsave(&bcm->irq_lock, flags);
124 err = bcm43xx_sprom_read(bcm, sprom); 125 err = bcm43xx_sprom_read(bcm, sprom);
125 if (!err) 126 if (!err)
126 err = sprom2hex(sprom, buf, PAGE_SIZE); 127 err = sprom2hex(sprom, buf, PAGE_SIZE);
127 mmiowb(); 128 mmiowb();
128 bcm43xx_unlock_irqsafe(bcm, flags); 129 spin_unlock_irqrestore(&bcm->irq_lock, flags);
130 mutex_unlock(&bcm->mutex);
129 kfree(sprom); 131 kfree(sprom);
130 132
131 return err; 133 return err;
@@ -150,10 +152,14 @@ static ssize_t bcm43xx_attr_sprom_store(struct device *dev,
150 err = hex2sprom(sprom, buf, count); 152 err = hex2sprom(sprom, buf, count);
151 if (err) 153 if (err)
152 goto out_kfree; 154 goto out_kfree;
153 bcm43xx_lock_irqsafe(bcm, flags); 155 mutex_lock(&bcm->mutex);
156 spin_lock_irqsave(&bcm->irq_lock, flags);
157 spin_lock(&bcm->leds_lock);
154 err = bcm43xx_sprom_write(bcm, sprom); 158 err = bcm43xx_sprom_write(bcm, sprom);
155 mmiowb(); 159 mmiowb();
156 bcm43xx_unlock_irqsafe(bcm, flags); 160 spin_unlock(&bcm->leds_lock);
161 spin_unlock_irqrestore(&bcm->irq_lock, flags);
162 mutex_unlock(&bcm->mutex);
157out_kfree: 163out_kfree:
158 kfree(sprom); 164 kfree(sprom);
159 165
@@ -176,7 +182,7 @@ static ssize_t bcm43xx_attr_interfmode_show(struct device *dev,
176 if (!capable(CAP_NET_ADMIN)) 182 if (!capable(CAP_NET_ADMIN))
177 return -EPERM; 183 return -EPERM;
178 184
179 bcm43xx_lock_noirq(bcm); 185 mutex_lock(&bcm->mutex);
180 186
181 switch (bcm43xx_current_radio(bcm)->interfmode) { 187 switch (bcm43xx_current_radio(bcm)->interfmode) {
182 case BCM43xx_RADIO_INTERFMODE_NONE: 188 case BCM43xx_RADIO_INTERFMODE_NONE:
@@ -193,7 +199,7 @@ static ssize_t bcm43xx_attr_interfmode_show(struct device *dev,
193 } 199 }
194 err = 0; 200 err = 0;
195 201
196 bcm43xx_unlock_noirq(bcm); 202 mutex_unlock(&bcm->mutex);
197 203
198 return err ? err : count; 204 return err ? err : count;
199 205
@@ -229,7 +235,8 @@ static ssize_t bcm43xx_attr_interfmode_store(struct device *dev,
229 return -EINVAL; 235 return -EINVAL;
230 } 236 }
231 237
232 bcm43xx_lock_irqsafe(bcm, flags); 238 mutex_lock(&bcm->mutex);
239 spin_lock_irqsave(&bcm->irq_lock, flags);
233 240
234 err = bcm43xx_radio_set_interference_mitigation(bcm, mode); 241 err = bcm43xx_radio_set_interference_mitigation(bcm, mode);
235 if (err) { 242 if (err) {
@@ -237,7 +244,8 @@ static ssize_t bcm43xx_attr_interfmode_store(struct device *dev,
237 "supported by device\n"); 244 "supported by device\n");
238 } 245 }
239 mmiowb(); 246 mmiowb();
240 bcm43xx_unlock_irqsafe(bcm, flags); 247 spin_unlock_irqrestore(&bcm->irq_lock, flags);
248 mutex_unlock(&bcm->mutex);
241 249
242 return err ? err : count; 250 return err ? err : count;
243} 251}
@@ -257,7 +265,7 @@ static ssize_t bcm43xx_attr_preamble_show(struct device *dev,
257 if (!capable(CAP_NET_ADMIN)) 265 if (!capable(CAP_NET_ADMIN))
258 return -EPERM; 266 return -EPERM;
259 267
260 bcm43xx_lock_noirq(bcm); 268 mutex_lock(&bcm->mutex);
261 269
262 if (bcm->short_preamble) 270 if (bcm->short_preamble)
263 count = snprintf(buf, PAGE_SIZE, "1 (Short Preamble enabled)\n"); 271 count = snprintf(buf, PAGE_SIZE, "1 (Short Preamble enabled)\n");
@@ -265,7 +273,7 @@ static ssize_t bcm43xx_attr_preamble_show(struct device *dev,
265 count = snprintf(buf, PAGE_SIZE, "0 (Short Preamble disabled)\n"); 273 count = snprintf(buf, PAGE_SIZE, "0 (Short Preamble disabled)\n");
266 274
267 err = 0; 275 err = 0;
268 bcm43xx_unlock_noirq(bcm); 276 mutex_unlock(&bcm->mutex);
269 277
270 return err ? err : count; 278 return err ? err : count;
271} 279}
@@ -285,12 +293,14 @@ static ssize_t bcm43xx_attr_preamble_store(struct device *dev,
285 value = get_boolean(buf, count); 293 value = get_boolean(buf, count);
286 if (value < 0) 294 if (value < 0)
287 return value; 295 return value;
288 bcm43xx_lock_irqsafe(bcm, flags); 296 mutex_lock(&bcm->mutex);
297 spin_lock_irqsave(&bcm->irq_lock, flags);
289 298
290 bcm->short_preamble = !!value; 299 bcm->short_preamble = !!value;
291 300
292 err = 0; 301 err = 0;
293 bcm43xx_unlock_irqsafe(bcm, flags); 302 spin_unlock_irqrestore(&bcm->irq_lock, flags);
303 mutex_unlock(&bcm->mutex);
294 304
295 return err ? err : count; 305 return err ? err : count;
296} 306}
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_wx.c b/drivers/net/wireless/bcm43xx/bcm43xx_wx.c
index 5c36e29efff7..8ffd760dc830 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_wx.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_wx.c
@@ -47,6 +47,9 @@
47#define BCM43xx_WX_VERSION 18 47#define BCM43xx_WX_VERSION 18
48 48
49#define MAX_WX_STRING 80 49#define MAX_WX_STRING 80
50/* FIXME: the next line is a guess as to what the maximum value of RX power
51 (in dBm) might be */
52#define RX_POWER_MAX -10
50 53
51 54
52static int bcm43xx_wx_get_name(struct net_device *net_dev, 55static int bcm43xx_wx_get_name(struct net_device *net_dev,
@@ -56,12 +59,11 @@ static int bcm43xx_wx_get_name(struct net_device *net_dev,
56{ 59{
57 struct bcm43xx_private *bcm = bcm43xx_priv(net_dev); 60 struct bcm43xx_private *bcm = bcm43xx_priv(net_dev);
58 int i; 61 int i;
59 unsigned long flags;
60 struct bcm43xx_phyinfo *phy; 62 struct bcm43xx_phyinfo *phy;
61 char suffix[7] = { 0 }; 63 char suffix[7] = { 0 };
62 int have_a = 0, have_b = 0, have_g = 0; 64 int have_a = 0, have_b = 0, have_g = 0;
63 65
64 bcm43xx_lock_irqsafe(bcm, flags); 66 mutex_lock(&bcm->mutex);
65 for (i = 0; i < bcm->nr_80211_available; i++) { 67 for (i = 0; i < bcm->nr_80211_available; i++) {
66 phy = &(bcm->core_80211_ext[i].phy); 68 phy = &(bcm->core_80211_ext[i].phy);
67 switch (phy->type) { 69 switch (phy->type) {
@@ -77,7 +79,7 @@ static int bcm43xx_wx_get_name(struct net_device *net_dev,
77 assert(0); 79 assert(0);
78 } 80 }
79 } 81 }
80 bcm43xx_unlock_irqsafe(bcm, flags); 82 mutex_unlock(&bcm->mutex);
81 83
82 i = 0; 84 i = 0;
83 if (have_a) { 85 if (have_a) {
@@ -111,7 +113,9 @@ static int bcm43xx_wx_set_channelfreq(struct net_device *net_dev,
111 int freq; 113 int freq;
112 int err = -EINVAL; 114 int err = -EINVAL;
113 115
114 bcm43xx_lock_irqsafe(bcm, flags); 116 mutex_lock(&bcm->mutex);
117 spin_lock_irqsave(&bcm->irq_lock, flags);
118
115 if ((data->freq.m >= 0) && (data->freq.m <= 1000)) { 119 if ((data->freq.m >= 0) && (data->freq.m <= 1000)) {
116 channel = data->freq.m; 120 channel = data->freq.m;
117 freq = bcm43xx_channel_to_freq(bcm, channel); 121 freq = bcm43xx_channel_to_freq(bcm, channel);
@@ -131,7 +135,8 @@ static int bcm43xx_wx_set_channelfreq(struct net_device *net_dev,
131 err = 0; 135 err = 0;
132 } 136 }
133out_unlock: 137out_unlock:
134 bcm43xx_unlock_irqsafe(bcm, flags); 138 spin_unlock_irqrestore(&bcm->irq_lock, flags);
139 mutex_unlock(&bcm->mutex);
135 140
136 return err; 141 return err;
137} 142}
@@ -143,11 +148,10 @@ static int bcm43xx_wx_get_channelfreq(struct net_device *net_dev,
143{ 148{
144 struct bcm43xx_private *bcm = bcm43xx_priv(net_dev); 149 struct bcm43xx_private *bcm = bcm43xx_priv(net_dev);
145 struct bcm43xx_radioinfo *radio; 150 struct bcm43xx_radioinfo *radio;
146 unsigned long flags;
147 int err = -ENODEV; 151 int err = -ENODEV;
148 u16 channel; 152 u16 channel;
149 153
150 bcm43xx_lock_irqsafe(bcm, flags); 154 mutex_lock(&bcm->mutex);
151 radio = bcm43xx_current_radio(bcm); 155 radio = bcm43xx_current_radio(bcm);
152 channel = radio->channel; 156 channel = radio->channel;
153 if (channel == 0xFF) { 157 if (channel == 0xFF) {
@@ -162,7 +166,7 @@ static int bcm43xx_wx_get_channelfreq(struct net_device *net_dev,
162 166
163 err = 0; 167 err = 0;
164out_unlock: 168out_unlock:
165 bcm43xx_unlock_irqsafe(bcm, flags); 169 mutex_unlock(&bcm->mutex);
166 170
167 return err; 171 return err;
168} 172}
@@ -180,13 +184,15 @@ static int bcm43xx_wx_set_mode(struct net_device *net_dev,
180 if (mode == IW_MODE_AUTO) 184 if (mode == IW_MODE_AUTO)
181 mode = BCM43xx_INITIAL_IWMODE; 185 mode = BCM43xx_INITIAL_IWMODE;
182 186
183 bcm43xx_lock_irqsafe(bcm, flags); 187 mutex_lock(&bcm->mutex);
188 spin_lock_irqsave(&bcm->irq_lock, flags);
184 if (bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED) { 189 if (bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED) {
185 if (bcm->ieee->iw_mode != mode) 190 if (bcm->ieee->iw_mode != mode)
186 bcm43xx_set_iwmode(bcm, mode); 191 bcm43xx_set_iwmode(bcm, mode);
187 } else 192 } else
188 bcm->ieee->iw_mode = mode; 193 bcm->ieee->iw_mode = mode;
189 bcm43xx_unlock_irqsafe(bcm, flags); 194 spin_unlock_irqrestore(&bcm->irq_lock, flags);
195 mutex_unlock(&bcm->mutex);
190 196
191 return 0; 197 return 0;
192} 198}
@@ -197,11 +203,10 @@ static int bcm43xx_wx_get_mode(struct net_device *net_dev,
197 char *extra) 203 char *extra)
198{ 204{
199 struct bcm43xx_private *bcm = bcm43xx_priv(net_dev); 205 struct bcm43xx_private *bcm = bcm43xx_priv(net_dev);
200 unsigned long flags;
201 206
202 bcm43xx_lock_irqsafe(bcm, flags); 207 mutex_lock(&bcm->mutex);
203 data->mode = bcm->ieee->iw_mode; 208 data->mode = bcm->ieee->iw_mode;
204 bcm43xx_unlock_irqsafe(bcm, flags); 209 mutex_unlock(&bcm->mutex);
205 210
206 return 0; 211 return 0;
207} 212}
@@ -214,7 +219,6 @@ static int bcm43xx_wx_get_rangeparams(struct net_device *net_dev,
214 struct bcm43xx_private *bcm = bcm43xx_priv(net_dev); 219 struct bcm43xx_private *bcm = bcm43xx_priv(net_dev);
215 struct iw_range *range = (struct iw_range *)extra; 220 struct iw_range *range = (struct iw_range *)extra;
216 const struct ieee80211_geo *geo; 221 const struct ieee80211_geo *geo;
217 unsigned long flags;
218 int i, j; 222 int i, j;
219 struct bcm43xx_phyinfo *phy; 223 struct bcm43xx_phyinfo *phy;
220 224
@@ -227,14 +231,14 @@ static int bcm43xx_wx_get_rangeparams(struct net_device *net_dev,
227 231
228 range->max_qual.qual = 100; 232 range->max_qual.qual = 100;
229 /* TODO: Real max RSSI */ 233 /* TODO: Real max RSSI */
230 range->max_qual.level = 3; 234 range->max_qual.level = 0;
231 range->max_qual.noise = 100; 235 range->max_qual.noise = 0;
232 range->max_qual.updated = 7; 236 range->max_qual.updated = IW_QUAL_ALL_UPDATED;
233 237
234 range->avg_qual.qual = 70; 238 range->avg_qual.qual = 50;
235 range->avg_qual.level = 2; 239 range->avg_qual.level = 0;
236 range->avg_qual.noise = 40; 240 range->avg_qual.noise = 0;
237 range->avg_qual.updated = 7; 241 range->avg_qual.updated = IW_QUAL_ALL_UPDATED;
238 242
239 range->min_rts = BCM43xx_MIN_RTS_THRESHOLD; 243 range->min_rts = BCM43xx_MIN_RTS_THRESHOLD;
240 range->max_rts = BCM43xx_MAX_RTS_THRESHOLD; 244 range->max_rts = BCM43xx_MAX_RTS_THRESHOLD;
@@ -254,7 +258,7 @@ static int bcm43xx_wx_get_rangeparams(struct net_device *net_dev,
254 IW_ENC_CAPA_CIPHER_TKIP | 258 IW_ENC_CAPA_CIPHER_TKIP |
255 IW_ENC_CAPA_CIPHER_CCMP; 259 IW_ENC_CAPA_CIPHER_CCMP;
256 260
257 bcm43xx_lock_irqsafe(bcm, flags); 261 mutex_lock(&bcm->mutex);
258 phy = bcm43xx_current_phy(bcm); 262 phy = bcm43xx_current_phy(bcm);
259 263
260 range->num_bitrates = 0; 264 range->num_bitrates = 0;
@@ -301,7 +305,7 @@ static int bcm43xx_wx_get_rangeparams(struct net_device *net_dev,
301 } 305 }
302 range->num_frequency = j; 306 range->num_frequency = j;
303 307
304 bcm43xx_unlock_irqsafe(bcm, flags); 308 mutex_unlock(&bcm->mutex);
305 309
306 return 0; 310 return 0;
307} 311}
@@ -314,11 +318,11 @@ static int bcm43xx_wx_set_nick(struct net_device *net_dev,
314 struct bcm43xx_private *bcm = bcm43xx_priv(net_dev); 318 struct bcm43xx_private *bcm = bcm43xx_priv(net_dev);
315 size_t len; 319 size_t len;
316 320
317 bcm43xx_lock_noirq(bcm); 321 mutex_lock(&bcm->mutex);
318 len = min((size_t)data->data.length, (size_t)IW_ESSID_MAX_SIZE); 322 len = min((size_t)data->data.length, (size_t)IW_ESSID_MAX_SIZE);
319 memcpy(bcm->nick, extra, len); 323 memcpy(bcm->nick, extra, len);
320 bcm->nick[len] = '\0'; 324 bcm->nick[len] = '\0';
321 bcm43xx_unlock_noirq(bcm); 325 mutex_unlock(&bcm->mutex);
322 326
323 return 0; 327 return 0;
324} 328}
@@ -331,12 +335,12 @@ static int bcm43xx_wx_get_nick(struct net_device *net_dev,
331 struct bcm43xx_private *bcm = bcm43xx_priv(net_dev); 335 struct bcm43xx_private *bcm = bcm43xx_priv(net_dev);
332 size_t len; 336 size_t len;
333 337
334 bcm43xx_lock_noirq(bcm); 338 mutex_lock(&bcm->mutex);
335 len = strlen(bcm->nick) + 1; 339 len = strlen(bcm->nick) + 1;
336 memcpy(extra, bcm->nick, len); 340 memcpy(extra, bcm->nick, len);
337 data->data.length = (__u16)len; 341 data->data.length = (__u16)len;
338 data->data.flags = 1; 342 data->data.flags = 1;
339 bcm43xx_unlock_noirq(bcm); 343 mutex_unlock(&bcm->mutex);
340 344
341 return 0; 345 return 0;
342} 346}
@@ -350,7 +354,8 @@ static int bcm43xx_wx_set_rts(struct net_device *net_dev,
350 unsigned long flags; 354 unsigned long flags;
351 int err = -EINVAL; 355 int err = -EINVAL;
352 356
353 bcm43xx_lock_irqsafe(bcm, flags); 357 mutex_lock(&bcm->mutex);
358 spin_lock_irqsave(&bcm->irq_lock, flags);
354 if (data->rts.disabled) { 359 if (data->rts.disabled) {
355 bcm->rts_threshold = BCM43xx_MAX_RTS_THRESHOLD; 360 bcm->rts_threshold = BCM43xx_MAX_RTS_THRESHOLD;
356 err = 0; 361 err = 0;
@@ -361,7 +366,8 @@ static int bcm43xx_wx_set_rts(struct net_device *net_dev,
361 err = 0; 366 err = 0;
362 } 367 }
363 } 368 }
364 bcm43xx_unlock_irqsafe(bcm, flags); 369 spin_unlock_irqrestore(&bcm->irq_lock, flags);
370 mutex_unlock(&bcm->mutex);
365 371
366 return err; 372 return err;
367} 373}
@@ -372,13 +378,12 @@ static int bcm43xx_wx_get_rts(struct net_device *net_dev,
372 char *extra) 378 char *extra)
373{ 379{
374 struct bcm43xx_private *bcm = bcm43xx_priv(net_dev); 380 struct bcm43xx_private *bcm = bcm43xx_priv(net_dev);
375 unsigned long flags;
376 381
377 bcm43xx_lock_irqsafe(bcm, flags); 382 mutex_lock(&bcm->mutex);
378 data->rts.value = bcm->rts_threshold; 383 data->rts.value = bcm->rts_threshold;
379 data->rts.fixed = 0; 384 data->rts.fixed = 0;
380 data->rts.disabled = (bcm->rts_threshold == BCM43xx_MAX_RTS_THRESHOLD); 385 data->rts.disabled = (bcm->rts_threshold == BCM43xx_MAX_RTS_THRESHOLD);
381 bcm43xx_unlock_irqsafe(bcm, flags); 386 mutex_unlock(&bcm->mutex);
382 387
383 return 0; 388 return 0;
384} 389}
@@ -392,7 +397,8 @@ static int bcm43xx_wx_set_frag(struct net_device *net_dev,
392 unsigned long flags; 397 unsigned long flags;
393 int err = -EINVAL; 398 int err = -EINVAL;
394 399
395 bcm43xx_lock_irqsafe(bcm, flags); 400 mutex_lock(&bcm->mutex);
401 spin_lock_irqsave(&bcm->irq_lock, flags);
396 if (data->frag.disabled) { 402 if (data->frag.disabled) {
397 bcm->ieee->fts = MAX_FRAG_THRESHOLD; 403 bcm->ieee->fts = MAX_FRAG_THRESHOLD;
398 err = 0; 404 err = 0;
@@ -403,7 +409,8 @@ static int bcm43xx_wx_set_frag(struct net_device *net_dev,
403 err = 0; 409 err = 0;
404 } 410 }
405 } 411 }
406 bcm43xx_unlock_irqsafe(bcm, flags); 412 spin_unlock_irqrestore(&bcm->irq_lock, flags);
413 mutex_unlock(&bcm->mutex);
407 414
408 return err; 415 return err;
409} 416}
@@ -414,13 +421,12 @@ static int bcm43xx_wx_get_frag(struct net_device *net_dev,
414 char *extra) 421 char *extra)
415{ 422{
416 struct bcm43xx_private *bcm = bcm43xx_priv(net_dev); 423 struct bcm43xx_private *bcm = bcm43xx_priv(net_dev);
417 unsigned long flags;
418 424
419 bcm43xx_lock_irqsafe(bcm, flags); 425 mutex_lock(&bcm->mutex);
420 data->frag.value = bcm->ieee->fts; 426 data->frag.value = bcm->ieee->fts;
421 data->frag.fixed = 0; 427 data->frag.fixed = 0;
422 data->frag.disabled = (bcm->ieee->fts == MAX_FRAG_THRESHOLD); 428 data->frag.disabled = (bcm->ieee->fts == MAX_FRAG_THRESHOLD);
423 bcm43xx_unlock_irqsafe(bcm, flags); 429 mutex_unlock(&bcm->mutex);
424 430
425 return 0; 431 return 0;
426} 432}
@@ -442,7 +448,8 @@ static int bcm43xx_wx_set_xmitpower(struct net_device *net_dev,
442 return -EOPNOTSUPP; 448 return -EOPNOTSUPP;
443 } 449 }
444 450
445 bcm43xx_lock_irqsafe(bcm, flags); 451 mutex_lock(&bcm->mutex);
452 spin_lock_irqsave(&bcm->irq_lock, flags);
446 if (bcm43xx_status(bcm) != BCM43xx_STAT_INITIALIZED) 453 if (bcm43xx_status(bcm) != BCM43xx_STAT_INITIALIZED)
447 goto out_unlock; 454 goto out_unlock;
448 radio = bcm43xx_current_radio(bcm); 455 radio = bcm43xx_current_radio(bcm);
@@ -466,7 +473,8 @@ static int bcm43xx_wx_set_xmitpower(struct net_device *net_dev,
466 err = 0; 473 err = 0;
467 474
468out_unlock: 475out_unlock:
469 bcm43xx_unlock_irqsafe(bcm, flags); 476 spin_unlock_irqrestore(&bcm->irq_lock, flags);
477 mutex_unlock(&bcm->mutex);
470 478
471 return err; 479 return err;
472} 480}
@@ -478,10 +486,9 @@ static int bcm43xx_wx_get_xmitpower(struct net_device *net_dev,
478{ 486{
479 struct bcm43xx_private *bcm = bcm43xx_priv(net_dev); 487 struct bcm43xx_private *bcm = bcm43xx_priv(net_dev);
480 struct bcm43xx_radioinfo *radio; 488 struct bcm43xx_radioinfo *radio;
481 unsigned long flags;
482 int err = -ENODEV; 489 int err = -ENODEV;
483 490
484 bcm43xx_lock_irqsafe(bcm, flags); 491 mutex_lock(&bcm->mutex);
485 if (bcm43xx_status(bcm) != BCM43xx_STAT_INITIALIZED) 492 if (bcm43xx_status(bcm) != BCM43xx_STAT_INITIALIZED)
486 goto out_unlock; 493 goto out_unlock;
487 radio = bcm43xx_current_radio(bcm); 494 radio = bcm43xx_current_radio(bcm);
@@ -493,7 +500,7 @@ static int bcm43xx_wx_get_xmitpower(struct net_device *net_dev,
493 500
494 err = 0; 501 err = 0;
495out_unlock: 502out_unlock:
496 bcm43xx_unlock_irqsafe(bcm, flags); 503 mutex_unlock(&bcm->mutex);
497 504
498 return err; 505 return err;
499} 506}
@@ -580,7 +587,8 @@ static int bcm43xx_wx_set_interfmode(struct net_device *net_dev,
580 return -EINVAL; 587 return -EINVAL;
581 } 588 }
582 589
583 bcm43xx_lock_irqsafe(bcm, flags); 590 mutex_lock(&bcm->mutex);
591 spin_lock_irqsave(&bcm->irq_lock, flags);
584 if (bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED) { 592 if (bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED) {
585 err = bcm43xx_radio_set_interference_mitigation(bcm, mode); 593 err = bcm43xx_radio_set_interference_mitigation(bcm, mode);
586 if (err) { 594 if (err) {
@@ -595,7 +603,8 @@ static int bcm43xx_wx_set_interfmode(struct net_device *net_dev,
595 } else 603 } else
596 bcm43xx_current_radio(bcm)->interfmode = mode; 604 bcm43xx_current_radio(bcm)->interfmode = mode;
597 } 605 }
598 bcm43xx_unlock_irqsafe(bcm, flags); 606 spin_unlock_irqrestore(&bcm->irq_lock, flags);
607 mutex_unlock(&bcm->mutex);
599 608
600 return err; 609 return err;
601} 610}
@@ -606,12 +615,11 @@ static int bcm43xx_wx_get_interfmode(struct net_device *net_dev,
606 char *extra) 615 char *extra)
607{ 616{
608 struct bcm43xx_private *bcm = bcm43xx_priv(net_dev); 617 struct bcm43xx_private *bcm = bcm43xx_priv(net_dev);
609 unsigned long flags;
610 int mode; 618 int mode;
611 619
612 bcm43xx_lock_irqsafe(bcm, flags); 620 mutex_lock(&bcm->mutex);
613 mode = bcm43xx_current_radio(bcm)->interfmode; 621 mode = bcm43xx_current_radio(bcm)->interfmode;
614 bcm43xx_unlock_irqsafe(bcm, flags); 622 mutex_unlock(&bcm->mutex);
615 623
616 switch (mode) { 624 switch (mode) {
617 case BCM43xx_RADIO_INTERFMODE_NONE: 625 case BCM43xx_RADIO_INTERFMODE_NONE:
@@ -641,9 +649,11 @@ static int bcm43xx_wx_set_shortpreamble(struct net_device *net_dev,
641 int on; 649 int on;
642 650
643 on = *((int *)extra); 651 on = *((int *)extra);
644 bcm43xx_lock_irqsafe(bcm, flags); 652 mutex_lock(&bcm->mutex);
653 spin_lock_irqsave(&bcm->irq_lock, flags);
645 bcm->short_preamble = !!on; 654 bcm->short_preamble = !!on;
646 bcm43xx_unlock_irqsafe(bcm, flags); 655 spin_unlock_irqrestore(&bcm->irq_lock, flags);
656 mutex_unlock(&bcm->mutex);
647 657
648 return 0; 658 return 0;
649} 659}
@@ -654,12 +664,11 @@ static int bcm43xx_wx_get_shortpreamble(struct net_device *net_dev,
654 char *extra) 664 char *extra)
655{ 665{
656 struct bcm43xx_private *bcm = bcm43xx_priv(net_dev); 666 struct bcm43xx_private *bcm = bcm43xx_priv(net_dev);
657 unsigned long flags;
658 int on; 667 int on;
659 668
660 bcm43xx_lock_irqsafe(bcm, flags); 669 mutex_lock(&bcm->mutex);
661 on = bcm->short_preamble; 670 on = bcm->short_preamble;
662 bcm43xx_unlock_irqsafe(bcm, flags); 671 mutex_unlock(&bcm->mutex);
663 672
664 if (on) 673 if (on)
665 strncpy(extra, "1 (Short Preamble enabled)", MAX_WX_STRING); 674 strncpy(extra, "1 (Short Preamble enabled)", MAX_WX_STRING);
@@ -681,11 +690,13 @@ static int bcm43xx_wx_set_swencryption(struct net_device *net_dev,
681 690
682 on = *((int *)extra); 691 on = *((int *)extra);
683 692
684 bcm43xx_lock_irqsafe(bcm, flags); 693 mutex_lock(&bcm->mutex);
694 spin_lock_irqsave(&bcm->irq_lock, flags);
685 bcm->ieee->host_encrypt = !!on; 695 bcm->ieee->host_encrypt = !!on;
686 bcm->ieee->host_decrypt = !!on; 696 bcm->ieee->host_decrypt = !!on;
687 bcm->ieee->host_build_iv = !on; 697 bcm->ieee->host_build_iv = !on;
688 bcm43xx_unlock_irqsafe(bcm, flags); 698 spin_unlock_irqrestore(&bcm->irq_lock, flags);
699 mutex_unlock(&bcm->mutex);
689 700
690 return 0; 701 return 0;
691} 702}
@@ -696,12 +707,11 @@ static int bcm43xx_wx_get_swencryption(struct net_device *net_dev,
696 char *extra) 707 char *extra)
697{ 708{
698 struct bcm43xx_private *bcm = bcm43xx_priv(net_dev); 709 struct bcm43xx_private *bcm = bcm43xx_priv(net_dev);
699 unsigned long flags;
700 int on; 710 int on;
701 711
702 bcm43xx_lock_irqsafe(bcm, flags); 712 mutex_lock(&bcm->mutex);
703 on = bcm->ieee->host_encrypt; 713 on = bcm->ieee->host_encrypt;
704 bcm43xx_unlock_irqsafe(bcm, flags); 714 mutex_unlock(&bcm->mutex);
705 715
706 if (on) 716 if (on)
707 strncpy(extra, "1 (SW encryption enabled) ", MAX_WX_STRING); 717 strncpy(extra, "1 (SW encryption enabled) ", MAX_WX_STRING);
@@ -764,11 +774,13 @@ static int bcm43xx_wx_sprom_read(struct net_device *net_dev,
764 if (!sprom) 774 if (!sprom)
765 goto out; 775 goto out;
766 776
767 bcm43xx_lock_irqsafe(bcm, flags); 777 mutex_lock(&bcm->mutex);
778 spin_lock_irqsave(&bcm->irq_lock, flags);
768 err = -ENODEV; 779 err = -ENODEV;
769 if (bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED) 780 if (bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED)
770 err = bcm43xx_sprom_read(bcm, sprom); 781 err = bcm43xx_sprom_read(bcm, sprom);
771 bcm43xx_unlock_irqsafe(bcm, flags); 782 spin_unlock_irqrestore(&bcm->irq_lock, flags);
783 mutex_unlock(&bcm->mutex);
772 if (!err) 784 if (!err)
773 data->data.length = sprom2hex(sprom, extra); 785 data->data.length = sprom2hex(sprom, extra);
774 kfree(sprom); 786 kfree(sprom);
@@ -809,11 +821,15 @@ static int bcm43xx_wx_sprom_write(struct net_device *net_dev,
809 if (err) 821 if (err)
810 goto out_kfree; 822 goto out_kfree;
811 823
812 bcm43xx_lock_irqsafe(bcm, flags); 824 mutex_lock(&bcm->mutex);
825 spin_lock_irqsave(&bcm->irq_lock, flags);
826 spin_lock(&bcm->leds_lock);
813 err = -ENODEV; 827 err = -ENODEV;
814 if (bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED) 828 if (bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED)
815 err = bcm43xx_sprom_write(bcm, sprom); 829 err = bcm43xx_sprom_write(bcm, sprom);
816 bcm43xx_unlock_irqsafe(bcm, flags); 830 spin_unlock(&bcm->leds_lock);
831 spin_unlock_irqrestore(&bcm->irq_lock, flags);
832 mutex_unlock(&bcm->mutex);
817out_kfree: 833out_kfree:
818 kfree(sprom); 834 kfree(sprom);
819out: 835out:
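The bcm43xx hunks above all follow the same conversion: the old bcm43xx_lock_irqsafe()/bcm43xx_unlock_irqsafe() helpers are replaced by an explicit mutex for sleepable configuration paths, with an irq-safe spinlock nested inside wherever the path also touches state shared with the interrupt handler; read-only getters take only the mutex. A minimal sketch of the pattern, with illustrative names rather than the driver's own:

#include <linux/mutex.h>
#include <linux/spinlock.h>

struct example_dev {
	struct mutex mutex;	/* serializes ioctl/configuration paths */
	spinlock_t irq_lock;	/* guards state also touched in the IRQ handler */
	int shared_state;
};

static void example_set(struct example_dev *dev, int val)
{
	unsigned long flags;

	mutex_lock(&dev->mutex);			/* may sleep */
	spin_lock_irqsave(&dev->irq_lock, flags);	/* IRQ-safe section */
	dev->shared_state = val;
	spin_unlock_irqrestore(&dev->irq_lock, flags);
	mutex_unlock(&dev->mutex);
}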
@@ -827,6 +843,9 @@ static struct iw_statistics *bcm43xx_get_wireless_stats(struct net_device *net_d
827 struct bcm43xx_private *bcm = bcm43xx_priv(net_dev); 843 struct bcm43xx_private *bcm = bcm43xx_priv(net_dev);
828 struct ieee80211softmac_device *mac = ieee80211_priv(net_dev); 844 struct ieee80211softmac_device *mac = ieee80211_priv(net_dev);
829 struct iw_statistics *wstats; 845 struct iw_statistics *wstats;
846 struct ieee80211_network *network = NULL;
847 static int tmp_level = 0;
848 unsigned long flags;
830 849
831 wstats = &bcm->stats.wstats; 850 wstats = &bcm->stats.wstats;
832 if (!mac->associated) { 851 if (!mac->associated) {
@@ -844,16 +863,25 @@ static struct iw_statistics *bcm43xx_get_wireless_stats(struct net_device *net_d
844 wstats->qual.level = 0; 863 wstats->qual.level = 0;
845 wstats->qual.noise = 0; 864 wstats->qual.noise = 0;
846 wstats->qual.updated = 7; 865 wstats->qual.updated = 7;
847 wstats->qual.updated |= IW_QUAL_NOISE_INVALID | 866 wstats->qual.updated |= IW_QUAL_ALL_UPDATED;
848 IW_QUAL_QUAL_INVALID | IW_QUAL_LEVEL_INVALID;
849 return wstats; 867 return wstats;
850 } 868 }
851 /* fill in the real statistics when iface associated */ 869 /* fill in the real statistics when iface associated */
852 wstats->qual.qual = 100; // TODO: get the real signal quality 870 spin_lock_irqsave(&mac->ieee->lock, flags);
853 wstats->qual.level = 3 - bcm->stats.link_quality; 871 list_for_each_entry(network, &mac->ieee->network_list, list) {
872 if (!memcmp(mac->associnfo.bssid, network->bssid, ETH_ALEN)) {
873 if (!tmp_level) /* get initial value */
874 tmp_level = network->stats.rssi;
875 else /* smooth results */
876 tmp_level = (7 * tmp_level + network->stats.rssi)/8;
877 break;
878 }
879 }
880 spin_unlock_irqrestore(&mac->ieee->lock, flags);
881 wstats->qual.level = tmp_level;
882 wstats->qual.qual = 100 + tmp_level - RX_POWER_MAX; // TODO: get the real signal quality
854 wstats->qual.noise = bcm->stats.noise; 883 wstats->qual.noise = bcm->stats.noise;
855 wstats->qual.updated = IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED | 884 wstats->qual.updated = IW_QUAL_ALL_UPDATED;
856 IW_QUAL_NOISE_UPDATED;
857 wstats->discard.code = bcm->ieee->ieee_stats.rx_discards_undecryptable; 885 wstats->discard.code = bcm->ieee->ieee_stats.rx_discards_undecryptable;
858 wstats->discard.retries = bcm->ieee->ieee_stats.tx_retry_limit_exceeded; 886 wstats->discard.retries = bcm->ieee->ieee_stats.tx_retry_limit_exceeded;
859 wstats->discard.nwid = bcm->ieee->ieee_stats.tx_discards_wrong_sa; 887 wstats->discard.nwid = bcm->ieee->ieee_stats.tx_discards_wrong_sa;
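The wireless-stats change above stops reporting a constant level and instead tracks the RSSI of the associated network with an exponential moving average of weight 1/8, seeded by the first sample. The same computation as a standalone sketch (hypothetical helper name, not in the driver):

static int ewma_rssi(int old, int sample)
{
	if (!old)			/* first sample seeds the average */
		return sample;
	return (7 * old + sample) / 8;	/* new = 7/8 * old + 1/8 * sample */
}

Note that the patch keeps this average in a function-local static, so it is shared across all devices driven by the module; a per-device field would avoid that.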
diff --git a/drivers/net/wireless/ipw2200.c b/drivers/net/wireless/ipw2200.c
index b3300ffe4eec..758459e72f3d 100644
--- a/drivers/net/wireless/ipw2200.c
+++ b/drivers/net/wireless/ipw2200.c
@@ -2667,7 +2667,7 @@ static void ipw_fw_dma_abort(struct ipw_priv *priv)
2667 2667
2668 IPW_DEBUG_FW(">> :\n"); 2668 IPW_DEBUG_FW(">> :\n");
2669 2669
2670 //set the Stop and Abort bit 2670 /* set the Stop and Abort bit */
2671 control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_STOP_AND_ABORT; 2671 control = DMA_CONTROL_SMALL_CB_CONST_VALUE | DMA_CB_STOP_AND_ABORT;
2672 ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control); 2672 ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control);
2673 priv->sram_desc.last_cb_index = 0; 2673 priv->sram_desc.last_cb_index = 0;
@@ -3002,8 +3002,6 @@ static int ipw_load_ucode(struct ipw_priv *priv, u8 * data, size_t len)
3002 if (rc < 0) 3002 if (rc < 0)
3003 return rc; 3003 return rc;
3004 3004
3005// spin_lock_irqsave(&priv->lock, flags);
3006
3007 for (addr = IPW_SHARED_LOWER_BOUND; 3005 for (addr = IPW_SHARED_LOWER_BOUND;
3008 addr < IPW_REGISTER_DOMAIN1_END; addr += 4) { 3006 addr < IPW_REGISTER_DOMAIN1_END; addr += 4) {
3009 ipw_write32(priv, addr, 0); 3007 ipw_write32(priv, addr, 0);
@@ -3097,8 +3095,6 @@ static int ipw_load_ucode(struct ipw_priv *priv, u8 * data, size_t len)
 3097 firmware has a problem getting alive resp. */ 3095 firmware has a problem getting alive resp. */
3098 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0); 3096 ipw_write_reg8(priv, IPW_BASEBAND_CONTROL_STATUS, 0);
3099 3097
3100// spin_unlock_irqrestore(&priv->lock, flags);
3101
3102 return rc; 3098 return rc;
3103} 3099}
3104 3100
@@ -6387,13 +6383,6 @@ static int ipw_wx_set_genie(struct net_device *dev,
6387 (wrqu->data.length && extra == NULL)) 6383 (wrqu->data.length && extra == NULL))
6388 return -EINVAL; 6384 return -EINVAL;
6389 6385
6390 //mutex_lock(&priv->mutex);
6391
6392 //if (!ieee->wpa_enabled) {
6393 // err = -EOPNOTSUPP;
6394 // goto out;
6395 //}
6396
6397 if (wrqu->data.length) { 6386 if (wrqu->data.length) {
6398 buf = kmalloc(wrqu->data.length, GFP_KERNEL); 6387 buf = kmalloc(wrqu->data.length, GFP_KERNEL);
6399 if (buf == NULL) { 6388 if (buf == NULL) {
@@ -6413,7 +6402,6 @@ static int ipw_wx_set_genie(struct net_device *dev,
6413 6402
6414 ipw_wpa_assoc_frame(priv, ieee->wpa_ie, ieee->wpa_ie_len); 6403 ipw_wpa_assoc_frame(priv, ieee->wpa_ie, ieee->wpa_ie_len);
6415 out: 6404 out:
6416 //mutex_unlock(&priv->mutex);
6417 return err; 6405 return err;
6418} 6406}
6419 6407
@@ -6426,13 +6414,6 @@ static int ipw_wx_get_genie(struct net_device *dev,
6426 struct ieee80211_device *ieee = priv->ieee; 6414 struct ieee80211_device *ieee = priv->ieee;
6427 int err = 0; 6415 int err = 0;
6428 6416
6429 //mutex_lock(&priv->mutex);
6430
6431 //if (!ieee->wpa_enabled) {
6432 // err = -EOPNOTSUPP;
6433 // goto out;
6434 //}
6435
6436 if (ieee->wpa_ie_len == 0 || ieee->wpa_ie == NULL) { 6417 if (ieee->wpa_ie_len == 0 || ieee->wpa_ie == NULL) {
6437 wrqu->data.length = 0; 6418 wrqu->data.length = 0;
6438 goto out; 6419 goto out;
@@ -6447,7 +6428,6 @@ static int ipw_wx_get_genie(struct net_device *dev,
6447 memcpy(extra, ieee->wpa_ie, ieee->wpa_ie_len); 6428 memcpy(extra, ieee->wpa_ie, ieee->wpa_ie_len);
6448 6429
6449 out: 6430 out:
6450 //mutex_unlock(&priv->mutex);
6451 return err; 6431 return err;
6452} 6432}
6453 6433
@@ -6558,7 +6538,6 @@ static int ipw_wx_set_auth(struct net_device *dev,
6558 ieee->ieee802_1x = param->value; 6538 ieee->ieee802_1x = param->value;
6559 break; 6539 break;
6560 6540
6561 //case IW_AUTH_ROAMING_CONTROL:
6562 case IW_AUTH_PRIVACY_INVOKED: 6541 case IW_AUTH_PRIVACY_INVOKED:
6563 ieee->privacy_invoked = param->value; 6542 ieee->privacy_invoked = param->value;
6564 break; 6543 break;
@@ -6680,7 +6659,7 @@ static int ipw_wx_set_mlme(struct net_device *dev,
6680 6659
6681 switch (mlme->cmd) { 6660 switch (mlme->cmd) {
6682 case IW_MLME_DEAUTH: 6661 case IW_MLME_DEAUTH:
6683 // silently ignore 6662 /* silently ignore */
6684 break; 6663 break;
6685 6664
6686 case IW_MLME_DISASSOC: 6665 case IW_MLME_DISASSOC:
@@ -9766,7 +9745,7 @@ static int ipw_wx_set_monitor(struct net_device *dev,
9766 return 0; 9745 return 0;
9767} 9746}
9768 9747
9769#endif // CONFIG_IPW2200_MONITOR 9748#endif /* CONFIG_IPW2200_MONITOR */
9770 9749
9771static int ipw_wx_reset(struct net_device *dev, 9750static int ipw_wx_reset(struct net_device *dev,
9772 struct iw_request_info *info, 9751 struct iw_request_info *info,
@@ -10009,7 +9988,7 @@ static void init_sys_config(struct ipw_sys_config *sys_config)
10009 sys_config->dot11g_auto_detection = 0; 9988 sys_config->dot11g_auto_detection = 0;
10010 sys_config->enable_cts_to_self = 0; 9989 sys_config->enable_cts_to_self = 0;
10011 sys_config->bt_coexist_collision_thr = 0; 9990 sys_config->bt_coexist_collision_thr = 0;
10012 sys_config->pass_noise_stats_to_host = 1; //1 -- fix for 256 9991 sys_config->pass_noise_stats_to_host = 1; /* 1 -- fix for 256 */
10013 sys_config->silence_threshold = 0x1e; 9992 sys_config->silence_threshold = 0x1e;
10014} 9993}
10015 9994
diff --git a/drivers/net/wireless/prism54/isl_ioctl.c b/drivers/net/wireless/prism54/isl_ioctl.c
index 989599ad33ef..0c30fe7e8f7f 100644
--- a/drivers/net/wireless/prism54/isl_ioctl.c
+++ b/drivers/net/wireless/prism54/isl_ioctl.c
@@ -35,10 +35,14 @@
35 35
36#include <net/iw_handler.h> /* New driver API */ 36#include <net/iw_handler.h> /* New driver API */
37 37
38#define KEY_SIZE_WEP104 13 /* 104/128-bit WEP keys */
39#define KEY_SIZE_WEP40 5 /* 40/64-bit WEP keys */
40/* KEY_SIZE_TKIP should match isl_oid.h, struct obj_key.key[] size */
41#define KEY_SIZE_TKIP 32 /* TKIP keys */
38 42
39static void prism54_wpa_ie_add(islpci_private *priv, u8 *bssid, 43static void prism54_wpa_bss_ie_add(islpci_private *priv, u8 *bssid,
40 u8 *wpa_ie, size_t wpa_ie_len); 44 u8 *wpa_ie, size_t wpa_ie_len);
41static size_t prism54_wpa_ie_get(islpci_private *priv, u8 *bssid, u8 *wpa_ie); 45static size_t prism54_wpa_bss_ie_get(islpci_private *priv, u8 *bssid, u8 *wpa_ie);
42static int prism54_set_wpa(struct net_device *, struct iw_request_info *, 46static int prism54_set_wpa(struct net_device *, struct iw_request_info *,
43 __u32 *, char *); 47 __u32 *, char *);
44 48
@@ -468,6 +472,9 @@ prism54_get_range(struct net_device *ndev, struct iw_request_info *info,
468 range->event_capa[1] = IW_EVENT_CAPA_K_1; 472 range->event_capa[1] = IW_EVENT_CAPA_K_1;
469 range->event_capa[4] = IW_EVENT_CAPA_MASK(IWEVCUSTOM); 473 range->event_capa[4] = IW_EVENT_CAPA_MASK(IWEVCUSTOM);
470 474
475 range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 |
476 IW_ENC_CAPA_CIPHER_TKIP;
477
471 if (islpci_get_state(priv) < PRV_STATE_INIT) 478 if (islpci_get_state(priv) < PRV_STATE_INIT)
472 return 0; 479 return 0;
473 480
@@ -567,6 +574,8 @@ prism54_translate_bss(struct net_device *ndev, char *current_ev,
567 struct iw_event iwe; /* Temporary buffer */ 574 struct iw_event iwe; /* Temporary buffer */
568 short cap; 575 short cap;
569 islpci_private *priv = netdev_priv(ndev); 576 islpci_private *priv = netdev_priv(ndev);
577 u8 wpa_ie[MAX_WPA_IE_LEN];
578 size_t wpa_ie_len;
570 579
571 /* The first entry must be the MAC address */ 580 /* The first entry must be the MAC address */
572 memcpy(iwe.u.ap_addr.sa_data, bss->address, 6); 581 memcpy(iwe.u.ap_addr.sa_data, bss->address, 6);
@@ -627,27 +636,13 @@ prism54_translate_bss(struct net_device *ndev, char *current_ev,
627 current_ev = 636 current_ev =
628 iwe_stream_add_event(current_ev, end_buf, &iwe, IW_EV_QUAL_LEN); 637 iwe_stream_add_event(current_ev, end_buf, &iwe, IW_EV_QUAL_LEN);
629 638
630 if (priv->wpa) { 639 /* Add WPA/RSN Information Element, if any */
631 u8 wpa_ie[MAX_WPA_IE_LEN]; 640 wpa_ie_len = prism54_wpa_bss_ie_get(priv, bss->address, wpa_ie);
632 char *buf, *p; 641 if (wpa_ie_len > 0) {
633 size_t wpa_ie_len; 642 iwe.cmd = IWEVGENIE;
634 int i; 643 iwe.u.data.length = min(wpa_ie_len, (size_t)MAX_WPA_IE_LEN);
635 644 current_ev = iwe_stream_add_point(current_ev, end_buf,
636 wpa_ie_len = prism54_wpa_ie_get(priv, bss->address, wpa_ie); 645 &iwe, wpa_ie);
637 if (wpa_ie_len > 0 &&
638 (buf = kmalloc(wpa_ie_len * 2 + 10, GFP_ATOMIC))) {
639 p = buf;
640 p += sprintf(p, "wpa_ie=");
641 for (i = 0; i < wpa_ie_len; i++) {
642 p += sprintf(p, "%02x", wpa_ie[i]);
643 }
644 memset(&iwe, 0, sizeof (iwe));
645 iwe.cmd = IWEVCUSTOM;
646 iwe.u.data.length = strlen(buf);
647 current_ev = iwe_stream_add_point(current_ev, end_buf,
648 &iwe, buf);
649 kfree(buf);
650 }
651 } 646 }
652 return current_ev; 647 return current_ev;
653} 648}
@@ -1051,12 +1046,24 @@ prism54_set_encode(struct net_device *ndev, struct iw_request_info *info,
1051 current_index = r.u; 1046 current_index = r.u;
1052 /* Verify that the key is not marked as invalid */ 1047 /* Verify that the key is not marked as invalid */
1053 if (!(dwrq->flags & IW_ENCODE_NOKEY)) { 1048 if (!(dwrq->flags & IW_ENCODE_NOKEY)) {
1054 key.length = dwrq->length > sizeof (key.key) ? 1049 if (dwrq->length > KEY_SIZE_TKIP) {
1055 sizeof (key.key) : dwrq->length; 1050 /* User-provided key data too big */
1056 memcpy(key.key, extra, key.length); 1051 return -EINVAL;
1057 if (key.length == 32) 1052 }
1058 /* we want WPA-PSK */ 1053 if (dwrq->length > KEY_SIZE_WEP104) {
1054 /* WPA-PSK TKIP */
1059 key.type = DOT11_PRIV_TKIP; 1055 key.type = DOT11_PRIV_TKIP;
1056 key.length = KEY_SIZE_TKIP;
1057 } else if (dwrq->length > KEY_SIZE_WEP40) {
1058 /* WEP 104/128 */
1059 key.length = KEY_SIZE_WEP104;
1060 } else {
1061 /* WEP 40/64 */
1062 key.length = KEY_SIZE_WEP40;
1063 }
1064 memset(key.key, 0, sizeof (key.key));
1065 memcpy(key.key, extra, dwrq->length);
1066
1060 if ((index < 0) || (index > 3)) 1067 if ((index < 0) || (index > 3))
1061 /* no index provided use the current one */ 1068 /* no index provided use the current one */
1062 index = current_index; 1069 index = current_index;
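The rewritten key handling above derives the cipher from the key length instead of silently truncating: anything longer than a 104-bit WEP key must be 32-byte TKIP material (WPA-PSK), and shorter keys are rounded up to the nearest WEP size and zero-padded. Restated as a standalone sketch using the same constants (hypothetical helper):

static int classify_key(size_t len, u8 *type, size_t *keylen)
{
	if (len > KEY_SIZE_TKIP)
		return -EINVAL;			/* user key data too big */
	if (len > KEY_SIZE_WEP104) {
		*type = DOT11_PRIV_TKIP;	/* WPA-PSK TKIP */
		*keylen = KEY_SIZE_TKIP;
	} else if (len > KEY_SIZE_WEP40) {
		*keylen = KEY_SIZE_WEP104;	/* WEP 104/128 */
	} else {
		*keylen = KEY_SIZE_WEP40;	/* WEP 40/64 */
	}
	return 0;
}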
@@ -1210,6 +1217,489 @@ prism54_set_txpower(struct net_device *ndev, struct iw_request_info *info,
1210 } 1217 }
1211} 1218}
1212 1219
1220static int prism54_set_genie(struct net_device *ndev,
1221 struct iw_request_info *info,
1222 struct iw_point *data, char *extra)
1223{
1224 islpci_private *priv = netdev_priv(ndev);
1225 int alen, ret = 0;
1226 struct obj_attachment *attach;
1227
1228 if (data->length > MAX_WPA_IE_LEN ||
1229 (data->length && extra == NULL))
1230 return -EINVAL;
1231
1232 memcpy(priv->wpa_ie, extra, data->length);
1233 priv->wpa_ie_len = data->length;
1234
1235 alen = sizeof(*attach) + priv->wpa_ie_len;
1236 attach = kzalloc(alen, GFP_KERNEL);
1237 if (attach == NULL)
1238 return -ENOMEM;
1239
1240#define WLAN_FC_TYPE_MGMT 0
1241#define WLAN_FC_STYPE_ASSOC_REQ 0
1242#define WLAN_FC_STYPE_REASSOC_REQ 2
1243
1244 /* Note: endianness is covered by mgt_set_varlen */
1245 attach->type = (WLAN_FC_TYPE_MGMT << 2) |
1246 (WLAN_FC_STYPE_ASSOC_REQ << 4);
1247 attach->id = -1;
1248 attach->size = priv->wpa_ie_len;
1249 memcpy(attach->data, extra, priv->wpa_ie_len);
1250
1251 ret = mgt_set_varlen(priv, DOT11_OID_ATTACHMENT, attach,
1252 priv->wpa_ie_len);
1253 if (ret == 0) {
1254 attach->type = (WLAN_FC_TYPE_MGMT << 2) |
1255 (WLAN_FC_STYPE_REASSOC_REQ << 4);
1256
1257 ret = mgt_set_varlen(priv, DOT11_OID_ATTACHMENT, attach,
1258 priv->wpa_ie_len);
1259 if (ret == 0)
1260 printk(KERN_DEBUG "%s: WPA IE Attachment was set\n",
1261 ndev->name);
1262 }
1263
1264 kfree(attach);
1265 return ret;
1266}
1267
1268
1269static int prism54_get_genie(struct net_device *ndev,
1270 struct iw_request_info *info,
1271 struct iw_point *data, char *extra)
1272{
1273 islpci_private *priv = netdev_priv(ndev);
1274 int len = priv->wpa_ie_len;
1275
1276 if (len <= 0) {
1277 data->length = 0;
1278 return 0;
1279 }
1280
1281 if (data->length < len)
1282 return -E2BIG;
1283
1284 data->length = len;
1285 memcpy(extra, priv->wpa_ie, len);
1286
1287 return 0;
1288}
1289
1290static int prism54_set_auth(struct net_device *ndev,
1291 struct iw_request_info *info,
1292 union iwreq_data *wrqu, char *extra)
1293{
1294 islpci_private *priv = netdev_priv(ndev);
1295 struct iw_param *param = &wrqu->param;
1296 u32 mlmelevel = 0, authen = 0, dot1x = 0;
1297 u32 exunencrypt = 0, privinvoked = 0, wpa = 0;
1298 u32 old_wpa;
1299 int ret = 0;
1300 union oid_res_t r;
1301
1302 if (islpci_get_state(priv) < PRV_STATE_INIT)
1303 return 0;
1304
1305 /* first get the flags */
1306 down_write(&priv->mib_sem);
1307 wpa = old_wpa = priv->wpa;
1308 up_write(&priv->mib_sem);
1309 ret = mgt_get_request(priv, DOT11_OID_AUTHENABLE, 0, NULL, &r);
1310 authen = r.u;
1311 ret = mgt_get_request(priv, DOT11_OID_PRIVACYINVOKED, 0, NULL, &r);
1312 privinvoked = r.u;
1313 ret = mgt_get_request(priv, DOT11_OID_EXUNENCRYPTED, 0, NULL, &r);
1314 exunencrypt = r.u;
1315 ret = mgt_get_request(priv, DOT11_OID_DOT1XENABLE, 0, NULL, &r);
1316 dot1x = r.u;
1317 ret = mgt_get_request(priv, DOT11_OID_MLMEAUTOLEVEL, 0, NULL, &r);
1318 mlmelevel = r.u;
1319
1320 if (ret < 0)
1321 goto out;
1322
1323 switch (param->flags & IW_AUTH_INDEX) {
1324 case IW_AUTH_CIPHER_PAIRWISE:
1325 case IW_AUTH_CIPHER_GROUP:
1326 case IW_AUTH_KEY_MGMT:
1327 break;
1328
1329 case IW_AUTH_WPA_ENABLED:
1330 /* Do the same thing as IW_AUTH_WPA_VERSION */
1331 if (param->value) {
1332 wpa = 1;
1333 privinvoked = 1; /* For privacy invoked */
1334 exunencrypt = 1; /* Filter out all unencrypted frames */
1335 dot1x = 0x01; /* To enable eap filter */
1336 mlmelevel = DOT11_MLME_EXTENDED;
1337 authen = DOT11_AUTH_OS; /* Only WEP uses _SK and _BOTH */
1338 } else {
1339 wpa = 0;
1340 privinvoked = 0;
1341 exunencrypt = 0; /* Do not filter un-encrypted data */
1342 dot1x = 0;
1343 mlmelevel = DOT11_MLME_AUTO;
1344 }
1345 break;
1346
1347 case IW_AUTH_WPA_VERSION:
1348 if (param->value & IW_AUTH_WPA_VERSION_DISABLED) {
1349 wpa = 0;
1350 privinvoked = 0;
1351 exunencrypt = 0; /* Do not filter un-encrypted data */
1352 dot1x = 0;
1353 mlmelevel = DOT11_MLME_AUTO;
1354 } else {
1355 if (param->value & IW_AUTH_WPA_VERSION_WPA)
1356 wpa = 1;
1357 else if (param->value & IW_AUTH_WPA_VERSION_WPA2)
1358 wpa = 2;
1359 privinvoked = 1; /* For privacy invoked */
1360 exunencrypt = 1; /* Filter out all unencrypted frames */
1361 dot1x = 0x01; /* To enable eap filter */
1362 mlmelevel = DOT11_MLME_EXTENDED;
1363 authen = DOT11_AUTH_OS; /* Only WEP uses _SK and _BOTH */
1364 }
1365 break;
1366
1367 case IW_AUTH_RX_UNENCRYPTED_EAPOL:
1368 dot1x = param->value ? 1 : 0;
1369 break;
1370
1371 case IW_AUTH_PRIVACY_INVOKED:
 1372 privinvoked = param->value ? 1 : 0;
 1373 break;
1374 case IW_AUTH_DROP_UNENCRYPTED:
1375 exunencrypt = param->value ? 1 : 0;
1376 break;
1377
1378 case IW_AUTH_80211_AUTH_ALG:
1379 if (param->value & IW_AUTH_ALG_SHARED_KEY) {
1380 /* Only WEP uses _SK and _BOTH */
1381 if (wpa > 0) {
1382 ret = -EINVAL;
1383 goto out;
1384 }
1385 authen = DOT11_AUTH_SK;
1386 } else if (param->value & IW_AUTH_ALG_OPEN_SYSTEM) {
1387 authen = DOT11_AUTH_OS;
1388 } else {
1389 ret = -EINVAL;
1390 goto out;
1391 }
1392 break;
1393
1394 default:
1395 return -EOPNOTSUPP;
1396 }
1397
1398 /* Set all the values */
1399 down_write(&priv->mib_sem);
1400 priv->wpa = wpa;
1401 up_write(&priv->mib_sem);
1402 mgt_set_request(priv, DOT11_OID_AUTHENABLE, 0, &authen);
1403 mgt_set_request(priv, DOT11_OID_PRIVACYINVOKED, 0, &privinvoked);
1404 mgt_set_request(priv, DOT11_OID_EXUNENCRYPTED, 0, &exunencrypt);
1405 mgt_set_request(priv, DOT11_OID_DOT1XENABLE, 0, &dot1x);
1406 mgt_set_request(priv, DOT11_OID_MLMEAUTOLEVEL, 0, &mlmelevel);
1407
1408out:
1409 return ret;
1410}
1411
1412static int prism54_get_auth(struct net_device *ndev,
1413 struct iw_request_info *info,
1414 union iwreq_data *wrqu, char *extra)
1415{
1416 islpci_private *priv = netdev_priv(ndev);
1417 struct iw_param *param = &wrqu->param;
1418 u32 wpa = 0;
1419 int ret = 0;
1420 union oid_res_t r;
1421
1422 if (islpci_get_state(priv) < PRV_STATE_INIT)
1423 return 0;
1424
1425 /* first get the flags */
1426 down_write(&priv->mib_sem);
1427 wpa = priv->wpa;
1428 up_write(&priv->mib_sem);
1429
1430 switch (param->flags & IW_AUTH_INDEX) {
1431 case IW_AUTH_CIPHER_PAIRWISE:
1432 case IW_AUTH_CIPHER_GROUP:
1433 case IW_AUTH_KEY_MGMT:
1434 /*
1435 * wpa_supplicant will control these internally
1436 */
1437 ret = -EOPNOTSUPP;
1438 break;
1439
1440 case IW_AUTH_WPA_VERSION:
1441 switch (wpa) {
1442 case 1:
1443 param->value = IW_AUTH_WPA_VERSION_WPA;
1444 break;
1445 case 2:
1446 param->value = IW_AUTH_WPA_VERSION_WPA2;
1447 break;
1448 case 0:
1449 default:
1450 param->value = IW_AUTH_WPA_VERSION_DISABLED;
1451 break;
1452 }
1453 break;
1454
1455 case IW_AUTH_DROP_UNENCRYPTED:
1456 ret = mgt_get_request(priv, DOT11_OID_EXUNENCRYPTED, 0, NULL, &r);
1457 if (ret >= 0)
1458 param->value = r.u > 0 ? 1 : 0;
1459 break;
1460
1461 case IW_AUTH_80211_AUTH_ALG:
1462 ret = mgt_get_request(priv, DOT11_OID_AUTHENABLE, 0, NULL, &r);
1463 if (ret >= 0) {
1464 switch (r.u) {
1465 case DOT11_AUTH_OS:
1466 param->value = IW_AUTH_ALG_OPEN_SYSTEM;
1467 break;
1468 case DOT11_AUTH_BOTH:
1469 case DOT11_AUTH_SK:
 1470 param->value = IW_AUTH_ALG_SHARED_KEY;
      break;
1471 case DOT11_AUTH_NONE:
1472 default:
1473 param->value = 0;
1474 break;
1475 }
1476 }
1477 break;
1478
1479 case IW_AUTH_WPA_ENABLED:
1480 param->value = wpa > 0 ? 1 : 0;
1481 break;
1482
1483 case IW_AUTH_RX_UNENCRYPTED_EAPOL:
1484 ret = mgt_get_request(priv, DOT11_OID_DOT1XENABLE, 0, NULL, &r);
1485 if (ret >= 0)
1486 param->value = r.u > 0 ? 1 : 0;
1487 break;
1488
1489 case IW_AUTH_PRIVACY_INVOKED:
1490 ret = mgt_get_request(priv, DOT11_OID_PRIVACYINVOKED, 0, NULL, &r);
1491 if (ret >= 0)
1492 param->value = r.u > 0 ? 1 : 0;
1493 break;
1494
1495 default:
1496 return -EOPNOTSUPP;
1497 }
1498 return ret;
1499}
1500
1501static int prism54_set_encodeext(struct net_device *ndev,
1502 struct iw_request_info *info,
1503 union iwreq_data *wrqu,
1504 char *extra)
1505{
1506 islpci_private *priv = netdev_priv(ndev);
1507 struct iw_point *encoding = &wrqu->encoding;
1508 struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
1509 int idx, alg = ext->alg, set_key = 1;
1510 union oid_res_t r;
1511 int authen = DOT11_AUTH_OS, invoke = 0, exunencrypt = 0;
1512 int ret = 0;
1513
1514 if (islpci_get_state(priv) < PRV_STATE_INIT)
1515 return 0;
1516
1517 /* Determine and validate the key index */
1518 idx = (encoding->flags & IW_ENCODE_INDEX) - 1;
1519 if (idx) {
1520 if (idx < 0 || idx > 3)
1521 return -EINVAL;
1522 } else {
1523 ret = mgt_get_request(priv, DOT11_OID_DEFKEYID, 0, NULL, &r);
1524 if (ret < 0)
1525 goto out;
1526 idx = r.u;
1527 }
1528
1529 if (encoding->flags & IW_ENCODE_DISABLED)
1530 alg = IW_ENCODE_ALG_NONE;
1531
1532 if (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) {
1533 /* Only set transmit key index here, actual
1534 * key is set below if needed.
1535 */
1536 ret = mgt_set_request(priv, DOT11_OID_DEFKEYID, 0, &idx);
1537 set_key = ext->key_len > 0 ? 1 : 0;
1538 }
1539
1540 if (set_key) {
1541 struct obj_key key = { DOT11_PRIV_WEP, 0, "" };
1542 switch (alg) {
1543 case IW_ENCODE_ALG_NONE:
1544 break;
1545 case IW_ENCODE_ALG_WEP:
1546 if (ext->key_len > KEY_SIZE_WEP104) {
1547 ret = -EINVAL;
1548 goto out;
1549 }
1550 if (ext->key_len > KEY_SIZE_WEP40)
1551 key.length = KEY_SIZE_WEP104;
1552 else
1553 key.length = KEY_SIZE_WEP40;
1554 break;
1555 case IW_ENCODE_ALG_TKIP:
1556 if (ext->key_len > KEY_SIZE_TKIP) {
1557 ret = -EINVAL;
1558 goto out;
1559 }
1560 key.type = DOT11_PRIV_TKIP;
 1561 key.length = KEY_SIZE_TKIP;
      break;
1562 default:
1563 return -EINVAL;
1564 }
1565
1566 if (key.length) {
1567 memset(key.key, 0, sizeof(key.key));
1568 memcpy(key.key, ext->key, ext->key_len);
1569 ret = mgt_set_request(priv, DOT11_OID_DEFKEYX, idx,
1570 &key);
1571 if (ret < 0)
1572 goto out;
1573 }
1574 }
1575
1576 /* Read the flags */
1577 if (encoding->flags & IW_ENCODE_DISABLED) {
1578 /* Encoding disabled,
1579 * authen = DOT11_AUTH_OS;
1580 * invoke = 0;
1581 * exunencrypt = 0; */
1582 }
1583 if (encoding->flags & IW_ENCODE_OPEN) {
1584 /* Encode but accept non-encoded packets. No auth */
1585 invoke = 1;
1586 }
1587 if (encoding->flags & IW_ENCODE_RESTRICTED) {
1588 /* Refuse non-encoded packets. Auth */
1589 authen = DOT11_AUTH_BOTH;
1590 invoke = 1;
1591 exunencrypt = 1;
1592 }
1593
1594 /* do the change if requested */
1595 if (encoding->flags & IW_ENCODE_MODE) {
1596 ret = mgt_set_request(priv, DOT11_OID_AUTHENABLE, 0,
1597 &authen);
1598 ret = mgt_set_request(priv, DOT11_OID_PRIVACYINVOKED, 0,
1599 &invoke);
1600 ret = mgt_set_request(priv, DOT11_OID_EXUNENCRYPTED, 0,
1601 &exunencrypt);
1602 }
1603
1604out:
1605 return ret;
1606}
1607
1608
1609static int prism54_get_encodeext(struct net_device *ndev,
1610 struct iw_request_info *info,
1611 union iwreq_data *wrqu,
1612 char *extra)
1613{
1614 islpci_private *priv = netdev_priv(ndev);
1615 struct iw_point *encoding = &wrqu->encoding;
1616 struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
1617 int idx, max_key_len;
1618 union oid_res_t r;
1619 int authen = DOT11_AUTH_OS, invoke = 0, exunencrypt = 0, wpa = 0;
1620 int ret = 0;
1621
1622 if (islpci_get_state(priv) < PRV_STATE_INIT)
1623 return 0;
1624
1625 /* first get the flags */
1626 ret = mgt_get_request(priv, DOT11_OID_AUTHENABLE, 0, NULL, &r);
1627 authen = r.u;
1628 ret = mgt_get_request(priv, DOT11_OID_PRIVACYINVOKED, 0, NULL, &r);
1629 invoke = r.u;
1630 ret = mgt_get_request(priv, DOT11_OID_EXUNENCRYPTED, 0, NULL, &r);
1631 exunencrypt = r.u;
1632 if (ret < 0)
1633 goto out;
1634
1635 max_key_len = encoding->length - sizeof(*ext);
1636 if (max_key_len < 0)
1637 return -EINVAL;
1638
1639 idx = (encoding->flags & IW_ENCODE_INDEX) - 1;
1640 if (idx) {
1641 if (idx < 0 || idx > 3)
1642 return -EINVAL;
1643 } else {
1644 ret = mgt_get_request(priv, DOT11_OID_DEFKEYID, 0, NULL, &r);
1645 if (ret < 0)
1646 goto out;
1647 idx = r.u;
1648 }
1649
1650 encoding->flags = idx + 1;
1651 memset(ext, 0, sizeof(*ext));
1652
1653 switch (authen) {
1654 case DOT11_AUTH_BOTH:
1655 case DOT11_AUTH_SK:
 1656 wrqu->encoding.flags |= IW_ENCODE_RESTRICTED;
      break;
1657 case DOT11_AUTH_OS:
1658 default:
1659 wrqu->encoding.flags |= IW_ENCODE_OPEN;
1660 break;
1661 }
1662
1663 down_write(&priv->mib_sem);
1664 wpa = priv->wpa;
1665 up_write(&priv->mib_sem);
1666
1667 if (authen == DOT11_AUTH_OS && !exunencrypt && !invoke && !wpa) {
1668 /* No encryption */
1669 ext->alg = IW_ENCODE_ALG_NONE;
1670 ext->key_len = 0;
1671 wrqu->encoding.flags |= IW_ENCODE_DISABLED;
1672 } else {
1673 struct obj_key *key;
1674
1675 ret = mgt_get_request(priv, DOT11_OID_DEFKEYX, idx, NULL, &r);
1676 if (ret < 0)
1677 goto out;
1678 key = r.ptr;
1679 if (max_key_len < key->length) {
1680 ret = -E2BIG;
1681 goto out;
1682 }
1683 memcpy(ext->key, key->key, key->length);
1684 ext->key_len = key->length;
1685
1686 switch (key->type) {
1687 case DOT11_PRIV_TKIP:
1688 ext->alg = IW_ENCODE_ALG_TKIP;
1689 break;
1690 default:
1691 case DOT11_PRIV_WEP:
1692 ext->alg = IW_ENCODE_ALG_WEP;
1693 break;
1694 }
1695 wrqu->encoding.flags |= IW_ENCODE_ENABLED;
1696 }
1697
1698out:
1699 return ret;
1700}
1701
1702
1213static int 1703static int
1214prism54_reset(struct net_device *ndev, struct iw_request_info *info, 1704prism54_reset(struct net_device *ndev, struct iw_request_info *info,
1215 __u32 * uwrq, char *extra) 1705 __u32 * uwrq, char *extra)
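The new SIOCSIWAUTH handler above is what lets wpa_supplicant's WEXT backend configure WPA without driver-private ioctls. From userspace the parameter index travels in iwreq's flags field and the setting in its value field; a minimal standalone example (hypothetical program, needs CAP_NET_ADMIN, interface name assumed):

#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/wireless.h>

static int enable_wpa(const char *ifname)
{
	struct iwreq iwr;
	int ret, fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return -1;
	memset(&iwr, 0, sizeof(iwr));
	strncpy(iwr.ifr_name, ifname, IFNAMSIZ);
	iwr.u.param.flags = IW_AUTH_WPA_VERSION;	/* which parameter */
	iwr.u.param.value = IW_AUTH_WPA_VERSION_WPA;	/* enable WPA1 */
	ret = ioctl(fd, SIOCSIWAUTH, &iwr);
	close(fd);
	return ret;
}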
@@ -1591,8 +2081,8 @@ static u8 wpa_oid[4] = { 0x00, 0x50, 0xf2, 1 };
1591#define MACSTR "%02x:%02x:%02x:%02x:%02x:%02x" 2081#define MACSTR "%02x:%02x:%02x:%02x:%02x:%02x"
1592 2082
1593static void 2083static void
1594prism54_wpa_ie_add(islpci_private *priv, u8 *bssid, 2084prism54_wpa_bss_ie_add(islpci_private *priv, u8 *bssid,
1595 u8 *wpa_ie, size_t wpa_ie_len) 2085 u8 *wpa_ie, size_t wpa_ie_len)
1596{ 2086{
1597 struct list_head *ptr; 2087 struct list_head *ptr;
1598 struct islpci_bss_wpa_ie *bss = NULL; 2088 struct islpci_bss_wpa_ie *bss = NULL;
@@ -1658,7 +2148,7 @@ prism54_wpa_ie_add(islpci_private *priv, u8 *bssid,
1658} 2148}
1659 2149
1660static size_t 2150static size_t
1661prism54_wpa_ie_get(islpci_private *priv, u8 *bssid, u8 *wpa_ie) 2151prism54_wpa_bss_ie_get(islpci_private *priv, u8 *bssid, u8 *wpa_ie)
1662{ 2152{
1663 struct list_head *ptr; 2153 struct list_head *ptr;
1664 struct islpci_bss_wpa_ie *bss = NULL; 2154 struct islpci_bss_wpa_ie *bss = NULL;
@@ -1683,14 +2173,14 @@ prism54_wpa_ie_get(islpci_private *priv, u8 *bssid, u8 *wpa_ie)
1683} 2173}
1684 2174
1685void 2175void
1686prism54_wpa_ie_init(islpci_private *priv) 2176prism54_wpa_bss_ie_init(islpci_private *priv)
1687{ 2177{
1688 INIT_LIST_HEAD(&priv->bss_wpa_list); 2178 INIT_LIST_HEAD(&priv->bss_wpa_list);
1689 sema_init(&priv->wpa_sem, 1); 2179 sema_init(&priv->wpa_sem, 1);
1690} 2180}
1691 2181
1692void 2182void
1693prism54_wpa_ie_clean(islpci_private *priv) 2183prism54_wpa_bss_ie_clean(islpci_private *priv)
1694{ 2184{
1695 struct list_head *ptr, *n; 2185 struct list_head *ptr, *n;
1696 2186
@@ -1722,7 +2212,7 @@ prism54_process_bss_data(islpci_private *priv, u32 oid, u8 *addr,
1722 } 2212 }
1723 if (pos[0] == WLAN_EID_GENERIC && pos[1] >= 4 && 2213 if (pos[0] == WLAN_EID_GENERIC && pos[1] >= 4 &&
1724 memcmp(pos + 2, wpa_oid, 4) == 0) { 2214 memcmp(pos + 2, wpa_oid, 4) == 0) {
1725 prism54_wpa_ie_add(priv, addr, pos, pos[1] + 2); 2215 prism54_wpa_bss_ie_add(priv, addr, pos, pos[1] + 2);
1726 return; 2216 return;
1727 } 2217 }
1728 pos += 2 + pos[1]; 2218 pos += 2 + pos[1];
@@ -1879,7 +2369,7 @@ prism54_process_trap_helper(islpci_private *priv, enum oid_num_t oid,
1879 send_formatted_event(priv, "Associate request (ex)", mlme, 1); 2369 send_formatted_event(priv, "Associate request (ex)", mlme, 1);
1880 2370
1881 if (priv->iw_mode != IW_MODE_MASTER 2371 if (priv->iw_mode != IW_MODE_MASTER
1882 && mlmeex->state != DOT11_STATE_AUTHING) 2372 && mlmeex->state != DOT11_STATE_ASSOCING)
1883 break; 2373 break;
1884 2374
1885 confirm = kmalloc(sizeof(struct obj_mlmeex), GFP_ATOMIC); 2375 confirm = kmalloc(sizeof(struct obj_mlmeex), GFP_ATOMIC);
@@ -1893,7 +2383,7 @@ prism54_process_trap_helper(islpci_private *priv, enum oid_num_t oid,
1893 confirm->state = 0; /* not used */ 2383 confirm->state = 0; /* not used */
1894 confirm->code = 0; 2384 confirm->code = 0;
1895 2385
1896 wpa_ie_len = prism54_wpa_ie_get(priv, mlmeex->address, wpa_ie); 2386 wpa_ie_len = prism54_wpa_bss_ie_get(priv, mlmeex->address, wpa_ie);
1897 2387
1898 if (!wpa_ie_len) { 2388 if (!wpa_ie_len) {
1899 printk(KERN_DEBUG "No WPA IE found from " 2389 printk(KERN_DEBUG "No WPA IE found from "
@@ -1937,7 +2427,7 @@ prism54_process_trap_helper(islpci_private *priv, enum oid_num_t oid,
1937 confirm->state = 0; /* not used */ 2427 confirm->state = 0; /* not used */
1938 confirm->code = 0; 2428 confirm->code = 0;
1939 2429
1940 wpa_ie_len = prism54_wpa_ie_get(priv, mlmeex->address, wpa_ie); 2430 wpa_ie_len = prism54_wpa_bss_ie_get(priv, mlmeex->address, wpa_ie);
1941 2431
1942 if (!wpa_ie_len) { 2432 if (!wpa_ie_len) {
1943 printk(KERN_DEBUG "No WPA IE found from " 2433 printk(KERN_DEBUG "No WPA IE found from "
@@ -2553,6 +3043,15 @@ static const iw_handler prism54_handler[] = {
2553 (iw_handler) prism54_get_encode, /* SIOCGIWENCODE */ 3043 (iw_handler) prism54_get_encode, /* SIOCGIWENCODE */
2554 (iw_handler) NULL, /* SIOCSIWPOWER */ 3044 (iw_handler) NULL, /* SIOCSIWPOWER */
2555 (iw_handler) NULL, /* SIOCGIWPOWER */ 3045 (iw_handler) NULL, /* SIOCGIWPOWER */
3046 NULL, /* -- hole -- */
3047 NULL, /* -- hole -- */
3048 (iw_handler) prism54_set_genie, /* SIOCSIWGENIE */
3049 (iw_handler) prism54_get_genie, /* SIOCGIWGENIE */
3050 (iw_handler) prism54_set_auth, /* SIOCSIWAUTH */
3051 (iw_handler) prism54_get_auth, /* SIOCGIWAUTH */
3052 (iw_handler) prism54_set_encodeext, /* SIOCSIWENCODEEXT */
3053 (iw_handler) prism54_get_encodeext, /* SIOCGIWENCODEEXT */
3054 NULL, /* SIOCSIWPMKSA */
2556}; 3055};
2557 3056
2558/* The low order bit identifies a SET (0) or a GET (1) ioctl. */ 3057
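These wext handler tables are positional: entry i serves ioctl number SIOCIWFIRST + i, which is why the unimplemented slots above must be padded with explicit NULLs ("-- hole --") before the new WE-18/19 entries. Dispatch is conceptually this (a sketch of the idea, not the actual wext core code):

static iw_handler get_handler(const iw_handler *table, unsigned int num,
			      unsigned int cmd)
{
	unsigned int idx = cmd - SIOCIWFIRST;

	return (idx < num) ? table[idx] : NULL;
}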
diff --git a/drivers/net/wireless/prism54/isl_ioctl.h b/drivers/net/wireless/prism54/isl_ioctl.h
index 46d5cde80c85..65f33acd0a42 100644
--- a/drivers/net/wireless/prism54/isl_ioctl.h
+++ b/drivers/net/wireless/prism54/isl_ioctl.h
@@ -27,7 +27,7 @@
27 27
28#include <net/iw_handler.h> /* New driver API */ 28#include <net/iw_handler.h> /* New driver API */
29 29
30#define SUPPORTED_WIRELESS_EXT 16 30#define SUPPORTED_WIRELESS_EXT 19
31 31
32void prism54_mib_init(islpci_private *); 32void prism54_mib_init(islpci_private *);
33 33
@@ -39,8 +39,8 @@ void prism54_acl_clean(struct islpci_acl *);
39 39
40void prism54_process_trap(void *); 40void prism54_process_trap(void *);
41 41
42void prism54_wpa_ie_init(islpci_private *priv); 42void prism54_wpa_bss_ie_init(islpci_private *priv);
43void prism54_wpa_ie_clean(islpci_private *priv); 43void prism54_wpa_bss_ie_clean(islpci_private *priv);
44 44
45int prism54_set_mac_address(struct net_device *, void *); 45int prism54_set_mac_address(struct net_device *, void *);
46 46
diff --git a/drivers/net/wireless/prism54/islpci_dev.c b/drivers/net/wireless/prism54/islpci_dev.c
index 5ddf29599032..ab3c5a27efd9 100644
--- a/drivers/net/wireless/prism54/islpci_dev.c
+++ b/drivers/net/wireless/prism54/islpci_dev.c
@@ -715,7 +715,7 @@ islpci_alloc_memory(islpci_private *priv)
715 } 715 }
716 716
717 prism54_acl_init(&priv->acl); 717 prism54_acl_init(&priv->acl);
718 prism54_wpa_ie_init(priv); 718 prism54_wpa_bss_ie_init(priv);
719 if (mgt_init(priv)) 719 if (mgt_init(priv))
720 goto out_free; 720 goto out_free;
721 721
@@ -774,7 +774,7 @@ islpci_free_memory(islpci_private *priv)
774 774
 775 /* Free the access control list and the WPA list */ 775 /* Free the access control list and the WPA list */
776 prism54_acl_clean(&priv->acl); 776 prism54_acl_clean(&priv->acl);
777 prism54_wpa_ie_clean(priv); 777 prism54_wpa_bss_ie_clean(priv);
778 mgt_clean(priv); 778 mgt_clean(priv);
779 779
780 return 0; 780 return 0;
diff --git a/drivers/net/wireless/prism54/islpci_dev.h b/drivers/net/wireless/prism54/islpci_dev.h
index 07053165e4c5..5049f37455b1 100644
--- a/drivers/net/wireless/prism54/islpci_dev.h
+++ b/drivers/net/wireless/prism54/islpci_dev.h
@@ -179,6 +179,8 @@ typedef struct {
179 struct list_head bss_wpa_list; 179 struct list_head bss_wpa_list;
180 int num_bss_wpa; 180 int num_bss_wpa;
181 struct semaphore wpa_sem; 181 struct semaphore wpa_sem;
182 u8 wpa_ie[MAX_WPA_IE_LEN];
183 size_t wpa_ie_len;
182 184
183 struct work_struct reset_task; 185 struct work_struct reset_task;
184 int reset_task_pending; 186 int reset_task_pending;
diff --git a/drivers/net/wireless/zd1211rw/zd_netdev.c b/drivers/net/wireless/zd1211rw/zd_netdev.c
index 9df232c2c863..440ef24b5fd1 100644
--- a/drivers/net/wireless/zd1211rw/zd_netdev.c
+++ b/drivers/net/wireless/zd1211rw/zd_netdev.c
@@ -72,10 +72,18 @@ static int iw_get_name(struct net_device *netdev,
72 struct iw_request_info *info, 72 struct iw_request_info *info,
73 union iwreq_data *req, char *extra) 73 union iwreq_data *req, char *extra)
74{ 74{
 75 /* FIXME: check whether 802.11a will also supported, add also 75 /* FIXME: check whether 802.11a will also be supported */
76 * zd1211B, if we support it. 76 strlcpy(req->name, "IEEE 802.11b/g", IFNAMSIZ);
77 */ 77 return 0;
78 strlcpy(req->name, "802.11g zd1211", IFNAMSIZ); 78}
79
80static int iw_get_nick(struct net_device *netdev,
81 struct iw_request_info *info,
82 union iwreq_data *req, char *extra)
83{
84 strcpy(extra, "zd1211");
85 req->data.length = strlen(extra) + 1;
86 req->data.flags = 1;
79 return 0; 87 return 0;
80} 88}
81 89
@@ -181,6 +189,7 @@ static int iw_get_encodeext(struct net_device *netdev,
181 189
182static const iw_handler zd_standard_iw_handlers[] = { 190static const iw_handler zd_standard_iw_handlers[] = {
183 WX(SIOCGIWNAME) = iw_get_name, 191 WX(SIOCGIWNAME) = iw_get_name,
192 WX(SIOCGIWNICKN) = iw_get_nick,
184 WX(SIOCSIWFREQ) = iw_set_freq, 193 WX(SIOCSIWFREQ) = iw_set_freq,
185 WX(SIOCGIWFREQ) = iw_get_freq, 194 WX(SIOCGIWFREQ) = iw_get_freq,
186 WX(SIOCSIWMODE) = iw_set_mode, 195 WX(SIOCSIWMODE) = iw_set_mode,
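The WX() wrapper in this table is a designated-initializer helper, so handlers can be listed by ioctl name while unlisted slots default to NULL. Assuming the usual definition in zd_netdev.c, the idiom looks like this (sketch):

#define WX(x) [(x) - SIOCIWFIRST]	/* assumed definition */

static const iw_handler example_handlers[] = {
	WX(SIOCGIWNAME)  = iw_get_name,	/* index SIOCGIWNAME - SIOCIWFIRST */
	WX(SIOCGIWNICKN) = iw_get_nick,	/* gaps in between stay NULL */
};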
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
index 6320984126c7..96551da769fc 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
@@ -39,9 +39,11 @@ static struct usb_device_id usb_ids[] = {
39 { USB_DEVICE(0x6891, 0xa727), .driver_info = DEVICE_ZD1211 }, 39 { USB_DEVICE(0x6891, 0xa727), .driver_info = DEVICE_ZD1211 },
40 { USB_DEVICE(0x0df6, 0x9071), .driver_info = DEVICE_ZD1211 }, 40 { USB_DEVICE(0x0df6, 0x9071), .driver_info = DEVICE_ZD1211 },
41 { USB_DEVICE(0x157e, 0x300b), .driver_info = DEVICE_ZD1211 }, 41 { USB_DEVICE(0x157e, 0x300b), .driver_info = DEVICE_ZD1211 },
42 { USB_DEVICE(0x079b, 0x004a), .driver_info = DEVICE_ZD1211 },
42 /* ZD1211B */ 43 /* ZD1211B */
43 { USB_DEVICE(0x0ace, 0x1215), .driver_info = DEVICE_ZD1211B }, 44 { USB_DEVICE(0x0ace, 0x1215), .driver_info = DEVICE_ZD1211B },
44 { USB_DEVICE(0x157e, 0x300d), .driver_info = DEVICE_ZD1211B }, 45 { USB_DEVICE(0x157e, 0x300d), .driver_info = DEVICE_ZD1211B },
46 { USB_DEVICE(0x079b, 0x0062), .driver_info = DEVICE_ZD1211B },
45 {} 47 {}
46}; 48};
47 49
diff --git a/include/net/ieee80211.h b/include/net/ieee80211.h
index ecc42864b001..b174ebb277a9 100644
--- a/include/net/ieee80211.h
+++ b/include/net/ieee80211.h
@@ -240,6 +240,11 @@ struct ieee80211_snap_hdr {
240#define WLAN_CAPABILITY_SHORT_SLOT_TIME (1<<10) 240#define WLAN_CAPABILITY_SHORT_SLOT_TIME (1<<10)
241#define WLAN_CAPABILITY_DSSS_OFDM (1<<13) 241#define WLAN_CAPABILITY_DSSS_OFDM (1<<13)
242 242
243/* 802.11g ERP information element */
244#define WLAN_ERP_NON_ERP_PRESENT (1<<0)
245#define WLAN_ERP_USE_PROTECTION (1<<1)
246#define WLAN_ERP_BARKER_PREAMBLE (1<<2)
247
243/* Status codes */ 248/* Status codes */
244enum ieee80211_statuscode { 249enum ieee80211_statuscode {
245 WLAN_STATUS_SUCCESS = 0, 250 WLAN_STATUS_SUCCESS = 0,
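The three new ERP bits mirror the ERP information element of 802.11g beacons: they tell a station when OFDM transmissions need protection and when short CCK preambles are unsafe. A sketch of how a consumer might translate them (it mirrors what the softmac changes in this patch do; the helper name and layout are illustrative):

static void example_apply_erp(u8 erp, int short_preamble_available,
			      int *use_protection, int *short_preamble)
{
	/* protect OFDM frames (e.g. self-CTS) when the BSS requests it */
	*use_protection = !!(erp & WLAN_ERP_USE_PROTECTION);
	/* only use short preamble if no Barker-preamble-only station is around */
	*short_preamble = short_preamble_available &&
			  !(erp & WLAN_ERP_BARKER_PREAMBLE);
}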
@@ -747,6 +752,8 @@ struct ieee80211_txb {
747#define NETWORK_HAS_IBSS_DFS (1<<8) 752#define NETWORK_HAS_IBSS_DFS (1<<8)
748#define NETWORK_HAS_TPC_REPORT (1<<9) 753#define NETWORK_HAS_TPC_REPORT (1<<9)
749 754
755#define NETWORK_HAS_ERP_VALUE (1<<10)
756
750#define QOS_QUEUE_NUM 4 757#define QOS_QUEUE_NUM 4
751#define QOS_OUI_LEN 3 758#define QOS_OUI_LEN 3
752#define QOS_OUI_TYPE 2 759#define QOS_OUI_TYPE 2
@@ -1252,6 +1259,8 @@ extern int ieee80211_tx_frame(struct ieee80211_device *ieee,
1252 int total_len, int encrypt_mpdu); 1259 int total_len, int encrypt_mpdu);
1253 1260
1254/* ieee80211_rx.c */ 1261/* ieee80211_rx.c */
1262extern void ieee80211_rx_any(struct ieee80211_device *ieee,
1263 struct sk_buff *skb, struct ieee80211_rx_stats *stats);
1255extern int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb, 1264extern int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
1256 struct ieee80211_rx_stats *rx_stats); 1265 struct ieee80211_rx_stats *rx_stats);
1257/* make sure to set stats->len */ 1266/* make sure to set stats->len */
diff --git a/include/net/ieee80211softmac.h b/include/net/ieee80211softmac.h
index 00ad810eb883..425b3a57ac74 100644
--- a/include/net/ieee80211softmac.h
+++ b/include/net/ieee80211softmac.h
@@ -86,9 +86,6 @@ struct ieee80211softmac_assoc_info {
86 86
87 /* BSSID we're trying to associate to */ 87 /* BSSID we're trying to associate to */
88 char bssid[ETH_ALEN]; 88 char bssid[ETH_ALEN];
89
90 /* Rates supported by the network */
91 struct ieee80211softmac_ratesinfo supported_rates;
92 89
93 /* some flags. 90 /* some flags.
94 * static_essid is valid if the essid is constant, 91 * static_essid is valid if the essid is constant,
@@ -103,6 +100,7 @@ struct ieee80211softmac_assoc_info {
103 * bssfixed is used for SIOCSIWAP. 100 * bssfixed is used for SIOCSIWAP.
104 */ 101 */
105 u8 static_essid:1, 102 u8 static_essid:1,
103 short_preamble_available:1,
106 associating:1, 104 associating:1,
107 assoc_wait:1, 105 assoc_wait:1,
108 bssvalid:1, 106 bssvalid:1,
@@ -115,6 +113,19 @@ struct ieee80211softmac_assoc_info {
115 struct work_struct timeout; 113 struct work_struct timeout;
116}; 114};
117 115
116struct ieee80211softmac_bss_info {
117 /* Rates supported by the network */
118 struct ieee80211softmac_ratesinfo supported_rates;
119
120 /* This indicates whether frames can currently be transmitted with
121 * short preamble (only use this variable during TX at CCK rates) */
122 u8 short_preamble:1;
123
124 /* This indicates whether protection (e.g. self-CTS) should be used
125 * when transmitting with OFDM modulation */
126 u8 use_protection:1;
127};
128
118enum { 129enum {
119 IEEE80211SOFTMAC_AUTH_OPEN_REQUEST = 1, 130 IEEE80211SOFTMAC_AUTH_OPEN_REQUEST = 1,
120 IEEE80211SOFTMAC_AUTH_OPEN_RESPONSE = 2, 131 IEEE80211SOFTMAC_AUTH_OPEN_RESPONSE = 2,
@@ -157,6 +168,10 @@ struct ieee80211softmac_txrates {
157#define IEEE80211SOFTMAC_TXRATECHG_MCAST (1 << 2) /* mcast_rate */ 168#define IEEE80211SOFTMAC_TXRATECHG_MCAST (1 << 2) /* mcast_rate */
158#define IEEE80211SOFTMAC_TXRATECHG_MGT_MCAST (1 << 3) /* mgt_mcast_rate */ 169#define IEEE80211SOFTMAC_TXRATECHG_MGT_MCAST (1 << 3) /* mgt_mcast_rate */
159 170
171#define IEEE80211SOFTMAC_BSSINFOCHG_RATES (1 << 0) /* supported_rates */
172#define IEEE80211SOFTMAC_BSSINFOCHG_SHORT_PREAMBLE (1 << 1) /* short_preamble */
173#define IEEE80211SOFTMAC_BSSINFOCHG_PROTECTION (1 << 2) /* use_protection */
174
160struct ieee80211softmac_device { 175struct ieee80211softmac_device {
161 /* 802.11 structure for data stuff */ 176 /* 802.11 structure for data stuff */
162 struct ieee80211_device *ieee; 177 struct ieee80211_device *ieee;
@@ -200,10 +215,16 @@ struct ieee80211softmac_device {
200 * The driver just needs to read them. 215 * The driver just needs to read them.
201 */ 216 */
202 struct ieee80211softmac_txrates txrates; 217 struct ieee80211softmac_txrates txrates;
203 /* If the driver needs to do stuff on TX rate changes, assign this callback. */ 218
219 /* If the driver needs to do stuff on TX rate changes, assign this
220 * callback. See IEEE80211SOFTMAC_TXRATECHG for change flags. */
204 void (*txrates_change)(struct net_device *dev, 221 void (*txrates_change)(struct net_device *dev,
205 u32 changes, /* see IEEE80211SOFTMAC_TXRATECHG flags */ 222 u32 changes);
206 const struct ieee80211softmac_txrates *rates_before_change); 223
224 /* If the driver needs to do stuff when BSS properties change, assign
225 * this callback. see IEEE80211SOFTMAC_BSSINFOCHG for change flags. */
226 void (*bssinfo_change)(struct net_device *dev,
227 u32 changes);
207 228
208 /* private stuff follows */ 229 /* private stuff follows */
209 /* this lock protects this structure */ 230 /* this lock protects this structure */
@@ -216,6 +237,7 @@ struct ieee80211softmac_device {
216 237
217 struct ieee80211softmac_scaninfo *scaninfo; 238 struct ieee80211softmac_scaninfo *scaninfo;
218 struct ieee80211softmac_assoc_info associnfo; 239 struct ieee80211softmac_assoc_info associnfo;
240 struct ieee80211softmac_bss_info bssinfo;
219 241
220 struct list_head auth_queue; 242 struct list_head auth_queue;
221 struct list_head events; 243 struct list_head events;
@@ -257,6 +279,14 @@ extern void ieee80211softmac_fragment_lost(struct net_device *dev,
257 * Note that the rates need to be sorted. */ 279 * Note that the rates need to be sorted. */
258extern void ieee80211softmac_set_rates(struct net_device *dev, u8 count, u8 *rates); 280extern void ieee80211softmac_set_rates(struct net_device *dev, u8 count, u8 *rates);
259 281
282/* Finds the highest rate which is:
283 * 1. Present in ri (optionally a basic rate)
284 * 2. Supported by the device
285 * 3. Less than or equal to the user-defined rate
286 */
287extern u8 ieee80211softmac_highest_supported_rate(struct ieee80211softmac_device *mac,
288 struct ieee80211softmac_ratesinfo *ri, int basic_only);
289
260/* Helper function which advises you the rate at which a frame should be 290/* Helper function which advises you the rate at which a frame should be
261 * transmitted at. */ 291 * transmitted at. */
262static inline u8 ieee80211softmac_suggest_txrate(struct ieee80211softmac_device *mac, 292static inline u8 ieee80211softmac_suggest_txrate(struct ieee80211softmac_device *mac,
@@ -279,6 +309,24 @@ static inline u8 ieee80211softmac_suggest_txrate(struct ieee80211softmac_device
279 return txrates->mcast_rate; 309 return txrates->mcast_rate;
280} 310}
281 311
312/* Helper function which advises you when it is safe to transmit with short
313 * preamble.
314 * You should only call this function when transmitting at CCK rates. */
315static inline int ieee80211softmac_short_preamble_ok(struct ieee80211softmac_device *mac,
316 int is_multicast,
317 int is_mgt)
318{
319 return (is_multicast && is_mgt) ? 0 : mac->bssinfo.short_preamble;
320}
321
322/* Helper function which advises you whether protection (e.g. self-CTS) is
323 * needed. 1 = protection needed, 0 = no protection needed
324 * Only use this function when transmitting with OFDM modulation. */
325static inline int ieee80211softmac_protection_needed(struct ieee80211softmac_device *mac)
326{
327 return mac->bssinfo.use_protection;
328}
329
282/* Start the SoftMAC. Call this after you initialized the device 330/* Start the SoftMAC. Call this after you initialized the device
283 * and it is ready to run. 331 * and it is ready to run.
284 */ 332 */
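Taken together, the new helpers give a driver everything it needs at TX time: a suggested rate, whether short preamble is safe (CCK rates only), and whether self-CTS protection is needed (OFDM rates only). A sketch of a driver consuming them (hypothetical function; the CCK rate constants are the existing ones from ieee80211.h):

static void example_tx_flags(struct ieee80211softmac_device *mac,
			     int is_multicast, int is_mgt,
			     int *use_cts, int *use_short_preamble)
{
	u8 rate = ieee80211softmac_suggest_txrate(mac, is_multicast, is_mgt);
	int is_cck = rate == IEEE80211_CCK_RATE_1MB ||
		     rate == IEEE80211_CCK_RATE_2MB ||
		     rate == IEEE80211_CCK_RATE_5MB ||
		     rate == IEEE80211_CCK_RATE_11MB;

	/* protection applies to OFDM rates, short preamble to CCK rates */
	*use_cts = !is_cck && ieee80211softmac_protection_needed(mac);
	*use_short_preamble = is_cck &&
		ieee80211softmac_short_preamble_ok(mac, is_multicast, is_mgt);
}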
diff --git a/net/ieee80211/ieee80211_rx.c b/net/ieee80211/ieee80211_rx.c
index 72d4d4e04d42..d60358d702d7 100644
--- a/net/ieee80211/ieee80211_rx.c
+++ b/net/ieee80211/ieee80211_rx.c
@@ -779,33 +779,44 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
779 return 0; 779 return 0;
780} 780}
781 781
782/* Filter out unrelated packets, call ieee80211_rx[_mgt] */ 782/* Filter out unrelated packets, call ieee80211_rx[_mgt]
 783int ieee80211_rx_any(struct ieee80211_device *ieee, 783 * This function takes over the skb; it should not be used again after calling
784 * this function. */
785void ieee80211_rx_any(struct ieee80211_device *ieee,
784 struct sk_buff *skb, struct ieee80211_rx_stats *stats) 786 struct sk_buff *skb, struct ieee80211_rx_stats *stats)
785{ 787{
786 struct ieee80211_hdr_4addr *hdr; 788 struct ieee80211_hdr_4addr *hdr;
787 int is_packet_for_us; 789 int is_packet_for_us;
788 u16 fc; 790 u16 fc;
789 791
790 if (ieee->iw_mode == IW_MODE_MONITOR) 792 if (ieee->iw_mode == IW_MODE_MONITOR) {
791 return ieee80211_rx(ieee, skb, stats) ? 0 : -EINVAL; 793 if (!ieee80211_rx(ieee, skb, stats))
794 dev_kfree_skb_irq(skb);
795 return;
796 }
797
798 if (skb->len < sizeof(struct ieee80211_hdr))
799 goto drop_free;
792 800
793 hdr = (struct ieee80211_hdr_4addr *)skb->data; 801 hdr = (struct ieee80211_hdr_4addr *)skb->data;
794 fc = le16_to_cpu(hdr->frame_ctl); 802 fc = le16_to_cpu(hdr->frame_ctl);
795 803
796 if ((fc & IEEE80211_FCTL_VERS) != 0) 804 if ((fc & IEEE80211_FCTL_VERS) != 0)
797 return -EINVAL; 805 goto drop_free;
798 806
799 switch (fc & IEEE80211_FCTL_FTYPE) { 807 switch (fc & IEEE80211_FCTL_FTYPE) {
800 case IEEE80211_FTYPE_MGMT: 808 case IEEE80211_FTYPE_MGMT:
809 if (skb->len < sizeof(struct ieee80211_hdr_3addr))
810 goto drop_free;
801 ieee80211_rx_mgt(ieee, hdr, stats); 811 ieee80211_rx_mgt(ieee, hdr, stats);
802 return 0; 812 dev_kfree_skb_irq(skb);
813 return;
803 case IEEE80211_FTYPE_DATA: 814 case IEEE80211_FTYPE_DATA:
804 break; 815 break;
805 case IEEE80211_FTYPE_CTL: 816 case IEEE80211_FTYPE_CTL:
806 return 0; 817 return;
807 default: 818 default:
808 return -EINVAL; 819 return;
809 } 820 }
810 821
811 is_packet_for_us = 0; 822 is_packet_for_us = 0;
@@ -849,8 +860,14 @@ int ieee80211_rx_any(struct ieee80211_device *ieee,
 	}
 
 	if (is_packet_for_us)
-		return (ieee80211_rx(ieee, skb, stats) ? 0 : -EINVAL);
-	return 0;
+		if (!ieee80211_rx(ieee, skb, stats))
+			dev_kfree_skb_irq(skb);
+	return;
+
+drop_free:
+	dev_kfree_skb_irq(skb);
+	ieee->stats.rx_dropped++;
+	return;
 }
 
 #define MGMT_FRAME_FIXED_PART_LENGTH 0x24
@@ -1166,6 +1183,7 @@ static int ieee80211_parse_info_param(struct ieee80211_info_element
 
 		case MFIE_TYPE_ERP_INFO:
 			network->erp_value = info_element->data[0];
+			network->flags |= NETWORK_HAS_ERP_VALUE;
 			IEEE80211_DEBUG_MGMT("MFIE_TYPE_ERP_SET: %d\n",
 					     network->erp_value);
 			break;
@@ -1729,5 +1747,6 @@ void ieee80211_rx_mgt(struct ieee80211_device *ieee,
 	}
 }
 
+EXPORT_SYMBOL_GPL(ieee80211_rx_any);
 EXPORT_SYMBOL(ieee80211_rx_mgt);
 EXPORT_SYMBOL(ieee80211_rx);
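
The signature change from int to void is the important part of this file: ieee80211_rx_any() now consumes the skb on every path, either handing it to ieee80211_rx()/ieee80211_rx_mgt() or freeing it itself (counting a drop for malformed frames). Callers therefore must not free or reuse the buffer afterwards. A hedged sketch of a conforming caller; my_driver_rx is hypothetical, only ieee80211_rx_any() comes from the patch:

	static void my_driver_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
				 struct ieee80211_rx_stats *stats)
	{
		/* Ownership of skb passes to the stack here; freeing it
		 * afterwards would be a double free. */
		ieee80211_rx_any(ieee, skb, stats);
	}
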
diff --git a/net/ieee80211/softmac/ieee80211softmac_assoc.c b/net/ieee80211/softmac/ieee80211softmac_assoc.c
index 44215ce64d4e..589f6d2c548a 100644
--- a/net/ieee80211/softmac/ieee80211softmac_assoc.c
+++ b/net/ieee80211/softmac/ieee80211softmac_assoc.c
@@ -96,7 +96,7 @@ ieee80211softmac_disassoc(struct ieee80211softmac_device *mac)
 	mac->associated = 0;
 	mac->associnfo.bssvalid = 0;
 	mac->associnfo.associating = 0;
-	ieee80211softmac_init_txrates(mac);
+	ieee80211softmac_init_bss(mac);
 	ieee80211softmac_call_events_locked(mac, IEEE80211SOFTMAC_EVENT_DISASSOCIATED, NULL);
 	spin_unlock_irqrestore(&mac->lock, flags);
 }
@@ -334,11 +334,19 @@ ieee80211softmac_associated(struct ieee80211softmac_device *mac,
 			    struct ieee80211_assoc_response * resp,
 			    struct ieee80211softmac_network *net)
 {
+	u16 cap = le16_to_cpu(resp->capability);
+	u8 erp_value = net->erp_value;
+
 	mac->associnfo.associating = 0;
-	mac->associnfo.supported_rates = net->supported_rates;
+	mac->bssinfo.supported_rates = net->supported_rates;
 	ieee80211softmac_recalc_txrates(mac);
 
 	mac->associated = 1;
+
+	mac->associnfo.short_preamble_available =
+		(cap & WLAN_CAPABILITY_SHORT_PREAMBLE) != 0;
+	ieee80211softmac_process_erp(mac, erp_value);
+
 	if (mac->set_bssid_filter)
 		mac->set_bssid_filter(mac->dev, net->bssid);
 	memcpy(mac->ieee->bssid, net->bssid, ETH_ALEN);
@@ -351,9 +359,9 @@ ieee80211softmac_associated(struct ieee80211softmac_device *mac,
 int
 ieee80211softmac_handle_assoc_response(struct net_device * dev,
 				       struct ieee80211_assoc_response * resp,
-				       struct ieee80211_network * _ieee80211_network_do_not_use)
+				       struct ieee80211_network * _ieee80211_network)
 {
-	/* NOTE: the network parameter has to be ignored by
+	/* NOTE: the network parameter has to be mostly ignored by
 	 *       this code because it is the ieee80211's pointer
 	 *       to the struct, not ours (we made a copy)
 	 */
@@ -385,6 +393,11 @@ ieee80211softmac_handle_assoc_response(struct net_device * dev,
 	/* now that we know it was for us, we can cancel the timeout */
 	cancel_delayed_work(&mac->associnfo.timeout);
 
+	/* if the association response included an ERP IE, update our saved
+	 * copy */
+	if (_ieee80211_network->flags & NETWORK_HAS_ERP_VALUE)
+		network->erp_value = _ieee80211_network->erp_value;
+
 	switch (status) {
 		case 0:
 			dprintk(KERN_INFO PFX "associated!\n");
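
Association now feeds two inputs into the ERP logic: the short-preamble bit from the AP's capability field and the ERP octet cached from the beacon/probe response (refreshed above when the association response carries its own ERP IE). A worked example using the standard 802.11 bit values (WLAN_CAPABILITY_SHORT_PREAMBLE is bit 5; WLAN_ERP_USE_PROTECTION bit 1; WLAN_ERP_BARKER_PREAMBLE bit 2); the numeric cap/erp values are invented, not from a real trace:

	#include <stdio.h>

	#define WLAN_CAPABILITY_SHORT_PREAMBLE (1 << 5)
	#define WLAN_ERP_USE_PROTECTION        (1 << 1)
	#define WLAN_ERP_BARKER_PREAMBLE       (1 << 2)

	int main(void)
	{
		unsigned short cap = 0x0421;	/* ESS + short preamble allowed */
		unsigned char erp = 0x03;	/* non-ERP STA present + use protection */

		int available = (cap & WLAN_CAPABILITY_SHORT_PREAMBLE) != 0;
		int short_preamble = !(erp & WLAN_ERP_BARKER_PREAMBLE) && available;
		int use_protection = (erp & WLAN_ERP_USE_PROTECTION) != 0;

		/* prints "short_preamble=1 use_protection=1" */
		printf("short_preamble=%d use_protection=%d\n",
		       short_preamble, use_protection);
		return 0;
	}
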
diff --git a/net/ieee80211/softmac/ieee80211softmac_io.c b/net/ieee80211/softmac/ieee80211softmac_io.c
index 6ae5a1dc7956..82bfddbf33a2 100644
--- a/net/ieee80211/softmac/ieee80211softmac_io.c
+++ b/net/ieee80211/softmac/ieee80211softmac_io.c
@@ -467,3 +467,17 @@ ieee80211softmac_send_mgt_frame(struct ieee80211softmac_device *mac,
 	kfree(pkt);
 	return 0;
 }
+
+/* Beacon handling */
+int ieee80211softmac_handle_beacon(struct net_device *dev,
+	struct ieee80211_beacon *beacon,
+	struct ieee80211_network *network)
+{
+	struct ieee80211softmac_device *mac = ieee80211_priv(dev);
+
+	if (mac->associated && memcmp(network->bssid, mac->associnfo.bssid, ETH_ALEN) == 0)
+		ieee80211softmac_process_erp(mac, network->erp_value);
+
+	return 0;
+}
+
diff --git a/net/ieee80211/softmac/ieee80211softmac_module.c b/net/ieee80211/softmac/ieee80211softmac_module.c
index 4b2e57d12418..addea1cf73ae 100644
--- a/net/ieee80211/softmac/ieee80211softmac_module.c
+++ b/net/ieee80211/softmac/ieee80211softmac_module.c
@@ -44,6 +44,7 @@ struct net_device *alloc_ieee80211softmac(int sizeof_priv)
 	softmac->ieee->handle_assoc_response = ieee80211softmac_handle_assoc_response;
 	softmac->ieee->handle_reassoc_request = ieee80211softmac_handle_reassoc_req;
 	softmac->ieee->handle_disassoc = ieee80211softmac_handle_disassoc;
+	softmac->ieee->handle_beacon = ieee80211softmac_handle_beacon;
 	softmac->scaninfo = NULL;
 
 	softmac->associnfo.scan_retry = IEEE80211SOFTMAC_ASSOC_SCAN_RETRY_LIMIT;
@@ -178,21 +179,14 @@ int ieee80211softmac_ratesinfo_rate_supported(struct ieee80211softmac_ratesinfo
 	return 0;
 }
 
-/* Finds the highest rate which is:
- * 1. Present in ri (optionally a basic rate)
- * 2. Supported by the device
- * 3. Less than or equal to the user-defined rate
- */
-static u8 highest_supported_rate(struct ieee80211softmac_device *mac,
+u8 ieee80211softmac_highest_supported_rate(struct ieee80211softmac_device *mac,
 	struct ieee80211softmac_ratesinfo *ri, int basic_only)
 {
 	u8 user_rate = mac->txrates.user_rate;
 	int i;
 
-	if (ri->count == 0) {
-		dprintk(KERN_ERR PFX "empty ratesinfo?\n");
+	if (ri->count == 0)
 		return IEEE80211_CCK_RATE_1MB;
-	}
 
 	for (i = ri->count - 1; i >= 0; i--) {
 		u8 rate = ri->rates[i];
@@ -208,36 +202,61 @@ static u8 highest_supported_rate(struct ieee80211softmac_device *mac,
 	/* If we haven't found a suitable rate by now, just trust the user */
 	return user_rate;
 }
+EXPORT_SYMBOL_GPL(ieee80211softmac_highest_supported_rate);
+
+void ieee80211softmac_process_erp(struct ieee80211softmac_device *mac,
+	u8 erp_value)
+{
+	int use_protection;
+	int short_preamble;
+	u32 changes = 0;
+
+	/* Barker preamble mode */
+	short_preamble = ((erp_value & WLAN_ERP_BARKER_PREAMBLE) == 0
+			  && mac->associnfo.short_preamble_available) ? 1 : 0;
+
+	/* Protection needed? */
+	use_protection = (erp_value & WLAN_ERP_USE_PROTECTION) != 0;
+
+	if (mac->bssinfo.short_preamble != short_preamble) {
+		changes |= IEEE80211SOFTMAC_BSSINFOCHG_SHORT_PREAMBLE;
+		mac->bssinfo.short_preamble = short_preamble;
+	}
+
+	if (mac->bssinfo.use_protection != use_protection) {
+		changes |= IEEE80211SOFTMAC_BSSINFOCHG_PROTECTION;
+		mac->bssinfo.use_protection = use_protection;
+	}
+
+	if (mac->bssinfo_change && changes)
+		mac->bssinfo_change(mac->dev, changes);
+}
 
 void ieee80211softmac_recalc_txrates(struct ieee80211softmac_device *mac)
 {
 	struct ieee80211softmac_txrates *txrates = &mac->txrates;
-	struct ieee80211softmac_txrates oldrates;
 	u32 change = 0;
 
-	if (mac->txrates_change)
-		oldrates = mac->txrates;
-
 	change |= IEEE80211SOFTMAC_TXRATECHG_DEFAULT;
-	txrates->default_rate = highest_supported_rate(mac, &mac->associnfo.supported_rates, 0);
+	txrates->default_rate = ieee80211softmac_highest_supported_rate(mac, &mac->bssinfo.supported_rates, 0);
 
 	change |= IEEE80211SOFTMAC_TXRATECHG_DEFAULT_FBACK;
 	txrates->default_fallback = lower_rate(mac, txrates->default_rate);
 
 	change |= IEEE80211SOFTMAC_TXRATECHG_MCAST;
-	txrates->mcast_rate = highest_supported_rate(mac, &mac->associnfo.supported_rates, 1);
+	txrates->mcast_rate = ieee80211softmac_highest_supported_rate(mac, &mac->bssinfo.supported_rates, 1);
 
 	if (mac->txrates_change)
-		mac->txrates_change(mac->dev, change, &oldrates);
+		mac->txrates_change(mac->dev, change);
 
 }
 
-void ieee80211softmac_init_txrates(struct ieee80211softmac_device *mac)
+void ieee80211softmac_init_bss(struct ieee80211softmac_device *mac)
 {
 	struct ieee80211_device *ieee = mac->ieee;
 	u32 change = 0;
 	struct ieee80211softmac_txrates *txrates = &mac->txrates;
-	struct ieee80211softmac_txrates oldrates;
+	struct ieee80211softmac_bss_info *bssinfo = &mac->bssinfo;
 
 	/* TODO: We need some kind of state machine to lower the default rates
 	 *       if we loose too many packets.
243 * if we loose too many packets. 262 * if we loose too many packets.
@@ -245,8 +264,6 @@ void ieee80211softmac_init_txrates(struct ieee80211softmac_device *mac)
245 /* Change the default txrate to the highest possible value. 264 /* Change the default txrate to the highest possible value.
246 * The txrate machine will lower it, if it is too high. 265 * The txrate machine will lower it, if it is too high.
247 */ 266 */
248 if (mac->txrates_change)
249 oldrates = mac->txrates;
250 /* FIXME: We don't correctly handle backing down to lower 267 /* FIXME: We don't correctly handle backing down to lower
251 rates, so 801.11g devices start off at 11M for now. People 268 rates, so 801.11g devices start off at 11M for now. People
252 can manually change it if they really need to, but 11M is 269 can manually change it if they really need to, but 11M is
@@ -272,7 +289,23 @@ void ieee80211softmac_init_txrates(struct ieee80211softmac_device *mac)
 	change |= IEEE80211SOFTMAC_TXRATECHG_MGT_MCAST;
 
 	if (mac->txrates_change)
-		mac->txrates_change(mac->dev, change, &oldrates);
+		mac->txrates_change(mac->dev, change);
+
+	change = 0;
+
+	bssinfo->supported_rates.count = 0;
+	memset(bssinfo->supported_rates.rates, 0,
+		sizeof(bssinfo->supported_rates.rates));
+	change |= IEEE80211SOFTMAC_BSSINFOCHG_RATES;
+
+	bssinfo->short_preamble = 0;
+	change |= IEEE80211SOFTMAC_BSSINFOCHG_SHORT_PREAMBLE;
+
+	bssinfo->use_protection = 0;
+	change |= IEEE80211SOFTMAC_BSSINFOCHG_PROTECTION;
+
+	if (mac->bssinfo_change)
+		mac->bssinfo_change(mac->dev, change);
 
 	mac->running = 1;
 }
@@ -282,7 +315,7 @@ void ieee80211softmac_start(struct net_device *dev)
 	struct ieee80211softmac_device *mac = ieee80211_priv(dev);
 
 	ieee80211softmac_start_check_rates(mac);
-	ieee80211softmac_init_txrates(mac);
+	ieee80211softmac_init_bss(mac);
 }
 EXPORT_SYMBOL_GPL(ieee80211softmac_start);
 
@@ -335,7 +368,6 @@ u8 ieee80211softmac_lower_rate_delta(struct ieee80211softmac_device *mac, u8 rat
 static void ieee80211softmac_add_txrates_badness(struct ieee80211softmac_device *mac,
 						 int amount)
 {
-	struct ieee80211softmac_txrates oldrates;
 	u8 default_rate = mac->txrates.default_rate;
 	u8 default_fallback = mac->txrates.default_fallback;
 	u32 changes = 0;
@@ -348,8 +380,6 @@ printk("badness %d\n", mac->txrate_badness);
 	mac->txrate_badness += amount;
 	if (mac->txrate_badness <= -1000) {
 		/* Very small badness. Try a faster bitrate. */
-		if (mac->txrates_change)
-			memcpy(&oldrates, &mac->txrates, sizeof(oldrates));
 		default_rate = raise_rate(mac, default_rate);
 		changes |= IEEE80211SOFTMAC_TXRATECHG_DEFAULT;
 		default_fallback = get_fallback_rate(mac, default_rate);
@@ -358,8 +388,6 @@ printk("badness %d\n", mac->txrate_badness);
 printk("Bitrate raised to %u\n", default_rate);
 	} else if (mac->txrate_badness >= 10000) {
 		/* Very high badness. Try a slower bitrate. */
-		if (mac->txrates_change)
-			memcpy(&oldrates, &mac->txrates, sizeof(oldrates));
 		default_rate = lower_rate(mac, default_rate);
 		changes |= IEEE80211SOFTMAC_TXRATECHG_DEFAULT;
 		default_fallback = get_fallback_rate(mac, default_rate);
@@ -372,7 +400,7 @@ printk("Bitrate lowered to %u\n", default_rate);
 	mac->txrates.default_fallback = default_fallback;
 
 	if (changes && mac->txrates_change)
-		mac->txrates_change(mac->dev, changes, &oldrates);
+		mac->txrates_change(mac->dev, changes);
 }
 
 void ieee80211softmac_fragment_lost(struct net_device *dev,
@@ -416,7 +444,11 @@ ieee80211softmac_create_network(struct ieee80211softmac_device *mac,
 	memcpy(&softnet->supported_rates.rates[softnet->supported_rates.count], net->rates_ex, net->rates_ex_len);
 	softnet->supported_rates.count += net->rates_ex_len;
 	sort(softnet->supported_rates.rates, softnet->supported_rates.count, sizeof(softnet->supported_rates.rates[0]), rate_cmp, NULL);
 
+	/* we save the ERP value because it is needed at association time, and
+	 * many AP's do not include an ERP IE in the association response. */
+	softnet->erp_value = net->erp_value;
+
 	softnet->capabilities = net->capability;
 	return softnet;
 }
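
Both notification hooks lose their oldrates argument in this file: drivers now receive only a change bitmask and read the current values back out of the softmac device. A sketch of the new-style bssinfo_change hook a driver might register; the my_* names are stand-ins for real hardware accessors, while the IEEE80211SOFTMAC_BSSINFOCHG_* flags and bssinfo fields come from the patch above:

	static void my_hw_set_short_preamble(struct net_device *dev, int on) { /* ... */ }
	static void my_hw_set_cts_protection(struct net_device *dev, int on) { /* ... */ }

	/* Registered with: mac->bssinfo_change = my_bssinfo_change; */
	static void my_bssinfo_change(struct net_device *dev, u32 changes)
	{
		struct ieee80211softmac_device *mac = ieee80211_priv(dev);

		if (changes & IEEE80211SOFTMAC_BSSINFOCHG_SHORT_PREAMBLE)
			my_hw_set_short_preamble(dev, mac->bssinfo.short_preamble);

		if (changes & IEEE80211SOFTMAC_BSSINFOCHG_PROTECTION)
			my_hw_set_cts_protection(dev, mac->bssinfo.use_protection);

		/* IEEE80211SOFTMAC_BSSINFOCHG_RATES signals that the BSS rate
		 * set was (re)initialized; nothing to program for this
		 * imaginary hardware. */
	}
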
diff --git a/net/ieee80211/softmac/ieee80211softmac_priv.h b/net/ieee80211/softmac/ieee80211softmac_priv.h
index fa1f8e3acfc0..0642e090b8a7 100644
--- a/net/ieee80211/softmac/ieee80211softmac_priv.h
+++ b/net/ieee80211/softmac/ieee80211softmac_priv.h
@@ -116,9 +116,11 @@ ieee80211softmac_get_network_by_essid(struct ieee80211softmac_device *mac,
 				     struct ieee80211softmac_essid *essid);
 
 /* Rates related */
+void ieee80211softmac_process_erp(struct ieee80211softmac_device *mac,
+	u8 erp_value);
 int ieee80211softmac_ratesinfo_rate_supported(struct ieee80211softmac_ratesinfo *ri, u8 rate);
 u8 ieee80211softmac_lower_rate_delta(struct ieee80211softmac_device *mac, u8 rate, int delta);
-void ieee80211softmac_init_txrates(struct ieee80211softmac_device *mac);
+void ieee80211softmac_init_bss(struct ieee80211softmac_device *mac);
 void ieee80211softmac_recalc_txrates(struct ieee80211softmac_device *mac);
 static inline u8 lower_rate(struct ieee80211softmac_device *mac, u8 rate) {
 	return ieee80211softmac_lower_rate_delta(mac, rate, 1);
@@ -133,6 +135,9 @@ static inline u8 get_fallback_rate(struct ieee80211softmac_device *mac, u8 rate)
 /*** prototypes from _io.c */
 int ieee80211softmac_send_mgt_frame(struct ieee80211softmac_device *mac,
 	void* ptrarg, u32 type, u32 arg);
+int ieee80211softmac_handle_beacon(struct net_device *dev,
+	struct ieee80211_beacon *beacon,
+	struct ieee80211_network *network);
 
 /*** prototypes from _auth.c */
 /* do these have to go into the public header? */
@@ -189,6 +194,7 @@ struct ieee80211softmac_network {
 		authenticated:1,
 		auth_desynced_once:1;
 
+	u8 erp_value;			/* Saved ERP value */
 	u16 capabilities;		/* Capabilities bitfield */
 	u8 challenge_len;		/* Auth Challenge length */
 	char *challenge;		/* Challenge Text */
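
One detail worth keeping in mind when reading the rate helpers above (lower_rate(), get_fallback_rate(), and the saved rate sets): 802.11 rate bytes are expressed in units of 500 kbps, which is why the empty-ratesinfo fallback IEEE80211_CCK_RATE_1MB has the value 0x02. A one-line conversion sketch:

	/* 0x02 -> 1000 kbps (1 Mbps), 0x16 -> 11000, 0x6C -> 54000 */
	static inline unsigned int rate_to_kbps(unsigned char rate)
	{
		return rate * 500;
	}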