author     Linus Torvalds <torvalds@linux-foundation.org>  2008-04-18 21:02:35 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2008-04-18 21:02:35 -0400
commit     334d094504c2fe1c44211ecb49146ae6bca8c321 (patch)
tree       d3c0f68e4b9f8e3d2ccc39e7dfe5de0534a5fad9 /drivers/net
parent     d1a4be630fb068f251d64b62919f143c49ca8057 (diff)
parent     d1643d24c61b725bef399cc1cf2944b4c9c23177 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6.26
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6.26: (1090 commits)
  [NET]: Fix and allocate less memory for ->priv'less netdevices
  [IPV6]: Fix dangling references on error in fib6_add().
  [NETLABEL]: Fix NULL deref in netlbl_unlabel_staticlist_gen() if ifindex not found
  [PKT_SCHED]: Fix datalen check in tcf_simp_init().
  [INET]: Uninline the __inet_inherit_port call.
  [INET]: Drop the inet_inherit_port() call.
  SCTP: Initialize partial_bytes_acked to 0, when all of the data is acked.
  [netdrvr] forcedeth: internal simplifications; changelog removal
  phylib: factor out get_phy_id from within get_phy_device
  PHY: add BCM5464 support to broadcom PHY driver
  cxgb3: Fix __must_check warning with dev_dbg.
  tc35815: Statistics cleanup
  natsemi: fix MMIO for PPC 44x platforms
  [TIPC]: Cleanup of TIPC reference table code
  [TIPC]: Optimized initialization of TIPC reference table
  [TIPC]: Remove inlining of reference table locking routines
  e1000: convert uint16_t style integers to u16
  ixgb: convert uint16_t style integers to u16
  sb1000.c: make const arrays static
  sb1000.c: stop inlining largish static functions
  ...
Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/3c509.c | 729
-rw-r--r--  drivers/net/8139too.c | 10
-rw-r--r--  drivers/net/8390.c | 6
-rw-r--r--  drivers/net/Kconfig | 96
-rw-r--r--  drivers/net/Makefile | 4
-rw-r--r--  drivers/net/appletalk/cops.c | 6
-rw-r--r--  drivers/net/arcnet/arcnet.c | 5
-rw-r--r--  drivers/net/arcnet/com20020.c | 7
-rw-r--r--  drivers/net/at1700.c | 7
-rw-r--r--  drivers/net/atarilance.c | 7
-rw-r--r--  drivers/net/atl1/Makefile | 2
-rw-r--r--  drivers/net/atl1/atl1.h | 286
-rw-r--r--  drivers/net/atl1/atl1_ethtool.c | 505
-rw-r--r--  drivers/net/atl1/atl1_hw.c | 720
-rw-r--r--  drivers/net/atl1/atl1_hw.h | 946
-rw-r--r--  drivers/net/atl1/atl1_param.c | 203
-rw-r--r--  drivers/net/atlx/Makefile | 1
-rw-r--r--  drivers/net/atlx/atl1.c (renamed from drivers/net/atl1/atl1_main.c) | 2120
-rw-r--r--  drivers/net/atlx/atl1.h | 796
-rw-r--r--  drivers/net/atlx/atlx.c | 433
-rw-r--r--  drivers/net/atlx/atlx.h | 506
-rw-r--r--  drivers/net/atp.c | 4
-rw-r--r--  drivers/net/au1000_eth.c | 6
-rw-r--r--  drivers/net/bfin_mac.c | 2
-rw-r--r--  drivers/net/bonding/bond_3ad.c | 2
-rw-r--r--  drivers/net/bonding/bond_alb.c | 2
-rw-r--r--  drivers/net/bonding/bond_main.c | 14
-rw-r--r--  drivers/net/cassini.c | 12
-rw-r--r--  drivers/net/cpmac.c | 5
-rw-r--r--  drivers/net/cxgb3/cxgb3_main.c | 4
-rw-r--r--  drivers/net/cxgb3/cxgb3_offload.c | 22
-rw-r--r--  drivers/net/cxgb3/l2t.c | 2
-rw-r--r--  drivers/net/defxx.c | 3
-rw-r--r--  drivers/net/e1000/e1000.h | 113
-rw-r--r--  drivers/net/e1000/e1000_ethtool.c | 175
-rw-r--r--  drivers/net/e1000/e1000_hw.c | 1541
-rw-r--r--  drivers/net/e1000/e1000_hw.h | 572
-rw-r--r--  drivers/net/e1000/e1000_main.c | 305
-rw-r--r--  drivers/net/e1000/e1000_osdep.h | 7
-rw-r--r--  drivers/net/e1000e/82571.c | 163
-rw-r--r--  drivers/net/e1000e/Makefile | 2
-rw-r--r--  drivers/net/e1000e/defines.h | 109
-rw-r--r--  drivers/net/e1000e/e1000.h | 37
-rw-r--r--  drivers/net/e1000e/es2lan.c | 137
-rw-r--r--  drivers/net/e1000e/ethtool.c | 282
-rw-r--r--  drivers/net/e1000e/hw.h | 173
-rw-r--r--  drivers/net/e1000e/ich8lan.c | 309
-rw-r--r--  drivers/net/e1000e/lib.c | 348
-rw-r--r--  drivers/net/e1000e/netdev.c | 645
-rw-r--r--  drivers/net/e1000e/param.c | 33
-rw-r--r--  drivers/net/e1000e/phy.c | 164
-rw-r--r--  drivers/net/ehea/ehea.h | 6
-rw-r--r--  drivers/net/ehea/ehea_main.c | 92
-rw-r--r--  drivers/net/fec_mpc52xx.c | 2
-rw-r--r--  drivers/net/fec_mpc52xx_phy.c | 2
-rw-r--r--  drivers/net/forcedeth.c | 240
-rw-r--r--  drivers/net/fs_enet/fs_enet-main.c | 4
-rw-r--r--  drivers/net/fs_enet/mii-bitbang.c | 4
-rw-r--r--  drivers/net/fs_enet/mii-fec.c | 4
-rw-r--r--  drivers/net/gianfar.c | 80
-rw-r--r--  drivers/net/gianfar.h | 20
-rw-r--r--  drivers/net/gianfar_mii.c | 2
-rw-r--r--  drivers/net/hamradio/bpqether.c | 4
-rw-r--r--  drivers/net/ibmveth.c | 42
-rw-r--r--  drivers/net/ixgb/ixgb.h | 60
-rw-r--r--  drivers/net/ixgb/ixgb_ee.c | 124
-rw-r--r--  drivers/net/ixgb/ixgb_ee.h | 12
-rw-r--r--  drivers/net/ixgb/ixgb_ethtool.c | 67
-rw-r--r--  drivers/net/ixgb/ixgb_hw.c | 199
-rw-r--r--  drivers/net/ixgb/ixgb_hw.h | 250
-rw-r--r--  drivers/net/ixgb/ixgb_main.c | 156
-rw-r--r--  drivers/net/ixgb/ixgb_osdep.h | 7
-rw-r--r--  drivers/net/ixgbe/ixgbe.h | 87
-rw-r--r--  drivers/net/ixgbe/ixgbe_ethtool.c | 39
-rw-r--r--  drivers/net/ixgbe/ixgbe_main.c | 1520
-rw-r--r--  drivers/net/korina.c | 1233
-rw-r--r--  drivers/net/loopback.c | 2
-rw-r--r--  drivers/net/macb.c | 2
-rw-r--r--  drivers/net/macvlan.c | 2
-rw-r--r--  drivers/net/mv643xx_eth.c | 475
-rw-r--r--  drivers/net/natsemi.c | 20
-rw-r--r--  drivers/net/netxen/netxen_nic.h | 18
-rw-r--r--  drivers/net/netxen/netxen_nic_isr.c | 2
-rw-r--r--  drivers/net/netxen/netxen_nic_main.c | 19
-rw-r--r--  drivers/net/ni52.c | 255
-rw-r--r--  drivers/net/ni52.h | 4
-rw-r--r--  drivers/net/niu.c | 701
-rw-r--r--  drivers/net/niu.h | 4
-rw-r--r--  drivers/net/pasemi_mac.c | 2
-rw-r--r--  drivers/net/phy/broadcom.c | 77
-rw-r--r--  drivers/net/phy/fixed.c | 2
-rw-r--r--  drivers/net/phy/phy_device.c | 38
-rw-r--r--  drivers/net/pppoe.c | 6
-rw-r--r--  drivers/net/ps3_gelic_wireless.c | 2
-rw-r--r--  drivers/net/qla3xxx.c | 6
-rw-r--r--  drivers/net/s2io.c | 459
-rw-r--r--  drivers/net/s2io.h | 50
-rw-r--r--  drivers/net/sb1000.c | 101
-rw-r--r--  drivers/net/sb1250-mac.c | 2
-rw-r--r--  drivers/net/sc92031.c | 86
-rw-r--r--  drivers/net/sk98lin/Makefile | 87
-rw-r--r--  drivers/net/sk98lin/h/lm80.h | 179
-rw-r--r--  drivers/net/sk98lin/h/skaddr.h | 285
-rw-r--r--  drivers/net/sk98lin/h/skcsum.h | 213
-rw-r--r--  drivers/net/sk98lin/h/skdebug.h | 74
-rw-r--r--  drivers/net/sk98lin/h/skdrv1st.h | 188
-rw-r--r--  drivers/net/sk98lin/h/skdrv2nd.h | 447
-rw-r--r--  drivers/net/sk98lin/h/skerror.h | 55
-rw-r--r--  drivers/net/sk98lin/h/skgedrv.h | 51
-rw-r--r--  drivers/net/sk98lin/h/skgehw.h | 2126
-rw-r--r--  drivers/net/sk98lin/h/skgehwt.h | 48
-rw-r--r--  drivers/net/sk98lin/h/skgei2c.h | 210
-rw-r--r--  drivers/net/sk98lin/h/skgeinit.h | 797
-rw-r--r--  drivers/net/sk98lin/h/skgepnm2.h | 334
-rw-r--r--  drivers/net/sk98lin/h/skgepnmi.h | 962
-rw-r--r--  drivers/net/sk98lin/h/skgesirq.h | 110
-rw-r--r--  drivers/net/sk98lin/h/ski2c.h | 174
-rw-r--r--  drivers/net/sk98lin/h/skqueue.h | 94
-rw-r--r--  drivers/net/sk98lin/h/skrlmt.h | 438
-rw-r--r--  drivers/net/sk98lin/h/sktimer.h | 63
-rw-r--r--  drivers/net/sk98lin/h/sktypes.h | 69
-rw-r--r--  drivers/net/sk98lin/h/skversion.h | 38
-rw-r--r--  drivers/net/sk98lin/h/skvpd.h | 248
-rw-r--r--  drivers/net/sk98lin/h/xmac_ii.h | 1579
-rw-r--r--  drivers/net/sk98lin/skaddr.c | 1788
-rw-r--r--  drivers/net/sk98lin/skdim.c | 742
-rw-r--r--  drivers/net/sk98lin/skethtool.c | 627
-rw-r--r--  drivers/net/sk98lin/skge.c | 5218
-rw-r--r--  drivers/net/sk98lin/skgehwt.c | 171
-rw-r--r--  drivers/net/sk98lin/skgeinit.c | 2005
-rw-r--r--  drivers/net/sk98lin/skgemib.c | 1075
-rw-r--r--  drivers/net/sk98lin/skgepnmi.c | 8198
-rw-r--r--  drivers/net/sk98lin/skgesirq.c | 2229
-rw-r--r--  drivers/net/sk98lin/ski2c.c | 1296
-rw-r--r--  drivers/net/sk98lin/sklm80.c | 141
-rw-r--r--  drivers/net/sk98lin/skqueue.c | 179
-rw-r--r--  drivers/net/sk98lin/skrlmt.c | 3257
-rw-r--r--  drivers/net/sk98lin/sktimer.c | 250
-rw-r--r--  drivers/net/sk98lin/skvpd.c | 1091
-rw-r--r--  drivers/net/sk98lin/skxmac2.c | 4160
-rw-r--r--  drivers/net/skfp/fplustm.c | 12
-rw-r--r--  drivers/net/skfp/h/fplustm.h | 20
-rw-r--r--  drivers/net/skfp/hwmtm.c | 86
-rw-r--r--  drivers/net/skfp/skfddi.c | 4
-rw-r--r--  drivers/net/smc91x.c | 335
-rw-r--r--  drivers/net/smc91x.h | 331
-rw-r--r--  drivers/net/spider_net.c | 36
-rw-r--r--  drivers/net/spider_net.h | 7
-rw-r--r--  drivers/net/tc35815.c | 1701
-rw-r--r--  drivers/net/tokenring/3c359.c | 21
-rw-r--r--  drivers/net/tulip/Kconfig | 15
-rw-r--r--  drivers/net/tulip/Makefile | 1
-rw-r--r--  drivers/net/tulip/tulip.h | 7
-rw-r--r--  drivers/net/tulip/tulip_core.c | 19
-rw-r--r--  drivers/net/tulip/winbond-840.c | 5
-rw-r--r--  drivers/net/tulip/xircom_tulip_cb.c | 1726
-rw-r--r--  drivers/net/tun.c | 94
-rw-r--r--  drivers/net/ucc_geth.c | 2
-rw-r--r--  drivers/net/ucc_geth.h | 2
-rw-r--r--  drivers/net/ucc_geth_mii.c | 2
-rw-r--r--  drivers/net/usb/dm9601.c | 10
-rw-r--r--  drivers/net/usb/rndis_host.c | 5
-rw-r--r--  drivers/net/veth.c | 2
-rw-r--r--  drivers/net/via-velocity.c | 23
-rw-r--r--  drivers/net/wan/cosa.c | 14
-rw-r--r--  drivers/net/wan/dlci.c | 2
-rw-r--r--  drivers/net/wan/hdlc.c | 4
-rw-r--r--  drivers/net/wan/lapbether.c | 4
-rw-r--r--  drivers/net/wan/syncppp.c | 2
-rw-r--r--  drivers/net/wireless/Kconfig | 113
-rw-r--r--  drivers/net/wireless/Makefile | 5
-rw-r--r--  drivers/net/wireless/adm8211.c | 95
-rw-r--r--  drivers/net/wireless/adm8211.h | 65
-rw-r--r--  drivers/net/wireless/ath5k/Kconfig | 37
-rw-r--r--  drivers/net/wireless/ath5k/Makefile | 8
-rw-r--r--  drivers/net/wireless/ath5k/ath5k.h | 170
-rw-r--r--  drivers/net/wireless/ath5k/base.c | 575
-rw-r--r--  drivers/net/wireless/ath5k/base.h | 18
-rw-r--r--  drivers/net/wireless/ath5k/debug.c | 92
-rw-r--r--  drivers/net/wireless/ath5k/debug.h | 29
-rw-r--r--  drivers/net/wireless/ath5k/hw.c | 791
-rw-r--r--  drivers/net/wireless/ath5k/hw.h | 150
-rw-r--r--  drivers/net/wireless/ath5k/initvals.c | 473
-rw-r--r--  drivers/net/wireless/ath5k/phy.c | 387
-rw-r--r--  drivers/net/wireless/ath5k/reg.h | 4
-rw-r--r--  drivers/net/wireless/ath5k/regdom.c | 121
-rw-r--r--  drivers/net/wireless/ath5k/regdom.h | 500
-rw-r--r--  drivers/net/wireless/atmel.c | 5
-rw-r--r--  drivers/net/wireless/b43/Kconfig | 18
-rw-r--r--  drivers/net/wireless/b43/Makefile | 5
-rw-r--r--  drivers/net/wireless/b43/b43.h | 195
-rw-r--r--  drivers/net/wireless/b43/dma.c | 440
-rw-r--r--  drivers/net/wireless/b43/dma.h | 14
-rw-r--r--  drivers/net/wireless/b43/main.c | 1030
-rw-r--r--  drivers/net/wireless/b43/main.h | 11
-rw-r--r--  drivers/net/wireless/b43/nphy.c | 1
-rw-r--r--  drivers/net/wireless/b43/nphy.h | 40
-rw-r--r--  drivers/net/wireless/b43/pcmcia.c | 10
-rw-r--r--  drivers/net/wireless/b43/pio.c | 842
-rw-r--r--  drivers/net/wireless/b43/pio.h | 220
-rw-r--r--  drivers/net/wireless/b43/sysfs.c | 89
-rw-r--r--  drivers/net/wireless/b43/wa.c | 45
-rw-r--r--  drivers/net/wireless/b43/xmit.c | 206
-rw-r--r--  drivers/net/wireless/b43/xmit.h | 44
-rw-r--r--  drivers/net/wireless/b43legacy/b43legacy.h | 49
-rw-r--r--  drivers/net/wireless/b43legacy/main.c | 467
-rw-r--r--  drivers/net/wireless/b43legacy/xmit.c | 78
-rw-r--r--  drivers/net/wireless/bcm43xx/Kconfig | 70
-rw-r--r--  drivers/net/wireless/bcm43xx/Makefile | 12
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx.h | 997
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_debugfs.c | 556
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_debugfs.h | 118
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_dma.c | 1263
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_dma.h | 386
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_ethtool.c | 50
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_ethtool.h | 8
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_ilt.c | 352
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_ilt.h | 33
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_leds.c | 307
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_leds.h | 62
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_main.c | 4281
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_main.h | 133
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_phy.c | 2346
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_phy.h | 78
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_pio.c | 674
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_pio.h | 163
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_power.c | 393
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_power.h | 56
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_radio.c | 2170
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_radio.h | 115
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_sysfs.c | 471
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_sysfs.h | 9
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_wx.c | 1035
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_wx.h | 36
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_xmit.c | 565
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_xmit.h | 150
-rw-r--r--  drivers/net/wireless/ipw2200.c | 16
-rw-r--r--  drivers/net/wireless/ipw2200.h | 112
-rw-r--r--  drivers/net/wireless/iwlwifi/Kconfig | 53
-rw-r--r--  drivers/net/wireless/iwlwifi/Makefile | 12
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-3945-commands.h | 96
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-3945-core.h | 80
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-3945-debug.h | 19
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-3945-hw.h | 213
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-3945-io.h | 41
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-3945-led.c | 433
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-3945-led.h | 73
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-3945-rs.c | 158
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-3945-rs.h | 12
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-3945.c | 431
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-3945.h | 117
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-4965-commands.h | 159
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-4965-hw.h | 531
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-4965-io.h | 431
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-4965-rs.c | 567
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-4965-rs.h | 20
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-4965.c | 2412
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-4965.h | 430
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-core.c | 292
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-core.h | 246
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-csr.h | 265
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-debug.h (renamed from drivers/net/wireless/iwlwifi/iwl-4965-debug.h) | 65
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-debugfs.c | 335
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-eeprom.c | 561
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-eeprom.h | 375
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-hcmd.c | 278
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-helpers.h | 22
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-io.h | 429
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-led.c | 449
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-led.h | 82
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-prph.h | 78
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-rfkill.c | 173
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-rfkill.h | 54
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-spectrum.h | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-sta.c | 355
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-sta.h | 49
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl3945-base.c | 1082
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl4965-base.c | 3674
-rw-r--r--  drivers/net/wireless/libertas/11d.c | 16
-rw-r--r--  drivers/net/wireless/libertas/11d.h | 5
-rw-r--r--  drivers/net/wireless/libertas/Makefile | 2
-rw-r--r--  drivers/net/wireless/libertas/assoc.c | 1283
-rw-r--r--  drivers/net/wireless/libertas/assoc.h | 29
-rw-r--r--  drivers/net/wireless/libertas/cmd.c | 538
-rw-r--r--  drivers/net/wireless/libertas/cmd.h | 11
-rw-r--r--  drivers/net/wireless/libertas/cmdresp.c | 229
-rw-r--r--  drivers/net/wireless/libertas/debugfs.c | 178
-rw-r--r--  drivers/net/wireless/libertas/decl.h | 23
-rw-r--r--  drivers/net/wireless/libertas/defs.h | 12
-rw-r--r--  drivers/net/wireless/libertas/dev.h | 84
-rw-r--r--  drivers/net/wireless/libertas/ethtool.c | 77
-rw-r--r--  drivers/net/wireless/libertas/host.h | 4
-rw-r--r--  drivers/net/wireless/libertas/hostcmd.h | 57
-rw-r--r--  drivers/net/wireless/libertas/if_cs.c | 247
-rw-r--r--  drivers/net/wireless/libertas/if_sdio.c | 65
-rw-r--r--  drivers/net/wireless/libertas/if_usb.c | 71
-rw-r--r--  drivers/net/wireless/libertas/if_usb.h | 2
-rw-r--r--  drivers/net/wireless/libertas/join.c | 895
-rw-r--r--  drivers/net/wireless/libertas/join.h | 53
-rw-r--r--  drivers/net/wireless/libertas/main.c | 289
-rw-r--r--  drivers/net/wireless/libertas/rx.c | 6
-rw-r--r--  drivers/net/wireless/libertas/scan.c | 877
-rw-r--r--  drivers/net/wireless/libertas/scan.h | 184
-rw-r--r--  drivers/net/wireless/libertas/tx.c | 25
-rw-r--r--  drivers/net/wireless/libertas/types.h | 13
-rw-r--r--  drivers/net/wireless/libertas/wext.c | 62
-rw-r--r--  drivers/net/wireless/libertas/wext.h | 13
-rw-r--r--  drivers/net/wireless/net2280.h | 452
-rw-r--r--  drivers/net/wireless/p54/Kconfig | 63
-rw-r--r--  drivers/net/wireless/p54/Makefile | 3
-rw-r--r--  drivers/net/wireless/p54/net2280.h | 452
-rw-r--r--  drivers/net/wireless/p54/p54.h (renamed from drivers/net/wireless/p54.h) | 4
-rw-r--r--  drivers/net/wireless/p54/p54common.c (renamed from drivers/net/wireless/p54common.c) | 96
-rw-r--r--  drivers/net/wireless/p54/p54common.h (renamed from drivers/net/wireless/p54common.h) | 75
-rw-r--r--  drivers/net/wireless/p54/p54pci.c (renamed from drivers/net/wireless/p54pci.c) | 0
-rw-r--r--  drivers/net/wireless/p54/p54pci.h (renamed from drivers/net/wireless/p54pci.h) | 0
-rw-r--r--  drivers/net/wireless/p54/p54usb.c (renamed from drivers/net/wireless/p54usb.c) | 0
-rw-r--r--  drivers/net/wireless/p54/p54usb.h (renamed from drivers/net/wireless/p54usb.h) | 0
-rw-r--r--  drivers/net/wireless/prism54/isl_ioctl.c | 51
-rw-r--r--  drivers/net/wireless/prism54/islpci_dev.c | 6
-rw-r--r--  drivers/net/wireless/prism54/islpci_dev.h | 6
-rw-r--r--  drivers/net/wireless/ray_cs.c | 73
-rw-r--r--  drivers/net/wireless/rndis_wlan.c | 451
-rw-r--r--  drivers/net/wireless/rt2x00/Kconfig | 60
-rw-r--r--  drivers/net/wireless/rt2x00/Makefile | 37
-rw-r--r--  drivers/net/wireless/rt2x00/rt2400pci.c | 569
-rw-r--r--  drivers/net/wireless/rt2x00/rt2400pci.h | 14
-rw-r--r--  drivers/net/wireless/rt2x00/rt2500pci.c | 591
-rw-r--r--  drivers/net/wireless/rt2x00/rt2500pci.h | 6
-rw-r--r--  drivers/net/wireless/rt2x00/rt2500usb.c | 644
-rw-r--r--  drivers/net/wireless/rt2x00/rt2500usb.h | 9
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00.h | 356
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00config.c | 167
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00debug.c | 125
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00debug.h | 2
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00dev.c | 911
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00dump.h | 8
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00firmware.c | 21
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00leds.c | 219
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00leds.h | 50
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00lib.h | 100
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00mac.c | 309
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00pci.c | 287
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00pci.h | 67
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00queue.c | 304
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00queue.h | 468
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00reg.h | 75
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00rfkill.c | 2
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00ring.h | 290
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00usb.c | 331
-rw-r--r--  drivers/net/wireless/rt2x00/rt2x00usb.h | 161
-rw-r--r--  drivers/net/wireless/rt2x00/rt61pci.c | 829
-rw-r--r--  drivers/net/wireless/rt2x00/rt61pci.h | 33
-rw-r--r--  drivers/net/wireless/rt2x00/rt73usb.c | 699
-rw-r--r--  drivers/net/wireless/rt2x00/rt73usb.h | 24
-rw-r--r--  drivers/net/wireless/rtl8180.h | 2
-rw-r--r--  drivers/net/wireless/rtl8180_dev.c | 107
-rw-r--r--  drivers/net/wireless/rtl8180_grf5101.c | 5
-rw-r--r--  drivers/net/wireless/rtl8180_max2820.c | 5
-rw-r--r--  drivers/net/wireless/rtl8180_rtl8225.c | 15
-rw-r--r--  drivers/net/wireless/rtl8180_sa2400.c | 5
-rw-r--r--  drivers/net/wireless/rtl8187.h | 2
-rw-r--r--  drivers/net/wireless/rtl8187_dev.c | 92
-rw-r--r--  drivers/net/wireless/rtl8187_rtl8225.c | 15
-rw-r--r--  drivers/net/wireless/rtl818x.h | 70
-rw-r--r--  drivers/net/wireless/strip.c | 6
-rw-r--r--  drivers/net/wireless/wavelan_cs.c | 47
-rw-r--r--  drivers/net/wireless/wavelan_cs.p.h | 26
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_chip.c | 24
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_chip.h | 17
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_ieee80211.c | 11
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_mac.c | 241
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_mac.h | 5
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_usb.c | 11
-rw-r--r--  drivers/net/yellowfin.c | 4
375 files changed, 33895 insertions, 86245 deletions
diff --git a/drivers/net/3c509.c b/drivers/net/3c509.c
index 8fafac987e0b..54dac0696d91 100644
--- a/drivers/net/3c509.c
+++ b/drivers/net/3c509.c
@@ -54,25 +54,24 @@
54 v1.19a 28Oct2002 Davud Ruggiero <jdr@farfalle.com> 54 v1.19a 28Oct2002 Davud Ruggiero <jdr@farfalle.com>
55 - Increase *read_eeprom udelay to workaround oops with 2 cards. 55 - Increase *read_eeprom udelay to workaround oops with 2 cards.
56 v1.19b 08Nov2002 Marc Zyngier <maz@wild-wind.fr.eu.org> 56 v1.19b 08Nov2002 Marc Zyngier <maz@wild-wind.fr.eu.org>
57 - Introduce driver model for EISA cards. 57 - Introduce driver model for EISA cards.
58 v1.20 04Feb2008 Ondrej Zary <linux@rainbow-software.org>
59 - convert to isa_driver and pnp_driver and some cleanups
58*/ 60*/
59 61
60#define DRV_NAME "3c509" 62#define DRV_NAME "3c509"
61#define DRV_VERSION "1.19b" 63#define DRV_VERSION "1.20"
62#define DRV_RELDATE "08Nov2002" 64#define DRV_RELDATE "04Feb2008"
63 65
64/* A few values that may be tweaked. */ 66/* A few values that may be tweaked. */
65 67
66/* Time in jiffies before concluding the transmitter is hung. */ 68/* Time in jiffies before concluding the transmitter is hung. */
67#define TX_TIMEOUT (400*HZ/1000) 69#define TX_TIMEOUT (400*HZ/1000)
68/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
69static int max_interrupt_work = 10;
70 70
71#include <linux/module.h> 71#include <linux/module.h>
72#ifdef CONFIG_MCA
73#include <linux/mca.h> 72#include <linux/mca.h>
74#endif 73#include <linux/isa.h>
75#include <linux/isapnp.h> 74#include <linux/pnp.h>
76#include <linux/string.h> 75#include <linux/string.h>
77#include <linux/interrupt.h> 76#include <linux/interrupt.h>
78#include <linux/errno.h> 77#include <linux/errno.h>
@@ -97,10 +96,6 @@ static int max_interrupt_work = 10;
97 96
98static char version[] __initdata = DRV_NAME ".c:" DRV_VERSION " " DRV_RELDATE " becker@scyld.com\n"; 97static char version[] __initdata = DRV_NAME ".c:" DRV_VERSION " " DRV_RELDATE " becker@scyld.com\n";
99 98
100#if defined(CONFIG_PM) && (defined(CONFIG_MCA) || defined(CONFIG_EISA))
101#define EL3_SUSPEND
102#endif
103
104#ifdef EL3_DEBUG 99#ifdef EL3_DEBUG
105static int el3_debug = EL3_DEBUG; 100static int el3_debug = EL3_DEBUG;
106#else 101#else
@@ -111,6 +106,7 @@ static int el3_debug = 2;
111 * a global variable so that the mca/eisa probe routines can increment 106 * a global variable so that the mca/eisa probe routines can increment
112 * it */ 107 * it */
113static int el3_cards = 0; 108static int el3_cards = 0;
109#define EL3_MAX_CARDS 8
114 110
115/* To minimize the size of the driver source I only define operating 111/* To minimize the size of the driver source I only define operating
116 constants if they are used several times. You'll need the manual 112 constants if they are used several times. You'll need the manual
@@ -119,7 +115,7 @@ static int el3_cards = 0;
119#define EL3_DATA 0x00 115#define EL3_DATA 0x00
120#define EL3_CMD 0x0e 116#define EL3_CMD 0x0e
121#define EL3_STATUS 0x0e 117#define EL3_STATUS 0x0e
122#define EEPROM_READ 0x80 118#define EEPROM_READ 0x80
123 119
124#define EL3_IO_EXTENT 16 120#define EL3_IO_EXTENT 16
125 121
@@ -168,23 +164,31 @@ enum RxFilter {
168 */ 164 */
169#define SKB_QUEUE_SIZE 64 165#define SKB_QUEUE_SIZE 64
170 166
167enum el3_cardtype { EL3_ISA, EL3_PNP, EL3_MCA, EL3_EISA };
168
171struct el3_private { 169struct el3_private {
172 struct net_device_stats stats; 170 struct net_device_stats stats;
173 struct net_device *next_dev;
174 spinlock_t lock; 171 spinlock_t lock;
175 /* skb send-queue */ 172 /* skb send-queue */
176 int head, size; 173 int head, size;
177 struct sk_buff *queue[SKB_QUEUE_SIZE]; 174 struct sk_buff *queue[SKB_QUEUE_SIZE];
178 enum { 175 enum el3_cardtype type;
179 EL3_MCA,
180 EL3_PNP,
181 EL3_EISA,
182 } type; /* type of device */
183 struct device *dev;
184}; 176};
185static int id_port __initdata = 0x110; /* Start with 0x110 to avoid new sound cards.*/ 177static int id_port;
186static struct net_device *el3_root_dev; 178static int current_tag;
179static struct net_device *el3_devs[EL3_MAX_CARDS];
180
181/* Parameters that may be passed into the module. */
182static int debug = -1;
183static int irq[] = {-1, -1, -1, -1, -1, -1, -1, -1};
184/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
185static int max_interrupt_work = 10;
186#ifdef CONFIG_PNP
187static int nopnp;
188#endif
187 189
190static int __init el3_common_init(struct net_device *dev);
191static void el3_common_remove(struct net_device *dev);
188static ushort id_read_eeprom(int index); 192static ushort id_read_eeprom(int index);
189static ushort read_eeprom(int ioaddr, int index); 193static ushort read_eeprom(int ioaddr, int index);
190static int el3_open(struct net_device *dev); 194static int el3_open(struct net_device *dev);
@@ -199,7 +203,7 @@ static void el3_tx_timeout (struct net_device *dev);
199static void el3_down(struct net_device *dev); 203static void el3_down(struct net_device *dev);
200static void el3_up(struct net_device *dev); 204static void el3_up(struct net_device *dev);
201static const struct ethtool_ops ethtool_ops; 205static const struct ethtool_ops ethtool_ops;
202#ifdef EL3_SUSPEND 206#ifdef CONFIG_PM
203static int el3_suspend(struct device *, pm_message_t); 207static int el3_suspend(struct device *, pm_message_t);
204static int el3_resume(struct device *); 208static int el3_resume(struct device *);
205#else 209#else
@@ -209,13 +213,272 @@ static int el3_resume(struct device *);
209 213
210 214
211/* generic device remove for all device types */ 215/* generic device remove for all device types */
212#if defined(CONFIG_EISA) || defined(CONFIG_MCA)
213static int el3_device_remove (struct device *device); 216static int el3_device_remove (struct device *device);
214#endif
215#ifdef CONFIG_NET_POLL_CONTROLLER 217#ifdef CONFIG_NET_POLL_CONTROLLER
216static void el3_poll_controller(struct net_device *dev); 218static void el3_poll_controller(struct net_device *dev);
217#endif 219#endif
218 220
221/* Return 0 on success, 1 on error, 2 when found already detected PnP card */
222static int el3_isa_id_sequence(__be16 *phys_addr)
223{
224 short lrs_state = 0xff;
225 int i;
226
227 /* ISA boards are detected by sending the ID sequence to the
228 ID_PORT. We find cards past the first by setting the 'current_tag'
229 on cards as they are found. Cards with their tag set will not
230 respond to subsequent ID sequences. */
231
232 outb(0x00, id_port);
233 outb(0x00, id_port);
234 for (i = 0; i < 255; i++) {
235 outb(lrs_state, id_port);
236 lrs_state <<= 1;
237 lrs_state = lrs_state & 0x100 ? lrs_state ^ 0xcf : lrs_state;
238 }
239 /* For the first probe, clear all board's tag registers. */
240 if (current_tag == 0)
241 outb(0xd0, id_port);
242 else /* Otherwise kill off already-found boards. */
243 outb(0xd8, id_port);
244 if (id_read_eeprom(7) != 0x6d50)
245 return 1;
246 /* Read in EEPROM data, which does contention-select.
247 Only the lowest address board will stay "on-line".
248 3Com got the byte order backwards. */
249 for (i = 0; i < 3; i++)
250 phys_addr[i] = htons(id_read_eeprom(i));
251#ifdef CONFIG_PNP
252 if (!nopnp) {
253 /* The ISA PnP 3c509 cards respond to the ID sequence too.
254 This check is needed in order not to register them twice. */
255 for (i = 0; i < el3_cards; i++) {
256 struct el3_private *lp = netdev_priv(el3_devs[i]);
257 if (lp->type == EL3_PNP
258 && !memcmp(phys_addr, el3_devs[i]->dev_addr,
259 ETH_ALEN)) {
260 if (el3_debug > 3)
261 printk(KERN_DEBUG "3c509 with address %02x %02x %02x %02x %02x %02x was found by ISAPnP\n",
262 phys_addr[0] & 0xff, phys_addr[0] >> 8,
263 phys_addr[1] & 0xff, phys_addr[1] >> 8,
264 phys_addr[2] & 0xff, phys_addr[2] >> 8);
265 /* Set the adaptor tag so that the next card can be found. */
266 outb(0xd0 + ++current_tag, id_port);
267 return 2;
268 }
269 }
270 }
271#endif /* CONFIG_PNP */
272 return 0;
273
274}
275
276static void __devinit el3_dev_fill(struct net_device *dev, __be16 *phys_addr,
277 int ioaddr, int irq, int if_port,
278 enum el3_cardtype type)
279{
280 struct el3_private *lp = netdev_priv(dev);
281
282 memcpy(dev->dev_addr, phys_addr, ETH_ALEN);
283 dev->base_addr = ioaddr;
284 dev->irq = irq;
285 dev->if_port = if_port;
286 lp->type = type;
287}
288
289static int __devinit el3_isa_match(struct device *pdev,
290 unsigned int ndev)
291{
292 struct net_device *dev;
293 int ioaddr, isa_irq, if_port, err;
294 unsigned int iobase;
295 __be16 phys_addr[3];
296
297 while ((err = el3_isa_id_sequence(phys_addr)) == 2)
298 ; /* Skip to next card when PnP card found */
299 if (err == 1)
300 return 0;
301
302 iobase = id_read_eeprom(8);
303 if_port = iobase >> 14;
304 ioaddr = 0x200 + ((iobase & 0x1f) << 4);
305 if (irq[el3_cards] > 1 && irq[el3_cards] < 16)
306 isa_irq = irq[el3_cards];
307 else
308 isa_irq = id_read_eeprom(9) >> 12;
309
310 dev = alloc_etherdev(sizeof(struct el3_private));
311 if (!dev)
312 return -ENOMEM;
313
314 netdev_boot_setup_check(dev);
315
316 if (!request_region(ioaddr, EL3_IO_EXTENT, "3c509-isa")) {
317 free_netdev(dev);
318 return 0;
319 }
320
321 /* Set the adaptor tag so that the next card can be found. */
322 outb(0xd0 + ++current_tag, id_port);
323
324 /* Activate the adaptor at the EEPROM location. */
325 outb((ioaddr >> 4) | 0xe0, id_port);
326
327 EL3WINDOW(0);
328 if (inw(ioaddr) != 0x6d50) {
329 free_netdev(dev);
330 return 0;
331 }
332
333 /* Free the interrupt so that some other card can use it. */
334 outw(0x0f00, ioaddr + WN0_IRQ);
335
336 el3_dev_fill(dev, phys_addr, ioaddr, isa_irq, if_port, EL3_ISA);
337 dev_set_drvdata(pdev, dev);
338 if (el3_common_init(dev)) {
339 free_netdev(dev);
340 return 0;
341 }
342
343 el3_devs[el3_cards++] = dev;
344 return 1;
345}
346
347static int __devexit el3_isa_remove(struct device *pdev,
348 unsigned int ndev)
349{
350 el3_device_remove(pdev);
351 dev_set_drvdata(pdev, NULL);
352 return 0;
353}
354
355#ifdef CONFIG_PM
356static int el3_isa_suspend(struct device *dev, unsigned int n,
357 pm_message_t state)
358{
359 current_tag = 0;
360 return el3_suspend(dev, state);
361}
362
363static int el3_isa_resume(struct device *dev, unsigned int n)
364{
365 struct net_device *ndev = dev_get_drvdata(dev);
366 int ioaddr = ndev->base_addr, err;
367 __be16 phys_addr[3];
368
369 while ((err = el3_isa_id_sequence(phys_addr)) == 2)
370 ; /* Skip to next card when PnP card found */
371 if (err == 1)
372 return 0;
373 /* Set the adaptor tag so that the next card can be found. */
374 outb(0xd0 + ++current_tag, id_port);
375 /* Enable the card */
376 outb((ioaddr >> 4) | 0xe0, id_port);
377 EL3WINDOW(0);
378 if (inw(ioaddr) != 0x6d50)
379 return 1;
380 /* Free the interrupt so that some other card can use it. */
381 outw(0x0f00, ioaddr + WN0_IRQ);
382 return el3_resume(dev);
383}
384#endif
385
386static struct isa_driver el3_isa_driver = {
387 .match = el3_isa_match,
388 .remove = __devexit_p(el3_isa_remove),
389#ifdef CONFIG_PM
390 .suspend = el3_isa_suspend,
391 .resume = el3_isa_resume,
392#endif
393 .driver = {
394 .name = "3c509"
395 },
396};
397static int isa_registered;
398
399#ifdef CONFIG_PNP
400static struct pnp_device_id el3_pnp_ids[] = {
401 { .id = "TCM5090" }, /* 3Com Etherlink III (TP) */
402 { .id = "TCM5091" }, /* 3Com Etherlink III */
403 { .id = "TCM5094" }, /* 3Com Etherlink III (combo) */
404 { .id = "TCM5095" }, /* 3Com Etherlink III (TPO) */
405 { .id = "TCM5098" }, /* 3Com Etherlink III (TPC) */
406 { .id = "PNP80f7" }, /* 3Com Etherlink III compatible */
407 { .id = "PNP80f8" }, /* 3Com Etherlink III compatible */
408 { .id = "" }
409};
410MODULE_DEVICE_TABLE(pnp, el3_pnp_ids);
411
412static int __devinit el3_pnp_probe(struct pnp_dev *pdev,
413 const struct pnp_device_id *id)
414{
415 short i;
416 int ioaddr, irq, if_port;
417 u16 phys_addr[3];
418 struct net_device *dev = NULL;
419 int err;
420
421 ioaddr = pnp_port_start(pdev, 0);
422 if (!request_region(ioaddr, EL3_IO_EXTENT, "3c509-pnp"))
423 return -EBUSY;
424 irq = pnp_irq(pdev, 0);
425 EL3WINDOW(0);
426 for (i = 0; i < 3; i++)
427 phys_addr[i] = htons(read_eeprom(ioaddr, i));
428 if_port = read_eeprom(ioaddr, 8) >> 14;
429 dev = alloc_etherdev(sizeof(struct el3_private));
430 if (!dev) {
431 release_region(ioaddr, EL3_IO_EXTENT);
432 return -ENOMEM;
433 }
434 SET_NETDEV_DEV(dev, &pdev->dev);
435 netdev_boot_setup_check(dev);
436
437 el3_dev_fill(dev, phys_addr, ioaddr, irq, if_port, EL3_PNP);
438 pnp_set_drvdata(pdev, dev);
439 err = el3_common_init(dev);
440
441 if (err) {
442 pnp_set_drvdata(pdev, NULL);
443 free_netdev(dev);
444 return err;
445 }
446
447 el3_devs[el3_cards++] = dev;
448 return 0;
449}
450
451static void __devexit el3_pnp_remove(struct pnp_dev *pdev)
452{
453 el3_common_remove(pnp_get_drvdata(pdev));
454 pnp_set_drvdata(pdev, NULL);
455}
456
457#ifdef CONFIG_PM
458static int el3_pnp_suspend(struct pnp_dev *pdev, pm_message_t state)
459{
460 return el3_suspend(&pdev->dev, state);
461}
462
463static int el3_pnp_resume(struct pnp_dev *pdev)
464{
465 return el3_resume(&pdev->dev);
466}
467#endif
468
469static struct pnp_driver el3_pnp_driver = {
470 .name = "3c509",
471 .id_table = el3_pnp_ids,
472 .probe = el3_pnp_probe,
473 .remove = __devexit_p(el3_pnp_remove),
474#ifdef CONFIG_PM
475 .suspend = el3_pnp_suspend,
476 .resume = el3_pnp_resume,
477#endif
478};
479static int pnp_registered;
480#endif /* CONFIG_PNP */
481
219#ifdef CONFIG_EISA 482#ifdef CONFIG_EISA
220static struct eisa_device_id el3_eisa_ids[] = { 483static struct eisa_device_id el3_eisa_ids[] = {
221 { "TCM5092" }, 484 { "TCM5092" },
@@ -230,13 +493,14 @@ static int el3_eisa_probe (struct device *device);
230static struct eisa_driver el3_eisa_driver = { 493static struct eisa_driver el3_eisa_driver = {
231 .id_table = el3_eisa_ids, 494 .id_table = el3_eisa_ids,
232 .driver = { 495 .driver = {
233 .name = "3c509", 496 .name = "3c579",
234 .probe = el3_eisa_probe, 497 .probe = el3_eisa_probe,
235 .remove = __devexit_p (el3_device_remove), 498 .remove = __devexit_p (el3_device_remove),
236 .suspend = el3_suspend, 499 .suspend = el3_suspend,
237 .resume = el3_resume, 500 .resume = el3_resume,
238 } 501 }
239}; 502};
503static int eisa_registered;
240#endif 504#endif
241 505
242#ifdef CONFIG_MCA 506#ifdef CONFIG_MCA
@@ -271,45 +535,9 @@ static struct mca_driver el3_mca_driver = {
271 .resume = el3_resume, 535 .resume = el3_resume,
272 }, 536 },
273}; 537};
538static int mca_registered;
274#endif /* CONFIG_MCA */ 539#endif /* CONFIG_MCA */
275 540
276#if defined(__ISAPNP__)
277static struct isapnp_device_id el3_isapnp_adapters[] __initdata = {
278 { ISAPNP_ANY_ID, ISAPNP_ANY_ID,
279 ISAPNP_VENDOR('T', 'C', 'M'), ISAPNP_FUNCTION(0x5090),
280 (long) "3Com Etherlink III (TP)" },
281 { ISAPNP_ANY_ID, ISAPNP_ANY_ID,
282 ISAPNP_VENDOR('T', 'C', 'M'), ISAPNP_FUNCTION(0x5091),
283 (long) "3Com Etherlink III" },
284 { ISAPNP_ANY_ID, ISAPNP_ANY_ID,
285 ISAPNP_VENDOR('T', 'C', 'M'), ISAPNP_FUNCTION(0x5094),
286 (long) "3Com Etherlink III (combo)" },
287 { ISAPNP_ANY_ID, ISAPNP_ANY_ID,
288 ISAPNP_VENDOR('T', 'C', 'M'), ISAPNP_FUNCTION(0x5095),
289 (long) "3Com Etherlink III (TPO)" },
290 { ISAPNP_ANY_ID, ISAPNP_ANY_ID,
291 ISAPNP_VENDOR('T', 'C', 'M'), ISAPNP_FUNCTION(0x5098),
292 (long) "3Com Etherlink III (TPC)" },
293 { ISAPNP_ANY_ID, ISAPNP_ANY_ID,
294 ISAPNP_VENDOR('P', 'N', 'P'), ISAPNP_FUNCTION(0x80f7),
295 (long) "3Com Etherlink III compatible" },
296 { ISAPNP_ANY_ID, ISAPNP_ANY_ID,
297 ISAPNP_VENDOR('P', 'N', 'P'), ISAPNP_FUNCTION(0x80f8),
298 (long) "3Com Etherlink III compatible" },
299 { } /* terminate list */
300};
301
302static __be16 el3_isapnp_phys_addr[8][3];
303static int nopnp;
304#endif /* __ISAPNP__ */
305
306/* With the driver model introduction for EISA devices, both init
307 * and cleanup have been split :
308 * - EISA devices probe/remove starts in el3_eisa_probe/el3_device_remove
309 * - MCA/ISA still use el3_probe
310 *
311 * Both call el3_common_init/el3_common_remove. */
312
313static int __init el3_common_init(struct net_device *dev) 541static int __init el3_common_init(struct net_device *dev)
314{ 542{
315 struct el3_private *lp = netdev_priv(dev); 543 struct el3_private *lp = netdev_priv(dev);
@@ -360,231 +588,11 @@ static int __init el3_common_init(struct net_device *dev)
360 588
361static void el3_common_remove (struct net_device *dev) 589static void el3_common_remove (struct net_device *dev)
362{ 590{
363 struct el3_private *lp = netdev_priv(dev);
364
365 (void) lp; /* Keep gcc quiet... */
366#if defined(__ISAPNP__)
367 if (lp->type == EL3_PNP)
368 pnp_device_detach(to_pnp_dev(lp->dev));
369#endif
370
371 unregister_netdev (dev); 591 unregister_netdev (dev);
372 release_region(dev->base_addr, EL3_IO_EXTENT); 592 release_region(dev->base_addr, EL3_IO_EXTENT);
373 free_netdev (dev); 593 free_netdev (dev);
374} 594}
375 595
376static int __init el3_probe(int card_idx)
377{
378 struct net_device *dev;
379 struct el3_private *lp;
380 short lrs_state = 0xff, i;
381 int ioaddr, irq, if_port;
382 __be16 phys_addr[3];
383 static int current_tag;
384 int err = -ENODEV;
385#if defined(__ISAPNP__)
386 static int pnp_cards;
387 struct pnp_dev *idev = NULL;
388 int pnp_found = 0;
389
390 if (nopnp == 1)
391 goto no_pnp;
392
393 for (i=0; el3_isapnp_adapters[i].vendor != 0; i++) {
394 int j;
395 while ((idev = pnp_find_dev(NULL,
396 el3_isapnp_adapters[i].vendor,
397 el3_isapnp_adapters[i].function,
398 idev))) {
399 if (pnp_device_attach(idev) < 0)
400 continue;
401 if (pnp_activate_dev(idev) < 0) {
402__again:
403 pnp_device_detach(idev);
404 continue;
405 }
406 if (!pnp_port_valid(idev, 0) || !pnp_irq_valid(idev, 0))
407 goto __again;
408 ioaddr = pnp_port_start(idev, 0);
409 if (!request_region(ioaddr, EL3_IO_EXTENT, "3c509 PnP")) {
410 pnp_device_detach(idev);
411 return -EBUSY;
412 }
413 irq = pnp_irq(idev, 0);
414 if (el3_debug > 3)
415 printk ("ISAPnP reports %s at i/o 0x%x, irq %d\n",
416 (char*) el3_isapnp_adapters[i].driver_data, ioaddr, irq);
417 EL3WINDOW(0);
418 for (j = 0; j < 3; j++)
419 el3_isapnp_phys_addr[pnp_cards][j] =
420 phys_addr[j] =
421 htons(read_eeprom(ioaddr, j));
422 if_port = read_eeprom(ioaddr, 8) >> 14;
423 dev = alloc_etherdev(sizeof (struct el3_private));
424 if (!dev) {
425 release_region(ioaddr, EL3_IO_EXTENT);
426 pnp_device_detach(idev);
427 return -ENOMEM;
428 }
429
430 SET_NETDEV_DEV(dev, &idev->dev);
431 pnp_cards++;
432
433 netdev_boot_setup_check(dev);
434 pnp_found = 1;
435 goto found;
436 }
437 }
438no_pnp:
439#endif /* __ISAPNP__ */
440
441 /* Select an open I/O location at 0x1*0 to do contention select. */
442 for ( ; id_port < 0x200; id_port += 0x10) {
443 if (!request_region(id_port, 1, "3c509"))
444 continue;
445 outb(0x00, id_port);
446 outb(0xff, id_port);
447 if (inb(id_port) & 0x01){
448 release_region(id_port, 1);
449 break;
450 } else
451 release_region(id_port, 1);
452 }
453 if (id_port >= 0x200) {
454 /* Rare -- do we really need a warning? */
455 printk(" WARNING: No I/O port available for 3c509 activation.\n");
456 return -ENODEV;
457 }
458
459 /* Next check for all ISA bus boards by sending the ID sequence to the
460 ID_PORT. We find cards past the first by setting the 'current_tag'
461 on cards as they are found. Cards with their tag set will not
462 respond to subsequent ID sequences. */
463
464 outb(0x00, id_port);
465 outb(0x00, id_port);
466 for(i = 0; i < 255; i++) {
467 outb(lrs_state, id_port);
468 lrs_state <<= 1;
469 lrs_state = lrs_state & 0x100 ? lrs_state ^ 0xcf : lrs_state;
470 }
471
472 /* For the first probe, clear all board's tag registers. */
473 if (current_tag == 0)
474 outb(0xd0, id_port);
475 else /* Otherwise kill off already-found boards. */
476 outb(0xd8, id_port);
477
478 if (id_read_eeprom(7) != 0x6d50) {
479 return -ENODEV;
480 }
481
482 /* Read in EEPROM data, which does contention-select.
483 Only the lowest address board will stay "on-line".
484 3Com got the byte order backwards. */
485 for (i = 0; i < 3; i++) {
486 phys_addr[i] = htons(id_read_eeprom(i));
487 }
488
489#if defined(__ISAPNP__)
490 if (nopnp == 0) {
491 /* The ISA PnP 3c509 cards respond to the ID sequence.
492 This check is needed in order not to register them twice. */
493 for (i = 0; i < pnp_cards; i++) {
494 if (phys_addr[0] == el3_isapnp_phys_addr[i][0] &&
495 phys_addr[1] == el3_isapnp_phys_addr[i][1] &&
496 phys_addr[2] == el3_isapnp_phys_addr[i][2])
497 {
498 if (el3_debug > 3)
499 printk("3c509 with address %02x %02x %02x %02x %02x %02x was found by ISAPnP\n",
500 phys_addr[0] & 0xff, phys_addr[0] >> 8,
501 phys_addr[1] & 0xff, phys_addr[1] >> 8,
502 phys_addr[2] & 0xff, phys_addr[2] >> 8);
503 /* Set the adaptor tag so that the next card can be found. */
504 outb(0xd0 + ++current_tag, id_port);
505 goto no_pnp;
506 }
507 }
508 }
509#endif /* __ISAPNP__ */
510
511 {
512 unsigned int iobase = id_read_eeprom(8);
513 if_port = iobase >> 14;
514 ioaddr = 0x200 + ((iobase & 0x1f) << 4);
515 }
516 irq = id_read_eeprom(9) >> 12;
517
518 dev = alloc_etherdev(sizeof (struct el3_private));
519 if (!dev)
520 return -ENOMEM;
521
522 netdev_boot_setup_check(dev);
523
524 /* Set passed-in IRQ or I/O Addr. */
525 if (dev->irq > 1 && dev->irq < 16)
526 irq = dev->irq;
527
528 if (dev->base_addr) {
529 if (dev->mem_end == 0x3c509 /* Magic key */
530 && dev->base_addr >= 0x200 && dev->base_addr <= 0x3e0)
531 ioaddr = dev->base_addr & 0x3f0;
532 else if (dev->base_addr != ioaddr)
533 goto out;
534 }
535
536 if (!request_region(ioaddr, EL3_IO_EXTENT, "3c509")) {
537 err = -EBUSY;
538 goto out;
539 }
540
541 /* Set the adaptor tag so that the next card can be found. */
542 outb(0xd0 + ++current_tag, id_port);
543
544 /* Activate the adaptor at the EEPROM location. */
545 outb((ioaddr >> 4) | 0xe0, id_port);
546
547 EL3WINDOW(0);
548 if (inw(ioaddr) != 0x6d50)
549 goto out1;
550
551 /* Free the interrupt so that some other card can use it. */
552 outw(0x0f00, ioaddr + WN0_IRQ);
553
554#if defined(__ISAPNP__)
555 found: /* PNP jumps here... */
556#endif /* __ISAPNP__ */
557
558 memcpy(dev->dev_addr, phys_addr, sizeof(phys_addr));
559 dev->base_addr = ioaddr;
560 dev->irq = irq;
561 dev->if_port = if_port;
562 lp = netdev_priv(dev);
563#if defined(__ISAPNP__)
564 lp->dev = &idev->dev;
565 if (pnp_found)
566 lp->type = EL3_PNP;
567#endif
568 err = el3_common_init(dev);
569
570 if (err)
571 goto out1;
572
573 el3_cards++;
574 lp->next_dev = el3_root_dev;
575 el3_root_dev = dev;
576 return 0;
577
578out1:
579#if defined(__ISAPNP__)
580 if (idev)
581 pnp_device_detach(idev);
582#endif
583out:
584 free_netdev(dev);
585 return err;
586}
587
588#ifdef CONFIG_MCA 596#ifdef CONFIG_MCA
589static int __init el3_mca_probe(struct device *device) 597static int __init el3_mca_probe(struct device *device)
590{ 598{
@@ -596,7 +604,6 @@ static int __init el3_mca_probe(struct device *device)
596 * redone for multi-card detection by ZP Gu (zpg@castle.net) 604 * redone for multi-card detection by ZP Gu (zpg@castle.net)
597 * now works as a module */ 605 * now works as a module */
598 606
599 struct el3_private *lp;
600 short i; 607 short i;
601 int ioaddr, irq, if_port; 608 int ioaddr, irq, if_port;
602 u16 phys_addr[3]; 609 u16 phys_addr[3];
@@ -613,7 +620,7 @@ static int __init el3_mca_probe(struct device *device)
613 irq = pos5 & 0x0f; 620 irq = pos5 & 0x0f;
614 621
615 622
616 printk("3c529: found %s at slot %d\n", 623 printk(KERN_INFO "3c529: found %s at slot %d\n",
617 el3_mca_adapter_names[mdev->index], slot + 1); 624 el3_mca_adapter_names[mdev->index], slot + 1);
618 625
619 /* claim the slot */ 626 /* claim the slot */
@@ -626,7 +633,7 @@ static int __init el3_mca_probe(struct device *device)
626 irq = mca_device_transform_irq(mdev, irq); 633 irq = mca_device_transform_irq(mdev, irq);
627 ioaddr = mca_device_transform_ioport(mdev, ioaddr); 634 ioaddr = mca_device_transform_ioport(mdev, ioaddr);
628 if (el3_debug > 2) { 635 if (el3_debug > 2) {
629 printk("3c529: irq %d ioaddr 0x%x ifport %d\n", irq, ioaddr, if_port); 636 printk(KERN_DEBUG "3c529: irq %d ioaddr 0x%x ifport %d\n", irq, ioaddr, if_port);
630 } 637 }
631 EL3WINDOW(0); 638 EL3WINDOW(0);
632 for (i = 0; i < 3; i++) { 639 for (i = 0; i < 3; i++) {
@@ -641,13 +648,7 @@ static int __init el3_mca_probe(struct device *device)
641 648
642 netdev_boot_setup_check(dev); 649 netdev_boot_setup_check(dev);
643 650
644 memcpy(dev->dev_addr, phys_addr, sizeof(phys_addr)); 651 el3_dev_fill(dev, phys_addr, ioaddr, irq, if_port, EL3_MCA);
645 dev->base_addr = ioaddr;
646 dev->irq = irq;
647 dev->if_port = if_port;
648 lp = netdev_priv(dev);
649 lp->dev = device;
650 lp->type = EL3_MCA;
651 device->driver_data = dev; 652 device->driver_data = dev;
652 err = el3_common_init(dev); 653 err = el3_common_init(dev);
653 654
@@ -657,7 +658,7 @@ static int __init el3_mca_probe(struct device *device)
657 return -ENOMEM; 658 return -ENOMEM;
658 } 659 }
659 660
660 el3_cards++; 661 el3_devs[el3_cards++] = dev;
661 return 0; 662 return 0;
662} 663}
663 664
@@ -666,7 +667,6 @@ static int __init el3_mca_probe(struct device *device)
666#ifdef CONFIG_EISA 667#ifdef CONFIG_EISA
667static int __init el3_eisa_probe (struct device *device) 668static int __init el3_eisa_probe (struct device *device)
668{ 669{
669 struct el3_private *lp;
670 short i; 670 short i;
671 int ioaddr, irq, if_port; 671 int ioaddr, irq, if_port;
672 u16 phys_addr[3]; 672 u16 phys_addr[3];
@@ -678,7 +678,7 @@ static int __init el3_eisa_probe (struct device *device)
678 edev = to_eisa_device (device); 678 edev = to_eisa_device (device);
679 ioaddr = edev->base_addr; 679 ioaddr = edev->base_addr;
680 680
681 if (!request_region(ioaddr, EL3_IO_EXTENT, "3c509")) 681 if (!request_region(ioaddr, EL3_IO_EXTENT, "3c579-eisa"))
682 return -EBUSY; 682 return -EBUSY;
683 683
684 /* Change the register set to the configuration window 0. */ 684 /* Change the register set to the configuration window 0. */
@@ -700,13 +700,7 @@ static int __init el3_eisa_probe (struct device *device)
700 700
701 netdev_boot_setup_check(dev); 701 netdev_boot_setup_check(dev);
702 702
703 memcpy(dev->dev_addr, phys_addr, sizeof(phys_addr)); 703 el3_dev_fill(dev, phys_addr, ioaddr, irq, if_port, EL3_EISA);
704 dev->base_addr = ioaddr;
705 dev->irq = irq;
706 dev->if_port = if_port;
707 lp = netdev_priv(dev);
708 lp->dev = device;
709 lp->type = EL3_EISA;
710 eisa_set_drvdata (edev, dev); 704 eisa_set_drvdata (edev, dev);
711 err = el3_common_init(dev); 705 err = el3_common_init(dev);
712 706
@@ -716,12 +710,11 @@ static int __init el3_eisa_probe (struct device *device)
716 return err; 710 return err;
717 } 711 }
718 712
719 el3_cards++; 713 el3_devs[el3_cards++] = dev;
720 return 0; 714 return 0;
721} 715}
722#endif 716#endif
723 717
724#if defined(CONFIG_EISA) || defined(CONFIG_MCA)
725/* This remove works for all device types. 718/* This remove works for all device types.
726 * 719 *
727 * The net dev must be stored in the driver_data field */ 720 * The net dev must be stored in the driver_data field */
@@ -734,7 +727,6 @@ static int __devexit el3_device_remove (struct device *device)
734 el3_common_remove (dev); 727 el3_common_remove (dev);
735 return 0; 728 return 0;
736} 729}
737#endif
738 730
739/* Read a word from the EEPROM using the regular EEPROM access register. 731/* Read a word from the EEPROM using the regular EEPROM access register.
740 Assume that we are in register window zero. 732 Assume that we are in register window zero.
@@ -749,7 +741,7 @@ static ushort read_eeprom(int ioaddr, int index)
749} 741}
750 742
751/* Read a word from the EEPROM when in the ISA ID probe state. */ 743/* Read a word from the EEPROM when in the ISA ID probe state. */
752static ushort __init id_read_eeprom(int index) 744static ushort id_read_eeprom(int index)
753{ 745{
754 int bit, word = 0; 746 int bit, word = 0;
755 747
@@ -765,7 +757,7 @@ static ushort __init id_read_eeprom(int index)
765 word = (word << 1) + (inb(id_port) & 0x01); 757 word = (word << 1) + (inb(id_port) & 0x01);
766 758
767 if (el3_debug > 3) 759 if (el3_debug > 3)
768 printk(" 3c509 EEPROM word %d %#4.4x.\n", index, word); 760 printk(KERN_DEBUG " 3c509 EEPROM word %d %#4.4x.\n", index, word);
769 761
770 return word; 762 return word;
771} 763}
@@ -787,13 +779,13 @@ el3_open(struct net_device *dev)
787 779
788 EL3WINDOW(0); 780 EL3WINDOW(0);
789 if (el3_debug > 3) 781 if (el3_debug > 3)
790 printk("%s: Opening, IRQ %d status@%x %4.4x.\n", dev->name, 782 printk(KERN_DEBUG "%s: Opening, IRQ %d status@%x %4.4x.\n", dev->name,
791 dev->irq, ioaddr + EL3_STATUS, inw(ioaddr + EL3_STATUS)); 783 dev->irq, ioaddr + EL3_STATUS, inw(ioaddr + EL3_STATUS));
792 784
793 el3_up(dev); 785 el3_up(dev);
794 786
795 if (el3_debug > 3) 787 if (el3_debug > 3)
796 printk("%s: Opened 3c509 IRQ %d status %4.4x.\n", 788 printk(KERN_DEBUG "%s: Opened 3c509 IRQ %d status %4.4x.\n",
797 dev->name, dev->irq, inw(ioaddr + EL3_STATUS)); 789 dev->name, dev->irq, inw(ioaddr + EL3_STATUS));
798 790
799 return 0; 791 return 0;
@@ -806,7 +798,7 @@ el3_tx_timeout (struct net_device *dev)
806 int ioaddr = dev->base_addr; 798 int ioaddr = dev->base_addr;
807 799
808 /* Transmitter timeout, serious problems. */ 800 /* Transmitter timeout, serious problems. */
809 printk("%s: transmit timed out, Tx_status %2.2x status %4.4x " 801 printk(KERN_WARNING "%s: transmit timed out, Tx_status %2.2x status %4.4x "
810 "Tx FIFO room %d.\n", 802 "Tx FIFO room %d.\n",
811 dev->name, inb(ioaddr + TX_STATUS), inw(ioaddr + EL3_STATUS), 803 dev->name, inb(ioaddr + TX_STATUS), inw(ioaddr + EL3_STATUS),
812 inw(ioaddr + TX_FREE)); 804 inw(ioaddr + TX_FREE));
@@ -831,7 +823,7 @@ el3_start_xmit(struct sk_buff *skb, struct net_device *dev)
831 lp->stats.tx_bytes += skb->len; 823 lp->stats.tx_bytes += skb->len;
832 824
833 if (el3_debug > 4) { 825 if (el3_debug > 4) {
834 printk("%s: el3_start_xmit(length = %u) called, status %4.4x.\n", 826 printk(KERN_DEBUG "%s: el3_start_xmit(length = %u) called, status %4.4x.\n",
835 dev->name, skb->len, inw(ioaddr + EL3_STATUS)); 827 dev->name, skb->len, inw(ioaddr + EL3_STATUS));
836 } 828 }
837#if 0 829#if 0
@@ -840,7 +832,7 @@ el3_start_xmit(struct sk_buff *skb, struct net_device *dev)
840 ushort status = inw(ioaddr + EL3_STATUS); 832 ushort status = inw(ioaddr + EL3_STATUS);
841 if (status & 0x0001 /* IRQ line active, missed one. */ 833 if (status & 0x0001 /* IRQ line active, missed one. */
842 && inw(ioaddr + EL3_STATUS) & 1) { /* Make sure. */ 834 && inw(ioaddr + EL3_STATUS) & 1) { /* Make sure. */
843 printk("%s: Missed interrupt, status then %04x now %04x" 835 printk(KERN_DEBUG "%s: Missed interrupt, status then %04x now %04x"
844 " Tx %2.2x Rx %4.4x.\n", dev->name, status, 836 " Tx %2.2x Rx %4.4x.\n", dev->name, status,
845 inw(ioaddr + EL3_STATUS), inb(ioaddr + TX_STATUS), 837 inw(ioaddr + EL3_STATUS), inb(ioaddr + TX_STATUS),
846 inw(ioaddr + RX_STATUS)); 838 inw(ioaddr + RX_STATUS));
@@ -914,7 +906,7 @@ el3_interrupt(int irq, void *dev_id)
914 906
915 if (el3_debug > 4) { 907 if (el3_debug > 4) {
916 status = inw(ioaddr + EL3_STATUS); 908 status = inw(ioaddr + EL3_STATUS);
917 printk("%s: interrupt, status %4.4x.\n", dev->name, status); 909 printk(KERN_DEBUG "%s: interrupt, status %4.4x.\n", dev->name, status);
918 } 910 }
919 911
920 while ((status = inw(ioaddr + EL3_STATUS)) & 912 while ((status = inw(ioaddr + EL3_STATUS)) &
@@ -925,7 +917,7 @@ el3_interrupt(int irq, void *dev_id)
925 917
926 if (status & TxAvailable) { 918 if (status & TxAvailable) {
927 if (el3_debug > 5) 919 if (el3_debug > 5)
928 printk(" TX room bit was handled.\n"); 920 printk(KERN_DEBUG " TX room bit was handled.\n");
929 /* There's room in the FIFO for a full-sized packet. */ 921 /* There's room in the FIFO for a full-sized packet. */
930 outw(AckIntr | TxAvailable, ioaddr + EL3_CMD); 922 outw(AckIntr | TxAvailable, ioaddr + EL3_CMD);
931 netif_wake_queue (dev); 923 netif_wake_queue (dev);
@@ -964,7 +956,7 @@ el3_interrupt(int irq, void *dev_id)
964 } 956 }
965 957
966 if (--i < 0) { 958 if (--i < 0) {
967 printk("%s: Infinite loop in interrupt, status %4.4x.\n", 959 printk(KERN_ERR "%s: Infinite loop in interrupt, status %4.4x.\n",
968 dev->name, status); 960 dev->name, status);
969 /* Clear all interrupts. */ 961 /* Clear all interrupts. */
970 outw(AckIntr | 0xFF, ioaddr + EL3_CMD); 962 outw(AckIntr | 0xFF, ioaddr + EL3_CMD);
@@ -975,7 +967,7 @@ el3_interrupt(int irq, void *dev_id)
975 } 967 }
976 968
977 if (el3_debug > 4) { 969 if (el3_debug > 4) {
978 printk("%s: exiting interrupt, status %4.4x.\n", dev->name, 970 printk(KERN_DEBUG "%s: exiting interrupt, status %4.4x.\n", dev->name,
979 inw(ioaddr + EL3_STATUS)); 971 inw(ioaddr + EL3_STATUS));
980 } 972 }
981 spin_unlock(&lp->lock); 973 spin_unlock(&lp->lock);
@@ -1450,7 +1442,7 @@ el3_up(struct net_device *dev)
1450} 1442}
1451 1443
1452/* Power Management support functions */ 1444/* Power Management support functions */
1453#ifdef EL3_SUSPEND 1445#ifdef CONFIG_PM
1454 1446
1455static int 1447static int
1456el3_suspend(struct device *pdev, pm_message_t state) 1448el3_suspend(struct device *pdev, pm_message_t state)
@@ -1500,79 +1492,102 @@ el3_resume(struct device *pdev)
1500 return 0; 1492 return 0;
1501} 1493}
1502 1494
1503#endif /* EL3_SUSPEND */ 1495#endif /* CONFIG_PM */
1504
1505/* Parameters that may be passed into the module. */
1506static int debug = -1;
1507static int irq[] = {-1, -1, -1, -1, -1, -1, -1, -1};
1508static int xcvr[] = {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1};
1509 1496
1510module_param(debug,int, 0); 1497module_param(debug,int, 0);
1511module_param_array(irq, int, NULL, 0); 1498module_param_array(irq, int, NULL, 0);
1512module_param_array(xcvr, int, NULL, 0);
1513module_param(max_interrupt_work, int, 0); 1499module_param(max_interrupt_work, int, 0);
1514MODULE_PARM_DESC(debug, "debug level (0-6)"); 1500MODULE_PARM_DESC(debug, "debug level (0-6)");
1515MODULE_PARM_DESC(irq, "IRQ number(s) (assigned)"); 1501MODULE_PARM_DESC(irq, "IRQ number(s) (assigned)");
 MODULE_PARM_DESC(xcvr,"transceiver(s) (0=internal, 1=external)");
 MODULE_PARM_DESC(max_interrupt_work, "maximum events handled per interrupt");
-#if defined(__ISAPNP__)
+#ifdef CONFIG_PNP
 module_param(nopnp, int, 0);
 MODULE_PARM_DESC(nopnp, "disable ISA PnP support (0-1)");
-MODULE_DEVICE_TABLE(isapnp, el3_isapnp_adapters);
-#endif /* __ISAPNP__ */
-MODULE_DESCRIPTION("3Com Etherlink III (3c509, 3c509B) ISA/PnP ethernet driver");
+#endif /* CONFIG_PNP */
+MODULE_DESCRIPTION("3Com Etherlink III (3c509, 3c509B, 3c529, 3c579) ethernet driver");
 MODULE_LICENSE("GPL");
 
 static int __init el3_init_module(void)
 {
 	int ret = 0;
-	el3_cards = 0;
 
 	if (debug >= 0)
 		el3_debug = debug;
 
-	el3_root_dev = NULL;
-	while (el3_probe(el3_cards) == 0) {
-		if (irq[el3_cards] > 1)
-			el3_root_dev->irq = irq[el3_cards];
-		if (xcvr[el3_cards] >= 0)
-			el3_root_dev->if_port = xcvr[el3_cards];
-		el3_cards++;
+#ifdef CONFIG_PNP
+	if (!nopnp) {
+		ret = pnp_register_driver(&el3_pnp_driver);
+		if (!ret)
+			pnp_registered = 1;
 	}
-
+#endif
+	/* Select an open I/O location at 0x1*0 to do ISA contention select. */
+	/* Start with 0x110 to avoid some sound cards.*/
+	for (id_port = 0x110 ; id_port < 0x200; id_port += 0x10) {
+		if (!request_region(id_port, 1, "3c509-control"))
+			continue;
+		outb(0x00, id_port);
+		outb(0xff, id_port);
+		if (inb(id_port) & 0x01)
+			break;
+		else
+			release_region(id_port, 1);
+	}
+	if (id_port >= 0x200) {
+		id_port = 0;
+		printk(KERN_ERR "No I/O port available for 3c509 activation.\n");
+	} else {
+		ret = isa_register_driver(&el3_isa_driver, EL3_MAX_CARDS);
+		if (!ret)
+			isa_registered = 1;
+	}
 #ifdef CONFIG_EISA
 	ret = eisa_driver_register(&el3_eisa_driver);
+	if (!ret)
+		eisa_registered = 1;
 #endif
 #ifdef CONFIG_MCA
-	{
-		int err = mca_register_driver(&el3_mca_driver);
-		if (ret == 0)
-			ret = err;
-	}
+	ret = mca_register_driver(&el3_mca_driver);
+	if (!ret)
+		mca_registered = 1;
+#endif
+
+#ifdef CONFIG_PNP
+	if (pnp_registered)
+		ret = 0;
+#endif
+	if (isa_registered)
+		ret = 0;
+#ifdef CONFIG_EISA
+	if (eisa_registered)
+		ret = 0;
+#endif
+#ifdef CONFIG_MCA
+	if (mca_registered)
+		ret = 0;
 #endif
 	return ret;
 }
 
 static void __exit el3_cleanup_module(void)
 {
-	struct net_device *next_dev;
-
-	while (el3_root_dev) {
-		struct el3_private *lp = netdev_priv(el3_root_dev);
-
-		next_dev = lp->next_dev;
-		el3_common_remove (el3_root_dev);
-		el3_root_dev = next_dev;
-	}
-
+#ifdef CONFIG_PNP
+	if (pnp_registered)
+		pnp_unregister_driver(&el3_pnp_driver);
+#endif
+	if (isa_registered)
+		isa_unregister_driver(&el3_isa_driver);
+	if (id_port)
+		release_region(id_port, 1);
 #ifdef CONFIG_EISA
-	eisa_driver_unregister (&el3_eisa_driver);
+	if (eisa_registered)
+		eisa_driver_unregister(&el3_eisa_driver);
 #endif
 #ifdef CONFIG_MCA
-	mca_unregister_driver(&el3_mca_driver);
+	if (mca_registered)
+		mca_unregister_driver(&el3_mca_driver);
 #endif
 }
 
 module_init (el3_init_module);
 module_exit (el3_cleanup_module);
-
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c
index be6e918456d9..53bd903d2321 100644
--- a/drivers/net/8139too.c
+++ b/drivers/net/8139too.c
@@ -966,8 +966,8 @@ static int __devinit rtl8139_init_one (struct pci_dev *pdev,
 
 	addr_len = read_eeprom (ioaddr, 0, 8) == 0x8129 ? 8 : 6;
 	for (i = 0; i < 3; i++)
-		((u16 *) (dev->dev_addr))[i] =
-			le16_to_cpu (read_eeprom (ioaddr, i + 7, addr_len));
+		((__le16 *) (dev->dev_addr))[i] =
+			cpu_to_le16(read_eeprom (ioaddr, i + 7, addr_len));
 	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
 
 	/* The Rtl8139-specific entries in the device structure. */
@@ -1373,8 +1373,8 @@ static void rtl8139_hw_start (struct net_device *dev)
 	/* unlock Config[01234] and BMCR register writes */
 	RTL_W8_F (Cfg9346, Cfg9346_Unlock);
 	/* Restore our idea of the MAC address. */
-	RTL_W32_F (MAC0 + 0, cpu_to_le32 (*(u32 *) (dev->dev_addr + 0)));
-	RTL_W32_F (MAC0 + 4, cpu_to_le32 (*(u32 *) (dev->dev_addr + 4)));
+	RTL_W32_F (MAC0 + 0, le32_to_cpu (*(__le32 *) (dev->dev_addr + 0)));
+	RTL_W32_F (MAC0 + 4, le16_to_cpu (*(__le16 *) (dev->dev_addr + 4)));
 
 	/* Must enable Tx/Rx before setting transfer thresholds! */
 	RTL_W8 (ChipCmd, CmdRxEnb | CmdTxEnb);
@@ -1945,7 +1945,7 @@ static int rtl8139_rx(struct net_device *dev, struct rtl8139_private *tp,
 		rmb();
 
 		/* read size+status of next frame from DMA ring buffer */
-		rx_status = le32_to_cpu (*(u32 *) (rx_ring + ring_offset));
+		rx_status = le32_to_cpu (*(__le32 *) (rx_ring + ring_offset));
 		rx_size = rx_status >> 16;
 		pkt_size = rx_size - 4;
 
diff --git a/drivers/net/8390.c b/drivers/net/8390.c
index a82807641dcf..a499e867f0f4 100644
--- a/drivers/net/8390.c
+++ b/drivers/net/8390.c
@@ -48,14 +48,16 @@ EXPORT_SYMBOL(__alloc_ei_netdev);
 
 #if defined(MODULE)
 
-int init_module(void)
+static int __init ns8390_module_init(void)
 {
 	return 0;
 }
 
-void cleanup_module(void)
+static void __exit ns8390_module_exit(void)
 {
 }
 
+module_init(ns8390_module_init);
+module_exit(ns8390_module_exit);
 #endif /* MODULE */
 MODULE_LICENSE("GPL");
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 3a0b20afec7b..45c3a208d93f 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -467,6 +467,13 @@ config SNI_82596
 	  Say Y here to support the on-board Intel 82596 ethernet controller
 	  built into SNI RM machines.
 
+config KORINA
+	tristate "Korina (IDT RC32434) Ethernet support"
+	depends on NET_ETHERNET && MIKROTIK_RB500
+	help
+	  If you have a Mikrotik RouterBoard 500 or IDT RC32434
+	  based system say Y. Otherwise say N.
+
 config MIPS_JAZZ_SONIC
 	tristate "MIPS JAZZ onboard SONIC Ethernet support"
 	depends on MACH_JAZZ
@@ -1431,7 +1438,7 @@ config CS89x0
 config TC35815
 	tristate "TOSHIBA TC35815 Ethernet support"
 	depends on NET_PCI && PCI && MIPS
-	select MII
+	select PHYLIB
 
 config EEPRO100
 	tristate "EtherExpressPro/100 support (eepro100, original Becker driver)"
@@ -2220,93 +2227,6 @@ config SKY2_DEBUG
 
 	  If unsure, say N.
 
-config SK98LIN
-	tristate "Marvell Yukon Chipset / SysKonnect SK-98xx Support (DEPRECATED)"
-	depends on PCI
-	---help---
-	  Say Y here if you have a Marvell Yukon or SysKonnect SK-98xx/SK-95xx
-	  compliant Gigabit Ethernet Adapter.
-
-	  This driver supports the original Yukon chipset. This driver is
-	  deprecated and will be removed from the kernel in the near future,
-	  it has been replaced by the skge driver. skge is cleaner and
-	  seems to work better.
-
-	  This driver does not support the newer Yukon2 chipset. A separate
-	  driver, sky2, is provided to support Yukon2-based adapters.
-
-	  The following adapters are supported by this driver:
-	    - 3Com 3C940 Gigabit LOM Ethernet Adapter
-	    - 3Com 3C941 Gigabit LOM Ethernet Adapter
-	    - Allied Telesyn AT-2970LX Gigabit Ethernet Adapter
-	    - Allied Telesyn AT-2970LX/2SC Gigabit Ethernet Adapter
-	    - Allied Telesyn AT-2970SX Gigabit Ethernet Adapter
-	    - Allied Telesyn AT-2970SX/2SC Gigabit Ethernet Adapter
-	    - Allied Telesyn AT-2970TX Gigabit Ethernet Adapter
-	    - Allied Telesyn AT-2970TX/2TX Gigabit Ethernet Adapter
-	    - Allied Telesyn AT-2971SX Gigabit Ethernet Adapter
-	    - Allied Telesyn AT-2971T Gigabit Ethernet Adapter
-	    - Belkin Gigabit Desktop Card 10/100/1000Base-T Adapter, Copper RJ-45
-	    - EG1032 v2 Instant Gigabit Network Adapter
-	    - EG1064 v2 Instant Gigabit Network Adapter
-	    - Marvell 88E8001 Gigabit LOM Ethernet Adapter (Abit)
-	    - Marvell 88E8001 Gigabit LOM Ethernet Adapter (Albatron)
-	    - Marvell 88E8001 Gigabit LOM Ethernet Adapter (Asus)
-	    - Marvell 88E8001 Gigabit LOM Ethernet Adapter (ECS)
-	    - Marvell 88E8001 Gigabit LOM Ethernet Adapter (Epox)
-	    - Marvell 88E8001 Gigabit LOM Ethernet Adapter (Foxconn)
-	    - Marvell 88E8001 Gigabit LOM Ethernet Adapter (Gigabyte)
-	    - Marvell 88E8001 Gigabit LOM Ethernet Adapter (Iwill)
-	    - Marvell 88E8050 Gigabit LOM Ethernet Adapter (Intel)
-	    - Marvell RDK-8001 Adapter
-	    - Marvell RDK-8002 Adapter
-	    - Marvell RDK-8003 Adapter
-	    - Marvell RDK-8004 Adapter
-	    - Marvell RDK-8006 Adapter
-	    - Marvell RDK-8007 Adapter
-	    - Marvell RDK-8008 Adapter
-	    - Marvell RDK-8009 Adapter
-	    - Marvell RDK-8010 Adapter
-	    - Marvell RDK-8011 Adapter
-	    - Marvell RDK-8012 Adapter
-	    - Marvell RDK-8052 Adapter
-	    - Marvell Yukon Gigabit Ethernet 10/100/1000Base-T Adapter (32 bit)
-	    - Marvell Yukon Gigabit Ethernet 10/100/1000Base-T Adapter (64 bit)
-	    - N-Way PCI-Bus Giga-Card 1000/100/10Mbps(L)
-	    - SK-9521 10/100/1000Base-T Adapter
-	    - SK-9521 V2.0 10/100/1000Base-T Adapter
-	    - SK-9821 Gigabit Ethernet Server Adapter (SK-NET GE-T)
-	    - SK-9821 V2.0 Gigabit Ethernet 10/100/1000Base-T Adapter
-	    - SK-9822 Gigabit Ethernet Server Adapter (SK-NET GE-T dual link)
-	    - SK-9841 Gigabit Ethernet Server Adapter (SK-NET GE-LX)
-	    - SK-9841 V2.0 Gigabit Ethernet 1000Base-LX Adapter
-	    - SK-9842 Gigabit Ethernet Server Adapter (SK-NET GE-LX dual link)
-	    - SK-9843 Gigabit Ethernet Server Adapter (SK-NET GE-SX)
-	    - SK-9843 V2.0 Gigabit Ethernet 1000Base-SX Adapter
-	    - SK-9844 Gigabit Ethernet Server Adapter (SK-NET GE-SX dual link)
-	    - SK-9851 V2.0 Gigabit Ethernet 1000Base-SX Adapter
-	    - SK-9861 Gigabit Ethernet Server Adapter (SK-NET GE-SX Volition)
-	    - SK-9861 V2.0 Gigabit Ethernet 1000Base-SX Adapter
-	    - SK-9862 Gigabit Ethernet Server Adapter (SK-NET GE-SX Volition dual link)
-	    - SK-9871 Gigabit Ethernet Server Adapter (SK-NET GE-ZX)
-	    - SK-9871 V2.0 Gigabit Ethernet 1000Base-ZX Adapter
-	    - SK-9872 Gigabit Ethernet Server Adapter (SK-NET GE-ZX dual link)
-	    - SMC EZ Card 1000 (SMC9452TXV.2)
-
-	  The adapters support Jumbo Frames.
-	  The dual link adapters support link-failover and dual port features.
-	  Both Marvell Yukon and SysKonnect SK-98xx/SK-95xx adapters support
-	  the scatter-gather functionality with sendfile(). Please refer to
-	  <file:Documentation/networking/sk98lin.txt> for more information about
-	  optional driver parameters.
-	  Questions concerning this driver may be addressed to:
-	      <linux@syskonnect.de>
-
-	  If you want to compile this driver as a module ( = code which can be
-	  inserted in and removed from the running kernel whenever you want),
-	  say M here and read <file:Documentation/kbuild/modules.txt>. The module will
-	  be called sk98lin. This is recommended.
-
 config VIA_VELOCITY
 	tristate "VIA Velocity support"
 	depends on PCI
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 3b1ea321dc05..4d71729e85e5 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -15,7 +15,7 @@ obj-$(CONFIG_CHELSIO_T3) += cxgb3/
 obj-$(CONFIG_EHEA) += ehea/
 obj-$(CONFIG_CAN) += can/
 obj-$(CONFIG_BONDING) += bonding/
-obj-$(CONFIG_ATL1) += atl1/
+obj-$(CONFIG_ATL1) += atlx/
 obj-$(CONFIG_GIANFAR) += gianfar_driver.o
 obj-$(CONFIG_TEHUTI) += tehuti.o
 
@@ -75,7 +75,6 @@ ps3_gelic-objs += ps3_gelic_net.o $(gelic_wireless-y)
 obj-$(CONFIG_TC35815) += tc35815.o
 obj-$(CONFIG_SKGE) += skge.o
 obj-$(CONFIG_SKY2) += sky2.o
-obj-$(CONFIG_SK98LIN) += sk98lin/
 obj-$(CONFIG_SKFP) += skfp/
 obj-$(CONFIG_VIA_RHINE) += via-rhine.o
 obj-$(CONFIG_VIA_VELOCITY) += via-velocity.o
@@ -191,6 +190,7 @@ obj-$(CONFIG_ZORRO8390) += zorro8390.o
 obj-$(CONFIG_HPLANCE) += hplance.o 7990.o
 obj-$(CONFIG_MVME147_NET) += mvme147.o 7990.o
 obj-$(CONFIG_EQUALIZER) += eql.o
+obj-$(CONFIG_KORINA) += korina.o
 obj-$(CONFIG_MIPS_JAZZ_SONIC) += jazzsonic.o
 obj-$(CONFIG_MIPS_AU1X00_ENET) += au1000_eth.o
 obj-$(CONFIG_MIPS_SIM_NET) += mipsnet.o
diff --git a/drivers/net/appletalk/cops.c b/drivers/net/appletalk/cops.c
index 92c3a4cf0bb1..65b901ebfd62 100644
--- a/drivers/net/appletalk/cops.c
+++ b/drivers/net/appletalk/cops.c
@@ -1010,7 +1010,7 @@ module_param(io, int, 0);
 module_param(irq, int, 0);
 module_param(board_type, int, 0);
 
-int __init init_module(void)
+static int __init cops_module_init(void)
 {
 	if (io == 0)
 		printk(KERN_WARNING "%s: You shouldn't autoprobe with insmod\n",
@@ -1021,12 +1021,14 @@ int __init init_module(void)
 	return 0;
 }
 
-void __exit cleanup_module(void)
+static void __exit cops_module_exit(void)
 {
 	unregister_netdev(cops_dev);
 	cleanup_card(cops_dev);
 	free_netdev(cops_dev);
 }
+module_init(cops_module_init);
+module_exit(cops_module_exit);
 #endif /* MODULE */
 
 /*
diff --git a/drivers/net/arcnet/arcnet.c b/drivers/net/arcnet/arcnet.c
index c59c8067de99..bdc4c0bb56d9 100644
--- a/drivers/net/arcnet/arcnet.c
+++ b/drivers/net/arcnet/arcnet.c
@@ -940,7 +940,7 @@ irqreturn_t arcnet_interrupt(int irq, void *dev_id)
 
 		/* is the RECON info empty or old? */
 		if (!lp->first_recon || !lp->last_recon ||
-		    jiffies - lp->last_recon > HZ * 10) {
+		    time_after(jiffies, lp->last_recon + HZ * 10)) {
 			if (lp->network_down)
 				BUGMSG(D_NORMAL, "reconfiguration detected: cabling restored?\n");
 			lp->first_recon = lp->last_recon = jiffies;
@@ -974,7 +974,8 @@ irqreturn_t arcnet_interrupt(int irq, void *dev_id)
 				lp->num_recons = 1;
 			}
 		}
-	} else if (lp->network_down && jiffies - lp->last_recon > HZ * 10) {
+	} else if (lp->network_down &&
+		   time_after(jiffies, lp->last_recon + HZ * 10)) {
 		if (lp->network_down)
 			BUGMSG(D_NORMAL, "cabling restored?\n");
 		lp->first_recon = lp->last_recon = 0;
diff --git a/drivers/net/arcnet/com20020.c b/drivers/net/arcnet/com20020.c
index 7cf0a2511697..8b51313b1300 100644
--- a/drivers/net/arcnet/com20020.c
+++ b/drivers/net/arcnet/com20020.c
@@ -348,14 +348,15 @@ MODULE_LICENSE("GPL");
 
 #ifdef MODULE
 
-int init_module(void)
+static int __init com20020_module_init(void)
 {
 	BUGLVL(D_NORMAL) printk(VERSION);
 	return 0;
 }
 
-void cleanup_module(void)
+static void __exit com20020_module_exit(void)
 {
 }
-
+module_init(com20020_module_init);
+module_exit(com20020_module_exit);
 #endif /* MODULE */
diff --git a/drivers/net/at1700.c b/drivers/net/at1700.c
index 24d81f922533..7e874d485d24 100644
--- a/drivers/net/at1700.c
+++ b/drivers/net/at1700.c
@@ -881,7 +881,7 @@ MODULE_PARM_DESC(io, "AT1700/FMV18X I/O base address");
 MODULE_PARM_DESC(irq, "AT1700/FMV18X IRQ number");
 MODULE_PARM_DESC(net_debug, "AT1700/FMV18X debug level (0-6)");
 
-int __init init_module(void)
+static int __init at1700_module_init(void)
 {
 	if (io == 0)
 		printk("at1700: You should not use auto-probing with insmod!\n");
@@ -891,13 +891,14 @@ int __init init_module(void)
 	return 0;
 }
 
-void __exit
-cleanup_module(void)
+static void __exit at1700_module_exit(void)
 {
 	unregister_netdev(dev_at1700);
 	cleanup_card(dev_at1700);
 	free_netdev(dev_at1700);
 }
+module_init(at1700_module_init);
+module_exit(at1700_module_exit);
 #endif /* MODULE */
 MODULE_LICENSE("GPL");
 
diff --git a/drivers/net/atarilance.c b/drivers/net/atarilance.c
index 13c293b286de..4cceaac8863a 100644
--- a/drivers/net/atarilance.c
+++ b/drivers/net/atarilance.c
@@ -1155,7 +1155,7 @@ static int lance_set_mac_address( struct net_device *dev, void *addr )
 #ifdef MODULE
 static struct net_device *atarilance_dev;
 
-int __init init_module(void)
+static int __init atarilance_module_init(void)
 {
 	atarilance_dev = atarilance_probe(-1);
 	if (IS_ERR(atarilance_dev))
@@ -1163,13 +1163,14 @@ int __init init_module(void)
 	return 0;
 }
 
-void __exit cleanup_module(void)
+static void __exit atarilance_module_exit(void)
 {
 	unregister_netdev(atarilance_dev);
 	free_irq(atarilance_dev->irq, atarilance_dev);
 	free_netdev(atarilance_dev);
 }
-
+module_init(atarilance_module_init);
+module_exit(atarilance_module_exit);
 #endif /* MODULE */
 
 
diff --git a/drivers/net/atl1/Makefile b/drivers/net/atl1/Makefile
deleted file mode 100644
index a6b707e4e69e..000000000000
--- a/drivers/net/atl1/Makefile
+++ /dev/null
@@ -1,2 +0,0 @@
-obj-$(CONFIG_ATL1) += atl1.o
-atl1-y += atl1_main.o atl1_hw.o atl1_ethtool.o atl1_param.o
diff --git a/drivers/net/atl1/atl1.h b/drivers/net/atl1/atl1.h
deleted file mode 100644
index ff4765f6c3de..000000000000
--- a/drivers/net/atl1/atl1.h
+++ /dev/null
@@ -1,286 +0,0 @@
1/*
2 * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved.
3 * Copyright(c) 2006 Chris Snook <csnook@redhat.com>
4 * Copyright(c) 2006 Jay Cliburn <jcliburn@gmail.com>
5 *
6 * Derived from Intel e1000 driver
7 * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the Free
11 * Software Foundation; either version 2 of the License, or (at your option)
12 * any later version.
13 *
14 * This program is distributed in the hope that it will be useful, but WITHOUT
15 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
17 * more details.
18 *
19 * You should have received a copy of the GNU General Public License along with
20 * this program; if not, write to the Free Software Foundation, Inc., 59
21 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
22 */
23
24#ifndef _ATL1_H_
25#define _ATL1_H_
26
27#include <linux/types.h>
28#include <linux/if_vlan.h>
29
30#include "atl1_hw.h"
31
32/* function prototypes needed by multiple files */
33s32 atl1_up(struct atl1_adapter *adapter);
34void atl1_down(struct atl1_adapter *adapter);
35int atl1_reset(struct atl1_adapter *adapter);
36s32 atl1_setup_ring_resources(struct atl1_adapter *adapter);
37void atl1_free_ring_resources(struct atl1_adapter *adapter);
38
39extern char atl1_driver_name[];
40extern char atl1_driver_version[];
41extern const struct ethtool_ops atl1_ethtool_ops;
42
43struct atl1_adapter;
44
45#define ATL1_MAX_INTR 3
46#define ATL1_MAX_TX_BUF_LEN 0x3000 /* 12288 bytes */
47
48#define ATL1_DEFAULT_TPD 256
49#define ATL1_MAX_TPD 1024
50#define ATL1_MIN_TPD 64
51#define ATL1_DEFAULT_RFD 512
52#define ATL1_MIN_RFD 128
53#define ATL1_MAX_RFD 2048
54
55#define ATL1_GET_DESC(R, i, type) (&(((type *)((R)->desc))[i]))
56#define ATL1_RFD_DESC(R, i) ATL1_GET_DESC(R, i, struct rx_free_desc)
57#define ATL1_TPD_DESC(R, i) ATL1_GET_DESC(R, i, struct tx_packet_desc)
58#define ATL1_RRD_DESC(R, i) ATL1_GET_DESC(R, i, struct rx_return_desc)
59
60/*
61 * This detached comment is preserved for documentation purposes only.
62 * It was originally attached to some code that got deleted, but seems
63 * important enough to keep around...
64 *
65 * <begin detached comment>
66 * Some workarounds require millisecond delays and are run during interrupt
67 * context. Most notably, when establishing link, the phy may need tweaking
68 * but cannot process phy register reads/writes faster than millisecond
69 * intervals...and we establish link due to a "link status change" interrupt.
70 * <end detached comment>
71 */
72
73/*
74 * atl1_ring_header represents a single, contiguous block of DMA space
75 * mapped for the three descriptor rings (tpd, rfd, rrd) and the two
76 * message blocks (cmb, smb) described below
77 */
78struct atl1_ring_header {
79 void *desc; /* virtual address */
80 dma_addr_t dma; /* physical address*/
81 unsigned int size; /* length in bytes */
82};
83
84/*
85 * atl1_buffer is wrapper around a pointer to a socket buffer
86 * so a DMA handle can be stored along with the skb
87 */
88struct atl1_buffer {
89 struct sk_buff *skb; /* socket buffer */
90 u16 length; /* rx buffer length */
91 u16 alloced; /* 1 if skb allocated */
92 dma_addr_t dma;
93};
94
95/* transmit packet descriptor (tpd) ring */
96struct atl1_tpd_ring {
97 void *desc; /* descriptor ring virtual address */
98 dma_addr_t dma; /* descriptor ring physical address */
99 u16 size; /* descriptor ring length in bytes */
100 u16 count; /* number of descriptors in the ring */
101 u16 hw_idx; /* hardware index */
102 atomic_t next_to_clean;
103 atomic_t next_to_use;
104 struct atl1_buffer *buffer_info;
105};
106
107/* receive free descriptor (rfd) ring */
108struct atl1_rfd_ring {
109 void *desc; /* descriptor ring virtual address */
110 dma_addr_t dma; /* descriptor ring physical address */
111 u16 size; /* descriptor ring length in bytes */
112 u16 count; /* number of descriptors in the ring */
113 atomic_t next_to_use;
114 u16 next_to_clean;
115 struct atl1_buffer *buffer_info;
116};
117
118/* receive return descriptor (rrd) ring */
119struct atl1_rrd_ring {
120 void *desc; /* descriptor ring virtual address */
121 dma_addr_t dma; /* descriptor ring physical address */
122 unsigned int size; /* descriptor ring length in bytes */
123 u16 count; /* number of descriptors in the ring */
124 u16 next_to_use;
125 atomic_t next_to_clean;
126};
127
128/* coalescing message block (cmb) */
129struct atl1_cmb {
130 struct coals_msg_block *cmb;
131 dma_addr_t dma;
132};
133
134/* statistics message block (smb) */
135struct atl1_smb {
136 struct stats_msg_block *smb;
137 dma_addr_t dma;
138};
139
140/* Statistics counters */
141struct atl1_sft_stats {
142 u64 rx_packets;
143 u64 tx_packets;
144 u64 rx_bytes;
145 u64 tx_bytes;
146 u64 multicast;
147 u64 collisions;
148 u64 rx_errors;
149 u64 rx_length_errors;
150 u64 rx_crc_errors;
151 u64 rx_frame_errors;
152 u64 rx_fifo_errors;
153 u64 rx_missed_errors;
154 u64 tx_errors;
155 u64 tx_fifo_errors;
156 u64 tx_aborted_errors;
157 u64 tx_window_errors;
158 u64 tx_carrier_errors;
159 u64 tx_pause; /* num pause packets transmitted. */
160 u64 excecol; /* num tx packets w/ excessive collisions. */
161 u64 deffer; /* num tx packets deferred */
162 u64 scc; /* num packets subsequently transmitted
163 * successfully w/ single prior collision. */
164 u64 mcc; /* num packets subsequently transmitted
165 * successfully w/ multiple prior collisions. */
166 u64 latecol; /* num tx packets w/ late collisions. */
167 u64 tx_underun; /* num tx packets aborted due to transmit
168 * FIFO underrun, or TRD FIFO underrun */
169 u64 tx_trunc; /* num tx packets truncated due to size
170 * exceeding MTU, regardless whether truncated
171 * by the chip or not. (The name doesn't really
172 * reflect the meaning in this case.) */
173 u64 rx_pause; /* num Pause packets received. */
174 u64 rx_rrd_ov;
175 u64 rx_trunc;
176};
177
178/* hardware structure */
179struct atl1_hw {
180 u8 __iomem *hw_addr;
181 struct atl1_adapter *back;
182 enum atl1_dma_order dma_ord;
183 enum atl1_dma_rcb rcb_value;
184 enum atl1_dma_req_block dmar_block;
185 enum atl1_dma_req_block dmaw_block;
186 u8 preamble_len;
187 u8 max_retry; /* Retransmission maximum, after which the
188 * packet will be discarded */
189 u8 jam_ipg; /* IPG to start JAM for collision based flow
190 * control in half-duplex mode. In units of
191 * 8-bit time */
192 u8 ipgt; /* Desired back to back inter-packet gap.
193 * The default is 96-bit time */
194 u8 min_ifg; /* Minimum number of IFG to enforce in between
195 * receive frames. Frame gap below such IFP
196 * is dropped */
197 u8 ipgr1; /* 64bit Carrier-Sense window */
198 u8 ipgr2; /* 96-bit IPG window */
199 u8 tpd_burst; /* Number of TPD to prefetch in cache-aligned
200 * burst. Each TPD is 16 bytes long */
201 u8 rfd_burst; /* Number of RFD to prefetch in cache-aligned
202 * burst. Each RFD is 12 bytes long */
203 u8 rfd_fetch_gap;
204 u8 rrd_burst; /* Threshold number of RRDs that can be retired
205 * in a burst. Each RRD is 16 bytes long */
206 u8 tpd_fetch_th;
207 u8 tpd_fetch_gap;
208 u16 tx_jumbo_task_th;
209 u16 txf_burst; /* Number of data bytes to read in a cache-
210 * aligned burst. Each SRAM entry is 8 bytes */
211 u16 rx_jumbo_th; /* Jumbo packet size for non-VLAN packet. VLAN
212 * packets should add 4 bytes */
213 u16 rx_jumbo_lkah;
214 u16 rrd_ret_timer; /* RRD retirement timer. Decrement by 1 after
215 * every 512ns passes. */
216 u16 lcol; /* Collision Window */
217
218 u16 cmb_tpd;
219 u16 cmb_rrd;
220 u16 cmb_rx_timer;
221 u16 cmb_tx_timer;
222 u32 smb_timer;
223 u16 media_type;
224 u16 autoneg_advertised;
225
226 u16 mii_autoneg_adv_reg;
227 u16 mii_1000t_ctrl_reg;
228
229 u32 max_frame_size;
230 u32 min_frame_size;
231
232 u16 dev_rev;
233
234 /* spi flash */
235 u8 flash_vendor;
236
237 u8 mac_addr[ETH_ALEN];
238 u8 perm_mac_addr[ETH_ALEN];
239
240 bool phy_configured;
241};
242
243struct atl1_adapter {
244 struct net_device *netdev;
245 struct pci_dev *pdev;
246 struct net_device_stats net_stats;
247 struct atl1_sft_stats soft_stats;
248 struct vlan_group *vlgrp;
249 u32 rx_buffer_len;
250 u32 wol;
251 u16 link_speed;
252 u16 link_duplex;
253 spinlock_t lock;
254 struct work_struct tx_timeout_task;
255 struct work_struct link_chg_task;
256 struct work_struct pcie_dma_to_rst_task;
257 struct timer_list watchdog_timer;
258 struct timer_list phy_config_timer;
259 bool phy_timer_pending;
260
261 /* all descriptor rings' memory */
262 struct atl1_ring_header ring_header;
263
264 /* TX */
265 struct atl1_tpd_ring tpd_ring;
266 spinlock_t mb_lock;
267
268 /* RX */
269 struct atl1_rfd_ring rfd_ring;
270 struct atl1_rrd_ring rrd_ring;
271 u64 hw_csum_err;
272 u64 hw_csum_good;
273
274 u16 imt; /* interrupt moderator timer (2us resolution */
275 u16 ict; /* interrupt clear timer (2us resolution */
276 struct mii_if_info mii; /* MII interface info */
277
278 /* structs defined in atl1_hw.h */
279 u32 bd_number; /* board number */
280 bool pci_using_64;
281 struct atl1_hw hw;
282 struct atl1_smb smb;
283 struct atl1_cmb cmb;
284};
285
286#endif /* _ATL1_H_ */
diff --git a/drivers/net/atl1/atl1_ethtool.c b/drivers/net/atl1/atl1_ethtool.c
deleted file mode 100644
index 68a83be843ab..000000000000
--- a/drivers/net/atl1/atl1_ethtool.c
+++ /dev/null
@@ -1,505 +0,0 @@
1/*
2 * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved.
3 * Copyright(c) 2006 Chris Snook <csnook@redhat.com>
4 * Copyright(c) 2006 Jay Cliburn <jcliburn@gmail.com>
5 *
6 * Derived from Intel e1000 driver
7 * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the Free
11 * Software Foundation; either version 2 of the License, or (at your option)
12 * any later version.
13 *
14 * This program is distributed in the hope that it will be useful, but WITHOUT
15 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
17 * more details.
18 *
19 * You should have received a copy of the GNU General Public License along with
20 * this program; if not, write to the Free Software Foundation, Inc., 59
21 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
22 */
23
24#include <linux/types.h>
25#include <linux/pci.h>
26#include <linux/ethtool.h>
27#include <linux/netdevice.h>
28#include <linux/mii.h>
29#include <asm/uaccess.h>
30
31#include "atl1.h"
32
33struct atl1_stats {
34 char stat_string[ETH_GSTRING_LEN];
35 int sizeof_stat;
36 int stat_offset;
37};
38
39#define ATL1_STAT(m) sizeof(((struct atl1_adapter *)0)->m), \
40 offsetof(struct atl1_adapter, m)
41
42static struct atl1_stats atl1_gstrings_stats[] = {
43 {"rx_packets", ATL1_STAT(soft_stats.rx_packets)},
44 {"tx_packets", ATL1_STAT(soft_stats.tx_packets)},
45 {"rx_bytes", ATL1_STAT(soft_stats.rx_bytes)},
46 {"tx_bytes", ATL1_STAT(soft_stats.tx_bytes)},
47 {"rx_errors", ATL1_STAT(soft_stats.rx_errors)},
48 {"tx_errors", ATL1_STAT(soft_stats.tx_errors)},
49 {"rx_dropped", ATL1_STAT(net_stats.rx_dropped)},
50 {"tx_dropped", ATL1_STAT(net_stats.tx_dropped)},
51 {"multicast", ATL1_STAT(soft_stats.multicast)},
52 {"collisions", ATL1_STAT(soft_stats.collisions)},
53 {"rx_length_errors", ATL1_STAT(soft_stats.rx_length_errors)},
54 {"rx_over_errors", ATL1_STAT(soft_stats.rx_missed_errors)},
55 {"rx_crc_errors", ATL1_STAT(soft_stats.rx_crc_errors)},
56 {"rx_frame_errors", ATL1_STAT(soft_stats.rx_frame_errors)},
57 {"rx_fifo_errors", ATL1_STAT(soft_stats.rx_fifo_errors)},
58 {"rx_missed_errors", ATL1_STAT(soft_stats.rx_missed_errors)},
59 {"tx_aborted_errors", ATL1_STAT(soft_stats.tx_aborted_errors)},
60 {"tx_carrier_errors", ATL1_STAT(soft_stats.tx_carrier_errors)},
61 {"tx_fifo_errors", ATL1_STAT(soft_stats.tx_fifo_errors)},
62 {"tx_window_errors", ATL1_STAT(soft_stats.tx_window_errors)},
63 {"tx_abort_exce_coll", ATL1_STAT(soft_stats.excecol)},
64 {"tx_abort_late_coll", ATL1_STAT(soft_stats.latecol)},
65 {"tx_deferred_ok", ATL1_STAT(soft_stats.deffer)},
66 {"tx_single_coll_ok", ATL1_STAT(soft_stats.scc)},
67 {"tx_multi_coll_ok", ATL1_STAT(soft_stats.mcc)},
68 {"tx_underun", ATL1_STAT(soft_stats.tx_underun)},
69 {"tx_trunc", ATL1_STAT(soft_stats.tx_trunc)},
70 {"tx_pause", ATL1_STAT(soft_stats.tx_pause)},
71 {"rx_pause", ATL1_STAT(soft_stats.rx_pause)},
72 {"rx_rrd_ov", ATL1_STAT(soft_stats.rx_rrd_ov)},
73 {"rx_trunc", ATL1_STAT(soft_stats.rx_trunc)}
74};
75
76static void atl1_get_ethtool_stats(struct net_device *netdev,
77 struct ethtool_stats *stats, u64 *data)
78{
79 struct atl1_adapter *adapter = netdev_priv(netdev);
80 int i;
81 char *p;
82
83 for (i = 0; i < ARRAY_SIZE(atl1_gstrings_stats); i++) {
84 p = (char *)adapter+atl1_gstrings_stats[i].stat_offset;
85 data[i] = (atl1_gstrings_stats[i].sizeof_stat ==
86 sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
87 }
88
89}
90
91static int atl1_get_sset_count(struct net_device *netdev, int sset)
92{
93 switch (sset) {
94 case ETH_SS_STATS:
95 return ARRAY_SIZE(atl1_gstrings_stats);
96 default:
97 return -EOPNOTSUPP;
98 }
99}
100
101static int atl1_get_settings(struct net_device *netdev,
102 struct ethtool_cmd *ecmd)
103{
104 struct atl1_adapter *adapter = netdev_priv(netdev);
105 struct atl1_hw *hw = &adapter->hw;
106
107 ecmd->supported = (SUPPORTED_10baseT_Half |
108 SUPPORTED_10baseT_Full |
109 SUPPORTED_100baseT_Half |
110 SUPPORTED_100baseT_Full |
111 SUPPORTED_1000baseT_Full |
112 SUPPORTED_Autoneg | SUPPORTED_TP);
113 ecmd->advertising = ADVERTISED_TP;
114 if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
115 hw->media_type == MEDIA_TYPE_1000M_FULL) {
116 ecmd->advertising |= ADVERTISED_Autoneg;
117 if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR) {
118 ecmd->advertising |= ADVERTISED_Autoneg;
119 ecmd->advertising |=
120 (ADVERTISED_10baseT_Half |
121 ADVERTISED_10baseT_Full |
122 ADVERTISED_100baseT_Half |
123 ADVERTISED_100baseT_Full |
124 ADVERTISED_1000baseT_Full);
125 }
126 else
127 ecmd->advertising |= (ADVERTISED_1000baseT_Full);
128 }
129 ecmd->port = PORT_TP;
130 ecmd->phy_address = 0;
131 ecmd->transceiver = XCVR_INTERNAL;
132
133 if (netif_carrier_ok(adapter->netdev)) {
134 u16 link_speed, link_duplex;
135 atl1_get_speed_and_duplex(hw, &link_speed, &link_duplex);
136 ecmd->speed = link_speed;
137 if (link_duplex == FULL_DUPLEX)
138 ecmd->duplex = DUPLEX_FULL;
139 else
140 ecmd->duplex = DUPLEX_HALF;
141 } else {
142 ecmd->speed = -1;
143 ecmd->duplex = -1;
144 }
145 if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
146 hw->media_type == MEDIA_TYPE_1000M_FULL)
147 ecmd->autoneg = AUTONEG_ENABLE;
148 else
149 ecmd->autoneg = AUTONEG_DISABLE;
150
151 return 0;
152}
153
154static int atl1_set_settings(struct net_device *netdev,
155 struct ethtool_cmd *ecmd)
156{
157 struct atl1_adapter *adapter = netdev_priv(netdev);
158 struct atl1_hw *hw = &adapter->hw;
159 u16 phy_data;
160 int ret_val = 0;
161 u16 old_media_type = hw->media_type;
162
163 if (netif_running(adapter->netdev)) {
164 dev_dbg(&adapter->pdev->dev, "ethtool shutting down adapter\n");
165 atl1_down(adapter);
166 }
167
168 if (ecmd->autoneg == AUTONEG_ENABLE)
169 hw->media_type = MEDIA_TYPE_AUTO_SENSOR;
170 else {
171 if (ecmd->speed == SPEED_1000) {
172 if (ecmd->duplex != DUPLEX_FULL) {
173 dev_warn(&adapter->pdev->dev,
174 "can't force to 1000M half duplex\n");
175 ret_val = -EINVAL;
176 goto exit_sset;
177 }
178 hw->media_type = MEDIA_TYPE_1000M_FULL;
179 } else if (ecmd->speed == SPEED_100) {
180 if (ecmd->duplex == DUPLEX_FULL) {
181 hw->media_type = MEDIA_TYPE_100M_FULL;
182 } else
183 hw->media_type = MEDIA_TYPE_100M_HALF;
184 } else {
185 if (ecmd->duplex == DUPLEX_FULL)
186 hw->media_type = MEDIA_TYPE_10M_FULL;
187 else
188 hw->media_type = MEDIA_TYPE_10M_HALF;
189 }
190 }
191 switch (hw->media_type) {
192 case MEDIA_TYPE_AUTO_SENSOR:
193 ecmd->advertising =
194 ADVERTISED_10baseT_Half |
195 ADVERTISED_10baseT_Full |
196 ADVERTISED_100baseT_Half |
197 ADVERTISED_100baseT_Full |
198 ADVERTISED_1000baseT_Full |
199 ADVERTISED_Autoneg | ADVERTISED_TP;
200 break;
201 case MEDIA_TYPE_1000M_FULL:
202 ecmd->advertising =
203 ADVERTISED_1000baseT_Full |
204 ADVERTISED_Autoneg | ADVERTISED_TP;
205 break;
206 default:
207 ecmd->advertising = 0;
208 break;
209 }
210 if (atl1_phy_setup_autoneg_adv(hw)) {
211 ret_val = -EINVAL;
212 dev_warn(&adapter->pdev->dev,
213 "invalid ethtool speed/duplex setting\n");
214 goto exit_sset;
215 }
216 if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
217 hw->media_type == MEDIA_TYPE_1000M_FULL)
218 phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN;
219 else {
220 switch (hw->media_type) {
221 case MEDIA_TYPE_100M_FULL:
222 phy_data =
223 MII_CR_FULL_DUPLEX | MII_CR_SPEED_100 |
224 MII_CR_RESET;
225 break;
226 case MEDIA_TYPE_100M_HALF:
227 phy_data = MII_CR_SPEED_100 | MII_CR_RESET;
228 break;
229 case MEDIA_TYPE_10M_FULL:
230 phy_data =
231 MII_CR_FULL_DUPLEX | MII_CR_SPEED_10 | MII_CR_RESET;
232 break;
233 default: /* MEDIA_TYPE_10M_HALF: */
234 phy_data = MII_CR_SPEED_10 | MII_CR_RESET;
235 break;
236 }
237 }
238 atl1_write_phy_reg(hw, MII_BMCR, phy_data);
239exit_sset:
240 if (ret_val)
241 hw->media_type = old_media_type;
242
243 if (netif_running(adapter->netdev)) {
244 dev_dbg(&adapter->pdev->dev, "ethtool starting adapter\n");
245 atl1_up(adapter);
246 } else if (!ret_val) {
247 dev_dbg(&adapter->pdev->dev, "ethtool resetting adapter\n");
248 atl1_reset(adapter);
249 }
250 return ret_val;
251}
252
253static void atl1_get_drvinfo(struct net_device *netdev,
254 struct ethtool_drvinfo *drvinfo)
255{
256 struct atl1_adapter *adapter = netdev_priv(netdev);
257
258 strncpy(drvinfo->driver, atl1_driver_name, sizeof(drvinfo->driver));
259 strncpy(drvinfo->version, atl1_driver_version,
260 sizeof(drvinfo->version));
261 strncpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
262 strncpy(drvinfo->bus_info, pci_name(adapter->pdev),
263 sizeof(drvinfo->bus_info));
264 drvinfo->eedump_len = ATL1_EEDUMP_LEN;
265}
266
267static void atl1_get_wol(struct net_device *netdev,
268 struct ethtool_wolinfo *wol)
269{
270 struct atl1_adapter *adapter = netdev_priv(netdev);
271
272 wol->supported = WAKE_UCAST | WAKE_MCAST | WAKE_BCAST | WAKE_MAGIC;
273 wol->wolopts = 0;
274 if (adapter->wol & ATL1_WUFC_EX)
275 wol->wolopts |= WAKE_UCAST;
276 if (adapter->wol & ATL1_WUFC_MC)
277 wol->wolopts |= WAKE_MCAST;
278 if (adapter->wol & ATL1_WUFC_BC)
279 wol->wolopts |= WAKE_BCAST;
280 if (adapter->wol & ATL1_WUFC_MAG)
281 wol->wolopts |= WAKE_MAGIC;
282 return;
283}
284
285static int atl1_set_wol(struct net_device *netdev,
286 struct ethtool_wolinfo *wol)
287{
288 struct atl1_adapter *adapter = netdev_priv(netdev);
289
290 if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
291 return -EOPNOTSUPP;
292 adapter->wol = 0;
293 if (wol->wolopts & WAKE_UCAST)
294 adapter->wol |= ATL1_WUFC_EX;
295 if (wol->wolopts & WAKE_MCAST)
296 adapter->wol |= ATL1_WUFC_MC;
297 if (wol->wolopts & WAKE_BCAST)
298 adapter->wol |= ATL1_WUFC_BC;
299 if (wol->wolopts & WAKE_MAGIC)
300 adapter->wol |= ATL1_WUFC_MAG;
301 return 0;
302}
303
304static void atl1_get_ringparam(struct net_device *netdev,
305 struct ethtool_ringparam *ring)
306{
307 struct atl1_adapter *adapter = netdev_priv(netdev);
308 struct atl1_tpd_ring *txdr = &adapter->tpd_ring;
309 struct atl1_rfd_ring *rxdr = &adapter->rfd_ring;
310
311 ring->rx_max_pending = ATL1_MAX_RFD;
312 ring->tx_max_pending = ATL1_MAX_TPD;
313 ring->rx_mini_max_pending = 0;
314 ring->rx_jumbo_max_pending = 0;
315 ring->rx_pending = rxdr->count;
316 ring->tx_pending = txdr->count;
317 ring->rx_mini_pending = 0;
318 ring->rx_jumbo_pending = 0;
319}
320
321static int atl1_set_ringparam(struct net_device *netdev,
322 struct ethtool_ringparam *ring)
323{
324 struct atl1_adapter *adapter = netdev_priv(netdev);
325 struct atl1_tpd_ring *tpdr = &adapter->tpd_ring;
326 struct atl1_rrd_ring *rrdr = &adapter->rrd_ring;
327 struct atl1_rfd_ring *rfdr = &adapter->rfd_ring;
328
329 struct atl1_tpd_ring tpd_old, tpd_new;
330 struct atl1_rfd_ring rfd_old, rfd_new;
331 struct atl1_rrd_ring rrd_old, rrd_new;
332 struct atl1_ring_header rhdr_old, rhdr_new;
333 int err;
334
335 tpd_old = adapter->tpd_ring;
336 rfd_old = adapter->rfd_ring;
337 rrd_old = adapter->rrd_ring;
338 rhdr_old = adapter->ring_header;
339
340 if (netif_running(adapter->netdev))
341 atl1_down(adapter);
342
343 rfdr->count = (u16) max(ring->rx_pending, (u32) ATL1_MIN_RFD);
344 rfdr->count = rfdr->count > ATL1_MAX_RFD ? ATL1_MAX_RFD :
345 rfdr->count;
346 rfdr->count = (rfdr->count + 3) & ~3;
347 rrdr->count = rfdr->count;
348
349 tpdr->count = (u16) max(ring->tx_pending, (u32) ATL1_MIN_TPD);
350 tpdr->count = tpdr->count > ATL1_MAX_TPD ? ATL1_MAX_TPD :
351 tpdr->count;
352 tpdr->count = (tpdr->count + 3) & ~3;
353
354 if (netif_running(adapter->netdev)) {
355 /* try to get new resources before deleting old */
356 err = atl1_setup_ring_resources(adapter);
357 if (err)
358 goto err_setup_ring;
359
360 /*
361 * save the new, restore the old in order to free it,
362 * then restore the new back again
363 */
364
365 rfd_new = adapter->rfd_ring;
366 rrd_new = adapter->rrd_ring;
367 tpd_new = adapter->tpd_ring;
368 rhdr_new = adapter->ring_header;
369 adapter->rfd_ring = rfd_old;
370 adapter->rrd_ring = rrd_old;
371 adapter->tpd_ring = tpd_old;
372 adapter->ring_header = rhdr_old;
373 atl1_free_ring_resources(adapter);
374 adapter->rfd_ring = rfd_new;
375 adapter->rrd_ring = rrd_new;
376 adapter->tpd_ring = tpd_new;
377 adapter->ring_header = rhdr_new;
378
379 err = atl1_up(adapter);
380 if (err)
381 return err;
382 }
383 return 0;
384
385err_setup_ring:
386 adapter->rfd_ring = rfd_old;
387 adapter->rrd_ring = rrd_old;
388 adapter->tpd_ring = tpd_old;
389 adapter->ring_header = rhdr_old;
390 atl1_up(adapter);
391 return err;
392}
393
394static void atl1_get_pauseparam(struct net_device *netdev,
395 struct ethtool_pauseparam *epause)
396{
397 struct atl1_adapter *adapter = netdev_priv(netdev);
398 struct atl1_hw *hw = &adapter->hw;
399
400 if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
401 hw->media_type == MEDIA_TYPE_1000M_FULL) {
402 epause->autoneg = AUTONEG_ENABLE;
403 } else {
404 epause->autoneg = AUTONEG_DISABLE;
405 }
406 epause->rx_pause = 1;
407 epause->tx_pause = 1;
408}
409
410static int atl1_set_pauseparam(struct net_device *netdev,
411 struct ethtool_pauseparam *epause)
412{
413 struct atl1_adapter *adapter = netdev_priv(netdev);
414 struct atl1_hw *hw = &adapter->hw;
415
416 if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
417 hw->media_type == MEDIA_TYPE_1000M_FULL) {
418 epause->autoneg = AUTONEG_ENABLE;
419 } else {
420 epause->autoneg = AUTONEG_DISABLE;
421 }
422
423 epause->rx_pause = 1;
424 epause->tx_pause = 1;
425
426 return 0;
427}
428
429static u32 atl1_get_rx_csum(struct net_device *netdev)
430{
431 return 1;
432}
433
434static void atl1_get_strings(struct net_device *netdev, u32 stringset,
435 u8 *data)
436{
437 u8 *p = data;
438 int i;
439
440 switch (stringset) {
441 case ETH_SS_STATS:
442 for (i = 0; i < ARRAY_SIZE(atl1_gstrings_stats); i++) {
443 memcpy(p, atl1_gstrings_stats[i].stat_string,
444 ETH_GSTRING_LEN);
445 p += ETH_GSTRING_LEN;
446 }
447 break;
448 }
449}
450
451static int atl1_nway_reset(struct net_device *netdev)
452{
453 struct atl1_adapter *adapter = netdev_priv(netdev);
454 struct atl1_hw *hw = &adapter->hw;
455
456 if (netif_running(netdev)) {
457 u16 phy_data;
458 atl1_down(adapter);
459
460 if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
461 hw->media_type == MEDIA_TYPE_1000M_FULL) {
462 phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN;
463 } else {
464 switch (hw->media_type) {
465 case MEDIA_TYPE_100M_FULL:
466 phy_data = MII_CR_FULL_DUPLEX |
467 MII_CR_SPEED_100 | MII_CR_RESET;
468 break;
469 case MEDIA_TYPE_100M_HALF:
470 phy_data = MII_CR_SPEED_100 | MII_CR_RESET;
471 break;
472 case MEDIA_TYPE_10M_FULL:
473 phy_data = MII_CR_FULL_DUPLEX |
474 MII_CR_SPEED_10 | MII_CR_RESET;
475 break;
476 default: /* MEDIA_TYPE_10M_HALF */
477 phy_data = MII_CR_SPEED_10 | MII_CR_RESET;
478 }
479 }
480 atl1_write_phy_reg(hw, MII_BMCR, phy_data);
481 atl1_up(adapter);
482 }
483 return 0;
484}
485
486const struct ethtool_ops atl1_ethtool_ops = {
487 .get_settings = atl1_get_settings,
488 .set_settings = atl1_set_settings,
489 .get_drvinfo = atl1_get_drvinfo,
490 .get_wol = atl1_get_wol,
491 .set_wol = atl1_set_wol,
492 .get_ringparam = atl1_get_ringparam,
493 .set_ringparam = atl1_set_ringparam,
494 .get_pauseparam = atl1_get_pauseparam,
495 .set_pauseparam = atl1_set_pauseparam,
496 .get_rx_csum = atl1_get_rx_csum,
497 .set_tx_csum = ethtool_op_set_tx_hw_csum,
498 .get_link = ethtool_op_get_link,
499 .set_sg = ethtool_op_set_sg,
500 .get_strings = atl1_get_strings,
501 .nway_reset = atl1_nway_reset,
502 .get_ethtool_stats = atl1_get_ethtool_stats,
503 .get_sset_count = atl1_get_sset_count,
504 .set_tso = ethtool_op_set_tso,
505};
diff --git a/drivers/net/atl1/atl1_hw.c b/drivers/net/atl1/atl1_hw.c
deleted file mode 100644
index 9d3bd22e3a82..000000000000
--- a/drivers/net/atl1/atl1_hw.c
+++ /dev/null
@@ -1,720 +0,0 @@
1/*
2 * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved.
3 * Copyright(c) 2006 Chris Snook <csnook@redhat.com>
4 * Copyright(c) 2006 Jay Cliburn <jcliburn@gmail.com>
5 *
6 * Derived from Intel e1000 driver
7 * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the Free
11 * Software Foundation; either version 2 of the License, or (at your option)
12 * any later version.
13 *
14 * This program is distributed in the hope that it will be useful, but WITHOUT
15 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
17 * more details.
18 *
19 * You should have received a copy of the GNU General Public License along with
20 * this program; if not, write to the Free Software Foundation, Inc., 59
21 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
22 */
23
24#include <linux/types.h>
25#include <linux/pci.h>
26#include <linux/delay.h>
27#include <linux/if_vlan.h>
28#include <linux/etherdevice.h>
29#include <linux/crc32.h>
30#include <asm/byteorder.h>
31
32#include "atl1.h"
33
34/*
35 * Reset the transmit and receive units; mask and clear all interrupts.
36 * hw - Struct containing variables accessed by shared code
37 * return : ATL1_SUCCESS or idle status (if error)
38 */
39s32 atl1_reset_hw(struct atl1_hw *hw)
40{
41 struct pci_dev *pdev = hw->back->pdev;
42 u32 icr;
43 int i;
44
45 /*
46 * Clear Interrupt mask to stop board from generating
47 * interrupts & Clear any pending interrupt events
48 */
49 /*
50 * iowrite32(0, hw->hw_addr + REG_IMR);
51 * iowrite32(0xffffffff, hw->hw_addr + REG_ISR);
52 */
53
54 /*
55 * Issue Soft Reset to the MAC. This will reset the chip's
56 * transmit, receive, DMA. It will not effect
57 * the current PCI configuration. The global reset bit is self-
58 * clearing, and should clear within a microsecond.
59 */
60 iowrite32(MASTER_CTRL_SOFT_RST, hw->hw_addr + REG_MASTER_CTRL);
61 ioread32(hw->hw_addr + REG_MASTER_CTRL);
62
63 iowrite16(1, hw->hw_addr + REG_GPHY_ENABLE);
64 ioread16(hw->hw_addr + REG_GPHY_ENABLE);
65
66 msleep(1); /* delay about 1ms */
67
68 /* Wait at least 10ms for All module to be Idle */
69 for (i = 0; i < 10; i++) {
70 icr = ioread32(hw->hw_addr + REG_IDLE_STATUS);
71 if (!icr)
72 break;
73 msleep(1); /* delay 1 ms */
74 cpu_relax(); /* FIXME: is this still the right way to do this? */
75 }
76
77 if (icr) {
78 dev_dbg(&pdev->dev, "ICR = 0x%x\n", icr);
79 return icr;
80 }
81
82 return ATL1_SUCCESS;
83}
84
85/* function about EEPROM
86 *
87 * check_eeprom_exist
88 * return 0 if eeprom exist
89 */
90static int atl1_check_eeprom_exist(struct atl1_hw *hw)
91{
92 u32 value;
93 value = ioread32(hw->hw_addr + REG_SPI_FLASH_CTRL);
94 if (value & SPI_FLASH_CTRL_EN_VPD) {
95 value &= ~SPI_FLASH_CTRL_EN_VPD;
96 iowrite32(value, hw->hw_addr + REG_SPI_FLASH_CTRL);
97 }
98
99 value = ioread16(hw->hw_addr + REG_PCIE_CAP_LIST);
100 return ((value & 0xFF00) == 0x6C00) ? 0 : 1;
101}
102
103static bool atl1_read_eeprom(struct atl1_hw *hw, u32 offset, u32 *p_value)
104{
105 int i;
106 u32 control;
107
108 if (offset & 3)
109 return false; /* address do not align */
110
111 iowrite32(0, hw->hw_addr + REG_VPD_DATA);
112 control = (offset & VPD_CAP_VPD_ADDR_MASK) << VPD_CAP_VPD_ADDR_SHIFT;
113 iowrite32(control, hw->hw_addr + REG_VPD_CAP);
114 ioread32(hw->hw_addr + REG_VPD_CAP);
115
116 for (i = 0; i < 10; i++) {
117 msleep(2);
118 control = ioread32(hw->hw_addr + REG_VPD_CAP);
119 if (control & VPD_CAP_VPD_FLAG)
120 break;
121 }
122 if (control & VPD_CAP_VPD_FLAG) {
123 *p_value = ioread32(hw->hw_addr + REG_VPD_DATA);
124 return true;
125 }
126 return false; /* timeout */
127}
128
129/*
130 * Reads the value from a PHY register
131 * hw - Struct containing variables accessed by shared code
132 * reg_addr - address of the PHY register to read
133 */
134s32 atl1_read_phy_reg(struct atl1_hw *hw, u16 reg_addr, u16 *phy_data)
135{
136 u32 val;
137 int i;
138
139 val = ((u32) (reg_addr & MDIO_REG_ADDR_MASK)) << MDIO_REG_ADDR_SHIFT |
140 MDIO_START | MDIO_SUP_PREAMBLE | MDIO_RW | MDIO_CLK_25_4 <<
141 MDIO_CLK_SEL_SHIFT;
142 iowrite32(val, hw->hw_addr + REG_MDIO_CTRL);
143 ioread32(hw->hw_addr + REG_MDIO_CTRL);
144
145 for (i = 0; i < MDIO_WAIT_TIMES; i++) {
146 udelay(2);
147 val = ioread32(hw->hw_addr + REG_MDIO_CTRL);
148 if (!(val & (MDIO_START | MDIO_BUSY)))
149 break;
150 }
151 if (!(val & (MDIO_START | MDIO_BUSY))) {
152 *phy_data = (u16) val;
153 return ATL1_SUCCESS;
154 }
155 return ATL1_ERR_PHY;
156}
157
158#define CUSTOM_SPI_CS_SETUP 2
159#define CUSTOM_SPI_CLK_HI 2
160#define CUSTOM_SPI_CLK_LO 2
161#define CUSTOM_SPI_CS_HOLD 2
162#define CUSTOM_SPI_CS_HI 3
163
164static bool atl1_spi_read(struct atl1_hw *hw, u32 addr, u32 *buf)
165{
166 int i;
167 u32 value;
168
169 iowrite32(0, hw->hw_addr + REG_SPI_DATA);
170 iowrite32(addr, hw->hw_addr + REG_SPI_ADDR);
171
172 value = SPI_FLASH_CTRL_WAIT_READY |
173 (CUSTOM_SPI_CS_SETUP & SPI_FLASH_CTRL_CS_SETUP_MASK) <<
174 SPI_FLASH_CTRL_CS_SETUP_SHIFT | (CUSTOM_SPI_CLK_HI &
175 SPI_FLASH_CTRL_CLK_HI_MASK) <<
176 SPI_FLASH_CTRL_CLK_HI_SHIFT | (CUSTOM_SPI_CLK_LO &
177 SPI_FLASH_CTRL_CLK_LO_MASK) <<
178 SPI_FLASH_CTRL_CLK_LO_SHIFT | (CUSTOM_SPI_CS_HOLD &
179 SPI_FLASH_CTRL_CS_HOLD_MASK) <<
180 SPI_FLASH_CTRL_CS_HOLD_SHIFT | (CUSTOM_SPI_CS_HI &
181 SPI_FLASH_CTRL_CS_HI_MASK) <<
182 SPI_FLASH_CTRL_CS_HI_SHIFT | (1 & SPI_FLASH_CTRL_INS_MASK) <<
183 SPI_FLASH_CTRL_INS_SHIFT;
184
185 iowrite32(value, hw->hw_addr + REG_SPI_FLASH_CTRL);
186
187 value |= SPI_FLASH_CTRL_START;
188 iowrite32(value, hw->hw_addr + REG_SPI_FLASH_CTRL);
189 ioread32(hw->hw_addr + REG_SPI_FLASH_CTRL);
190
191 for (i = 0; i < 10; i++) {
192 msleep(1); /* 1ms */
193 value = ioread32(hw->hw_addr + REG_SPI_FLASH_CTRL);
194 if (!(value & SPI_FLASH_CTRL_START))
195 break;
196 }
197
198 if (value & SPI_FLASH_CTRL_START)
199 return false;
200
201 *buf = ioread32(hw->hw_addr + REG_SPI_DATA);
202
203 return true;
204}
205
206/*
207 * get_permanent_address
208 * return 0 if get valid mac address,
209 */
210static int atl1_get_permanent_address(struct atl1_hw *hw)
211{
212 u32 addr[2];
213 u32 i, control;
214 u16 reg;
215 u8 eth_addr[ETH_ALEN];
216 bool key_valid;
217
218 if (is_valid_ether_addr(hw->perm_mac_addr))
219 return 0;
220
221 /* init */
222 addr[0] = addr[1] = 0;
223
224 if (!atl1_check_eeprom_exist(hw)) { /* eeprom exist */
225 reg = 0;
226 key_valid = false;
227 /* Read out all EEPROM content */
228 i = 0;
229 while (1) {
230 if (atl1_read_eeprom(hw, i + 0x100, &control)) {
231 if (key_valid) {
232 if (reg == REG_MAC_STA_ADDR)
233 addr[0] = control;
234 else if (reg == (REG_MAC_STA_ADDR + 4))
235 addr[1] = control;
236 key_valid = false;
237 } else if ((control & 0xff) == 0x5A) {
238 key_valid = true;
239 reg = (u16) (control >> 16);
240 } else
241 break; /* assume data end while encount an invalid KEYWORD */
242 } else
243 break; /* read error */
244 i += 4;
245 }
246
247 *(u32 *) &eth_addr[2] = swab32(addr[0]);
248 *(u16 *) &eth_addr[0] = swab16(*(u16 *) &addr[1]);
249 if (is_valid_ether_addr(eth_addr)) {
250 memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN);
251 return 0;
252 }
253 return 1;
254 }
255
256 /* see if SPI FLAGS exist ? */
257 addr[0] = addr[1] = 0;
258 reg = 0;
259 key_valid = false;
260 i = 0;
261 while (1) {
262 if (atl1_spi_read(hw, i + 0x1f000, &control)) {
263 if (key_valid) {
264 if (reg == REG_MAC_STA_ADDR)
265 addr[0] = control;
266 else if (reg == (REG_MAC_STA_ADDR + 4))
267 addr[1] = control;
268 key_valid = false;
269 } else if ((control & 0xff) == 0x5A) {
270 key_valid = true;
271 reg = (u16) (control >> 16);
272 } else
273 break; /* data end */
274 } else
275 break; /* read error */
276 i += 4;
277 }
278
279 *(u32 *) &eth_addr[2] = swab32(addr[0]);
280 *(u16 *) &eth_addr[0] = swab16(*(u16 *) &addr[1]);
281 if (is_valid_ether_addr(eth_addr)) {
282 memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN);
283 return 0;
284 }
285
286 /*
287 * On some motherboards, the MAC address is written by the
288 * BIOS directly to the MAC register during POST, and is
289 * not stored in eeprom. If all else thus far has failed
290 * to fetch the permanent MAC address, try reading it directly.
291 */
292 addr[0] = ioread32(hw->hw_addr + REG_MAC_STA_ADDR);
293 addr[1] = ioread16(hw->hw_addr + (REG_MAC_STA_ADDR + 4));
294 *(u32 *) &eth_addr[2] = swab32(addr[0]);
295 *(u16 *) &eth_addr[0] = swab16(*(u16 *) &addr[1]);
296 if (is_valid_ether_addr(eth_addr)) {
297 memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN);
298 return 0;
299 }
300
301 return 1;
302}
303
304/*
305 * Reads the adapter's MAC address from the EEPROM
306 * hw - Struct containing variables accessed by shared code
307 */
308s32 atl1_read_mac_addr(struct atl1_hw *hw)
309{
310 u16 i;
311
312 if (atl1_get_permanent_address(hw))
313 random_ether_addr(hw->perm_mac_addr);
314
315 for (i = 0; i < ETH_ALEN; i++)
316 hw->mac_addr[i] = hw->perm_mac_addr[i];
317 return ATL1_SUCCESS;
318}
319
320/*
321 * Hashes an address to determine its location in the multicast table
322 * hw - Struct containing variables accessed by shared code
323 * mc_addr - the multicast address to hash
324 *
325 * atl1_hash_mc_addr
326 * purpose
327 * set hash value for a multicast address
328 * hash calcu processing :
329 * 1. calcu 32bit CRC for multicast address
330 * 2. reverse crc with MSB to LSB
331 */
332u32 atl1_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr)
333{
334 u32 crc32, value = 0;
335 int i;
336
337 crc32 = ether_crc_le(6, mc_addr);
338 for (i = 0; i < 32; i++)
339 value |= (((crc32 >> i) & 1) << (31 - i));
340
341 return value;
342}
343
344/*
345 * Sets the bit in the multicast table corresponding to the hash value.
346 * hw - Struct containing variables accessed by shared code
347 * hash_value - Multicast address hash value
348 */
349void atl1_hash_set(struct atl1_hw *hw, u32 hash_value)
350{
351 u32 hash_bit, hash_reg;
352 u32 mta;
353
354 /*
355 * The HASH Table is a register array of 2 32-bit registers.
356 * It is treated like an array of 64 bits. We want to set
357 * bit BitArray[hash_value]. So we figure out what register
358 * the bit is in, read it, OR in the new bit, then write
359 * back the new value. The register is determined by the
360 * upper 7 bits of the hash value and the bit within that
361 * register are determined by the lower 5 bits of the value.
362 */
363 hash_reg = (hash_value >> 31) & 0x1;
364 hash_bit = (hash_value >> 26) & 0x1F;
365 mta = ioread32((hw->hw_addr + REG_RX_HASH_TABLE) + (hash_reg << 2));
366 mta |= (1 << hash_bit);
367 iowrite32(mta, (hw->hw_addr + REG_RX_HASH_TABLE) + (hash_reg << 2));
368}
369
370/*
371 * Writes a value to a PHY register
372 * hw - Struct containing variables accessed by shared code
373 * reg_addr - address of the PHY register to write
374 * data - data to write to the PHY
375 */
376s32 atl1_write_phy_reg(struct atl1_hw *hw, u32 reg_addr, u16 phy_data)
377{
378 int i;
379 u32 val;
380
381 val = ((u32) (phy_data & MDIO_DATA_MASK)) << MDIO_DATA_SHIFT |
382 (reg_addr & MDIO_REG_ADDR_MASK) << MDIO_REG_ADDR_SHIFT |
383 MDIO_SUP_PREAMBLE |
384 MDIO_START | MDIO_CLK_25_4 << MDIO_CLK_SEL_SHIFT;
385 iowrite32(val, hw->hw_addr + REG_MDIO_CTRL);
386 ioread32(hw->hw_addr + REG_MDIO_CTRL);
387
388 for (i = 0; i < MDIO_WAIT_TIMES; i++) {
389 udelay(2);
390 val = ioread32(hw->hw_addr + REG_MDIO_CTRL);
391 if (!(val & (MDIO_START | MDIO_BUSY)))
392 break;
393 }
394
395 if (!(val & (MDIO_START | MDIO_BUSY)))
396 return ATL1_SUCCESS;
397
398 return ATL1_ERR_PHY;
399}
400
401/*
402 * Make L001's PHY out of Power Saving State (bug)
403 * hw - Struct containing variables accessed by shared code
404 * when power on, L001's PHY always on Power saving State
405 * (Gigabit Link forbidden)
406 */
407static s32 atl1_phy_leave_power_saving(struct atl1_hw *hw)
408{
409 s32 ret;
410 ret = atl1_write_phy_reg(hw, 29, 0x0029);
411 if (ret)
412 return ret;
413 return atl1_write_phy_reg(hw, 30, 0);
414}
415
416/*
417 *TODO: do something or get rid of this
418 */
419s32 atl1_phy_enter_power_saving(struct atl1_hw *hw)
420{
421/* s32 ret_val;
422 * u16 phy_data;
423 */
424
425/*
426 ret_val = atl1_write_phy_reg(hw, ...);
427 ret_val = atl1_write_phy_reg(hw, ...);
428 ....
429*/
430 return ATL1_SUCCESS;
431}
432
433/*
434 * Resets the PHY and make all config validate
435 * hw - Struct containing variables accessed by shared code
436 *
437 * Sets bit 15 and 12 of the MII Control regiser (for F001 bug)
438 */
439static s32 atl1_phy_reset(struct atl1_hw *hw)
440{
441 struct pci_dev *pdev = hw->back->pdev;
442 s32 ret_val;
443 u16 phy_data;
444
445 if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
446 hw->media_type == MEDIA_TYPE_1000M_FULL)
447 phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN;
448 else {
449 switch (hw->media_type) {
450 case MEDIA_TYPE_100M_FULL:
451 phy_data =
452 MII_CR_FULL_DUPLEX | MII_CR_SPEED_100 |
453 MII_CR_RESET;
454 break;
455 case MEDIA_TYPE_100M_HALF:
456 phy_data = MII_CR_SPEED_100 | MII_CR_RESET;
457 break;
458 case MEDIA_TYPE_10M_FULL:
459 phy_data =
460 MII_CR_FULL_DUPLEX | MII_CR_SPEED_10 | MII_CR_RESET;
461 break;
462 default: /* MEDIA_TYPE_10M_HALF: */
463 phy_data = MII_CR_SPEED_10 | MII_CR_RESET;
464 break;
465 }
466 }
467
468 ret_val = atl1_write_phy_reg(hw, MII_BMCR, phy_data);
469 if (ret_val) {
470 u32 val;
471 int i;
472 /* pcie serdes link may be down! */
473 dev_dbg(&pdev->dev, "pcie phy link down\n");
474
475 for (i = 0; i < 25; i++) {
476 msleep(1);
477 val = ioread32(hw->hw_addr + REG_MDIO_CTRL);
478 if (!(val & (MDIO_START | MDIO_BUSY)))
479 break;
480 }
481
482 if ((val & (MDIO_START | MDIO_BUSY)) != 0) {
483 dev_warn(&pdev->dev, "pcie link down at least 25ms\n");
484 return ret_val;
485 }
486 }
487 return ATL1_SUCCESS;
488}
489
490/*
491 * Configures PHY autoneg and flow control advertisement settings
492 * hw - Struct containing variables accessed by shared code
493 */
494s32 atl1_phy_setup_autoneg_adv(struct atl1_hw *hw)
495{
496 s32 ret_val;
497 s16 mii_autoneg_adv_reg;
498 s16 mii_1000t_ctrl_reg;
499
500 /* Read the MII Auto-Neg Advertisement Register (Address 4). */
501 mii_autoneg_adv_reg = MII_AR_DEFAULT_CAP_MASK;
502
503 /* Read the MII 1000Base-T Control Register (Address 9). */
504 mii_1000t_ctrl_reg = MII_AT001_CR_1000T_DEFAULT_CAP_MASK;
505
506 /*
507 * First we clear all the 10/100 mb speed bits in the Auto-Neg
508 * Advertisement Register (Address 4) and the 1000 mb speed bits in
509 * the 1000Base-T Control Register (Address 9).
510 */
511 mii_autoneg_adv_reg &= ~MII_AR_SPEED_MASK;
512 mii_1000t_ctrl_reg &= ~MII_AT001_CR_1000T_SPEED_MASK;
513
514 /*
515 * Need to parse media_type and set up
516 * the appropriate PHY registers.
517 */
518 switch (hw->media_type) {
519 case MEDIA_TYPE_AUTO_SENSOR:
520 mii_autoneg_adv_reg |= (MII_AR_10T_HD_CAPS |
521 MII_AR_10T_FD_CAPS |
522 MII_AR_100TX_HD_CAPS |
523 MII_AR_100TX_FD_CAPS);
524 mii_1000t_ctrl_reg |= MII_AT001_CR_1000T_FD_CAPS;
525 break;
526
527 case MEDIA_TYPE_1000M_FULL:
528 mii_1000t_ctrl_reg |= MII_AT001_CR_1000T_FD_CAPS;
529 break;
530
531 case MEDIA_TYPE_100M_FULL:
532 mii_autoneg_adv_reg |= MII_AR_100TX_FD_CAPS;
533 break;
534
535 case MEDIA_TYPE_100M_HALF:
536 mii_autoneg_adv_reg |= MII_AR_100TX_HD_CAPS;
537 break;
538
539 case MEDIA_TYPE_10M_FULL:
540 mii_autoneg_adv_reg |= MII_AR_10T_FD_CAPS;
541 break;
542
543 default:
544 mii_autoneg_adv_reg |= MII_AR_10T_HD_CAPS;
545 break;
546 }
547
548 /* flow control is fixed: always advertise symmetric and asymmetric pause */
549 mii_autoneg_adv_reg |= (MII_AR_ASM_DIR | MII_AR_PAUSE);
550
551 hw->mii_autoneg_adv_reg = mii_autoneg_adv_reg;
552 hw->mii_1000t_ctrl_reg = mii_1000t_ctrl_reg;
553
554 ret_val = atl1_write_phy_reg(hw, MII_ADVERTISE, mii_autoneg_adv_reg);
555 if (ret_val)
556 return ret_val;
557
558 ret_val = atl1_write_phy_reg(hw, MII_AT001_CR, mii_1000t_ctrl_reg);
559 if (ret_val)
560 return ret_val;
561
562 return ATL1_SUCCESS;
563}
564
565/*
566 * Configures link settings.
567 * hw - Struct containing variables accessed by shared code
568 * Assumes the hardware has previously been reset and the
569 * transmitter and receiver are not enabled.
570 */
571static s32 atl1_setup_link(struct atl1_hw *hw)
572{
573 struct pci_dev *pdev = hw->back->pdev;
574 s32 ret_val;
575
576 /*
577 * Options:
578 * PHY will advertise value(s) parsed from
579 * autoneg_advertised and fc
580 * regardless of the autoneg setting, we do not wait for the link result.
581 */
582 ret_val = atl1_phy_setup_autoneg_adv(hw);
583 if (ret_val) {
584 dev_dbg(&pdev->dev, "error setting up autonegotiation\n");
585 return ret_val;
586 }
587 /* SW reset, enable auto-negotiation if needed */
588 ret_val = atl1_phy_reset(hw);
589 if (ret_val) {
590 dev_dbg(&pdev->dev, "error resetting phy\n");
591 return ret_val;
592 }
593 hw->phy_configured = true;
594 return ret_val;
595}
596
597static struct atl1_spi_flash_dev flash_table[] = {
598/* MFR_NAME WRSR READ PRGM WREN WRDI RDSR RDID SECTOR_ERASE CHIP_ERASE */
599 {"Atmel", 0x00, 0x03, 0x02, 0x06, 0x04, 0x05, 0x15, 0x52, 0x62},
600 {"SST", 0x01, 0x03, 0x02, 0x06, 0x04, 0x05, 0x90, 0x20, 0x60},
601 {"ST", 0x01, 0x03, 0x02, 0x06, 0x04, 0x05, 0xAB, 0xD8, 0xC7},
602};
603
604static void atl1_init_flash_opcode(struct atl1_hw *hw)
605{
606 if (hw->flash_vendor >= ARRAY_SIZE(flash_table))
607 hw->flash_vendor = 0; /* ATMEL */
608
609 /* Init OP table */
610 iowrite8(flash_table[hw->flash_vendor].cmd_program,
611 hw->hw_addr + REG_SPI_FLASH_OP_PROGRAM);
612 iowrite8(flash_table[hw->flash_vendor].cmd_sector_erase,
613 hw->hw_addr + REG_SPI_FLASH_OP_SC_ERASE);
614 iowrite8(flash_table[hw->flash_vendor].cmd_chip_erase,
615 hw->hw_addr + REG_SPI_FLASH_OP_CHIP_ERASE);
616 iowrite8(flash_table[hw->flash_vendor].cmd_rdid,
617 hw->hw_addr + REG_SPI_FLASH_OP_RDID);
618 iowrite8(flash_table[hw->flash_vendor].cmd_wren,
619 hw->hw_addr + REG_SPI_FLASH_OP_WREN);
620 iowrite8(flash_table[hw->flash_vendor].cmd_rdsr,
621 hw->hw_addr + REG_SPI_FLASH_OP_RDSR);
622 iowrite8(flash_table[hw->flash_vendor].cmd_wrsr,
623 hw->hw_addr + REG_SPI_FLASH_OP_WRSR);
624 iowrite8(flash_table[hw->flash_vendor].cmd_read,
625 hw->hw_addr + REG_SPI_FLASH_OP_READ);
626}
627
628/*
629 * Performs basic configuration of the adapter.
630 * hw - Struct containing variables accessed by shared code
631 * Assumes that the controller has previously been reset and is in a
632 * post-reset uninitialized state. Initializes the multicast table
633 * and calls routines to set up the link.
634 * Leaves the transmit and receive units disabled and uninitialized.
635 */
636s32 atl1_init_hw(struct atl1_hw *hw)
637{
638 u32 ret_val = 0;
639
640 /* Zero out the Multicast HASH table */
641 iowrite32(0, hw->hw_addr + REG_RX_HASH_TABLE);
642 /* clear the old settings from the multicast hash table */
643 iowrite32(0, (hw->hw_addr + REG_RX_HASH_TABLE) + (1 << 2));
644
645 atl1_init_flash_opcode(hw);
646
647 if (!hw->phy_configured) {
648 /* enable GPHY LinkChange interrupt */
649 ret_val = atl1_write_phy_reg(hw, 18, 0xC00);
650 if (ret_val)
651 return ret_val;
652 /* bring the PHY out of its power-saving state */
653 ret_val = atl1_phy_leave_power_saving(hw);
654 if (ret_val)
655 return ret_val;
656 /* Call a subroutine to configure the link */
657 ret_val = atl1_setup_link(hw);
658 }
659 return ret_val;
660}
661
662/*
663 * Detects the current speed and duplex settings of the hardware.
664 * hw - Struct containing variables accessed by shared code
665 * speed - Speed of the connection
666 * duplex - Duplex setting of the connection
667 */
668s32 atl1_get_speed_and_duplex(struct atl1_hw *hw, u16 *speed, u16 *duplex)
669{
670 struct pci_dev *pdev = hw->back->pdev;
671 s32 ret_val;
672 u16 phy_data;
673
674 /* Read the PHY Specific Status Register (register 17) */
675 ret_val = atl1_read_phy_reg(hw, MII_AT001_PSSR, &phy_data);
676 if (ret_val)
677 return ret_val;
678
679 if (!(phy_data & MII_AT001_PSSR_SPD_DPLX_RESOLVED))
680 return ATL1_ERR_PHY_RES;
681
682 switch (phy_data & MII_AT001_PSSR_SPEED) {
683 case MII_AT001_PSSR_1000MBS:
684 *speed = SPEED_1000;
685 break;
686 case MII_AT001_PSSR_100MBS:
687 *speed = SPEED_100;
688 break;
689 case MII_AT001_PSSR_10MBS:
690 *speed = SPEED_10;
691 break;
692 default:
693 dev_dbg(&pdev->dev, "error getting speed\n");
694 return ATL1_ERR_PHY_SPEED;
695 break;
696 }
697 if (phy_data & MII_AT001_PSSR_DPLX)
698 *duplex = FULL_DUPLEX;
699 else
700 *duplex = HALF_DUPLEX;
701
702 return ATL1_SUCCESS;
703}
704
705void atl1_set_mac_addr(struct atl1_hw *hw)
706{
707 u32 value;
708 /*
709 * Example: MAC address 00-0B-6A-F6-00-DC is stored as
710 * word 0 (low dword): 6AF600DC, word 1 (high word): 000B
711 * low dword first:
712 */
713 value = (((u32) hw->mac_addr[2]) << 24) |
714 (((u32) hw->mac_addr[3]) << 16) |
715 (((u32) hw->mac_addr[4]) << 8) | (((u32) hw->mac_addr[5]));
716 iowrite32(value, hw->hw_addr + REG_MAC_STA_ADDR);
717 /* high dword */
718 value = (((u32) hw->mac_addr[0]) << 8) | (((u32) hw->mac_addr[1]));
719 iowrite32(value, (hw->hw_addr + REG_MAC_STA_ADDR) + (1 << 2));
720}
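/*
 * Quick standalone check of the dword packing described in the comment above
 * (illustration only, not driver code).
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* example MAC from the comment: 00-0B-6A-F6-00-DC */
	uint8_t mac[6] = { 0x00, 0x0B, 0x6A, 0xF6, 0x00, 0xDC };
	uint32_t lo, hi;

	lo = ((uint32_t)mac[2] << 24) | ((uint32_t)mac[3] << 16) |
	     ((uint32_t)mac[4] << 8) | mac[5];
	hi = ((uint32_t)mac[0] << 8) | mac[1];

	printf("REG_MAC_STA_ADDR + 0: 0x%08X\n", lo);	/* 0x6AF600DC */
	printf("REG_MAC_STA_ADDR + 4: 0x%08X\n", hi);	/* 0x0000000B */
	return 0;
}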
diff --git a/drivers/net/atl1/atl1_hw.h b/drivers/net/atl1/atl1_hw.h
deleted file mode 100644
index 939aa0f53f6e..000000000000
--- a/drivers/net/atl1/atl1_hw.h
+++ /dev/null
@@ -1,946 +0,0 @@
1/*
2 * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved.
3 * Copyright(c) 2006 Chris Snook <csnook@redhat.com>
4 * Copyright(c) 2006 Jay Cliburn <jcliburn@gmail.com>
5 *
6 * Derived from Intel e1000 driver
7 * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the Free
11 * Software Foundation; either version 2 of the License, or (at your option)
12 * any later version.
13 *
14 * This program is distributed in the hope that it will be useful, but WITHOUT
15 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
17 * more details.
18 *
19 * You should have received a copy of the GNU General Public License along with
20 * this program; if not, write to the Free Software Foundation, Inc., 59
21 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
22 *
23 * There are a lot of defines in here that are unused and/or have cryptic
24 * names. Please leave them alone, as they're the closest thing we have
25 * to a spec from Attansic at present. *ahem* -- CHS
26 */
27
28#ifndef _ATL1_HW_H_
29#define _ATL1_HW_H_
30
31#include <linux/types.h>
32#include <linux/mii.h>
33
34struct atl1_adapter;
35struct atl1_hw;
36
37/* function prototypes needed by multiple files */
38s32 atl1_phy_setup_autoneg_adv(struct atl1_hw *hw);
39s32 atl1_write_phy_reg(struct atl1_hw *hw, u32 reg_addr, u16 phy_data);
40s32 atl1_get_speed_and_duplex(struct atl1_hw *hw, u16 *speed, u16 *duplex);
41s32 atl1_read_mac_addr(struct atl1_hw *hw);
42s32 atl1_init_hw(struct atl1_hw *hw);
43s32 atl1_get_speed_and_duplex(struct atl1_hw *hw, u16 *speed, u16 *duplex);
44s32 atl1_set_speed_and_duplex(struct atl1_hw *hw, u16 speed, u16 duplex);
45u32 atl1_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr);
46void atl1_hash_set(struct atl1_hw *hw, u32 hash_value);
47s32 atl1_read_phy_reg(struct atl1_hw *hw, u16 reg_addr, u16 *phy_data);
48void atl1_set_mac_addr(struct atl1_hw *hw);
49s32 atl1_phy_enter_power_saving(struct atl1_hw *hw);
50s32 atl1_reset_hw(struct atl1_hw *hw);
51void atl1_check_options(struct atl1_adapter *adapter);
52
53/* register definitions */
54#define REG_PCIE_CAP_LIST 0x58
55
56#define REG_VPD_CAP 0x6C
57#define VPD_CAP_ID_MASK 0xff
58#define VPD_CAP_ID_SHIFT 0
59#define VPD_CAP_NEXT_PTR_MASK 0xFF
60#define VPD_CAP_NEXT_PTR_SHIFT 8
61#define VPD_CAP_VPD_ADDR_MASK 0x7FFF
62#define VPD_CAP_VPD_ADDR_SHIFT 16
63#define VPD_CAP_VPD_FLAG 0x80000000
64
65#define REG_VPD_DATA 0x70
66
67#define REG_SPI_FLASH_CTRL 0x200
68#define SPI_FLASH_CTRL_STS_NON_RDY 0x1
69#define SPI_FLASH_CTRL_STS_WEN 0x2
70#define SPI_FLASH_CTRL_STS_WPEN 0x80
71#define SPI_FLASH_CTRL_DEV_STS_MASK 0xFF
72#define SPI_FLASH_CTRL_DEV_STS_SHIFT 0
73#define SPI_FLASH_CTRL_INS_MASK 0x7
74#define SPI_FLASH_CTRL_INS_SHIFT 8
75#define SPI_FLASH_CTRL_START 0x800
76#define SPI_FLASH_CTRL_EN_VPD 0x2000
77#define SPI_FLASH_CTRL_LDSTART 0x8000
78#define SPI_FLASH_CTRL_CS_HI_MASK 0x3
79#define SPI_FLASH_CTRL_CS_HI_SHIFT 16
80#define SPI_FLASH_CTRL_CS_HOLD_MASK 0x3
81#define SPI_FLASH_CTRL_CS_HOLD_SHIFT 18
82#define SPI_FLASH_CTRL_CLK_LO_MASK 0x3
83#define SPI_FLASH_CTRL_CLK_LO_SHIFT 20
84#define SPI_FLASH_CTRL_CLK_HI_MASK 0x3
85#define SPI_FLASH_CTRL_CLK_HI_SHIFT 22
86#define SPI_FLASH_CTRL_CS_SETUP_MASK 0x3
87#define SPI_FLASH_CTRL_CS_SETUP_SHIFT 24
88#define SPI_FLASH_CTRL_EROM_PGSZ_MASK 0x3
89#define SPI_FLASH_CTRL_EROM_PGSZ_SHIFT 26
90#define SPI_FLASH_CTRL_WAIT_READY 0x10000000
91
92#define REG_SPI_ADDR 0x204
93
94#define REG_SPI_DATA 0x208
95
96#define REG_SPI_FLASH_CONFIG 0x20C
97#define SPI_FLASH_CONFIG_LD_ADDR_MASK 0xFFFFFF
98#define SPI_FLASH_CONFIG_LD_ADDR_SHIFT 0
99#define SPI_FLASH_CONFIG_VPD_ADDR_MASK 0x3
100#define SPI_FLASH_CONFIG_VPD_ADDR_SHIFT 24
101#define SPI_FLASH_CONFIG_LD_EXIST 0x4000000
102
103#define REG_SPI_FLASH_OP_PROGRAM 0x210
104#define REG_SPI_FLASH_OP_SC_ERASE 0x211
105#define REG_SPI_FLASH_OP_CHIP_ERASE 0x212
106#define REG_SPI_FLASH_OP_RDID 0x213
107#define REG_SPI_FLASH_OP_WREN 0x214
108#define REG_SPI_FLASH_OP_RDSR 0x215
109#define REG_SPI_FLASH_OP_WRSR 0x216
110#define REG_SPI_FLASH_OP_READ 0x217
111
112#define REG_TWSI_CTRL 0x218
113#define TWSI_CTRL_LD_OFFSET_MASK 0xFF
114#define TWSI_CTRL_LD_OFFSET_SHIFT 0
115#define TWSI_CTRL_LD_SLV_ADDR_MASK 0x7
116#define TWSI_CTRL_LD_SLV_ADDR_SHIFT 8
117#define TWSI_CTRL_SW_LDSTART 0x800
118#define TWSI_CTRL_HW_LDSTART 0x1000
119#define TWSI_CTRL_SMB_SLV_ADDR_MASK 0x7F
120#define TWSI_CTRL_SMB_SLV_ADDR_SHIFT 15
121#define TWSI_CTRL_LD_EXIST 0x400000
122#define TWSI_CTRL_READ_FREQ_SEL_MASK 0x3
123#define TWSI_CTRL_READ_FREQ_SEL_SHIFT 23
124#define TWSI_CTRL_FREQ_SEL_100K 0
125#define TWSI_CTRL_FREQ_SEL_200K 1
126#define TWSI_CTRL_FREQ_SEL_300K 2
127#define TWSI_CTRL_FREQ_SEL_400K 3
128#define TWSI_CTRL_SMB_SLV_ADDR
129#define TWSI_CTRL_WRITE_FREQ_SEL_MASK 0x3
130#define TWSI_CTRL_WRITE_FREQ_SEL_SHIFT 24
131
132#define REG_PCIE_DEV_MISC_CTRL 0x21C
133#define PCIE_DEV_MISC_CTRL_EXT_PIPE 0x2
134#define PCIE_DEV_MISC_CTRL_RETRY_BUFDIS 0x1
135#define PCIE_DEV_MISC_CTRL_SPIROM_EXIST 0x4
136#define PCIE_DEV_MISC_CTRL_SERDES_ENDIAN 0x8
137#define PCIE_DEV_MISC_CTRL_SERDES_SEL_DIN 0x10
138
139/* Selene Master Control Register */
140#define REG_MASTER_CTRL 0x1400
141#define MASTER_CTRL_SOFT_RST 0x1
142#define MASTER_CTRL_MTIMER_EN 0x2
143#define MASTER_CTRL_ITIMER_EN 0x4
144#define MASTER_CTRL_MANUAL_INT 0x8
145#define MASTER_CTRL_REV_NUM_SHIFT 16
146#define MASTER_CTRL_REV_NUM_MASK 0xff
147#define MASTER_CTRL_DEV_ID_SHIFT 24
148#define MASTER_CTRL_DEV_ID_MASK 0xff
149
150/* Timer Initial Value Register */
151#define REG_MANUAL_TIMER_INIT 0x1404
152
153/* IRQ ModeratorTimer Initial Value Register */
154#define REG_IRQ_MODU_TIMER_INIT 0x1408
155
156#define REG_GPHY_ENABLE 0x140C
157
158/* IRQ Anti-Lost Timer Initial Value Register */
159#define REG_CMBDISDMA_TIMER 0x140E
160
161/* Block IDLE Status Register */
162#define REG_IDLE_STATUS 0x1410
163#define IDLE_STATUS_RXMAC 1
164#define IDLE_STATUS_TXMAC 2
165#define IDLE_STATUS_RXQ 4
166#define IDLE_STATUS_TXQ 8
167#define IDLE_STATUS_DMAR 0x10
168#define IDLE_STATUS_DMAW 0x20
169#define IDLE_STATUS_SMB 0x40
170#define IDLE_STATUS_CMB 0x80
171
172/* MDIO Control Register */
173#define REG_MDIO_CTRL 0x1414
174#define MDIO_DATA_MASK 0xffff
175#define MDIO_DATA_SHIFT 0
176#define MDIO_REG_ADDR_MASK 0x1f
177#define MDIO_REG_ADDR_SHIFT 16
178#define MDIO_RW 0x200000
179#define MDIO_SUP_PREAMBLE 0x400000
180#define MDIO_START 0x800000
181#define MDIO_CLK_SEL_SHIFT 24
182#define MDIO_CLK_25_4 0
183#define MDIO_CLK_25_6 2
184#define MDIO_CLK_25_8 3
185#define MDIO_CLK_25_10 4
186#define MDIO_CLK_25_14 5
187#define MDIO_CLK_25_20 6
188#define MDIO_CLK_25_28 7
189#define MDIO_BUSY 0x8000000
190#define MDIO_WAIT_TIMES 30
191
192/* MII PHY Status Register */
193#define REG_PHY_STATUS 0x1418
194
195/* BIST Control and Status Register0 (for the Packet Memory) */
196#define REG_BIST0_CTRL 0x141c
197#define BIST0_NOW 0x1
198#define BIST0_SRAM_FAIL 0x2
199#define BIST0_FUSE_FLAG 0x4
200#define REG_BIST1_CTRL 0x1420
201#define BIST1_NOW 0x1
202#define BIST1_SRAM_FAIL 0x2
203#define BIST1_FUSE_FLAG 0x4
204
205/* MAC Control Register */
206#define REG_MAC_CTRL 0x1480
207#define MAC_CTRL_TX_EN 1
208#define MAC_CTRL_RX_EN 2
209#define MAC_CTRL_TX_FLOW 4
210#define MAC_CTRL_RX_FLOW 8
211#define MAC_CTRL_LOOPBACK 0x10
212#define MAC_CTRL_DUPLX 0x20
213#define MAC_CTRL_ADD_CRC 0x40
214#define MAC_CTRL_PAD 0x80
215#define MAC_CTRL_LENCHK 0x100
216#define MAC_CTRL_HUGE_EN 0x200
217#define MAC_CTRL_PRMLEN_SHIFT 10
218#define MAC_CTRL_PRMLEN_MASK 0xf
219#define MAC_CTRL_RMV_VLAN 0x4000
220#define MAC_CTRL_PROMIS_EN 0x8000
221#define MAC_CTRL_TX_PAUSE 0x10000
222#define MAC_CTRL_SCNT 0x20000
223#define MAC_CTRL_SRST_TX 0x40000
224#define MAC_CTRL_TX_SIMURST 0x80000
225#define MAC_CTRL_SPEED_SHIFT 20
226#define MAC_CTRL_SPEED_MASK 0x300000
227#define MAC_CTRL_SPEED_1000 2
228#define MAC_CTRL_SPEED_10_100 1
229#define MAC_CTRL_DBG_TX_BKPRESURE 0x400000
230#define MAC_CTRL_TX_HUGE 0x800000
231#define MAC_CTRL_RX_CHKSUM_EN 0x1000000
232#define MAC_CTRL_MC_ALL_EN 0x2000000
233#define MAC_CTRL_BC_EN 0x4000000
234#define MAC_CTRL_DBG 0x8000000
235
236/* MAC IPG/IFG Control Register */
237#define REG_MAC_IPG_IFG 0x1484
238#define MAC_IPG_IFG_IPGT_SHIFT 0
239#define MAC_IPG_IFG_IPGT_MASK 0x7f
240#define MAC_IPG_IFG_MIFG_SHIFT 8
241#define MAC_IPG_IFG_MIFG_MASK 0xff
242#define MAC_IPG_IFG_IPGR1_SHIFT 16
243#define MAC_IPG_IFG_IPGR1_MASK 0x7f
244#define MAC_IPG_IFG_IPGR2_SHIFT 24
245#define MAC_IPG_IFG_IPGR2_MASK 0x7f
246
247/* MAC STATION ADDRESS */
248#define REG_MAC_STA_ADDR 0x1488
249
250/* Hash table for multicast address */
251#define REG_RX_HASH_TABLE 0x1490
252
253/* MAC Half-Duplex Control Register */
254#define REG_MAC_HALF_DUPLX_CTRL 0x1498
255#define MAC_HALF_DUPLX_CTRL_LCOL_SHIFT 0
256#define MAC_HALF_DUPLX_CTRL_LCOL_MASK 0x3ff
257#define MAC_HALF_DUPLX_CTRL_RETRY_SHIFT 12
258#define MAC_HALF_DUPLX_CTRL_RETRY_MASK 0xf
259#define MAC_HALF_DUPLX_CTRL_EXC_DEF_EN 0x10000
260#define MAC_HALF_DUPLX_CTRL_NO_BACK_C 0x20000
261#define MAC_HALF_DUPLX_CTRL_NO_BACK_P 0x40000
262#define MAC_HALF_DUPLX_CTRL_ABEBE 0x80000
263#define MAC_HALF_DUPLX_CTRL_ABEBT_SHIFT 20
264#define MAC_HALF_DUPLX_CTRL_ABEBT_MASK 0xf
265#define MAC_HALF_DUPLX_CTRL_JAMIPG_SHIFT 24
266#define MAC_HALF_DUPLX_CTRL_JAMIPG_MASK 0xf
267
268/* Maximum Frame Length Control Register */
269#define REG_MTU 0x149c
270
271/* Wake-On-Lan control register */
272#define REG_WOL_CTRL 0x14a0
273#define WOL_PATTERN_EN 0x00000001
274#define WOL_PATTERN_PME_EN 0x00000002
275#define WOL_MAGIC_EN 0x00000004
276#define WOL_MAGIC_PME_EN 0x00000008
277#define WOL_LINK_CHG_EN 0x00000010
278#define WOL_LINK_CHG_PME_EN 0x00000020
279#define WOL_PATTERN_ST 0x00000100
280#define WOL_MAGIC_ST 0x00000200
281#define WOL_LINKCHG_ST 0x00000400
282#define WOL_CLK_SWITCH_EN 0x00008000
283#define WOL_PT0_EN 0x00010000
284#define WOL_PT1_EN 0x00020000
285#define WOL_PT2_EN 0x00040000
286#define WOL_PT3_EN 0x00080000
287#define WOL_PT4_EN 0x00100000
288#define WOL_PT5_EN 0x00200000
289#define WOL_PT6_EN 0x00400000
290
291/* WOL Length ( 2 DWORD ) */
292#define REG_WOL_PATTERN_LEN 0x14a4
293#define WOL_PT_LEN_MASK 0x7f
294#define WOL_PT0_LEN_SHIFT 0
295#define WOL_PT1_LEN_SHIFT 8
296#define WOL_PT2_LEN_SHIFT 16
297#define WOL_PT3_LEN_SHIFT 24
298#define WOL_PT4_LEN_SHIFT 0
299#define WOL_PT5_LEN_SHIFT 8
300#define WOL_PT6_LEN_SHIFT 16
301
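/*
 * Sketch: the WOL_PT*_LEN_SHIFT values above imply that the 7-bit pattern
 * lengths for patterns 0-3 share the first dword at REG_WOL_PATTERN_LEN and
 * patterns 4-6 share the second. The packing below only illustrates that
 * layout; the driver's WOL support itself is still listed as a TODO.
 */
#include <stdint.h>

#define WOL_PT_LEN_MASK		0x7f

static void wol_pack_lengths(const uint8_t len[7], uint32_t dword[2])
{
	dword[0] = ((uint32_t)(len[0] & WOL_PT_LEN_MASK) << 0) |
		   ((uint32_t)(len[1] & WOL_PT_LEN_MASK) << 8) |
		   ((uint32_t)(len[2] & WOL_PT_LEN_MASK) << 16) |
		   ((uint32_t)(len[3] & WOL_PT_LEN_MASK) << 24);
	dword[1] = ((uint32_t)(len[4] & WOL_PT_LEN_MASK) << 0) |
		   ((uint32_t)(len[5] & WOL_PT_LEN_MASK) << 8) |
		   ((uint32_t)(len[6] & WOL_PT_LEN_MASK) << 16);
}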
302/* Internal SRAM Partition Register */
303#define REG_SRAM_RFD_ADDR 0x1500
304#define REG_SRAM_RFD_LEN (REG_SRAM_RFD_ADDR+ 4)
305#define REG_SRAM_RRD_ADDR (REG_SRAM_RFD_ADDR+ 8)
306#define REG_SRAM_RRD_LEN (REG_SRAM_RFD_ADDR+12)
307#define REG_SRAM_TPD_ADDR (REG_SRAM_RFD_ADDR+16)
308#define REG_SRAM_TPD_LEN (REG_SRAM_RFD_ADDR+20)
309#define REG_SRAM_TRD_ADDR (REG_SRAM_RFD_ADDR+24)
310#define REG_SRAM_TRD_LEN (REG_SRAM_RFD_ADDR+28)
311#define REG_SRAM_RXF_ADDR (REG_SRAM_RFD_ADDR+32)
312#define REG_SRAM_RXF_LEN (REG_SRAM_RFD_ADDR+36)
313#define REG_SRAM_TXF_ADDR (REG_SRAM_RFD_ADDR+40)
314#define REG_SRAM_TXF_LEN (REG_SRAM_RFD_ADDR+44)
315#define REG_SRAM_TCPH_PATH_ADDR (REG_SRAM_RFD_ADDR+48)
316#define SRAM_TCPH_ADDR_MASK 0x0fff
317#define SRAM_TCPH_ADDR_SHIFT 0
318#define SRAM_PATH_ADDR_MASK 0x0fff
319#define SRAM_PATH_ADDR_SHIFT 16
320
321/* Load Ptr Register */
322#define REG_LOAD_PTR (REG_SRAM_RFD_ADDR+52)
323
324/* Descriptor Control register */
325#define REG_DESC_BASE_ADDR_HI 0x1540
326#define REG_DESC_RFD_ADDR_LO (REG_DESC_BASE_ADDR_HI+4)
327#define REG_DESC_RRD_ADDR_LO (REG_DESC_BASE_ADDR_HI+8)
328#define REG_DESC_TPD_ADDR_LO (REG_DESC_BASE_ADDR_HI+12)
329#define REG_DESC_CMB_ADDR_LO (REG_DESC_BASE_ADDR_HI+16)
330#define REG_DESC_SMB_ADDR_LO (REG_DESC_BASE_ADDR_HI+20)
331#define REG_DESC_RFD_RRD_RING_SIZE (REG_DESC_BASE_ADDR_HI+24)
332#define DESC_RFD_RING_SIZE_MASK 0x7ff
333#define DESC_RFD_RING_SIZE_SHIFT 0
334#define DESC_RRD_RING_SIZE_MASK 0x7ff
335#define DESC_RRD_RING_SIZE_SHIFT 16
336#define REG_DESC_TPD_RING_SIZE (REG_DESC_BASE_ADDR_HI+28)
337#define DESC_TPD_RING_SIZE_MASK 0x3ff
338#define DESC_TPD_RING_SIZE_SHIFT 0
339
340/* TXQ Control Register */
341#define REG_TXQ_CTRL 0x1580
342#define TXQ_CTRL_TPD_BURST_NUM_SHIFT 0
343#define TXQ_CTRL_TPD_BURST_NUM_MASK 0x1f
344#define TXQ_CTRL_EN 0x20
345#define TXQ_CTRL_ENH_MODE 0x40
346#define TXQ_CTRL_TPD_FETCH_TH_SHIFT 8
347#define TXQ_CTRL_TPD_FETCH_TH_MASK 0x3f
348#define TXQ_CTRL_TXF_BURST_NUM_SHIFT 16
349#define TXQ_CTRL_TXF_BURST_NUM_MASK 0xffff
350
351/* Jumbo packet Threshold for task offload */
352#define REG_TX_JUMBO_TASK_TH_TPD_IPG 0x1584
353#define TX_JUMBO_TASK_TH_MASK 0x7ff
354#define TX_JUMBO_TASK_TH_SHIFT 0
355#define TX_TPD_MIN_IPG_MASK 0x1f
356#define TX_TPD_MIN_IPG_SHIFT 16
357
358/* RXQ Control Register */
359#define REG_RXQ_CTRL 0x15a0
360#define RXQ_CTRL_RFD_BURST_NUM_SHIFT 0
361#define RXQ_CTRL_RFD_BURST_NUM_MASK 0xff
362#define RXQ_CTRL_RRD_BURST_THRESH_SHIFT 8
363#define RXQ_CTRL_RRD_BURST_THRESH_MASK 0xff
364#define RXQ_CTRL_RFD_PREF_MIN_IPG_SHIFT 16
365#define RXQ_CTRL_RFD_PREF_MIN_IPG_MASK 0x1f
366#define RXQ_CTRL_CUT_THRU_EN 0x40000000
367#define RXQ_CTRL_EN 0x80000000
368
369/* Rx jumbo packet threshold and rrd retirement timer */
370#define REG_RXQ_JMBOSZ_RRDTIM (REG_RXQ_CTRL+ 4)
371#define RXQ_JMBOSZ_TH_MASK 0x7ff
372#define RXQ_JMBOSZ_TH_SHIFT 0
373#define RXQ_JMBO_LKAH_MASK 0xf
374#define RXQ_JMBO_LKAH_SHIFT 11
375#define RXQ_RRD_TIMER_MASK 0xffff
376#define RXQ_RRD_TIMER_SHIFT 16
377
378/* RFD flow control register */
379#define REG_RXQ_RXF_PAUSE_THRESH (REG_RXQ_CTRL+ 8)
380#define RXQ_RXF_PAUSE_TH_HI_SHIFT 16
381#define RXQ_RXF_PAUSE_TH_HI_MASK 0xfff
382#define RXQ_RXF_PAUSE_TH_LO_SHIFT 0
383#define RXQ_RXF_PAUSE_TH_LO_MASK 0xfff
384
385/* RRD flow control register */
386#define REG_RXQ_RRD_PAUSE_THRESH (REG_RXQ_CTRL+12)
387#define RXQ_RRD_PAUSE_TH_HI_SHIFT 0
388#define RXQ_RRD_PAUSE_TH_HI_MASK 0xfff
389#define RXQ_RRD_PAUSE_TH_LO_SHIFT 16
390#define RXQ_RRD_PAUSE_TH_LO_MASK 0xfff
391
392/* DMA Engine Control Register */
393#define REG_DMA_CTRL 0x15c0
394#define DMA_CTRL_DMAR_IN_ORDER 0x1
395#define DMA_CTRL_DMAR_ENH_ORDER 0x2
396#define DMA_CTRL_DMAR_OUT_ORDER 0x4
397#define DMA_CTRL_RCB_VALUE 0x8
398#define DMA_CTRL_DMAR_BURST_LEN_SHIFT 4
399#define DMA_CTRL_DMAR_BURST_LEN_MASK 7
400#define DMA_CTRL_DMAW_BURST_LEN_SHIFT 7
401#define DMA_CTRL_DMAW_BURST_LEN_MASK 7
402#define DMA_CTRL_DMAR_EN 0x400
403#define DMA_CTRL_DMAW_EN 0x800
404
405/* CMB/SMB Control Register */
406#define REG_CSMB_CTRL 0x15d0
407#define CSMB_CTRL_CMB_NOW 1
408#define CSMB_CTRL_SMB_NOW 2
409#define CSMB_CTRL_CMB_EN 4
410#define CSMB_CTRL_SMB_EN 8
411
412/* CMB DMA Write Threshold Register */
413#define REG_CMB_WRITE_TH (REG_CSMB_CTRL+ 4)
414#define CMB_RRD_TH_SHIFT 0
415#define CMB_RRD_TH_MASK 0x7ff
416#define CMB_TPD_TH_SHIFT 16
417#define CMB_TPD_TH_MASK 0x7ff
418
419/* RX/TX count-down timer to trigger CMB-write. 2us resolution. */
420#define REG_CMB_WRITE_TIMER (REG_CSMB_CTRL+ 8)
421#define CMB_RX_TM_SHIFT 0
422#define CMB_RX_TM_MASK 0xffff
423#define CMB_TX_TM_SHIFT 16
424#define CMB_TX_TM_MASK 0xffff
425
426/* Number of packet received since last CMB write */
427#define REG_CMB_RX_PKT_CNT (REG_CSMB_CTRL+12)
428
429/* Number of packet transmitted since last CMB write */
430#define REG_CMB_TX_PKT_CNT (REG_CSMB_CTRL+16)
431
432/* SMB auto DMA timer register */
433#define REG_SMB_TIMER (REG_CSMB_CTRL+20)
434
435/* Mailbox Register */
436#define REG_MAILBOX 0x15f0
437#define MB_RFD_PROD_INDX_SHIFT 0
438#define MB_RFD_PROD_INDX_MASK 0x7ff
439#define MB_RRD_CONS_INDX_SHIFT 11
440#define MB_RRD_CONS_INDX_MASK 0x7ff
441#define MB_TPD_PROD_INDX_SHIFT 22
442#define MB_TPD_PROD_INDX_MASK 0x3ff
443
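/*
 * Sketch: the mailbox dword carries three ring indices at once, per the
 * MB_*_INDX_SHIFT/MASK values above. This helper only illustrates the bit
 * layout; how and when the driver writes REG_MAILBOX is in the main driver
 * file.
 */
#include <stdint.h>

#define MB_RFD_PROD_INDX_SHIFT	0
#define MB_RFD_PROD_INDX_MASK	0x7ff
#define MB_RRD_CONS_INDX_SHIFT	11
#define MB_RRD_CONS_INDX_MASK	0x7ff
#define MB_TPD_PROD_INDX_SHIFT	22
#define MB_TPD_PROD_INDX_MASK	0x3ff

static uint32_t mailbox_word(uint32_t rfd_prod, uint32_t rrd_cons,
			     uint32_t tpd_prod)
{
	return ((rfd_prod & MB_RFD_PROD_INDX_MASK) << MB_RFD_PROD_INDX_SHIFT) |
	       ((rrd_cons & MB_RRD_CONS_INDX_MASK) << MB_RRD_CONS_INDX_SHIFT) |
	       ((tpd_prod & MB_TPD_PROD_INDX_MASK) << MB_TPD_PROD_INDX_SHIFT);
}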
444/* Interrupt Status Register */
445#define REG_ISR 0x1600
446#define ISR_SMB 1
447#define ISR_TIMER 2
448#define ISR_MANUAL 4
449#define ISR_RXF_OV 8
450#define ISR_RFD_UNRUN 0x10
451#define ISR_RRD_OV 0x20
452#define ISR_TXF_UNRUN 0x40
453#define ISR_LINK 0x80
454#define ISR_HOST_RFD_UNRUN 0x100
455#define ISR_HOST_RRD_OV 0x200
456#define ISR_DMAR_TO_RST 0x400
457#define ISR_DMAW_TO_RST 0x800
458#define ISR_GPHY 0x1000
459#define ISR_RX_PKT 0x10000
460#define ISR_TX_PKT 0x20000
461#define ISR_TX_DMA 0x40000
462#define ISR_RX_DMA 0x80000
463#define ISR_CMB_RX 0x100000
464#define ISR_CMB_TX 0x200000
465#define ISR_MAC_RX 0x400000
466#define ISR_MAC_TX 0x800000
467#define ISR_UR_DETECTED 0x1000000
468#define ISR_FERR_DETECTED 0x2000000
469#define ISR_NFERR_DETECTED 0x4000000
470#define ISR_CERR_DETECTED 0x8000000
471#define ISR_PHY_LINKDOWN 0x10000000
472#define ISR_DIS_SMB 0x20000000
473#define ISR_DIS_DMA 0x40000000
474#define ISR_DIS_INT 0x80000000
475
476/* Interrupt Mask Register */
477#define REG_IMR 0x1604
478
479/* Normal Interrupt mask */
480#define IMR_NORMAL_MASK (\
481 ISR_SMB |\
482 ISR_GPHY |\
483 ISR_PHY_LINKDOWN|\
484 ISR_DMAR_TO_RST |\
485 ISR_DMAW_TO_RST |\
486 ISR_CMB_TX |\
487 ISR_CMB_RX )
488
489/* Debug Interrupt Mask (enable all interrupt) */
490#define IMR_DEBUG_MASK (\
491 ISR_SMB |\
492 ISR_TIMER |\
493 ISR_MANUAL |\
494 ISR_RXF_OV |\
495 ISR_RFD_UNRUN |\
496 ISR_RRD_OV |\
497 ISR_TXF_UNRUN |\
498 ISR_LINK |\
499 ISR_CMB_TX |\
500 ISR_CMB_RX |\
501 ISR_RX_PKT |\
502 ISR_TX_PKT |\
503 ISR_MAC_RX |\
504 ISR_MAC_TX )
505
506/* Interrupt Status Register */
507#define REG_RFD_RRD_IDX 0x1800
508#define REG_TPD_IDX 0x1804
509
510/* MII definition */
511/* PHY Common Register */
512#define MII_AT001_CR 0x09
513#define MII_AT001_SR 0x0A
514#define MII_AT001_ESR 0x0F
515#define MII_AT001_PSCR 0x10
516#define MII_AT001_PSSR 0x11
517
518/* PHY Control Register */
519#define MII_CR_SPEED_SELECT_MSB 0x0040 /* bits 6,13: 10=1000, 01=100, 00=10 */
520#define MII_CR_COLL_TEST_ENABLE 0x0080 /* Collision test enable */
521#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */
522#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */
523#define MII_CR_ISOLATE 0x0400 /* Isolate PHY from MII */
524#define MII_CR_POWER_DOWN 0x0800 /* Power down */
525#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */
526#define MII_CR_SPEED_SELECT_LSB 0x2000 /* bits 6,13: 10=1000, 01=100, 00=10 */
527#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */
528#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */
529#define MII_CR_SPEED_MASK 0x2040
530#define MII_CR_SPEED_1000 0x0040
531#define MII_CR_SPEED_100 0x2000
532#define MII_CR_SPEED_10 0x0000
533
534/* PHY Status Register */
535#define MII_SR_EXTENDED_CAPS 0x0001 /* Extended register capabilities */
536#define MII_SR_JABBER_DETECT 0x0002 /* Jabber Detected */
537#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */
538#define MII_SR_AUTONEG_CAPS 0x0008 /* Auto Neg Capable */
539#define MII_SR_REMOTE_FAULT 0x0010 /* Remote Fault Detect */
540#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */
541#define MII_SR_PREAMBLE_SUPPRESS 0x0040 /* Preamble may be suppressed */
542#define MII_SR_EXTENDED_STATUS 0x0100 /* Ext. status info in Reg 0x0F */
543#define MII_SR_100T2_HD_CAPS 0x0200 /* 100T2 Half Duplex Capable */
544#define MII_SR_100T2_FD_CAPS 0x0400 /* 100T2 Full Duplex Capable */
545#define MII_SR_10T_HD_CAPS 0x0800 /* 10T Half Duplex Capable */
546#define MII_SR_10T_FD_CAPS 0x1000 /* 10T Full Duplex Capable */
547#define MII_SR_100X_HD_CAPS 0x2000 /* 100X Half Duplex Capable */
548#define MII_SR_100X_FD_CAPS 0x4000 /* 100X Full Duplex Capable */
549#define MII_SR_100T4_CAPS 0x8000 /* 100T4 Capable */
550
551/* Link partner ability register. */
552#define MII_LPA_SLCT 0x001f /* Same as advertise selector */
553#define MII_LPA_10HALF 0x0020 /* Can do 10mbps half-duplex */
554#define MII_LPA_10FULL 0x0040 /* Can do 10mbps full-duplex */
555#define MII_LPA_100HALF 0x0080 /* Can do 100mbps half-duplex */
556#define MII_LPA_100FULL 0x0100 /* Can do 100mbps full-duplex */
557#define MII_LPA_100BASE4 0x0200 /* 100BASE-T4 */
558#define MII_LPA_PAUSE 0x0400 /* PAUSE */
559#define MII_LPA_ASYPAUSE 0x0800 /* Asymmetrical PAUSE */
560#define MII_LPA_RFAULT 0x2000 /* Link partner faulted */
561#define MII_LPA_LPACK 0x4000 /* Link partner acked us */
562#define MII_LPA_NPAGE 0x8000 /* Next page bit */
563
564/* Autoneg Advertisement Register */
565#define MII_AR_SELECTOR_FIELD 0x0001 /* indicates IEEE 802.3 CSMA/CD */
566#define MII_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */
567#define MII_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */
568#define MII_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */
569#define MII_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */
570#define MII_AR_100T4_CAPS 0x0200 /* 100T4 Capable */
571#define MII_AR_PAUSE 0x0400 /* Pause operation desired */
572#define MII_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */
573#define MII_AR_REMOTE_FAULT 0x2000 /* Remote Fault detected */
574#define MII_AR_NEXT_PAGE 0x8000 /* Next Page ability supported */
575#define MII_AR_SPEED_MASK 0x01E0
576#define MII_AR_DEFAULT_CAP_MASK 0x0DE0
577
578/* 1000BASE-T Control Register */
579#define MII_AT001_CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD capability */
580#define MII_AT001_CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */
581#define MII_AT001_CR_1000T_REPEATER_DTE 0x0400 /* 1=Repeater/switch device port, 0=DTE device */
582#define MII_AT001_CR_1000T_MS_VALUE 0x0800 /* 1=Configure PHY as Master, 0=Configure PHY as Slave */
583#define MII_AT001_CR_1000T_MS_ENABLE 0x1000 /* 1=Master/Slave manual config value, 0=Automatic Master/Slave config */
584#define MII_AT001_CR_1000T_TEST_MODE_NORMAL 0x0000 /* Normal Operation */
585#define MII_AT001_CR_1000T_TEST_MODE_1 0x2000 /* Transmit Waveform test */
586#define MII_AT001_CR_1000T_TEST_MODE_2 0x4000 /* Master Transmit Jitter test */
587#define MII_AT001_CR_1000T_TEST_MODE_3 0x6000 /* Slave Transmit Jitter test */
588#define MII_AT001_CR_1000T_TEST_MODE_4 0x8000 /* Transmitter Distortion test */
589#define MII_AT001_CR_1000T_SPEED_MASK 0x0300
590#define MII_AT001_CR_1000T_DEFAULT_CAP_MASK 0x0300
591
592/* 1000BASE-T Status Register */
593#define MII_AT001_SR_1000T_LP_HD_CAPS 0x0400 /* LP is 1000T HD capable */
594#define MII_AT001_SR_1000T_LP_FD_CAPS 0x0800 /* LP is 1000T FD capable */
595#define MII_AT001_SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */
596#define MII_AT001_SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */
597#define MII_AT001_SR_1000T_MS_CONFIG_RES 0x4000 /* 1=Local TX is Master, 0=Slave */
598#define MII_AT001_SR_1000T_MS_CONFIG_FAULT 0x8000 /* Master/Slave config fault */
599#define MII_AT001_SR_1000T_REMOTE_RX_STATUS_SHIFT 12
600#define MII_AT001_SR_1000T_LOCAL_RX_STATUS_SHIFT 13
601
602/* Extended Status Register */
603#define MII_AT001_ESR_1000T_HD_CAPS 0x1000 /* 1000T HD capable */
604#define MII_AT001_ESR_1000T_FD_CAPS 0x2000 /* 1000T FD capable */
605#define MII_AT001_ESR_1000X_HD_CAPS 0x4000 /* 1000X HD capable */
606#define MII_AT001_ESR_1000X_FD_CAPS 0x8000 /* 1000X FD capable */
607
608/* AT001 PHY Specific Control Register */
609#define MII_AT001_PSCR_JABBER_DISABLE 0x0001 /* 1=Jabber Function disabled */
610#define MII_AT001_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reversal enabled */
611#define MII_AT001_PSCR_SQE_TEST 0x0004 /* 1=SQE Test enabled */
612#define MII_AT001_PSCR_MAC_POWERDOWN 0x0008
613#define MII_AT001_PSCR_CLK125_DISABLE 0x0010 /* 1=CLK125 low, 0=CLK125 toggling */
614#define MII_AT001_PSCR_MDI_MANUAL_MODE 0x0000 /* MDI Crossover Mode bits 6:5, Manual MDI configuration */
615#define MII_AT001_PSCR_MDIX_MANUAL_MODE 0x0020 /* Manual MDIX configuration */
616#define MII_AT001_PSCR_AUTO_X_1000T 0x0040 /* 1000BASE-T: Auto crossover, 100BASE-TX/10BASE-T: MDI Mode */
617#define MII_AT001_PSCR_AUTO_X_MODE 0x0060 /* Auto crossover enabled all speeds. */
618#define MII_AT001_PSCR_10BT_EXT_DIST_ENABLE 0x0080 /* 1=Enable Extended 10BASE-T distance (Lower 10BASE-T RX Threshold), 0=Normal 10BASE-T RX Threshold */
619#define MII_AT001_PSCR_MII_5BIT_ENABLE 0x0100 /* 1=5-Bit interface in 100BASE-TX, 0=MII interface in 100BASE-TX */
620#define MII_AT001_PSCR_SCRAMBLER_DISABLE 0x0200 /* 1=Scrambler disable */
621#define MII_AT001_PSCR_FORCE_LINK_GOOD 0x0400 /* 1=Force link good */
622#define MII_AT001_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Transmit */
623#define MII_AT001_PSCR_POLARITY_REVERSAL_SHIFT 1
624#define MII_AT001_PSCR_AUTO_X_MODE_SHIFT 5
625#define MII_AT001_PSCR_10BT_EXT_DIST_ENABLE_SHIFT 7
626
627/* AT001 PHY Specific Status Register */
628#define MII_AT001_PSSR_SPD_DPLX_RESOLVED 0x0800 /* 1=Speed & Duplex resolved */
629#define MII_AT001_PSSR_DPLX 0x2000 /* 1=Duplex 0=Half Duplex */
630#define MII_AT001_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */
631#define MII_AT001_PSSR_10MBS 0x0000 /* 00=10Mbs */
632#define MII_AT001_PSSR_100MBS 0x4000 /* 01=100Mbs */
633#define MII_AT001_PSSR_1000MBS 0x8000 /* 10=1000Mbs */
634
635/* PCI Command Register Bit Definitions */
636#define PCI_REG_COMMAND 0x04 /* PCI Command Register */
637#define CMD_IO_SPACE 0x0001
638#define CMD_MEMORY_SPACE 0x0002
639#define CMD_BUS_MASTER 0x0004
640
641/* Wake Up Filter Control */
642#define ATL1_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
643#define ATL1_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */
644#define ATL1_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */
645#define ATL1_WUFC_MC 0x00000008 /* Multicast Wakeup Enable */
646#define ATL1_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */
647
648/* Error Codes */
649#define ATL1_SUCCESS 0
650#define ATL1_ERR_EEPROM 1
651#define ATL1_ERR_PHY 2
652#define ATL1_ERR_CONFIG 3
653#define ATL1_ERR_PARAM 4
654#define ATL1_ERR_MAC_TYPE 5
655#define ATL1_ERR_PHY_TYPE 6
656#define ATL1_ERR_PHY_SPEED 7
657#define ATL1_ERR_PHY_RES 8
658
659#define SPEED_0 0xffff
660#define SPEED_10 10
661#define SPEED_100 100
662#define SPEED_1000 1000
663#define HALF_DUPLEX 1
664#define FULL_DUPLEX 2
665
666#define MEDIA_TYPE_AUTO_SENSOR 0
667#define MEDIA_TYPE_1000M_FULL 1
668#define MEDIA_TYPE_100M_FULL 2
669#define MEDIA_TYPE_100M_HALF 3
670#define MEDIA_TYPE_10M_FULL 4
671#define MEDIA_TYPE_10M_HALF 5
672
673#define ADVERTISE_10_HALF 0x0001
674#define ADVERTISE_10_FULL 0x0002
675#define ADVERTISE_100_HALF 0x0004
676#define ADVERTISE_100_FULL 0x0008
677#define ADVERTISE_1000_HALF 0x0010
678#define ADVERTISE_1000_FULL 0x0020
679#define AUTONEG_ADVERTISE_SPEED_DEFAULT 0x002F /* Everything but 1000-Half */
680#define AUTONEG_ADVERTISE_10_100_ALL 0x000F /* All 10/100 speeds */
681#define AUTONEG_ADVERTISE_10_ALL 0x0003 /* 10Mbps Full & Half speeds */
682
683#define MAX_JUMBO_FRAME_SIZE 0x2800
684
685#define PHY_AUTO_NEG_TIME 45 /* 4.5 Seconds */
686#define PHY_FORCE_TIME 20 /* 2.0 Seconds */
687
688/* For checksumming, the sum of all words in the EEPROM should equal 0xBABA */
689#define EEPROM_SUM 0xBABA
690
691#define ATL1_EEDUMP_LEN 48
692
693/* Statistics counters collected by the MAC */
694struct stats_msg_block {
695 /* rx */
696 u32 rx_ok; /* The number of good packets received. */
697 u32 rx_bcast; /* The number of good broadcast packets received. */
698 u32 rx_mcast; /* The number of good multicast packets received. */
699 u32 rx_pause; /* The number of Pause packets received. */
700 u32 rx_ctrl; /* The number of Control packets received, other than Pause frames. */
701 u32 rx_fcs_err; /* The number of packets received with a bad FCS. */
702 u32 rx_len_err; /* The number of packets received whose length field does not match their actual size. */
703 u32 rx_byte_cnt; /* The number of bytes of good packets received. FCS is NOT included. */
704 u32 rx_runt; /* The number of packets received that are less than 64 bytes long and have a good FCS. */
705 u32 rx_frag; /* The number of packets received that are less than 64 bytes long and have a bad FCS. */
706 u32 rx_sz_64; /* The number of good and bad packets received that are 64 bytes long. */
707 u32 rx_sz_65_127; /* The number of good and bad packets received that are between 65 and 127-byte long. */
708 u32 rx_sz_128_255; /* The number of good and bad packets received that are between 128 and 255-byte long. */
709 u32 rx_sz_256_511; /* The number of good and bad packets received that are between 256 and 511-byte long. */
710 u32 rx_sz_512_1023; /* The number of good and bad packets received that are between 512 and 1023-byte long. */
711 u32 rx_sz_1024_1518; /* The number of good and bad packets received that are between 1024 and 1518-byte long. */
712 u32 rx_sz_1519_max; /* The number of good and bad packets received that are between 1519-byte and MTU. */
713 u32 rx_sz_ov; /* The number of good and bad packets received that are more than MTU size, truncated by Selene. */
714 u32 rx_rxf_ov; /* The number of frame dropped due to occurrence of RX FIFO overflow. */
715 u32 rx_rrd_ov; /* The number of frame dropped due to occurrence of RRD overflow. */
716 u32 rx_align_err; /* Alignment Error */
717 u32 rx_bcast_byte_cnt; /* The byte count of broadcast packets received, excluding FCS. */
718 u32 rx_mcast_byte_cnt; /* The byte count of multicast packets received, excluding FCS. */
719 u32 rx_err_addr; /* The number of packets dropped due to address filtering. */
720
721 /* tx */
722 u32 tx_ok; /* The number of good packets transmitted. */
723 u32 tx_bcast; /* The number of good broadcast packets transmitted. */
724 u32 tx_mcast; /* The number of good multicast packets transmitted. */
725 u32 tx_pause; /* The number of Pause packets transmitted. */
726 u32 tx_exc_defer; /* The number of packets transmitted with excessive deferral. */
727 u32 tx_ctrl; /* The number of control frames transmitted, excluding Pause frames. */
728 u32 tx_defer; /* The number of packets transmitted that were deferred. */
729 u32 tx_byte_cnt; /* The number of bytes of data transmitted. FCS is NOT included. */
730 u32 tx_sz_64; /* The number of good and bad packets transmitted that are 64 byte long. */
731 u32 tx_sz_65_127; /* The number of good and bad packets transmitted that are between 65 and 127-byte long. */
732 u32 tx_sz_128_255; /* The number of good and bad packets transmitted that are between 128 and 255-byte long. */
733 u32 tx_sz_256_511; /* The number of good and bad packets transmitted that are between 256 and 511-byte long. */
734 u32 tx_sz_512_1023; /* The number of good and bad packets transmitted that are between 512 and 1023-byte long. */
735 u32 tx_sz_1024_1518; /* The number of good and bad packets transmitted that are between 1024 and 1518-byte long. */
736 u32 tx_sz_1519_max; /* The number of good and bad packets transmitted that are between 1519-byte and MTU. */
737 u32 tx_1_col; /* The number of packets subsequently transmitted successfully with a single prior collision. */
738 u32 tx_2_col; /* The number of packets subsequently transmitted successfully with multiple prior collisions. */
739 u32 tx_late_col; /* The number of packets transmitted with late collisions. */
740 u32 tx_abort_col; /* The number of transmit packets aborted due to excessive collisions. */
741 u32 tx_underrun; /* The number of transmit packets aborted due to transmit FIFO underrun, or TRD FIFO underrun */
742 u32 tx_rd_eop; /* The number of times a read went beyond the EOP into the next frame area because the TRD was not written in time. */
743 u32 tx_len_err; /* The number of transmit packets whose length field does NOT match the actual frame size. */
744 u32 tx_trunc; /* The number of transmit packets truncated due to size exceeding MTU. */
745 u32 tx_bcast_byte; /* The byte count of broadcast packets transmitted, excluding FCS. */
746 u32 tx_mcast_byte; /* The byte count of multicast packets transmitted, excluding FCS. */
747 u32 smb_updated; /* 1: SMB updated. Used by software as the indication that the statistics have been updated.
748 * Software should clear this bit as soon as it has retrieved the statistics information. */
749};
750
751/* Coalescing Message Block */
752struct coals_msg_block {
753 u32 int_stats; /* interrupt status */
754 u16 rrd_prod_idx; /* RRD Producer Index. */
755 u16 rfd_cons_idx; /* RFD Consumer Index. */
756 u16 update; /* Selene sets this bit every time it DMAs the CMB to host memory.
757 * Software is supposed to clear this bit once the CMB information has been processed. */
758 u16 tpd_cons_idx; /* TPD Consumer Index. */
759};
760
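/*
 * Sketch of the consumption pattern the comments above describe: hardware
 * DMAs the CMB to host memory and sets 'update'; software reads the indices
 * and clears the bit. The mirrored struct and helper below are purely
 * illustrative -- the real handling lives in the driver's interrupt path.
 */
#include <stdint.h>

struct cmb_view {		/* mirrors struct coals_msg_block above */
	uint32_t int_stats;
	uint16_t rrd_prod_idx;
	uint16_t rfd_cons_idx;
	uint16_t update;
	uint16_t tpd_cons_idx;
};

static int consume_cmb(struct cmb_view *cmb,
		       uint16_t *rrd_prod, uint16_t *tpd_cons)
{
	if (!cmb->update)
		return 0;		/* nothing new since the last DMA */

	*rrd_prod = cmb->rrd_prod_idx;	/* newly produced RRDs to reap */
	*tpd_cons = cmb->tpd_cons_idx;	/* TPDs the hardware has retired */

	cmb->update = 0;		/* acknowledge, as the comment requires */
	return 1;
}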
761/* RRD descriptor */
762struct rx_return_desc {
763 u8 num_buf; /* Number of RFD buffers used by the received packet */
764 u8 resved;
765 u16 buf_indx; /* RFD Index of the first buffer */
766 union {
767 u32 valid;
768 struct {
769 u16 rx_chksum;
770 u16 pkt_size;
771 } xsum_sz;
772 } xsz;
773
774 u16 pkt_flg; /* Packet flags */
775 u16 err_flg; /* Error flags */
776 u16 resved2;
777 u16 vlan_tag; /* VLAN TAG */
778};
779
780#define PACKET_FLAG_ETH_TYPE 0x0080
781#define PACKET_FLAG_VLAN_INS 0x0100
782#define PACKET_FLAG_ERR 0x0200
783#define PACKET_FLAG_IPV4 0x0400
784#define PACKET_FLAG_UDP 0x0800
785#define PACKET_FLAG_TCP 0x1000
786#define PACKET_FLAG_BCAST 0x2000
787#define PACKET_FLAG_MCAST 0x4000
788#define PACKET_FLAG_PAUSE 0x8000
789
790#define ERR_FLAG_CRC 0x0001
791#define ERR_FLAG_CODE 0x0002
792#define ERR_FLAG_DRIBBLE 0x0004
793#define ERR_FLAG_RUNT 0x0008
794#define ERR_FLAG_OV 0x0010
795#define ERR_FLAG_TRUNC 0x0020
796#define ERR_FLAG_IP_CHKSUM 0x0040
797#define ERR_FLAG_L4_CHKSUM 0x0080
798#define ERR_FLAG_LEN 0x0100
799#define ERR_FLAG_DES_ADDR 0x0200
800
801/* RFD descriptor */
802struct rx_free_desc {
803 __le64 buffer_addr; /* Address of the descriptor's data buffer */
804 __le16 buf_len; /* Size of the receive buffer in host memory, in byte */
805 u16 coalese; /* Update consumer index to host after the reception of this frame */
806 /* __attribute__ ((packed)) is required */
807} __attribute__ ((packed));
808
809/* tsopu defines */
810#define TSO_PARAM_BUFLEN_MASK 0x3FFF
811#define TSO_PARAM_BUFLEN_SHIFT 0
812#define TSO_PARAM_DMAINT_MASK 0x0001
813#define TSO_PARAM_DMAINT_SHIFT 14
814#define TSO_PARAM_PKTNT_MASK 0x0001
815#define TSO_PARAM_PKTINT_SHIFT 15
816#define TSO_PARAM_VLANTAG_MASK 0xFFFF
817#define TSO_PARAM_VLAN_SHIFT 16
818
819/* tsopl defines */
820#define TSO_PARAM_EOP_MASK 0x0001
821#define TSO_PARAM_EOP_SHIFT 0
822#define TSO_PARAM_COALESCE_MASK 0x0001
823#define TSO_PARAM_COALESCE_SHIFT 1
824#define TSO_PARAM_INSVLAG_MASK 0x0001
825#define TSO_PARAM_INSVLAG_SHIFT 2
826#define TSO_PARAM_CUSTOMCKSUM_MASK 0x0001
827#define TSO_PARAM_CUSTOMCKSUM_SHIFT 3
828#define TSO_PARAM_SEGMENT_MASK 0x0001
829#define TSO_PARAM_SEGMENT_SHIFT 4
830#define TSO_PARAM_IPCKSUM_MASK 0x0001
831#define TSO_PARAM_IPCKSUM_SHIFT 5
832#define TSO_PARAM_TCPCKSUM_MASK 0x0001
833#define TSO_PARAM_TCPCKSUM_SHIFT 6
834#define TSO_PARAM_UDPCKSUM_MASK 0x0001
835#define TSO_PARAM_UDPCKSUM_SHIFT 7
836#define TSO_PARAM_VLANTAGGED_MASK 0x0001
837#define TSO_PARAM_VLANTAGGED_SHIFT 8
838#define TSO_PARAM_ETHTYPE_MASK 0x0001
839#define TSO_PARAM_ETHTYPE_SHIFT 9
840#define TSO_PARAM_IPHL_MASK 0x000F
841#define TSO_PARAM_IPHL_SHIFT 10
842#define TSO_PARAM_TCPHDRLEN_MASK 0x000F
843#define TSO_PARAM_TCPHDRLEN_SHIFT 14
844#define TSO_PARAM_HDRFLAG_MASK 0x0001
845#define TSO_PARAM_HDRFLAG_SHIFT 18
846#define TSO_PARAM_MSS_MASK 0x1FFF
847#define TSO_PARAM_MSS_SHIFT 19
848
849/* csumpu defines */
850#define CSUM_PARAM_BUFLEN_MASK 0x3FFF
851#define CSUM_PARAM_BUFLEN_SHIFT 0
852#define CSUM_PARAM_DMAINT_MASK 0x0001
853#define CSUM_PARAM_DMAINT_SHIFT 14
854#define CSUM_PARAM_PKTINT_MASK 0x0001
855#define CSUM_PARAM_PKTINT_SHIFT 15
856#define CSUM_PARAM_VALANTAG_MASK 0xFFFF
857#define CSUM_PARAM_VALAN_SHIFT 16
858
859/* csumpl defines*/
860#define CSUM_PARAM_EOP_MASK 0x0001
861#define CSUM_PARAM_EOP_SHIFT 0
862#define CSUM_PARAM_COALESCE_MASK 0x0001
863#define CSUM_PARAM_COALESCE_SHIFT 1
864#define CSUM_PARAM_INSVLAG_MASK 0x0001
865#define CSUM_PARAM_INSVLAG_SHIFT 2
866#define CSUM_PARAM_CUSTOMCKSUM_MASK 0x0001
867#define CSUM_PARAM_CUSTOMCKSUM_SHIFT 3
868#define CSUM_PARAM_SEGMENT_MASK 0x0001
869#define CSUM_PARAM_SEGMENT_SHIFT 4
870#define CSUM_PARAM_IPCKSUM_MASK 0x0001
871#define CSUM_PARAM_IPCKSUM_SHIFT 5
872#define CSUM_PARAM_TCPCKSUM_MASK 0x0001
873#define CSUM_PARAM_TCPCKSUM_SHIFT 6
874#define CSUM_PARAM_UDPCKSUM_MASK 0x0001
875#define CSUM_PARAM_UDPCKSUM_SHIFT 7
876#define CSUM_PARAM_VLANTAGGED_MASK 0x0001
877#define CSUM_PARAM_VLANTAGGED_SHIFT 8
878#define CSUM_PARAM_ETHTYPE_MASK 0x0001
879#define CSUM_PARAM_ETHTYPE_SHIFT 9
880#define CSUM_PARAM_IPHL_MASK 0x000F
881#define CSUM_PARAM_IPHL_SHIFT 10
882#define CSUM_PARAM_PLOADOFFSET_MASK 0x00FF
883#define CSUM_PARAM_PLOADOFFSET_SHIFT 16
884#define CSUM_PARAM_XSUMOFFSET_MASK 0x00FF
885#define CSUM_PARAM_XSUMOFFSET_SHIFT 24
886
887/* TPD descriptor */
888struct tso_param {
889 /* The order of these declarations is important -- don't change it */
890 u32 tsopu; /* tso_param upper word */
891 u32 tsopl; /* tso_param lower word */
892};
893
894struct csum_param {
895 /* The order of these declarations is important -- don't change it */
896 u32 csumpu; /* csum_param upper word */
897 u32 csumpl; /* csum_param lower word */
898};
899
900union tpd_descr {
901 u64 data;
902 struct csum_param csum;
903 struct tso_param tso;
904};
905
906struct tx_packet_desc {
907 __le64 buffer_addr;
908 union tpd_descr desc;
909};
910
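/*
 * Sketch: the TSO_PARAM_* shift/mask pairs above describe how the lower
 * tso_param word (tsopl) is assembled. The helper below packs a plausible
 * subset (segmentation, checksum offload flags, header lengths in 32-bit
 * words, MSS, EOP); it only illustrates the bit layout and is not the
 * driver's transmit path.
 */
#include <stdint.h>

#define TSO_PARAM_EOP_SHIFT		0
#define TSO_PARAM_SEGMENT_SHIFT		4
#define TSO_PARAM_IPCKSUM_SHIFT		5
#define TSO_PARAM_TCPCKSUM_SHIFT	6
#define TSO_PARAM_IPHL_MASK		0x000F
#define TSO_PARAM_IPHL_SHIFT		10
#define TSO_PARAM_TCPHDRLEN_MASK	0x000F
#define TSO_PARAM_TCPHDRLEN_SHIFT	14
#define TSO_PARAM_MSS_MASK		0x1FFF
#define TSO_PARAM_MSS_SHIFT		19

static uint32_t build_tsopl(uint32_t ip_hl_words, uint32_t tcp_hl_words,
			    uint32_t mss, int last_frag)
{
	uint32_t tsopl = 0;

	tsopl |= 1u << TSO_PARAM_SEGMENT_SHIFT;		/* enable segmentation */
	tsopl |= 1u << TSO_PARAM_IPCKSUM_SHIFT;		/* offload IP checksum */
	tsopl |= 1u << TSO_PARAM_TCPCKSUM_SHIFT;	/* offload TCP checksum */
	tsopl |= (ip_hl_words & TSO_PARAM_IPHL_MASK) << TSO_PARAM_IPHL_SHIFT;
	tsopl |= (tcp_hl_words & TSO_PARAM_TCPHDRLEN_MASK)
		 << TSO_PARAM_TCPHDRLEN_SHIFT;
	tsopl |= (mss & TSO_PARAM_MSS_MASK) << TSO_PARAM_MSS_SHIFT;
	if (last_frag)
		tsopl |= 1u << TSO_PARAM_EOP_SHIFT;	/* end of packet */

	return tsopl;
}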
911/* DMA Order Settings */
912enum atl1_dma_order {
913 atl1_dma_ord_in = 1,
914 atl1_dma_ord_enh = 2,
915 atl1_dma_ord_out = 4
916};
917
918enum atl1_dma_rcb {
919 atl1_rcb_64 = 0,
920 atl1_rcb_128 = 1
921};
922
923enum atl1_dma_req_block {
924 atl1_dma_req_128 = 0,
925 atl1_dma_req_256 = 1,
926 atl1_dma_req_512 = 2,
927 atl1_dma_req_1024 = 3,
928 atl1_dma_req_2048 = 4,
929 atl1_dma_req_4096 = 5
930};
931
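/*
 * The atl1_dma_req_block enumerators appear to encode the DMAR/DMAW burst
 * length as 128 << n bytes (atl1_dma_req_128 == 0 ... atl1_dma_req_4096 == 5);
 * the one-liner below only states that inferred relation.
 */
static unsigned int dma_req_block_bytes(enum atl1_dma_req_block req)
{
	return 128u << (unsigned int)req;
}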
932struct atl1_spi_flash_dev {
933 const char *manu_name; /* manufacturer id */
934 /* op-code */
935 u8 cmd_wrsr;
936 u8 cmd_read;
937 u8 cmd_program;
938 u8 cmd_wren;
939 u8 cmd_wrdi;
940 u8 cmd_rdsr;
941 u8 cmd_rdid;
942 u8 cmd_sector_erase;
943 u8 cmd_chip_erase;
944};
945
946#endif /* _ATL1_HW_H_ */
diff --git a/drivers/net/atl1/atl1_param.c b/drivers/net/atl1/atl1_param.c
deleted file mode 100644
index 4246bb9bd50e..000000000000
--- a/drivers/net/atl1/atl1_param.c
+++ /dev/null
@@ -1,203 +0,0 @@
1/*
2 * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved.
3 * Copyright(c) 2006 Chris Snook <csnook@redhat.com>
4 * Copyright(c) 2006 Jay Cliburn <jcliburn@gmail.com>
5 *
6 * Derived from Intel e1000 driver
7 * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the Free
11 * Software Foundation; either version 2 of the License, or (at your option)
12 * any later version.
13 *
14 * This program is distributed in the hope that it will be useful, but WITHOUT
15 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
17 * more details.
18 *
19 * You should have received a copy of the GNU General Public License along with
20 * this program; if not, write to the Free Software Foundation, Inc., 59
21 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
22 */
23
24#include <linux/types.h>
25#include <linux/moduleparam.h>
26#include <linux/pci.h>
27#include "atl1.h"
28
29/*
30 * This is the only thing that needs to be changed to adjust the
31 * maximum number of ports that the driver can manage.
32 */
33#define ATL1_MAX_NIC 4
34
35#define OPTION_UNSET -1
36#define OPTION_DISABLED 0
37#define OPTION_ENABLED 1
38
39#define ATL1_PARAM_INIT { [0 ... ATL1_MAX_NIC] = OPTION_UNSET }
40
41/*
42 * Interrupt Moderate Timer in units of 2 us
43 *
44 * Valid Range: 10-65535
45 *
46 * Default Value: 100 (200us)
47 */
48static int __devinitdata int_mod_timer[ATL1_MAX_NIC+1] = ATL1_PARAM_INIT;
49static int num_int_mod_timer = 0;
50module_param_array_named(int_mod_timer, int_mod_timer, int, &num_int_mod_timer, 0);
51MODULE_PARM_DESC(int_mod_timer, "Interrupt moderator timer");
52
53/*
54 * flash_vendor
55 *
56 * Valid Range: 0-2
57 *
58 * 0 - Atmel
59 * 1 - SST
60 * 2 - ST
61 *
62 * Default Value: 0
63 */
64static int __devinitdata flash_vendor[ATL1_MAX_NIC+1] = ATL1_PARAM_INIT;
65static int num_flash_vendor = 0;
66module_param_array_named(flash_vendor, flash_vendor, int, &num_flash_vendor, 0);
67MODULE_PARM_DESC(flash_vendor, "SPI flash vendor");
68
69#define DEFAULT_INT_MOD_CNT 100 /* 200us */
70#define MAX_INT_MOD_CNT 65000
71#define MIN_INT_MOD_CNT 50
72
73#define FLASH_VENDOR_DEFAULT 0
74#define FLASH_VENDOR_MIN 0
75#define FLASH_VENDOR_MAX 2
76
77struct atl1_option {
78 enum { enable_option, range_option, list_option } type;
79 char *name;
80 char *err;
81 int def;
82 union {
83 struct { /* range_option info */
84 int min;
85 int max;
86 } r;
87 struct { /* list_option info */
88 int nr;
89 struct atl1_opt_list {
90 int i;
91 char *str;
92 } *p;
93 } l;
94 } arg;
95};
96
97static int __devinit atl1_validate_option(int *value, struct atl1_option *opt, struct pci_dev *pdev)
98{
99 if (*value == OPTION_UNSET) {
100 *value = opt->def;
101 return 0;
102 }
103
104 switch (opt->type) {
105 case enable_option:
106 switch (*value) {
107 case OPTION_ENABLED:
108 dev_info(&pdev->dev, "%s enabled\n", opt->name);
109 return 0;
110 case OPTION_DISABLED:
111 dev_info(&pdev->dev, "%s disabled\n", opt->name);
112 return 0;
113 }
114 break;
115 case range_option:
116 if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
117 dev_info(&pdev->dev, "%s set to %i\n", opt->name,
118 *value);
119 return 0;
120 }
121 break;
122 case list_option:{
123 int i;
124 struct atl1_opt_list *ent;
125
126 for (i = 0; i < opt->arg.l.nr; i++) {
127 ent = &opt->arg.l.p[i];
128 if (*value == ent->i) {
129 if (ent->str[0] != '\0')
130 dev_info(&pdev->dev, "%s\n",
131 ent->str);
132 return 0;
133 }
134 }
135 }
136 break;
137
138 default:
139 break;
140 }
141
142 dev_info(&pdev->dev, "invalid %s specified (%i) %s\n",
143 opt->name, *value, opt->err);
144 *value = opt->def;
145 return -1;
146}
147
148/*
149 * atl1_check_options - Range Checking for Command Line Parameters
150 * @adapter: board private structure
151 *
152 * This routine checks all command line parameters for valid user
153 * input. If an invalid value is given, or if no user specified
154 * value exists, a default value is used. The final value is stored
155 * in a variable in the adapter structure.
156 */
157void __devinit atl1_check_options(struct atl1_adapter *adapter)
158{
159 struct pci_dev *pdev = adapter->pdev;
160 int bd = adapter->bd_number;
161 if (bd >= ATL1_MAX_NIC) {
162 dev_notice(&pdev->dev, "no configuration for board#%i\n", bd);
163 dev_notice(&pdev->dev, "using defaults for all values\n");
164 }
165 { /* Interrupt Moderate Timer */
166 struct atl1_option opt = {
167 .type = range_option,
168 .name = "Interrupt Moderator Timer",
169 .err = "using default of "
170 __MODULE_STRING(DEFAULT_INT_MOD_CNT),
171 .def = DEFAULT_INT_MOD_CNT,
172 .arg = {.r =
173 {.min = MIN_INT_MOD_CNT,.max = MAX_INT_MOD_CNT}}
174 };
175 int val;
176 if (num_int_mod_timer > bd) {
177 val = int_mod_timer[bd];
178 atl1_validate_option(&val, &opt, pdev);
179 adapter->imt = (u16) val;
180 } else
181 adapter->imt = (u16) (opt.def);
182 }
183
184 { /* Flash Vendor */
185 struct atl1_option opt = {
186 .type = range_option,
187 .name = "SPI Flash Vendor",
188 .err = "using default of "
189 __MODULE_STRING(FLASH_VENDOR_DEFAULT),
190 .def = FLASH_VENDOR_DEFAULT,
191 .arg = {.r =
192 {.min = FLASH_VENDOR_MIN,.max =
193 FLASH_VENDOR_MAX}}
194 };
195 int val;
196 if (num_flash_vendor > bd) {
197 val = flash_vendor[bd];
198 atl1_validate_option(&val, &opt, pdev);
199 adapter->hw.flash_vendor = (u8) val;
200 } else
201 adapter->hw.flash_vendor = (u8) (opt.def);
202 }
203}
diff --git a/drivers/net/atlx/Makefile b/drivers/net/atlx/Makefile
new file mode 100644
index 000000000000..ca45553a040d
--- /dev/null
+++ b/drivers/net/atlx/Makefile
@@ -0,0 +1 @@
obj-$(CONFIG_ATL1) += atl1.o
diff --git a/drivers/net/atl1/atl1_main.c b/drivers/net/atlx/atl1.c
index 129b8b3aa773..5586fc624688 100644
--- a/drivers/net/atl1/atl1_main.c
+++ b/drivers/net/atlx/atl1.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved. 2 * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved.
3 * Copyright(c) 2006 Chris Snook <csnook@redhat.com> 3 * Copyright(c) 2006 - 2007 Chris Snook <csnook@redhat.com>
4 * Copyright(c) 2006 Jay Cliburn <jcliburn@gmail.com> 4 * Copyright(c) 2006 Jay Cliburn <jcliburn@gmail.com>
5 * 5 *
6 * Derived from Intel e1000 driver 6 * Derived from Intel e1000 driver
@@ -36,7 +36,6 @@
36 * A very incomplete list of things that need to be dealt with: 36 * A very incomplete list of things that need to be dealt with:
37 * 37 *
38 * TODO: 38 * TODO:
39 * Fix TSO; tx performance is horrible with TSO enabled.
40 * Wake on LAN. 39 * Wake on LAN.
41 * Add more ethtool functions. 40 * Add more ethtool functions.
42 * Fix abstruse irq enable/disable condition described here: 41 * Fix abstruse irq enable/disable condition described here:
@@ -50,51 +49,46 @@
50 * SMP torture testing 49 * SMP torture testing
51 */ 50 */
52 51
53#include <linux/types.h> 52#include <asm/atomic.h>
54#include <linux/netdevice.h> 53#include <asm/byteorder.h>
55#include <linux/pci.h> 54
56#include <linux/spinlock.h> 55#include <linux/compiler.h>
57#include <linux/slab.h> 56#include <linux/crc32.h>
58#include <linux/string.h> 57#include <linux/delay.h>
59#include <linux/skbuff.h> 58#include <linux/dma-mapping.h>
60#include <linux/etherdevice.h> 59#include <linux/etherdevice.h>
61#include <linux/if_vlan.h>
62#include <linux/if_ether.h>
63#include <linux/irqreturn.h>
64#include <linux/workqueue.h>
65#include <linux/timer.h>
66#include <linux/jiffies.h>
67#include <linux/hardirq.h> 60#include <linux/hardirq.h>
61#include <linux/if_ether.h>
62#include <linux/if_vlan.h>
63#include <linux/in.h>
68#include <linux/interrupt.h> 64#include <linux/interrupt.h>
65#include <linux/ip.h>
69#include <linux/irqflags.h> 66#include <linux/irqflags.h>
70#include <linux/dma-mapping.h> 67#include <linux/irqreturn.h>
68#include <linux/jiffies.h>
69#include <linux/mii.h>
70#include <linux/module.h>
71#include <linux/moduleparam.h>
71#include <linux/net.h> 72#include <linux/net.h>
73#include <linux/netdevice.h>
74#include <linux/pci.h>
75#include <linux/pci_ids.h>
72#include <linux/pm.h> 76#include <linux/pm.h>
73#include <linux/in.h> 77#include <linux/skbuff.h>
74#include <linux/ip.h> 78#include <linux/slab.h>
79#include <linux/spinlock.h>
80#include <linux/string.h>
75#include <linux/tcp.h> 81#include <linux/tcp.h>
76#include <linux/compiler.h> 82#include <linux/timer.h>
77#include <linux/delay.h> 83#include <linux/types.h>
78#include <linux/mii.h> 84#include <linux/workqueue.h>
79#include <net/checksum.h>
80 85
81#include <asm/atomic.h> 86#include <net/checksum.h>
82#include <asm/byteorder.h>
83 87
84#include "atl1.h" 88#include "atl1.h"
85 89
86#define DRIVER_VERSION "2.0.7" 90/* Temporary hack for merging atl1 and atl2 */
87 91#include "atlx.c"
88char atl1_driver_name[] = "atl1";
89static const char atl1_driver_string[] = "Attansic L1 Ethernet Network Driver";
90static const char atl1_copyright[] = "Copyright(c) 2005-2006 Attansic Corporation.";
91char atl1_driver_version[] = DRIVER_VERSION;
92
93MODULE_AUTHOR
94 ("Attansic Corporation <xiong_huang@attansic.com>, Chris Snook <csnook@redhat.com>, Jay Cliburn <jcliburn@gmail.com>");
95MODULE_DESCRIPTION("Attansic 1000M Ethernet Network Driver");
96MODULE_LICENSE("GPL");
97MODULE_VERSION(DRIVER_VERSION);
98 92
99/* 93/*
100 * atl1_pci_tbl - PCI Device ID Table 94 * atl1_pci_tbl - PCI Device ID Table
@@ -104,9 +98,720 @@ static const struct pci_device_id atl1_pci_tbl[] = {
104 /* required last entry */ 98 /* required last entry */
105 {0,} 99 {0,}
106}; 100};
107
108MODULE_DEVICE_TABLE(pci, atl1_pci_tbl); 101MODULE_DEVICE_TABLE(pci, atl1_pci_tbl);
109 102
103static const u32 atl1_default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
104 NETIF_MSG_LINK | NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP;
105
106static int debug = -1;
107module_param(debug, int, 0);
108MODULE_PARM_DESC(debug, "Message level (0=none,...,16=all)");
109
110/*
111 * Reset the transmit and receive units; mask and clear all interrupts.
112 * hw - Struct containing variables accessed by shared code
113 * return : 0 or idle status (if error)
114 */
115static s32 atl1_reset_hw(struct atl1_hw *hw)
116{
117 struct pci_dev *pdev = hw->back->pdev;
118 struct atl1_adapter *adapter = hw->back;
119 u32 icr;
120 int i;
121
122 /*
123 * Clear Interrupt mask to stop board from generating
124 * interrupts & Clear any pending interrupt events
125 */
126 /*
127 * iowrite32(0, hw->hw_addr + REG_IMR);
128 * iowrite32(0xffffffff, hw->hw_addr + REG_ISR);
129 */
130
131 /*
132 * Issue Soft Reset to the MAC. This will reset the chip's
133 * transmit, receive, DMA. It will not effect
134 * the current PCI configuration. The global reset bit is self-
135 * clearing, and should clear within a microsecond.
136 */
137 iowrite32(MASTER_CTRL_SOFT_RST, hw->hw_addr + REG_MASTER_CTRL);
138 ioread32(hw->hw_addr + REG_MASTER_CTRL);
139
140 iowrite16(1, hw->hw_addr + REG_PHY_ENABLE);
141 ioread16(hw->hw_addr + REG_PHY_ENABLE);
142
143 /* delay about 1ms */
144 msleep(1);
145
146 /* Wait at least 10ms for all modules to be idle */
147 for (i = 0; i < 10; i++) {
148 icr = ioread32(hw->hw_addr + REG_IDLE_STATUS);
149 if (!icr)
150 break;
151 /* delay 1 ms */
152 msleep(1);
153 /* FIXME: still the right way to do this? */
154 cpu_relax();
155 }
156
157 if (icr) {
158 if (netif_msg_hw(adapter))
159 dev_dbg(&pdev->dev, "ICR = 0x%x\n", icr);
160 return icr;
161 }
162
163 return 0;
164}
165
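
The reset path above issues the soft reset, then polls REG_IDLE_STATUS up to ten times with 1 ms sleeps and treats any bits still set as a failure. The stand-alone sketch below models only that bounded-poll control flow; the register name and timing come from the code above, and the fake status function stands in for ioread32().

#include <stdio.h>
#include <stdint.h>

/* stand-in for ioread32(hw->hw_addr + REG_IDLE_STATUS); pretend the
 * hardware needs three polls before every unit reports idle */
static uint32_t fake_idle_status(void)
{
	static int polls;
	return (++polls < 3) ? 0x5 : 0;
}

int main(void)
{
	uint32_t icr = 0;
	int i;

	for (i = 0; i < 10; i++) {	/* ~10 ms budget, 1 ms per pass */
		icr = fake_idle_status();
		if (!icr)
			break;
		/* the driver calls msleep(1) here */
	}

	if (icr)
		printf("reset failed, idle status 0x%x\n", icr);
	else
		printf("all units idle after %d poll(s)\n", i + 1);
	return icr ? 1 : 0;
}
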
166/* EEPROM helper functions
 167 *
 168 * atl1_check_eeprom_exist
 169 * return 0 if the EEPROM exists
170 */
171static int atl1_check_eeprom_exist(struct atl1_hw *hw)
172{
173 u32 value;
174 value = ioread32(hw->hw_addr + REG_SPI_FLASH_CTRL);
175 if (value & SPI_FLASH_CTRL_EN_VPD) {
176 value &= ~SPI_FLASH_CTRL_EN_VPD;
177 iowrite32(value, hw->hw_addr + REG_SPI_FLASH_CTRL);
178 }
179
180 value = ioread16(hw->hw_addr + REG_PCIE_CAP_LIST);
181 return ((value & 0xFF00) == 0x6C00) ? 0 : 1;
182}
183
184static bool atl1_read_eeprom(struct atl1_hw *hw, u32 offset, u32 *p_value)
185{
186 int i;
187 u32 control;
188
189 if (offset & 3)
190 /* address is not dword-aligned */
191 return false;
192
193 iowrite32(0, hw->hw_addr + REG_VPD_DATA);
194 control = (offset & VPD_CAP_VPD_ADDR_MASK) << VPD_CAP_VPD_ADDR_SHIFT;
195 iowrite32(control, hw->hw_addr + REG_VPD_CAP);
196 ioread32(hw->hw_addr + REG_VPD_CAP);
197
198 for (i = 0; i < 10; i++) {
199 msleep(2);
200 control = ioread32(hw->hw_addr + REG_VPD_CAP);
201 if (control & VPD_CAP_VPD_FLAG)
202 break;
203 }
204 if (control & VPD_CAP_VPD_FLAG) {
205 *p_value = ioread32(hw->hw_addr + REG_VPD_DATA);
206 return true;
207 }
208 /* timeout */
209 return false;
210}
211
212/*
213 * Reads the value from a PHY register
214 * hw - Struct containing variables accessed by shared code
215 * reg_addr - address of the PHY register to read
216 */
217s32 atl1_read_phy_reg(struct atl1_hw *hw, u16 reg_addr, u16 *phy_data)
218{
219 u32 val;
220 int i;
221
222 val = ((u32) (reg_addr & MDIO_REG_ADDR_MASK)) << MDIO_REG_ADDR_SHIFT |
223 MDIO_START | MDIO_SUP_PREAMBLE | MDIO_RW | MDIO_CLK_25_4 <<
224 MDIO_CLK_SEL_SHIFT;
225 iowrite32(val, hw->hw_addr + REG_MDIO_CTRL);
226 ioread32(hw->hw_addr + REG_MDIO_CTRL);
227
228 for (i = 0; i < MDIO_WAIT_TIMES; i++) {
229 udelay(2);
230 val = ioread32(hw->hw_addr + REG_MDIO_CTRL);
231 if (!(val & (MDIO_START | MDIO_BUSY)))
232 break;
233 }
234 if (!(val & (MDIO_START | MDIO_BUSY))) {
235 *phy_data = (u16) val;
236 return 0;
237 }
238 return ATLX_ERR_PHY;
239}
240
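
atl1_read_phy_reg() and atl1_write_phy_reg() further down are the two MDIO primitives the rest of this file is built on. A natural way to combine them is a read-modify-write helper such as the hedged sketch below; it is not part of the driver, and it assumes it is placed after both primitives (or that atl1_write_phy_reg() is forward-declared).

/* hypothetical helper, not in the driver: update selected bits of a
 * PHY register using the MDIO primitives defined in this file */
static s32 atl1_modify_phy_reg(struct atl1_hw *hw, u16 reg_addr,
	u16 clear_bits, u16 set_bits)
{
	u16 val;
	s32 ret;

	ret = atl1_read_phy_reg(hw, reg_addr, &val);
	if (ret)
		return ret;

	val &= ~clear_bits;
	val |= set_bits;

	return atl1_write_phy_reg(hw, reg_addr, val);
}
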
241#define CUSTOM_SPI_CS_SETUP 2
242#define CUSTOM_SPI_CLK_HI 2
243#define CUSTOM_SPI_CLK_LO 2
244#define CUSTOM_SPI_CS_HOLD 2
245#define CUSTOM_SPI_CS_HI 3
246
247static bool atl1_spi_read(struct atl1_hw *hw, u32 addr, u32 *buf)
248{
249 int i;
250 u32 value;
251
252 iowrite32(0, hw->hw_addr + REG_SPI_DATA);
253 iowrite32(addr, hw->hw_addr + REG_SPI_ADDR);
254
255 value = SPI_FLASH_CTRL_WAIT_READY |
256 (CUSTOM_SPI_CS_SETUP & SPI_FLASH_CTRL_CS_SETUP_MASK) <<
257 SPI_FLASH_CTRL_CS_SETUP_SHIFT | (CUSTOM_SPI_CLK_HI &
258 SPI_FLASH_CTRL_CLK_HI_MASK) <<
259 SPI_FLASH_CTRL_CLK_HI_SHIFT | (CUSTOM_SPI_CLK_LO &
260 SPI_FLASH_CTRL_CLK_LO_MASK) <<
261 SPI_FLASH_CTRL_CLK_LO_SHIFT | (CUSTOM_SPI_CS_HOLD &
262 SPI_FLASH_CTRL_CS_HOLD_MASK) <<
263 SPI_FLASH_CTRL_CS_HOLD_SHIFT | (CUSTOM_SPI_CS_HI &
264 SPI_FLASH_CTRL_CS_HI_MASK) <<
265 SPI_FLASH_CTRL_CS_HI_SHIFT | (1 & SPI_FLASH_CTRL_INS_MASK) <<
266 SPI_FLASH_CTRL_INS_SHIFT;
267
268 iowrite32(value, hw->hw_addr + REG_SPI_FLASH_CTRL);
269
270 value |= SPI_FLASH_CTRL_START;
271 iowrite32(value, hw->hw_addr + REG_SPI_FLASH_CTRL);
272 ioread32(hw->hw_addr + REG_SPI_FLASH_CTRL);
273
274 for (i = 0; i < 10; i++) {
275 msleep(1);
276 value = ioread32(hw->hw_addr + REG_SPI_FLASH_CTRL);
277 if (!(value & SPI_FLASH_CTRL_START))
278 break;
279 }
280
281 if (value & SPI_FLASH_CTRL_START)
282 return false;
283
284 *buf = ioread32(hw->hw_addr + REG_SPI_DATA);
285
286 return true;
287}
288
289/*
290 * get_permanent_address
291 * return 0 if a valid MAC address is retrieved
292 */
293static int atl1_get_permanent_address(struct atl1_hw *hw)
294{
295 u32 addr[2];
296 u32 i, control;
297 u16 reg;
298 u8 eth_addr[ETH_ALEN];
299 bool key_valid;
300
301 if (is_valid_ether_addr(hw->perm_mac_addr))
302 return 0;
303
304 /* init */
305 addr[0] = addr[1] = 0;
306
307 if (!atl1_check_eeprom_exist(hw)) {
308 reg = 0;
309 key_valid = false;
310 /* Read out all EEPROM content */
311 i = 0;
312 while (1) {
313 if (atl1_read_eeprom(hw, i + 0x100, &control)) {
314 if (key_valid) {
315 if (reg == REG_MAC_STA_ADDR)
316 addr[0] = control;
317 else if (reg == (REG_MAC_STA_ADDR + 4))
318 addr[1] = control;
319 key_valid = false;
320 } else if ((control & 0xff) == 0x5A) {
321 key_valid = true;
322 reg = (u16) (control >> 16);
323 } else
324 break;
325 } else
326 /* read error */
327 break;
328 i += 4;
329 }
330
331 *(u32 *) &eth_addr[2] = swab32(addr[0]);
332 *(u16 *) &eth_addr[0] = swab16(*(u16 *) &addr[1]);
333 if (is_valid_ether_addr(eth_addr)) {
334 memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN);
335 return 0;
336 }
337 return 1;
338 }
339
340 /* see if the MAC address is stored in SPI flash */
341 addr[0] = addr[1] = 0;
342 reg = 0;
343 key_valid = false;
344 i = 0;
345 while (1) {
346 if (atl1_spi_read(hw, i + 0x1f000, &control)) {
347 if (key_valid) {
348 if (reg == REG_MAC_STA_ADDR)
349 addr[0] = control;
350 else if (reg == (REG_MAC_STA_ADDR + 4))
351 addr[1] = control;
352 key_valid = false;
353 } else if ((control & 0xff) == 0x5A) {
354 key_valid = true;
355 reg = (u16) (control >> 16);
356 } else
357 /* data end */
358 break;
359 } else
360 /* read error */
361 break;
362 i += 4;
363 }
364
365 *(u32 *) &eth_addr[2] = swab32(addr[0]);
366 *(u16 *) &eth_addr[0] = swab16(*(u16 *) &addr[1]);
367 if (is_valid_ether_addr(eth_addr)) {
368 memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN);
369 return 0;
370 }
371
372 /*
373 * On some motherboards, the MAC address is written by the
374 * BIOS directly to the MAC register during POST, and is
375 * not stored in eeprom. If all else thus far has failed
376 * to fetch the permanent MAC address, try reading it directly.
377 */
378 addr[0] = ioread32(hw->hw_addr + REG_MAC_STA_ADDR);
379 addr[1] = ioread16(hw->hw_addr + (REG_MAC_STA_ADDR + 4));
380 *(u32 *) &eth_addr[2] = swab32(addr[0]);
381 *(u16 *) &eth_addr[0] = swab16(*(u16 *) &addr[1]);
382 if (is_valid_ether_addr(eth_addr)) {
383 memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN);
384 return 0;
385 }
386
387 return 1;
388}
389
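
The parse above treats the EEPROM (and the SPI flash fallback) as a stream of dwords: a dword whose low byte is 0x5A is a key whose upper 16 bits name a register, the next dword is that register's value, and anything else ends the scan; the two values aimed at REG_MAC_STA_ADDR are then byte-swapped into the 6-byte MAC. The stand-alone sketch below replays that walk over a made-up image; the 0x5A convention and the byte order mirror the code above, while the register offset and EEPROM contents are placeholders.

#include <stdio.h>
#include <stdint.h>

#define DEMO_REG_MAC_STA_ADDR 0x1488u	/* placeholder offset, illustration only */

/* fake EEPROM image: each 0x5A-tagged dword names a register in its upper
 * 16 bits, and the next dword is that register's value */
static const uint32_t eeprom[] = {
	(DEMO_REG_MAC_STA_ADDR << 16) | 0x5A,       0x6AF600DC,
	((DEMO_REG_MAC_STA_ADDR + 4) << 16) | 0x5A, 0x0000000B,
	0xFFFFFFFF,	/* neither key nor value: end of data */
};

int main(void)
{
	uint32_t addr[2] = { 0, 0 };
	uint8_t mac[6];
	uint16_t reg = 0;
	int key_valid = 0;
	size_t i;

	for (i = 0; i < sizeof(eeprom) / sizeof(eeprom[0]); i++) {
		uint32_t control = eeprom[i];

		if (key_valid) {
			if (reg == DEMO_REG_MAC_STA_ADDR)
				addr[0] = control;
			else if (reg == DEMO_REG_MAC_STA_ADDR + 4)
				addr[1] = control;
			key_valid = 0;
		} else if ((control & 0xff) == 0x5A) {
			key_valid = 1;
			reg = (uint16_t)(control >> 16);
		} else {
			break;
		}
	}

	/* same result as the driver's swab32/swab16 plus little-endian store */
	mac[0] = (addr[1] >> 8) & 0xff;  mac[1] = addr[1] & 0xff;
	mac[2] = (addr[0] >> 24) & 0xff; mac[3] = (addr[0] >> 16) & 0xff;
	mac[4] = (addr[0] >> 8) & 0xff;  mac[5] = addr[0] & 0xff;

	/* prints 00-0B-6A-F6-00-DC for this image */
	printf("%02X-%02X-%02X-%02X-%02X-%02X\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}
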
390/*
391 * Reads the adapter's MAC address from the EEPROM
392 * hw - Struct containing variables accessed by shared code
393 */
394s32 atl1_read_mac_addr(struct atl1_hw *hw)
395{
396 u16 i;
397
398 if (atl1_get_permanent_address(hw))
399 random_ether_addr(hw->perm_mac_addr);
400
401 for (i = 0; i < ETH_ALEN; i++)
402 hw->mac_addr[i] = hw->perm_mac_addr[i];
403 return 0;
404}
405
406/*
407 * Hashes an address to determine its location in the multicast table
408 * hw - Struct containing variables accessed by shared code
409 * mc_addr - the multicast address to hash
410 *
411 * atl1_hash_mc_addr
412 * purpose
413 * compute the hash value for a multicast address
414 * hash calculation:
415 * 1. calculate the 32-bit CRC of the multicast address
416 * 2. reverse the CRC bit order (MSB becomes LSB)
417 */
418u32 atl1_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr)
419{
420 u32 crc32, value = 0;
421 int i;
422
423 crc32 = ether_crc_le(6, mc_addr);
424 for (i = 0; i < 32; i++)
425 value |= (((crc32 >> i) & 1) << (31 - i));
426
427 return value;
428}
429
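
The hash above is the little-endian Ethernet CRC of the address with its 32 bits mirrored (bit 0 becomes bit 31 and so on), which is what bitrev32(ether_crc_le(...)) would also give. The stand-alone sketch below recomputes it for one sample multicast address; the CRC helper is a plain reflected CRC-32 written out here so no kernel headers are needed, and it is intended, not guaranteed, to match ether_crc_le().

#include <stdio.h>
#include <stdint.h>

/* reflected CRC-32 (poly 0xEDB88320), bit-serial, no final inversion */
static uint32_t crc32_le_bytes(const uint8_t *p, int len)
{
	uint32_t crc = 0xffffffff;
	int i, bit;

	for (i = 0; i < len; i++) {
		crc ^= p[i];
		for (bit = 0; bit < 8; bit++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
	}
	return crc;
}

int main(void)
{
	static const uint8_t mc[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	uint32_t crc = crc32_le_bytes(mc, 6);
	uint32_t hash = 0;
	int i;

	/* mirror the 32 CRC bits, exactly as the loop in atl1_hash_mc_addr() */
	for (i = 0; i < 32; i++)
		hash |= ((crc >> i) & 1u) << (31 - i);

	printf("crc=0x%08x hash=0x%08x reg=%u bit=%u\n",
	       crc, hash, (hash >> 31) & 0x1, (hash >> 26) & 0x1f);
	return 0;
}
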
430/*
431 * Sets the bit in the multicast table corresponding to the hash value.
432 * hw - Struct containing variables accessed by shared code
433 * hash_value - Multicast address hash value
434 */
435void atl1_hash_set(struct atl1_hw *hw, u32 hash_value)
436{
437 u32 hash_bit, hash_reg;
438 u32 mta;
439
440 /*
441 * The HASH Table is a register array of 2 32-bit registers.
442 * It is treated like an array of 64 bits. We want to set
443 * bit BitArray[hash_value]. So we figure out what register
444 * the bit is in, read it, OR in the new bit, then write
445 * back the new value. The register is selected by the most
446 * significant bit of the hash value, and the bit within that
447 * register is selected by the next five bits (bits 30:26).
448 */
449 hash_reg = (hash_value >> 31) & 0x1;
450 hash_bit = (hash_value >> 26) & 0x1F;
451 mta = ioread32((hw->hw_addr + REG_RX_HASH_TABLE) + (hash_reg << 2));
452 mta |= (1 << hash_bit);
453 iowrite32(mta, (hw->hw_addr + REG_RX_HASH_TABLE) + (hash_reg << 2));
454}
455
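
Only six of the 32 hash bits are consumed: bit 31 selects one of the two hash registers and bits 30:26 select the bit inside it. A short stand-alone model of the table with a few made-up hash values shows the indexing:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t mta[2] = { 0, 0 };	/* software model of the two hash registers */
	const uint32_t samples[] = { 0x80000000, 0x04000000, 0xfc000000 };
	size_t i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		uint32_t hash_value = samples[i];
		uint32_t hash_reg = (hash_value >> 31) & 0x1;	/* which register  */
		uint32_t hash_bit = (hash_value >> 26) & 0x1F;	/* which bit in it */

		mta[hash_reg] |= 1u << hash_bit;
		printf("hash 0x%08x -> reg %u, bit %2u\n",
		       hash_value, hash_reg, hash_bit);
	}
	printf("mta[0]=0x%08x mta[1]=0x%08x\n", mta[0], mta[1]);
	return 0;
}
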
456/*
457 * Writes a value to a PHY register
458 * hw - Struct containing variables accessed by shared code
459 * reg_addr - address of the PHY register to write
460 * data - data to write to the PHY
461 */
462static s32 atl1_write_phy_reg(struct atl1_hw *hw, u32 reg_addr, u16 phy_data)
463{
464 int i;
465 u32 val;
466
467 val = ((u32) (phy_data & MDIO_DATA_MASK)) << MDIO_DATA_SHIFT |
468 (reg_addr & MDIO_REG_ADDR_MASK) << MDIO_REG_ADDR_SHIFT |
469 MDIO_SUP_PREAMBLE |
470 MDIO_START | MDIO_CLK_25_4 << MDIO_CLK_SEL_SHIFT;
471 iowrite32(val, hw->hw_addr + REG_MDIO_CTRL);
472 ioread32(hw->hw_addr + REG_MDIO_CTRL);
473
474 for (i = 0; i < MDIO_WAIT_TIMES; i++) {
475 udelay(2);
476 val = ioread32(hw->hw_addr + REG_MDIO_CTRL);
477 if (!(val & (MDIO_START | MDIO_BUSY)))
478 break;
479 }
480
481 if (!(val & (MDIO_START | MDIO_BUSY)))
482 return 0;
483
484 return ATLX_ERR_PHY;
485}
486
487/*
488 * Bring the L001's PHY out of its power-saving state (hardware bug)
489 * hw - Struct containing variables accessed by shared code
490 * At power-on, the L001's PHY is always in the power-saving state,
491 * which forbids gigabit link.
492 */
493static s32 atl1_phy_leave_power_saving(struct atl1_hw *hw)
494{
495 s32 ret;
496 ret = atl1_write_phy_reg(hw, 29, 0x0029);
497 if (ret)
498 return ret;
499 return atl1_write_phy_reg(hw, 30, 0);
500}
501
502/*
503 * TODO: do something or get rid of this
504 */
505#ifdef CONFIG_PM
506static s32 atl1_phy_enter_power_saving(struct atl1_hw *hw)
507{
508/* s32 ret_val;
509 * u16 phy_data;
510 */
511
512/*
513 ret_val = atl1_write_phy_reg(hw, ...);
514 ret_val = atl1_write_phy_reg(hw, ...);
515 ....
516*/
517 return 0;
518}
519#endif
520
521/*
522 * Resets the PHY and makes all configuration take effect
523 * hw - Struct containing variables accessed by shared code
524 *
525 * Sets bits 15 and 12 of the MII control register (for the F001 bug)
526 */
527static s32 atl1_phy_reset(struct atl1_hw *hw)
528{
529 struct pci_dev *pdev = hw->back->pdev;
530 struct atl1_adapter *adapter = hw->back;
531 s32 ret_val;
532 u16 phy_data;
533
534 if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
535 hw->media_type == MEDIA_TYPE_1000M_FULL)
536 phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN;
537 else {
538 switch (hw->media_type) {
539 case MEDIA_TYPE_100M_FULL:
540 phy_data =
541 MII_CR_FULL_DUPLEX | MII_CR_SPEED_100 |
542 MII_CR_RESET;
543 break;
544 case MEDIA_TYPE_100M_HALF:
545 phy_data = MII_CR_SPEED_100 | MII_CR_RESET;
546 break;
547 case MEDIA_TYPE_10M_FULL:
548 phy_data =
549 MII_CR_FULL_DUPLEX | MII_CR_SPEED_10 | MII_CR_RESET;
550 break;
551 default:
552 /* MEDIA_TYPE_10M_HALF: */
553 phy_data = MII_CR_SPEED_10 | MII_CR_RESET;
554 break;
555 }
556 }
557
558 ret_val = atl1_write_phy_reg(hw, MII_BMCR, phy_data);
559 if (ret_val) {
560 u32 val;
561 int i;
562 /* pcie serdes link may be down! */
563 if (netif_msg_hw(adapter))
564 dev_dbg(&pdev->dev, "pcie phy link down\n");
565
566 for (i = 0; i < 25; i++) {
567 msleep(1);
568 val = ioread32(hw->hw_addr + REG_MDIO_CTRL);
569 if (!(val & (MDIO_START | MDIO_BUSY)))
570 break;
571 }
572
573 if ((val & (MDIO_START | MDIO_BUSY)) != 0) {
574 if (netif_msg_hw(adapter))
575 dev_warn(&pdev->dev,
576 "pcie link down at least 25ms\n");
577 return ret_val;
578 }
579 }
580 return 0;
581}
582
583/*
584 * Configures PHY autoneg and flow control advertisement settings
585 * hw - Struct containing variables accessed by shared code
586 */
587static s32 atl1_phy_setup_autoneg_adv(struct atl1_hw *hw)
588{
589 s32 ret_val;
590 s16 mii_autoneg_adv_reg;
591 s16 mii_1000t_ctrl_reg;
592
593 /* Read the MII Auto-Neg Advertisement Register (Address 4). */
594 mii_autoneg_adv_reg = MII_AR_DEFAULT_CAP_MASK;
595
596 /* Read the MII 1000Base-T Control Register (Address 9). */
597 mii_1000t_ctrl_reg = MII_ATLX_CR_1000T_DEFAULT_CAP_MASK;
598
599 /*
600 * First we clear all the 10/100 mb speed bits in the Auto-Neg
601 * Advertisement Register (Address 4) and the 1000 mb speed bits in
602 * the 1000Base-T Control Register (Address 9).
603 */
604 mii_autoneg_adv_reg &= ~MII_AR_SPEED_MASK;
605 mii_1000t_ctrl_reg &= ~MII_ATLX_CR_1000T_SPEED_MASK;
606
607 /*
608 * Need to parse media_type and set up
609 * the appropriate PHY registers.
610 */
611 switch (hw->media_type) {
612 case MEDIA_TYPE_AUTO_SENSOR:
613 mii_autoneg_adv_reg |= (MII_AR_10T_HD_CAPS |
614 MII_AR_10T_FD_CAPS |
615 MII_AR_100TX_HD_CAPS |
616 MII_AR_100TX_FD_CAPS);
617 mii_1000t_ctrl_reg |= MII_ATLX_CR_1000T_FD_CAPS;
618 break;
619
620 case MEDIA_TYPE_1000M_FULL:
621 mii_1000t_ctrl_reg |= MII_ATLX_CR_1000T_FD_CAPS;
622 break;
623
624 case MEDIA_TYPE_100M_FULL:
625 mii_autoneg_adv_reg |= MII_AR_100TX_FD_CAPS;
626 break;
627
628 case MEDIA_TYPE_100M_HALF:
629 mii_autoneg_adv_reg |= MII_AR_100TX_HD_CAPS;
630 break;
631
632 case MEDIA_TYPE_10M_FULL:
633 mii_autoneg_adv_reg |= MII_AR_10T_FD_CAPS;
634 break;
635
636 default:
637 mii_autoneg_adv_reg |= MII_AR_10T_HD_CAPS;
638 break;
639 }
640
641 /* flow control: always advertise symmetric and asymmetric pause */
642 mii_autoneg_adv_reg |= (MII_AR_ASM_DIR | MII_AR_PAUSE);
643
644 hw->mii_autoneg_adv_reg = mii_autoneg_adv_reg;
645 hw->mii_1000t_ctrl_reg = mii_1000t_ctrl_reg;
646
647 ret_val = atl1_write_phy_reg(hw, MII_ADVERTISE, mii_autoneg_adv_reg);
648 if (ret_val)
649 return ret_val;
650
651 ret_val = atl1_write_phy_reg(hw, MII_ATLX_CR, mii_1000t_ctrl_reg);
652 if (ret_val)
653 return ret_val;
654
655 return 0;
656}
657
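
The routine above only ever ORs standard 802.3 advertisement bits into register 4, plus the 1000BASE-T full-duplex bit into register 9. As a rough illustration, the stand-alone snippet below rebuilds the MEDIA_TYPE_AUTO_SENSOR case from the generic ADVERTISE_* values in linux/mii.h, on the assumption that the driver's MII_AR_*/MII_ATLX_CR_* masks correspond to them; the selector-field bits carried by the driver's default masks are left out.

#include <stdio.h>
#include <stdint.h>

/* standard clause-28 advertisement bits (values as in linux/mii.h) */
#define ADVERTISE_10HALF	0x0020
#define ADVERTISE_10FULL	0x0040
#define ADVERTISE_100HALF	0x0080
#define ADVERTISE_100FULL	0x0100
#define ADVERTISE_PAUSE_CAP	0x0400
#define ADVERTISE_PAUSE_ASYM	0x0800
#define ADVERTISE_1000FULL	0x0200	/* register 9 (MII_CTRL1000) */

int main(void)
{
	uint16_t adv = 0, ctrl1000 = 0;

	/* MEDIA_TYPE_AUTO_SENSOR: advertise every 10/100 speed and duplex ... */
	adv |= ADVERTISE_10HALF | ADVERTISE_10FULL |
	       ADVERTISE_100HALF | ADVERTISE_100FULL;
	ctrl1000 |= ADVERTISE_1000FULL;

	/* ... and, as in the driver, both symmetric and asymmetric pause */
	adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;

	printf("MII_ADVERTISE = 0x%04x, MII_CTRL1000 = 0x%04x\n", adv, ctrl1000);
	return 0;	/* prints 0x0de0 and 0x0200 */
}
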
658/*
659 * Configures link settings.
660 * hw - Struct containing variables accessed by shared code
661 * Assumes the hardware has previously been reset and the
662 * transmitter and receiver are not enabled.
663 */
664static s32 atl1_setup_link(struct atl1_hw *hw)
665{
666 struct pci_dev *pdev = hw->back->pdev;
667 struct atl1_adapter *adapter = hw->back;
668 s32 ret_val;
669
670 /*
671 * Options:
672 * PHY will advertise value(s) parsed from
673 * autoneg_advertised and fc
674 * Regardless of the autoneg setting, we do not wait for the link result.
675 */
676 ret_val = atl1_phy_setup_autoneg_adv(hw);
677 if (ret_val) {
678 if (netif_msg_link(adapter))
679 dev_dbg(&pdev->dev,
680 "error setting up autonegotiation\n");
681 return ret_val;
682 }
683 /* software reset; re-enable autoneg if needed */
684 ret_val = atl1_phy_reset(hw);
685 if (ret_val) {
686 if (netif_msg_link(adapter))
687 dev_dbg(&pdev->dev, "error resetting phy\n");
688 return ret_val;
689 }
690 hw->phy_configured = true;
691 return ret_val;
692}
693
694static void atl1_init_flash_opcode(struct atl1_hw *hw)
695{
696 if (hw->flash_vendor >= ARRAY_SIZE(flash_table))
697 /* Atmel */
698 hw->flash_vendor = 0;
699
700 /* Init OP table */
701 iowrite8(flash_table[hw->flash_vendor].cmd_program,
702 hw->hw_addr + REG_SPI_FLASH_OP_PROGRAM);
703 iowrite8(flash_table[hw->flash_vendor].cmd_sector_erase,
704 hw->hw_addr + REG_SPI_FLASH_OP_SC_ERASE);
705 iowrite8(flash_table[hw->flash_vendor].cmd_chip_erase,
706 hw->hw_addr + REG_SPI_FLASH_OP_CHIP_ERASE);
707 iowrite8(flash_table[hw->flash_vendor].cmd_rdid,
708 hw->hw_addr + REG_SPI_FLASH_OP_RDID);
709 iowrite8(flash_table[hw->flash_vendor].cmd_wren,
710 hw->hw_addr + REG_SPI_FLASH_OP_WREN);
711 iowrite8(flash_table[hw->flash_vendor].cmd_rdsr,
712 hw->hw_addr + REG_SPI_FLASH_OP_RDSR);
713 iowrite8(flash_table[hw->flash_vendor].cmd_wrsr,
714 hw->hw_addr + REG_SPI_FLASH_OP_WRSR);
715 iowrite8(flash_table[hw->flash_vendor].cmd_read,
716 hw->hw_addr + REG_SPI_FLASH_OP_READ);
717}
718
719/*
720 * Performs basic configuration of the adapter.
721 * hw - Struct containing variables accessed by shared code
722 * Assumes that the controller has previously been reset and is in a
723 * post-reset uninitialized state. Initializes multicast table,
724 * and calls routines to set up the link.
725 * Leaves the transmit and receive units disabled and uninitialized.
726 */
727static s32 atl1_init_hw(struct atl1_hw *hw)
728{
729 u32 ret_val = 0;
730
731 /* Zero out the Multicast HASH table */
732 iowrite32(0, hw->hw_addr + REG_RX_HASH_TABLE);
733 /* clear the old settings from the multicast hash table */
734 iowrite32(0, (hw->hw_addr + REG_RX_HASH_TABLE) + (1 << 2));
735
736 atl1_init_flash_opcode(hw);
737
738 if (!hw->phy_configured) {
739 /* enable GPHY LinkChange interrupt */
740 ret_val = atl1_write_phy_reg(hw, 18, 0xC00);
741 if (ret_val)
742 return ret_val;
743 /* make PHY out of power-saving state */
744 ret_val = atl1_phy_leave_power_saving(hw);
745 if (ret_val)
746 return ret_val;
747 /* Call a subroutine to configure the link */
748 ret_val = atl1_setup_link(hw);
749 }
750 return ret_val;
751}
752
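
Taken together, the helpers above imply the usual bring-up order: soft-reset the MAC, run atl1_init_hw() to clear the hash table, program the flash opcodes and configure the PHY and link, then fetch and program the MAC address. The outline below is not driver code, only a sketch of that sequence; it assumes it sits after atl1_set_mac_addr() further down (or that suitable forward declarations exist), and the driver itself does the equivalent work in its probe and open paths.

/* hypothetical outline, not part of the driver */
static s32 atl1_demo_bring_up(struct atl1_hw *hw)
{
	s32 ret;

	/* reset the MAC and wait for all units to go idle */
	ret = atl1_reset_hw(hw);
	if (ret)
		return ret;

	/* clear the multicast hash, program flash opcodes, configure PHY/link */
	ret = atl1_init_hw(hw);
	if (ret)
		return ret;

	/* read the permanent MAC (falling back to a random one) ... */
	atl1_read_mac_addr(hw);

	/* ... and program it into the MAC station-address registers */
	atl1_set_mac_addr(hw);

	return 0;
}
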
753/*
754 * Detects the current speed and duplex settings of the hardware.
755 * hw - Struct containing variables accessed by shared code
756 * speed - Speed of the connection
757 * duplex - Duplex setting of the connection
758 */
759static s32 atl1_get_speed_and_duplex(struct atl1_hw *hw, u16 *speed, u16 *duplex)
760{
761 struct pci_dev *pdev = hw->back->pdev;
762 struct atl1_adapter *adapter = hw->back;
763 s32 ret_val;
764 u16 phy_data;
765
766 /* Read the PHY Specific Status Register (register 17) */
767 ret_val = atl1_read_phy_reg(hw, MII_ATLX_PSSR, &phy_data);
768 if (ret_val)
769 return ret_val;
770
771 if (!(phy_data & MII_ATLX_PSSR_SPD_DPLX_RESOLVED))
772 return ATLX_ERR_PHY_RES;
773
774 switch (phy_data & MII_ATLX_PSSR_SPEED) {
775 case MII_ATLX_PSSR_1000MBS:
776 *speed = SPEED_1000;
777 break;
778 case MII_ATLX_PSSR_100MBS:
779 *speed = SPEED_100;
780 break;
781 case MII_ATLX_PSSR_10MBS:
782 *speed = SPEED_10;
783 break;
784 default:
785 if (netif_msg_hw(adapter))
786 dev_dbg(&pdev->dev, "error getting speed\n");
787 return ATLX_ERR_PHY_SPEED;
788 break;
789 }
790 if (phy_data & MII_ATLX_PSSR_DPLX)
791 *duplex = FULL_DUPLEX;
792 else
793 *duplex = HALF_DUPLEX;
794
795 return 0;
796}
797
798void atl1_set_mac_addr(struct atl1_hw *hw)
799{
800 u32 value;
801 /*
802 * Example: MAC 00-0B-6A-F6-00-DC is programmed as
803 * register 0 (low dword) = 6AF600DC, register 1 (high word) = 000B
804 * low dword
805 */
806 value = (((u32) hw->mac_addr[2]) << 24) |
807 (((u32) hw->mac_addr[3]) << 16) |
808 (((u32) hw->mac_addr[4]) << 8) | (((u32) hw->mac_addr[5]));
809 iowrite32(value, hw->hw_addr + REG_MAC_STA_ADDR);
810 /* high dword */
811 value = (((u32) hw->mac_addr[0]) << 8) | (((u32) hw->mac_addr[1]));
812 iowrite32(value, (hw->hw_addr + REG_MAC_STA_ADDR) + (1 << 2));
813}
814
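
The comment in atl1_set_mac_addr() compresses its example; the stand-alone snippet below spells out the packing for the same sample address, 00-0B-6A-F6-00-DC, with bytes 2-5 going into the low dword and bytes 0-1 into the high word.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x0B, 0x6A, 0xF6, 0x00, 0xDC };
	uint32_t low, high;

	low  = ((uint32_t)mac[2] << 24) | ((uint32_t)mac[3] << 16) |
	       ((uint32_t)mac[4] << 8)  |  (uint32_t)mac[5];
	high = ((uint32_t)mac[0] << 8)  |  (uint32_t)mac[1];

	/* prints low dword 0x6AF600DC, high word 0x0000000B, matching the comment */
	printf("REG_MAC_STA_ADDR+0 = 0x%08X, +4 = 0x%08X\n", low, high);
	return 0;
}
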
110/* 815/*
111 * atl1_sw_init - Initialize general software structures (struct atl1_adapter) 816 * atl1_sw_init - Initialize general software structures (struct atl1_adapter)
112 * @adapter: board private structure to initialize 817 * @adapter: board private structure to initialize
@@ -125,7 +830,7 @@ static int __devinit atl1_sw_init(struct atl1_adapter *adapter)
125 830
126 adapter->wol = 0; 831 adapter->wol = 0;
127 adapter->rx_buffer_len = (hw->max_frame_size + 7) & ~7; 832 adapter->rx_buffer_len = (hw->max_frame_size + 7) & ~7;
128 adapter->ict = 50000; /* 100ms */ 833 adapter->ict = 50000; /* 100ms */
129 adapter->link_speed = SPEED_0; /* hardware init */ 834 adapter->link_speed = SPEED_0; /* hardware init */
130 adapter->link_duplex = FULL_DUPLEX; 835 adapter->link_duplex = FULL_DUPLEX;
131 836
@@ -206,30 +911,12 @@ static int atl1_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
206} 911}
207 912
208/* 913/*
209 * atl1_ioctl -
210 * @netdev:
211 * @ifreq:
212 * @cmd:
213 */
214static int atl1_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
215{
216 switch (cmd) {
217 case SIOCGMIIPHY:
218 case SIOCGMIIREG:
219 case SIOCSMIIREG:
220 return atl1_mii_ioctl(netdev, ifr, cmd);
221 default:
222 return -EOPNOTSUPP;
223 }
224}
225
226/*
227 * atl1_setup_mem_resources - allocate Tx / RX descriptor resources 914 * atl1_setup_mem_resources - allocate Tx / RX descriptor resources
228 * @adapter: board private structure 915 * @adapter: board private structure
229 * 916 *
230 * Return 0 on success, negative on failure 917 * Return 0 on success, negative on failure
231 */ 918 */
232s32 atl1_setup_ring_resources(struct atl1_adapter *adapter) 919static s32 atl1_setup_ring_resources(struct atl1_adapter *adapter)
233{ 920{
234 struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; 921 struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
235 struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; 922 struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring;
@@ -242,13 +929,16 @@ s32 atl1_setup_ring_resources(struct atl1_adapter *adapter)
242 size = sizeof(struct atl1_buffer) * (tpd_ring->count + rfd_ring->count); 929 size = sizeof(struct atl1_buffer) * (tpd_ring->count + rfd_ring->count);
243 tpd_ring->buffer_info = kzalloc(size, GFP_KERNEL); 930 tpd_ring->buffer_info = kzalloc(size, GFP_KERNEL);
244 if (unlikely(!tpd_ring->buffer_info)) { 931 if (unlikely(!tpd_ring->buffer_info)) {
245 dev_err(&pdev->dev, "kzalloc failed , size = D%d\n", size); 932 if (netif_msg_drv(adapter))
933 dev_err(&pdev->dev, "kzalloc failed , size = D%d\n",
934 size);
246 goto err_nomem; 935 goto err_nomem;
247 } 936 }
248 rfd_ring->buffer_info = 937 rfd_ring->buffer_info =
249 (struct atl1_buffer *)(tpd_ring->buffer_info + tpd_ring->count); 938 (struct atl1_buffer *)(tpd_ring->buffer_info + tpd_ring->count);
250 939
251 /* real ring DMA buffer 940 /*
941 * real ring DMA buffer
252 * each ring/block may need up to 8 bytes for alignment, hence the 942 * each ring/block may need up to 8 bytes for alignment, hence the
253 * additional 40 bytes tacked onto the end. 943 * additional 40 bytes tacked onto the end.
254 */ 944 */
@@ -263,7 +953,8 @@ s32 atl1_setup_ring_resources(struct atl1_adapter *adapter)
263 ring_header->desc = pci_alloc_consistent(pdev, ring_header->size, 953 ring_header->desc = pci_alloc_consistent(pdev, ring_header->size,
264 &ring_header->dma); 954 &ring_header->dma);
265 if (unlikely(!ring_header->desc)) { 955 if (unlikely(!ring_header->desc)) {
266 dev_err(&pdev->dev, "pci_alloc_consistent failed\n"); 956 if (netif_msg_drv(adapter))
957 dev_err(&pdev->dev, "pci_alloc_consistent failed\n");
267 goto err_nomem; 958 goto err_nomem;
268 } 959 }
269 960
@@ -307,7 +998,7 @@ s32 atl1_setup_ring_resources(struct atl1_adapter *adapter)
307 ((u8 *) adapter->cmb.cmb + 998 ((u8 *) adapter->cmb.cmb +
308 (sizeof(struct coals_msg_block) + offset)); 999 (sizeof(struct coals_msg_block) + offset));
309 1000
310 return ATL1_SUCCESS; 1001 return 0;
311 1002
312err_nomem: 1003err_nomem:
313 kfree(tpd_ring->buffer_info); 1004 kfree(tpd_ring->buffer_info);
@@ -416,7 +1107,7 @@ static void atl1_clean_tx_ring(struct atl1_adapter *adapter)
416 * 1107 *
417 * Free all transmit software resources 1108 * Free all transmit software resources
418 */ 1109 */
419void atl1_free_ring_resources(struct atl1_adapter *adapter) 1110static void atl1_free_ring_resources(struct atl1_adapter *adapter)
420{ 1111{
421 struct pci_dev *pdev = adapter->pdev; 1112 struct pci_dev *pdev = adapter->pdev;
422 struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; 1113 struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
@@ -481,31 +1172,6 @@ static void atl1_setup_mac_ctrl(struct atl1_adapter *adapter)
481 iowrite32(value, hw->hw_addr + REG_MAC_CTRL); 1172 iowrite32(value, hw->hw_addr + REG_MAC_CTRL);
482} 1173}
483 1174
484/*
485 * atl1_set_mac - Change the Ethernet Address of the NIC
486 * @netdev: network interface device structure
487 * @p: pointer to an address structure
488 *
489 * Returns 0 on success, negative on failure
490 */
491static int atl1_set_mac(struct net_device *netdev, void *p)
492{
493 struct atl1_adapter *adapter = netdev_priv(netdev);
494 struct sockaddr *addr = p;
495
496 if (netif_running(netdev))
497 return -EBUSY;
498
499 if (!is_valid_ether_addr(addr->sa_data))
500 return -EADDRNOTAVAIL;
501
502 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
503 memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len);
504
505 atl1_set_mac_addr(&adapter->hw);
506 return 0;
507}
508
509static u32 atl1_check_link(struct atl1_adapter *adapter) 1175static u32 atl1_check_link(struct atl1_adapter *adapter)
510{ 1176{
511 struct atl1_hw *hw = &adapter->hw; 1177 struct atl1_hw *hw = &adapter->hw;
@@ -517,14 +1183,17 @@ static u32 atl1_check_link(struct atl1_adapter *adapter)
517 /* MII_BMSR must read twice */ 1183 /* MII_BMSR must read twice */
518 atl1_read_phy_reg(hw, MII_BMSR, &phy_data); 1184 atl1_read_phy_reg(hw, MII_BMSR, &phy_data);
519 atl1_read_phy_reg(hw, MII_BMSR, &phy_data); 1185 atl1_read_phy_reg(hw, MII_BMSR, &phy_data);
520 if (!(phy_data & BMSR_LSTATUS)) { /* link down */ 1186 if (!(phy_data & BMSR_LSTATUS)) {
521 if (netif_carrier_ok(netdev)) { /* old link state: Up */ 1187 /* link down */
522 dev_info(&adapter->pdev->dev, "link is down\n"); 1188 if (netif_carrier_ok(netdev)) {
1189 /* old link state: Up */
1190 if (netif_msg_link(adapter))
1191 dev_info(&adapter->pdev->dev, "link is down\n");
523 adapter->link_speed = SPEED_0; 1192 adapter->link_speed = SPEED_0;
524 netif_carrier_off(netdev); 1193 netif_carrier_off(netdev);
525 netif_stop_queue(netdev); 1194 netif_stop_queue(netdev);
526 } 1195 }
527 return ATL1_SUCCESS; 1196 return 0;
528 } 1197 }
529 1198
530 /* Link Up */ 1199 /* Link Up */
@@ -562,20 +1231,22 @@ static u32 atl1_check_link(struct atl1_adapter *adapter)
562 adapter->link_speed = speed; 1231 adapter->link_speed = speed;
563 adapter->link_duplex = duplex; 1232 adapter->link_duplex = duplex;
564 atl1_setup_mac_ctrl(adapter); 1233 atl1_setup_mac_ctrl(adapter);
565 dev_info(&adapter->pdev->dev, 1234 if (netif_msg_link(adapter))
566 "%s link is up %d Mbps %s\n", 1235 dev_info(&adapter->pdev->dev,
567 netdev->name, adapter->link_speed, 1236 "%s link is up %d Mbps %s\n",
568 adapter->link_duplex == FULL_DUPLEX ? 1237 netdev->name, adapter->link_speed,
569 "full duplex" : "half duplex"); 1238 adapter->link_duplex == FULL_DUPLEX ?
1239 "full duplex" : "half duplex");
570 } 1240 }
571 if (!netif_carrier_ok(netdev)) { /* Link down -> Up */ 1241 if (!netif_carrier_ok(netdev)) {
1242 /* Link down -> Up */
572 netif_carrier_on(netdev); 1243 netif_carrier_on(netdev);
573 netif_wake_queue(netdev); 1244 netif_wake_queue(netdev);
574 } 1245 }
575 return ATL1_SUCCESS; 1246 return 0;
576 } 1247 }
577 1248
578 /* change orignal link status */ 1249 /* change original link status */
579 if (netif_carrier_ok(netdev)) { 1250 if (netif_carrier_ok(netdev)) {
580 adapter->link_speed = SPEED_0; 1251 adapter->link_speed = SPEED_0;
581 netif_carrier_off(netdev); 1252 netif_carrier_off(netdev);
@@ -596,12 +1267,13 @@ static u32 atl1_check_link(struct atl1_adapter *adapter)
596 phy_data = 1267 phy_data =
597 MII_CR_FULL_DUPLEX | MII_CR_SPEED_10 | MII_CR_RESET; 1268 MII_CR_FULL_DUPLEX | MII_CR_SPEED_10 | MII_CR_RESET;
598 break; 1269 break;
599 default: /* MEDIA_TYPE_10M_HALF: */ 1270 default:
1271 /* MEDIA_TYPE_10M_HALF: */
600 phy_data = MII_CR_SPEED_10 | MII_CR_RESET; 1272 phy_data = MII_CR_SPEED_10 | MII_CR_RESET;
601 break; 1273 break;
602 } 1274 }
603 atl1_write_phy_reg(hw, MII_BMCR, phy_data); 1275 atl1_write_phy_reg(hw, MII_BMCR, phy_data);
604 return ATL1_SUCCESS; 1276 return 0;
605 } 1277 }
606 1278
607 /* auto-neg, insert timer to re-config phy */ 1279 /* auto-neg, insert timer to re-config phy */
@@ -610,103 +1282,6 @@ static u32 atl1_check_link(struct atl1_adapter *adapter)
610 mod_timer(&adapter->phy_config_timer, jiffies + 3 * HZ); 1282 mod_timer(&adapter->phy_config_timer, jiffies + 3 * HZ);
611 } 1283 }
612 1284
613 return ATL1_SUCCESS;
614}
615
616static void atl1_check_for_link(struct atl1_adapter *adapter)
617{
618 struct net_device *netdev = adapter->netdev;
619 u16 phy_data = 0;
620
621 spin_lock(&adapter->lock);
622 adapter->phy_timer_pending = false;
623 atl1_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data);
624 atl1_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data);
625 spin_unlock(&adapter->lock);
626
627 /* notify upper layer link down ASAP */
628 if (!(phy_data & BMSR_LSTATUS)) { /* Link Down */
629 if (netif_carrier_ok(netdev)) { /* old link state: Up */
630 dev_info(&adapter->pdev->dev, "%s link is down\n",
631 netdev->name);
632 adapter->link_speed = SPEED_0;
633 netif_carrier_off(netdev);
634 netif_stop_queue(netdev);
635 }
636 }
637 schedule_work(&adapter->link_chg_task);
638}
639
640/*
641 * atl1_set_multi - Multicast and Promiscuous mode set
642 * @netdev: network interface device structure
643 *
644 * The set_multi entry point is called whenever the multicast address
645 * list or the network interface flags are updated. This routine is
646 * responsible for configuring the hardware for proper multicast,
647 * promiscuous mode, and all-multi behavior.
648 */
649static void atl1_set_multi(struct net_device *netdev)
650{
651 struct atl1_adapter *adapter = netdev_priv(netdev);
652 struct atl1_hw *hw = &adapter->hw;
653 struct dev_mc_list *mc_ptr;
654 u32 rctl;
655 u32 hash_value;
656
657 /* Check for Promiscuous and All Multicast modes */
658 rctl = ioread32(hw->hw_addr + REG_MAC_CTRL);
659 if (netdev->flags & IFF_PROMISC)
660 rctl |= MAC_CTRL_PROMIS_EN;
661 else if (netdev->flags & IFF_ALLMULTI) {
662 rctl |= MAC_CTRL_MC_ALL_EN;
663 rctl &= ~MAC_CTRL_PROMIS_EN;
664 } else
665 rctl &= ~(MAC_CTRL_PROMIS_EN | MAC_CTRL_MC_ALL_EN);
666
667 iowrite32(rctl, hw->hw_addr + REG_MAC_CTRL);
668
669 /* clear the old settings from the multicast hash table */
670 iowrite32(0, hw->hw_addr + REG_RX_HASH_TABLE);
671 iowrite32(0, (hw->hw_addr + REG_RX_HASH_TABLE) + (1 << 2));
672
673 /* compute mc addresses' hash value ,and put it into hash table */
674 for (mc_ptr = netdev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) {
675 hash_value = atl1_hash_mc_addr(hw, mc_ptr->dmi_addr);
676 atl1_hash_set(hw, hash_value);
677 }
678}
679
680/*
681 * atl1_change_mtu - Change the Maximum Transfer Unit
682 * @netdev: network interface device structure
683 * @new_mtu: new value for maximum frame size
684 *
685 * Returns 0 on success, negative on failure
686 */
687static int atl1_change_mtu(struct net_device *netdev, int new_mtu)
688{
689 struct atl1_adapter *adapter = netdev_priv(netdev);
690 int old_mtu = netdev->mtu;
691 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
692
693 if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
694 (max_frame > MAX_JUMBO_FRAME_SIZE)) {
695 dev_warn(&adapter->pdev->dev, "invalid MTU setting\n");
696 return -EINVAL;
697 }
698
699 adapter->hw.max_frame_size = max_frame;
700 adapter->hw.tx_jumbo_task_th = (max_frame + 7) >> 3;
701 adapter->rx_buffer_len = (max_frame + 7) & ~7;
702 adapter->hw.rx_jumbo_th = adapter->rx_buffer_len / 8;
703
704 netdev->mtu = new_mtu;
705 if ((old_mtu != new_mtu) && netif_running(netdev)) {
706 atl1_down(adapter);
707 atl1_up(adapter);
708 }
709
710 return 0; 1285 return 0;
711} 1286}
712 1287
@@ -974,37 +1549,6 @@ static void atl1_via_workaround(struct atl1_adapter *adapter)
974 iowrite32(value, adapter->hw.hw_addr + PCI_COMMAND); 1549 iowrite32(value, adapter->hw.hw_addr + PCI_COMMAND);
975} 1550}
976 1551
977/*
978 * atl1_irq_enable - Enable default interrupt generation settings
979 * @adapter: board private structure
980 */
981static void atl1_irq_enable(struct atl1_adapter *adapter)
982{
983 iowrite32(IMR_NORMAL_MASK, adapter->hw.hw_addr + REG_IMR);
984 ioread32(adapter->hw.hw_addr + REG_IMR);
985}
986
987/*
988 * atl1_irq_disable - Mask off interrupt generation on the NIC
989 * @adapter: board private structure
990 */
991static void atl1_irq_disable(struct atl1_adapter *adapter)
992{
993 iowrite32(0, adapter->hw.hw_addr + REG_IMR);
994 ioread32(adapter->hw.hw_addr + REG_IMR);
995 synchronize_irq(adapter->pdev->irq);
996}
997
998static void atl1_clear_phy_int(struct atl1_adapter *adapter)
999{
1000 u16 phy_data;
1001 unsigned long flags;
1002
1003 spin_lock_irqsave(&adapter->lock, flags);
1004 atl1_read_phy_reg(&adapter->hw, 19, &phy_data);
1005 spin_unlock_irqrestore(&adapter->lock, flags);
1006}
1007
1008static void atl1_inc_smb(struct atl1_adapter *adapter) 1552static void atl1_inc_smb(struct atl1_adapter *adapter)
1009{ 1553{
1010 struct stats_msg_block *smb = adapter->smb.smb; 1554 struct stats_msg_block *smb = adapter->smb.smb;
@@ -1076,19 +1620,6 @@ static void atl1_inc_smb(struct atl1_adapter *adapter)
1076 adapter->soft_stats.tx_carrier_errors; 1620 adapter->soft_stats.tx_carrier_errors;
1077} 1621}
1078 1622
1079/*
1080 * atl1_get_stats - Get System Network Statistics
1081 * @netdev: network interface device structure
1082 *
1083 * Returns the address of the device statistics structure.
1084 * The statistics are actually updated from the timer callback.
1085 */
1086static struct net_device_stats *atl1_get_stats(struct net_device *netdev)
1087{
1088 struct atl1_adapter *adapter = netdev_priv(netdev);
1089 return &adapter->net_stats;
1090}
1091
1092static void atl1_update_mailbox(struct atl1_adapter *adapter) 1623static void atl1_update_mailbox(struct atl1_adapter *adapter)
1093{ 1624{
1094 unsigned long flags; 1625 unsigned long flags;
@@ -1150,8 +1681,9 @@ static void atl1_rx_checksum(struct atl1_adapter *adapter,
1150 if (rrd->err_flg & (ERR_FLAG_CRC | ERR_FLAG_TRUNC | 1681 if (rrd->err_flg & (ERR_FLAG_CRC | ERR_FLAG_TRUNC |
1151 ERR_FLAG_CODE | ERR_FLAG_OV)) { 1682 ERR_FLAG_CODE | ERR_FLAG_OV)) {
1152 adapter->hw_csum_err++; 1683 adapter->hw_csum_err++;
1153 dev_printk(KERN_DEBUG, &pdev->dev, 1684 if (netif_msg_rx_err(adapter))
1154 "rx checksum error\n"); 1685 dev_printk(KERN_DEBUG, &pdev->dev,
1686 "rx checksum error\n");
1155 return; 1687 return;
1156 } 1688 }
1157 } 1689 }
@@ -1170,9 +1702,10 @@ static void atl1_rx_checksum(struct atl1_adapter *adapter,
1170 } 1702 }
1171 1703
1172 /* IPv4, but hardware thinks its checksum is wrong */ 1704 /* IPv4, but hardware thinks its checksum is wrong */
1173 dev_printk(KERN_DEBUG, &pdev->dev, 1705 if (netif_msg_rx_err(adapter))
1174 "hw csum wrong, pkt_flag:%x, err_flag:%x\n", 1706 dev_printk(KERN_DEBUG, &pdev->dev,
1175 rrd->pkt_flg, rrd->err_flg); 1707 "hw csum wrong, pkt_flag:%x, err_flag:%x\n",
1708 rrd->pkt_flg, rrd->err_flg);
1176 skb->ip_summed = CHECKSUM_COMPLETE; 1709 skb->ip_summed = CHECKSUM_COMPLETE;
1177 skb->csum = htons(rrd->xsz.xsum_sz.rx_chksum); 1710 skb->csum = htons(rrd->xsz.xsum_sz.rx_chksum);
1178 adapter->hw_csum_err++; 1711 adapter->hw_csum_err++;
@@ -1210,7 +1743,8 @@ static u16 atl1_alloc_rx_buffers(struct atl1_adapter *adapter)
1210 rfd_desc = ATL1_RFD_DESC(rfd_ring, rfd_next_to_use); 1743 rfd_desc = ATL1_RFD_DESC(rfd_ring, rfd_next_to_use);
1211 1744
1212 skb = dev_alloc_skb(adapter->rx_buffer_len + NET_IP_ALIGN); 1745 skb = dev_alloc_skb(adapter->rx_buffer_len + NET_IP_ALIGN);
1213 if (unlikely(!skb)) { /* Better luck next round */ 1746 if (unlikely(!skb)) {
1747 /* Better luck next round */
1214 adapter->net_stats.rx_dropped++; 1748 adapter->net_stats.rx_dropped++;
1215 break; 1749 break;
1216 } 1750 }
@@ -1281,18 +1815,39 @@ chk_rrd:
1281 /* check rrd status */ 1815 /* check rrd status */
1282 if (likely(rrd->num_buf == 1)) 1816 if (likely(rrd->num_buf == 1))
1283 goto rrd_ok; 1817 goto rrd_ok;
1818 else if (netif_msg_rx_err(adapter)) {
1819 dev_printk(KERN_DEBUG, &adapter->pdev->dev,
1820 "unexpected RRD buffer count\n");
1821 dev_printk(KERN_DEBUG, &adapter->pdev->dev,
1822 "rx_buf_len = %d\n",
1823 adapter->rx_buffer_len);
1824 dev_printk(KERN_DEBUG, &adapter->pdev->dev,
1825 "RRD num_buf = %d\n",
1826 rrd->num_buf);
1827 dev_printk(KERN_DEBUG, &adapter->pdev->dev,
1828 "RRD pkt_len = %d\n",
1829 rrd->xsz.xsum_sz.pkt_size);
1830 dev_printk(KERN_DEBUG, &adapter->pdev->dev,
1831 "RRD pkt_flg = 0x%08X\n",
1832 rrd->pkt_flg);
1833 dev_printk(KERN_DEBUG, &adapter->pdev->dev,
1834 "RRD err_flg = 0x%08X\n",
1835 rrd->err_flg);
1836 dev_printk(KERN_DEBUG, &adapter->pdev->dev,
1837 "RRD vlan_tag = 0x%08X\n",
1838 rrd->vlan_tag);
1839 }
1284 1840
1285 /* rrd seems to be bad */ 1841 /* rrd seems to be bad */
1286 if (unlikely(i-- > 0)) { 1842 if (unlikely(i-- > 0)) {
1287 /* rrd may not be DMAed completely */ 1843 /* rrd may not be DMAed completely */
1288 dev_printk(KERN_DEBUG, &adapter->pdev->dev,
1289 "incomplete RRD DMA transfer\n");
1290 udelay(1); 1844 udelay(1);
1291 goto chk_rrd; 1845 goto chk_rrd;
1292 } 1846 }
1293 /* bad rrd */ 1847 /* bad rrd */
1294 dev_printk(KERN_DEBUG, &adapter->pdev->dev, 1848 if (netif_msg_rx_err(adapter))
1295 "bad RRD\n"); 1849 dev_printk(KERN_DEBUG, &adapter->pdev->dev,
1850 "bad RRD\n");
1296 /* see if update RFD index */ 1851 /* see if update RFD index */
1297 if (rrd->num_buf > 1) 1852 if (rrd->num_buf > 1)
1298 atl1_update_rfd_index(adapter, rrd); 1853 atl1_update_rfd_index(adapter, rrd);
@@ -1411,8 +1966,6 @@ static void atl1_intr_tx(struct atl1_adapter *adapter)
1411 dev_kfree_skb_irq(buffer_info->skb); 1966 dev_kfree_skb_irq(buffer_info->skb);
1412 buffer_info->skb = NULL; 1967 buffer_info->skb = NULL;
1413 } 1968 }
1414 tpd->buffer_addr = 0;
1415 tpd->desc.data = 0;
1416 1969
1417 if (++sw_tpd_next_to_clean == tpd_ring->count) 1970 if (++sw_tpd_next_to_clean == tpd_ring->count)
1418 sw_tpd_next_to_clean = 0; 1971 sw_tpd_next_to_clean = 0;
@@ -1434,167 +1987,192 @@ static u16 atl1_tpd_avail(struct atl1_tpd_ring *tpd_ring)
1434} 1987}
1435 1988
1436static int atl1_tso(struct atl1_adapter *adapter, struct sk_buff *skb, 1989static int atl1_tso(struct atl1_adapter *adapter, struct sk_buff *skb,
1437 struct tso_param *tso) 1990 struct tx_packet_desc *ptpd)
1438{ 1991{
1439 /* We enter this function holding a spinlock. */ 1992 /* spinlock held */
1440 u8 ipofst; 1993 u8 hdr_len, ip_off;
1994 u32 real_len;
1441 int err; 1995 int err;
1442 1996
1443 if (skb_shinfo(skb)->gso_size) { 1997 if (skb_shinfo(skb)->gso_size) {
1444 if (skb_header_cloned(skb)) { 1998 if (skb_header_cloned(skb)) {
1445 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 1999 err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
1446 if (unlikely(err)) 2000 if (unlikely(err))
1447 return err; 2001 return -1;
1448 } 2002 }
1449 2003
1450 if (skb->protocol == ntohs(ETH_P_IP)) { 2004 if (skb->protocol == ntohs(ETH_P_IP)) {
1451 struct iphdr *iph = ip_hdr(skb); 2005 struct iphdr *iph = ip_hdr(skb);
1452 2006
1453 iph->tot_len = 0; 2007 real_len = (((unsigned char *)iph - skb->data) +
2008 ntohs(iph->tot_len));
2009 if (real_len < skb->len)
2010 pskb_trim(skb, real_len);
2011 hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb));
2012 if (skb->len == hdr_len) {
2013 iph->check = 0;
2014 tcp_hdr(skb)->check =
2015 ~csum_tcpudp_magic(iph->saddr,
2016 iph->daddr, tcp_hdrlen(skb),
2017 IPPROTO_TCP, 0);
2018 ptpd->word3 |= (iph->ihl & TPD_IPHL_MASK) <<
2019 TPD_IPHL_SHIFT;
2020 ptpd->word3 |= ((tcp_hdrlen(skb) >> 2) &
2021 TPD_TCPHDRLEN_MASK) <<
2022 TPD_TCPHDRLEN_SHIFT;
2023 ptpd->word3 |= 1 << TPD_IP_CSUM_SHIFT;
2024 ptpd->word3 |= 1 << TPD_TCP_CSUM_SHIFT;
2025 return 1;
2026 }
2027
1454 iph->check = 0; 2028 iph->check = 0;
1455 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, 2029 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
1456 iph->daddr, 0, IPPROTO_TCP, 0); 2030 iph->daddr, 0, IPPROTO_TCP, 0);
1457 ipofst = skb_network_offset(skb); 2031 ip_off = (unsigned char *)iph -
1458 if (ipofst != ETH_HLEN) /* 802.3 frame */ 2032 (unsigned char *) skb_network_header(skb);
1459 tso->tsopl |= 1 << TSO_PARAM_ETHTYPE_SHIFT; 2033 if (ip_off == 8) /* 802.3-SNAP frame */
1460 2034 ptpd->word3 |= 1 << TPD_ETHTYPE_SHIFT;
1461 tso->tsopl |= (iph->ihl & 2035 else if (ip_off != 0)
1462 CSUM_PARAM_IPHL_MASK) << CSUM_PARAM_IPHL_SHIFT; 2036 return -2;
1463 tso->tsopl |= (tcp_hdrlen(skb) & 2037
1464 TSO_PARAM_TCPHDRLEN_MASK) << 2038 ptpd->word3 |= (iph->ihl & TPD_IPHL_MASK) <<
1465 TSO_PARAM_TCPHDRLEN_SHIFT; 2039 TPD_IPHL_SHIFT;
1466 tso->tsopl |= (skb_shinfo(skb)->gso_size & 2040 ptpd->word3 |= ((tcp_hdrlen(skb) >> 2) &
1467 TSO_PARAM_MSS_MASK) << TSO_PARAM_MSS_SHIFT; 2041 TPD_TCPHDRLEN_MASK) << TPD_TCPHDRLEN_SHIFT;
1468 tso->tsopl |= 1 << TSO_PARAM_IPCKSUM_SHIFT; 2042 ptpd->word3 |= (skb_shinfo(skb)->gso_size &
1469 tso->tsopl |= 1 << TSO_PARAM_TCPCKSUM_SHIFT; 2043 TPD_MSS_MASK) << TPD_MSS_SHIFT;
1470 tso->tsopl |= 1 << TSO_PARAM_SEGMENT_SHIFT; 2044 ptpd->word3 |= 1 << TPD_SEGMENT_EN_SHIFT;
1471 return true; 2045 return 3;
1472 } 2046 }
1473 } 2047 }
1474 return false; 2048 return false;
1475} 2049}
1476 2050
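
Two length calculations drive the TSO path above: real_len, the IP header offset plus the IP total length, used to trim Ethernet padding off the skb, and hdr_len, the transport offset plus the TCP header length, which decides whether the packet is headers only. The stand-alone arithmetic below runs those formulas on made-up offsets (14-byte Ethernet header, minimal IPv4 and TCP headers, 60-byte padded frame) to show when the trim and the headers-only branch fire.

#include <stdio.h>

int main(void)
{
	/* made-up framing: no VLAN, minimal IPv4 and TCP headers */
	int ip_off = 14;		/* (unsigned char *)iph - skb->data       */
	int ip_tot_len = 40;		/* ntohs(iph->tot_len): 20 IP + 20 TCP    */
	int transport_off = 34;		/* skb_transport_offset(skb)              */
	int tcp_hdr = 20;		/* tcp_hdrlen(skb)                        */
	int skb_len = 60;		/* padded to the 60-byte Ethernet minimum */

	int real_len = ip_off + ip_tot_len;		/* 54 */
	int hdr_len = transport_off + tcp_hdr;		/* 54 */

	if (real_len < skb_len) {
		printf("pskb_trim: %d -> %d (drop %d pad bytes)\n",
		       skb_len, real_len, skb_len - real_len);
		skb_len = real_len;
	}

	if (skb_len == hdr_len)
		printf("headers only (%d bytes): no payload to segment\n", hdr_len);
	else
		printf("payload of %d bytes to segment\n", skb_len - hdr_len);
	return 0;
}
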
1477static int atl1_tx_csum(struct atl1_adapter *adapter, struct sk_buff *skb, 2051static int atl1_tx_csum(struct atl1_adapter *adapter, struct sk_buff *skb,
1478 struct csum_param *csum) 2052 struct tx_packet_desc *ptpd)
1479{ 2053{
1480 u8 css, cso; 2054 u8 css, cso;
1481 2055
1482 if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) { 2056 if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
1483 cso = skb_transport_offset(skb); 2057 css = (u8) (skb->csum_start - skb_headroom(skb));
1484 css = cso + skb->csum_offset; 2058 cso = css + (u8) skb->csum_offset;
1485 if (unlikely(cso & 0x1)) { 2059 if (unlikely(css & 0x1)) {
1486 dev_printk(KERN_DEBUG, &adapter->pdev->dev, 2060 /* L1 hardware requires an even number here */
1487 "payload offset not an even number\n"); 2061 if (netif_msg_tx_err(adapter))
2062 dev_printk(KERN_DEBUG, &adapter->pdev->dev,
2063 "payload offset not an even number\n");
1488 return -1; 2064 return -1;
1489 } 2065 }
1490 csum->csumpl |= (cso & CSUM_PARAM_PLOADOFFSET_MASK) << 2066 ptpd->word3 |= (css & TPD_PLOADOFFSET_MASK) <<
1491 CSUM_PARAM_PLOADOFFSET_SHIFT; 2067 TPD_PLOADOFFSET_SHIFT;
1492 csum->csumpl |= (css & CSUM_PARAM_XSUMOFFSET_MASK) << 2068 ptpd->word3 |= (cso & TPD_CCSUMOFFSET_MASK) <<
1493 CSUM_PARAM_XSUMOFFSET_SHIFT; 2069 TPD_CCSUMOFFSET_SHIFT;
1494 csum->csumpl |= 1 << CSUM_PARAM_CUSTOMCKSUM_SHIFT; 2070 ptpd->word3 |= 1 << TPD_CUST_CSUM_EN_SHIFT;
1495 return true; 2071 return true;
1496 } 2072 }
1497 2073 return 0;
1498 return true;
1499} 2074}
1500 2075
1501static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb, 2076static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
1502 bool tcp_seg) 2077 struct tx_packet_desc *ptpd)
1503{ 2078{
1504 /* We enter this function holding a spinlock. */ 2079 /* spinlock held */
1505 struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; 2080 struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
1506 struct atl1_buffer *buffer_info; 2081 struct atl1_buffer *buffer_info;
2082 u16 buf_len = skb->len;
1507 struct page *page; 2083 struct page *page;
1508 int first_buf_len = skb->len;
1509 unsigned long offset; 2084 unsigned long offset;
1510 unsigned int nr_frags; 2085 unsigned int nr_frags;
1511 unsigned int f; 2086 unsigned int f;
1512 u16 tpd_next_to_use; 2087 int retval;
1513 u16 proto_hdr_len; 2088 u16 next_to_use;
1514 u16 len12; 2089 u16 data_len;
2090 u8 hdr_len;
1515 2091
1516 first_buf_len -= skb->data_len; 2092 buf_len -= skb->data_len;
1517 nr_frags = skb_shinfo(skb)->nr_frags; 2093 nr_frags = skb_shinfo(skb)->nr_frags;
1518 tpd_next_to_use = atomic_read(&tpd_ring->next_to_use); 2094 next_to_use = atomic_read(&tpd_ring->next_to_use);
1519 buffer_info = &tpd_ring->buffer_info[tpd_next_to_use]; 2095 buffer_info = &tpd_ring->buffer_info[next_to_use];
1520 if (unlikely(buffer_info->skb)) 2096 if (unlikely(buffer_info->skb))
1521 BUG(); 2097 BUG();
1522 buffer_info->skb = NULL; /* put skb in last TPD */ 2098 /* put skb in last TPD */
1523 2099 buffer_info->skb = NULL;
1524 if (tcp_seg) { 2100
1525 /* TSO/GSO */ 2101 retval = (ptpd->word3 >> TPD_SEGMENT_EN_SHIFT) & TPD_SEGMENT_EN_MASK;
1526 proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 2102 if (retval) {
1527 buffer_info->length = proto_hdr_len; 2103 /* TSO */
2104 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2105 buffer_info->length = hdr_len;
1528 page = virt_to_page(skb->data); 2106 page = virt_to_page(skb->data);
1529 offset = (unsigned long)skb->data & ~PAGE_MASK; 2107 offset = (unsigned long)skb->data & ~PAGE_MASK;
1530 buffer_info->dma = pci_map_page(adapter->pdev, page, 2108 buffer_info->dma = pci_map_page(adapter->pdev, page,
1531 offset, proto_hdr_len, 2109 offset, hdr_len,
1532 PCI_DMA_TODEVICE); 2110 PCI_DMA_TODEVICE);
1533 2111
1534 if (++tpd_next_to_use == tpd_ring->count) 2112 if (++next_to_use == tpd_ring->count)
1535 tpd_next_to_use = 0; 2113 next_to_use = 0;
1536 2114
1537 if (first_buf_len > proto_hdr_len) { 2115 if (buf_len > hdr_len) {
1538 int i, m; 2116 int i, nseg;
1539 2117
1540 len12 = first_buf_len - proto_hdr_len; 2118 data_len = buf_len - hdr_len;
1541 m = (len12 + ATL1_MAX_TX_BUF_LEN - 1) / 2119 nseg = (data_len + ATL1_MAX_TX_BUF_LEN - 1) /
1542 ATL1_MAX_TX_BUF_LEN; 2120 ATL1_MAX_TX_BUF_LEN;
1543 for (i = 0; i < m; i++) { 2121 for (i = 0; i < nseg; i++) {
1544 buffer_info = 2122 buffer_info =
1545 &tpd_ring->buffer_info[tpd_next_to_use]; 2123 &tpd_ring->buffer_info[next_to_use];
1546 buffer_info->skb = NULL; 2124 buffer_info->skb = NULL;
1547 buffer_info->length = 2125 buffer_info->length =
1548 (ATL1_MAX_TX_BUF_LEN >= 2126 (ATL1_MAX_TX_BUF_LEN >=
1549 len12) ? ATL1_MAX_TX_BUF_LEN : len12; 2127 data_len) ? ATL1_MAX_TX_BUF_LEN : data_len;
1550 len12 -= buffer_info->length; 2128 data_len -= buffer_info->length;
1551 page = virt_to_page(skb->data + 2129 page = virt_to_page(skb->data +
1552 (proto_hdr_len + 2130 (hdr_len + i * ATL1_MAX_TX_BUF_LEN));
1553 i * ATL1_MAX_TX_BUF_LEN));
1554 offset = (unsigned long)(skb->data + 2131 offset = (unsigned long)(skb->data +
1555 (proto_hdr_len + 2132 (hdr_len + i * ATL1_MAX_TX_BUF_LEN)) &
1556 i * ATL1_MAX_TX_BUF_LEN)) & ~PAGE_MASK; 2133 ~PAGE_MASK;
1557 buffer_info->dma = pci_map_page(adapter->pdev, 2134 buffer_info->dma = pci_map_page(adapter->pdev,
1558 page, offset, buffer_info->length, 2135 page, offset, buffer_info->length,
1559 PCI_DMA_TODEVICE); 2136 PCI_DMA_TODEVICE);
1560 if (++tpd_next_to_use == tpd_ring->count) 2137 if (++next_to_use == tpd_ring->count)
1561 tpd_next_to_use = 0; 2138 next_to_use = 0;
1562 } 2139 }
1563 } 2140 }
1564 } else { 2141 } else {
1565 /* not TSO/GSO */ 2142 /* not TSO */
1566 buffer_info->length = first_buf_len; 2143 buffer_info->length = buf_len;
1567 page = virt_to_page(skb->data); 2144 page = virt_to_page(skb->data);
1568 offset = (unsigned long)skb->data & ~PAGE_MASK; 2145 offset = (unsigned long)skb->data & ~PAGE_MASK;
1569 buffer_info->dma = pci_map_page(adapter->pdev, page, 2146 buffer_info->dma = pci_map_page(adapter->pdev, page,
1570 offset, first_buf_len, PCI_DMA_TODEVICE); 2147 offset, buf_len, PCI_DMA_TODEVICE);
1571 if (++tpd_next_to_use == tpd_ring->count) 2148 if (++next_to_use == tpd_ring->count)
1572 tpd_next_to_use = 0; 2149 next_to_use = 0;
1573 } 2150 }
1574 2151
1575 for (f = 0; f < nr_frags; f++) { 2152 for (f = 0; f < nr_frags; f++) {
1576 struct skb_frag_struct *frag; 2153 struct skb_frag_struct *frag;
1577 u16 lenf, i, m; 2154 u16 i, nseg;
1578 2155
1579 frag = &skb_shinfo(skb)->frags[f]; 2156 frag = &skb_shinfo(skb)->frags[f];
1580 lenf = frag->size; 2157 buf_len = frag->size;
1581 2158
1582 m = (lenf + ATL1_MAX_TX_BUF_LEN - 1) / ATL1_MAX_TX_BUF_LEN; 2159 nseg = (buf_len + ATL1_MAX_TX_BUF_LEN - 1) /
1583 for (i = 0; i < m; i++) { 2160 ATL1_MAX_TX_BUF_LEN;
1584 buffer_info = &tpd_ring->buffer_info[tpd_next_to_use]; 2161 for (i = 0; i < nseg; i++) {
2162 buffer_info = &tpd_ring->buffer_info[next_to_use];
1585 if (unlikely(buffer_info->skb)) 2163 if (unlikely(buffer_info->skb))
1586 BUG(); 2164 BUG();
1587 buffer_info->skb = NULL; 2165 buffer_info->skb = NULL;
1588 buffer_info->length = (lenf > ATL1_MAX_TX_BUF_LEN) ? 2166 buffer_info->length = (buf_len > ATL1_MAX_TX_BUF_LEN) ?
1589 ATL1_MAX_TX_BUF_LEN : lenf; 2167 ATL1_MAX_TX_BUF_LEN : buf_len;
1590 lenf -= buffer_info->length; 2168 buf_len -= buffer_info->length;
1591 buffer_info->dma = pci_map_page(adapter->pdev, 2169 buffer_info->dma = pci_map_page(adapter->pdev,
1592 frag->page, 2170 frag->page,
1593 frag->page_offset + (i * ATL1_MAX_TX_BUF_LEN), 2171 frag->page_offset + (i * ATL1_MAX_TX_BUF_LEN),
1594 buffer_info->length, PCI_DMA_TODEVICE); 2172 buffer_info->length, PCI_DMA_TODEVICE);
1595 2173
1596 if (++tpd_next_to_use == tpd_ring->count) 2174 if (++next_to_use == tpd_ring->count)
1597 tpd_next_to_use = 0; 2175 next_to_use = 0;
1598 } 2176 }
1599 } 2177 }
1600 2178
@@ -1602,39 +2180,44 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
1602 buffer_info->skb = skb; 2180 buffer_info->skb = skb;
1603} 2181}
1604 2182
1605static void atl1_tx_queue(struct atl1_adapter *adapter, int count, 2183static void atl1_tx_queue(struct atl1_adapter *adapter, u16 count,
1606 union tpd_descr *descr) 2184 struct tx_packet_desc *ptpd)
1607{ 2185{
1608 /* We enter this function holding a spinlock. */ 2186 /* spinlock held */
1609 struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; 2187 struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
1610 int j;
1611 u32 val;
1612 struct atl1_buffer *buffer_info; 2188 struct atl1_buffer *buffer_info;
1613 struct tx_packet_desc *tpd; 2189 struct tx_packet_desc *tpd;
1614 u16 tpd_next_to_use = atomic_read(&tpd_ring->next_to_use); 2190 u16 j;
2191 u32 val;
2192 u16 next_to_use = (u16) atomic_read(&tpd_ring->next_to_use);
1615 2193
1616 for (j = 0; j < count; j++) { 2194 for (j = 0; j < count; j++) {
1617 buffer_info = &tpd_ring->buffer_info[tpd_next_to_use]; 2195 buffer_info = &tpd_ring->buffer_info[next_to_use];
1618 tpd = ATL1_TPD_DESC(&adapter->tpd_ring, tpd_next_to_use); 2196 tpd = ATL1_TPD_DESC(&adapter->tpd_ring, next_to_use);
1619 tpd->desc.csum.csumpu = descr->csum.csumpu; 2197 if (tpd != ptpd)
1620 tpd->desc.csum.csumpl = descr->csum.csumpl; 2198 memcpy(tpd, ptpd, sizeof(struct tx_packet_desc));
1621 tpd->desc.tso.tsopu = descr->tso.tsopu;
1622 tpd->desc.tso.tsopl = descr->tso.tsopl;
1623 tpd->buffer_addr = cpu_to_le64(buffer_info->dma); 2199 tpd->buffer_addr = cpu_to_le64(buffer_info->dma);
1624 tpd->desc.data = descr->data; 2200 tpd->word2 = (cpu_to_le16(buffer_info->length) &
1625 tpd->desc.csum.csumpu |= (cpu_to_le16(buffer_info->length) & 2201 TPD_BUFLEN_MASK) << TPD_BUFLEN_SHIFT;
1626 CSUM_PARAM_BUFLEN_MASK) << CSUM_PARAM_BUFLEN_SHIFT;
1627 2202
1628 val = (descr->tso.tsopl >> TSO_PARAM_SEGMENT_SHIFT) & 2203 /*
1629 TSO_PARAM_SEGMENT_MASK; 2204 * if this is the first packet in a TSO chain, set
1630 if (val && !j) 2205 * TPD_HDRFLAG, otherwise, clear it.
1631 tpd->desc.tso.tsopl |= 1 << TSO_PARAM_HDRFLAG_SHIFT; 2206 */
2207 val = (tpd->word3 >> TPD_SEGMENT_EN_SHIFT) &
2208 TPD_SEGMENT_EN_MASK;
2209 if (val) {
2210 if (!j)
2211 tpd->word3 |= 1 << TPD_HDRFLAG_SHIFT;
2212 else
2213 tpd->word3 &= ~(1 << TPD_HDRFLAG_SHIFT);
2214 }
1632 2215
1633 if (j == (count - 1)) 2216 if (j == (count - 1))
1634 tpd->desc.csum.csumpl |= 1 << CSUM_PARAM_EOP_SHIFT; 2217 tpd->word3 |= 1 << TPD_EOP_SHIFT;
1635 2218
1636 if (++tpd_next_to_use == tpd_ring->count) 2219 if (++next_to_use == tpd_ring->count)
1637 tpd_next_to_use = 0; 2220 next_to_use = 0;
1638 } 2221 }
1639 /* 2222 /*
1640 * Force memory writes to complete before letting h/w 2223 * Force memory writes to complete before letting h/w
@@ -1644,18 +2227,18 @@ static void atl1_tx_queue(struct atl1_adapter *adapter, int count,
1644 */ 2227 */
1645 wmb(); 2228 wmb();
1646 2229
1647 atomic_set(&tpd_ring->next_to_use, (int)tpd_next_to_use); 2230 atomic_set(&tpd_ring->next_to_use, next_to_use);
1648} 2231}
1649 2232
1650static int atl1_xmit_frame(struct sk_buff *skb, struct net_device *netdev) 2233static int atl1_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1651{ 2234{
1652 struct atl1_adapter *adapter = netdev_priv(netdev); 2235 struct atl1_adapter *adapter = netdev_priv(netdev);
2236 struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
1653 int len = skb->len; 2237 int len = skb->len;
1654 int tso; 2238 int tso;
1655 int count = 1; 2239 int count = 1;
1656 int ret_val; 2240 int ret_val;
1657 u32 val; 2241 struct tx_packet_desc *ptpd;
1658 union tpd_descr param;
1659 u16 frag_size; 2242 u16 frag_size;
1660 u16 vlan_tag; 2243 u16 vlan_tag;
1661 unsigned long flags; 2244 unsigned long flags;
@@ -1666,18 +2249,11 @@ static int atl1_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1666 2249
1667 len -= skb->data_len; 2250 len -= skb->data_len;
1668 2251
1669 if (unlikely(skb->len == 0)) { 2252 if (unlikely(skb->len <= 0)) {
1670 dev_kfree_skb_any(skb); 2253 dev_kfree_skb_any(skb);
1671 return NETDEV_TX_OK; 2254 return NETDEV_TX_OK;
1672 } 2255 }
1673 2256
1674 param.data = 0;
1675 param.tso.tsopu = 0;
1676 param.tso.tsopl = 0;
1677 param.csum.csumpu = 0;
1678 param.csum.csumpl = 0;
1679
1680 /* nr_frags will be nonzero if we're doing scatter/gather (SG) */
1681 nr_frags = skb_shinfo(skb)->nr_frags; 2257 nr_frags = skb_shinfo(skb)->nr_frags;
1682 for (f = 0; f < nr_frags; f++) { 2258 for (f = 0; f < nr_frags; f++) {
1683 frag_size = skb_shinfo(skb)->frags[f].size; 2259 frag_size = skb_shinfo(skb)->frags[f].size;
@@ -1686,10 +2262,9 @@ static int atl1_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1686 ATL1_MAX_TX_BUF_LEN; 2262 ATL1_MAX_TX_BUF_LEN;
1687 } 2263 }
1688 2264
1689 /* mss will be nonzero if we're doing segment offload (TSO/GSO) */
1690 mss = skb_shinfo(skb)->gso_size; 2265 mss = skb_shinfo(skb)->gso_size;
1691 if (mss) { 2266 if (mss) {
1692 if (skb->protocol == htons(ETH_P_IP)) { 2267 if (skb->protocol == ntohs(ETH_P_IP)) {
1693 proto_hdr_len = (skb_transport_offset(skb) + 2268 proto_hdr_len = (skb_transport_offset(skb) +
1694 tcp_hdrlen(skb)); 2269 tcp_hdrlen(skb));
1695 if (unlikely(proto_hdr_len > len)) { 2270 if (unlikely(proto_hdr_len > len)) {
@@ -1706,7 +2281,9 @@ static int atl1_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1706 2281
1707 if (!spin_trylock_irqsave(&adapter->lock, flags)) { 2282 if (!spin_trylock_irqsave(&adapter->lock, flags)) {
1708 /* Can't get lock - tell upper layer to requeue */ 2283 /* Can't get lock - tell upper layer to requeue */
1709 dev_printk(KERN_DEBUG, &adapter->pdev->dev, "tx locked\n"); 2284 if (netif_msg_tx_queued(adapter))
2285 dev_printk(KERN_DEBUG, &adapter->pdev->dev,
2286 "tx locked\n");
1710 return NETDEV_TX_LOCKED; 2287 return NETDEV_TX_LOCKED;
1711 } 2288 }
1712 2289
@@ -1714,22 +2291,26 @@ static int atl1_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1714 /* not enough descriptors */ 2291 /* not enough descriptors */
1715 netif_stop_queue(netdev); 2292 netif_stop_queue(netdev);
1716 spin_unlock_irqrestore(&adapter->lock, flags); 2293 spin_unlock_irqrestore(&adapter->lock, flags);
1717 dev_printk(KERN_DEBUG, &adapter->pdev->dev, "tx busy\n"); 2294 if (netif_msg_tx_queued(adapter))
2295 dev_printk(KERN_DEBUG, &adapter->pdev->dev,
2296 "tx busy\n");
1718 return NETDEV_TX_BUSY; 2297 return NETDEV_TX_BUSY;
1719 } 2298 }
1720 2299
1721 param.data = 0; 2300 ptpd = ATL1_TPD_DESC(tpd_ring,
2301 (u16) atomic_read(&tpd_ring->next_to_use));
2302 memset(ptpd, 0, sizeof(struct tx_packet_desc));
1722 2303
1723 if (adapter->vlgrp && vlan_tx_tag_present(skb)) { 2304 if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
1724 vlan_tag = vlan_tx_tag_get(skb); 2305 vlan_tag = vlan_tx_tag_get(skb);
1725 vlan_tag = (vlan_tag << 4) | (vlan_tag >> 13) | 2306 vlan_tag = (vlan_tag << 4) | (vlan_tag >> 13) |
1726 ((vlan_tag >> 9) & 0x8); 2307 ((vlan_tag >> 9) & 0x8);
1727 param.csum.csumpl |= 1 << CSUM_PARAM_INSVLAG_SHIFT; 2308 ptpd->word3 |= 1 << TPD_INS_VL_TAG_SHIFT;
1728 param.csum.csumpu |= (vlan_tag & CSUM_PARAM_VALANTAG_MASK) << 2309 ptpd->word3 |= (vlan_tag & TPD_VL_TAGGED_MASK) <<
1729 CSUM_PARAM_VALAN_SHIFT; 2310 TPD_VL_TAGGED_SHIFT;
1730 } 2311 }
1731 2312
1732 tso = atl1_tso(adapter, skb, &param.tso); 2313 tso = atl1_tso(adapter, skb, ptpd);
1733 if (tso < 0) { 2314 if (tso < 0) {
1734 spin_unlock_irqrestore(&adapter->lock, flags); 2315 spin_unlock_irqrestore(&adapter->lock, flags);
1735 dev_kfree_skb_any(skb); 2316 dev_kfree_skb_any(skb);
@@ -1737,7 +2318,7 @@ static int atl1_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1737 } 2318 }
1738 2319
1739 if (!tso) { 2320 if (!tso) {
1740 ret_val = atl1_tx_csum(adapter, skb, &param.csum); 2321 ret_val = atl1_tx_csum(adapter, skb, ptpd);
1741 if (ret_val < 0) { 2322 if (ret_val < 0) {
1742 spin_unlock_irqrestore(&adapter->lock, flags); 2323 spin_unlock_irqrestore(&adapter->lock, flags);
1743 dev_kfree_skb_any(skb); 2324 dev_kfree_skb_any(skb);
@@ -1745,13 +2326,11 @@ static int atl1_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1745 } 2326 }
1746 } 2327 }
1747 2328
1748 val = (param.csum.csumpl >> CSUM_PARAM_SEGMENT_SHIFT) & 2329 atl1_tx_map(adapter, skb, ptpd);
1749 CSUM_PARAM_SEGMENT_MASK; 2330 atl1_tx_queue(adapter, count, ptpd);
1750 atl1_tx_map(adapter, skb, 1 == val);
1751 atl1_tx_queue(adapter, count, &param);
1752 netdev->trans_start = jiffies;
1753 spin_unlock_irqrestore(&adapter->lock, flags);
1754 atl1_update_mailbox(adapter); 2331 atl1_update_mailbox(adapter);
2332 spin_unlock_irqrestore(&adapter->lock, flags);
2333 netdev->trans_start = jiffies;
1755 return NETDEV_TX_OK; 2334 return NETDEV_TX_OK;
1756} 2335}
1757 2336
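The VLAN handling in the hunk above repacks the standard 802.1Q TCI (PCP in bits 15:13, CFI in bit 12, VID in bits 11:0) into the ordering written into the TPD: VID in bits 15:4, CFI in bit 3, PCP in bits 2:0. A minimal standalone sketch of that bit shuffle, with an illustrative function name that is not part of the patch:

	#include <stdint.h>

	/*
	 * Repack an 802.1Q TCI (PCP 15:13, CFI 12, VID 11:0) into the
	 * order used by the TPD VLAN field: VID 15:4, CFI 3, PCP 2:0.
	 * Same arithmetic as the vlan_tag lines in the hunk above.
	 */
	static uint16_t example_repack_tci(uint16_t tci)
	{
		return (uint16_t)((tci << 4)		/* VID -> bits 15:4 */
				  | (tci >> 13)		/* PCP -> bits  2:0 */
				  | ((tci >> 9) & 0x8));/* CFI -> bit     3 */
	}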
@@ -1776,7 +2355,7 @@ static irqreturn_t atl1_intr(int irq, void *data)
1776 adapter->cmb.cmb->int_stats = 0; 2355 adapter->cmb.cmb->int_stats = 0;
1777 2356
1778 if (status & ISR_GPHY) /* clear phy status */ 2357 if (status & ISR_GPHY) /* clear phy status */
1779 atl1_clear_phy_int(adapter); 2358 atlx_clear_phy_int(adapter);
1780 2359
1781 /* clear ISR status, and Enable CMB DMA/Disable Interrupt */ 2360 /* clear ISR status, and Enable CMB DMA/Disable Interrupt */
1782 iowrite32(status | ISR_DIS_INT, adapter->hw.hw_addr + REG_ISR); 2361 iowrite32(status | ISR_DIS_INT, adapter->hw.hw_addr + REG_ISR);
@@ -1787,8 +2366,9 @@ static irqreturn_t atl1_intr(int irq, void *data)
1787 2366
1788 /* check if PCIE PHY Link down */ 2367 /* check if PCIE PHY Link down */
1789 if (status & ISR_PHY_LINKDOWN) { 2368 if (status & ISR_PHY_LINKDOWN) {
1790 dev_printk(KERN_DEBUG, &adapter->pdev->dev, 2369 if (netif_msg_intr(adapter))
1791 "pcie phy link down %x\n", status); 2370 dev_printk(KERN_DEBUG, &adapter->pdev->dev,
2371 "pcie phy link down %x\n", status);
1792 if (netif_running(adapter->netdev)) { /* reset MAC */ 2372 if (netif_running(adapter->netdev)) { /* reset MAC */
1793 iowrite32(0, adapter->hw.hw_addr + REG_IMR); 2373 iowrite32(0, adapter->hw.hw_addr + REG_IMR);
1794 schedule_work(&adapter->pcie_dma_to_rst_task); 2374 schedule_work(&adapter->pcie_dma_to_rst_task);
@@ -1798,9 +2378,10 @@ static irqreturn_t atl1_intr(int irq, void *data)
1798 2378
1799 /* check if DMA read/write error ? */ 2379 /* check if DMA read/write error ? */
1800 if (status & (ISR_DMAR_TO_RST | ISR_DMAW_TO_RST)) { 2380 if (status & (ISR_DMAR_TO_RST | ISR_DMAW_TO_RST)) {
1801 dev_printk(KERN_DEBUG, &adapter->pdev->dev, 2381 if (netif_msg_intr(adapter))
1802 "pcie DMA r/w error (status = 0x%x)\n", 2382 dev_printk(KERN_DEBUG, &adapter->pdev->dev,
1803 status); 2383 "pcie DMA r/w error (status = 0x%x)\n",
2384 status);
1804 iowrite32(0, adapter->hw.hw_addr + REG_IMR); 2385 iowrite32(0, adapter->hw.hw_addr + REG_IMR);
1805 schedule_work(&adapter->pcie_dma_to_rst_task); 2386 schedule_work(&adapter->pcie_dma_to_rst_task);
1806 return IRQ_HANDLED; 2387 return IRQ_HANDLED;
@@ -1823,8 +2404,11 @@ static irqreturn_t atl1_intr(int irq, void *data)
1823 if (status & (ISR_RXF_OV | ISR_RFD_UNRUN | 2404 if (status & (ISR_RXF_OV | ISR_RFD_UNRUN |
1824 ISR_RRD_OV | ISR_HOST_RFD_UNRUN | 2405 ISR_RRD_OV | ISR_HOST_RFD_UNRUN |
1825 ISR_HOST_RRD_OV)) 2406 ISR_HOST_RRD_OV))
1826 dev_printk(KERN_DEBUG, &adapter->pdev->dev, 2407 if (netif_msg_intr(adapter))
1827 "rx exception, ISR = 0x%x\n", status); 2408 dev_printk(KERN_DEBUG,
2409 &adapter->pdev->dev,
2410 "rx exception, ISR = 0x%x\n",
2411 status);
1828 atl1_intr_rx(adapter); 2412 atl1_intr_rx(adapter);
1829 } 2413 }
1830 2414
@@ -1863,23 +2447,12 @@ static void atl1_phy_config(unsigned long data)
1863 spin_lock_irqsave(&adapter->lock, flags); 2447 spin_lock_irqsave(&adapter->lock, flags);
1864 adapter->phy_timer_pending = false; 2448 adapter->phy_timer_pending = false;
1865 atl1_write_phy_reg(hw, MII_ADVERTISE, hw->mii_autoneg_adv_reg); 2449 atl1_write_phy_reg(hw, MII_ADVERTISE, hw->mii_autoneg_adv_reg);
1866 atl1_write_phy_reg(hw, MII_AT001_CR, hw->mii_1000t_ctrl_reg); 2450 atl1_write_phy_reg(hw, MII_ATLX_CR, hw->mii_1000t_ctrl_reg);
1867 atl1_write_phy_reg(hw, MII_BMCR, MII_CR_RESET | MII_CR_AUTO_NEG_EN); 2451 atl1_write_phy_reg(hw, MII_BMCR, MII_CR_RESET | MII_CR_AUTO_NEG_EN);
1868 spin_unlock_irqrestore(&adapter->lock, flags); 2452 spin_unlock_irqrestore(&adapter->lock, flags);
1869} 2453}
1870 2454
1871/* 2455/*
1872 * atl1_tx_timeout - Respond to a Tx Hang
1873 * @netdev: network interface device structure
1874 */
1875static void atl1_tx_timeout(struct net_device *netdev)
1876{
1877 struct atl1_adapter *adapter = netdev_priv(netdev);
1878 /* Do the reset outside of interrupt context */
1879 schedule_work(&adapter->tx_timeout_task);
1880}
1881
1882/*
1883 * Orphaned vendor comment left intact here: 2456 * Orphaned vendor comment left intact here:
1884 * <vendor comment> 2457 * <vendor comment>
1885 * If TPD Buffer size equal to 0, PCIE DMAR_TO_INT 2458 * If TPD Buffer size equal to 0, PCIE DMAR_TO_INT
@@ -1889,86 +2462,29 @@ static void atl1_tx_timeout(struct net_device *netdev)
1889 * assert again and again. 2462 * assert again and again.
1890 * </vendor comment> 2463 * </vendor comment>
1891 */ 2464 */
1892static void atl1_tx_timeout_task(struct work_struct *work)
1893{
1894 struct atl1_adapter *adapter =
1895 container_of(work, struct atl1_adapter, tx_timeout_task);
1896 struct net_device *netdev = adapter->netdev;
1897 2465
1898 netif_device_detach(netdev); 2466static int atl1_reset(struct atl1_adapter *adapter)
1899 atl1_down(adapter);
1900 atl1_up(adapter);
1901 netif_device_attach(netdev);
1902}
1903
1904/*
1905 * atl1_link_chg_task - deal with link change event Out of interrupt context
1906 */
1907static void atl1_link_chg_task(struct work_struct *work)
1908{
1909 struct atl1_adapter *adapter =
1910 container_of(work, struct atl1_adapter, link_chg_task);
1911 unsigned long flags;
1912
1913 spin_lock_irqsave(&adapter->lock, flags);
1914 atl1_check_link(adapter);
1915 spin_unlock_irqrestore(&adapter->lock, flags);
1916}
1917
1918static void atl1_vlan_rx_register(struct net_device *netdev,
1919 struct vlan_group *grp)
1920{
1921 struct atl1_adapter *adapter = netdev_priv(netdev);
1922 unsigned long flags;
1923 u32 ctrl;
1924
1925 spin_lock_irqsave(&adapter->lock, flags);
1926 /* atl1_irq_disable(adapter); */
1927 adapter->vlgrp = grp;
1928
1929 if (grp) {
1930 /* enable VLAN tag insert/strip */
1931 ctrl = ioread32(adapter->hw.hw_addr + REG_MAC_CTRL);
1932 ctrl |= MAC_CTRL_RMV_VLAN;
1933 iowrite32(ctrl, adapter->hw.hw_addr + REG_MAC_CTRL);
1934 } else {
1935 /* disable VLAN tag insert/strip */
1936 ctrl = ioread32(adapter->hw.hw_addr + REG_MAC_CTRL);
1937 ctrl &= ~MAC_CTRL_RMV_VLAN;
1938 iowrite32(ctrl, adapter->hw.hw_addr + REG_MAC_CTRL);
1939 }
1940
1941 /* atl1_irq_enable(adapter); */
1942 spin_unlock_irqrestore(&adapter->lock, flags);
1943}
1944
1945static void atl1_restore_vlan(struct atl1_adapter *adapter)
1946{
1947 atl1_vlan_rx_register(adapter->netdev, adapter->vlgrp);
1948}
1949
1950int atl1_reset(struct atl1_adapter *adapter)
1951{ 2467{
1952 int ret; 2468 int ret;
1953
1954 ret = atl1_reset_hw(&adapter->hw); 2469 ret = atl1_reset_hw(&adapter->hw);
1955 if (ret != ATL1_SUCCESS) 2470 if (ret)
1956 return ret; 2471 return ret;
1957 return atl1_init_hw(&adapter->hw); 2472 return atl1_init_hw(&adapter->hw);
1958} 2473}
1959 2474
1960s32 atl1_up(struct atl1_adapter *adapter) 2475static s32 atl1_up(struct atl1_adapter *adapter)
1961{ 2476{
1962 struct net_device *netdev = adapter->netdev; 2477 struct net_device *netdev = adapter->netdev;
1963 int err; 2478 int err;
1964 int irq_flags = IRQF_SAMPLE_RANDOM; 2479 int irq_flags = IRQF_SAMPLE_RANDOM;
1965 2480
1966 /* hardware has been reset, we need to reload some things */ 2481 /* hardware has been reset, we need to reload some things */
1967 atl1_set_multi(netdev); 2482 atlx_set_multi(netdev);
1968 atl1_init_ring_ptrs(adapter); 2483 atl1_init_ring_ptrs(adapter);
1969 atl1_restore_vlan(adapter); 2484 atlx_restore_vlan(adapter);
1970 err = atl1_alloc_rx_buffers(adapter); 2485 err = atl1_alloc_rx_buffers(adapter);
1971 if (unlikely(!err)) /* no RX BUFFER allocated */ 2486 if (unlikely(!err))
2487 /* no RX BUFFER allocated */
1972 return -ENOMEM; 2488 return -ENOMEM;
1973 2489
1974 if (unlikely(atl1_configure(adapter))) { 2490 if (unlikely(atl1_configure(adapter))) {
@@ -1978,8 +2494,9 @@ s32 atl1_up(struct atl1_adapter *adapter)
1978 2494
1979 err = pci_enable_msi(adapter->pdev); 2495 err = pci_enable_msi(adapter->pdev);
1980 if (err) { 2496 if (err) {
1981 dev_info(&adapter->pdev->dev, 2497 if (netif_msg_ifup(adapter))
1982 "Unable to enable MSI: %d\n", err); 2498 dev_info(&adapter->pdev->dev,
2499 "Unable to enable MSI: %d\n", err);
1983 irq_flags |= IRQF_SHARED; 2500 irq_flags |= IRQF_SHARED;
1984 } 2501 }
1985 2502
@@ -1989,7 +2506,7 @@ s32 atl1_up(struct atl1_adapter *adapter)
1989 goto err_up; 2506 goto err_up;
1990 2507
1991 mod_timer(&adapter->watchdog_timer, jiffies); 2508 mod_timer(&adapter->watchdog_timer, jiffies);
1992 atl1_irq_enable(adapter); 2509 atlx_irq_enable(adapter);
1993 atl1_check_link(adapter); 2510 atl1_check_link(adapter);
1994 return 0; 2511 return 0;
1995 2512
@@ -2000,7 +2517,7 @@ err_up:
2000 return err; 2517 return err;
2001} 2518}
2002 2519
2003void atl1_down(struct atl1_adapter *adapter) 2520static void atl1_down(struct atl1_adapter *adapter)
2004{ 2521{
2005 struct net_device *netdev = adapter->netdev; 2522 struct net_device *netdev = adapter->netdev;
2006 2523
@@ -2008,7 +2525,7 @@ void atl1_down(struct atl1_adapter *adapter)
2008 del_timer_sync(&adapter->phy_config_timer); 2525 del_timer_sync(&adapter->phy_config_timer);
2009 adapter->phy_timer_pending = false; 2526 adapter->phy_timer_pending = false;
2010 2527
2011 atl1_irq_disable(adapter); 2528 atlx_irq_disable(adapter);
2012 free_irq(adapter->pdev->irq, netdev); 2529 free_irq(adapter->pdev->irq, netdev);
2013 pci_disable_msi(adapter->pdev); 2530 pci_disable_msi(adapter->pdev);
2014 atl1_reset_hw(&adapter->hw); 2531 atl1_reset_hw(&adapter->hw);
@@ -2023,6 +2540,52 @@ void atl1_down(struct atl1_adapter *adapter)
2023 atl1_clean_rx_ring(adapter); 2540 atl1_clean_rx_ring(adapter);
2024} 2541}
2025 2542
2543static void atl1_tx_timeout_task(struct work_struct *work)
2544{
2545 struct atl1_adapter *adapter =
2546 container_of(work, struct atl1_adapter, tx_timeout_task);
2547 struct net_device *netdev = adapter->netdev;
2548
2549 netif_device_detach(netdev);
2550 atl1_down(adapter);
2551 atl1_up(adapter);
2552 netif_device_attach(netdev);
2553}
2554
2555/*
2556 * atl1_change_mtu - Change the Maximum Transfer Unit
2557 * @netdev: network interface device structure
2558 * @new_mtu: new value for maximum frame size
2559 *
2560 * Returns 0 on success, negative on failure
2561 */
2562static int atl1_change_mtu(struct net_device *netdev, int new_mtu)
2563{
2564 struct atl1_adapter *adapter = netdev_priv(netdev);
2565 int old_mtu = netdev->mtu;
2566 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
2567
2568 if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
2569 (max_frame > MAX_JUMBO_FRAME_SIZE)) {
2570 if (netif_msg_link(adapter))
2571 dev_warn(&adapter->pdev->dev, "invalid MTU setting\n");
2572 return -EINVAL;
2573 }
2574
2575 adapter->hw.max_frame_size = max_frame;
2576 adapter->hw.tx_jumbo_task_th = (max_frame + 7) >> 3;
2577 adapter->rx_buffer_len = (max_frame + 7) & ~7;
2578 adapter->hw.rx_jumbo_th = adapter->rx_buffer_len / 8;
2579
2580 netdev->mtu = new_mtu;
2581 if ((old_mtu != new_mtu) && netif_running(netdev)) {
2582 atl1_down(adapter);
2583 atl1_up(adapter);
2584 }
2585
2586 return 0;
2587}
2588
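For reference, the arithmetic in atl1_change_mtu works on the full on-wire frame size (MTU plus MAC header, FCS and a VLAN tag) and rounds it up to 8-byte units. A user-space sketch of the same math, using the usual kernel constant values; names outside the patch are illustrative:

	#include <stdio.h>

	#define ETH_HLEN	14	/* MAC header */
	#define ETH_FCS_LEN	4	/* CRC */
	#define VLAN_HLEN	4	/* 802.1Q tag */

	int main(void)
	{
		int mtu[] = { 1500, 9000 };

		for (int i = 0; i < 2; i++) {
			int max_frame = mtu[i] + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;

			printf("mtu %d: max_frame %d tx_jumbo_task_th %d "
			       "rx_buffer_len %d rx_jumbo_th %d\n",
			       mtu[i], max_frame,
			       (max_frame + 7) >> 3,	 /* frame size in 8-byte units */
			       (max_frame + 7) & ~7,	 /* rounded up to 8 bytes */
			       ((max_frame + 7) & ~7) / 8);
		}
		return 0;
	}

For a 1500-byte MTU this yields max_frame 1522, rx_buffer_len 1528 and thresholds of 191.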
2026/* 2589/*
2027 * atl1_open - Called when a network interface is made active 2590 * atl1_open - Called when a network interface is made active
2028 * @netdev: network interface device structure 2591 * @netdev: network interface device structure
@@ -2091,7 +2654,7 @@ static int atl1_suspend(struct pci_dev *pdev, pm_message_t state)
2091 atl1_read_phy_reg(hw, MII_BMSR, (u16 *) & ctrl); 2654 atl1_read_phy_reg(hw, MII_BMSR, (u16 *) & ctrl);
2092 atl1_read_phy_reg(hw, MII_BMSR, (u16 *) & ctrl); 2655 atl1_read_phy_reg(hw, MII_BMSR, (u16 *) & ctrl);
2093 if (ctrl & BMSR_LSTATUS) 2656 if (ctrl & BMSR_LSTATUS)
2094 wufc &= ~ATL1_WUFC_LNKC; 2657 wufc &= ~ATLX_WUFC_LNKC;
2095 2658
2096 /* reduce speed to 10/100M */ 2659 /* reduce speed to 10/100M */
2097 if (wufc) { 2660 if (wufc) {
@@ -2099,15 +2662,15 @@ static int atl1_suspend(struct pci_dev *pdev, pm_message_t state)
2099 /* if resume, let driver to re- setup link */ 2662 /* if resume, let driver to re- setup link */
2100 hw->phy_configured = false; 2663 hw->phy_configured = false;
2101 atl1_set_mac_addr(hw); 2664 atl1_set_mac_addr(hw);
2102 atl1_set_multi(netdev); 2665 atlx_set_multi(netdev);
2103 2666
2104 ctrl = 0; 2667 ctrl = 0;
2105 /* turn on magic packet wol */ 2668 /* turn on magic packet wol */
2106 if (wufc & ATL1_WUFC_MAG) 2669 if (wufc & ATLX_WUFC_MAG)
2107 ctrl = WOL_MAGIC_EN | WOL_MAGIC_PME_EN; 2670 ctrl = WOL_MAGIC_EN | WOL_MAGIC_PME_EN;
2108 2671
2109 /* turn on Link change WOL */ 2672 /* turn on Link change WOL */
2110 if (wufc & ATL1_WUFC_LNKC) 2673 if (wufc & ATLX_WUFC_LNKC)
2111 ctrl |= (WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN); 2674 ctrl |= (WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN);
2112 iowrite32(ctrl, hw->hw_addr + REG_WOL_CTRL); 2675 iowrite32(ctrl, hw->hw_addr + REG_WOL_CTRL);
2113 2676
@@ -2115,13 +2678,13 @@ static int atl1_suspend(struct pci_dev *pdev, pm_message_t state)
2115 ctrl = ioread32(hw->hw_addr + REG_MAC_CTRL); 2678 ctrl = ioread32(hw->hw_addr + REG_MAC_CTRL);
2116 ctrl &= ~MAC_CTRL_DBG; 2679 ctrl &= ~MAC_CTRL_DBG;
2117 ctrl &= ~MAC_CTRL_PROMIS_EN; 2680 ctrl &= ~MAC_CTRL_PROMIS_EN;
2118 if (wufc & ATL1_WUFC_MC) 2681 if (wufc & ATLX_WUFC_MC)
2119 ctrl |= MAC_CTRL_MC_ALL_EN; 2682 ctrl |= MAC_CTRL_MC_ALL_EN;
2120 else 2683 else
2121 ctrl &= ~MAC_CTRL_MC_ALL_EN; 2684 ctrl &= ~MAC_CTRL_MC_ALL_EN;
2122 2685
2123 /* turn on broadcast mode if wake on-BC is enabled */ 2686 /* turn on broadcast mode if wake on-BC is enabled */
2124 if (wufc & ATL1_WUFC_BC) 2687 if (wufc & ATLX_WUFC_BC)
2125 ctrl |= MAC_CTRL_BC_EN; 2688 ctrl |= MAC_CTRL_BC_EN;
2126 else 2689 else
2127 ctrl &= ~MAC_CTRL_BC_EN; 2690 ctrl &= ~MAC_CTRL_BC_EN;
@@ -2149,12 +2712,13 @@ static int atl1_resume(struct pci_dev *pdev)
2149{ 2712{
2150 struct net_device *netdev = pci_get_drvdata(pdev); 2713 struct net_device *netdev = pci_get_drvdata(pdev);
2151 struct atl1_adapter *adapter = netdev_priv(netdev); 2714 struct atl1_adapter *adapter = netdev_priv(netdev);
2152 u32 ret_val; 2715 u32 err;
2153 2716
2154 pci_set_power_state(pdev, 0); 2717 pci_set_power_state(pdev, PCI_D0);
2155 pci_restore_state(pdev); 2718 pci_restore_state(pdev);
2156 2719
2157 ret_val = pci_enable_device(pdev); 2720 /* FIXME: check and handle */
2721 err = pci_enable_device(pdev);
2158 pci_enable_wake(pdev, PCI_D3hot, 0); 2722 pci_enable_wake(pdev, PCI_D3hot, 0);
2159 pci_enable_wake(pdev, PCI_D3cold, 0); 2723 pci_enable_wake(pdev, PCI_D3cold, 0);
2160 2724
@@ -2221,14 +2785,16 @@ static int __devinit atl1_probe(struct pci_dev *pdev,
2221 dev_err(&pdev->dev, "no usable DMA configuration\n"); 2785 dev_err(&pdev->dev, "no usable DMA configuration\n");
2222 goto err_dma; 2786 goto err_dma;
2223 } 2787 }
2224 /* Mark all PCI regions associated with PCI device 2788 /*
2789 * Mark all PCI regions associated with PCI device
2225 * pdev as being reserved by owner atl1_driver_name 2790 * pdev as being reserved by owner atl1_driver_name
2226 */ 2791 */
2227 err = pci_request_regions(pdev, atl1_driver_name); 2792 err = pci_request_regions(pdev, ATLX_DRIVER_NAME);
2228 if (err) 2793 if (err)
2229 goto err_request_regions; 2794 goto err_request_regions;
2230 2795
2231 /* Enables bus-mastering on the device and calls 2796 /*
2797 * Enables bus-mastering on the device and calls
2232 * pcibios_set_master to do the needed arch specific settings 2798 * pcibios_set_master to do the needed arch specific settings
2233 */ 2799 */
2234 pci_set_master(pdev); 2800 pci_set_master(pdev);
@@ -2245,6 +2811,7 @@ static int __devinit atl1_probe(struct pci_dev *pdev,
2245 adapter->netdev = netdev; 2811 adapter->netdev = netdev;
2246 adapter->pdev = pdev; 2812 adapter->pdev = pdev;
2247 adapter->hw.back = adapter; 2813 adapter->hw.back = adapter;
2814 adapter->msg_enable = netif_msg_init(debug, atl1_default_msg);
2248 2815
2249 adapter->hw.hw_addr = pci_iomap(pdev, 0, 0); 2816 adapter->hw.hw_addr = pci_iomap(pdev, 0, 0);
2250 if (!adapter->hw.hw_addr) { 2817 if (!adapter->hw.hw_addr) {
@@ -2254,7 +2821,8 @@ static int __devinit atl1_probe(struct pci_dev *pdev,
2254 /* get device revision number */ 2821 /* get device revision number */
2255 adapter->hw.dev_rev = ioread16(adapter->hw.hw_addr + 2822 adapter->hw.dev_rev = ioread16(adapter->hw.hw_addr +
2256 (REG_MASTER_CTRL + 2)); 2823 (REG_MASTER_CTRL + 2));
2257 dev_info(&pdev->dev, "version %s\n", DRIVER_VERSION); 2824 if (netif_msg_probe(adapter))
2825 dev_info(&pdev->dev, "version %s\n", ATLX_DRIVER_VERSION);
2258 2826
2259 /* set default ring resource counts */ 2827 /* set default ring resource counts */
2260 adapter->rfd_ring.count = adapter->rrd_ring.count = ATL1_DEFAULT_RFD; 2828 adapter->rfd_ring.count = adapter->rrd_ring.count = ATL1_DEFAULT_RFD;
@@ -2269,17 +2837,17 @@ static int __devinit atl1_probe(struct pci_dev *pdev,
2269 netdev->open = &atl1_open; 2837 netdev->open = &atl1_open;
2270 netdev->stop = &atl1_close; 2838 netdev->stop = &atl1_close;
2271 netdev->hard_start_xmit = &atl1_xmit_frame; 2839 netdev->hard_start_xmit = &atl1_xmit_frame;
2272 netdev->get_stats = &atl1_get_stats; 2840 netdev->get_stats = &atlx_get_stats;
2273 netdev->set_multicast_list = &atl1_set_multi; 2841 netdev->set_multicast_list = &atlx_set_multi;
2274 netdev->set_mac_address = &atl1_set_mac; 2842 netdev->set_mac_address = &atl1_set_mac;
2275 netdev->change_mtu = &atl1_change_mtu; 2843 netdev->change_mtu = &atl1_change_mtu;
2276 netdev->do_ioctl = &atl1_ioctl; 2844 netdev->do_ioctl = &atlx_ioctl;
2277 netdev->tx_timeout = &atl1_tx_timeout; 2845 netdev->tx_timeout = &atlx_tx_timeout;
2278 netdev->watchdog_timeo = 5 * HZ; 2846 netdev->watchdog_timeo = 5 * HZ;
2279#ifdef CONFIG_NET_POLL_CONTROLLER 2847#ifdef CONFIG_NET_POLL_CONTROLLER
2280 netdev->poll_controller = atl1_poll_controller; 2848 netdev->poll_controller = atl1_poll_controller;
2281#endif 2849#endif
2282 netdev->vlan_rx_register = atl1_vlan_rx_register; 2850 netdev->vlan_rx_register = atlx_vlan_rx_register;
2283 2851
2284 netdev->ethtool_ops = &atl1_ethtool_ops; 2852 netdev->ethtool_ops = &atl1_ethtool_ops;
2285 adapter->bd_number = cards_found; 2853 adapter->bd_number = cards_found;
@@ -2292,13 +2860,7 @@ static int __devinit atl1_probe(struct pci_dev *pdev,
2292 netdev->features = NETIF_F_HW_CSUM; 2860 netdev->features = NETIF_F_HW_CSUM;
2293 netdev->features |= NETIF_F_SG; 2861 netdev->features |= NETIF_F_SG;
2294 netdev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX); 2862 netdev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
2295 2863 netdev->features |= NETIF_F_TSO;
2296 /*
2297 * FIXME - Until tso performance gets fixed, disable the feature.
2298 * Enable it with ethtool -K if desired.
2299 */
2300 /* netdev->features |= NETIF_F_TSO; */
2301
2302 netdev->features |= NETIF_F_LLTX; 2864 netdev->features |= NETIF_F_LLTX;
2303 2865
2304 /* 2866 /*
@@ -2309,7 +2871,7 @@ static int __devinit atl1_probe(struct pci_dev *pdev,
2309 /* atl1_pcie_patch(adapter); */ 2871 /* atl1_pcie_patch(adapter); */
2310 2872
2311 /* really reset GPHY core */ 2873 /* really reset GPHY core */
2312 iowrite16(0, adapter->hw.hw_addr + REG_GPHY_ENABLE); 2874 iowrite16(0, adapter->hw.hw_addr + REG_PHY_ENABLE);
2313 2875
2314 /* 2876 /*
2315 * reset the controller to 2877 * reset the controller to
@@ -2354,7 +2916,7 @@ static int __devinit atl1_probe(struct pci_dev *pdev,
2354 2916
2355 INIT_WORK(&adapter->tx_timeout_task, atl1_tx_timeout_task); 2917 INIT_WORK(&adapter->tx_timeout_task, atl1_tx_timeout_task);
2356 2918
2357 INIT_WORK(&adapter->link_chg_task, atl1_link_chg_task); 2919 INIT_WORK(&adapter->link_chg_task, atlx_link_chg_task);
2358 2920
2359 INIT_WORK(&adapter->pcie_dma_to_rst_task, atl1_tx_timeout_task); 2921 INIT_WORK(&adapter->pcie_dma_to_rst_task, atl1_tx_timeout_task);
2360 2922
@@ -2397,7 +2959,8 @@ static void __devexit atl1_remove(struct pci_dev *pdev)
2397 2959
2398 adapter = netdev_priv(netdev); 2960 adapter = netdev_priv(netdev);
2399 2961
2400 /* Some atl1 boards lack persistent storage for their MAC, and get it 2962 /*
2963 * Some atl1 boards lack persistent storage for their MAC, and get it
2401 * from the BIOS during POST. If we've been messing with the MAC 2964 * from the BIOS during POST. If we've been messing with the MAC
2402 * address, we need to save the permanent one. 2965 * address, we need to save the permanent one.
2403 */ 2966 */
@@ -2407,7 +2970,7 @@ static void __devexit atl1_remove(struct pci_dev *pdev)
2407 atl1_set_mac_addr(&adapter->hw); 2970 atl1_set_mac_addr(&adapter->hw);
2408 } 2971 }
2409 2972
2410 iowrite16(0, adapter->hw.hw_addr + REG_GPHY_ENABLE); 2973 iowrite16(0, adapter->hw.hw_addr + REG_PHY_ENABLE);
2411 unregister_netdev(netdev); 2974 unregister_netdev(netdev);
2412 pci_iounmap(pdev, adapter->hw.hw_addr); 2975 pci_iounmap(pdev, adapter->hw.hw_addr);
2413 pci_release_regions(pdev); 2976 pci_release_regions(pdev);
@@ -2416,7 +2979,7 @@ static void __devexit atl1_remove(struct pci_dev *pdev)
2416} 2979}
2417 2980
2418static struct pci_driver atl1_driver = { 2981static struct pci_driver atl1_driver = {
2419 .name = atl1_driver_name, 2982 .name = ATLX_DRIVER_NAME,
2420 .id_table = atl1_pci_tbl, 2983 .id_table = atl1_pci_tbl,
2421 .probe = atl1_probe, 2984 .probe = atl1_probe,
2422 .remove = __devexit_p(atl1_remove), 2985 .remove = __devexit_p(atl1_remove),
@@ -2448,3 +3011,554 @@ static int __init atl1_init_module(void)
2448 3011
2449module_init(atl1_init_module); 3012module_init(atl1_init_module);
2450module_exit(atl1_exit_module); 3013module_exit(atl1_exit_module);
3014
3015struct atl1_stats {
3016 char stat_string[ETH_GSTRING_LEN];
3017 int sizeof_stat;
3018 int stat_offset;
3019};
3020
3021#define ATL1_STAT(m) \
3022 sizeof(((struct atl1_adapter *)0)->m), offsetof(struct atl1_adapter, m)
3023
3024static struct atl1_stats atl1_gstrings_stats[] = {
3025 {"rx_packets", ATL1_STAT(soft_stats.rx_packets)},
3026 {"tx_packets", ATL1_STAT(soft_stats.tx_packets)},
3027 {"rx_bytes", ATL1_STAT(soft_stats.rx_bytes)},
3028 {"tx_bytes", ATL1_STAT(soft_stats.tx_bytes)},
3029 {"rx_errors", ATL1_STAT(soft_stats.rx_errors)},
3030 {"tx_errors", ATL1_STAT(soft_stats.tx_errors)},
3031 {"rx_dropped", ATL1_STAT(net_stats.rx_dropped)},
3032 {"tx_dropped", ATL1_STAT(net_stats.tx_dropped)},
3033 {"multicast", ATL1_STAT(soft_stats.multicast)},
3034 {"collisions", ATL1_STAT(soft_stats.collisions)},
3035 {"rx_length_errors", ATL1_STAT(soft_stats.rx_length_errors)},
3036 {"rx_over_errors", ATL1_STAT(soft_stats.rx_missed_errors)},
3037 {"rx_crc_errors", ATL1_STAT(soft_stats.rx_crc_errors)},
3038 {"rx_frame_errors", ATL1_STAT(soft_stats.rx_frame_errors)},
3039 {"rx_fifo_errors", ATL1_STAT(soft_stats.rx_fifo_errors)},
3040 {"rx_missed_errors", ATL1_STAT(soft_stats.rx_missed_errors)},
3041 {"tx_aborted_errors", ATL1_STAT(soft_stats.tx_aborted_errors)},
3042 {"tx_carrier_errors", ATL1_STAT(soft_stats.tx_carrier_errors)},
3043 {"tx_fifo_errors", ATL1_STAT(soft_stats.tx_fifo_errors)},
3044 {"tx_window_errors", ATL1_STAT(soft_stats.tx_window_errors)},
3045 {"tx_abort_exce_coll", ATL1_STAT(soft_stats.excecol)},
3046 {"tx_abort_late_coll", ATL1_STAT(soft_stats.latecol)},
3047 {"tx_deferred_ok", ATL1_STAT(soft_stats.deffer)},
3048 {"tx_single_coll_ok", ATL1_STAT(soft_stats.scc)},
3049 {"tx_multi_coll_ok", ATL1_STAT(soft_stats.mcc)},
3050 {"tx_underun", ATL1_STAT(soft_stats.tx_underun)},
3051 {"tx_trunc", ATL1_STAT(soft_stats.tx_trunc)},
3052 {"tx_pause", ATL1_STAT(soft_stats.tx_pause)},
3053 {"rx_pause", ATL1_STAT(soft_stats.rx_pause)},
3054 {"rx_rrd_ov", ATL1_STAT(soft_stats.rx_rrd_ov)},
3055 {"rx_trunc", ATL1_STAT(soft_stats.rx_trunc)}
3056};
3057
3058static void atl1_get_ethtool_stats(struct net_device *netdev,
3059 struct ethtool_stats *stats, u64 *data)
3060{
3061 struct atl1_adapter *adapter = netdev_priv(netdev);
3062 int i;
3063 char *p;
3064
3065 for (i = 0; i < ARRAY_SIZE(atl1_gstrings_stats); i++) {
3066 p = (char *)adapter+atl1_gstrings_stats[i].stat_offset;
3067 data[i] = (atl1_gstrings_stats[i].sizeof_stat ==
3068 sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
3069 }
3070
3071}
3072
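The ATL1_STAT() initializer records each counter's size and byte offset so that atl1_get_ethtool_stats can read either a u64 or a u32 at (adapter + offset) without per-counter code. A stripped-down, compilable illustration of the same pattern with a hypothetical structure (not driver code):

	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>

	struct demo_stats {		/* stand-in for struct atl1_adapter */
		uint64_t rx_packets;
		uint32_t rx_errors;
	};

	struct demo_desc {
		const char *name;
		int size;
		int offset;
	};

	#define DEMO_STAT(m) sizeof(((struct demo_stats *)0)->m), \
			     offsetof(struct demo_stats, m)

	static const struct demo_desc demo_tbl[] = {
		{ "rx_packets", DEMO_STAT(rx_packets) },
		{ "rx_errors",  DEMO_STAT(rx_errors)  },
	};

	int main(void)
	{
		struct demo_stats s = { .rx_packets = 42, .rx_errors = 3 };

		for (size_t i = 0; i < sizeof(demo_tbl) / sizeof(demo_tbl[0]); i++) {
			const char *p = (const char *)&s + demo_tbl[i].offset;
			uint64_t v = demo_tbl[i].size == sizeof(uint64_t) ?
				     *(const uint64_t *)p : *(const uint32_t *)p;

			printf("%s = %llu\n", demo_tbl[i].name,
			       (unsigned long long)v);
		}
		return 0;
	}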
3073static int atl1_get_sset_count(struct net_device *netdev, int sset)
3074{
3075 switch (sset) {
3076 case ETH_SS_STATS:
3077 return ARRAY_SIZE(atl1_gstrings_stats);
3078 default:
3079 return -EOPNOTSUPP;
3080 }
3081}
3082
3083static int atl1_get_settings(struct net_device *netdev,
3084 struct ethtool_cmd *ecmd)
3085{
3086 struct atl1_adapter *adapter = netdev_priv(netdev);
3087 struct atl1_hw *hw = &adapter->hw;
3088
3089 ecmd->supported = (SUPPORTED_10baseT_Half |
3090 SUPPORTED_10baseT_Full |
3091 SUPPORTED_100baseT_Half |
3092 SUPPORTED_100baseT_Full |
3093 SUPPORTED_1000baseT_Full |
3094 SUPPORTED_Autoneg | SUPPORTED_TP);
3095 ecmd->advertising = ADVERTISED_TP;
3096 if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
3097 hw->media_type == MEDIA_TYPE_1000M_FULL) {
3098 ecmd->advertising |= ADVERTISED_Autoneg;
3099 if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR) {
3100 ecmd->advertising |= ADVERTISED_Autoneg;
3101 ecmd->advertising |=
3102 (ADVERTISED_10baseT_Half |
3103 ADVERTISED_10baseT_Full |
3104 ADVERTISED_100baseT_Half |
3105 ADVERTISED_100baseT_Full |
3106 ADVERTISED_1000baseT_Full);
3107 } else
3108 ecmd->advertising |= (ADVERTISED_1000baseT_Full);
3109 }
3110 ecmd->port = PORT_TP;
3111 ecmd->phy_address = 0;
3112 ecmd->transceiver = XCVR_INTERNAL;
3113
3114 if (netif_carrier_ok(adapter->netdev)) {
3115 u16 link_speed, link_duplex;
3116 atl1_get_speed_and_duplex(hw, &link_speed, &link_duplex);
3117 ecmd->speed = link_speed;
3118 if (link_duplex == FULL_DUPLEX)
3119 ecmd->duplex = DUPLEX_FULL;
3120 else
3121 ecmd->duplex = DUPLEX_HALF;
3122 } else {
3123 ecmd->speed = -1;
3124 ecmd->duplex = -1;
3125 }
3126 if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
3127 hw->media_type == MEDIA_TYPE_1000M_FULL)
3128 ecmd->autoneg = AUTONEG_ENABLE;
3129 else
3130 ecmd->autoneg = AUTONEG_DISABLE;
3131
3132 return 0;
3133}
3134
3135static int atl1_set_settings(struct net_device *netdev,
3136 struct ethtool_cmd *ecmd)
3137{
3138 struct atl1_adapter *adapter = netdev_priv(netdev);
3139 struct atl1_hw *hw = &adapter->hw;
3140 u16 phy_data;
3141 int ret_val = 0;
3142 u16 old_media_type = hw->media_type;
3143
3144 if (netif_running(adapter->netdev)) {
3145 if (netif_msg_link(adapter))
3146 dev_dbg(&adapter->pdev->dev,
3147 "ethtool shutting down adapter\n");
3148 atl1_down(adapter);
3149 }
3150
3151 if (ecmd->autoneg == AUTONEG_ENABLE)
3152 hw->media_type = MEDIA_TYPE_AUTO_SENSOR;
3153 else {
3154 if (ecmd->speed == SPEED_1000) {
3155 if (ecmd->duplex != DUPLEX_FULL) {
3156 if (netif_msg_link(adapter))
3157 dev_warn(&adapter->pdev->dev,
3158 "1000M half is invalid\n");
3159 ret_val = -EINVAL;
3160 goto exit_sset;
3161 }
3162 hw->media_type = MEDIA_TYPE_1000M_FULL;
3163 } else if (ecmd->speed == SPEED_100) {
3164 if (ecmd->duplex == DUPLEX_FULL)
3165 hw->media_type = MEDIA_TYPE_100M_FULL;
3166 else
3167 hw->media_type = MEDIA_TYPE_100M_HALF;
3168 } else {
3169 if (ecmd->duplex == DUPLEX_FULL)
3170 hw->media_type = MEDIA_TYPE_10M_FULL;
3171 else
3172 hw->media_type = MEDIA_TYPE_10M_HALF;
3173 }
3174 }
3175 switch (hw->media_type) {
3176 case MEDIA_TYPE_AUTO_SENSOR:
3177 ecmd->advertising =
3178 ADVERTISED_10baseT_Half |
3179 ADVERTISED_10baseT_Full |
3180 ADVERTISED_100baseT_Half |
3181 ADVERTISED_100baseT_Full |
3182 ADVERTISED_1000baseT_Full |
3183 ADVERTISED_Autoneg | ADVERTISED_TP;
3184 break;
3185 case MEDIA_TYPE_1000M_FULL:
3186 ecmd->advertising =
3187 ADVERTISED_1000baseT_Full |
3188 ADVERTISED_Autoneg | ADVERTISED_TP;
3189 break;
3190 default:
3191 ecmd->advertising = 0;
3192 break;
3193 }
3194 if (atl1_phy_setup_autoneg_adv(hw)) {
3195 ret_val = -EINVAL;
3196 if (netif_msg_link(adapter))
3197 dev_warn(&adapter->pdev->dev,
3198 "invalid ethtool speed/duplex setting\n");
3199 goto exit_sset;
3200 }
3201 if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
3202 hw->media_type == MEDIA_TYPE_1000M_FULL)
3203 phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN;
3204 else {
3205 switch (hw->media_type) {
3206 case MEDIA_TYPE_100M_FULL:
3207 phy_data =
3208 MII_CR_FULL_DUPLEX | MII_CR_SPEED_100 |
3209 MII_CR_RESET;
3210 break;
3211 case MEDIA_TYPE_100M_HALF:
3212 phy_data = MII_CR_SPEED_100 | MII_CR_RESET;
3213 break;
3214 case MEDIA_TYPE_10M_FULL:
3215 phy_data =
3216 MII_CR_FULL_DUPLEX | MII_CR_SPEED_10 | MII_CR_RESET;
3217 break;
3218 default:
3219 /* MEDIA_TYPE_10M_HALF: */
3220 phy_data = MII_CR_SPEED_10 | MII_CR_RESET;
3221 break;
3222 }
3223 }
3224 atl1_write_phy_reg(hw, MII_BMCR, phy_data);
3225exit_sset:
3226 if (ret_val)
3227 hw->media_type = old_media_type;
3228
3229 if (netif_running(adapter->netdev)) {
3230 if (netif_msg_link(adapter))
3231 dev_dbg(&adapter->pdev->dev,
3232 "ethtool starting adapter\n");
3233 atl1_up(adapter);
3234 } else if (!ret_val) {
3235 if (netif_msg_link(adapter))
3236 dev_dbg(&adapter->pdev->dev,
3237 "ethtool resetting adapter\n");
3238 atl1_reset(adapter);
3239 }
3240 return ret_val;
3241}
3242
3243static void atl1_get_drvinfo(struct net_device *netdev,
3244 struct ethtool_drvinfo *drvinfo)
3245{
3246 struct atl1_adapter *adapter = netdev_priv(netdev);
3247
3248 strncpy(drvinfo->driver, ATLX_DRIVER_NAME, sizeof(drvinfo->driver));
3249 strncpy(drvinfo->version, ATLX_DRIVER_VERSION,
3250 sizeof(drvinfo->version));
3251 strncpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
3252 strncpy(drvinfo->bus_info, pci_name(adapter->pdev),
3253 sizeof(drvinfo->bus_info));
3254 drvinfo->eedump_len = ATL1_EEDUMP_LEN;
3255}
3256
3257static void atl1_get_wol(struct net_device *netdev,
3258 struct ethtool_wolinfo *wol)
3259{
3260 struct atl1_adapter *adapter = netdev_priv(netdev);
3261
3262 wol->supported = WAKE_UCAST | WAKE_MCAST | WAKE_BCAST | WAKE_MAGIC;
3263 wol->wolopts = 0;
3264 if (adapter->wol & ATLX_WUFC_EX)
3265 wol->wolopts |= WAKE_UCAST;
3266 if (adapter->wol & ATLX_WUFC_MC)
3267 wol->wolopts |= WAKE_MCAST;
3268 if (adapter->wol & ATLX_WUFC_BC)
3269 wol->wolopts |= WAKE_BCAST;
3270 if (adapter->wol & ATLX_WUFC_MAG)
3271 wol->wolopts |= WAKE_MAGIC;
3272 return;
3273}
3274
3275static int atl1_set_wol(struct net_device *netdev,
3276 struct ethtool_wolinfo *wol)
3277{
3278 struct atl1_adapter *adapter = netdev_priv(netdev);
3279
3280 if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
3281 return -EOPNOTSUPP;
3282 adapter->wol = 0;
3283 if (wol->wolopts & WAKE_UCAST)
3284 adapter->wol |= ATLX_WUFC_EX;
3285 if (wol->wolopts & WAKE_MCAST)
3286 adapter->wol |= ATLX_WUFC_MC;
3287 if (wol->wolopts & WAKE_BCAST)
3288 adapter->wol |= ATLX_WUFC_BC;
3289 if (wol->wolopts & WAKE_MAGIC)
3290 adapter->wol |= ATLX_WUFC_MAG;
3291 return 0;
3292}
3293
3294static u32 atl1_get_msglevel(struct net_device *netdev)
3295{
3296 struct atl1_adapter *adapter = netdev_priv(netdev);
3297 return adapter->msg_enable;
3298}
3299
3300static void atl1_set_msglevel(struct net_device *netdev, u32 value)
3301{
3302 struct atl1_adapter *adapter = netdev_priv(netdev);
3303 adapter->msg_enable = value;
3304}
3305
3306static int atl1_get_regs_len(struct net_device *netdev)
3307{
3308 return ATL1_REG_COUNT * sizeof(u32);
3309}
3310
3311static void atl1_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
3312 void *p)
3313{
3314 struct atl1_adapter *adapter = netdev_priv(netdev);
3315 struct atl1_hw *hw = &adapter->hw;
3316 unsigned int i;
3317 u32 *regbuf = p;
3318
3319 for (i = 0; i < ATL1_REG_COUNT; i++) {
3320 /*
3321 * This switch statement avoids reserved regions
3322 * of register space.
3323 */
3324 switch (i) {
3325 case 6 ... 9:
3326 case 14:
3327 case 29 ... 31:
3328 case 34 ... 63:
3329 case 75 ... 127:
3330 case 136 ... 1023:
3331 case 1027 ... 1087:
3332 case 1091 ... 1151:
3333 case 1194 ... 1195:
3334 case 1200 ... 1201:
3335 case 1206 ... 1213:
3336 case 1216 ... 1279:
3337 case 1290 ... 1311:
3338 case 1323 ... 1343:
3339 case 1358 ... 1359:
3340 case 1368 ... 1375:
3341 case 1378 ... 1383:
3342 case 1388 ... 1391:
3343 case 1393 ... 1395:
3344 case 1402 ... 1403:
3345 case 1410 ... 1471:
3346 case 1522 ... 1535:
3347 /* reserved region; don't read it */
3348 regbuf[i] = 0;
3349 break;
3350 default:
3351 /* unreserved region */
3352 regbuf[i] = ioread32(hw->hw_addr + (i * sizeof(u32)));
3353 }
3354 }
3355}
3356
3357static void atl1_get_ringparam(struct net_device *netdev,
3358 struct ethtool_ringparam *ring)
3359{
3360 struct atl1_adapter *adapter = netdev_priv(netdev);
3361 struct atl1_tpd_ring *txdr = &adapter->tpd_ring;
3362 struct atl1_rfd_ring *rxdr = &adapter->rfd_ring;
3363
3364 ring->rx_max_pending = ATL1_MAX_RFD;
3365 ring->tx_max_pending = ATL1_MAX_TPD;
3366 ring->rx_mini_max_pending = 0;
3367 ring->rx_jumbo_max_pending = 0;
3368 ring->rx_pending = rxdr->count;
3369 ring->tx_pending = txdr->count;
3370 ring->rx_mini_pending = 0;
3371 ring->rx_jumbo_pending = 0;
3372}
3373
3374static int atl1_set_ringparam(struct net_device *netdev,
3375 struct ethtool_ringparam *ring)
3376{
3377 struct atl1_adapter *adapter = netdev_priv(netdev);
3378 struct atl1_tpd_ring *tpdr = &adapter->tpd_ring;
3379 struct atl1_rrd_ring *rrdr = &adapter->rrd_ring;
3380 struct atl1_rfd_ring *rfdr = &adapter->rfd_ring;
3381
3382 struct atl1_tpd_ring tpd_old, tpd_new;
3383 struct atl1_rfd_ring rfd_old, rfd_new;
3384 struct atl1_rrd_ring rrd_old, rrd_new;
3385 struct atl1_ring_header rhdr_old, rhdr_new;
3386 int err;
3387
3388 tpd_old = adapter->tpd_ring;
3389 rfd_old = adapter->rfd_ring;
3390 rrd_old = adapter->rrd_ring;
3391 rhdr_old = adapter->ring_header;
3392
3393 if (netif_running(adapter->netdev))
3394 atl1_down(adapter);
3395
3396 rfdr->count = (u16) max(ring->rx_pending, (u32) ATL1_MIN_RFD);
3397 rfdr->count = rfdr->count > ATL1_MAX_RFD ? ATL1_MAX_RFD :
3398 rfdr->count;
3399 rfdr->count = (rfdr->count + 3) & ~3;
3400 rrdr->count = rfdr->count;
3401
3402 tpdr->count = (u16) max(ring->tx_pending, (u32) ATL1_MIN_TPD);
3403 tpdr->count = tpdr->count > ATL1_MAX_TPD ? ATL1_MAX_TPD :
3404 tpdr->count;
3405 tpdr->count = (tpdr->count + 3) & ~3;
3406
3407 if (netif_running(adapter->netdev)) {
3408 /* try to get new resources before deleting old */
3409 err = atl1_setup_ring_resources(adapter);
3410 if (err)
3411 goto err_setup_ring;
3412
3413 /*
3414 * save the new, restore the old in order to free it,
3415 * then restore the new back again
3416 */
3417
3418 rfd_new = adapter->rfd_ring;
3419 rrd_new = adapter->rrd_ring;
3420 tpd_new = adapter->tpd_ring;
3421 rhdr_new = adapter->ring_header;
3422 adapter->rfd_ring = rfd_old;
3423 adapter->rrd_ring = rrd_old;
3424 adapter->tpd_ring = tpd_old;
3425 adapter->ring_header = rhdr_old;
3426 atl1_free_ring_resources(adapter);
3427 adapter->rfd_ring = rfd_new;
3428 adapter->rrd_ring = rrd_new;
3429 adapter->tpd_ring = tpd_new;
3430 adapter->ring_header = rhdr_new;
3431
3432 err = atl1_up(adapter);
3433 if (err)
3434 return err;
3435 }
3436 return 0;
3437
3438err_setup_ring:
3439 adapter->rfd_ring = rfd_old;
3440 adapter->rrd_ring = rrd_old;
3441 adapter->tpd_ring = tpd_old;
3442 adapter->ring_header = rhdr_old;
3443 atl1_up(adapter);
3444 return err;
3445}
3446
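atl1_set_ringparam clamps each requested descriptor count to its minimum/maximum and then rounds up to a multiple of four with (count + 3) & ~3. A small sketch of that clamp-and-round step, shown with the RFD limits from this header (helper name illustrative):

	#include <stdio.h>

	static unsigned int demo_ring_count(unsigned int requested,
					    unsigned int min, unsigned int max)
	{
		unsigned int count = requested < min ? min : requested;

		count = count > max ? max : count;
		return (count + 3) & ~3u;	/* round up to a multiple of 4 */
	}

	int main(void)
	{
		printf("%u %u %u\n",
		       demo_ring_count(100, 128, 2048),	 /* -> 128  */
		       demo_ring_count(513, 128, 2048),	 /* -> 516  */
		       demo_ring_count(4096, 128, 2048));/* -> 2048 */
		return 0;
	}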
3447static void atl1_get_pauseparam(struct net_device *netdev,
3448 struct ethtool_pauseparam *epause)
3449{
3450 struct atl1_adapter *adapter = netdev_priv(netdev);
3451 struct atl1_hw *hw = &adapter->hw;
3452
3453 if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
3454 hw->media_type == MEDIA_TYPE_1000M_FULL) {
3455 epause->autoneg = AUTONEG_ENABLE;
3456 } else {
3457 epause->autoneg = AUTONEG_DISABLE;
3458 }
3459 epause->rx_pause = 1;
3460 epause->tx_pause = 1;
3461}
3462
3463static int atl1_set_pauseparam(struct net_device *netdev,
3464 struct ethtool_pauseparam *epause)
3465{
3466 struct atl1_adapter *adapter = netdev_priv(netdev);
3467 struct atl1_hw *hw = &adapter->hw;
3468
3469 if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
3470 hw->media_type == MEDIA_TYPE_1000M_FULL) {
3471 epause->autoneg = AUTONEG_ENABLE;
3472 } else {
3473 epause->autoneg = AUTONEG_DISABLE;
3474 }
3475
3476 epause->rx_pause = 1;
3477 epause->tx_pause = 1;
3478
3479 return 0;
3480}
3481
3482/* FIXME: is this right? -- CHS */
3483static u32 atl1_get_rx_csum(struct net_device *netdev)
3484{
3485 return 1;
3486}
3487
3488static void atl1_get_strings(struct net_device *netdev, u32 stringset,
3489 u8 *data)
3490{
3491 u8 *p = data;
3492 int i;
3493
3494 switch (stringset) {
3495 case ETH_SS_STATS:
3496 for (i = 0; i < ARRAY_SIZE(atl1_gstrings_stats); i++) {
3497 memcpy(p, atl1_gstrings_stats[i].stat_string,
3498 ETH_GSTRING_LEN);
3499 p += ETH_GSTRING_LEN;
3500 }
3501 break;
3502 }
3503}
3504
3505static int atl1_nway_reset(struct net_device *netdev)
3506{
3507 struct atl1_adapter *adapter = netdev_priv(netdev);
3508 struct atl1_hw *hw = &adapter->hw;
3509
3510 if (netif_running(netdev)) {
3511 u16 phy_data;
3512 atl1_down(adapter);
3513
3514 if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
3515 hw->media_type == MEDIA_TYPE_1000M_FULL) {
3516 phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN;
3517 } else {
3518 switch (hw->media_type) {
3519 case MEDIA_TYPE_100M_FULL:
3520 phy_data = MII_CR_FULL_DUPLEX |
3521 MII_CR_SPEED_100 | MII_CR_RESET;
3522 break;
3523 case MEDIA_TYPE_100M_HALF:
3524 phy_data = MII_CR_SPEED_100 | MII_CR_RESET;
3525 break;
3526 case MEDIA_TYPE_10M_FULL:
3527 phy_data = MII_CR_FULL_DUPLEX |
3528 MII_CR_SPEED_10 | MII_CR_RESET;
3529 break;
3530 default:
3531 /* MEDIA_TYPE_10M_HALF */
3532 phy_data = MII_CR_SPEED_10 | MII_CR_RESET;
3533 }
3534 }
3535 atl1_write_phy_reg(hw, MII_BMCR, phy_data);
3536 atl1_up(adapter);
3537 }
3538 return 0;
3539}
3540
3541const struct ethtool_ops atl1_ethtool_ops = {
3542 .get_settings = atl1_get_settings,
3543 .set_settings = atl1_set_settings,
3544 .get_drvinfo = atl1_get_drvinfo,
3545 .get_wol = atl1_get_wol,
3546 .set_wol = atl1_set_wol,
3547 .get_msglevel = atl1_get_msglevel,
3548 .set_msglevel = atl1_set_msglevel,
3549 .get_regs_len = atl1_get_regs_len,
3550 .get_regs = atl1_get_regs,
3551 .get_ringparam = atl1_get_ringparam,
3552 .set_ringparam = atl1_set_ringparam,
3553 .get_pauseparam = atl1_get_pauseparam,
3554 .set_pauseparam = atl1_set_pauseparam,
3555 .get_rx_csum = atl1_get_rx_csum,
3556 .set_tx_csum = ethtool_op_set_tx_hw_csum,
3557 .get_link = ethtool_op_get_link,
3558 .set_sg = ethtool_op_set_sg,
3559 .get_strings = atl1_get_strings,
3560 .nway_reset = atl1_nway_reset,
3561 .get_ethtool_stats = atl1_get_ethtool_stats,
3562 .get_sset_count = atl1_get_sset_count,
3563 .set_tso = ethtool_op_set_tso,
3564};
diff --git a/drivers/net/atlx/atl1.h b/drivers/net/atlx/atl1.h
new file mode 100644
index 000000000000..51893d66eae1
--- /dev/null
+++ b/drivers/net/atlx/atl1.h
@@ -0,0 +1,796 @@
1/*
2 * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved.
3 * Copyright(c) 2006 - 2007 Chris Snook <csnook@redhat.com>
4 * Copyright(c) 2006 Jay Cliburn <jcliburn@gmail.com>
5 *
6 * Derived from Intel e1000 driver
7 * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the Free
11 * Software Foundation; either version 2 of the License, or (at your option)
12 * any later version.
13 *
14 * This program is distributed in the hope that it will be useful, but WITHOUT
15 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
17 * more details.
18 *
19 * You should have received a copy of the GNU General Public License along with
20 * this program; if not, write to the Free Software Foundation, Inc., 59
21 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
22 */
23
24#ifndef ATL1_H
25#define ATL1_H
26
27#include <linux/compiler.h>
28#include <linux/ethtool.h>
29#include <linux/if_vlan.h>
30#include <linux/mii.h>
31#include <linux/module.h>
32#include <linux/skbuff.h>
33#include <linux/spinlock.h>
34#include <linux/timer.h>
35#include <linux/types.h>
36#include <linux/workqueue.h>
37
38#include "atlx.h"
39
40#define ATLX_DRIVER_NAME "atl1"
41
42MODULE_DESCRIPTION("Atheros L1 Gigabit Ethernet Driver");
43
44#define atlx_adapter atl1_adapter
45#define atlx_check_for_link atl1_check_for_link
46#define atlx_check_link atl1_check_link
47#define atlx_hash_mc_addr atl1_hash_mc_addr
48#define atlx_hash_set atl1_hash_set
49#define atlx_hw atl1_hw
50#define atlx_mii_ioctl atl1_mii_ioctl
51#define atlx_read_phy_reg atl1_read_phy_reg
52#define atlx_set_mac atl1_set_mac
53#define atlx_set_mac_addr atl1_set_mac_addr
54
55struct atl1_adapter;
56struct atl1_hw;
57
58/* function prototypes needed by multiple files */
59u32 atl1_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr);
60void atl1_hash_set(struct atl1_hw *hw, u32 hash_value);
61s32 atl1_read_phy_reg(struct atl1_hw *hw, u16 reg_addr, u16 *phy_data);
62void atl1_set_mac_addr(struct atl1_hw *hw);
63static int atl1_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
64 int cmd);
65static u32 atl1_check_link(struct atl1_adapter *adapter);
66
67extern const struct ethtool_ops atl1_ethtool_ops;
68
69/* hardware definitions specific to L1 */
70
71/* Block IDLE Status Register */
72#define IDLE_STATUS_RXMAC 0x1
73#define IDLE_STATUS_TXMAC 0x2
74#define IDLE_STATUS_RXQ 0x4
75#define IDLE_STATUS_TXQ 0x8
76#define IDLE_STATUS_DMAR 0x10
77#define IDLE_STATUS_DMAW 0x20
78#define IDLE_STATUS_SMB 0x40
79#define IDLE_STATUS_CMB 0x80
80
81/* MDIO Control Register */
82#define MDIO_WAIT_TIMES 30
83
84/* MAC Control Register */
85#define MAC_CTRL_TX_PAUSE 0x10000
86#define MAC_CTRL_SCNT 0x20000
87#define MAC_CTRL_SRST_TX 0x40000
88#define MAC_CTRL_TX_SIMURST 0x80000
89#define MAC_CTRL_SPEED_SHIFT 20
90#define MAC_CTRL_SPEED_MASK 0x300000
91#define MAC_CTRL_SPEED_1000 0x2
92#define MAC_CTRL_SPEED_10_100 0x1
93#define MAC_CTRL_DBG_TX_BKPRESURE 0x400000
94#define MAC_CTRL_TX_HUGE 0x800000
95#define MAC_CTRL_RX_CHKSUM_EN 0x1000000
96#define MAC_CTRL_DBG 0x8000000
97
98/* Wake-On-Lan control register */
99#define WOL_CLK_SWITCH_EN 0x8000
100#define WOL_PT5_EN 0x200000
101#define WOL_PT6_EN 0x400000
102#define WOL_PT5_MATCH 0x8000000
103#define WOL_PT6_MATCH 0x10000000
104
105/* WOL Length ( 2 DWORD ) */
106#define REG_WOL_PATTERN_LEN 0x14A4
107#define WOL_PT_LEN_MASK 0x7F
108#define WOL_PT0_LEN_SHIFT 0
109#define WOL_PT1_LEN_SHIFT 8
110#define WOL_PT2_LEN_SHIFT 16
111#define WOL_PT3_LEN_SHIFT 24
112#define WOL_PT4_LEN_SHIFT 0
113#define WOL_PT5_LEN_SHIFT 8
114#define WOL_PT6_LEN_SHIFT 16
115
116/* Internal SRAM Partition Registers, low 32 bits */
117#define REG_SRAM_RFD_LEN 0x1504
118#define REG_SRAM_RRD_ADDR 0x1508
119#define REG_SRAM_RRD_LEN 0x150C
120#define REG_SRAM_TPD_ADDR 0x1510
121#define REG_SRAM_TPD_LEN 0x1514
122#define REG_SRAM_TRD_ADDR 0x1518
123#define REG_SRAM_TRD_LEN 0x151C
124#define REG_SRAM_RXF_ADDR 0x1520
125#define REG_SRAM_RXF_LEN 0x1524
126#define REG_SRAM_TXF_ADDR 0x1528
127#define REG_SRAM_TXF_LEN 0x152C
128#define REG_SRAM_TCPH_PATH_ADDR 0x1530
129#define SRAM_TCPH_ADDR_MASK 0xFFF
130#define SRAM_TCPH_ADDR_SHIFT 0
131#define SRAM_PATH_ADDR_MASK 0xFFF
132#define SRAM_PATH_ADDR_SHIFT 16
133
134/* Load Ptr Register */
135#define REG_LOAD_PTR 0x1534
136
137/* Descriptor Control registers, low 32 bits */
138#define REG_DESC_RFD_ADDR_LO 0x1544
139#define REG_DESC_RRD_ADDR_LO 0x1548
140#define REG_DESC_TPD_ADDR_LO 0x154C
141#define REG_DESC_CMB_ADDR_LO 0x1550
142#define REG_DESC_SMB_ADDR_LO 0x1554
143#define REG_DESC_RFD_RRD_RING_SIZE 0x1558
144#define DESC_RFD_RING_SIZE_MASK 0x7FF
145#define DESC_RFD_RING_SIZE_SHIFT 0
146#define DESC_RRD_RING_SIZE_MASK 0x7FF
147#define DESC_RRD_RING_SIZE_SHIFT 16
148#define REG_DESC_TPD_RING_SIZE 0x155C
149#define DESC_TPD_RING_SIZE_MASK 0x3FF
150#define DESC_TPD_RING_SIZE_SHIFT 0
151
152/* TXQ Control Register */
153#define REG_TXQ_CTRL 0x1580
154#define TXQ_CTRL_TPD_BURST_NUM_SHIFT 0
155#define TXQ_CTRL_TPD_BURST_NUM_MASK 0x1F
156#define TXQ_CTRL_EN 0x20
157#define TXQ_CTRL_ENH_MODE 0x40
158#define TXQ_CTRL_TPD_FETCH_TH_SHIFT 8
159#define TXQ_CTRL_TPD_FETCH_TH_MASK 0x3F
160#define TXQ_CTRL_TXF_BURST_NUM_SHIFT 16
161#define TXQ_CTRL_TXF_BURST_NUM_MASK 0xFFFF
162
163/* Jumbo packet Threshold for task offload */
164#define REG_TX_JUMBO_TASK_TH_TPD_IPG 0x1584
165#define TX_JUMBO_TASK_TH_MASK 0x7FF
166#define TX_JUMBO_TASK_TH_SHIFT 0
167#define TX_TPD_MIN_IPG_MASK 0x1F
168#define TX_TPD_MIN_IPG_SHIFT 16
169
170/* RXQ Control Register */
171#define REG_RXQ_CTRL 0x15A0
172#define RXQ_CTRL_RFD_BURST_NUM_SHIFT 0
173#define RXQ_CTRL_RFD_BURST_NUM_MASK 0xFF
174#define RXQ_CTRL_RRD_BURST_THRESH_SHIFT 8
175#define RXQ_CTRL_RRD_BURST_THRESH_MASK 0xFF
176#define RXQ_CTRL_RFD_PREF_MIN_IPG_SHIFT 16
177#define RXQ_CTRL_RFD_PREF_MIN_IPG_MASK 0x1F
178#define RXQ_CTRL_CUT_THRU_EN 0x40000000
179#define RXQ_CTRL_EN 0x80000000
180
181/* Rx jumbo packet threshold and rrd retirement timer */
182#define REG_RXQ_JMBOSZ_RRDTIM 0x15A4
183#define RXQ_JMBOSZ_TH_MASK 0x7FF
184#define RXQ_JMBOSZ_TH_SHIFT 0
185#define RXQ_JMBO_LKAH_MASK 0xF
186#define RXQ_JMBO_LKAH_SHIFT 11
187#define RXQ_RRD_TIMER_MASK 0xFFFF
188#define RXQ_RRD_TIMER_SHIFT 16
189
190/* RFD flow control register */
191#define REG_RXQ_RXF_PAUSE_THRESH 0x15A8
192#define RXQ_RXF_PAUSE_TH_HI_SHIFT 16
193#define RXQ_RXF_PAUSE_TH_HI_MASK 0xFFF
194#define RXQ_RXF_PAUSE_TH_LO_SHIFT 0
195#define RXQ_RXF_PAUSE_TH_LO_MASK 0xFFF
196
197/* RRD flow control register */
198#define REG_RXQ_RRD_PAUSE_THRESH 0x15AC
199#define RXQ_RRD_PAUSE_TH_HI_SHIFT 0
200#define RXQ_RRD_PAUSE_TH_HI_MASK 0xFFF
201#define RXQ_RRD_PAUSE_TH_LO_SHIFT 16
202#define RXQ_RRD_PAUSE_TH_LO_MASK 0xFFF
203
204/* DMA Engine Control Register */
205#define REG_DMA_CTRL 0x15C0
206#define DMA_CTRL_DMAR_IN_ORDER 0x1
207#define DMA_CTRL_DMAR_ENH_ORDER 0x2
208#define DMA_CTRL_DMAR_OUT_ORDER 0x4
209#define DMA_CTRL_RCB_VALUE 0x8
210#define DMA_CTRL_DMAR_BURST_LEN_SHIFT 4
211#define DMA_CTRL_DMAR_BURST_LEN_MASK 7
212#define DMA_CTRL_DMAW_BURST_LEN_SHIFT 7
213#define DMA_CTRL_DMAW_BURST_LEN_MASK 7
214#define DMA_CTRL_DMAR_EN 0x400
215#define DMA_CTRL_DMAW_EN 0x800
216
217/* CMB/SMB Control Register */
218#define REG_CSMB_CTRL 0x15D0
219#define CSMB_CTRL_CMB_NOW 1
220#define CSMB_CTRL_SMB_NOW 2
221#define CSMB_CTRL_CMB_EN 4
222#define CSMB_CTRL_SMB_EN 8
223
224/* CMB DMA Write Threshold Register */
225#define REG_CMB_WRITE_TH 0x15D4
226#define CMB_RRD_TH_SHIFT 0
227#define CMB_RRD_TH_MASK 0x7FF
228#define CMB_TPD_TH_SHIFT 16
229#define CMB_TPD_TH_MASK 0x7FF
230
231/* RX/TX count-down timer to trigger CMB-write. 2us resolution. */
232#define REG_CMB_WRITE_TIMER 0x15D8
233#define CMB_RX_TM_SHIFT 0
234#define CMB_RX_TM_MASK 0xFFFF
235#define CMB_TX_TM_SHIFT 16
236#define CMB_TX_TM_MASK 0xFFFF
237
238/* Number of packet received since last CMB write */
239#define REG_CMB_RX_PKT_CNT 0x15DC
240
241/* Number of packet transmitted since last CMB write */
242#define REG_CMB_TX_PKT_CNT 0x15E0
243
244/* SMB auto DMA timer register */
245#define REG_SMB_TIMER 0x15E4
246
247/* Mailbox Register */
248#define REG_MAILBOX 0x15F0
249#define MB_RFD_PROD_INDX_SHIFT 0
250#define MB_RFD_PROD_INDX_MASK 0x7FF
251#define MB_RRD_CONS_INDX_SHIFT 11
252#define MB_RRD_CONS_INDX_MASK 0x7FF
253#define MB_TPD_PROD_INDX_SHIFT 22
254#define MB_TPD_PROD_INDX_MASK 0x3FF
255
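Per the shift/mask definitions above, the RFD producer, RRD consumer and TPD producer indices share a single 32-bit mailbox doorbell (the driver's atl1_update_mailbox, seen earlier in the transmit path, performs this kind of write). A hedged sketch of the packing only, with the defines copied from above and an illustrative helper name:

	#include <stdint.h>

	#define MB_RFD_PROD_INDX_SHIFT	0
	#define MB_RFD_PROD_INDX_MASK	0x7FF
	#define MB_RRD_CONS_INDX_SHIFT	11
	#define MB_RRD_CONS_INDX_MASK	0x7FF
	#define MB_TPD_PROD_INDX_SHIFT	22
	#define MB_TPD_PROD_INDX_MASK	0x3FF

	/* Pack the three ring indices into one 32-bit mailbox value. */
	static uint32_t demo_mailbox_value(uint32_t rfd_prod, uint32_t rrd_cons,
					   uint32_t tpd_prod)
	{
		return ((rfd_prod & MB_RFD_PROD_INDX_MASK) << MB_RFD_PROD_INDX_SHIFT) |
		       ((rrd_cons & MB_RRD_CONS_INDX_MASK) << MB_RRD_CONS_INDX_SHIFT) |
		       ((tpd_prod & MB_TPD_PROD_INDX_MASK) << MB_TPD_PROD_INDX_SHIFT);
	}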
256/* Interrupt Status Register */
257#define ISR_SMB 0x1
258#define ISR_TIMER 0x2
259#define ISR_MANUAL 0x4
260#define ISR_RXF_OV 0x8
261#define ISR_RFD_UNRUN 0x10
262#define ISR_RRD_OV 0x20
263#define ISR_TXF_UNRUN 0x40
264#define ISR_LINK 0x80
265#define ISR_HOST_RFD_UNRUN 0x100
266#define ISR_HOST_RRD_OV 0x200
267#define ISR_DMAR_TO_RST 0x400
268#define ISR_DMAW_TO_RST 0x800
269#define ISR_GPHY 0x1000
270#define ISR_RX_PKT 0x10000
271#define ISR_TX_PKT 0x20000
272#define ISR_TX_DMA 0x40000
273#define ISR_RX_DMA 0x80000
274#define ISR_CMB_RX 0x100000
275#define ISR_CMB_TX 0x200000
276#define ISR_MAC_RX 0x400000
277#define ISR_MAC_TX 0x800000
278#define ISR_DIS_SMB 0x20000000
279#define ISR_DIS_DMA 0x40000000
280
281/* Normal Interrupt mask */
282#define IMR_NORMAL_MASK (\
283 ISR_SMB |\
284 ISR_GPHY |\
285 ISR_PHY_LINKDOWN|\
286 ISR_DMAR_TO_RST |\
287 ISR_DMAW_TO_RST |\
288 ISR_CMB_TX |\
289 ISR_CMB_RX)
290
291/* Debug Interrupt Mask (enable all interrupt) */
292#define IMR_DEBUG_MASK (\
293 ISR_SMB |\
294 ISR_TIMER |\
295 ISR_MANUAL |\
296 ISR_RXF_OV |\
297 ISR_RFD_UNRUN |\
298 ISR_RRD_OV |\
299 ISR_TXF_UNRUN |\
300 ISR_LINK |\
301 ISR_CMB_TX |\
302 ISR_CMB_RX |\
303 ISR_RX_PKT |\
304 ISR_TX_PKT |\
305 ISR_MAC_RX |\
306 ISR_MAC_TX)
307
308#define MEDIA_TYPE_1000M_FULL 1
309#define MEDIA_TYPE_100M_FULL 2
310#define MEDIA_TYPE_100M_HALF 3
311#define MEDIA_TYPE_10M_FULL 4
312#define MEDIA_TYPE_10M_HALF 5
313
314#define AUTONEG_ADVERTISE_SPEED_DEFAULT 0x002F /* All but 1000-Half */
315
316#define MAX_JUMBO_FRAME_SIZE 10240
317
318#define ATL1_EEDUMP_LEN 48
319
320/* Statistics counters collected by the MAC */
321struct stats_msg_block {
322 /* rx */
323 u32 rx_ok; /* good RX packets */
324 u32 rx_bcast; /* good RX broadcast packets */
325 u32 rx_mcast; /* good RX multicast packets */
326 u32 rx_pause; /* RX pause frames */
327 u32 rx_ctrl; /* RX control packets other than pause frames */
328 u32 rx_fcs_err; /* RX packets with bad FCS */
329 u32 rx_len_err; /* RX packets with length != actual size */
330 u32 rx_byte_cnt; /* good bytes received. FCS is NOT included */
331 u32 rx_runt; /* RX packets < 64 bytes with good FCS */
332 u32 rx_frag; /* RX packets < 64 bytes with bad FCS */
333 u32 rx_sz_64; /* 64 byte RX packets */
334 u32 rx_sz_65_127;
335 u32 rx_sz_128_255;
336 u32 rx_sz_256_511;
337 u32 rx_sz_512_1023;
338 u32 rx_sz_1024_1518;
339 u32 rx_sz_1519_max; /* 1519 byte to MTU RX packets */
340 u32 rx_sz_ov; /* truncated RX packets > MTU */
341 u32 rx_rxf_ov; /* frames dropped due to RX FIFO overflow */
342 u32 rx_rrd_ov; /* frames dropped due to RRD overflow */
343 u32 rx_align_err; /* alignment errors */
344 u32 rx_bcast_byte_cnt; /* RX broadcast bytes, excluding FCS */
345 u32 rx_mcast_byte_cnt; /* RX multicast bytes, excluding FCS */
346 u32 rx_err_addr; /* packets dropped due to address filtering */
347
348 /* tx */
349 u32 tx_ok; /* good TX packets */
350 u32 tx_bcast; /* good TX broadcast packets */
351 u32 tx_mcast; /* good TX multicast packets */
352 u32 tx_pause; /* TX pause frames */
353 u32 tx_exc_defer; /* TX packets deferred excessively */
354 u32 tx_ctrl; /* TX control frames, excluding pause frames */
355 u32 tx_defer; /* TX packets deferred */
356 u32 tx_byte_cnt; /* bytes transmitted, FCS is NOT included */
357 u32 tx_sz_64; /* 64 byte TX packets */
358 u32 tx_sz_65_127;
359 u32 tx_sz_128_255;
360 u32 tx_sz_256_511;
361 u32 tx_sz_512_1023;
362 u32 tx_sz_1024_1518;
363 u32 tx_sz_1519_max; /* 1519 byte to MTU TX packets */
364 u32 tx_1_col; /* packets TX after a single collision */
365 u32 tx_2_col; /* packets TX after multiple collisions */
366 u32 tx_late_col; /* TX packets with late collisions */
367 u32 tx_abort_col; /* TX packets aborted w/excessive collisions */
368 u32 tx_underrun; /* TX packets aborted due to TX FIFO underrun
369 * or TRD FIFO underrun */
370 u32 tx_rd_eop; /* reads beyond the EOP into the next frame
371 * when TRD was not written timely */
372 u32 tx_len_err; /* TX packets where length != actual size */
373 u32 tx_trunc; /* TX packets truncated due to size > MTU */
374 u32 tx_bcast_byte; /* broadcast bytes transmitted, excluding FCS */
375 u32 tx_mcast_byte; /* multicast bytes transmitted, excluding FCS */
376 u32 smb_updated; /* 1: SMB Updated. This is used by software to
377 * indicate the statistics update. Software
378 * should clear this bit after retrieving the
379 * statistics information. */
380};
381
382/* Coalescing Message Block */
383struct coals_msg_block {
384 u32 int_stats; /* interrupt status */
385 u16 rrd_prod_idx; /* TRD Producer Index. */
386 u16 rfd_cons_idx; /* RFD Consumer Index. */
387 u16 update; /* Selene sets this bit every time it DMAs the
388 * CMB to host memory. Software should clear
389 * this bit when CMB info is processed. */
390 u16 tpd_cons_idx; /* TPD Consumer Index. */
391};
392
393/* RRD descriptor */
394struct rx_return_desc {
395 u8 num_buf; /* Number of RFD buffers used by the received packet */
396 u8 resved;
397 u16 buf_indx; /* RFD Index of the first buffer */
398 union {
399 u32 valid;
400 struct {
401 u16 rx_chksum;
402 u16 pkt_size;
403 } xsum_sz;
404 } xsz;
405
406 u16 pkt_flg; /* Packet flags */
407 u16 err_flg; /* Error flags */
408 u16 resved2;
409 u16 vlan_tag; /* VLAN TAG */
410};
411
412#define PACKET_FLAG_ETH_TYPE 0x0080
413#define PACKET_FLAG_VLAN_INS 0x0100
414#define PACKET_FLAG_ERR 0x0200
415#define PACKET_FLAG_IPV4 0x0400
416#define PACKET_FLAG_UDP 0x0800
417#define PACKET_FLAG_TCP 0x1000
418#define PACKET_FLAG_BCAST 0x2000
419#define PACKET_FLAG_MCAST 0x4000
420#define PACKET_FLAG_PAUSE 0x8000
421
422#define ERR_FLAG_CRC 0x0001
423#define ERR_FLAG_CODE 0x0002
424#define ERR_FLAG_DRIBBLE 0x0004
425#define ERR_FLAG_RUNT 0x0008
426#define ERR_FLAG_OV 0x0010
427#define ERR_FLAG_TRUNC 0x0020
428#define ERR_FLAG_IP_CHKSUM 0x0040
429#define ERR_FLAG_L4_CHKSUM 0x0080
430#define ERR_FLAG_LEN 0x0100
431#define ERR_FLAG_DES_ADDR 0x0200
432
433/* RFD descriptor */
434struct rx_free_desc {
435 __le64 buffer_addr; /* Address of the descriptor's data buffer */
436 __le16 buf_len; /* Size of the receive buffer in host memory */
437 u16 coalese; /* Update consumer index to host after the
438 * reception of this frame */
439 /* __attribute__ ((packed)) is required */
440} __attribute__ ((packed));
441
442/*
443 * The L1 transmit packet descriptor is comprised of four 32-bit words.
444 *
445 * 31 0
446 * +---------------------------------------+
447 * | Word 0: Buffer addr lo |
448 * +---------------------------------------+
449 * | Word 1: Buffer addr hi |
450 * +---------------------------------------+
451 * | Word 2 |
452 * +---------------------------------------+
453 * | Word 3 |
454 * +---------------------------------------+
455 *
456 * Words 0 and 1 combine to form a 64-bit buffer address.
457 *
458 * Word 2 is self explanatory in the #define block below.
459 *
460 * Word 3 has two forms, depending upon the state of bits 3 and 4.
461 * If bits 3 and 4 are both zero, then bits 14:31 are unused by the
462 * hardware. Otherwise, if either bit 3 or 4 is set, the definition
463 * of bits 14:31 vary according to the following depiction.
464 *
465 * 0 End of packet 0 End of packet
466 * 1 Coalesce 1 Coalesce
467 * 2 Insert VLAN tag 2 Insert VLAN tag
468 * 3 Custom csum enable = 0 3 Custom csum enable = 1
469 * 4 Segment enable = 1 4 Segment enable = 0
470 * 5 Generate IP checksum 5 Generate IP checksum
471 * 6 Generate TCP checksum 6 Generate TCP checksum
472 * 7 Generate UDP checksum 7 Generate UDP checksum
473 * 8 VLAN tagged 8 VLAN tagged
474 * 9 Ethernet frame type 9 Ethernet frame type
475 * 10-+ 10-+
476 * 11 | IP hdr length (10:13) 11 | IP hdr length (10:13)
477 * 12 | (num 32-bit words) 12 | (num 32-bit words)
478 * 13-+ 13-+
479 * 14-+ 14 Unused
480 * 15 | TCP hdr length (14:17) 15 Unused
481 * 16 | (num 32-bit words) 16-+
482 * 17-+ 17 |
483 * 18 Header TPD flag 18 |
484 * 19-+ 19 | Payload offset
485 * 20 | 20 | (16:23)
486 * 21 | 21 |
487 * 22 | 22 |
488 * 23 | 23-+
489 * 24 | 24-+
490 * 25 | MSS (19:31) 25 |
491 * 26 | 26 |
492 * 27 | 27 | Custom csum offset
493 * 28 | 28 | (24:31)
494 * 29 | 29 |
495 * 30 | 30 |
496 * 31-+ 31-+
497 */
498
499/* tpd word 2 */
500#define TPD_BUFLEN_MASK 0x3FFF
501#define TPD_BUFLEN_SHIFT 0
502#define TPD_DMAINT_MASK 0x0001
503#define TPD_DMAINT_SHIFT 14
504#define TPD_PKTNT_MASK 0x0001
505#define TPD_PKTINT_SHIFT 15
506#define TPD_VLANTAG_MASK 0xFFFF
507#define TPD_VLAN_SHIFT 16
508
509/* tpd word 3 bits 0:13 */
510#define TPD_EOP_MASK 0x0001
511#define TPD_EOP_SHIFT 0
512#define TPD_COALESCE_MASK 0x0001
513#define TPD_COALESCE_SHIFT 1
514#define TPD_INS_VL_TAG_MASK 0x0001
515#define TPD_INS_VL_TAG_SHIFT 2
516#define TPD_CUST_CSUM_EN_MASK 0x0001
517#define TPD_CUST_CSUM_EN_SHIFT 3
518#define TPD_SEGMENT_EN_MASK 0x0001
519#define TPD_SEGMENT_EN_SHIFT 4
520#define TPD_IP_CSUM_MASK 0x0001
521#define TPD_IP_CSUM_SHIFT 5
522#define TPD_TCP_CSUM_MASK 0x0001
523#define TPD_TCP_CSUM_SHIFT 6
524#define TPD_UDP_CSUM_MASK 0x0001
525#define TPD_UDP_CSUM_SHIFT 7
526#define TPD_VL_TAGGED_MASK 0x0001
527#define TPD_VL_TAGGED_SHIFT 8
528#define TPD_ETHTYPE_MASK 0x0001
529#define TPD_ETHTYPE_SHIFT 9
530#define TPD_IPHL_MASK 0x000F
531#define TPD_IPHL_SHIFT 10
532
533/* tpd word 3 bits 14:31 if segment enabled */
534#define TPD_TCPHDRLEN_MASK 0x000F
535#define TPD_TCPHDRLEN_SHIFT 14
536#define TPD_HDRFLAG_MASK 0x0001
537#define TPD_HDRFLAG_SHIFT 18
538#define TPD_MSS_MASK 0x1FFF
539#define TPD_MSS_SHIFT 19
540
541/* tpd word 3 bits 16:31 if custom csum enabled */
542#define TPD_PLOADOFFSET_MASK 0x00FF
543#define TPD_PLOADOFFSET_SHIFT 16
544#define TPD_CCSUMOFFSET_MASK 0x00FF
545#define TPD_CCSUMOFFSET_SHIFT 24
546
547struct tx_packet_desc {
548 __le64 buffer_addr;
549 __le32 word2;
550 __le32 word3;
551};
552
553/* DMA Order Settings */
554enum atl1_dma_order {
555 atl1_dma_ord_in = 1,
556 atl1_dma_ord_enh = 2,
557 atl1_dma_ord_out = 4
558};
559
560enum atl1_dma_rcb {
561 atl1_rcb_64 = 0,
562 atl1_rcb_128 = 1
563};
564
565enum atl1_dma_req_block {
566 atl1_dma_req_128 = 0,
567 atl1_dma_req_256 = 1,
568 atl1_dma_req_512 = 2,
569 atl1_dma_req_1024 = 3,
570 atl1_dma_req_2048 = 4,
571 atl1_dma_req_4096 = 5
572};
573
574#define ATL1_MAX_INTR 3
575#define ATL1_MAX_TX_BUF_LEN 0x3000 /* 12288 bytes */
576
577#define ATL1_DEFAULT_TPD 256
578#define ATL1_MAX_TPD 1024
579#define ATL1_MIN_TPD 64
580#define ATL1_DEFAULT_RFD 512
581#define ATL1_MIN_RFD 128
582#define ATL1_MAX_RFD 2048
583#define ATL1_REG_COUNT 1538
584
585#define ATL1_GET_DESC(R, i, type) (&(((type *)((R)->desc))[i]))
586#define ATL1_RFD_DESC(R, i) ATL1_GET_DESC(R, i, struct rx_free_desc)
587#define ATL1_TPD_DESC(R, i) ATL1_GET_DESC(R, i, struct tx_packet_desc)
588#define ATL1_RRD_DESC(R, i) ATL1_GET_DESC(R, i, struct rx_return_desc)
589
590/*
591 * atl1_ring_header represents a single, contiguous block of DMA space
592 * mapped for the three descriptor rings (tpd, rfd, rrd) and the two
593 * message blocks (cmb, smb) described below
594 */
595struct atl1_ring_header {
596 void *desc; /* virtual address */
597 dma_addr_t dma; /* physical address */
598 unsigned int size; /* length in bytes */
599};
600
601/*
602 * atl1_buffer is a wrapper around a pointer to a socket buffer
603 * so a DMA handle can be stored along with the skb
604 */
605struct atl1_buffer {
606 struct sk_buff *skb; /* socket buffer */
607 u16 length; /* rx buffer length */
608 u16 alloced; /* 1 if skb allocated */
609 dma_addr_t dma;
610};
611
612/* transmit packet descriptor (tpd) ring */
613struct atl1_tpd_ring {
614 void *desc; /* descriptor ring virtual address */
615 dma_addr_t dma; /* descriptor ring physical address */
616 u16 size; /* descriptor ring length in bytes */
617 u16 count; /* number of descriptors in the ring */
618 u16 hw_idx; /* hardware index */
619 atomic_t next_to_clean;
620 atomic_t next_to_use;
621 struct atl1_buffer *buffer_info;
622};
623
624/* receive free descriptor (rfd) ring */
625struct atl1_rfd_ring {
626 void *desc; /* descriptor ring virtual address */
627 dma_addr_t dma; /* descriptor ring physical address */
628 u16 size; /* descriptor ring length in bytes */
629 u16 count; /* number of descriptors in the ring */
630 atomic_t next_to_use;
631 u16 next_to_clean;
632 struct atl1_buffer *buffer_info;
633};
634
635/* receive return descriptor (rrd) ring */
636struct atl1_rrd_ring {
637 void *desc; /* descriptor ring virtual address */
638 dma_addr_t dma; /* descriptor ring physical address */
639 unsigned int size; /* descriptor ring length in bytes */
640 u16 count; /* number of descriptors in the ring */
641 u16 next_to_use;
642 atomic_t next_to_clean;
643};
644
645/* coalescing message block (cmb) */
646struct atl1_cmb {
647 struct coals_msg_block *cmb;
648 dma_addr_t dma;
649};
650
651/* statistics message block (smb) */
652struct atl1_smb {
653 struct stats_msg_block *smb;
654 dma_addr_t dma;
655};
656
657/* Statistics counters */
658struct atl1_sft_stats {
659 u64 rx_packets;
660 u64 tx_packets;
661 u64 rx_bytes;
662 u64 tx_bytes;
663 u64 multicast;
664 u64 collisions;
665 u64 rx_errors;
666 u64 rx_length_errors;
667 u64 rx_crc_errors;
668 u64 rx_frame_errors;
669 u64 rx_fifo_errors;
670 u64 rx_missed_errors;
671 u64 tx_errors;
672 u64 tx_fifo_errors;
673 u64 tx_aborted_errors;
674 u64 tx_window_errors;
675 u64 tx_carrier_errors;
676 u64 tx_pause; /* TX pause frames */
677 u64 excecol; /* TX packets w/ excessive collisions */
678 u64 deffer; /* TX packets deferred */
679 u64 scc; /* packets TX after a single collision */
680 u64 mcc; /* packets TX after multiple collisions */
681 u64 latecol; /* TX packets w/ late collisions */
682 u64 tx_underun; /* TX packets aborted due to TX FIFO underrun
683 * or TRD FIFO underrun */
684 u64 tx_trunc; /* TX packets truncated due to size > MTU */
685 u64 rx_pause; /* num Pause packets received. */
686 u64 rx_rrd_ov;
687 u64 rx_trunc;
688};
689
690/* hardware structure */
691struct atl1_hw {
692 u8 __iomem *hw_addr;
693 struct atl1_adapter *back;
694 enum atl1_dma_order dma_ord;
695 enum atl1_dma_rcb rcb_value;
696 enum atl1_dma_req_block dmar_block;
697 enum atl1_dma_req_block dmaw_block;
698 u8 preamble_len;
699 u8 max_retry;
700 u8 jam_ipg; /* IPG to start JAM for collision-based flow
701 * control in half-duplex mode. In units of
702 * 8-bit time */
703 u8 ipgt; /* Desired back-to-back inter-packet gap.
704 * The default is 96-bit time */
705 u8 min_ifg; /* Minimum number of IFGs to enforce between
706 * receive frames. A frame gap below this IFG
707 * is dropped */
708 u8 ipgr1; /* 64-bit Carrier-Sense window */
709 u8 ipgr2; /* 96-bit IPG window */
710 u8 tpd_burst; /* Number of TPD to prefetch in cache-aligned
711 * burst. Each TPD is 16 bytes long */
712 u8 rfd_burst; /* Number of RFD to prefetch in cache-aligned
713 * burst. Each RFD is 12 bytes long */
714 u8 rfd_fetch_gap;
715 u8 rrd_burst; /* Threshold number of RRDs that can be retired
716 * in a burst. Each RRD is 16 bytes long */
717 u8 tpd_fetch_th;
718 u8 tpd_fetch_gap;
719 u16 tx_jumbo_task_th;
720 u16 txf_burst; /* Number of data bytes to read in a cache-
721 * aligned burst. Each SRAM entry is 8 bytes */
722 u16 rx_jumbo_th; /* Jumbo packet size for non-VLAN packet. VLAN
723 * packets should add 4 bytes */
724 u16 rx_jumbo_lkah;
725 u16 rrd_ret_timer; /* RRD retirement timer, decremented by 1
726 * every 512 ns */
727 u16 lcol; /* Collision Window */
728
729 u16 cmb_tpd;
730 u16 cmb_rrd;
731 u16 cmb_rx_timer;
732 u16 cmb_tx_timer;
733 u32 smb_timer;
734 u16 media_type;
735 u16 autoneg_advertised;
736
737 u16 mii_autoneg_adv_reg;
738 u16 mii_1000t_ctrl_reg;
739
740 u32 max_frame_size;
741 u32 min_frame_size;
742
743 u16 dev_rev;
744
745 /* spi flash */
746 u8 flash_vendor;
747
748 u8 mac_addr[ETH_ALEN];
749 u8 perm_mac_addr[ETH_ALEN];
750
751 bool phy_configured;
752};
753
754struct atl1_adapter {
755 struct net_device *netdev;
756 struct pci_dev *pdev;
757 struct net_device_stats net_stats;
758 struct atl1_sft_stats soft_stats;
759 struct vlan_group *vlgrp;
760 u32 rx_buffer_len;
761 u32 wol;
762 u16 link_speed;
763 u16 link_duplex;
764 spinlock_t lock;
765 struct work_struct tx_timeout_task;
766 struct work_struct link_chg_task;
767 struct work_struct pcie_dma_to_rst_task;
768 struct timer_list watchdog_timer;
769 struct timer_list phy_config_timer;
770 bool phy_timer_pending;
771
772 /* all descriptor rings' memory */
773 struct atl1_ring_header ring_header;
774
775 /* TX */
776 struct atl1_tpd_ring tpd_ring;
777 spinlock_t mb_lock;
778
779 /* RX */
780 struct atl1_rfd_ring rfd_ring;
781 struct atl1_rrd_ring rrd_ring;
782 u64 hw_csum_err;
783 u64 hw_csum_good;
784 u32 msg_enable;
785 u16 imt; /* interrupt moderator timer (2us resolution) */
786 u16 ict; /* interrupt clear timer (2us resolution) */
787 struct mii_if_info mii; /* MII interface info */
788
789 u32 bd_number; /* board number */
790 bool pci_using_64;
791 struct atl1_hw hw;
792 struct atl1_smb smb;
793 struct atl1_cmb cmb;
794};
795
796#endif /* ATL1_H */
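
To make the TPD word 2/word 3 layout described in this header concrete, here is a minimal sketch of how a transmit path might encode one descriptor using the masks, shifts and the ATL1_TPD_DESC() accessor defined above. It is an illustration only, not the driver's actual transmit code (which lives in atlx/atl1.c); the function and variable names are hypothetical, and the kernel byte-order helpers are assumed to be available.

/* Illustrative sketch, assuming the atl1.h definitions above; the
 * buffer is already DMA-mapped and 'len' fits the 14-bit field.
 */
static void example_fill_tpd(struct atl1_tpd_ring *tpd_ring, u16 index,
			     dma_addr_t buf, u16 len, u16 vlan_tag)
{
	struct tx_packet_desc *tpd = ATL1_TPD_DESC(tpd_ring, index);
	u32 word2 = 0, word3 = 0;

	/* words 0 and 1: 64-bit buffer address */
	tpd->buffer_addr = cpu_to_le64(buf);

	/* word 2: buffer length in bits 0:13, VLAN tag in bits 16:31 */
	word2 |= ((u32)len & TPD_BUFLEN_MASK) << TPD_BUFLEN_SHIFT;
	word2 |= ((u32)vlan_tag & TPD_VLANTAG_MASK) << TPD_VLAN_SHIFT;

	/* word 3 with the custom-checksum and segmentation bits clear
	 * (so bits 14:31 are unused): mark end of packet and ask the
	 * hardware to insert the VLAN tag carried in word 2
	 */
	word3 |= (1 & TPD_EOP_MASK) << TPD_EOP_SHIFT;
	word3 |= (1 & TPD_INS_VL_TAG_MASK) << TPD_INS_VL_TAG_SHIFT;

	tpd->word2 = cpu_to_le32(word2);
	tpd->word3 = cpu_to_le32(word3);
}
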
diff --git a/drivers/net/atlx/atlx.c b/drivers/net/atlx/atlx.c
new file mode 100644
index 000000000000..4186326d1b94
--- /dev/null
+++ b/drivers/net/atlx/atlx.c
@@ -0,0 +1,433 @@
1/* atlx.c -- common functions for Attansic network drivers
2 *
3 * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved.
4 * Copyright(c) 2006 - 2007 Chris Snook <csnook@redhat.com>
5 * Copyright(c) 2006 Jay Cliburn <jcliburn@gmail.com>
6 * Copyright(c) 2007 Atheros Corporation. All rights reserved.
7 *
8 * Derived from Intel e1000 driver
9 * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
10 *
11 * This program is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the Free
13 * Software Foundation; either version 2 of the License, or (at your option)
14 * any later version.
15 *
16 * This program is distributed in the hope that it will be useful, but WITHOUT
17 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
19 * more details.
20 *
21 * You should have received a copy of the GNU General Public License along with
22 * this program; if not, write to the Free Software Foundation, Inc., 59
23 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
24 */
25
26/* Including this file like a header is a temporary hack, I promise. -- CHS */
27#ifndef ATLX_C
28#define ATLX_C
29
30#include <linux/device.h>
31#include <linux/errno.h>
32#include <linux/etherdevice.h>
33#include <linux/if.h>
34#include <linux/netdevice.h>
35#include <linux/socket.h>
36#include <linux/sockios.h>
37#include <linux/spinlock.h>
38#include <linux/string.h>
39#include <linux/types.h>
40#include <linux/workqueue.h>
41
42#include "atlx.h"
43
44static struct atlx_spi_flash_dev flash_table[] = {
45/* MFR_NAME WRSR READ PRGM WREN WRDI RDSR RDID SEC_ERS CHIP_ERS */
46 {"Atmel", 0x00, 0x03, 0x02, 0x06, 0x04, 0x05, 0x15, 0x52, 0x62},
47 {"SST", 0x01, 0x03, 0x02, 0x06, 0x04, 0x05, 0x90, 0x20, 0x60},
48 {"ST", 0x01, 0x03, 0x02, 0x06, 0x04, 0x05, 0xAB, 0xD8, 0xC7},
49};
50
51static int atlx_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
52{
53 switch (cmd) {
54 case SIOCGMIIPHY:
55 case SIOCGMIIREG:
56 case SIOCSMIIREG:
57 return atlx_mii_ioctl(netdev, ifr, cmd);
58 default:
59 return -EOPNOTSUPP;
60 }
61}
62
63/*
64 * atlx_set_mac - Change the Ethernet Address of the NIC
65 * @netdev: network interface device structure
66 * @p: pointer to an address structure
67 *
68 * Returns 0 on success, negative on failure
69 */
70static int atlx_set_mac(struct net_device *netdev, void *p)
71{
72 struct atlx_adapter *adapter = netdev_priv(netdev);
73 struct sockaddr *addr = p;
74
75 if (netif_running(netdev))
76 return -EBUSY;
77
78 if (!is_valid_ether_addr(addr->sa_data))
79 return -EADDRNOTAVAIL;
80
81 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
82 memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len);
83
84 atlx_set_mac_addr(&adapter->hw);
85 return 0;
86}
87
88static void atlx_check_for_link(struct atlx_adapter *adapter)
89{
90 struct net_device *netdev = adapter->netdev;
91 u16 phy_data = 0;
92
93 spin_lock(&adapter->lock);
94 adapter->phy_timer_pending = false;
95 atlx_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data);
96 atlx_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data);
97 spin_unlock(&adapter->lock);
98
99 /* notify upper layer link down ASAP */
100 if (!(phy_data & BMSR_LSTATUS)) {
101 /* Link Down */
102 if (netif_carrier_ok(netdev)) {
103 /* old link state: Up */
104 dev_info(&adapter->pdev->dev, "%s link is down\n",
105 netdev->name);
106 adapter->link_speed = SPEED_0;
107 netif_carrier_off(netdev);
108 netif_stop_queue(netdev);
109 }
110 }
111 schedule_work(&adapter->link_chg_task);
112}
113
114/*
115 * atlx_set_multi - Multicast and Promiscuous mode set
116 * @netdev: network interface device structure
117 *
118 * The set_multi entry point is called whenever the multicast address
119 * list or the network interface flags are updated. This routine is
120 * responsible for configuring the hardware for proper multicast,
121 * promiscuous mode, and all-multi behavior.
122 */
123static void atlx_set_multi(struct net_device *netdev)
124{
125 struct atlx_adapter *adapter = netdev_priv(netdev);
126 struct atlx_hw *hw = &adapter->hw;
127 struct dev_mc_list *mc_ptr;
128 u32 rctl;
129 u32 hash_value;
130
131 /* Check for Promiscuous and All Multicast modes */
132 rctl = ioread32(hw->hw_addr + REG_MAC_CTRL);
133 if (netdev->flags & IFF_PROMISC)
134 rctl |= MAC_CTRL_PROMIS_EN;
135 else if (netdev->flags & IFF_ALLMULTI) {
136 rctl |= MAC_CTRL_MC_ALL_EN;
137 rctl &= ~MAC_CTRL_PROMIS_EN;
138 } else
139 rctl &= ~(MAC_CTRL_PROMIS_EN | MAC_CTRL_MC_ALL_EN);
140
141 iowrite32(rctl, hw->hw_addr + REG_MAC_CTRL);
142
143 /* clear the old settings from the multicast hash table */
144 iowrite32(0, hw->hw_addr + REG_RX_HASH_TABLE);
145 iowrite32(0, (hw->hw_addr + REG_RX_HASH_TABLE) + (1 << 2));
146
147 /* compute each mc address's hash value and put it into the hash table */
148 for (mc_ptr = netdev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) {
149 hash_value = atlx_hash_mc_addr(hw, mc_ptr->dmi_addr);
150 atlx_hash_set(hw, hash_value);
151 }
152}
153
154/*
155 * atlx_irq_enable - Enable default interrupt generation settings
156 * @adapter: board private structure
157 */
158static void atlx_irq_enable(struct atlx_adapter *adapter)
159{
160 iowrite32(IMR_NORMAL_MASK, adapter->hw.hw_addr + REG_IMR);
161 ioread32(adapter->hw.hw_addr + REG_IMR);
162}
163
164/*
165 * atlx_irq_disable - Mask off interrupt generation on the NIC
166 * @adapter: board private structure
167 */
168static void atlx_irq_disable(struct atlx_adapter *adapter)
169{
170 iowrite32(0, adapter->hw.hw_addr + REG_IMR);
171 ioread32(adapter->hw.hw_addr + REG_IMR);
172 synchronize_irq(adapter->pdev->irq);
173}
174
175static void atlx_clear_phy_int(struct atlx_adapter *adapter)
176{
177 u16 phy_data;
178 unsigned long flags;
179
180 spin_lock_irqsave(&adapter->lock, flags);
181 atlx_read_phy_reg(&adapter->hw, 19, &phy_data);
182 spin_unlock_irqrestore(&adapter->lock, flags);
183}
184
185/*
186 * atlx_get_stats - Get System Network Statistics
187 * @netdev: network interface device structure
188 *
189 * Returns the address of the device statistics structure.
190 * The statistics are actually updated from the timer callback.
191 */
192static struct net_device_stats *atlx_get_stats(struct net_device *netdev)
193{
194 struct atlx_adapter *adapter = netdev_priv(netdev);
195 return &adapter->net_stats;
196}
197
198/*
199 * atlx_tx_timeout - Respond to a Tx Hang
200 * @netdev: network interface device structure
201 */
202static void atlx_tx_timeout(struct net_device *netdev)
203{
204 struct atlx_adapter *adapter = netdev_priv(netdev);
205 /* Do the reset outside of interrupt context */
206 schedule_work(&adapter->tx_timeout_task);
207}
208
209/*
210 * atlx_link_chg_task - handle a link change event outside interrupt context
211 */
212static void atlx_link_chg_task(struct work_struct *work)
213{
214 struct atlx_adapter *adapter;
215 unsigned long flags;
216
217 adapter = container_of(work, struct atlx_adapter, link_chg_task);
218
219 spin_lock_irqsave(&adapter->lock, flags);
220 atlx_check_link(adapter);
221 spin_unlock_irqrestore(&adapter->lock, flags);
222}
223
224static void atlx_vlan_rx_register(struct net_device *netdev,
225 struct vlan_group *grp)
226{
227 struct atlx_adapter *adapter = netdev_priv(netdev);
228 unsigned long flags;
229 u32 ctrl;
230
231 spin_lock_irqsave(&adapter->lock, flags);
232 /* atlx_irq_disable(adapter); FIXME: confirm/remove */
233 adapter->vlgrp = grp;
234
235 if (grp) {
236 /* enable VLAN tag insert/strip */
237 ctrl = ioread32(adapter->hw.hw_addr + REG_MAC_CTRL);
238 ctrl |= MAC_CTRL_RMV_VLAN;
239 iowrite32(ctrl, adapter->hw.hw_addr + REG_MAC_CTRL);
240 } else {
241 /* disable VLAN tag insert/strip */
242 ctrl = ioread32(adapter->hw.hw_addr + REG_MAC_CTRL);
243 ctrl &= ~MAC_CTRL_RMV_VLAN;
244 iowrite32(ctrl, adapter->hw.hw_addr + REG_MAC_CTRL);
245 }
246
247 /* atlx_irq_enable(adapter); FIXME */
248 spin_unlock_irqrestore(&adapter->lock, flags);
249}
250
251static void atlx_restore_vlan(struct atlx_adapter *adapter)
252{
253 atlx_vlan_rx_register(adapter->netdev, adapter->vlgrp);
254}
255
256/*
257 * This is the only thing that needs to be changed to adjust the
258 * maximum number of ports that the driver can manage.
259 */
260#define ATL1_MAX_NIC 4
261
262#define OPTION_UNSET -1
263#define OPTION_DISABLED 0
264#define OPTION_ENABLED 1
265
266#define ATL1_PARAM_INIT { [0 ... ATL1_MAX_NIC] = OPTION_UNSET }
267
268/*
269 * Interrupt Moderator Timer in units of 2 us
270 *
271 * Valid Range: 50-65000
272 *
273 * Default Value: 100 (200us)
274 */
275static int __devinitdata int_mod_timer[ATL1_MAX_NIC+1] = ATL1_PARAM_INIT;
276static int num_int_mod_timer;
277module_param_array_named(int_mod_timer, int_mod_timer, int,
278 &num_int_mod_timer, 0);
279MODULE_PARM_DESC(int_mod_timer, "Interrupt moderator timer");
280
281/*
282 * flash_vendor
283 *
284 * Valid Range: 0-2
285 *
286 * 0 - Atmel
287 * 1 - SST
288 * 2 - ST
289 *
290 * Default Value: 0
291 */
292static int __devinitdata flash_vendor[ATL1_MAX_NIC+1] = ATL1_PARAM_INIT;
293static int num_flash_vendor;
294module_param_array_named(flash_vendor, flash_vendor, int, &num_flash_vendor, 0);
295MODULE_PARM_DESC(flash_vendor, "SPI flash vendor");
296
297#define DEFAULT_INT_MOD_CNT 100 /* 200us */
298#define MAX_INT_MOD_CNT 65000
299#define MIN_INT_MOD_CNT 50
300
301#define FLASH_VENDOR_DEFAULT 0
302#define FLASH_VENDOR_MIN 0
303#define FLASH_VENDOR_MAX 2
304
305struct atl1_option {
306 enum { enable_option, range_option, list_option } type;
307 char *name;
308 char *err;
309 int def;
310 union {
311 struct { /* range_option info */
312 int min;
313 int max;
314 } r;
315 struct { /* list_option info */
316 int nr;
317 struct atl1_opt_list {
318 int i;
319 char *str;
320 } *p;
321 } l;
322 } arg;
323};
324
325static int __devinit atl1_validate_option(int *value, struct atl1_option *opt,
326 struct pci_dev *pdev)
327{
328 if (*value == OPTION_UNSET) {
329 *value = opt->def;
330 return 0;
331 }
332
333 switch (opt->type) {
334 case enable_option:
335 switch (*value) {
336 case OPTION_ENABLED:
337 dev_info(&pdev->dev, "%s enabled\n", opt->name);
338 return 0;
339 case OPTION_DISABLED:
340 dev_info(&pdev->dev, "%s disabled\n", opt->name);
341 return 0;
342 }
343 break;
344 case range_option:
345 if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
346 dev_info(&pdev->dev, "%s set to %i\n", opt->name,
347 *value);
348 return 0;
349 }
350 break;
351 case list_option:{
352 int i;
353 struct atl1_opt_list *ent;
354
355 for (i = 0; i < opt->arg.l.nr; i++) {
356 ent = &opt->arg.l.p[i];
357 if (*value == ent->i) {
358 if (ent->str[0] != '\0')
359 dev_info(&pdev->dev, "%s\n",
360 ent->str);
361 return 0;
362 }
363 }
364 }
365 break;
366
367 default:
368 break;
369 }
370
371 dev_info(&pdev->dev, "invalid %s specified (%i) %s\n",
372 opt->name, *value, opt->err);
373 *value = opt->def;
374 return -1;
375}
376
377/*
378 * atl1_check_options - Range Checking for Command Line Parameters
379 * @adapter: board private structure
380 *
381 * This routine checks all command line parameters for valid user
382 * input. If an invalid value is given, or if no user specified
383 * value exists, a default value is used. The final value is stored
384 * in a variable in the adapter structure.
385 */
386void __devinit atl1_check_options(struct atl1_adapter *adapter)
387{
388 struct pci_dev *pdev = adapter->pdev;
389 int bd = adapter->bd_number;
390 if (bd >= ATL1_MAX_NIC) {
391 dev_notice(&pdev->dev, "no configuration for board#%i\n", bd);
392 dev_notice(&pdev->dev, "using defaults for all values\n");
393 }
394 { /* Interrupt Moderate Timer */
395 struct atl1_option opt = {
396 .type = range_option,
397 .name = "Interrupt Moderator Timer",
398 .err = "using default of "
399 __MODULE_STRING(DEFAULT_INT_MOD_CNT),
400 .def = DEFAULT_INT_MOD_CNT,
401 .arg = {.r = {.min = MIN_INT_MOD_CNT,
402 .max = MAX_INT_MOD_CNT} }
403 };
404 int val;
405 if (num_int_mod_timer > bd) {
406 val = int_mod_timer[bd];
407 atl1_validate_option(&val, &opt, pdev);
408 adapter->imt = (u16) val;
409 } else
410 adapter->imt = (u16) (opt.def);
411 }
412
413 { /* Flash Vendor */
414 struct atl1_option opt = {
415 .type = range_option,
416 .name = "SPI Flash Vendor",
417 .err = "using default of "
418 __MODULE_STRING(FLASH_VENDOR_DEFAULT),
419 .def = FLASH_VENDOR_DEFAULT,
420 .arg = {.r = {.min = FLASH_VENDOR_MIN,
421 .max = FLASH_VENDOR_MAX} }
422 };
423 int val;
424 if (num_flash_vendor > bd) {
425 val = flash_vendor[bd];
426 atl1_validate_option(&val, &opt, pdev);
427 adapter->hw.flash_vendor = (u8) val;
428 } else
429 adapter->hw.flash_vendor = (u8) (opt.def);
430 }
431}
432
433#endif /* ATLX_C */
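
Both module parameters above are validated as range_options, so the list_option branch of atl1_validate_option() is never exercised in this file. Purely as a hedged illustration of how that branch is driven — not proposed driver code, and with hypothetical names for the table and option — flash_vendor could equally be described as a list option:

static struct atl1_opt_list flash_vendor_list[] = {
	{ 0, "SPI flash vendor set to Atmel" },
	{ 1, "SPI flash vendor set to SST" },
	{ 2, "SPI flash vendor set to ST" },
};

static struct atl1_option flash_vendor_list_opt = {
	.type = list_option,
	.name = "SPI Flash Vendor",
	.err = "using default of " __MODULE_STRING(FLASH_VENDOR_DEFAULT),
	.def = FLASH_VENDOR_DEFAULT,
	.arg = { .l = { .nr = ARRAY_SIZE(flash_vendor_list),
			.p = flash_vendor_list } },
};

/* used exactly like the range options in atl1_check_options():
 *
 *	int val = flash_vendor[bd];
 *	atl1_validate_option(&val, &flash_vendor_list_opt, pdev);
 *	adapter->hw.flash_vendor = (u8)val;
 */
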
diff --git a/drivers/net/atlx/atlx.h b/drivers/net/atlx/atlx.h
new file mode 100644
index 000000000000..3be7c09734d4
--- /dev/null
+++ b/drivers/net/atlx/atlx.h
@@ -0,0 +1,506 @@
1/* atlx.h -- common hardware definitions for Attansic network drivers
2 *
3 * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved.
4 * Copyright(c) 2006 - 2007 Chris Snook <csnook@redhat.com>
5 * Copyright(c) 2006 Jay Cliburn <jcliburn@gmail.com>
6 * Copyright(c) 2007 Atheros Corporation. All rights reserved.
7 *
8 * Derived from Intel e1000 driver
9 * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
10 *
11 * This program is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the Free
13 * Software Foundation; either version 2 of the License, or (at your option)
14 * any later version.
15 *
16 * This program is distributed in the hope that it will be useful, but WITHOUT
17 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
19 * more details.
20 *
21 * You should have received a copy of the GNU General Public License along with
22 * this program; if not, write to the Free Software Foundation, Inc., 59
23 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
24 */
25
26#ifndef ATLX_H
27#define ATLX_H
28
29#include <linux/module.h>
30#include <linux/types.h>
31
32#define ATLX_DRIVER_VERSION "2.1.1"
33MODULE_AUTHOR("Xiong Huang <xiong.huang@atheros.com>, \
34 Chris Snook <csnook@redhat.com>, Jay Cliburn <jcliburn@gmail.com>");
35MODULE_LICENSE("GPL");
36MODULE_VERSION(ATLX_DRIVER_VERSION);
37
38#define ATLX_ERR_PHY 2
39#define ATLX_ERR_PHY_SPEED 7
40#define ATLX_ERR_PHY_RES 8
41
42#define SPEED_0 0xffff
43#define SPEED_10 10
44#define SPEED_100 100
45#define SPEED_1000 1000
46#define HALF_DUPLEX 1
47#define FULL_DUPLEX 2
48
49#define MEDIA_TYPE_AUTO_SENSOR 0
50
51/* register definitions */
52#define REG_PM_CTRLSTAT 0x44
53
54#define REG_PCIE_CAP_LIST 0x58
55
56#define REG_VPD_CAP 0x6C
57#define VPD_CAP_ID_MASK 0xFF
58#define VPD_CAP_ID_SHIFT 0
59#define VPD_CAP_NEXT_PTR_MASK 0xFF
60#define VPD_CAP_NEXT_PTR_SHIFT 8
61#define VPD_CAP_VPD_ADDR_MASK 0x7FFF
62#define VPD_CAP_VPD_ADDR_SHIFT 16
63#define VPD_CAP_VPD_FLAG 0x80000000
64
65#define REG_VPD_DATA 0x70
66
67#define REG_SPI_FLASH_CTRL 0x200
68#define SPI_FLASH_CTRL_STS_NON_RDY 0x1
69#define SPI_FLASH_CTRL_STS_WEN 0x2
70#define SPI_FLASH_CTRL_STS_WPEN 0x80
71#define SPI_FLASH_CTRL_DEV_STS_MASK 0xFF
72#define SPI_FLASH_CTRL_DEV_STS_SHIFT 0
73#define SPI_FLASH_CTRL_INS_MASK 0x7
74#define SPI_FLASH_CTRL_INS_SHIFT 8
75#define SPI_FLASH_CTRL_START 0x800
76#define SPI_FLASH_CTRL_EN_VPD 0x2000
77#define SPI_FLASH_CTRL_LDSTART 0x8000
78#define SPI_FLASH_CTRL_CS_HI_MASK 0x3
79#define SPI_FLASH_CTRL_CS_HI_SHIFT 16
80#define SPI_FLASH_CTRL_CS_HOLD_MASK 0x3
81#define SPI_FLASH_CTRL_CS_HOLD_SHIFT 18
82#define SPI_FLASH_CTRL_CLK_LO_MASK 0x3
83#define SPI_FLASH_CTRL_CLK_LO_SHIFT 20
84#define SPI_FLASH_CTRL_CLK_HI_MASK 0x3
85#define SPI_FLASH_CTRL_CLK_HI_SHIFT 22
86#define SPI_FLASH_CTRL_CS_SETUP_MASK 0x3
87#define SPI_FLASH_CTRL_CS_SETUP_SHIFT 24
88#define SPI_FLASH_CTRL_EROM_PGSZ_MASK 0x3
89#define SPI_FLASH_CTRL_EROM_PGSZ_SHIFT 26
90#define SPI_FLASH_CTRL_WAIT_READY 0x10000000
91
92#define REG_SPI_ADDR 0x204
93
94#define REG_SPI_DATA 0x208
95
96#define REG_SPI_FLASH_CONFIG 0x20C
97#define SPI_FLASH_CONFIG_LD_ADDR_MASK 0xFFFFFF
98#define SPI_FLASH_CONFIG_LD_ADDR_SHIFT 0
99#define SPI_FLASH_CONFIG_VPD_ADDR_MASK 0x3
100#define SPI_FLASH_CONFIG_VPD_ADDR_SHIFT 24
101#define SPI_FLASH_CONFIG_LD_EXIST 0x4000000
102
103#define REG_SPI_FLASH_OP_PROGRAM 0x210
104#define REG_SPI_FLASH_OP_SC_ERASE 0x211
105#define REG_SPI_FLASH_OP_CHIP_ERASE 0x212
106#define REG_SPI_FLASH_OP_RDID 0x213
107#define REG_SPI_FLASH_OP_WREN 0x214
108#define REG_SPI_FLASH_OP_RDSR 0x215
109#define REG_SPI_FLASH_OP_WRSR 0x216
110#define REG_SPI_FLASH_OP_READ 0x217
111
112#define REG_TWSI_CTRL 0x218
113#define TWSI_CTRL_LD_OFFSET_MASK 0xFF
114#define TWSI_CTRL_LD_OFFSET_SHIFT 0
115#define TWSI_CTRL_LD_SLV_ADDR_MASK 0x7
116#define TWSI_CTRL_LD_SLV_ADDR_SHIFT 8
117#define TWSI_CTRL_SW_LDSTART 0x800
118#define TWSI_CTRL_HW_LDSTART 0x1000
119#define TWSI_CTRL_SMB_SLV_ADDR_MASK 0x7F
120#define TWSI_CTRL_SMB_SLV_ADDR_SHIFT 15
121#define TWSI_CTRL_LD_EXIST 0x400000
122#define TWSI_CTRL_READ_FREQ_SEL_MASK 0x3
123#define TWSI_CTRL_READ_FREQ_SEL_SHIFT 23
124#define TWSI_CTRL_FREQ_SEL_100K 0
125#define TWSI_CTRL_FREQ_SEL_200K 1
126#define TWSI_CTRL_FREQ_SEL_300K 2
127#define TWSI_CTRL_FREQ_SEL_400K 3
128#define TWSI_CTRL_SMB_SLV_ADDR /* FIXME: define or remove */
129#define TWSI_CTRL_WRITE_FREQ_SEL_MASK 0x3
130#define TWSI_CTRL_WRITE_FREQ_SEL_SHIFT 24
131
132#define REG_PCIE_DEV_MISC_CTRL 0x21C
133#define PCIE_DEV_MISC_CTRL_EXT_PIPE 0x2
134#define PCIE_DEV_MISC_CTRL_RETRY_BUFDIS 0x1
135#define PCIE_DEV_MISC_CTRL_SPIROM_EXIST 0x4
136#define PCIE_DEV_MISC_CTRL_SERDES_ENDIAN 0x8
137#define PCIE_DEV_MISC_CTRL_SERDES_SEL_DIN 0x10
138
139#define REG_PCIE_PHYMISC 0x1000
140#define PCIE_PHYMISC_FORCE_RCV_DET 0x4
141
142#define REG_PCIE_DLL_TX_CTRL1 0x1104
143#define PCIE_DLL_TX_CTRL1_SEL_NOR_CLK 0x400
144#define PCIE_DLL_TX_CTRL1_DEF 0x568
145
146#define REG_LTSSM_TEST_MODE 0x12FC
147#define LTSSM_TEST_MODE_DEF 0x6500
148
149/* Master Control Register */
150#define REG_MASTER_CTRL 0x1400
151#define MASTER_CTRL_SOFT_RST 0x1
152#define MASTER_CTRL_MTIMER_EN 0x2
153#define MASTER_CTRL_ITIMER_EN 0x4
154#define MASTER_CTRL_MANUAL_INT 0x8
155#define MASTER_CTRL_REV_NUM_SHIFT 16
156#define MASTER_CTRL_REV_NUM_MASK 0xFF
157#define MASTER_CTRL_DEV_ID_SHIFT 24
158#define MASTER_CTRL_DEV_ID_MASK 0xFF
159
160/* Timer Initial Value Register */
161#define REG_MANUAL_TIMER_INIT 0x1404
162
163/* IRQ Moderator Timer Initial Value Register */
164#define REG_IRQ_MODU_TIMER_INIT 0x1408
165
166#define REG_PHY_ENABLE 0x140C
167
168/* IRQ Anti-Lost Timer Initial Value Register */
169#define REG_CMBDISDMA_TIMER 0x140E
170
171/* Block IDLE Status Register */
172#define REG_IDLE_STATUS 0x1410
173
174/* MDIO Control Register */
175#define REG_MDIO_CTRL 0x1414
176#define MDIO_DATA_MASK 0xFFFF
177#define MDIO_DATA_SHIFT 0
178#define MDIO_REG_ADDR_MASK 0x1F
179#define MDIO_REG_ADDR_SHIFT 16
180#define MDIO_RW 0x200000
181#define MDIO_SUP_PREAMBLE 0x400000
182#define MDIO_START 0x800000
183#define MDIO_CLK_SEL_SHIFT 24
184#define MDIO_CLK_25_4 0
185#define MDIO_CLK_25_6 2
186#define MDIO_CLK_25_8 3
187#define MDIO_CLK_25_10 4
188#define MDIO_CLK_25_14 5
189#define MDIO_CLK_25_20 6
190#define MDIO_CLK_25_28 7
191#define MDIO_BUSY 0x8000000
192
193/* MII PHY Status Register */
194#define REG_PHY_STATUS 0x1418
195
196/* BIST Control and Status Register0 (for the Packet Memory) */
197#define REG_BIST0_CTRL 0x141C
198#define BIST0_NOW 0x1
199#define BIST0_SRAM_FAIL 0x2
200#define BIST0_FUSE_FLAG 0x4
201#define REG_BIST1_CTRL 0x1420
202#define BIST1_NOW 0x1
203#define BIST1_SRAM_FAIL 0x2
204#define BIST1_FUSE_FLAG 0x4
205
206/* SerDes Lock Detect Control and Status Register */
207#define REG_SERDES_LOCK 0x1424
208#define SERDES_LOCK_DETECT 1
209#define SERDES_LOCK_DETECT_EN 2
210
211/* MAC Control Register */
212#define REG_MAC_CTRL 0x1480
213#define MAC_CTRL_TX_EN 1
214#define MAC_CTRL_RX_EN 2
215#define MAC_CTRL_TX_FLOW 4
216#define MAC_CTRL_RX_FLOW 8
217#define MAC_CTRL_LOOPBACK 0x10
218#define MAC_CTRL_DUPLX 0x20
219#define MAC_CTRL_ADD_CRC 0x40
220#define MAC_CTRL_PAD 0x80
221#define MAC_CTRL_LENCHK 0x100
222#define MAC_CTRL_HUGE_EN 0x200
223#define MAC_CTRL_PRMLEN_SHIFT 10
224#define MAC_CTRL_PRMLEN_MASK 0xF
225#define MAC_CTRL_RMV_VLAN 0x4000
226#define MAC_CTRL_PROMIS_EN 0x8000
227#define MAC_CTRL_MC_ALL_EN 0x2000000
228#define MAC_CTRL_BC_EN 0x4000000
229
230/* MAC IPG/IFG Control Register */
231#define REG_MAC_IPG_IFG 0x1484
232#define MAC_IPG_IFG_IPGT_SHIFT 0
233#define MAC_IPG_IFG_IPGT_MASK 0x7F
234#define MAC_IPG_IFG_MIFG_SHIFT 8
235#define MAC_IPG_IFG_MIFG_MASK 0xFF
236#define MAC_IPG_IFG_IPGR1_SHIFT 16
237#define MAC_IPG_IFG_IPGR1_MASK 0x7F
238#define MAC_IPG_IFG_IPGR2_SHIFT 24
239#define MAC_IPG_IFG_IPGR2_MASK 0x7F
240
241/* MAC STATION ADDRESS */
242#define REG_MAC_STA_ADDR 0x1488
243
244/* Hash table for multicast address */
245#define REG_RX_HASH_TABLE 0x1490
246
247/* MAC Half-Duplex Control Register */
248#define REG_MAC_HALF_DUPLX_CTRL 0x1498
249#define MAC_HALF_DUPLX_CTRL_LCOL_SHIFT 0
250#define MAC_HALF_DUPLX_CTRL_LCOL_MASK 0x3FF
251#define MAC_HALF_DUPLX_CTRL_RETRY_SHIFT 12
252#define MAC_HALF_DUPLX_CTRL_RETRY_MASK 0xF
253#define MAC_HALF_DUPLX_CTRL_EXC_DEF_EN 0x10000
254#define MAC_HALF_DUPLX_CTRL_NO_BACK_C 0x20000
255#define MAC_HALF_DUPLX_CTRL_NO_BACK_P 0x40000
256#define MAC_HALF_DUPLX_CTRL_ABEBE 0x80000
257#define MAC_HALF_DUPLX_CTRL_ABEBT_SHIFT 20
258#define MAC_HALF_DUPLX_CTRL_ABEBT_MASK 0xF
259#define MAC_HALF_DUPLX_CTRL_JAMIPG_SHIFT 24
260#define MAC_HALF_DUPLX_CTRL_JAMIPG_MASK 0xF
261
262/* Maximum Frame Length Control Register */
263#define REG_MTU 0x149C
264
265/* Wake-On-Lan control register */
266#define REG_WOL_CTRL 0x14A0
267#define WOL_PATTERN_EN 0x1
268#define WOL_PATTERN_PME_EN 0x2
269#define WOL_MAGIC_EN 0x4
270#define WOL_MAGIC_PME_EN 0x8
271#define WOL_LINK_CHG_EN 0x10
272#define WOL_LINK_CHG_PME_EN 0x20
273#define WOL_PATTERN_ST 0x100
274#define WOL_MAGIC_ST 0x200
275#define WOL_LINKCHG_ST 0x400
276#define WOL_PT0_EN 0x10000
277#define WOL_PT1_EN 0x20000
278#define WOL_PT2_EN 0x40000
279#define WOL_PT3_EN 0x80000
280#define WOL_PT4_EN 0x100000
281#define WOL_PT0_MATCH 0x1000000
282#define WOL_PT1_MATCH 0x2000000
283#define WOL_PT2_MATCH 0x4000000
284#define WOL_PT3_MATCH 0x8000000
285#define WOL_PT4_MATCH 0x10000000
286
287/* Internal SRAM Partition Register, high 32 bits */
288#define REG_SRAM_RFD_ADDR 0x1500
289
290/* Descriptor Control register, high 32 bits */
291#define REG_DESC_BASE_ADDR_HI 0x1540
292
293/* Interrupt Status Register */
294#define REG_ISR 0x1600
295#define ISR_UR_DETECTED 0x1000000
296#define ISR_FERR_DETECTED 0x2000000
297#define ISR_NFERR_DETECTED 0x4000000
298#define ISR_CERR_DETECTED 0x8000000
299#define ISR_PHY_LINKDOWN 0x10000000
300#define ISR_DIS_INT 0x80000000
301
302/* Interrupt Mask Register */
303#define REG_IMR 0x1604
304
305#define REG_RFD_RRD_IDX 0x1800
306#define REG_TPD_IDX 0x1804
307
308/* MII definitions */
309
310/* PHY Common Register */
311#define MII_ATLX_CR 0x09
312#define MII_ATLX_SR 0x0A
313#define MII_ATLX_ESR 0x0F
314#define MII_ATLX_PSCR 0x10
315#define MII_ATLX_PSSR 0x11
316
317/* PHY Control Register */
318#define MII_CR_SPEED_SELECT_MSB 0x0040 /* bits 6,13: 10=1000, 01=100,
319 * 00=10
320 */
321#define MII_CR_COLL_TEST_ENABLE 0x0080 /* Collision test enable */
322#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */
323#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */
324#define MII_CR_ISOLATE 0x0400 /* Isolate PHY from MII */
325#define MII_CR_POWER_DOWN 0x0800 /* Power down */
326#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */
327#define MII_CR_SPEED_SELECT_LSB 0x2000 /* bits 6,13: 10=1000, 01=100,
328 * 00=10
329 */
330#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */
331#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */
332#define MII_CR_SPEED_MASK 0x2040
333#define MII_CR_SPEED_1000 0x0040
334#define MII_CR_SPEED_100 0x2000
335#define MII_CR_SPEED_10 0x0000
336
337/* PHY Status Register */
338#define MII_SR_EXTENDED_CAPS 0x0001 /* Ext register capabilities */
339#define MII_SR_JABBER_DETECT 0x0002 /* Jabber Detected */
340#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */
341#define MII_SR_AUTONEG_CAPS 0x0008 /* Auto Neg Capable */
342#define MII_SR_REMOTE_FAULT 0x0010 /* Remote Fault Detect */
343#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */
344#define MII_SR_PREAMBLE_SUPPRESS 0x0040 /* Preamble may be suppressed */
345#define MII_SR_EXTENDED_STATUS 0x0100 /* Ext stat info in Reg 0x0F */
346#define MII_SR_100T2_HD_CAPS 0x0200 /* 100T2 Half Duplex Capable */
347#define MII_SR_100T2_FD_CAPS 0x0400 /* 100T2 Full Duplex Capable */
348#define MII_SR_10T_HD_CAPS 0x0800 /* 10T Half Duplex Capable */
349#define MII_SR_10T_FD_CAPS 0x1000 /* 10T Full Duplex Capable */
350#define MII_SR_100X_HD_CAPS 0x2000 /* 100X Half Duplex Capable */
351#define MII_SR_100X_FD_CAPS 0x4000 /* 100X Full Duplex Capable */
352#define MII_SR_100T4_CAPS 0x8000 /* 100T4 Capable */
353
354/* Link partner ability register */
355#define MII_LPA_SLCT 0x001f /* Same as advertise selector */
356#define MII_LPA_10HALF 0x0020 /* Can do 10mbps half-duplex */
357#define MII_LPA_10FULL 0x0040 /* Can do 10mbps full-duplex */
358#define MII_LPA_100HALF 0x0080 /* Can do 100mbps half-duplex */
359#define MII_LPA_100FULL 0x0100 /* Can do 100mbps full-duplex */
360#define MII_LPA_100BASE4 0x0200 /* 100BASE-T4 */
361#define MII_LPA_PAUSE 0x0400 /* PAUSE */
362#define MII_LPA_ASYPAUSE 0x0800 /* Asymmetrical PAUSE */
363#define MII_LPA_RFAULT 0x2000 /* Link partner faulted */
364#define MII_LPA_LPACK 0x4000 /* Link partner acked us */
365#define MII_LPA_NPAGE 0x8000 /* Next page bit */
366
367/* Autoneg Advertisement Register */
368#define MII_AR_SELECTOR_FIELD 0x0001 /* IEEE 802.3 CSMA/CD */
369#define MII_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */
370#define MII_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */
371#define MII_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */
372#define MII_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */
373#define MII_AR_100T4_CAPS 0x0200 /* 100T4 Capable */
374#define MII_AR_PAUSE 0x0400 /* Pause operation desired */
375#define MII_AR_ASM_DIR 0x0800 /* Asymmetric Pause Dir bit */
376#define MII_AR_REMOTE_FAULT 0x2000 /* Remote Fault detected */
377#define MII_AR_NEXT_PAGE 0x8000 /* Next Page ability support */
378#define MII_AR_SPEED_MASK 0x01E0
379#define MII_AR_DEFAULT_CAP_MASK 0x0DE0
380
381/* 1000BASE-T Control Register */
382#define MII_ATLX_CR_1000T_HD_CAPS 0x0100 /* Adv 1000T HD cap */
383#define MII_ATLX_CR_1000T_FD_CAPS 0x0200 /* Adv 1000T FD cap */
384#define MII_ATLX_CR_1000T_REPEATER_DTE 0x0400 /* 1=Repeater/switch device,
385 * 0=DTE device */
386#define MII_ATLX_CR_1000T_MS_VALUE 0x0800 /* 1=Config PHY as Master,
387 * 0=Configure PHY as Slave */
388#define MII_ATLX_CR_1000T_MS_ENABLE 0x1000 /* 1=Man Master/Slave config,
389 * 0=Auto Master/Slave config
390 */
391#define MII_ATLX_CR_1000T_TEST_MODE_NORMAL 0x0000 /* Normal Operation */
392#define MII_ATLX_CR_1000T_TEST_MODE_1 0x2000 /* Transmit Waveform test */
393#define MII_ATLX_CR_1000T_TEST_MODE_2 0x4000 /* Master Xmit Jitter test */
394#define MII_ATLX_CR_1000T_TEST_MODE_3 0x6000 /* Slave Xmit Jitter test */
395#define MII_ATLX_CR_1000T_TEST_MODE_4 0x8000 /* Xmitter Distortion test */
396#define MII_ATLX_CR_1000T_SPEED_MASK 0x0300
397#define MII_ATLX_CR_1000T_DEFAULT_CAP_MASK 0x0300
398
399/* 1000BASE-T Status Register */
400#define MII_ATLX_SR_1000T_LP_HD_CAPS 0x0400 /* LP is 1000T HD capable */
401#define MII_ATLX_SR_1000T_LP_FD_CAPS 0x0800 /* LP is 1000T FD capable */
402#define MII_ATLX_SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */
403#define MII_ATLX_SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */
404#define MII_ATLX_SR_1000T_MS_CONFIG_RES 0x4000 /* 1=Local TX is Master
405 * 0=Slave
406 */
407#define MII_ATLX_SR_1000T_MS_CONFIG_FAULT 0x8000 /* Master/Slave config
408 * fault */
409#define MII_ATLX_SR_1000T_REMOTE_RX_STATUS_SHIFT 12
410#define MII_ATLX_SR_1000T_LOCAL_RX_STATUS_SHIFT 13
411
412/* Extended Status Register */
413#define MII_ATLX_ESR_1000T_HD_CAPS 0x1000 /* 1000T HD capable */
414#define MII_ATLX_ESR_1000T_FD_CAPS 0x2000 /* 1000T FD capable */
415#define MII_ATLX_ESR_1000X_HD_CAPS 0x4000 /* 1000X HD capable */
416#define MII_ATLX_ESR_1000X_FD_CAPS 0x8000 /* 1000X FD capable */
417
418/* ATLX PHY Specific Control Register */
419#define MII_ATLX_PSCR_JABBER_DISABLE 0x0001 /* 1=Jabber Func disabled */
420#define MII_ATLX_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reversal enbld */
421#define MII_ATLX_PSCR_SQE_TEST 0x0004 /* 1=SQE Test enabled */
422#define MII_ATLX_PSCR_MAC_POWERDOWN 0x0008
423#define MII_ATLX_PSCR_CLK125_DISABLE 0x0010 /* 1=CLK125 low
424 * 0=CLK125 toggling
425 */
426#define MII_ATLX_PSCR_MDI_MANUAL_MODE 0x0000 /* MDI Crossover Mode bits 6:5,
427 * Manual MDI configuration
428 */
429#define MII_ATLX_PSCR_MDIX_MANUAL_MODE 0x0020 /* Manual MDIX configuration */
430#define MII_ATLX_PSCR_AUTO_X_1000T 0x0040 /* 1000BASE-T: Auto crossover
431 * 100BASE-TX/10BASE-T: MDI
432 * Mode */
433#define MII_ATLX_PSCR_AUTO_X_MODE 0x0060 /* Auto crossover enabled
434 * all speeds.
435 */
436#define MII_ATLX_PSCR_10BT_EXT_DIST_ENABLE 0x0080 /* 1=Enable Extended
437 * 10BASE-T distance
438 * (Lower 10BASE-T RX
439 * Threshold)
440 * 0=Normal 10BASE-T RX
441 * Threshold
442 */
443#define MII_ATLX_PSCR_MII_5BIT_ENABLE 0x0100 /* 1=5-Bit interface in
444 * 100BASE-TX
445 * 0=MII interface in
446 * 100BASE-TX
447 */
448#define MII_ATLX_PSCR_SCRAMBLER_DISABLE 0x0200 /* 1=Scrambler dsbl */
449#define MII_ATLX_PSCR_FORCE_LINK_GOOD 0x0400 /* 1=Force link good */
450#define MII_ATLX_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Transmit */
451#define MII_ATLX_PSCR_POLARITY_REVERSAL_SHIFT 1
452#define MII_ATLX_PSCR_AUTO_X_MODE_SHIFT 5
453#define MII_ATLX_PSCR_10BT_EXT_DIST_ENABLE_SHIFT 7
454
455/* ATLX PHY Specific Status Register */
456#define MII_ATLX_PSSR_SPD_DPLX_RESOLVED 0x0800 /* 1=Speed & Duplex resolved */
457#define MII_ATLX_PSSR_DPLX 0x2000 /* 1=Full Duplex, 0=Half Duplex */
458#define MII_ATLX_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */
459#define MII_ATLX_PSSR_10MBS 0x0000 /* 00=10 Mbps */
460#define MII_ATLX_PSSR_100MBS 0x4000 /* 01=100 Mbps */
461#define MII_ATLX_PSSR_1000MBS 0x8000 /* 10=1000 Mbps */
462
463/* PCI Command Register Bit Definitions */
464#define PCI_REG_COMMAND 0x04 /* PCI Command Register */
465#define CMD_IO_SPACE 0x0001
466#define CMD_MEMORY_SPACE 0x0002
467#define CMD_BUS_MASTER 0x0004
468
469/* Wake Up Filter Control */
470#define ATLX_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
471#define ATLX_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */
472#define ATLX_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */
473#define ATLX_WUFC_MC 0x00000008 /* Multicast Wakeup Enable */
474#define ATLX_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */
475
476#define ADVERTISE_10_HALF 0x0001
477#define ADVERTISE_10_FULL 0x0002
478#define ADVERTISE_100_HALF 0x0004
479#define ADVERTISE_100_FULL 0x0008
480#define ADVERTISE_1000_HALF 0x0010
481#define ADVERTISE_1000_FULL 0x0020
482#define AUTONEG_ADVERTISE_10_100_ALL 0x000F /* All 10/100 speeds */
483#define AUTONEG_ADVERTISE_10_ALL 0x0003 /* 10Mbps Full & Half speeds */
484
485#define PHY_AUTO_NEG_TIME 45 /* 4.5 Seconds */
486#define PHY_FORCE_TIME 20 /* 2.0 Seconds */
487
488/* For checksumming, the sum of all words in the EEPROM should equal 0xBABA */
489#define EEPROM_SUM 0xBABA
490#define NODE_ADDRESS_SIZE 6
491
492struct atlx_spi_flash_dev {
493 const char *manu_name; /* manufacturer id */
494 /* op-code */
495 u8 cmd_wrsr;
496 u8 cmd_read;
497 u8 cmd_program;
498 u8 cmd_wren;
499 u8 cmd_wrdi;
500 u8 cmd_rdsr;
501 u8 cmd_rdid;
502 u8 cmd_sector_erase;
503 u8 cmd_chip_erase;
504};
505
506#endif /* ATLX_H */
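
The register bit definitions in this header follow a uniform FIELD_MASK/FIELD_SHIFT convention. The sketch below shows how such pairs are typically combined when programming a register, using a PHY register read through REG_MDIO_CTRL as the example. It is an illustration under stated assumptions: the driver's real PHY accessors live in atlx/atl1.c, the 'example_' name and the polling bound are made up, and <linux/io.h> and <linux/delay.h> are assumed to be included.

static int example_mdio_read(u8 __iomem *hw_addr, u32 reg_addr, u16 *phy_data)
{
	u32 val;
	int i;

	/* compose the command word from the MASK/SHIFT pairs above */
	val = ((reg_addr & MDIO_REG_ADDR_MASK) << MDIO_REG_ADDR_SHIFT) |
	      MDIO_SUP_PREAMBLE | MDIO_START | MDIO_RW |
	      (MDIO_CLK_25_4 << MDIO_CLK_SEL_SHIFT);
	iowrite32(val, hw_addr + REG_MDIO_CTRL);

	/* wait for the MAC to clear the START and BUSY bits */
	for (i = 0; i < 60; i++) {
		udelay(10);
		val = ioread32(hw_addr + REG_MDIO_CTRL);
		if (!(val & (MDIO_START | MDIO_BUSY))) {
			*phy_data = (val >> MDIO_DATA_SHIFT) & MDIO_DATA_MASK;
			return 0;
		}
	}
	return ATLX_ERR_PHY;
}
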
diff --git a/drivers/net/atp.c b/drivers/net/atp.c
index 62f09e59d9c4..3d4433358a36 100644
--- a/drivers/net/atp.c
+++ b/drivers/net/atp.c
@@ -378,8 +378,8 @@ static void __init get_node_ID(struct net_device *dev)
378 sa_offset = 15; 378 sa_offset = 15;
379 379
380 for (i = 0; i < 3; i++) 380 for (i = 0; i < 3; i++)
381 ((u16 *)dev->dev_addr)[i] = 381 ((__be16 *)dev->dev_addr)[i] =
382 be16_to_cpu(eeprom_op(ioaddr, EE_READ(sa_offset + i))); 382 cpu_to_be16(eeprom_op(ioaddr, EE_READ(sa_offset + i)));
383 383
384 write_reg(ioaddr, CMR2, CMR2_NULL); 384 write_reg(ioaddr, CMR2, CMR2_NULL);
385} 385}
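
The atp.c hunk above changes annotations rather than behaviour: cpu_to_be16() and be16_to_cpu() perform the same 16-bit swap on little-endian hosts (and both are no-ops on big-endian), so the stored MAC bytes are unchanged. What the patch fixes is the declared direction of the conversion — dev_addr holds the station address in wire (big-endian) order, so the destination is the __be16 quantity and the conversion is cpu-to-be, which lets sparse verify the types. A minimal sketch of that pattern, with placeholder names rather than the atp.c variables:

/* store one EEPROM word (CPU byte order) into a MAC address buffer
 * that is kept in network byte order; sparse can then verify that a
 * __be16 value is what gets stored
 */
static void example_store_mac_word(u8 *dev_addr, int i, u16 eeprom_word)
{
	__be16 *mac = (__be16 *)dev_addr;

	mac[i] = cpu_to_be16(eeprom_word);
}
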
diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c
index 504b7ce2747d..3634b5fd7919 100644
--- a/drivers/net/au1000_eth.c
+++ b/drivers/net/au1000_eth.c
@@ -701,7 +701,7 @@ static struct net_device * au1000_probe(int port_num)
701 aup->mii_bus.write = mdiobus_write; 701 aup->mii_bus.write = mdiobus_write;
702 aup->mii_bus.reset = mdiobus_reset; 702 aup->mii_bus.reset = mdiobus_reset;
703 aup->mii_bus.name = "au1000_eth_mii"; 703 aup->mii_bus.name = "au1000_eth_mii";
704 aup->mii_bus.id = aup->mac_id; 704 snprintf(aup->mii_bus.id, MII_BUS_ID_SIZE, "%x", aup->mac_id);
705 aup->mii_bus.irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL); 705 aup->mii_bus.irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
706 for(i = 0; i < PHY_MAX_ADDR; ++i) 706 for(i = 0; i < PHY_MAX_ADDR; ++i)
707 aup->mii_bus.irq[i] = PHY_POLL; 707 aup->mii_bus.irq[i] = PHY_POLL;
@@ -709,11 +709,11 @@ static struct net_device * au1000_probe(int port_num)
709 /* if known, set corresponding PHY IRQs */ 709 /* if known, set corresponding PHY IRQs */
710#if defined(AU1XXX_PHY_STATIC_CONFIG) 710#if defined(AU1XXX_PHY_STATIC_CONFIG)
711# if defined(AU1XXX_PHY0_IRQ) 711# if defined(AU1XXX_PHY0_IRQ)
712 if (AU1XXX_PHY0_BUSID == aup->mii_bus.id) 712 if (AU1XXX_PHY0_BUSID == aup->mac_id)
713 aup->mii_bus.irq[AU1XXX_PHY0_ADDR] = AU1XXX_PHY0_IRQ; 713 aup->mii_bus.irq[AU1XXX_PHY0_ADDR] = AU1XXX_PHY0_IRQ;
714# endif 714# endif
715# if defined(AU1XXX_PHY1_IRQ) 715# if defined(AU1XXX_PHY1_IRQ)
716 if (AU1XXX_PHY1_BUSID == aup->mii_bus.id) 716 if (AU1XXX_PHY1_BUSID == aup->mac_id)
717 aup->mii_bus.irq[AU1XXX_PHY1_ADDR] = AU1XXX_PHY1_IRQ; 717 aup->mii_bus.irq[AU1XXX_PHY1_ADDR] = AU1XXX_PHY1_IRQ;
718# endif 718# endif
719#endif 719#endif
diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c
index 26b2dd5016cd..717dcc1aa1e9 100644
--- a/drivers/net/bfin_mac.c
+++ b/drivers/net/bfin_mac.c
@@ -969,7 +969,7 @@ static int __init bf537mac_probe(struct net_device *dev)
969 lp->mii_bus.write = mdiobus_write; 969 lp->mii_bus.write = mdiobus_write;
970 lp->mii_bus.reset = mdiobus_reset; 970 lp->mii_bus.reset = mdiobus_reset;
971 lp->mii_bus.name = "bfin_mac_mdio"; 971 lp->mii_bus.name = "bfin_mac_mdio";
972 lp->mii_bus.id = 0; 972 snprintf(lp->mii_bus.id, MII_BUS_ID_SIZE, "0");
973 lp->mii_bus.irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL); 973 lp->mii_bus.irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
974 for (i = 0; i < PHY_MAX_ADDR; ++i) 974 for (i = 0; i < PHY_MAX_ADDR; ++i)
975 lp->mii_bus.irq[i] = PHY_POLL; 975 lp->mii_bus.irq[i] = PHY_POLL;
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index d16e0e1d2b30..ebb539e090c3 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -2429,7 +2429,7 @@ int bond_3ad_lacpdu_recv(struct sk_buff *skb, struct net_device *dev, struct pac
2429 struct slave *slave = NULL; 2429 struct slave *slave = NULL;
2430 int ret = NET_RX_DROP; 2430 int ret = NET_RX_DROP;
2431 2431
2432 if (dev->nd_net != &init_net) 2432 if (dev_net(dev) != &init_net)
2433 goto out; 2433 goto out;
2434 2434
2435 if (!(dev->flags & IFF_MASTER)) 2435 if (!(dev->flags & IFF_MASTER))
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 3f58c3d0b710..5a673725471c 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -345,7 +345,7 @@ static int rlb_arp_recv(struct sk_buff *skb, struct net_device *bond_dev, struct
345 struct arp_pkt *arp = (struct arp_pkt *)skb->data; 345 struct arp_pkt *arp = (struct arp_pkt *)skb->data;
346 int res = NET_RX_DROP; 346 int res = NET_RX_DROP;
347 347
348 if (bond_dev->nd_net != &init_net) 348 if (dev_net(bond_dev) != &init_net)
349 goto out; 349 goto out;
350 350
351 if (!(bond_dev->flags & IFF_MASTER)) 351 if (!(bond_dev->flags & IFF_MASTER))
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 0f0675319e9c..6e91b4b7aabb 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -2629,7 +2629,7 @@ static int bond_arp_rcv(struct sk_buff *skb, struct net_device *dev, struct pack
2629 unsigned char *arp_ptr; 2629 unsigned char *arp_ptr;
2630 __be32 sip, tip; 2630 __be32 sip, tip;
2631 2631
2632 if (dev->nd_net != &init_net) 2632 if (dev_net(dev) != &init_net)
2633 goto out; 2633 goto out;
2634 2634
2635 if (!(dev->priv_flags & IFF_BONDING) || !(dev->flags & IFF_MASTER)) 2635 if (!(dev->priv_flags & IFF_BONDING) || !(dev->flags & IFF_MASTER))
@@ -2646,10 +2646,7 @@ static int bond_arp_rcv(struct sk_buff *skb, struct net_device *dev, struct pack
2646 if (!slave || !slave_do_arp_validate(bond, slave)) 2646 if (!slave || !slave_do_arp_validate(bond, slave))
2647 goto out_unlock; 2647 goto out_unlock;
2648 2648
2649 /* ARP header, plus 2 device addresses, plus 2 IP addresses. */ 2649 if (!pskb_may_pull(skb, arp_hdr_len(dev)))
2650 if (!pskb_may_pull(skb, (sizeof(struct arphdr) +
2651 (2 * dev->addr_len) +
2652 (2 * sizeof(u32)))))
2653 goto out_unlock; 2650 goto out_unlock;
2654 2651
2655 arp = arp_hdr(skb); 2652 arp = arp_hdr(skb);
@@ -3068,8 +3065,6 @@ out:
3068 3065
3069#ifdef CONFIG_PROC_FS 3066#ifdef CONFIG_PROC_FS
3070 3067
3071#define SEQ_START_TOKEN ((void *)1)
3072
3073static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos) 3068static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos)
3074{ 3069{
3075 struct bonding *bond = seq->private; 3070 struct bonding *bond = seq->private;
@@ -3473,7 +3468,7 @@ static int bond_netdev_event(struct notifier_block *this, unsigned long event, v
3473{ 3468{
3474 struct net_device *event_dev = (struct net_device *)ptr; 3469 struct net_device *event_dev = (struct net_device *)ptr;
3475 3470
3476 if (event_dev->nd_net != &init_net) 3471 if (dev_net(event_dev) != &init_net)
3477 return NOTIFY_DONE; 3472 return NOTIFY_DONE;
3478 3473
3479 dprintk("event_dev: %s, event: %lx\n", 3474 dprintk("event_dev: %s, event: %lx\n",
@@ -3511,6 +3506,9 @@ static int bond_inetaddr_event(struct notifier_block *this, unsigned long event,
3511 struct bonding *bond, *bond_next; 3506 struct bonding *bond, *bond_next;
3512 struct vlan_entry *vlan, *vlan_next; 3507 struct vlan_entry *vlan, *vlan_next;
3513 3508
3509 if (dev_net(ifa->ifa_dev->dev) != &init_net)
3510 return NOTIFY_DONE;
3511
3514 list_for_each_entry_safe(bond, bond_next, &bond_dev_list, bond_list) { 3512 list_for_each_entry_safe(bond, bond_next, &bond_dev_list, bond_list) {
3515 if (bond->dev == event_dev) { 3513 if (bond->dev == event_dev) {
3516 switch (event) { 3514 switch (event) {
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c
index 14299f8063af..93e13636f8dd 100644
--- a/drivers/net/cassini.c
+++ b/drivers/net/cassini.c
@@ -532,8 +532,7 @@ static void cas_spare_free(struct cas *cp)
532 /* free spare buffers */ 532 /* free spare buffers */
533 INIT_LIST_HEAD(&list); 533 INIT_LIST_HEAD(&list);
534 spin_lock(&cp->rx_spare_lock); 534 spin_lock(&cp->rx_spare_lock);
535 list_splice(&cp->rx_spare_list, &list); 535 list_splice_init(&cp->rx_spare_list, &list);
536 INIT_LIST_HEAD(&cp->rx_spare_list);
537 spin_unlock(&cp->rx_spare_lock); 536 spin_unlock(&cp->rx_spare_lock);
538 list_for_each_safe(elem, tmp, &list) { 537 list_for_each_safe(elem, tmp, &list) {
539 cas_page_free(cp, list_entry(elem, cas_page_t, list)); 538 cas_page_free(cp, list_entry(elem, cas_page_t, list));
@@ -546,13 +545,11 @@ static void cas_spare_free(struct cas *cp)
546 * lock than used everywhere else to manipulate this list. 545 * lock than used everywhere else to manipulate this list.
547 */ 546 */
548 spin_lock(&cp->rx_inuse_lock); 547 spin_lock(&cp->rx_inuse_lock);
549 list_splice(&cp->rx_inuse_list, &list); 548 list_splice_init(&cp->rx_inuse_list, &list);
550 INIT_LIST_HEAD(&cp->rx_inuse_list);
551 spin_unlock(&cp->rx_inuse_lock); 549 spin_unlock(&cp->rx_inuse_lock);
552#else 550#else
553 spin_lock(&cp->rx_spare_lock); 551 spin_lock(&cp->rx_spare_lock);
554 list_splice(&cp->rx_inuse_list, &list); 552 list_splice_init(&cp->rx_inuse_list, &list);
555 INIT_LIST_HEAD(&cp->rx_inuse_list);
556 spin_unlock(&cp->rx_spare_lock); 553 spin_unlock(&cp->rx_spare_lock);
557#endif 554#endif
558 list_for_each_safe(elem, tmp, &list) { 555 list_for_each_safe(elem, tmp, &list) {
@@ -573,8 +570,7 @@ static void cas_spare_recover(struct cas *cp, const gfp_t flags)
573 /* make a local copy of the list */ 570 /* make a local copy of the list */
574 INIT_LIST_HEAD(&list); 571 INIT_LIST_HEAD(&list);
575 spin_lock(&cp->rx_inuse_lock); 572 spin_lock(&cp->rx_inuse_lock);
576 list_splice(&cp->rx_inuse_list, &list); 573 list_splice_init(&cp->rx_inuse_list, &list);
577 INIT_LIST_HEAD(&cp->rx_inuse_list);
578 spin_unlock(&cp->rx_inuse_lock); 574 spin_unlock(&cp->rx_inuse_lock);
579 575
580 list_for_each_safe(elem, tmp, &list) { 576 list_for_each_safe(elem, tmp, &list) {
diff --git a/drivers/net/cpmac.c b/drivers/net/cpmac.c
index c85194f2cd2d..9da7ff437031 100644
--- a/drivers/net/cpmac.c
+++ b/drivers/net/cpmac.c
@@ -987,7 +987,7 @@ static int external_switch;
987static int __devinit cpmac_probe(struct platform_device *pdev) 987static int __devinit cpmac_probe(struct platform_device *pdev)
988{ 988{
989 int rc, phy_id, i; 989 int rc, phy_id, i;
990 int mdio_bus_id = cpmac_mii.id; 990 char *mdio_bus_id = "0";
991 struct resource *mem; 991 struct resource *mem;
992 struct cpmac_priv *priv; 992 struct cpmac_priv *priv;
993 struct net_device *dev; 993 struct net_device *dev;
@@ -1008,8 +1008,6 @@ static int __devinit cpmac_probe(struct platform_device *pdev)
1008 if (external_switch || dumb_switch) { 1008 if (external_switch || dumb_switch) {
1009 struct fixed_phy_status status = {}; 1009 struct fixed_phy_status status = {};
1010 1010
1011 mdio_bus_id = 0;
1012
1013 /* 1011 /*
1014 * FIXME: this should be in the platform code! 1012 * FIXME: this should be in the platform code!
1015 * Since there is not platform code at all (that is, 1013 * Since there is not platform code at all (that is,
@@ -1143,6 +1141,7 @@ int __devinit cpmac_init(void)
1143 } 1141 }
1144 1142
1145 cpmac_mii.phy_mask = ~(mask | 0x80000000); 1143 cpmac_mii.phy_mask = ~(mask | 0x80000000);
1144 snprintf(cpmac_mii.id, MII_BUS_ID_SIZE, "0");
1146 1145
1147 res = mdiobus_register(&cpmac_mii); 1146 res = mdiobus_register(&cpmac_mii);
1148 if (res) 1147 if (res)
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index fd2e05bbb903..05e5f59e87fa 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -1014,8 +1014,8 @@ static int offload_open(struct net_device *dev)
1014 adapter->port[0]->mtu : 0xffff); 1014 adapter->port[0]->mtu : 0xffff);
1015 init_smt(adapter); 1015 init_smt(adapter);
1016 1016
1017 /* Never mind if the next step fails */ 1017 if (sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group))
1018 sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group); 1018 dev_dbg(&dev->dev, "cannot create sysfs group\n");
1019 1019
1020 /* Call back all registered clients */ 1020 /* Call back all registered clients */
1021 cxgb3_add_clients(tdev); 1021 cxgb3_add_clients(tdev);
diff --git a/drivers/net/cxgb3/cxgb3_offload.c b/drivers/net/cxgb3/cxgb3_offload.c
index 901c824bfe6d..ff9c013ce535 100644
--- a/drivers/net/cxgb3/cxgb3_offload.c
+++ b/drivers/net/cxgb3/cxgb3_offload.c
@@ -833,10 +833,26 @@ static int do_trace(struct t3cdev *dev, struct sk_buff *skb)
833 return 0; 833 return 0;
834} 834}
835 835
836/*
837 * That skb would better have come from process_responses() where we abuse
838 * ->priority and ->csum to carry our data. NB: if we get to per-arch
839 * ->csum, the things might get really interesting here.
840 */
841
842static inline u32 get_hwtid(struct sk_buff *skb)
843{
844 return ntohl((__force __be32)skb->priority) >> 8 & 0xfffff;
845}
846
847static inline u32 get_opcode(struct sk_buff *skb)
848{
849 return G_OPCODE(ntohl((__force __be32)skb->csum));
850}
851
836static int do_term(struct t3cdev *dev, struct sk_buff *skb) 852static int do_term(struct t3cdev *dev, struct sk_buff *skb)
837{ 853{
838 unsigned int hwtid = ntohl(skb->priority) >> 8 & 0xfffff; 854 unsigned int hwtid = get_hwtid(skb);
839 unsigned int opcode = G_OPCODE(ntohl(skb->csum)); 855 unsigned int opcode = get_opcode(skb);
840 struct t3c_tid_entry *t3c_tid; 856 struct t3c_tid_entry *t3c_tid;
841 857
842 t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid); 858 t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
@@ -914,7 +930,7 @@ int process_rx(struct t3cdev *dev, struct sk_buff **skbs, int n)
914{ 930{
915 while (n--) { 931 while (n--) {
916 struct sk_buff *skb = *skbs++; 932 struct sk_buff *skb = *skbs++;
917 unsigned int opcode = G_OPCODE(ntohl(skb->csum)); 933 unsigned int opcode = get_opcode(skb);
918 int ret = cpl_handlers[opcode] (dev, skb); 934 int ret = cpl_handlers[opcode] (dev, skb);
919 935
920#if VALIDATE_TID 936#if VALIDATE_TID
diff --git a/drivers/net/cxgb3/l2t.c b/drivers/net/cxgb3/l2t.c
index 865faee53e17..f510140885ae 100644
--- a/drivers/net/cxgb3/l2t.c
+++ b/drivers/net/cxgb3/l2t.c
@@ -407,7 +407,7 @@ found:
407 } else if (neigh->nud_state & (NUD_CONNECTED|NUD_STALE)) 407 } else if (neigh->nud_state & (NUD_CONNECTED|NUD_STALE))
408 setup_l2e_send_pending(dev, NULL, e); 408 setup_l2e_send_pending(dev, NULL, e);
409 } else { 409 } else {
410 e->state = neigh_is_connected(neigh) ? 410 e->state = neigh->nud_state & NUD_CONNECTED ?
411 L2T_STATE_VALID : L2T_STATE_STALE; 411 L2T_STATE_VALID : L2T_STATE_STALE;
412 if (memcmp(e->dmac, neigh->ha, 6)) 412 if (memcmp(e->dmac, neigh->ha, 6))
413 setup_l2e_send_pending(dev, NULL, e); 413 setup_l2e_send_pending(dev, NULL, e);
diff --git a/drivers/net/defxx.c b/drivers/net/defxx.c
index ddc30c4bf34a..c062aacf229c 100644
--- a/drivers/net/defxx.c
+++ b/drivers/net/defxx.c
@@ -971,7 +971,8 @@ static int __devinit dfx_driver_init(struct net_device *dev,
971 int alloc_size; /* total buffer size needed */ 971 int alloc_size; /* total buffer size needed */
972 char *top_v, *curr_v; /* virtual addrs into memory block */ 972 char *top_v, *curr_v; /* virtual addrs into memory block */
973 dma_addr_t top_p, curr_p; /* physical addrs into memory block */ 973 dma_addr_t top_p, curr_p; /* physical addrs into memory block */
974 u32 data, le32; /* host data register value */ 974 u32 data; /* host data register value */
975 __le32 le32;
975 char *board_name = NULL; 976 char *board_name = NULL;
976 977
977 DBG_printk("In dfx_driver_init...\n"); 978 DBG_printk("In dfx_driver_init...\n");
diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h
index 3b840283a9c3..31feae1ea390 100644
--- a/drivers/net/e1000/e1000.h
+++ b/drivers/net/e1000/e1000.h
@@ -161,13 +161,13 @@ struct e1000_buffer {
161 struct sk_buff *skb; 161 struct sk_buff *skb;
162 dma_addr_t dma; 162 dma_addr_t dma;
163 unsigned long time_stamp; 163 unsigned long time_stamp;
164 uint16_t length; 164 u16 length;
165 uint16_t next_to_watch; 165 u16 next_to_watch;
166}; 166};
167 167
168 168
169struct e1000_ps_page { struct page *ps_page[PS_PAGE_BUFFERS]; }; 169struct e1000_ps_page { struct page *ps_page[PS_PAGE_BUFFERS]; };
170struct e1000_ps_page_dma { uint64_t ps_page_dma[PS_PAGE_BUFFERS]; }; 170struct e1000_ps_page_dma { u64 ps_page_dma[PS_PAGE_BUFFERS]; };
171 171
172struct e1000_tx_ring { 172struct e1000_tx_ring {
173 /* pointer to the descriptor ring memory */ 173 /* pointer to the descriptor ring memory */
@@ -186,9 +186,9 @@ struct e1000_tx_ring {
186 struct e1000_buffer *buffer_info; 186 struct e1000_buffer *buffer_info;
187 187
188 spinlock_t tx_lock; 188 spinlock_t tx_lock;
189 uint16_t tdh; 189 u16 tdh;
190 uint16_t tdt; 190 u16 tdt;
191 boolean_t last_tx_tso; 191 bool last_tx_tso;
192}; 192};
193 193
194struct e1000_rx_ring { 194struct e1000_rx_ring {
@@ -213,8 +213,8 @@ struct e1000_rx_ring {
213 /* cpu for rx queue */ 213 /* cpu for rx queue */
214 int cpu; 214 int cpu;
215 215
216 uint16_t rdh; 216 u16 rdh;
217 uint16_t rdt; 217 u16 rdt;
218}; 218};
219 219
220#define E1000_DESC_UNUSED(R) \ 220#define E1000_DESC_UNUSED(R) \
@@ -237,31 +237,30 @@ struct e1000_adapter {
237 struct timer_list watchdog_timer; 237 struct timer_list watchdog_timer;
238 struct timer_list phy_info_timer; 238 struct timer_list phy_info_timer;
239 struct vlan_group *vlgrp; 239 struct vlan_group *vlgrp;
240 uint16_t mng_vlan_id; 240 u16 mng_vlan_id;
241 uint32_t bd_number; 241 u32 bd_number;
242 uint32_t rx_buffer_len; 242 u32 rx_buffer_len;
243 uint32_t wol; 243 u32 wol;
244 uint32_t smartspeed; 244 u32 smartspeed;
245 uint32_t en_mng_pt; 245 u32 en_mng_pt;
246 uint16_t link_speed; 246 u16 link_speed;
247 uint16_t link_duplex; 247 u16 link_duplex;
248 spinlock_t stats_lock; 248 spinlock_t stats_lock;
249#ifdef CONFIG_E1000_NAPI 249#ifdef CONFIG_E1000_NAPI
250 spinlock_t tx_queue_lock; 250 spinlock_t tx_queue_lock;
251#endif 251#endif
252 atomic_t irq_sem;
253 unsigned int total_tx_bytes; 252 unsigned int total_tx_bytes;
254 unsigned int total_tx_packets; 253 unsigned int total_tx_packets;
255 unsigned int total_rx_bytes; 254 unsigned int total_rx_bytes;
256 unsigned int total_rx_packets; 255 unsigned int total_rx_packets;
257 /* Interrupt Throttle Rate */ 256 /* Interrupt Throttle Rate */
258 uint32_t itr; 257 u32 itr;
259 uint32_t itr_setting; 258 u32 itr_setting;
260 uint16_t tx_itr; 259 u16 tx_itr;
261 uint16_t rx_itr; 260 u16 rx_itr;
262 261
263 struct work_struct reset_task; 262 struct work_struct reset_task;
264 uint8_t fc_autoneg; 263 u8 fc_autoneg;
265 264
266 struct timer_list blink_timer; 265 struct timer_list blink_timer;
267 unsigned long led_status; 266 unsigned long led_status;
@@ -270,30 +269,30 @@ struct e1000_adapter {
270 struct e1000_tx_ring *tx_ring; /* One per active queue */ 269 struct e1000_tx_ring *tx_ring; /* One per active queue */
271 unsigned int restart_queue; 270 unsigned int restart_queue;
272 unsigned long tx_queue_len; 271 unsigned long tx_queue_len;
273 uint32_t txd_cmd; 272 u32 txd_cmd;
274 uint32_t tx_int_delay; 273 u32 tx_int_delay;
275 uint32_t tx_abs_int_delay; 274 u32 tx_abs_int_delay;
276 uint32_t gotcl; 275 u32 gotcl;
277 uint64_t gotcl_old; 276 u64 gotcl_old;
278 uint64_t tpt_old; 277 u64 tpt_old;
279 uint64_t colc_old; 278 u64 colc_old;
280 uint32_t tx_timeout_count; 279 u32 tx_timeout_count;
281 uint32_t tx_fifo_head; 280 u32 tx_fifo_head;
282 uint32_t tx_head_addr; 281 u32 tx_head_addr;
283 uint32_t tx_fifo_size; 282 u32 tx_fifo_size;
284 uint8_t tx_timeout_factor; 283 u8 tx_timeout_factor;
285 atomic_t tx_fifo_stall; 284 atomic_t tx_fifo_stall;
286 boolean_t pcix_82544; 285 bool pcix_82544;
287 boolean_t detect_tx_hung; 286 bool detect_tx_hung;
288 287
289 /* RX */ 288 /* RX */
290#ifdef CONFIG_E1000_NAPI 289#ifdef CONFIG_E1000_NAPI
291 boolean_t (*clean_rx) (struct e1000_adapter *adapter, 290 bool (*clean_rx) (struct e1000_adapter *adapter,
292 struct e1000_rx_ring *rx_ring, 291 struct e1000_rx_ring *rx_ring,
293 int *work_done, int work_to_do); 292 int *work_done, int work_to_do);
294#else 293#else
295 boolean_t (*clean_rx) (struct e1000_adapter *adapter, 294 bool (*clean_rx) (struct e1000_adapter *adapter,
296 struct e1000_rx_ring *rx_ring); 295 struct e1000_rx_ring *rx_ring);
297#endif 296#endif
298 void (*alloc_rx_buf) (struct e1000_adapter *adapter, 297 void (*alloc_rx_buf) (struct e1000_adapter *adapter,
299 struct e1000_rx_ring *rx_ring, 298 struct e1000_rx_ring *rx_ring,
@@ -306,17 +305,17 @@ struct e1000_adapter {
306 int num_tx_queues; 305 int num_tx_queues;
307 int num_rx_queues; 306 int num_rx_queues;
308 307
309 uint64_t hw_csum_err; 308 u64 hw_csum_err;
310 uint64_t hw_csum_good; 309 u64 hw_csum_good;
311 uint64_t rx_hdr_split; 310 u64 rx_hdr_split;
312 uint32_t alloc_rx_buff_failed; 311 u32 alloc_rx_buff_failed;
313 uint32_t rx_int_delay; 312 u32 rx_int_delay;
314 uint32_t rx_abs_int_delay; 313 u32 rx_abs_int_delay;
315 boolean_t rx_csum; 314 bool rx_csum;
316 unsigned int rx_ps_pages; 315 unsigned int rx_ps_pages;
317 uint32_t gorcl; 316 u32 gorcl;
318 uint64_t gorcl_old; 317 u64 gorcl_old;
319 uint16_t rx_ps_bsize0; 318 u16 rx_ps_bsize0;
320 319
321 320
322 /* OS defined structs */ 321 /* OS defined structs */
@@ -330,19 +329,19 @@ struct e1000_adapter {
330 struct e1000_phy_info phy_info; 329 struct e1000_phy_info phy_info;
331 struct e1000_phy_stats phy_stats; 330 struct e1000_phy_stats phy_stats;
332 331
333 uint32_t test_icr; 332 u32 test_icr;
334 struct e1000_tx_ring test_tx_ring; 333 struct e1000_tx_ring test_tx_ring;
335 struct e1000_rx_ring test_rx_ring; 334 struct e1000_rx_ring test_rx_ring;
336 335
337 int msg_enable; 336 int msg_enable;
338 boolean_t have_msi; 337 bool have_msi;
339 338
340 /* to not mess up cache alignment, always add to the bottom */ 339 /* to not mess up cache alignment, always add to the bottom */
341 boolean_t tso_force; 340 bool tso_force;
342 boolean_t smart_power_down; /* phy smart power down */ 341 bool smart_power_down; /* phy smart power down */
343 boolean_t quad_port_a; 342 bool quad_port_a;
344 unsigned long flags; 343 unsigned long flags;
345 uint32_t eeprom_wol; 344 u32 eeprom_wol;
346}; 345};
347 346
348enum e1000_state_t { 347enum e1000_state_t {
diff --git a/drivers/net/e1000/e1000_ethtool.c b/drivers/net/e1000/e1000_ethtool.c
index 85e66f4c7886..701531e72e7b 100644
--- a/drivers/net/e1000/e1000_ethtool.c
+++ b/drivers/net/e1000/e1000_ethtool.c
@@ -36,7 +36,7 @@ extern int e1000_up(struct e1000_adapter *adapter);
36extern void e1000_down(struct e1000_adapter *adapter); 36extern void e1000_down(struct e1000_adapter *adapter);
37extern void e1000_reinit_locked(struct e1000_adapter *adapter); 37extern void e1000_reinit_locked(struct e1000_adapter *adapter);
38extern void e1000_reset(struct e1000_adapter *adapter); 38extern void e1000_reset(struct e1000_adapter *adapter);
39extern int e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx); 39extern int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx);
40extern int e1000_setup_all_rx_resources(struct e1000_adapter *adapter); 40extern int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
41extern int e1000_setup_all_tx_resources(struct e1000_adapter *adapter); 41extern int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
42extern void e1000_free_all_rx_resources(struct e1000_adapter *adapter); 42extern void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
@@ -289,7 +289,7 @@ e1000_set_pauseparam(struct net_device *netdev,
289 return retval; 289 return retval;
290} 290}
291 291
292static uint32_t 292static u32
293e1000_get_rx_csum(struct net_device *netdev) 293e1000_get_rx_csum(struct net_device *netdev)
294{ 294{
295 struct e1000_adapter *adapter = netdev_priv(netdev); 295 struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -297,7 +297,7 @@ e1000_get_rx_csum(struct net_device *netdev)
297} 297}
298 298
299static int 299static int
300e1000_set_rx_csum(struct net_device *netdev, uint32_t data) 300e1000_set_rx_csum(struct net_device *netdev, u32 data)
301{ 301{
302 struct e1000_adapter *adapter = netdev_priv(netdev); 302 struct e1000_adapter *adapter = netdev_priv(netdev);
303 adapter->rx_csum = data; 303 adapter->rx_csum = data;
@@ -309,14 +309,14 @@ e1000_set_rx_csum(struct net_device *netdev, uint32_t data)
309 return 0; 309 return 0;
310} 310}
311 311
312static uint32_t 312static u32
313e1000_get_tx_csum(struct net_device *netdev) 313e1000_get_tx_csum(struct net_device *netdev)
314{ 314{
315 return (netdev->features & NETIF_F_HW_CSUM) != 0; 315 return (netdev->features & NETIF_F_HW_CSUM) != 0;
316} 316}
317 317
318static int 318static int
319e1000_set_tx_csum(struct net_device *netdev, uint32_t data) 319e1000_set_tx_csum(struct net_device *netdev, u32 data)
320{ 320{
321 struct e1000_adapter *adapter = netdev_priv(netdev); 321 struct e1000_adapter *adapter = netdev_priv(netdev);
322 322
@@ -335,7 +335,7 @@ e1000_set_tx_csum(struct net_device *netdev, uint32_t data)
335} 335}
336 336
337static int 337static int
338e1000_set_tso(struct net_device *netdev, uint32_t data) 338e1000_set_tso(struct net_device *netdev, u32 data)
339{ 339{
340 struct e1000_adapter *adapter = netdev_priv(netdev); 340 struct e1000_adapter *adapter = netdev_priv(netdev);
341 if ((adapter->hw.mac_type < e1000_82544) || 341 if ((adapter->hw.mac_type < e1000_82544) ||
@@ -353,11 +353,11 @@ e1000_set_tso(struct net_device *netdev, uint32_t data)
353 netdev->features &= ~NETIF_F_TSO6; 353 netdev->features &= ~NETIF_F_TSO6;
354 354
355 DPRINTK(PROBE, INFO, "TSO is %s\n", data ? "Enabled" : "Disabled"); 355 DPRINTK(PROBE, INFO, "TSO is %s\n", data ? "Enabled" : "Disabled");
356 adapter->tso_force = TRUE; 356 adapter->tso_force = true;
357 return 0; 357 return 0;
358} 358}
359 359
360static uint32_t 360static u32
361e1000_get_msglevel(struct net_device *netdev) 361e1000_get_msglevel(struct net_device *netdev)
362{ 362{
363 struct e1000_adapter *adapter = netdev_priv(netdev); 363 struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -365,7 +365,7 @@ e1000_get_msglevel(struct net_device *netdev)
365} 365}
366 366
367static void 367static void
368e1000_set_msglevel(struct net_device *netdev, uint32_t data) 368e1000_set_msglevel(struct net_device *netdev, u32 data)
369{ 369{
370 struct e1000_adapter *adapter = netdev_priv(netdev); 370 struct e1000_adapter *adapter = netdev_priv(netdev);
371 adapter->msg_enable = data; 371 adapter->msg_enable = data;
@@ -375,7 +375,7 @@ static int
375e1000_get_regs_len(struct net_device *netdev) 375e1000_get_regs_len(struct net_device *netdev)
376{ 376{
377#define E1000_REGS_LEN 32 377#define E1000_REGS_LEN 32
378 return E1000_REGS_LEN * sizeof(uint32_t); 378 return E1000_REGS_LEN * sizeof(u32);
379} 379}
380 380
381static void 381static void
@@ -384,10 +384,10 @@ e1000_get_regs(struct net_device *netdev,
384{ 384{
385 struct e1000_adapter *adapter = netdev_priv(netdev); 385 struct e1000_adapter *adapter = netdev_priv(netdev);
386 struct e1000_hw *hw = &adapter->hw; 386 struct e1000_hw *hw = &adapter->hw;
387 uint32_t *regs_buff = p; 387 u32 *regs_buff = p;
388 uint16_t phy_data; 388 u16 phy_data;
389 389
390 memset(p, 0, E1000_REGS_LEN * sizeof(uint32_t)); 390 memset(p, 0, E1000_REGS_LEN * sizeof(u32));
391 391
392 regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id; 392 regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id;
393 393
@@ -412,44 +412,44 @@ e1000_get_regs(struct net_device *netdev,
412 IGP01E1000_PHY_AGC_A); 412 IGP01E1000_PHY_AGC_A);
413 e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_A & 413 e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_A &
414 IGP01E1000_PHY_PAGE_SELECT, &phy_data); 414 IGP01E1000_PHY_PAGE_SELECT, &phy_data);
415 regs_buff[13] = (uint32_t)phy_data; /* cable length */ 415 regs_buff[13] = (u32)phy_data; /* cable length */
416 e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, 416 e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT,
417 IGP01E1000_PHY_AGC_B); 417 IGP01E1000_PHY_AGC_B);
418 e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_B & 418 e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_B &
419 IGP01E1000_PHY_PAGE_SELECT, &phy_data); 419 IGP01E1000_PHY_PAGE_SELECT, &phy_data);
420 regs_buff[14] = (uint32_t)phy_data; /* cable length */ 420 regs_buff[14] = (u32)phy_data; /* cable length */
421 e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, 421 e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT,
422 IGP01E1000_PHY_AGC_C); 422 IGP01E1000_PHY_AGC_C);
423 e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_C & 423 e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_C &
424 IGP01E1000_PHY_PAGE_SELECT, &phy_data); 424 IGP01E1000_PHY_PAGE_SELECT, &phy_data);
425 regs_buff[15] = (uint32_t)phy_data; /* cable length */ 425 regs_buff[15] = (u32)phy_data; /* cable length */
426 e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, 426 e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT,
427 IGP01E1000_PHY_AGC_D); 427 IGP01E1000_PHY_AGC_D);
428 e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_D & 428 e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_D &
429 IGP01E1000_PHY_PAGE_SELECT, &phy_data); 429 IGP01E1000_PHY_PAGE_SELECT, &phy_data);
430 regs_buff[16] = (uint32_t)phy_data; /* cable length */ 430 regs_buff[16] = (u32)phy_data; /* cable length */
431 regs_buff[17] = 0; /* extended 10bt distance (not needed) */ 431 regs_buff[17] = 0; /* extended 10bt distance (not needed) */
432 e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, 0x0); 432 e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, 0x0);
433 e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS & 433 e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS &
434 IGP01E1000_PHY_PAGE_SELECT, &phy_data); 434 IGP01E1000_PHY_PAGE_SELECT, &phy_data);
435 regs_buff[18] = (uint32_t)phy_data; /* cable polarity */ 435 regs_buff[18] = (u32)phy_data; /* cable polarity */
436 e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, 436 e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT,
437 IGP01E1000_PHY_PCS_INIT_REG); 437 IGP01E1000_PHY_PCS_INIT_REG);
438 e1000_read_phy_reg(hw, IGP01E1000_PHY_PCS_INIT_REG & 438 e1000_read_phy_reg(hw, IGP01E1000_PHY_PCS_INIT_REG &
439 IGP01E1000_PHY_PAGE_SELECT, &phy_data); 439 IGP01E1000_PHY_PAGE_SELECT, &phy_data);
440 regs_buff[19] = (uint32_t)phy_data; /* cable polarity */ 440 regs_buff[19] = (u32)phy_data; /* cable polarity */
441 regs_buff[20] = 0; /* polarity correction enabled (always) */ 441 regs_buff[20] = 0; /* polarity correction enabled (always) */
442 regs_buff[22] = 0; /* phy receive errors (unavailable) */ 442 regs_buff[22] = 0; /* phy receive errors (unavailable) */
443 regs_buff[23] = regs_buff[18]; /* mdix mode */ 443 regs_buff[23] = regs_buff[18]; /* mdix mode */
444 e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, 0x0); 444 e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, 0x0);
445 } else { 445 } else {
446 e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); 446 e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
447 regs_buff[13] = (uint32_t)phy_data; /* cable length */ 447 regs_buff[13] = (u32)phy_data; /* cable length */
448 regs_buff[14] = 0; /* Dummy (to align w/ IGP phy reg dump) */ 448 regs_buff[14] = 0; /* Dummy (to align w/ IGP phy reg dump) */
449 regs_buff[15] = 0; /* Dummy (to align w/ IGP phy reg dump) */ 449 regs_buff[15] = 0; /* Dummy (to align w/ IGP phy reg dump) */
450 regs_buff[16] = 0; /* Dummy (to align w/ IGP phy reg dump) */ 450 regs_buff[16] = 0; /* Dummy (to align w/ IGP phy reg dump) */
451 e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); 451 e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
452 regs_buff[17] = (uint32_t)phy_data; /* extended 10bt distance */ 452 regs_buff[17] = (u32)phy_data; /* extended 10bt distance */
453 regs_buff[18] = regs_buff[13]; /* cable polarity */ 453 regs_buff[18] = regs_buff[13]; /* cable polarity */
454 regs_buff[19] = 0; /* Dummy (to align w/ IGP phy reg dump) */ 454 regs_buff[19] = 0; /* Dummy (to align w/ IGP phy reg dump) */
455 regs_buff[20] = regs_buff[17]; /* polarity correction */ 455 regs_buff[20] = regs_buff[17]; /* polarity correction */
@@ -459,7 +459,7 @@ e1000_get_regs(struct net_device *netdev,
459 } 459 }
460 regs_buff[21] = adapter->phy_stats.idle_errors; /* phy idle errors */ 460 regs_buff[21] = adapter->phy_stats.idle_errors; /* phy idle errors */
461 e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data); 461 e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data);
462 regs_buff[24] = (uint32_t)phy_data; /* phy local receiver status */ 462 regs_buff[24] = (u32)phy_data; /* phy local receiver status */
463 regs_buff[25] = regs_buff[24]; /* phy remote receiver status */ 463 regs_buff[25] = regs_buff[24]; /* phy remote receiver status */
464 if (hw->mac_type >= e1000_82540 && 464 if (hw->mac_type >= e1000_82540 &&
465 hw->mac_type < e1000_82571 && 465 hw->mac_type < e1000_82571 &&
@@ -477,14 +477,14 @@ e1000_get_eeprom_len(struct net_device *netdev)
477 477
478static int 478static int
479e1000_get_eeprom(struct net_device *netdev, 479e1000_get_eeprom(struct net_device *netdev,
480 struct ethtool_eeprom *eeprom, uint8_t *bytes) 480 struct ethtool_eeprom *eeprom, u8 *bytes)
481{ 481{
482 struct e1000_adapter *adapter = netdev_priv(netdev); 482 struct e1000_adapter *adapter = netdev_priv(netdev);
483 struct e1000_hw *hw = &adapter->hw; 483 struct e1000_hw *hw = &adapter->hw;
484 uint16_t *eeprom_buff; 484 u16 *eeprom_buff;
485 int first_word, last_word; 485 int first_word, last_word;
486 int ret_val = 0; 486 int ret_val = 0;
487 uint16_t i; 487 u16 i;
488 488
489 if (eeprom->len == 0) 489 if (eeprom->len == 0)
490 return -EINVAL; 490 return -EINVAL;
@@ -494,7 +494,7 @@ e1000_get_eeprom(struct net_device *netdev,
494 first_word = eeprom->offset >> 1; 494 first_word = eeprom->offset >> 1;
495 last_word = (eeprom->offset + eeprom->len - 1) >> 1; 495 last_word = (eeprom->offset + eeprom->len - 1) >> 1;
496 496
497 eeprom_buff = kmalloc(sizeof(uint16_t) * 497 eeprom_buff = kmalloc(sizeof(u16) *
498 (last_word - first_word + 1), GFP_KERNEL); 498 (last_word - first_word + 1), GFP_KERNEL);
499 if (!eeprom_buff) 499 if (!eeprom_buff)
500 return -ENOMEM; 500 return -ENOMEM;
@@ -514,7 +514,7 @@ e1000_get_eeprom(struct net_device *netdev,
514 for (i = 0; i < last_word - first_word + 1; i++) 514 for (i = 0; i < last_word - first_word + 1; i++)
515 le16_to_cpus(&eeprom_buff[i]); 515 le16_to_cpus(&eeprom_buff[i]);
516 516
517 memcpy(bytes, (uint8_t *)eeprom_buff + (eeprom->offset & 1), 517 memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1),
518 eeprom->len); 518 eeprom->len);
519 kfree(eeprom_buff); 519 kfree(eeprom_buff);
520 520
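The e1000_get_eeprom() hunks above show how a byte-granular ethtool request is served from a word (16-bit) addressed EEPROM: read the covering words, convert them to host order, then copy starting at (offset & 1) inside the first word. A standalone sketch of that index arithmetic; the array contents and names are invented, and a fixed buffer stands in for the driver's kmalloc'd one:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Pretend EEPROM: word-addressed storage, treated as already in host
 * byte order (the driver reaches this point via le16_to_cpus()). */
static const uint16_t demo_eeprom[] = { 0x1122, 0x3344, 0x5566, 0x7788 };

/* Copy 'len' bytes starting at byte 'offset' out of the word array.
 * Assumes the request fits inside buf[]. */
static void demo_read_bytes(uint8_t *dst, size_t offset, size_t len)
{
	size_t first_word = offset >> 1;
	size_t last_word  = (offset + len - 1) >> 1;
	uint16_t buf[8];
	size_t i;

	for (i = 0; i <= last_word - first_word; i++)
		buf[i] = demo_eeprom[first_word + i];

	/* The low bit of the byte offset selects where to start in buf[0]. */
	memcpy(dst, (uint8_t *)buf + (offset & 1), len);
}

int main(void)
{
	uint8_t out[3];
	size_t i;

	demo_read_bytes(out, 1, 3);	/* bytes 1..3 of the EEPROM image */
	for (i = 0; i < 3; i++)
		printf("0x%02X ", out[i]);
	printf("\n");
	return 0;
}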
@@ -523,14 +523,14 @@ e1000_get_eeprom(struct net_device *netdev,
523 523
524static int 524static int
525e1000_set_eeprom(struct net_device *netdev, 525e1000_set_eeprom(struct net_device *netdev,
526 struct ethtool_eeprom *eeprom, uint8_t *bytes) 526 struct ethtool_eeprom *eeprom, u8 *bytes)
527{ 527{
528 struct e1000_adapter *adapter = netdev_priv(netdev); 528 struct e1000_adapter *adapter = netdev_priv(netdev);
529 struct e1000_hw *hw = &adapter->hw; 529 struct e1000_hw *hw = &adapter->hw;
530 uint16_t *eeprom_buff; 530 u16 *eeprom_buff;
531 void *ptr; 531 void *ptr;
532 int max_len, first_word, last_word, ret_val = 0; 532 int max_len, first_word, last_word, ret_val = 0;
533 uint16_t i; 533 u16 i;
534 534
535 if (eeprom->len == 0) 535 if (eeprom->len == 0)
536 return -EOPNOTSUPP; 536 return -EOPNOTSUPP;
@@ -590,7 +590,7 @@ e1000_get_drvinfo(struct net_device *netdev,
590{ 590{
591 struct e1000_adapter *adapter = netdev_priv(netdev); 591 struct e1000_adapter *adapter = netdev_priv(netdev);
592 char firmware_version[32]; 592 char firmware_version[32];
593 uint16_t eeprom_data; 593 u16 eeprom_data;
594 594
595 strncpy(drvinfo->driver, e1000_driver_name, 32); 595 strncpy(drvinfo->driver, e1000_driver_name, 32);
596 strncpy(drvinfo->version, e1000_driver_version, 32); 596 strncpy(drvinfo->version, e1000_driver_version, 32);
@@ -674,13 +674,13 @@ e1000_set_ringparam(struct net_device *netdev,
674 adapter->tx_ring = txdr; 674 adapter->tx_ring = txdr;
675 adapter->rx_ring = rxdr; 675 adapter->rx_ring = rxdr;
676 676
677 rxdr->count = max(ring->rx_pending,(uint32_t)E1000_MIN_RXD); 677 rxdr->count = max(ring->rx_pending,(u32)E1000_MIN_RXD);
678 rxdr->count = min(rxdr->count,(uint32_t)(mac_type < e1000_82544 ? 678 rxdr->count = min(rxdr->count,(u32)(mac_type < e1000_82544 ?
679 E1000_MAX_RXD : E1000_MAX_82544_RXD)); 679 E1000_MAX_RXD : E1000_MAX_82544_RXD));
680 rxdr->count = ALIGN(rxdr->count, REQ_RX_DESCRIPTOR_MULTIPLE); 680 rxdr->count = ALIGN(rxdr->count, REQ_RX_DESCRIPTOR_MULTIPLE);
681 681
682 txdr->count = max(ring->tx_pending,(uint32_t)E1000_MIN_TXD); 682 txdr->count = max(ring->tx_pending,(u32)E1000_MIN_TXD);
683 txdr->count = min(txdr->count,(uint32_t)(mac_type < e1000_82544 ? 683 txdr->count = min(txdr->count,(u32)(mac_type < e1000_82544 ?
684 E1000_MAX_TXD : E1000_MAX_82544_TXD)); 684 E1000_MAX_TXD : E1000_MAX_82544_TXD));
685 txdr->count = ALIGN(txdr->count, REQ_TX_DESCRIPTOR_MULTIPLE); 685 txdr->count = ALIGN(txdr->count, REQ_TX_DESCRIPTOR_MULTIPLE);
686 686
@@ -728,13 +728,13 @@ err_setup:
728 return err; 728 return err;
729} 729}
730 730
731static bool reg_pattern_test(struct e1000_adapter *adapter, uint64_t *data, 731static bool reg_pattern_test(struct e1000_adapter *adapter, u64 *data,
732 int reg, uint32_t mask, uint32_t write) 732 int reg, u32 mask, u32 write)
733{ 733{
734 static const uint32_t test[] = 734 static const u32 test[] =
735 {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; 735 {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
736 uint8_t __iomem *address = adapter->hw.hw_addr + reg; 736 u8 __iomem *address = adapter->hw.hw_addr + reg;
737 uint32_t read; 737 u32 read;
738 int i; 738 int i;
739 739
740 for (i = 0; i < ARRAY_SIZE(test); i++) { 740 for (i = 0; i < ARRAY_SIZE(test); i++) {
@@ -751,11 +751,11 @@ static bool reg_pattern_test(struct e1000_adapter *adapter, uint64_t *data,
751 return false; 751 return false;
752} 752}
753 753
754static bool reg_set_and_check(struct e1000_adapter *adapter, uint64_t *data, 754static bool reg_set_and_check(struct e1000_adapter *adapter, u64 *data,
755 int reg, uint32_t mask, uint32_t write) 755 int reg, u32 mask, u32 write)
756{ 756{
757 uint8_t __iomem *address = adapter->hw.hw_addr + reg; 757 u8 __iomem *address = adapter->hw.hw_addr + reg;
758 uint32_t read; 758 u32 read;
759 759
760 writel(write & mask, address); 760 writel(write & mask, address);
761 read = readl(address); 761 read = readl(address);
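The reg_pattern_test() and reg_set_and_check() hunks above implement the usual writable-register self-test: write a known pattern through a mask, read the register back, and compare against the expected masked value. A standalone, simplified sketch of the same idea; a plain variable stands in for the driver's __iomem register, and the names and true-on-success return convention are mine, not the driver's:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static bool pattern_test(volatile uint32_t *reg, uint32_t mask, uint32_t write)
{
	static const uint32_t patterns[] = {
		0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF
	};
	size_t i;

	for (i = 0; i < sizeof(patterns) / sizeof(patterns[0]); i++) {
		*reg = patterns[i] & write;		/* writel() in the driver */
		if ((*reg & mask) != (patterns[i] & write & mask))
			return false;			/* read-back mismatch */
	}
	return true;					/* all patterns survived */
}

int main(void)
{
	uint32_t fake_reg = 0;
	bool ok = pattern_test(&fake_reg, 0xFFFFFFFF, 0xFFFFFFFF);

	printf("register pattern test %s\n", ok ? "passed" : "failed");
	return ok ? 0 : 1;
}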
@@ -788,10 +788,10 @@ static bool reg_set_and_check(struct e1000_adapter *adapter, uint64_t *data,
788 } while (0) 788 } while (0)
789 789
790static int 790static int
791e1000_reg_test(struct e1000_adapter *adapter, uint64_t *data) 791e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
792{ 792{
793 uint32_t value, before, after; 793 u32 value, before, after;
794 uint32_t i, toggle; 794 u32 i, toggle;
795 795
796 /* The status register is Read Only, so a write should fail. 796 /* The status register is Read Only, so a write should fail.
797 * Some bits that get toggled are ignored. 797 * Some bits that get toggled are ignored.
@@ -884,11 +884,11 @@ e1000_reg_test(struct e1000_adapter *adapter, uint64_t *data)
884} 884}
885 885
886static int 886static int
887e1000_eeprom_test(struct e1000_adapter *adapter, uint64_t *data) 887e1000_eeprom_test(struct e1000_adapter *adapter, u64 *data)
888{ 888{
889 uint16_t temp; 889 u16 temp;
890 uint16_t checksum = 0; 890 u16 checksum = 0;
891 uint16_t i; 891 u16 i;
892 892
893 *data = 0; 893 *data = 0;
894 /* Read and add up the contents of the EEPROM */ 894 /* Read and add up the contents of the EEPROM */
@@ -901,7 +901,7 @@ e1000_eeprom_test(struct e1000_adapter *adapter, uint64_t *data)
901 } 901 }
902 902
903 /* If Checksum is not Correct return error else test passed */ 903 /* If Checksum is not Correct return error else test passed */
904 if ((checksum != (uint16_t) EEPROM_SUM) && !(*data)) 904 if ((checksum != (u16) EEPROM_SUM) && !(*data))
905 *data = 2; 905 *data = 2;
906 906
907 return *data; 907 return *data;
@@ -919,11 +919,12 @@ e1000_test_intr(int irq, void *data)
919} 919}
920 920
921static int 921static int
922e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data) 922e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
923{ 923{
924 struct net_device *netdev = adapter->netdev; 924 struct net_device *netdev = adapter->netdev;
925 uint32_t mask, i=0, shared_int = TRUE; 925 u32 mask, i = 0;
926 uint32_t irq = adapter->pdev->irq; 926 bool shared_int = true;
927 u32 irq = adapter->pdev->irq;
927 928
928 *data = 0; 929 *data = 0;
929 930
@@ -931,7 +932,7 @@ e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data)
931 /* Hook up test interrupt handler just for this test */ 932 /* Hook up test interrupt handler just for this test */
932 if (!request_irq(irq, &e1000_test_intr, IRQF_PROBE_SHARED, netdev->name, 933 if (!request_irq(irq, &e1000_test_intr, IRQF_PROBE_SHARED, netdev->name,
933 netdev)) 934 netdev))
934 shared_int = FALSE; 935 shared_int = false;
935 else if (request_irq(irq, &e1000_test_intr, IRQF_SHARED, 936 else if (request_irq(irq, &e1000_test_intr, IRQF_SHARED,
936 netdev->name, netdev)) { 937 netdev->name, netdev)) {
937 *data = 1; 938 *data = 1;
@@ -1069,7 +1070,7 @@ e1000_setup_desc_rings(struct e1000_adapter *adapter)
1069 struct e1000_tx_ring *txdr = &adapter->test_tx_ring; 1070 struct e1000_tx_ring *txdr = &adapter->test_tx_ring;
1070 struct e1000_rx_ring *rxdr = &adapter->test_rx_ring; 1071 struct e1000_rx_ring *rxdr = &adapter->test_rx_ring;
1071 struct pci_dev *pdev = adapter->pdev; 1072 struct pci_dev *pdev = adapter->pdev;
1072 uint32_t rctl; 1073 u32 rctl;
1073 int i, ret_val; 1074 int i, ret_val;
1074 1075
1075 /* Setup Tx descriptor ring and Tx buffers */ 1076 /* Setup Tx descriptor ring and Tx buffers */
@@ -1095,8 +1096,8 @@ e1000_setup_desc_rings(struct e1000_adapter *adapter)
1095 txdr->next_to_use = txdr->next_to_clean = 0; 1096 txdr->next_to_use = txdr->next_to_clean = 0;
1096 1097
1097 E1000_WRITE_REG(&adapter->hw, TDBAL, 1098 E1000_WRITE_REG(&adapter->hw, TDBAL,
1098 ((uint64_t) txdr->dma & 0x00000000FFFFFFFF)); 1099 ((u64) txdr->dma & 0x00000000FFFFFFFF));
1099 E1000_WRITE_REG(&adapter->hw, TDBAH, ((uint64_t) txdr->dma >> 32)); 1100 E1000_WRITE_REG(&adapter->hw, TDBAH, ((u64) txdr->dma >> 32));
1100 E1000_WRITE_REG(&adapter->hw, TDLEN, 1101 E1000_WRITE_REG(&adapter->hw, TDLEN,
1101 txdr->count * sizeof(struct e1000_tx_desc)); 1102 txdr->count * sizeof(struct e1000_tx_desc));
1102 E1000_WRITE_REG(&adapter->hw, TDH, 0); 1103 E1000_WRITE_REG(&adapter->hw, TDH, 0);
@@ -1152,8 +1153,8 @@ e1000_setup_desc_rings(struct e1000_adapter *adapter)
1152 rctl = E1000_READ_REG(&adapter->hw, RCTL); 1153 rctl = E1000_READ_REG(&adapter->hw, RCTL);
1153 E1000_WRITE_REG(&adapter->hw, RCTL, rctl & ~E1000_RCTL_EN); 1154 E1000_WRITE_REG(&adapter->hw, RCTL, rctl & ~E1000_RCTL_EN);
1154 E1000_WRITE_REG(&adapter->hw, RDBAL, 1155 E1000_WRITE_REG(&adapter->hw, RDBAL,
1155 ((uint64_t) rxdr->dma & 0xFFFFFFFF)); 1156 ((u64) rxdr->dma & 0xFFFFFFFF));
1156 E1000_WRITE_REG(&adapter->hw, RDBAH, ((uint64_t) rxdr->dma >> 32)); 1157 E1000_WRITE_REG(&adapter->hw, RDBAH, ((u64) rxdr->dma >> 32));
1157 E1000_WRITE_REG(&adapter->hw, RDLEN, rxdr->size); 1158 E1000_WRITE_REG(&adapter->hw, RDLEN, rxdr->size);
1158 E1000_WRITE_REG(&adapter->hw, RDH, 0); 1159 E1000_WRITE_REG(&adapter->hw, RDH, 0);
1159 E1000_WRITE_REG(&adapter->hw, RDT, 0); 1160 E1000_WRITE_REG(&adapter->hw, RDT, 0);
@@ -1201,7 +1202,7 @@ e1000_phy_disable_receiver(struct e1000_adapter *adapter)
1201static void 1202static void
1202e1000_phy_reset_clk_and_crs(struct e1000_adapter *adapter) 1203e1000_phy_reset_clk_and_crs(struct e1000_adapter *adapter)
1203{ 1204{
1204 uint16_t phy_reg; 1205 u16 phy_reg;
1205 1206
1206 /* Because we reset the PHY above, we need to re-force TX_CLK in the 1207 /* Because we reset the PHY above, we need to re-force TX_CLK in the
1207 * Extended PHY Specific Control Register to 25MHz clock. This 1208 * Extended PHY Specific Control Register to 25MHz clock. This
@@ -1225,8 +1226,8 @@ e1000_phy_reset_clk_and_crs(struct e1000_adapter *adapter)
1225static int 1226static int
1226e1000_nonintegrated_phy_loopback(struct e1000_adapter *adapter) 1227e1000_nonintegrated_phy_loopback(struct e1000_adapter *adapter)
1227{ 1228{
1228 uint32_t ctrl_reg; 1229 u32 ctrl_reg;
1229 uint16_t phy_reg; 1230 u16 phy_reg;
1230 1231
1231 /* Setup the Device Control Register for PHY loopback test. */ 1232 /* Setup the Device Control Register for PHY loopback test. */
1232 1233
@@ -1292,10 +1293,10 @@ e1000_nonintegrated_phy_loopback(struct e1000_adapter *adapter)
1292static int 1293static int
1293e1000_integrated_phy_loopback(struct e1000_adapter *adapter) 1294e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
1294{ 1295{
1295 uint32_t ctrl_reg = 0; 1296 u32 ctrl_reg = 0;
1296 uint32_t stat_reg = 0; 1297 u32 stat_reg = 0;
1297 1298
1298 adapter->hw.autoneg = FALSE; 1299 adapter->hw.autoneg = false;
1299 1300
1300 if (adapter->hw.phy_type == e1000_phy_m88) { 1301 if (adapter->hw.phy_type == e1000_phy_m88) {
1301 /* Auto-MDI/MDIX Off */ 1302 /* Auto-MDI/MDIX Off */
@@ -1362,8 +1363,8 @@ e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
1362static int 1363static int
1363e1000_set_phy_loopback(struct e1000_adapter *adapter) 1364e1000_set_phy_loopback(struct e1000_adapter *adapter)
1364{ 1365{
1365 uint16_t phy_reg = 0; 1366 u16 phy_reg = 0;
1366 uint16_t count = 0; 1367 u16 count = 0;
1367 1368
1368 switch (adapter->hw.mac_type) { 1369 switch (adapter->hw.mac_type) {
1369 case e1000_82543: 1370 case e1000_82543:
@@ -1415,7 +1416,7 @@ static int
1415e1000_setup_loopback_test(struct e1000_adapter *adapter) 1416e1000_setup_loopback_test(struct e1000_adapter *adapter)
1416{ 1417{
1417 struct e1000_hw *hw = &adapter->hw; 1418 struct e1000_hw *hw = &adapter->hw;
1418 uint32_t rctl; 1419 u32 rctl;
1419 1420
1420 if (hw->media_type == e1000_media_type_fiber || 1421 if (hw->media_type == e1000_media_type_fiber ||
1421 hw->media_type == e1000_media_type_internal_serdes) { 1422 hw->media_type == e1000_media_type_internal_serdes) {
@@ -1450,8 +1451,8 @@ static void
1450e1000_loopback_cleanup(struct e1000_adapter *adapter) 1451e1000_loopback_cleanup(struct e1000_adapter *adapter)
1451{ 1452{
1452 struct e1000_hw *hw = &adapter->hw; 1453 struct e1000_hw *hw = &adapter->hw;
1453 uint32_t rctl; 1454 u32 rctl;
1454 uint16_t phy_reg; 1455 u16 phy_reg;
1455 1456
1456 rctl = E1000_READ_REG(hw, RCTL); 1457 rctl = E1000_READ_REG(hw, RCTL);
1457 rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC); 1458 rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
@@ -1473,7 +1474,7 @@ e1000_loopback_cleanup(struct e1000_adapter *adapter)
1473 case e1000_82545_rev_3: 1474 case e1000_82545_rev_3:
1474 case e1000_82546_rev_3: 1475 case e1000_82546_rev_3:
1475 default: 1476 default:
1476 hw->autoneg = TRUE; 1477 hw->autoneg = true;
1477 if (hw->phy_type == e1000_phy_gg82563) 1478 if (hw->phy_type == e1000_phy_gg82563)
1478 e1000_write_phy_reg(hw, 1479 e1000_write_phy_reg(hw,
1479 GG82563_PHY_KMRN_MODE_CTRL, 1480 GG82563_PHY_KMRN_MODE_CTRL,
@@ -1577,7 +1578,7 @@ e1000_run_loopback_test(struct e1000_adapter *adapter)
1577} 1578}
1578 1579
1579static int 1580static int
1580e1000_loopback_test(struct e1000_adapter *adapter, uint64_t *data) 1581e1000_loopback_test(struct e1000_adapter *adapter, u64 *data)
1581{ 1582{
1582 /* PHY loopback cannot be performed if SoL/IDER 1583 /* PHY loopback cannot be performed if SoL/IDER
1583 * sessions are active */ 1584 * sessions are active */
@@ -1602,18 +1603,18 @@ out:
1602} 1603}
1603 1604
1604static int 1605static int
1605e1000_link_test(struct e1000_adapter *adapter, uint64_t *data) 1606e1000_link_test(struct e1000_adapter *adapter, u64 *data)
1606{ 1607{
1607 *data = 0; 1608 *data = 0;
1608 if (adapter->hw.media_type == e1000_media_type_internal_serdes) { 1609 if (adapter->hw.media_type == e1000_media_type_internal_serdes) {
1609 int i = 0; 1610 int i = 0;
1610 adapter->hw.serdes_link_down = TRUE; 1611 adapter->hw.serdes_link_down = true;
1611 1612
1612 /* On some blade server designs, link establishment 1613 /* On some blade server designs, link establishment
1613 * could take as long as 2-3 minutes */ 1614 * could take as long as 2-3 minutes */
1614 do { 1615 do {
1615 e1000_check_for_link(&adapter->hw); 1616 e1000_check_for_link(&adapter->hw);
1616 if (adapter->hw.serdes_link_down == FALSE) 1617 if (!adapter->hw.serdes_link_down)
1617 return *data; 1618 return *data;
1618 msleep(20); 1619 msleep(20);
1619 } while (i++ < 3750); 1620 } while (i++ < 3750);
@@ -1646,19 +1647,19 @@ e1000_get_sset_count(struct net_device *netdev, int sset)
1646 1647
1647static void 1648static void
1648e1000_diag_test(struct net_device *netdev, 1649e1000_diag_test(struct net_device *netdev,
1649 struct ethtool_test *eth_test, uint64_t *data) 1650 struct ethtool_test *eth_test, u64 *data)
1650{ 1651{
1651 struct e1000_adapter *adapter = netdev_priv(netdev); 1652 struct e1000_adapter *adapter = netdev_priv(netdev);
1652 boolean_t if_running = netif_running(netdev); 1653 bool if_running = netif_running(netdev);
1653 1654
1654 set_bit(__E1000_TESTING, &adapter->flags); 1655 set_bit(__E1000_TESTING, &adapter->flags);
1655 if (eth_test->flags == ETH_TEST_FL_OFFLINE) { 1656 if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
1656 /* Offline tests */ 1657 /* Offline tests */
1657 1658
1658 /* save speed, duplex, autoneg settings */ 1659 /* save speed, duplex, autoneg settings */
1659 uint16_t autoneg_advertised = adapter->hw.autoneg_advertised; 1660 u16 autoneg_advertised = adapter->hw.autoneg_advertised;
1660 uint8_t forced_speed_duplex = adapter->hw.forced_speed_duplex; 1661 u8 forced_speed_duplex = adapter->hw.forced_speed_duplex;
1661 uint8_t autoneg = adapter->hw.autoneg; 1662 u8 autoneg = adapter->hw.autoneg;
1662 1663
1663 DPRINTK(HW, INFO, "offline testing starting\n"); 1664 DPRINTK(HW, INFO, "offline testing starting\n");
1664 1665
@@ -1876,7 +1877,7 @@ e1000_led_blink_callback(unsigned long data)
1876} 1877}
1877 1878
1878static int 1879static int
1879e1000_phys_id(struct net_device *netdev, uint32_t data) 1880e1000_phys_id(struct net_device *netdev, u32 data)
1880{ 1881{
1881 struct e1000_adapter *adapter = netdev_priv(netdev); 1882 struct e1000_adapter *adapter = netdev_priv(netdev);
1882 1883
@@ -1926,7 +1927,7 @@ e1000_nway_reset(struct net_device *netdev)
1926 1927
1927static void 1928static void
1928e1000_get_ethtool_stats(struct net_device *netdev, 1929e1000_get_ethtool_stats(struct net_device *netdev,
1929 struct ethtool_stats *stats, uint64_t *data) 1930 struct ethtool_stats *stats, u64 *data)
1930{ 1931{
1931 struct e1000_adapter *adapter = netdev_priv(netdev); 1932 struct e1000_adapter *adapter = netdev_priv(netdev);
1932 int i; 1933 int i;
@@ -1935,15 +1936,15 @@ e1000_get_ethtool_stats(struct net_device *netdev,
1935 for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) { 1936 for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
1936 char *p = (char *)adapter+e1000_gstrings_stats[i].stat_offset; 1937 char *p = (char *)adapter+e1000_gstrings_stats[i].stat_offset;
1937 data[i] = (e1000_gstrings_stats[i].sizeof_stat == 1938 data[i] = (e1000_gstrings_stats[i].sizeof_stat ==
1938 sizeof(uint64_t)) ? *(uint64_t *)p : *(uint32_t *)p; 1939 sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
1939 } 1940 }
1940/* BUG_ON(i != E1000_STATS_LEN); */ 1941/* BUG_ON(i != E1000_STATS_LEN); */
1941} 1942}
1942 1943
1943static void 1944static void
1944e1000_get_strings(struct net_device *netdev, uint32_t stringset, uint8_t *data) 1945e1000_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
1945{ 1946{
1946 uint8_t *p = data; 1947 u8 *p = data;
1947 int i; 1948 int i;
1948 1949
1949 switch (stringset) { 1950 switch (stringset) {
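e1000_get_ethtool_stats() above walks a table recording, for each exported statistic, its size and its byte offset inside the adapter structure, then reads the field as either a 64-bit or 32-bit value. A standalone sketch of that offset-table technique; the struct, field, and table names are made up for the example:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct demo_adapter {
	uint64_t rx_bytes;
	uint32_t restart_queue;
};

struct demo_stat {
	const char *name;
	size_t sizeof_stat;
	size_t stat_offset;
};

static const struct demo_stat demo_stats[] = {
	{ "rx_bytes",      sizeof(uint64_t), offsetof(struct demo_adapter, rx_bytes) },
	{ "restart_queue", sizeof(uint32_t), offsetof(struct demo_adapter, restart_queue) },
};

/* Gather every statistic generically: the offset says where the field
 * lives, the recorded size says how wide a load to perform. */
static void demo_get_stats(const struct demo_adapter *adapter, uint64_t *data)
{
	size_t i;

	for (i = 0; i < sizeof(demo_stats) / sizeof(demo_stats[0]); i++) {
		const char *p = (const char *)adapter + demo_stats[i].stat_offset;

		data[i] = (demo_stats[i].sizeof_stat == sizeof(uint64_t)) ?
			  *(const uint64_t *)p : *(const uint32_t *)p;
	}
}

int main(void)
{
	struct demo_adapter adapter = { .rx_bytes = 123456, .restart_queue = 7 };
	uint64_t data[2];
	size_t i;

	demo_get_stats(&adapter, data);
	for (i = 0; i < 2; i++)
		printf("%s = %llu\n", demo_stats[i].name,
		       (unsigned long long)data[i]);
	return 0;
}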
diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c
index 7c6888c58c21..9a4b6cbddf2c 100644
--- a/drivers/net/e1000/e1000_hw.c
+++ b/drivers/net/e1000/e1000_hw.c
@@ -33,106 +33,107 @@
33 33
34#include "e1000_hw.h" 34#include "e1000_hw.h"
35 35
36static int32_t e1000_swfw_sync_acquire(struct e1000_hw *hw, uint16_t mask); 36static s32 e1000_swfw_sync_acquire(struct e1000_hw *hw, u16 mask);
37static void e1000_swfw_sync_release(struct e1000_hw *hw, uint16_t mask); 37static void e1000_swfw_sync_release(struct e1000_hw *hw, u16 mask);
38static int32_t e1000_read_kmrn_reg(struct e1000_hw *hw, uint32_t reg_addr, uint16_t *data); 38static s32 e1000_read_kmrn_reg(struct e1000_hw *hw, u32 reg_addr, u16 *data);
39static int32_t e1000_write_kmrn_reg(struct e1000_hw *hw, uint32_t reg_addr, uint16_t data); 39static s32 e1000_write_kmrn_reg(struct e1000_hw *hw, u32 reg_addr, u16 data);
40static int32_t e1000_get_software_semaphore(struct e1000_hw *hw); 40static s32 e1000_get_software_semaphore(struct e1000_hw *hw);
41static void e1000_release_software_semaphore(struct e1000_hw *hw); 41static void e1000_release_software_semaphore(struct e1000_hw *hw);
42 42
43static uint8_t e1000_arc_subsystem_valid(struct e1000_hw *hw); 43static u8 e1000_arc_subsystem_valid(struct e1000_hw *hw);
44static int32_t e1000_check_downshift(struct e1000_hw *hw); 44static s32 e1000_check_downshift(struct e1000_hw *hw);
45static int32_t e1000_check_polarity(struct e1000_hw *hw, e1000_rev_polarity *polarity); 45static s32 e1000_check_polarity(struct e1000_hw *hw, e1000_rev_polarity *polarity);
46static void e1000_clear_hw_cntrs(struct e1000_hw *hw); 46static void e1000_clear_hw_cntrs(struct e1000_hw *hw);
47static void e1000_clear_vfta(struct e1000_hw *hw); 47static void e1000_clear_vfta(struct e1000_hw *hw);
48static int32_t e1000_commit_shadow_ram(struct e1000_hw *hw); 48static s32 e1000_commit_shadow_ram(struct e1000_hw *hw);
49static int32_t e1000_config_dsp_after_link_change(struct e1000_hw *hw, boolean_t link_up); 49static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw,
50static int32_t e1000_config_fc_after_link_up(struct e1000_hw *hw); 50 bool link_up);
51static int32_t e1000_detect_gig_phy(struct e1000_hw *hw); 51static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw);
52static int32_t e1000_erase_ich8_4k_segment(struct e1000_hw *hw, uint32_t bank); 52static s32 e1000_detect_gig_phy(struct e1000_hw *hw);
53static int32_t e1000_get_auto_rd_done(struct e1000_hw *hw); 53static s32 e1000_erase_ich8_4k_segment(struct e1000_hw *hw, u32 bank);
54static int32_t e1000_get_cable_length(struct e1000_hw *hw, uint16_t *min_length, uint16_t *max_length); 54static s32 e1000_get_auto_rd_done(struct e1000_hw *hw);
55static int32_t e1000_get_hw_eeprom_semaphore(struct e1000_hw *hw); 55static s32 e1000_get_cable_length(struct e1000_hw *hw, u16 *min_length, u16 *max_length);
56static int32_t e1000_get_phy_cfg_done(struct e1000_hw *hw); 56static s32 e1000_get_hw_eeprom_semaphore(struct e1000_hw *hw);
57static int32_t e1000_get_software_flag(struct e1000_hw *hw); 57static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw);
58static int32_t e1000_ich8_cycle_init(struct e1000_hw *hw); 58static s32 e1000_get_software_flag(struct e1000_hw *hw);
59static int32_t e1000_ich8_flash_cycle(struct e1000_hw *hw, uint32_t timeout); 59static s32 e1000_ich8_cycle_init(struct e1000_hw *hw);
60static int32_t e1000_id_led_init(struct e1000_hw *hw); 60static s32 e1000_ich8_flash_cycle(struct e1000_hw *hw, u32 timeout);
61static int32_t e1000_init_lcd_from_nvm_config_region(struct e1000_hw *hw, uint32_t cnf_base_addr, uint32_t cnf_size); 61static s32 e1000_id_led_init(struct e1000_hw *hw);
62static int32_t e1000_init_lcd_from_nvm(struct e1000_hw *hw); 62static s32 e1000_init_lcd_from_nvm_config_region(struct e1000_hw *hw, u32 cnf_base_addr, u32 cnf_size);
63static s32 e1000_init_lcd_from_nvm(struct e1000_hw *hw);
63static void e1000_init_rx_addrs(struct e1000_hw *hw); 64static void e1000_init_rx_addrs(struct e1000_hw *hw);
64static void e1000_initialize_hardware_bits(struct e1000_hw *hw); 65static void e1000_initialize_hardware_bits(struct e1000_hw *hw);
65static boolean_t e1000_is_onboard_nvm_eeprom(struct e1000_hw *hw); 66static bool e1000_is_onboard_nvm_eeprom(struct e1000_hw *hw);
66static int32_t e1000_kumeran_lock_loss_workaround(struct e1000_hw *hw); 67static s32 e1000_kumeran_lock_loss_workaround(struct e1000_hw *hw);
67static int32_t e1000_mng_enable_host_if(struct e1000_hw *hw); 68static s32 e1000_mng_enable_host_if(struct e1000_hw *hw);
68static int32_t e1000_mng_host_if_write(struct e1000_hw *hw, uint8_t *buffer, uint16_t length, uint16_t offset, uint8_t *sum); 69static s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer, u16 length, u16 offset, u8 *sum);
69static int32_t e1000_mng_write_cmd_header(struct e1000_hw* hw, struct e1000_host_mng_command_header* hdr); 70static s32 e1000_mng_write_cmd_header(struct e1000_hw* hw, struct e1000_host_mng_command_header* hdr);
70static int32_t e1000_mng_write_commit(struct e1000_hw *hw); 71static s32 e1000_mng_write_commit(struct e1000_hw *hw);
71static int32_t e1000_phy_ife_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info); 72static s32 e1000_phy_ife_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info);
72static int32_t e1000_phy_igp_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info); 73static s32 e1000_phy_igp_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info);
73static int32_t e1000_read_eeprom_eerd(struct e1000_hw *hw, uint16_t offset, uint16_t words, uint16_t *data); 74static s32 e1000_read_eeprom_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
74static int32_t e1000_write_eeprom_eewr(struct e1000_hw *hw, uint16_t offset, uint16_t words, uint16_t *data); 75static s32 e1000_write_eeprom_eewr(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
75static int32_t e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int eerd); 76static s32 e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int eerd);
76static int32_t e1000_phy_m88_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info); 77static s32 e1000_phy_m88_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info);
77static void e1000_put_hw_eeprom_semaphore(struct e1000_hw *hw); 78static void e1000_put_hw_eeprom_semaphore(struct e1000_hw *hw);
78static int32_t e1000_read_ich8_byte(struct e1000_hw *hw, uint32_t index, uint8_t *data); 79static s32 e1000_read_ich8_byte(struct e1000_hw *hw, u32 index, u8 *data);
79static int32_t e1000_verify_write_ich8_byte(struct e1000_hw *hw, uint32_t index, uint8_t byte); 80static s32 e1000_verify_write_ich8_byte(struct e1000_hw *hw, u32 index, u8 byte);
80static int32_t e1000_write_ich8_byte(struct e1000_hw *hw, uint32_t index, uint8_t byte); 81static s32 e1000_write_ich8_byte(struct e1000_hw *hw, u32 index, u8 byte);
81static int32_t e1000_read_ich8_word(struct e1000_hw *hw, uint32_t index, uint16_t *data); 82static s32 e1000_read_ich8_word(struct e1000_hw *hw, u32 index, u16 *data);
82static int32_t e1000_read_ich8_data(struct e1000_hw *hw, uint32_t index, uint32_t size, uint16_t *data); 83static s32 e1000_read_ich8_data(struct e1000_hw *hw, u32 index, u32 size, u16 *data);
83static int32_t e1000_write_ich8_data(struct e1000_hw *hw, uint32_t index, uint32_t size, uint16_t data); 84static s32 e1000_write_ich8_data(struct e1000_hw *hw, u32 index, u32 size, u16 data);
84static int32_t e1000_read_eeprom_ich8(struct e1000_hw *hw, uint16_t offset, uint16_t words, uint16_t *data); 85static s32 e1000_read_eeprom_ich8(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
85static int32_t e1000_write_eeprom_ich8(struct e1000_hw *hw, uint16_t offset, uint16_t words, uint16_t *data); 86static s32 e1000_write_eeprom_ich8(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
86static void e1000_release_software_flag(struct e1000_hw *hw); 87static void e1000_release_software_flag(struct e1000_hw *hw);
87static int32_t e1000_set_d3_lplu_state(struct e1000_hw *hw, boolean_t active); 88static s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active);
88static int32_t e1000_set_d0_lplu_state(struct e1000_hw *hw, boolean_t active); 89static s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active);
89static int32_t e1000_set_pci_ex_no_snoop(struct e1000_hw *hw, uint32_t no_snoop); 90static s32 e1000_set_pci_ex_no_snoop(struct e1000_hw *hw, u32 no_snoop);
90static void e1000_set_pci_express_master_disable(struct e1000_hw *hw); 91static void e1000_set_pci_express_master_disable(struct e1000_hw *hw);
91static int32_t e1000_wait_autoneg(struct e1000_hw *hw); 92static s32 e1000_wait_autoneg(struct e1000_hw *hw);
92static void e1000_write_reg_io(struct e1000_hw *hw, uint32_t offset, uint32_t value); 93static void e1000_write_reg_io(struct e1000_hw *hw, u32 offset, u32 value);
93static int32_t e1000_set_phy_type(struct e1000_hw *hw); 94static s32 e1000_set_phy_type(struct e1000_hw *hw);
94static void e1000_phy_init_script(struct e1000_hw *hw); 95static void e1000_phy_init_script(struct e1000_hw *hw);
95static int32_t e1000_setup_copper_link(struct e1000_hw *hw); 96static s32 e1000_setup_copper_link(struct e1000_hw *hw);
96static int32_t e1000_setup_fiber_serdes_link(struct e1000_hw *hw); 97static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw);
97static int32_t e1000_adjust_serdes_amplitude(struct e1000_hw *hw); 98static s32 e1000_adjust_serdes_amplitude(struct e1000_hw *hw);
98static int32_t e1000_phy_force_speed_duplex(struct e1000_hw *hw); 99static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw);
99static int32_t e1000_config_mac_to_phy(struct e1000_hw *hw); 100static s32 e1000_config_mac_to_phy(struct e1000_hw *hw);
100static void e1000_raise_mdi_clk(struct e1000_hw *hw, uint32_t *ctrl); 101static void e1000_raise_mdi_clk(struct e1000_hw *hw, u32 *ctrl);
101static void e1000_lower_mdi_clk(struct e1000_hw *hw, uint32_t *ctrl); 102static void e1000_lower_mdi_clk(struct e1000_hw *hw, u32 *ctrl);
102static void e1000_shift_out_mdi_bits(struct e1000_hw *hw, uint32_t data, 103static void e1000_shift_out_mdi_bits(struct e1000_hw *hw, u32 data,
103 uint16_t count); 104 u16 count);
104static uint16_t e1000_shift_in_mdi_bits(struct e1000_hw *hw); 105static u16 e1000_shift_in_mdi_bits(struct e1000_hw *hw);
105static int32_t e1000_phy_reset_dsp(struct e1000_hw *hw); 106static s32 e1000_phy_reset_dsp(struct e1000_hw *hw);
106static int32_t e1000_write_eeprom_spi(struct e1000_hw *hw, uint16_t offset, 107static s32 e1000_write_eeprom_spi(struct e1000_hw *hw, u16 offset,
107 uint16_t words, uint16_t *data); 108 u16 words, u16 *data);
108static int32_t e1000_write_eeprom_microwire(struct e1000_hw *hw, 109static s32 e1000_write_eeprom_microwire(struct e1000_hw *hw,
109 uint16_t offset, uint16_t words, 110 u16 offset, u16 words,
110 uint16_t *data); 111 u16 *data);
111static int32_t e1000_spi_eeprom_ready(struct e1000_hw *hw); 112static s32 e1000_spi_eeprom_ready(struct e1000_hw *hw);
112static void e1000_raise_ee_clk(struct e1000_hw *hw, uint32_t *eecd); 113static void e1000_raise_ee_clk(struct e1000_hw *hw, u32 *eecd);
113static void e1000_lower_ee_clk(struct e1000_hw *hw, uint32_t *eecd); 114static void e1000_lower_ee_clk(struct e1000_hw *hw, u32 *eecd);
114static void e1000_shift_out_ee_bits(struct e1000_hw *hw, uint16_t data, 115static void e1000_shift_out_ee_bits(struct e1000_hw *hw, u16 data,
115 uint16_t count); 116 u16 count);
116static int32_t e1000_write_phy_reg_ex(struct e1000_hw *hw, uint32_t reg_addr, 117static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
117 uint16_t phy_data); 118 u16 phy_data);
118static int32_t e1000_read_phy_reg_ex(struct e1000_hw *hw,uint32_t reg_addr, 119static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw,u32 reg_addr,
119 uint16_t *phy_data); 120 u16 *phy_data);
120static uint16_t e1000_shift_in_ee_bits(struct e1000_hw *hw, uint16_t count); 121static u16 e1000_shift_in_ee_bits(struct e1000_hw *hw, u16 count);
121static int32_t e1000_acquire_eeprom(struct e1000_hw *hw); 122static s32 e1000_acquire_eeprom(struct e1000_hw *hw);
122static void e1000_release_eeprom(struct e1000_hw *hw); 123static void e1000_release_eeprom(struct e1000_hw *hw);
123static void e1000_standby_eeprom(struct e1000_hw *hw); 124static void e1000_standby_eeprom(struct e1000_hw *hw);
124static int32_t e1000_set_vco_speed(struct e1000_hw *hw); 125static s32 e1000_set_vco_speed(struct e1000_hw *hw);
125static int32_t e1000_polarity_reversal_workaround(struct e1000_hw *hw); 126static s32 e1000_polarity_reversal_workaround(struct e1000_hw *hw);
126static int32_t e1000_set_phy_mode(struct e1000_hw *hw); 127static s32 e1000_set_phy_mode(struct e1000_hw *hw);
127static int32_t e1000_host_if_read_cookie(struct e1000_hw *hw, uint8_t *buffer); 128static s32 e1000_host_if_read_cookie(struct e1000_hw *hw, u8 *buffer);
128static uint8_t e1000_calculate_mng_checksum(char *buffer, uint32_t length); 129static u8 e1000_calculate_mng_checksum(char *buffer, u32 length);
129static int32_t e1000_configure_kmrn_for_10_100(struct e1000_hw *hw, 130static s32 e1000_configure_kmrn_for_10_100(struct e1000_hw *hw,
130 uint16_t duplex); 131 u16 duplex);
131static int32_t e1000_configure_kmrn_for_1000(struct e1000_hw *hw); 132static s32 e1000_configure_kmrn_for_1000(struct e1000_hw *hw);
132 133
133/* IGP cable length table */ 134/* IGP cable length table */
134static const 135static const
135uint16_t e1000_igp_cable_length_table[IGP01E1000_AGC_LENGTH_TABLE_SIZE] = 136u16 e1000_igp_cable_length_table[IGP01E1000_AGC_LENGTH_TABLE_SIZE] =
136 { 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 137 { 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
137 5, 10, 10, 10, 10, 10, 10, 10, 20, 20, 20, 20, 20, 25, 25, 25, 138 5, 10, 10, 10, 10, 10, 10, 10, 20, 20, 20, 20, 20, 25, 25, 25,
138 25, 25, 25, 25, 30, 30, 30, 30, 40, 40, 40, 40, 40, 40, 40, 40, 139 25, 25, 25, 25, 30, 30, 30, 30, 40, 40, 40, 40, 40, 40, 40, 40,
@@ -143,7 +144,7 @@ uint16_t e1000_igp_cable_length_table[IGP01E1000_AGC_LENGTH_TABLE_SIZE] =
143 110, 110, 110, 110, 110, 110, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120}; 144 110, 110, 110, 110, 110, 110, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120};
144 145
145static const 146static const
146uint16_t e1000_igp_2_cable_length_table[IGP02E1000_AGC_LENGTH_TABLE_SIZE] = 147u16 e1000_igp_2_cable_length_table[IGP02E1000_AGC_LENGTH_TABLE_SIZE] =
147 { 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21, 148 { 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21,
148 0, 0, 0, 3, 6, 10, 13, 16, 19, 23, 26, 29, 32, 35, 38, 41, 149 0, 0, 0, 3, 6, 10, 13, 16, 19, 23, 26, 29, 32, 35, 38, 41,
149 6, 10, 14, 18, 22, 26, 30, 33, 37, 41, 44, 48, 51, 54, 58, 61, 150 6, 10, 14, 18, 22, 26, 30, 33, 37, 41, 44, 48, 51, 54, 58, 61,
@@ -158,7 +159,7 @@ uint16_t e1000_igp_2_cable_length_table[IGP02E1000_AGC_LENGTH_TABLE_SIZE] =
158 * 159 *
159 * hw - Struct containing variables accessed by shared code 160 * hw - Struct containing variables accessed by shared code
160 *****************************************************************************/ 161 *****************************************************************************/
161static int32_t 162static s32
162e1000_set_phy_type(struct e1000_hw *hw) 163e1000_set_phy_type(struct e1000_hw *hw)
163{ 164{
164 DEBUGFUNC("e1000_set_phy_type"); 165 DEBUGFUNC("e1000_set_phy_type");
@@ -212,8 +213,8 @@ e1000_set_phy_type(struct e1000_hw *hw)
212static void 213static void
213e1000_phy_init_script(struct e1000_hw *hw) 214e1000_phy_init_script(struct e1000_hw *hw)
214{ 215{
215 uint32_t ret_val; 216 u32 ret_val;
216 uint16_t phy_saved_data; 217 u16 phy_saved_data;
217 218
218 DEBUGFUNC("e1000_phy_init_script"); 219 DEBUGFUNC("e1000_phy_init_script");
219 220
@@ -271,7 +272,7 @@ e1000_phy_init_script(struct e1000_hw *hw)
271 e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data); 272 e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data);
272 273
273 if (hw->mac_type == e1000_82547) { 274 if (hw->mac_type == e1000_82547) {
274 uint16_t fused, fine, coarse; 275 u16 fused, fine, coarse;
275 276
276 /* Move to analog registers page */ 277 /* Move to analog registers page */
277 e1000_read_phy_reg(hw, IGP01E1000_ANALOG_SPARE_FUSE_STATUS, &fused); 278 e1000_read_phy_reg(hw, IGP01E1000_ANALOG_SPARE_FUSE_STATUS, &fused);
@@ -305,7 +306,7 @@ e1000_phy_init_script(struct e1000_hw *hw)
305 * 306 *
306 * hw - Struct containing variables accessed by shared code 307 * hw - Struct containing variables accessed by shared code
307 *****************************************************************************/ 308 *****************************************************************************/
308int32_t 309s32
309e1000_set_mac_type(struct e1000_hw *hw) 310e1000_set_mac_type(struct e1000_hw *hw)
310{ 311{
311 DEBUGFUNC("e1000_set_mac_type"); 312 DEBUGFUNC("e1000_set_mac_type");
@@ -425,22 +426,22 @@ e1000_set_mac_type(struct e1000_hw *hw)
425 426
426 switch (hw->mac_type) { 427 switch (hw->mac_type) {
427 case e1000_ich8lan: 428 case e1000_ich8lan:
428 hw->swfwhw_semaphore_present = TRUE; 429 hw->swfwhw_semaphore_present = true;
429 hw->asf_firmware_present = TRUE; 430 hw->asf_firmware_present = true;
430 break; 431 break;
431 case e1000_80003es2lan: 432 case e1000_80003es2lan:
432 hw->swfw_sync_present = TRUE; 433 hw->swfw_sync_present = true;
433 /* fall through */ 434 /* fall through */
434 case e1000_82571: 435 case e1000_82571:
435 case e1000_82572: 436 case e1000_82572:
436 case e1000_82573: 437 case e1000_82573:
437 hw->eeprom_semaphore_present = TRUE; 438 hw->eeprom_semaphore_present = true;
438 /* fall through */ 439 /* fall through */
439 case e1000_82541: 440 case e1000_82541:
440 case e1000_82547: 441 case e1000_82547:
441 case e1000_82541_rev_2: 442 case e1000_82541_rev_2:
442 case e1000_82547_rev_2: 443 case e1000_82547_rev_2:
443 hw->asf_firmware_present = TRUE; 444 hw->asf_firmware_present = true;
444 break; 445 break;
445 default: 446 default:
446 break; 447 break;
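The e1000_set_mac_type() switch above relies on deliberate fall-through so that newer MAC types pick up every capability flag set by the cases below their entry point. A compact standalone illustration of that idiom; the enum and flag names are invented for the example:

#include <stdbool.h>
#include <stdio.h>

enum demo_mac { DEMO_MAC_OLD, DEMO_MAC_MID, DEMO_MAC_NEW };

struct demo_caps {
	bool swfw_sync;
	bool eeprom_semaphore;
	bool asf_firmware;
};

static void demo_set_caps(enum demo_mac mac, struct demo_caps *caps)
{
	switch (mac) {
	case DEMO_MAC_NEW:
		caps->swfw_sync = true;
		/* fall through */
	case DEMO_MAC_MID:
		caps->eeprom_semaphore = true;
		/* fall through */
	case DEMO_MAC_OLD:
		caps->asf_firmware = true;
		break;
	}
}

int main(void)
{
	struct demo_caps caps = { false, false, false };

	demo_set_caps(DEMO_MAC_MID, &caps);
	printf("swfw=%d eeprom_sem=%d asf=%d\n",
	       caps.swfw_sync, caps.eeprom_semaphore, caps.asf_firmware);
	return 0;
}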
@@ -450,20 +451,20 @@ e1000_set_mac_type(struct e1000_hw *hw)
450 * FD mode 451 * FD mode
451 */ 452 */
452 if (hw->mac_type == e1000_82543) 453 if (hw->mac_type == e1000_82543)
453 hw->bad_tx_carr_stats_fd = TRUE; 454 hw->bad_tx_carr_stats_fd = true;
454 455
455 /* capable of receiving management packets to the host */ 456 /* capable of receiving management packets to the host */
456 if (hw->mac_type >= e1000_82571) 457 if (hw->mac_type >= e1000_82571)
457 hw->has_manc2h = TRUE; 458 hw->has_manc2h = true;
458 459
459 /* In rare occasions, ESB2 systems would end up started without 460 /* In rare occasions, ESB2 systems would end up started without
460 * the RX unit being turned on. 461 * the RX unit being turned on.
461 */ 462 */
462 if (hw->mac_type == e1000_80003es2lan) 463 if (hw->mac_type == e1000_80003es2lan)
463 hw->rx_needs_kicking = TRUE; 464 hw->rx_needs_kicking = true;
464 465
465 if (hw->mac_type > e1000_82544) 466 if (hw->mac_type > e1000_82544)
466 hw->has_smbus = TRUE; 467 hw->has_smbus = true;
467 468
468 return E1000_SUCCESS; 469 return E1000_SUCCESS;
469} 470}
@@ -476,13 +477,13 @@ e1000_set_mac_type(struct e1000_hw *hw)
476void 477void
477e1000_set_media_type(struct e1000_hw *hw) 478e1000_set_media_type(struct e1000_hw *hw)
478{ 479{
479 uint32_t status; 480 u32 status;
480 481
481 DEBUGFUNC("e1000_set_media_type"); 482 DEBUGFUNC("e1000_set_media_type");
482 483
483 if (hw->mac_type != e1000_82543) { 484 if (hw->mac_type != e1000_82543) {
484 /* tbi_compatibility is only valid on 82543 */ 485 /* tbi_compatibility is only valid on 82543 */
485 hw->tbi_compatibility_en = FALSE; 486 hw->tbi_compatibility_en = false;
486 } 487 }
487 488
488 switch (hw->device_id) { 489 switch (hw->device_id) {
@@ -513,7 +514,7 @@ e1000_set_media_type(struct e1000_hw *hw)
513 if (status & E1000_STATUS_TBIMODE) { 514 if (status & E1000_STATUS_TBIMODE) {
514 hw->media_type = e1000_media_type_fiber; 515 hw->media_type = e1000_media_type_fiber;
515 /* tbi_compatibility not valid on fiber */ 516 /* tbi_compatibility not valid on fiber */
516 hw->tbi_compatibility_en = FALSE; 517 hw->tbi_compatibility_en = false;
517 } else { 518 } else {
518 hw->media_type = e1000_media_type_copper; 519 hw->media_type = e1000_media_type_copper;
519 } 520 }
@@ -527,17 +528,17 @@ e1000_set_media_type(struct e1000_hw *hw)
527 * 528 *
528 * hw - Struct containing variables accessed by shared code 529 * hw - Struct containing variables accessed by shared code
529 *****************************************************************************/ 530 *****************************************************************************/
530int32_t 531s32
531e1000_reset_hw(struct e1000_hw *hw) 532e1000_reset_hw(struct e1000_hw *hw)
532{ 533{
533 uint32_t ctrl; 534 u32 ctrl;
534 uint32_t ctrl_ext; 535 u32 ctrl_ext;
535 uint32_t icr; 536 u32 icr;
536 uint32_t manc; 537 u32 manc;
537 uint32_t led_ctrl; 538 u32 led_ctrl;
538 uint32_t timeout; 539 u32 timeout;
539 uint32_t extcnf_ctrl; 540 u32 extcnf_ctrl;
540 int32_t ret_val; 541 s32 ret_val;
541 542
542 DEBUGFUNC("e1000_reset_hw"); 543 DEBUGFUNC("e1000_reset_hw");
543 544
@@ -569,7 +570,7 @@ e1000_reset_hw(struct e1000_hw *hw)
569 E1000_WRITE_FLUSH(hw); 570 E1000_WRITE_FLUSH(hw);
570 571
571 /* The tbi_compatibility_on Flag must be cleared when Rctl is cleared. */ 572 /* The tbi_compatibility_on Flag must be cleared when Rctl is cleared. */
572 hw->tbi_compatibility_on = FALSE; 573 hw->tbi_compatibility_on = false;
573 574
574 /* Delay to allow any outstanding PCI transactions to complete before 575 /* Delay to allow any outstanding PCI transactions to complete before
575 * resetting the device 576 * resetting the device
@@ -682,7 +683,7 @@ e1000_reset_hw(struct e1000_hw *hw)
682 msleep(20); 683 msleep(20);
683 break; 684 break;
684 case e1000_82573: 685 case e1000_82573:
685 if (e1000_is_onboard_nvm_eeprom(hw) == FALSE) { 686 if (!e1000_is_onboard_nvm_eeprom(hw)) {
686 udelay(10); 687 udelay(10);
687 ctrl_ext = E1000_READ_REG(hw, CTRL_EXT); 688 ctrl_ext = E1000_READ_REG(hw, CTRL_EXT);
688 ctrl_ext |= E1000_CTRL_EXT_EE_RST; 689 ctrl_ext |= E1000_CTRL_EXT_EE_RST;
@@ -729,7 +730,7 @@ e1000_reset_hw(struct e1000_hw *hw)
729 } 730 }
730 731
731 if (hw->mac_type == e1000_ich8lan) { 732 if (hw->mac_type == e1000_ich8lan) {
732 uint32_t kab = E1000_READ_REG(hw, KABGTXD); 733 u32 kab = E1000_READ_REG(hw, KABGTXD);
733 kab |= E1000_KABGTXD_BGSQLBIAS; 734 kab |= E1000_KABGTXD_BGSQLBIAS;
734 E1000_WRITE_REG(hw, KABGTXD, kab); 735 E1000_WRITE_REG(hw, KABGTXD, kab);
735 } 736 }
@@ -751,10 +752,10 @@ e1000_initialize_hardware_bits(struct e1000_hw *hw)
751{ 752{
752 if ((hw->mac_type >= e1000_82571) && (!hw->initialize_hw_bits_disable)) { 753 if ((hw->mac_type >= e1000_82571) && (!hw->initialize_hw_bits_disable)) {
753 /* Settings common to all PCI-express silicon */ 754 /* Settings common to all PCI-express silicon */
754 uint32_t reg_ctrl, reg_ctrl_ext; 755 u32 reg_ctrl, reg_ctrl_ext;
755 uint32_t reg_tarc0, reg_tarc1; 756 u32 reg_tarc0, reg_tarc1;
756 uint32_t reg_tctl; 757 u32 reg_tctl;
757 uint32_t reg_txdctl, reg_txdctl1; 758 u32 reg_txdctl, reg_txdctl1;
758 759
759 /* link autonegotiation/sync workarounds */ 760 /* link autonegotiation/sync workarounds */
760 reg_tarc0 = E1000_READ_REG(hw, TARC0); 761 reg_tarc0 = E1000_READ_REG(hw, TARC0);
@@ -865,15 +866,15 @@ e1000_initialize_hardware_bits(struct e1000_hw *hw)
865 * configuration and flow control settings. Clears all on-chip counters. Leaves 866 * configuration and flow control settings. Clears all on-chip counters. Leaves
866 * the transmit and receive units disabled and uninitialized. 867 * the transmit and receive units disabled and uninitialized.
867 *****************************************************************************/ 868 *****************************************************************************/
868int32_t 869s32
869e1000_init_hw(struct e1000_hw *hw) 870e1000_init_hw(struct e1000_hw *hw)
870{ 871{
871 uint32_t ctrl; 872 u32 ctrl;
872 uint32_t i; 873 u32 i;
873 int32_t ret_val; 874 s32 ret_val;
874 uint32_t mta_size; 875 u32 mta_size;
875 uint32_t reg_data; 876 u32 reg_data;
876 uint32_t ctrl_ext; 877 u32 ctrl_ext;
877 878
878 DEBUGFUNC("e1000_init_hw"); 879 DEBUGFUNC("e1000_init_hw");
879 880
@@ -1019,7 +1020,7 @@ e1000_init_hw(struct e1000_hw *hw)
1019 1020
1020 1021
1021 if (hw->mac_type == e1000_82573) { 1022 if (hw->mac_type == e1000_82573) {
1022 uint32_t gcr = E1000_READ_REG(hw, GCR); 1023 u32 gcr = E1000_READ_REG(hw, GCR);
1023 gcr |= E1000_GCR_L1_ACT_WITHOUT_L0S_RX; 1024 gcr |= E1000_GCR_L1_ACT_WITHOUT_L0S_RX;
1024 E1000_WRITE_REG(hw, GCR, gcr); 1025 E1000_WRITE_REG(hw, GCR, gcr);
1025 } 1026 }
@@ -1053,11 +1054,11 @@ e1000_init_hw(struct e1000_hw *hw)
1053 * 1054 *
1054 * hw - Struct containing variables accessed by shared code. 1055 * hw - Struct containing variables accessed by shared code.
1055 *****************************************************************************/ 1056 *****************************************************************************/
1056static int32_t 1057static s32
1057e1000_adjust_serdes_amplitude(struct e1000_hw *hw) 1058e1000_adjust_serdes_amplitude(struct e1000_hw *hw)
1058{ 1059{
1059 uint16_t eeprom_data; 1060 u16 eeprom_data;
1060 int32_t ret_val; 1061 s32 ret_val;
1061 1062
1062 DEBUGFUNC("e1000_adjust_serdes_amplitude"); 1063 DEBUGFUNC("e1000_adjust_serdes_amplitude");
1063 1064
@@ -1099,12 +1100,12 @@ e1000_adjust_serdes_amplitude(struct e1000_hw *hw)
1099 * established. Assumes the hardware has previously been reset and the 1100 * established. Assumes the hardware has previously been reset and the
1100 * transmitter and receiver are not enabled. 1101 * transmitter and receiver are not enabled.
1101 *****************************************************************************/ 1102 *****************************************************************************/
1102int32_t 1103s32
1103e1000_setup_link(struct e1000_hw *hw) 1104e1000_setup_link(struct e1000_hw *hw)
1104{ 1105{
1105 uint32_t ctrl_ext; 1106 u32 ctrl_ext;
1106 int32_t ret_val; 1107 s32 ret_val;
1107 uint16_t eeprom_data; 1108 u16 eeprom_data;
1108 1109
1109 DEBUGFUNC("e1000_setup_link"); 1110 DEBUGFUNC("e1000_setup_link");
1110 1111
@@ -1232,15 +1233,15 @@ e1000_setup_link(struct e1000_hw *hw)
1232 * link. Assumes the hardware has been previously reset and the transmitter 1233 * link. Assumes the hardware has been previously reset and the transmitter
1233 * and receiver are not enabled. 1234 * and receiver are not enabled.
1234 *****************************************************************************/ 1235 *****************************************************************************/
1235static int32_t 1236static s32
1236e1000_setup_fiber_serdes_link(struct e1000_hw *hw) 1237e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
1237{ 1238{
1238 uint32_t ctrl; 1239 u32 ctrl;
1239 uint32_t status; 1240 u32 status;
1240 uint32_t txcw = 0; 1241 u32 txcw = 0;
1241 uint32_t i; 1242 u32 i;
1242 uint32_t signal = 0; 1243 u32 signal = 0;
1243 int32_t ret_val; 1244 s32 ret_val;
1244 1245
1245 DEBUGFUNC("e1000_setup_fiber_serdes_link"); 1246 DEBUGFUNC("e1000_setup_fiber_serdes_link");
1246 1247
@@ -1379,12 +1380,12 @@ e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
1379* 1380*
1380* hw - Struct containing variables accessed by shared code 1381* hw - Struct containing variables accessed by shared code
1381******************************************************************************/ 1382******************************************************************************/
1382static int32_t 1383static s32
1383e1000_copper_link_preconfig(struct e1000_hw *hw) 1384e1000_copper_link_preconfig(struct e1000_hw *hw)
1384{ 1385{
1385 uint32_t ctrl; 1386 u32 ctrl;
1386 int32_t ret_val; 1387 s32 ret_val;
1387 uint16_t phy_data; 1388 u16 phy_data;
1388 1389
1389 DEBUGFUNC("e1000_copper_link_preconfig"); 1390 DEBUGFUNC("e1000_copper_link_preconfig");
1390 1391
@@ -1428,7 +1429,7 @@ e1000_copper_link_preconfig(struct e1000_hw *hw)
1428 if (hw->mac_type <= e1000_82543 || 1429 if (hw->mac_type <= e1000_82543 ||
1429 hw->mac_type == e1000_82541 || hw->mac_type == e1000_82547 || 1430 hw->mac_type == e1000_82541 || hw->mac_type == e1000_82547 ||
1430 hw->mac_type == e1000_82541_rev_2 || hw->mac_type == e1000_82547_rev_2) 1431 hw->mac_type == e1000_82541_rev_2 || hw->mac_type == e1000_82547_rev_2)
1431 hw->phy_reset_disable = FALSE; 1432 hw->phy_reset_disable = false;
1432 1433
1433 return E1000_SUCCESS; 1434 return E1000_SUCCESS;
1434} 1435}
@@ -1439,12 +1440,12 @@ e1000_copper_link_preconfig(struct e1000_hw *hw)
1439* 1440*
1440* hw - Struct containing variables accessed by shared code 1441* hw - Struct containing variables accessed by shared code
1441*********************************************************************/ 1442*********************************************************************/
1442static int32_t 1443static s32
1443e1000_copper_link_igp_setup(struct e1000_hw *hw) 1444e1000_copper_link_igp_setup(struct e1000_hw *hw)
1444{ 1445{
1445 uint32_t led_ctrl; 1446 u32 led_ctrl;
1446 int32_t ret_val; 1447 s32 ret_val;
1447 uint16_t phy_data; 1448 u16 phy_data;
1448 1449
1449 DEBUGFUNC("e1000_copper_link_igp_setup"); 1450 DEBUGFUNC("e1000_copper_link_igp_setup");
1450 1451
@@ -1470,7 +1471,7 @@ e1000_copper_link_igp_setup(struct e1000_hw *hw)
1470 /* The NVM settings will configure LPLU in D3 for IGP2 and IGP3 PHYs */ 1471 /* The NVM settings will configure LPLU in D3 for IGP2 and IGP3 PHYs */
1471 if (hw->phy_type == e1000_phy_igp) { 1472 if (hw->phy_type == e1000_phy_igp) {
1472 /* disable lplu d3 during driver init */ 1473 /* disable lplu d3 during driver init */
1473 ret_val = e1000_set_d3_lplu_state(hw, FALSE); 1474 ret_val = e1000_set_d3_lplu_state(hw, false);
1474 if (ret_val) { 1475 if (ret_val) {
1475 DEBUGOUT("Error Disabling LPLU D3\n"); 1476 DEBUGOUT("Error Disabling LPLU D3\n");
1476 return ret_val; 1477 return ret_val;
@@ -1478,7 +1479,7 @@ e1000_copper_link_igp_setup(struct e1000_hw *hw)
1478 } 1479 }
1479 1480
1480 /* disable lplu d0 during driver init */ 1481 /* disable lplu d0 during driver init */
1481 ret_val = e1000_set_d0_lplu_state(hw, FALSE); 1482 ret_val = e1000_set_d0_lplu_state(hw, false);
1482 if (ret_val) { 1483 if (ret_val) {
1483 DEBUGOUT("Error Disabling LPLU D0\n"); 1484 DEBUGOUT("Error Disabling LPLU D0\n");
1484 return ret_val; 1485 return ret_val;
@@ -1586,12 +1587,12 @@ e1000_copper_link_igp_setup(struct e1000_hw *hw)
1586* 1587*
1587* hw - Struct containing variables accessed by shared code 1588* hw - Struct containing variables accessed by shared code
1588*********************************************************************/ 1589*********************************************************************/
1589static int32_t 1590static s32
1590e1000_copper_link_ggp_setup(struct e1000_hw *hw) 1591e1000_copper_link_ggp_setup(struct e1000_hw *hw)
1591{ 1592{
1592 int32_t ret_val; 1593 s32 ret_val;
1593 uint16_t phy_data; 1594 u16 phy_data;
1594 uint32_t reg_data; 1595 u32 reg_data;
1595 1596
1596 DEBUGFUNC("e1000_copper_link_ggp_setup"); 1597 DEBUGFUNC("e1000_copper_link_ggp_setup");
1597 1598
@@ -1691,7 +1692,7 @@ e1000_copper_link_ggp_setup(struct e1000_hw *hw)
1691 * firmware will have already initialized them. We only initialize 1692 * firmware will have already initialized them. We only initialize
1692 * them if the HW is not in IAMT mode. 1693 * them if the HW is not in IAMT mode.
1693 */ 1694 */
1694 if (e1000_check_mng_mode(hw) == FALSE) { 1695 if (!e1000_check_mng_mode(hw)) {
1695 /* Enable Electrical Idle on the PHY */ 1696 /* Enable Electrical Idle on the PHY */
1696 phy_data |= GG82563_PMCR_ENABLE_ELECTRICAL_IDLE; 1697 phy_data |= GG82563_PMCR_ENABLE_ELECTRICAL_IDLE;
1697 ret_val = e1000_write_phy_reg(hw, GG82563_PHY_PWR_MGMT_CTRL, 1698 ret_val = e1000_write_phy_reg(hw, GG82563_PHY_PWR_MGMT_CTRL,
@@ -1734,11 +1735,11 @@ e1000_copper_link_ggp_setup(struct e1000_hw *hw)
1734* 1735*
1735* hw - Struct containing variables accessed by shared code 1736* hw - Struct containing variables accessed by shared code
1736*********************************************************************/ 1737*********************************************************************/
1737static int32_t 1738static s32
1738e1000_copper_link_mgp_setup(struct e1000_hw *hw) 1739e1000_copper_link_mgp_setup(struct e1000_hw *hw)
1739{ 1740{
1740 int32_t ret_val; 1741 s32 ret_val;
1741 uint16_t phy_data; 1742 u16 phy_data;
1742 1743
1743 DEBUGFUNC("e1000_copper_link_mgp_setup"); 1744 DEBUGFUNC("e1000_copper_link_mgp_setup");
1744 1745
@@ -1838,11 +1839,11 @@ e1000_copper_link_mgp_setup(struct e1000_hw *hw)
1838* 1839*
1839* hw - Struct containing variables accessed by shared code 1840* hw - Struct containing variables accessed by shared code
1840*********************************************************************/ 1841*********************************************************************/
1841static int32_t 1842static s32
1842e1000_copper_link_autoneg(struct e1000_hw *hw) 1843e1000_copper_link_autoneg(struct e1000_hw *hw)
1843{ 1844{
1844 int32_t ret_val; 1845 s32 ret_val;
1845 uint16_t phy_data; 1846 u16 phy_data;
1846 1847
1847 DEBUGFUNC("e1000_copper_link_autoneg"); 1848 DEBUGFUNC("e1000_copper_link_autoneg");
1848 1849
@@ -1892,7 +1893,7 @@ e1000_copper_link_autoneg(struct e1000_hw *hw)
1892 } 1893 }
1893 } 1894 }
1894 1895
1895 hw->get_link_status = TRUE; 1896 hw->get_link_status = true;
1896 1897
1897 return E1000_SUCCESS; 1898 return E1000_SUCCESS;
1898} 1899}
@@ -1909,10 +1910,10 @@ e1000_copper_link_autoneg(struct e1000_hw *hw)
1909* 1910*
1910* hw - Struct containing variables accessed by shared code 1911* hw - Struct containing variables accessed by shared code
1911******************************************************************************/ 1912******************************************************************************/
1912static int32_t 1913static s32
1913e1000_copper_link_postconfig(struct e1000_hw *hw) 1914e1000_copper_link_postconfig(struct e1000_hw *hw)
1914{ 1915{
1915 int32_t ret_val; 1916 s32 ret_val;
1916 DEBUGFUNC("e1000_copper_link_postconfig"); 1917 DEBUGFUNC("e1000_copper_link_postconfig");
1917 1918
1918 if (hw->mac_type >= e1000_82544) { 1919 if (hw->mac_type >= e1000_82544) {
@@ -1932,7 +1933,7 @@ e1000_copper_link_postconfig(struct e1000_hw *hw)
1932 1933
1933 /* Config DSP to improve Giga link quality */ 1934 /* Config DSP to improve Giga link quality */
1934 if (hw->phy_type == e1000_phy_igp) { 1935 if (hw->phy_type == e1000_phy_igp) {
1935 ret_val = e1000_config_dsp_after_link_change(hw, TRUE); 1936 ret_val = e1000_config_dsp_after_link_change(hw, true);
1936 if (ret_val) { 1937 if (ret_val) {
1937 DEBUGOUT("Error Configuring DSP after link up\n"); 1938 DEBUGOUT("Error Configuring DSP after link up\n");
1938 return ret_val; 1939 return ret_val;
@@ -1947,13 +1948,13 @@ e1000_copper_link_postconfig(struct e1000_hw *hw)
1947* 1948*
1948* hw - Struct containing variables accessed by shared code 1949* hw - Struct containing variables accessed by shared code
1949******************************************************************************/ 1950******************************************************************************/
1950static int32_t 1951static s32
1951e1000_setup_copper_link(struct e1000_hw *hw) 1952e1000_setup_copper_link(struct e1000_hw *hw)
1952{ 1953{
1953 int32_t ret_val; 1954 s32 ret_val;
1954 uint16_t i; 1955 u16 i;
1955 uint16_t phy_data; 1956 u16 phy_data;
1956 uint16_t reg_data; 1957 u16 reg_data;
1957 1958
1958 DEBUGFUNC("e1000_setup_copper_link"); 1959 DEBUGFUNC("e1000_setup_copper_link");
1959 1960
@@ -2061,12 +2062,12 @@ e1000_setup_copper_link(struct e1000_hw *hw)
2061* 2062*
2062* hw - Struct containing variables accessed by shared code 2063* hw - Struct containing variables accessed by shared code
2063******************************************************************************/ 2064******************************************************************************/
2064static int32_t 2065static s32
2065e1000_configure_kmrn_for_10_100(struct e1000_hw *hw, uint16_t duplex) 2066e1000_configure_kmrn_for_10_100(struct e1000_hw *hw, u16 duplex)
2066{ 2067{
2067 int32_t ret_val = E1000_SUCCESS; 2068 s32 ret_val = E1000_SUCCESS;
2068 uint32_t tipg; 2069 u32 tipg;
2069 uint16_t reg_data; 2070 u16 reg_data;
2070 2071
2071 DEBUGFUNC("e1000_configure_kmrn_for_10_100"); 2072 DEBUGFUNC("e1000_configure_kmrn_for_10_100");
2072 2073
@@ -2097,12 +2098,12 @@ e1000_configure_kmrn_for_10_100(struct e1000_hw *hw, uint16_t duplex)
2097 return ret_val; 2098 return ret_val;
2098} 2099}
2099 2100
2100static int32_t 2101static s32
2101e1000_configure_kmrn_for_1000(struct e1000_hw *hw) 2102e1000_configure_kmrn_for_1000(struct e1000_hw *hw)
2102{ 2103{
2103 int32_t ret_val = E1000_SUCCESS; 2104 s32 ret_val = E1000_SUCCESS;
2104 uint16_t reg_data; 2105 u16 reg_data;
2105 uint32_t tipg; 2106 u32 tipg;
2106 2107
2107 DEBUGFUNC("e1000_configure_kmrn_for_1000"); 2108 DEBUGFUNC("e1000_configure_kmrn_for_1000");
2108 2109
@@ -2134,12 +2135,12 @@ e1000_configure_kmrn_for_1000(struct e1000_hw *hw)
2134* 2135*
2135* hw - Struct containing variables accessed by shared code 2136* hw - Struct containing variables accessed by shared code
2136******************************************************************************/ 2137******************************************************************************/
2137int32_t 2138s32
2138e1000_phy_setup_autoneg(struct e1000_hw *hw) 2139e1000_phy_setup_autoneg(struct e1000_hw *hw)
2139{ 2140{
2140 int32_t ret_val; 2141 s32 ret_val;
2141 uint16_t mii_autoneg_adv_reg; 2142 u16 mii_autoneg_adv_reg;
2142 uint16_t mii_1000t_ctrl_reg; 2143 u16 mii_1000t_ctrl_reg;
2143 2144
2144 DEBUGFUNC("e1000_phy_setup_autoneg"); 2145 DEBUGFUNC("e1000_phy_setup_autoneg");
2145 2146
@@ -2283,15 +2284,15 @@ e1000_phy_setup_autoneg(struct e1000_hw *hw)
2283* 2284*
2284* hw - Struct containing variables accessed by shared code 2285* hw - Struct containing variables accessed by shared code
2285******************************************************************************/ 2286******************************************************************************/
2286static int32_t 2287static s32
2287e1000_phy_force_speed_duplex(struct e1000_hw *hw) 2288e1000_phy_force_speed_duplex(struct e1000_hw *hw)
2288{ 2289{
2289 uint32_t ctrl; 2290 u32 ctrl;
2290 int32_t ret_val; 2291 s32 ret_val;
2291 uint16_t mii_ctrl_reg; 2292 u16 mii_ctrl_reg;
2292 uint16_t mii_status_reg; 2293 u16 mii_status_reg;
2293 uint16_t phy_data; 2294 u16 phy_data;
2294 uint16_t i; 2295 u16 i;
2295 2296
2296 DEBUGFUNC("e1000_phy_force_speed_duplex"); 2297 DEBUGFUNC("e1000_phy_force_speed_duplex");
2297 2298
@@ -2537,7 +2538,7 @@ e1000_phy_force_speed_duplex(struct e1000_hw *hw)
2537void 2538void
2538e1000_config_collision_dist(struct e1000_hw *hw) 2539e1000_config_collision_dist(struct e1000_hw *hw)
2539{ 2540{
2540 uint32_t tctl, coll_dist; 2541 u32 tctl, coll_dist;
2541 2542
2542 DEBUGFUNC("e1000_config_collision_dist"); 2543 DEBUGFUNC("e1000_config_collision_dist");
2543 2544
@@ -2564,12 +2565,12 @@ e1000_config_collision_dist(struct e1000_hw *hw)
2564* The contents of the PHY register containing the needed information need to 2565* The contents of the PHY register containing the needed information need to
2565* be passed in. 2566* be passed in.
2566******************************************************************************/ 2567******************************************************************************/
2567static int32_t 2568static s32
2568e1000_config_mac_to_phy(struct e1000_hw *hw) 2569e1000_config_mac_to_phy(struct e1000_hw *hw)
2569{ 2570{
2570 uint32_t ctrl; 2571 u32 ctrl;
2571 int32_t ret_val; 2572 s32 ret_val;
2572 uint16_t phy_data; 2573 u16 phy_data;
2573 2574
2574 DEBUGFUNC("e1000_config_mac_to_phy"); 2575 DEBUGFUNC("e1000_config_mac_to_phy");
2575 2576
@@ -2623,10 +2624,10 @@ e1000_config_mac_to_phy(struct e1000_hw *hw)
2623 * by the PHY rather than the MAC. Software must also configure these 2624 * by the PHY rather than the MAC. Software must also configure these
2624 * bits when link is forced on a fiber connection. 2625 * bits when link is forced on a fiber connection.
2625 *****************************************************************************/ 2626 *****************************************************************************/
2626int32_t 2627s32
2627e1000_force_mac_fc(struct e1000_hw *hw) 2628e1000_force_mac_fc(struct e1000_hw *hw)
2628{ 2629{
2629 uint32_t ctrl; 2630 u32 ctrl;
2630 2631
2631 DEBUGFUNC("e1000_force_mac_fc"); 2632 DEBUGFUNC("e1000_force_mac_fc");
2632 2633
@@ -2690,15 +2691,15 @@ e1000_force_mac_fc(struct e1000_hw *hw)
2690 * based on the flow control negotiated by the PHY. In TBI mode, the TFCE 2691 * based on the flow control negotiated by the PHY. In TBI mode, the TFCE
2691 * and RFCE bits will be automatically set to the negotiated flow control mode. 2692 * and RFCE bits will be automatically set to the negotiated flow control mode.
2692 *****************************************************************************/ 2693 *****************************************************************************/
2693static int32_t 2694static s32
2694e1000_config_fc_after_link_up(struct e1000_hw *hw) 2695e1000_config_fc_after_link_up(struct e1000_hw *hw)
2695{ 2696{
2696 int32_t ret_val; 2697 s32 ret_val;
2697 uint16_t mii_status_reg; 2698 u16 mii_status_reg;
2698 uint16_t mii_nway_adv_reg; 2699 u16 mii_nway_adv_reg;
2699 uint16_t mii_nway_lp_ability_reg; 2700 u16 mii_nway_lp_ability_reg;
2700 uint16_t speed; 2701 u16 speed;
2701 uint16_t duplex; 2702 u16 duplex;
2702 2703
2703 DEBUGFUNC("e1000_config_fc_after_link_up"); 2704 DEBUGFUNC("e1000_config_fc_after_link_up");
2704 2705
@@ -2895,17 +2896,17 @@ e1000_config_fc_after_link_up(struct e1000_hw *hw)
2895 * 2896 *
2896 * Called by any function that needs to check the link status of the adapter. 2897 * Called by any function that needs to check the link status of the adapter.
2897 *****************************************************************************/ 2898 *****************************************************************************/
2898int32_t 2899s32
2899e1000_check_for_link(struct e1000_hw *hw) 2900e1000_check_for_link(struct e1000_hw *hw)
2900{ 2901{
2901 uint32_t rxcw = 0; 2902 u32 rxcw = 0;
2902 uint32_t ctrl; 2903 u32 ctrl;
2903 uint32_t status; 2904 u32 status;
2904 uint32_t rctl; 2905 u32 rctl;
2905 uint32_t icr; 2906 u32 icr;
2906 uint32_t signal = 0; 2907 u32 signal = 0;
2907 int32_t ret_val; 2908 s32 ret_val;
2908 uint16_t phy_data; 2909 u16 phy_data;
2909 2910
2910 DEBUGFUNC("e1000_check_for_link"); 2911 DEBUGFUNC("e1000_check_for_link");
2911 2912
@@ -2923,7 +2924,7 @@ e1000_check_for_link(struct e1000_hw *hw)
2923 if (hw->media_type == e1000_media_type_fiber) { 2924 if (hw->media_type == e1000_media_type_fiber) {
2924 signal = (hw->mac_type > e1000_82544) ? E1000_CTRL_SWDPIN1 : 0; 2925 signal = (hw->mac_type > e1000_82544) ? E1000_CTRL_SWDPIN1 : 0;
2925 if (status & E1000_STATUS_LU) 2926 if (status & E1000_STATUS_LU)
2926 hw->get_link_status = FALSE; 2927 hw->get_link_status = false;
2927 } 2928 }
2928 } 2929 }
2929 2930
@@ -2947,7 +2948,7 @@ e1000_check_for_link(struct e1000_hw *hw)
2947 return ret_val; 2948 return ret_val;
2948 2949
2949 if (phy_data & MII_SR_LINK_STATUS) { 2950 if (phy_data & MII_SR_LINK_STATUS) {
2950 hw->get_link_status = FALSE; 2951 hw->get_link_status = false;
2951 /* Check if there was DownShift, must be checked immediately after 2952 /* Check if there was DownShift, must be checked immediately after
2952 * link-up */ 2953 * link-up */
2953 e1000_check_downshift(hw); 2954 e1000_check_downshift(hw);
@@ -2973,7 +2974,7 @@ e1000_check_for_link(struct e1000_hw *hw)
2973 2974
2974 } else { 2975 } else {
2975 /* No link detected */ 2976 /* No link detected */
2976 e1000_config_dsp_after_link_change(hw, FALSE); 2977 e1000_config_dsp_after_link_change(hw, false);
2977 return 0; 2978 return 0;
2978 } 2979 }
2979 2980
@@ -2983,7 +2984,7 @@ e1000_check_for_link(struct e1000_hw *hw)
2983 if (!hw->autoneg) return -E1000_ERR_CONFIG; 2984 if (!hw->autoneg) return -E1000_ERR_CONFIG;
2984 2985
2985 /* optimize the dsp settings for the igp phy */ 2986 /* optimize the dsp settings for the igp phy */
2986 e1000_config_dsp_after_link_change(hw, TRUE); 2987 e1000_config_dsp_after_link_change(hw, true);
2987 2988
2988 /* We have a M88E1000 PHY and Auto-Neg is enabled. If we 2989 /* We have a M88E1000 PHY and Auto-Neg is enabled. If we
2989 * have Si on board that is 82544 or newer, Auto 2990 * have Si on board that is 82544 or newer, Auto
@@ -3021,7 +3022,7 @@ e1000_check_for_link(struct e1000_hw *hw)
3021 * at gigabit speed, we turn on TBI compatibility. 3022 * at gigabit speed, we turn on TBI compatibility.
3022 */ 3023 */
3023 if (hw->tbi_compatibility_en) { 3024 if (hw->tbi_compatibility_en) {
3024 uint16_t speed, duplex; 3025 u16 speed, duplex;
3025 ret_val = e1000_get_speed_and_duplex(hw, &speed, &duplex); 3026 ret_val = e1000_get_speed_and_duplex(hw, &speed, &duplex);
3026 if (ret_val) { 3027 if (ret_val) {
3027 DEBUGOUT("Error getting link speed and duplex\n"); 3028 DEBUGOUT("Error getting link speed and duplex\n");
@@ -3036,7 +3037,7 @@ e1000_check_for_link(struct e1000_hw *hw)
3036 rctl = E1000_READ_REG(hw, RCTL); 3037 rctl = E1000_READ_REG(hw, RCTL);
3037 rctl &= ~E1000_RCTL_SBP; 3038 rctl &= ~E1000_RCTL_SBP;
3038 E1000_WRITE_REG(hw, RCTL, rctl); 3039 E1000_WRITE_REG(hw, RCTL, rctl);
3039 hw->tbi_compatibility_on = FALSE; 3040 hw->tbi_compatibility_on = false;
3040 } 3041 }
3041 } else { 3042 } else {
3042 /* If TBI compatibility was previously off, turn it on. For 3043 /* If TBI compatibility was previously off, turn it on. For
@@ -3045,7 +3046,7 @@ e1000_check_for_link(struct e1000_hw *hw)
3045 * will look like CRC errors to the hardware. 3046 * will look like CRC errors to the hardware.
3046 */ 3047 */
3047 if (!hw->tbi_compatibility_on) { 3048 if (!hw->tbi_compatibility_on) {
3048 hw->tbi_compatibility_on = TRUE; 3049 hw->tbi_compatibility_on = true;
3049 rctl = E1000_READ_REG(hw, RCTL); 3050 rctl = E1000_READ_REG(hw, RCTL);
3050 rctl |= E1000_RCTL_SBP; 3051 rctl |= E1000_RCTL_SBP;
3051 E1000_WRITE_REG(hw, RCTL, rctl); 3052 E1000_WRITE_REG(hw, RCTL, rctl);
@@ -3098,7 +3099,7 @@ e1000_check_for_link(struct e1000_hw *hw)
3098 E1000_WRITE_REG(hw, TXCW, hw->txcw); 3099 E1000_WRITE_REG(hw, TXCW, hw->txcw);
3099 E1000_WRITE_REG(hw, CTRL, (ctrl & ~E1000_CTRL_SLU)); 3100 E1000_WRITE_REG(hw, CTRL, (ctrl & ~E1000_CTRL_SLU));
3100 3101
3101 hw->serdes_link_down = FALSE; 3102 hw->serdes_link_down = false;
3102 } 3103 }
3103 /* If we force link for non-auto-negotiation switch, check link status 3104 /* If we force link for non-auto-negotiation switch, check link status
3104 * based on MAC synchronization for internal serdes media type. 3105 * based on MAC synchronization for internal serdes media type.
@@ -3109,11 +3110,11 @@ e1000_check_for_link(struct e1000_hw *hw)
3109 udelay(10); 3110 udelay(10);
3110 if (E1000_RXCW_SYNCH & E1000_READ_REG(hw, RXCW)) { 3111 if (E1000_RXCW_SYNCH & E1000_READ_REG(hw, RXCW)) {
3111 if (!(rxcw & E1000_RXCW_IV)) { 3112 if (!(rxcw & E1000_RXCW_IV)) {
3112 hw->serdes_link_down = FALSE; 3113 hw->serdes_link_down = false;
3113 DEBUGOUT("SERDES: Link is up.\n"); 3114 DEBUGOUT("SERDES: Link is up.\n");
3114 } 3115 }
3115 } else { 3116 } else {
3116 hw->serdes_link_down = TRUE; 3117 hw->serdes_link_down = true;
3117 DEBUGOUT("SERDES: Link is down.\n"); 3118 DEBUGOUT("SERDES: Link is down.\n");
3118 } 3119 }
3119 } 3120 }
@@ -3131,14 +3132,14 @@ e1000_check_for_link(struct e1000_hw *hw)
3131 * speed - Speed of the connection 3132 * speed - Speed of the connection
3132 * duplex - Duplex setting of the connection 3133 * duplex - Duplex setting of the connection
3133 *****************************************************************************/ 3134 *****************************************************************************/
3134int32_t 3135s32
3135e1000_get_speed_and_duplex(struct e1000_hw *hw, 3136e1000_get_speed_and_duplex(struct e1000_hw *hw,
3136 uint16_t *speed, 3137 u16 *speed,
3137 uint16_t *duplex) 3138 u16 *duplex)
3138{ 3139{
3139 uint32_t status; 3140 u32 status;
3140 int32_t ret_val; 3141 s32 ret_val;
3141 uint16_t phy_data; 3142 u16 phy_data;
3142 3143
3143 DEBUGFUNC("e1000_get_speed_and_duplex"); 3144 DEBUGFUNC("e1000_get_speed_and_duplex");
3144 3145
@@ -3213,12 +3214,12 @@ e1000_get_speed_and_duplex(struct e1000_hw *hw,
3213* 3214*
3214* hw - Struct containing variables accessed by shared code 3215* hw - Struct containing variables accessed by shared code
3215******************************************************************************/ 3216******************************************************************************/
3216static int32_t 3217static s32
3217e1000_wait_autoneg(struct e1000_hw *hw) 3218e1000_wait_autoneg(struct e1000_hw *hw)
3218{ 3219{
3219 int32_t ret_val; 3220 s32 ret_val;
3220 uint16_t i; 3221 u16 i;
3221 uint16_t phy_data; 3222 u16 phy_data;
3222 3223
3223 DEBUGFUNC("e1000_wait_autoneg"); 3224 DEBUGFUNC("e1000_wait_autoneg");
3224 DEBUGOUT("Waiting for Auto-Neg to complete.\n"); 3225 DEBUGOUT("Waiting for Auto-Neg to complete.\n");
@@ -3250,7 +3251,7 @@ e1000_wait_autoneg(struct e1000_hw *hw)
3250******************************************************************************/ 3251******************************************************************************/
3251static void 3252static void
3252e1000_raise_mdi_clk(struct e1000_hw *hw, 3253e1000_raise_mdi_clk(struct e1000_hw *hw,
3253 uint32_t *ctrl) 3254 u32 *ctrl)
3254{ 3255{
3255 /* Raise the clock input to the Management Data Clock (by setting the MDC 3256 /* Raise the clock input to the Management Data Clock (by setting the MDC
3256 * bit), and then delay 10 microseconds. 3257 * bit), and then delay 10 microseconds.
@@ -3268,7 +3269,7 @@ e1000_raise_mdi_clk(struct e1000_hw *hw,
3268******************************************************************************/ 3269******************************************************************************/
3269static void 3270static void
3270e1000_lower_mdi_clk(struct e1000_hw *hw, 3271e1000_lower_mdi_clk(struct e1000_hw *hw,
3271 uint32_t *ctrl) 3272 u32 *ctrl)
3272{ 3273{
3273 /* Lower the clock input to the Management Data Clock (by clearing the MDC 3274 /* Lower the clock input to the Management Data Clock (by clearing the MDC
3274 * bit), and then delay 10 microseconds. 3275 * bit), and then delay 10 microseconds.
@@ -3289,11 +3290,11 @@ e1000_lower_mdi_clk(struct e1000_hw *hw,
3289******************************************************************************/ 3290******************************************************************************/
3290static void 3291static void
3291e1000_shift_out_mdi_bits(struct e1000_hw *hw, 3292e1000_shift_out_mdi_bits(struct e1000_hw *hw,
3292 uint32_t data, 3293 u32 data,
3293 uint16_t count) 3294 u16 count)
3294{ 3295{
3295 uint32_t ctrl; 3296 u32 ctrl;
3296 uint32_t mask; 3297 u32 mask;
3297 3298
3298 /* We need to shift "count" number of bits out to the PHY. So, the value 3299 /* We need to shift "count" number of bits out to the PHY. So, the value
3299 * in the "data" parameter will be shifted out to the PHY one bit at a 3300 * in the "data" parameter will be shifted out to the PHY one bit at a
@@ -3337,12 +3338,12 @@ e1000_shift_out_mdi_bits(struct e1000_hw *hw,
3337* 3338*
3338* Bits are shifted in, in MSB to LSB order. 3339* Bits are shifted in, in MSB to LSB order.
3339******************************************************************************/ 3340******************************************************************************/
3340static uint16_t 3341static u16
3341e1000_shift_in_mdi_bits(struct e1000_hw *hw) 3342e1000_shift_in_mdi_bits(struct e1000_hw *hw)
3342{ 3343{
3343 uint32_t ctrl; 3344 u32 ctrl;
3344 uint16_t data = 0; 3345 u16 data = 0;
3345 uint8_t i; 3346 u8 i;
3346 3347
3347 /* In order to read a register from the PHY, we need to shift in a total 3348 /* In order to read a register from the PHY, we need to shift in a total
3348 * of 18 bits from the PHY. The first two bit (turnaround) times are used 3349 * of 18 bits from the PHY. The first two bit (turnaround) times are used
@@ -3383,13 +3384,13 @@ e1000_shift_in_mdi_bits(struct e1000_hw *hw)
3383 return data; 3384 return data;
3384} 3385}
3385 3386
3386static int32_t 3387static s32
3387e1000_swfw_sync_acquire(struct e1000_hw *hw, uint16_t mask) 3388e1000_swfw_sync_acquire(struct e1000_hw *hw, u16 mask)
3388{ 3389{
3389 uint32_t swfw_sync = 0; 3390 u32 swfw_sync = 0;
3390 uint32_t swmask = mask; 3391 u32 swmask = mask;
3391 uint32_t fwmask = mask << 16; 3392 u32 fwmask = mask << 16;
3392 int32_t timeout = 200; 3393 s32 timeout = 200;
3393 3394
3394 DEBUGFUNC("e1000_swfw_sync_acquire"); 3395 DEBUGFUNC("e1000_swfw_sync_acquire");
3395 3396
@@ -3428,10 +3429,10 @@ e1000_swfw_sync_acquire(struct e1000_hw *hw, uint16_t mask)
3428} 3429}
3429 3430
3430static void 3431static void
3431e1000_swfw_sync_release(struct e1000_hw *hw, uint16_t mask) 3432e1000_swfw_sync_release(struct e1000_hw *hw, u16 mask)
3432{ 3433{
3433 uint32_t swfw_sync; 3434 u32 swfw_sync;
3434 uint32_t swmask = mask; 3435 u32 swmask = mask;
3435 3436
3436 DEBUGFUNC("e1000_swfw_sync_release"); 3437 DEBUGFUNC("e1000_swfw_sync_release");
3437 3438
@@ -3463,13 +3464,13 @@ e1000_swfw_sync_release(struct e1000_hw *hw, uint16_t mask)
3463* hw - Struct containing variables accessed by shared code 3464* hw - Struct containing variables accessed by shared code
3464* reg_addr - address of the PHY register to read 3465* reg_addr - address of the PHY register to read
3465******************************************************************************/ 3466******************************************************************************/
3466int32_t 3467s32
3467e1000_read_phy_reg(struct e1000_hw *hw, 3468e1000_read_phy_reg(struct e1000_hw *hw,
3468 uint32_t reg_addr, 3469 u32 reg_addr,
3469 uint16_t *phy_data) 3470 u16 *phy_data)
3470{ 3471{
3471 uint32_t ret_val; 3472 u32 ret_val;
3472 uint16_t swfw; 3473 u16 swfw;
3473 3474
3474 DEBUGFUNC("e1000_read_phy_reg"); 3475 DEBUGFUNC("e1000_read_phy_reg");
3475 3476
@@ -3487,7 +3488,7 @@ e1000_read_phy_reg(struct e1000_hw *hw,
3487 hw->phy_type == e1000_phy_igp_2) && 3488 hw->phy_type == e1000_phy_igp_2) &&
3488 (reg_addr > MAX_PHY_MULTI_PAGE_REG)) { 3489 (reg_addr > MAX_PHY_MULTI_PAGE_REG)) {
3489 ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT, 3490 ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT,
3490 (uint16_t)reg_addr); 3491 (u16)reg_addr);
3491 if (ret_val) { 3492 if (ret_val) {
3492 e1000_swfw_sync_release(hw, swfw); 3493 e1000_swfw_sync_release(hw, swfw);
3493 return ret_val; 3494 return ret_val;
@@ -3498,14 +3499,14 @@ e1000_read_phy_reg(struct e1000_hw *hw,
3498 /* Select Configuration Page */ 3499 /* Select Configuration Page */
3499 if ((reg_addr & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) { 3500 if ((reg_addr & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
3500 ret_val = e1000_write_phy_reg_ex(hw, GG82563_PHY_PAGE_SELECT, 3501 ret_val = e1000_write_phy_reg_ex(hw, GG82563_PHY_PAGE_SELECT,
3501 (uint16_t)((uint16_t)reg_addr >> GG82563_PAGE_SHIFT)); 3502 (u16)((u16)reg_addr >> GG82563_PAGE_SHIFT));
3502 } else { 3503 } else {
3503 /* Use Alternative Page Select register to access 3504 /* Use Alternative Page Select register to access
3504 * registers 30 and 31 3505 * registers 30 and 31
3505 */ 3506 */
3506 ret_val = e1000_write_phy_reg_ex(hw, 3507 ret_val = e1000_write_phy_reg_ex(hw,
3507 GG82563_PHY_PAGE_SELECT_ALT, 3508 GG82563_PHY_PAGE_SELECT_ALT,
3508 (uint16_t)((uint16_t)reg_addr >> GG82563_PAGE_SHIFT)); 3509 (u16)((u16)reg_addr >> GG82563_PAGE_SHIFT));
3509 } 3510 }
3510 3511
3511 if (ret_val) { 3512 if (ret_val) {
@@ -3522,13 +3523,13 @@ e1000_read_phy_reg(struct e1000_hw *hw,
3522 return ret_val; 3523 return ret_val;
3523} 3524}
3524 3525
3525static int32_t 3526static s32
3526e1000_read_phy_reg_ex(struct e1000_hw *hw, uint32_t reg_addr, 3527e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
3527 uint16_t *phy_data) 3528 u16 *phy_data)
3528{ 3529{
3529 uint32_t i; 3530 u32 i;
3530 uint32_t mdic = 0; 3531 u32 mdic = 0;
3531 const uint32_t phy_addr = 1; 3532 const u32 phy_addr = 1;
3532 3533
3533 DEBUGFUNC("e1000_read_phy_reg_ex"); 3534 DEBUGFUNC("e1000_read_phy_reg_ex");
3534 3535
@@ -3562,7 +3563,7 @@ e1000_read_phy_reg_ex(struct e1000_hw *hw, uint32_t reg_addr,
3562 DEBUGOUT("MDI Error\n"); 3563 DEBUGOUT("MDI Error\n");
3563 return -E1000_ERR_PHY; 3564 return -E1000_ERR_PHY;
3564 } 3565 }
3565 *phy_data = (uint16_t) mdic; 3566 *phy_data = (u16) mdic;
3566 } else { 3567 } else {
3567 /* We must first send a preamble through the MDIO pin to signal the 3568 /* We must first send a preamble through the MDIO pin to signal the
3568 * beginning of an MII instruction. This is done by sending 32 3569 * beginning of an MII instruction. This is done by sending 32
@@ -3602,12 +3603,12 @@ e1000_read_phy_reg_ex(struct e1000_hw *hw, uint32_t reg_addr,
3602* reg_addr - address of the PHY register to write 3603* reg_addr - address of the PHY register to write
3603* data - data to write to the PHY 3604* data - data to write to the PHY
3604******************************************************************************/ 3605******************************************************************************/
3605int32_t 3606s32
3606e1000_write_phy_reg(struct e1000_hw *hw, uint32_t reg_addr, 3607e1000_write_phy_reg(struct e1000_hw *hw, u32 reg_addr,
3607 uint16_t phy_data) 3608 u16 phy_data)
3608{ 3609{
3609 uint32_t ret_val; 3610 u32 ret_val;
3610 uint16_t swfw; 3611 u16 swfw;
3611 3612
3612 DEBUGFUNC("e1000_write_phy_reg"); 3613 DEBUGFUNC("e1000_write_phy_reg");
3613 3614
@@ -3625,7 +3626,7 @@ e1000_write_phy_reg(struct e1000_hw *hw, uint32_t reg_addr,
3625 hw->phy_type == e1000_phy_igp_2) && 3626 hw->phy_type == e1000_phy_igp_2) &&
3626 (reg_addr > MAX_PHY_MULTI_PAGE_REG)) { 3627 (reg_addr > MAX_PHY_MULTI_PAGE_REG)) {
3627 ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT, 3628 ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT,
3628 (uint16_t)reg_addr); 3629 (u16)reg_addr);
3629 if (ret_val) { 3630 if (ret_val) {
3630 e1000_swfw_sync_release(hw, swfw); 3631 e1000_swfw_sync_release(hw, swfw);
3631 return ret_val; 3632 return ret_val;
@@ -3636,14 +3637,14 @@ e1000_write_phy_reg(struct e1000_hw *hw, uint32_t reg_addr,
3636 /* Select Configuration Page */ 3637 /* Select Configuration Page */
3637 if ((reg_addr & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) { 3638 if ((reg_addr & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
3638 ret_val = e1000_write_phy_reg_ex(hw, GG82563_PHY_PAGE_SELECT, 3639 ret_val = e1000_write_phy_reg_ex(hw, GG82563_PHY_PAGE_SELECT,
3639 (uint16_t)((uint16_t)reg_addr >> GG82563_PAGE_SHIFT)); 3640 (u16)((u16)reg_addr >> GG82563_PAGE_SHIFT));
3640 } else { 3641 } else {
3641 /* Use Alternative Page Select register to access 3642 /* Use Alternative Page Select register to access
3642 * registers 30 and 31 3643 * registers 30 and 31
3643 */ 3644 */
3644 ret_val = e1000_write_phy_reg_ex(hw, 3645 ret_val = e1000_write_phy_reg_ex(hw,
3645 GG82563_PHY_PAGE_SELECT_ALT, 3646 GG82563_PHY_PAGE_SELECT_ALT,
3646 (uint16_t)((uint16_t)reg_addr >> GG82563_PAGE_SHIFT)); 3647 (u16)((u16)reg_addr >> GG82563_PAGE_SHIFT));
3647 } 3648 }
3648 3649
3649 if (ret_val) { 3650 if (ret_val) {
@@ -3660,13 +3661,13 @@ e1000_write_phy_reg(struct e1000_hw *hw, uint32_t reg_addr,
3660 return ret_val; 3661 return ret_val;
3661} 3662}
3662 3663
3663static int32_t 3664static s32
3664e1000_write_phy_reg_ex(struct e1000_hw *hw, uint32_t reg_addr, 3665e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
3665 uint16_t phy_data) 3666 u16 phy_data)
3666{ 3667{
3667 uint32_t i; 3668 u32 i;
3668 uint32_t mdic = 0; 3669 u32 mdic = 0;
3669 const uint32_t phy_addr = 1; 3670 const u32 phy_addr = 1;
3670 3671
3671 DEBUGFUNC("e1000_write_phy_reg_ex"); 3672 DEBUGFUNC("e1000_write_phy_reg_ex");
3672 3673
@@ -3680,7 +3681,7 @@ e1000_write_phy_reg_ex(struct e1000_hw *hw, uint32_t reg_addr,
3680 * for the PHY register in the MDI Control register. The MAC will take 3681 * for the PHY register in the MDI Control register. The MAC will take
3681 * care of interfacing with the PHY to send the desired data. 3682 * care of interfacing with the PHY to send the desired data.
3682 */ 3683 */
3683 mdic = (((uint32_t) phy_data) | 3684 mdic = (((u32) phy_data) |
3684 (reg_addr << E1000_MDIC_REG_SHIFT) | 3685 (reg_addr << E1000_MDIC_REG_SHIFT) |
3685 (phy_addr << E1000_MDIC_PHY_SHIFT) | 3686 (phy_addr << E1000_MDIC_PHY_SHIFT) |
3686 (E1000_MDIC_OP_WRITE)); 3687 (E1000_MDIC_OP_WRITE));
@@ -3714,7 +3715,7 @@ e1000_write_phy_reg_ex(struct e1000_hw *hw, uint32_t reg_addr,
3714 mdic = ((PHY_TURNAROUND) | (reg_addr << 2) | (phy_addr << 7) | 3715 mdic = ((PHY_TURNAROUND) | (reg_addr << 2) | (phy_addr << 7) |
3715 (PHY_OP_WRITE << 12) | (PHY_SOF << 14)); 3716 (PHY_OP_WRITE << 12) | (PHY_SOF << 14));
3716 mdic <<= 16; 3717 mdic <<= 16;
3717 mdic |= (uint32_t) phy_data; 3718 mdic |= (u32) phy_data;
3718 3719
3719 e1000_shift_out_mdi_bits(hw, mdic, 32); 3720 e1000_shift_out_mdi_bits(hw, mdic, 32);
3720 } 3721 }
@@ -3722,13 +3723,13 @@ e1000_write_phy_reg_ex(struct e1000_hw *hw, uint32_t reg_addr,
3722 return E1000_SUCCESS; 3723 return E1000_SUCCESS;
3723} 3724}
3724 3725
3725static int32_t 3726static s32
3726e1000_read_kmrn_reg(struct e1000_hw *hw, 3727e1000_read_kmrn_reg(struct e1000_hw *hw,
3727 uint32_t reg_addr, 3728 u32 reg_addr,
3728 uint16_t *data) 3729 u16 *data)
3729{ 3730{
3730 uint32_t reg_val; 3731 u32 reg_val;
3731 uint16_t swfw; 3732 u16 swfw;
3732 DEBUGFUNC("e1000_read_kmrn_reg"); 3733 DEBUGFUNC("e1000_read_kmrn_reg");
3733 3734
3734 if ((hw->mac_type == e1000_80003es2lan) && 3735 if ((hw->mac_type == e1000_80003es2lan) &&
@@ -3749,19 +3750,19 @@ e1000_read_kmrn_reg(struct e1000_hw *hw,
3749 3750
3750 /* Read the data returned */ 3751 /* Read the data returned */
3751 reg_val = E1000_READ_REG(hw, KUMCTRLSTA); 3752 reg_val = E1000_READ_REG(hw, KUMCTRLSTA);
3752 *data = (uint16_t)reg_val; 3753 *data = (u16)reg_val;
3753 3754
3754 e1000_swfw_sync_release(hw, swfw); 3755 e1000_swfw_sync_release(hw, swfw);
3755 return E1000_SUCCESS; 3756 return E1000_SUCCESS;
3756} 3757}
3757 3758
3758static int32_t 3759static s32
3759e1000_write_kmrn_reg(struct e1000_hw *hw, 3760e1000_write_kmrn_reg(struct e1000_hw *hw,
3760 uint32_t reg_addr, 3761 u32 reg_addr,
3761 uint16_t data) 3762 u16 data)
3762{ 3763{
3763 uint32_t reg_val; 3764 u32 reg_val;
3764 uint16_t swfw; 3765 u16 swfw;
3765 DEBUGFUNC("e1000_write_kmrn_reg"); 3766 DEBUGFUNC("e1000_write_kmrn_reg");
3766 3767
3767 if ((hw->mac_type == e1000_80003es2lan) && 3768 if ((hw->mac_type == e1000_80003es2lan) &&
@@ -3787,13 +3788,13 @@ e1000_write_kmrn_reg(struct e1000_hw *hw,
3787* 3788*
3788* hw - Struct containing variables accessed by shared code 3789* hw - Struct containing variables accessed by shared code
3789******************************************************************************/ 3790******************************************************************************/
3790int32_t 3791s32
3791e1000_phy_hw_reset(struct e1000_hw *hw) 3792e1000_phy_hw_reset(struct e1000_hw *hw)
3792{ 3793{
3793 uint32_t ctrl, ctrl_ext; 3794 u32 ctrl, ctrl_ext;
3794 uint32_t led_ctrl; 3795 u32 led_ctrl;
3795 int32_t ret_val; 3796 s32 ret_val;
3796 uint16_t swfw; 3797 u16 swfw;
3797 3798
3798 DEBUGFUNC("e1000_phy_hw_reset"); 3799 DEBUGFUNC("e1000_phy_hw_reset");
3799 3800
@@ -3881,11 +3882,11 @@ e1000_phy_hw_reset(struct e1000_hw *hw)
3881* 3882*
3882* Sets bit 15 of the MII Control register 3883* Sets bit 15 of the MII Control register
3883******************************************************************************/ 3884******************************************************************************/
3884int32_t 3885s32
3885e1000_phy_reset(struct e1000_hw *hw) 3886e1000_phy_reset(struct e1000_hw *hw)
3886{ 3887{
3887 int32_t ret_val; 3888 s32 ret_val;
3888 uint16_t phy_data; 3889 u16 phy_data;
3889 3890
3890 DEBUGFUNC("e1000_phy_reset"); 3891 DEBUGFUNC("e1000_phy_reset");
3891 3892
@@ -3936,9 +3937,9 @@ e1000_phy_reset(struct e1000_hw *hw)
3936void 3937void
3937e1000_phy_powerdown_workaround(struct e1000_hw *hw) 3938e1000_phy_powerdown_workaround(struct e1000_hw *hw)
3938{ 3939{
3939 int32_t reg; 3940 s32 reg;
3940 uint16_t phy_data; 3941 u16 phy_data;
3941 int32_t retry = 0; 3942 s32 retry = 0;
3942 3943
3943 DEBUGFUNC("e1000_phy_powerdown_workaround"); 3944 DEBUGFUNC("e1000_phy_powerdown_workaround");
3944 3945
@@ -3986,13 +3987,13 @@ e1000_phy_powerdown_workaround(struct e1000_hw *hw)
3986* 3987*
3987* hw - struct containing variables accessed by shared code 3988* hw - struct containing variables accessed by shared code
3988******************************************************************************/ 3989******************************************************************************/
3989static int32_t 3990static s32
3990e1000_kumeran_lock_loss_workaround(struct e1000_hw *hw) 3991e1000_kumeran_lock_loss_workaround(struct e1000_hw *hw)
3991{ 3992{
3992 int32_t ret_val; 3993 s32 ret_val;
3993 int32_t reg; 3994 s32 reg;
3994 int32_t cnt; 3995 s32 cnt;
3995 uint16_t phy_data; 3996 u16 phy_data;
3996 3997
3997 if (hw->kmrn_lock_loss_workaround_disabled) 3998 if (hw->kmrn_lock_loss_workaround_disabled)
3998 return E1000_SUCCESS; 3999 return E1000_SUCCESS;
@@ -4039,12 +4040,12 @@ e1000_kumeran_lock_loss_workaround(struct e1000_hw *hw)
4039* 4040*
4040* hw - Struct containing variables accessed by shared code 4041* hw - Struct containing variables accessed by shared code
4041******************************************************************************/ 4042******************************************************************************/
4042static int32_t 4043static s32
4043e1000_detect_gig_phy(struct e1000_hw *hw) 4044e1000_detect_gig_phy(struct e1000_hw *hw)
4044{ 4045{
4045 int32_t phy_init_status, ret_val; 4046 s32 phy_init_status, ret_val;
4046 uint16_t phy_id_high, phy_id_low; 4047 u16 phy_id_high, phy_id_low;
4047 boolean_t match = FALSE; 4048 bool match = false;
4048 4049
4049 DEBUGFUNC("e1000_detect_gig_phy"); 4050 DEBUGFUNC("e1000_detect_gig_phy");
4050 4051
@@ -4075,46 +4076,46 @@ e1000_detect_gig_phy(struct e1000_hw *hw)
4075 if (ret_val) 4076 if (ret_val)
4076 return ret_val; 4077 return ret_val;
4077 4078
4078 hw->phy_id = (uint32_t) (phy_id_high << 16); 4079 hw->phy_id = (u32) (phy_id_high << 16);
4079 udelay(20); 4080 udelay(20);
4080 ret_val = e1000_read_phy_reg(hw, PHY_ID2, &phy_id_low); 4081 ret_val = e1000_read_phy_reg(hw, PHY_ID2, &phy_id_low);
4081 if (ret_val) 4082 if (ret_val)
4082 return ret_val; 4083 return ret_val;
4083 4084
4084 hw->phy_id |= (uint32_t) (phy_id_low & PHY_REVISION_MASK); 4085 hw->phy_id |= (u32) (phy_id_low & PHY_REVISION_MASK);
4085 hw->phy_revision = (uint32_t) phy_id_low & ~PHY_REVISION_MASK; 4086 hw->phy_revision = (u32) phy_id_low & ~PHY_REVISION_MASK;
4086 4087
4087 switch (hw->mac_type) { 4088 switch (hw->mac_type) {
4088 case e1000_82543: 4089 case e1000_82543:
4089 if (hw->phy_id == M88E1000_E_PHY_ID) match = TRUE; 4090 if (hw->phy_id == M88E1000_E_PHY_ID) match = true;
4090 break; 4091 break;
4091 case e1000_82544: 4092 case e1000_82544:
4092 if (hw->phy_id == M88E1000_I_PHY_ID) match = TRUE; 4093 if (hw->phy_id == M88E1000_I_PHY_ID) match = true;
4093 break; 4094 break;
4094 case e1000_82540: 4095 case e1000_82540:
4095 case e1000_82545: 4096 case e1000_82545:
4096 case e1000_82545_rev_3: 4097 case e1000_82545_rev_3:
4097 case e1000_82546: 4098 case e1000_82546:
4098 case e1000_82546_rev_3: 4099 case e1000_82546_rev_3:
4099 if (hw->phy_id == M88E1011_I_PHY_ID) match = TRUE; 4100 if (hw->phy_id == M88E1011_I_PHY_ID) match = true;
4100 break; 4101 break;
4101 case e1000_82541: 4102 case e1000_82541:
4102 case e1000_82541_rev_2: 4103 case e1000_82541_rev_2:
4103 case e1000_82547: 4104 case e1000_82547:
4104 case e1000_82547_rev_2: 4105 case e1000_82547_rev_2:
4105 if (hw->phy_id == IGP01E1000_I_PHY_ID) match = TRUE; 4106 if (hw->phy_id == IGP01E1000_I_PHY_ID) match = true;
4106 break; 4107 break;
4107 case e1000_82573: 4108 case e1000_82573:
4108 if (hw->phy_id == M88E1111_I_PHY_ID) match = TRUE; 4109 if (hw->phy_id == M88E1111_I_PHY_ID) match = true;
4109 break; 4110 break;
4110 case e1000_80003es2lan: 4111 case e1000_80003es2lan:
4111 if (hw->phy_id == GG82563_E_PHY_ID) match = TRUE; 4112 if (hw->phy_id == GG82563_E_PHY_ID) match = true;
4112 break; 4113 break;
4113 case e1000_ich8lan: 4114 case e1000_ich8lan:
4114 if (hw->phy_id == IGP03E1000_E_PHY_ID) match = TRUE; 4115 if (hw->phy_id == IGP03E1000_E_PHY_ID) match = true;
4115 if (hw->phy_id == IFE_E_PHY_ID) match = TRUE; 4116 if (hw->phy_id == IFE_E_PHY_ID) match = true;
4116 if (hw->phy_id == IFE_PLUS_E_PHY_ID) match = TRUE; 4117 if (hw->phy_id == IFE_PLUS_E_PHY_ID) match = true;
4117 if (hw->phy_id == IFE_C_E_PHY_ID) match = TRUE; 4118 if (hw->phy_id == IFE_C_E_PHY_ID) match = true;
4118 break; 4119 break;
4119 default: 4120 default:
4120 DEBUGOUT1("Invalid MAC type %d\n", hw->mac_type); 4121 DEBUGOUT1("Invalid MAC type %d\n", hw->mac_type);
@@ -4135,10 +4136,10 @@ e1000_detect_gig_phy(struct e1000_hw *hw)
4135* 4136*
4136* hw - Struct containing variables accessed by shared code 4137* hw - Struct containing variables accessed by shared code
4137******************************************************************************/ 4138******************************************************************************/
4138static int32_t 4139static s32
4139e1000_phy_reset_dsp(struct e1000_hw *hw) 4140e1000_phy_reset_dsp(struct e1000_hw *hw)
4140{ 4141{
4141 int32_t ret_val; 4142 s32 ret_val;
4142 DEBUGFUNC("e1000_phy_reset_dsp"); 4143 DEBUGFUNC("e1000_phy_reset_dsp");
4143 4144
4144 do { 4145 do {
@@ -4162,12 +4163,12 @@ e1000_phy_reset_dsp(struct e1000_hw *hw)
4162* hw - Struct containing variables accessed by shared code 4163* hw - Struct containing variables accessed by shared code
4163* phy_info - PHY information structure 4164* phy_info - PHY information structure
4164******************************************************************************/ 4165******************************************************************************/
4165static int32_t 4166static s32
4166e1000_phy_igp_get_info(struct e1000_hw *hw, 4167e1000_phy_igp_get_info(struct e1000_hw *hw,
4167 struct e1000_phy_info *phy_info) 4168 struct e1000_phy_info *phy_info)
4168{ 4169{
4169 int32_t ret_val; 4170 s32 ret_val;
4170 uint16_t phy_data, min_length, max_length, average; 4171 u16 phy_data, min_length, max_length, average;
4171 e1000_rev_polarity polarity; 4172 e1000_rev_polarity polarity;
4172 4173
4173 DEBUGFUNC("e1000_phy_igp_get_info"); 4174 DEBUGFUNC("e1000_phy_igp_get_info");
@@ -4239,12 +4240,12 @@ e1000_phy_igp_get_info(struct e1000_hw *hw,
4239* hw - Struct containing variables accessed by shared code 4240* hw - Struct containing variables accessed by shared code
4240* phy_info - PHY information structure 4241* phy_info - PHY information structure
4241******************************************************************************/ 4242******************************************************************************/
4242static int32_t 4243static s32
4243e1000_phy_ife_get_info(struct e1000_hw *hw, 4244e1000_phy_ife_get_info(struct e1000_hw *hw,
4244 struct e1000_phy_info *phy_info) 4245 struct e1000_phy_info *phy_info)
4245{ 4246{
4246 int32_t ret_val; 4247 s32 ret_val;
4247 uint16_t phy_data; 4248 u16 phy_data;
4248 e1000_rev_polarity polarity; 4249 e1000_rev_polarity polarity;
4249 4250
4250 DEBUGFUNC("e1000_phy_ife_get_info"); 4251 DEBUGFUNC("e1000_phy_ife_get_info");
@@ -4289,12 +4290,12 @@ e1000_phy_ife_get_info(struct e1000_hw *hw,
4289* hw - Struct containing variables accessed by shared code 4290* hw - Struct containing variables accessed by shared code
4290* phy_info - PHY information structure 4291* phy_info - PHY information structure
4291******************************************************************************/ 4292******************************************************************************/
4292static int32_t 4293static s32
4293e1000_phy_m88_get_info(struct e1000_hw *hw, 4294e1000_phy_m88_get_info(struct e1000_hw *hw,
4294 struct e1000_phy_info *phy_info) 4295 struct e1000_phy_info *phy_info)
4295{ 4296{
4296 int32_t ret_val; 4297 s32 ret_val;
4297 uint16_t phy_data; 4298 u16 phy_data;
4298 e1000_rev_polarity polarity; 4299 e1000_rev_polarity polarity;
4299 4300
4300 DEBUGFUNC("e1000_phy_m88_get_info"); 4301 DEBUGFUNC("e1000_phy_m88_get_info");
@@ -4368,12 +4369,12 @@ e1000_phy_m88_get_info(struct e1000_hw *hw,
4368* hw - Struct containing variables accessed by shared code 4369* hw - Struct containing variables accessed by shared code
4369* phy_info - PHY information structure 4370* phy_info - PHY information structure
4370******************************************************************************/ 4371******************************************************************************/
4371int32_t 4372s32
4372e1000_phy_get_info(struct e1000_hw *hw, 4373e1000_phy_get_info(struct e1000_hw *hw,
4373 struct e1000_phy_info *phy_info) 4374 struct e1000_phy_info *phy_info)
4374{ 4375{
4375 int32_t ret_val; 4376 s32 ret_val;
4376 uint16_t phy_data; 4377 u16 phy_data;
4377 4378
4378 DEBUGFUNC("e1000_phy_get_info"); 4379 DEBUGFUNC("e1000_phy_get_info");
4379 4380
@@ -4414,7 +4415,7 @@ e1000_phy_get_info(struct e1000_hw *hw,
4414 return e1000_phy_m88_get_info(hw, phy_info); 4415 return e1000_phy_m88_get_info(hw, phy_info);
4415} 4416}
4416 4417
4417int32_t 4418s32
4418e1000_validate_mdi_setting(struct e1000_hw *hw) 4419e1000_validate_mdi_setting(struct e1000_hw *hw)
4419{ 4420{
4420 DEBUGFUNC("e1000_validate_mdi_settings"); 4421 DEBUGFUNC("e1000_validate_mdi_settings");
@@ -4435,13 +4436,13 @@ e1000_validate_mdi_setting(struct e1000_hw *hw)
4435 * 4436 *
4436 * hw - Struct containing variables accessed by shared code 4437 * hw - Struct containing variables accessed by shared code
4437 *****************************************************************************/ 4438 *****************************************************************************/
4438int32_t 4439s32
4439e1000_init_eeprom_params(struct e1000_hw *hw) 4440e1000_init_eeprom_params(struct e1000_hw *hw)
4440{ 4441{
4441 struct e1000_eeprom_info *eeprom = &hw->eeprom; 4442 struct e1000_eeprom_info *eeprom = &hw->eeprom;
4442 uint32_t eecd = E1000_READ_REG(hw, EECD); 4443 u32 eecd = E1000_READ_REG(hw, EECD);
4443 int32_t ret_val = E1000_SUCCESS; 4444 s32 ret_val = E1000_SUCCESS;
4444 uint16_t eeprom_size; 4445 u16 eeprom_size;
4445 4446
4446 DEBUGFUNC("e1000_init_eeprom_params"); 4447 DEBUGFUNC("e1000_init_eeprom_params");
4447 4448
@@ -4455,8 +4456,8 @@ e1000_init_eeprom_params(struct e1000_hw *hw)
4455 eeprom->opcode_bits = 3; 4456 eeprom->opcode_bits = 3;
4456 eeprom->address_bits = 6; 4457 eeprom->address_bits = 6;
4457 eeprom->delay_usec = 50; 4458 eeprom->delay_usec = 50;
4458 eeprom->use_eerd = FALSE; 4459 eeprom->use_eerd = false;
4459 eeprom->use_eewr = FALSE; 4460 eeprom->use_eewr = false;
4460 break; 4461 break;
4461 case e1000_82540: 4462 case e1000_82540:
4462 case e1000_82545: 4463 case e1000_82545:
@@ -4473,8 +4474,8 @@ e1000_init_eeprom_params(struct e1000_hw *hw)
4473 eeprom->word_size = 64; 4474 eeprom->word_size = 64;
4474 eeprom->address_bits = 6; 4475 eeprom->address_bits = 6;
4475 } 4476 }
4476 eeprom->use_eerd = FALSE; 4477 eeprom->use_eerd = false;
4477 eeprom->use_eewr = FALSE; 4478 eeprom->use_eewr = false;
4478 break; 4479 break;
4479 case e1000_82541: 4480 case e1000_82541:
4480 case e1000_82541_rev_2: 4481 case e1000_82541_rev_2:
@@ -4503,8 +4504,8 @@ e1000_init_eeprom_params(struct e1000_hw *hw)
4503 eeprom->address_bits = 6; 4504 eeprom->address_bits = 6;
4504 } 4505 }
4505 } 4506 }
4506 eeprom->use_eerd = FALSE; 4507 eeprom->use_eerd = false;
4507 eeprom->use_eewr = FALSE; 4508 eeprom->use_eewr = false;
4508 break; 4509 break;
4509 case e1000_82571: 4510 case e1000_82571:
4510 case e1000_82572: 4511 case e1000_82572:
@@ -4518,8 +4519,8 @@ e1000_init_eeprom_params(struct e1000_hw *hw)
4518 eeprom->page_size = 8; 4519 eeprom->page_size = 8;
4519 eeprom->address_bits = 8; 4520 eeprom->address_bits = 8;
4520 } 4521 }
4521 eeprom->use_eerd = FALSE; 4522 eeprom->use_eerd = false;
4522 eeprom->use_eewr = FALSE; 4523 eeprom->use_eewr = false;
4523 break; 4524 break;
4524 case e1000_82573: 4525 case e1000_82573:
4525 eeprom->type = e1000_eeprom_spi; 4526 eeprom->type = e1000_eeprom_spi;
@@ -4532,9 +4533,9 @@ e1000_init_eeprom_params(struct e1000_hw *hw)
4532 eeprom->page_size = 8; 4533 eeprom->page_size = 8;
4533 eeprom->address_bits = 8; 4534 eeprom->address_bits = 8;
4534 } 4535 }
4535 eeprom->use_eerd = TRUE; 4536 eeprom->use_eerd = true;
4536 eeprom->use_eewr = TRUE; 4537 eeprom->use_eewr = true;
4537 if (e1000_is_onboard_nvm_eeprom(hw) == FALSE) { 4538 if (!e1000_is_onboard_nvm_eeprom(hw)) {
4538 eeprom->type = e1000_eeprom_flash; 4539 eeprom->type = e1000_eeprom_flash;
4539 eeprom->word_size = 2048; 4540 eeprom->word_size = 2048;
4540 4541
@@ -4555,24 +4556,24 @@ e1000_init_eeprom_params(struct e1000_hw *hw)
4555 eeprom->page_size = 8; 4556 eeprom->page_size = 8;
4556 eeprom->address_bits = 8; 4557 eeprom->address_bits = 8;
4557 } 4558 }
4558 eeprom->use_eerd = TRUE; 4559 eeprom->use_eerd = true;
4559 eeprom->use_eewr = FALSE; 4560 eeprom->use_eewr = false;
4560 break; 4561 break;
4561 case e1000_ich8lan: 4562 case e1000_ich8lan:
4562 { 4563 {
4563 int32_t i = 0; 4564 s32 i = 0;
4564 uint32_t flash_size = E1000_READ_ICH_FLASH_REG(hw, ICH_FLASH_GFPREG); 4565 u32 flash_size = E1000_READ_ICH_FLASH_REG(hw, ICH_FLASH_GFPREG);
4565 4566
4566 eeprom->type = e1000_eeprom_ich8; 4567 eeprom->type = e1000_eeprom_ich8;
4567 eeprom->use_eerd = FALSE; 4568 eeprom->use_eerd = false;
4568 eeprom->use_eewr = FALSE; 4569 eeprom->use_eewr = false;
4569 eeprom->word_size = E1000_SHADOW_RAM_WORDS; 4570 eeprom->word_size = E1000_SHADOW_RAM_WORDS;
4570 4571
4571 /* Zero the shadow RAM structure. But don't load it from NVM 4572 /* Zero the shadow RAM structure. But don't load it from NVM
4572 * so as to save time for driver init */ 4573 * so as to save time for driver init */
4573 if (hw->eeprom_shadow_ram != NULL) { 4574 if (hw->eeprom_shadow_ram != NULL) {
4574 for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) { 4575 for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
4575 hw->eeprom_shadow_ram[i].modified = FALSE; 4576 hw->eeprom_shadow_ram[i].modified = false;
4576 hw->eeprom_shadow_ram[i].eeprom_word = 0xFFFF; 4577 hw->eeprom_shadow_ram[i].eeprom_word = 0xFFFF;
4577 } 4578 }
4578 } 4579 }
@@ -4585,7 +4586,7 @@ e1000_init_eeprom_params(struct e1000_hw *hw)
4585 4586
4586 hw->flash_bank_size *= ICH_FLASH_SECTOR_SIZE; 4587 hw->flash_bank_size *= ICH_FLASH_SECTOR_SIZE;
4587 4588
4588 hw->flash_bank_size /= 2 * sizeof(uint16_t); 4589 hw->flash_bank_size /= 2 * sizeof(u16);
4589 4590
4590 break; 4591 break;
4591 } 4592 }
@@ -4610,7 +4611,7 @@ e1000_init_eeprom_params(struct e1000_hw *hw)
4610 if (eeprom_size) 4611 if (eeprom_size)
4611 eeprom_size++; 4612 eeprom_size++;
4612 } else { 4613 } else {
4613 eeprom_size = (uint16_t)((eecd & E1000_EECD_SIZE_EX_MASK) >> 4614 eeprom_size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
4614 E1000_EECD_SIZE_EX_SHIFT); 4615 E1000_EECD_SIZE_EX_SHIFT);
4615 } 4616 }
4616 4617
@@ -4627,7 +4628,7 @@ e1000_init_eeprom_params(struct e1000_hw *hw)
4627 *****************************************************************************/ 4628 *****************************************************************************/
4628static void 4629static void
4629e1000_raise_ee_clk(struct e1000_hw *hw, 4630e1000_raise_ee_clk(struct e1000_hw *hw,
4630 uint32_t *eecd) 4631 u32 *eecd)
4631{ 4632{
4632 /* Raise the clock input to the EEPROM (by setting the SK bit), and then 4633 /* Raise the clock input to the EEPROM (by setting the SK bit), and then
4633 * wait <delay> microseconds. 4634 * wait <delay> microseconds.
@@ -4646,7 +4647,7 @@ e1000_raise_ee_clk(struct e1000_hw *hw,
4646 *****************************************************************************/ 4647 *****************************************************************************/
4647static void 4648static void
4648e1000_lower_ee_clk(struct e1000_hw *hw, 4649e1000_lower_ee_clk(struct e1000_hw *hw,
4649 uint32_t *eecd) 4650 u32 *eecd)
4650{ 4651{
4651 /* Lower the clock input to the EEPROM (by clearing the SK bit), and then 4652 /* Lower the clock input to the EEPROM (by clearing the SK bit), and then
4652 * wait 50 microseconds. 4653 * wait 50 microseconds.
@@ -4666,12 +4667,12 @@ e1000_lower_ee_clk(struct e1000_hw *hw,
4666 *****************************************************************************/ 4667 *****************************************************************************/
4667static void 4668static void
4668e1000_shift_out_ee_bits(struct e1000_hw *hw, 4669e1000_shift_out_ee_bits(struct e1000_hw *hw,
4669 uint16_t data, 4670 u16 data,
4670 uint16_t count) 4671 u16 count)
4671{ 4672{
4672 struct e1000_eeprom_info *eeprom = &hw->eeprom; 4673 struct e1000_eeprom_info *eeprom = &hw->eeprom;
4673 uint32_t eecd; 4674 u32 eecd;
4674 uint32_t mask; 4675 u32 mask;
4675 4676
4676 /* We need to shift "count" bits out to the EEPROM. So, value in the 4677 /* We need to shift "count" bits out to the EEPROM. So, value in the
4677 * "data" parameter will be shifted out to the EEPROM one bit at a time. 4678 * "data" parameter will be shifted out to the EEPROM one bit at a time.
@@ -4717,13 +4718,13 @@ e1000_shift_out_ee_bits(struct e1000_hw *hw,
4717 * 4718 *
4718 * hw - Struct containing variables accessed by shared code 4719 * hw - Struct containing variables accessed by shared code
4719 *****************************************************************************/ 4720 *****************************************************************************/
4720static uint16_t 4721static u16
4721e1000_shift_in_ee_bits(struct e1000_hw *hw, 4722e1000_shift_in_ee_bits(struct e1000_hw *hw,
4722 uint16_t count) 4723 u16 count)
4723{ 4724{
4724 uint32_t eecd; 4725 u32 eecd;
4725 uint32_t i; 4726 u32 i;
4726 uint16_t data; 4727 u16 data;
4727 4728
4728 /* In order to read a register from the EEPROM, we need to shift 'count' 4729 /* In order to read a register from the EEPROM, we need to shift 'count'
4729 * bits in from the EEPROM. Bits are "shifted in" by raising the clock 4730 * bits in from the EEPROM. Bits are "shifted in" by raising the clock
@@ -4761,11 +4762,11 @@ e1000_shift_in_ee_bits(struct e1000_hw *hw,
4761 * Lowers EEPROM clock. Clears input pin. Sets the chip select pin. This 4762 * Lowers EEPROM clock. Clears input pin. Sets the chip select pin. This
4762 * function should be called before issuing a command to the EEPROM. 4763 * function should be called before issuing a command to the EEPROM.
4763 *****************************************************************************/ 4764 *****************************************************************************/
4764static int32_t 4765static s32
4765e1000_acquire_eeprom(struct e1000_hw *hw) 4766e1000_acquire_eeprom(struct e1000_hw *hw)
4766{ 4767{
4767 struct e1000_eeprom_info *eeprom = &hw->eeprom; 4768 struct e1000_eeprom_info *eeprom = &hw->eeprom;
4768 uint32_t eecd, i=0; 4769 u32 eecd, i=0;
4769 4770
4770 DEBUGFUNC("e1000_acquire_eeprom"); 4771 DEBUGFUNC("e1000_acquire_eeprom");
4771 4772
@@ -4824,7 +4825,7 @@ static void
4824e1000_standby_eeprom(struct e1000_hw *hw) 4825e1000_standby_eeprom(struct e1000_hw *hw)
4825{ 4826{
4826 struct e1000_eeprom_info *eeprom = &hw->eeprom; 4827 struct e1000_eeprom_info *eeprom = &hw->eeprom;
4827 uint32_t eecd; 4828 u32 eecd;
4828 4829
4829 eecd = E1000_READ_REG(hw, EECD); 4830 eecd = E1000_READ_REG(hw, EECD);
4830 4831
@@ -4872,7 +4873,7 @@ e1000_standby_eeprom(struct e1000_hw *hw)
4872static void 4873static void
4873e1000_release_eeprom(struct e1000_hw *hw) 4874e1000_release_eeprom(struct e1000_hw *hw)
4874{ 4875{
4875 uint32_t eecd; 4876 u32 eecd;
4876 4877
4877 DEBUGFUNC("e1000_release_eeprom"); 4878 DEBUGFUNC("e1000_release_eeprom");
4878 4879
@@ -4920,11 +4921,11 @@ e1000_release_eeprom(struct e1000_hw *hw)
4920 * 4921 *
4921 * hw - Struct containing variables accessed by shared code 4922 * hw - Struct containing variables accessed by shared code
4922 *****************************************************************************/ 4923 *****************************************************************************/
4923static int32_t 4924static s32
4924e1000_spi_eeprom_ready(struct e1000_hw *hw) 4925e1000_spi_eeprom_ready(struct e1000_hw *hw)
4925{ 4926{
4926 uint16_t retry_count = 0; 4927 u16 retry_count = 0;
4927 uint8_t spi_stat_reg; 4928 u8 spi_stat_reg;
4928 4929
4929 DEBUGFUNC("e1000_spi_eeprom_ready"); 4930 DEBUGFUNC("e1000_spi_eeprom_ready");
4930 4931
@@ -4937,7 +4938,7 @@ e1000_spi_eeprom_ready(struct e1000_hw *hw)
4937 do { 4938 do {
4938 e1000_shift_out_ee_bits(hw, EEPROM_RDSR_OPCODE_SPI, 4939 e1000_shift_out_ee_bits(hw, EEPROM_RDSR_OPCODE_SPI,
4939 hw->eeprom.opcode_bits); 4940 hw->eeprom.opcode_bits);
4940 spi_stat_reg = (uint8_t)e1000_shift_in_ee_bits(hw, 8); 4941 spi_stat_reg = (u8)e1000_shift_in_ee_bits(hw, 8);
4941 if (!(spi_stat_reg & EEPROM_STATUS_RDY_SPI)) 4942 if (!(spi_stat_reg & EEPROM_STATUS_RDY_SPI))
4942 break; 4943 break;
4943 4944
@@ -4966,14 +4967,14 @@ e1000_spi_eeprom_ready(struct e1000_hw *hw)
4966 * data - word read from the EEPROM 4967 * data - word read from the EEPROM
4967 * words - number of words to read 4968 * words - number of words to read
4968 *****************************************************************************/ 4969 *****************************************************************************/
4969int32_t 4970s32
4970e1000_read_eeprom(struct e1000_hw *hw, 4971e1000_read_eeprom(struct e1000_hw *hw,
4971 uint16_t offset, 4972 u16 offset,
4972 uint16_t words, 4973 u16 words,
4973 uint16_t *data) 4974 u16 *data)
4974{ 4975{
4975 struct e1000_eeprom_info *eeprom = &hw->eeprom; 4976 struct e1000_eeprom_info *eeprom = &hw->eeprom;
4976 uint32_t i = 0; 4977 u32 i = 0;
4977 4978
4978 DEBUGFUNC("e1000_read_eeprom"); 4979 DEBUGFUNC("e1000_read_eeprom");
4979 4980
@@ -4994,15 +4995,14 @@ e1000_read_eeprom(struct e1000_hw *hw,
4994 * directly. In this case, we need to acquire the EEPROM so that 4995 * directly. In this case, we need to acquire the EEPROM so that
4995 * FW or other port software does not interrupt. 4996 * FW or other port software does not interrupt.
4996 */ 4997 */
4997 if (e1000_is_onboard_nvm_eeprom(hw) == TRUE && 4998 if (e1000_is_onboard_nvm_eeprom(hw) && !hw->eeprom.use_eerd) {
4998 hw->eeprom.use_eerd == FALSE) {
4999 /* Prepare the EEPROM for bit-bang reading */ 4999 /* Prepare the EEPROM for bit-bang reading */
5000 if (e1000_acquire_eeprom(hw) != E1000_SUCCESS) 5000 if (e1000_acquire_eeprom(hw) != E1000_SUCCESS)
5001 return -E1000_ERR_EEPROM; 5001 return -E1000_ERR_EEPROM;
5002 } 5002 }
5003 5003
5004 /* Eerd register EEPROM access requires no eeprom aquire/release */ 5004 /* Eerd register EEPROM access requires no eeprom aquire/release */
5005 if (eeprom->use_eerd == TRUE) 5005 if (eeprom->use_eerd)
5006 return e1000_read_eeprom_eerd(hw, offset, words, data); 5006 return e1000_read_eeprom_eerd(hw, offset, words, data);
5007 5007
5008 /* ICH EEPROM access is done via the ICH flash controller */ 5008 /* ICH EEPROM access is done via the ICH flash controller */
@@ -5012,8 +5012,8 @@ e1000_read_eeprom(struct e1000_hw *hw,
5012 /* Set up the SPI or Microwire EEPROM for bit-bang reading. We have 5012 /* Set up the SPI or Microwire EEPROM for bit-bang reading. We have
5013 * acquired the EEPROM at this point, so any returns should relase it */ 5013 * acquired the EEPROM at this point, so any returns should relase it */
5014 if (eeprom->type == e1000_eeprom_spi) { 5014 if (eeprom->type == e1000_eeprom_spi) {
5015 uint16_t word_in; 5015 u16 word_in;
5016 uint8_t read_opcode = EEPROM_READ_OPCODE_SPI; 5016 u8 read_opcode = EEPROM_READ_OPCODE_SPI;
5017 5017
5018 if (e1000_spi_eeprom_ready(hw)) { 5018 if (e1000_spi_eeprom_ready(hw)) {
5019 e1000_release_eeprom(hw); 5019 e1000_release_eeprom(hw);
@@ -5028,7 +5028,7 @@ e1000_read_eeprom(struct e1000_hw *hw,
5028 5028
5029 /* Send the READ command (opcode + addr) */ 5029 /* Send the READ command (opcode + addr) */
5030 e1000_shift_out_ee_bits(hw, read_opcode, eeprom->opcode_bits); 5030 e1000_shift_out_ee_bits(hw, read_opcode, eeprom->opcode_bits);
5031 e1000_shift_out_ee_bits(hw, (uint16_t)(offset*2), eeprom->address_bits); 5031 e1000_shift_out_ee_bits(hw, (u16)(offset*2), eeprom->address_bits);
5032 5032
5033 /* Read the data. The address of the eeprom internally increments with 5033 /* Read the data. The address of the eeprom internally increments with
5034 * each byte (spi) being read, saving on the overhead of eeprom setup 5034 * each byte (spi) being read, saving on the overhead of eeprom setup
@@ -5044,7 +5044,7 @@ e1000_read_eeprom(struct e1000_hw *hw,
5044 /* Send the READ command (opcode + addr) */ 5044 /* Send the READ command (opcode + addr) */
5045 e1000_shift_out_ee_bits(hw, EEPROM_READ_OPCODE_MICROWIRE, 5045 e1000_shift_out_ee_bits(hw, EEPROM_READ_OPCODE_MICROWIRE,
5046 eeprom->opcode_bits); 5046 eeprom->opcode_bits);
5047 e1000_shift_out_ee_bits(hw, (uint16_t)(offset + i), 5047 e1000_shift_out_ee_bits(hw, (u16)(offset + i),
5048 eeprom->address_bits); 5048 eeprom->address_bits);
5049 5049
5050 /* Read the data. For microwire, each word requires the overhead 5050 /* Read the data. For microwire, each word requires the overhead
@@ -5068,14 +5068,14 @@ e1000_read_eeprom(struct e1000_hw *hw,
5068 * data - word read from the EEPROM 5068 * data - word read from the EEPROM
5069 * words - number of words to read 5069 * words - number of words to read
5070 *****************************************************************************/ 5070 *****************************************************************************/
5071static int32_t 5071static s32
5072e1000_read_eeprom_eerd(struct e1000_hw *hw, 5072e1000_read_eeprom_eerd(struct e1000_hw *hw,
5073 uint16_t offset, 5073 u16 offset,
5074 uint16_t words, 5074 u16 words,
5075 uint16_t *data) 5075 u16 *data)
5076{ 5076{
5077 uint32_t i, eerd = 0; 5077 u32 i, eerd = 0;
5078 int32_t error = 0; 5078 s32 error = 0;
5079 5079
5080 for (i = 0; i < words; i++) { 5080 for (i = 0; i < words; i++) {
5081 eerd = ((offset+i) << E1000_EEPROM_RW_ADDR_SHIFT) + 5081 eerd = ((offset+i) << E1000_EEPROM_RW_ADDR_SHIFT) +
@@ -5102,15 +5102,15 @@ e1000_read_eeprom_eerd(struct e1000_hw *hw,
5102 * data - word read from the EEPROM 5102 * data - word read from the EEPROM
5103 * words - number of words to read 5103 * words - number of words to read
5104 *****************************************************************************/ 5104 *****************************************************************************/
5105static int32_t 5105static s32
5106e1000_write_eeprom_eewr(struct e1000_hw *hw, 5106e1000_write_eeprom_eewr(struct e1000_hw *hw,
5107 uint16_t offset, 5107 u16 offset,
5108 uint16_t words, 5108 u16 words,
5109 uint16_t *data) 5109 u16 *data)
5110{ 5110{
5111 uint32_t register_value = 0; 5111 u32 register_value = 0;
5112 uint32_t i = 0; 5112 u32 i = 0;
5113 int32_t error = 0; 5113 s32 error = 0;
5114 5114
5115 if (e1000_swfw_sync_acquire(hw, E1000_SWFW_EEP_SM)) 5115 if (e1000_swfw_sync_acquire(hw, E1000_SWFW_EEP_SM))
5116 return -E1000_ERR_SWFW_SYNC; 5116 return -E1000_ERR_SWFW_SYNC;
@@ -5143,12 +5143,12 @@ e1000_write_eeprom_eewr(struct e1000_hw *hw,
5143 * 5143 *
5144 * hw - Struct containing variables accessed by shared code 5144 * hw - Struct containing variables accessed by shared code
5145 *****************************************************************************/ 5145 *****************************************************************************/
5146static int32_t 5146static s32
5147e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int eerd) 5147e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int eerd)
5148{ 5148{
5149 uint32_t attempts = 100000; 5149 u32 attempts = 100000;
5150 uint32_t i, reg = 0; 5150 u32 i, reg = 0;
5151 int32_t done = E1000_ERR_EEPROM; 5151 s32 done = E1000_ERR_EEPROM;
5152 5152
5153 for (i = 0; i < attempts; i++) { 5153 for (i = 0; i < attempts; i++) {
5154 if (eerd == E1000_EEPROM_POLL_READ) 5154 if (eerd == E1000_EEPROM_POLL_READ)
@@ -5171,15 +5171,15 @@ e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int eerd)
5171* 5171*
5172* hw - Struct containing variables accessed by shared code 5172* hw - Struct containing variables accessed by shared code
5173****************************************************************************/ 5173****************************************************************************/
5174static boolean_t 5174static bool
5175e1000_is_onboard_nvm_eeprom(struct e1000_hw *hw) 5175e1000_is_onboard_nvm_eeprom(struct e1000_hw *hw)
5176{ 5176{
5177 uint32_t eecd = 0; 5177 u32 eecd = 0;
5178 5178
5179 DEBUGFUNC("e1000_is_onboard_nvm_eeprom"); 5179 DEBUGFUNC("e1000_is_onboard_nvm_eeprom");
5180 5180
5181 if (hw->mac_type == e1000_ich8lan) 5181 if (hw->mac_type == e1000_ich8lan)
5182 return FALSE; 5182 return false;
5183 5183
5184 if (hw->mac_type == e1000_82573) { 5184 if (hw->mac_type == e1000_82573) {
5185 eecd = E1000_READ_REG(hw, EECD); 5185 eecd = E1000_READ_REG(hw, EECD);
@@ -5189,10 +5189,10 @@ e1000_is_onboard_nvm_eeprom(struct e1000_hw *hw)
5189 5189
5190 /* If both bits are set, device is Flash type */ 5190 /* If both bits are set, device is Flash type */
5191 if (eecd == 0x03) { 5191 if (eecd == 0x03) {
5192 return FALSE; 5192 return false;
5193 } 5193 }
5194 } 5194 }
5195 return TRUE; 5195 return true;
5196} 5196}
5197 5197
5198/****************************************************************************** 5198/******************************************************************************
@@ -5204,16 +5204,15 @@ e1000_is_onboard_nvm_eeprom(struct e1000_hw *hw)
5204 * If the the sum of the 64 16 bit words is 0xBABA, the EEPROM's checksum is 5204 * If the the sum of the 64 16 bit words is 0xBABA, the EEPROM's checksum is
5205 * valid. 5205 * valid.
5206 *****************************************************************************/ 5206 *****************************************************************************/
5207int32_t 5207s32
5208e1000_validate_eeprom_checksum(struct e1000_hw *hw) 5208e1000_validate_eeprom_checksum(struct e1000_hw *hw)
5209{ 5209{
5210 uint16_t checksum = 0; 5210 u16 checksum = 0;
5211 uint16_t i, eeprom_data; 5211 u16 i, eeprom_data;
5212 5212
5213 DEBUGFUNC("e1000_validate_eeprom_checksum"); 5213 DEBUGFUNC("e1000_validate_eeprom_checksum");
5214 5214
5215 if ((hw->mac_type == e1000_82573) && 5215 if ((hw->mac_type == e1000_82573) && !e1000_is_onboard_nvm_eeprom(hw)) {
5216 (e1000_is_onboard_nvm_eeprom(hw) == FALSE)) {
5217 /* Check bit 4 of word 10h. If it is 0, firmware is done updating 5216 /* Check bit 4 of word 10h. If it is 0, firmware is done updating
5218 * 10h-12h. Checksum may need to be fixed. */ 5217 * 10h-12h. Checksum may need to be fixed. */
5219 e1000_read_eeprom(hw, 0x10, 1, &eeprom_data); 5218 e1000_read_eeprom(hw, 0x10, 1, &eeprom_data);
@@ -5253,7 +5252,7 @@ e1000_validate_eeprom_checksum(struct e1000_hw *hw)
5253 checksum += eeprom_data; 5252 checksum += eeprom_data;
5254 } 5253 }
5255 5254
5256 if (checksum == (uint16_t) EEPROM_SUM) 5255 if (checksum == (u16) EEPROM_SUM)
5257 return E1000_SUCCESS; 5256 return E1000_SUCCESS;
5258 else { 5257 else {
5259 DEBUGOUT("EEPROM Checksum Invalid\n"); 5258 DEBUGOUT("EEPROM Checksum Invalid\n");
@@ -5269,12 +5268,12 @@ e1000_validate_eeprom_checksum(struct e1000_hw *hw)
5269 * Sums the first 63 16 bit words of the EEPROM. Subtracts the sum from 0xBABA. 5268 * Sums the first 63 16 bit words of the EEPROM. Subtracts the sum from 0xBABA.
5270 * Writes the difference to word offset 63 of the EEPROM. 5269 * Writes the difference to word offset 63 of the EEPROM.
5271 *****************************************************************************/ 5270 *****************************************************************************/
5272int32_t 5271s32
5273e1000_update_eeprom_checksum(struct e1000_hw *hw) 5272e1000_update_eeprom_checksum(struct e1000_hw *hw)
5274{ 5273{
5275 uint32_t ctrl_ext; 5274 u32 ctrl_ext;
5276 uint16_t checksum = 0; 5275 u16 checksum = 0;
5277 uint16_t i, eeprom_data; 5276 u16 i, eeprom_data;
5278 5277
5279 DEBUGFUNC("e1000_update_eeprom_checksum"); 5278 DEBUGFUNC("e1000_update_eeprom_checksum");
5280 5279
@@ -5285,7 +5284,7 @@ e1000_update_eeprom_checksum(struct e1000_hw *hw)
5285 } 5284 }
5286 checksum += eeprom_data; 5285 checksum += eeprom_data;
5287 } 5286 }
5288 checksum = (uint16_t) EEPROM_SUM - checksum; 5287 checksum = (u16) EEPROM_SUM - checksum;
5289 if (e1000_write_eeprom(hw, EEPROM_CHECKSUM_REG, 1, &checksum) < 0) { 5288 if (e1000_write_eeprom(hw, EEPROM_CHECKSUM_REG, 1, &checksum) < 0) {
5290 DEBUGOUT("EEPROM Write Error\n"); 5289 DEBUGOUT("EEPROM Write Error\n");
5291 return -E1000_ERR_EEPROM; 5290 return -E1000_ERR_EEPROM;
@@ -5314,14 +5313,14 @@ e1000_update_eeprom_checksum(struct e1000_hw *hw)
5314 * If e1000_update_eeprom_checksum is not called after this function, the 5313 * If e1000_update_eeprom_checksum is not called after this function, the
5315 * EEPROM will most likely contain an invalid checksum. 5314 * EEPROM will most likely contain an invalid checksum.
5316 *****************************************************************************/ 5315 *****************************************************************************/
5317int32_t 5316s32
5318e1000_write_eeprom(struct e1000_hw *hw, 5317e1000_write_eeprom(struct e1000_hw *hw,
5319 uint16_t offset, 5318 u16 offset,
5320 uint16_t words, 5319 u16 words,
5321 uint16_t *data) 5320 u16 *data)
5322{ 5321{
5323 struct e1000_eeprom_info *eeprom = &hw->eeprom; 5322 struct e1000_eeprom_info *eeprom = &hw->eeprom;
5324 int32_t status = 0; 5323 s32 status = 0;
5325 5324
5326 DEBUGFUNC("e1000_write_eeprom"); 5325 DEBUGFUNC("e1000_write_eeprom");
5327 5326
@@ -5339,7 +5338,7 @@ e1000_write_eeprom(struct e1000_hw *hw,
5339 } 5338 }
5340 5339
5341 /* 82573 writes only through eewr */ 5340 /* 82573 writes only through eewr */
5342 if (eeprom->use_eewr == TRUE) 5341 if (eeprom->use_eewr)
5343 return e1000_write_eeprom_eewr(hw, offset, words, data); 5342 return e1000_write_eeprom_eewr(hw, offset, words, data);
5344 5343
5345 if (eeprom->type == e1000_eeprom_ich8) 5344 if (eeprom->type == e1000_eeprom_ich8)
@@ -5371,19 +5370,19 @@ e1000_write_eeprom(struct e1000_hw *hw,
5371 * data - pointer to array of 8 bit words to be written to the EEPROM 5370 * data - pointer to array of 8 bit words to be written to the EEPROM
5372 * 5371 *
5373 *****************************************************************************/ 5372 *****************************************************************************/
5374static int32_t 5373static s32
5375e1000_write_eeprom_spi(struct e1000_hw *hw, 5374e1000_write_eeprom_spi(struct e1000_hw *hw,
5376 uint16_t offset, 5375 u16 offset,
5377 uint16_t words, 5376 u16 words,
5378 uint16_t *data) 5377 u16 *data)
5379{ 5378{
5380 struct e1000_eeprom_info *eeprom = &hw->eeprom; 5379 struct e1000_eeprom_info *eeprom = &hw->eeprom;
5381 uint16_t widx = 0; 5380 u16 widx = 0;
5382 5381
5383 DEBUGFUNC("e1000_write_eeprom_spi"); 5382 DEBUGFUNC("e1000_write_eeprom_spi");
5384 5383
5385 while (widx < words) { 5384 while (widx < words) {
5386 uint8_t write_opcode = EEPROM_WRITE_OPCODE_SPI; 5385 u8 write_opcode = EEPROM_WRITE_OPCODE_SPI;
5387 5386
5388 if (e1000_spi_eeprom_ready(hw)) return -E1000_ERR_EEPROM; 5387 if (e1000_spi_eeprom_ready(hw)) return -E1000_ERR_EEPROM;
5389 5388
@@ -5402,14 +5401,14 @@ e1000_write_eeprom_spi(struct e1000_hw *hw,
5402 /* Send the Write command (8-bit opcode + addr) */ 5401 /* Send the Write command (8-bit opcode + addr) */
5403 e1000_shift_out_ee_bits(hw, write_opcode, eeprom->opcode_bits); 5402 e1000_shift_out_ee_bits(hw, write_opcode, eeprom->opcode_bits);
5404 5403
5405 e1000_shift_out_ee_bits(hw, (uint16_t)((offset + widx)*2), 5404 e1000_shift_out_ee_bits(hw, (u16)((offset + widx)*2),
5406 eeprom->address_bits); 5405 eeprom->address_bits);
5407 5406
5408 /* Send the data */ 5407 /* Send the data */
5409 5408
5410 /* Loop to allow for up to whole page write (32 bytes) of eeprom */ 5409 /* Loop to allow for up to whole page write (32 bytes) of eeprom */
5411 while (widx < words) { 5410 while (widx < words) {
5412 uint16_t word_out = data[widx]; 5411 u16 word_out = data[widx];
5413 word_out = (word_out >> 8) | (word_out << 8); 5412 word_out = (word_out >> 8) | (word_out << 8);
5414 e1000_shift_out_ee_bits(hw, word_out, 16); 5413 e1000_shift_out_ee_bits(hw, word_out, 16);
5415 widx++; 5414 widx++;
@@ -5437,16 +5436,16 @@ e1000_write_eeprom_spi(struct e1000_hw *hw,
5437 * data - pointer to array of 16 bit words to be written to the EEPROM 5436 * data - pointer to array of 16 bit words to be written to the EEPROM
5438 * 5437 *
5439 *****************************************************************************/ 5438 *****************************************************************************/
5440static int32_t 5439static s32
5441e1000_write_eeprom_microwire(struct e1000_hw *hw, 5440e1000_write_eeprom_microwire(struct e1000_hw *hw,
5442 uint16_t offset, 5441 u16 offset,
5443 uint16_t words, 5442 u16 words,
5444 uint16_t *data) 5443 u16 *data)
5445{ 5444{
5446 struct e1000_eeprom_info *eeprom = &hw->eeprom; 5445 struct e1000_eeprom_info *eeprom = &hw->eeprom;
5447 uint32_t eecd; 5446 u32 eecd;
5448 uint16_t words_written = 0; 5447 u16 words_written = 0;
5449 uint16_t i = 0; 5448 u16 i = 0;
5450 5449
5451 DEBUGFUNC("e1000_write_eeprom_microwire"); 5450 DEBUGFUNC("e1000_write_eeprom_microwire");
5452 5451
@@ -5457,9 +5456,9 @@ e1000_write_eeprom_microwire(struct e1000_hw *hw,
5457 * EEPROM into write/erase mode. 5456 * EEPROM into write/erase mode.
5458 */ 5457 */
5459 e1000_shift_out_ee_bits(hw, EEPROM_EWEN_OPCODE_MICROWIRE, 5458 e1000_shift_out_ee_bits(hw, EEPROM_EWEN_OPCODE_MICROWIRE,
5460 (uint16_t)(eeprom->opcode_bits + 2)); 5459 (u16)(eeprom->opcode_bits + 2));
5461 5460
5462 e1000_shift_out_ee_bits(hw, 0, (uint16_t)(eeprom->address_bits - 2)); 5461 e1000_shift_out_ee_bits(hw, 0, (u16)(eeprom->address_bits - 2));
5463 5462
5464 /* Prepare the EEPROM */ 5463 /* Prepare the EEPROM */
5465 e1000_standby_eeprom(hw); 5464 e1000_standby_eeprom(hw);
@@ -5469,7 +5468,7 @@ e1000_write_eeprom_microwire(struct e1000_hw *hw,
5469 e1000_shift_out_ee_bits(hw, EEPROM_WRITE_OPCODE_MICROWIRE, 5468 e1000_shift_out_ee_bits(hw, EEPROM_WRITE_OPCODE_MICROWIRE,
5470 eeprom->opcode_bits); 5469 eeprom->opcode_bits);
5471 5470
5472 e1000_shift_out_ee_bits(hw, (uint16_t)(offset + words_written), 5471 e1000_shift_out_ee_bits(hw, (u16)(offset + words_written),
5473 eeprom->address_bits); 5472 eeprom->address_bits);
5474 5473
5475 /* Send the data */ 5474 /* Send the data */
@@ -5507,9 +5506,9 @@ e1000_write_eeprom_microwire(struct e1000_hw *hw,
5507 * EEPROM out of write/erase mode. 5506 * EEPROM out of write/erase mode.
5508 */ 5507 */
5509 e1000_shift_out_ee_bits(hw, EEPROM_EWDS_OPCODE_MICROWIRE, 5508 e1000_shift_out_ee_bits(hw, EEPROM_EWDS_OPCODE_MICROWIRE,
5510 (uint16_t)(eeprom->opcode_bits + 2)); 5509 (u16)(eeprom->opcode_bits + 2));
5511 5510
5512 e1000_shift_out_ee_bits(hw, 0, (uint16_t)(eeprom->address_bits - 2)); 5511 e1000_shift_out_ee_bits(hw, 0, (u16)(eeprom->address_bits - 2));
5513 5512
5514 return E1000_SUCCESS; 5513 return E1000_SUCCESS;
5515} 5514}
@@ -5524,19 +5523,19 @@ e1000_write_eeprom_microwire(struct e1000_hw *hw,
5524 * data - word read from the EEPROM 5523 * data - word read from the EEPROM
5525 * words - number of words to read 5524 * words - number of words to read
5526 *****************************************************************************/ 5525 *****************************************************************************/
5527static int32_t 5526static s32
5528e1000_commit_shadow_ram(struct e1000_hw *hw) 5527e1000_commit_shadow_ram(struct e1000_hw *hw)
5529{ 5528{
5530 uint32_t attempts = 100000; 5529 u32 attempts = 100000;
5531 uint32_t eecd = 0; 5530 u32 eecd = 0;
5532 uint32_t flop = 0; 5531 u32 flop = 0;
5533 uint32_t i = 0; 5532 u32 i = 0;
5534 int32_t error = E1000_SUCCESS; 5533 s32 error = E1000_SUCCESS;
5535 uint32_t old_bank_offset = 0; 5534 u32 old_bank_offset = 0;
5536 uint32_t new_bank_offset = 0; 5535 u32 new_bank_offset = 0;
5537 uint8_t low_byte = 0; 5536 u8 low_byte = 0;
5538 uint8_t high_byte = 0; 5537 u8 high_byte = 0;
5539 boolean_t sector_write_failed = FALSE; 5538 bool sector_write_failed = false;
5540 5539
5541 if (hw->mac_type == e1000_82573) { 5540 if (hw->mac_type == e1000_82573) {
5542 /* The flop register will be used to determine if flash type is STM */ 5541 /* The flop register will be used to determine if flash type is STM */
@@ -5588,24 +5587,24 @@ e1000_commit_shadow_ram(struct e1000_hw *hw)
5588 e1000_erase_ich8_4k_segment(hw, 0); 5587 e1000_erase_ich8_4k_segment(hw, 0);
5589 } 5588 }
5590 5589
5591 sector_write_failed = FALSE; 5590 sector_write_failed = false;
5592 /* Loop for every byte in the shadow RAM, 5591 /* Loop for every byte in the shadow RAM,
5593 * which is in units of words. */ 5592 * which is in units of words. */
5594 for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) { 5593 for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
5595 /* Determine whether to write the value stored 5594 /* Determine whether to write the value stored
5596 * in the other NVM bank or a modified value stored 5595 * in the other NVM bank or a modified value stored
5597 * in the shadow RAM */ 5596 * in the shadow RAM */
5598 if (hw->eeprom_shadow_ram[i].modified == TRUE) { 5597 if (hw->eeprom_shadow_ram[i].modified) {
5599 low_byte = (uint8_t)hw->eeprom_shadow_ram[i].eeprom_word; 5598 low_byte = (u8)hw->eeprom_shadow_ram[i].eeprom_word;
5600 udelay(100); 5599 udelay(100);
5601 error = e1000_verify_write_ich8_byte(hw, 5600 error = e1000_verify_write_ich8_byte(hw,
5602 (i << 1) + new_bank_offset, low_byte); 5601 (i << 1) + new_bank_offset, low_byte);
5603 5602
5604 if (error != E1000_SUCCESS) 5603 if (error != E1000_SUCCESS)
5605 sector_write_failed = TRUE; 5604 sector_write_failed = true;
5606 else { 5605 else {
5607 high_byte = 5606 high_byte =
5608 (uint8_t)(hw->eeprom_shadow_ram[i].eeprom_word >> 8); 5607 (u8)(hw->eeprom_shadow_ram[i].eeprom_word >> 8);
5609 udelay(100); 5608 udelay(100);
5610 } 5609 }
5611 } else { 5610 } else {
@@ -5616,7 +5615,7 @@ e1000_commit_shadow_ram(struct e1000_hw *hw)
5616 (i << 1) + new_bank_offset, low_byte); 5615 (i << 1) + new_bank_offset, low_byte);
5617 5616
5618 if (error != E1000_SUCCESS) 5617 if (error != E1000_SUCCESS)
5619 sector_write_failed = TRUE; 5618 sector_write_failed = true;
5620 else { 5619 else {
5621 e1000_read_ich8_byte(hw, (i << 1) + old_bank_offset + 1, 5620 e1000_read_ich8_byte(hw, (i << 1) + old_bank_offset + 1,
5622 &high_byte); 5621 &high_byte);
@@ -5624,10 +5623,10 @@ e1000_commit_shadow_ram(struct e1000_hw *hw)
5624 } 5623 }
5625 } 5624 }
5626 5625
5627 /* If the write of the low byte was successful, go ahread and 5626 /* If the write of the low byte was successful, go ahead and
5628 * write the high byte while checking to make sure that if it 5627 * write the high byte while checking to make sure that if it
5629 * is the signature byte, then it is handled properly */ 5628 * is the signature byte, then it is handled properly */
5630 if (sector_write_failed == FALSE) { 5629 if (!sector_write_failed) {
5631 /* If the word is 0x13, then make sure the signature bits 5630 /* If the word is 0x13, then make sure the signature bits
5632 * (15:14) are 11b until the commit has completed. 5631 * (15:14) are 11b until the commit has completed.
5633 * This will allow us to write 10b which indicates the 5632 * This will allow us to write 10b which indicates the
@@ -5640,7 +5639,7 @@ e1000_commit_shadow_ram(struct e1000_hw *hw)
5640 error = e1000_verify_write_ich8_byte(hw, 5639 error = e1000_verify_write_ich8_byte(hw,
5641 (i << 1) + new_bank_offset + 1, high_byte); 5640 (i << 1) + new_bank_offset + 1, high_byte);
5642 if (error != E1000_SUCCESS) 5641 if (error != E1000_SUCCESS)
5643 sector_write_failed = TRUE; 5642 sector_write_failed = true;
5644 5643
5645 } else { 5644 } else {
5646 /* If the write failed then break from the loop and 5645 /* If the write failed then break from the loop and
@@ -5651,7 +5650,7 @@ e1000_commit_shadow_ram(struct e1000_hw *hw)
5651 5650
5652 /* Don't bother writing the segment valid bits if sector 5651 /* Don't bother writing the segment valid bits if sector
5653 * programming failed. */ 5652 * programming failed. */
5654 if (sector_write_failed == FALSE) { 5653 if (!sector_write_failed) {
5655 /* Finally validate the new segment by setting bit 15:14 5654 /* Finally validate the new segment by setting bit 15:14
5656 * to 10b in word 0x13 , this can be done without an 5655 * to 10b in word 0x13 , this can be done without an
5657 * erase as well since these bits are 11 to start with 5656 * erase as well since these bits are 11 to start with
@@ -5673,7 +5672,7 @@ e1000_commit_shadow_ram(struct e1000_hw *hw)
5673 5672
5674 /* Clear the now not used entry in the cache */ 5673 /* Clear the now not used entry in the cache */
5675 for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) { 5674 for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
5676 hw->eeprom_shadow_ram[i].modified = FALSE; 5675 hw->eeprom_shadow_ram[i].modified = false;
5677 hw->eeprom_shadow_ram[i].eeprom_word = 0xFFFF; 5676 hw->eeprom_shadow_ram[i].eeprom_word = 0xFFFF;
5678 } 5677 }
5679 } 5678 }
@@ -5688,11 +5687,11 @@ e1000_commit_shadow_ram(struct e1000_hw *hw)
5688 * 5687 *
5689 * hw - Struct containing variables accessed by shared code 5688 * hw - Struct containing variables accessed by shared code
5690 *****************************************************************************/ 5689 *****************************************************************************/
5691int32_t 5690s32
5692e1000_read_mac_addr(struct e1000_hw * hw) 5691e1000_read_mac_addr(struct e1000_hw * hw)
5693{ 5692{
5694 uint16_t offset; 5693 u16 offset;
5695 uint16_t eeprom_data, i; 5694 u16 eeprom_data, i;
5696 5695
5697 DEBUGFUNC("e1000_read_mac_addr"); 5696 DEBUGFUNC("e1000_read_mac_addr");
5698 5697
@@ -5702,8 +5701,8 @@ e1000_read_mac_addr(struct e1000_hw * hw)
5702 DEBUGOUT("EEPROM Read Error\n"); 5701 DEBUGOUT("EEPROM Read Error\n");
5703 return -E1000_ERR_EEPROM; 5702 return -E1000_ERR_EEPROM;
5704 } 5703 }
5705 hw->perm_mac_addr[i] = (uint8_t) (eeprom_data & 0x00FF); 5704 hw->perm_mac_addr[i] = (u8) (eeprom_data & 0x00FF);
5706 hw->perm_mac_addr[i+1] = (uint8_t) (eeprom_data >> 8); 5705 hw->perm_mac_addr[i+1] = (u8) (eeprom_data >> 8);
5707 } 5706 }
5708 5707
5709 switch (hw->mac_type) { 5708 switch (hw->mac_type) {
@@ -5735,8 +5734,8 @@ e1000_read_mac_addr(struct e1000_hw * hw)
5735static void 5734static void
5736e1000_init_rx_addrs(struct e1000_hw *hw) 5735e1000_init_rx_addrs(struct e1000_hw *hw)
5737{ 5736{
5738 uint32_t i; 5737 u32 i;
5739 uint32_t rar_num; 5738 u32 rar_num;
5740 5739
5741 DEBUGFUNC("e1000_init_rx_addrs"); 5740 DEBUGFUNC("e1000_init_rx_addrs");
5742 5741
@@ -5750,7 +5749,7 @@ e1000_init_rx_addrs(struct e1000_hw *hw)
5750 /* Reserve a spot for the Locally Administered Address to work around 5749 /* Reserve a spot for the Locally Administered Address to work around
5751 * an 82571 issue in which a reset on one port will reload the MAC on 5750 * an 82571 issue in which a reset on one port will reload the MAC on
5752 * the other port. */ 5751 * the other port. */
5753 if ((hw->mac_type == e1000_82571) && (hw->laa_is_present == TRUE)) 5752 if ((hw->mac_type == e1000_82571) && (hw->laa_is_present))
5754 rar_num -= 1; 5753 rar_num -= 1;
5755 if (hw->mac_type == e1000_ich8lan) 5754 if (hw->mac_type == e1000_ich8lan)
5756 rar_num = E1000_RAR_ENTRIES_ICH8LAN; 5755 rar_num = E1000_RAR_ENTRIES_ICH8LAN;
@@ -5771,11 +5770,11 @@ e1000_init_rx_addrs(struct e1000_hw *hw)
5771 * hw - Struct containing variables accessed by shared code 5770 * hw - Struct containing variables accessed by shared code
5772 * mc_addr - the multicast address to hash 5771 * mc_addr - the multicast address to hash
5773 *****************************************************************************/ 5772 *****************************************************************************/
5774uint32_t 5773u32
5775e1000_hash_mc_addr(struct e1000_hw *hw, 5774e1000_hash_mc_addr(struct e1000_hw *hw,
5776 uint8_t *mc_addr) 5775 u8 *mc_addr)
5777{ 5776{
5778 uint32_t hash_value = 0; 5777 u32 hash_value = 0;
5779 5778
5780 /* The portion of the address that is used for the hash table is 5779 /* The portion of the address that is used for the hash table is
5781 * determined by the mc_filter_type setting. 5780 * determined by the mc_filter_type setting.
@@ -5788,37 +5787,37 @@ e1000_hash_mc_addr(struct e1000_hw *hw,
5788 case 0: 5787 case 0:
5789 if (hw->mac_type == e1000_ich8lan) { 5788 if (hw->mac_type == e1000_ich8lan) {
5790 /* [47:38] i.e. 0x158 for above example address */ 5789 /* [47:38] i.e. 0x158 for above example address */
5791 hash_value = ((mc_addr[4] >> 6) | (((uint16_t) mc_addr[5]) << 2)); 5790 hash_value = ((mc_addr[4] >> 6) | (((u16) mc_addr[5]) << 2));
5792 } else { 5791 } else {
5793 /* [47:36] i.e. 0x563 for above example address */ 5792 /* [47:36] i.e. 0x563 for above example address */
5794 hash_value = ((mc_addr[4] >> 4) | (((uint16_t) mc_addr[5]) << 4)); 5793 hash_value = ((mc_addr[4] >> 4) | (((u16) mc_addr[5]) << 4));
5795 } 5794 }
5796 break; 5795 break;
5797 case 1: 5796 case 1:
5798 if (hw->mac_type == e1000_ich8lan) { 5797 if (hw->mac_type == e1000_ich8lan) {
5799 /* [46:37] i.e. 0x2B1 for above example address */ 5798 /* [46:37] i.e. 0x2B1 for above example address */
5800 hash_value = ((mc_addr[4] >> 5) | (((uint16_t) mc_addr[5]) << 3)); 5799 hash_value = ((mc_addr[4] >> 5) | (((u16) mc_addr[5]) << 3));
5801 } else { 5800 } else {
5802 /* [46:35] i.e. 0xAC6 for above example address */ 5801 /* [46:35] i.e. 0xAC6 for above example address */
5803 hash_value = ((mc_addr[4] >> 3) | (((uint16_t) mc_addr[5]) << 5)); 5802 hash_value = ((mc_addr[4] >> 3) | (((u16) mc_addr[5]) << 5));
5804 } 5803 }
5805 break; 5804 break;
5806 case 2: 5805 case 2:
5807 if (hw->mac_type == e1000_ich8lan) { 5806 if (hw->mac_type == e1000_ich8lan) {
5808 /*[45:36] i.e. 0x163 for above example address */ 5807 /*[45:36] i.e. 0x163 for above example address */
5809 hash_value = ((mc_addr[4] >> 4) | (((uint16_t) mc_addr[5]) << 4)); 5808 hash_value = ((mc_addr[4] >> 4) | (((u16) mc_addr[5]) << 4));
5810 } else { 5809 } else {
5811 /* [45:34] i.e. 0x5D8 for above example address */ 5810 /* [45:34] i.e. 0x5D8 for above example address */
5812 hash_value = ((mc_addr[4] >> 2) | (((uint16_t) mc_addr[5]) << 6)); 5811 hash_value = ((mc_addr[4] >> 2) | (((u16) mc_addr[5]) << 6));
5813 } 5812 }
5814 break; 5813 break;
5815 case 3: 5814 case 3:
5816 if (hw->mac_type == e1000_ich8lan) { 5815 if (hw->mac_type == e1000_ich8lan) {
5817 /* [43:34] i.e. 0x18D for above example address */ 5816 /* [43:34] i.e. 0x18D for above example address */
5818 hash_value = ((mc_addr[4] >> 2) | (((uint16_t) mc_addr[5]) << 6)); 5817 hash_value = ((mc_addr[4] >> 2) | (((u16) mc_addr[5]) << 6));
5819 } else { 5818 } else {
5820 /* [43:32] i.e. 0x634 for above example address */ 5819 /* [43:32] i.e. 0x634 for above example address */
5821 hash_value = ((mc_addr[4]) | (((uint16_t) mc_addr[5]) << 8)); 5820 hash_value = ((mc_addr[4]) | (((u16) mc_addr[5]) << 8));
5822 } 5821 }
5823 break; 5822 break;
5824 } 5823 }
@@ -5838,11 +5837,11 @@ e1000_hash_mc_addr(struct e1000_hw *hw,
5838 *****************************************************************************/ 5837 *****************************************************************************/
5839void 5838void
5840e1000_mta_set(struct e1000_hw *hw, 5839e1000_mta_set(struct e1000_hw *hw,
5841 uint32_t hash_value) 5840 u32 hash_value)
5842{ 5841{
5843 uint32_t hash_bit, hash_reg; 5842 u32 hash_bit, hash_reg;
5844 uint32_t mta; 5843 u32 mta;
5845 uint32_t temp; 5844 u32 temp;
5846 5845
5847 /* The MTA is a register array of 128 32-bit registers. 5846 /* The MTA is a register array of 128 32-bit registers.
5848 * It is treated like an array of 4096 bits. We want to set 5847 * It is treated like an array of 4096 bits. We want to set
@@ -5887,18 +5886,18 @@ e1000_mta_set(struct e1000_hw *hw,
5887 *****************************************************************************/ 5886 *****************************************************************************/
5888void 5887void
5889e1000_rar_set(struct e1000_hw *hw, 5888e1000_rar_set(struct e1000_hw *hw,
5890 uint8_t *addr, 5889 u8 *addr,
5891 uint32_t index) 5890 u32 index)
5892{ 5891{
5893 uint32_t rar_low, rar_high; 5892 u32 rar_low, rar_high;
5894 5893
5895 /* HW expects these in little endian so we reverse the byte order 5894 /* HW expects these in little endian so we reverse the byte order
5896 * from network order (big endian) to little endian 5895 * from network order (big endian) to little endian
5897 */ 5896 */
5898 rar_low = ((uint32_t) addr[0] | 5897 rar_low = ((u32) addr[0] |
5899 ((uint32_t) addr[1] << 8) | 5898 ((u32) addr[1] << 8) |
5900 ((uint32_t) addr[2] << 16) | ((uint32_t) addr[3] << 24)); 5899 ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
5901 rar_high = ((uint32_t) addr[4] | ((uint32_t) addr[5] << 8)); 5900 rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
5902 5901
5903 /* Disable Rx and flush all Rx frames before enabling RSS to avoid Rx 5902 /* Disable Rx and flush all Rx frames before enabling RSS to avoid Rx
5904 * unit hang. 5903 * unit hang.
@@ -5922,7 +5921,7 @@ e1000_rar_set(struct e1000_hw *hw,
5922 case e1000_82571: 5921 case e1000_82571:
5923 case e1000_82572: 5922 case e1000_82572:
5924 case e1000_80003es2lan: 5923 case e1000_80003es2lan:
5925 if (hw->leave_av_bit_off == TRUE) 5924 if (hw->leave_av_bit_off)
5926 break; 5925 break;
5927 default: 5926 default:
5928 /* Indicate to hardware the Address is Valid. */ 5927 /* Indicate to hardware the Address is Valid. */
@@ -5945,10 +5944,10 @@ e1000_rar_set(struct e1000_hw *hw,
5945 *****************************************************************************/ 5944 *****************************************************************************/
5946void 5945void
5947e1000_write_vfta(struct e1000_hw *hw, 5946e1000_write_vfta(struct e1000_hw *hw,
5948 uint32_t offset, 5947 u32 offset,
5949 uint32_t value) 5948 u32 value)
5950{ 5949{
5951 uint32_t temp; 5950 u32 temp;
5952 5951
5953 if (hw->mac_type == e1000_ich8lan) 5952 if (hw->mac_type == e1000_ich8lan)
5954 return; 5953 return;
@@ -5973,10 +5972,10 @@ e1000_write_vfta(struct e1000_hw *hw,
5973static void 5972static void
5974e1000_clear_vfta(struct e1000_hw *hw) 5973e1000_clear_vfta(struct e1000_hw *hw)
5975{ 5974{
5976 uint32_t offset; 5975 u32 offset;
5977 uint32_t vfta_value = 0; 5976 u32 vfta_value = 0;
5978 uint32_t vfta_offset = 0; 5977 u32 vfta_offset = 0;
5979 uint32_t vfta_bit_in_reg = 0; 5978 u32 vfta_bit_in_reg = 0;
5980 5979
5981 if (hw->mac_type == e1000_ich8lan) 5980 if (hw->mac_type == e1000_ich8lan)
5982 return; 5981 return;
@@ -6004,15 +6003,15 @@ e1000_clear_vfta(struct e1000_hw *hw)
6004 } 6003 }
6005} 6004}
6006 6005
6007static int32_t 6006static s32
6008e1000_id_led_init(struct e1000_hw * hw) 6007e1000_id_led_init(struct e1000_hw * hw)
6009{ 6008{
6010 uint32_t ledctl; 6009 u32 ledctl;
6011 const uint32_t ledctl_mask = 0x000000FF; 6010 const u32 ledctl_mask = 0x000000FF;
6012 const uint32_t ledctl_on = E1000_LEDCTL_MODE_LED_ON; 6011 const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON;
6013 const uint32_t ledctl_off = E1000_LEDCTL_MODE_LED_OFF; 6012 const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF;
6014 uint16_t eeprom_data, i, temp; 6013 u16 eeprom_data, i, temp;
6015 const uint16_t led_mask = 0x0F; 6014 const u16 led_mask = 0x0F;
6016 6015
6017 DEBUGFUNC("e1000_id_led_init"); 6016 DEBUGFUNC("e1000_id_led_init");
6018 6017
@@ -6087,11 +6086,11 @@ e1000_id_led_init(struct e1000_hw * hw)
6087 * 6086 *
6088 * hw - Struct containing variables accessed by shared code 6087 * hw - Struct containing variables accessed by shared code
6089 *****************************************************************************/ 6088 *****************************************************************************/
6090int32_t 6089s32
6091e1000_setup_led(struct e1000_hw *hw) 6090e1000_setup_led(struct e1000_hw *hw)
6092{ 6091{
6093 uint32_t ledctl; 6092 u32 ledctl;
6094 int32_t ret_val = E1000_SUCCESS; 6093 s32 ret_val = E1000_SUCCESS;
6095 6094
6096 DEBUGFUNC("e1000_setup_led"); 6095 DEBUGFUNC("e1000_setup_led");
6097 6096
@@ -6112,7 +6111,7 @@ e1000_setup_led(struct e1000_hw *hw)
6112 if (ret_val) 6111 if (ret_val)
6113 return ret_val; 6112 return ret_val;
6114 ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, 6113 ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO,
6115 (uint16_t)(hw->phy_spd_default & 6114 (u16)(hw->phy_spd_default &
6116 ~IGP01E1000_GMII_SPD)); 6115 ~IGP01E1000_GMII_SPD));
6117 if (ret_val) 6116 if (ret_val)
6118 return ret_val; 6117 return ret_val;
@@ -6146,11 +6145,11 @@ e1000_setup_led(struct e1000_hw *hw)
6146 * 6145 *
6147 * hw - Struct containing variables accessed by shared code 6146 * hw - Struct containing variables accessed by shared code
6148 *****************************************************************************/ 6147 *****************************************************************************/
6149int32_t 6148s32
6150e1000_blink_led_start(struct e1000_hw *hw) 6149e1000_blink_led_start(struct e1000_hw *hw)
6151{ 6150{
6152 int16_t i; 6151 s16 i;
6153 uint32_t ledctl_blink = 0; 6152 u32 ledctl_blink = 0;
6154 6153
6155 DEBUGFUNC("e1000_id_led_blink_on"); 6154 DEBUGFUNC("e1000_id_led_blink_on");
6156 6155
@@ -6181,10 +6180,10 @@ e1000_blink_led_start(struct e1000_hw *hw)
6181 * 6180 *
6182 * hw - Struct containing variables accessed by shared code 6181 * hw - Struct containing variables accessed by shared code
6183 *****************************************************************************/ 6182 *****************************************************************************/
6184int32_t 6183s32
6185e1000_cleanup_led(struct e1000_hw *hw) 6184e1000_cleanup_led(struct e1000_hw *hw)
6186{ 6185{
6187 int32_t ret_val = E1000_SUCCESS; 6186 s32 ret_val = E1000_SUCCESS;
6188 6187
6189 DEBUGFUNC("e1000_cleanup_led"); 6188 DEBUGFUNC("e1000_cleanup_led");
6190 6189
@@ -6223,10 +6222,10 @@ e1000_cleanup_led(struct e1000_hw *hw)
6223 * 6222 *
6224 * hw - Struct containing variables accessed by shared code 6223 * hw - Struct containing variables accessed by shared code
6225 *****************************************************************************/ 6224 *****************************************************************************/
6226int32_t 6225s32
6227e1000_led_on(struct e1000_hw *hw) 6226e1000_led_on(struct e1000_hw *hw)
6228{ 6227{
6229 uint32_t ctrl = E1000_READ_REG(hw, CTRL); 6228 u32 ctrl = E1000_READ_REG(hw, CTRL);
6230 6229
6231 DEBUGFUNC("e1000_led_on"); 6230 DEBUGFUNC("e1000_led_on");
6232 6231
@@ -6274,10 +6273,10 @@ e1000_led_on(struct e1000_hw *hw)
6274 * 6273 *
6275 * hw - Struct containing variables accessed by shared code 6274 * hw - Struct containing variables accessed by shared code
6276 *****************************************************************************/ 6275 *****************************************************************************/
6277int32_t 6276s32
6278e1000_led_off(struct e1000_hw *hw) 6277e1000_led_off(struct e1000_hw *hw)
6279{ 6278{
6280 uint32_t ctrl = E1000_READ_REG(hw, CTRL); 6279 u32 ctrl = E1000_READ_REG(hw, CTRL);
6281 6280
6282 DEBUGFUNC("e1000_led_off"); 6281 DEBUGFUNC("e1000_led_off");
6283 6282
@@ -6328,7 +6327,7 @@ e1000_led_off(struct e1000_hw *hw)
6328static void 6327static void
6329e1000_clear_hw_cntrs(struct e1000_hw *hw) 6328e1000_clear_hw_cntrs(struct e1000_hw *hw)
6330{ 6329{
6331 volatile uint32_t temp; 6330 volatile u32 temp;
6332 6331
6333 temp = E1000_READ_REG(hw, CRCERRS); 6332 temp = E1000_READ_REG(hw, CRCERRS);
6334 temp = E1000_READ_REG(hw, SYMERRS); 6333 temp = E1000_READ_REG(hw, SYMERRS);
@@ -6425,7 +6424,7 @@ e1000_clear_hw_cntrs(struct e1000_hw *hw)
6425 * hw - Struct containing variables accessed by shared code 6424 * hw - Struct containing variables accessed by shared code
6426 * 6425 *
6427 * Call this after e1000_init_hw. You may override the IFS defaults by setting 6426 * Call this after e1000_init_hw. You may override the IFS defaults by setting
6428 * hw->ifs_params_forced to TRUE. However, you must initialize hw-> 6427 * hw->ifs_params_forced to true. However, you must initialize hw->
6429 * current_ifs_val, ifs_min_val, ifs_max_val, ifs_step_size, and ifs_ratio 6428 * current_ifs_val, ifs_min_val, ifs_max_val, ifs_step_size, and ifs_ratio
6430 * before calling this function. 6429 * before calling this function.
6431 *****************************************************************************/ 6430 *****************************************************************************/
@@ -6442,7 +6441,7 @@ e1000_reset_adaptive(struct e1000_hw *hw)
6442 hw->ifs_step_size = IFS_STEP; 6441 hw->ifs_step_size = IFS_STEP;
6443 hw->ifs_ratio = IFS_RATIO; 6442 hw->ifs_ratio = IFS_RATIO;
6444 } 6443 }
6445 hw->in_ifs_mode = FALSE; 6444 hw->in_ifs_mode = false;
6446 E1000_WRITE_REG(hw, AIT, 0); 6445 E1000_WRITE_REG(hw, AIT, 0);
6447 } else { 6446 } else {
6448 DEBUGOUT("Not in Adaptive IFS mode!\n"); 6447 DEBUGOUT("Not in Adaptive IFS mode!\n");
@@ -6465,7 +6464,7 @@ e1000_update_adaptive(struct e1000_hw *hw)
6465 if (hw->adaptive_ifs) { 6464 if (hw->adaptive_ifs) {
6466 if ((hw->collision_delta * hw->ifs_ratio) > hw->tx_packet_delta) { 6465 if ((hw->collision_delta * hw->ifs_ratio) > hw->tx_packet_delta) {
6467 if (hw->tx_packet_delta > MIN_NUM_XMITS) { 6466 if (hw->tx_packet_delta > MIN_NUM_XMITS) {
6468 hw->in_ifs_mode = TRUE; 6467 hw->in_ifs_mode = true;
6469 if (hw->current_ifs_val < hw->ifs_max_val) { 6468 if (hw->current_ifs_val < hw->ifs_max_val) {
6470 if (hw->current_ifs_val == 0) 6469 if (hw->current_ifs_val == 0)
6471 hw->current_ifs_val = hw->ifs_min_val; 6470 hw->current_ifs_val = hw->ifs_min_val;
@@ -6477,7 +6476,7 @@ e1000_update_adaptive(struct e1000_hw *hw)
6477 } else { 6476 } else {
6478 if (hw->in_ifs_mode && (hw->tx_packet_delta <= MIN_NUM_XMITS)) { 6477 if (hw->in_ifs_mode && (hw->tx_packet_delta <= MIN_NUM_XMITS)) {
6479 hw->current_ifs_val = 0; 6478 hw->current_ifs_val = 0;
6480 hw->in_ifs_mode = FALSE; 6479 hw->in_ifs_mode = false;
6481 E1000_WRITE_REG(hw, AIT, 0); 6480 E1000_WRITE_REG(hw, AIT, 0);
6482 } 6481 }
6483 } 6482 }
@@ -6496,10 +6495,10 @@ e1000_update_adaptive(struct e1000_hw *hw)
6496void 6495void
6497e1000_tbi_adjust_stats(struct e1000_hw *hw, 6496e1000_tbi_adjust_stats(struct e1000_hw *hw,
6498 struct e1000_hw_stats *stats, 6497 struct e1000_hw_stats *stats,
6499 uint32_t frame_len, 6498 u32 frame_len,
6500 uint8_t *mac_addr) 6499 u8 *mac_addr)
6501{ 6500{
6502 uint64_t carry_bit; 6501 u64 carry_bit;
6503 6502
6504 /* First adjust the frame length. */ 6503 /* First adjust the frame length. */
6505 frame_len--; 6504 frame_len--;
@@ -6528,7 +6527,7 @@ e1000_tbi_adjust_stats(struct e1000_hw *hw,
6528 * since the test for a multicast frame will test positive on 6527 * since the test for a multicast frame will test positive on
6529 * a broadcast frame. 6528 * a broadcast frame.
6530 */ 6529 */
6531 if ((mac_addr[0] == (uint8_t) 0xff) && (mac_addr[1] == (uint8_t) 0xff)) 6530 if ((mac_addr[0] == (u8) 0xff) && (mac_addr[1] == (u8) 0xff))
6532 /* Broadcast packet */ 6531 /* Broadcast packet */
6533 stats->bprc++; 6532 stats->bprc++;
6534 else if (*mac_addr & 0x01) 6533 else if (*mac_addr & 0x01)
@@ -6574,9 +6573,9 @@ e1000_tbi_adjust_stats(struct e1000_hw *hw,
6574void 6573void
6575e1000_get_bus_info(struct e1000_hw *hw) 6574e1000_get_bus_info(struct e1000_hw *hw)
6576{ 6575{
6577 int32_t ret_val; 6576 s32 ret_val;
6578 uint16_t pci_ex_link_status; 6577 u16 pci_ex_link_status;
6579 uint32_t status; 6578 u32 status;
6580 6579
6581 switch (hw->mac_type) { 6580 switch (hw->mac_type) {
6582 case e1000_82542_rev2_0: 6581 case e1000_82542_rev2_0:
@@ -6648,8 +6647,8 @@ e1000_get_bus_info(struct e1000_hw *hw)
6648 *****************************************************************************/ 6647 *****************************************************************************/
6649static void 6648static void
6650e1000_write_reg_io(struct e1000_hw *hw, 6649e1000_write_reg_io(struct e1000_hw *hw,
6651 uint32_t offset, 6650 u32 offset,
6652 uint32_t value) 6651 u32 value)
6653{ 6652{
6654 unsigned long io_addr = hw->io_base; 6653 unsigned long io_addr = hw->io_base;
6655 unsigned long io_data = hw->io_base + 4; 6654 unsigned long io_data = hw->io_base + 4;
@@ -6673,15 +6672,15 @@ e1000_write_reg_io(struct e1000_hw *hw,
6673 * register to the minimum and maximum range. 6672 * register to the minimum and maximum range.
6674 * For IGP phy's, the function calculates the range by the AGC registers. 6673 * For IGP phy's, the function calculates the range by the AGC registers.
6675 *****************************************************************************/ 6674 *****************************************************************************/
6676static int32_t 6675static s32
6677e1000_get_cable_length(struct e1000_hw *hw, 6676e1000_get_cable_length(struct e1000_hw *hw,
6678 uint16_t *min_length, 6677 u16 *min_length,
6679 uint16_t *max_length) 6678 u16 *max_length)
6680{ 6679{
6681 int32_t ret_val; 6680 s32 ret_val;
6682 uint16_t agc_value = 0; 6681 u16 agc_value = 0;
6683 uint16_t i, phy_data; 6682 u16 i, phy_data;
6684 uint16_t cable_length; 6683 u16 cable_length;
6685 6684
6686 DEBUGFUNC("e1000_get_cable_length"); 6685 DEBUGFUNC("e1000_get_cable_length");
6687 6686
@@ -6752,9 +6751,9 @@ e1000_get_cable_length(struct e1000_hw *hw,
6752 break; 6751 break;
6753 } 6752 }
6754 } else if (hw->phy_type == e1000_phy_igp) { /* For IGP PHY */ 6753 } else if (hw->phy_type == e1000_phy_igp) { /* For IGP PHY */
6755 uint16_t cur_agc_value; 6754 u16 cur_agc_value;
6756 uint16_t min_agc_value = IGP01E1000_AGC_LENGTH_TABLE_SIZE; 6755 u16 min_agc_value = IGP01E1000_AGC_LENGTH_TABLE_SIZE;
6757 uint16_t agc_reg_array[IGP01E1000_PHY_CHANNEL_NUM] = 6756 u16 agc_reg_array[IGP01E1000_PHY_CHANNEL_NUM] =
6758 {IGP01E1000_PHY_AGC_A, 6757 {IGP01E1000_PHY_AGC_A,
6759 IGP01E1000_PHY_AGC_B, 6758 IGP01E1000_PHY_AGC_B,
6760 IGP01E1000_PHY_AGC_C, 6759 IGP01E1000_PHY_AGC_C,
@@ -6800,9 +6799,9 @@ e1000_get_cable_length(struct e1000_hw *hw,
6800 IGP01E1000_AGC_RANGE; 6799 IGP01E1000_AGC_RANGE;
6801 } else if (hw->phy_type == e1000_phy_igp_2 || 6800 } else if (hw->phy_type == e1000_phy_igp_2 ||
6802 hw->phy_type == e1000_phy_igp_3) { 6801 hw->phy_type == e1000_phy_igp_3) {
6803 uint16_t cur_agc_index, max_agc_index = 0; 6802 u16 cur_agc_index, max_agc_index = 0;
6804 uint16_t min_agc_index = IGP02E1000_AGC_LENGTH_TABLE_SIZE - 1; 6803 u16 min_agc_index = IGP02E1000_AGC_LENGTH_TABLE_SIZE - 1;
6805 uint16_t agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] = 6804 u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] =
6806 {IGP02E1000_PHY_AGC_A, 6805 {IGP02E1000_PHY_AGC_A,
6807 IGP02E1000_PHY_AGC_B, 6806 IGP02E1000_PHY_AGC_B,
6808 IGP02E1000_PHY_AGC_C, 6807 IGP02E1000_PHY_AGC_C,
@@ -6864,12 +6863,12 @@ e1000_get_cable_length(struct e1000_hw *hw,
6864 * return 0. If the link speed is 1000 Mbps the polarity status is in the 6863 * return 0. If the link speed is 1000 Mbps the polarity status is in the
6865 * IGP01E1000_PHY_PCS_INIT_REG. 6864 * IGP01E1000_PHY_PCS_INIT_REG.
6866 *****************************************************************************/ 6865 *****************************************************************************/
6867static int32_t 6866static s32
6868e1000_check_polarity(struct e1000_hw *hw, 6867e1000_check_polarity(struct e1000_hw *hw,
6869 e1000_rev_polarity *polarity) 6868 e1000_rev_polarity *polarity)
6870{ 6869{
6871 int32_t ret_val; 6870 s32 ret_val;
6872 uint16_t phy_data; 6871 u16 phy_data;
6873 6872
6874 DEBUGFUNC("e1000_check_polarity"); 6873 DEBUGFUNC("e1000_check_polarity");
6875 6874
@@ -6940,11 +6939,11 @@ e1000_check_polarity(struct e1000_hw *hw,
6940 * Link Health register. In IGP this bit is latched high, so the driver must 6939 * Link Health register. In IGP this bit is latched high, so the driver must
6941 * read it immediately after link is established. 6940 * read it immediately after link is established.
6942 *****************************************************************************/ 6941 *****************************************************************************/
6943static int32_t 6942static s32
6944e1000_check_downshift(struct e1000_hw *hw) 6943e1000_check_downshift(struct e1000_hw *hw)
6945{ 6944{
6946 int32_t ret_val; 6945 s32 ret_val;
6947 uint16_t phy_data; 6946 u16 phy_data;
6948 6947
6949 DEBUGFUNC("e1000_check_downshift"); 6948 DEBUGFUNC("e1000_check_downshift");
6950 6949
@@ -6968,7 +6967,7 @@ e1000_check_downshift(struct e1000_hw *hw)
6968 M88E1000_PSSR_DOWNSHIFT_SHIFT; 6967 M88E1000_PSSR_DOWNSHIFT_SHIFT;
6969 } else if (hw->phy_type == e1000_phy_ife) { 6968 } else if (hw->phy_type == e1000_phy_ife) {
6970 /* e1000_phy_ife supports 10/100 speed only */ 6969 /* e1000_phy_ife supports 10/100 speed only */
6971 hw->speed_downgraded = FALSE; 6970 hw->speed_downgraded = false;
6972 } 6971 }
6973 6972
6974 return E1000_SUCCESS; 6973 return E1000_SUCCESS;
@@ -6986,18 +6985,18 @@ e1000_check_downshift(struct e1000_hw *hw)
6986 * 6985 *
6987 ****************************************************************************/ 6986 ****************************************************************************/
6988 6987
6989static int32_t 6988static s32
6990e1000_config_dsp_after_link_change(struct e1000_hw *hw, 6989e1000_config_dsp_after_link_change(struct e1000_hw *hw,
6991 boolean_t link_up) 6990 bool link_up)
6992{ 6991{
6993 int32_t ret_val; 6992 s32 ret_val;
6994 uint16_t phy_data, phy_saved_data, speed, duplex, i; 6993 u16 phy_data, phy_saved_data, speed, duplex, i;
6995 uint16_t dsp_reg_array[IGP01E1000_PHY_CHANNEL_NUM] = 6994 u16 dsp_reg_array[IGP01E1000_PHY_CHANNEL_NUM] =
6996 {IGP01E1000_PHY_AGC_PARAM_A, 6995 {IGP01E1000_PHY_AGC_PARAM_A,
6997 IGP01E1000_PHY_AGC_PARAM_B, 6996 IGP01E1000_PHY_AGC_PARAM_B,
6998 IGP01E1000_PHY_AGC_PARAM_C, 6997 IGP01E1000_PHY_AGC_PARAM_C,
6999 IGP01E1000_PHY_AGC_PARAM_D}; 6998 IGP01E1000_PHY_AGC_PARAM_D};
7000 uint16_t min_length, max_length; 6999 u16 min_length, max_length;
7001 7000
7002 DEBUGFUNC("e1000_config_dsp_after_link_change"); 7001 DEBUGFUNC("e1000_config_dsp_after_link_change");
7003 7002
@@ -7039,8 +7038,8 @@ e1000_config_dsp_after_link_change(struct e1000_hw *hw,
7039 if ((hw->ffe_config_state == e1000_ffe_config_enabled) && 7038 if ((hw->ffe_config_state == e1000_ffe_config_enabled) &&
7040 (min_length < e1000_igp_cable_length_50)) { 7039 (min_length < e1000_igp_cable_length_50)) {
7041 7040
7042 uint16_t ffe_idle_err_timeout = FFE_IDLE_ERR_COUNT_TIMEOUT_20; 7041 u16 ffe_idle_err_timeout = FFE_IDLE_ERR_COUNT_TIMEOUT_20;
7043 uint32_t idle_errs = 0; 7042 u32 idle_errs = 0;
7044 7043
7045 /* clear previous idle error counts */ 7044 /* clear previous idle error counts */
7046 ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, 7045 ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS,
@@ -7174,11 +7173,11 @@ e1000_config_dsp_after_link_change(struct e1000_hw *hw,
7174 * 7173 *
7175 * hw - Struct containing variables accessed by shared code 7174 * hw - Struct containing variables accessed by shared code
7176 ****************************************************************************/ 7175 ****************************************************************************/
7177static int32_t 7176static s32
7178e1000_set_phy_mode(struct e1000_hw *hw) 7177e1000_set_phy_mode(struct e1000_hw *hw)
7179{ 7178{
7180 int32_t ret_val; 7179 s32 ret_val;
7181 uint16_t eeprom_data; 7180 u16 eeprom_data;
7182 7181
7183 DEBUGFUNC("e1000_set_phy_mode"); 7182 DEBUGFUNC("e1000_set_phy_mode");
7184 7183
@@ -7198,7 +7197,7 @@ e1000_set_phy_mode(struct e1000_hw *hw)
7198 if (ret_val) 7197 if (ret_val)
7199 return ret_val; 7198 return ret_val;
7200 7199
7201 hw->phy_reset_disable = FALSE; 7200 hw->phy_reset_disable = false;
7202 } 7201 }
7203 } 7202 }
7204 7203
@@ -7219,13 +7218,13 @@ e1000_set_phy_mode(struct e1000_hw *hw)
7219 * 7218 *
7220 ****************************************************************************/ 7219 ****************************************************************************/
7221 7220
7222static int32_t 7221static s32
7223e1000_set_d3_lplu_state(struct e1000_hw *hw, 7222e1000_set_d3_lplu_state(struct e1000_hw *hw,
7224 boolean_t active) 7223 bool active)
7225{ 7224{
7226 uint32_t phy_ctrl = 0; 7225 u32 phy_ctrl = 0;
7227 int32_t ret_val; 7226 s32 ret_val;
7228 uint16_t phy_data; 7227 u16 phy_data;
7229 DEBUGFUNC("e1000_set_d3_lplu_state"); 7228 DEBUGFUNC("e1000_set_d3_lplu_state");
7230 7229
7231 if (hw->phy_type != e1000_phy_igp && hw->phy_type != e1000_phy_igp_2 7230 if (hw->phy_type != e1000_phy_igp && hw->phy_type != e1000_phy_igp_2
@@ -7349,13 +7348,13 @@ e1000_set_d3_lplu_state(struct e1000_hw *hw,
7349 * 7348 *
7350 ****************************************************************************/ 7349 ****************************************************************************/
7351 7350
7352static int32_t 7351static s32
7353e1000_set_d0_lplu_state(struct e1000_hw *hw, 7352e1000_set_d0_lplu_state(struct e1000_hw *hw,
7354 boolean_t active) 7353 bool active)
7355{ 7354{
7356 uint32_t phy_ctrl = 0; 7355 u32 phy_ctrl = 0;
7357 int32_t ret_val; 7356 s32 ret_val;
7358 uint16_t phy_data; 7357 u16 phy_data;
7359 DEBUGFUNC("e1000_set_d0_lplu_state"); 7358 DEBUGFUNC("e1000_set_d0_lplu_state");
7360 7359
7361 if (hw->mac_type <= e1000_82547_rev_2) 7360 if (hw->mac_type <= e1000_82547_rev_2)
@@ -7440,12 +7439,12 @@ e1000_set_d0_lplu_state(struct e1000_hw *hw,
7440 * 7439 *
7441 * hw - Struct containing variables accessed by shared code 7440 * hw - Struct containing variables accessed by shared code
7442 *****************************************************************************/ 7441 *****************************************************************************/
7443static int32_t 7442static s32
7444e1000_set_vco_speed(struct e1000_hw *hw) 7443e1000_set_vco_speed(struct e1000_hw *hw)
7445{ 7444{
7446 int32_t ret_val; 7445 s32 ret_val;
7447 uint16_t default_page = 0; 7446 u16 default_page = 0;
7448 uint16_t phy_data; 7447 u16 phy_data;
7449 7448
7450 DEBUGFUNC("e1000_set_vco_speed"); 7449 DEBUGFUNC("e1000_set_vco_speed");
7451 7450
@@ -7504,18 +7503,18 @@ e1000_set_vco_speed(struct e1000_hw *hw)
7504 * 7503 *
7505 * returns: - E1000_SUCCESS . 7504 * returns: - E1000_SUCCESS .
7506 ****************************************************************************/ 7505 ****************************************************************************/
7507static int32_t 7506static s32
7508e1000_host_if_read_cookie(struct e1000_hw * hw, uint8_t *buffer) 7507e1000_host_if_read_cookie(struct e1000_hw * hw, u8 *buffer)
7509{ 7508{
7510 uint8_t i; 7509 u8 i;
7511 uint32_t offset = E1000_MNG_DHCP_COOKIE_OFFSET; 7510 u32 offset = E1000_MNG_DHCP_COOKIE_OFFSET;
7512 uint8_t length = E1000_MNG_DHCP_COOKIE_LENGTH; 7511 u8 length = E1000_MNG_DHCP_COOKIE_LENGTH;
7513 7512
7514 length = (length >> 2); 7513 length = (length >> 2);
7515 offset = (offset >> 2); 7514 offset = (offset >> 2);
7516 7515
7517 for (i = 0; i < length; i++) { 7516 for (i = 0; i < length; i++) {
7518 *((uint32_t *) buffer + i) = 7517 *((u32 *) buffer + i) =
7519 E1000_READ_REG_ARRAY_DWORD(hw, HOST_IF, offset + i); 7518 E1000_READ_REG_ARRAY_DWORD(hw, HOST_IF, offset + i);
7520 } 7519 }
7521 return E1000_SUCCESS; 7520 return E1000_SUCCESS;
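[editor's note] For orientation: the cookie read in the hunk above converts the byte offset and length into dword units with the two right-shifts, then copies one 32-bit word per iteration from the host-interface register array into the caller's buffer. A minimal standalone sketch of the same pattern, assuming a hypothetical read_host_if_dword() in place of E1000_READ_REG_ARRAY_DWORD(); the memcpy() (instead of the driver's u32* cast) is only to keep the sketch alignment-safe, and the offset used in main() is arbitrary:

#include <stdint.h>
#include <string.h>

/* Hypothetical stand-in for E1000_READ_REG_ARRAY_DWORD(hw, HOST_IF, i). */
static uint32_t read_host_if_dword(uint32_t dword_index)
{
        return 0xA5000000u | dword_index;        /* placeholder data */
}

/* Copy byte_len bytes starting at byte offset byte_off into buf,
 * one 32-bit word at a time, as the cookie-read loop above does. */
static void read_cookie(uint8_t *buf, uint32_t byte_off, uint8_t byte_len)
{
        uint32_t dwords = byte_len >> 2;         /* length in dwords */
        uint32_t start  = byte_off >> 2;         /* offset in dwords */
        uint32_t i;

        for (i = 0; i < dwords; i++) {
                uint32_t d = read_host_if_dword(start + i);
                memcpy(buf + i * sizeof(d), &d, sizeof(d));
        }
}

int main(void)
{
        uint8_t cookie[16];

        read_cookie(cookie, 0x10, sizeof(cookie));   /* arbitrary example offset */
        return 0;
}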
@@ -7531,11 +7530,11 @@ e1000_host_if_read_cookie(struct e1000_hw * hw, uint8_t *buffer)
7531 * timeout 7530 * timeout
7532 * - E1000_SUCCESS for success. 7531 * - E1000_SUCCESS for success.
7533 ****************************************************************************/ 7532 ****************************************************************************/
7534static int32_t 7533static s32
7535e1000_mng_enable_host_if(struct e1000_hw * hw) 7534e1000_mng_enable_host_if(struct e1000_hw * hw)
7536{ 7535{
7537 uint32_t hicr; 7536 u32 hicr;
7538 uint8_t i; 7537 u8 i;
7539 7538
7540 /* Check that the host interface is enabled. */ 7539 /* Check that the host interface is enabled. */
7541 hicr = E1000_READ_REG(hw, HICR); 7540 hicr = E1000_READ_REG(hw, HICR);
@@ -7565,14 +7564,14 @@ e1000_mng_enable_host_if(struct e1000_hw * hw)
7565 * 7564 *
7566 * returns - E1000_SUCCESS for success. 7565 * returns - E1000_SUCCESS for success.
7567 ****************************************************************************/ 7566 ****************************************************************************/
7568static int32_t 7567static s32
7569e1000_mng_host_if_write(struct e1000_hw * hw, uint8_t *buffer, 7568e1000_mng_host_if_write(struct e1000_hw * hw, u8 *buffer,
7570 uint16_t length, uint16_t offset, uint8_t *sum) 7569 u16 length, u16 offset, u8 *sum)
7571{ 7570{
7572 uint8_t *tmp; 7571 u8 *tmp;
7573 uint8_t *bufptr = buffer; 7572 u8 *bufptr = buffer;
7574 uint32_t data = 0; 7573 u32 data = 0;
7575 uint16_t remaining, i, j, prev_bytes; 7574 u16 remaining, i, j, prev_bytes;
7576 7575
7577 /* sum = only sum of the data and it is not checksum */ 7576 /* sum = only sum of the data and it is not checksum */
7578 7577
@@ -7580,14 +7579,14 @@ e1000_mng_host_if_write(struct e1000_hw * hw, uint8_t *buffer,
7580 return -E1000_ERR_PARAM; 7579 return -E1000_ERR_PARAM;
7581 } 7580 }
7582 7581
7583 tmp = (uint8_t *)&data; 7582 tmp = (u8 *)&data;
7584 prev_bytes = offset & 0x3; 7583 prev_bytes = offset & 0x3;
7585 offset &= 0xFFFC; 7584 offset &= 0xFFFC;
7586 offset >>= 2; 7585 offset >>= 2;
7587 7586
7588 if (prev_bytes) { 7587 if (prev_bytes) {
7589 data = E1000_READ_REG_ARRAY_DWORD(hw, HOST_IF, offset); 7588 data = E1000_READ_REG_ARRAY_DWORD(hw, HOST_IF, offset);
7590 for (j = prev_bytes; j < sizeof(uint32_t); j++) { 7589 for (j = prev_bytes; j < sizeof(u32); j++) {
7591 *(tmp + j) = *bufptr++; 7590 *(tmp + j) = *bufptr++;
7592 *sum += *(tmp + j); 7591 *sum += *(tmp + j);
7593 } 7592 }
@@ -7605,7 +7604,7 @@ e1000_mng_host_if_write(struct e1000_hw * hw, uint8_t *buffer,
7605 /* The device driver writes the relevant command block into the 7604 /* The device driver writes the relevant command block into the
7606 * ram area. */ 7605 * ram area. */
7607 for (i = 0; i < length; i++) { 7606 for (i = 0; i < length; i++) {
7608 for (j = 0; j < sizeof(uint32_t); j++) { 7607 for (j = 0; j < sizeof(u32); j++) {
7609 *(tmp + j) = *bufptr++; 7608 *(tmp + j) = *bufptr++;
7610 *sum += *(tmp + j); 7609 *sum += *(tmp + j);
7611 } 7610 }
@@ -7613,7 +7612,7 @@ e1000_mng_host_if_write(struct e1000_hw * hw, uint8_t *buffer,
7613 E1000_WRITE_REG_ARRAY_DWORD(hw, HOST_IF, offset + i, data); 7612 E1000_WRITE_REG_ARRAY_DWORD(hw, HOST_IF, offset + i, data);
7614 } 7613 }
7615 if (remaining) { 7614 if (remaining) {
7616 for (j = 0; j < sizeof(uint32_t); j++) { 7615 for (j = 0; j < sizeof(u32); j++) {
7617 if (j < remaining) 7616 if (j < remaining)
7618 *(tmp + j) = *bufptr++; 7617 *(tmp + j) = *bufptr++;
7619 else 7618 else
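[editor's note] The write path in the hunks above has three phases worth calling out: (1) if the byte offset is not dword-aligned, the first dword is read back and only its tail bytes are overwritten; (2) whole dwords are then streamed; (3) a final dword is zero-padded for any leftover bytes; every byte copied is also accumulated into *sum. A rough standalone sketch of that shape — not the driver's exact bookkeeping (length is in bytes here, not dwords), with hypothetical read_dword()/write_dword() stubs in place of the HOST_IF register accessors:

#include <stdint.h>

static uint32_t ram[256];                        /* stands in for the HOST_IF RAM */
static uint32_t read_dword(uint32_t i)              { return ram[i]; }
static void     write_dword(uint32_t i, uint32_t v) { ram[i] = v; }

static void host_if_write(const uint8_t *buf, uint16_t length_bytes,
                          uint16_t offset, uint8_t *sum)
{
        uint32_t data = 0;
        uint8_t *tmp = (uint8_t *)&data;
        uint16_t prev_bytes = offset & 0x3;      /* bytes already used in the first dword */
        uint16_t i, j;

        offset >>= 2;

        if (prev_bytes) {                        /* 1) patch the partially used first dword */
                data = read_dword(offset);
                for (j = prev_bytes; j < 4 && length_bytes; j++, length_bytes--) {
                        tmp[j] = *buf++;
                        *sum += tmp[j];
                }
                write_dword(offset, data);
                offset++;
        }

        for (i = 0; length_bytes >= 4; i++, length_bytes -= 4) {  /* 2) whole dwords */
                for (j = 0; j < 4; j++) {
                        tmp[j] = *buf++;
                        *sum += tmp[j];
                }
                write_dword(offset + i, data);
        }

        if (length_bytes) {                      /* 3) final dword, zero-padded */
                for (j = 0; j < 4; j++) {
                        if (j < length_bytes) {
                                tmp[j] = *buf++;
                                *sum += tmp[j];
                        } else {
                                tmp[j] = 0;
                        }
                }
                write_dword(offset + i, data);
        }
}

int main(void)
{
        uint8_t msg[6] = { 1, 2, 3, 4, 5, 6 };
        uint8_t sum = 0;

        host_if_write(msg, sizeof(msg), 2, &sum);   /* unaligned start at byte offset 2 */
        return sum == 1 + 2 + 3 + 4 + 5 + 6 ? 0 : 1;
}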
@@ -7633,23 +7632,23 @@ e1000_mng_host_if_write(struct e1000_hw * hw, uint8_t *buffer,
7633 * 7632 *
7634 * returns - E1000_SUCCESS for success. 7633 * returns - E1000_SUCCESS for success.
7635 ****************************************************************************/ 7634 ****************************************************************************/
7636static int32_t 7635static s32
7637e1000_mng_write_cmd_header(struct e1000_hw * hw, 7636e1000_mng_write_cmd_header(struct e1000_hw * hw,
7638 struct e1000_host_mng_command_header * hdr) 7637 struct e1000_host_mng_command_header * hdr)
7639{ 7638{
7640 uint16_t i; 7639 u16 i;
7641 uint8_t sum; 7640 u8 sum;
7642 uint8_t *buffer; 7641 u8 *buffer;
7643 7642
7644 /* Write the whole command header structure which includes sum of 7643 /* Write the whole command header structure which includes sum of
7645 * the buffer */ 7644 * the buffer */
7646 7645
7647 uint16_t length = sizeof(struct e1000_host_mng_command_header); 7646 u16 length = sizeof(struct e1000_host_mng_command_header);
7648 7647
7649 sum = hdr->checksum; 7648 sum = hdr->checksum;
7650 hdr->checksum = 0; 7649 hdr->checksum = 0;
7651 7650
7652 buffer = (uint8_t *) hdr; 7651 buffer = (u8 *) hdr;
7653 i = length; 7652 i = length;
7654 while (i--) 7653 while (i--)
7655 sum += buffer[i]; 7654 sum += buffer[i];
@@ -7659,7 +7658,7 @@ e1000_mng_write_cmd_header(struct e1000_hw * hw,
7659 length >>= 2; 7658 length >>= 2;
7660 /* The device driver writes the relevant command block into the ram area. */ 7659 /* The device driver writes the relevant command block into the ram area. */
7661 for (i = 0; i < length; i++) { 7660 for (i = 0; i < length; i++) {
7662 E1000_WRITE_REG_ARRAY_DWORD(hw, HOST_IF, i, *((uint32_t *) hdr + i)); 7661 E1000_WRITE_REG_ARRAY_DWORD(hw, HOST_IF, i, *((u32 *) hdr + i));
7663 E1000_WRITE_FLUSH(hw); 7662 E1000_WRITE_FLUSH(hw);
7664 } 7663 }
7665 7664
@@ -7673,10 +7672,10 @@ e1000_mng_write_cmd_header(struct e1000_hw * hw,
7673 * 7672 *
7674 * returns - E1000_SUCCESS for success. 7673 * returns - E1000_SUCCESS for success.
7675 ****************************************************************************/ 7674 ****************************************************************************/
7676static int32_t 7675static s32
7677e1000_mng_write_commit(struct e1000_hw * hw) 7676e1000_mng_write_commit(struct e1000_hw * hw)
7678{ 7677{
7679 uint32_t hicr; 7678 u32 hicr;
7680 7679
7681 hicr = E1000_READ_REG(hw, HICR); 7680 hicr = E1000_READ_REG(hw, HICR);
7682 /* Setting this bit tells the ARC that a new command is pending. */ 7681 /* Setting this bit tells the ARC that a new command is pending. */
@@ -7689,35 +7688,35 @@ e1000_mng_write_commit(struct e1000_hw * hw)
7689/***************************************************************************** 7688/*****************************************************************************
7690 * This function checks the mode of the firmware. 7689 * This function checks the mode of the firmware.
7691 * 7690 *
7692 * returns - TRUE when the mode is IAMT or FALSE. 7691 * returns - true when the mode is IAMT or false.
7693 ****************************************************************************/ 7692 ****************************************************************************/
7694boolean_t 7693bool
7695e1000_check_mng_mode(struct e1000_hw *hw) 7694e1000_check_mng_mode(struct e1000_hw *hw)
7696{ 7695{
7697 uint32_t fwsm; 7696 u32 fwsm;
7698 7697
7699 fwsm = E1000_READ_REG(hw, FWSM); 7698 fwsm = E1000_READ_REG(hw, FWSM);
7700 7699
7701 if (hw->mac_type == e1000_ich8lan) { 7700 if (hw->mac_type == e1000_ich8lan) {
7702 if ((fwsm & E1000_FWSM_MODE_MASK) == 7701 if ((fwsm & E1000_FWSM_MODE_MASK) ==
7703 (E1000_MNG_ICH_IAMT_MODE << E1000_FWSM_MODE_SHIFT)) 7702 (E1000_MNG_ICH_IAMT_MODE << E1000_FWSM_MODE_SHIFT))
7704 return TRUE; 7703 return true;
7705 } else if ((fwsm & E1000_FWSM_MODE_MASK) == 7704 } else if ((fwsm & E1000_FWSM_MODE_MASK) ==
7706 (E1000_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT)) 7705 (E1000_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT))
7707 return TRUE; 7706 return true;
7708 7707
7709 return FALSE; 7708 return false;
7710} 7709}
7711 7710
7712 7711
7713/***************************************************************************** 7712/*****************************************************************************
7714 * This function writes the dhcp info . 7713 * This function writes the dhcp info .
7715 ****************************************************************************/ 7714 ****************************************************************************/
7716int32_t 7715s32
7717e1000_mng_write_dhcp_info(struct e1000_hw * hw, uint8_t *buffer, 7716e1000_mng_write_dhcp_info(struct e1000_hw * hw, u8 *buffer,
7718 uint16_t length) 7717 u16 length)
7719{ 7718{
7720 int32_t ret_val; 7719 s32 ret_val;
7721 struct e1000_host_mng_command_header hdr; 7720 struct e1000_host_mng_command_header hdr;
7722 7721
7723 hdr.command_id = E1000_MNG_DHCP_TX_PAYLOAD_CMD; 7722 hdr.command_id = E1000_MNG_DHCP_TX_PAYLOAD_CMD;
@@ -7745,11 +7744,11 @@ e1000_mng_write_dhcp_info(struct e1000_hw * hw, uint8_t *buffer,
7745 * 7744 *
7746 * returns - checksum of buffer contents. 7745 * returns - checksum of buffer contents.
7747 ****************************************************************************/ 7746 ****************************************************************************/
7748static uint8_t 7747static u8
7749e1000_calculate_mng_checksum(char *buffer, uint32_t length) 7748e1000_calculate_mng_checksum(char *buffer, u32 length)
7750{ 7749{
7751 uint8_t sum = 0; 7750 u8 sum = 0;
7752 uint32_t i; 7751 u32 i;
7753 7752
7754 if (!buffer) 7753 if (!buffer)
7755 return 0; 7754 return 0;
@@ -7757,23 +7756,23 @@ e1000_calculate_mng_checksum(char *buffer, uint32_t length)
7757 for (i=0; i < length; i++) 7756 for (i=0; i < length; i++)
7758 sum += buffer[i]; 7757 sum += buffer[i];
7759 7758
7760 return (uint8_t) (0 - sum); 7759 return (u8) (0 - sum);
7761} 7760}
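[editor's note] The checksum helper that closes just above is the usual 8-bit complement: sum every byte, then return 0 - sum, so the buffer plus the stored checksum byte sums to zero modulo 256. A tiny standalone illustration mirroring that code, plus a self-check (the message bytes are made up):

#include <stdint.h>
#include <assert.h>

static uint8_t mng_checksum(const char *buffer, uint32_t length)
{
        uint8_t sum = 0;
        uint32_t i;

        if (!buffer)
                return 0;
        for (i = 0; i < length; i++)
                sum += buffer[i];
        return (uint8_t)(0 - sum);               /* complement so the total sums to 0 */
}

int main(void)
{
        char msg[5] = { 0x10, 0x20, 0x30, 0x40, 0 };
        uint8_t sum, i;

        msg[4] = (char)mng_checksum(msg, 4);     /* append the checksum byte */
        for (sum = 0, i = 0; i < 5; i++)
                sum += (uint8_t)msg[i];
        assert(sum == 0);                        /* data + checksum == 0 mod 256 */
        return 0;
}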
7762 7761
7763/***************************************************************************** 7762/*****************************************************************************
7764 * This function checks whether tx pkt filtering needs to be enabled or not. 7763 * This function checks whether tx pkt filtering needs to be enabled or not.
7765 * 7764 *
7766 * returns - TRUE for packet filtering or FALSE. 7765 * returns - true for packet filtering or false.
7767 ****************************************************************************/ 7766 ****************************************************************************/
7768boolean_t 7767bool
7769e1000_enable_tx_pkt_filtering(struct e1000_hw *hw) 7768e1000_enable_tx_pkt_filtering(struct e1000_hw *hw)
7770{ 7769{
7771 /* called in init as well as watchdog timer functions */ 7770 /* called in init as well as watchdog timer functions */
7772 7771
7773 int32_t ret_val, checksum; 7772 s32 ret_val, checksum;
7774 boolean_t tx_filter = FALSE; 7773 bool tx_filter = false;
7775 struct e1000_host_mng_dhcp_cookie *hdr = &(hw->mng_cookie); 7774 struct e1000_host_mng_dhcp_cookie *hdr = &(hw->mng_cookie);
7776 uint8_t *buffer = (uint8_t *) &(hw->mng_cookie); 7775 u8 *buffer = (u8 *) &(hw->mng_cookie);
7777 7776
7778 if (e1000_check_mng_mode(hw)) { 7777 if (e1000_check_mng_mode(hw)) {
7779 ret_val = e1000_mng_enable_host_if(hw); 7778 ret_val = e1000_mng_enable_host_if(hw);
@@ -7787,11 +7786,11 @@ e1000_enable_tx_pkt_filtering(struct e1000_hw *hw)
7787 E1000_MNG_DHCP_COOKIE_LENGTH)) { 7786 E1000_MNG_DHCP_COOKIE_LENGTH)) {
7788 if (hdr->status & 7787 if (hdr->status &
7789 E1000_MNG_DHCP_COOKIE_STATUS_PARSING_SUPPORT) 7788 E1000_MNG_DHCP_COOKIE_STATUS_PARSING_SUPPORT)
7790 tx_filter = TRUE; 7789 tx_filter = true;
7791 } else 7790 } else
7792 tx_filter = TRUE; 7791 tx_filter = true;
7793 } else 7792 } else
7794 tx_filter = TRUE; 7793 tx_filter = true;
7795 } 7794 }
7796 } 7795 }
7797 7796
@@ -7804,41 +7803,41 @@ e1000_enable_tx_pkt_filtering(struct e1000_hw *hw)
7804 * 7803 *
7805 * hw - Struct containing variables accessed by shared code 7804 * hw - Struct containing variables accessed by shared code
7806 * 7805 *
7807 * returns: - TRUE/FALSE 7806 * returns: - true/false
7808 * 7807 *
7809 *****************************************************************************/ 7808 *****************************************************************************/
7810uint32_t 7809u32
7811e1000_enable_mng_pass_thru(struct e1000_hw *hw) 7810e1000_enable_mng_pass_thru(struct e1000_hw *hw)
7812{ 7811{
7813 uint32_t manc; 7812 u32 manc;
7814 uint32_t fwsm, factps; 7813 u32 fwsm, factps;
7815 7814
7816 if (hw->asf_firmware_present) { 7815 if (hw->asf_firmware_present) {
7817 manc = E1000_READ_REG(hw, MANC); 7816 manc = E1000_READ_REG(hw, MANC);
7818 7817
7819 if (!(manc & E1000_MANC_RCV_TCO_EN) || 7818 if (!(manc & E1000_MANC_RCV_TCO_EN) ||
7820 !(manc & E1000_MANC_EN_MAC_ADDR_FILTER)) 7819 !(manc & E1000_MANC_EN_MAC_ADDR_FILTER))
7821 return FALSE; 7820 return false;
7822 if (e1000_arc_subsystem_valid(hw) == TRUE) { 7821 if (e1000_arc_subsystem_valid(hw)) {
7823 fwsm = E1000_READ_REG(hw, FWSM); 7822 fwsm = E1000_READ_REG(hw, FWSM);
7824 factps = E1000_READ_REG(hw, FACTPS); 7823 factps = E1000_READ_REG(hw, FACTPS);
7825 7824
7826 if ((((fwsm & E1000_FWSM_MODE_MASK) >> E1000_FWSM_MODE_SHIFT) == 7825 if ((((fwsm & E1000_FWSM_MODE_MASK) >> E1000_FWSM_MODE_SHIFT) ==
7827 e1000_mng_mode_pt) && !(factps & E1000_FACTPS_MNGCG)) 7826 e1000_mng_mode_pt) && !(factps & E1000_FACTPS_MNGCG))
7828 return TRUE; 7827 return true;
7829 } else 7828 } else
7830 if ((manc & E1000_MANC_SMBUS_EN) && !(manc & E1000_MANC_ASF_EN)) 7829 if ((manc & E1000_MANC_SMBUS_EN) && !(manc & E1000_MANC_ASF_EN))
7831 return TRUE; 7830 return true;
7832 } 7831 }
7833 return FALSE; 7832 return false;
7834} 7833}
7835 7834
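[editor's note] Condensed, the pass-through check above is a boolean predicate over three registers: MANC must have both RCV_TCO_EN and EN_MAC_ADDR_FILTER set; then either the ARC subsystem is valid and FWSM reports pass-through mode with FACTPS.MNGCG clear, or (without a valid ARC subsystem) MANC has SMBUS_EN set and ASF_EN clear. A sketch of that decision tree only — the bit positions and the MNG_MODE_PT value below are illustrative, not the real register layout:

#include <stdbool.h>
#include <stdint.h>

#define MANC_RCV_TCO_EN        (1u << 0)     /* illustrative bit positions */
#define MANC_EN_MAC_ADDR_FILT  (1u << 1)
#define MANC_SMBUS_EN          (1u << 2)
#define MANC_ASF_EN            (1u << 3)
#define FACTPS_MNGCG           (1u << 0)
#define MNG_MODE_PT            1u            /* hypothetical pass-through mode code */

struct nic {
        bool     asf_firmware_present;
        bool     arc_subsystem_valid;
        uint32_t manc;
        uint32_t fwsm_mode;                  /* FWSM mode field, already masked+shifted */
        uint32_t factps;
};

static bool enable_mng_pass_thru(const struct nic *nic)
{
        if (!nic->asf_firmware_present)
                return false;
        if (!(nic->manc & MANC_RCV_TCO_EN) ||
            !(nic->manc & MANC_EN_MAC_ADDR_FILT))
                return false;

        if (nic->arc_subsystem_valid)        /* firmware-valid path: check FWSM/FACTPS */
                return nic->fwsm_mode == MNG_MODE_PT &&
                       !(nic->factps & FACTPS_MNGCG);

        /* otherwise fall back to the SMBus-without-ASF check */
        return (nic->manc & MANC_SMBUS_EN) && !(nic->manc & MANC_ASF_EN);
}

int main(void)
{
        struct nic n = {
                .asf_firmware_present = true,
                .arc_subsystem_valid  = true,
                .manc      = MANC_RCV_TCO_EN | MANC_EN_MAC_ADDR_FILT,
                .fwsm_mode = MNG_MODE_PT,
                .factps    = 0,
        };

        return enable_mng_pass_thru(&n) ? 0 : 1;   /* expect pass-through enabled */
}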
7836static int32_t 7835static s32
7837e1000_polarity_reversal_workaround(struct e1000_hw *hw) 7836e1000_polarity_reversal_workaround(struct e1000_hw *hw)
7838{ 7837{
7839 int32_t ret_val; 7838 s32 ret_val;
7840 uint16_t mii_status_reg; 7839 u16 mii_status_reg;
7841 uint16_t i; 7840 u16 i;
7842 7841
7843 /* Polarity reversal workaround for forced 10F/10H links. */ 7842 /* Polarity reversal workaround for forced 10F/10H links. */
7844 7843
@@ -7930,7 +7929,7 @@ e1000_polarity_reversal_workaround(struct e1000_hw *hw)
7930static void 7929static void
7931e1000_set_pci_express_master_disable(struct e1000_hw *hw) 7930e1000_set_pci_express_master_disable(struct e1000_hw *hw)
7932{ 7931{
7933 uint32_t ctrl; 7932 u32 ctrl;
7934 7933
7935 DEBUGFUNC("e1000_set_pci_express_master_disable"); 7934 DEBUGFUNC("e1000_set_pci_express_master_disable");
7936 7935
@@ -7953,10 +7952,10 @@ e1000_set_pci_express_master_disable(struct e1000_hw *hw)
7953 * E1000_SUCCESS master requests disabled. 7952 * E1000_SUCCESS master requests disabled.
7954 * 7953 *
7955 ******************************************************************************/ 7954 ******************************************************************************/
7956int32_t 7955s32
7957e1000_disable_pciex_master(struct e1000_hw *hw) 7956e1000_disable_pciex_master(struct e1000_hw *hw)
7958{ 7957{
7959 int32_t timeout = MASTER_DISABLE_TIMEOUT; /* 80ms */ 7958 s32 timeout = MASTER_DISABLE_TIMEOUT; /* 80ms */
7960 7959
7961 DEBUGFUNC("e1000_disable_pciex_master"); 7960 DEBUGFUNC("e1000_disable_pciex_master");
7962 7961
@@ -7991,10 +7990,10 @@ e1000_disable_pciex_master(struct e1000_hw *hw)
7991 * E1000_SUCCESS at any other case. 7990 * E1000_SUCCESS at any other case.
7992 * 7991 *
7993 ******************************************************************************/ 7992 ******************************************************************************/
7994static int32_t 7993static s32
7995e1000_get_auto_rd_done(struct e1000_hw *hw) 7994e1000_get_auto_rd_done(struct e1000_hw *hw)
7996{ 7995{
7997 int32_t timeout = AUTO_READ_DONE_TIMEOUT; 7996 s32 timeout = AUTO_READ_DONE_TIMEOUT;
7998 7997
7999 DEBUGFUNC("e1000_get_auto_rd_done"); 7998 DEBUGFUNC("e1000_get_auto_rd_done");
8000 7999
@@ -8039,11 +8038,11 @@ e1000_get_auto_rd_done(struct e1000_hw *hw)
8039 * E1000_SUCCESS at any other case. 8038 * E1000_SUCCESS at any other case.
8040 * 8039 *
8041 ***************************************************************************/ 8040 ***************************************************************************/
8042static int32_t 8041static s32
8043e1000_get_phy_cfg_done(struct e1000_hw *hw) 8042e1000_get_phy_cfg_done(struct e1000_hw *hw)
8044{ 8043{
8045 int32_t timeout = PHY_CFG_TIMEOUT; 8044 s32 timeout = PHY_CFG_TIMEOUT;
8046 uint32_t cfg_mask = E1000_EEPROM_CFG_DONE; 8045 u32 cfg_mask = E1000_EEPROM_CFG_DONE;
8047 8046
8048 DEBUGFUNC("e1000_get_phy_cfg_done"); 8047 DEBUGFUNC("e1000_get_phy_cfg_done");
8049 8048
@@ -8086,11 +8085,11 @@ e1000_get_phy_cfg_done(struct e1000_hw *hw)
8086 * E1000_SUCCESS at any other case. 8085 * E1000_SUCCESS at any other case.
8087 * 8086 *
8088 ***************************************************************************/ 8087 ***************************************************************************/
8089static int32_t 8088static s32
8090e1000_get_hw_eeprom_semaphore(struct e1000_hw *hw) 8089e1000_get_hw_eeprom_semaphore(struct e1000_hw *hw)
8091{ 8090{
8092 int32_t timeout; 8091 s32 timeout;
8093 uint32_t swsm; 8092 u32 swsm;
8094 8093
8095 DEBUGFUNC("e1000_get_hw_eeprom_semaphore"); 8094 DEBUGFUNC("e1000_get_hw_eeprom_semaphore");
8096 8095
@@ -8139,7 +8138,7 @@ e1000_get_hw_eeprom_semaphore(struct e1000_hw *hw)
8139static void 8138static void
8140e1000_put_hw_eeprom_semaphore(struct e1000_hw *hw) 8139e1000_put_hw_eeprom_semaphore(struct e1000_hw *hw)
8141{ 8140{
8142 uint32_t swsm; 8141 u32 swsm;
8143 8142
8144 DEBUGFUNC("e1000_put_hw_eeprom_semaphore"); 8143 DEBUGFUNC("e1000_put_hw_eeprom_semaphore");
8145 8144
@@ -8165,11 +8164,11 @@ e1000_put_hw_eeprom_semaphore(struct e1000_hw *hw)
8165 * E1000_SUCCESS at any other case. 8164 * E1000_SUCCESS at any other case.
8166 * 8165 *
8167 ***************************************************************************/ 8166 ***************************************************************************/
8168static int32_t 8167static s32
8169e1000_get_software_semaphore(struct e1000_hw *hw) 8168e1000_get_software_semaphore(struct e1000_hw *hw)
8170{ 8169{
8171 int32_t timeout = hw->eeprom.word_size + 1; 8170 s32 timeout = hw->eeprom.word_size + 1;
8172 uint32_t swsm; 8171 u32 swsm;
8173 8172
8174 DEBUGFUNC("e1000_get_software_semaphore"); 8173 DEBUGFUNC("e1000_get_software_semaphore");
8175 8174
@@ -8204,7 +8203,7 @@ e1000_get_software_semaphore(struct e1000_hw *hw)
8204static void 8203static void
8205e1000_release_software_semaphore(struct e1000_hw *hw) 8204e1000_release_software_semaphore(struct e1000_hw *hw)
8206{ 8205{
8207 uint32_t swsm; 8206 u32 swsm;
8208 8207
8209 DEBUGFUNC("e1000_release_software_semaphore"); 8208 DEBUGFUNC("e1000_release_software_semaphore");
8210 8209
@@ -8229,11 +8228,11 @@ e1000_release_software_semaphore(struct e1000_hw *hw)
8229 * E1000_SUCCESS 8228 * E1000_SUCCESS
8230 * 8229 *
8231 *****************************************************************************/ 8230 *****************************************************************************/
8232int32_t 8231s32
8233e1000_check_phy_reset_block(struct e1000_hw *hw) 8232e1000_check_phy_reset_block(struct e1000_hw *hw)
8234{ 8233{
8235 uint32_t manc = 0; 8234 u32 manc = 0;
8236 uint32_t fwsm = 0; 8235 u32 fwsm = 0;
8237 8236
8238 if (hw->mac_type == e1000_ich8lan) { 8237 if (hw->mac_type == e1000_ich8lan) {
8239 fwsm = E1000_READ_REG(hw, FWSM); 8238 fwsm = E1000_READ_REG(hw, FWSM);
@@ -8247,10 +8246,10 @@ e1000_check_phy_reset_block(struct e1000_hw *hw)
8247 E1000_BLK_PHY_RESET : E1000_SUCCESS; 8246 E1000_BLK_PHY_RESET : E1000_SUCCESS;
8248} 8247}
8249 8248
8250static uint8_t 8249static u8
8251e1000_arc_subsystem_valid(struct e1000_hw *hw) 8250e1000_arc_subsystem_valid(struct e1000_hw *hw)
8252{ 8251{
8253 uint32_t fwsm; 8252 u32 fwsm;
8254 8253
8255 /* On 8257x silicon, registers in the range of 0x8800 - 0x8FFC 8254 /* On 8257x silicon, registers in the range of 0x8800 - 0x8FFC
8256 * may not be provided a DMA clock when no manageability features are 8255 * may not be provided a DMA clock when no manageability features are
@@ -8264,14 +8263,14 @@ e1000_arc_subsystem_valid(struct e1000_hw *hw)
8264 case e1000_80003es2lan: 8263 case e1000_80003es2lan:
8265 fwsm = E1000_READ_REG(hw, FWSM); 8264 fwsm = E1000_READ_REG(hw, FWSM);
8266 if ((fwsm & E1000_FWSM_MODE_MASK) != 0) 8265 if ((fwsm & E1000_FWSM_MODE_MASK) != 0)
8267 return TRUE; 8266 return true;
8268 break; 8267 break;
8269 case e1000_ich8lan: 8268 case e1000_ich8lan:
8270 return TRUE; 8269 return true;
8271 default: 8270 default:
8272 break; 8271 break;
8273 } 8272 }
8274 return FALSE; 8273 return false;
8275} 8274}
8276 8275
8277 8276
@@ -8284,10 +8283,10 @@ e1000_arc_subsystem_valid(struct e1000_hw *hw)
8284 * returns: E1000_SUCCESS 8283 * returns: E1000_SUCCESS
8285 * 8284 *
8286 *****************************************************************************/ 8285 *****************************************************************************/
8287static int32_t 8286static s32
8288e1000_set_pci_ex_no_snoop(struct e1000_hw *hw, uint32_t no_snoop) 8287e1000_set_pci_ex_no_snoop(struct e1000_hw *hw, u32 no_snoop)
8289{ 8288{
8290 uint32_t gcr_reg = 0; 8289 u32 gcr_reg = 0;
8291 8290
8292 DEBUGFUNC("e1000_set_pci_ex_no_snoop"); 8291 DEBUGFUNC("e1000_set_pci_ex_no_snoop");
8293 8292
@@ -8304,7 +8303,7 @@ e1000_set_pci_ex_no_snoop(struct e1000_hw *hw, uint32_t no_snoop)
8304 E1000_WRITE_REG(hw, GCR, gcr_reg); 8303 E1000_WRITE_REG(hw, GCR, gcr_reg);
8305 } 8304 }
8306 if (hw->mac_type == e1000_ich8lan) { 8305 if (hw->mac_type == e1000_ich8lan) {
8307 uint32_t ctrl_ext; 8306 u32 ctrl_ext;
8308 8307
8309 E1000_WRITE_REG(hw, GCR, PCI_EX_82566_SNOOP_ALL); 8308 E1000_WRITE_REG(hw, GCR, PCI_EX_82566_SNOOP_ALL);
8310 8309
@@ -8325,11 +8324,11 @@ e1000_set_pci_ex_no_snoop(struct e1000_hw *hw, uint32_t no_snoop)
8325 * hw: Struct containing variables accessed by shared code 8324 * hw: Struct containing variables accessed by shared code
8326 * 8325 *
8327 ***************************************************************************/ 8326 ***************************************************************************/
8328static int32_t 8327static s32
8329e1000_get_software_flag(struct e1000_hw *hw) 8328e1000_get_software_flag(struct e1000_hw *hw)
8330{ 8329{
8331 int32_t timeout = PHY_CFG_TIMEOUT; 8330 s32 timeout = PHY_CFG_TIMEOUT;
8332 uint32_t extcnf_ctrl; 8331 u32 extcnf_ctrl;
8333 8332
8334 DEBUGFUNC("e1000_get_software_flag"); 8333 DEBUGFUNC("e1000_get_software_flag");
8335 8334
@@ -8367,7 +8366,7 @@ e1000_get_software_flag(struct e1000_hw *hw)
8367static void 8366static void
8368e1000_release_software_flag(struct e1000_hw *hw) 8367e1000_release_software_flag(struct e1000_hw *hw)
8369{ 8368{
8370 uint32_t extcnf_ctrl; 8369 u32 extcnf_ctrl;
8371 8370
8372 DEBUGFUNC("e1000_release_software_flag"); 8371 DEBUGFUNC("e1000_release_software_flag");
8373 8372
@@ -8389,16 +8388,16 @@ e1000_release_software_flag(struct e1000_hw *hw)
8389 * data - word read from the EEPROM 8388 * data - word read from the EEPROM
8390 * words - number of words to read 8389 * words - number of words to read
8391 *****************************************************************************/ 8390 *****************************************************************************/
8392static int32_t 8391static s32
8393e1000_read_eeprom_ich8(struct e1000_hw *hw, uint16_t offset, uint16_t words, 8392e1000_read_eeprom_ich8(struct e1000_hw *hw, u16 offset, u16 words,
8394 uint16_t *data) 8393 u16 *data)
8395{ 8394{
8396 int32_t error = E1000_SUCCESS; 8395 s32 error = E1000_SUCCESS;
8397 uint32_t flash_bank = 0; 8396 u32 flash_bank = 0;
8398 uint32_t act_offset = 0; 8397 u32 act_offset = 0;
8399 uint32_t bank_offset = 0; 8398 u32 bank_offset = 0;
8400 uint16_t word = 0; 8399 u16 word = 0;
8401 uint16_t i = 0; 8400 u16 i = 0;
8402 8401
8403 /* We need to know which is the valid flash bank. In the event 8402 /* We need to know which is the valid flash bank. In the event
8404 * that we didn't allocate eeprom_shadow_ram, we may not be 8403 * that we didn't allocate eeprom_shadow_ram, we may not be
@@ -8417,7 +8416,7 @@ e1000_read_eeprom_ich8(struct e1000_hw *hw, uint16_t offset, uint16_t words,
8417 8416
8418 for (i = 0; i < words; i++) { 8417 for (i = 0; i < words; i++) {
8419 if (hw->eeprom_shadow_ram != NULL && 8418 if (hw->eeprom_shadow_ram != NULL &&
8420 hw->eeprom_shadow_ram[offset+i].modified == TRUE) { 8419 hw->eeprom_shadow_ram[offset+i].modified) {
8421 data[i] = hw->eeprom_shadow_ram[offset+i].eeprom_word; 8420 data[i] = hw->eeprom_shadow_ram[offset+i].eeprom_word;
8422 } else { 8421 } else {
8423 /* The NVM part needs a byte offset, hence * 2 */ 8422 /* The NVM part needs a byte offset, hence * 2 */
@@ -8445,12 +8444,12 @@ e1000_read_eeprom_ich8(struct e1000_hw *hw, uint16_t offset, uint16_t words,
8445 * words - number of words to write 8444 * words - number of words to write
8446 * data - words to write to the EEPROM 8445 * data - words to write to the EEPROM
8447 *****************************************************************************/ 8446 *****************************************************************************/
8448static int32_t 8447static s32
8449e1000_write_eeprom_ich8(struct e1000_hw *hw, uint16_t offset, uint16_t words, 8448e1000_write_eeprom_ich8(struct e1000_hw *hw, u16 offset, u16 words,
8450 uint16_t *data) 8449 u16 *data)
8451{ 8450{
8452 uint32_t i = 0; 8451 u32 i = 0;
8453 int32_t error = E1000_SUCCESS; 8452 s32 error = E1000_SUCCESS;
8454 8453
8455 error = e1000_get_software_flag(hw); 8454 error = e1000_get_software_flag(hw);
8456 if (error != E1000_SUCCESS) 8455 if (error != E1000_SUCCESS)
@@ -8466,7 +8465,7 @@ e1000_write_eeprom_ich8(struct e1000_hw *hw, uint16_t offset, uint16_t words,
8466 if (hw->eeprom_shadow_ram != NULL) { 8465 if (hw->eeprom_shadow_ram != NULL) {
8467 for (i = 0; i < words; i++) { 8466 for (i = 0; i < words; i++) {
8468 if ((offset + i) < E1000_SHADOW_RAM_WORDS) { 8467 if ((offset + i) < E1000_SHADOW_RAM_WORDS) {
8469 hw->eeprom_shadow_ram[offset+i].modified = TRUE; 8468 hw->eeprom_shadow_ram[offset+i].modified = true;
8470 hw->eeprom_shadow_ram[offset+i].eeprom_word = data[i]; 8469 hw->eeprom_shadow_ram[offset+i].eeprom_word = data[i];
8471 } else { 8470 } else {
8472 error = -E1000_ERR_EEPROM; 8471 error = -E1000_ERR_EEPROM;
@@ -8492,12 +8491,12 @@ e1000_write_eeprom_ich8(struct e1000_hw *hw, uint16_t offset, uint16_t words,
8492 * 8491 *
8493 * hw - The pointer to the hw structure 8492 * hw - The pointer to the hw structure
8494 ****************************************************************************/ 8493 ****************************************************************************/
8495static int32_t 8494static s32
8496e1000_ich8_cycle_init(struct e1000_hw *hw) 8495e1000_ich8_cycle_init(struct e1000_hw *hw)
8497{ 8496{
8498 union ich8_hws_flash_status hsfsts; 8497 union ich8_hws_flash_status hsfsts;
8499 int32_t error = E1000_ERR_EEPROM; 8498 s32 error = E1000_ERR_EEPROM;
8500 int32_t i = 0; 8499 s32 i = 0;
8501 8500
8502 DEBUGFUNC("e1000_ich8_cycle_init"); 8501 DEBUGFUNC("e1000_ich8_cycle_init");
8503 8502
@@ -8559,13 +8558,13 @@ e1000_ich8_cycle_init(struct e1000_hw *hw)
8559 * 8558 *
8560 * hw - The pointer to the hw structure 8559 * hw - The pointer to the hw structure
8561 ****************************************************************************/ 8560 ****************************************************************************/
8562static int32_t 8561static s32
8563e1000_ich8_flash_cycle(struct e1000_hw *hw, uint32_t timeout) 8562e1000_ich8_flash_cycle(struct e1000_hw *hw, u32 timeout)
8564{ 8563{
8565 union ich8_hws_flash_ctrl hsflctl; 8564 union ich8_hws_flash_ctrl hsflctl;
8566 union ich8_hws_flash_status hsfsts; 8565 union ich8_hws_flash_status hsfsts;
8567 int32_t error = E1000_ERR_EEPROM; 8566 s32 error = E1000_ERR_EEPROM;
8568 uint32_t i = 0; 8567 u32 i = 0;
8569 8568
8570 /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */ 8569 /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
8571 hsflctl.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFCTL); 8570 hsflctl.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
@@ -8594,16 +8593,16 @@ e1000_ich8_flash_cycle(struct e1000_hw *hw, uint32_t timeout)
8594 * size - Size of data to read, 1=byte 2=word 8593 * size - Size of data to read, 1=byte 2=word
8595 * data - Pointer to the word to store the value read. 8594 * data - Pointer to the word to store the value read.
8596 *****************************************************************************/ 8595 *****************************************************************************/
8597static int32_t 8596static s32
8598e1000_read_ich8_data(struct e1000_hw *hw, uint32_t index, 8597e1000_read_ich8_data(struct e1000_hw *hw, u32 index,
8599 uint32_t size, uint16_t* data) 8598 u32 size, u16* data)
8600{ 8599{
8601 union ich8_hws_flash_status hsfsts; 8600 union ich8_hws_flash_status hsfsts;
8602 union ich8_hws_flash_ctrl hsflctl; 8601 union ich8_hws_flash_ctrl hsflctl;
8603 uint32_t flash_linear_address; 8602 u32 flash_linear_address;
8604 uint32_t flash_data = 0; 8603 u32 flash_data = 0;
8605 int32_t error = -E1000_ERR_EEPROM; 8604 s32 error = -E1000_ERR_EEPROM;
8606 int32_t count = 0; 8605 s32 count = 0;
8607 8606
8608 DEBUGFUNC("e1000_read_ich8_data"); 8607 DEBUGFUNC("e1000_read_ich8_data");
8609 8608
@@ -8641,9 +8640,9 @@ e1000_read_ich8_data(struct e1000_hw *hw, uint32_t index,
8641 if (error == E1000_SUCCESS) { 8640 if (error == E1000_SUCCESS) {
8642 flash_data = E1000_READ_ICH_FLASH_REG(hw, ICH_FLASH_FDATA0); 8641 flash_data = E1000_READ_ICH_FLASH_REG(hw, ICH_FLASH_FDATA0);
8643 if (size == 1) { 8642 if (size == 1) {
8644 *data = (uint8_t)(flash_data & 0x000000FF); 8643 *data = (u8)(flash_data & 0x000000FF);
8645 } else if (size == 2) { 8644 } else if (size == 2) {
8646 *data = (uint16_t)(flash_data & 0x0000FFFF); 8645 *data = (u16)(flash_data & 0x0000FFFF);
8647 } 8646 }
8648 break; 8647 break;
8649 } else { 8648 } else {
@@ -8673,16 +8672,16 @@ e1000_read_ich8_data(struct e1000_hw *hw, uint32_t index,
8673 * size - Size of data to read, 1=byte 2=word 8672 * size - Size of data to read, 1=byte 2=word
8674 * data - The byte(s) to write to the NVM. 8673 * data - The byte(s) to write to the NVM.
8675 *****************************************************************************/ 8674 *****************************************************************************/
8676static int32_t 8675static s32
8677e1000_write_ich8_data(struct e1000_hw *hw, uint32_t index, uint32_t size, 8676e1000_write_ich8_data(struct e1000_hw *hw, u32 index, u32 size,
8678 uint16_t data) 8677 u16 data)
8679{ 8678{
8680 union ich8_hws_flash_status hsfsts; 8679 union ich8_hws_flash_status hsfsts;
8681 union ich8_hws_flash_ctrl hsflctl; 8680 union ich8_hws_flash_ctrl hsflctl;
8682 uint32_t flash_linear_address; 8681 u32 flash_linear_address;
8683 uint32_t flash_data = 0; 8682 u32 flash_data = 0;
8684 int32_t error = -E1000_ERR_EEPROM; 8683 s32 error = -E1000_ERR_EEPROM;
8685 int32_t count = 0; 8684 s32 count = 0;
8686 8685
8687 DEBUGFUNC("e1000_write_ich8_data"); 8686 DEBUGFUNC("e1000_write_ich8_data");
8688 8687
@@ -8711,9 +8710,9 @@ e1000_write_ich8_data(struct e1000_hw *hw, uint32_t index, uint32_t size,
8711 E1000_WRITE_ICH_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_address); 8710 E1000_WRITE_ICH_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_address);
8712 8711
8713 if (size == 1) 8712 if (size == 1)
8714 flash_data = (uint32_t)data & 0x00FF; 8713 flash_data = (u32)data & 0x00FF;
8715 else 8714 else
8716 flash_data = (uint32_t)data; 8715 flash_data = (u32)data;
8717 8716
8718 E1000_WRITE_ICH_FLASH_REG(hw, ICH_FLASH_FDATA0, flash_data); 8717 E1000_WRITE_ICH_FLASH_REG(hw, ICH_FLASH_FDATA0, flash_data);
8719 8718
@@ -8748,15 +8747,15 @@ e1000_write_ich8_data(struct e1000_hw *hw, uint32_t index, uint32_t size,
8748 * index - The index of the byte to read. 8747 * index - The index of the byte to read.
8749 * data - Pointer to a byte to store the value read. 8748 * data - Pointer to a byte to store the value read.
8750 *****************************************************************************/ 8749 *****************************************************************************/
8751static int32_t 8750static s32
8752e1000_read_ich8_byte(struct e1000_hw *hw, uint32_t index, uint8_t* data) 8751e1000_read_ich8_byte(struct e1000_hw *hw, u32 index, u8* data)
8753{ 8752{
8754 int32_t status = E1000_SUCCESS; 8753 s32 status = E1000_SUCCESS;
8755 uint16_t word = 0; 8754 u16 word = 0;
8756 8755
8757 status = e1000_read_ich8_data(hw, index, 1, &word); 8756 status = e1000_read_ich8_data(hw, index, 1, &word);
8758 if (status == E1000_SUCCESS) { 8757 if (status == E1000_SUCCESS) {
8759 *data = (uint8_t)word; 8758 *data = (u8)word;
8760 } 8759 }
8761 8760
8762 return status; 8761 return status;
@@ -8771,11 +8770,11 @@ e1000_read_ich8_byte(struct e1000_hw *hw, uint32_t index, uint8_t* data)
8771 * index - The index of the byte to write. 8770 * index - The index of the byte to write.
8772 * byte - The byte to write to the NVM. 8771 * byte - The byte to write to the NVM.
8773 *****************************************************************************/ 8772 *****************************************************************************/
8774static int32_t 8773static s32
8775e1000_verify_write_ich8_byte(struct e1000_hw *hw, uint32_t index, uint8_t byte) 8774e1000_verify_write_ich8_byte(struct e1000_hw *hw, u32 index, u8 byte)
8776{ 8775{
8777 int32_t error = E1000_SUCCESS; 8776 s32 error = E1000_SUCCESS;
8778 int32_t program_retries = 0; 8777 s32 program_retries = 0;
8779 8778
8780 DEBUGOUT2("Byte := %2.2X Offset := %d\n", byte, index); 8779 DEBUGOUT2("Byte := %2.2X Offset := %d\n", byte, index);
8781 8780
@@ -8804,11 +8803,11 @@ e1000_verify_write_ich8_byte(struct e1000_hw *hw, uint32_t index, uint8_t byte)
8804 * index - The index of the byte to read. 8803 * index - The index of the byte to read.
8805 * data - The byte to write to the NVM. 8804 * data - The byte to write to the NVM.
8806 *****************************************************************************/ 8805 *****************************************************************************/
8807static int32_t 8806static s32
8808e1000_write_ich8_byte(struct e1000_hw *hw, uint32_t index, uint8_t data) 8807e1000_write_ich8_byte(struct e1000_hw *hw, u32 index, u8 data)
8809{ 8808{
8810 int32_t status = E1000_SUCCESS; 8809 s32 status = E1000_SUCCESS;
8811 uint16_t word = (uint16_t)data; 8810 u16 word = (u16)data;
8812 8811
8813 status = e1000_write_ich8_data(hw, index, 1, word); 8812 status = e1000_write_ich8_data(hw, index, 1, word);
8814 8813
@@ -8822,10 +8821,10 @@ e1000_write_ich8_byte(struct e1000_hw *hw, uint32_t index, uint8_t data)
8822 * index - The starting byte index of the word to read. 8821 * index - The starting byte index of the word to read.
8823 * data - Pointer to a word to store the value read. 8822 * data - Pointer to a word to store the value read.
8824 *****************************************************************************/ 8823 *****************************************************************************/
8825static int32_t 8824static s32
8826e1000_read_ich8_word(struct e1000_hw *hw, uint32_t index, uint16_t *data) 8825e1000_read_ich8_word(struct e1000_hw *hw, u32 index, u16 *data)
8827{ 8826{
8828 int32_t status = E1000_SUCCESS; 8827 s32 status = E1000_SUCCESS;
8829 status = e1000_read_ich8_data(hw, index, 2, data); 8828 status = e1000_read_ich8_data(hw, index, 2, data);
8830 return status; 8829 return status;
8831} 8830}
@@ -8841,19 +8840,19 @@ e1000_read_ich8_word(struct e1000_hw *hw, uint32_t index, uint16_t *data)
8841 * amount of NVM used in each bank is a *minimum* of 4 KBytes, but in fact the 8840 * amount of NVM used in each bank is a *minimum* of 4 KBytes, but in fact the
8842 * bank size may be 4, 8 or 64 KBytes 8841 * bank size may be 4, 8 or 64 KBytes
8843 *****************************************************************************/ 8842 *****************************************************************************/
8844static int32_t 8843static s32
8845e1000_erase_ich8_4k_segment(struct e1000_hw *hw, uint32_t bank) 8844e1000_erase_ich8_4k_segment(struct e1000_hw *hw, u32 bank)
8846{ 8845{
8847 union ich8_hws_flash_status hsfsts; 8846 union ich8_hws_flash_status hsfsts;
8848 union ich8_hws_flash_ctrl hsflctl; 8847 union ich8_hws_flash_ctrl hsflctl;
8849 uint32_t flash_linear_address; 8848 u32 flash_linear_address;
8850 int32_t count = 0; 8849 s32 count = 0;
8851 int32_t error = E1000_ERR_EEPROM; 8850 s32 error = E1000_ERR_EEPROM;
8852 int32_t iteration; 8851 s32 iteration;
8853 int32_t sub_sector_size = 0; 8852 s32 sub_sector_size = 0;
8854 int32_t bank_size; 8853 s32 bank_size;
8855 int32_t j = 0; 8854 s32 j = 0;
8856 int32_t error_flag = 0; 8855 s32 error_flag = 0;
8857 8856
8858 hsfsts.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS); 8857 hsfsts.regval = E1000_READ_ICH_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
8859 8858
@@ -8931,16 +8930,16 @@ e1000_erase_ich8_4k_segment(struct e1000_hw *hw, uint32_t bank)
8931 return error; 8930 return error;
8932} 8931}
8933 8932
8934static int32_t 8933static s32
8935e1000_init_lcd_from_nvm_config_region(struct e1000_hw *hw, 8934e1000_init_lcd_from_nvm_config_region(struct e1000_hw *hw,
8936 uint32_t cnf_base_addr, uint32_t cnf_size) 8935 u32 cnf_base_addr, u32 cnf_size)
8937{ 8936{
8938 uint32_t ret_val = E1000_SUCCESS; 8937 u32 ret_val = E1000_SUCCESS;
8939 uint16_t word_addr, reg_data, reg_addr; 8938 u16 word_addr, reg_data, reg_addr;
8940 uint16_t i; 8939 u16 i;
8941 8940
8942 /* cnf_base_addr is in DWORD */ 8941 /* cnf_base_addr is in DWORD */
8943 word_addr = (uint16_t)(cnf_base_addr << 1); 8942 word_addr = (u16)(cnf_base_addr << 1);
8944 8943
8945 /* cnf_size is returned in size of dwords */ 8944 /* cnf_size is returned in size of dwords */
8946 for (i = 0; i < cnf_size; i++) { 8945 for (i = 0; i < cnf_size; i++) {
@@ -8956,7 +8955,7 @@ e1000_init_lcd_from_nvm_config_region(struct e1000_hw *hw,
8956 if (ret_val != E1000_SUCCESS) 8955 if (ret_val != E1000_SUCCESS)
8957 return ret_val; 8956 return ret_val;
8958 8957
8959 ret_val = e1000_write_phy_reg_ex(hw, (uint32_t)reg_addr, reg_data); 8958 ret_val = e1000_write_phy_reg_ex(hw, (u32)reg_addr, reg_data);
8960 8959
8961 e1000_release_software_flag(hw); 8960 e1000_release_software_flag(hw);
8962 } 8961 }
@@ -8973,10 +8972,10 @@ e1000_init_lcd_from_nvm_config_region(struct e1000_hw *hw,
8973 * 8972 *
8974 * hw: Struct containing variables accessed by shared code 8973 * hw: Struct containing variables accessed by shared code
8975 *****************************************************************************/ 8974 *****************************************************************************/
8976static int32_t 8975static s32
8977e1000_init_lcd_from_nvm(struct e1000_hw *hw) 8976e1000_init_lcd_from_nvm(struct e1000_hw *hw)
8978{ 8977{
8979 uint32_t reg_data, cnf_base_addr, cnf_size, ret_val, loop; 8978 u32 reg_data, cnf_base_addr, cnf_size, ret_val, loop;
8980 8979
8981 if (hw->phy_type != e1000_phy_igp_3) 8980 if (hw->phy_type != e1000_phy_igp_3)
8982 return E1000_SUCCESS; 8981 return E1000_SUCCESS;
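[editor's note] Stepping back, the e1000_hw.c hunks above are almost entirely mechanical: int32_t/uint8_t/uint16_t/uint32_t become the kernel's s32/u8/u16/u32, boolean_t becomes bool, TRUE/FALSE become true/false, and explicit "== TRUE" comparisons are dropped. A hypothetical before/after in plain C (not the driver's actual e1000_check_downshift; int32_t/uint16_t stand in for the kernel's s32/u16, and the 0x0020 mask is made up):

#include <stdint.h>
#include <stdbool.h>

/* Old style, as removed by this patch:
 *
 *     static int32_t check_downshift(uint16_t phy_status, boolean_t *down)
 *     {
 *         *down = (phy_status & 0x0020) ? TRUE : FALSE;
 *         if (*down == TRUE)
 *             return -1;
 *         return 0;
 *     }
 *
 * New style, matching the converted code above: */
static int32_t check_downshift(uint16_t phy_status, bool *down)
{
        *down = phy_status & 0x0020;     /* implicit bool conversion replaces TRUE/FALSE */
        if (*down)                       /* "== TRUE" comparison dropped */
                return -1;
        return 0;
}

int main(void)
{
        bool down;
        int32_t ret = check_downshift(0x0020, &down);

        return (ret == -1 && down) ? 0 : 1;
}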
diff --git a/drivers/net/e1000/e1000_hw.h b/drivers/net/e1000/e1000_hw.h
index a6c3c34feb98..99fce2c5dd26 100644
--- a/drivers/net/e1000/e1000_hw.h
+++ b/drivers/net/e1000/e1000_hw.h
@@ -100,8 +100,8 @@ typedef enum {
100} e1000_fc_type; 100} e1000_fc_type;
101 101
102struct e1000_shadow_ram { 102struct e1000_shadow_ram {
103 uint16_t eeprom_word; 103 u16 eeprom_word;
104 boolean_t modified; 104 bool modified;
105}; 105};
106 106
107/* PCI bus types */ 107/* PCI bus types */
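[editor's note] The e1000_shadow_ram structure just above is what the ich8 EEPROM paths earlier key off: a write marks the shadow entry modified and caches the word, and a later read serves the cached word instead of touching flash. A toy sketch of that read-through/write-back pair, with a hypothetical flash_read_word() stub and an arbitrary table size:

#include <stdint.h>
#include <stdbool.h>

#define SHADOW_RAM_WORDS 2048                /* illustrative size */

struct shadow_ram {
        uint16_t eeprom_word;
        bool     modified;
};

static struct shadow_ram shadow[SHADOW_RAM_WORDS];

/* Hypothetical stand-in for the real ICH8 flash read. */
static uint16_t flash_read_word(uint16_t offset)
{
        (void)offset;
        return 0xFFFF;                       /* erased-flash placeholder */
}

static uint16_t shadow_read(uint16_t offset)
{
        if (shadow[offset].modified)         /* serve pending, not-yet-committed data */
                return shadow[offset].eeprom_word;
        return flash_read_word(offset);
}

static void shadow_write(uint16_t offset, uint16_t data)
{
        shadow[offset].modified = true;      /* actual flash commit happens later */
        shadow[offset].eeprom_word = data;
}

int main(void)
{
        shadow_write(3, 0x1234);
        return shadow_read(3) == 0x1234 ? 0 : 1;   /* served from the shadow copy */
}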
@@ -263,19 +263,19 @@ struct e1000_phy_info {
263}; 263};
264 264
265struct e1000_phy_stats { 265struct e1000_phy_stats {
266 uint32_t idle_errors; 266 u32 idle_errors;
267 uint32_t receive_errors; 267 u32 receive_errors;
268}; 268};
269 269
270struct e1000_eeprom_info { 270struct e1000_eeprom_info {
271 e1000_eeprom_type type; 271 e1000_eeprom_type type;
272 uint16_t word_size; 272 u16 word_size;
273 uint16_t opcode_bits; 273 u16 opcode_bits;
274 uint16_t address_bits; 274 u16 address_bits;
275 uint16_t delay_usec; 275 u16 delay_usec;
276 uint16_t page_size; 276 u16 page_size;
277 boolean_t use_eerd; 277 bool use_eerd;
278 boolean_t use_eewr; 278 bool use_eewr;
279}; 279};
280 280
281/* Flex ASF Information */ 281/* Flex ASF Information */
@@ -308,34 +308,34 @@ typedef enum {
308 308
309/* Function prototypes */ 309/* Function prototypes */
310/* Initialization */ 310/* Initialization */
311int32_t e1000_reset_hw(struct e1000_hw *hw); 311s32 e1000_reset_hw(struct e1000_hw *hw);
312int32_t e1000_init_hw(struct e1000_hw *hw); 312s32 e1000_init_hw(struct e1000_hw *hw);
313int32_t e1000_set_mac_type(struct e1000_hw *hw); 313s32 e1000_set_mac_type(struct e1000_hw *hw);
314void e1000_set_media_type(struct e1000_hw *hw); 314void e1000_set_media_type(struct e1000_hw *hw);
315 315
316/* Link Configuration */ 316/* Link Configuration */
317int32_t e1000_setup_link(struct e1000_hw *hw); 317s32 e1000_setup_link(struct e1000_hw *hw);
318int32_t e1000_phy_setup_autoneg(struct e1000_hw *hw); 318s32 e1000_phy_setup_autoneg(struct e1000_hw *hw);
319void e1000_config_collision_dist(struct e1000_hw *hw); 319void e1000_config_collision_dist(struct e1000_hw *hw);
320int32_t e1000_check_for_link(struct e1000_hw *hw); 320s32 e1000_check_for_link(struct e1000_hw *hw);
321int32_t e1000_get_speed_and_duplex(struct e1000_hw *hw, uint16_t *speed, uint16_t *duplex); 321s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex);
322int32_t e1000_force_mac_fc(struct e1000_hw *hw); 322s32 e1000_force_mac_fc(struct e1000_hw *hw);
323 323
324/* PHY */ 324/* PHY */
325int32_t e1000_read_phy_reg(struct e1000_hw *hw, uint32_t reg_addr, uint16_t *phy_data); 325s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 *phy_data);
326int32_t e1000_write_phy_reg(struct e1000_hw *hw, uint32_t reg_addr, uint16_t data); 326s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 data);
327int32_t e1000_phy_hw_reset(struct e1000_hw *hw); 327s32 e1000_phy_hw_reset(struct e1000_hw *hw);
328int32_t e1000_phy_reset(struct e1000_hw *hw); 328s32 e1000_phy_reset(struct e1000_hw *hw);
329int32_t e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info); 329s32 e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info);
330int32_t e1000_validate_mdi_setting(struct e1000_hw *hw); 330s32 e1000_validate_mdi_setting(struct e1000_hw *hw);
331 331
332void e1000_phy_powerdown_workaround(struct e1000_hw *hw); 332void e1000_phy_powerdown_workaround(struct e1000_hw *hw);
333 333
334/* EEPROM Functions */ 334/* EEPROM Functions */
335int32_t e1000_init_eeprom_params(struct e1000_hw *hw); 335s32 e1000_init_eeprom_params(struct e1000_hw *hw);
336 336
337/* MNG HOST IF functions */ 337/* MNG HOST IF functions */
338uint32_t e1000_enable_mng_pass_thru(struct e1000_hw *hw); 338u32 e1000_enable_mng_pass_thru(struct e1000_hw *hw);
339 339
340#define E1000_MNG_DHCP_TX_PAYLOAD_CMD 64 340#define E1000_MNG_DHCP_TX_PAYLOAD_CMD 64
341#define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8 /* Host Interface data length */ 341#define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8 /* Host Interface data length */
@@ -354,80 +354,80 @@ uint32_t e1000_enable_mng_pass_thru(struct e1000_hw *hw);
354#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F 354#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F
355 355
356struct e1000_host_mng_command_header { 356struct e1000_host_mng_command_header {
357 uint8_t command_id; 357 u8 command_id;
358 uint8_t checksum; 358 u8 checksum;
359 uint16_t reserved1; 359 u16 reserved1;
360 uint16_t reserved2; 360 u16 reserved2;
361 uint16_t command_length; 361 u16 command_length;
362}; 362};
363 363
364struct e1000_host_mng_command_info { 364struct e1000_host_mng_command_info {
365 struct e1000_host_mng_command_header command_header; /* Command Head/Command Result Head has 4 bytes */ 365 struct e1000_host_mng_command_header command_header; /* Command Head/Command Result Head has 4 bytes */
366 uint8_t command_data[E1000_HI_MAX_MNG_DATA_LENGTH]; /* Command data can length 0..0x658*/ 366 u8 command_data[E1000_HI_MAX_MNG_DATA_LENGTH]; /* Command data can length 0..0x658*/
367}; 367};
368#ifdef __BIG_ENDIAN 368#ifdef __BIG_ENDIAN
369struct e1000_host_mng_dhcp_cookie{ 369struct e1000_host_mng_dhcp_cookie{
370 uint32_t signature; 370 u32 signature;
371 uint16_t vlan_id; 371 u16 vlan_id;
372 uint8_t reserved0; 372 u8 reserved0;
373 uint8_t status; 373 u8 status;
374 uint32_t reserved1; 374 u32 reserved1;
375 uint8_t checksum; 375 u8 checksum;
376 uint8_t reserved3; 376 u8 reserved3;
377 uint16_t reserved2; 377 u16 reserved2;
378}; 378};
379#else 379#else
380struct e1000_host_mng_dhcp_cookie{ 380struct e1000_host_mng_dhcp_cookie{
381 uint32_t signature; 381 u32 signature;
382 uint8_t status; 382 u8 status;
383 uint8_t reserved0; 383 u8 reserved0;
384 uint16_t vlan_id; 384 u16 vlan_id;
385 uint32_t reserved1; 385 u32 reserved1;
386 uint16_t reserved2; 386 u16 reserved2;
387 uint8_t reserved3; 387 u8 reserved3;
388 uint8_t checksum; 388 u8 checksum;
389}; 389};
390#endif 390#endif
391 391
392int32_t e1000_mng_write_dhcp_info(struct e1000_hw *hw, uint8_t *buffer, 392s32 e1000_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer,
393 uint16_t length); 393 u16 length);
394boolean_t e1000_check_mng_mode(struct e1000_hw *hw); 394bool e1000_check_mng_mode(struct e1000_hw *hw);
395boolean_t e1000_enable_tx_pkt_filtering(struct e1000_hw *hw); 395bool e1000_enable_tx_pkt_filtering(struct e1000_hw *hw);
396int32_t e1000_read_eeprom(struct e1000_hw *hw, uint16_t reg, uint16_t words, uint16_t *data); 396s32 e1000_read_eeprom(struct e1000_hw *hw, u16 reg, u16 words, u16 *data);
397int32_t e1000_validate_eeprom_checksum(struct e1000_hw *hw); 397s32 e1000_validate_eeprom_checksum(struct e1000_hw *hw);
398int32_t e1000_update_eeprom_checksum(struct e1000_hw *hw); 398s32 e1000_update_eeprom_checksum(struct e1000_hw *hw);
399int32_t e1000_write_eeprom(struct e1000_hw *hw, uint16_t reg, uint16_t words, uint16_t *data); 399s32 e1000_write_eeprom(struct e1000_hw *hw, u16 reg, u16 words, u16 *data);
400int32_t e1000_read_mac_addr(struct e1000_hw * hw); 400s32 e1000_read_mac_addr(struct e1000_hw * hw);
401 401
402/* Filters (multicast, vlan, receive) */ 402/* Filters (multicast, vlan, receive) */
403uint32_t e1000_hash_mc_addr(struct e1000_hw *hw, uint8_t * mc_addr); 403u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 * mc_addr);
404void e1000_mta_set(struct e1000_hw *hw, uint32_t hash_value); 404void e1000_mta_set(struct e1000_hw *hw, u32 hash_value);
405void e1000_rar_set(struct e1000_hw *hw, uint8_t * mc_addr, uint32_t rar_index); 405void e1000_rar_set(struct e1000_hw *hw, u8 * mc_addr, u32 rar_index);
406void e1000_write_vfta(struct e1000_hw *hw, uint32_t offset, uint32_t value); 406void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value);
407 407
408/* LED functions */ 408/* LED functions */
409int32_t e1000_setup_led(struct e1000_hw *hw); 409s32 e1000_setup_led(struct e1000_hw *hw);
410int32_t e1000_cleanup_led(struct e1000_hw *hw); 410s32 e1000_cleanup_led(struct e1000_hw *hw);
411int32_t e1000_led_on(struct e1000_hw *hw); 411s32 e1000_led_on(struct e1000_hw *hw);
412int32_t e1000_led_off(struct e1000_hw *hw); 412s32 e1000_led_off(struct e1000_hw *hw);
413int32_t e1000_blink_led_start(struct e1000_hw *hw); 413s32 e1000_blink_led_start(struct e1000_hw *hw);
414 414
415/* Adaptive IFS Functions */ 415/* Adaptive IFS Functions */
416 416
417/* Everything else */ 417/* Everything else */
418void e1000_reset_adaptive(struct e1000_hw *hw); 418void e1000_reset_adaptive(struct e1000_hw *hw);
419void e1000_update_adaptive(struct e1000_hw *hw); 419void e1000_update_adaptive(struct e1000_hw *hw);
420void e1000_tbi_adjust_stats(struct e1000_hw *hw, struct e1000_hw_stats *stats, uint32_t frame_len, uint8_t * mac_addr); 420void e1000_tbi_adjust_stats(struct e1000_hw *hw, struct e1000_hw_stats *stats, u32 frame_len, u8 * mac_addr);
421void e1000_get_bus_info(struct e1000_hw *hw); 421void e1000_get_bus_info(struct e1000_hw *hw);
422void e1000_pci_set_mwi(struct e1000_hw *hw); 422void e1000_pci_set_mwi(struct e1000_hw *hw);
423void e1000_pci_clear_mwi(struct e1000_hw *hw); 423void e1000_pci_clear_mwi(struct e1000_hw *hw);
424int32_t e1000_read_pcie_cap_reg(struct e1000_hw *hw, uint32_t reg, uint16_t *value); 424s32 e1000_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
425void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc); 425void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc);
426int e1000_pcix_get_mmrbc(struct e1000_hw *hw); 426int e1000_pcix_get_mmrbc(struct e1000_hw *hw);
427/* Port I/O is only supported on 82544 and newer */ 427/* Port I/O is only supported on 82544 and newer */
428void e1000_io_write(struct e1000_hw *hw, unsigned long port, uint32_t value); 428void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value);
429int32_t e1000_disable_pciex_master(struct e1000_hw *hw); 429s32 e1000_disable_pciex_master(struct e1000_hw *hw);
430int32_t e1000_check_phy_reset_block(struct e1000_hw *hw); 430s32 e1000_check_phy_reset_block(struct e1000_hw *hw);
431 431
432 432
433#define E1000_READ_REG_IO(a, reg) \ 433#define E1000_READ_REG_IO(a, reg) \
@@ -596,8 +596,8 @@ struct e1000_rx_desc {
596 __le64 buffer_addr; /* Address of the descriptor's data buffer */ 596 __le64 buffer_addr; /* Address of the descriptor's data buffer */
597 __le16 length; /* Length of data DMAed into data buffer */ 597 __le16 length; /* Length of data DMAed into data buffer */
598 __le16 csum; /* Packet checksum */ 598 __le16 csum; /* Packet checksum */
599 uint8_t status; /* Descriptor status */ 599 u8 status; /* Descriptor status */
600 uint8_t errors; /* Descriptor Errors */ 600 u8 errors; /* Descriptor Errors */
601 __le16 special; 601 __le16 special;
602}; 602};
603 603
@@ -718,15 +718,15 @@ struct e1000_tx_desc {
718 __le32 data; 718 __le32 data;
719 struct { 719 struct {
720 __le16 length; /* Data buffer length */ 720 __le16 length; /* Data buffer length */
721 uint8_t cso; /* Checksum offset */ 721 u8 cso; /* Checksum offset */
722 uint8_t cmd; /* Descriptor control */ 722 u8 cmd; /* Descriptor control */
723 } flags; 723 } flags;
724 } lower; 724 } lower;
725 union { 725 union {
726 __le32 data; 726 __le32 data;
727 struct { 727 struct {
728 uint8_t status; /* Descriptor status */ 728 u8 status; /* Descriptor status */
729 uint8_t css; /* Checksum start */ 729 u8 css; /* Checksum start */
730 __le16 special; 730 __le16 special;
731 } fields; 731 } fields;
732 } upper; 732 } upper;
@@ -759,16 +759,16 @@ struct e1000_context_desc {
759 union { 759 union {
760 __le32 ip_config; 760 __le32 ip_config;
761 struct { 761 struct {
762 uint8_t ipcss; /* IP checksum start */ 762 u8 ipcss; /* IP checksum start */
763 uint8_t ipcso; /* IP checksum offset */ 763 u8 ipcso; /* IP checksum offset */
764 __le16 ipcse; /* IP checksum end */ 764 __le16 ipcse; /* IP checksum end */
765 } ip_fields; 765 } ip_fields;
766 } lower_setup; 766 } lower_setup;
767 union { 767 union {
768 __le32 tcp_config; 768 __le32 tcp_config;
769 struct { 769 struct {
770 uint8_t tucss; /* TCP checksum start */ 770 u8 tucss; /* TCP checksum start */
771 uint8_t tucso; /* TCP checksum offset */ 771 u8 tucso; /* TCP checksum offset */
772 __le16 tucse; /* TCP checksum end */ 772 __le16 tucse; /* TCP checksum end */
773 } tcp_fields; 773 } tcp_fields;
774 } upper_setup; 774 } upper_setup;
@@ -776,8 +776,8 @@ struct e1000_context_desc {
776 union { 776 union {
777 __le32 data; 777 __le32 data;
778 struct { 778 struct {
779 uint8_t status; /* Descriptor status */ 779 u8 status; /* Descriptor status */
780 uint8_t hdr_len; /* Header length */ 780 u8 hdr_len; /* Header length */
781 __le16 mss; /* Maximum segment size */ 781 __le16 mss; /* Maximum segment size */
782 } fields; 782 } fields;
783 } tcp_seg_setup; 783 } tcp_seg_setup;
@@ -790,15 +790,15 @@ struct e1000_data_desc {
790 __le32 data; 790 __le32 data;
791 struct { 791 struct {
792 __le16 length; /* Data buffer length */ 792 __le16 length; /* Data buffer length */
793 uint8_t typ_len_ext; /* */ 793 u8 typ_len_ext; /* */
794 uint8_t cmd; /* */ 794 u8 cmd; /* */
795 } flags; 795 } flags;
796 } lower; 796 } lower;
797 union { 797 union {
798 __le32 data; 798 __le32 data;
799 struct { 799 struct {
800 uint8_t status; /* Descriptor status */ 800 u8 status; /* Descriptor status */
801 uint8_t popts; /* Packet Options */ 801 u8 popts; /* Packet Options */
802 __le16 special; /* */ 802 __le16 special; /* */
803 } fields; 803 } fields;
804 } upper; 804 } upper;
@@ -825,8 +825,8 @@ struct e1000_rar {
825 825
826/* IPv4 Address Table Entry */ 826/* IPv4 Address Table Entry */
827struct e1000_ipv4_at_entry { 827struct e1000_ipv4_at_entry {
828 volatile uint32_t ipv4_addr; /* IP Address (RW) */ 828 volatile u32 ipv4_addr; /* IP Address (RW) */
829 volatile uint32_t reserved; 829 volatile u32 reserved;
830}; 830};
831 831
832/* Four wakeup IP addresses are supported */ 832/* Four wakeup IP addresses are supported */
@@ -837,25 +837,25 @@ struct e1000_ipv4_at_entry {
837 837
838/* IPv6 Address Table Entry */ 838/* IPv6 Address Table Entry */
839struct e1000_ipv6_at_entry { 839struct e1000_ipv6_at_entry {
840 volatile uint8_t ipv6_addr[16]; 840 volatile u8 ipv6_addr[16];
841}; 841};
842 842
843/* Flexible Filter Length Table Entry */ 843/* Flexible Filter Length Table Entry */
844struct e1000_fflt_entry { 844struct e1000_fflt_entry {
845 volatile uint32_t length; /* Flexible Filter Length (RW) */ 845 volatile u32 length; /* Flexible Filter Length (RW) */
846 volatile uint32_t reserved; 846 volatile u32 reserved;
847}; 847};
848 848
849/* Flexible Filter Mask Table Entry */ 849/* Flexible Filter Mask Table Entry */
850struct e1000_ffmt_entry { 850struct e1000_ffmt_entry {
851 volatile uint32_t mask; /* Flexible Filter Mask (RW) */ 851 volatile u32 mask; /* Flexible Filter Mask (RW) */
852 volatile uint32_t reserved; 852 volatile u32 reserved;
853}; 853};
854 854
855/* Flexible Filter Value Table Entry */ 855/* Flexible Filter Value Table Entry */
856struct e1000_ffvt_entry { 856struct e1000_ffvt_entry {
857 volatile uint32_t value; /* Flexible Filter Value (RW) */ 857 volatile u32 value; /* Flexible Filter Value (RW) */
858 volatile uint32_t reserved; 858 volatile u32 reserved;
859}; 859};
860 860
861/* Four Flexible Filters are supported */ 861/* Four Flexible Filters are supported */
@@ -1309,89 +1309,89 @@ struct e1000_ffvt_entry {
1309 1309
1310/* Statistics counters collected by the MAC */ 1310/* Statistics counters collected by the MAC */
1311struct e1000_hw_stats { 1311struct e1000_hw_stats {
1312 uint64_t crcerrs; 1312 u64 crcerrs;
1313 uint64_t algnerrc; 1313 u64 algnerrc;
1314 uint64_t symerrs; 1314 u64 symerrs;
1315 uint64_t rxerrc; 1315 u64 rxerrc;
1316 uint64_t txerrc; 1316 u64 txerrc;
1317 uint64_t mpc; 1317 u64 mpc;
1318 uint64_t scc; 1318 u64 scc;
1319 uint64_t ecol; 1319 u64 ecol;
1320 uint64_t mcc; 1320 u64 mcc;
1321 uint64_t latecol; 1321 u64 latecol;
1322 uint64_t colc; 1322 u64 colc;
1323 uint64_t dc; 1323 u64 dc;
1324 uint64_t tncrs; 1324 u64 tncrs;
1325 uint64_t sec; 1325 u64 sec;
1326 uint64_t cexterr; 1326 u64 cexterr;
1327 uint64_t rlec; 1327 u64 rlec;
1328 uint64_t xonrxc; 1328 u64 xonrxc;
1329 uint64_t xontxc; 1329 u64 xontxc;
1330 uint64_t xoffrxc; 1330 u64 xoffrxc;
1331 uint64_t xofftxc; 1331 u64 xofftxc;
1332 uint64_t fcruc; 1332 u64 fcruc;
1333 uint64_t prc64; 1333 u64 prc64;
1334 uint64_t prc127; 1334 u64 prc127;
1335 uint64_t prc255; 1335 u64 prc255;
1336 uint64_t prc511; 1336 u64 prc511;
1337 uint64_t prc1023; 1337 u64 prc1023;
1338 uint64_t prc1522; 1338 u64 prc1522;
1339 uint64_t gprc; 1339 u64 gprc;
1340 uint64_t bprc; 1340 u64 bprc;
1341 uint64_t mprc; 1341 u64 mprc;
1342 uint64_t gptc; 1342 u64 gptc;
1343 uint64_t gorcl; 1343 u64 gorcl;
1344 uint64_t gorch; 1344 u64 gorch;
1345 uint64_t gotcl; 1345 u64 gotcl;
1346 uint64_t gotch; 1346 u64 gotch;
1347 uint64_t rnbc; 1347 u64 rnbc;
1348 uint64_t ruc; 1348 u64 ruc;
1349 uint64_t rfc; 1349 u64 rfc;
1350 uint64_t roc; 1350 u64 roc;
1351 uint64_t rlerrc; 1351 u64 rlerrc;
1352 uint64_t rjc; 1352 u64 rjc;
1353 uint64_t mgprc; 1353 u64 mgprc;
1354 uint64_t mgpdc; 1354 u64 mgpdc;
1355 uint64_t mgptc; 1355 u64 mgptc;
1356 uint64_t torl; 1356 u64 torl;
1357 uint64_t torh; 1357 u64 torh;
1358 uint64_t totl; 1358 u64 totl;
1359 uint64_t toth; 1359 u64 toth;
1360 uint64_t tpr; 1360 u64 tpr;
1361 uint64_t tpt; 1361 u64 tpt;
1362 uint64_t ptc64; 1362 u64 ptc64;
1363 uint64_t ptc127; 1363 u64 ptc127;
1364 uint64_t ptc255; 1364 u64 ptc255;
1365 uint64_t ptc511; 1365 u64 ptc511;
1366 uint64_t ptc1023; 1366 u64 ptc1023;
1367 uint64_t ptc1522; 1367 u64 ptc1522;
1368 uint64_t mptc; 1368 u64 mptc;
1369 uint64_t bptc; 1369 u64 bptc;
1370 uint64_t tsctc; 1370 u64 tsctc;
1371 uint64_t tsctfc; 1371 u64 tsctfc;
1372 uint64_t iac; 1372 u64 iac;
1373 uint64_t icrxptc; 1373 u64 icrxptc;
1374 uint64_t icrxatc; 1374 u64 icrxatc;
1375 uint64_t ictxptc; 1375 u64 ictxptc;
1376 uint64_t ictxatc; 1376 u64 ictxatc;
1377 uint64_t ictxqec; 1377 u64 ictxqec;
1378 uint64_t ictxqmtc; 1378 u64 ictxqmtc;
1379 uint64_t icrxdmtc; 1379 u64 icrxdmtc;
1380 uint64_t icrxoc; 1380 u64 icrxoc;
1381}; 1381};
1382 1382
1383/* Structure containing variables used by the shared code (e1000_hw.c) */ 1383/* Structure containing variables used by the shared code (e1000_hw.c) */
1384struct e1000_hw { 1384struct e1000_hw {
1385 uint8_t __iomem *hw_addr; 1385 u8 __iomem *hw_addr;
1386 uint8_t __iomem *flash_address; 1386 u8 __iomem *flash_address;
1387 e1000_mac_type mac_type; 1387 e1000_mac_type mac_type;
1388 e1000_phy_type phy_type; 1388 e1000_phy_type phy_type;
1389 uint32_t phy_init_script; 1389 u32 phy_init_script;
1390 e1000_media_type media_type; 1390 e1000_media_type media_type;
1391 void *back; 1391 void *back;
1392 struct e1000_shadow_ram *eeprom_shadow_ram; 1392 struct e1000_shadow_ram *eeprom_shadow_ram;
1393 uint32_t flash_bank_size; 1393 u32 flash_bank_size;
1394 uint32_t flash_base_addr; 1394 u32 flash_base_addr;
1395 e1000_fc_type fc; 1395 e1000_fc_type fc;
1396 e1000_bus_speed bus_speed; 1396 e1000_bus_speed bus_speed;
1397 e1000_bus_width bus_width; 1397 e1000_bus_width bus_width;
@@ -1400,75 +1400,75 @@ struct e1000_hw {
1400 e1000_ms_type master_slave; 1400 e1000_ms_type master_slave;
1401 e1000_ms_type original_master_slave; 1401 e1000_ms_type original_master_slave;
1402 e1000_ffe_config ffe_config_state; 1402 e1000_ffe_config ffe_config_state;
1403 uint32_t asf_firmware_present; 1403 u32 asf_firmware_present;
1404 uint32_t eeprom_semaphore_present; 1404 u32 eeprom_semaphore_present;
1405 uint32_t swfw_sync_present; 1405 u32 swfw_sync_present;
1406 uint32_t swfwhw_semaphore_present; 1406 u32 swfwhw_semaphore_present;
1407 unsigned long io_base; 1407 unsigned long io_base;
1408 uint32_t phy_id; 1408 u32 phy_id;
1409 uint32_t phy_revision; 1409 u32 phy_revision;
1410 uint32_t phy_addr; 1410 u32 phy_addr;
1411 uint32_t original_fc; 1411 u32 original_fc;
1412 uint32_t txcw; 1412 u32 txcw;
1413 uint32_t autoneg_failed; 1413 u32 autoneg_failed;
1414 uint32_t max_frame_size; 1414 u32 max_frame_size;
1415 uint32_t min_frame_size; 1415 u32 min_frame_size;
1416 uint32_t mc_filter_type; 1416 u32 mc_filter_type;
1417 uint32_t num_mc_addrs; 1417 u32 num_mc_addrs;
1418 uint32_t collision_delta; 1418 u32 collision_delta;
1419 uint32_t tx_packet_delta; 1419 u32 tx_packet_delta;
1420 uint32_t ledctl_default; 1420 u32 ledctl_default;
1421 uint32_t ledctl_mode1; 1421 u32 ledctl_mode1;
1422 uint32_t ledctl_mode2; 1422 u32 ledctl_mode2;
1423 boolean_t tx_pkt_filtering; 1423 bool tx_pkt_filtering;
1424 struct e1000_host_mng_dhcp_cookie mng_cookie; 1424 struct e1000_host_mng_dhcp_cookie mng_cookie;
1425 uint16_t phy_spd_default; 1425 u16 phy_spd_default;
1426 uint16_t autoneg_advertised; 1426 u16 autoneg_advertised;
1427 uint16_t pci_cmd_word; 1427 u16 pci_cmd_word;
1428 uint16_t fc_high_water; 1428 u16 fc_high_water;
1429 uint16_t fc_low_water; 1429 u16 fc_low_water;
1430 uint16_t fc_pause_time; 1430 u16 fc_pause_time;
1431 uint16_t current_ifs_val; 1431 u16 current_ifs_val;
1432 uint16_t ifs_min_val; 1432 u16 ifs_min_val;
1433 uint16_t ifs_max_val; 1433 u16 ifs_max_val;
1434 uint16_t ifs_step_size; 1434 u16 ifs_step_size;
1435 uint16_t ifs_ratio; 1435 u16 ifs_ratio;
1436 uint16_t device_id; 1436 u16 device_id;
1437 uint16_t vendor_id; 1437 u16 vendor_id;
1438 uint16_t subsystem_id; 1438 u16 subsystem_id;
1439 uint16_t subsystem_vendor_id; 1439 u16 subsystem_vendor_id;
1440 uint8_t revision_id; 1440 u8 revision_id;
1441 uint8_t autoneg; 1441 u8 autoneg;
1442 uint8_t mdix; 1442 u8 mdix;
1443 uint8_t forced_speed_duplex; 1443 u8 forced_speed_duplex;
1444 uint8_t wait_autoneg_complete; 1444 u8 wait_autoneg_complete;
1445 uint8_t dma_fairness; 1445 u8 dma_fairness;
1446 uint8_t mac_addr[NODE_ADDRESS_SIZE]; 1446 u8 mac_addr[NODE_ADDRESS_SIZE];
1447 uint8_t perm_mac_addr[NODE_ADDRESS_SIZE]; 1447 u8 perm_mac_addr[NODE_ADDRESS_SIZE];
1448 boolean_t disable_polarity_correction; 1448 bool disable_polarity_correction;
1449 boolean_t speed_downgraded; 1449 bool speed_downgraded;
1450 e1000_smart_speed smart_speed; 1450 e1000_smart_speed smart_speed;
1451 e1000_dsp_config dsp_config_state; 1451 e1000_dsp_config dsp_config_state;
1452 boolean_t get_link_status; 1452 bool get_link_status;
1453 boolean_t serdes_link_down; 1453 bool serdes_link_down;
1454 boolean_t tbi_compatibility_en; 1454 bool tbi_compatibility_en;
1455 boolean_t tbi_compatibility_on; 1455 bool tbi_compatibility_on;
1456 boolean_t laa_is_present; 1456 bool laa_is_present;
1457 boolean_t phy_reset_disable; 1457 bool phy_reset_disable;
1458 boolean_t initialize_hw_bits_disable; 1458 bool initialize_hw_bits_disable;
1459 boolean_t fc_send_xon; 1459 bool fc_send_xon;
1460 boolean_t fc_strict_ieee; 1460 bool fc_strict_ieee;
1461 boolean_t report_tx_early; 1461 bool report_tx_early;
1462 boolean_t adaptive_ifs; 1462 bool adaptive_ifs;
1463 boolean_t ifs_params_forced; 1463 bool ifs_params_forced;
1464 boolean_t in_ifs_mode; 1464 bool in_ifs_mode;
1465 boolean_t mng_reg_access_disabled; 1465 bool mng_reg_access_disabled;
1466 boolean_t leave_av_bit_off; 1466 bool leave_av_bit_off;
1467 boolean_t kmrn_lock_loss_workaround_disabled; 1467 bool kmrn_lock_loss_workaround_disabled;
1468 boolean_t bad_tx_carr_stats_fd; 1468 bool bad_tx_carr_stats_fd;
1469 boolean_t has_manc2h; 1469 bool has_manc2h;
1470 boolean_t rx_needs_kicking; 1470 bool rx_needs_kicking;
1471 boolean_t has_smbus; 1471 bool has_smbus;
1472}; 1472};
1473 1473
1474 1474
@@ -2165,14 +2165,14 @@ typedef enum {
2165#define E1000_HI_COMMAND_TIMEOUT 500 /* Time in ms to process HI command */ 2165#define E1000_HI_COMMAND_TIMEOUT 500 /* Time in ms to process HI command */
2166 2166
2167struct e1000_host_command_header { 2167struct e1000_host_command_header {
2168 uint8_t command_id; 2168 u8 command_id;
2169 uint8_t command_length; 2169 u8 command_length;
2170 uint8_t command_options; /* I/F bits for command, status for return */ 2170 u8 command_options; /* I/F bits for command, status for return */
2171 uint8_t checksum; 2171 u8 checksum;
2172}; 2172};
2173struct e1000_host_command_info { 2173struct e1000_host_command_info {
2174 struct e1000_host_command_header command_header; /* Command Head/Command Result Head has 4 bytes */ 2174 struct e1000_host_command_header command_header; /* Command Head/Command Result Head has 4 bytes */
2175 uint8_t command_data[E1000_HI_MAX_DATA_LENGTH]; /* Command data can length 0..252 */ 2175 u8 command_data[E1000_HI_MAX_DATA_LENGTH]; /* Command data can length 0..252 */
2176}; 2176};
2177 2177
2178/* Host SMB register #0 */ 2178/* Host SMB register #0 */
@@ -2495,7 +2495,7 @@ struct e1000_host_command_info {
2495/* Number of milliseconds we wait for PHY configuration done after MAC reset */ 2495/* Number of milliseconds we wait for PHY configuration done after MAC reset */
2496#define PHY_CFG_TIMEOUT 100 2496#define PHY_CFG_TIMEOUT 100
2497 2497
2498#define E1000_TX_BUFFER_SIZE ((uint32_t)1514) 2498#define E1000_TX_BUFFER_SIZE ((u32)1514)
2499 2499
2500/* The carrier extension symbol, as received by the NIC. */ 2500/* The carrier extension symbol, as received by the NIC. */
2501#define CARRIER_EXTENSION 0x0F 2501#define CARRIER_EXTENSION 0x0F
@@ -2518,11 +2518,11 @@ struct e1000_host_command_info {
2518 * Typical use: 2518 * Typical use:
2519 * ... 2519 * ...
2520 * if (TBI_ACCEPT) { 2520 * if (TBI_ACCEPT) {
2521 * accept_frame = TRUE; 2521 * accept_frame = true;
2522 * e1000_tbi_adjust_stats(adapter, MacAddress); 2522 * e1000_tbi_adjust_stats(adapter, MacAddress);
2523 * frame_length--; 2523 * frame_length--;
2524 * } else { 2524 * } else {
2525 * accept_frame = FALSE; 2525 * accept_frame = false;
2526 * } 2526 * }
2527 * ... 2527 * ...
2528 */ 2528 */
@@ -3312,68 +3312,68 @@ struct e1000_host_command_info {
3312/* Offset 04h HSFSTS */ 3312/* Offset 04h HSFSTS */
3313union ich8_hws_flash_status { 3313union ich8_hws_flash_status {
3314 struct ich8_hsfsts { 3314 struct ich8_hsfsts {
3315#ifdef E1000_BIG_ENDIAN 3315#ifdef __BIG_ENDIAN
3316 uint16_t reserved2 :6; 3316 u16 reserved2 :6;
3317 uint16_t fldesvalid :1; 3317 u16 fldesvalid :1;
3318 uint16_t flockdn :1; 3318 u16 flockdn :1;
3319 uint16_t flcdone :1; 3319 u16 flcdone :1;
3320 uint16_t flcerr :1; 3320 u16 flcerr :1;
3321 uint16_t dael :1; 3321 u16 dael :1;
3322 uint16_t berasesz :2; 3322 u16 berasesz :2;
3323 uint16_t flcinprog :1; 3323 u16 flcinprog :1;
3324 uint16_t reserved1 :2; 3324 u16 reserved1 :2;
3325#else 3325#else
3326 uint16_t flcdone :1; /* bit 0 Flash Cycle Done */ 3326 u16 flcdone :1; /* bit 0 Flash Cycle Done */
3327 uint16_t flcerr :1; /* bit 1 Flash Cycle Error */ 3327 u16 flcerr :1; /* bit 1 Flash Cycle Error */
3328 uint16_t dael :1; /* bit 2 Direct Access error Log */ 3328 u16 dael :1; /* bit 2 Direct Access error Log */
3329 uint16_t berasesz :2; /* bit 4:3 Block/Sector Erase Size */ 3329 u16 berasesz :2; /* bit 4:3 Block/Sector Erase Size */
3330 uint16_t flcinprog :1; /* bit 5 flash SPI cycle in Progress */ 3330 u16 flcinprog :1; /* bit 5 flash SPI cycle in Progress */
3331 uint16_t reserved1 :2; /* bit 13:6 Reserved */ 3331 u16 reserved1 :2; /* bit 13:6 Reserved */
3332 uint16_t reserved2 :6; /* bit 13:6 Reserved */ 3332 u16 reserved2 :6; /* bit 13:6 Reserved */
3333 uint16_t fldesvalid :1; /* bit 14 Flash Descriptor Valid */ 3333 u16 fldesvalid :1; /* bit 14 Flash Descriptor Valid */
3334 uint16_t flockdn :1; /* bit 15 Flash Configuration Lock-Down */ 3334 u16 flockdn :1; /* bit 15 Flash Configuration Lock-Down */
3335#endif 3335#endif
3336 } hsf_status; 3336 } hsf_status;
3337 uint16_t regval; 3337 u16 regval;
3338}; 3338};
3339 3339
3340/* ICH8 GbE Flash Hardware Sequencing Flash control Register bit breakdown */ 3340/* ICH8 GbE Flash Hardware Sequencing Flash control Register bit breakdown */
3341/* Offset 06h FLCTL */ 3341/* Offset 06h FLCTL */
3342union ich8_hws_flash_ctrl { 3342union ich8_hws_flash_ctrl {
3343 struct ich8_hsflctl { 3343 struct ich8_hsflctl {
3344#ifdef E1000_BIG_ENDIAN 3344#ifdef __BIG_ENDIAN
3345 uint16_t fldbcount :2; 3345 u16 fldbcount :2;
3346 uint16_t flockdn :6; 3346 u16 flockdn :6;
3347 uint16_t flcgo :1; 3347 u16 flcgo :1;
3348 uint16_t flcycle :2; 3348 u16 flcycle :2;
3349 uint16_t reserved :5; 3349 u16 reserved :5;
3350#else 3350#else
3351 uint16_t flcgo :1; /* 0 Flash Cycle Go */ 3351 u16 flcgo :1; /* 0 Flash Cycle Go */
3352 uint16_t flcycle :2; /* 2:1 Flash Cycle */ 3352 u16 flcycle :2; /* 2:1 Flash Cycle */
3353 uint16_t reserved :5; /* 7:3 Reserved */ 3353 u16 reserved :5; /* 7:3 Reserved */
3354 uint16_t fldbcount :2; /* 9:8 Flash Data Byte Count */ 3354 u16 fldbcount :2; /* 9:8 Flash Data Byte Count */
3355 uint16_t flockdn :6; /* 15:10 Reserved */ 3355 u16 flockdn :6; /* 15:10 Reserved */
3356#endif 3356#endif
3357 } hsf_ctrl; 3357 } hsf_ctrl;
3358 uint16_t regval; 3358 u16 regval;
3359}; 3359};
3360 3360
3361/* ICH8 Flash Region Access Permissions */ 3361/* ICH8 Flash Region Access Permissions */
3362union ich8_hws_flash_regacc { 3362union ich8_hws_flash_regacc {
3363 struct ich8_flracc { 3363 struct ich8_flracc {
3364#ifdef E1000_BIG_ENDIAN 3364#ifdef __BIG_ENDIAN
3365 uint32_t gmwag :8; 3365 u32 gmwag :8;
3366 uint32_t gmrag :8; 3366 u32 gmrag :8;
3367 uint32_t grwa :8; 3367 u32 grwa :8;
3368 uint32_t grra :8; 3368 u32 grra :8;
3369#else 3369#else
3370 uint32_t grra :8; /* 0:7 GbE region Read Access */ 3370 u32 grra :8; /* 0:7 GbE region Read Access */
3371 uint32_t grwa :8; /* 8:15 GbE region Write Access */ 3371 u32 grwa :8; /* 8:15 GbE region Write Access */
3372 uint32_t gmrag :8; /* 23:16 GbE Master Read Access Grant */ 3372 u32 gmrag :8; /* 23:16 GbE Master Read Access Grant */
3373 uint32_t gmwag :8; /* 31:24 GbE Master Write Access Grant */ 3373 u32 gmwag :8; /* 31:24 GbE Master Write Access Grant */
3374#endif 3374#endif
3375 } hsf_flregacc; 3375 } hsf_flregacc;
3376 uint16_t regval; 3376 u16 regval;
3377}; 3377};
3378 3378
3379/* Miscellaneous PHY bit definitions. */ 3379/* Miscellaneous PHY bit definitions. */
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 0991648c53dc..59579b1d8843 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -127,7 +127,7 @@ int e1000_up(struct e1000_adapter *adapter);
127void e1000_down(struct e1000_adapter *adapter); 127void e1000_down(struct e1000_adapter *adapter);
128void e1000_reinit_locked(struct e1000_adapter *adapter); 128void e1000_reinit_locked(struct e1000_adapter *adapter);
129void e1000_reset(struct e1000_adapter *adapter); 129void e1000_reset(struct e1000_adapter *adapter);
130int e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx); 130int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx);
131int e1000_setup_all_tx_resources(struct e1000_adapter *adapter); 131int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
132int e1000_setup_all_rx_resources(struct e1000_adapter *adapter); 132int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
133void e1000_free_all_tx_resources(struct e1000_adapter *adapter); 133void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
@@ -169,21 +169,21 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
169static int e1000_set_mac(struct net_device *netdev, void *p); 169static int e1000_set_mac(struct net_device *netdev, void *p);
170static irqreturn_t e1000_intr(int irq, void *data); 170static irqreturn_t e1000_intr(int irq, void *data);
171static irqreturn_t e1000_intr_msi(int irq, void *data); 171static irqreturn_t e1000_intr_msi(int irq, void *data);
172static boolean_t e1000_clean_tx_irq(struct e1000_adapter *adapter, 172static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
173 struct e1000_tx_ring *tx_ring); 173 struct e1000_tx_ring *tx_ring);
174#ifdef CONFIG_E1000_NAPI 174#ifdef CONFIG_E1000_NAPI
175static int e1000_clean(struct napi_struct *napi, int budget); 175static int e1000_clean(struct napi_struct *napi, int budget);
176static boolean_t e1000_clean_rx_irq(struct e1000_adapter *adapter, 176static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
177 struct e1000_rx_ring *rx_ring, 177 struct e1000_rx_ring *rx_ring,
178 int *work_done, int work_to_do); 178 int *work_done, int work_to_do);
179static boolean_t e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, 179static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
180 struct e1000_rx_ring *rx_ring, 180 struct e1000_rx_ring *rx_ring,
181 int *work_done, int work_to_do); 181 int *work_done, int work_to_do);
182#else 182#else
183static boolean_t e1000_clean_rx_irq(struct e1000_adapter *adapter, 183static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
184 struct e1000_rx_ring *rx_ring); 184 struct e1000_rx_ring *rx_ring);
185static boolean_t e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, 185static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
186 struct e1000_rx_ring *rx_ring); 186 struct e1000_rx_ring *rx_ring);
187#endif 187#endif
188static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter, 188static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
189 struct e1000_rx_ring *rx_ring, 189 struct e1000_rx_ring *rx_ring,
@@ -203,8 +203,8 @@ static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
203 struct sk_buff *skb); 203 struct sk_buff *skb);
204 204
205static void e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp); 205static void e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp);
206static void e1000_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid); 206static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
207static void e1000_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid); 207static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
208static void e1000_restore_vlan(struct e1000_adapter *adapter); 208static void e1000_restore_vlan(struct e1000_adapter *adapter);
209 209
210static int e1000_suspend(struct pci_dev *pdev, pm_message_t state); 210static int e1000_suspend(struct pci_dev *pdev, pm_message_t state);
@@ -347,7 +347,6 @@ static void e1000_free_irq(struct e1000_adapter *adapter)
347static void 347static void
348e1000_irq_disable(struct e1000_adapter *adapter) 348e1000_irq_disable(struct e1000_adapter *adapter)
349{ 349{
350 atomic_inc(&adapter->irq_sem);
351 E1000_WRITE_REG(&adapter->hw, IMC, ~0); 350 E1000_WRITE_REG(&adapter->hw, IMC, ~0);
352 E1000_WRITE_FLUSH(&adapter->hw); 351 E1000_WRITE_FLUSH(&adapter->hw);
353 synchronize_irq(adapter->pdev->irq); 352 synchronize_irq(adapter->pdev->irq);
@@ -361,18 +360,16 @@ e1000_irq_disable(struct e1000_adapter *adapter)
361static void 360static void
362e1000_irq_enable(struct e1000_adapter *adapter) 361e1000_irq_enable(struct e1000_adapter *adapter)
363{ 362{
364 if (likely(atomic_dec_and_test(&adapter->irq_sem))) { 363 E1000_WRITE_REG(&adapter->hw, IMS, IMS_ENABLE_MASK);
365 E1000_WRITE_REG(&adapter->hw, IMS, IMS_ENABLE_MASK); 364 E1000_WRITE_FLUSH(&adapter->hw);
366 E1000_WRITE_FLUSH(&adapter->hw);
367 }
368} 365}
369 366
370static void 367static void
371e1000_update_mng_vlan(struct e1000_adapter *adapter) 368e1000_update_mng_vlan(struct e1000_adapter *adapter)
372{ 369{
373 struct net_device *netdev = adapter->netdev; 370 struct net_device *netdev = adapter->netdev;
374 uint16_t vid = adapter->hw.mng_cookie.vlan_id; 371 u16 vid = adapter->hw.mng_cookie.vlan_id;
375 uint16_t old_vid = adapter->mng_vlan_id; 372 u16 old_vid = adapter->mng_vlan_id;
376 if (adapter->vlgrp) { 373 if (adapter->vlgrp) {
377 if (!vlan_group_get_device(adapter->vlgrp, vid)) { 374 if (!vlan_group_get_device(adapter->vlgrp, vid)) {
378 if (adapter->hw.mng_cookie.status & 375 if (adapter->hw.mng_cookie.status &
@@ -382,7 +379,7 @@ e1000_update_mng_vlan(struct e1000_adapter *adapter)
382 } else 379 } else
383 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; 380 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
384 381
385 if ((old_vid != (uint16_t)E1000_MNG_VLAN_NONE) && 382 if ((old_vid != (u16)E1000_MNG_VLAN_NONE) &&
386 (vid != old_vid) && 383 (vid != old_vid) &&
387 !vlan_group_get_device(adapter->vlgrp, old_vid)) 384 !vlan_group_get_device(adapter->vlgrp, old_vid))
388 e1000_vlan_rx_kill_vid(netdev, old_vid); 385 e1000_vlan_rx_kill_vid(netdev, old_vid);
@@ -405,8 +402,8 @@ e1000_update_mng_vlan(struct e1000_adapter *adapter)
405static void 402static void
406e1000_release_hw_control(struct e1000_adapter *adapter) 403e1000_release_hw_control(struct e1000_adapter *adapter)
407{ 404{
408 uint32_t ctrl_ext; 405 u32 ctrl_ext;
409 uint32_t swsm; 406 u32 swsm;
410 407
411 /* Let firmware taken over control of h/w */ 408 /* Let firmware taken over control of h/w */
412 switch (adapter->hw.mac_type) { 409 switch (adapter->hw.mac_type) {
@@ -442,8 +439,8 @@ e1000_release_hw_control(struct e1000_adapter *adapter)
442static void 439static void
443e1000_get_hw_control(struct e1000_adapter *adapter) 440e1000_get_hw_control(struct e1000_adapter *adapter)
444{ 441{
445 uint32_t ctrl_ext; 442 u32 ctrl_ext;
446 uint32_t swsm; 443 u32 swsm;
447 444
448 /* Let firmware know the driver has taken over */ 445 /* Let firmware know the driver has taken over */
449 switch (adapter->hw.mac_type) { 446 switch (adapter->hw.mac_type) {
@@ -469,7 +466,7 @@ static void
469e1000_init_manageability(struct e1000_adapter *adapter) 466e1000_init_manageability(struct e1000_adapter *adapter)
470{ 467{
471 if (adapter->en_mng_pt) { 468 if (adapter->en_mng_pt) {
472 uint32_t manc = E1000_READ_REG(&adapter->hw, MANC); 469 u32 manc = E1000_READ_REG(&adapter->hw, MANC);
473 470
474 /* disable hardware interception of ARP */ 471 /* disable hardware interception of ARP */
475 manc &= ~(E1000_MANC_ARP_EN); 472 manc &= ~(E1000_MANC_ARP_EN);
@@ -478,7 +475,7 @@ e1000_init_manageability(struct e1000_adapter *adapter)
478 /* this will probably generate destination unreachable messages 475 /* this will probably generate destination unreachable messages
479 * from the host OS, but the packets will be handled on SMBUS */ 476 * from the host OS, but the packets will be handled on SMBUS */
480 if (adapter->hw.has_manc2h) { 477 if (adapter->hw.has_manc2h) {
481 uint32_t manc2h = E1000_READ_REG(&adapter->hw, MANC2H); 478 u32 manc2h = E1000_READ_REG(&adapter->hw, MANC2H);
482 479
483 manc |= E1000_MANC_EN_MNG2HOST; 480 manc |= E1000_MANC_EN_MNG2HOST;
484#define E1000_MNG2HOST_PORT_623 (1 << 5) 481#define E1000_MNG2HOST_PORT_623 (1 << 5)
@@ -496,7 +493,7 @@ static void
496e1000_release_manageability(struct e1000_adapter *adapter) 493e1000_release_manageability(struct e1000_adapter *adapter)
497{ 494{
498 if (adapter->en_mng_pt) { 495 if (adapter->en_mng_pt) {
499 uint32_t manc = E1000_READ_REG(&adapter->hw, MANC); 496 u32 manc = E1000_READ_REG(&adapter->hw, MANC);
500 497
501 /* re-enable hardware interception of ARP */ 498 /* re-enable hardware interception of ARP */
502 manc |= E1000_MANC_ARP_EN; 499 manc |= E1000_MANC_ARP_EN;
@@ -569,7 +566,7 @@ int e1000_up(struct e1000_adapter *adapter)
569 566
570void e1000_power_up_phy(struct e1000_adapter *adapter) 567void e1000_power_up_phy(struct e1000_adapter *adapter)
571{ 568{
572 uint16_t mii_reg = 0; 569 u16 mii_reg = 0;
573 570
574 /* Just clear the power down bit to wake the phy back up */ 571 /* Just clear the power down bit to wake the phy back up */
575 if (adapter->hw.media_type == e1000_media_type_copper) { 572 if (adapter->hw.media_type == e1000_media_type_copper) {
@@ -584,13 +581,13 @@ void e1000_power_up_phy(struct e1000_adapter *adapter)
584static void e1000_power_down_phy(struct e1000_adapter *adapter) 581static void e1000_power_down_phy(struct e1000_adapter *adapter)
585{ 582{
586 /* Power down the PHY so no link is implied when interface is down * 583 /* Power down the PHY so no link is implied when interface is down *
587 * The PHY cannot be powered down if any of the following is TRUE * 584 * The PHY cannot be powered down if any of the following is true *
588 * (a) WoL is enabled 585 * (a) WoL is enabled
589 * (b) AMT is active 586 * (b) AMT is active
590 * (c) SoL/IDER session is active */ 587 * (c) SoL/IDER session is active */
591 if (!adapter->wol && adapter->hw.mac_type >= e1000_82540 && 588 if (!adapter->wol && adapter->hw.mac_type >= e1000_82540 &&
592 adapter->hw.media_type == e1000_media_type_copper) { 589 adapter->hw.media_type == e1000_media_type_copper) {
593 uint16_t mii_reg = 0; 590 u16 mii_reg = 0;
594 591
595 switch (adapter->hw.mac_type) { 592 switch (adapter->hw.mac_type) {
596 case e1000_82540: 593 case e1000_82540:
@@ -638,7 +635,6 @@ e1000_down(struct e1000_adapter *adapter)
638 635
639#ifdef CONFIG_E1000_NAPI 636#ifdef CONFIG_E1000_NAPI
640 napi_disable(&adapter->napi); 637 napi_disable(&adapter->napi);
641 atomic_set(&adapter->irq_sem, 0);
642#endif 638#endif
643 e1000_irq_disable(adapter); 639 e1000_irq_disable(adapter);
644 640
@@ -671,9 +667,9 @@ e1000_reinit_locked(struct e1000_adapter *adapter)
671void 667void
672e1000_reset(struct e1000_adapter *adapter) 668e1000_reset(struct e1000_adapter *adapter)
673{ 669{
674 uint32_t pba = 0, tx_space, min_tx_space, min_rx_space; 670 u32 pba = 0, tx_space, min_tx_space, min_rx_space;
675 uint16_t fc_high_water_mark = E1000_FC_HIGH_DIFF; 671 u16 fc_high_water_mark = E1000_FC_HIGH_DIFF;
676 boolean_t legacy_pba_adjust = FALSE; 672 bool legacy_pba_adjust = false;
677 673
678 /* Repartition Pba for greater than 9k mtu 674 /* Repartition Pba for greater than 9k mtu
679 * To take effect CTRL.RST is required. 675 * To take effect CTRL.RST is required.
@@ -687,7 +683,7 @@ e1000_reset(struct e1000_adapter *adapter)
687 case e1000_82540: 683 case e1000_82540:
688 case e1000_82541: 684 case e1000_82541:
689 case e1000_82541_rev_2: 685 case e1000_82541_rev_2:
690 legacy_pba_adjust = TRUE; 686 legacy_pba_adjust = true;
691 pba = E1000_PBA_48K; 687 pba = E1000_PBA_48K;
692 break; 688 break;
693 case e1000_82545: 689 case e1000_82545:
@@ -698,7 +694,7 @@ e1000_reset(struct e1000_adapter *adapter)
698 break; 694 break;
699 case e1000_82547: 695 case e1000_82547:
700 case e1000_82547_rev_2: 696 case e1000_82547_rev_2:
701 legacy_pba_adjust = TRUE; 697 legacy_pba_adjust = true;
702 pba = E1000_PBA_30K; 698 pba = E1000_PBA_30K;
703 break; 699 break;
704 case e1000_82571: 700 case e1000_82571:
@@ -716,7 +712,7 @@ e1000_reset(struct e1000_adapter *adapter)
716 break; 712 break;
717 } 713 }
718 714
719 if (legacy_pba_adjust == TRUE) { 715 if (legacy_pba_adjust) {
720 if (adapter->netdev->mtu > E1000_RXBUFFER_8192) 716 if (adapter->netdev->mtu > E1000_RXBUFFER_8192)
721 pba -= 8; /* allocate more FIFO for Tx */ 717 pba -= 8; /* allocate more FIFO for Tx */
722 718
@@ -819,7 +815,7 @@ e1000_reset(struct e1000_adapter *adapter)
819 adapter->hw.mac_type <= e1000_82547_rev_2 && 815 adapter->hw.mac_type <= e1000_82547_rev_2 &&
820 adapter->hw.autoneg == 1 && 816 adapter->hw.autoneg == 1 &&
821 adapter->hw.autoneg_advertised == ADVERTISE_1000_FULL) { 817 adapter->hw.autoneg_advertised == ADVERTISE_1000_FULL) {
822 uint32_t ctrl = E1000_READ_REG(&adapter->hw, CTRL); 818 u32 ctrl = E1000_READ_REG(&adapter->hw, CTRL);
823 /* clear phy power management bit if we are in gig only mode, 819 /* clear phy power management bit if we are in gig only mode,
824 * which if enabled will attempt negotiation to 100Mb, which 820 * which if enabled will attempt negotiation to 100Mb, which
825 * can cause a loss of link at power off or driver unload */ 821 * can cause a loss of link at power off or driver unload */
@@ -836,7 +832,7 @@ e1000_reset(struct e1000_adapter *adapter)
836 if (!adapter->smart_power_down && 832 if (!adapter->smart_power_down &&
837 (adapter->hw.mac_type == e1000_82571 || 833 (adapter->hw.mac_type == e1000_82571 ||
838 adapter->hw.mac_type == e1000_82572)) { 834 adapter->hw.mac_type == e1000_82572)) {
839 uint16_t phy_data = 0; 835 u16 phy_data = 0;
840 /* speed up time to link by disabling smart power down, ignore 836 /* speed up time to link by disabling smart power down, ignore
841 * the return value of this function because there is nothing 837 * the return value of this function because there is nothing
842 * different we would do if it failed */ 838 * different we would do if it failed */
@@ -930,8 +926,8 @@ e1000_probe(struct pci_dev *pdev,
930 static int cards_found = 0; 926 static int cards_found = 0;
931 static int global_quad_port_a = 0; /* global ksp3 port a indication */ 927 static int global_quad_port_a = 0; /* global ksp3 port a indication */
932 int i, err, pci_using_dac; 928 int i, err, pci_using_dac;
933 uint16_t eeprom_data = 0; 929 u16 eeprom_data = 0;
934 uint16_t eeprom_apme_mask = E1000_EEPROM_APME; 930 u16 eeprom_apme_mask = E1000_EEPROM_APME;
935 DECLARE_MAC_BUF(mac); 931 DECLARE_MAC_BUF(mac);
936 932
937 if ((err = pci_enable_device(pdev))) 933 if ((err = pci_enable_device(pdev)))
@@ -1366,15 +1362,15 @@ e1000_sw_init(struct e1000_adapter *adapter)
1366 1362
1367 e1000_set_media_type(hw); 1363 e1000_set_media_type(hw);
1368 1364
1369 hw->wait_autoneg_complete = FALSE; 1365 hw->wait_autoneg_complete = false;
1370 hw->tbi_compatibility_en = TRUE; 1366 hw->tbi_compatibility_en = true;
1371 hw->adaptive_ifs = TRUE; 1367 hw->adaptive_ifs = true;
1372 1368
1373 /* Copper options */ 1369 /* Copper options */
1374 1370
1375 if (hw->media_type == e1000_media_type_copper) { 1371 if (hw->media_type == e1000_media_type_copper) {
1376 hw->mdix = AUTO_ALL_MODES; 1372 hw->mdix = AUTO_ALL_MODES;
1377 hw->disable_polarity_correction = FALSE; 1373 hw->disable_polarity_correction = false;
1378 hw->master_slave = E1000_MASTER_SLAVE; 1374 hw->master_slave = E1000_MASTER_SLAVE;
1379 } 1375 }
1380 1376
@@ -1396,7 +1392,6 @@ e1000_sw_init(struct e1000_adapter *adapter)
1396#endif 1392#endif
1397 1393
1398 /* Explicitly disable IRQ since the NIC can be in any state. */ 1394 /* Explicitly disable IRQ since the NIC can be in any state. */
1399 atomic_set(&adapter->irq_sem, 0);
1400 e1000_irq_disable(adapter); 1395 e1000_irq_disable(adapter);
1401 1396
1402 spin_lock_init(&adapter->stats_lock); 1397 spin_lock_init(&adapter->stats_lock);
@@ -1576,7 +1571,7 @@ e1000_close(struct net_device *netdev)
1576 * @start: address of beginning of memory 1571 * @start: address of beginning of memory
1577 * @len: length of memory 1572 * @len: length of memory
1578 **/ 1573 **/
1579static boolean_t 1574static bool
1580e1000_check_64k_bound(struct e1000_adapter *adapter, 1575e1000_check_64k_bound(struct e1000_adapter *adapter,
1581 void *start, unsigned long len) 1576 void *start, unsigned long len)
1582{ 1577{
@@ -1587,10 +1582,10 @@ e1000_check_64k_bound(struct e1000_adapter *adapter,
1587 * write location to cross 64k boundary due to errata 23 */ 1582 * write location to cross 64k boundary due to errata 23 */
1588 if (adapter->hw.mac_type == e1000_82545 || 1583 if (adapter->hw.mac_type == e1000_82545 ||
1589 adapter->hw.mac_type == e1000_82546) { 1584 adapter->hw.mac_type == e1000_82546) {
1590 return ((begin ^ (end - 1)) >> 16) != 0 ? FALSE : TRUE; 1585 return ((begin ^ (end - 1)) >> 16) != 0 ? false : true;
1591 } 1586 }
1592 1587
1593 return TRUE; 1588 return true;
1594} 1589}
1595 1590
1596/** 1591/**
@@ -1707,10 +1702,10 @@ e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
1707static void 1702static void
1708e1000_configure_tx(struct e1000_adapter *adapter) 1703e1000_configure_tx(struct e1000_adapter *adapter)
1709{ 1704{
1710 uint64_t tdba; 1705 u64 tdba;
1711 struct e1000_hw *hw = &adapter->hw; 1706 struct e1000_hw *hw = &adapter->hw;
1712 uint32_t tdlen, tctl, tipg, tarc; 1707 u32 tdlen, tctl, tipg, tarc;
1713 uint32_t ipgr1, ipgr2; 1708 u32 ipgr1, ipgr2;
1714 1709
1715 /* Setup the HW Tx Head and Tail descriptor pointers */ 1710 /* Setup the HW Tx Head and Tail descriptor pointers */
1716 1711
@@ -1952,10 +1947,10 @@ e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
1952static void 1947static void
1953e1000_setup_rctl(struct e1000_adapter *adapter) 1948e1000_setup_rctl(struct e1000_adapter *adapter)
1954{ 1949{
1955 uint32_t rctl, rfctl; 1950 u32 rctl, rfctl;
1956 uint32_t psrctl = 0; 1951 u32 psrctl = 0;
1957#ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT 1952#ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT
1958 uint32_t pages = 0; 1953 u32 pages = 0;
1959#endif 1954#endif
1960 1955
1961 rctl = E1000_READ_REG(&adapter->hw, RCTL); 1956 rctl = E1000_READ_REG(&adapter->hw, RCTL);
@@ -2070,9 +2065,9 @@ e1000_setup_rctl(struct e1000_adapter *adapter)
2070static void 2065static void
2071e1000_configure_rx(struct e1000_adapter *adapter) 2066e1000_configure_rx(struct e1000_adapter *adapter)
2072{ 2067{
2073 uint64_t rdba; 2068 u64 rdba;
2074 struct e1000_hw *hw = &adapter->hw; 2069 struct e1000_hw *hw = &adapter->hw;
2075 uint32_t rdlen, rctl, rxcsum, ctrl_ext; 2070 u32 rdlen, rctl, rxcsum, ctrl_ext;
2076 2071
2077 if (adapter->rx_ps_pages) { 2072 if (adapter->rx_ps_pages) {
2078 /* this is a 32 byte descriptor */ 2073 /* this is a 32 byte descriptor */
@@ -2133,7 +2128,7 @@ e1000_configure_rx(struct e1000_adapter *adapter)
2133 /* Enable 82543 Receive Checksum Offload for TCP and UDP */ 2128 /* Enable 82543 Receive Checksum Offload for TCP and UDP */
2134 if (hw->mac_type >= e1000_82543) { 2129 if (hw->mac_type >= e1000_82543) {
2135 rxcsum = E1000_READ_REG(hw, RXCSUM); 2130 rxcsum = E1000_READ_REG(hw, RXCSUM);
2136 if (adapter->rx_csum == TRUE) { 2131 if (adapter->rx_csum) {
2137 rxcsum |= E1000_RXCSUM_TUOFL; 2132 rxcsum |= E1000_RXCSUM_TUOFL;
2138 2133
2139 /* Enable 82571 IPv4 payload checksum for UDP fragments 2134 /* Enable 82571 IPv4 payload checksum for UDP fragments
@@ -2392,7 +2387,7 @@ static void
2392e1000_enter_82542_rst(struct e1000_adapter *adapter) 2387e1000_enter_82542_rst(struct e1000_adapter *adapter)
2393{ 2388{
2394 struct net_device *netdev = adapter->netdev; 2389 struct net_device *netdev = adapter->netdev;
2395 uint32_t rctl; 2390 u32 rctl;
2396 2391
2397 e1000_pci_clear_mwi(&adapter->hw); 2392 e1000_pci_clear_mwi(&adapter->hw);
2398 2393
@@ -2410,7 +2405,7 @@ static void
2410e1000_leave_82542_rst(struct e1000_adapter *adapter) 2405e1000_leave_82542_rst(struct e1000_adapter *adapter)
2411{ 2406{
2412 struct net_device *netdev = adapter->netdev; 2407 struct net_device *netdev = adapter->netdev;
2413 uint32_t rctl; 2408 u32 rctl;
2414 2409
2415 rctl = E1000_READ_REG(&adapter->hw, RCTL); 2410 rctl = E1000_READ_REG(&adapter->hw, RCTL);
2416 rctl &= ~E1000_RCTL_RST; 2411 rctl &= ~E1000_RCTL_RST;
@@ -2495,8 +2490,8 @@ e1000_set_rx_mode(struct net_device *netdev)
2495 struct e1000_hw *hw = &adapter->hw; 2490 struct e1000_hw *hw = &adapter->hw;
2496 struct dev_addr_list *uc_ptr; 2491 struct dev_addr_list *uc_ptr;
2497 struct dev_addr_list *mc_ptr; 2492 struct dev_addr_list *mc_ptr;
2498 uint32_t rctl; 2493 u32 rctl;
2499 uint32_t hash_value; 2494 u32 hash_value;
2500 int i, rar_entries = E1000_RAR_ENTRIES; 2495 int i, rar_entries = E1000_RAR_ENTRIES;
2501 int mta_reg_count = (hw->mac_type == e1000_ich8lan) ? 2496 int mta_reg_count = (hw->mac_type == e1000_ich8lan) ?
2502 E1000_NUM_MTA_REGISTERS_ICH8LAN : 2497 E1000_NUM_MTA_REGISTERS_ICH8LAN :
@@ -2600,7 +2595,7 @@ e1000_82547_tx_fifo_stall(unsigned long data)
2600{ 2595{
2601 struct e1000_adapter *adapter = (struct e1000_adapter *) data; 2596 struct e1000_adapter *adapter = (struct e1000_adapter *) data;
2602 struct net_device *netdev = adapter->netdev; 2597 struct net_device *netdev = adapter->netdev;
2603 uint32_t tctl; 2598 u32 tctl;
2604 2599
2605 if (atomic_read(&adapter->tx_fifo_stall)) { 2600 if (atomic_read(&adapter->tx_fifo_stall)) {
2606 if ((E1000_READ_REG(&adapter->hw, TDT) == 2601 if ((E1000_READ_REG(&adapter->hw, TDT) ==
@@ -2642,8 +2637,8 @@ e1000_watchdog(unsigned long data)
2642 struct e1000_adapter *adapter = (struct e1000_adapter *) data; 2637 struct e1000_adapter *adapter = (struct e1000_adapter *) data;
2643 struct net_device *netdev = adapter->netdev; 2638 struct net_device *netdev = adapter->netdev;
2644 struct e1000_tx_ring *txdr = adapter->tx_ring; 2639 struct e1000_tx_ring *txdr = adapter->tx_ring;
2645 uint32_t link, tctl; 2640 u32 link, tctl;
2646 int32_t ret_val; 2641 s32 ret_val;
2647 2642
2648 ret_val = e1000_check_for_link(&adapter->hw); 2643 ret_val = e1000_check_for_link(&adapter->hw);
2649 if ((ret_val == E1000_ERR_PHY) && 2644 if ((ret_val == E1000_ERR_PHY) &&
@@ -2668,8 +2663,8 @@ e1000_watchdog(unsigned long data)
2668 2663
2669 if (link) { 2664 if (link) {
2670 if (!netif_carrier_ok(netdev)) { 2665 if (!netif_carrier_ok(netdev)) {
2671 uint32_t ctrl; 2666 u32 ctrl;
2672 boolean_t txb2b = 1; 2667 bool txb2b = true;
2673 e1000_get_speed_and_duplex(&adapter->hw, 2668 e1000_get_speed_and_duplex(&adapter->hw,
2674 &adapter->link_speed, 2669 &adapter->link_speed,
2675 &adapter->link_duplex); 2670 &adapter->link_duplex);
@@ -2691,12 +2686,12 @@ e1000_watchdog(unsigned long data)
2691 adapter->tx_timeout_factor = 1; 2686 adapter->tx_timeout_factor = 1;
2692 switch (adapter->link_speed) { 2687 switch (adapter->link_speed) {
2693 case SPEED_10: 2688 case SPEED_10:
2694 txb2b = 0; 2689 txb2b = false;
2695 netdev->tx_queue_len = 10; 2690 netdev->tx_queue_len = 10;
2696 adapter->tx_timeout_factor = 8; 2691 adapter->tx_timeout_factor = 8;
2697 break; 2692 break;
2698 case SPEED_100: 2693 case SPEED_100:
2699 txb2b = 0; 2694 txb2b = false;
2700 netdev->tx_queue_len = 100; 2695 netdev->tx_queue_len = 100;
2701 /* maybe add some timeout factor ? */ 2696 /* maybe add some timeout factor ? */
2702 break; 2697 break;
@@ -2704,8 +2699,8 @@ e1000_watchdog(unsigned long data)
2704 2699
2705 if ((adapter->hw.mac_type == e1000_82571 || 2700 if ((adapter->hw.mac_type == e1000_82571 ||
2706 adapter->hw.mac_type == e1000_82572) && 2701 adapter->hw.mac_type == e1000_82572) &&
2707 txb2b == 0) { 2702 !txb2b) {
2708 uint32_t tarc0; 2703 u32 tarc0;
2709 tarc0 = E1000_READ_REG(&adapter->hw, TARC0); 2704 tarc0 = E1000_READ_REG(&adapter->hw, TARC0);
2710 tarc0 &= ~(1 << 21); 2705 tarc0 &= ~(1 << 21);
2711 E1000_WRITE_REG(&adapter->hw, TARC0, tarc0); 2706 E1000_WRITE_REG(&adapter->hw, TARC0, tarc0);
@@ -2747,7 +2742,7 @@ e1000_watchdog(unsigned long data)
2747 /* make sure the receive unit is started */ 2742 /* make sure the receive unit is started */
2748 if (adapter->hw.rx_needs_kicking) { 2743 if (adapter->hw.rx_needs_kicking) {
2749 struct e1000_hw *hw = &adapter->hw; 2744 struct e1000_hw *hw = &adapter->hw;
2750 uint32_t rctl = E1000_READ_REG(hw, RCTL); 2745 u32 rctl = E1000_READ_REG(hw, RCTL);
2751 E1000_WRITE_REG(hw, RCTL, rctl | E1000_RCTL_EN); 2746 E1000_WRITE_REG(hw, RCTL, rctl | E1000_RCTL_EN);
2752 } 2747 }
2753 } 2748 }
@@ -2802,7 +2797,7 @@ e1000_watchdog(unsigned long data)
2802 E1000_WRITE_REG(&adapter->hw, ICS, E1000_ICS_RXDMT0); 2797 E1000_WRITE_REG(&adapter->hw, ICS, E1000_ICS_RXDMT0);
2803 2798
2804 /* Force detection of hung controller every watchdog period */ 2799 /* Force detection of hung controller every watchdog period */
2805 adapter->detect_tx_hung = TRUE; 2800 adapter->detect_tx_hung = true;
2806 2801
2807 /* With 82571 controllers, LAA may be overwritten due to controller 2802 /* With 82571 controllers, LAA may be overwritten due to controller
2808 * reset from the other port. Set the appropriate LAA in RAR[0] */ 2803 * reset from the other port. Set the appropriate LAA in RAR[0] */
@@ -2837,7 +2832,7 @@ enum latency_range {
2837 * @bytes: the number of bytes during this measurement interval 2832 * @bytes: the number of bytes during this measurement interval
2838 **/ 2833 **/
2839static unsigned int e1000_update_itr(struct e1000_adapter *adapter, 2834static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
2840 uint16_t itr_setting, 2835 u16 itr_setting,
2841 int packets, 2836 int packets,
2842 int bytes) 2837 int bytes)
2843{ 2838{
@@ -2889,8 +2884,8 @@ update_itr_done:
2889static void e1000_set_itr(struct e1000_adapter *adapter) 2884static void e1000_set_itr(struct e1000_adapter *adapter)
2890{ 2885{
2891 struct e1000_hw *hw = &adapter->hw; 2886 struct e1000_hw *hw = &adapter->hw;
2892 uint16_t current_itr; 2887 u16 current_itr;
2893 uint32_t new_itr = adapter->itr; 2888 u32 new_itr = adapter->itr;
2894 2889
2895 if (unlikely(hw->mac_type < e1000_82540)) 2890 if (unlikely(hw->mac_type < e1000_82540))
2896 return; 2891 return;
@@ -2964,9 +2959,9 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
2964 struct e1000_context_desc *context_desc; 2959 struct e1000_context_desc *context_desc;
2965 struct e1000_buffer *buffer_info; 2960 struct e1000_buffer *buffer_info;
2966 unsigned int i; 2961 unsigned int i;
2967 uint32_t cmd_length = 0; 2962 u32 cmd_length = 0;
2968 uint16_t ipcse = 0, tucse, mss; 2963 u16 ipcse = 0, tucse, mss;
2969 uint8_t ipcss, ipcso, tucss, tucso, hdr_len; 2964 u8 ipcss, ipcso, tucss, tucso, hdr_len;
2970 int err; 2965 int err;
2971 2966
2972 if (skb_is_gso(skb)) { 2967 if (skb_is_gso(skb)) {
@@ -3025,19 +3020,19 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
3025 if (++i == tx_ring->count) i = 0; 3020 if (++i == tx_ring->count) i = 0;
3026 tx_ring->next_to_use = i; 3021 tx_ring->next_to_use = i;
3027 3022
3028 return TRUE; 3023 return true;
3029 } 3024 }
3030 return FALSE; 3025 return false;
3031} 3026}
3032 3027
3033static boolean_t 3028static bool
3034e1000_tx_csum(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring, 3029e1000_tx_csum(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
3035 struct sk_buff *skb) 3030 struct sk_buff *skb)
3036{ 3031{
3037 struct e1000_context_desc *context_desc; 3032 struct e1000_context_desc *context_desc;
3038 struct e1000_buffer *buffer_info; 3033 struct e1000_buffer *buffer_info;
3039 unsigned int i; 3034 unsigned int i;
3040 uint8_t css; 3035 u8 css;
3041 3036
3042 if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) { 3037 if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
3043 css = skb_transport_offset(skb); 3038 css = skb_transport_offset(skb);
@@ -3060,10 +3055,10 @@ e1000_tx_csum(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
3060 if (unlikely(++i == tx_ring->count)) i = 0; 3055 if (unlikely(++i == tx_ring->count)) i = 0;
3061 tx_ring->next_to_use = i; 3056 tx_ring->next_to_use = i;
3062 3057
3063 return TRUE; 3058 return true;
3064 } 3059 }
3065 3060
3066 return FALSE; 3061 return false;
3067} 3062}
3068 3063
3069#define E1000_MAX_TXD_PWR 12 3064#define E1000_MAX_TXD_PWR 12
@@ -3182,7 +3177,7 @@ e1000_tx_queue(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
3182{ 3177{
3183 struct e1000_tx_desc *tx_desc = NULL; 3178 struct e1000_tx_desc *tx_desc = NULL;
3184 struct e1000_buffer *buffer_info; 3179 struct e1000_buffer *buffer_info;
3185 uint32_t txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS; 3180 u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
3186 unsigned int i; 3181 unsigned int i;
3187 3182
3188 if (likely(tx_flags & E1000_TX_FLAGS_TSO)) { 3183 if (likely(tx_flags & E1000_TX_FLAGS_TSO)) {
@@ -3246,8 +3241,8 @@ e1000_tx_queue(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
3246static int 3241static int
3247e1000_82547_fifo_workaround(struct e1000_adapter *adapter, struct sk_buff *skb) 3242e1000_82547_fifo_workaround(struct e1000_adapter *adapter, struct sk_buff *skb)
3248{ 3243{
3249 uint32_t fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head; 3244 u32 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
3250 uint32_t skb_fifo_len = skb->len + E1000_FIFO_HDR; 3245 u32 skb_fifo_len = skb->len + E1000_FIFO_HDR;
3251 3246
3252 skb_fifo_len = ALIGN(skb_fifo_len, E1000_FIFO_HDR); 3247 skb_fifo_len = ALIGN(skb_fifo_len, E1000_FIFO_HDR);
3253 3248
@@ -3274,7 +3269,7 @@ static int
3274e1000_transfer_dhcp_info(struct e1000_adapter *adapter, struct sk_buff *skb) 3269e1000_transfer_dhcp_info(struct e1000_adapter *adapter, struct sk_buff *skb)
3275{ 3270{
3276 struct e1000_hw *hw = &adapter->hw; 3271 struct e1000_hw *hw = &adapter->hw;
3277 uint16_t length, offset; 3272 u16 length, offset;
3278 if (vlan_tx_tag_present(skb)) { 3273 if (vlan_tx_tag_present(skb)) {
3279 if (!((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) && 3274 if (!((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) &&
3280 ( adapter->hw.mng_cookie.status & 3275 ( adapter->hw.mng_cookie.status &
@@ -3285,17 +3280,17 @@ e1000_transfer_dhcp_info(struct e1000_adapter *adapter, struct sk_buff *skb)
3285 struct ethhdr *eth = (struct ethhdr *) skb->data; 3280 struct ethhdr *eth = (struct ethhdr *) skb->data;
3286 if ((htons(ETH_P_IP) == eth->h_proto)) { 3281 if ((htons(ETH_P_IP) == eth->h_proto)) {
3287 const struct iphdr *ip = 3282 const struct iphdr *ip =
3288 (struct iphdr *)((uint8_t *)skb->data+14); 3283 (struct iphdr *)((u8 *)skb->data+14);
3289 if (IPPROTO_UDP == ip->protocol) { 3284 if (IPPROTO_UDP == ip->protocol) {
3290 struct udphdr *udp = 3285 struct udphdr *udp =
3291 (struct udphdr *)((uint8_t *)ip + 3286 (struct udphdr *)((u8 *)ip +
3292 (ip->ihl << 2)); 3287 (ip->ihl << 2));
3293 if (ntohs(udp->dest) == 67) { 3288 if (ntohs(udp->dest) == 67) {
3294 offset = (uint8_t *)udp + 8 - skb->data; 3289 offset = (u8 *)udp + 8 - skb->data;
3295 length = skb->len - offset; 3290 length = skb->len - offset;
3296 3291
3297 return e1000_mng_write_dhcp_info(hw, 3292 return e1000_mng_write_dhcp_info(hw,
3298 (uint8_t *)udp + 8, 3293 (u8 *)udp + 8,
3299 length); 3294 length);
3300 } 3295 }
3301 } 3296 }
@@ -3375,7 +3370,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
3375 * overrun the FIFO, adjust the max buffer len if mss 3370 * overrun the FIFO, adjust the max buffer len if mss
3376 * drops. */ 3371 * drops. */
3377 if (mss) { 3372 if (mss) {
3378 uint8_t hdr_len; 3373 u8 hdr_len;
3379 max_per_txd = min(mss << 2, max_per_txd); 3374 max_per_txd = min(mss << 2, max_per_txd);
3380 max_txd_pwr = fls(max_per_txd) - 1; 3375 max_txd_pwr = fls(max_per_txd) - 1;
3381 3376
@@ -3562,7 +3557,7 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu)
3562{ 3557{
3563 struct e1000_adapter *adapter = netdev_priv(netdev); 3558 struct e1000_adapter *adapter = netdev_priv(netdev);
3564 int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE; 3559 int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
3565 uint16_t eeprom_data = 0; 3560 u16 eeprom_data = 0;
3566 3561
3567 if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) || 3562 if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
3568 (max_frame > MAX_JUMBO_FRAME_SIZE)) { 3563 (max_frame > MAX_JUMBO_FRAME_SIZE)) {
@@ -3657,7 +3652,7 @@ e1000_update_stats(struct e1000_adapter *adapter)
3657 struct e1000_hw *hw = &adapter->hw; 3652 struct e1000_hw *hw = &adapter->hw;
3658 struct pci_dev *pdev = adapter->pdev; 3653 struct pci_dev *pdev = adapter->pdev;
3659 unsigned long flags; 3654 unsigned long flags;
3660 uint16_t phy_tmp; 3655 u16 phy_tmp;
3661 3656
3662#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF 3657#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
3663 3658
@@ -3834,13 +3829,10 @@ e1000_intr_msi(int irq, void *data)
3834#ifndef CONFIG_E1000_NAPI 3829#ifndef CONFIG_E1000_NAPI
3835 int i; 3830 int i;
3836#endif 3831#endif
3837 uint32_t icr = E1000_READ_REG(hw, ICR); 3832 u32 icr = E1000_READ_REG(hw, ICR);
3833
3834 /* in NAPI mode read ICR disables interrupts using IAM */
3838 3835
3839#ifdef CONFIG_E1000_NAPI
3840 /* read ICR disables interrupts using IAM, so keep up with our
3841 * enable/disable accounting */
3842 atomic_inc(&adapter->irq_sem);
3843#endif
3844 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) { 3836 if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
3845 hw->get_link_status = 1; 3837 hw->get_link_status = 1;
3846 /* 80003ES2LAN workaround-- For packet buffer work-around on 3838 /* 80003ES2LAN workaround-- For packet buffer work-around on
@@ -3849,7 +3841,7 @@ e1000_intr_msi(int irq, void *data)
3849 if (netif_carrier_ok(netdev) && 3841 if (netif_carrier_ok(netdev) &&
3850 (adapter->hw.mac_type == e1000_80003es2lan)) { 3842 (adapter->hw.mac_type == e1000_80003es2lan)) {
3851 /* disable receives */ 3843 /* disable receives */
3852 uint32_t rctl = E1000_READ_REG(hw, RCTL); 3844 u32 rctl = E1000_READ_REG(hw, RCTL);
3853 E1000_WRITE_REG(hw, RCTL, rctl & ~E1000_RCTL_EN); 3845 E1000_WRITE_REG(hw, RCTL, rctl & ~E1000_RCTL_EN);
3854 } 3846 }
3855 /* guard against interrupt when we're going down */ 3847 /* guard against interrupt when we're going down */
@@ -3896,7 +3888,7 @@ e1000_intr(int irq, void *data)
3896 struct net_device *netdev = data; 3888 struct net_device *netdev = data;
3897 struct e1000_adapter *adapter = netdev_priv(netdev); 3889 struct e1000_adapter *adapter = netdev_priv(netdev);
3898 struct e1000_hw *hw = &adapter->hw; 3890 struct e1000_hw *hw = &adapter->hw;
3899 uint32_t rctl, icr = E1000_READ_REG(hw, ICR); 3891 u32 rctl, icr = E1000_READ_REG(hw, ICR);
3900#ifndef CONFIG_E1000_NAPI 3892#ifndef CONFIG_E1000_NAPI
3901 int i; 3893 int i;
3902#endif 3894#endif
@@ -3910,12 +3902,8 @@ e1000_intr(int irq, void *data)
3910 !(icr & E1000_ICR_INT_ASSERTED))) 3902 !(icr & E1000_ICR_INT_ASSERTED)))
3911 return IRQ_NONE; 3903 return IRQ_NONE;
3912 3904
3913 /* Interrupt Auto-Mask...upon reading ICR, 3905 /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No
3914 * interrupts are masked. No need for the 3906 * need for the IMC write */
3915 * IMC write, but it does mean we should
3916 * account for it ASAP. */
3917 if (likely(hw->mac_type >= e1000_82571))
3918 atomic_inc(&adapter->irq_sem);
3919#endif 3907#endif
3920 3908
3921 if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) { 3909 if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
@@ -3939,7 +3927,6 @@ e1000_intr(int irq, void *data)
3939#ifdef CONFIG_E1000_NAPI 3927#ifdef CONFIG_E1000_NAPI
3940 if (unlikely(hw->mac_type < e1000_82571)) { 3928 if (unlikely(hw->mac_type < e1000_82571)) {
3941 /* disable interrupts, without the synchronize_irq bit */ 3929 /* disable interrupts, without the synchronize_irq bit */
3942 atomic_inc(&adapter->irq_sem);
3943 E1000_WRITE_REG(hw, IMC, ~0); 3930 E1000_WRITE_REG(hw, IMC, ~0);
3944 E1000_WRITE_FLUSH(hw); 3931 E1000_WRITE_FLUSH(hw);
3945 } 3932 }
@@ -3964,10 +3951,8 @@ e1000_intr(int irq, void *data)
3964 * in dead lock. Writing IMC forces 82547 into 3951 * in dead lock. Writing IMC forces 82547 into
3965 * de-assertion state. 3952 * de-assertion state.
3966 */ 3953 */
3967 if (hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2) { 3954 if (hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2)
3968 atomic_inc(&adapter->irq_sem);
3969 E1000_WRITE_REG(hw, IMC, ~0); 3955 E1000_WRITE_REG(hw, IMC, ~0);
3970 }
3971 3956
3972 adapter->total_tx_bytes = 0; 3957 adapter->total_tx_bytes = 0;
3973 adapter->total_rx_bytes = 0; 3958 adapter->total_rx_bytes = 0;
@@ -4038,7 +4023,7 @@ e1000_clean(struct napi_struct *napi, int budget)
4038 * @adapter: board private structure 4023 * @adapter: board private structure
4039 **/ 4024 **/
4040 4025
4041static boolean_t 4026static bool
4042e1000_clean_tx_irq(struct e1000_adapter *adapter, 4027e1000_clean_tx_irq(struct e1000_adapter *adapter,
4043 struct e1000_tx_ring *tx_ring) 4028 struct e1000_tx_ring *tx_ring)
4044{ 4029{
@@ -4049,7 +4034,7 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
4049#ifdef CONFIG_E1000_NAPI 4034#ifdef CONFIG_E1000_NAPI
4050 unsigned int count = 0; 4035 unsigned int count = 0;
4051#endif 4036#endif
4052 boolean_t cleaned = FALSE; 4037 bool cleaned = false;
4053 unsigned int total_tx_bytes=0, total_tx_packets=0; 4038 unsigned int total_tx_bytes=0, total_tx_packets=0;
4054 4039
4055 i = tx_ring->next_to_clean; 4040 i = tx_ring->next_to_clean;
@@ -4057,7 +4042,7 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
4057 eop_desc = E1000_TX_DESC(*tx_ring, eop); 4042 eop_desc = E1000_TX_DESC(*tx_ring, eop);
4058 4043
4059 while (eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) { 4044 while (eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) {
4060 for (cleaned = FALSE; !cleaned; ) { 4045 for (cleaned = false; !cleaned; ) {
4061 tx_desc = E1000_TX_DESC(*tx_ring, i); 4046 tx_desc = E1000_TX_DESC(*tx_ring, i);
4062 buffer_info = &tx_ring->buffer_info[i]; 4047 buffer_info = &tx_ring->buffer_info[i];
4063 cleaned = (i == eop); 4048 cleaned = (i == eop);
@@ -4105,7 +4090,7 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
4105 if (adapter->detect_tx_hung) { 4090 if (adapter->detect_tx_hung) {
4106 /* Detect a transmit hang in hardware, this serializes the 4091 /* Detect a transmit hang in hardware, this serializes the
4107 * check with the clearing of time_stamp and movement of i */ 4092 * check with the clearing of time_stamp and movement of i */
4108 adapter->detect_tx_hung = FALSE; 4093 adapter->detect_tx_hung = false;
4109 if (tx_ring->buffer_info[eop].dma && 4094 if (tx_ring->buffer_info[eop].dma &&
4110 time_after(jiffies, tx_ring->buffer_info[eop].time_stamp + 4095 time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
4111 (adapter->tx_timeout_factor * HZ)) 4096 (adapter->tx_timeout_factor * HZ))
@@ -4154,11 +4139,11 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
4154 4139
4155static void 4140static void
4156e1000_rx_checksum(struct e1000_adapter *adapter, 4141e1000_rx_checksum(struct e1000_adapter *adapter,
4157 uint32_t status_err, uint32_t csum, 4142 u32 status_err, u32 csum,
4158 struct sk_buff *skb) 4143 struct sk_buff *skb)
4159{ 4144{
4160 uint16_t status = (uint16_t)status_err; 4145 u16 status = (u16)status_err;
4161 uint8_t errors = (uint8_t)(status_err >> 24); 4146 u8 errors = (u8)(status_err >> 24);
4162 skb->ip_summed = CHECKSUM_NONE; 4147 skb->ip_summed = CHECKSUM_NONE;
4163 4148
4164 /* 82543 or newer only */ 4149 /* 82543 or newer only */
@@ -4200,7 +4185,7 @@ e1000_rx_checksum(struct e1000_adapter *adapter,
4200 * @adapter: board private structure 4185 * @adapter: board private structure
4201 **/ 4186 **/
4202 4187
4203static boolean_t 4188static bool
4204#ifdef CONFIG_E1000_NAPI 4189#ifdef CONFIG_E1000_NAPI
4205e1000_clean_rx_irq(struct e1000_adapter *adapter, 4190e1000_clean_rx_irq(struct e1000_adapter *adapter,
4206 struct e1000_rx_ring *rx_ring, 4191 struct e1000_rx_ring *rx_ring,
@@ -4215,11 +4200,11 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
4215 struct e1000_rx_desc *rx_desc, *next_rxd; 4200 struct e1000_rx_desc *rx_desc, *next_rxd;
4216 struct e1000_buffer *buffer_info, *next_buffer; 4201 struct e1000_buffer *buffer_info, *next_buffer;
4217 unsigned long flags; 4202 unsigned long flags;
4218 uint32_t length; 4203 u32 length;
4219 uint8_t last_byte; 4204 u8 last_byte;
4220 unsigned int i; 4205 unsigned int i;
4221 int cleaned_count = 0; 4206 int cleaned_count = 0;
4222 boolean_t cleaned = FALSE; 4207 bool cleaned = false;
4223 unsigned int total_rx_bytes=0, total_rx_packets=0; 4208 unsigned int total_rx_bytes=0, total_rx_packets=0;
4224 4209
4225 i = rx_ring->next_to_clean; 4210 i = rx_ring->next_to_clean;
@@ -4247,7 +4232,7 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
4247 4232
4248 next_buffer = &rx_ring->buffer_info[i]; 4233 next_buffer = &rx_ring->buffer_info[i];
4249 4234
4250 cleaned = TRUE; 4235 cleaned = true;
4251 cleaned_count++; 4236 cleaned_count++;
4252 pci_unmap_single(pdev, 4237 pci_unmap_single(pdev,
4253 buffer_info->dma, 4238 buffer_info->dma,
@@ -4316,8 +4301,8 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
4316 4301
4317 /* Receive Checksum Offload */ 4302 /* Receive Checksum Offload */
4318 e1000_rx_checksum(adapter, 4303 e1000_rx_checksum(adapter,
4319 (uint32_t)(status) | 4304 (u32)(status) |
4320 ((uint32_t)(rx_desc->errors) << 24), 4305 ((u32)(rx_desc->errors) << 24),
4321 le16_to_cpu(rx_desc->csum), skb); 4306 le16_to_cpu(rx_desc->csum), skb);
4322 4307
4323 skb->protocol = eth_type_trans(skb, netdev); 4308 skb->protocol = eth_type_trans(skb, netdev);
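
[Editor's note] The Rx hunks above pack the descriptor's 8-bit status and 8-bit error fields into one 32-bit value ((u32)status | ((u32)errors << 24)) before calling e1000_rx_checksum(), which then unpacks them as a u16 and a u8. A minimal standalone sketch of that packing, outside the driver (plain C, no kernel dependencies):

#include <stdint.h>
#include <stdio.h>

/* Pack an 8-bit status and 8-bit error byte the way the Rx clean path does. */
static uint32_t pack_status_err(uint8_t status, uint8_t errors)
{
	return (uint32_t)status | ((uint32_t)errors << 24);
}

int main(void)
{
	uint32_t status_err = pack_status_err(0x03, 0x80);

	uint16_t status = (uint16_t)status_err;        /* low bits carry status */
	uint8_t errors = (uint8_t)(status_err >> 24);  /* top byte carries errors */

	printf("status=0x%02x errors=0x%02x\n", status, errors);
	return 0;
}
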
@@ -4373,7 +4358,7 @@ next_desc:
4373 * @adapter: board private structure 4358 * @adapter: board private structure
4374 **/ 4359 **/
4375 4360
4376static boolean_t 4361static bool
4377#ifdef CONFIG_E1000_NAPI 4362#ifdef CONFIG_E1000_NAPI
4378e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, 4363e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
4379 struct e1000_rx_ring *rx_ring, 4364 struct e1000_rx_ring *rx_ring,
@@ -4391,9 +4376,9 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
4391 struct e1000_ps_page_dma *ps_page_dma; 4376 struct e1000_ps_page_dma *ps_page_dma;
4392 struct sk_buff *skb; 4377 struct sk_buff *skb;
4393 unsigned int i, j; 4378 unsigned int i, j;
4394 uint32_t length, staterr; 4379 u32 length, staterr;
4395 int cleaned_count = 0; 4380 int cleaned_count = 0;
4396 boolean_t cleaned = FALSE; 4381 bool cleaned = false;
4397 unsigned int total_rx_bytes=0, total_rx_packets=0; 4382 unsigned int total_rx_bytes=0, total_rx_packets=0;
4398 4383
4399 i = rx_ring->next_to_clean; 4384 i = rx_ring->next_to_clean;
@@ -4420,7 +4405,7 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
4420 4405
4421 next_buffer = &rx_ring->buffer_info[i]; 4406 next_buffer = &rx_ring->buffer_info[i];
4422 4407
4423 cleaned = TRUE; 4408 cleaned = true;
4424 cleaned_count++; 4409 cleaned_count++;
4425 pci_unmap_single(pdev, buffer_info->dma, 4410 pci_unmap_single(pdev, buffer_info->dma,
4426 buffer_info->length, 4411 buffer_info->length,
@@ -4774,8 +4759,8 @@ no_buffers:
4774static void 4759static void
4775e1000_smartspeed(struct e1000_adapter *adapter) 4760e1000_smartspeed(struct e1000_adapter *adapter)
4776{ 4761{
4777 uint16_t phy_status; 4762 u16 phy_status;
4778 uint16_t phy_ctrl; 4763 u16 phy_ctrl;
4779 4764
4780 if ((adapter->hw.phy_type != e1000_phy_igp) || !adapter->hw.autoneg || 4765 if ((adapter->hw.phy_type != e1000_phy_igp) || !adapter->hw.autoneg ||
4781 !(adapter->hw.autoneg_advertised & ADVERTISE_1000_FULL)) 4766 !(adapter->hw.autoneg_advertised & ADVERTISE_1000_FULL))
@@ -4854,8 +4839,8 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
4854 struct e1000_adapter *adapter = netdev_priv(netdev); 4839 struct e1000_adapter *adapter = netdev_priv(netdev);
4855 struct mii_ioctl_data *data = if_mii(ifr); 4840 struct mii_ioctl_data *data = if_mii(ifr);
4856 int retval; 4841 int retval;
4857 uint16_t mii_reg; 4842 u16 mii_reg;
4858 uint16_t spddplx; 4843 u16 spddplx;
4859 unsigned long flags; 4844 unsigned long flags;
4860 4845
4861 if (adapter->hw.media_type != e1000_media_type_copper) 4846 if (adapter->hw.media_type != e1000_media_type_copper)
@@ -4974,11 +4959,11 @@ e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc)
4974 pcix_set_mmrbc(adapter->pdev, mmrbc); 4959 pcix_set_mmrbc(adapter->pdev, mmrbc);
4975} 4960}
4976 4961
4977int32_t 4962s32
4978e1000_read_pcie_cap_reg(struct e1000_hw *hw, uint32_t reg, uint16_t *value) 4963e1000_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
4979{ 4964{
4980 struct e1000_adapter *adapter = hw->back; 4965 struct e1000_adapter *adapter = hw->back;
4981 uint16_t cap_offset; 4966 u16 cap_offset;
4982 4967
4983 cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP); 4968 cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
4984 if (!cap_offset) 4969 if (!cap_offset)
@@ -4990,7 +4975,7 @@ e1000_read_pcie_cap_reg(struct e1000_hw *hw, uint32_t reg, uint16_t *value)
4990} 4975}
4991 4976
4992void 4977void
4993e1000_io_write(struct e1000_hw *hw, unsigned long port, uint32_t value) 4978e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value)
4994{ 4979{
4995 outl(value, port); 4980 outl(value, port);
4996} 4981}
@@ -4999,9 +4984,10 @@ static void
4999e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp) 4984e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
5000{ 4985{
5001 struct e1000_adapter *adapter = netdev_priv(netdev); 4986 struct e1000_adapter *adapter = netdev_priv(netdev);
5002 uint32_t ctrl, rctl; 4987 u32 ctrl, rctl;
5003 4988
5004 e1000_irq_disable(adapter); 4989 if (!test_bit(__E1000_DOWN, &adapter->flags))
4990 e1000_irq_disable(adapter);
5005 adapter->vlgrp = grp; 4991 adapter->vlgrp = grp;
5006 4992
5007 if (grp) { 4993 if (grp) {
@@ -5030,7 +5016,7 @@ e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
5030 rctl &= ~E1000_RCTL_VFE; 5016 rctl &= ~E1000_RCTL_VFE;
5031 E1000_WRITE_REG(&adapter->hw, RCTL, rctl); 5017 E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
5032 if (adapter->mng_vlan_id != 5018 if (adapter->mng_vlan_id !=
5033 (uint16_t)E1000_MNG_VLAN_NONE) { 5019 (u16)E1000_MNG_VLAN_NONE) {
5034 e1000_vlan_rx_kill_vid(netdev, 5020 e1000_vlan_rx_kill_vid(netdev,
5035 adapter->mng_vlan_id); 5021 adapter->mng_vlan_id);
5036 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; 5022 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
@@ -5038,14 +5024,15 @@ e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
5038 } 5024 }
5039 } 5025 }
5040 5026
5041 e1000_irq_enable(adapter); 5027 if (!test_bit(__E1000_DOWN, &adapter->flags))
5028 e1000_irq_enable(adapter);
5042} 5029}
5043 5030
5044static void 5031static void
5045e1000_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid) 5032e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
5046{ 5033{
5047 struct e1000_adapter *adapter = netdev_priv(netdev); 5034 struct e1000_adapter *adapter = netdev_priv(netdev);
5048 uint32_t vfta, index; 5035 u32 vfta, index;
5049 5036
5050 if ((adapter->hw.mng_cookie.status & 5037 if ((adapter->hw.mng_cookie.status &
5051 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) && 5038 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
@@ -5059,14 +5046,16 @@ e1000_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid)
5059} 5046}
5060 5047
5061static void 5048static void
5062e1000_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid) 5049e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
5063{ 5050{
5064 struct e1000_adapter *adapter = netdev_priv(netdev); 5051 struct e1000_adapter *adapter = netdev_priv(netdev);
5065 uint32_t vfta, index; 5052 u32 vfta, index;
5066 5053
5067 e1000_irq_disable(adapter); 5054 if (!test_bit(__E1000_DOWN, &adapter->flags))
5055 e1000_irq_disable(adapter);
5068 vlan_group_set_device(adapter->vlgrp, vid, NULL); 5056 vlan_group_set_device(adapter->vlgrp, vid, NULL);
5069 e1000_irq_enable(adapter); 5057 if (!test_bit(__E1000_DOWN, &adapter->flags))
5058 e1000_irq_enable(adapter);
5070 5059
5071 if ((adapter->hw.mng_cookie.status & 5060 if ((adapter->hw.mng_cookie.status &
5072 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) && 5061 E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
@@ -5089,7 +5078,7 @@ e1000_restore_vlan(struct e1000_adapter *adapter)
5089 e1000_vlan_rx_register(adapter->netdev, adapter->vlgrp); 5078 e1000_vlan_rx_register(adapter->netdev, adapter->vlgrp);
5090 5079
5091 if (adapter->vlgrp) { 5080 if (adapter->vlgrp) {
5092 uint16_t vid; 5081 u16 vid;
5093 for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) { 5082 for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
5094 if (!vlan_group_get_device(adapter->vlgrp, vid)) 5083 if (!vlan_group_get_device(adapter->vlgrp, vid))
5095 continue; 5084 continue;
@@ -5099,7 +5088,7 @@ e1000_restore_vlan(struct e1000_adapter *adapter)
5099} 5088}
5100 5089
5101int 5090int
5102e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx) 5091e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx)
5103{ 5092{
5104 adapter->hw.autoneg = 0; 5093 adapter->hw.autoneg = 0;
5105 5094
@@ -5140,8 +5129,8 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state)
5140{ 5129{
5141 struct net_device *netdev = pci_get_drvdata(pdev); 5130 struct net_device *netdev = pci_get_drvdata(pdev);
5142 struct e1000_adapter *adapter = netdev_priv(netdev); 5131 struct e1000_adapter *adapter = netdev_priv(netdev);
5143 uint32_t ctrl, ctrl_ext, rctl, status; 5132 u32 ctrl, ctrl_ext, rctl, status;
5144 uint32_t wufc = adapter->wol; 5133 u32 wufc = adapter->wol;
5145#ifdef CONFIG_PM 5134#ifdef CONFIG_PM
5146 int retval = 0; 5135 int retval = 0;
5147#endif 5136#endif
@@ -5238,7 +5227,7 @@ e1000_resume(struct pci_dev *pdev)
5238{ 5227{
5239 struct net_device *netdev = pci_get_drvdata(pdev); 5228 struct net_device *netdev = pci_get_drvdata(pdev);
5240 struct e1000_adapter *adapter = netdev_priv(netdev); 5229 struct e1000_adapter *adapter = netdev_priv(netdev);
5241 uint32_t err; 5230 u32 err;
5242 5231
5243 pci_set_power_state(pdev, PCI_D0); 5232 pci_set_power_state(pdev, PCI_D0);
5244 pci_restore_state(pdev); 5233 pci_restore_state(pdev);
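
[Editor's note] A recurring change in the e1000_main.c hunks above is that the VLAN callbacks only call e1000_irq_disable()/e1000_irq_enable() when the __E1000_DOWN bit is clear in adapter->flags, so a teardown in progress is not re-armed. The sketch below mirrors only that guard pattern; the struct, bit index and helper names (fake_adapter, DOWN_BIT, irq_disable/irq_enable, test_bit_ul) are illustrative stand-ins, not the driver's API.

#include <stdbool.h>
#include <stdio.h>

#define DOWN_BIT 0  /* stand-in for __E1000_DOWN used with adapter->flags */

struct fake_adapter {
	unsigned long flags;   /* stand-in for adapter->flags */
	bool irqs_enabled;
};

static bool test_bit_ul(int nr, const unsigned long *addr)
{
	return (*addr >> nr) & 1UL;  /* simplified test_bit() */
}

static void irq_disable(struct fake_adapter *a) { a->irqs_enabled = false; }
static void irq_enable(struct fake_adapter *a)  { a->irqs_enabled = true; }

/* Mirrors the guarded pattern from e1000_vlan_rx_register()/kill_vid():
 * only touch interrupt state when the interface is not marked down. */
static void vlan_op(struct fake_adapter *a)
{
	if (!test_bit_ul(DOWN_BIT, &a->flags))
		irq_disable(a);

	/* ... VLAN bookkeeping would go here ... */

	if (!test_bit_ul(DOWN_BIT, &a->flags))
		irq_enable(a);
}

int main(void)
{
	struct fake_adapter up = { .flags = 0, .irqs_enabled = true };
	struct fake_adapter down = { .flags = 1UL << DOWN_BIT, .irqs_enabled = false };

	vlan_op(&up);    /* interface up: irqs toggled off and back on */
	vlan_op(&down);  /* interface down: irq state left untouched */
	printf("up: irqs %s, down: irqs %s\n",
	       up.irqs_enabled ? "on" : "off",
	       down.irqs_enabled ? "on" : "off");
	return 0;
}
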
diff --git a/drivers/net/e1000/e1000_osdep.h b/drivers/net/e1000/e1000_osdep.h
index 10af742d8a20..365626d3177e 100644
--- a/drivers/net/e1000/e1000_osdep.h
+++ b/drivers/net/e1000/e1000_osdep.h
@@ -41,13 +41,6 @@
41#include <linux/interrupt.h> 41#include <linux/interrupt.h>
42#include <linux/sched.h> 42#include <linux/sched.h>
43 43
44typedef enum {
45#undef FALSE
46 FALSE = 0,
47#undef TRUE
48 TRUE = 1
49} boolean_t;
50
51#ifdef DBG 44#ifdef DBG
52#define DEBUGOUT(S) printk(KERN_DEBUG S "\n") 45#define DEBUGOUT(S) printk(KERN_DEBUG S "\n")
53#define DEBUGOUT1(S, A...) printk(KERN_DEBUG S "\n", A) 46#define DEBUGOUT1(S, A...) printk(KERN_DEBUG S "\n", A)
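
[Editor's note] The e1000_osdep.h hunk drops the driver-private boolean_t/TRUE/FALSE enum, and the e1000_main.c hunks switch the affected functions to the kernel's bool/true/false. A tiny userspace sketch of the equivalent conversion using <stdbool.h> (illustrative only):

#include <stdbool.h>
#include <stdio.h>

/* Before: a private enum such as
 *     typedef enum { FALSE = 0, TRUE = 1 } boolean_t;
 * After: the standard bool type, as the diff does with the kernel's bool. */
static bool descriptor_done(unsigned int status, unsigned int dd_bit)
{
	return (status & dd_bit) != 0;  /* was: (status & dd_bit) ? TRUE : FALSE */
}

int main(void)
{
	printf("%d %d\n", descriptor_done(0x1, 0x1), descriptor_done(0x0, 0x1));
	return 0;
}
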
diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
index 7fe20310eb5f..01c88664bad3 100644
--- a/drivers/net/e1000e/82571.c
+++ b/drivers/net/e1000e/82571.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel PRO/1000 Linux driver 3 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2007 Intel Corporation. 4 Copyright(c) 1999 - 2008 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -29,6 +29,9 @@
29/* 29/*
30 * 82571EB Gigabit Ethernet Controller 30 * 82571EB Gigabit Ethernet Controller
31 * 82571EB Gigabit Ethernet Controller (Fiber) 31 * 82571EB Gigabit Ethernet Controller (Fiber)
32 * 82571EB Dual Port Gigabit Mezzanine Adapter
33 * 82571EB Quad Port Gigabit Mezzanine Adapter
34 * 82571PT Gigabit PT Quad Port Server ExpressModule
32 * 82572EI Gigabit Ethernet Controller (Copper) 35 * 82572EI Gigabit Ethernet Controller (Copper)
33 * 82572EI Gigabit Ethernet Controller (Fiber) 36 * 82572EI Gigabit Ethernet Controller (Fiber)
34 * 82572EI Gigabit Ethernet Controller 37 * 82572EI Gigabit Ethernet Controller
@@ -72,7 +75,7 @@ static s32 e1000_init_phy_params_82571(struct e1000_hw *hw)
72 struct e1000_phy_info *phy = &hw->phy; 75 struct e1000_phy_info *phy = &hw->phy;
73 s32 ret_val; 76 s32 ret_val;
74 77
75 if (hw->media_type != e1000_media_type_copper) { 78 if (hw->phy.media_type != e1000_media_type_copper) {
76 phy->type = e1000_phy_none; 79 phy->type = e1000_phy_none;
77 return 0; 80 return 0;
78 } 81 }
@@ -150,7 +153,8 @@ static s32 e1000_init_nvm_params_82571(struct e1000_hw *hw)
150 if (((eecd >> 15) & 0x3) == 0x3) { 153 if (((eecd >> 15) & 0x3) == 0x3) {
151 nvm->type = e1000_nvm_flash_hw; 154 nvm->type = e1000_nvm_flash_hw;
152 nvm->word_size = 2048; 155 nvm->word_size = 2048;
153 /* Autonomous Flash update bit must be cleared due 156 /*
157 * Autonomous Flash update bit must be cleared due
154 * to Flash update issue. 158 * to Flash update issue.
155 */ 159 */
156 eecd &= ~E1000_EECD_AUPDEN; 160 eecd &= ~E1000_EECD_AUPDEN;
@@ -159,13 +163,18 @@ static s32 e1000_init_nvm_params_82571(struct e1000_hw *hw)
159 } 163 }
160 /* Fall Through */ 164 /* Fall Through */
161 default: 165 default:
162 nvm->type = e1000_nvm_eeprom_spi; 166 nvm->type = e1000_nvm_eeprom_spi;
163 size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >> 167 size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
164 E1000_EECD_SIZE_EX_SHIFT); 168 E1000_EECD_SIZE_EX_SHIFT);
165 /* Added to a constant, "size" becomes the left-shift value 169 /*
170 * Added to a constant, "size" becomes the left-shift value
166 * for setting word_size. 171 * for setting word_size.
167 */ 172 */
168 size += NVM_WORD_SIZE_BASE_SHIFT; 173 size += NVM_WORD_SIZE_BASE_SHIFT;
174
175 /* EEPROM access above 16k is unsupported */
176 if (size > 14)
177 size = 14;
169 nvm->word_size = 1 << size; 178 nvm->word_size = 1 << size;
170 break; 179 break;
171 } 180 }
@@ -190,16 +199,16 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
190 case E1000_DEV_ID_82571EB_FIBER: 199 case E1000_DEV_ID_82571EB_FIBER:
191 case E1000_DEV_ID_82572EI_FIBER: 200 case E1000_DEV_ID_82572EI_FIBER:
192 case E1000_DEV_ID_82571EB_QUAD_FIBER: 201 case E1000_DEV_ID_82571EB_QUAD_FIBER:
193 hw->media_type = e1000_media_type_fiber; 202 hw->phy.media_type = e1000_media_type_fiber;
194 break; 203 break;
195 case E1000_DEV_ID_82571EB_SERDES: 204 case E1000_DEV_ID_82571EB_SERDES:
196 case E1000_DEV_ID_82572EI_SERDES: 205 case E1000_DEV_ID_82572EI_SERDES:
197 case E1000_DEV_ID_82571EB_SERDES_DUAL: 206 case E1000_DEV_ID_82571EB_SERDES_DUAL:
198 case E1000_DEV_ID_82571EB_SERDES_QUAD: 207 case E1000_DEV_ID_82571EB_SERDES_QUAD:
199 hw->media_type = e1000_media_type_internal_serdes; 208 hw->phy.media_type = e1000_media_type_internal_serdes;
200 break; 209 break;
201 default: 210 default:
202 hw->media_type = e1000_media_type_copper; 211 hw->phy.media_type = e1000_media_type_copper;
203 break; 212 break;
204 } 213 }
205 214
@@ -208,25 +217,28 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
208 /* Set rar entry count */ 217 /* Set rar entry count */
209 mac->rar_entry_count = E1000_RAR_ENTRIES; 218 mac->rar_entry_count = E1000_RAR_ENTRIES;
210 /* Set if manageability features are enabled. */ 219 /* Set if manageability features are enabled. */
211 mac->arc_subsystem_valid = 220 mac->arc_subsystem_valid = (er32(FWSM) & E1000_FWSM_MODE_MASK) ? 1 : 0;
212 (er32(FWSM) & E1000_FWSM_MODE_MASK) ? 1 : 0;
213 221
214 /* check for link */ 222 /* check for link */
215 switch (hw->media_type) { 223 switch (hw->phy.media_type) {
216 case e1000_media_type_copper: 224 case e1000_media_type_copper:
217 func->setup_physical_interface = e1000_setup_copper_link_82571; 225 func->setup_physical_interface = e1000_setup_copper_link_82571;
218 func->check_for_link = e1000e_check_for_copper_link; 226 func->check_for_link = e1000e_check_for_copper_link;
219 func->get_link_up_info = e1000e_get_speed_and_duplex_copper; 227 func->get_link_up_info = e1000e_get_speed_and_duplex_copper;
220 break; 228 break;
221 case e1000_media_type_fiber: 229 case e1000_media_type_fiber:
222 func->setup_physical_interface = e1000_setup_fiber_serdes_link_82571; 230 func->setup_physical_interface =
231 e1000_setup_fiber_serdes_link_82571;
223 func->check_for_link = e1000e_check_for_fiber_link; 232 func->check_for_link = e1000e_check_for_fiber_link;
224 func->get_link_up_info = e1000e_get_speed_and_duplex_fiber_serdes; 233 func->get_link_up_info =
234 e1000e_get_speed_and_duplex_fiber_serdes;
225 break; 235 break;
226 case e1000_media_type_internal_serdes: 236 case e1000_media_type_internal_serdes:
227 func->setup_physical_interface = e1000_setup_fiber_serdes_link_82571; 237 func->setup_physical_interface =
238 e1000_setup_fiber_serdes_link_82571;
228 func->check_for_link = e1000e_check_for_serdes_link; 239 func->check_for_link = e1000e_check_for_serdes_link;
229 func->get_link_up_info = e1000e_get_speed_and_duplex_fiber_serdes; 240 func->get_link_up_info =
241 e1000e_get_speed_and_duplex_fiber_serdes;
230 break; 242 break;
231 default: 243 default:
232 return -E1000_ERR_CONFIG; 244 return -E1000_ERR_CONFIG;
@@ -236,7 +248,7 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
236 return 0; 248 return 0;
237} 249}
238 250
239static s32 e1000_get_invariants_82571(struct e1000_adapter *adapter) 251static s32 e1000_get_variants_82571(struct e1000_adapter *adapter)
240{ 252{
241 struct e1000_hw *hw = &adapter->hw; 253 struct e1000_hw *hw = &adapter->hw;
242 static int global_quad_port_a; /* global port a indication */ 254 static int global_quad_port_a; /* global port a indication */
@@ -322,10 +334,12 @@ static s32 e1000_get_phy_id_82571(struct e1000_hw *hw)
322 switch (hw->mac.type) { 334 switch (hw->mac.type) {
323 case e1000_82571: 335 case e1000_82571:
324 case e1000_82572: 336 case e1000_82572:
325 /* The 82571 firmware may still be configuring the PHY. 337 /*
338 * The 82571 firmware may still be configuring the PHY.
326 * In this case, we cannot access the PHY until the 339 * In this case, we cannot access the PHY until the
327 * configuration is done. So we explicitly set the 340 * configuration is done. So we explicitly set the
328 * PHY ID. */ 341 * PHY ID.
342 */
329 phy->id = IGP01E1000_I_PHY_ID; 343 phy->id = IGP01E1000_I_PHY_ID;
330 break; 344 break;
331 case e1000_82573: 345 case e1000_82573:
@@ -479,8 +493,10 @@ static s32 e1000_update_nvm_checksum_82571(struct e1000_hw *hw)
479 if (ret_val) 493 if (ret_val)
480 return ret_val; 494 return ret_val;
481 495
482 /* If our nvm is an EEPROM, then we're done 496 /*
483 * otherwise, commit the checksum to the flash NVM. */ 497 * If our nvm is an EEPROM, then we're done
498 * otherwise, commit the checksum to the flash NVM.
499 */
484 if (hw->nvm.type != e1000_nvm_flash_hw) 500 if (hw->nvm.type != e1000_nvm_flash_hw)
485 return ret_val; 501 return ret_val;
486 502
@@ -496,7 +512,8 @@ static s32 e1000_update_nvm_checksum_82571(struct e1000_hw *hw)
496 512
497 /* Reset the firmware if using STM opcode. */ 513 /* Reset the firmware if using STM opcode. */
498 if ((er32(FLOP) & 0xFF00) == E1000_STM_OPCODE) { 514 if ((er32(FLOP) & 0xFF00) == E1000_STM_OPCODE) {
499 /* The enabling of and the actual reset must be done 515 /*
516 * The enabling of and the actual reset must be done
500 * in two write cycles. 517 * in two write cycles.
501 */ 518 */
502 ew32(HICR, E1000_HICR_FW_RESET_ENABLE); 519 ew32(HICR, E1000_HICR_FW_RESET_ENABLE);
@@ -557,8 +574,10 @@ static s32 e1000_write_nvm_eewr_82571(struct e1000_hw *hw, u16 offset,
557 u32 eewr = 0; 574 u32 eewr = 0;
558 s32 ret_val = 0; 575 s32 ret_val = 0;
559 576
560 /* A check for invalid values: offset too large, too many words, 577 /*
561 * and not enough words. */ 578 * A check for invalid values: offset too large, too many words,
579 * and not enough words.
580 */
562 if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || 581 if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
563 (words == 0)) { 582 (words == 0)) {
564 hw_dbg(hw, "nvm parameter(s) out of bounds\n"); 583 hw_dbg(hw, "nvm parameter(s) out of bounds\n");
@@ -645,30 +664,32 @@ static s32 e1000_set_d0_lplu_state_82571(struct e1000_hw *hw, bool active)
645 } else { 664 } else {
646 data &= ~IGP02E1000_PM_D0_LPLU; 665 data &= ~IGP02E1000_PM_D0_LPLU;
647 ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data); 666 ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data);
648 /* LPLU and SmartSpeed are mutually exclusive. LPLU is used 667 /*
668 * LPLU and SmartSpeed are mutually exclusive. LPLU is used
649 * during Dx states where the power conservation is most 669 * during Dx states where the power conservation is most
650 * important. During driver activity we should enable 670 * important. During driver activity we should enable
651 * SmartSpeed, so performance is maintained. */ 671 * SmartSpeed, so performance is maintained.
672 */
652 if (phy->smart_speed == e1000_smart_speed_on) { 673 if (phy->smart_speed == e1000_smart_speed_on) {
653 ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, 674 ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
654 &data); 675 &data);
655 if (ret_val) 676 if (ret_val)
656 return ret_val; 677 return ret_val;
657 678
658 data |= IGP01E1000_PSCFR_SMART_SPEED; 679 data |= IGP01E1000_PSCFR_SMART_SPEED;
659 ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, 680 ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
660 data); 681 data);
661 if (ret_val) 682 if (ret_val)
662 return ret_val; 683 return ret_val;
663 } else if (phy->smart_speed == e1000_smart_speed_off) { 684 } else if (phy->smart_speed == e1000_smart_speed_off) {
664 ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, 685 ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
665 &data); 686 &data);
666 if (ret_val) 687 if (ret_val)
667 return ret_val; 688 return ret_val;
668 689
669 data &= ~IGP01E1000_PSCFR_SMART_SPEED; 690 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
670 ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, 691 ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
671 data); 692 data);
672 if (ret_val) 693 if (ret_val)
673 return ret_val; 694 return ret_val;
674 } 695 }
@@ -693,7 +714,8 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
693 s32 ret_val; 714 s32 ret_val;
694 u16 i = 0; 715 u16 i = 0;
695 716
696 /* Prevent the PCI-E bus from sticking if there is no TLP connection 717 /*
718 * Prevent the PCI-E bus from sticking if there is no TLP connection
697 * on the last TLP read/write transaction when MAC is reset. 719 * on the last TLP read/write transaction when MAC is reset.
698 */ 720 */
699 ret_val = e1000e_disable_pcie_master(hw); 721 ret_val = e1000e_disable_pcie_master(hw);
@@ -709,8 +731,10 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
709 731
710 msleep(10); 732 msleep(10);
711 733
712 /* Must acquire the MDIO ownership before MAC reset. 734 /*
713 * Ownership defaults to firmware after a reset. */ 735 * Must acquire the MDIO ownership before MAC reset.
736 * Ownership defaults to firmware after a reset.
737 */
714 if (hw->mac.type == e1000_82573) { 738 if (hw->mac.type == e1000_82573) {
715 extcnf_ctrl = er32(EXTCNF_CTRL); 739 extcnf_ctrl = er32(EXTCNF_CTRL);
716 extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP; 740 extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
@@ -747,7 +771,8 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
747 /* We don't want to continue accessing MAC registers. */ 771 /* We don't want to continue accessing MAC registers. */
748 return ret_val; 772 return ret_val;
749 773
750 /* Phy configuration from NVM just starts after EECD_AUTO_RD is set. 774 /*
775 * Phy configuration from NVM just starts after EECD_AUTO_RD is set.
751 * Need to wait for Phy configuration completion before accessing 776 * Need to wait for Phy configuration completion before accessing
752 * NVM and Phy. 777 * NVM and Phy.
753 */ 778 */
@@ -793,7 +818,8 @@ static s32 e1000_init_hw_82571(struct e1000_hw *hw)
793 e1000e_clear_vfta(hw); 818 e1000e_clear_vfta(hw);
794 819
795 /* Setup the receive address. */ 820 /* Setup the receive address. */
796 /* If, however, a locally administered address was assigned to the 821 /*
822 * If, however, a locally administered address was assigned to the
797 * 82571, we must reserve a RAR for it to work around an issue where 823 * 82571, we must reserve a RAR for it to work around an issue where
798 * resetting one port will reload the MAC on the other port. 824 * resetting one port will reload the MAC on the other port.
799 */ 825 */
@@ -810,19 +836,19 @@ static s32 e1000_init_hw_82571(struct e1000_hw *hw)
810 ret_val = e1000_setup_link_82571(hw); 836 ret_val = e1000_setup_link_82571(hw);
811 837
812 /* Set the transmit descriptor write-back policy */ 838 /* Set the transmit descriptor write-back policy */
813 reg_data = er32(TXDCTL); 839 reg_data = er32(TXDCTL(0));
814 reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) | 840 reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) |
815 E1000_TXDCTL_FULL_TX_DESC_WB | 841 E1000_TXDCTL_FULL_TX_DESC_WB |
816 E1000_TXDCTL_COUNT_DESC; 842 E1000_TXDCTL_COUNT_DESC;
817 ew32(TXDCTL, reg_data); 843 ew32(TXDCTL(0), reg_data);
818 844
819 /* ...for both queues. */ 845 /* ...for both queues. */
820 if (mac->type != e1000_82573) { 846 if (mac->type != e1000_82573) {
821 reg_data = er32(TXDCTL1); 847 reg_data = er32(TXDCTL(1));
822 reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) | 848 reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) |
823 E1000_TXDCTL_FULL_TX_DESC_WB | 849 E1000_TXDCTL_FULL_TX_DESC_WB |
824 E1000_TXDCTL_COUNT_DESC; 850 E1000_TXDCTL_COUNT_DESC;
825 ew32(TXDCTL1, reg_data); 851 ew32(TXDCTL(1), reg_data);
826 } else { 852 } else {
827 e1000e_enable_tx_pkt_filtering(hw); 853 e1000e_enable_tx_pkt_filtering(hw);
828 reg_data = er32(GCR); 854 reg_data = er32(GCR);
@@ -830,7 +856,8 @@ static s32 e1000_init_hw_82571(struct e1000_hw *hw)
830 ew32(GCR, reg_data); 856 ew32(GCR, reg_data);
831 } 857 }
832 858
833 /* Clear all of the statistics registers (clear on read). It is 859 /*
860 * Clear all of the statistics registers (clear on read). It is
834 * important that we do this after we have tried to establish link 861 * important that we do this after we have tried to establish link
835 * because the symbol error count will increment wildly if there 862 * because the symbol error count will increment wildly if there
836 * is no link. 863 * is no link.
@@ -851,17 +878,17 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
851 u32 reg; 878 u32 reg;
852 879
853 /* Transmit Descriptor Control 0 */ 880 /* Transmit Descriptor Control 0 */
854 reg = er32(TXDCTL); 881 reg = er32(TXDCTL(0));
855 reg |= (1 << 22); 882 reg |= (1 << 22);
856 ew32(TXDCTL, reg); 883 ew32(TXDCTL(0), reg);
857 884
858 /* Transmit Descriptor Control 1 */ 885 /* Transmit Descriptor Control 1 */
859 reg = er32(TXDCTL1); 886 reg = er32(TXDCTL(1));
860 reg |= (1 << 22); 887 reg |= (1 << 22);
861 ew32(TXDCTL1, reg); 888 ew32(TXDCTL(1), reg);
862 889
863 /* Transmit Arbitration Control 0 */ 890 /* Transmit Arbitration Control 0 */
864 reg = er32(TARC0); 891 reg = er32(TARC(0));
865 reg &= ~(0xF << 27); /* 30:27 */ 892 reg &= ~(0xF << 27); /* 30:27 */
866 switch (hw->mac.type) { 893 switch (hw->mac.type) {
867 case e1000_82571: 894 case e1000_82571:
@@ -871,10 +898,10 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
871 default: 898 default:
872 break; 899 break;
873 } 900 }
874 ew32(TARC0, reg); 901 ew32(TARC(0), reg);
875 902
876 /* Transmit Arbitration Control 1 */ 903 /* Transmit Arbitration Control 1 */
877 reg = er32(TARC1); 904 reg = er32(TARC(1));
878 switch (hw->mac.type) { 905 switch (hw->mac.type) {
879 case e1000_82571: 906 case e1000_82571:
880 case e1000_82572: 907 case e1000_82572:
@@ -884,7 +911,7 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
884 reg &= ~(1 << 28); 911 reg &= ~(1 << 28);
885 else 912 else
886 reg |= (1 << 28); 913 reg |= (1 << 28);
887 ew32(TARC1, reg); 914 ew32(TARC(1), reg);
888 break; 915 break;
889 default: 916 default:
890 break; 917 break;
@@ -922,7 +949,8 @@ void e1000e_clear_vfta(struct e1000_hw *hw)
922 949
923 if (hw->mac.type == e1000_82573) { 950 if (hw->mac.type == e1000_82573) {
924 if (hw->mng_cookie.vlan_id != 0) { 951 if (hw->mng_cookie.vlan_id != 0) {
925 /* The VFTA is a 4096b bit-field, each identifying 952 /*
953 * The VFTA is a 4096b bit-field, each identifying
926 * a single VLAN ID. The following operations 954 * a single VLAN ID. The following operations
927 * determine which 32b entry (i.e. offset) into the 955 * determine which 32b entry (i.e. offset) into the
928 * array we want to set the VLAN ID (i.e. bit) of 956 * array we want to set the VLAN ID (i.e. bit) of
@@ -936,7 +964,8 @@ void e1000e_clear_vfta(struct e1000_hw *hw)
936 } 964 }
937 } 965 }
938 for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) { 966 for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
939 /* If the offset we want to clear is the same offset of the 967 /*
968 * If the offset we want to clear is the same offset of the
940 * manageability VLAN ID, then clear all bits except that of 969 * manageability VLAN ID, then clear all bits except that of
941 * the manageability unit. 970 * the manageability unit.
942 */ 971 */
@@ -947,7 +976,7 @@ void e1000e_clear_vfta(struct e1000_hw *hw)
947} 976}
948 977
949/** 978/**
950 * e1000_mc_addr_list_update_82571 - Update Multicast addresses 979 * e1000_update_mc_addr_list_82571 - Update Multicast addresses
951 * @hw: pointer to the HW structure 980 * @hw: pointer to the HW structure
952 * @mc_addr_list: array of multicast addresses to program 981 * @mc_addr_list: array of multicast addresses to program
953 * @mc_addr_count: number of multicast addresses to program 982 * @mc_addr_count: number of multicast addresses to program
@@ -959,7 +988,7 @@ void e1000e_clear_vfta(struct e1000_hw *hw)
959 * The parameter rar_count will usually be hw->mac.rar_entry_count 988 * The parameter rar_count will usually be hw->mac.rar_entry_count
960 * unless there are workarounds that change this. 989 * unless there are workarounds that change this.
961 **/ 990 **/
962static void e1000_mc_addr_list_update_82571(struct e1000_hw *hw, 991static void e1000_update_mc_addr_list_82571(struct e1000_hw *hw,
963 u8 *mc_addr_list, 992 u8 *mc_addr_list,
964 u32 mc_addr_count, 993 u32 mc_addr_count,
965 u32 rar_used_count, 994 u32 rar_used_count,
@@ -968,8 +997,8 @@ static void e1000_mc_addr_list_update_82571(struct e1000_hw *hw,
968 if (e1000e_get_laa_state_82571(hw)) 997 if (e1000e_get_laa_state_82571(hw))
969 rar_count--; 998 rar_count--;
970 999
971 e1000e_mc_addr_list_update_generic(hw, mc_addr_list, mc_addr_count, 1000 e1000e_update_mc_addr_list_generic(hw, mc_addr_list, mc_addr_count,
972 rar_used_count, rar_count); 1001 rar_used_count, rar_count);
973} 1002}
974 1003
975/** 1004/**
@@ -984,12 +1013,13 @@ static void e1000_mc_addr_list_update_82571(struct e1000_hw *hw,
984 **/ 1013 **/
985static s32 e1000_setup_link_82571(struct e1000_hw *hw) 1014static s32 e1000_setup_link_82571(struct e1000_hw *hw)
986{ 1015{
987 /* 82573 does not have a word in the NVM to determine 1016 /*
1017 * 82573 does not have a word in the NVM to determine
988 * the default flow control setting, so we explicitly 1018 * the default flow control setting, so we explicitly
989 * set it to full. 1019 * set it to full.
990 */ 1020 */
991 if (hw->mac.type == e1000_82573) 1021 if (hw->mac.type == e1000_82573)
992 hw->mac.fc = e1000_fc_full; 1022 hw->fc.type = e1000_fc_full;
993 1023
994 return e1000e_setup_link(hw); 1024 return e1000e_setup_link(hw);
995} 1025}
@@ -1050,14 +1080,14 @@ static s32 e1000_setup_fiber_serdes_link_82571(struct e1000_hw *hw)
1050 switch (hw->mac.type) { 1080 switch (hw->mac.type) {
1051 case e1000_82571: 1081 case e1000_82571:
1052 case e1000_82572: 1082 case e1000_82572:
1053 /* If SerDes loopback mode is entered, there is no form 1083 /*
1084 * If SerDes loopback mode is entered, there is no form
1054 * of reset to take the adapter out of that mode. So we 1085 * of reset to take the adapter out of that mode. So we
1055 * have to explicitly take the adapter out of loopback 1086 * have to explicitly take the adapter out of loopback
1056 * mode. This prevents drivers from twiddling their thumbs 1087 * mode. This prevents drivers from twiddling their thumbs
1057 * if another tool failed to take it out of loopback mode. 1088 * if another tool failed to take it out of loopback mode.
1058 */ 1089 */
1059 ew32(SCTL, 1090 ew32(SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK);
1060 E1000_SCTL_DISABLE_SERDES_LOOPBACK);
1061 break; 1091 break;
1062 default: 1092 default:
1063 break; 1093 break;
@@ -1124,7 +1154,8 @@ void e1000e_set_laa_state_82571(struct e1000_hw *hw, bool state)
1124 1154
1125 /* If workaround is activated... */ 1155 /* If workaround is activated... */
1126 if (state) 1156 if (state)
1127 /* Hold a copy of the LAA in RAR[14] This is done so that 1157 /*
1158 * Hold a copy of the LAA in RAR[14] This is done so that
1128 * between the time RAR[0] gets clobbered and the time it 1159 * between the time RAR[0] gets clobbered and the time it
1129 * gets fixed, the actual LAA is in one of the RARs and no 1160 * gets fixed, the actual LAA is in one of the RARs and no
1130 * incoming packets directed to this port are dropped. 1161 * incoming packets directed to this port are dropped.
@@ -1152,7 +1183,8 @@ static s32 e1000_fix_nvm_checksum_82571(struct e1000_hw *hw)
1152 if (nvm->type != e1000_nvm_flash_hw) 1183 if (nvm->type != e1000_nvm_flash_hw)
1153 return 0; 1184 return 0;
1154 1185
1155 /* Check bit 4 of word 10h. If it is 0, firmware is done updating 1186 /*
1187 * Check bit 4 of word 10h. If it is 0, firmware is done updating
1156 * 10h-12h. Checksum may need to be fixed. 1188 * 10h-12h. Checksum may need to be fixed.
1157 */ 1189 */
1158 ret_val = e1000_read_nvm(hw, 0x10, 1, &data); 1190 ret_val = e1000_read_nvm(hw, 0x10, 1, &data);
@@ -1160,7 +1192,8 @@ static s32 e1000_fix_nvm_checksum_82571(struct e1000_hw *hw)
1160 return ret_val; 1192 return ret_val;
1161 1193
1162 if (!(data & 0x10)) { 1194 if (!(data & 0x10)) {
1163 /* Read 0x23 and check bit 15. This bit is a 1 1195 /*
1196 * Read 0x23 and check bit 15. This bit is a 1
1164 * when the checksum has already been fixed. If 1197 * when the checksum has already been fixed. If
1165 * the checksum is still wrong and this bit is a 1198 * the checksum is still wrong and this bit is a
1166 * 1, we need to return bad checksum. Otherwise, 1199 * 1, we need to return bad checksum. Otherwise,
@@ -1240,7 +1273,7 @@ static struct e1000_mac_operations e82571_mac_ops = {
1240 /* .get_link_up_info: media type dependent */ 1273 /* .get_link_up_info: media type dependent */
1241 .led_on = e1000e_led_on_generic, 1274 .led_on = e1000e_led_on_generic,
1242 .led_off = e1000e_led_off_generic, 1275 .led_off = e1000e_led_off_generic,
1243 .mc_addr_list_update = e1000_mc_addr_list_update_82571, 1276 .update_mc_addr_list = e1000_update_mc_addr_list_82571,
1244 .reset_hw = e1000_reset_hw_82571, 1277 .reset_hw = e1000_reset_hw_82571,
1245 .init_hw = e1000_init_hw_82571, 1278 .init_hw = e1000_init_hw_82571,
1246 .setup_link = e1000_setup_link_82571, 1279 .setup_link = e1000_setup_link_82571,
@@ -1304,7 +1337,7 @@ struct e1000_info e1000_82571_info = {
1304 | FLAG_TARC_SPEED_MODE_BIT /* errata */ 1337 | FLAG_TARC_SPEED_MODE_BIT /* errata */
1305 | FLAG_APME_CHECK_PORT_B, 1338 | FLAG_APME_CHECK_PORT_B,
1306 .pba = 38, 1339 .pba = 38,
1307 .get_invariants = e1000_get_invariants_82571, 1340 .get_variants = e1000_get_variants_82571,
1308 .mac_ops = &e82571_mac_ops, 1341 .mac_ops = &e82571_mac_ops,
1309 .phy_ops = &e82_phy_ops_igp, 1342 .phy_ops = &e82_phy_ops_igp,
1310 .nvm_ops = &e82571_nvm_ops, 1343 .nvm_ops = &e82571_nvm_ops,
@@ -1322,7 +1355,7 @@ struct e1000_info e1000_82572_info = {
1322 | FLAG_HAS_STATS_ICR_ICT 1355 | FLAG_HAS_STATS_ICR_ICT
1323 | FLAG_TARC_SPEED_MODE_BIT, /* errata */ 1356 | FLAG_TARC_SPEED_MODE_BIT, /* errata */
1324 .pba = 38, 1357 .pba = 38,
1325 .get_invariants = e1000_get_invariants_82571, 1358 .get_variants = e1000_get_variants_82571,
1326 .mac_ops = &e82571_mac_ops, 1359 .mac_ops = &e82571_mac_ops,
1327 .phy_ops = &e82_phy_ops_igp, 1360 .phy_ops = &e82_phy_ops_igp,
1328 .nvm_ops = &e82571_nvm_ops, 1361 .nvm_ops = &e82571_nvm_ops,
@@ -1342,7 +1375,7 @@ struct e1000_info e1000_82573_info = {
1342 | FLAG_HAS_ERT 1375 | FLAG_HAS_ERT
1343 | FLAG_HAS_SWSM_ON_LOAD, 1376 | FLAG_HAS_SWSM_ON_LOAD,
1344 .pba = 20, 1377 .pba = 20,
1345 .get_invariants = e1000_get_invariants_82571, 1378 .get_variants = e1000_get_variants_82571,
1346 .mac_ops = &e82571_mac_ops, 1379 .mac_ops = &e82571_mac_ops,
1347 .phy_ops = &e82_phy_ops_m88, 1380 .phy_ops = &e82_phy_ops_m88,
1348 .nvm_ops = &e82571_nvm_ops, 1381 .nvm_ops = &e82571_nvm_ops,
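
[Editor's note] One functional change in the 82571 NVM hunk above is the cap "if (size > 14) size = 14" before "nvm->word_size = 1 << size", i.e. EEPROM sizes above 16K words (1 << 14 = 16384) are clamped. The sketch below reproduces only that arithmetic; E1000_EECD_SIZE_EX_MASK (0x00007800) appears in the defines hunk further down, while the shift of 11 is inferred from that mask and the base shift of 6 is an assumed value for illustration, not taken from this diff.

#include <stdint.h>
#include <stdio.h>

#define EECD_SIZE_EX_MASK    0x00007800u  /* mirrors E1000_EECD_SIZE_EX_MASK */
#define EECD_SIZE_EX_SHIFT   11           /* inferred from the mask above */
#define WORD_SIZE_BASE_SHIFT 6            /* assumed for this sketch */

static uint32_t nvm_word_size(uint32_t eecd)
{
	uint16_t size = (uint16_t)((eecd & EECD_SIZE_EX_MASK) >> EECD_SIZE_EX_SHIFT);

	size += WORD_SIZE_BASE_SHIFT;
	if (size > 14)            /* cap added by the hunk: >16K words unsupported */
		size = 14;
	return 1u << size;        /* 1 << 14 == 16384 words, i.e. a 16K-word part */
}

int main(void)
{
	/* an encoded size field of 0xF would otherwise yield 1 << 21 words */
	printf("%u\n", (unsigned)nvm_word_size(0xFu << EECD_SIZE_EX_SHIFT));
	return 0;
}
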
diff --git a/drivers/net/e1000e/Makefile b/drivers/net/e1000e/Makefile
index 650f866e7ac2..360c91369f35 100644
--- a/drivers/net/e1000e/Makefile
+++ b/drivers/net/e1000e/Makefile
@@ -1,7 +1,7 @@
1################################################################################ 1################################################################################
2# 2#
3# Intel PRO/1000 Linux driver 3# Intel PRO/1000 Linux driver
4# Copyright(c) 1999 - 2007 Intel Corporation. 4# Copyright(c) 1999 - 2008 Intel Corporation.
5# 5#
6# This program is free software; you can redistribute it and/or modify it 6# This program is free software; you can redistribute it and/or modify it
7# under the terms and conditions of the GNU General Public License, 7# under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/e1000e/defines.h b/drivers/net/e1000e/defines.h
index a4f511f549f7..572cfd44397a 100644
--- a/drivers/net/e1000e/defines.h
+++ b/drivers/net/e1000e/defines.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel PRO/1000 Linux driver 3 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2007 Intel Corporation. 4 Copyright(c) 1999 - 2008 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -120,10 +120,10 @@
120#define E1000_MANC_ARP_EN 0x00002000 /* Enable ARP Request Filtering */ 120#define E1000_MANC_ARP_EN 0x00002000 /* Enable ARP Request Filtering */
121#define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */ 121#define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */
122#define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */ 122#define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */
123#define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000 /* Enable MAC address 123/* Enable MAC address filtering */
124 * filtering */ 124#define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000
125#define E1000_MANC_EN_MNG2HOST 0x00200000 /* Enable MNG packets to host 125/* Enable MNG packets to host memory */
126 * memory */ 126#define E1000_MANC_EN_MNG2HOST 0x00200000
127 127
128/* Receive Control */ 128/* Receive Control */
129#define E1000_RCTL_EN 0x00000002 /* enable */ 129#define E1000_RCTL_EN 0x00000002 /* enable */
@@ -135,25 +135,26 @@
135#define E1000_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */ 135#define E1000_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */
136#define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */ 136#define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */
137#define E1000_RCTL_DTYP_PS 0x00000400 /* Packet Split descriptor */ 137#define E1000_RCTL_DTYP_PS 0x00000400 /* Packet Split descriptor */
138#define E1000_RCTL_RDMTS_HALF 0x00000000 /* rx desc min threshold size */ 138#define E1000_RCTL_RDMTS_HALF 0x00000000 /* Rx desc min threshold size */
139#define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */ 139#define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */
140#define E1000_RCTL_BAM 0x00008000 /* broadcast enable */ 140#define E1000_RCTL_BAM 0x00008000 /* broadcast enable */
141/* these buffer sizes are valid if E1000_RCTL_BSEX is 0 */ 141/* these buffer sizes are valid if E1000_RCTL_BSEX is 0 */
142#define E1000_RCTL_SZ_2048 0x00000000 /* rx buffer size 2048 */ 142#define E1000_RCTL_SZ_2048 0x00000000 /* Rx buffer size 2048 */
143#define E1000_RCTL_SZ_1024 0x00010000 /* rx buffer size 1024 */ 143#define E1000_RCTL_SZ_1024 0x00010000 /* Rx buffer size 1024 */
144#define E1000_RCTL_SZ_512 0x00020000 /* rx buffer size 512 */ 144#define E1000_RCTL_SZ_512 0x00020000 /* Rx buffer size 512 */
145#define E1000_RCTL_SZ_256 0x00030000 /* rx buffer size 256 */ 145#define E1000_RCTL_SZ_256 0x00030000 /* Rx buffer size 256 */
146/* these buffer sizes are valid if E1000_RCTL_BSEX is 1 */ 146/* these buffer sizes are valid if E1000_RCTL_BSEX is 1 */
147#define E1000_RCTL_SZ_16384 0x00010000 /* rx buffer size 16384 */ 147#define E1000_RCTL_SZ_16384 0x00010000 /* Rx buffer size 16384 */
148#define E1000_RCTL_SZ_8192 0x00020000 /* rx buffer size 8192 */ 148#define E1000_RCTL_SZ_8192 0x00020000 /* Rx buffer size 8192 */
149#define E1000_RCTL_SZ_4096 0x00030000 /* rx buffer size 4096 */ 149#define E1000_RCTL_SZ_4096 0x00030000 /* Rx buffer size 4096 */
150#define E1000_RCTL_VFE 0x00040000 /* vlan filter enable */ 150#define E1000_RCTL_VFE 0x00040000 /* vlan filter enable */
151#define E1000_RCTL_CFIEN 0x00080000 /* canonical form enable */ 151#define E1000_RCTL_CFIEN 0x00080000 /* canonical form enable */
152#define E1000_RCTL_CFI 0x00100000 /* canonical form indicator */ 152#define E1000_RCTL_CFI 0x00100000 /* canonical form indicator */
153#define E1000_RCTL_BSEX 0x02000000 /* Buffer size extension */ 153#define E1000_RCTL_BSEX 0x02000000 /* Buffer size extension */
154#define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */ 154#define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */
155 155
156/* Use byte values for the following shift parameters 156/*
157 * Use byte values for the following shift parameters
157 * Usage: 158 * Usage:
158 * psrctl |= (((ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) & 159 * psrctl |= (((ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) &
159 * E1000_PSRCTL_BSIZE0_MASK) | 160 * E1000_PSRCTL_BSIZE0_MASK) |
@@ -206,7 +207,8 @@
206#define E1000_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */ 207#define E1000_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */
207#define E1000_CTRL_PHY_RST 0x80000000 /* PHY Reset */ 208#define E1000_CTRL_PHY_RST 0x80000000 /* PHY Reset */
208 209
209/* Bit definitions for the Management Data IO (MDIO) and Management Data 210/*
211 * Bit definitions for the Management Data IO (MDIO) and Management Data
210 * Clock (MDC) pins in the Device Control Register. 212 * Clock (MDC) pins in the Device Control Register.
211 */ 213 */
212 214
@@ -279,7 +281,7 @@
279#define E1000_TXD_STAT_TC 0x00000004 /* Tx Underrun */ 281#define E1000_TXD_STAT_TC 0x00000004 /* Tx Underrun */
280 282
281/* Transmit Control */ 283/* Transmit Control */
282#define E1000_TCTL_EN 0x00000002 /* enable tx */ 284#define E1000_TCTL_EN 0x00000002 /* enable Tx */
283#define E1000_TCTL_PSP 0x00000008 /* pad short packets */ 285#define E1000_TCTL_PSP 0x00000008 /* pad short packets */
284#define E1000_TCTL_CT 0x00000ff0 /* collision threshold */ 286#define E1000_TCTL_CT 0x00000ff0 /* collision threshold */
285#define E1000_TCTL_COLD 0x003ff000 /* collision distance */ 287#define E1000_TCTL_COLD 0x003ff000 /* collision distance */
@@ -337,8 +339,8 @@
337#define E1000_KABGTXD_BGSQLBIAS 0x00050000 339#define E1000_KABGTXD_BGSQLBIAS 0x00050000
338 340
339/* PBA constants */ 341/* PBA constants */
340#define E1000_PBA_8K 0x0008 /* 8KB, default Rx allocation */ 342#define E1000_PBA_8K 0x0008 /* 8KB */
341#define E1000_PBA_16K 0x0010 /* 16KB, default TX allocation */ 343#define E1000_PBA_16K 0x0010 /* 16KB */
342 344
343#define E1000_PBS_16K E1000_PBA_16K 345#define E1000_PBS_16K E1000_PBA_16K
344 346
@@ -356,12 +358,13 @@
356/* Interrupt Cause Read */ 358/* Interrupt Cause Read */
357#define E1000_ICR_TXDW 0x00000001 /* Transmit desc written back */ 359#define E1000_ICR_TXDW 0x00000001 /* Transmit desc written back */
358#define E1000_ICR_LSC 0x00000004 /* Link Status Change */ 360#define E1000_ICR_LSC 0x00000004 /* Link Status Change */
359#define E1000_ICR_RXSEQ 0x00000008 /* rx sequence error */ 361#define E1000_ICR_RXSEQ 0x00000008 /* Rx sequence error */
360#define E1000_ICR_RXDMT0 0x00000010 /* rx desc min. threshold (0) */ 362#define E1000_ICR_RXDMT0 0x00000010 /* Rx desc min. threshold (0) */
361#define E1000_ICR_RXT0 0x00000080 /* rx timer intr (ring 0) */ 363#define E1000_ICR_RXT0 0x00000080 /* Rx timer intr (ring 0) */
362#define E1000_ICR_INT_ASSERTED 0x80000000 /* If this bit asserted, the driver should claim the interrupt */ 364#define E1000_ICR_INT_ASSERTED 0x80000000 /* If this bit asserted, the driver should claim the interrupt */
363 365
364/* This defines the bits that are set in the Interrupt Mask 366/*
367 * This defines the bits that are set in the Interrupt Mask
365 * Set/Read Register. Each bit is documented below: 368 * Set/Read Register. Each bit is documented below:
366 * o RXT0 = Receiver Timer Interrupt (ring 0) 369 * o RXT0 = Receiver Timer Interrupt (ring 0)
367 * o TXDW = Transmit Descriptor Written Back 370 * o TXDW = Transmit Descriptor Written Back
@@ -379,21 +382,22 @@
379/* Interrupt Mask Set */ 382/* Interrupt Mask Set */
380#define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ 383#define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */
381#define E1000_IMS_LSC E1000_ICR_LSC /* Link Status Change */ 384#define E1000_IMS_LSC E1000_ICR_LSC /* Link Status Change */
382#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */ 385#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* Rx sequence error */
383#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */ 386#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* Rx desc min. threshold */
384#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* rx timer intr */ 387#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* Rx timer intr */
385 388
386/* Interrupt Cause Set */ 389/* Interrupt Cause Set */
387#define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */ 390#define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */
388#define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */ 391#define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */
392#define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* Rx desc min. threshold */
389 393
390/* Transmit Descriptor Control */ 394/* Transmit Descriptor Control */
391#define E1000_TXDCTL_PTHRESH 0x0000003F /* TXDCTL Prefetch Threshold */ 395#define E1000_TXDCTL_PTHRESH 0x0000003F /* TXDCTL Prefetch Threshold */
392#define E1000_TXDCTL_WTHRESH 0x003F0000 /* TXDCTL Writeback Threshold */ 396#define E1000_TXDCTL_WTHRESH 0x003F0000 /* TXDCTL Writeback Threshold */
393#define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */ 397#define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */
394#define E1000_TXDCTL_MAX_TX_DESC_PREFETCH 0x0100001F /* GRAN=1, PTHRESH=31 */ 398#define E1000_TXDCTL_MAX_TX_DESC_PREFETCH 0x0100001F /* GRAN=1, PTHRESH=31 */
395#define E1000_TXDCTL_COUNT_DESC 0x00400000 /* Enable the counting of desc. 399/* Enable the counting of desc. still to be processed. */
396 still to be processed. */ 400#define E1000_TXDCTL_COUNT_DESC 0x00400000
397 401
398/* Flow Control Constants */ 402/* Flow Control Constants */
399#define FLOW_CONTROL_ADDRESS_LOW 0x00C28001 403#define FLOW_CONTROL_ADDRESS_LOW 0x00C28001
@@ -404,7 +408,8 @@
404#define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */ 408#define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */
405 409
406/* Receive Address */ 410/* Receive Address */
407/* Number of high/low register pairs in the RAR. The RAR (Receive Address 411/*
412 * Number of high/low register pairs in the RAR. The RAR (Receive Address
408 * Registers) holds the directed and multicast addresses that we monitor. 413 * Registers) holds the directed and multicast addresses that we monitor.
409 * Technically, we have 16 spots. However, we reserve one of these spots 414 * Technically, we have 16 spots. However, we reserve one of these spots
410 * (RAR[15]) for our directed address used by controllers with 415 * (RAR[15]) for our directed address used by controllers with
@@ -533,8 +538,8 @@
533#define E1000_EECD_REQ 0x00000040 /* NVM Access Request */ 538#define E1000_EECD_REQ 0x00000040 /* NVM Access Request */
534#define E1000_EECD_GNT 0x00000080 /* NVM Access Grant */ 539#define E1000_EECD_GNT 0x00000080 /* NVM Access Grant */
535#define E1000_EECD_SIZE 0x00000200 /* NVM Size (0=64 word 1=256 word) */ 540#define E1000_EECD_SIZE 0x00000200 /* NVM Size (0=64 word 1=256 word) */
536#define E1000_EECD_ADDR_BITS 0x00000400 /* NVM Addressing bits based on type 541/* NVM Addressing bits based on type (0-small, 1-large) */
537 * (0-small, 1-large) */ 542#define E1000_EECD_ADDR_BITS 0x00000400
538#define E1000_NVM_GRANT_ATTEMPTS 1000 /* NVM # attempts to gain grant */ 543#define E1000_NVM_GRANT_ATTEMPTS 1000 /* NVM # attempts to gain grant */
539#define E1000_EECD_AUTO_RD 0x00000200 /* NVM Auto Read done */ 544#define E1000_EECD_AUTO_RD 0x00000200 /* NVM Auto Read done */
540#define E1000_EECD_SIZE_EX_MASK 0x00007800 /* NVM Size */ 545#define E1000_EECD_SIZE_EX_MASK 0x00007800 /* NVM Size */
@@ -626,7 +631,8 @@
626#define MAX_PHY_MULTI_PAGE_REG 0xF 631#define MAX_PHY_MULTI_PAGE_REG 0xF
627 632
628/* Bit definitions for valid PHY IDs. */ 633/* Bit definitions for valid PHY IDs. */
629/* I = Integrated 634/*
635 * I = Integrated
630 * E = External 636 * E = External
631 */ 637 */
632#define M88E1000_E_PHY_ID 0x01410C50 638#define M88E1000_E_PHY_ID 0x01410C50
@@ -653,37 +659,37 @@
653#define M88E1000_PSCR_MDI_MANUAL_MODE 0x0000 /* MDI Crossover Mode bits 6:5 */ 659#define M88E1000_PSCR_MDI_MANUAL_MODE 0x0000 /* MDI Crossover Mode bits 6:5 */
654 /* Manual MDI configuration */ 660 /* Manual MDI configuration */
655#define M88E1000_PSCR_MDIX_MANUAL_MODE 0x0020 /* Manual MDIX configuration */ 661#define M88E1000_PSCR_MDIX_MANUAL_MODE 0x0020 /* Manual MDIX configuration */
656#define M88E1000_PSCR_AUTO_X_1000T 0x0040 /* 1000BASE-T: Auto crossover, 662/* 1000BASE-T: Auto crossover, 100BASE-TX/10BASE-T: MDI Mode */
657 * 100BASE-TX/10BASE-T: 663#define M88E1000_PSCR_AUTO_X_1000T 0x0040
658 * MDI Mode 664/* Auto crossover enabled all speeds */
659 */ 665#define M88E1000_PSCR_AUTO_X_MODE 0x0060
660#define M88E1000_PSCR_AUTO_X_MODE 0x0060 /* Auto crossover enabled 666/*
661 * all speeds. 667 * 1=Enable Extended 10BASE-T distance (Lower 10BASE-T Rx Threshold)
662 */ 668 * 0=Normal 10BASE-T Rx Threshold
663 /* 1=Enable Extended 10BASE-T distance 669 */
664 * (Lower 10BASE-T RX Threshold) 670#define M88E1000_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Transmit */
665 * 0=Normal 10BASE-T RX Threshold */
666 /* 1=5-Bit interface in 100BASE-TX
667 * 0=MII interface in 100BASE-TX */
668#define M88E1000_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Transmit */
669 671
670/* M88E1000 PHY Specific Status Register */ 672/* M88E1000 PHY Specific Status Register */
671#define M88E1000_PSSR_REV_POLARITY 0x0002 /* 1=Polarity reversed */ 673#define M88E1000_PSSR_REV_POLARITY 0x0002 /* 1=Polarity reversed */
672#define M88E1000_PSSR_DOWNSHIFT 0x0020 /* 1=Downshifted */ 674#define M88E1000_PSSR_DOWNSHIFT 0x0020 /* 1=Downshifted */
673#define M88E1000_PSSR_MDIX 0x0040 /* 1=MDIX; 0=MDI */ 675#define M88E1000_PSSR_MDIX 0x0040 /* 1=MDIX; 0=MDI */
674#define M88E1000_PSSR_CABLE_LENGTH 0x0380 /* 0=<50M;1=50-80M;2=80-110M; 676/* 0=<50M; 1=50-80M; 2=80-110M; 3=110-140M; 4=>140M */
675 * 3=110-140M;4=>140M */ 677#define M88E1000_PSSR_CABLE_LENGTH 0x0380
676#define M88E1000_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */ 678#define M88E1000_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */
677#define M88E1000_PSSR_1000MBS 0x8000 /* 10=1000Mbs */ 679#define M88E1000_PSSR_1000MBS 0x8000 /* 10=1000Mbs */
678 680
679#define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7 681#define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7
680 682
681/* Number of times we will attempt to autonegotiate before downshifting if we 683/*
682 * are the master */ 684 * Number of times we will attempt to autonegotiate before downshifting if we
685 * are the master
686 */
683#define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00 687#define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00
684#define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X 0x0000 688#define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X 0x0000
685/* Number of times we will attempt to autonegotiate before downshifting if we 689/*
686 * are the slave */ 690 * Number of times we will attempt to autonegotiate before downshifting if we
691 * are the slave
692 */
687#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK 0x0300 693#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK 0x0300
688#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X 0x0100 694#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X 0x0100
689#define M88E1000_EPSCR_TX_CLK_25 0x0070 /* 25 MHz TX_CLK */ 695#define M88E1000_EPSCR_TX_CLK_25 0x0070 /* 25 MHz TX_CLK */
@@ -692,7 +698,8 @@
692#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK 0x0E00 698#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK 0x0E00
693#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X 0x0800 699#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X 0x0800
694 700
695/* Bits... 701/*
702 * Bits...
696 * 15-5: page 703 * 15-5: page
697 * 4-0: register offset 704 * 4-0: register offset
698 */ 705 */
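
[Editor's note] The reflowed M88E1000 comments above spell out the coarse cable-length buckets (0=<50M; 1=50-80M; 2=80-110M; 3=110-140M; 4=>140M) encoded in bits 9:7 of the PHY specific status register. A small sketch of extracting that field with the mask and shift from this hunk; the sample register value is hypothetical.

#include <stdint.h>
#include <stdio.h>

/* Values taken from the defines reflowed above. */
#define M88E1000_PSSR_CABLE_LENGTH        0x0380
#define M88E1000_PSSR_CABLE_LENGTH_SHIFT  7

static const char *cable_bucket[] = {
	"<50M", "50-80M", "80-110M", "110-140M", ">140M",
};

/* Extract the coarse cable-length bucket index from a PHY status value. */
static unsigned int cable_length_index(uint16_t phy_status)
{
	return (phy_status & M88E1000_PSSR_CABLE_LENGTH) >>
	       M88E1000_PSSR_CABLE_LENGTH_SHIFT;
}

int main(void)
{
	uint16_t status = 0x0100;  /* hypothetical reading: encodes bucket 2 */
	unsigned int idx = cable_length_index(status);

	if (idx < 5)               /* indices 5-7 are not described above */
		printf("cable length: %s\n", cable_bucket[idx]);
	return 0;
}
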
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index 327c0620da31..5a89dff52264 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel PRO/1000 Linux driver 3 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2007 Intel Corporation. 4 Copyright(c) 1999 - 2008 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -61,7 +61,7 @@ struct e1000_info;
61 ndev_printk(KERN_NOTICE , netdev, format, ## arg) 61 ndev_printk(KERN_NOTICE , netdev, format, ## arg)
62 62
63 63
64/* TX/RX descriptor defines */ 64/* Tx/Rx descriptor defines */
65#define E1000_DEFAULT_TXD 256 65#define E1000_DEFAULT_TXD 256
66#define E1000_MAX_TXD 4096 66#define E1000_MAX_TXD 4096
67#define E1000_MIN_TXD 80 67#define E1000_MIN_TXD 80
@@ -114,13 +114,13 @@ struct e1000_buffer {
114 dma_addr_t dma; 114 dma_addr_t dma;
115 struct sk_buff *skb; 115 struct sk_buff *skb;
116 union { 116 union {
117 /* TX */ 117 /* Tx */
118 struct { 118 struct {
119 unsigned long time_stamp; 119 unsigned long time_stamp;
120 u16 length; 120 u16 length;
121 u16 next_to_watch; 121 u16 next_to_watch;
122 }; 122 };
123 /* RX */ 123 /* Rx */
124 /* arrays of page information for packet split */ 124 /* arrays of page information for packet split */
125 struct e1000_ps_page *ps_pages; 125 struct e1000_ps_page *ps_pages;
126 }; 126 };
@@ -167,9 +167,6 @@ struct e1000_adapter {
167 167
168 spinlock_t tx_queue_lock; /* prevent concurrent tail updates */ 168 spinlock_t tx_queue_lock; /* prevent concurrent tail updates */
169 169
170 /* this is still needed for 82571 and above */
171 atomic_t irq_sem;
172
173 /* track device up/down/testing state */ 170 /* track device up/down/testing state */
174 unsigned long state; 171 unsigned long state;
175 172
@@ -180,7 +177,7 @@ struct e1000_adapter {
180 u16 rx_itr; 177 u16 rx_itr;
181 178
182 /* 179 /*
183 * TX 180 * Tx
184 */ 181 */
185 struct e1000_ring *tx_ring /* One per active queue */ 182 struct e1000_ring *tx_ring /* One per active queue */
186 ____cacheline_aligned_in_smp; 183 ____cacheline_aligned_in_smp;
@@ -202,7 +199,7 @@ struct e1000_adapter {
202 unsigned int total_rx_bytes; 199 unsigned int total_rx_bytes;
203 unsigned int total_rx_packets; 200 unsigned int total_rx_packets;
204 201
205 /* TX stats */ 202 /* Tx stats */
206 u64 tpt_old; 203 u64 tpt_old;
207 u64 colc_old; 204 u64 colc_old;
208 u64 gotcl_old; 205 u64 gotcl_old;
@@ -214,7 +211,7 @@ struct e1000_adapter {
214 u32 tx_dma_failed; 211 u32 tx_dma_failed;
215 212
216 /* 213 /*
217 * RX 214 * Rx
218 */ 215 */
219 bool (*clean_rx) (struct e1000_adapter *adapter, 216 bool (*clean_rx) (struct e1000_adapter *adapter,
220 int *work_done, int work_to_do) 217 int *work_done, int work_to_do)
@@ -226,7 +223,7 @@ struct e1000_adapter {
226 u32 rx_int_delay; 223 u32 rx_int_delay;
227 u32 rx_abs_int_delay; 224 u32 rx_abs_int_delay;
228 225
229 /* RX stats */ 226 /* Rx stats */
230 u64 hw_csum_err; 227 u64 hw_csum_err;
231 u64 hw_csum_good; 228 u64 hw_csum_good;
232 u64 rx_hdr_split; 229 u64 rx_hdr_split;
@@ -237,6 +234,8 @@ struct e1000_adapter {
237 234
238 unsigned int rx_ps_pages; 235 unsigned int rx_ps_pages;
239 u16 rx_ps_bsize0; 236 u16 rx_ps_bsize0;
237 u32 max_frame_size;
238 u32 min_frame_size;
240 239
241 /* OS defined structs */ 240 /* OS defined structs */
242 struct net_device *netdev; 241 struct net_device *netdev;
@@ -261,7 +260,7 @@ struct e1000_adapter {
261 u32 wol; 260 u32 wol;
262 u32 pba; 261 u32 pba;
263 262
264 u8 fc_autoneg; 263 bool fc_autoneg;
265 264
266 unsigned long led_status; 265 unsigned long led_status;
267 266
@@ -272,7 +271,7 @@ struct e1000_info {
272 enum e1000_mac_type mac; 271 enum e1000_mac_type mac;
273 unsigned int flags; 272 unsigned int flags;
274 u32 pba; 273 u32 pba;
275 s32 (*get_invariants)(struct e1000_adapter *); 274 s32 (*get_variants)(struct e1000_adapter *);
276 struct e1000_mac_operations *mac_ops; 275 struct e1000_mac_operations *mac_ops;
277 struct e1000_phy_operations *phy_ops; 276 struct e1000_phy_operations *phy_ops;
278 struct e1000_nvm_operations *nvm_ops; 277 struct e1000_nvm_operations *nvm_ops;
@@ -308,6 +307,7 @@ struct e1000_info {
308#define FLAG_MSI_ENABLED (1 << 27) 307#define FLAG_MSI_ENABLED (1 << 27)
309#define FLAG_RX_CSUM_ENABLED (1 << 28) 308#define FLAG_RX_CSUM_ENABLED (1 << 28)
310#define FLAG_TSO_FORCE (1 << 29) 309#define FLAG_TSO_FORCE (1 << 29)
310#define FLAG_RX_RESTART_NOW (1 << 30)
311 311
312#define E1000_RX_DESC_PS(R, i) \ 312#define E1000_RX_DESC_PS(R, i) \
313 (&(((union e1000_rx_desc_packet_split *)((R).desc))[i])) 313 (&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))
@@ -357,7 +357,7 @@ extern struct e1000_info e1000_ich8_info;
357extern struct e1000_info e1000_ich9_info; 357extern struct e1000_info e1000_ich9_info;
358extern struct e1000_info e1000_es2_info; 358extern struct e1000_info e1000_es2_info;
359 359
360extern s32 e1000e_read_part_num(struct e1000_hw *hw, u32 *part_num); 360extern s32 e1000e_read_pba_num(struct e1000_hw *hw, u32 *pba_num);
361 361
362extern s32 e1000e_commit_phy(struct e1000_hw *hw); 362extern s32 e1000e_commit_phy(struct e1000_hw *hw);
363 363
@@ -390,9 +390,11 @@ extern s32 e1000e_copper_link_setup_igp(struct e1000_hw *hw);
390extern s32 e1000e_setup_link(struct e1000_hw *hw); 390extern s32 e1000e_setup_link(struct e1000_hw *hw);
391extern void e1000e_clear_vfta(struct e1000_hw *hw); 391extern void e1000e_clear_vfta(struct e1000_hw *hw);
392extern void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count); 392extern void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count);
393extern void e1000e_mc_addr_list_update_generic(struct e1000_hw *hw, 393extern void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw,
394 u8 *mc_addr_list, u32 mc_addr_count, 394 u8 *mc_addr_list,
395 u32 rar_used_count, u32 rar_count); 395 u32 mc_addr_count,
396 u32 rar_used_count,
397 u32 rar_count);
396extern void e1000e_rar_set(struct e1000_hw *hw, u8 *addr, u32 index); 398extern void e1000e_rar_set(struct e1000_hw *hw, u8 *addr, u32 index);
397extern s32 e1000e_set_fc_watermarks(struct e1000_hw *hw); 399extern s32 e1000e_set_fc_watermarks(struct e1000_hw *hw);
398extern void e1000e_set_pcie_no_snoop(struct e1000_hw *hw, u32 no_snoop); 400extern void e1000e_set_pcie_no_snoop(struct e1000_hw *hw, u32 no_snoop);
@@ -462,7 +464,6 @@ extern s32 e1000e_acquire_nvm(struct e1000_hw *hw);
462extern s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); 464extern s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
463extern s32 e1000e_update_nvm_checksum_generic(struct e1000_hw *hw); 465extern s32 e1000e_update_nvm_checksum_generic(struct e1000_hw *hw);
464extern s32 e1000e_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg); 466extern s32 e1000e_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg);
465extern s32 e1000e_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
466extern s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); 467extern s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
467extern s32 e1000e_validate_nvm_checksum_generic(struct e1000_hw *hw); 468extern s32 e1000e_validate_nvm_checksum_generic(struct e1000_hw *hw);
468extern void e1000e_release_nvm(struct e1000_hw *hw); 469extern void e1000e_release_nvm(struct e1000_hw *hw);
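Annotation: the struct e1000_info callback is renamed from get_invariants to get_variants in this series (the per-family implementations follow in the es2lan hunks below). A hypothetical sketch of how a caller consumes the hook; only the get_variants member and its s32 return type come from the hunk above, the wrapper name and error mapping are illustrative assumptions.

/* Hypothetical caller, assuming the declarations from e1000.h above. */
static int example_setup_variants(struct e1000_adapter *adapter,
				  const struct e1000_info *ei)
{
	s32 rc = ei->get_variants(adapter);	/* was ei->get_invariants() */

	return rc ? -EIO : 0;
}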
diff --git a/drivers/net/e1000e/es2lan.c b/drivers/net/e1000e/es2lan.c
index 88657adf965f..d59a99ae44be 100644
--- a/drivers/net/e1000e/es2lan.c
+++ b/drivers/net/e1000e/es2lan.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel PRO/1000 Linux driver 3 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2007 Intel Corporation. 4 Copyright(c) 1999 - 2008 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -92,7 +92,8 @@
92/* In-Band Control Register (Page 194, Register 18) */ 92/* In-Band Control Register (Page 194, Register 18) */
93#define GG82563_ICR_DIS_PADDING 0x0010 /* Disable Padding */ 93#define GG82563_ICR_DIS_PADDING 0x0010 /* Disable Padding */
94 94
95/* A table for the GG82563 cable length where the range is defined 95/*
96 * A table for the GG82563 cable length where the range is defined
96 * with a lower bound at "index" and the upper bound at 97 * with a lower bound at "index" and the upper bound at
97 * "index + 5". 98 * "index + 5".
98 */ 99 */
@@ -118,7 +119,7 @@ static s32 e1000_init_phy_params_80003es2lan(struct e1000_hw *hw)
118 struct e1000_phy_info *phy = &hw->phy; 119 struct e1000_phy_info *phy = &hw->phy;
119 s32 ret_val; 120 s32 ret_val;
120 121
121 if (hw->media_type != e1000_media_type_copper) { 122 if (hw->phy.media_type != e1000_media_type_copper) {
122 phy->type = e1000_phy_none; 123 phy->type = e1000_phy_none;
123 return 0; 124 return 0;
124 } 125 }
@@ -167,15 +168,20 @@ static s32 e1000_init_nvm_params_80003es2lan(struct e1000_hw *hw)
167 break; 168 break;
168 } 169 }
169 170
170 nvm->type = e1000_nvm_eeprom_spi; 171 nvm->type = e1000_nvm_eeprom_spi;
171 172
172 size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >> 173 size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
173 E1000_EECD_SIZE_EX_SHIFT); 174 E1000_EECD_SIZE_EX_SHIFT);
174 175
175 /* Added to a constant, "size" becomes the left-shift value 176 /*
177 * Added to a constant, "size" becomes the left-shift value
176 * for setting word_size. 178 * for setting word_size.
177 */ 179 */
178 size += NVM_WORD_SIZE_BASE_SHIFT; 180 size += NVM_WORD_SIZE_BASE_SHIFT;
181
182 /* EEPROM access above 16k is unsupported */
183 if (size > 14)
184 size = 14;
179 nvm->word_size = 1 << size; 185 nvm->word_size = 1 << size;
180 186
181 return 0; 187 return 0;
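Annotation: the added clamp bounds the EECD-derived shift before it is turned into nvm->word_size, so the computed size can never claim more EEPROM than the part supports. A minimal stand-alone sketch of the same computation; the constant names mirror the driver, the helper itself is illustrative.

/* Illustrative helper; constants as used in the hunk above. */
static u16 example_nvm_word_size(u32 eecd)
{
	u16 size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
			 E1000_EECD_SIZE_EX_SHIFT);

	size += NVM_WORD_SIZE_BASE_SHIFT;

	/* EEPROM access above 16K words is unsupported, so cap the shift */
	if (size > 14)
		size = 14;

	return 1 << size;	/* at most 16384 16-bit words */
}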
@@ -196,10 +202,10 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter)
196 /* Set media type */ 202 /* Set media type */
197 switch (adapter->pdev->device) { 203 switch (adapter->pdev->device) {
198 case E1000_DEV_ID_80003ES2LAN_SERDES_DPT: 204 case E1000_DEV_ID_80003ES2LAN_SERDES_DPT:
199 hw->media_type = e1000_media_type_internal_serdes; 205 hw->phy.media_type = e1000_media_type_internal_serdes;
200 break; 206 break;
201 default: 207 default:
202 hw->media_type = e1000_media_type_copper; 208 hw->phy.media_type = e1000_media_type_copper;
203 break; 209 break;
204 } 210 }
205 211
@@ -208,11 +214,10 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter)
208 /* Set rar entry count */ 214 /* Set rar entry count */
209 mac->rar_entry_count = E1000_RAR_ENTRIES; 215 mac->rar_entry_count = E1000_RAR_ENTRIES;
210 /* Set if manageability features are enabled. */ 216 /* Set if manageability features are enabled. */
211 mac->arc_subsystem_valid = 217 mac->arc_subsystem_valid = (er32(FWSM) & E1000_FWSM_MODE_MASK) ? 1 : 0;
212 (er32(FWSM) & E1000_FWSM_MODE_MASK) ? 1 : 0;
213 218
214 /* check for link */ 219 /* check for link */
215 switch (hw->media_type) { 220 switch (hw->phy.media_type) {
216 case e1000_media_type_copper: 221 case e1000_media_type_copper:
217 func->setup_physical_interface = e1000_setup_copper_link_80003es2lan; 222 func->setup_physical_interface = e1000_setup_copper_link_80003es2lan;
218 func->check_for_link = e1000e_check_for_copper_link; 223 func->check_for_link = e1000e_check_for_copper_link;
@@ -233,7 +238,7 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter)
233 return 0; 238 return 0;
234} 239}
235 240
236static s32 e1000_get_invariants_80003es2lan(struct e1000_adapter *adapter) 241static s32 e1000_get_variants_80003es2lan(struct e1000_adapter *adapter)
237{ 242{
238 struct e1000_hw *hw = &adapter->hw; 243 struct e1000_hw *hw = &adapter->hw;
239 s32 rc; 244 s32 rc;
@@ -344,8 +349,10 @@ static s32 e1000_acquire_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask)
344 if (!(swfw_sync & (fwmask | swmask))) 349 if (!(swfw_sync & (fwmask | swmask)))
345 break; 350 break;
346 351
347 /* Firmware currently using resource (fwmask) 352 /*
348 * or other software thread using resource (swmask) */ 353 * Firmware currently using resource (fwmask)
354 * or other software thread using resource (swmask)
355 */
349 e1000e_put_hw_semaphore(hw); 356 e1000e_put_hw_semaphore(hw);
350 mdelay(5); 357 mdelay(5);
351 i++; 358 i++;
@@ -407,7 +414,8 @@ static s32 e1000_read_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
407 if ((offset & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) 414 if ((offset & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG)
408 page_select = GG82563_PHY_PAGE_SELECT; 415 page_select = GG82563_PHY_PAGE_SELECT;
409 else 416 else
410 /* Use Alternative Page Select register to access 417 /*
418 * Use Alternative Page Select register to access
411 * registers 30 and 31 419 * registers 30 and 31
412 */ 420 */
413 page_select = GG82563_PHY_PAGE_SELECT_ALT; 421 page_select = GG82563_PHY_PAGE_SELECT_ALT;
@@ -417,7 +425,8 @@ static s32 e1000_read_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
417 if (ret_val) 425 if (ret_val)
418 return ret_val; 426 return ret_val;
419 427
420 /* The "ready" bit in the MDIC register may be incorrectly set 428 /*
429 * The "ready" bit in the MDIC register may be incorrectly set
421 * before the device has completed the "Page Select" MDI 430 * before the device has completed the "Page Select" MDI
422 * transaction. So we wait 200us after each MDI command... 431 * transaction. So we wait 200us after each MDI command...
423 */ 432 */
@@ -462,7 +471,8 @@ static s32 e1000_write_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
462 if ((offset & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) 471 if ((offset & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG)
463 page_select = GG82563_PHY_PAGE_SELECT; 472 page_select = GG82563_PHY_PAGE_SELECT;
464 else 473 else
465 /* Use Alternative Page Select register to access 474 /*
475 * Use Alternative Page Select register to access
466 * registers 30 and 31 476 * registers 30 and 31
467 */ 477 */
468 page_select = GG82563_PHY_PAGE_SELECT_ALT; 478 page_select = GG82563_PHY_PAGE_SELECT_ALT;
@@ -473,7 +483,8 @@ static s32 e1000_write_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
473 return ret_val; 483 return ret_val;
474 484
475 485
476 /* The "ready" bit in the MDIC register may be incorrectly set 486 /*
487 * The "ready" bit in the MDIC register may be incorrectly set
477 * before the device has completed the "Page Select" MDI 488 * before the device has completed the "Page Select" MDI
478 * transaction. So we wait 200us after each MDI command... 489 * transaction. So we wait 200us after each MDI command...
479 */ 490 */
@@ -554,7 +565,8 @@ static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw)
554 u16 phy_data; 565 u16 phy_data;
555 bool link; 566 bool link;
556 567
557 /* Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI 568 /*
569 * Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI
558 * forced whenever speed and duplex are forced. 570 * forced whenever speed and duplex are forced.
559 */ 571 */
560 ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); 572 ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
@@ -583,7 +595,7 @@ static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw)
583 595
584 udelay(1); 596 udelay(1);
585 597
586 if (hw->phy.wait_for_link) { 598 if (hw->phy.autoneg_wait_to_complete) {
587 hw_dbg(hw, "Waiting for forced speed/duplex link " 599 hw_dbg(hw, "Waiting for forced speed/duplex link "
588 "on GG82563 phy.\n"); 600 "on GG82563 phy.\n");
589 601
@@ -593,7 +605,8 @@ static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw)
593 return ret_val; 605 return ret_val;
594 606
595 if (!link) { 607 if (!link) {
596 /* We didn't get link. 608 /*
609 * We didn't get link.
597 * Reset the DSP and cross our fingers. 610 * Reset the DSP and cross our fingers.
598 */ 611 */
599 ret_val = e1000e_phy_reset_dsp(hw); 612 ret_val = e1000e_phy_reset_dsp(hw);
@@ -612,7 +625,8 @@ static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw)
612 if (ret_val) 625 if (ret_val)
613 return ret_val; 626 return ret_val;
614 627
615 /* Resetting the phy means we need to verify the TX_CLK corresponds 628 /*
629 * Resetting the phy means we need to verify the TX_CLK corresponds
616 * to the link speed. 10Mbps -> 2.5MHz, else 25MHz. 630 * to the link speed. 10Mbps -> 2.5MHz, else 25MHz.
617 */ 631 */
618 phy_data &= ~GG82563_MSCR_TX_CLK_MASK; 632 phy_data &= ~GG82563_MSCR_TX_CLK_MASK;
@@ -621,7 +635,8 @@ static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw)
621 else 635 else
622 phy_data |= GG82563_MSCR_TX_CLK_100MBPS_25; 636 phy_data |= GG82563_MSCR_TX_CLK_100MBPS_25;
623 637
624 /* In addition, we must re-enable CRS on Tx for both half and full 638 /*
639 * In addition, we must re-enable CRS on Tx for both half and full
625 * duplex. 640 * duplex.
626 */ 641 */
627 phy_data |= GG82563_MSCR_ASSERT_CRS_ON_TX; 642 phy_data |= GG82563_MSCR_ASSERT_CRS_ON_TX;
@@ -671,7 +686,7 @@ static s32 e1000_get_link_up_info_80003es2lan(struct e1000_hw *hw, u16 *speed,
671{ 686{
672 s32 ret_val; 687 s32 ret_val;
673 688
674 if (hw->media_type == e1000_media_type_copper) { 689 if (hw->phy.media_type == e1000_media_type_copper) {
675 ret_val = e1000e_get_speed_and_duplex_copper(hw, 690 ret_val = e1000e_get_speed_and_duplex_copper(hw,
676 speed, 691 speed,
677 duplex); 692 duplex);
@@ -704,7 +719,8 @@ static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw)
704 u32 icr; 719 u32 icr;
705 s32 ret_val; 720 s32 ret_val;
706 721
707 /* Prevent the PCI-E bus from sticking if there is no TLP connection 722 /*
723 * Prevent the PCI-E bus from sticking if there is no TLP connection
708 * on the last TLP read/write transaction when MAC is reset. 724 * on the last TLP read/write transaction when MAC is reset.
709 */ 725 */
710 ret_val = e1000e_disable_pcie_master(hw); 726 ret_val = e1000e_disable_pcie_master(hw);
@@ -776,16 +792,16 @@ static s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw)
776 ret_val = e1000e_setup_link(hw); 792 ret_val = e1000e_setup_link(hw);
777 793
778 /* Set the transmit descriptor write-back policy */ 794 /* Set the transmit descriptor write-back policy */
779 reg_data = er32(TXDCTL); 795 reg_data = er32(TXDCTL(0));
780 reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) | 796 reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) |
781 E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC; 797 E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC;
782 ew32(TXDCTL, reg_data); 798 ew32(TXDCTL(0), reg_data);
783 799
784 /* ...for both queues. */ 800 /* ...for both queues. */
785 reg_data = er32(TXDCTL1); 801 reg_data = er32(TXDCTL(1));
786 reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) | 802 reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) |
787 E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC; 803 E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC;
788 ew32(TXDCTL1, reg_data); 804 ew32(TXDCTL(1), reg_data);
789 805
790 /* Enable retransmit on late collisions */ 806 /* Enable retransmit on late collisions */
791 reg_data = er32(TCTL); 807 reg_data = er32(TCTL);
@@ -808,7 +824,8 @@ static s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw)
808 reg_data &= ~0x00100000; 824 reg_data &= ~0x00100000;
809 E1000_WRITE_REG_ARRAY(hw, E1000_FFLT, 0x0001, reg_data); 825 E1000_WRITE_REG_ARRAY(hw, E1000_FFLT, 0x0001, reg_data);
810 826
811 /* Clear all of the statistics registers (clear on read). It is 827 /*
828 * Clear all of the statistics registers (clear on read). It is
812 * important that we do this after we have tried to establish link 829 * important that we do this after we have tried to establish link
813 * because the symbol error count will increment wildly if there 830 * because the symbol error count will increment wildly if there
814 * is no link. 831 * is no link.
@@ -829,29 +846,29 @@ static void e1000_initialize_hw_bits_80003es2lan(struct e1000_hw *hw)
829 u32 reg; 846 u32 reg;
830 847
831 /* Transmit Descriptor Control 0 */ 848 /* Transmit Descriptor Control 0 */
832 reg = er32(TXDCTL); 849 reg = er32(TXDCTL(0));
833 reg |= (1 << 22); 850 reg |= (1 << 22);
834 ew32(TXDCTL, reg); 851 ew32(TXDCTL(0), reg);
835 852
836 /* Transmit Descriptor Control 1 */ 853 /* Transmit Descriptor Control 1 */
837 reg = er32(TXDCTL1); 854 reg = er32(TXDCTL(1));
838 reg |= (1 << 22); 855 reg |= (1 << 22);
839 ew32(TXDCTL1, reg); 856 ew32(TXDCTL(1), reg);
840 857
841 /* Transmit Arbitration Control 0 */ 858 /* Transmit Arbitration Control 0 */
842 reg = er32(TARC0); 859 reg = er32(TARC(0));
843 reg &= ~(0xF << 27); /* 30:27 */ 860 reg &= ~(0xF << 27); /* 30:27 */
844 if (hw->media_type != e1000_media_type_copper) 861 if (hw->phy.media_type != e1000_media_type_copper)
845 reg &= ~(1 << 20); 862 reg &= ~(1 << 20);
846 ew32(TARC0, reg); 863 ew32(TARC(0), reg);
847 864
848 /* Transmit Arbitration Control 1 */ 865 /* Transmit Arbitration Control 1 */
849 reg = er32(TARC1); 866 reg = er32(TARC(1));
850 if (er32(TCTL) & E1000_TCTL_MULR) 867 if (er32(TCTL) & E1000_TCTL_MULR)
851 reg &= ~(1 << 28); 868 reg &= ~(1 << 28);
852 else 869 else
853 reg |= (1 << 28); 870 reg |= (1 << 28);
854 ew32(TARC1, reg); 871 ew32(TARC(1), reg);
855} 872}
856 873
857/** 874/**
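Annotation: the fixed TXDCTL/TXDCTL1 and TARC0/TARC1 register pairs used above are replaced by indexed macros, so both transmit queues are programmed through one parameterised accessor. The base offsets and stride below are assumptions for illustration only (the real definitions are part of the hw.h changes elsewhere in this merge); they show the pattern being adopted.

/* Illustrative only: indexed per-queue register macros of this kind.
 * Offsets and stride are assumptions, not taken from hw.h. */
#define EX_TXDCTL(_n)	(0x03828 + ((_n) * 0x100))	/* Tx Descriptor Control n */
#define EX_TARC(_n)	(0x03840 + ((_n) * 0x100))	/* Tx Arbitration Count n */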
@@ -881,7 +898,8 @@ static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw)
881 if (ret_val) 898 if (ret_val)
882 return ret_val; 899 return ret_val;
883 900
884 /* Options: 901 /*
902 * Options:
885 * MDI/MDI-X = 0 (default) 903 * MDI/MDI-X = 0 (default)
886 * 0 - Auto for all speeds 904 * 0 - Auto for all speeds
887 * 1 - MDI mode 905 * 1 - MDI mode
@@ -907,7 +925,8 @@ static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw)
907 break; 925 break;
908 } 926 }
909 927
910 /* Options: 928 /*
929 * Options:
911 * disable_polarity_correction = 0 (default) 930 * disable_polarity_correction = 0 (default)
912 * Automatic Correction for Reversed Cable Polarity 931 * Automatic Correction for Reversed Cable Polarity
913 * 0 - Disabled 932 * 0 - Disabled
@@ -928,10 +947,9 @@ static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw)
928 return ret_val; 947 return ret_val;
929 } 948 }
930 949
931 /* Bypass RX and TX FIFO's */ 950 /* Bypass Rx and Tx FIFO's */
932 ret_val = e1000e_write_kmrn_reg(hw, 951 ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL,
933 E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL, 952 E1000_KMRNCTRLSTA_FIFO_CTRL_RX_BYPASS |
934 E1000_KMRNCTRLSTA_FIFO_CTRL_RX_BYPASS |
935 E1000_KMRNCTRLSTA_FIFO_CTRL_TX_BYPASS); 953 E1000_KMRNCTRLSTA_FIFO_CTRL_TX_BYPASS);
936 if (ret_val) 954 if (ret_val)
937 return ret_val; 955 return ret_val;
@@ -953,7 +971,8 @@ static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw)
953 if (ret_val) 971 if (ret_val)
954 return ret_val; 972 return ret_val;
955 973
956 /* Do not init these registers when the HW is in IAMT mode, since the 974 /*
975 * Do not init these registers when the HW is in IAMT mode, since the
957 * firmware will have already initialized them. We only initialize 976 * firmware will have already initialized them. We only initialize
958 * them if the HW is not in IAMT mode. 977 * them if the HW is not in IAMT mode.
959 */ 978 */
@@ -974,7 +993,8 @@ static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw)
974 return ret_val; 993 return ret_val;
975 } 994 }
976 995
977 /* Workaround: Disable padding in Kumeran interface in the MAC 996 /*
997 * Workaround: Disable padding in Kumeran interface in the MAC
978 * and in the PHY to avoid CRC errors. 998 * and in the PHY to avoid CRC errors.
979 */ 999 */
980 ret_val = e1e_rphy(hw, GG82563_PHY_INBAND_CTRL, &data); 1000 ret_val = e1e_rphy(hw, GG82563_PHY_INBAND_CTRL, &data);
@@ -1007,9 +1027,11 @@ static s32 e1000_setup_copper_link_80003es2lan(struct e1000_hw *hw)
1007 ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); 1027 ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
1008 ew32(CTRL, ctrl); 1028 ew32(CTRL, ctrl);
1009 1029
1010 /* Set the mac to wait the maximum time between each 1030 /*
1031 * Set the mac to wait the maximum time between each
1011 * iteration and increase the max iterations when 1032 * iteration and increase the max iterations when
1012 * polling the phy; this fixes erroneous timeouts at 10Mbps. */ 1033 * polling the phy; this fixes erroneous timeouts at 10Mbps.
1034 */
1013 ret_val = e1000e_write_kmrn_reg(hw, GG82563_REG(0x34, 4), 0xFFFF); 1035 ret_val = e1000e_write_kmrn_reg(hw, GG82563_REG(0x34, 4), 0xFFFF);
1014 if (ret_val) 1036 if (ret_val)
1015 return ret_val; 1037 return ret_val;
@@ -1026,9 +1048,8 @@ static s32 e1000_setup_copper_link_80003es2lan(struct e1000_hw *hw)
1026 if (ret_val) 1048 if (ret_val)
1027 return ret_val; 1049 return ret_val;
1028 reg_data |= E1000_KMRNCTRLSTA_INB_CTRL_DIS_PADDING; 1050 reg_data |= E1000_KMRNCTRLSTA_INB_CTRL_DIS_PADDING;
1029 ret_val = e1000e_write_kmrn_reg(hw, 1051 ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_OFFSET_INB_CTRL,
1030 E1000_KMRNCTRLSTA_OFFSET_INB_CTRL, 1052 reg_data);
1031 reg_data);
1032 if (ret_val) 1053 if (ret_val)
1033 return ret_val; 1054 return ret_val;
1034 1055
@@ -1056,9 +1077,8 @@ static s32 e1000_cfg_kmrn_10_100_80003es2lan(struct e1000_hw *hw, u16 duplex)
1056 u16 reg_data; 1077 u16 reg_data;
1057 1078
1058 reg_data = E1000_KMRNCTRLSTA_HD_CTRL_10_100_DEFAULT; 1079 reg_data = E1000_KMRNCTRLSTA_HD_CTRL_10_100_DEFAULT;
1059 ret_val = e1000e_write_kmrn_reg(hw, 1080 ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_OFFSET_HD_CTRL,
1060 E1000_KMRNCTRLSTA_OFFSET_HD_CTRL, 1081 reg_data);
1061 reg_data);
1062 if (ret_val) 1082 if (ret_val)
1063 return ret_val; 1083 return ret_val;
1064 1084
@@ -1096,9 +1116,8 @@ static s32 e1000_cfg_kmrn_1000_80003es2lan(struct e1000_hw *hw)
1096 u32 tipg; 1116 u32 tipg;
1097 1117
1098 reg_data = E1000_KMRNCTRLSTA_HD_CTRL_1000_DEFAULT; 1118 reg_data = E1000_KMRNCTRLSTA_HD_CTRL_1000_DEFAULT;
1099 ret_val = e1000e_write_kmrn_reg(hw, 1119 ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_OFFSET_HD_CTRL,
1100 E1000_KMRNCTRLSTA_OFFSET_HD_CTRL, 1120 reg_data);
1101 reg_data);
1102 if (ret_val) 1121 if (ret_val)
1103 return ret_val; 1122 return ret_val;
1104 1123
@@ -1175,7 +1194,7 @@ static struct e1000_mac_operations es2_mac_ops = {
1175 .get_link_up_info = e1000_get_link_up_info_80003es2lan, 1194 .get_link_up_info = e1000_get_link_up_info_80003es2lan,
1176 .led_on = e1000e_led_on_generic, 1195 .led_on = e1000e_led_on_generic,
1177 .led_off = e1000e_led_off_generic, 1196 .led_off = e1000e_led_off_generic,
1178 .mc_addr_list_update = e1000e_mc_addr_list_update_generic, 1197 .update_mc_addr_list = e1000e_update_mc_addr_list_generic,
1179 .reset_hw = e1000_reset_hw_80003es2lan, 1198 .reset_hw = e1000_reset_hw_80003es2lan,
1180 .init_hw = e1000_init_hw_80003es2lan, 1199 .init_hw = e1000_init_hw_80003es2lan,
1181 .setup_link = e1000e_setup_link, 1200 .setup_link = e1000e_setup_link,
@@ -1224,7 +1243,7 @@ struct e1000_info e1000_es2_info = {
1224 | FLAG_DISABLE_FC_PAUSE_TIME /* errata */ 1243 | FLAG_DISABLE_FC_PAUSE_TIME /* errata */
1225 | FLAG_TIPG_MEDIUM_FOR_80003ESLAN, 1244 | FLAG_TIPG_MEDIUM_FOR_80003ESLAN,
1226 .pba = 38, 1245 .pba = 38,
1227 .get_invariants = e1000_get_invariants_80003es2lan, 1246 .get_variants = e1000_get_variants_80003es2lan,
1228 .mac_ops = &es2_mac_ops, 1247 .mac_ops = &es2_mac_ops,
1229 .phy_ops = &es2_phy_ops, 1248 .phy_ops = &es2_phy_ops,
1230 .nvm_ops = &es2_nvm_ops, 1249 .nvm_ops = &es2_nvm_ops,
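Annotation: throughout this file the media type moves from hw->media_type to hw->phy.media_type, matching the field's new home in struct e1000_phy_info. A minimal sketch of the new access path; the helper is illustrative, the field and enum value come from the hunks above.

/* Illustrative helper; assumes the e1000_hw declarations from hw.h. */
static bool example_is_copper(struct e1000_hw *hw)
{
	return hw->phy.media_type == e1000_media_type_copper;
}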
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c
index f77a7427d3a0..6d1b257bbda6 100644
--- a/drivers/net/e1000e/ethtool.c
+++ b/drivers/net/e1000e/ethtool.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel PRO/1000 Linux driver 3 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2007 Intel Corporation. 4 Copyright(c) 1999 - 2008 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -102,7 +102,7 @@ static const char e1000_gstrings_test[][ETH_GSTRING_LEN] = {
102 "Interrupt test (offline)", "Loopback test (offline)", 102 "Interrupt test (offline)", "Loopback test (offline)",
103 "Link test (on/offline)" 103 "Link test (on/offline)"
104}; 104};
105#define E1000_TEST_LEN ARRAY_SIZE(e1000_gstrings_test) 105#define E1000_TEST_LEN ARRAY_SIZE(e1000_gstrings_test)
106 106
107static int e1000_get_settings(struct net_device *netdev, 107static int e1000_get_settings(struct net_device *netdev,
108 struct ethtool_cmd *ecmd) 108 struct ethtool_cmd *ecmd)
@@ -111,7 +111,7 @@ static int e1000_get_settings(struct net_device *netdev,
111 struct e1000_hw *hw = &adapter->hw; 111 struct e1000_hw *hw = &adapter->hw;
112 u32 status; 112 u32 status;
113 113
114 if (hw->media_type == e1000_media_type_copper) { 114 if (hw->phy.media_type == e1000_media_type_copper) {
115 115
116 ecmd->supported = (SUPPORTED_10baseT_Half | 116 ecmd->supported = (SUPPORTED_10baseT_Half |
117 SUPPORTED_10baseT_Full | 117 SUPPORTED_10baseT_Full |
@@ -165,7 +165,7 @@ static int e1000_get_settings(struct net_device *netdev,
165 ecmd->duplex = -1; 165 ecmd->duplex = -1;
166 } 166 }
167 167
168 ecmd->autoneg = ((hw->media_type == e1000_media_type_fiber) || 168 ecmd->autoneg = ((hw->phy.media_type == e1000_media_type_fiber) ||
169 hw->mac.autoneg) ? AUTONEG_ENABLE : AUTONEG_DISABLE; 169 hw->mac.autoneg) ? AUTONEG_ENABLE : AUTONEG_DISABLE;
170 return 0; 170 return 0;
171} 171}
@@ -187,7 +187,7 @@ static int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx)
187 mac->autoneg = 0; 187 mac->autoneg = 0;
188 188
189 /* Fiber NICs only allow 1000 gbps Full duplex */ 189 /* Fiber NICs only allow 1000 gbps Full duplex */
190 if ((adapter->hw.media_type == e1000_media_type_fiber) && 190 if ((adapter->hw.phy.media_type == e1000_media_type_fiber) &&
191 spddplx != (SPEED_1000 + DUPLEX_FULL)) { 191 spddplx != (SPEED_1000 + DUPLEX_FULL)) {
192 ndev_err(adapter->netdev, "Unsupported Speed/Duplex " 192 ndev_err(adapter->netdev, "Unsupported Speed/Duplex "
193 "configuration\n"); 193 "configuration\n");
@@ -226,8 +226,10 @@ static int e1000_set_settings(struct net_device *netdev,
226 struct e1000_adapter *adapter = netdev_priv(netdev); 226 struct e1000_adapter *adapter = netdev_priv(netdev);
227 struct e1000_hw *hw = &adapter->hw; 227 struct e1000_hw *hw = &adapter->hw;
228 228
229 /* When SoL/IDER sessions are active, autoneg/speed/duplex 229 /*
230 * cannot be changed */ 230 * When SoL/IDER sessions are active, autoneg/speed/duplex
231 * cannot be changed
232 */
231 if (e1000_check_reset_block(hw)) { 233 if (e1000_check_reset_block(hw)) {
232 ndev_err(netdev, "Cannot change link " 234 ndev_err(netdev, "Cannot change link "
233 "characteristics when SoL/IDER is active.\n"); 235 "characteristics when SoL/IDER is active.\n");
@@ -239,7 +241,7 @@ static int e1000_set_settings(struct net_device *netdev,
239 241
240 if (ecmd->autoneg == AUTONEG_ENABLE) { 242 if (ecmd->autoneg == AUTONEG_ENABLE) {
241 hw->mac.autoneg = 1; 243 hw->mac.autoneg = 1;
242 if (hw->media_type == e1000_media_type_fiber) 244 if (hw->phy.media_type == e1000_media_type_fiber)
243 hw->phy.autoneg_advertised = ADVERTISED_1000baseT_Full | 245 hw->phy.autoneg_advertised = ADVERTISED_1000baseT_Full |
244 ADVERTISED_FIBRE | 246 ADVERTISED_FIBRE |
245 ADVERTISED_Autoneg; 247 ADVERTISED_Autoneg;
@@ -248,6 +250,8 @@ static int e1000_set_settings(struct net_device *netdev,
248 ADVERTISED_TP | 250 ADVERTISED_TP |
249 ADVERTISED_Autoneg; 251 ADVERTISED_Autoneg;
250 ecmd->advertising = hw->phy.autoneg_advertised; 252 ecmd->advertising = hw->phy.autoneg_advertised;
253 if (adapter->fc_autoneg)
254 hw->fc.original_type = e1000_fc_default;
251 } else { 255 } else {
252 if (e1000_set_spd_dplx(adapter, ecmd->speed + ecmd->duplex)) { 256 if (e1000_set_spd_dplx(adapter, ecmd->speed + ecmd->duplex)) {
253 clear_bit(__E1000_RESETTING, &adapter->state); 257 clear_bit(__E1000_RESETTING, &adapter->state);
@@ -277,11 +281,11 @@ static void e1000_get_pauseparam(struct net_device *netdev,
277 pause->autoneg = 281 pause->autoneg =
278 (adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE); 282 (adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE);
279 283
280 if (hw->mac.fc == e1000_fc_rx_pause) { 284 if (hw->fc.type == e1000_fc_rx_pause) {
281 pause->rx_pause = 1; 285 pause->rx_pause = 1;
282 } else if (hw->mac.fc == e1000_fc_tx_pause) { 286 } else if (hw->fc.type == e1000_fc_tx_pause) {
283 pause->tx_pause = 1; 287 pause->tx_pause = 1;
284 } else if (hw->mac.fc == e1000_fc_full) { 288 } else if (hw->fc.type == e1000_fc_full) {
285 pause->rx_pause = 1; 289 pause->rx_pause = 1;
286 pause->tx_pause = 1; 290 pause->tx_pause = 1;
287 } 291 }
@@ -300,18 +304,18 @@ static int e1000_set_pauseparam(struct net_device *netdev,
300 msleep(1); 304 msleep(1);
301 305
302 if (pause->rx_pause && pause->tx_pause) 306 if (pause->rx_pause && pause->tx_pause)
303 hw->mac.fc = e1000_fc_full; 307 hw->fc.type = e1000_fc_full;
304 else if (pause->rx_pause && !pause->tx_pause) 308 else if (pause->rx_pause && !pause->tx_pause)
305 hw->mac.fc = e1000_fc_rx_pause; 309 hw->fc.type = e1000_fc_rx_pause;
306 else if (!pause->rx_pause && pause->tx_pause) 310 else if (!pause->rx_pause && pause->tx_pause)
307 hw->mac.fc = e1000_fc_tx_pause; 311 hw->fc.type = e1000_fc_tx_pause;
308 else if (!pause->rx_pause && !pause->tx_pause) 312 else if (!pause->rx_pause && !pause->tx_pause)
309 hw->mac.fc = e1000_fc_none; 313 hw->fc.type = e1000_fc_none;
310 314
311 hw->mac.original_fc = hw->mac.fc; 315 hw->fc.original_type = hw->fc.type;
312 316
313 if (adapter->fc_autoneg == AUTONEG_ENABLE) { 317 if (adapter->fc_autoneg == AUTONEG_ENABLE) {
314 hw->mac.fc = e1000_fc_default; 318 hw->fc.type = e1000_fc_default;
315 if (netif_running(adapter->netdev)) { 319 if (netif_running(adapter->netdev)) {
316 e1000e_down(adapter); 320 e1000e_down(adapter);
317 e1000e_up(adapter); 321 e1000e_up(adapter);
@@ -319,7 +323,7 @@ static int e1000_set_pauseparam(struct net_device *netdev,
319 e1000e_reset(adapter); 323 e1000e_reset(adapter);
320 } 324 }
321 } else { 325 } else {
322 retval = ((hw->media_type == e1000_media_type_fiber) ? 326 retval = ((hw->phy.media_type == e1000_media_type_fiber) ?
323 hw->mac.ops.setup_link(hw) : e1000e_force_mac_fc(hw)); 327 hw->mac.ops.setup_link(hw) : e1000e_force_mac_fc(hw));
324 } 328 }
325 329
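Annotation: flow-control state moves from hw->mac.fc / hw->mac.original_fc to the dedicated hw->fc.type / hw->fc.original_type fields. A compact sketch of the pauseparam-to-flow-control mapping shown above, using the new fields; the helper itself is illustrative.

/* Illustrative helper mirroring the mapping in the hunk above. */
static void example_set_fc(struct e1000_hw *hw, bool rx_pause, bool tx_pause)
{
	if (rx_pause && tx_pause)
		hw->fc.type = e1000_fc_full;
	else if (rx_pause)
		hw->fc.type = e1000_fc_rx_pause;
	else if (tx_pause)
		hw->fc.type = e1000_fc_tx_pause;
	else
		hw->fc.type = e1000_fc_none;

	hw->fc.original_type = hw->fc.type;
}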
@@ -558,8 +562,10 @@ static int e1000_set_eeprom(struct net_device *netdev,
558 ret_val = e1000_write_nvm(hw, first_word, 562 ret_val = e1000_write_nvm(hw, first_word,
559 last_word - first_word + 1, eeprom_buff); 563 last_word - first_word + 1, eeprom_buff);
560 564
561 /* Update the checksum over the first part of the EEPROM if needed 565 /*
562 * and flush shadow RAM for 82573 controllers */ 566 * Update the checksum over the first part of the EEPROM if needed
567 * and flush shadow RAM for 82573 controllers
568 */
563 if ((ret_val == 0) && ((first_word <= NVM_CHECKSUM_REG) || 569 if ((ret_val == 0) && ((first_word <= NVM_CHECKSUM_REG) ||
564 (hw->mac.type == e1000_82573))) 570 (hw->mac.type == e1000_82573)))
565 e1000e_update_nvm_checksum(hw); 571 e1000e_update_nvm_checksum(hw);
@@ -578,8 +584,10 @@ static void e1000_get_drvinfo(struct net_device *netdev,
578 strncpy(drvinfo->driver, e1000e_driver_name, 32); 584 strncpy(drvinfo->driver, e1000e_driver_name, 32);
579 strncpy(drvinfo->version, e1000e_driver_version, 32); 585 strncpy(drvinfo->version, e1000e_driver_version, 32);
580 586
581 /* EEPROM image version # is reported as firmware version # for 587 /*
582 * PCI-E controllers */ 588 * EEPROM image version # is reported as firmware version # for
589 * PCI-E controllers
590 */
583 e1000_read_nvm(&adapter->hw, 5, 1, &eeprom_data); 591 e1000_read_nvm(&adapter->hw, 5, 1, &eeprom_data);
584 sprintf(firmware_version, "%d.%d-%d", 592 sprintf(firmware_version, "%d.%d-%d",
585 (eeprom_data & 0xF000) >> 12, 593 (eeprom_data & 0xF000) >> 12,
@@ -633,10 +641,17 @@ static int e1000_set_ringparam(struct net_device *netdev,
633 tx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL); 641 tx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL);
634 if (!tx_ring) 642 if (!tx_ring)
635 goto err_alloc_tx; 643 goto err_alloc_tx;
644 /*
645 * use a memcpy to save any previously configured
646 * items like napi structs from having to be
647 * reinitialized
648 */
649 memcpy(tx_ring, tx_old, sizeof(struct e1000_ring));
636 650
637 rx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL); 651 rx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL);
638 if (!rx_ring) 652 if (!rx_ring)
639 goto err_alloc_rx; 653 goto err_alloc_rx;
654 memcpy(rx_ring, rx_old, sizeof(struct e1000_ring));
640 655
641 adapter->tx_ring = tx_ring; 656 adapter->tx_ring = tx_ring;
642 adapter->rx_ring = rx_ring; 657 adapter->rx_ring = rx_ring;
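Annotation: the freshly allocated ring structures are seeded from the old ones with memcpy so that state embedded in struct e1000_ring (the comment above cites napi structs) survives a resize instead of being reinitialised. A generic sketch of that allocate-and-copy pattern; the helper and its names are hypothetical.

/* Hypothetical allocate-and-copy helper; not driver code. */
static struct e1000_ring *example_clone_ring(const struct e1000_ring *old,
					     int new_count)
{
	struct e1000_ring *ring = kzalloc(sizeof(*ring), GFP_KERNEL);

	if (!ring)
		return NULL;

	/* carry over embedded state rather than reinitialising it */
	memcpy(ring, old, sizeof(*ring));
	ring->count = new_count;
	return ring;
}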
@@ -658,8 +673,10 @@ static int e1000_set_ringparam(struct net_device *netdev,
658 if (err) 673 if (err)
659 goto err_setup_tx; 674 goto err_setup_tx;
660 675
661 /* save the new, restore the old in order to free it, 676 /*
662 * then restore the new back again */ 677 * restore the old in order to free it,
678 * then add in the new
679 */
663 adapter->rx_ring = rx_old; 680 adapter->rx_ring = rx_old;
664 adapter->tx_ring = tx_old; 681 adapter->tx_ring = tx_old;
665 e1000e_free_rx_resources(adapter); 682 e1000e_free_rx_resources(adapter);
@@ -690,61 +707,55 @@ err_setup:
690 return err; 707 return err;
691} 708}
692 709
693static bool reg_pattern_test_array(struct e1000_adapter *adapter, u64 *data, 710static bool reg_pattern_test(struct e1000_adapter *adapter, u64 *data,
694 int reg, int offset, u32 mask, u32 write) 711 int reg, int offset, u32 mask, u32 write)
695{ 712{
696 int i; 713 u32 pat, val;
697 u32 read;
698 static const u32 test[] = 714 static const u32 test[] =
699 {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; 715 {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
700 for (i = 0; i < ARRAY_SIZE(test); i++) { 716 for (pat = 0; pat < ARRAY_SIZE(test); pat++) {
701 E1000_WRITE_REG_ARRAY(&adapter->hw, reg, offset, 717 E1000_WRITE_REG_ARRAY(&adapter->hw, reg, offset,
702 (test[i] & write)); 718 (test[pat] & write));
703 read = E1000_READ_REG_ARRAY(&adapter->hw, reg, offset); 719 val = E1000_READ_REG_ARRAY(&adapter->hw, reg, offset);
704 if (read != (test[i] & write & mask)) { 720 if (val != (test[pat] & write & mask)) {
705 ndev_err(adapter->netdev, "pattern test reg %04X " 721 ndev_err(adapter->netdev, "pattern test reg %04X "
706 "failed: got 0x%08X expected 0x%08X\n", 722 "failed: got 0x%08X expected 0x%08X\n",
707 reg + offset, 723 reg + offset,
708 read, (test[i] & write & mask)); 724 val, (test[pat] & write & mask));
709 *data = reg; 725 *data = reg;
710 return true; 726 return 1;
711 } 727 }
712 } 728 }
713 return false; 729 return 0;
714} 730}
715 731
716static bool reg_set_and_check(struct e1000_adapter *adapter, u64 *data, 732static bool reg_set_and_check(struct e1000_adapter *adapter, u64 *data,
717 int reg, u32 mask, u32 write) 733 int reg, u32 mask, u32 write)
718{ 734{
719 u32 read; 735 u32 val;
720 __ew32(&adapter->hw, reg, write & mask); 736 __ew32(&adapter->hw, reg, write & mask);
721 read = __er32(&adapter->hw, reg); 737 val = __er32(&adapter->hw, reg);
722 if ((write & mask) != (read & mask)) { 738 if ((write & mask) != (val & mask)) {
723 ndev_err(adapter->netdev, "set/check reg %04X test failed: " 739 ndev_err(adapter->netdev, "set/check reg %04X test failed: "
724 "got 0x%08X expected 0x%08X\n", reg, (read & mask), 740 "got 0x%08X expected 0x%08X\n", reg, (val & mask),
725 (write & mask)); 741 (write & mask));
726 *data = reg; 742 *data = reg;
727 return true; 743 return 1;
728 } 744 }
729 return false; 745 return 0;
730} 746}
731 747#define REG_PATTERN_TEST_ARRAY(reg, offset, mask, write) \
732#define REG_PATTERN_TEST(R, M, W) \ 748 do { \
733 do { \ 749 if (reg_pattern_test(adapter, data, reg, offset, mask, write)) \
734 if (reg_pattern_test_array(adapter, data, R, 0, M, W)) \ 750 return 1; \
735 return 1; \
736 } while (0) 751 } while (0)
752#define REG_PATTERN_TEST(reg, mask, write) \
753 REG_PATTERN_TEST_ARRAY(reg, 0, mask, write)
737 754
738#define REG_PATTERN_TEST_ARRAY(R, offset, M, W) \ 755#define REG_SET_AND_CHECK(reg, mask, write) \
739 do { \ 756 do { \
740 if (reg_pattern_test_array(adapter, data, R, offset, M, W)) \ 757 if (reg_set_and_check(adapter, data, reg, mask, write)) \
741 return 1; \ 758 return 1; \
742 } while (0)
743
744#define REG_SET_AND_CHECK(R, M, W) \
745 do { \
746 if (reg_set_and_check(adapter, data, R, M, W)) \
747 return 1; \
748 } while (0) 759 } while (0)
749 760
750static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data) 761static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
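Annotation: the duplicated register-test helpers collapse into one: reg_pattern_test() now takes the array offset directly, and REG_PATTERN_TEST() is defined in terms of REG_PATTERN_TEST_ARRAY() with an offset of 0. Both macros keep the do { ... } while (0) wrapper, which is what lets a multi-statement macro behave as a single statement; a stand-alone illustration of why that matters (not driver code).

/* Without the do/while(0) wrapper this macro would break under if/else. */
#define CHECK_OR_FAIL(cond)			\
	do {					\
		if (!(cond))			\
			return 1;		\
	} while (0)

static int example_use(int a, int b)
{
	if (a)
		CHECK_OR_FAIL(b);	/* expands to exactly one statement */
	else
		return 0;

	return 2;
}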
@@ -758,7 +769,8 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
758 u32 i; 769 u32 i;
759 u32 toggle; 770 u32 toggle;
760 771
761 /* The status register is Read Only, so a write should fail. 772 /*
773 * The status register is Read Only, so a write should fail.
762 * Some bits that get toggled are ignored. 774 * Some bits that get toggled are ignored.
763 */ 775 */
764 switch (mac->type) { 776 switch (mac->type) {
@@ -908,7 +920,8 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
908 mask = 1 << i; 920 mask = 1 << i;
909 921
910 if (!shared_int) { 922 if (!shared_int) {
911 /* Disable the interrupt to be reported in 923 /*
924 * Disable the interrupt to be reported in
912 * the cause register and then force the same 925 * the cause register and then force the same
913 * interrupt and see if one gets posted. If 926 * interrupt and see if one gets posted. If
914 * an interrupt was posted to the bus, the 927 * an interrupt was posted to the bus, the
@@ -925,7 +938,8 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
925 } 938 }
926 } 939 }
927 940
928 /* Enable the interrupt to be reported in 941 /*
942 * Enable the interrupt to be reported in
929 * the cause register and then force the same 943 * the cause register and then force the same
930 * interrupt and see if one gets posted. If 944 * interrupt and see if one gets posted. If
931 * an interrupt was not posted to the bus, the 945 * an interrupt was not posted to the bus, the
@@ -942,7 +956,8 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
942 } 956 }
943 957
944 if (!shared_int) { 958 if (!shared_int) {
945 /* Disable the other interrupts to be reported in 959 /*
960 * Disable the other interrupts to be reported in
946 * the cause register and then force the other 961 * the cause register and then force the other
947 * interrupts and see if any get posted. If 962 * interrupts and see if any get posted. If
948 * an interrupt was posted to the bus, the 963 * an interrupt was posted to the bus, the
@@ -1024,7 +1039,6 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
1024 struct pci_dev *pdev = adapter->pdev; 1039 struct pci_dev *pdev = adapter->pdev;
1025 struct e1000_hw *hw = &adapter->hw; 1040 struct e1000_hw *hw = &adapter->hw;
1026 u32 rctl; 1041 u32 rctl;
1027 int size;
1028 int i; 1042 int i;
1029 int ret_val; 1043 int ret_val;
1030 1044
@@ -1033,13 +1047,13 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
1033 if (!tx_ring->count) 1047 if (!tx_ring->count)
1034 tx_ring->count = E1000_DEFAULT_TXD; 1048 tx_ring->count = E1000_DEFAULT_TXD;
1035 1049
1036 size = tx_ring->count * sizeof(struct e1000_buffer); 1050 tx_ring->buffer_info = kcalloc(tx_ring->count,
1037 tx_ring->buffer_info = kmalloc(size, GFP_KERNEL); 1051 sizeof(struct e1000_buffer),
1038 if (!tx_ring->buffer_info) { 1052 GFP_KERNEL);
1053 if (!(tx_ring->buffer_info)) {
1039 ret_val = 1; 1054 ret_val = 1;
1040 goto err_nomem; 1055 goto err_nomem;
1041 } 1056 }
1042 memset(tx_ring->buffer_info, 0, size);
1043 1057
1044 tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc); 1058 tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc);
1045 tx_ring->size = ALIGN(tx_ring->size, 4096); 1059 tx_ring->size = ALIGN(tx_ring->size, 4096);
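Annotation: the kmalloc() + memset() pair for the buffer_info arrays (here and in the Rx hunk below) becomes a single kcalloc(), which returns zeroed memory and fails cleanly if count * size would overflow. A minimal stand-alone sketch of the same pattern; the element type and function name are placeholders.

#include <linux/slab.h>
#include <linux/types.h>

/* Placeholder element; the driver allocates struct e1000_buffer here. */
struct example_buffer {
	unsigned long time_stamp;
	u16 length;
};

static struct example_buffer *example_alloc_buffers(int count)
{
	/* zeroed and overflow-checked, unlike kmalloc() + memset() */
	return kcalloc(count, sizeof(struct example_buffer), GFP_KERNEL);
}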
@@ -1049,21 +1063,17 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
1049 ret_val = 2; 1063 ret_val = 2;
1050 goto err_nomem; 1064 goto err_nomem;
1051 } 1065 }
1052 memset(tx_ring->desc, 0, tx_ring->size);
1053 tx_ring->next_to_use = 0; 1066 tx_ring->next_to_use = 0;
1054 tx_ring->next_to_clean = 0; 1067 tx_ring->next_to_clean = 0;
1055 1068
1056 ew32(TDBAL, 1069 ew32(TDBAL, ((u64) tx_ring->dma & 0x00000000FFFFFFFF));
1057 ((u64) tx_ring->dma & 0x00000000FFFFFFFF));
1058 ew32(TDBAH, ((u64) tx_ring->dma >> 32)); 1070 ew32(TDBAH, ((u64) tx_ring->dma >> 32));
1059 ew32(TDLEN, 1071 ew32(TDLEN, tx_ring->count * sizeof(struct e1000_tx_desc));
1060 tx_ring->count * sizeof(struct e1000_tx_desc));
1061 ew32(TDH, 0); 1072 ew32(TDH, 0);
1062 ew32(TDT, 0); 1073 ew32(TDT, 0);
1063 ew32(TCTL, 1074 ew32(TCTL, E1000_TCTL_PSP | E1000_TCTL_EN | E1000_TCTL_MULR |
1064 E1000_TCTL_PSP | E1000_TCTL_EN | 1075 E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT |
1065 E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT | 1076 E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT);
1066 E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT);
1067 1077
1068 for (i = 0; i < tx_ring->count; i++) { 1078 for (i = 0; i < tx_ring->count; i++) {
1069 struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i); 1079 struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i);
@@ -1085,12 +1095,11 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
1085 ret_val = 4; 1095 ret_val = 4;
1086 goto err_nomem; 1096 goto err_nomem;
1087 } 1097 }
1088 tx_desc->buffer_addr = cpu_to_le64( 1098 tx_desc->buffer_addr = cpu_to_le64(tx_ring->buffer_info[i].dma);
1089 tx_ring->buffer_info[i].dma);
1090 tx_desc->lower.data = cpu_to_le32(skb->len); 1099 tx_desc->lower.data = cpu_to_le32(skb->len);
1091 tx_desc->lower.data |= cpu_to_le32(E1000_TXD_CMD_EOP | 1100 tx_desc->lower.data |= cpu_to_le32(E1000_TXD_CMD_EOP |
1092 E1000_TXD_CMD_IFCS | 1101 E1000_TXD_CMD_IFCS |
1093 E1000_TXD_CMD_RPS); 1102 E1000_TXD_CMD_RS);
1094 tx_desc->upper.data = 0; 1103 tx_desc->upper.data = 0;
1095 } 1104 }
1096 1105
@@ -1099,13 +1108,13 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
1099 if (!rx_ring->count) 1108 if (!rx_ring->count)
1100 rx_ring->count = E1000_DEFAULT_RXD; 1109 rx_ring->count = E1000_DEFAULT_RXD;
1101 1110
1102 size = rx_ring->count * sizeof(struct e1000_buffer); 1111 rx_ring->buffer_info = kcalloc(rx_ring->count,
1103 rx_ring->buffer_info = kmalloc(size, GFP_KERNEL); 1112 sizeof(struct e1000_buffer),
1104 if (!rx_ring->buffer_info) { 1113 GFP_KERNEL);
1114 if (!(rx_ring->buffer_info)) {
1105 ret_val = 5; 1115 ret_val = 5;
1106 goto err_nomem; 1116 goto err_nomem;
1107 } 1117 }
1108 memset(rx_ring->buffer_info, 0, size);
1109 1118
1110 rx_ring->size = rx_ring->count * sizeof(struct e1000_rx_desc); 1119 rx_ring->size = rx_ring->count * sizeof(struct e1000_rx_desc);
1111 rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size, 1120 rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
@@ -1114,7 +1123,6 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
1114 ret_val = 6; 1123 ret_val = 6;
1115 goto err_nomem; 1124 goto err_nomem;
1116 } 1125 }
1117 memset(rx_ring->desc, 0, rx_ring->size);
1118 rx_ring->next_to_use = 0; 1126 rx_ring->next_to_use = 0;
1119 rx_ring->next_to_clean = 0; 1127 rx_ring->next_to_clean = 0;
1120 1128
@@ -1126,6 +1134,8 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
1126 ew32(RDH, 0); 1134 ew32(RDH, 0);
1127 ew32(RDT, 0); 1135 ew32(RDT, 0);
1128 rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_SZ_2048 | 1136 rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_SZ_2048 |
1137 E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_LPE |
1138 E1000_RCTL_SBP | E1000_RCTL_SECRC |
1129 E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF | 1139 E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
1130 (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT); 1140 (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
1131 ew32(RCTL, rctl); 1141 ew32(RCTL, rctl);
@@ -1175,21 +1185,22 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
1175 u32 ctrl_reg = 0; 1185 u32 ctrl_reg = 0;
1176 u32 stat_reg = 0; 1186 u32 stat_reg = 0;
1177 1187
1178 adapter->hw.mac.autoneg = 0; 1188 hw->mac.autoneg = 0;
1179 1189
1180 if (adapter->hw.phy.type == e1000_phy_m88) { 1190 if (hw->phy.type == e1000_phy_m88) {
1181 /* Auto-MDI/MDIX Off */ 1191 /* Auto-MDI/MDIX Off */
1182 e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, 0x0808); 1192 e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, 0x0808);
1183 /* reset to update Auto-MDI/MDIX */ 1193 /* reset to update Auto-MDI/MDIX */
1184 e1e_wphy(hw, PHY_CONTROL, 0x9140); 1194 e1e_wphy(hw, PHY_CONTROL, 0x9140);
1185 /* autoneg off */ 1195 /* autoneg off */
1186 e1e_wphy(hw, PHY_CONTROL, 0x8140); 1196 e1e_wphy(hw, PHY_CONTROL, 0x8140);
1187 } else if (adapter->hw.phy.type == e1000_phy_gg82563) 1197 } else if (hw->phy.type == e1000_phy_gg82563)
1188 e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, 0x1CC); 1198 e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, 0x1CC);
1189 1199
1190 ctrl_reg = er32(CTRL); 1200 ctrl_reg = er32(CTRL);
1191 1201
1192 if (adapter->hw.phy.type == e1000_phy_ife) { 1202 switch (hw->phy.type) {
1203 case e1000_phy_ife:
1193 /* force 100, set loopback */ 1204 /* force 100, set loopback */
1194 e1e_wphy(hw, PHY_CONTROL, 0x6100); 1205 e1e_wphy(hw, PHY_CONTROL, 0x6100);
1195 1206
@@ -1199,9 +1210,11 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
1199 E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ 1210 E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
1200 E1000_CTRL_SPD_100 |/* Force Speed to 100 */ 1211 E1000_CTRL_SPD_100 |/* Force Speed to 100 */
1201 E1000_CTRL_FD); /* Force Duplex to FULL */ 1212 E1000_CTRL_FD); /* Force Duplex to FULL */
1202 } else { 1213 break;
1214 default:
1203 /* force 1000, set loopback */ 1215 /* force 1000, set loopback */
1204 e1e_wphy(hw, PHY_CONTROL, 0x4140); 1216 e1e_wphy(hw, PHY_CONTROL, 0x4140);
1217 mdelay(250);
1205 1218
1206 /* Now set up the MAC to the same speed/duplex as the PHY. */ 1219 /* Now set up the MAC to the same speed/duplex as the PHY. */
1207 ctrl_reg = er32(CTRL); 1220 ctrl_reg = er32(CTRL);
@@ -1210,14 +1223,20 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
1210 E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ 1223 E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
1211 E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */ 1224 E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */
1212 E1000_CTRL_FD); /* Force Duplex to FULL */ 1225 E1000_CTRL_FD); /* Force Duplex to FULL */
1226
1227 if ((adapter->hw.mac.type == e1000_ich8lan) ||
1228 (adapter->hw.mac.type == e1000_ich9lan))
1229 ctrl_reg |= E1000_CTRL_SLU; /* Set Link Up */
1213 } 1230 }
1214 1231
1215 if (adapter->hw.media_type == e1000_media_type_copper && 1232 if (hw->phy.media_type == e1000_media_type_copper &&
1216 adapter->hw.phy.type == e1000_phy_m88) { 1233 hw->phy.type == e1000_phy_m88) {
1217 ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */ 1234 ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */
1218 } else { 1235 } else {
1219 /* Set the ILOS bit on the fiber Nic if half duplex link is 1236 /*
1220 * detected. */ 1237 * Set the ILOS bit on the fiber Nic if half duplex link is
1238 * detected.
1239 */
1221 stat_reg = er32(STATUS); 1240 stat_reg = er32(STATUS);
1222 if ((stat_reg & E1000_STATUS_FD) == 0) 1241 if ((stat_reg & E1000_STATUS_FD) == 0)
1223 ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU); 1242 ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU);
@@ -1225,10 +1244,11 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
1225 1244
1226 ew32(CTRL, ctrl_reg); 1245 ew32(CTRL, ctrl_reg);
1227 1246
1228 /* Disable the receiver on the PHY so when a cable is plugged in, the 1247 /*
1248 * Disable the receiver on the PHY so when a cable is plugged in, the
1229 * PHY does not begin to autoneg when a cable is reconnected to the NIC. 1249 * PHY does not begin to autoneg when a cable is reconnected to the NIC.
1230 */ 1250 */
1231 if (adapter->hw.phy.type == e1000_phy_m88) 1251 if (hw->phy.type == e1000_phy_m88)
1232 e1000_phy_disable_receiver(adapter); 1252 e1000_phy_disable_receiver(adapter);
1233 1253
1234 udelay(500); 1254 udelay(500);
@@ -1244,8 +1264,10 @@ static int e1000_set_82571_fiber_loopback(struct e1000_adapter *adapter)
1244 1264
1245 /* special requirements for 82571/82572 fiber adapters */ 1265 /* special requirements for 82571/82572 fiber adapters */
1246 1266
1247 /* jump through hoops to make sure link is up because serdes 1267 /*
1248 * link is hardwired up */ 1268 * jump through hoops to make sure link is up because serdes
1269 * link is hardwired up
1270 */
1249 ctrl |= E1000_CTRL_SLU; 1271 ctrl |= E1000_CTRL_SLU;
1250 ew32(CTRL, ctrl); 1272 ew32(CTRL, ctrl);
1251 1273
@@ -1263,8 +1285,10 @@ static int e1000_set_82571_fiber_loopback(struct e1000_adapter *adapter)
1263 ew32(CTRL, ctrl); 1285 ew32(CTRL, ctrl);
1264 } 1286 }
1265 1287
1266 /* special write to serdes control register to enable SerDes analog 1288 /*
1267 * loopback */ 1289 * special write to serdes control register to enable SerDes analog
1290 * loopback
1291 */
1268#define E1000_SERDES_LB_ON 0x410 1292#define E1000_SERDES_LB_ON 0x410
1269 ew32(SCTL, E1000_SERDES_LB_ON); 1293 ew32(SCTL, E1000_SERDES_LB_ON);
1270 msleep(10); 1294 msleep(10);
@@ -1279,8 +1303,10 @@ static int e1000_set_es2lan_mac_loopback(struct e1000_adapter *adapter)
1279 u32 ctrlext = er32(CTRL_EXT); 1303 u32 ctrlext = er32(CTRL_EXT);
1280 u32 ctrl = er32(CTRL); 1304 u32 ctrl = er32(CTRL);
1281 1305
1282 /* save CTRL_EXT to restore later, reuse an empty variable (unused 1306 /*
1283 on mac_type 80003es2lan) */ 1307 * save CTRL_EXT to restore later, reuse an empty variable (unused
1308 * on mac_type 80003es2lan)
1309 */
1284 adapter->tx_fifo_head = ctrlext; 1310 adapter->tx_fifo_head = ctrlext;
1285 1311
1286 /* clear the serdes mode bits, putting the device into mac loopback */ 1312 /* clear the serdes mode bits, putting the device into mac loopback */
@@ -1302,7 +1328,7 @@ static int e1000_set_es2lan_mac_loopback(struct e1000_adapter *adapter)
1302#define KMRNCTRLSTA_OPMODE (0x1F << 16) 1328#define KMRNCTRLSTA_OPMODE (0x1F << 16)
1303#define KMRNCTRLSTA_OPMODE_1GB_FD_GMII 0x0582 1329#define KMRNCTRLSTA_OPMODE_1GB_FD_GMII 0x0582
1304 ew32(KMRNCTRLSTA, 1330 ew32(KMRNCTRLSTA,
1305 (KMRNCTRLSTA_OPMODE | KMRNCTRLSTA_OPMODE_1GB_FD_GMII)); 1331 (KMRNCTRLSTA_OPMODE | KMRNCTRLSTA_OPMODE_1GB_FD_GMII));
1306 1332
1307 return 0; 1333 return 0;
1308} 1334}
@@ -1312,8 +1338,8 @@ static int e1000_setup_loopback_test(struct e1000_adapter *adapter)
1312 struct e1000_hw *hw = &adapter->hw; 1338 struct e1000_hw *hw = &adapter->hw;
1313 u32 rctl; 1339 u32 rctl;
1314 1340
1315 if (hw->media_type == e1000_media_type_fiber || 1341 if (hw->phy.media_type == e1000_media_type_fiber ||
1316 hw->media_type == e1000_media_type_internal_serdes) { 1342 hw->phy.media_type == e1000_media_type_internal_serdes) {
1317 switch (hw->mac.type) { 1343 switch (hw->mac.type) {
1318 case e1000_80003es2lan: 1344 case e1000_80003es2lan:
1319 return e1000_set_es2lan_mac_loopback(adapter); 1345 return e1000_set_es2lan_mac_loopback(adapter);
@@ -1328,7 +1354,7 @@ static int e1000_setup_loopback_test(struct e1000_adapter *adapter)
1328 ew32(RCTL, rctl); 1354 ew32(RCTL, rctl);
1329 return 0; 1355 return 0;
1330 } 1356 }
1331 } else if (hw->media_type == e1000_media_type_copper) { 1357 } else if (hw->phy.media_type == e1000_media_type_copper) {
1332 return e1000_integrated_phy_loopback(adapter); 1358 return e1000_integrated_phy_loopback(adapter);
1333 } 1359 }
1334 1360
@@ -1347,18 +1373,17 @@ static void e1000_loopback_cleanup(struct e1000_adapter *adapter)
1347 1373
1348 switch (hw->mac.type) { 1374 switch (hw->mac.type) {
1349 case e1000_80003es2lan: 1375 case e1000_80003es2lan:
1350 if (hw->media_type == e1000_media_type_fiber || 1376 if (hw->phy.media_type == e1000_media_type_fiber ||
1351 hw->media_type == e1000_media_type_internal_serdes) { 1377 hw->phy.media_type == e1000_media_type_internal_serdes) {
1352 /* restore CTRL_EXT, stealing space from tx_fifo_head */ 1378 /* restore CTRL_EXT, stealing space from tx_fifo_head */
1353 ew32(CTRL_EXT, 1379 ew32(CTRL_EXT, adapter->tx_fifo_head);
1354 adapter->tx_fifo_head);
1355 adapter->tx_fifo_head = 0; 1380 adapter->tx_fifo_head = 0;
1356 } 1381 }
1357 /* fall through */ 1382 /* fall through */
1358 case e1000_82571: 1383 case e1000_82571:
1359 case e1000_82572: 1384 case e1000_82572:
1360 if (hw->media_type == e1000_media_type_fiber || 1385 if (hw->phy.media_type == e1000_media_type_fiber ||
1361 hw->media_type == e1000_media_type_internal_serdes) { 1386 hw->phy.media_type == e1000_media_type_internal_serdes) {
1362#define E1000_SERDES_LB_OFF 0x400 1387#define E1000_SERDES_LB_OFF 0x400
1363 ew32(SCTL, E1000_SERDES_LB_OFF); 1388 ew32(SCTL, E1000_SERDES_LB_OFF);
1364 msleep(10); 1389 msleep(10);
@@ -1414,7 +1439,8 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
1414 1439
1415 ew32(RDT, rx_ring->count - 1); 1440 ew32(RDT, rx_ring->count - 1);
1416 1441
1417 /* Calculate the loop count based on the largest descriptor ring 1442 /*
1443 * Calculate the loop count based on the largest descriptor ring
1418 * The idea is to wrap the largest ring a number of times using 64 1444 * The idea is to wrap the largest ring a number of times using 64
1419 * send/receive pairs during each loop 1445 * send/receive pairs during each loop
1420 */ 1446 */
@@ -1428,8 +1454,8 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
1428 l = 0; 1454 l = 0;
1429 for (j = 0; j <= lc; j++) { /* loop count loop */ 1455 for (j = 0; j <= lc; j++) { /* loop count loop */
1430 for (i = 0; i < 64; i++) { /* send the packets */ 1456 for (i = 0; i < 64; i++) { /* send the packets */
1431 e1000_create_lbtest_frame( 1457 e1000_create_lbtest_frame(tx_ring->buffer_info[k].skb,
1432 tx_ring->buffer_info[i].skb, 1024); 1458 1024);
1433 pci_dma_sync_single_for_device(pdev, 1459 pci_dma_sync_single_for_device(pdev,
1434 tx_ring->buffer_info[k].dma, 1460 tx_ring->buffer_info[k].dma,
1435 tx_ring->buffer_info[k].length, 1461 tx_ring->buffer_info[k].length,
@@ -1454,7 +1480,8 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
1454 l++; 1480 l++;
1455 if (l == rx_ring->count) 1481 if (l == rx_ring->count)
1456 l = 0; 1482 l = 0;
1457 /* time + 20 msecs (200 msecs on 2.4) is more than 1483 /*
1484 * time + 20 msecs (200 msecs on 2.4) is more than
1458 * enough time to complete the receives, if it's 1485 * enough time to complete the receives, if it's
1459 * exceeded, break and error off 1486 * exceeded, break and error off
1460 */ 1487 */
@@ -1463,7 +1490,7 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
1463 ret_val = 13; /* ret_val is the same as mis-compare */ 1490 ret_val = 13; /* ret_val is the same as mis-compare */
1464 break; 1491 break;
1465 } 1492 }
1466 if (jiffies >= (time + 2)) { 1493 if (jiffies >= (time + 20)) {
1467 ret_val = 14; /* error code for time out error */ 1494 ret_val = 14; /* error code for time out error */
1468 break; 1495 break;
1469 } 1496 }
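The hunk above lengthens the receive-wait window from time + 2 to time + 20 jiffies, in line with the reformatted comment. A minimal sketch of the same bounded-poll idiom follows; rx_frame_ready() is a hypothetical stand-in for the descriptor check done in the loop, not driver code, and time_after() is the wraparound-safe way to compare jiffies (the raw >= comparison in the hunk is not).

#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/types.h>

bool rx_frame_ready(void);	/* hypothetical helper for illustration */

static int wait_for_loopback_rx(unsigned long start)
{
	while (!rx_frame_ready()) {
		if (time_after(jiffies, start + 20))
			return -ETIMEDOUT;	/* the hunk returns 14 here */
		msleep(1);
	}
	return 0;
}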
@@ -1473,8 +1500,10 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
1473 1500
1474static int e1000_loopback_test(struct e1000_adapter *adapter, u64 *data) 1501static int e1000_loopback_test(struct e1000_adapter *adapter, u64 *data)
1475{ 1502{
1476 /* PHY loopback cannot be performed if SoL/IDER 1503 /*
1477 * sessions are active */ 1504 * PHY loopback cannot be performed if SoL/IDER
1505 * sessions are active
1506 */
1478 if (e1000_check_reset_block(&adapter->hw)) { 1507 if (e1000_check_reset_block(&adapter->hw)) {
1479 ndev_err(adapter->netdev, "Cannot do PHY loopback test " 1508 ndev_err(adapter->netdev, "Cannot do PHY loopback test "
1480 "when SoL/IDER is active.\n"); 1509 "when SoL/IDER is active.\n");
@@ -1504,12 +1533,14 @@ static int e1000_link_test(struct e1000_adapter *adapter, u64 *data)
1504 struct e1000_hw *hw = &adapter->hw; 1533 struct e1000_hw *hw = &adapter->hw;
1505 1534
1506 *data = 0; 1535 *data = 0;
1507 if (hw->media_type == e1000_media_type_internal_serdes) { 1536 if (hw->phy.media_type == e1000_media_type_internal_serdes) {
1508 int i = 0; 1537 int i = 0;
1509 hw->mac.serdes_has_link = 0; 1538 hw->mac.serdes_has_link = 0;
1510 1539
1511 /* On some blade server designs, link establishment 1540 /*
1512 * could take as long as 2-3 minutes */ 1541 * On some blade server designs, link establishment
1542 * could take as long as 2-3 minutes
1543 */
1513 do { 1544 do {
1514 hw->mac.ops.check_for_link(hw); 1545 hw->mac.ops.check_for_link(hw);
1515 if (hw->mac.serdes_has_link) 1546 if (hw->mac.serdes_has_link)
@@ -1562,8 +1593,10 @@ static void e1000_diag_test(struct net_device *netdev,
1562 1593
1563 ndev_info(netdev, "offline testing starting\n"); 1594 ndev_info(netdev, "offline testing starting\n");
1564 1595
1565 /* Link test performed before hardware reset so autoneg doesn't 1596 /*
1566 * interfere with test result */ 1597 * Link test performed before hardware reset so autoneg doesn't
1598 * interfere with test result
1599 */
1567 if (e1000_link_test(adapter, &data[4])) 1600 if (e1000_link_test(adapter, &data[4]))
1568 eth_test->flags |= ETH_TEST_FL_FAILED; 1601 eth_test->flags |= ETH_TEST_FL_FAILED;
1569 1602
@@ -1596,9 +1629,9 @@ static void e1000_diag_test(struct net_device *netdev,
1596 adapter->hw.mac.autoneg = autoneg; 1629 adapter->hw.mac.autoneg = autoneg;
1597 1630
1598 /* force this routine to wait until autoneg complete/timeout */ 1631 /* force this routine to wait until autoneg complete/timeout */
1599 adapter->hw.phy.wait_for_link = 1; 1632 adapter->hw.phy.autoneg_wait_to_complete = 1;
1600 e1000e_reset(adapter); 1633 e1000e_reset(adapter);
1601 adapter->hw.phy.wait_for_link = 0; 1634 adapter->hw.phy.autoneg_wait_to_complete = 0;
1602 1635
1603 clear_bit(__E1000_TESTING, &adapter->state); 1636 clear_bit(__E1000_TESTING, &adapter->state);
1604 if (if_running) 1637 if (if_running)
@@ -1768,8 +1801,7 @@ static void e1000_get_strings(struct net_device *netdev, u32 stringset,
1768 1801
1769 switch (stringset) { 1802 switch (stringset) {
1770 case ETH_SS_TEST: 1803 case ETH_SS_TEST:
1771 memcpy(data, *e1000_gstrings_test, 1804 memcpy(data, *e1000_gstrings_test, sizeof(e1000_gstrings_test));
1772 sizeof(e1000_gstrings_test));
1773 break; 1805 break;
1774 case ETH_SS_STATS: 1806 case ETH_SS_STATS:
1775 for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) { 1807 for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
index 916025b30fc3..53f1ac6327fa 100644
--- a/drivers/net/e1000e/hw.h
+++ b/drivers/net/e1000e/hw.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel PRO/1000 Linux driver 3 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2007 Intel Corporation. 4 Copyright(c) 1999 - 2008 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -66,14 +66,14 @@ enum e1e_registers {
66 E1000_IMS = 0x000D0, /* Interrupt Mask Set - RW */ 66 E1000_IMS = 0x000D0, /* Interrupt Mask Set - RW */
67 E1000_IMC = 0x000D8, /* Interrupt Mask Clear - WO */ 67 E1000_IMC = 0x000D8, /* Interrupt Mask Clear - WO */
68 E1000_IAM = 0x000E0, /* Interrupt Acknowledge Auto Mask */ 68 E1000_IAM = 0x000E0, /* Interrupt Acknowledge Auto Mask */
69 E1000_RCTL = 0x00100, /* RX Control - RW */ 69 E1000_RCTL = 0x00100, /* Rx Control - RW */
70 E1000_FCTTV = 0x00170, /* Flow Control Transmit Timer Value - RW */ 70 E1000_FCTTV = 0x00170, /* Flow Control Transmit Timer Value - RW */
71 E1000_TXCW = 0x00178, /* TX Configuration Word - RW */ 71 E1000_TXCW = 0x00178, /* Tx Configuration Word - RW */
72 E1000_RXCW = 0x00180, /* RX Configuration Word - RO */ 72 E1000_RXCW = 0x00180, /* Rx Configuration Word - RO */
73 E1000_TCTL = 0x00400, /* TX Control - RW */ 73 E1000_TCTL = 0x00400, /* Tx Control - RW */
74 E1000_TCTL_EXT = 0x00404, /* Extended TX Control - RW */ 74 E1000_TCTL_EXT = 0x00404, /* Extended Tx Control - RW */
75 E1000_TIPG = 0x00410, /* TX Inter-packet gap -RW */ 75 E1000_TIPG = 0x00410, /* Tx Inter-packet gap -RW */
76 E1000_AIT = 0x00458, /* Adaptive Interframe Spacing Throttle - RW */ 76 E1000_AIT = 0x00458, /* Adaptive Interframe Spacing Throttle -RW */
77 E1000_LEDCTL = 0x00E00, /* LED Control - RW */ 77 E1000_LEDCTL = 0x00E00, /* LED Control - RW */
78 E1000_EXTCNF_CTRL = 0x00F00, /* Extended Configuration Control */ 78 E1000_EXTCNF_CTRL = 0x00F00, /* Extended Configuration Control */
79 E1000_EXTCNF_SIZE = 0x00F08, /* Extended Configuration Size */ 79 E1000_EXTCNF_SIZE = 0x00F08, /* Extended Configuration Size */
@@ -87,12 +87,14 @@ enum e1e_registers {
87 E1000_FCRTL = 0x02160, /* Flow Control Receive Threshold Low - RW */ 87 E1000_FCRTL = 0x02160, /* Flow Control Receive Threshold Low - RW */
88 E1000_FCRTH = 0x02168, /* Flow Control Receive Threshold High - RW */ 88 E1000_FCRTH = 0x02168, /* Flow Control Receive Threshold High - RW */
89 E1000_PSRCTL = 0x02170, /* Packet Split Receive Control - RW */ 89 E1000_PSRCTL = 0x02170, /* Packet Split Receive Control - RW */
90 E1000_RDBAL = 0x02800, /* RX Descriptor Base Address Low - RW */ 90 E1000_RDBAL = 0x02800, /* Rx Descriptor Base Address Low - RW */
91 E1000_RDBAH = 0x02804, /* RX Descriptor Base Address High - RW */ 91 E1000_RDBAH = 0x02804, /* Rx Descriptor Base Address High - RW */
92 E1000_RDLEN = 0x02808, /* RX Descriptor Length - RW */ 92 E1000_RDLEN = 0x02808, /* Rx Descriptor Length - RW */
93 E1000_RDH = 0x02810, /* RX Descriptor Head - RW */ 93 E1000_RDH = 0x02810, /* Rx Descriptor Head - RW */
94 E1000_RDT = 0x02818, /* RX Descriptor Tail - RW */ 94 E1000_RDT = 0x02818, /* Rx Descriptor Tail - RW */
95 E1000_RDTR = 0x02820, /* RX Delay Timer - RW */ 95 E1000_RDTR = 0x02820, /* Rx Delay Timer - RW */
96 E1000_RXDCTL_BASE = 0x02828, /* Rx Descriptor Control - RW */
97#define E1000_RXDCTL(_n) (E1000_RXDCTL_BASE + (_n << 8))
96 E1000_RADV = 0x0282C, /* RX Interrupt Absolute Delay Timer - RW */ 98 E1000_RADV = 0x0282C, /* RX Interrupt Absolute Delay Timer - RW */
97 99
98/* Convenience macros 100/* Convenience macros
@@ -105,17 +107,17 @@ enum e1e_registers {
105 */ 107 */
106#define E1000_RDBAL_REG(_n) (E1000_RDBAL + (_n << 8)) 108#define E1000_RDBAL_REG(_n) (E1000_RDBAL + (_n << 8))
107 E1000_KABGTXD = 0x03004, /* AFE Band Gap Transmit Ref Data */ 109 E1000_KABGTXD = 0x03004, /* AFE Band Gap Transmit Ref Data */
108 E1000_TDBAL = 0x03800, /* TX Descriptor Base Address Low - RW */ 110 E1000_TDBAL = 0x03800, /* Tx Descriptor Base Address Low - RW */
109 E1000_TDBAH = 0x03804, /* TX Descriptor Base Address High - RW */ 111 E1000_TDBAH = 0x03804, /* Tx Descriptor Base Address High - RW */
110 E1000_TDLEN = 0x03808, /* TX Descriptor Length - RW */ 112 E1000_TDLEN = 0x03808, /* Tx Descriptor Length - RW */
111 E1000_TDH = 0x03810, /* TX Descriptor Head - RW */ 113 E1000_TDH = 0x03810, /* Tx Descriptor Head - RW */
112 E1000_TDT = 0x03818, /* TX Descriptor Tail - RW */ 114 E1000_TDT = 0x03818, /* Tx Descriptor Tail - RW */
113 E1000_TIDV = 0x03820, /* TX Interrupt Delay Value - RW */ 115 E1000_TIDV = 0x03820, /* Tx Interrupt Delay Value - RW */
114 E1000_TXDCTL = 0x03828, /* TX Descriptor Control - RW */ 116 E1000_TXDCTL_BASE = 0x03828, /* Tx Descriptor Control - RW */
115 E1000_TADV = 0x0382C, /* TX Interrupt Absolute Delay Val - RW */ 117#define E1000_TXDCTL(_n) (E1000_TXDCTL_BASE + (_n << 8))
116 E1000_TARC0 = 0x03840, /* TX Arbitration Count (0) */ 118 E1000_TADV = 0x0382C, /* Tx Interrupt Absolute Delay Val - RW */
117 E1000_TXDCTL1 = 0x03928, /* TX Descriptor Control (1) - RW */ 119 E1000_TARC_BASE = 0x03840, /* Tx Arbitration Count (0) */
118 E1000_TARC1 = 0x03940, /* TX Arbitration Count (1) */ 120#define E1000_TARC(_n) (E1000_TARC_BASE + (_n << 8))
119 E1000_CRCERRS = 0x04000, /* CRC Error Count - R/clr */ 121 E1000_CRCERRS = 0x04000, /* CRC Error Count - R/clr */
120 E1000_ALGNERRC = 0x04004, /* Alignment Error Count - R/clr */ 122 E1000_ALGNERRC = 0x04004, /* Alignment Error Count - R/clr */
121 E1000_SYMERRS = 0x04008, /* Symbol Error Count - R/clr */ 123 E1000_SYMERRS = 0x04008, /* Symbol Error Count - R/clr */
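This hunk, together with the RXDCTL change earlier in the file, replaces the fixed TXDCTL/TXDCTL1 and TARC0/TARC1 entries with a base offset plus an _n-indexed macro, so queue n sits 0x100 bytes above queue 0. A standalone sketch using only the base values from the diff, confirming that queue 1 expands to the removed constants (0x03928 and 0x03940):

#include <stdio.h>

/* base offsets and macros as introduced in the hunks above */
#define E1000_RXDCTL_BASE 0x02828
#define E1000_RXDCTL(_n)  (E1000_RXDCTL_BASE + ((_n) << 8))
#define E1000_TXDCTL_BASE 0x03828
#define E1000_TXDCTL(_n)  (E1000_TXDCTL_BASE + ((_n) << 8))
#define E1000_TARC_BASE   0x03840
#define E1000_TARC(_n)    (E1000_TARC_BASE + ((_n) << 8))

int main(void)
{
	/* queue 1 matches the old fixed registers that were removed:
	 * TXDCTL(1) == 0x03928 (was E1000_TXDCTL1),
	 * TARC(1)   == 0x03940 (was E1000_TARC1) */
	printf("RXDCTL(1)=0x%05x TXDCTL(1)=0x%05x TARC(1)=0x%05x\n",
	       E1000_RXDCTL(1), E1000_TXDCTL(1), E1000_TARC(1));
	return 0;
}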
@@ -127,53 +129,53 @@ enum e1e_registers {
127 E1000_LATECOL = 0x04020, /* Late Collision Count - R/clr */ 129 E1000_LATECOL = 0x04020, /* Late Collision Count - R/clr */
128 E1000_COLC = 0x04028, /* Collision Count - R/clr */ 130 E1000_COLC = 0x04028, /* Collision Count - R/clr */
129 E1000_DC = 0x04030, /* Defer Count - R/clr */ 131 E1000_DC = 0x04030, /* Defer Count - R/clr */
130 E1000_TNCRS = 0x04034, /* TX-No CRS - R/clr */ 132 E1000_TNCRS = 0x04034, /* Tx-No CRS - R/clr */
131 E1000_SEC = 0x04038, /* Sequence Error Count - R/clr */ 133 E1000_SEC = 0x04038, /* Sequence Error Count - R/clr */
132 E1000_CEXTERR = 0x0403C, /* Carrier Extension Error Count - R/clr */ 134 E1000_CEXTERR = 0x0403C, /* Carrier Extension Error Count - R/clr */
133 E1000_RLEC = 0x04040, /* Receive Length Error Count - R/clr */ 135 E1000_RLEC = 0x04040, /* Receive Length Error Count - R/clr */
134 E1000_XONRXC = 0x04048, /* XON RX Count - R/clr */ 136 E1000_XONRXC = 0x04048, /* XON Rx Count - R/clr */
135 E1000_XONTXC = 0x0404C, /* XON TX Count - R/clr */ 137 E1000_XONTXC = 0x0404C, /* XON Tx Count - R/clr */
136 E1000_XOFFRXC = 0x04050, /* XOFF RX Count - R/clr */ 138 E1000_XOFFRXC = 0x04050, /* XOFF Rx Count - R/clr */
137 E1000_XOFFTXC = 0x04054, /* XOFF TX Count - R/clr */ 139 E1000_XOFFTXC = 0x04054, /* XOFF Tx Count - R/clr */
138 E1000_FCRUC = 0x04058, /* Flow Control RX Unsupported Count- R/clr */ 140 E1000_FCRUC = 0x04058, /* Flow Control Rx Unsupported Count- R/clr */
139 E1000_PRC64 = 0x0405C, /* Packets RX (64 bytes) - R/clr */ 141 E1000_PRC64 = 0x0405C, /* Packets Rx (64 bytes) - R/clr */
140 E1000_PRC127 = 0x04060, /* Packets RX (65-127 bytes) - R/clr */ 142 E1000_PRC127 = 0x04060, /* Packets Rx (65-127 bytes) - R/clr */
141 E1000_PRC255 = 0x04064, /* Packets RX (128-255 bytes) - R/clr */ 143 E1000_PRC255 = 0x04064, /* Packets Rx (128-255 bytes) - R/clr */
142 E1000_PRC511 = 0x04068, /* Packets RX (255-511 bytes) - R/clr */ 144 E1000_PRC511 = 0x04068, /* Packets Rx (255-511 bytes) - R/clr */
143 E1000_PRC1023 = 0x0406C, /* Packets RX (512-1023 bytes) - R/clr */ 145 E1000_PRC1023 = 0x0406C, /* Packets Rx (512-1023 bytes) - R/clr */
144 E1000_PRC1522 = 0x04070, /* Packets RX (1024-1522 bytes) - R/clr */ 146 E1000_PRC1522 = 0x04070, /* Packets Rx (1024-1522 bytes) - R/clr */
145 E1000_GPRC = 0x04074, /* Good Packets RX Count - R/clr */ 147 E1000_GPRC = 0x04074, /* Good Packets Rx Count - R/clr */
146 E1000_BPRC = 0x04078, /* Broadcast Packets RX Count - R/clr */ 148 E1000_BPRC = 0x04078, /* Broadcast Packets Rx Count - R/clr */
147 E1000_MPRC = 0x0407C, /* Multicast Packets RX Count - R/clr */ 149 E1000_MPRC = 0x0407C, /* Multicast Packets Rx Count - R/clr */
148 E1000_GPTC = 0x04080, /* Good Packets TX Count - R/clr */ 150 E1000_GPTC = 0x04080, /* Good Packets Tx Count - R/clr */
149 E1000_GORCL = 0x04088, /* Good Octets RX Count Low - R/clr */ 151 E1000_GORCL = 0x04088, /* Good Octets Rx Count Low - R/clr */
150 E1000_GORCH = 0x0408C, /* Good Octets RX Count High - R/clr */ 152 E1000_GORCH = 0x0408C, /* Good Octets Rx Count High - R/clr */
151 E1000_GOTCL = 0x04090, /* Good Octets TX Count Low - R/clr */ 153 E1000_GOTCL = 0x04090, /* Good Octets Tx Count Low - R/clr */
152 E1000_GOTCH = 0x04094, /* Good Octets TX Count High - R/clr */ 154 E1000_GOTCH = 0x04094, /* Good Octets Tx Count High - R/clr */
153 E1000_RNBC = 0x040A0, /* RX No Buffers Count - R/clr */ 155 E1000_RNBC = 0x040A0, /* Rx No Buffers Count - R/clr */
154 E1000_RUC = 0x040A4, /* RX Undersize Count - R/clr */ 156 E1000_RUC = 0x040A4, /* Rx Undersize Count - R/clr */
155 E1000_RFC = 0x040A8, /* RX Fragment Count - R/clr */ 157 E1000_RFC = 0x040A8, /* Rx Fragment Count - R/clr */
156 E1000_ROC = 0x040AC, /* RX Oversize Count - R/clr */ 158 E1000_ROC = 0x040AC, /* Rx Oversize Count - R/clr */
157 E1000_RJC = 0x040B0, /* RX Jabber Count - R/clr */ 159 E1000_RJC = 0x040B0, /* Rx Jabber Count - R/clr */
158 E1000_MGTPRC = 0x040B4, /* Management Packets RX Count - R/clr */ 160 E1000_MGTPRC = 0x040B4, /* Management Packets Rx Count - R/clr */
159 E1000_MGTPDC = 0x040B8, /* Management Packets Dropped Count - R/clr */ 161 E1000_MGTPDC = 0x040B8, /* Management Packets Dropped Count - R/clr */
160 E1000_MGTPTC = 0x040BC, /* Management Packets TX Count - R/clr */ 162 E1000_MGTPTC = 0x040BC, /* Management Packets Tx Count - R/clr */
161 E1000_TORL = 0x040C0, /* Total Octets RX Low - R/clr */ 163 E1000_TORL = 0x040C0, /* Total Octets Rx Low - R/clr */
162 E1000_TORH = 0x040C4, /* Total Octets RX High - R/clr */ 164 E1000_TORH = 0x040C4, /* Total Octets Rx High - R/clr */
163 E1000_TOTL = 0x040C8, /* Total Octets TX Low - R/clr */ 165 E1000_TOTL = 0x040C8, /* Total Octets Tx Low - R/clr */
164 E1000_TOTH = 0x040CC, /* Total Octets TX High - R/clr */ 166 E1000_TOTH = 0x040CC, /* Total Octets Tx High - R/clr */
165 E1000_TPR = 0x040D0, /* Total Packets RX - R/clr */ 167 E1000_TPR = 0x040D0, /* Total Packets Rx - R/clr */
166 E1000_TPT = 0x040D4, /* Total Packets TX - R/clr */ 168 E1000_TPT = 0x040D4, /* Total Packets Tx - R/clr */
167 E1000_PTC64 = 0x040D8, /* Packets TX (64 bytes) - R/clr */ 169 E1000_PTC64 = 0x040D8, /* Packets Tx (64 bytes) - R/clr */
168 E1000_PTC127 = 0x040DC, /* Packets TX (65-127 bytes) - R/clr */ 170 E1000_PTC127 = 0x040DC, /* Packets Tx (65-127 bytes) - R/clr */
169 E1000_PTC255 = 0x040E0, /* Packets TX (128-255 bytes) - R/clr */ 171 E1000_PTC255 = 0x040E0, /* Packets Tx (128-255 bytes) - R/clr */
170 E1000_PTC511 = 0x040E4, /* Packets TX (256-511 bytes) - R/clr */ 172 E1000_PTC511 = 0x040E4, /* Packets Tx (256-511 bytes) - R/clr */
171 E1000_PTC1023 = 0x040E8, /* Packets TX (512-1023 bytes) - R/clr */ 173 E1000_PTC1023 = 0x040E8, /* Packets Tx (512-1023 bytes) - R/clr */
172 E1000_PTC1522 = 0x040EC, /* Packets TX (1024-1522 Bytes) - R/clr */ 174 E1000_PTC1522 = 0x040EC, /* Packets Tx (1024-1522 Bytes) - R/clr */
173 E1000_MPTC = 0x040F0, /* Multicast Packets TX Count - R/clr */ 175 E1000_MPTC = 0x040F0, /* Multicast Packets Tx Count - R/clr */
174 E1000_BPTC = 0x040F4, /* Broadcast Packets TX Count - R/clr */ 176 E1000_BPTC = 0x040F4, /* Broadcast Packets Tx Count - R/clr */
175 E1000_TSCTC = 0x040F8, /* TCP Segmentation Context TX - R/clr */ 177 E1000_TSCTC = 0x040F8, /* TCP Segmentation Context Tx - R/clr */
176 E1000_TSCTFC = 0x040FC, /* TCP Segmentation Context TX Fail - R/clr */ 178 E1000_TSCTFC = 0x040FC, /* TCP Segmentation Context Tx Fail - R/clr */
177 E1000_IAC = 0x04100, /* Interrupt Assertion Count */ 179 E1000_IAC = 0x04100, /* Interrupt Assertion Count */
178 E1000_ICRXPTC = 0x04104, /* Irq Cause Rx Packet Timer Expire Count */ 180 E1000_ICRXPTC = 0x04104, /* Irq Cause Rx Packet Timer Expire Count */
179 E1000_ICRXATC = 0x04108, /* Irq Cause Rx Abs Timer Expire Count */ 181 E1000_ICRXATC = 0x04108, /* Irq Cause Rx Abs Timer Expire Count */
@@ -183,7 +185,7 @@ enum e1e_registers {
183 E1000_ICTXQMTC = 0x0411C, /* Irq Cause Tx Queue MinThreshold Count */ 185 E1000_ICTXQMTC = 0x0411C, /* Irq Cause Tx Queue MinThreshold Count */
184 E1000_ICRXDMTC = 0x04120, /* Irq Cause Rx Desc MinThreshold Count */ 186 E1000_ICRXDMTC = 0x04120, /* Irq Cause Rx Desc MinThreshold Count */
185 E1000_ICRXOC = 0x04124, /* Irq Cause Receiver Overrun Count */ 187 E1000_ICRXOC = 0x04124, /* Irq Cause Receiver Overrun Count */
186 E1000_RXCSUM = 0x05000, /* RX Checksum Control - RW */ 188 E1000_RXCSUM = 0x05000, /* Rx Checksum Control - RW */
187 E1000_RFCTL = 0x05008, /* Receive Filter Control */ 189 E1000_RFCTL = 0x05008, /* Receive Filter Control */
188 E1000_MTA = 0x05200, /* Multicast Table Array - RW Array */ 190 E1000_MTA = 0x05200, /* Multicast Table Array - RW Array */
189 E1000_RA = 0x05400, /* Receive Address - RW Array */ 191 E1000_RA = 0x05400, /* Receive Address - RW Array */
@@ -250,8 +252,8 @@ enum e1e_registers {
250#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F 252#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F
251 253
252#define E1000_HICR_EN 0x01 /* Enable bit - RO */ 254#define E1000_HICR_EN 0x01 /* Enable bit - RO */
253#define E1000_HICR_C 0x02 /* Driver sets this bit when done 255/* Driver sets this bit when done to put command in RAM */
254 * to put command in RAM */ 256#define E1000_HICR_C 0x02
255#define E1000_HICR_FW_RESET_ENABLE 0x40 257#define E1000_HICR_FW_RESET_ENABLE 0x40
256#define E1000_HICR_FW_RESET 0x80 258#define E1000_HICR_FW_RESET 0x80
257 259
@@ -400,7 +402,7 @@ enum e1000_rev_polarity{
400 e1000_rev_polarity_undefined = 0xFF 402 e1000_rev_polarity_undefined = 0xFF
401}; 403};
402 404
403enum e1000_fc_mode { 405enum e1000_fc_type {
404 e1000_fc_none = 0, 406 e1000_fc_none = 0,
405 e1000_fc_rx_pause, 407 e1000_fc_rx_pause,
406 e1000_fc_tx_pause, 408 e1000_fc_tx_pause,
@@ -685,8 +687,7 @@ struct e1000_mac_operations {
685 s32 (*get_link_up_info)(struct e1000_hw *, u16 *, u16 *); 687 s32 (*get_link_up_info)(struct e1000_hw *, u16 *, u16 *);
686 s32 (*led_on)(struct e1000_hw *); 688 s32 (*led_on)(struct e1000_hw *);
687 s32 (*led_off)(struct e1000_hw *); 689 s32 (*led_off)(struct e1000_hw *);
688 void (*mc_addr_list_update)(struct e1000_hw *, u8 *, u32, u32, 690 void (*update_mc_addr_list)(struct e1000_hw *, u8 *, u32, u32, u32);
689 u32);
690 s32 (*reset_hw)(struct e1000_hw *); 691 s32 (*reset_hw)(struct e1000_hw *);
691 s32 (*init_hw)(struct e1000_hw *); 692 s32 (*init_hw)(struct e1000_hw *);
692 s32 (*setup_link)(struct e1000_hw *); 693 s32 (*setup_link)(struct e1000_hw *);
@@ -728,16 +729,12 @@ struct e1000_mac_info {
728 u8 perm_addr[6]; 729 u8 perm_addr[6];
729 730
730 enum e1000_mac_type type; 731 enum e1000_mac_type type;
731 enum e1000_fc_mode fc;
732 enum e1000_fc_mode original_fc;
733 732
734 u32 collision_delta; 733 u32 collision_delta;
735 u32 ledctl_default; 734 u32 ledctl_default;
736 u32 ledctl_mode1; 735 u32 ledctl_mode1;
737 u32 ledctl_mode2; 736 u32 ledctl_mode2;
738 u32 max_frame_size;
739 u32 mc_filter_type; 737 u32 mc_filter_type;
740 u32 min_frame_size;
741 u32 tx_packet_delta; 738 u32 tx_packet_delta;
742 u32 txcw; 739 u32 txcw;
743 740
@@ -748,9 +745,6 @@ struct e1000_mac_info {
748 u16 ifs_step_size; 745 u16 ifs_step_size;
749 u16 mta_reg_count; 746 u16 mta_reg_count;
750 u16 rar_entry_count; 747 u16 rar_entry_count;
751 u16 fc_high_water;
752 u16 fc_low_water;
753 u16 fc_pause_time;
754 748
755 u8 forced_speed_duplex; 749 u8 forced_speed_duplex;
756 750
@@ -780,6 +774,8 @@ struct e1000_phy_info {
780 u32 reset_delay_us; /* in usec */ 774 u32 reset_delay_us; /* in usec */
781 u32 revision; 775 u32 revision;
782 776
777 enum e1000_media_type media_type;
778
783 u16 autoneg_advertised; 779 u16 autoneg_advertised;
784 u16 autoneg_mask; 780 u16 autoneg_mask;
785 u16 cable_length; 781 u16 cable_length;
@@ -792,7 +788,7 @@ struct e1000_phy_info {
792 bool is_mdix; 788 bool is_mdix;
793 bool polarity_correction; 789 bool polarity_correction;
794 bool speed_downgraded; 790 bool speed_downgraded;
795 bool wait_for_link; 791 bool autoneg_wait_to_complete;
796}; 792};
797 793
798struct e1000_nvm_info { 794struct e1000_nvm_info {
@@ -817,6 +813,16 @@ struct e1000_bus_info {
817 u16 func; 813 u16 func;
818}; 814};
819 815
816struct e1000_fc_info {
817 u32 high_water; /* Flow control high-water mark */
818 u32 low_water; /* Flow control low-water mark */
819 u16 pause_time; /* Flow control pause timer */
820 bool send_xon; /* Flow control send XON */
821 bool strict_ieee; /* Strict IEEE mode */
822 enum e1000_fc_type type; /* Type of flow control */
823 enum e1000_fc_type original_type;
824};
825
820struct e1000_dev_spec_82571 { 826struct e1000_dev_spec_82571 {
821 bool laa_is_present; 827 bool laa_is_present;
822 bool alt_mac_addr_is_present; 828 bool alt_mac_addr_is_present;
@@ -841,6 +847,7 @@ struct e1000_hw {
841 u8 __iomem *flash_address; 847 u8 __iomem *flash_address;
842 848
843 struct e1000_mac_info mac; 849 struct e1000_mac_info mac;
850 struct e1000_fc_info fc;
844 struct e1000_phy_info phy; 851 struct e1000_phy_info phy;
845 struct e1000_nvm_info nvm; 852 struct e1000_nvm_info nvm;
846 struct e1000_bus_info bus; 853 struct e1000_bus_info bus;
@@ -850,8 +857,6 @@ struct e1000_hw {
850 struct e1000_dev_spec_82571 e82571; 857 struct e1000_dev_spec_82571 e82571;
851 struct e1000_dev_spec_ich8lan ich8lan; 858 struct e1000_dev_spec_ich8lan ich8lan;
852 } dev_spec; 859 } dev_spec;
853
854 enum e1000_media_type media_type;
855}; 860};
856 861
857#ifdef DEBUG 862#ifdef DEBUG
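Taken together, the hw.h hunks relocate state rather than delete it: media_type moves from struct e1000_hw into e1000_phy_info, the flow-control fields (fc/original_fc, fc_high_water, fc_low_water, fc_pause_time) move out of e1000_mac_info into the new e1000_fc_info hung off hw->fc, and phy.wait_for_link is renamed autoneg_wait_to_complete. That is why the .c hunks in this diff switch from hw->media_type and mac->fc to hw->phy.media_type and hw->fc.type. A short illustrative sketch of the new access pattern, mirroring the setup_link hunk further down rather than adding any new driver logic:

static bool link_is_fiber_or_serdes(struct e1000_hw *hw)
{
	return hw->phy.media_type == e1000_media_type_fiber ||
	       hw->phy.media_type == e1000_media_type_internal_serdes;
}

static void apply_default_flow_control(struct e1000_hw *hw)
{
	if (hw->fc.type == e1000_fc_default)	/* was mac->fc */
		hw->fc.type = e1000_fc_full;
	hw->fc.original_type = hw->fc.type;	/* was mac->original_fc */
}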
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
index 0ae39550768d..768485dbb2c6 100644
--- a/drivers/net/e1000e/ich8lan.c
+++ b/drivers/net/e1000e/ich8lan.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel PRO/1000 Linux driver 3 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2007 Intel Corporation. 4 Copyright(c) 1999 - 2008 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -243,8 +243,7 @@ static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
243 u32 sector_end_addr; 243 u32 sector_end_addr;
244 u16 i; 244 u16 i;
245 245
246 /* Can't read flash registers if the register set isn't mapped. 246 /* Can't read flash registers if the register set isn't mapped. */
247 */
248 if (!hw->flash_address) { 247 if (!hw->flash_address) {
249 hw_dbg(hw, "ERROR: Flash registers not mapped\n"); 248 hw_dbg(hw, "ERROR: Flash registers not mapped\n");
250 return -E1000_ERR_CONFIG; 249 return -E1000_ERR_CONFIG;
@@ -254,17 +253,21 @@ static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
254 253
255 gfpreg = er32flash(ICH_FLASH_GFPREG); 254 gfpreg = er32flash(ICH_FLASH_GFPREG);
256 255
257 /* sector_X_addr is a "sector"-aligned address (4096 bytes) 256 /*
257 * sector_X_addr is a "sector"-aligned address (4096 bytes)
258 * Add 1 to sector_end_addr since this sector is included in 258 * Add 1 to sector_end_addr since this sector is included in
259 * the overall size. */ 259 * the overall size.
260 */
260 sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK; 261 sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK;
261 sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1; 262 sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1;
262 263
263 /* flash_base_addr is byte-aligned */ 264 /* flash_base_addr is byte-aligned */
264 nvm->flash_base_addr = sector_base_addr << FLASH_SECTOR_ADDR_SHIFT; 265 nvm->flash_base_addr = sector_base_addr << FLASH_SECTOR_ADDR_SHIFT;
265 266
266 /* find total size of the NVM, then cut in half since the total 267 /*
267 * size represents two separate NVM banks. */ 268 * find total size of the NVM, then cut in half since the total
269 * size represents two separate NVM banks.
270 */
268 nvm->flash_bank_size = (sector_end_addr - sector_base_addr) 271 nvm->flash_bank_size = (sector_end_addr - sector_base_addr)
269 << FLASH_SECTOR_ADDR_SHIFT; 272 << FLASH_SECTOR_ADDR_SHIFT;
270 nvm->flash_bank_size /= 2; 273 nvm->flash_bank_size /= 2;
@@ -295,7 +298,7 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_adapter *adapter)
295 struct e1000_mac_info *mac = &hw->mac; 298 struct e1000_mac_info *mac = &hw->mac;
296 299
297 /* Set media type function pointer */ 300 /* Set media type function pointer */
298 hw->media_type = e1000_media_type_copper; 301 hw->phy.media_type = e1000_media_type_copper;
299 302
300 /* Set mta register count */ 303 /* Set mta register count */
301 mac->mta_reg_count = 32; 304 mac->mta_reg_count = 32;
@@ -313,7 +316,7 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_adapter *adapter)
313 return 0; 316 return 0;
314} 317}
315 318
316static s32 e1000_get_invariants_ich8lan(struct e1000_adapter *adapter) 319static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
317{ 320{
318 struct e1000_hw *hw = &adapter->hw; 321 struct e1000_hw *hw = &adapter->hw;
319 s32 rc; 322 s32 rc;
@@ -450,7 +453,7 @@ static s32 e1000_phy_force_speed_duplex_ich8lan(struct e1000_hw *hw)
450 453
451 udelay(1); 454 udelay(1);
452 455
453 if (phy->wait_for_link) { 456 if (phy->autoneg_wait_to_complete) {
454 hw_dbg(hw, "Waiting for forced speed/duplex link on IFE phy.\n"); 457 hw_dbg(hw, "Waiting for forced speed/duplex link on IFE phy.\n");
455 458
456 ret_val = e1000e_phy_has_link_generic(hw, 459 ret_val = e1000e_phy_has_link_generic(hw,
@@ -496,7 +499,8 @@ static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
496 if (ret_val) 499 if (ret_val)
497 return ret_val; 500 return ret_val;
498 501
499 /* Initialize the PHY from the NVM on ICH platforms. This 502 /*
503 * Initialize the PHY from the NVM on ICH platforms. This
500 * is needed due to an issue where the NVM configuration is 504 * is needed due to an issue where the NVM configuration is
501 * not properly autoloaded after power transitions. 505 * not properly autoloaded after power transitions.
502 * Therefore, after each PHY reset, we will load the 506 * Therefore, after each PHY reset, we will load the
@@ -523,7 +527,8 @@ static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
523 udelay(100); 527 udelay(100);
524 } while ((!data) && --loop); 528 } while ((!data) && --loop);
525 529
526 /* If basic configuration is incomplete before the above loop 530 /*
531 * If basic configuration is incomplete before the above loop
527 * count reaches 0, loading the configuration from NVM will 532 * count reaches 0, loading the configuration from NVM will
528 * leave the PHY in a bad state possibly resulting in no link. 533 * leave the PHY in a bad state possibly resulting in no link.
529 */ 534 */
@@ -536,8 +541,10 @@ static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
536 data &= ~E1000_STATUS_LAN_INIT_DONE; 541 data &= ~E1000_STATUS_LAN_INIT_DONE;
537 ew32(STATUS, data); 542 ew32(STATUS, data);
538 543
539 /* Make sure HW does not configure LCD from PHY 544 /*
540 * extended configuration before SW configuration */ 545 * Make sure HW does not configure LCD from PHY
546 * extended configuration before SW configuration
547 */
541 data = er32(EXTCNF_CTRL); 548 data = er32(EXTCNF_CTRL);
542 if (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE) 549 if (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE)
543 return 0; 550 return 0;
@@ -551,8 +558,7 @@ static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
551 cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK; 558 cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
552 cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT; 559 cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
553 560
554 /* Configure LCD from extended configuration 561 /* Configure LCD from extended configuration region. */
555 * region. */
556 562
557 /* cnf_base_addr is in DWORD */ 563 /* cnf_base_addr is in DWORD */
558 word_addr = (u16)(cnf_base_addr << 1); 564 word_addr = (u16)(cnf_base_addr << 1);
@@ -681,8 +687,8 @@ static s32 e1000_check_polarity_ife_ich8lan(struct e1000_hw *hw)
681 s32 ret_val; 687 s32 ret_val;
682 u16 phy_data, offset, mask; 688 u16 phy_data, offset, mask;
683 689
684 /* Polarity is determined based on the reversal feature 690 /*
685 * being enabled. 691 * Polarity is determined based on the reversal feature being enabled.
686 */ 692 */
687 if (phy->polarity_correction) { 693 if (phy->polarity_correction) {
688 offset = IFE_PHY_EXTENDED_STATUS_CONTROL; 694 offset = IFE_PHY_EXTENDED_STATUS_CONTROL;
@@ -731,8 +737,10 @@ static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
731 phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU; 737 phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU;
732 ew32(PHY_CTRL, phy_ctrl); 738 ew32(PHY_CTRL, phy_ctrl);
733 739
734 /* Call gig speed drop workaround on LPLU before accessing 740 /*
735 * any PHY registers */ 741 * Call gig speed drop workaround on LPLU before accessing
742 * any PHY registers
743 */
736 if ((hw->mac.type == e1000_ich8lan) && 744 if ((hw->mac.type == e1000_ich8lan) &&
737 (hw->phy.type == e1000_phy_igp_3)) 745 (hw->phy.type == e1000_phy_igp_3))
738 e1000e_gig_downshift_workaround_ich8lan(hw); 746 e1000e_gig_downshift_workaround_ich8lan(hw);
@@ -747,30 +755,32 @@ static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
747 phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU; 755 phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU;
748 ew32(PHY_CTRL, phy_ctrl); 756 ew32(PHY_CTRL, phy_ctrl);
749 757
750 /* LPLU and SmartSpeed are mutually exclusive. LPLU is used 758 /*
759 * LPLU and SmartSpeed are mutually exclusive. LPLU is used
751 * during Dx states where the power conservation is most 760 * during Dx states where the power conservation is most
752 * important. During driver activity we should enable 761 * important. During driver activity we should enable
753 * SmartSpeed, so performance is maintained. */ 762 * SmartSpeed, so performance is maintained.
763 */
754 if (phy->smart_speed == e1000_smart_speed_on) { 764 if (phy->smart_speed == e1000_smart_speed_on) {
755 ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, 765 ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
756 &data); 766 &data);
757 if (ret_val) 767 if (ret_val)
758 return ret_val; 768 return ret_val;
759 769
760 data |= IGP01E1000_PSCFR_SMART_SPEED; 770 data |= IGP01E1000_PSCFR_SMART_SPEED;
761 ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, 771 ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
762 data); 772 data);
763 if (ret_val) 773 if (ret_val)
764 return ret_val; 774 return ret_val;
765 } else if (phy->smart_speed == e1000_smart_speed_off) { 775 } else if (phy->smart_speed == e1000_smart_speed_off) {
766 ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, 776 ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
767 &data); 777 &data);
768 if (ret_val) 778 if (ret_val)
769 return ret_val; 779 return ret_val;
770 780
771 data &= ~IGP01E1000_PSCFR_SMART_SPEED; 781 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
772 ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, 782 ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
773 data); 783 data);
774 if (ret_val) 784 if (ret_val)
775 return ret_val; 785 return ret_val;
776 } 786 }
@@ -804,34 +814,32 @@ static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
804 if (!active) { 814 if (!active) {
805 phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU; 815 phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU;
806 ew32(PHY_CTRL, phy_ctrl); 816 ew32(PHY_CTRL, phy_ctrl);
807 /* LPLU and SmartSpeed are mutually exclusive. LPLU is used 817 /*
818 * LPLU and SmartSpeed are mutually exclusive. LPLU is used
808 * during Dx states where the power conservation is most 819 * during Dx states where the power conservation is most
809 * important. During driver activity we should enable 820 * important. During driver activity we should enable
810 * SmartSpeed, so performance is maintained. */ 821 * SmartSpeed, so performance is maintained.
822 */
811 if (phy->smart_speed == e1000_smart_speed_on) { 823 if (phy->smart_speed == e1000_smart_speed_on) {
812 ret_val = e1e_rphy(hw, 824 ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
813 IGP01E1000_PHY_PORT_CONFIG, 825 &data);
814 &data);
815 if (ret_val) 826 if (ret_val)
816 return ret_val; 827 return ret_val;
817 828
818 data |= IGP01E1000_PSCFR_SMART_SPEED; 829 data |= IGP01E1000_PSCFR_SMART_SPEED;
819 ret_val = e1e_wphy(hw, 830 ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
820 IGP01E1000_PHY_PORT_CONFIG, 831 data);
821 data);
822 if (ret_val) 832 if (ret_val)
823 return ret_val; 833 return ret_val;
824 } else if (phy->smart_speed == e1000_smart_speed_off) { 834 } else if (phy->smart_speed == e1000_smart_speed_off) {
825 ret_val = e1e_rphy(hw, 835 ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
826 IGP01E1000_PHY_PORT_CONFIG, 836 &data);
827 &data);
828 if (ret_val) 837 if (ret_val)
829 return ret_val; 838 return ret_val;
830 839
831 data &= ~IGP01E1000_PSCFR_SMART_SPEED; 840 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
832 ret_val = e1e_wphy(hw, 841 ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
833 IGP01E1000_PHY_PORT_CONFIG, 842 data);
834 data);
835 if (ret_val) 843 if (ret_val)
836 return ret_val; 844 return ret_val;
837 } 845 }
@@ -841,23 +849,21 @@ static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
841 phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU; 849 phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU;
842 ew32(PHY_CTRL, phy_ctrl); 850 ew32(PHY_CTRL, phy_ctrl);
843 851
844 /* Call gig speed drop workaround on LPLU before accessing 852 /*
845 * any PHY registers */ 853 * Call gig speed drop workaround on LPLU before accessing
854 * any PHY registers
855 */
846 if ((hw->mac.type == e1000_ich8lan) && 856 if ((hw->mac.type == e1000_ich8lan) &&
847 (hw->phy.type == e1000_phy_igp_3)) 857 (hw->phy.type == e1000_phy_igp_3))
848 e1000e_gig_downshift_workaround_ich8lan(hw); 858 e1000e_gig_downshift_workaround_ich8lan(hw);
849 859
850 /* When LPLU is enabled, we should disable SmartSpeed */ 860 /* When LPLU is enabled, we should disable SmartSpeed */
851 ret_val = e1e_rphy(hw, 861 ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data);
852 IGP01E1000_PHY_PORT_CONFIG,
853 &data);
854 if (ret_val) 862 if (ret_val)
855 return ret_val; 863 return ret_val;
856 864
857 data &= ~IGP01E1000_PSCFR_SMART_SPEED; 865 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
858 ret_val = e1e_wphy(hw, 866 ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data);
859 IGP01E1000_PHY_PORT_CONFIG,
860 data);
861 } 867 }
862 868
863 return 0; 869 return 0;
@@ -944,7 +950,8 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
944 950
945 ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval); 951 ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
946 952
947 /* Either we should have a hardware SPI cycle in progress 953 /*
954 * Either we should have a hardware SPI cycle in progress
948 * bit to check against, in order to start a new cycle or 955 * bit to check against, in order to start a new cycle or
949 * FDONE bit should be changed in the hardware so that it 956 * FDONE bit should be changed in the hardware so that it
950 * is 1 after hardware reset, which can then be used as an 957 * is 1 after hardware reset, which can then be used as an
@@ -953,15 +960,19 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
953 */ 960 */
954 961
955 if (hsfsts.hsf_status.flcinprog == 0) { 962 if (hsfsts.hsf_status.flcinprog == 0) {
956 /* There is no cycle running at present, 963 /*
957 * so we can start a cycle */ 964 * There is no cycle running at present,
958 /* Begin by setting Flash Cycle Done. */ 965 * so we can start a cycle
966 * Begin by setting Flash Cycle Done.
967 */
959 hsfsts.hsf_status.flcdone = 1; 968 hsfsts.hsf_status.flcdone = 1;
960 ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval); 969 ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
961 ret_val = 0; 970 ret_val = 0;
962 } else { 971 } else {
963 /* otherwise poll for sometime so the current 972 /*
964 * cycle has a chance to end before giving up. */ 973 * otherwise poll for sometime so the current
974 * cycle has a chance to end before giving up.
975 */
965 for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) { 976 for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) {
966 hsfsts.regval = __er16flash(hw, ICH_FLASH_HSFSTS); 977 hsfsts.regval = __er16flash(hw, ICH_FLASH_HSFSTS);
967 if (hsfsts.hsf_status.flcinprog == 0) { 978 if (hsfsts.hsf_status.flcinprog == 0) {
@@ -971,8 +982,10 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
971 udelay(1); 982 udelay(1);
972 } 983 }
973 if (ret_val == 0) { 984 if (ret_val == 0) {
974 /* Successful in waiting for previous cycle to timeout, 985 /*
975 * now set the Flash Cycle Done. */ 986 * Successful in waiting for previous cycle to timeout,
987 * now set the Flash Cycle Done.
988 */
976 hsfsts.hsf_status.flcdone = 1; 989 hsfsts.hsf_status.flcdone = 1;
977 ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval); 990 ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
978 } else { 991 } else {
@@ -1077,10 +1090,12 @@ static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
1077 ret_val = e1000_flash_cycle_ich8lan(hw, 1090 ret_val = e1000_flash_cycle_ich8lan(hw,
1078 ICH_FLASH_READ_COMMAND_TIMEOUT); 1091 ICH_FLASH_READ_COMMAND_TIMEOUT);
1079 1092
1080 /* Check if FCERR is set to 1, if set to 1, clear it 1093 /*
1094 * Check if FCERR is set to 1, if set to 1, clear it
1081 * and try the whole sequence a few more times, else 1095 * and try the whole sequence a few more times, else
1082 * read in (shift in) the Flash Data0, the order is 1096 * read in (shift in) the Flash Data0, the order is
1083 * least significant byte first msb to lsb */ 1097 * least significant byte first msb to lsb
1098 */
1084 if (ret_val == 0) { 1099 if (ret_val == 0) {
1085 flash_data = er32flash(ICH_FLASH_FDATA0); 1100 flash_data = er32flash(ICH_FLASH_FDATA0);
1086 if (size == 1) { 1101 if (size == 1) {
@@ -1090,7 +1105,8 @@ static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
1090 } 1105 }
1091 break; 1106 break;
1092 } else { 1107 } else {
1093 /* If we've gotten here, then things are probably 1108 /*
1109 * If we've gotten here, then things are probably
1094 * completely hosed, but if the error condition is 1110 * completely hosed, but if the error condition is
1095 * detected, it won't hurt to give it another try... 1111 * detected, it won't hurt to give it another try...
1096 * ICH_FLASH_CYCLE_REPEAT_COUNT times. 1112 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
@@ -1168,18 +1184,20 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
1168 1184
1169 ret_val = e1000e_update_nvm_checksum_generic(hw); 1185 ret_val = e1000e_update_nvm_checksum_generic(hw);
1170 if (ret_val) 1186 if (ret_val)
1171 return ret_val;; 1187 return ret_val;
1172 1188
1173 if (nvm->type != e1000_nvm_flash_sw) 1189 if (nvm->type != e1000_nvm_flash_sw)
1174 return ret_val;; 1190 return ret_val;
1175 1191
1176 ret_val = e1000_acquire_swflag_ich8lan(hw); 1192 ret_val = e1000_acquire_swflag_ich8lan(hw);
1177 if (ret_val) 1193 if (ret_val)
1178 return ret_val;; 1194 return ret_val;
1179 1195
1180 /* We're writing to the opposite bank so if we're on bank 1, 1196 /*
1197 * We're writing to the opposite bank so if we're on bank 1,
1181 * write to bank 0 etc. We also need to erase the segment that 1198 * write to bank 0 etc. We also need to erase the segment that
1182 * is going to be written */ 1199 * is going to be written
1200 */
1183 if (!(er32(EECD) & E1000_EECD_SEC1VAL)) { 1201 if (!(er32(EECD) & E1000_EECD_SEC1VAL)) {
1184 new_bank_offset = nvm->flash_bank_size; 1202 new_bank_offset = nvm->flash_bank_size;
1185 old_bank_offset = 0; 1203 old_bank_offset = 0;
@@ -1191,9 +1209,11 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
1191 } 1209 }
1192 1210
1193 for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) { 1211 for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) {
1194 /* Determine whether to write the value stored 1212 /*
1213 * Determine whether to write the value stored
1195 * in the other NVM bank or a modified value stored 1214 * in the other NVM bank or a modified value stored
1196 * in the shadow RAM */ 1215 * in the shadow RAM
1216 */
1197 if (dev_spec->shadow_ram[i].modified) { 1217 if (dev_spec->shadow_ram[i].modified) {
1198 data = dev_spec->shadow_ram[i].value; 1218 data = dev_spec->shadow_ram[i].value;
1199 } else { 1219 } else {
@@ -1202,12 +1222,14 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
1202 &data); 1222 &data);
1203 } 1223 }
1204 1224
1205 /* If the word is 0x13, then make sure the signature bits 1225 /*
1226 * If the word is 0x13, then make sure the signature bits
1206 * (15:14) are 11b until the commit has completed. 1227 * (15:14) are 11b until the commit has completed.
1207 * This will allow us to write 10b which indicates the 1228 * This will allow us to write 10b which indicates the
1208 * signature is valid. We want to do this after the write 1229 * signature is valid. We want to do this after the write
1209 * has completed so that we don't mark the segment valid 1230 * has completed so that we don't mark the segment valid
1210 * while the write is still in progress */ 1231 * while the write is still in progress
1232 */
1211 if (i == E1000_ICH_NVM_SIG_WORD) 1233 if (i == E1000_ICH_NVM_SIG_WORD)
1212 data |= E1000_ICH_NVM_SIG_MASK; 1234 data |= E1000_ICH_NVM_SIG_MASK;
1213 1235
@@ -1230,18 +1252,22 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
1230 break; 1252 break;
1231 } 1253 }
1232 1254
1233 /* Don't bother writing the segment valid bits if sector 1255 /*
1234 * programming failed. */ 1256 * Don't bother writing the segment valid bits if sector
1257 * programming failed.
1258 */
1235 if (ret_val) { 1259 if (ret_val) {
1236 hw_dbg(hw, "Flash commit failed.\n"); 1260 hw_dbg(hw, "Flash commit failed.\n");
1237 e1000_release_swflag_ich8lan(hw); 1261 e1000_release_swflag_ich8lan(hw);
1238 return ret_val; 1262 return ret_val;
1239 } 1263 }
1240 1264
1241 /* Finally validate the new segment by setting bit 15:14 1265 /*
1266 * Finally validate the new segment by setting bit 15:14
1242 * to 10b in word 0x13 , this can be done without an 1267 * to 10b in word 0x13 , this can be done without an
1243 * erase as well since these bits are 11 to start with 1268 * erase as well since these bits are 11 to start with
1244 * and we need to change bit 14 to 0b */ 1269 * and we need to change bit 14 to 0b
1270 */
1245 act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD; 1271 act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
1246 e1000_read_flash_word_ich8lan(hw, act_offset, &data); 1272 e1000_read_flash_word_ich8lan(hw, act_offset, &data);
1247 data &= 0xBFFF; 1273 data &= 0xBFFF;
@@ -1253,10 +1279,12 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
1253 return ret_val; 1279 return ret_val;
1254 } 1280 }
1255 1281
1256 /* And invalidate the previously valid segment by setting 1282 /*
1283 * And invalidate the previously valid segment by setting
1257 * its signature word (0x13) high_byte to 0b. This can be 1284 * its signature word (0x13) high_byte to 0b. This can be
1258 * done without an erase because flash erase sets all bits 1285 * done without an erase because flash erase sets all bits
1259 * to 1's. We can write 1's to 0's without an erase */ 1286 * to 1's. We can write 1's to 0's without an erase
1287 */
1260 act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1; 1288 act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
1261 ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0); 1289 ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0);
1262 if (ret_val) { 1290 if (ret_val) {
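The two signature updates above rely on a property of the ICH NOR flash spelled out in the reformatted comments: bits can be written from 1 to 0 without an erase, but only an erase sets them back to 1. Validating the freshly written bank therefore needs only bit 14 of word 0x13 cleared (11b becomes 10b, the data &= 0xBFFF above), and invalidating the old bank needs only zeros written over its signature byte. A tiny standalone illustration of that bit arithmetic:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t sig_word = 0xFFFF;	/* erased flash reads all ones, so
					 * signature bits 15:14 start as 11b */

	sig_word &= 0xBFFF;		/* clear bit 14 only: 11b -> 10b,
					 * allowed without an erase */

	printf("bit15=%u bit14=%u -> new bank marked valid\n",
	       !!(sig_word & 0x8000), !!(sig_word & 0x4000));
	return 0;
}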
@@ -1272,7 +1300,8 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
1272 1300
1273 e1000_release_swflag_ich8lan(hw); 1301 e1000_release_swflag_ich8lan(hw);
1274 1302
1275 /* Reload the EEPROM, or else modifications will not appear 1303 /*
1304 * Reload the EEPROM, or else modifications will not appear
1276 * until after the next adapter reset. 1305 * until after the next adapter reset.
1277 */ 1306 */
1278 e1000e_reload_nvm(hw); 1307 e1000e_reload_nvm(hw);
@@ -1294,7 +1323,8 @@ static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
1294 s32 ret_val; 1323 s32 ret_val;
1295 u16 data; 1324 u16 data;
1296 1325
1297 /* Read 0x19 and check bit 6. If this bit is 0, the checksum 1326 /*
1327 * Read 0x19 and check bit 6. If this bit is 0, the checksum
1298 * needs to be fixed. This bit is an indication that the NVM 1328 * needs to be fixed. This bit is an indication that the NVM
1299 * was prepared by OEM software and did not calculate the 1329 * was prepared by OEM software and did not calculate the
1300 * checksum...a likely scenario. 1330 * checksum...a likely scenario.
@@ -1364,14 +1394,17 @@ static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
1364 1394
1365 ew32flash(ICH_FLASH_FDATA0, flash_data); 1395 ew32flash(ICH_FLASH_FDATA0, flash_data);
1366 1396
1367 /* check if FCERR is set to 1 , if set to 1, clear it 1397 /*
1368 * and try the whole sequence a few more times else done */ 1398 * check if FCERR is set to 1 , if set to 1, clear it
1399 * and try the whole sequence a few more times else done
1400 */
1369 ret_val = e1000_flash_cycle_ich8lan(hw, 1401 ret_val = e1000_flash_cycle_ich8lan(hw,
1370 ICH_FLASH_WRITE_COMMAND_TIMEOUT); 1402 ICH_FLASH_WRITE_COMMAND_TIMEOUT);
1371 if (!ret_val) 1403 if (!ret_val)
1372 break; 1404 break;
1373 1405
1374 /* If we're here, then things are most likely 1406 /*
1407 * If we're here, then things are most likely
1375 * completely hosed, but if the error condition 1408 * completely hosed, but if the error condition
1376 * is detected, it won't hurt to give it another 1409 * is detected, it won't hurt to give it another
1377 * try...ICH_FLASH_CYCLE_REPEAT_COUNT times. 1410 * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
@@ -1462,9 +1495,10 @@ static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
1462 1495
1463 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); 1496 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
1464 1497
1465 /* Determine HW Sector size: Read BERASE bits of hw flash status 1498 /*
1466 * register */ 1499 * Determine HW Sector size: Read BERASE bits of hw flash status
1467 /* 00: The Hw sector is 256 bytes, hence we need to erase 16 1500 * register
1501 * 00: The Hw sector is 256 bytes, hence we need to erase 16
1468 * consecutive sectors. The start index for the nth Hw sector 1502 * consecutive sectors. The start index for the nth Hw sector
1469 * can be calculated as = bank * 4096 + n * 256 1503 * can be calculated as = bank * 4096 + n * 256
1470 * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector. 1504 * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector.
@@ -1511,13 +1545,16 @@ static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
1511 if (ret_val) 1545 if (ret_val)
1512 return ret_val; 1546 return ret_val;
1513 1547
1514 /* Write a value 11 (block Erase) in Flash 1548 /*
1515 * Cycle field in hw flash control */ 1549 * Write a value 11 (block Erase) in Flash
1550 * Cycle field in hw flash control
1551 */
1516 hsflctl.regval = er16flash(ICH_FLASH_HSFCTL); 1552 hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
1517 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE; 1553 hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE;
1518 ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval); 1554 ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
1519 1555
1520 /* Write the last 24 bits of an index within the 1556 /*
1557 * Write the last 24 bits of an index within the
1521 * block into Flash Linear address field in Flash 1558 * block into Flash Linear address field in Flash
1522 * Address. 1559 * Address.
1523 */ 1560 */
@@ -1529,13 +1566,14 @@ static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
1529 if (ret_val == 0) 1566 if (ret_val == 0)
1530 break; 1567 break;
1531 1568
1532 /* Check if FCERR is set to 1. If 1, 1569 /*
1570 * Check if FCERR is set to 1. If 1,
1533 * clear it and try the whole sequence 1571 * clear it and try the whole sequence
1534 * a few more times else Done */ 1572 * a few more times else Done
1573 */
1535 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); 1574 hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
1536 if (hsfsts.hsf_status.flcerr == 1) 1575 if (hsfsts.hsf_status.flcerr == 1)
1537 /* repeat for some time before 1576 /* repeat for some time before giving up */
1538 * giving up */
1539 continue; 1577 continue;
1540 else if (hsfsts.hsf_status.flcdone == 0) 1578 else if (hsfsts.hsf_status.flcdone == 0)
1541 return ret_val; 1579 return ret_val;
@@ -1585,7 +1623,8 @@ static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw)
1585 1623
1586 ret_val = e1000e_get_bus_info_pcie(hw); 1624 ret_val = e1000e_get_bus_info_pcie(hw);
1587 1625
1588 /* ICH devices are "PCI Express"-ish. They have 1626 /*
1627 * ICH devices are "PCI Express"-ish. They have
1589 * a configuration space, but do not contain 1628 * a configuration space, but do not contain
1590 * PCI Express Capability registers, so bus width 1629 * PCI Express Capability registers, so bus width
1591 * must be hardcoded. 1630 * must be hardcoded.
@@ -1608,7 +1647,8 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
1608 u32 ctrl, icr, kab; 1647 u32 ctrl, icr, kab;
1609 s32 ret_val; 1648 s32 ret_val;
1610 1649
1611 /* Prevent the PCI-E bus from sticking if there is no TLP connection 1650 /*
1651 * Prevent the PCI-E bus from sticking if there is no TLP connection
1612 * on the last TLP read/write transaction when MAC is reset. 1652 * on the last TLP read/write transaction when MAC is reset.
1613 */ 1653 */
1614 ret_val = e1000e_disable_pcie_master(hw); 1654 ret_val = e1000e_disable_pcie_master(hw);
@@ -1619,7 +1659,8 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
1619 hw_dbg(hw, "Masking off all interrupts\n"); 1659 hw_dbg(hw, "Masking off all interrupts\n");
1620 ew32(IMC, 0xffffffff); 1660 ew32(IMC, 0xffffffff);
1621 1661
1622 /* Disable the Transmit and Receive units. Then delay to allow 1662 /*
1663 * Disable the Transmit and Receive units. Then delay to allow
1623 * any pending transactions to complete before we hit the MAC 1664 * any pending transactions to complete before we hit the MAC
1624 * with the global reset. 1665 * with the global reset.
1625 */ 1666 */
@@ -1640,7 +1681,8 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
1640 ctrl = er32(CTRL); 1681 ctrl = er32(CTRL);
1641 1682
1642 if (!e1000_check_reset_block(hw)) { 1683 if (!e1000_check_reset_block(hw)) {
1643 /* PHY HW reset requires MAC CORE reset at the same 1684 /*
1685 * PHY HW reset requires MAC CORE reset at the same
1644 * time to make sure the interface between MAC and the 1686 * time to make sure the interface between MAC and the
1645 * external PHY is reset. 1687 * external PHY is reset.
1646 */ 1688 */
@@ -1711,21 +1753,23 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
1711 ret_val = e1000_setup_link_ich8lan(hw); 1753 ret_val = e1000_setup_link_ich8lan(hw);
1712 1754
1713 /* Set the transmit descriptor write-back policy for both queues */ 1755 /* Set the transmit descriptor write-back policy for both queues */
1714 txdctl = er32(TXDCTL); 1756 txdctl = er32(TXDCTL(0));
1715 txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) | 1757 txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) |
1716 E1000_TXDCTL_FULL_TX_DESC_WB; 1758 E1000_TXDCTL_FULL_TX_DESC_WB;
1717 txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) | 1759 txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) |
1718 E1000_TXDCTL_MAX_TX_DESC_PREFETCH; 1760 E1000_TXDCTL_MAX_TX_DESC_PREFETCH;
1719 ew32(TXDCTL, txdctl); 1761 ew32(TXDCTL(0), txdctl);
1720 txdctl = er32(TXDCTL1); 1762 txdctl = er32(TXDCTL(1));
1721 txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) | 1763 txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) |
1722 E1000_TXDCTL_FULL_TX_DESC_WB; 1764 E1000_TXDCTL_FULL_TX_DESC_WB;
1723 txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) | 1765 txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) |
1724 E1000_TXDCTL_MAX_TX_DESC_PREFETCH; 1766 E1000_TXDCTL_MAX_TX_DESC_PREFETCH;
1725 ew32(TXDCTL1, txdctl); 1767 ew32(TXDCTL(1), txdctl);
1726 1768
1727 /* ICH8 has opposite polarity of no_snoop bits. 1769 /*
1728 * By default, we should use snoop behavior. */ 1770 * ICH8 has opposite polarity of no_snoop bits.
1771 * By default, we should use snoop behavior.
1772 */
1729 if (mac->type == e1000_ich8lan) 1773 if (mac->type == e1000_ich8lan)
1730 snoop = PCIE_ICH8_SNOOP_ALL; 1774 snoop = PCIE_ICH8_SNOOP_ALL;
1731 else 1775 else
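The write-back policy programming earlier in this hunk is a plain read-modify-write on each queue's TXDCTL: clear the WTHRESH and PTHRESH fields, then OR in the full-write-back and max-prefetch values. A hedged sketch of that pattern; the mask and value names come from the hunk, while the update_field() helper is purely illustrative and not part of the driver.

static inline u32 update_field(u32 reg, u32 field_mask, u32 new_val)
{
	return (reg & ~field_mask) | new_val;	/* clear field, set new value */
}

/* usage, mirroring the hunk for queue 0:
 *
 *	txdctl = er32(TXDCTL(0));
 *	txdctl = update_field(txdctl, E1000_TXDCTL_WTHRESH,
 *			      E1000_TXDCTL_FULL_TX_DESC_WB);
 *	txdctl = update_field(txdctl, E1000_TXDCTL_PTHRESH,
 *			      E1000_TXDCTL_MAX_TX_DESC_PREFETCH);
 *	ew32(TXDCTL(0), txdctl);
 */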
@@ -1736,7 +1780,8 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
1736 ctrl_ext |= E1000_CTRL_EXT_RO_DIS; 1780 ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
1737 ew32(CTRL_EXT, ctrl_ext); 1781 ew32(CTRL_EXT, ctrl_ext);
1738 1782
1739 /* Clear all of the statistics registers (clear on read). It is 1783 /*
1784 * Clear all of the statistics registers (clear on read). It is
1740 * important that we do this after we have tried to establish link 1785 * important that we do this after we have tried to establish link
1741 * because the symbol error count will increment wildly if there 1786 * because the symbol error count will increment wildly if there
1742 * is no link. 1787 * is no link.
@@ -1762,30 +1807,30 @@ static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
1762 ew32(CTRL_EXT, reg); 1807 ew32(CTRL_EXT, reg);
1763 1808
1764 /* Transmit Descriptor Control 0 */ 1809 /* Transmit Descriptor Control 0 */
1765 reg = er32(TXDCTL); 1810 reg = er32(TXDCTL(0));
1766 reg |= (1 << 22); 1811 reg |= (1 << 22);
1767 ew32(TXDCTL, reg); 1812 ew32(TXDCTL(0), reg);
1768 1813
1769 /* Transmit Descriptor Control 1 */ 1814 /* Transmit Descriptor Control 1 */
1770 reg = er32(TXDCTL1); 1815 reg = er32(TXDCTL(1));
1771 reg |= (1 << 22); 1816 reg |= (1 << 22);
1772 ew32(TXDCTL1, reg); 1817 ew32(TXDCTL(1), reg);
1773 1818
1774 /* Transmit Arbitration Control 0 */ 1819 /* Transmit Arbitration Control 0 */
1775 reg = er32(TARC0); 1820 reg = er32(TARC(0));
1776 if (hw->mac.type == e1000_ich8lan) 1821 if (hw->mac.type == e1000_ich8lan)
1777 reg |= (1 << 28) | (1 << 29); 1822 reg |= (1 << 28) | (1 << 29);
1778 reg |= (1 << 23) | (1 << 24) | (1 << 26) | (1 << 27); 1823 reg |= (1 << 23) | (1 << 24) | (1 << 26) | (1 << 27);
1779 ew32(TARC0, reg); 1824 ew32(TARC(0), reg);
1780 1825
1781 /* Transmit Arbitration Control 1 */ 1826 /* Transmit Arbitration Control 1 */
1782 reg = er32(TARC1); 1827 reg = er32(TARC(1));
1783 if (er32(TCTL) & E1000_TCTL_MULR) 1828 if (er32(TCTL) & E1000_TCTL_MULR)
1784 reg &= ~(1 << 28); 1829 reg &= ~(1 << 28);
1785 else 1830 else
1786 reg |= (1 << 28); 1831 reg |= (1 << 28);
1787 reg |= (1 << 24) | (1 << 26) | (1 << 30); 1832 reg |= (1 << 24) | (1 << 26) | (1 << 30);
1788 ew32(TARC1, reg); 1833 ew32(TARC(1), reg);
1789 1834
1790 /* Device Status */ 1835 /* Device Status */
1791 if (hw->mac.type == e1000_ich8lan) { 1836 if (hw->mac.type == e1000_ich8lan) {
@@ -1807,29 +1852,29 @@ static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
1807 **/ 1852 **/
1808static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw) 1853static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
1809{ 1854{
1810 struct e1000_mac_info *mac = &hw->mac;
1811 s32 ret_val; 1855 s32 ret_val;
1812 1856
1813 if (e1000_check_reset_block(hw)) 1857 if (e1000_check_reset_block(hw))
1814 return 0; 1858 return 0;
1815 1859
1816 /* ICH parts do not have a word in the NVM to determine 1860 /*
1861 * ICH parts do not have a word in the NVM to determine
1817 * the default flow control setting, so we explicitly 1862 * the default flow control setting, so we explicitly
1818 * set it to full. 1863 * set it to full.
1819 */ 1864 */
1820 if (mac->fc == e1000_fc_default) 1865 if (hw->fc.type == e1000_fc_default)
1821 mac->fc = e1000_fc_full; 1866 hw->fc.type = e1000_fc_full;
1822 1867
1823 mac->original_fc = mac->fc; 1868 hw->fc.original_type = hw->fc.type;
1824 1869
1825 hw_dbg(hw, "After fix-ups FlowControl is now = %x\n", mac->fc); 1870 hw_dbg(hw, "After fix-ups FlowControl is now = %x\n", hw->fc.type);
1826 1871
1827 /* Continue to configure the copper link. */ 1872 /* Continue to configure the copper link. */
1828 ret_val = e1000_setup_copper_link_ich8lan(hw); 1873 ret_val = e1000_setup_copper_link_ich8lan(hw);
1829 if (ret_val) 1874 if (ret_val)
1830 return ret_val; 1875 return ret_val;
1831 1876
1832 ew32(FCTTV, mac->fc_pause_time); 1877 ew32(FCTTV, hw->fc.pause_time);
1833 1878
1834 return e1000e_set_fc_watermarks(hw); 1879 return e1000e_set_fc_watermarks(hw);
1835} 1880}
@@ -1853,9 +1898,11 @@ static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
1853 ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); 1898 ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
1854 ew32(CTRL, ctrl); 1899 ew32(CTRL, ctrl);
1855 1900
1856 /* Set the mac to wait the maximum time between each iteration 1901 /*
1902 * Set the mac to wait the maximum time between each iteration
1857 * and increase the max iterations when polling the phy; 1903 * and increase the max iterations when polling the phy;
1858 * this fixes erroneous timeouts at 10Mbps. */ 1904 * this fixes erroneous timeouts at 10Mbps.
1905 */
1859 ret_val = e1000e_write_kmrn_reg(hw, GG82563_REG(0x34, 4), 0xFFFF); 1906 ret_val = e1000e_write_kmrn_reg(hw, GG82563_REG(0x34, 4), 0xFFFF);
1860 if (ret_val) 1907 if (ret_val)
1861 return ret_val; 1908 return ret_val;
@@ -1882,7 +1929,7 @@ static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
1882 * @speed: pointer to store current link speed 1929 * @speed: pointer to store current link speed
1883 * @duplex: pointer to store the current link duplex 1930 * @duplex: pointer to store the current link duplex
1884 * 1931 *
1885 * Calls the generic get_speed_and_duplex to retreive the current link 1932 * Calls the generic get_speed_and_duplex to retrieve the current link
1886 * information and then calls the Kumeran lock loss workaround for links at 1933 * information and then calls the Kumeran lock loss workaround for links at
1887 * gigabit speeds. 1934 * gigabit speeds.
1888 **/ 1935 **/
@@ -1930,9 +1977,11 @@ static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
1930 if (!dev_spec->kmrn_lock_loss_workaround_enabled) 1977 if (!dev_spec->kmrn_lock_loss_workaround_enabled)
1931 return 0; 1978 return 0;
1932 1979
1933 /* Make sure link is up before proceeding. If not just return. 1980 /*
1981 * Make sure link is up before proceeding. If not just return.
1934 * Attempting this while link is negotiating fouled up link 1982 * Attempting this while link is negotiating fouled up link
1935 * stability */ 1983 * stability
1984 */
1936 ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link); 1985 ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
1937 if (!link) 1986 if (!link)
1938 return 0; 1987 return 0;
@@ -1961,8 +2010,10 @@ static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
1961 E1000_PHY_CTRL_NOND0A_GBE_DISABLE); 2010 E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
1962 ew32(PHY_CTRL, phy_ctrl); 2011 ew32(PHY_CTRL, phy_ctrl);
1963 2012
1964 /* Call gig speed drop workaround on Gig disable before accessing 2013 /*
1965 * any PHY registers */ 2014 * Call gig speed drop workaround on Gig disable before accessing
2015 * any PHY registers
2016 */
1966 e1000e_gig_downshift_workaround_ich8lan(hw); 2017 e1000e_gig_downshift_workaround_ich8lan(hw);
1967 2018
1968 /* unable to acquire PCS lock */ 2019 /* unable to acquire PCS lock */
@@ -1970,7 +2021,7 @@ static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
1970} 2021}
1971 2022
1972/** 2023/**
1973 * e1000_set_kmrn_lock_loss_workaound_ich8lan - Set Kumeran workaround state 2024 * e1000_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround state
1974 * @hw: pointer to the HW structure 2025 * @hw: pointer to the HW structure
1975 * @state: boolean value used to set the current Kumeran workaround state 2026 * @state: boolean value used to set the current Kumeran workaround state
1976 * 2027 *
@@ -2017,8 +2068,10 @@ void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
2017 E1000_PHY_CTRL_NOND0A_GBE_DISABLE); 2068 E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
2018 ew32(PHY_CTRL, reg); 2069 ew32(PHY_CTRL, reg);
2019 2070
2020 /* Call gig speed drop workaround on Gig disable before 2071 /*
2021 * accessing any PHY registers */ 2072 * Call gig speed drop workaround on Gig disable before
2073 * accessing any PHY registers
2074 */
2022 if (hw->mac.type == e1000_ich8lan) 2075 if (hw->mac.type == e1000_ich8lan)
2023 e1000e_gig_downshift_workaround_ich8lan(hw); 2076 e1000e_gig_downshift_workaround_ich8lan(hw);
2024 2077
@@ -2158,7 +2211,7 @@ static struct e1000_mac_operations ich8_mac_ops = {
2158 .get_link_up_info = e1000_get_link_up_info_ich8lan, 2211 .get_link_up_info = e1000_get_link_up_info_ich8lan,
2159 .led_on = e1000_led_on_ich8lan, 2212 .led_on = e1000_led_on_ich8lan,
2160 .led_off = e1000_led_off_ich8lan, 2213 .led_off = e1000_led_off_ich8lan,
2161 .mc_addr_list_update = e1000e_mc_addr_list_update_generic, 2214 .update_mc_addr_list = e1000e_update_mc_addr_list_generic,
2162 .reset_hw = e1000_reset_hw_ich8lan, 2215 .reset_hw = e1000_reset_hw_ich8lan,
2163 .init_hw = e1000_init_hw_ich8lan, 2216 .init_hw = e1000_init_hw_ich8lan,
2164 .setup_link = e1000_setup_link_ich8lan, 2217 .setup_link = e1000_setup_link_ich8lan,
@@ -2200,7 +2253,7 @@ struct e1000_info e1000_ich8_info = {
2200 | FLAG_HAS_FLASH 2253 | FLAG_HAS_FLASH
2201 | FLAG_APME_IN_WUC, 2254 | FLAG_APME_IN_WUC,
2202 .pba = 8, 2255 .pba = 8,
2203 .get_invariants = e1000_get_invariants_ich8lan, 2256 .get_variants = e1000_get_variants_ich8lan,
2204 .mac_ops = &ich8_mac_ops, 2257 .mac_ops = &ich8_mac_ops,
2205 .phy_ops = &ich8_phy_ops, 2258 .phy_ops = &ich8_phy_ops,
2206 .nvm_ops = &ich8_nvm_ops, 2259 .nvm_ops = &ich8_nvm_ops,
@@ -2217,7 +2270,7 @@ struct e1000_info e1000_ich9_info = {
2217 | FLAG_HAS_FLASH 2270 | FLAG_HAS_FLASH
2218 | FLAG_APME_IN_WUC, 2271 | FLAG_APME_IN_WUC,
2219 .pba = 10, 2272 .pba = 10,
2220 .get_invariants = e1000_get_invariants_ich8lan, 2273 .get_variants = e1000_get_variants_ich8lan,
2221 .mac_ops = &ich8_mac_ops, 2274 .mac_ops = &ich8_mac_ops,
2222 .phy_ops = &ich8_phy_ops, 2275 .phy_ops = &ich8_phy_ops,
2223 .nvm_ops = &ich8_nvm_ops, 2276 .nvm_ops = &ich8_nvm_ops,
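
A recurring change in the ich8lan.c hunks above is the switch from fixed register names (TXDCTL, TXDCTL1, TARC0, TARC1) to indexed macros (TXDCTL(0), TXDCTL(1), TARC(0), TARC(1)). As a rough standalone illustration of that pattern — the base offset and stride below are placeholders, not the driver's real register map — the macro form derives the per-queue offset from an index:

    #include <stdint.h>
    #include <stdio.h>

    /* Placeholder values for illustration only; the real offsets live in
     * the driver's register definitions. */
    #define TXDCTL_BASE   0x03828u
    #define TXDCTL_STRIDE 0x00100u
    #define TXDCTL(n)     (TXDCTL_BASE + (uint32_t)(n) * TXDCTL_STRIDE)

    int main(void)
    {
            /* One parameterized name replaces TXDCTL vs. TXDCTL1. */
            printf("TXDCTL(0) = 0x%05x\n", (unsigned)TXDCTL(0));
            printf("TXDCTL(1) = 0x%05x\n", (unsigned)TXDCTL(1));
            return 0;
    }
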
diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c
index 95f75a43c9f9..f1f4e9dfd0a0 100644
--- a/drivers/net/e1000e/lib.c
+++ b/drivers/net/e1000e/lib.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel PRO/1000 Linux driver 3 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2007 Intel Corporation. 4 Copyright(c) 1999 - 2008 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -43,8 +43,8 @@ enum e1000_mng_mode {
43 43
44#define E1000_FACTPS_MNGCG 0x20000000 44#define E1000_FACTPS_MNGCG 0x20000000
45 45
46#define E1000_IAMT_SIGNATURE 0x544D4149 /* Intel(R) Active Management 46/* Intel(R) Active Management Technology signature */
47 * Technology signature */ 47#define E1000_IAMT_SIGNATURE 0x544D4149
48 48
49/** 49/**
50 * e1000e_get_bus_info_pcie - Get PCIe bus information 50 * e1000e_get_bus_info_pcie - Get PCIe bus information
@@ -142,7 +142,8 @@ void e1000e_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
142{ 142{
143 u32 rar_low, rar_high; 143 u32 rar_low, rar_high;
144 144
145 /* HW expects these in little endian so we reverse the byte order 145 /*
146 * HW expects these in little endian so we reverse the byte order
146 * from network order (big endian) to little endian 147 * from network order (big endian) to little endian
147 */ 148 */
148 rar_low = ((u32) addr[0] | 149 rar_low = ((u32) addr[0] |
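
The e1000e_rar_set hunk above only reflows the comment, but the byte packing it describes is easy to model in isolation: the six network-order address bytes are folded into two little-endian register words. A minimal userspace sketch (the driver additionally marks the entry valid and performs the register writes, both omitted here):

    #include <stdint.h>
    #include <stdio.h>

    /* Pack a 6-byte MAC (network byte order) into the two little-endian
     * register words, mirroring the rar_low/rar_high construction above. */
    static void pack_rar(const uint8_t addr[6], uint32_t *rar_low,
                         uint32_t *rar_high)
    {
            *rar_low  = (uint32_t)addr[0] |
                        ((uint32_t)addr[1] << 8) |
                        ((uint32_t)addr[2] << 16) |
                        ((uint32_t)addr[3] << 24);
            *rar_high = (uint32_t)addr[4] | ((uint32_t)addr[5] << 8);
    }

    int main(void)
    {
            const uint8_t mac[6] = { 0x00, 0x1b, 0x21, 0xaa, 0xbb, 0xcc };
            uint32_t lo, hi;

            pack_rar(mac, &lo, &hi);
            printf("rar_low = 0x%08x, rar_high = 0x%08x\n",
                   (unsigned)lo, (unsigned)hi);
            return 0;
    }
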
@@ -171,7 +172,8 @@ static void e1000_mta_set(struct e1000_hw *hw, u32 hash_value)
171{ 172{
172 u32 hash_bit, hash_reg, mta; 173 u32 hash_bit, hash_reg, mta;
173 174
174 /* The MTA is a register array of 32-bit registers. It is 175 /*
176 * The MTA is a register array of 32-bit registers. It is
175 * treated like an array of (32*mta_reg_count) bits. We want to 177 * treated like an array of (32*mta_reg_count) bits. We want to
176 * set bit BitArray[hash_value]. So we figure out what register 178 * set bit BitArray[hash_value]. So we figure out what register
177 * the bit is in, read it, OR in the new bit, then write 179 * the bit is in, read it, OR in the new bit, then write
@@ -208,12 +210,15 @@ static u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
208 /* Register count multiplied by bits per register */ 210 /* Register count multiplied by bits per register */
209 hash_mask = (hw->mac.mta_reg_count * 32) - 1; 211 hash_mask = (hw->mac.mta_reg_count * 32) - 1;
210 212
211 /* For a mc_filter_type of 0, bit_shift is the number of left-shifts 213 /*
212 * where 0xFF would still fall within the hash mask. */ 214 * For a mc_filter_type of 0, bit_shift is the number of left-shifts
215 * where 0xFF would still fall within the hash mask.
216 */
213 while (hash_mask >> bit_shift != 0xFF) 217 while (hash_mask >> bit_shift != 0xFF)
214 bit_shift++; 218 bit_shift++;
215 219
216 /* The portion of the address that is used for the hash table 220 /*
221 * The portion of the address that is used for the hash table
217 * is determined by the mc_filter_type setting. 222 * is determined by the mc_filter_type setting.
218 * The algorithm is such that there is a total of 8 bits of shifting. 223 * The algorithm is such that there is a total of 8 bits of shifting.
219 * The bit_shift for a mc_filter_type of 0 represents the number of 224 * The bit_shift for a mc_filter_type of 0 represents the number of
@@ -224,8 +229,8 @@ static u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
224 * cases are a variation of this algorithm...essentially raising the 229 * cases are a variation of this algorithm...essentially raising the
225 * number of bits to shift mc_addr[5] left, while still keeping the 230 * number of bits to shift mc_addr[5] left, while still keeping the
226 * 8-bit shifting total. 231 * 8-bit shifting total.
227 */ 232 *
228 /* For example, given the following Destination MAC Address and an 233 * For example, given the following Destination MAC Address and an
229 * mta register count of 128 (thus a 4096-bit vector and 0xFFF mask), 234 * mta register count of 128 (thus a 4096-bit vector and 0xFFF mask),
230 * we can see that the bit_shift for case 0 is 4. These are the hash 235 * we can see that the bit_shift for case 0 is 4. These are the hash
231 * values resulting from each mc_filter_type... 236 * values resulting from each mc_filter_type...
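
The reflowed comment above describes the MTA hashing scheme in prose; the following standalone sketch models the mc_filter_type 0 case it walks through. It is an approximation built from that description (register count, hash mask, bit_shift search, 8-bit total shift), not a copy of the driver function:

    #include <stdint.h>
    #include <stdio.h>

    /* Model of the bit-vector hash described above, mc_filter_type 0 only.
     * mta_reg_count 32-bit registers form a (mta_reg_count * 32)-bit vector. */
    static uint32_t hash_mc_addr(const uint8_t mc_addr[6], uint32_t mta_reg_count)
    {
            uint32_t hash_mask = (mta_reg_count * 32) - 1;
            unsigned int bit_shift = 0;

            /* bit_shift = number of left-shifts where 0xFF still fits the mask */
            while ((hash_mask >> bit_shift) != 0xFF)
                    bit_shift++;

            /* Combine the upper address bytes with 8 bits of shifting total. */
            return hash_mask & (((uint32_t)mc_addr[4] >> (8 - bit_shift)) |
                                ((uint32_t)mc_addr[5] << bit_shift));
    }

    int main(void)
    {
            const uint8_t mac[6] = { 0x01, 0x00, 0x5e, 0x12, 0x34, 0x56 };

            /* 128 registers -> 4096-bit vector and 0xFFF mask, as in the
             * comment's worked example; filter type 0 gives bit_shift = 4. */
            printf("hash = 0x%03x\n", (unsigned)hash_mc_addr(mac, 128));
            return 0;
    }
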
@@ -260,7 +265,7 @@ static u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
260} 265}
261 266
262/** 267/**
263 * e1000e_mc_addr_list_update_generic - Update Multicast addresses 268 * e1000e_update_mc_addr_list_generic - Update Multicast addresses
264 * @hw: pointer to the HW structure 269 * @hw: pointer to the HW structure
265 * @mc_addr_list: array of multicast addresses to program 270 * @mc_addr_list: array of multicast addresses to program
266 * @mc_addr_count: number of multicast addresses to program 271 * @mc_addr_count: number of multicast addresses to program
@@ -272,14 +277,15 @@ static u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
272 * The parameter rar_count will usually be hw->mac.rar_entry_count 277 * The parameter rar_count will usually be hw->mac.rar_entry_count
273 * unless there are workarounds that change this. 278 * unless there are workarounds that change this.
274 **/ 279 **/
275void e1000e_mc_addr_list_update_generic(struct e1000_hw *hw, 280void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw,
276 u8 *mc_addr_list, u32 mc_addr_count, 281 u8 *mc_addr_list, u32 mc_addr_count,
277 u32 rar_used_count, u32 rar_count) 282 u32 rar_used_count, u32 rar_count)
278{ 283{
279 u32 hash_value; 284 u32 hash_value;
280 u32 i; 285 u32 i;
281 286
282 /* Load the first set of multicast addresses into the exact 287 /*
288 * Load the first set of multicast addresses into the exact
283 * filters (RAR). If there are not enough to fill the RAR 289 * filters (RAR). If there are not enough to fill the RAR
284 * array, clear the filters. 290 * array, clear the filters.
285 */ 291 */
@@ -375,7 +381,8 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
375 s32 ret_val; 381 s32 ret_val;
376 bool link; 382 bool link;
377 383
378 /* We only want to go out to the PHY registers to see if Auto-Neg 384 /*
385 * We only want to go out to the PHY registers to see if Auto-Neg
379 * has completed and/or if our link status has changed. The 386 * has completed and/or if our link status has changed. The
380 * get_link_status flag is set upon receiving a Link Status 387 * get_link_status flag is set upon receiving a Link Status
381 * Change or Rx Sequence Error interrupt. 388 * Change or Rx Sequence Error interrupt.
@@ -383,7 +390,8 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
383 if (!mac->get_link_status) 390 if (!mac->get_link_status)
384 return 0; 391 return 0;
385 392
386 /* First we want to see if the MII Status Register reports 393 /*
394 * First we want to see if the MII Status Register reports
387 * link. If so, then we want to get the current speed/duplex 395 * link. If so, then we want to get the current speed/duplex
388 * of the PHY. 396 * of the PHY.
389 */ 397 */
@@ -396,11 +404,14 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
396 404
397 mac->get_link_status = 0; 405 mac->get_link_status = 0;
398 406
399 /* Check if there was DownShift, must be checked 407 /*
400 * immediately after link-up */ 408 * Check if there was DownShift, must be checked
409 * immediately after link-up
410 */
401 e1000e_check_downshift(hw); 411 e1000e_check_downshift(hw);
402 412
403 /* If we are forcing speed/duplex, then we simply return since 413 /*
414 * If we are forcing speed/duplex, then we simply return since
404 * we have already determined whether we have link or not. 415 * we have already determined whether we have link or not.
405 */ 416 */
406 if (!mac->autoneg) { 417 if (!mac->autoneg) {
@@ -408,13 +419,15 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
408 return ret_val; 419 return ret_val;
409 } 420 }
410 421
411 /* Auto-Neg is enabled. Auto Speed Detection takes care 422 /*
423 * Auto-Neg is enabled. Auto Speed Detection takes care
412 * of MAC speed/duplex configuration. So we only need to 424 * of MAC speed/duplex configuration. So we only need to
413 * configure Collision Distance in the MAC. 425 * configure Collision Distance in the MAC.
414 */ 426 */
415 e1000e_config_collision_dist(hw); 427 e1000e_config_collision_dist(hw);
416 428
417 /* Configure Flow Control now that Auto-Neg has completed. 429 /*
430 * Configure Flow Control now that Auto-Neg has completed.
418 * First, we need to restore the desired flow control 431 * First, we need to restore the desired flow control
419 * settings because we may have had to re-autoneg with a 432 * settings because we may have had to re-autoneg with a
420 * different link partner. 433 * different link partner.
@@ -446,7 +459,8 @@ s32 e1000e_check_for_fiber_link(struct e1000_hw *hw)
446 status = er32(STATUS); 459 status = er32(STATUS);
447 rxcw = er32(RXCW); 460 rxcw = er32(RXCW);
448 461
449 /* If we don't have link (auto-negotiation failed or link partner 462 /*
463 * If we don't have link (auto-negotiation failed or link partner
450 * cannot auto-negotiate), the cable is plugged in (we have signal), 464 * cannot auto-negotiate), the cable is plugged in (we have signal),
451 * and our link partner is not trying to auto-negotiate with us (we 465 * and our link partner is not trying to auto-negotiate with us (we
452 * are receiving idles or data), we need to force link up. We also 466 * are receiving idles or data), we need to force link up. We also
@@ -477,7 +491,8 @@ s32 e1000e_check_for_fiber_link(struct e1000_hw *hw)
477 return ret_val; 491 return ret_val;
478 } 492 }
479 } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { 493 } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
480 /* If we are forcing link and we are receiving /C/ ordered 494 /*
495 * If we are forcing link and we are receiving /C/ ordered
481 * sets, re-enable auto-negotiation in the TXCW register 496 * sets, re-enable auto-negotiation in the TXCW register
482 * and disable forced link in the Device Control register 497 * and disable forced link in the Device Control register
483 * in an attempt to auto-negotiate with our link partner. 498 * in an attempt to auto-negotiate with our link partner.
@@ -511,7 +526,8 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw)
511 status = er32(STATUS); 526 status = er32(STATUS);
512 rxcw = er32(RXCW); 527 rxcw = er32(RXCW);
513 528
514 /* If we don't have link (auto-negotiation failed or link partner 529 /*
530 * If we don't have link (auto-negotiation failed or link partner
515 * cannot auto-negotiate), and our link partner is not trying to 531 * cannot auto-negotiate), and our link partner is not trying to
516 * auto-negotiate with us (we are receiving idles or data), 532 * auto-negotiate with us (we are receiving idles or data),
517 * we need to force link up. We also need to give auto-negotiation 533 * we need to force link up. We also need to give auto-negotiation
@@ -540,7 +556,8 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw)
540 return ret_val; 556 return ret_val;
541 } 557 }
542 } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { 558 } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
543 /* If we are forcing link and we are receiving /C/ ordered 559 /*
560 * If we are forcing link and we are receiving /C/ ordered
544 * sets, re-enable auto-negotiation in the TXCW register 561 * sets, re-enable auto-negotiation in the TXCW register
545 * and disable forced link in the Device Control register 562 * and disable forced link in the Device Control register
546 * in an attempt to auto-negotiate with our link partner. 563 * in an attempt to auto-negotiate with our link partner.
@@ -551,7 +568,8 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw)
551 568
552 mac->serdes_has_link = 1; 569 mac->serdes_has_link = 1;
553 } else if (!(E1000_TXCW_ANE & er32(TXCW))) { 570 } else if (!(E1000_TXCW_ANE & er32(TXCW))) {
554 /* If we force link for non-auto-negotiation switch, check 571 /*
572 * If we force link for non-auto-negotiation switch, check
555 * link status based on MAC synchronization for internal 573 * link status based on MAC synchronization for internal
556 * serdes media type. 574 * serdes media type.
557 */ 575 */
@@ -585,11 +603,11 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw)
585 **/ 603 **/
586static s32 e1000_set_default_fc_generic(struct e1000_hw *hw) 604static s32 e1000_set_default_fc_generic(struct e1000_hw *hw)
587{ 605{
588 struct e1000_mac_info *mac = &hw->mac;
589 s32 ret_val; 606 s32 ret_val;
590 u16 nvm_data; 607 u16 nvm_data;
591 608
592 /* Read and store word 0x0F of the EEPROM. This word contains bits 609 /*
610 * Read and store word 0x0F of the EEPROM. This word contains bits
593 * that determine the hardware's default PAUSE (flow control) mode, 611 * that determine the hardware's default PAUSE (flow control) mode,
594 * a bit that determines whether the HW defaults to enabling or 612 * a bit that determines whether the HW defaults to enabling or
595 * disabling auto-negotiation, and the direction of the 613 * disabling auto-negotiation, and the direction of the
@@ -605,12 +623,12 @@ static s32 e1000_set_default_fc_generic(struct e1000_hw *hw)
605 } 623 }
606 624
607 if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == 0) 625 if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == 0)
608 mac->fc = e1000_fc_none; 626 hw->fc.type = e1000_fc_none;
609 else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == 627 else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) ==
610 NVM_WORD0F_ASM_DIR) 628 NVM_WORD0F_ASM_DIR)
611 mac->fc = e1000_fc_tx_pause; 629 hw->fc.type = e1000_fc_tx_pause;
612 else 630 else
613 mac->fc = e1000_fc_full; 631 hw->fc.type = e1000_fc_full;
614 632
615 return 0; 633 return 0;
616} 634}
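
The hunk above reads a two-bit PAUSE field from NVM word 0x0F and maps it onto the new hw->fc.type field. A userspace sketch of that decode, using stand-in bit masks (the real NVM_WORD0F_* values are defined in the driver's headers):

    #include <stdint.h>
    #include <stdio.h>

    enum fc_type { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

    /* Stand-in masks for illustration; the driver defines the real ones. */
    #define WORD0F_PAUSE_MASK 0x3000u
    #define WORD0F_ASM_DIR    0x2000u

    static enum fc_type default_fc_from_nvm(uint16_t nvm_data)
    {
            if ((nvm_data & WORD0F_PAUSE_MASK) == 0)
                    return FC_NONE;
            if ((nvm_data & WORD0F_PAUSE_MASK) == WORD0F_ASM_DIR)
                    return FC_TX_PAUSE;
            return FC_FULL;
    }

    int main(void)
    {
            printf("0x0000 -> %d, 0x2000 -> %d, 0x3000 -> %d\n",
                   default_fc_from_nvm(0x0000),
                   default_fc_from_nvm(0x2000),
                   default_fc_from_nvm(0x3000));
            return 0;
    }
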
@@ -630,7 +648,8 @@ s32 e1000e_setup_link(struct e1000_hw *hw)
630 struct e1000_mac_info *mac = &hw->mac; 648 struct e1000_mac_info *mac = &hw->mac;
631 s32 ret_val; 649 s32 ret_val;
632 650
633 /* In the case of the phy reset being blocked, we already have a link. 651 /*
652 * In the case of the phy reset being blocked, we already have a link.
634 * We do not need to set it up again. 653 * We do not need to set it up again.
635 */ 654 */
636 if (e1000_check_reset_block(hw)) 655 if (e1000_check_reset_block(hw))
@@ -640,26 +659,28 @@ s32 e1000e_setup_link(struct e1000_hw *hw)
640 * If flow control is set to default, set flow control based on 659 * If flow control is set to default, set flow control based on
641 * the EEPROM flow control settings. 660 * the EEPROM flow control settings.
642 */ 661 */
643 if (mac->fc == e1000_fc_default) { 662 if (hw->fc.type == e1000_fc_default) {
644 ret_val = e1000_set_default_fc_generic(hw); 663 ret_val = e1000_set_default_fc_generic(hw);
645 if (ret_val) 664 if (ret_val)
646 return ret_val; 665 return ret_val;
647 } 666 }
648 667
649 /* We want to save off the original Flow Control configuration just 668 /*
669 * We want to save off the original Flow Control configuration just
650 * in case we get disconnected and then reconnected into a different 670 * in case we get disconnected and then reconnected into a different
651 * hub or switch with different Flow Control capabilities. 671 * hub or switch with different Flow Control capabilities.
652 */ 672 */
653 mac->original_fc = mac->fc; 673 hw->fc.original_type = hw->fc.type;
654 674
655 hw_dbg(hw, "After fix-ups FlowControl is now = %x\n", mac->fc); 675 hw_dbg(hw, "After fix-ups FlowControl is now = %x\n", hw->fc.type);
656 676
657 /* Call the necessary media_type subroutine to configure the link. */ 677 /* Call the necessary media_type subroutine to configure the link. */
658 ret_val = mac->ops.setup_physical_interface(hw); 678 ret_val = mac->ops.setup_physical_interface(hw);
659 if (ret_val) 679 if (ret_val)
660 return ret_val; 680 return ret_val;
661 681
662 /* Initialize the flow control address, type, and PAUSE timer 682 /*
683 * Initialize the flow control address, type, and PAUSE timer
663 * registers to their default values. This is done even if flow 684 * registers to their default values. This is done even if flow
664 * control is disabled, because it does not hurt anything to 685 * control is disabled, because it does not hurt anything to
665 * initialize these registers. 686 * initialize these registers.
@@ -669,7 +690,7 @@ s32 e1000e_setup_link(struct e1000_hw *hw)
669 ew32(FCAH, FLOW_CONTROL_ADDRESS_HIGH); 690 ew32(FCAH, FLOW_CONTROL_ADDRESS_HIGH);
670 ew32(FCAL, FLOW_CONTROL_ADDRESS_LOW); 691 ew32(FCAL, FLOW_CONTROL_ADDRESS_LOW);
671 692
672 ew32(FCTTV, mac->fc_pause_time); 693 ew32(FCTTV, hw->fc.pause_time);
673 694
674 return e1000e_set_fc_watermarks(hw); 695 return e1000e_set_fc_watermarks(hw);
675} 696}
@@ -686,7 +707,8 @@ static s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw)
686 struct e1000_mac_info *mac = &hw->mac; 707 struct e1000_mac_info *mac = &hw->mac;
687 u32 txcw; 708 u32 txcw;
688 709
689 /* Check for a software override of the flow control settings, and 710 /*
711 * Check for a software override of the flow control settings, and
690 * setup the device accordingly. If auto-negotiation is enabled, then 712 * setup the device accordingly. If auto-negotiation is enabled, then
691 * software will have to set the "PAUSE" bits to the correct value in 713 * software will have to set the "PAUSE" bits to the correct value in
692 * the Transmit Config Word Register (TXCW) and re-start auto- 714 * the Transmit Config Word Register (TXCW) and re-start auto-
@@ -700,31 +722,34 @@ static s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw)
700 * but not send pause frames). 722 * but not send pause frames).
701 * 2: Tx flow control is enabled (we can send pause frames but we 723 * 2: Tx flow control is enabled (we can send pause frames but we
702 * do not support receiving pause frames). 724 * do not support receiving pause frames).
703 * 3: Both Rx and TX flow control (symmetric) are enabled. 725 * 3: Both Rx and Tx flow control (symmetric) are enabled.
704 */ 726 */
705 switch (mac->fc) { 727 switch (hw->fc.type) {
706 case e1000_fc_none: 728 case e1000_fc_none:
707 /* Flow control completely disabled by a software over-ride. */ 729 /* Flow control completely disabled by a software over-ride. */
708 txcw = (E1000_TXCW_ANE | E1000_TXCW_FD); 730 txcw = (E1000_TXCW_ANE | E1000_TXCW_FD);
709 break; 731 break;
710 case e1000_fc_rx_pause: 732 case e1000_fc_rx_pause:
711 /* RX Flow control is enabled and TX Flow control is disabled 733 /*
734 * Rx Flow control is enabled and Tx Flow control is disabled
712 * by a software over-ride. Since there really isn't a way to 735 * by a software over-ride. Since there really isn't a way to
713 * advertise that we are capable of RX Pause ONLY, we will 736 * advertise that we are capable of Rx Pause ONLY, we will
714 * advertise that we support both symmetric and asymmetric RX 737 * advertise that we support both symmetric and asymmetric Rx
715 * PAUSE. Later, we will disable the adapter's ability to send 738 * PAUSE. Later, we will disable the adapter's ability to send
716 * PAUSE frames. 739 * PAUSE frames.
717 */ 740 */
718 txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); 741 txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
719 break; 742 break;
720 case e1000_fc_tx_pause: 743 case e1000_fc_tx_pause:
721 /* TX Flow control is enabled, and RX Flow control is disabled, 744 /*
745 * Tx Flow control is enabled, and Rx Flow control is disabled,
722 * by a software over-ride. 746 * by a software over-ride.
723 */ 747 */
724 txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR); 748 txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR);
725 break; 749 break;
726 case e1000_fc_full: 750 case e1000_fc_full:
727 /* Flow control (both RX and TX) is enabled by a software 751 /*
752 * Flow control (both Rx and Tx) is enabled by a software
728 * over-ride. 753 * over-ride.
729 */ 754 */
730 txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); 755 txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
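
The commit_fc_settings hunk above builds the TXCW advertisement from the flow-control type: both PAUSE bits for e1000_fc_rx_pause and e1000_fc_full (Rx-only cannot be advertised by itself), ASM_DIR alone for e1000_fc_tx_pause, and neither for e1000_fc_none. A compact model of that mapping, with illustrative bit positions rather than the real E1000_TXCW_* definitions:

    #include <stdint.h>
    #include <stdio.h>

    enum fc_type { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

    /* Illustrative bit positions only. */
    #define TXCW_ANE        (1u << 31)
    #define TXCW_FD         (1u << 5)
    #define TXCW_PAUSE      (1u << 7)
    #define TXCW_ASM_DIR    (1u << 8)
    #define TXCW_PAUSE_MASK (TXCW_PAUSE | TXCW_ASM_DIR)

    static uint32_t txcw_for_fc(enum fc_type fc)
    {
            uint32_t txcw = TXCW_ANE | TXCW_FD; /* always autoneg, full duplex */

            switch (fc) {
            case FC_NONE:
                    break;                      /* advertise no PAUSE ability */
            case FC_RX_PAUSE:
            case FC_FULL:
                    txcw |= TXCW_PAUSE_MASK;    /* symmetric + asymmetric */
                    break;
            case FC_TX_PAUSE:
                    txcw |= TXCW_ASM_DIR;       /* asymmetric (Tx-only) */
                    break;
            }
            return txcw;
    }

    int main(void)
    {
            printf("rx_pause -> 0x%08x, tx_pause -> 0x%08x\n",
                   (unsigned)txcw_for_fc(FC_RX_PAUSE),
                   (unsigned)txcw_for_fc(FC_TX_PAUSE));
            return 0;
    }
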
@@ -754,7 +779,8 @@ static s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw)
754 u32 i, status; 779 u32 i, status;
755 s32 ret_val; 780 s32 ret_val;
756 781
757 /* If we have a signal (the cable is plugged in, or assumed true for 782 /*
783 * If we have a signal (the cable is plugged in, or assumed true for
758 * serdes media) then poll for a "Link-Up" indication in the Device 784 * serdes media) then poll for a "Link-Up" indication in the Device
759 * Status Register. Time-out if a link isn't seen in 500 milliseconds 785 * Status Register. Time-out if a link isn't seen in 500 milliseconds
760 * seconds (Auto-negotiation should complete in less than 500 786 * seconds (Auto-negotiation should complete in less than 500
@@ -769,7 +795,8 @@ static s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw)
769 if (i == FIBER_LINK_UP_LIMIT) { 795 if (i == FIBER_LINK_UP_LIMIT) {
770 hw_dbg(hw, "Never got a valid link from auto-neg!!!\n"); 796 hw_dbg(hw, "Never got a valid link from auto-neg!!!\n");
771 mac->autoneg_failed = 1; 797 mac->autoneg_failed = 1;
772 /* AutoNeg failed to achieve a link, so we'll call 798 /*
799 * AutoNeg failed to achieve a link, so we'll call
773 * mac->check_for_link. This routine will force the 800 * mac->check_for_link. This routine will force the
774 * link up if we detect a signal. This will allow us to 801 * link up if we detect a signal. This will allow us to
775 * communicate with non-autonegotiating link partners. 802 * communicate with non-autonegotiating link partners.
@@ -811,7 +838,8 @@ s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw)
811 if (ret_val) 838 if (ret_val)
812 return ret_val; 839 return ret_val;
813 840
814 /* Since auto-negotiation is enabled, take the link out of reset (the 841 /*
842 * Since auto-negotiation is enabled, take the link out of reset (the
815 * link will be in reset, because we previously reset the chip). This 843 * link will be in reset, because we previously reset the chip). This
816 * will restart auto-negotiation. If auto-negotiation is successful 844 * will restart auto-negotiation. If auto-negotiation is successful
817 * then the link-up status bit will be set and the flow control enable 845 * then the link-up status bit will be set and the flow control enable
@@ -823,11 +851,12 @@ s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw)
823 e1e_flush(); 851 e1e_flush();
824 msleep(1); 852 msleep(1);
825 853
826 /* For these adapters, the SW defineable pin 1 is set when the optics 854 /*
855 * For these adapters, the SW definable pin 1 is set when the optics
827 * detect a signal. If we have a signal, then poll for a "Link-Up" 856 * detect a signal. If we have a signal, then poll for a "Link-Up"
828 * indication. 857 * indication.
829 */ 858 */
830 if (hw->media_type == e1000_media_type_internal_serdes || 859 if (hw->phy.media_type == e1000_media_type_internal_serdes ||
831 (er32(CTRL) & E1000_CTRL_SWDPIN1)) { 860 (er32(CTRL) & E1000_CTRL_SWDPIN1)) {
832 ret_val = e1000_poll_fiber_serdes_link_generic(hw); 861 ret_val = e1000_poll_fiber_serdes_link_generic(hw);
833 } else { 862 } else {
@@ -864,27 +893,28 @@ void e1000e_config_collision_dist(struct e1000_hw *hw)
864 * 893 *
865 * Sets the flow control high/low threshold (watermark) registers. If 894 * Sets the flow control high/low threshold (watermark) registers. If
866 * flow control XON frame transmission is enabled, then set XON frame 895 * flow control XON frame transmission is enabled, then set XON frame
867 * tansmission as well. 896 * transmission as well.
868 **/ 897 **/
869s32 e1000e_set_fc_watermarks(struct e1000_hw *hw) 898s32 e1000e_set_fc_watermarks(struct e1000_hw *hw)
870{ 899{
871 struct e1000_mac_info *mac = &hw->mac;
872 u32 fcrtl = 0, fcrth = 0; 900 u32 fcrtl = 0, fcrth = 0;
873 901
874 /* Set the flow control receive threshold registers. Normally, 902 /*
903 * Set the flow control receive threshold registers. Normally,
875 * these registers will be set to a default threshold that may be 904 * these registers will be set to a default threshold that may be
876 * adjusted later by the driver's runtime code. However, if the 905 * adjusted later by the driver's runtime code. However, if the
877 * ability to transmit pause frames is not enabled, then these 906 * ability to transmit pause frames is not enabled, then these
878 * registers will be set to 0. 907 * registers will be set to 0.
879 */ 908 */
880 if (mac->fc & e1000_fc_tx_pause) { 909 if (hw->fc.type & e1000_fc_tx_pause) {
881 /* We need to set up the Receive Threshold high and low water 910 /*
911 * We need to set up the Receive Threshold high and low water
882 * marks as well as (optionally) enabling the transmission of 912 * marks as well as (optionally) enabling the transmission of
883 * XON frames. 913 * XON frames.
884 */ 914 */
885 fcrtl = mac->fc_low_water; 915 fcrtl = hw->fc.low_water;
886 fcrtl |= E1000_FCRTL_XONE; 916 fcrtl |= E1000_FCRTL_XONE;
887 fcrth = mac->fc_high_water; 917 fcrth = hw->fc.high_water;
888 } 918 }
889 ew32(FCRTL, fcrtl); 919 ew32(FCRTL, fcrtl);
890 ew32(FCRTH, fcrth); 920 ew32(FCRTH, fcrth);
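
e1000e_set_fc_watermarks above programs the receive-threshold registers only when the Tx-pause capability bit is set in hw->fc.type, and also sets the XON-enable bit in the low watermark. A trimmed-down model of that decision (register writes replaced by returning the two values; the bit constants are stand-ins):

    #include <stdint.h>
    #include <stdio.h>

    #define FC_TX_PAUSE_BIT 0x2u            /* stand-in: "can send PAUSE frames" */
    #define FCRTL_XONE      0x80000000u     /* stand-in XON-enable bit */

    struct watermarks { uint32_t fcrtl, fcrth; };

    static struct watermarks fc_watermarks(uint32_t fc_type,
                                           uint32_t low_water, uint32_t high_water)
    {
            struct watermarks wm = { 0, 0 };  /* no Tx pause -> both stay 0 */

            if (fc_type & FC_TX_PAUSE_BIT) {
                    wm.fcrtl = low_water | FCRTL_XONE;
                    wm.fcrth = high_water;
            }
            return wm;
    }

    int main(void)
    {
            struct watermarks wm = fc_watermarks(FC_TX_PAUSE_BIT, 0x1000, 0x2000);

            printf("FCRTL = 0x%08x, FCRTH = 0x%08x\n",
                   (unsigned)wm.fcrtl, (unsigned)wm.fcrth);
            return 0;
    }
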
@@ -904,18 +934,18 @@ s32 e1000e_set_fc_watermarks(struct e1000_hw *hw)
904 **/ 934 **/
905s32 e1000e_force_mac_fc(struct e1000_hw *hw) 935s32 e1000e_force_mac_fc(struct e1000_hw *hw)
906{ 936{
907 struct e1000_mac_info *mac = &hw->mac;
908 u32 ctrl; 937 u32 ctrl;
909 938
910 ctrl = er32(CTRL); 939 ctrl = er32(CTRL);
911 940
912 /* Because we didn't get link via the internal auto-negotiation 941 /*
942 * Because we didn't get link via the internal auto-negotiation
913 * mechanism (we either forced link or we got link via PHY 943 * mechanism (we either forced link or we got link via PHY
914 * auto-neg), we have to manually enable/disable transmit an 944 * auto-neg), we have to manually enable/disable transmit an
915 * receive flow control. 945 * receive flow control.
916 * 946 *
917 * The "Case" statement below enables/disable flow control 947 * The "Case" statement below enables/disable flow control
918 * according to the "mac->fc" parameter. 948 * according to the "hw->fc.type" parameter.
919 * 949 *
920 * The possible values of the "fc" parameter are: 950 * The possible values of the "fc" parameter are:
921 * 0: Flow control is completely disabled 951 * 0: Flow control is completely disabled
@@ -923,12 +953,12 @@ s32 e1000e_force_mac_fc(struct e1000_hw *hw)
923 * frames but not send pause frames). 953 * frames but not send pause frames).
924 * 2: Tx flow control is enabled (we can send pause frames 954 * 2: Tx flow control is enabled (we can send pause frames
925 * frames but we do not receive pause frames). 955 * frames but we do not receive pause frames).
926 * 3: Both Rx and TX flow control (symmetric) is enabled. 956 * 3: Both Rx and Tx flow control (symmetric) is enabled.
927 * other: No other values should be possible at this point. 957 * other: No other values should be possible at this point.
928 */ 958 */
929 hw_dbg(hw, "mac->fc = %u\n", mac->fc); 959 hw_dbg(hw, "hw->fc.type = %u\n", hw->fc.type);
930 960
931 switch (mac->fc) { 961 switch (hw->fc.type) {
932 case e1000_fc_none: 962 case e1000_fc_none:
933 ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE)); 963 ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE));
934 break; 964 break;
@@ -970,16 +1000,17 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
970 u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg; 1000 u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg;
971 u16 speed, duplex; 1001 u16 speed, duplex;
972 1002
973 /* Check for the case where we have fiber media and auto-neg failed 1003 /*
1004 * Check for the case where we have fiber media and auto-neg failed
974 * so we had to force link. In this case, we need to force the 1005 * so we had to force link. In this case, we need to force the
975 * configuration of the MAC to match the "fc" parameter. 1006 * configuration of the MAC to match the "fc" parameter.
976 */ 1007 */
977 if (mac->autoneg_failed) { 1008 if (mac->autoneg_failed) {
978 if (hw->media_type == e1000_media_type_fiber || 1009 if (hw->phy.media_type == e1000_media_type_fiber ||
979 hw->media_type == e1000_media_type_internal_serdes) 1010 hw->phy.media_type == e1000_media_type_internal_serdes)
980 ret_val = e1000e_force_mac_fc(hw); 1011 ret_val = e1000e_force_mac_fc(hw);
981 } else { 1012 } else {
982 if (hw->media_type == e1000_media_type_copper) 1013 if (hw->phy.media_type == e1000_media_type_copper)
983 ret_val = e1000e_force_mac_fc(hw); 1014 ret_val = e1000e_force_mac_fc(hw);
984 } 1015 }
985 1016
@@ -988,13 +1019,15 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
988 return ret_val; 1019 return ret_val;
989 } 1020 }
990 1021
991 /* Check for the case where we have copper media and auto-neg is 1022 /*
1023 * Check for the case where we have copper media and auto-neg is
992 * enabled. In this case, we need to check and see if Auto-Neg 1024 * enabled. In this case, we need to check and see if Auto-Neg
993 * has completed, and if so, how the PHY and link partner has 1025 * has completed, and if so, how the PHY and link partner has
994 * flow control configured. 1026 * flow control configured.
995 */ 1027 */
996 if ((hw->media_type == e1000_media_type_copper) && mac->autoneg) { 1028 if ((hw->phy.media_type == e1000_media_type_copper) && mac->autoneg) {
997 /* Read the MII Status Register and check to see if AutoNeg 1029 /*
1030 * Read the MII Status Register and check to see if AutoNeg
998 * has completed. We read this twice because this reg has 1031 * has completed. We read this twice because this reg has
999 * some "sticky" (latched) bits. 1032 * some "sticky" (latched) bits.
1000 */ 1033 */
@@ -1011,7 +1044,8 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
1011 return ret_val; 1044 return ret_val;
1012 } 1045 }
1013 1046
1014 /* The AutoNeg process has completed, so we now need to 1047 /*
1048 * The AutoNeg process has completed, so we now need to
1015 * read both the Auto Negotiation Advertisement 1049 * read both the Auto Negotiation Advertisement
1016 * Register (Address 4) and the Auto_Negotiation Base 1050 * Register (Address 4) and the Auto_Negotiation Base
1017 * Page Ability Register (Address 5) to determine how 1051 * Page Ability Register (Address 5) to determine how
@@ -1024,7 +1058,8 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
1024 if (ret_val) 1058 if (ret_val)
1025 return ret_val; 1059 return ret_val;
1026 1060
1027 /* Two bits in the Auto Negotiation Advertisement Register 1061 /*
1062 * Two bits in the Auto Negotiation Advertisement Register
1028 * (Address 4) and two bits in the Auto Negotiation Base 1063 * (Address 4) and two bits in the Auto Negotiation Base
1029 * Page Ability Register (Address 5) determine flow control 1064 * Page Ability Register (Address 5) determine flow control
1030 * for both the PHY and the link partner. The following 1065 * for both the PHY and the link partner. The following
@@ -1045,8 +1080,8 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
1045 * 1 | 1 | 0 | 0 | e1000_fc_none 1080 * 1 | 1 | 0 | 0 | e1000_fc_none
1046 * 1 | 1 | 0 | 1 | e1000_fc_rx_pause 1081 * 1 | 1 | 0 | 1 | e1000_fc_rx_pause
1047 * 1082 *
1048 */ 1083 *
1049 /* Are both PAUSE bits set to 1? If so, this implies 1084 * Are both PAUSE bits set to 1? If so, this implies
1050 * Symmetric Flow Control is enabled at both ends. The 1085 * Symmetric Flow Control is enabled at both ends. The
1051 * ASM_DIR bits are irrelevant per the spec. 1086 * ASM_DIR bits are irrelevant per the spec.
1052 * 1087 *
@@ -1060,22 +1095,24 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
1060 */ 1095 */
1061 if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && 1096 if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
1062 (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) { 1097 (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
1063 /* Now we need to check if the user selected RX ONLY 1098 /*
1099 * Now we need to check if the user selected Rx ONLY
1064 * of pause frames. In this case, we had to advertise 1100 * of pause frames. In this case, we had to advertise
1065 * FULL flow control because we could not advertise RX 1101 * FULL flow control because we could not advertise Rx
1066 * ONLY. Hence, we must now check to see if we need to 1102 * ONLY. Hence, we must now check to see if we need to
1067 * turn OFF the TRANSMISSION of PAUSE frames. 1103 * turn OFF the TRANSMISSION of PAUSE frames.
1068 */ 1104 */
1069 if (mac->original_fc == e1000_fc_full) { 1105 if (hw->fc.original_type == e1000_fc_full) {
1070 mac->fc = e1000_fc_full; 1106 hw->fc.type = e1000_fc_full;
1071 hw_dbg(hw, "Flow Control = FULL.\r\n"); 1107 hw_dbg(hw, "Flow Control = FULL.\r\n");
1072 } else { 1108 } else {
1073 mac->fc = e1000_fc_rx_pause; 1109 hw->fc.type = e1000_fc_rx_pause;
1074 hw_dbg(hw, "Flow Control = " 1110 hw_dbg(hw, "Flow Control = "
1075 "RX PAUSE frames only.\r\n"); 1111 "RX PAUSE frames only.\r\n");
1076 } 1112 }
1077 } 1113 }
1078 /* For receiving PAUSE frames ONLY. 1114 /*
1115 * For receiving PAUSE frames ONLY.
1079 * 1116 *
1080 * LOCAL DEVICE | LINK PARTNER 1117 * LOCAL DEVICE | LINK PARTNER
1081 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result 1118 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
@@ -1087,10 +1124,11 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
1087 (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && 1124 (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
1088 (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && 1125 (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
1089 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { 1126 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
1090 mac->fc = e1000_fc_tx_pause; 1127 hw->fc.type = e1000_fc_tx_pause;
1091 hw_dbg(hw, "Flow Control = TX PAUSE frames only.\r\n"); 1128 hw_dbg(hw, "Flow Control = Tx PAUSE frames only.\r\n");
1092 } 1129 }
1093 /* For transmitting PAUSE frames ONLY. 1130 /*
1131 * For transmitting PAUSE frames ONLY.
1094 * 1132 *
1095 * LOCAL DEVICE | LINK PARTNER 1133 * LOCAL DEVICE | LINK PARTNER
1096 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result 1134 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
@@ -1102,18 +1140,19 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
1102 (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && 1140 (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
1103 !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && 1141 !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
1104 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) { 1142 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
1105 mac->fc = e1000_fc_rx_pause; 1143 hw->fc.type = e1000_fc_rx_pause;
1106 hw_dbg(hw, "Flow Control = RX PAUSE frames only.\r\n"); 1144 hw_dbg(hw, "Flow Control = Rx PAUSE frames only.\r\n");
1107 } else { 1145 } else {
1108 /* 1146 /*
1109 * Per the IEEE spec, at this point flow control 1147 * Per the IEEE spec, at this point flow control
1110 * should be disabled. 1148 * should be disabled.
1111 */ 1149 */
1112 mac->fc = e1000_fc_none; 1150 hw->fc.type = e1000_fc_none;
1113 hw_dbg(hw, "Flow Control = NONE.\r\n"); 1151 hw_dbg(hw, "Flow Control = NONE.\r\n");
1114 } 1152 }
1115 1153
1116 /* Now we need to do one last check... If we auto- 1154 /*
1155 * Now we need to do one last check... If we auto-
1117 * negotiated to HALF DUPLEX, flow control should not be 1156 * negotiated to HALF DUPLEX, flow control should not be
1118 * enabled per IEEE 802.3 spec. 1157 * enabled per IEEE 802.3 spec.
1119 */ 1158 */
@@ -1124,9 +1163,10 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
1124 } 1163 }
1125 1164
1126 if (duplex == HALF_DUPLEX) 1165 if (duplex == HALF_DUPLEX)
1127 mac->fc = e1000_fc_none; 1166 hw->fc.type = e1000_fc_none;
1128 1167
1129 /* Now we call a subroutine to actually force the MAC 1168 /*
1169 * Now we call a subroutine to actually force the MAC
1130 * controller to use the correct flow control settings. 1170 * controller to use the correct flow control settings.
1131 */ 1171 */
1132 ret_val = e1000e_force_mac_fc(hw); 1172 ret_val = e1000e_force_mac_fc(hw);
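
The hunks above implement the PAUSE/ASM_DIR truth table spelled out in the comments: symmetric PAUSE on both ends yields full flow control (or rx_pause if the user originally asked for Rx only), the two asymmetric combinations yield tx_pause or rx_pause, anything else disables flow control, and half duplex always forces it off. A self-contained sketch of that resolution, following the comments rather than copying the driver:

    #include <stdbool.h>
    #include <stdio.h>

    enum fc_type { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

    struct pause_ability { bool pause, asm_dir; };

    /* Resolve negotiated flow control from the local advertisement, the link
     * partner's ability and the originally requested mode. */
    static enum fc_type resolve_fc(struct pause_ability local,
                                   struct pause_ability partner,
                                   enum fc_type original, bool half_duplex)
    {
            enum fc_type fc;

            if (local.pause && partner.pause)
                    /* Symmetric pause on both ends; honor an Rx-only request. */
                    fc = (original == FC_FULL) ? FC_FULL : FC_RX_PAUSE;
            else if (!local.pause && local.asm_dir &&
                     partner.pause && partner.asm_dir)
                    fc = FC_TX_PAUSE;       /* we send PAUSE, partner honors it */
            else if (local.pause && local.asm_dir &&
                     !partner.pause && partner.asm_dir)
                    fc = FC_RX_PAUSE;       /* partner sends PAUSE, we honor it */
            else
                    fc = FC_NONE;           /* per the IEEE spec, disable */

            if (half_duplex)
                    fc = FC_NONE;           /* no flow control at half duplex */
            return fc;
    }

    int main(void)
    {
            struct pause_ability both = { true, true };
            struct pause_ability none = { false, false };

            printf("full/full -> %d\n", resolve_fc(both, both, FC_FULL, false));
            printf("full/none -> %d\n", resolve_fc(both, none, FC_FULL, false));
            return 0;
    }
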
@@ -1393,13 +1433,15 @@ s32 e1000e_blink_led(struct e1000_hw *hw)
1393 u32 ledctl_blink = 0; 1433 u32 ledctl_blink = 0;
1394 u32 i; 1434 u32 i;
1395 1435
1396 if (hw->media_type == e1000_media_type_fiber) { 1436 if (hw->phy.media_type == e1000_media_type_fiber) {
1397 /* always blink LED0 for PCI-E fiber */ 1437 /* always blink LED0 for PCI-E fiber */
1398 ledctl_blink = E1000_LEDCTL_LED0_BLINK | 1438 ledctl_blink = E1000_LEDCTL_LED0_BLINK |
1399 (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT); 1439 (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT);
1400 } else { 1440 } else {
1401 /* set the blink bit for each LED that's "on" (0x0E) 1441 /*
1402 * in ledctl_mode2 */ 1442 * set the blink bit for each LED that's "on" (0x0E)
1443 * in ledctl_mode2
1444 */
1403 ledctl_blink = hw->mac.ledctl_mode2; 1445 ledctl_blink = hw->mac.ledctl_mode2;
1404 for (i = 0; i < 4; i++) 1446 for (i = 0; i < 4; i++)
1405 if (((hw->mac.ledctl_mode2 >> (i * 8)) & 0xFF) == 1447 if (((hw->mac.ledctl_mode2 >> (i * 8)) & 0xFF) ==
@@ -1423,7 +1465,7 @@ s32 e1000e_led_on_generic(struct e1000_hw *hw)
1423{ 1465{
1424 u32 ctrl; 1466 u32 ctrl;
1425 1467
1426 switch (hw->media_type) { 1468 switch (hw->phy.media_type) {
1427 case e1000_media_type_fiber: 1469 case e1000_media_type_fiber:
1428 ctrl = er32(CTRL); 1470 ctrl = er32(CTRL);
1429 ctrl &= ~E1000_CTRL_SWDPIN0; 1471 ctrl &= ~E1000_CTRL_SWDPIN0;
@@ -1450,7 +1492,7 @@ s32 e1000e_led_off_generic(struct e1000_hw *hw)
1450{ 1492{
1451 u32 ctrl; 1493 u32 ctrl;
1452 1494
1453 switch (hw->media_type) { 1495 switch (hw->phy.media_type) {
1454 case e1000_media_type_fiber: 1496 case e1000_media_type_fiber:
1455 ctrl = er32(CTRL); 1497 ctrl = er32(CTRL);
1456 ctrl |= E1000_CTRL_SWDPIN0; 1498 ctrl |= E1000_CTRL_SWDPIN0;
@@ -1562,8 +1604,7 @@ void e1000e_update_adaptive(struct e1000_hw *hw)
1562 else 1604 else
1563 mac->current_ifs_val += 1605 mac->current_ifs_val +=
1564 mac->ifs_step_size; 1606 mac->ifs_step_size;
1565 ew32(AIT, 1607 ew32(AIT, mac->current_ifs_val);
1566 mac->current_ifs_val);
1567 } 1608 }
1568 } 1609 }
1569 } else { 1610 } else {
@@ -1826,10 +1867,12 @@ static s32 e1000_ready_nvm_eeprom(struct e1000_hw *hw)
1826 udelay(1); 1867 udelay(1);
1827 timeout = NVM_MAX_RETRY_SPI; 1868 timeout = NVM_MAX_RETRY_SPI;
1828 1869
1829 /* Read "Status Register" repeatedly until the LSB is cleared. 1870 /*
1871 * Read "Status Register" repeatedly until the LSB is cleared.
1830 * The EEPROM will signal that the command has been completed 1872 * The EEPROM will signal that the command has been completed
1831 * by clearing bit 0 of the internal status register. If it's 1873 * by clearing bit 0 of the internal status register. If it's
1832 * not cleared within 'timeout', then error out. */ 1874 * not cleared within 'timeout', then error out.
1875 */
1833 while (timeout) { 1876 while (timeout) {
1834 e1000_shift_out_eec_bits(hw, NVM_RDSR_OPCODE_SPI, 1877 e1000_shift_out_eec_bits(hw, NVM_RDSR_OPCODE_SPI,
1835 hw->nvm.opcode_bits); 1878 hw->nvm.opcode_bits);
@@ -1852,62 +1895,6 @@ static s32 e1000_ready_nvm_eeprom(struct e1000_hw *hw)
1852} 1895}
1853 1896
1854/** 1897/**
1855 * e1000e_read_nvm_spi - Reads EEPROM using SPI
1856 * @hw: pointer to the HW structure
1857 * @offset: offset of word in the EEPROM to read
1858 * @words: number of words to read
1859 * @data: word read from the EEPROM
1860 *
1861 * Reads a 16 bit word from the EEPROM.
1862 **/
1863s32 e1000e_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
1864{
1865 struct e1000_nvm_info *nvm = &hw->nvm;
1866 u32 i = 0;
1867 s32 ret_val;
1868 u16 word_in;
1869 u8 read_opcode = NVM_READ_OPCODE_SPI;
1870
1871 /* A check for invalid values: offset too large, too many words,
1872 * and not enough words. */
1873 if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
1874 (words == 0)) {
1875 hw_dbg(hw, "nvm parameter(s) out of bounds\n");
1876 return -E1000_ERR_NVM;
1877 }
1878
1879 ret_val = nvm->ops.acquire_nvm(hw);
1880 if (ret_val)
1881 return ret_val;
1882
1883 ret_val = e1000_ready_nvm_eeprom(hw);
1884 if (ret_val) {
1885 nvm->ops.release_nvm(hw);
1886 return ret_val;
1887 }
1888
1889 e1000_standby_nvm(hw);
1890
1891 if ((nvm->address_bits == 8) && (offset >= 128))
1892 read_opcode |= NVM_A8_OPCODE_SPI;
1893
1894 /* Send the READ command (opcode + addr) */
1895 e1000_shift_out_eec_bits(hw, read_opcode, nvm->opcode_bits);
1896 e1000_shift_out_eec_bits(hw, (u16)(offset*2), nvm->address_bits);
1897
1898 /* Read the data. SPI NVMs increment the address with each byte
1899 * read and will roll over if reading beyond the end. This allows
1900 * us to read the whole NVM from any offset */
1901 for (i = 0; i < words; i++) {
1902 word_in = e1000_shift_in_eec_bits(hw, 16);
1903 data[i] = (word_in >> 8) | (word_in << 8);
1904 }
1905
1906 nvm->ops.release_nvm(hw);
1907 return 0;
1908}
1909
1910/**
1911 * e1000e_read_nvm_eerd - Reads EEPROM using EERD register 1898 * e1000e_read_nvm_eerd - Reads EEPROM using EERD register
1912 * @hw: pointer to the HW structure 1899 * @hw: pointer to the HW structure
1913 * @offset: offset of word in the EEPROM to read 1900 * @offset: offset of word in the EEPROM to read
@@ -1922,8 +1909,10 @@ s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
1922 u32 i, eerd = 0; 1909 u32 i, eerd = 0;
1923 s32 ret_val = 0; 1910 s32 ret_val = 0;
1924 1911
1925 /* A check for invalid values: offset too large, too many words, 1912 /*
1926 * and not enough words. */ 1913 * A check for invalid values: offset too large, too many words,
1914 * too many words for the offset, and not enough words.
1915 */
1927 if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || 1916 if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
1928 (words == 0)) { 1917 (words == 0)) {
1929 hw_dbg(hw, "nvm parameter(s) out of bounds\n"); 1918 hw_dbg(hw, "nvm parameter(s) out of bounds\n");
@@ -1939,8 +1928,7 @@ s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
1939 if (ret_val) 1928 if (ret_val)
1940 break; 1929 break;
1941 1930
1942 data[i] = (er32(EERD) >> 1931 data[i] = (er32(EERD) >> E1000_NVM_RW_REG_DATA);
1943 E1000_NVM_RW_REG_DATA);
1944 } 1932 }
1945 1933
1946 return ret_val; 1934 return ret_val;
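
Both the EERD read path above and the SPI write path below begin with the same sanity check on (offset, words) against the NVM word size. A standalone restatement of that check:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Reject: offset past the end, zero-length requests, or a request that
     * would run past the last word (words > word_size - offset). */
    static bool nvm_request_ok(uint16_t offset, uint16_t words, uint16_t word_size)
    {
            if (offset >= word_size)
                    return false;
            if (words == 0 || words > (uint16_t)(word_size - offset))
                    return false;
            return true;
    }

    int main(void)
    {
            printf("%d %d %d\n",
                   nvm_request_ok(0, 64, 64),   /* ok: whole NVM          */
                   nvm_request_ok(60, 8, 64),   /* bad: runs off the end  */
                   nvm_request_ok(10, 0, 64));  /* bad: zero words        */
            return 0;
    }
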
@@ -1964,8 +1952,10 @@ s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
1964 s32 ret_val; 1952 s32 ret_val;
1965 u16 widx = 0; 1953 u16 widx = 0;
1966 1954
1967 /* A check for invalid values: offset too large, too many words, 1955 /*
1968 * and not enough words. */ 1956 * A check for invalid values: offset too large, too many words,
1957 * and not enough words.
1958 */
1969 if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || 1959 if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
1970 (words == 0)) { 1960 (words == 0)) {
1971 hw_dbg(hw, "nvm parameter(s) out of bounds\n"); 1961 hw_dbg(hw, "nvm parameter(s) out of bounds\n");
@@ -1995,8 +1985,10 @@ s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
1995 1985
1996 e1000_standby_nvm(hw); 1986 e1000_standby_nvm(hw);
1997 1987
1998 /* Some SPI eeproms use the 8th address bit embedded in the 1988 /*
1999 * opcode */ 1989 * Some SPI eeproms use the 8th address bit embedded in the
1990 * opcode
1991 */
2000 if ((nvm->address_bits == 8) && (offset >= 128)) 1992 if ((nvm->address_bits == 8) && (offset >= 128))
2001 write_opcode |= NVM_A8_OPCODE_SPI; 1993 write_opcode |= NVM_A8_OPCODE_SPI;
2002 1994
@@ -2041,9 +2033,9 @@ s32 e1000e_read_mac_addr(struct e1000_hw *hw)
2041 /* Check for an alternate MAC address. An alternate MAC 2033 /* Check for an alternate MAC address. An alternate MAC
2042 * address can be setup by pre-boot software and must be 2034 * address can be setup by pre-boot software and must be
2043 * treated like a permanent address and must override the 2035 * treated like a permanent address and must override the
2044 * actual permanent MAC address. */ 2036 * actual permanent MAC address.*/
2045 ret_val = e1000_read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1, 2037 ret_val = e1000_read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1,
2046 &mac_addr_offset); 2038 &mac_addr_offset);
2047 if (ret_val) { 2039 if (ret_val) {
2048 hw_dbg(hw, "NVM Read Error\n"); 2040 hw_dbg(hw, "NVM Read Error\n");
2049 return ret_val; 2041 return ret_val;
@@ -2056,7 +2048,7 @@ s32 e1000e_read_mac_addr(struct e1000_hw *hw)
2056 mac_addr_offset += ETH_ALEN/sizeof(u16); 2048 mac_addr_offset += ETH_ALEN/sizeof(u16);
2057 2049
2058 /* make sure we have a valid mac address here 2050 /* make sure we have a valid mac address here
2059 * before using it */ 2051 * before using it */
2060 ret_val = e1000_read_nvm(hw, mac_addr_offset, 1, 2052 ret_val = e1000_read_nvm(hw, mac_addr_offset, 1,
2061 &nvm_data); 2053 &nvm_data);
2062 if (ret_val) { 2054 if (ret_val) {
@@ -2068,7 +2060,7 @@ s32 e1000e_read_mac_addr(struct e1000_hw *hw)
2068 } 2060 }
2069 2061
2070 if (mac_addr_offset) 2062 if (mac_addr_offset)
2071 hw->dev_spec.e82571.alt_mac_addr_is_present = 1; 2063 hw->dev_spec.e82571.alt_mac_addr_is_present = 1;
2072 } 2064 }
2073 2065
2074 for (i = 0; i < ETH_ALEN; i += 2) { 2066 for (i = 0; i < ETH_ALEN; i += 2) {
@@ -2244,7 +2236,7 @@ bool e1000e_check_mng_mode(struct e1000_hw *hw)
2244} 2236}
2245 2237
2246/** 2238/**
2247 * e1000e_enable_tx_pkt_filtering - Enable packet filtering on TX 2239 * e1000e_enable_tx_pkt_filtering - Enable packet filtering on Tx
2248 * @hw: pointer to the HW structure 2240 * @hw: pointer to the HW structure
2249 * 2241 *
2250 * Enables packet filtering on transmit packets if manageability is enabled 2242 * Enables packet filtering on transmit packets if manageability is enabled
@@ -2264,7 +2256,8 @@ bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw)
2264 return 0; 2256 return 0;
2265 } 2257 }
2266 2258
2267 /* If we can't read from the host interface for whatever 2259 /*
2260 * If we can't read from the host interface for whatever
2268 * reason, disable filtering. 2261 * reason, disable filtering.
2269 */ 2262 */
2270 ret_val = e1000_mng_enable_host_if(hw); 2263 ret_val = e1000_mng_enable_host_if(hw);
@@ -2282,7 +2275,8 @@ bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw)
2282 hdr->checksum = 0; 2275 hdr->checksum = 0;
2283 csum = e1000_calculate_checksum((u8 *)hdr, 2276 csum = e1000_calculate_checksum((u8 *)hdr,
2284 E1000_MNG_DHCP_COOKIE_LENGTH); 2277 E1000_MNG_DHCP_COOKIE_LENGTH);
2285 /* If either the checksums or signature don't match, then 2278 /*
2279 * If either the checksums or signature don't match, then
2286 * the cookie area isn't considered valid, in which case we 2280 * the cookie area isn't considered valid, in which case we
2287 * take the safe route of assuming Tx filtering is enabled. 2281 * take the safe route of assuming Tx filtering is enabled.
2288 */ 2282 */
@@ -2374,8 +2368,10 @@ static s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer,
2374 /* Calculate length in DWORDs */ 2368 /* Calculate length in DWORDs */
2375 length >>= 2; 2369 length >>= 2;
2376 2370
2377 /* The device driver writes the relevant command block into the 2371 /*
2378 * ram area. */ 2372 * The device driver writes the relevant command block into the
2373 * ram area.
2374 */
2379 for (i = 0; i < length; i++) { 2375 for (i = 0; i < length; i++) {
2380 for (j = 0; j < sizeof(u32); j++) { 2376 for (j = 0; j < sizeof(u32); j++) {
2381 *(tmp + j) = *bufptr++; 2377 *(tmp + j) = *bufptr++;
@@ -2481,7 +2477,7 @@ bool e1000e_enable_mng_pass_thru(struct e1000_hw *hw)
2481 return ret_val; 2477 return ret_val;
2482} 2478}
2483 2479
2484s32 e1000e_read_part_num(struct e1000_hw *hw, u32 *part_num) 2480s32 e1000e_read_pba_num(struct e1000_hw *hw, u32 *pba_num)
2485{ 2481{
2486 s32 ret_val; 2482 s32 ret_val;
2487 u16 nvm_data; 2483 u16 nvm_data;
@@ -2491,14 +2487,14 @@ s32 e1000e_read_part_num(struct e1000_hw *hw, u32 *part_num)
2491 hw_dbg(hw, "NVM Read Error\n"); 2487 hw_dbg(hw, "NVM Read Error\n");
2492 return ret_val; 2488 return ret_val;
2493 } 2489 }
2494 *part_num = (u32)(nvm_data << 16); 2490 *pba_num = (u32)(nvm_data << 16);
2495 2491
2496 ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_1, 1, &nvm_data); 2492 ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_1, 1, &nvm_data);
2497 if (ret_val) { 2493 if (ret_val) {
2498 hw_dbg(hw, "NVM Read Error\n"); 2494 hw_dbg(hw, "NVM Read Error\n");
2499 return ret_val; 2495 return ret_val;
2500 } 2496 }
2501 *part_num |= nvm_data; 2497 *pba_num |= nvm_data;
2502 2498
2503 return 0; 2499 return 0;
2504} 2500}
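
The renamed e1000e_read_pba_num above simply concatenates two 16-bit NVM words into one 32-bit identifier: the first word becomes the upper half, the second the lower half. A trivial sketch of that packing (the word values here are arbitrary):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t pba_from_words(uint16_t word0, uint16_t word1)
    {
            /* word0 -> bits 31:16, word1 -> bits 15:0, as in the hunk above */
            return ((uint32_t)word0 << 16) | word1;
    }

    int main(void)
    {
            printf("pba = 0x%08x\n", (unsigned)pba_from_words(0xE100, 0x12FF));
            return 0;
    }
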
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index fc5c63f4f578..c8dc47fd132a 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel PRO/1000 Linux driver 3 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2007 Intel Corporation. 4 Copyright(c) 1999 - 2008 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -82,7 +82,7 @@ static int e1000_desc_unused(struct e1000_ring *ring)
 }
 
 /**
- * e1000_receive_skb - helper function to handle rx indications
+ * e1000_receive_skb - helper function to handle Rx indications
  * @adapter: board private structure
  * @status: descriptor status field as written by hardware
  * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
@@ -138,8 +138,9 @@ static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
 		/* TCP checksum is good */
 		skb->ip_summed = CHECKSUM_UNNECESSARY;
 	} else {
-		/* IP fragment with UDP payload */
-		/* Hardware complements the payload checksum, so we undo it
+		/*
+		 * IP fragment with UDP payload
+		 * Hardware complements the payload checksum, so we undo it
 		 * and then put the value in host order for further stack use.
 		 */
 		__sum16 sum = (__force __sum16)htons(csum);
@@ -182,7 +183,8 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
 			break;
 		}
 
-		/* Make buffer alignment 2 beyond a 16 byte boundary
+		/*
+		 * Make buffer alignment 2 beyond a 16 byte boundary
 		 * this will result in a 16 byte aligned IP header after
 		 * the 14 byte MAC header is removed
 		 */
@@ -213,10 +215,12 @@ map_skb:
 		if (i-- == 0)
 			i = (rx_ring->count - 1);
 
-		/* Force memory writes to complete before letting h/w
+		/*
+		 * Force memory writes to complete before letting h/w
 		 * know there are new descriptors to fetch. (Only
 		 * applicable for weak-ordered memory model archs,
-		 * such as IA-64). */
+		 * such as IA-64).
+		 */
 		wmb();
 		writel(i, adapter->hw.hw_addr + rx_ring->tail);
 	}
@@ -285,7 +289,8 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
 			break;
 		}
 
-		/* Make buffer alignment 2 beyond a 16 byte boundary
+		/*
+		 * Make buffer alignment 2 beyond a 16 byte boundary
 		 * this will result in a 16 byte aligned IP header after
 		 * the 14 byte MAC header is removed
 		 */
@@ -319,12 +324,15 @@ no_buffers:
 		if (!(i--))
 			i = (rx_ring->count - 1);
 
-		/* Force memory writes to complete before letting h/w
+		/*
+		 * Force memory writes to complete before letting h/w
 		 * know there are new descriptors to fetch. (Only
 		 * applicable for weak-ordered memory model archs,
-		 * such as IA-64). */
+		 * such as IA-64).
+		 */
 		wmb();
-		/* Hardware increments by 16 bytes, but packet split
+		/*
+		 * Hardware increments by 16 bytes, but packet split
 		 * descriptors are 32 bytes...so we increment tail
 		 * twice as much.
 		 */
@@ -409,9 +417,11 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
 		total_rx_bytes += length;
 		total_rx_packets++;
 
-		/* code added for copybreak, this should improve
+		/*
+		 * code added for copybreak, this should improve
 		 * performance for small packets with large amounts
-		 * of reassembly being done in the stack */
+		 * of reassembly being done in the stack
+		 */
 		if (length < copybreak) {
 			struct sk_buff *new_skb =
 				netdev_alloc_skb(netdev, length + NET_IP_ALIGN);
@@ -581,14 +591,15 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
 	}
 
 	if (adapter->detect_tx_hung) {
-		/* Detect a transmit hang in hardware, this serializes the
-		 * check with the clearing of time_stamp and movement of i */
+		/*
+		 * Detect a transmit hang in hardware, this serializes the
+		 * check with the clearing of time_stamp and movement of i
+		 */
 		adapter->detect_tx_hung = 0;
 		if (tx_ring->buffer_info[eop].dma &&
 		    time_after(jiffies, tx_ring->buffer_info[eop].time_stamp
 			       + (adapter->tx_timeout_factor * HZ))
-		    && !(er32(STATUS) &
-			 E1000_STATUS_TXOFF)) {
+		    && !(er32(STATUS) & E1000_STATUS_TXOFF)) {
 			e1000_print_tx_hang(adapter);
 			netif_stop_queue(netdev);
 		}
@@ -677,21 +688,28 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
 		skb_put(skb, length);
 
 		{
-		/* this looks ugly, but it seems compiler issues make it
-		   more efficient than reusing j */
+		/*
+		 * this looks ugly, but it seems compiler issues make it
+		 * more efficient than reusing j
+		 */
 		int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]);
 
-		/* page alloc/put takes too long and effects small packet
-		 * throughput, so unsplit small packets and save the alloc/put*/
+		/*
+		 * page alloc/put takes too long and effects small packet
+		 * throughput, so unsplit small packets and save the alloc/put
+		 * only valid in softirq (napi) context to call kmap_*
+		 */
 		if (l1 && (l1 <= copybreak) &&
 		    ((length + l1) <= adapter->rx_ps_bsize0)) {
 			u8 *vaddr;
 
 			ps_page = &buffer_info->ps_pages[0];
 
-			/* there is no documentation about how to call
+			/*
+			 * there is no documentation about how to call
 			 * kmap_atomic, so we can't hold the mapping
-			 * very long */
+			 * very long
+			 */
 			pci_dma_sync_single_for_cpu(pdev, ps_page->dma,
 				PAGE_SIZE, PCI_DMA_FROMDEVICE);
 			vaddr = kmap_atomic(ps_page->page, KM_SKB_DATA_SOFTIRQ);
@@ -836,26 +854,31 @@ static irqreturn_t e1000_intr_msi(int irq, void *data)
 	struct e1000_hw *hw = &adapter->hw;
 	u32 icr = er32(ICR);
 
-	/* read ICR disables interrupts using IAM, so keep up with our
-	 * enable/disable accounting */
-	atomic_inc(&adapter->irq_sem);
+	/*
+	 * read ICR disables interrupts using IAM
+	 */
 
 	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
 		hw->mac.get_link_status = 1;
-		/* ICH8 workaround-- Call gig speed drop workaround on cable
-		 * disconnect (LSC) before accessing any PHY registers */
+		/*
+		 * ICH8 workaround-- Call gig speed drop workaround on cable
+		 * disconnect (LSC) before accessing any PHY registers
+		 */
 		if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
 		    (!(er32(STATUS) & E1000_STATUS_LU)))
 			e1000e_gig_downshift_workaround_ich8lan(hw);
 
-		/* 80003ES2LAN workaround-- For packet buffer work-around on
+		/*
+		 * 80003ES2LAN workaround-- For packet buffer work-around on
 		 * link down event; disable receives here in the ISR and reset
-		 * adapter in watchdog */
+		 * adapter in watchdog
+		 */
 		if (netif_carrier_ok(netdev) &&
 		    adapter->flags & FLAG_RX_NEEDS_RESTART) {
 			/* disable receives */
 			u32 rctl = er32(RCTL);
 			ew32(RCTL, rctl & ~E1000_RCTL_EN);
+			adapter->flags |= FLAG_RX_RESTART_NOW;
 		}
 		/* guard against interrupt when we're going down */
 		if (!test_bit(__E1000_DOWN, &adapter->state))
@@ -868,8 +891,6 @@ static irqreturn_t e1000_intr_msi(int irq, void *data)
 		adapter->total_rx_bytes = 0;
 		adapter->total_rx_packets = 0;
 		__netif_rx_schedule(netdev, &adapter->napi);
-	} else {
-		atomic_dec(&adapter->irq_sem);
 	}
 
 	return IRQ_HANDLED;
@@ -890,26 +911,31 @@ static irqreturn_t e1000_intr(int irq, void *data)
 	if (!icr)
 		return IRQ_NONE;  /* Not our interrupt */
 
-	/* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
-	 * not set, then the adapter didn't send an interrupt */
+	/*
+	 * IMS will not auto-mask if INT_ASSERTED is not set, and if it is
+	 * not set, then the adapter didn't send an interrupt
+	 */
 	if (!(icr & E1000_ICR_INT_ASSERTED))
 		return IRQ_NONE;
 
-	/* Interrupt Auto-Mask...upon reading ICR,
+	/*
+	 * Interrupt Auto-Mask...upon reading ICR,
 	 * interrupts are masked. No need for the
-	 * IMC write, but it does mean we should
-	 * account for it ASAP. */
-	atomic_inc(&adapter->irq_sem);
+	 * IMC write
+	 */
 
 	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
 		hw->mac.get_link_status = 1;
-		/* ICH8 workaround-- Call gig speed drop workaround on cable
-		 * disconnect (LSC) before accessing any PHY registers */
+		/*
+		 * ICH8 workaround-- Call gig speed drop workaround on cable
+		 * disconnect (LSC) before accessing any PHY registers
+		 */
 		if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
 		    (!(er32(STATUS) & E1000_STATUS_LU)))
 			e1000e_gig_downshift_workaround_ich8lan(hw);
 
-		/* 80003ES2LAN workaround--
+		/*
+		 * 80003ES2LAN workaround--
 		 * For packet buffer work-around on link down event;
 		 * disable receives here in the ISR and
 		 * reset adapter in watchdog
@@ -919,6 +945,7 @@ static irqreturn_t e1000_intr(int irq, void *data)
 			/* disable receives */
 			rctl = er32(RCTL);
 			ew32(RCTL, rctl & ~E1000_RCTL_EN);
+			adapter->flags |= FLAG_RX_RESTART_NOW;
 		}
 		/* guard against interrupt when we're going down */
 		if (!test_bit(__E1000_DOWN, &adapter->state))
@@ -931,8 +958,6 @@ static irqreturn_t e1000_intr(int irq, void *data)
 		adapter->total_rx_bytes = 0;
 		adapter->total_rx_packets = 0;
 		__netif_rx_schedule(netdev, &adapter->napi);
-	} else {
-		atomic_dec(&adapter->irq_sem);
 	}
 
 	return IRQ_HANDLED;
@@ -983,7 +1008,6 @@ static void e1000_irq_disable(struct e1000_adapter *adapter)
 {
 	struct e1000_hw *hw = &adapter->hw;
 
-	atomic_inc(&adapter->irq_sem);
 	ew32(IMC, ~0);
 	e1e_flush();
 	synchronize_irq(adapter->pdev->irq);
@@ -996,10 +1020,8 @@ static void e1000_irq_enable(struct e1000_adapter *adapter)
 {
 	struct e1000_hw *hw = &adapter->hw;
 
-	if (atomic_dec_and_test(&adapter->irq_sem)) {
-		ew32(IMS, IMS_ENABLE_MASK);
-		e1e_flush();
-	}
+	ew32(IMS, IMS_ENABLE_MASK);
+	e1e_flush();
 }
 
 /**
@@ -1023,8 +1045,7 @@ static void e1000_get_hw_control(struct e1000_adapter *adapter)
 		ew32(SWSM, swsm | E1000_SWSM_DRV_LOAD);
 	} else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) {
 		ctrl_ext = er32(CTRL_EXT);
-		ew32(CTRL_EXT,
-		     ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
+		ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
 	}
 }
 
@@ -1050,8 +1071,7 @@ static void e1000_release_hw_control(struct e1000_adapter *adapter)
 		ew32(SWSM, swsm & ~E1000_SWSM_DRV_LOAD);
 	} else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) {
 		ctrl_ext = er32(CTRL_EXT);
-		ew32(CTRL_EXT,
-		     ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
+		ew32(CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
 	}
 }
 
@@ -1353,9 +1373,11 @@ static void e1000_set_itr(struct e1000_adapter *adapter)
 
 set_itr_now:
 	if (new_itr != adapter->itr) {
-		/* this attempts to bias the interrupt rate towards Bulk
+		/*
+		 * this attempts to bias the interrupt rate towards Bulk
 		 * by adding intermediate steps when interrupt rate is
-		 * increasing */
+		 * increasing
+		 */
 		new_itr = new_itr > adapter->itr ?
 			  min(adapter->itr + (new_itr >> 2), new_itr) :
 			  new_itr;
@@ -1366,7 +1388,7 @@ set_itr_now:
 
 /**
  * e1000_clean - NAPI Rx polling callback
- * @adapter: board private structure
+ * @napi: struct associated with this polling callback
  * @budget: amount of packets driver is allowed to process this poll
  **/
 static int e1000_clean(struct napi_struct *napi, int budget)
@@ -1378,10 +1400,12 @@ static int e1000_clean(struct napi_struct *napi, int budget)
 	/* Must NOT use netdev_priv macro here. */
 	adapter = poll_dev->priv;
 
-	/* e1000_clean is called per-cpu. This lock protects
+	/*
+	 * e1000_clean is called per-cpu. This lock protects
 	 * tx_ring from being cleaned by multiple cpus
 	 * simultaneously. A failure obtaining the lock means
-	 * tx_ring is currently being cleaned anyway. */
+	 * tx_ring is currently being cleaned anyway.
+	 */
 	if (spin_trylock(&adapter->tx_queue_lock)) {
 		tx_cleaned = e1000_clean_tx_irq(adapter);
 		spin_unlock(&adapter->tx_queue_lock);
@@ -1427,9 +1451,12 @@ static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
 	struct e1000_hw *hw = &adapter->hw;
 	u32 vfta, index;
 
-	e1000_irq_disable(adapter);
+	if (!test_bit(__E1000_DOWN, &adapter->state))
+		e1000_irq_disable(adapter);
 	vlan_group_set_device(adapter->vlgrp, vid, NULL);
-	e1000_irq_enable(adapter);
+
+	if (!test_bit(__E1000_DOWN, &adapter->state))
+		e1000_irq_enable(adapter);
 
 	if ((adapter->hw.mng_cookie.status &
 	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
@@ -1480,7 +1507,8 @@ static void e1000_vlan_rx_register(struct net_device *netdev,
 	struct e1000_hw *hw = &adapter->hw;
 	u32 ctrl, rctl;
 
-	e1000_irq_disable(adapter);
+	if (!test_bit(__E1000_DOWN, &adapter->state))
+		e1000_irq_disable(adapter);
 	adapter->vlgrp = grp;
 
 	if (grp) {
@@ -1517,7 +1545,8 @@ static void e1000_vlan_rx_register(struct net_device *netdev,
 		}
 	}
 
-	e1000_irq_enable(adapter);
+	if (!test_bit(__E1000_DOWN, &adapter->state))
+		e1000_irq_enable(adapter);
 }
 
 static void e1000_restore_vlan(struct e1000_adapter *adapter)
@@ -1546,9 +1575,11 @@ static void e1000_init_manageability(struct e1000_adapter *adapter)
1546 1575
1547 manc = er32(MANC); 1576 manc = er32(MANC);
1548 1577
1549 /* enable receiving management packets to the host. this will probably 1578 /*
1579 * enable receiving management packets to the host. this will probably
1550 * generate destination unreachable messages from the host OS, but 1580 * generate destination unreachable messages from the host OS, but
1551 * the packets will be handled on SMBUS */ 1581 * the packets will be handled on SMBUS
1582 */
1552 manc |= E1000_MANC_EN_MNG2HOST; 1583 manc |= E1000_MANC_EN_MNG2HOST;
1553 manc2h = er32(MANC2H); 1584 manc2h = er32(MANC2H);
1554#define E1000_MNG2HOST_PORT_623 (1 << 5) 1585#define E1000_MNG2HOST_PORT_623 (1 << 5)
@@ -1598,7 +1629,7 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
1598 1629
1599 /* Set the Tx Interrupt Delay register */ 1630 /* Set the Tx Interrupt Delay register */
1600 ew32(TIDV, adapter->tx_int_delay); 1631 ew32(TIDV, adapter->tx_int_delay);
1601 /* tx irq moderation */ 1632 /* Tx irq moderation */
1602 ew32(TADV, adapter->tx_abs_int_delay); 1633 ew32(TADV, adapter->tx_abs_int_delay);
1603 1634
1604 /* Program the Transmit Control Register */ 1635 /* Program the Transmit Control Register */
@@ -1608,22 +1639,24 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
1608 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT); 1639 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
1609 1640
1610 if (adapter->flags & FLAG_TARC_SPEED_MODE_BIT) { 1641 if (adapter->flags & FLAG_TARC_SPEED_MODE_BIT) {
1611 tarc = er32(TARC0); 1642 tarc = er32(TARC(0));
1612 /* set the speed mode bit, we'll clear it if we're not at 1643 /*
1613 * gigabit link later */ 1644 * set the speed mode bit, we'll clear it if we're not at
1645 * gigabit link later
1646 */
1614#define SPEED_MODE_BIT (1 << 21) 1647#define SPEED_MODE_BIT (1 << 21)
1615 tarc |= SPEED_MODE_BIT; 1648 tarc |= SPEED_MODE_BIT;
1616 ew32(TARC0, tarc); 1649 ew32(TARC(0), tarc);
1617 } 1650 }
1618 1651
1619 /* errata: program both queues to unweighted RR */ 1652 /* errata: program both queues to unweighted RR */
1620 if (adapter->flags & FLAG_TARC_SET_BIT_ZERO) { 1653 if (adapter->flags & FLAG_TARC_SET_BIT_ZERO) {
1621 tarc = er32(TARC0); 1654 tarc = er32(TARC(0));
1622 tarc |= 1; 1655 tarc |= 1;
1623 ew32(TARC0, tarc); 1656 ew32(TARC(0), tarc);
1624 tarc = er32(TARC1); 1657 tarc = er32(TARC(1));
1625 tarc |= 1; 1658 tarc |= 1;
1626 ew32(TARC1, tarc); 1659 ew32(TARC(1), tarc);
1627 } 1660 }
1628 1661
1629 e1000e_config_collision_dist(hw); 1662 e1000e_config_collision_dist(hw);
@@ -1731,8 +1764,10 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
1731 /* Configure extra packet-split registers */ 1764 /* Configure extra packet-split registers */
1732 rfctl = er32(RFCTL); 1765 rfctl = er32(RFCTL);
1733 rfctl |= E1000_RFCTL_EXTEN; 1766 rfctl |= E1000_RFCTL_EXTEN;
1734 /* disable packet split support for IPv6 extension headers, 1767 /*
1735 * because some malformed IPv6 headers can hang the RX */ 1768 * disable packet split support for IPv6 extension headers,
1769 * because some malformed IPv6 headers can hang the Rx
1770 */
1736 rfctl |= (E1000_RFCTL_IPV6_EX_DIS | 1771 rfctl |= (E1000_RFCTL_IPV6_EX_DIS |
1737 E1000_RFCTL_NEW_IPV6_EXT_DIS); 1772 E1000_RFCTL_NEW_IPV6_EXT_DIS);
1738 1773
@@ -1761,6 +1796,8 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
1761 } 1796 }
1762 1797
1763 ew32(RCTL, rctl); 1798 ew32(RCTL, rctl);
1799 /* just started the receive unit, no need to restart */
1800 adapter->flags &= ~FLAG_RX_RESTART_NOW;
1764} 1801}
1765 1802
1766/** 1803/**
@@ -1801,8 +1838,7 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
1801 /* irq moderation */ 1838 /* irq moderation */
1802 ew32(RADV, adapter->rx_abs_int_delay); 1839 ew32(RADV, adapter->rx_abs_int_delay);
1803 if (adapter->itr_setting != 0) 1840 if (adapter->itr_setting != 0)
1804 ew32(ITR, 1841 ew32(ITR, 1000000000 / (adapter->itr * 256));
1805 1000000000 / (adapter->itr * 256));
1806 1842
1807 ctrl_ext = er32(CTRL_EXT); 1843 ctrl_ext = er32(CTRL_EXT);
1808 /* Reset delay timers after every interrupt */ 1844 /* Reset delay timers after every interrupt */
@@ -1813,8 +1849,10 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
1813 ew32(CTRL_EXT, ctrl_ext); 1849 ew32(CTRL_EXT, ctrl_ext);
1814 e1e_flush(); 1850 e1e_flush();
1815 1851
1816 /* Setup the HW Rx Head and Tail Descriptor Pointers and 1852 /*
1817 * the Base and Length of the Rx Descriptor Ring */ 1853 * Setup the HW Rx Head and Tail Descriptor Pointers and
1854 * the Base and Length of the Rx Descriptor Ring
1855 */
1818 rdba = rx_ring->dma; 1856 rdba = rx_ring->dma;
1819 ew32(RDBAL, (rdba & DMA_32BIT_MASK)); 1857 ew32(RDBAL, (rdba & DMA_32BIT_MASK));
1820 ew32(RDBAH, (rdba >> 32)); 1858 ew32(RDBAH, (rdba >> 32));
@@ -1829,8 +1867,10 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
1829 if (adapter->flags & FLAG_RX_CSUM_ENABLED) { 1867 if (adapter->flags & FLAG_RX_CSUM_ENABLED) {
1830 rxcsum |= E1000_RXCSUM_TUOFL; 1868 rxcsum |= E1000_RXCSUM_TUOFL;
1831 1869
1832 /* IPv4 payload checksum for UDP fragments must be 1870 /*
1833 * used in conjunction with packet-split. */ 1871 * IPv4 payload checksum for UDP fragments must be
1872 * used in conjunction with packet-split.
1873 */
1834 if (adapter->rx_ps_pages) 1874 if (adapter->rx_ps_pages)
1835 rxcsum |= E1000_RXCSUM_IPPCSE; 1875 rxcsum |= E1000_RXCSUM_IPPCSE;
1836 } else { 1876 } else {
@@ -1839,9 +1879,11 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
1839 } 1879 }
1840 ew32(RXCSUM, rxcsum); 1880 ew32(RXCSUM, rxcsum);
1841 1881
1842 /* Enable early receives on supported devices, only takes effect when 1882 /*
1883 * Enable early receives on supported devices, only takes effect when
1843 * packet size is equal or larger than the specified value (in 8 byte 1884 * packet size is equal or larger than the specified value (in 8 byte
1844 * units), e.g. using jumbo frames when setting to E1000_ERT_2048 */ 1885 * units), e.g. using jumbo frames when setting to E1000_ERT_2048
1886 */
1845 if ((adapter->flags & FLAG_HAS_ERT) && 1887 if ((adapter->flags & FLAG_HAS_ERT) &&
1846 (adapter->netdev->mtu > ETH_DATA_LEN)) 1888 (adapter->netdev->mtu > ETH_DATA_LEN))
1847 ew32(ERT, E1000_ERT_2048); 1889 ew32(ERT, E1000_ERT_2048);
@@ -1851,7 +1893,7 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
1851} 1893}
1852 1894
1853/** 1895/**
1854 * e1000_mc_addr_list_update - Update Multicast addresses 1896 * e1000_update_mc_addr_list - Update Multicast addresses
1855 * @hw: pointer to the HW structure 1897 * @hw: pointer to the HW structure
1856 * @mc_addr_list: array of multicast addresses to program 1898 * @mc_addr_list: array of multicast addresses to program
1857 * @mc_addr_count: number of multicast addresses to program 1899 * @mc_addr_count: number of multicast addresses to program
@@ -1865,11 +1907,11 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
1865 * exists and all implementations are handled in the generic version of this 1907 * exists and all implementations are handled in the generic version of this
1866 * function. 1908 * function.
1867 **/ 1909 **/
1868static void e1000_mc_addr_list_update(struct e1000_hw *hw, u8 *mc_addr_list, 1910static void e1000_update_mc_addr_list(struct e1000_hw *hw, u8 *mc_addr_list,
1869 u32 mc_addr_count, u32 rar_used_count, 1911 u32 mc_addr_count, u32 rar_used_count,
1870 u32 rar_count) 1912 u32 rar_count)
1871{ 1913{
1872 hw->mac.ops.mc_addr_list_update(hw, mc_addr_list, mc_addr_count, 1914 hw->mac.ops.update_mc_addr_list(hw, mc_addr_list, mc_addr_count,
1873 rar_used_count, rar_count); 1915 rar_used_count, rar_count);
1874} 1916}
1875 1917
@@ -1923,7 +1965,7 @@ static void e1000_set_multi(struct net_device *netdev)
1923 mc_ptr = mc_ptr->next; 1965 mc_ptr = mc_ptr->next;
1924 } 1966 }
1925 1967
1926 e1000_mc_addr_list_update(hw, mta_list, i, 1, 1968 e1000_update_mc_addr_list(hw, mta_list, i, 1,
1927 mac->rar_entry_count); 1969 mac->rar_entry_count);
1928 kfree(mta_list); 1970 kfree(mta_list);
1929 } else { 1971 } else {
@@ -1931,13 +1973,12 @@ static void e1000_set_multi(struct net_device *netdev)
1931 * if we're called from probe, we might not have 1973 * if we're called from probe, we might not have
1932 * anything to do here, so clear out the list 1974 * anything to do here, so clear out the list
1933 */ 1975 */
1934 e1000_mc_addr_list_update(hw, NULL, 0, 1, 1976 e1000_update_mc_addr_list(hw, NULL, 0, 1, mac->rar_entry_count);
1935 mac->rar_entry_count);
1936 } 1977 }
1937} 1978}
1938 1979
1939/** 1980/**
1940 * e1000_configure - configure the hardware for RX and TX 1981 * e1000_configure - configure the hardware for Rx and Tx
1941 * @adapter: private board structure 1982 * @adapter: private board structure
1942 **/ 1983 **/
1943static void e1000_configure(struct e1000_adapter *adapter) 1984static void e1000_configure(struct e1000_adapter *adapter)
@@ -1950,8 +1991,7 @@ static void e1000_configure(struct e1000_adapter *adapter)
1950 e1000_configure_tx(adapter); 1991 e1000_configure_tx(adapter);
1951 e1000_setup_rctl(adapter); 1992 e1000_setup_rctl(adapter);
1952 e1000_configure_rx(adapter); 1993 e1000_configure_rx(adapter);
1953 adapter->alloc_rx_buf(adapter, 1994 adapter->alloc_rx_buf(adapter, e1000_desc_unused(adapter->rx_ring));
1954 e1000_desc_unused(adapter->rx_ring));
1955} 1995}
1956 1996
1957/** 1997/**
@@ -1967,9 +2007,11 @@ void e1000e_power_up_phy(struct e1000_adapter *adapter)
1967 u16 mii_reg = 0; 2007 u16 mii_reg = 0;
1968 2008
1969 /* Just clear the power down bit to wake the phy back up */ 2009 /* Just clear the power down bit to wake the phy back up */
1970 if (adapter->hw.media_type == e1000_media_type_copper) { 2010 if (adapter->hw.phy.media_type == e1000_media_type_copper) {
1971 /* according to the manual, the phy will retain its 2011 /*
1972 * settings across a power-down/up cycle */ 2012 * According to the manual, the phy will retain its
2013 * settings across a power-down/up cycle
2014 */
1973 e1e_rphy(&adapter->hw, PHY_CONTROL, &mii_reg); 2015 e1e_rphy(&adapter->hw, PHY_CONTROL, &mii_reg);
1974 mii_reg &= ~MII_CR_POWER_DOWN; 2016 mii_reg &= ~MII_CR_POWER_DOWN;
1975 e1e_wphy(&adapter->hw, PHY_CONTROL, mii_reg); 2017 e1e_wphy(&adapter->hw, PHY_CONTROL, mii_reg);
@@ -1994,12 +2036,11 @@ static void e1000_power_down_phy(struct e1000_adapter *adapter)
1994 return; 2036 return;
1995 2037
1996 /* non-copper PHY? */ 2038 /* non-copper PHY? */
1997 if (adapter->hw.media_type != e1000_media_type_copper) 2039 if (adapter->hw.phy.media_type != e1000_media_type_copper)
1998 return; 2040 return;
1999 2041
2000 /* reset is blocked because of a SoL/IDER session */ 2042 /* reset is blocked because of a SoL/IDER session */
2001 if (e1000e_check_mng_mode(hw) || 2043 if (e1000e_check_mng_mode(hw) || e1000_check_reset_block(hw))
2002 e1000_check_reset_block(hw))
2003 return; 2044 return;
2004 2045
2005 /* manageability (AMT) is enabled */ 2046 /* manageability (AMT) is enabled */
@@ -2019,51 +2060,61 @@ static void e1000_power_down_phy(struct e1000_adapter *adapter)
2019 * This function boots the hardware and enables some settings that 2060 * This function boots the hardware and enables some settings that
2020 * require a configuration cycle of the hardware - those cannot be 2061 * require a configuration cycle of the hardware - those cannot be
2021 * set/changed during runtime. After reset the device needs to be 2062 * set/changed during runtime. After reset the device needs to be
2022 * properly configured for rx, tx etc. 2063 * properly configured for Rx, Tx etc.
2023 */ 2064 */
2024void e1000e_reset(struct e1000_adapter *adapter) 2065void e1000e_reset(struct e1000_adapter *adapter)
2025{ 2066{
2026 struct e1000_mac_info *mac = &adapter->hw.mac; 2067 struct e1000_mac_info *mac = &adapter->hw.mac;
2068 struct e1000_fc_info *fc = &adapter->hw.fc;
2027 struct e1000_hw *hw = &adapter->hw; 2069 struct e1000_hw *hw = &adapter->hw;
2028 u32 tx_space, min_tx_space, min_rx_space; 2070 u32 tx_space, min_tx_space, min_rx_space;
2029 u32 pba; 2071 u32 pba = adapter->pba;
2030 u16 hwm; 2072 u16 hwm;
2031 2073
2032 ew32(PBA, adapter->pba); 2074 /* reset Packet Buffer Allocation to default */
2075 ew32(PBA, pba);
2033 2076
2034 if (mac->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN ) { 2077 if (adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) {
2035 /* To maintain wire speed transmits, the Tx FIFO should be 2078 /*
2079 * To maintain wire speed transmits, the Tx FIFO should be
2036 * large enough to accommodate two full transmit packets, 2080 * large enough to accommodate two full transmit packets,
2037 * rounded up to the next 1KB and expressed in KB. Likewise, 2081 * rounded up to the next 1KB and expressed in KB. Likewise,
2038 * the Rx FIFO should be large enough to accommodate at least 2082 * the Rx FIFO should be large enough to accommodate at least
2039 * one full receive packet and is similarly rounded up and 2083 * one full receive packet and is similarly rounded up and
2040 * expressed in KB. */ 2084 * expressed in KB.
2085 */
2041 pba = er32(PBA); 2086 pba = er32(PBA);
2042 /* upper 16 bits has Tx packet buffer allocation size in KB */ 2087 /* upper 16 bits has Tx packet buffer allocation size in KB */
2043 tx_space = pba >> 16; 2088 tx_space = pba >> 16;
2044 /* lower 16 bits has Rx packet buffer allocation size in KB */ 2089 /* lower 16 bits has Rx packet buffer allocation size in KB */
2045 pba &= 0xffff; 2090 pba &= 0xffff;
2046 /* the tx fifo also stores 16 bytes of information about the tx 2091 /*
2047 * but don't include ethernet FCS because hardware appends it */ 2092 * the Tx fifo also stores 16 bytes of information about the tx
2048 min_tx_space = (mac->max_frame_size + 2093 * but don't include ethernet FCS because hardware appends it
2094 */
2095 min_tx_space = (adapter->max_frame_size +
2049 sizeof(struct e1000_tx_desc) - 2096 sizeof(struct e1000_tx_desc) -
2050 ETH_FCS_LEN) * 2; 2097 ETH_FCS_LEN) * 2;
2051 min_tx_space = ALIGN(min_tx_space, 1024); 2098 min_tx_space = ALIGN(min_tx_space, 1024);
2052 min_tx_space >>= 10; 2099 min_tx_space >>= 10;
2053 /* software strips receive CRC, so leave room for it */ 2100 /* software strips receive CRC, so leave room for it */
2054 min_rx_space = mac->max_frame_size; 2101 min_rx_space = adapter->max_frame_size;
2055 min_rx_space = ALIGN(min_rx_space, 1024); 2102 min_rx_space = ALIGN(min_rx_space, 1024);
2056 min_rx_space >>= 10; 2103 min_rx_space >>= 10;
2057 2104
2058 /* If current Tx allocation is less than the min Tx FIFO size, 2105 /*
2106 * If current Tx allocation is less than the min Tx FIFO size,
2059 * and the min Tx FIFO size is less than the current Rx FIFO 2107 * and the min Tx FIFO size is less than the current Rx FIFO
2060 * allocation, take space away from current Rx allocation */ 2108 * allocation, take space away from current Rx allocation
2109 */
2061 if ((tx_space < min_tx_space) && 2110 if ((tx_space < min_tx_space) &&
2062 ((min_tx_space - tx_space) < pba)) { 2111 ((min_tx_space - tx_space) < pba)) {
2063 pba -= min_tx_space - tx_space; 2112 pba -= min_tx_space - tx_space;
2064 2113
2065 /* if short on rx space, rx wins and must trump tx 2114 /*
2066 * adjustment or use Early Receive if available */ 2115 * if short on Rx space, Rx wins and must trump tx
2116 * adjustment or use Early Receive if available
2117 */
2067 if ((pba < min_rx_space) && 2118 if ((pba < min_rx_space) &&
2068 (!(adapter->flags & FLAG_HAS_ERT))) 2119 (!(adapter->flags & FLAG_HAS_ERT)))
2069 /* ERT enabled in e1000_configure_rx */ 2120 /* ERT enabled in e1000_configure_rx */
@@ -2074,29 +2125,33 @@ void e1000e_reset(struct e1000_adapter *adapter)
2074 } 2125 }
2075 2126
2076 2127
2077 /* flow control settings */ 2128 /*
2078 /* The high water mark must be low enough to fit one full frame 2129 * flow control settings
2130 *
2131 * The high water mark must be low enough to fit one full frame
2079 * (or the size used for early receive) above it in the Rx FIFO. 2132 * (or the size used for early receive) above it in the Rx FIFO.
2080 * Set it to the lower of: 2133 * Set it to the lower of:
2081 * - 90% of the Rx FIFO size, and 2134 * - 90% of the Rx FIFO size, and
2082 * - the full Rx FIFO size minus the early receive size (for parts 2135 * - the full Rx FIFO size minus the early receive size (for parts
2083 * with ERT support assuming ERT set to E1000_ERT_2048), or 2136 * with ERT support assuming ERT set to E1000_ERT_2048), or
2084 * - the full Rx FIFO size minus one full frame */ 2137 * - the full Rx FIFO size minus one full frame
2138 */
2085 if (adapter->flags & FLAG_HAS_ERT) 2139 if (adapter->flags & FLAG_HAS_ERT)
2086 hwm = min(((adapter->pba << 10) * 9 / 10), 2140 hwm = min(((pba << 10) * 9 / 10),
2087 ((adapter->pba << 10) - (E1000_ERT_2048 << 3))); 2141 ((pba << 10) - (E1000_ERT_2048 << 3)));
2088 else 2142 else
2089 hwm = min(((adapter->pba << 10) * 9 / 10), 2143 hwm = min(((pba << 10) * 9 / 10),
2090 ((adapter->pba << 10) - mac->max_frame_size)); 2144 ((pba << 10) - adapter->max_frame_size));
2091 2145
2092 mac->fc_high_water = hwm & 0xFFF8; /* 8-byte granularity */ 2146 fc->high_water = hwm & 0xFFF8; /* 8-byte granularity */
2093 mac->fc_low_water = mac->fc_high_water - 8; 2147 fc->low_water = fc->high_water - 8;
2094 2148
2095 if (adapter->flags & FLAG_DISABLE_FC_PAUSE_TIME) 2149 if (adapter->flags & FLAG_DISABLE_FC_PAUSE_TIME)
2096 mac->fc_pause_time = 0xFFFF; 2150 fc->pause_time = 0xFFFF;
2097 else 2151 else
2098 mac->fc_pause_time = E1000_FC_PAUSE_TIME; 2152 fc->pause_time = E1000_FC_PAUSE_TIME;
2099 mac->fc = mac->original_fc; 2153 fc->send_xon = 1;
2154 fc->type = fc->original_type;
2100 2155
2101 /* Allow time for pending master requests to run */ 2156 /* Allow time for pending master requests to run */
2102 mac->ops.reset_hw(hw); 2157 mac->ops.reset_hw(hw);
@@ -2115,9 +2170,11 @@ void e1000e_reset(struct e1000_adapter *adapter)
2115 2170
2116 if (!(adapter->flags & FLAG_SMART_POWER_DOWN)) { 2171 if (!(adapter->flags & FLAG_SMART_POWER_DOWN)) {
2117 u16 phy_data = 0; 2172 u16 phy_data = 0;
2118 /* speed up time to link by disabling smart power down, ignore 2173 /*
2174 * speed up time to link by disabling smart power down, ignore
2119 * the return value of this function because there is nothing 2175 * the return value of this function because there is nothing
2120 * different we would do if it failed */ 2176 * different we would do if it failed
2177 */
2121 e1e_rphy(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data); 2178 e1e_rphy(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data);
2122 phy_data &= ~IGP02E1000_PM_SPD; 2179 phy_data &= ~IGP02E1000_PM_SPD;
2123 e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, phy_data); 2180 e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, phy_data);
@@ -2147,8 +2204,10 @@ void e1000e_down(struct e1000_adapter *adapter)
2147 struct e1000_hw *hw = &adapter->hw; 2204 struct e1000_hw *hw = &adapter->hw;
2148 u32 tctl, rctl; 2205 u32 tctl, rctl;
2149 2206
2150 /* signal that we're down so the interrupt handler does not 2207 /*
2151 * reschedule our watchdog timer */ 2208 * signal that we're down so the interrupt handler does not
2209 * reschedule our watchdog timer
2210 */
2152 set_bit(__E1000_DOWN, &adapter->state); 2211 set_bit(__E1000_DOWN, &adapter->state);
2153 2212
2154 /* disable receives in the hardware */ 2213 /* disable receives in the hardware */
@@ -2167,7 +2226,6 @@ void e1000e_down(struct e1000_adapter *adapter)
2167 msleep(10); 2226 msleep(10);
2168 2227
2169 napi_disable(&adapter->napi); 2228 napi_disable(&adapter->napi);
2170 atomic_set(&adapter->irq_sem, 0);
2171 e1000_irq_disable(adapter); 2229 e1000_irq_disable(adapter);
2172 2230
2173 del_timer_sync(&adapter->watchdog_timer); 2231 del_timer_sync(&adapter->watchdog_timer);
@@ -2208,13 +2266,12 @@ void e1000e_reinit_locked(struct e1000_adapter *adapter)
2208 **/ 2266 **/
2209static int __devinit e1000_sw_init(struct e1000_adapter *adapter) 2267static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
2210{ 2268{
2211 struct e1000_hw *hw = &adapter->hw;
2212 struct net_device *netdev = adapter->netdev; 2269 struct net_device *netdev = adapter->netdev;
2213 2270
2214 adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN; 2271 adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN;
2215 adapter->rx_ps_bsize0 = 128; 2272 adapter->rx_ps_bsize0 = 128;
2216 hw->mac.max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; 2273 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
2217 hw->mac.min_frame_size = ETH_ZLEN + ETH_FCS_LEN; 2274 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
2218 2275
2219 adapter->tx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL); 2276 adapter->tx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL);
2220 if (!adapter->tx_ring) 2277 if (!adapter->tx_ring)
@@ -2227,7 +2284,6 @@ static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
2227 spin_lock_init(&adapter->tx_queue_lock); 2284 spin_lock_init(&adapter->tx_queue_lock);
2228 2285
2229 /* Explicitly disable IRQ since the NIC can be in any state. */ 2286 /* Explicitly disable IRQ since the NIC can be in any state. */
2230 atomic_set(&adapter->irq_sem, 0);
2231 e1000_irq_disable(adapter); 2287 e1000_irq_disable(adapter);
2232 2288
2233 spin_lock_init(&adapter->stats_lock); 2289 spin_lock_init(&adapter->stats_lock);
@@ -2281,16 +2337,20 @@ static int e1000_open(struct net_device *netdev)
2281 E1000_MNG_DHCP_COOKIE_STATUS_VLAN)) 2337 E1000_MNG_DHCP_COOKIE_STATUS_VLAN))
2282 e1000_update_mng_vlan(adapter); 2338 e1000_update_mng_vlan(adapter);
2283 2339
2284 /* If AMT is enabled, let the firmware know that the network 2340 /*
2285 * interface is now open */ 2341 * If AMT is enabled, let the firmware know that the network
2342 * interface is now open
2343 */
2286 if ((adapter->flags & FLAG_HAS_AMT) && 2344 if ((adapter->flags & FLAG_HAS_AMT) &&
2287 e1000e_check_mng_mode(&adapter->hw)) 2345 e1000e_check_mng_mode(&adapter->hw))
2288 e1000_get_hw_control(adapter); 2346 e1000_get_hw_control(adapter);
2289 2347
2290 /* before we allocate an interrupt, we must be ready to handle it. 2348 /*
2349 * before we allocate an interrupt, we must be ready to handle it.
2291 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt 2350 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
2292 * as soon as we call pci_request_irq, so we have to setup our 2351 * as soon as we call pci_request_irq, so we have to setup our
2293 * clean_rx handler before we do so. */ 2352 * clean_rx handler before we do so.
2353 */
2294 e1000_configure(adapter); 2354 e1000_configure(adapter);
2295 2355
2296 err = e1000_request_irq(adapter); 2356 err = e1000_request_irq(adapter);
@@ -2344,16 +2404,20 @@ static int e1000_close(struct net_device *netdev)
2344 e1000e_free_tx_resources(adapter); 2404 e1000e_free_tx_resources(adapter);
2345 e1000e_free_rx_resources(adapter); 2405 e1000e_free_rx_resources(adapter);
2346 2406
2347 /* kill manageability vlan ID if supported, but not if a vlan with 2407 /*
2348 * the same ID is registered on the host OS (let 8021q kill it) */ 2408 * kill manageability vlan ID if supported, but not if a vlan with
2409 * the same ID is registered on the host OS (let 8021q kill it)
2410 */
2349 if ((adapter->hw.mng_cookie.status & 2411 if ((adapter->hw.mng_cookie.status &
2350 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) && 2412 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
2351 !(adapter->vlgrp && 2413 !(adapter->vlgrp &&
2352 vlan_group_get_device(adapter->vlgrp, adapter->mng_vlan_id))) 2414 vlan_group_get_device(adapter->vlgrp, adapter->mng_vlan_id)))
2353 e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id); 2415 e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
2354 2416
2355 /* If AMT is enabled, let the firmware know that the network 2417 /*
2356 * interface is now closed */ 2418 * If AMT is enabled, let the firmware know that the network
2419 * interface is now closed
2420 */
2357 if ((adapter->flags & FLAG_HAS_AMT) && 2421 if ((adapter->flags & FLAG_HAS_AMT) &&
2358 e1000e_check_mng_mode(&adapter->hw)) 2422 e1000e_check_mng_mode(&adapter->hw))
2359 e1000_release_hw_control(adapter); 2423 e1000_release_hw_control(adapter);
@@ -2384,12 +2448,14 @@ static int e1000_set_mac(struct net_device *netdev, void *p)
2384 /* activate the work around */ 2448 /* activate the work around */
2385 e1000e_set_laa_state_82571(&adapter->hw, 1); 2449 e1000e_set_laa_state_82571(&adapter->hw, 1);
2386 2450
2387 /* Hold a copy of the LAA in RAR[14] This is done so that 2451 /*
2452 * Hold a copy of the LAA in RAR[14] This is done so that
2388 * between the time RAR[0] gets clobbered and the time it 2453 * between the time RAR[0] gets clobbered and the time it
2389 * gets fixed (in e1000_watchdog), the actual LAA is in one 2454 * gets fixed (in e1000_watchdog), the actual LAA is in one
2390 * of the RARs and no incoming packets directed to this port 2455 * of the RARs and no incoming packets directed to this port
2391 * are dropped. Eventually the LAA will be in RAR[0] and 2456 * are dropped. Eventually the LAA will be in RAR[0] and
2392 * RAR[14] */ 2457 * RAR[14]
2458 */
2393 e1000e_rar_set(&adapter->hw, 2459 e1000e_rar_set(&adapter->hw,
2394 adapter->hw.mac.addr, 2460 adapter->hw.mac.addr,
2395 adapter->hw.mac.rar_entry_count - 1); 2461 adapter->hw.mac.rar_entry_count - 1);
@@ -2398,8 +2464,10 @@ static int e1000_set_mac(struct net_device *netdev, void *p)
2398 return 0; 2464 return 0;
2399} 2465}
2400 2466
2401/* Need to wait a few seconds after link up to get diagnostic information from 2467/*
2402 * the phy */ 2468 * Need to wait a few seconds after link up to get diagnostic information from
2469 * the phy
2470 */
2403static void e1000_update_phy_info(unsigned long data) 2471static void e1000_update_phy_info(unsigned long data)
2404{ 2472{
2405 struct e1000_adapter *adapter = (struct e1000_adapter *) data; 2473 struct e1000_adapter *adapter = (struct e1000_adapter *) data;
@@ -2430,7 +2498,8 @@ void e1000e_update_stats(struct e1000_adapter *adapter)
2430 2498
2431 spin_lock_irqsave(&adapter->stats_lock, irq_flags); 2499 spin_lock_irqsave(&adapter->stats_lock, irq_flags);
2432 2500
2433 /* these counters are modified from e1000_adjust_tbi_stats, 2501 /*
2502 * these counters are modified from e1000_adjust_tbi_stats,
2434 * called from the interrupt context, so they must only 2503 * called from the interrupt context, so they must only
2435 * be written while holding adapter->stats_lock 2504 * be written while holding adapter->stats_lock
2436 */ 2505 */
@@ -2524,8 +2593,10 @@ void e1000e_update_stats(struct e1000_adapter *adapter)
2524 2593
2525 /* Rx Errors */ 2594 /* Rx Errors */
2526 2595
2527 /* RLEC on some newer hardware can be incorrect so build 2596 /*
2528 * our own version based on RUC and ROC */ 2597 * RLEC on some newer hardware can be incorrect so build
2598 * our own version based on RUC and ROC
2599 */
2529 adapter->net_stats.rx_errors = adapter->stats.rxerrc + 2600 adapter->net_stats.rx_errors = adapter->stats.rxerrc +
2530 adapter->stats.crcerrs + adapter->stats.algnerrc + 2601 adapter->stats.crcerrs + adapter->stats.algnerrc +
2531 adapter->stats.ruc + adapter->stats.roc + 2602 adapter->stats.ruc + adapter->stats.roc +
@@ -2546,7 +2617,7 @@ void e1000e_update_stats(struct e1000_adapter *adapter)
2546 /* Tx Dropped needs to be maintained elsewhere */ 2617 /* Tx Dropped needs to be maintained elsewhere */
2547 2618
2548 /* Phy Stats */ 2619 /* Phy Stats */
2549 if (hw->media_type == e1000_media_type_copper) { 2620 if (hw->phy.media_type == e1000_media_type_copper) {
2550 if ((adapter->link_speed == SPEED_1000) && 2621 if ((adapter->link_speed == SPEED_1000) &&
2551 (!e1e_rphy(hw, PHY_1000T_STATUS, &phy_tmp))) { 2622 (!e1e_rphy(hw, PHY_1000T_STATUS, &phy_tmp))) {
2552 phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK; 2623 phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
@@ -2564,8 +2635,8 @@ void e1000e_update_stats(struct e1000_adapter *adapter)
2564 2635
2565static void e1000_print_link_info(struct e1000_adapter *adapter) 2636static void e1000_print_link_info(struct e1000_adapter *adapter)
2566{ 2637{
2567 struct net_device *netdev = adapter->netdev;
2568 struct e1000_hw *hw = &adapter->hw; 2638 struct e1000_hw *hw = &adapter->hw;
2639 struct net_device *netdev = adapter->netdev;
2569 u32 ctrl = er32(CTRL); 2640 u32 ctrl = er32(CTRL);
2570 2641
2571 ndev_info(netdev, 2642 ndev_info(netdev,
@@ -2579,6 +2650,62 @@ static void e1000_print_link_info(struct e1000_adapter *adapter)
2579 ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None" ))); 2650 ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None" )));
2580} 2651}
2581 2652
2653static bool e1000_has_link(struct e1000_adapter *adapter)
2654{
2655 struct e1000_hw *hw = &adapter->hw;
2656 bool link_active = 0;
2657 s32 ret_val = 0;
2658
2659 /*
2660 * get_link_status is set on LSC (link status) interrupt or
2661 * Rx sequence error interrupt. get_link_status will stay
2662 * false until the check_for_link establishes link
2663 * for copper adapters ONLY
2664 */
2665 switch (hw->phy.media_type) {
2666 case e1000_media_type_copper:
2667 if (hw->mac.get_link_status) {
2668 ret_val = hw->mac.ops.check_for_link(hw);
2669 link_active = !hw->mac.get_link_status;
2670 } else {
2671 link_active = 1;
2672 }
2673 break;
2674 case e1000_media_type_fiber:
2675 ret_val = hw->mac.ops.check_for_link(hw);
2676 link_active = !!(er32(STATUS) & E1000_STATUS_LU);
2677 break;
2678 case e1000_media_type_internal_serdes:
2679 ret_val = hw->mac.ops.check_for_link(hw);
2680 link_active = adapter->hw.mac.serdes_has_link;
2681 break;
2682 default:
2683 case e1000_media_type_unknown:
2684 break;
2685 }
2686
2687 if ((ret_val == E1000_ERR_PHY) && (hw->phy.type == e1000_phy_igp_3) &&
2688 (er32(CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) {
2689 /* See e1000_kmrn_lock_loss_workaround_ich8lan() */
2690 ndev_info(adapter->netdev,
2691 "Gigabit has been disabled, downgrading speed\n");
2692 }
2693
2694 return link_active;
2695}
2696
2697static void e1000e_enable_receives(struct e1000_adapter *adapter)
2698{
2699 /* make sure the receive unit is started */
2700 if ((adapter->flags & FLAG_RX_NEEDS_RESTART) &&
2701 (adapter->flags & FLAG_RX_RESTART_NOW)) {
2702 struct e1000_hw *hw = &adapter->hw;
2703 u32 rctl = er32(RCTL);
2704 ew32(RCTL, rctl | E1000_RCTL_EN);
2705 adapter->flags &= ~FLAG_RX_RESTART_NOW;
2706 }
2707}
2708
2582/** 2709/**
2583 * e1000_watchdog - Timer Call-back 2710 * e1000_watchdog - Timer Call-back
2584 * @data: pointer to adapter cast into an unsigned long 2711 * @data: pointer to adapter cast into an unsigned long
@@ -2597,48 +2724,35 @@ static void e1000_watchdog_task(struct work_struct *work)
2597{ 2724{
2598 struct e1000_adapter *adapter = container_of(work, 2725 struct e1000_adapter *adapter = container_of(work,
2599 struct e1000_adapter, watchdog_task); 2726 struct e1000_adapter, watchdog_task);
2600
2601 struct net_device *netdev = adapter->netdev; 2727 struct net_device *netdev = adapter->netdev;
2602 struct e1000_mac_info *mac = &adapter->hw.mac; 2728 struct e1000_mac_info *mac = &adapter->hw.mac;
2603 struct e1000_ring *tx_ring = adapter->tx_ring; 2729 struct e1000_ring *tx_ring = adapter->tx_ring;
2604 struct e1000_hw *hw = &adapter->hw; 2730 struct e1000_hw *hw = &adapter->hw;
2605 u32 link, tctl; 2731 u32 link, tctl;
2606 s32 ret_val;
2607 int tx_pending = 0; 2732 int tx_pending = 0;
2608 2733
2609 if ((netif_carrier_ok(netdev)) && 2734 link = e1000_has_link(adapter);
2610 (er32(STATUS) & E1000_STATUS_LU)) 2735 if ((netif_carrier_ok(netdev)) && link) {
2736 e1000e_enable_receives(adapter);
2611 goto link_up; 2737 goto link_up;
2612
2613 ret_val = mac->ops.check_for_link(hw);
2614 if ((ret_val == E1000_ERR_PHY) &&
2615 (adapter->hw.phy.type == e1000_phy_igp_3) &&
2616 (er32(CTRL) &
2617 E1000_PHY_CTRL_GBE_DISABLE)) {
2618 /* See e1000_kmrn_lock_loss_workaround_ich8lan() */
2619 ndev_info(netdev,
2620 "Gigabit has been disabled, downgrading speed\n");
2621 } 2738 }
2622 2739
2623 if ((e1000e_enable_tx_pkt_filtering(hw)) && 2740 if ((e1000e_enable_tx_pkt_filtering(hw)) &&
2624 (adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id)) 2741 (adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id))
2625 e1000_update_mng_vlan(adapter); 2742 e1000_update_mng_vlan(adapter);
2626 2743
2627 if ((adapter->hw.media_type == e1000_media_type_internal_serdes) &&
2628 !(er32(TXCW) & E1000_TXCW_ANE))
2629 link = adapter->hw.mac.serdes_has_link;
2630 else
2631 link = er32(STATUS) & E1000_STATUS_LU;
2632
2633 if (link) { 2744 if (link) {
2634 if (!netif_carrier_ok(netdev)) { 2745 if (!netif_carrier_ok(netdev)) {
2635 bool txb2b = 1; 2746 bool txb2b = 1;
2747 /* update snapshot of PHY registers on LSC */
2636 mac->ops.get_link_up_info(&adapter->hw, 2748 mac->ops.get_link_up_info(&adapter->hw,
2637 &adapter->link_speed, 2749 &adapter->link_speed,
2638 &adapter->link_duplex); 2750 &adapter->link_duplex);
2639 e1000_print_link_info(adapter); 2751 e1000_print_link_info(adapter);
2640 /* tweak tx_queue_len according to speed/duplex 2752 /*
2641 * and adjust the timeout factor */ 2753 * tweak tx_queue_len according to speed/duplex
2754 * and adjust the timeout factor
2755 */
2642 netdev->tx_queue_len = adapter->tx_queue_len; 2756 netdev->tx_queue_len = adapter->tx_queue_len;
2643 adapter->tx_timeout_factor = 1; 2757 adapter->tx_timeout_factor = 1;
2644 switch (adapter->link_speed) { 2758 switch (adapter->link_speed) {
@@ -2654,18 +2768,22 @@ static void e1000_watchdog_task(struct work_struct *work)
2654 break; 2768 break;
2655 } 2769 }
2656 2770
2657 /* workaround: re-program speed mode bit after 2771 /*
2658 * link-up event */ 2772 * workaround: re-program speed mode bit after
2773 * link-up event
2774 */
2659 if ((adapter->flags & FLAG_TARC_SPEED_MODE_BIT) && 2775 if ((adapter->flags & FLAG_TARC_SPEED_MODE_BIT) &&
2660 !txb2b) { 2776 !txb2b) {
2661 u32 tarc0; 2777 u32 tarc0;
2662 tarc0 = er32(TARC0); 2778 tarc0 = er32(TARC(0));
2663 tarc0 &= ~SPEED_MODE_BIT; 2779 tarc0 &= ~SPEED_MODE_BIT;
2664 ew32(TARC0, tarc0); 2780 ew32(TARC(0), tarc0);
2665 } 2781 }
2666 2782
2667 /* disable TSO for pcie and 10/100 speeds, to avoid 2783 /*
2668 * some hardware issues */ 2784 * disable TSO for pcie and 10/100 speeds, to avoid
2785 * some hardware issues
2786 */
2669 if (!(adapter->flags & FLAG_TSO_FORCE)) { 2787 if (!(adapter->flags & FLAG_TSO_FORCE)) {
2670 switch (adapter->link_speed) { 2788 switch (adapter->link_speed) {
2671 case SPEED_10: 2789 case SPEED_10:
@@ -2685,8 +2803,10 @@ static void e1000_watchdog_task(struct work_struct *work)
2685 } 2803 }
2686 } 2804 }
2687 2805
2688 /* enable transmits in the hardware, need to do this 2806 /*
2689 * after setting TARC0 */ 2807 * enable transmits in the hardware, need to do this
2808 * after setting TARC(0)
2809 */
2690 tctl = er32(TCTL); 2810 tctl = er32(TCTL);
2691 tctl |= E1000_TCTL_EN; 2811 tctl |= E1000_TCTL_EN;
2692 ew32(TCTL, tctl); 2812 ew32(TCTL, tctl);
@@ -2697,13 +2817,6 @@ static void e1000_watchdog_task(struct work_struct *work)
2697 if (!test_bit(__E1000_DOWN, &adapter->state)) 2817 if (!test_bit(__E1000_DOWN, &adapter->state))
2698 mod_timer(&adapter->phy_info_timer, 2818 mod_timer(&adapter->phy_info_timer,
2699 round_jiffies(jiffies + 2 * HZ)); 2819 round_jiffies(jiffies + 2 * HZ));
2700 } else {
2701 /* make sure the receive unit is started */
2702 if (adapter->flags & FLAG_RX_NEEDS_RESTART) {
2703 u32 rctl = er32(RCTL);
2704 ew32(RCTL, rctl |
2705 E1000_RCTL_EN);
2706 }
2707 } 2820 }
2708 } else { 2821 } else {
2709 if (netif_carrier_ok(netdev)) { 2822 if (netif_carrier_ok(netdev)) {
@@ -2740,23 +2853,27 @@ link_up:
2740 tx_pending = (e1000_desc_unused(tx_ring) + 1 < 2853 tx_pending = (e1000_desc_unused(tx_ring) + 1 <
2741 tx_ring->count); 2854 tx_ring->count);
2742 if (tx_pending) { 2855 if (tx_pending) {
2743 /* We've lost link, so the controller stops DMA, 2856 /*
2857 * We've lost link, so the controller stops DMA,
2744 * but we've got queued Tx work that's never going 2858 * but we've got queued Tx work that's never going
2745 * to get done, so reset controller to flush Tx. 2859 * to get done, so reset controller to flush Tx.
2746 * (Do the reset outside of interrupt context). */ 2860 * (Do the reset outside of interrupt context).
2861 */
2747 adapter->tx_timeout_count++; 2862 adapter->tx_timeout_count++;
2748 schedule_work(&adapter->reset_task); 2863 schedule_work(&adapter->reset_task);
2749 } 2864 }
2750 } 2865 }
2751 2866
2752 /* Cause software interrupt to ensure rx ring is cleaned */ 2867 /* Cause software interrupt to ensure Rx ring is cleaned */
2753 ew32(ICS, E1000_ICS_RXDMT0); 2868 ew32(ICS, E1000_ICS_RXDMT0);
2754 2869
2755 /* Force detection of hung controller every watchdog period */ 2870 /* Force detection of hung controller every watchdog period */
2756 adapter->detect_tx_hung = 1; 2871 adapter->detect_tx_hung = 1;
2757 2872
2758 /* With 82571 controllers, LAA may be overwritten due to controller 2873 /*
2759 * reset from the other port. Set the appropriate LAA in RAR[0] */ 2874 * With 82571 controllers, LAA may be overwritten due to controller
2875 * reset from the other port. Set the appropriate LAA in RAR[0]
2876 */
2760 if (e1000e_get_laa_state_82571(hw)) 2877 if (e1000e_get_laa_state_82571(hw))
2761 e1000e_rar_set(hw, adapter->hw.mac.addr, 0); 2878 e1000e_rar_set(hw, adapter->hw.mac.addr, 0);
2762 2879
@@ -3032,16 +3149,20 @@ static void e1000_tx_queue(struct e1000_adapter *adapter,
3032 3149
3033 tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd); 3150 tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
3034 3151
3035 /* Force memory writes to complete before letting h/w 3152 /*
3153 * Force memory writes to complete before letting h/w
3036 * know there are new descriptors to fetch. (Only 3154 * know there are new descriptors to fetch. (Only
3037 * applicable for weak-ordered memory model archs, 3155 * applicable for weak-ordered memory model archs,
3038 * such as IA-64). */ 3156 * such as IA-64).
3157 */
3039 wmb(); 3158 wmb();
3040 3159
3041 tx_ring->next_to_use = i; 3160 tx_ring->next_to_use = i;
3042 writel(i, adapter->hw.hw_addr + tx_ring->tail); 3161 writel(i, adapter->hw.hw_addr + tx_ring->tail);
3043 /* we need this if more than one processor can write to our tail 3162 /*
3044 * at a time, it synchronizes IO on IA64/Altix systems */ 3163 * we need this if more than one processor can write to our tail
3164 * at a time, it synchronizes IO on IA64/Altix systems
3165 */
3045 mmiowb(); 3166 mmiowb();
3046} 3167}
3047 3168
@@ -3089,13 +3210,17 @@ static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
3089 struct e1000_adapter *adapter = netdev_priv(netdev); 3210 struct e1000_adapter *adapter = netdev_priv(netdev);
3090 3211
3091 netif_stop_queue(netdev); 3212 netif_stop_queue(netdev);
3092 /* Herbert's original patch had: 3213 /*
3214 * Herbert's original patch had:
3093 * smp_mb__after_netif_stop_queue(); 3215 * smp_mb__after_netif_stop_queue();
3094 * but since that doesn't exist yet, just open code it. */ 3216 * but since that doesn't exist yet, just open code it.
3217 */
3095 smp_mb(); 3218 smp_mb();
3096 3219
 3097 /* We need to check again in case another CPU has just 3220 /*
 3098 * made room available. */ 3221 * We need to check again in case another CPU has just
3222 * made room available.
3223 */
3099 if (e1000_desc_unused(adapter->tx_ring) < size) 3224 if (e1000_desc_unused(adapter->tx_ring) < size)
3100 return -EBUSY; 3225 return -EBUSY;
3101 3226
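The stop/recheck sequence above is the standard way to close the race between the transmit path running out of descriptors and the clean-up path freeing some on another CPU: stop the queue, issue a full memory barrier, then test again; if space appeared in the meantime, restart the queue instead of reporting BUSY. A minimal sketch of the pattern (desc_unused() stands in for the driver's e1000_desc_unused()):

/* Hedged sketch of the stop-queue race check. */
static int maybe_stop_tx(struct net_device *netdev,
			 struct e1000_ring *ring, int needed)
{
	if (desc_unused(ring) >= needed)
		return 0;			/* fast path: enough room */

	netif_stop_queue(netdev);
	smp_mb();				/* pairs with the barrier in the Tx clean path */

	if (desc_unused(ring) < needed)
		return -EBUSY;			/* caller returns NETDEV_TX_BUSY */

	netif_start_queue(netdev);		/* cleaner freed space while we stopped */
	return 0;
}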
@@ -3142,21 +3267,29 @@ static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
3142 } 3267 }
3143 3268
3144 mss = skb_shinfo(skb)->gso_size; 3269 mss = skb_shinfo(skb)->gso_size;
3145 /* The controller does a simple calculation to 3270 /*
3271 * The controller does a simple calculation to
3146 * make sure there is enough room in the FIFO before 3272 * make sure there is enough room in the FIFO before
3147 * initiating the DMA for each buffer. The calc is: 3273 * initiating the DMA for each buffer. The calc is:
3148 * 4 = ceil(buffer len/mss). To make sure we don't 3274 * 4 = ceil(buffer len/mss). To make sure we don't
3149 * overrun the FIFO, adjust the max buffer len if mss 3275 * overrun the FIFO, adjust the max buffer len if mss
3150 * drops. */ 3276 * drops.
3277 */
3151 if (mss) { 3278 if (mss) {
3152 u8 hdr_len; 3279 u8 hdr_len;
3153 max_per_txd = min(mss << 2, max_per_txd); 3280 max_per_txd = min(mss << 2, max_per_txd);
3154 max_txd_pwr = fls(max_per_txd) - 1; 3281 max_txd_pwr = fls(max_per_txd) - 1;
3155 3282
3156 /* TSO Workaround for 82571/2/3 Controllers -- if skb->data 3283 /*
3157 * points to just header, pull a few bytes of payload from 3284 * TSO Workaround for 82571/2/3 Controllers -- if skb->data
3158 * frags into skb->data */ 3285 * points to just header, pull a few bytes of payload from
3286 * frags into skb->data
3287 */
3159 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 3288 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
3289 /*
3290 * we do this workaround for ES2LAN, but it is un-necessary,
3291 * avoiding it could save a lot of cycles
3292 */
3160 if (skb->data_len && (hdr_len == len)) { 3293 if (skb->data_len && (hdr_len == len)) {
3161 unsigned int pull_size; 3294 unsigned int pull_size;
3162 3295
@@ -3190,8 +3323,10 @@ static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
3190 /* Collision - tell upper layer to requeue */ 3323 /* Collision - tell upper layer to requeue */
3191 return NETDEV_TX_LOCKED; 3324 return NETDEV_TX_LOCKED;
3192 3325
3193 /* need: count + 2 desc gap to keep tail from touching 3326 /*
3194 * head, otherwise try next time */ 3327 * need: count + 2 desc gap to keep tail from touching
3328 * head, otherwise try next time
3329 */
3195 if (e1000_maybe_stop_tx(netdev, count + 2)) { 3330 if (e1000_maybe_stop_tx(netdev, count + 2)) {
3196 spin_unlock_irqrestore(&adapter->tx_queue_lock, irq_flags); 3331 spin_unlock_irqrestore(&adapter->tx_queue_lock, irq_flags);
3197 return NETDEV_TX_BUSY; 3332 return NETDEV_TX_BUSY;
@@ -3216,9 +3351,11 @@ static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
3216 else if (e1000_tx_csum(adapter, skb)) 3351 else if (e1000_tx_csum(adapter, skb))
3217 tx_flags |= E1000_TX_FLAGS_CSUM; 3352 tx_flags |= E1000_TX_FLAGS_CSUM;
3218 3353
3219 /* Old method was to assume IPv4 packet by default if TSO was enabled. 3354 /*
3355 * Old method was to assume IPv4 packet by default if TSO was enabled.
3220 * 82571 hardware supports TSO capabilities for IPv6 as well... 3356 * 82571 hardware supports TSO capabilities for IPv6 as well...
3221 * no longer assume, we must. */ 3357 * no longer assume, we must.
3358 */
3222 if (skb->protocol == htons(ETH_P_IP)) 3359 if (skb->protocol == htons(ETH_P_IP))
3223 tx_flags |= E1000_TX_FLAGS_IPV4; 3360 tx_flags |= E1000_TX_FLAGS_IPV4;
3224 3361
@@ -3316,14 +3453,16 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
3316 while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) 3453 while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
3317 msleep(1); 3454 msleep(1);
3318 /* e1000e_down has a dependency on max_frame_size */ 3455 /* e1000e_down has a dependency on max_frame_size */
3319 adapter->hw.mac.max_frame_size = max_frame; 3456 adapter->max_frame_size = max_frame;
3320 if (netif_running(netdev)) 3457 if (netif_running(netdev))
3321 e1000e_down(adapter); 3458 e1000e_down(adapter);
3322 3459
3323 /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN 3460 /*
3461 * NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
3324 * means we reserve 2 more, this pushes us to allocate from the next 3462 * means we reserve 2 more, this pushes us to allocate from the next
3325 * larger slab size. 3463 * larger slab size.
3326 * i.e. RXBUFFER_2048 --> size-4096 slab */ 3464 * i.e. RXBUFFER_2048 --> size-4096 slab
3465 */
3327 3466
3328 if (max_frame <= 256) 3467 if (max_frame <= 256)
3329 adapter->rx_buffer_len = 256; 3468 adapter->rx_buffer_len = 256;
@@ -3340,7 +3479,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
3340 if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) || 3479 if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) ||
3341 (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN)) 3480 (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN))
3342 adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN 3481 adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN
3343 + ETH_FCS_LEN ; 3482 + ETH_FCS_LEN;
3344 3483
3345 ndev_info(netdev, "changing MTU from %d to %d\n", 3484 ndev_info(netdev, "changing MTU from %d to %d\n",
3346 netdev->mtu, new_mtu); 3485 netdev->mtu, new_mtu);
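The MTU path above maps the new maximum frame size onto a small ladder of receive buffer sizes; as the comment notes, netdev_alloc_skb's 16-byte reservation plus NET_IP_ALIGN means a 2048-byte buffer actually comes out of the 4096-byte slab. A standalone sketch of the bucketing (only the 256-byte step and the standard-frame special case are visible in this hunk, so the remaining steps are assumptions):

/* Sketch only: the driver's real ladder may differ from these steps. */
static unsigned int pick_rx_buffer_len(unsigned int max_frame)
{
	if (max_frame <= 256)
		return 256;
	if (max_frame <= 512)
		return 512;
	if (max_frame <= 1024)
		return 1024;
	if (max_frame <= 2048)
		return 2048;
	return 4096;		/* assumed cap for the illustration */
}

The special case in the hunk (max_frame equal to a standard Ethernet frame, with or without a VLAN tag) then pins rx_buffer_len to exactly ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN rather than the next bucket.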
@@ -3363,7 +3502,7 @@ static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
3363 struct mii_ioctl_data *data = if_mii(ifr); 3502 struct mii_ioctl_data *data = if_mii(ifr);
3364 unsigned long irq_flags; 3503 unsigned long irq_flags;
3365 3504
3366 if (adapter->hw.media_type != e1000_media_type_copper) 3505 if (adapter->hw.phy.media_type != e1000_media_type_copper)
3367 return -EOPNOTSUPP; 3506 return -EOPNOTSUPP;
3368 3507
3369 switch (cmd) { 3508 switch (cmd) {
@@ -3445,8 +3584,9 @@ static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
3445 E1000_CTRL_EN_PHY_PWR_MGMT; 3584 E1000_CTRL_EN_PHY_PWR_MGMT;
3446 ew32(CTRL, ctrl); 3585 ew32(CTRL, ctrl);
3447 3586
3448 if (adapter->hw.media_type == e1000_media_type_fiber || 3587 if (adapter->hw.phy.media_type == e1000_media_type_fiber ||
3449 adapter->hw.media_type == e1000_media_type_internal_serdes) { 3588 adapter->hw.phy.media_type ==
3589 e1000_media_type_internal_serdes) {
3450 /* keep the laser running in D3 */ 3590 /* keep the laser running in D3 */
3451 ctrl_ext = er32(CTRL_EXT); 3591 ctrl_ext = er32(CTRL_EXT);
3452 ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA; 3592 ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
@@ -3476,8 +3616,10 @@ static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
3476 if (adapter->hw.phy.type == e1000_phy_igp_3) 3616 if (adapter->hw.phy.type == e1000_phy_igp_3)
3477 e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw); 3617 e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw);
3478 3618
3479 /* Release control of h/w to f/w. If f/w is AMT enabled, this 3619 /*
3480 * would have already happened in close and is redundant. */ 3620 * Release control of h/w to f/w. If f/w is AMT enabled, this
3621 * would have already happened in close and is redundant.
3622 */
3481 e1000_release_hw_control(adapter); 3623 e1000_release_hw_control(adapter);
3482 3624
3483 pci_disable_device(pdev); 3625 pci_disable_device(pdev);
@@ -3552,9 +3694,11 @@ static int e1000_resume(struct pci_dev *pdev)
3552 3694
3553 netif_device_attach(netdev); 3695 netif_device_attach(netdev);
3554 3696
3555 /* If the controller has AMT, do not set DRV_LOAD until the interface 3697 /*
3698 * If the controller has AMT, do not set DRV_LOAD until the interface
3556 * is up. For all other cases, let the f/w know that the h/w is now 3699 * is up. For all other cases, let the f/w know that the h/w is now
3557 * under the control of the driver. */ 3700 * under the control of the driver.
3701 */
3558 if (!(adapter->flags & FLAG_HAS_AMT) || !e1000e_check_mng_mode(&adapter->hw)) 3702 if (!(adapter->flags & FLAG_HAS_AMT) || !e1000e_check_mng_mode(&adapter->hw))
3559 e1000_get_hw_control(adapter); 3703 e1000_get_hw_control(adapter);
3560 3704
@@ -3665,9 +3809,11 @@ static void e1000_io_resume(struct pci_dev *pdev)
3665 3809
3666 netif_device_attach(netdev); 3810 netif_device_attach(netdev);
3667 3811
3668 /* If the controller has AMT, do not set DRV_LOAD until the interface 3812 /*
3813 * If the controller has AMT, do not set DRV_LOAD until the interface
3669 * is up. For all other cases, let the f/w know that the h/w is now 3814 * is up. For all other cases, let the f/w know that the h/w is now
3670 * under the control of the driver. */ 3815 * under the control of the driver.
3816 */
3671 if (!(adapter->flags & FLAG_HAS_AMT) || 3817 if (!(adapter->flags & FLAG_HAS_AMT) ||
3672 !e1000e_check_mng_mode(&adapter->hw)) 3818 !e1000e_check_mng_mode(&adapter->hw))
3673 e1000_get_hw_control(adapter); 3819 e1000_get_hw_control(adapter);
@@ -3678,7 +3824,7 @@ static void e1000_print_device_info(struct e1000_adapter *adapter)
3678{ 3824{
3679 struct e1000_hw *hw = &adapter->hw; 3825 struct e1000_hw *hw = &adapter->hw;
3680 struct net_device *netdev = adapter->netdev; 3826 struct net_device *netdev = adapter->netdev;
3681 u32 part_num; 3827 u32 pba_num;
3682 3828
3683 /* print bus type/speed/width info */ 3829 /* print bus type/speed/width info */
3684 ndev_info(netdev, "(PCI Express:2.5GB/s:%s) " 3830 ndev_info(netdev, "(PCI Express:2.5GB/s:%s) "
@@ -3693,10 +3839,10 @@ static void e1000_print_device_info(struct e1000_adapter *adapter)
3693 ndev_info(netdev, "Intel(R) PRO/%s Network Connection\n", 3839 ndev_info(netdev, "Intel(R) PRO/%s Network Connection\n",
3694 (hw->phy.type == e1000_phy_ife) 3840 (hw->phy.type == e1000_phy_ife)
3695 ? "10/100" : "1000"); 3841 ? "10/100" : "1000");
3696 e1000e_read_part_num(hw, &part_num); 3842 e1000e_read_pba_num(hw, &pba_num);
3697 ndev_info(netdev, "MAC: %d, PHY: %d, PBA No: %06x-%03x\n", 3843 ndev_info(netdev, "MAC: %d, PHY: %d, PBA No: %06x-%03x\n",
3698 hw->mac.type, hw->phy.type, 3844 hw->mac.type, hw->phy.type,
3699 (part_num >> 8), (part_num & 0xff)); 3845 (pba_num >> 8), (pba_num & 0xff));
3700} 3846}
3701 3847
3702/** 3848/**
@@ -3828,16 +3974,16 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
3828 memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops)); 3974 memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
3829 memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops)); 3975 memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
3830 3976
3831 err = ei->get_invariants(adapter); 3977 err = ei->get_variants(adapter);
3832 if (err) 3978 if (err)
3833 goto err_hw_init; 3979 goto err_hw_init;
3834 3980
3835 hw->mac.ops.get_bus_info(&adapter->hw); 3981 hw->mac.ops.get_bus_info(&adapter->hw);
3836 3982
3837 adapter->hw.phy.wait_for_link = 0; 3983 adapter->hw.phy.autoneg_wait_to_complete = 0;
3838 3984
3839 /* Copper options */ 3985 /* Copper options */
3840 if (adapter->hw.media_type == e1000_media_type_copper) { 3986 if (adapter->hw.phy.media_type == e1000_media_type_copper) {
3841 adapter->hw.phy.mdix = AUTO_ALL_MODES; 3987 adapter->hw.phy.mdix = AUTO_ALL_MODES;
3842 adapter->hw.phy.disable_polarity_correction = 0; 3988 adapter->hw.phy.disable_polarity_correction = 0;
3843 adapter->hw.phy.ms_type = e1000_ms_hw_default; 3989 adapter->hw.phy.ms_type = e1000_ms_hw_default;
@@ -3861,15 +4007,19 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
3861 if (pci_using_dac) 4007 if (pci_using_dac)
3862 netdev->features |= NETIF_F_HIGHDMA; 4008 netdev->features |= NETIF_F_HIGHDMA;
3863 4009
3864 /* We should not be using LLTX anymore, but we are still TX faster with 4010 /*
3865 * it. */ 4011 * We should not be using LLTX anymore, but we are still Tx faster with
4012 * it.
4013 */
3866 netdev->features |= NETIF_F_LLTX; 4014 netdev->features |= NETIF_F_LLTX;
3867 4015
3868 if (e1000e_enable_mng_pass_thru(&adapter->hw)) 4016 if (e1000e_enable_mng_pass_thru(&adapter->hw))
3869 adapter->flags |= FLAG_MNG_PT_ENABLED; 4017 adapter->flags |= FLAG_MNG_PT_ENABLED;
3870 4018
3871 /* before reading the NVM, reset the controller to 4019 /*
3872 * put the device in a known good starting state */ 4020 * before reading the NVM, reset the controller to
4021 * put the device in a known good starting state
4022 */
3873 adapter->hw.mac.ops.reset_hw(&adapter->hw); 4023 adapter->hw.mac.ops.reset_hw(&adapter->hw);
3874 4024
3875 /* 4025 /*
@@ -3919,8 +4069,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
3919 /* Initialize link parameters. User can change them with ethtool */ 4069 /* Initialize link parameters. User can change them with ethtool */
3920 adapter->hw.mac.autoneg = 1; 4070 adapter->hw.mac.autoneg = 1;
3921 adapter->fc_autoneg = 1; 4071 adapter->fc_autoneg = 1;
3922 adapter->hw.mac.original_fc = e1000_fc_default; 4072 adapter->hw.fc.original_type = e1000_fc_default;
3923 adapter->hw.mac.fc = e1000_fc_default; 4073 adapter->hw.fc.type = e1000_fc_default;
3924 adapter->hw.phy.autoneg_advertised = 0x2f; 4074 adapter->hw.phy.autoneg_advertised = 0x2f;
3925 4075
3926 /* ring size defaults */ 4076 /* ring size defaults */
@@ -3963,9 +4113,11 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
3963 /* reset the hardware with the new settings */ 4113 /* reset the hardware with the new settings */
3964 e1000e_reset(adapter); 4114 e1000e_reset(adapter);
3965 4115
3966 /* If the controller has AMT, do not set DRV_LOAD until the interface 4116 /*
4117 * If the controller has AMT, do not set DRV_LOAD until the interface
3967 * is up. For all other cases, let the f/w know that the h/w is now 4118 * is up. For all other cases, let the f/w know that the h/w is now
3968 * under the control of the driver. */ 4119 * under the control of the driver.
4120 */
3969 if (!(adapter->flags & FLAG_HAS_AMT) || 4121 if (!(adapter->flags & FLAG_HAS_AMT) ||
3970 !e1000e_check_mng_mode(&adapter->hw)) 4122 !e1000e_check_mng_mode(&adapter->hw))
3971 e1000_get_hw_control(adapter); 4123 e1000_get_hw_control(adapter);
@@ -4022,16 +4174,20 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
4022 struct net_device *netdev = pci_get_drvdata(pdev); 4174 struct net_device *netdev = pci_get_drvdata(pdev);
4023 struct e1000_adapter *adapter = netdev_priv(netdev); 4175 struct e1000_adapter *adapter = netdev_priv(netdev);
4024 4176
 4025 /* flush_scheduled_work() may reschedule our watchdog task, so 4177 /*
 4026 * explicitly disable watchdog tasks from being rescheduled */ 4178 * flush_scheduled_work() may reschedule our watchdog task, so
4179 * explicitly disable watchdog tasks from being rescheduled
4180 */
4027 set_bit(__E1000_DOWN, &adapter->state); 4181 set_bit(__E1000_DOWN, &adapter->state);
4028 del_timer_sync(&adapter->watchdog_timer); 4182 del_timer_sync(&adapter->watchdog_timer);
4029 del_timer_sync(&adapter->phy_info_timer); 4183 del_timer_sync(&adapter->phy_info_timer);
4030 4184
4031 flush_scheduled_work(); 4185 flush_scheduled_work();
4032 4186
4033 /* Release control of h/w to f/w. If f/w is AMT enabled, this 4187 /*
4034 * would have already happened in close and is redundant. */ 4188 * Release control of h/w to f/w. If f/w is AMT enabled, this
4189 * would have already happened in close and is redundant.
4190 */
4035 e1000_release_hw_control(adapter); 4191 e1000_release_hw_control(adapter);
4036 4192
4037 unregister_netdev(netdev); 4193 unregister_netdev(netdev);
@@ -4069,13 +4225,16 @@ static struct pci_device_id e1000_pci_tbl[] = {
4069 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_DUAL), board_82571 }, 4225 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_DUAL), board_82571 },
4070 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_QUAD), board_82571 }, 4226 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_QUAD), board_82571 },
4071 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571PT_QUAD_COPPER), board_82571 }, 4227 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571PT_QUAD_COPPER), board_82571 },
4228
4072 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI), board_82572 }, 4229 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI), board_82572 },
4073 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_COPPER), board_82572 }, 4230 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_COPPER), board_82572 },
4074 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_FIBER), board_82572 }, 4231 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_FIBER), board_82572 },
4075 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_SERDES), board_82572 }, 4232 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_SERDES), board_82572 },
4233
4076 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E), board_82573 }, 4234 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E), board_82573 },
4077 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E_IAMT), board_82573 }, 4235 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E_IAMT), board_82573 },
4078 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573L), board_82573 }, 4236 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573L), board_82573 },
4237
4079 { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_DPT), 4238 { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_DPT),
4080 board_80003es2lan }, 4239 board_80003es2lan },
4081 { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_SPT), 4240 { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_SPT),
@@ -4084,6 +4243,7 @@ static struct pci_device_id e1000_pci_tbl[] = {
4084 board_80003es2lan }, 4243 board_80003es2lan },
4085 { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_SPT), 4244 { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_SPT),
4086 board_80003es2lan }, 4245 board_80003es2lan },
4246
4087 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE), board_ich8lan }, 4247 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE), board_ich8lan },
4088 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_G), board_ich8lan }, 4248 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_G), board_ich8lan },
4089 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_GT), board_ich8lan }, 4249 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_GT), board_ich8lan },
@@ -4091,6 +4251,7 @@ static struct pci_device_id e1000_pci_tbl[] = {
4091 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_C), board_ich8lan }, 4251 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_C), board_ich8lan },
4092 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M), board_ich8lan }, 4252 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M), board_ich8lan },
4093 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M_AMT), board_ich8lan }, 4253 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M_AMT), board_ich8lan },
4254
4094 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE), board_ich9lan }, 4255 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE), board_ich9lan },
4095 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_G), board_ich9lan }, 4256 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_G), board_ich9lan },
4096 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_GT), board_ich9lan }, 4257 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_GT), board_ich9lan },
@@ -4108,7 +4269,7 @@ static struct pci_driver e1000_driver = {
4108 .probe = e1000_probe, 4269 .probe = e1000_probe,
4109 .remove = __devexit_p(e1000_remove), 4270 .remove = __devexit_p(e1000_remove),
4110#ifdef CONFIG_PM 4271#ifdef CONFIG_PM
4111 /* Power Managment Hooks */ 4272 /* Power Management Hooks */
4112 .suspend = e1000_suspend, 4273 .suspend = e1000_suspend,
4113 .resume = e1000_resume, 4274 .resume = e1000_resume,
4114#endif 4275#endif
@@ -4127,7 +4288,7 @@ static int __init e1000_init_module(void)
4127 int ret; 4288 int ret;
4128 printk(KERN_INFO "%s: Intel(R) PRO/1000 Network Driver - %s\n", 4289 printk(KERN_INFO "%s: Intel(R) PRO/1000 Network Driver - %s\n",
4129 e1000e_driver_name, e1000e_driver_version); 4290 e1000e_driver_name, e1000e_driver_version);
4130 printk(KERN_INFO "%s: Copyright (c) 1999-2007 Intel Corporation.\n", 4291 printk(KERN_INFO "%s: Copyright (c) 1999-2008 Intel Corporation.\n",
4131 e1000e_driver_name); 4292 e1000e_driver_name);
4132 ret = pci_register_driver(&e1000_driver); 4293 ret = pci_register_driver(&e1000_driver);
4133 4294
diff --git a/drivers/net/e1000e/param.c b/drivers/net/e1000e/param.c
index df266c32ac4b..a66b92efcf80 100644
--- a/drivers/net/e1000e/param.c
+++ b/drivers/net/e1000e/param.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel PRO/1000 Linux driver 3 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2007 Intel Corporation. 4 Copyright(c) 1999 - 2008 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -30,7 +30,8 @@
30 30
31#include "e1000.h" 31#include "e1000.h"
32 32
33/* This is the only thing that needs to be changed to adjust the 33/*
34 * This is the only thing that needs to be changed to adjust the
34 * maximum number of ports that the driver can manage. 35 * maximum number of ports that the driver can manage.
35 */ 36 */
36 37
@@ -46,7 +47,8 @@ module_param(copybreak, uint, 0644);
46MODULE_PARM_DESC(copybreak, 47MODULE_PARM_DESC(copybreak,
47 "Maximum size of packet that is copied to a new buffer on receive"); 48 "Maximum size of packet that is copied to a new buffer on receive");
48 49
49/* All parameters are treated the same, as an integer array of values. 50/*
51 * All parameters are treated the same, as an integer array of values.
50 * This macro just reduces the need to repeat the same declaration code 52 * This macro just reduces the need to repeat the same declaration code
51 * over and over (plus this helps to avoid typo bugs). 53 * over and over (plus this helps to avoid typo bugs).
52 */ 54 */
@@ -60,8 +62,9 @@ MODULE_PARM_DESC(copybreak,
60 MODULE_PARM_DESC(X, desc); 62 MODULE_PARM_DESC(X, desc);
61 63
62 64
63/* Transmit Interrupt Delay in units of 1.024 microseconds 65/*
64 * Tx interrupt delay needs to typically be set to something non zero 66 * Transmit Interrupt Delay in units of 1.024 microseconds
67 * Tx interrupt delay needs to typically be set to something non zero
65 * 68 *
66 * Valid Range: 0-65535 69 * Valid Range: 0-65535
67 */ 70 */
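Every option in param.c is declared through one macro that expands to an integer array (one slot per port), a count, a module_param_array_named() registration and a MODULE_PARM_DESC(). The macro body itself is outside this diff, so the following is only a plausible reconstruction of the pattern; E1000_MAX_NIC and the unset sentinel are assumptions:

/* Hedged reconstruction of the per-port parameter macro, not the file's literal text. */
#include <linux/module.h>
#include <linux/moduleparam.h>

#define E1000_MAX_NIC	32
#define OPTION_UNSET	-1

#define E1000_PARAM(X, desc)						\
	static int X[E1000_MAX_NIC + 1] =				\
		{ [0 ... E1000_MAX_NIC] = OPTION_UNSET };		\
	static unsigned int num_##X;					\
	module_param_array_named(X, X, int, &num_##X, 0);		\
	MODULE_PARM_DESC(X, desc)

/* Used exactly as in the hunks above: */
E1000_PARAM(TxIntDelay, "Transmit Interrupt Delay");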
@@ -70,7 +73,8 @@ E1000_PARAM(TxIntDelay, "Transmit Interrupt Delay");
70#define MAX_TXDELAY 0xFFFF 73#define MAX_TXDELAY 0xFFFF
71#define MIN_TXDELAY 0 74#define MIN_TXDELAY 0
72 75
73/* Transmit Absolute Interrupt Delay in units of 1.024 microseconds 76/*
77 * Transmit Absolute Interrupt Delay in units of 1.024 microseconds
74 * 78 *
75 * Valid Range: 0-65535 79 * Valid Range: 0-65535
76 */ 80 */
@@ -79,8 +83,9 @@ E1000_PARAM(TxAbsIntDelay, "Transmit Absolute Interrupt Delay");
79#define MAX_TXABSDELAY 0xFFFF 83#define MAX_TXABSDELAY 0xFFFF
80#define MIN_TXABSDELAY 0 84#define MIN_TXABSDELAY 0
81 85
82/* Receive Interrupt Delay in units of 1.024 microseconds 86/*
83 * hardware will likely hang if you set this to anything but zero. 87 * Receive Interrupt Delay in units of 1.024 microseconds
88 * hardware will likely hang if you set this to anything but zero.
84 * 89 *
85 * Valid Range: 0-65535 90 * Valid Range: 0-65535
86 */ 91 */
@@ -89,7 +94,8 @@ E1000_PARAM(RxIntDelay, "Receive Interrupt Delay");
89#define MAX_RXDELAY 0xFFFF 94#define MAX_RXDELAY 0xFFFF
90#define MIN_RXDELAY 0 95#define MIN_RXDELAY 0
91 96
92/* Receive Absolute Interrupt Delay in units of 1.024 microseconds 97/*
98 * Receive Absolute Interrupt Delay in units of 1.024 microseconds
93 * 99 *
94 * Valid Range: 0-65535 100 * Valid Range: 0-65535
95 */ 101 */
@@ -98,7 +104,8 @@ E1000_PARAM(RxAbsIntDelay, "Receive Absolute Interrupt Delay");
98#define MAX_RXABSDELAY 0xFFFF 104#define MAX_RXABSDELAY 0xFFFF
99#define MIN_RXABSDELAY 0 105#define MIN_RXABSDELAY 0
100 106
101/* Interrupt Throttle Rate (interrupts/sec) 107/*
108 * Interrupt Throttle Rate (interrupts/sec)
102 * 109 *
103 * Valid Range: 100-100000 (0=off, 1=dynamic, 3=dynamic conservative) 110 * Valid Range: 100-100000 (0=off, 1=dynamic, 3=dynamic conservative)
104 */ 111 */
@@ -107,7 +114,8 @@ E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate");
107#define MAX_ITR 100000 114#define MAX_ITR 100000
108#define MIN_ITR 100 115#define MIN_ITR 100
109 116
110/* Enable Smart Power Down of the PHY 117/*
118 * Enable Smart Power Down of the PHY
111 * 119 *
112 * Valid Range: 0, 1 120 * Valid Range: 0, 1
113 * 121 *
@@ -115,7 +123,8 @@ E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate");
115 */ 123 */
116E1000_PARAM(SmartPowerDownEnable, "Enable PHY smart power down"); 124E1000_PARAM(SmartPowerDownEnable, "Enable PHY smart power down");
117 125
118/* Enable Kumeran Lock Loss workaround 126/*
127 * Enable Kumeran Lock Loss workaround
119 * 128 *
120 * Valid Range: 0, 1 129 * Valid Range: 0, 1
121 * 130 *
diff --git a/drivers/net/e1000e/phy.c b/drivers/net/e1000e/phy.c
index dab3c468a768..3a4574caa75b 100644
--- a/drivers/net/e1000e/phy.c
+++ b/drivers/net/e1000e/phy.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel PRO/1000 Linux driver 3 Intel PRO/1000 Linux driver
4 Copyright(c) 1999 - 2007 Intel Corporation. 4 Copyright(c) 1999 - 2008 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -134,7 +134,8 @@ static s32 e1000_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
134 return -E1000_ERR_PARAM; 134 return -E1000_ERR_PARAM;
135 } 135 }
136 136
137 /* Set up Op-code, Phy Address, and register offset in the MDI 137 /*
138 * Set up Op-code, Phy Address, and register offset in the MDI
138 * Control register. The MAC will take care of interfacing with the 139 * Control register. The MAC will take care of interfacing with the
139 * PHY to retrieve the desired data. 140 * PHY to retrieve the desired data.
140 */ 141 */
@@ -144,7 +145,11 @@ static s32 e1000_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
144 145
145 ew32(MDIC, mdic); 146 ew32(MDIC, mdic);
146 147
147 /* Poll the ready bit to see if the MDI read completed */ 148 /*
149 * Poll the ready bit to see if the MDI read completed
150 * Increasing the time out as testing showed failures with
151 * the lower time out
152 */
148 for (i = 0; i < 64; i++) { 153 for (i = 0; i < 64; i++) {
149 udelay(50); 154 udelay(50);
150 mdic = er32(MDIC); 155 mdic = er32(MDIC);
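The MDIC hunk above widens the ready-bit poll (64 iterations of 50 µs) because a shorter timeout produced read failures in testing. The general shape of such a poll is sketched below; er32() and the E1000_MDIC_READY / E1000_ERR_PHY names come from the driver, everything else is assumed:

/* Hedged sketch of an MDIC ready-bit poll, not the driver's literal loop. */
static s32 mdic_wait_ready(struct e1000_hw *hw, u32 *mdic_out)
{
	u32 mdic;
	int i;

	for (i = 0; i < 64; i++) {	/* ~3.2 ms worst case */
		udelay(50);
		mdic = er32(MDIC);
		if (mdic & E1000_MDIC_READY) {
			*mdic_out = mdic;
			return 0;
		}
	}
	return -E1000_ERR_PHY;		/* MDI transaction never completed */
}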
@@ -182,7 +187,8 @@ static s32 e1000_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
182 return -E1000_ERR_PARAM; 187 return -E1000_ERR_PARAM;
183 } 188 }
184 189
185 /* Set up Op-code, Phy Address, and register offset in the MDI 190 /*
191 * Set up Op-code, Phy Address, and register offset in the MDI
186 * Control register. The MAC will take care of interfacing with the 192 * Control register. The MAC will take care of interfacing with the
187 * PHY to retrieve the desired data. 193 * PHY to retrieve the desired data.
188 */ 194 */
@@ -409,14 +415,15 @@ s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw)
409 s32 ret_val; 415 s32 ret_val;
410 u16 phy_data; 416 u16 phy_data;
411 417
412 /* Enable CRS on TX. This must be set for half-duplex operation. */ 418 /* Enable CRS on Tx. This must be set for half-duplex operation. */
413 ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); 419 ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
414 if (ret_val) 420 if (ret_val)
415 return ret_val; 421 return ret_val;
416 422
417 phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; 423 phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
418 424
419 /* Options: 425 /*
426 * Options:
420 * MDI/MDI-X = 0 (default) 427 * MDI/MDI-X = 0 (default)
421 * 0 - Auto for all speeds 428 * 0 - Auto for all speeds
422 * 1 - MDI mode 429 * 1 - MDI mode
@@ -441,7 +448,8 @@ s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw)
441 break; 448 break;
442 } 449 }
443 450
444 /* Options: 451 /*
452 * Options:
445 * disable_polarity_correction = 0 (default) 453 * disable_polarity_correction = 0 (default)
446 * Automatic Correction for Reversed Cable Polarity 454 * Automatic Correction for Reversed Cable Polarity
447 * 0 - Disabled 455 * 0 - Disabled
@@ -456,7 +464,8 @@ s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw)
456 return ret_val; 464 return ret_val;
457 465
458 if (phy->revision < 4) { 466 if (phy->revision < 4) {
459 /* Force TX_CLK in the Extended PHY Specific Control Register 467 /*
468 * Force TX_CLK in the Extended PHY Specific Control Register
460 * to 25MHz clock. 469 * to 25MHz clock.
461 */ 470 */
462 ret_val = e1e_rphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data); 471 ret_val = e1e_rphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data);
@@ -543,19 +552,21 @@ s32 e1000e_copper_link_setup_igp(struct e1000_hw *hw)
543 552
544 /* set auto-master slave resolution settings */ 553 /* set auto-master slave resolution settings */
545 if (hw->mac.autoneg) { 554 if (hw->mac.autoneg) {
546 /* when autonegotiation advertisement is only 1000Mbps then we 555 /*
556 * when autonegotiation advertisement is only 1000Mbps then we
547 * should disable SmartSpeed and enable Auto MasterSlave 557 * should disable SmartSpeed and enable Auto MasterSlave
548 * resolution as hardware default. */ 558 * resolution as hardware default.
559 */
549 if (phy->autoneg_advertised == ADVERTISE_1000_FULL) { 560 if (phy->autoneg_advertised == ADVERTISE_1000_FULL) {
550 /* Disable SmartSpeed */ 561 /* Disable SmartSpeed */
551 ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, 562 ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
552 &data); 563 &data);
553 if (ret_val) 564 if (ret_val)
554 return ret_val; 565 return ret_val;
555 566
556 data &= ~IGP01E1000_PSCFR_SMART_SPEED; 567 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
557 ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, 568 ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
558 data); 569 data);
559 if (ret_val) 570 if (ret_val)
560 return ret_val; 571 return ret_val;
561 572
@@ -630,14 +641,16 @@ static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
630 return ret_val; 641 return ret_val;
631 } 642 }
632 643
633 /* Need to parse both autoneg_advertised and fc and set up 644 /*
645 * Need to parse both autoneg_advertised and fc and set up
634 * the appropriate PHY registers. First we will parse for 646 * the appropriate PHY registers. First we will parse for
635 * autoneg_advertised software override. Since we can advertise 647 * autoneg_advertised software override. Since we can advertise
636 * a plethora of combinations, we need to check each bit 648 * a plethora of combinations, we need to check each bit
637 * individually. 649 * individually.
638 */ 650 */
639 651
640 /* First we clear all the 10/100 mb speed bits in the Auto-Neg 652 /*
653 * First we clear all the 10/100 mb speed bits in the Auto-Neg
641 * Advertisement Register (Address 4) and the 1000 mb speed bits in 654 * Advertisement Register (Address 4) and the 1000 mb speed bits in
642 * the 1000Base-T Control Register (Address 9). 655 * the 1000Base-T Control Register (Address 9).
643 */ 656 */
@@ -683,7 +696,8 @@ static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
683 mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS; 696 mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS;
684 } 697 }
685 698
686 /* Check for a software override of the flow control settings, and 699 /*
700 * Check for a software override of the flow control settings, and
687 * setup the PHY advertisement registers accordingly. If 701 * setup the PHY advertisement registers accordingly. If
688 * auto-negotiation is enabled, then software will have to set the 702 * auto-negotiation is enabled, then software will have to set the
689 * "PAUSE" bits to the correct value in the Auto-Negotiation 703 * "PAUSE" bits to the correct value in the Auto-Negotiation
@@ -696,38 +710,42 @@ static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
696 * but not send pause frames). 710 * but not send pause frames).
697 * 2: Tx flow control is enabled (we can send pause frames 711 * 2: Tx flow control is enabled (we can send pause frames
698 * but we do not support receiving pause frames). 712 * but we do not support receiving pause frames).
699 * 3: Both Rx and TX flow control (symmetric) are enabled. 713 * 3: Both Rx and Tx flow control (symmetric) are enabled.
700 * other: No software override. The flow control configuration 714 * other: No software override. The flow control configuration
701 * in the EEPROM is used. 715 * in the EEPROM is used.
702 */ 716 */
703 switch (hw->mac.fc) { 717 switch (hw->fc.type) {
704 case e1000_fc_none: 718 case e1000_fc_none:
705 /* Flow control (RX & TX) is completely disabled by a 719 /*
720 * Flow control (Rx & Tx) is completely disabled by a
706 * software over-ride. 721 * software over-ride.
707 */ 722 */
708 mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); 723 mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
709 break; 724 break;
710 case e1000_fc_rx_pause: 725 case e1000_fc_rx_pause:
711 /* RX Flow control is enabled, and TX Flow control is 726 /*
727 * Rx Flow control is enabled, and Tx Flow control is
712 * disabled, by a software over-ride. 728 * disabled, by a software over-ride.
713 */ 729 *
714 /* Since there really isn't a way to advertise that we are 730 * Since there really isn't a way to advertise that we are
715 * capable of RX Pause ONLY, we will advertise that we 731 * capable of Rx Pause ONLY, we will advertise that we
716 * support both symmetric and asymmetric RX PAUSE. Later 732 * support both symmetric and asymmetric Rx PAUSE. Later
717 * (in e1000e_config_fc_after_link_up) we will disable the 733 * (in e1000e_config_fc_after_link_up) we will disable the
718 * hw's ability to send PAUSE frames. 734 * hw's ability to send PAUSE frames.
719 */ 735 */
720 mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); 736 mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
721 break; 737 break;
722 case e1000_fc_tx_pause: 738 case e1000_fc_tx_pause:
723 /* TX Flow control is enabled, and RX Flow control is 739 /*
740 * Tx Flow control is enabled, and Rx Flow control is
724 * disabled, by a software over-ride. 741 * disabled, by a software over-ride.
725 */ 742 */
726 mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR; 743 mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR;
727 mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE; 744 mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE;
728 break; 745 break;
729 case e1000_fc_full: 746 case e1000_fc_full:
730 /* Flow control (both RX and TX) is enabled by a software 747 /*
748 * Flow control (both Rx and Tx) is enabled by a software
731 * over-ride. 749 * over-ride.
732 */ 750 */
733 mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); 751 mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
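The switch above maps the requested flow-control mode onto the two MII advertisement bits: NWAY_AR_PAUSE advertises symmetric pause, NWAY_AR_ASM_DIR advertises asymmetric direction. Since there is no way to advertise "Rx pause only", that mode advertises both bits and the transmit side is disabled later, after link-up. A compact restatement of the mapping, folded into a helper (the enum and bit names are the ones used in the hunk):

/* Hedged sketch: same bit semantics as the switch above, as a pure helper. */
static u16 fc_to_adv_bits(enum e1000_fc_type type, u16 adv)
{
	switch (type) {
	case e1000_fc_none:		/* no pause in either direction */
		adv &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
		break;
	case e1000_fc_rx_pause:		/* Rx only: advertise both, throttle Tx later */
	case e1000_fc_full:		/* symmetric pause */
		adv |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
		break;
	case e1000_fc_tx_pause:		/* Tx only: asymmetric, no symmetric pause */
		adv |= NWAY_AR_ASM_DIR;
		adv &= ~NWAY_AR_PAUSE;
		break;
	default:
		break;			/* no override: keep the EEPROM-derived value */
	}
	return adv;
}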
@@ -758,7 +776,7 @@ static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
758 * Performs initial bounds checking on autoneg advertisement parameter, then 776 * Performs initial bounds checking on autoneg advertisement parameter, then
759 * configure to advertise the full capability. Setup the PHY to autoneg 777 * configure to advertise the full capability. Setup the PHY to autoneg
760 * and restart the negotiation process between the link partner. If 778 * and restart the negotiation process between the link partner. If
761 * wait_for_link, then wait for autoneg to complete before exiting. 779 * autoneg_wait_to_complete, then wait for autoneg to complete before exiting.
762 **/ 780 **/
763static s32 e1000_copper_link_autoneg(struct e1000_hw *hw) 781static s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
764{ 782{
@@ -766,12 +784,14 @@ static s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
766 s32 ret_val; 784 s32 ret_val;
767 u16 phy_ctrl; 785 u16 phy_ctrl;
768 786
769 /* Perform some bounds checking on the autoneg advertisement 787 /*
788 * Perform some bounds checking on the autoneg advertisement
770 * parameter. 789 * parameter.
771 */ 790 */
772 phy->autoneg_advertised &= phy->autoneg_mask; 791 phy->autoneg_advertised &= phy->autoneg_mask;
773 792
774 /* If autoneg_advertised is zero, we assume it was not defaulted 793 /*
794 * If autoneg_advertised is zero, we assume it was not defaulted
775 * by the calling code so we set to advertise full capability. 795 * by the calling code so we set to advertise full capability.
776 */ 796 */
777 if (phy->autoneg_advertised == 0) 797 if (phy->autoneg_advertised == 0)
@@ -785,7 +805,8 @@ static s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
785 } 805 }
786 hw_dbg(hw, "Restarting Auto-Neg\n"); 806 hw_dbg(hw, "Restarting Auto-Neg\n");
787 807
788 /* Restart auto-negotiation by setting the Auto Neg Enable bit and 808 /*
809 * Restart auto-negotiation by setting the Auto Neg Enable bit and
789 * the Auto Neg Restart bit in the PHY control register. 810 * the Auto Neg Restart bit in the PHY control register.
790 */ 811 */
791 ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_ctrl); 812 ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_ctrl);
@@ -797,10 +818,11 @@ static s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
797 if (ret_val) 818 if (ret_val)
798 return ret_val; 819 return ret_val;
799 820
800 /* Does the user want to wait for Auto-Neg to complete here, or 821 /*
822 * Does the user want to wait for Auto-Neg to complete here, or
801 * check at a later time (for example, callback routine). 823 * check at a later time (for example, callback routine).
802 */ 824 */
803 if (phy->wait_for_link) { 825 if (phy->autoneg_wait_to_complete) {
804 ret_val = e1000_wait_autoneg(hw); 826 ret_val = e1000_wait_autoneg(hw);
805 if (ret_val) { 827 if (ret_val) {
806 hw_dbg(hw, "Error while waiting for " 828 hw_dbg(hw, "Error while waiting for "
@@ -829,14 +851,18 @@ s32 e1000e_setup_copper_link(struct e1000_hw *hw)
829 bool link; 851 bool link;
830 852
831 if (hw->mac.autoneg) { 853 if (hw->mac.autoneg) {
832 /* Setup autoneg and flow control advertisement and perform 854 /*
833 * autonegotiation. */ 855 * Setup autoneg and flow control advertisement and perform
856 * autonegotiation.
857 */
834 ret_val = e1000_copper_link_autoneg(hw); 858 ret_val = e1000_copper_link_autoneg(hw);
835 if (ret_val) 859 if (ret_val)
836 return ret_val; 860 return ret_val;
837 } else { 861 } else {
838 /* PHY will be set to 10H, 10F, 100H or 100F 862 /*
839 * depending on user settings. */ 863 * PHY will be set to 10H, 10F, 100H or 100F
864 * depending on user settings.
865 */
840 hw_dbg(hw, "Forcing Speed and Duplex\n"); 866 hw_dbg(hw, "Forcing Speed and Duplex\n");
841 ret_val = e1000_phy_force_speed_duplex(hw); 867 ret_val = e1000_phy_force_speed_duplex(hw);
842 if (ret_val) { 868 if (ret_val) {
@@ -845,7 +871,8 @@ s32 e1000e_setup_copper_link(struct e1000_hw *hw)
845 } 871 }
846 } 872 }
847 873
848 /* Check link status. Wait up to 100 microseconds for link to become 874 /*
875 * Check link status. Wait up to 100 microseconds for link to become
849 * valid. 876 * valid.
850 */ 877 */
851 ret_val = e1000e_phy_has_link_generic(hw, 878 ret_val = e1000e_phy_has_link_generic(hw,
@@ -891,7 +918,8 @@ s32 e1000e_phy_force_speed_duplex_igp(struct e1000_hw *hw)
891 if (ret_val) 918 if (ret_val)
892 return ret_val; 919 return ret_val;
893 920
894 /* Clear Auto-Crossover to force MDI manually. IGP requires MDI 921 /*
922 * Clear Auto-Crossover to force MDI manually. IGP requires MDI
895 * forced whenever speed and duplex are forced. 923 * forced whenever speed and duplex are forced.
896 */ 924 */
897 ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data); 925 ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data);
@@ -909,7 +937,7 @@ s32 e1000e_phy_force_speed_duplex_igp(struct e1000_hw *hw)
909 937
910 udelay(1); 938 udelay(1);
911 939
912 if (phy->wait_for_link) { 940 if (phy->autoneg_wait_to_complete) {
913 hw_dbg(hw, "Waiting for forced speed/duplex link on IGP phy.\n"); 941 hw_dbg(hw, "Waiting for forced speed/duplex link on IGP phy.\n");
914 942
915 ret_val = e1000e_phy_has_link_generic(hw, 943 ret_val = e1000e_phy_has_link_generic(hw,
@@ -941,7 +969,7 @@ s32 e1000e_phy_force_speed_duplex_igp(struct e1000_hw *hw)
941 * Calls the PHY setup function to force speed and duplex. Clears the 969 * Calls the PHY setup function to force speed and duplex. Clears the
942 * auto-crossover to force MDI manually. Resets the PHY to commit the 970 * auto-crossover to force MDI manually. Resets the PHY to commit the
943 * changes. If time expires while waiting for link up, we reset the DSP. 971 * changes. If time expires while waiting for link up, we reset the DSP.
944 * After reset, TX_CLK and CRS on TX must be set. Return successful upon 972 * After reset, TX_CLK and CRS on Tx must be set. Return successful upon
945 * successful completion, else return corresponding error code. 973 * successful completion, else return corresponding error code.
946 **/ 974 **/
947s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw) 975s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw)
@@ -951,7 +979,8 @@ s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw)
951 u16 phy_data; 979 u16 phy_data;
952 bool link; 980 bool link;
953 981
954 /* Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI 982 /*
983 * Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI
955 * forced whenever speed and duplex are forced. 984 * forced whenever speed and duplex are forced.
956 */ 985 */
957 ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); 986 ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
@@ -980,7 +1009,7 @@ s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw)
980 1009
981 udelay(1); 1010 udelay(1);
982 1011
983 if (phy->wait_for_link) { 1012 if (phy->autoneg_wait_to_complete) {
984 hw_dbg(hw, "Waiting for forced speed/duplex link on M88 phy.\n"); 1013 hw_dbg(hw, "Waiting for forced speed/duplex link on M88 phy.\n");
985 1014
986 ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, 1015 ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
@@ -989,10 +1018,12 @@ s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw)
989 return ret_val; 1018 return ret_val;
990 1019
991 if (!link) { 1020 if (!link) {
992 /* We didn't get link. 1021 /*
1022 * We didn't get link.
993 * Reset the DSP and cross our fingers. 1023 * Reset the DSP and cross our fingers.
994 */ 1024 */
995 ret_val = e1e_wphy(hw, M88E1000_PHY_PAGE_SELECT, 0x001d); 1025 ret_val = e1e_wphy(hw, M88E1000_PHY_PAGE_SELECT,
1026 0x001d);
996 if (ret_val) 1027 if (ret_val)
997 return ret_val; 1028 return ret_val;
998 ret_val = e1000e_phy_reset_dsp(hw); 1029 ret_val = e1000e_phy_reset_dsp(hw);
@@ -1011,7 +1042,8 @@ s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw)
1011 if (ret_val) 1042 if (ret_val)
1012 return ret_val; 1043 return ret_val;
1013 1044
1014 /* Resetting the phy means we need to re-force TX_CLK in the 1045 /*
1046 * Resetting the phy means we need to re-force TX_CLK in the
1015 * Extended PHY Specific Control Register to 25MHz clock from 1047 * Extended PHY Specific Control Register to 25MHz clock from
1016 * the reset value of 2.5MHz. 1048 * the reset value of 2.5MHz.
1017 */ 1049 */
@@ -1020,7 +1052,8 @@ s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw)
1020 if (ret_val) 1052 if (ret_val)
1021 return ret_val; 1053 return ret_val;
1022 1054
1023 /* In addition, we must re-enable CRS on Tx for both half and full 1055 /*
1056 * In addition, we must re-enable CRS on Tx for both half and full
1024 * duplex. 1057 * duplex.
1025 */ 1058 */
1026 ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); 1059 ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
@@ -1051,7 +1084,7 @@ void e1000e_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl)
1051 u32 ctrl; 1084 u32 ctrl;
1052 1085
1053 /* Turn off flow control when forcing speed/duplex */ 1086 /* Turn off flow control when forcing speed/duplex */
1054 mac->fc = e1000_fc_none; 1087 hw->fc.type = e1000_fc_none;
1055 1088
1056 /* Force speed/duplex on the mac */ 1089 /* Force speed/duplex on the mac */
1057 ctrl = er32(CTRL); 1090 ctrl = er32(CTRL);
@@ -1124,30 +1157,32 @@ s32 e1000e_set_d3_lplu_state(struct e1000_hw *hw, bool active)
1124 data); 1157 data);
1125 if (ret_val) 1158 if (ret_val)
1126 return ret_val; 1159 return ret_val;
1127 /* LPLU and SmartSpeed are mutually exclusive. LPLU is used 1160 /*
1161 * LPLU and SmartSpeed are mutually exclusive. LPLU is used
1128 * during Dx states where the power conservation is most 1162 * during Dx states where the power conservation is most
1129 * important. During driver activity we should enable 1163 * important. During driver activity we should enable
1130 * SmartSpeed, so performance is maintained. */ 1164 * SmartSpeed, so performance is maintained.
1165 */
1131 if (phy->smart_speed == e1000_smart_speed_on) { 1166 if (phy->smart_speed == e1000_smart_speed_on) {
1132 ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, 1167 ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
1133 &data); 1168 &data);
1134 if (ret_val) 1169 if (ret_val)
1135 return ret_val; 1170 return ret_val;
1136 1171
1137 data |= IGP01E1000_PSCFR_SMART_SPEED; 1172 data |= IGP01E1000_PSCFR_SMART_SPEED;
1138 ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, 1173 ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
1139 data); 1174 data);
1140 if (ret_val) 1175 if (ret_val)
1141 return ret_val; 1176 return ret_val;
1142 } else if (phy->smart_speed == e1000_smart_speed_off) { 1177 } else if (phy->smart_speed == e1000_smart_speed_off) {
1143 ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, 1178 ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
1144 &data); 1179 &data);
1145 if (ret_val) 1180 if (ret_val)
1146 return ret_val; 1181 return ret_val;
1147 1182
1148 data &= ~IGP01E1000_PSCFR_SMART_SPEED; 1183 data &= ~IGP01E1000_PSCFR_SMART_SPEED;
1149 ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, 1184 ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
1150 data); 1185 data);
1151 if (ret_val) 1186 if (ret_val)
1152 return ret_val; 1187 return ret_val;
1153 } 1188 }
@@ -1249,8 +1284,10 @@ static s32 e1000_check_polarity_igp(struct e1000_hw *hw)
1249 s32 ret_val; 1284 s32 ret_val;
1250 u16 data, offset, mask; 1285 u16 data, offset, mask;
1251 1286
1252 /* Polarity is determined based on the speed of 1287 /*
1253 * our connection. */ 1288 * Polarity is determined based on the speed of
1289 * our connection.
1290 */
1254 ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_STATUS, &data); 1291 ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_STATUS, &data);
1255 if (ret_val) 1292 if (ret_val)
1256 return ret_val; 1293 return ret_val;
@@ -1260,7 +1297,8 @@ static s32 e1000_check_polarity_igp(struct e1000_hw *hw)
1260 offset = IGP01E1000_PHY_PCS_INIT_REG; 1297 offset = IGP01E1000_PHY_PCS_INIT_REG;
1261 mask = IGP01E1000_PHY_POLARITY_MASK; 1298 mask = IGP01E1000_PHY_POLARITY_MASK;
1262 } else { 1299 } else {
1263 /* This really only applies to 10Mbps since 1300 /*
1301 * This really only applies to 10Mbps since
1264 * there is no polarity for 100Mbps (always 0). 1302 * there is no polarity for 100Mbps (always 0).
1265 */ 1303 */
1266 offset = IGP01E1000_PHY_PORT_STATUS; 1304 offset = IGP01E1000_PHY_PORT_STATUS;
@@ -1278,7 +1316,7 @@ static s32 e1000_check_polarity_igp(struct e1000_hw *hw)
1278} 1316}
1279 1317
1280/** 1318/**
1281 * e1000_wait_autoneg - Wait for auto-neg compeletion 1319 * e1000_wait_autoneg - Wait for auto-neg completion
1282 * @hw: pointer to the HW structure 1320 * @hw: pointer to the HW structure
1283 * 1321 *
1284 * Waits for auto-negotiation to complete or for the auto-negotiation time 1322 * Waits for auto-negotiation to complete or for the auto-negotiation time
@@ -1302,7 +1340,8 @@ static s32 e1000_wait_autoneg(struct e1000_hw *hw)
1302 msleep(100); 1340 msleep(100);
1303 } 1341 }
1304 1342
1305 /* PHY_AUTO_NEG_TIME expiration doesn't guarantee auto-negotiation 1343 /*
1344 * PHY_AUTO_NEG_TIME expiration doesn't guarantee auto-negotiation
1306 * has completed. 1345 * has completed.
1307 */ 1346 */
1308 return ret_val; 1347 return ret_val;
@@ -1324,7 +1363,8 @@ s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
1324 u16 i, phy_status; 1363 u16 i, phy_status;
1325 1364
1326 for (i = 0; i < iterations; i++) { 1365 for (i = 0; i < iterations; i++) {
1327 /* Some PHYs require the PHY_STATUS register to be read 1366 /*
1367 * Some PHYs require the PHY_STATUS register to be read
1328 * twice due to the link bit being sticky. No harm doing 1368 * twice due to the link bit being sticky. No harm doing
1329 * it across the board. 1369 * it across the board.
1330 */ 1370 */
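e1000e_phy_has_link_generic() reads PHY_STATUS twice per iteration because the link bit is latched on some PHYs: the first read returns (and clears) the stale latched value, the second reflects the current state. A hedged sketch of a single iteration (e1e_rphy() and MII_SR_LINK_STATUS are the driver's names; return-value checks and the usec_interval handling are trimmed):

/* Sketch of one link poll step, not the full loop. */
static bool phy_link_up_once(struct e1000_hw *hw)
{
	u16 status = 0;

	e1e_rphy(hw, PHY_STATUS, &status);	/* 1st read clears the latched bit */
	e1e_rphy(hw, PHY_STATUS, &status);	/* 2nd read gives the live state */

	return status & MII_SR_LINK_STATUS;
}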
@@ -1412,10 +1452,12 @@ s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw)
1412 if (ret_val) 1452 if (ret_val)
1413 return ret_val; 1453 return ret_val;
1414 1454
1415 /* Getting bits 15:9, which represent the combination of 1455 /*
1456 * Getting bits 15:9, which represent the combination of
 1416 * coarse and fine gain values. The result is a number 1457 * coarse and fine gain values. The result is a number
1417 * that can be put into the lookup table to obtain the 1458 * that can be put into the lookup table to obtain the
1418 * approximate cable length. */ 1459 * approximate cable length.
1460 */
1419 cur_agc_index = (phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) & 1461 cur_agc_index = (phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) &
1420 IGP02E1000_AGC_LENGTH_MASK; 1462 IGP02E1000_AGC_LENGTH_MASK;
1421 1463
@@ -1466,7 +1508,7 @@ s32 e1000e_get_phy_info_m88(struct e1000_hw *hw)
1466 u16 phy_data; 1508 u16 phy_data;
1467 bool link; 1509 bool link;
1468 1510
1469 if (hw->media_type != e1000_media_type_copper) { 1511 if (hw->phy.media_type != e1000_media_type_copper) {
1470 hw_dbg(hw, "Phy info is only valid for copper media\n"); 1512 hw_dbg(hw, "Phy info is only valid for copper media\n");
1471 return -E1000_ERR_CONFIG; 1513 return -E1000_ERR_CONFIG;
1472 } 1514 }
diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h
index a8d3280923e8..f5dacceab95b 100644
--- a/drivers/net/ehea/ehea.h
+++ b/drivers/net/ehea/ehea.h
@@ -422,7 +422,7 @@ struct ehea_fw_handle_entry {
422struct ehea_fw_handle_array { 422struct ehea_fw_handle_array {
423 struct ehea_fw_handle_entry *arr; 423 struct ehea_fw_handle_entry *arr;
424 int num_entries; 424 int num_entries;
425 struct semaphore lock; 425 struct mutex lock;
426}; 426};
427 427
428struct ehea_bcmc_reg_entry { 428struct ehea_bcmc_reg_entry {
@@ -435,7 +435,7 @@ struct ehea_bcmc_reg_entry {
435struct ehea_bcmc_reg_array { 435struct ehea_bcmc_reg_array {
436 struct ehea_bcmc_reg_entry *arr; 436 struct ehea_bcmc_reg_entry *arr;
437 int num_entries; 437 int num_entries;
438 struct semaphore lock; 438 struct mutex lock;
439}; 439};
440 440
441#define EHEA_PORT_UP 1 441#define EHEA_PORT_UP 1
@@ -453,7 +453,7 @@ struct ehea_port {
453 struct vlan_group *vgrp; 453 struct vlan_group *vgrp;
454 struct ehea_eq *qp_eq; 454 struct ehea_eq *qp_eq;
455 struct work_struct reset_task; 455 struct work_struct reset_task;
456 struct semaphore port_lock; 456 struct mutex port_lock;
457 char int_aff_name[EHEA_IRQ_NAME_SIZE]; 457 char int_aff_name[EHEA_IRQ_NAME_SIZE];
458 int allmulti; /* Indicates IFF_ALLMULTI state */ 458 int allmulti; /* Indicates IFF_ALLMULTI state */
459 int promisc; /* Indicates IFF_PROMISC state */ 459 int promisc; /* Indicates IFF_PROMISC state */
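The ehea hunks above and below are a mechanical conversion from counting semaphores used as sleeping locks to real mutexes, which buys lockdep coverage and clearer ownership semantics; dlpar_mem_lock additionally becomes a statically initialized DEFINE_MUTEX() instead of a runtime-initialized global. The before/after shape of the conversion, sketched with a hypothetical lock rather than one of ehea's:

/* Conversion pattern only; example_lock is hypothetical, not an ehea symbol. */
#include <linux/mutex.h>

static DEFINE_MUTEX(example_lock);	/* was: struct semaphore + init_MUTEX() */

static void touch_shared_state(void)
{
	mutex_lock(&example_lock);	/* was: down(&example_lock); */
	/* ... modify the shared firmware-handle / port state here ... */
	mutex_unlock(&example_lock);	/* was: up(&example_lock); */
}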
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index f460b623c077..9ff7538b7595 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -36,6 +36,7 @@
36#include <linux/notifier.h> 36#include <linux/notifier.h>
37#include <linux/reboot.h> 37#include <linux/reboot.h>
38#include <asm/kexec.h> 38#include <asm/kexec.h>
39#include <linux/mutex.h>
39 40
40#include <net/ip.h> 41#include <net/ip.h>
41 42
@@ -99,7 +100,7 @@ static int port_name_cnt;
99static LIST_HEAD(adapter_list); 100static LIST_HEAD(adapter_list);
100u64 ehea_driver_flags; 101u64 ehea_driver_flags;
101struct work_struct ehea_rereg_mr_task; 102struct work_struct ehea_rereg_mr_task;
102struct semaphore dlpar_mem_lock; 103static DEFINE_MUTEX(dlpar_mem_lock);
103struct ehea_fw_handle_array ehea_fw_handles; 104struct ehea_fw_handle_array ehea_fw_handles;
104struct ehea_bcmc_reg_array ehea_bcmc_regs; 105struct ehea_bcmc_reg_array ehea_bcmc_regs;
105 106
@@ -1761,7 +1762,7 @@ static int ehea_set_mac_addr(struct net_device *dev, void *sa)
1761 1762
1762 memcpy(dev->dev_addr, mac_addr->sa_data, dev->addr_len); 1763 memcpy(dev->dev_addr, mac_addr->sa_data, dev->addr_len);
1763 1764
1764 down(&ehea_bcmc_regs.lock); 1765 mutex_lock(&ehea_bcmc_regs.lock);
1765 1766
1766 /* Deregister old MAC in pHYP */ 1767 /* Deregister old MAC in pHYP */
1767 ret = ehea_broadcast_reg_helper(port, H_DEREG_BCMC); 1768 ret = ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
@@ -1779,7 +1780,7 @@ static int ehea_set_mac_addr(struct net_device *dev, void *sa)
1779 1780
1780out_upregs: 1781out_upregs:
1781 ehea_update_bcmc_registrations(); 1782 ehea_update_bcmc_registrations();
1782 up(&ehea_bcmc_regs.lock); 1783 mutex_unlock(&ehea_bcmc_regs.lock);
1783out_free: 1784out_free:
1784 kfree(cb0); 1785 kfree(cb0);
1785out: 1786out:
@@ -1941,7 +1942,7 @@ static void ehea_set_multicast_list(struct net_device *dev)
1941 } 1942 }
1942 ehea_promiscuous(dev, 0); 1943 ehea_promiscuous(dev, 0);
1943 1944
1944 down(&ehea_bcmc_regs.lock); 1945 mutex_lock(&ehea_bcmc_regs.lock);
1945 1946
1946 if (dev->flags & IFF_ALLMULTI) { 1947 if (dev->flags & IFF_ALLMULTI) {
1947 ehea_allmulti(dev, 1); 1948 ehea_allmulti(dev, 1);
@@ -1972,7 +1973,7 @@ static void ehea_set_multicast_list(struct net_device *dev)
1972 } 1973 }
1973out: 1974out:
1974 ehea_update_bcmc_registrations(); 1975 ehea_update_bcmc_registrations();
1975 up(&ehea_bcmc_regs.lock); 1976 mutex_unlock(&ehea_bcmc_regs.lock);
1976 return; 1977 return;
1977} 1978}
1978 1979
@@ -2455,7 +2456,7 @@ static int ehea_up(struct net_device *dev)
2455 if (port->state == EHEA_PORT_UP) 2456 if (port->state == EHEA_PORT_UP)
2456 return 0; 2457 return 0;
2457 2458
2458 down(&ehea_fw_handles.lock); 2459 mutex_lock(&ehea_fw_handles.lock);
2459 2460
2460 ret = ehea_port_res_setup(port, port->num_def_qps, 2461 ret = ehea_port_res_setup(port, port->num_def_qps,
2461 port->num_add_tx_qps); 2462 port->num_add_tx_qps);
@@ -2493,7 +2494,7 @@ static int ehea_up(struct net_device *dev)
2493 } 2494 }
2494 } 2495 }
2495 2496
2496 down(&ehea_bcmc_regs.lock); 2497 mutex_lock(&ehea_bcmc_regs.lock);
2497 2498
2498 ret = ehea_broadcast_reg_helper(port, H_REG_BCMC); 2499 ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
2499 if (ret) { 2500 if (ret) {
@@ -2516,10 +2517,10 @@ out:
2516 ehea_info("Failed starting %s. ret=%i", dev->name, ret); 2517 ehea_info("Failed starting %s. ret=%i", dev->name, ret);
2517 2518
2518 ehea_update_bcmc_registrations(); 2519 ehea_update_bcmc_registrations();
2519 up(&ehea_bcmc_regs.lock); 2520 mutex_unlock(&ehea_bcmc_regs.lock);
2520 2521
2521 ehea_update_firmware_handles(); 2522 ehea_update_firmware_handles();
2522 up(&ehea_fw_handles.lock); 2523 mutex_unlock(&ehea_fw_handles.lock);
2523 2524
2524 return ret; 2525 return ret;
2525} 2526}
@@ -2545,7 +2546,7 @@ static int ehea_open(struct net_device *dev)
2545 int ret; 2546 int ret;
2546 struct ehea_port *port = netdev_priv(dev); 2547 struct ehea_port *port = netdev_priv(dev);
2547 2548
2548 down(&port->port_lock); 2549 mutex_lock(&port->port_lock);
2549 2550
2550 if (netif_msg_ifup(port)) 2551 if (netif_msg_ifup(port))
2551 ehea_info("enabling port %s", dev->name); 2552 ehea_info("enabling port %s", dev->name);
@@ -2556,7 +2557,7 @@ static int ehea_open(struct net_device *dev)
2556 netif_start_queue(dev); 2557 netif_start_queue(dev);
2557 } 2558 }
2558 2559
2559 up(&port->port_lock); 2560 mutex_unlock(&port->port_lock);
2560 2561
2561 return ret; 2562 return ret;
2562} 2563}
@@ -2569,18 +2570,18 @@ static int ehea_down(struct net_device *dev)
 	if (port->state == EHEA_PORT_DOWN)
 		return 0;
 
-	down(&ehea_bcmc_regs.lock);
+	mutex_lock(&ehea_fw_handles.lock);
+
+	mutex_lock(&ehea_bcmc_regs.lock);
 	ehea_drop_multicast_list(dev);
 	ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
 
 	ehea_free_interrupts(dev);
 
-	down(&ehea_fw_handles.lock);
-
 	port->state = EHEA_PORT_DOWN;
 
 	ehea_update_bcmc_registrations();
-	up(&ehea_bcmc_regs.lock);
+	mutex_unlock(&ehea_bcmc_regs.lock);
 
 	ret = ehea_clean_all_portres(port);
 	if (ret)
@@ -2588,7 +2589,7 @@ static int ehea_down(struct net_device *dev)
2588 dev->name, ret); 2589 dev->name, ret);
2589 2590
2590 ehea_update_firmware_handles(); 2591 ehea_update_firmware_handles();
2591 up(&ehea_fw_handles.lock); 2592 mutex_unlock(&ehea_fw_handles.lock);
2592 2593
2593 return ret; 2594 return ret;
2594} 2595}
@@ -2602,11 +2603,11 @@ static int ehea_stop(struct net_device *dev)
2602 ehea_info("disabling port %s", dev->name); 2603 ehea_info("disabling port %s", dev->name);
2603 2604
2604 flush_scheduled_work(); 2605 flush_scheduled_work();
2605 down(&port->port_lock); 2606 mutex_lock(&port->port_lock);
2606 netif_stop_queue(dev); 2607 netif_stop_queue(dev);
2607 port_napi_disable(port); 2608 port_napi_disable(port);
2608 ret = ehea_down(dev); 2609 ret = ehea_down(dev);
2609 up(&port->port_lock); 2610 mutex_unlock(&port->port_lock);
2610 return ret; 2611 return ret;
2611} 2612}
2612 2613
@@ -2820,7 +2821,7 @@ static void ehea_reset_port(struct work_struct *work)
2820 struct net_device *dev = port->netdev; 2821 struct net_device *dev = port->netdev;
2821 2822
2822 port->resets++; 2823 port->resets++;
2823 down(&port->port_lock); 2824 mutex_lock(&port->port_lock);
2824 netif_stop_queue(dev); 2825 netif_stop_queue(dev);
2825 2826
2826 port_napi_disable(port); 2827 port_napi_disable(port);
@@ -2840,7 +2841,7 @@ static void ehea_reset_port(struct work_struct *work)
2840 2841
2841 netif_wake_queue(dev); 2842 netif_wake_queue(dev);
2842out: 2843out:
2843 up(&port->port_lock); 2844 mutex_unlock(&port->port_lock);
2844 return; 2845 return;
2845} 2846}
2846 2847
@@ -2849,7 +2850,7 @@ static void ehea_rereg_mrs(struct work_struct *work)
2849 int ret, i; 2850 int ret, i;
2850 struct ehea_adapter *adapter; 2851 struct ehea_adapter *adapter;
2851 2852
2852 down(&dlpar_mem_lock); 2853 mutex_lock(&dlpar_mem_lock);
2853 ehea_info("LPAR memory enlarged - re-initializing driver"); 2854 ehea_info("LPAR memory enlarged - re-initializing driver");
2854 2855
2855 list_for_each_entry(adapter, &adapter_list, list) 2856 list_for_each_entry(adapter, &adapter_list, list)
@@ -2857,22 +2858,24 @@ static void ehea_rereg_mrs(struct work_struct *work)
 	/* Shutdown all ports */
 	for (i = 0; i < EHEA_MAX_PORTS; i++) {
 		struct ehea_port *port = adapter->port[i];
+		struct net_device *dev;
 
-		if (port) {
-			struct net_device *dev = port->netdev;
-
-			if (dev->flags & IFF_UP) {
-				down(&port->port_lock);
-				netif_stop_queue(dev);
-				ehea_flush_sq(port);
-				ret = ehea_stop_qps(dev);
-				if (ret) {
-					up(&port->port_lock);
-					goto out;
-				}
-				port_napi_disable(port);
-				up(&port->port_lock);
+		if (!port)
+			continue;
+
+		dev = port->netdev;
+
+		if (dev->flags & IFF_UP) {
+			mutex_lock(&port->port_lock);
+			netif_stop_queue(dev);
+			ehea_flush_sq(port);
+			ret = ehea_stop_qps(dev);
+			if (ret) {
+				mutex_unlock(&port->port_lock);
+				goto out;
 			}
+			port_napi_disable(port);
+			mutex_unlock(&port->port_lock);
 		}
 	}
 
@@ -2912,17 +2915,17 @@ static void ehea_rereg_mrs(struct work_struct *work)
2912 struct net_device *dev = port->netdev; 2915 struct net_device *dev = port->netdev;
2913 2916
2914 if (dev->flags & IFF_UP) { 2917 if (dev->flags & IFF_UP) {
2915 down(&port->port_lock); 2918 mutex_lock(&port->port_lock);
2916 port_napi_enable(port); 2919 port_napi_enable(port);
2917 ret = ehea_restart_qps(dev); 2920 ret = ehea_restart_qps(dev);
2918 if (!ret) 2921 if (!ret)
2919 netif_wake_queue(dev); 2922 netif_wake_queue(dev);
2920 up(&port->port_lock); 2923 mutex_unlock(&port->port_lock);
2921 } 2924 }
2922 } 2925 }
2923 } 2926 }
2924 } 2927 }
2925 up(&dlpar_mem_lock); 2928 mutex_unlock(&dlpar_mem_lock);
2926 ehea_info("re-initializing driver complete"); 2929 ehea_info("re-initializing driver complete");
2927out: 2930out:
2928 return; 2931 return;
@@ -3083,7 +3086,7 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
3083 3086
3084 port = netdev_priv(dev); 3087 port = netdev_priv(dev);
3085 3088
3086 sema_init(&port->port_lock, 1); 3089 mutex_init(&port->port_lock);
3087 port->state = EHEA_PORT_DOWN; 3090 port->state = EHEA_PORT_DOWN;
3088 port->sig_comp_iv = sq_entries / 10; 3091 port->sig_comp_iv = sq_entries / 10;
3089 3092
@@ -3362,7 +3365,7 @@ static int __devinit ehea_probe_adapter(struct of_device *dev,
3362 ehea_error("Invalid ibmebus device probed"); 3365 ehea_error("Invalid ibmebus device probed");
3363 return -EINVAL; 3366 return -EINVAL;
3364 } 3367 }
3365 down(&ehea_fw_handles.lock); 3368 mutex_lock(&ehea_fw_handles.lock);
3366 3369
3367 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL); 3370 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
3368 if (!adapter) { 3371 if (!adapter) {
@@ -3446,7 +3449,7 @@ out_free_ad:
3446 3449
3447out: 3450out:
3448 ehea_update_firmware_handles(); 3451 ehea_update_firmware_handles();
3449 up(&ehea_fw_handles.lock); 3452 mutex_unlock(&ehea_fw_handles.lock);
3450 return ret; 3453 return ret;
3451} 3454}
3452 3455
@@ -3465,7 +3468,7 @@ static int __devexit ehea_remove(struct of_device *dev)
3465 3468
3466 flush_scheduled_work(); 3469 flush_scheduled_work();
3467 3470
3468 down(&ehea_fw_handles.lock); 3471 mutex_lock(&ehea_fw_handles.lock);
3469 3472
3470 ibmebus_free_irq(adapter->neq->attr.ist1, adapter); 3473 ibmebus_free_irq(adapter->neq->attr.ist1, adapter);
3471 tasklet_kill(&adapter->neq_tasklet); 3474 tasklet_kill(&adapter->neq_tasklet);
@@ -3476,7 +3479,7 @@ static int __devexit ehea_remove(struct of_device *dev)
3476 kfree(adapter); 3479 kfree(adapter);
3477 3480
3478 ehea_update_firmware_handles(); 3481 ehea_update_firmware_handles();
3479 up(&ehea_fw_handles.lock); 3482 mutex_unlock(&ehea_fw_handles.lock);
3480 3483
3481 return 0; 3484 return 0;
3482} 3485}
@@ -3563,9 +3566,8 @@ int __init ehea_module_init(void)
3563 memset(&ehea_fw_handles, 0, sizeof(ehea_fw_handles)); 3566 memset(&ehea_fw_handles, 0, sizeof(ehea_fw_handles));
3564 memset(&ehea_bcmc_regs, 0, sizeof(ehea_bcmc_regs)); 3567 memset(&ehea_bcmc_regs, 0, sizeof(ehea_bcmc_regs));
3565 3568
3566 sema_init(&dlpar_mem_lock, 1); 3569 mutex_init(&ehea_fw_handles.lock);
3567 sema_init(&ehea_fw_handles.lock, 1); 3570 mutex_init(&ehea_bcmc_regs.lock);
3568 sema_init(&ehea_bcmc_regs.lock, 1);
3569 3571
3570 ret = check_module_parm(); 3572 ret = check_module_parm();
3571 if (ret) 3573 if (ret)
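The ehea hunks above are a straight semaphore-to-mutex conversion: locks that were plain struct semaphore objects initialised to a count of 1 become struct mutex, sema_init(..., 1) becomes mutex_init(), and down()/up() become mutex_lock()/mutex_unlock(). A minimal sketch of the pattern, using a hypothetical lock name rather than the driver's own ehea_bcmc_regs/ehea_fw_handles structures:

#include <linux/mutex.h>

/* Statically initialised mutex; for a lock embedded in a runtime-allocated
 * structure, call mutex_init() once instead (as the patch does for
 * port->port_lock and the firmware/bcmc bookkeeping). */
static DEFINE_MUTEX(example_lock);

static void example_update_registrations(void)
{
	mutex_lock(&example_lock);	/* was: down(&sem) on a count-1 semaphore */
	/* ... walk and update the shared registration table ... */
	mutex_unlock(&example_lock);	/* was: up(&sem) */
}

Unlike a semaphore, a mutex must be released by the task that acquired it, which also enables lockdep checking; that is the usual motivation for conversions like this one.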
diff --git a/drivers/net/fec_mpc52xx.c b/drivers/net/fec_mpc52xx.c
index fe59c27c09e3..e5e6352556fa 100644
--- a/drivers/net/fec_mpc52xx.c
+++ b/drivers/net/fec_mpc52xx.c
@@ -198,7 +198,7 @@ static int mpc52xx_fec_init_phy(struct net_device *dev)
198 struct phy_device *phydev; 198 struct phy_device *phydev;
199 char phy_id[BUS_ID_SIZE]; 199 char phy_id[BUS_ID_SIZE];
200 200
201 snprintf(phy_id, BUS_ID_SIZE, PHY_ID_FMT, 201 snprintf(phy_id, BUS_ID_SIZE, "%x:%02x",
202 (unsigned int)dev->base_addr, priv->phy_addr); 202 (unsigned int)dev->base_addr, priv->phy_addr);
203 203
204 priv->link = PHY_DOWN; 204 priv->link = PHY_DOWN;
diff --git a/drivers/net/fec_mpc52xx_phy.c b/drivers/net/fec_mpc52xx_phy.c
index 1d0cd1dd955e..f5634447276d 100644
--- a/drivers/net/fec_mpc52xx_phy.c
+++ b/drivers/net/fec_mpc52xx_phy.c
@@ -124,7 +124,7 @@ static int mpc52xx_fec_mdio_probe(struct of_device *of, const struct of_device_i
124 goto out_free; 124 goto out_free;
125 } 125 }
126 126
127 bus->id = res.start; 127 snprintf(bus->id, MII_BUS_ID_SIZE, "%x", res.start);
128 bus->priv = priv; 128 bus->priv = priv;
129 129
130 bus->dev = dev; 130 bus->dev = dev;
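Both fec_mpc52xx hunks track the phylib API change in this merge window: mii_bus::id is now a fixed-size string rather than an integer, and a PHY is looked up by a "<bus id>:<address>" string (the drivers open-code "%x:%02x" here, presumably because PHY_ID_FMT now expects the string bus id). A sketch of the two formatting calls, with made-up base-address and PHY-address values standing in for what the driver reads from the device tree:

#include <linux/kernel.h>
#include <linux/phy.h>

/* Hypothetical helper; bus_base and phy_addr are illustrative only. */
static void example_format_ids(struct mii_bus *bus, unsigned int bus_base,
			       int phy_addr, char *phy_id, size_t len)
{
	/* The bus is named after its register base, e.g. "f0003000" ... */
	snprintf(bus->id, MII_BUS_ID_SIZE, "%x", bus_base);
	/* ... and the PHY id appends the address, e.g. "f0003000:01". */
	snprintf(phy_id, len, "%x:%02x", bus_base, phy_addr);
}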
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 9f088a47d8b1..8c4214b0ee1f 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -29,90 +29,6 @@
29 * along with this program; if not, write to the Free Software 29 * along with this program; if not, write to the Free Software
30 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 30 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
31 * 31 *
32 * Changelog:
33 * 0.01: 05 Oct 2003: First release that compiles without warnings.
34 * 0.02: 05 Oct 2003: Fix bug for nv_drain_tx: do not try to free NULL skbs.
35 * Check all PCI BARs for the register window.
36 * udelay added to mii_rw.
37 * 0.03: 06 Oct 2003: Initialize dev->irq.
38 * 0.04: 07 Oct 2003: Initialize np->lock, reduce handled irqs, add printks.
39 * 0.05: 09 Oct 2003: printk removed again, irq status print tx_timeout.
40 * 0.06: 10 Oct 2003: MAC Address read updated, pff flag generation updated,
41 * irq mask updated
42 * 0.07: 14 Oct 2003: Further irq mask updates.
43 * 0.08: 20 Oct 2003: rx_desc.Length initialization added, nv_alloc_rx refill
44 * added into irq handler, NULL check for drain_ring.
45 * 0.09: 20 Oct 2003: Basic link speed irq implementation. Only handle the
46 * requested interrupt sources.
47 * 0.10: 20 Oct 2003: First cleanup for release.
48 * 0.11: 21 Oct 2003: hexdump for tx added, rx buffer sizes increased.
49 * MAC Address init fix, set_multicast cleanup.
50 * 0.12: 23 Oct 2003: Cleanups for release.
51 * 0.13: 25 Oct 2003: Limit for concurrent tx packets increased to 10.
52 * Set link speed correctly. start rx before starting
53 * tx (nv_start_rx sets the link speed).
54 * 0.14: 25 Oct 2003: Nic dependant irq mask.
55 * 0.15: 08 Nov 2003: fix smp deadlock with set_multicast_list during
56 * open.
57 * 0.16: 15 Nov 2003: include file cleanup for ppc64, rx buffer size
58 * increased to 1628 bytes.
59 * 0.17: 16 Nov 2003: undo rx buffer size increase. Substract 1 from
60 * the tx length.
61 * 0.18: 17 Nov 2003: fix oops due to late initialization of dev_stats
62 * 0.19: 29 Nov 2003: Handle RxNoBuf, detect & handle invalid mac
63 * addresses, really stop rx if already running
64 * in nv_start_rx, clean up a bit.
65 * 0.20: 07 Dec 2003: alloc fixes
66 * 0.21: 12 Jan 2004: additional alloc fix, nic polling fix.
67 * 0.22: 19 Jan 2004: reprogram timer to a sane rate, avoid lockup
68 * on close.
69 * 0.23: 26 Jan 2004: various small cleanups
70 * 0.24: 27 Feb 2004: make driver even less anonymous in backtraces
71 * 0.25: 09 Mar 2004: wol support
72 * 0.26: 03 Jun 2004: netdriver specific annotation, sparse-related fixes
73 * 0.27: 19 Jun 2004: Gigabit support, new descriptor rings,
74 * added CK804/MCP04 device IDs, code fixes
75 * for registers, link status and other minor fixes.
76 * 0.28: 21 Jun 2004: Big cleanup, making driver mostly endian safe
77 * 0.29: 31 Aug 2004: Add backup timer for link change notification.
78 * 0.30: 25 Sep 2004: rx checksum support for nf 250 Gb. Add rx reset
79 * into nv_close, otherwise reenabling for wol can
80 * cause DMA to kfree'd memory.
81 * 0.31: 14 Nov 2004: ethtool support for getting/setting link
82 * capabilities.
83 * 0.32: 16 Apr 2005: RX_ERROR4 handling added.
84 * 0.33: 16 May 2005: Support for MCP51 added.
85 * 0.34: 18 Jun 2005: Add DEV_NEED_LINKTIMER to all nForce nics.
86 * 0.35: 26 Jun 2005: Support for MCP55 added.
87 * 0.36: 28 Jun 2005: Add jumbo frame support.
88 * 0.37: 10 Jul 2005: Additional ethtool support, cleanup of pci id list
89 * 0.38: 16 Jul 2005: tx irq rewrite: Use global flags instead of
90 * per-packet flags.
91 * 0.39: 18 Jul 2005: Add 64bit descriptor support.
92 * 0.40: 19 Jul 2005: Add support for mac address change.
93 * 0.41: 30 Jul 2005: Write back original MAC in nv_close instead
94 * of nv_remove
95 * 0.42: 06 Aug 2005: Fix lack of link speed initialization
96 * in the second (and later) nv_open call
97 * 0.43: 10 Aug 2005: Add support for tx checksum.
98 * 0.44: 20 Aug 2005: Add support for scatter gather and segmentation.
99 * 0.45: 18 Sep 2005: Remove nv_stop/start_rx from every link check
100 * 0.46: 20 Oct 2005: Add irq optimization modes.
101 * 0.47: 26 Oct 2005: Add phyaddr 0 in phy scan.
102 * 0.48: 24 Dec 2005: Disable TSO, bugfix for pci_map_single
103 * 0.49: 10 Dec 2005: Fix tso for large buffers.
104 * 0.50: 20 Jan 2006: Add 8021pq tagging support.
105 * 0.51: 20 Jan 2006: Add 64bit consistent memory allocation for rings.
106 * 0.52: 20 Jan 2006: Add MSI/MSIX support.
107 * 0.53: 19 Mar 2006: Fix init from low power mode and add hw reset.
108 * 0.54: 21 Mar 2006: Fix spin locks for multi irqs and cleanup.
109 * 0.55: 22 Mar 2006: Add flow control (pause frame).
110 * 0.56: 22 Mar 2006: Additional ethtool config and moduleparam support.
111 * 0.57: 14 May 2006: Mac address set in probe/remove and order corrections.
112 * 0.58: 30 Oct 2006: Added support for sideband management unit.
113 * 0.59: 30 Oct 2006: Added support for recoverable error.
114 * 0.60: 20 Jan 2007: Code optimizations for rings, rx & tx data paths, and stats.
115 *
116 * Known bugs: 32 * Known bugs:
117 * We suspect that on some hardware no TX done interrupts are generated. 33 * We suspect that on some hardware no TX done interrupts are generated.
118 * This means recovery from netif_stop_queue only happens if the hw timer 34 * This means recovery from netif_stop_queue only happens if the hw timer
@@ -123,11 +39,6 @@
123 * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few 39 * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
124 * superfluous timer interrupts from the nic. 40 * superfluous timer interrupts from the nic.
125 */ 41 */
126#ifdef CONFIG_FORCEDETH_NAPI
127#define DRIVERNAPI "-NAPI"
128#else
129#define DRIVERNAPI
130#endif
131#define FORCEDETH_VERSION "0.61" 42#define FORCEDETH_VERSION "0.61"
132#define DRV_NAME "forcedeth" 43#define DRV_NAME "forcedeth"
133 44
@@ -930,6 +841,13 @@ static inline u32 nv_descr_getlength_ex(struct ring_desc_ex *prd, u32 v)
930 return le32_to_cpu(prd->flaglen) & LEN_MASK_V2; 841 return le32_to_cpu(prd->flaglen) & LEN_MASK_V2;
931} 842}
932 843
844static bool nv_optimized(struct fe_priv *np)
845{
846 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
847 return false;
848 return true;
849}
850
933static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target, 851static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target,
934 int delay, int delaymax, const char *msg) 852 int delay, int delaymax, const char *msg)
935{ 853{
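nv_optimized() folds the recurring "desc_ver == DESC_VER_1 || desc_ver == DESC_VER_2" test into one predicate. Since DESC_VER_3 is the only other descriptor version the driver knows about (a later hunk replaces a direct DESC_VER_3 comparison with the same helper), the function is equivalent to the one-liner below; the longer form in the patch simply mirrors the tests it replaces. fe_priv and DESC_VER_3 are the driver's own definitions.

/* Equivalent form, assuming DESC_VER_3 is the only remaining version. */
static bool nv_optimized(struct fe_priv *np)
{
	return np->desc_ver == DESC_VER_3;	/* extended descriptors use the "optimized" paths */
}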
@@ -966,7 +884,7 @@ static void setup_hw_rings(struct net_device *dev, int rxtx_flags)
966 struct fe_priv *np = get_nvpriv(dev); 884 struct fe_priv *np = get_nvpriv(dev);
967 u8 __iomem *base = get_hwbase(dev); 885 u8 __iomem *base = get_hwbase(dev);
968 886
969 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 887 if (!nv_optimized(np)) {
970 if (rxtx_flags & NV_SETUP_RX_RING) { 888 if (rxtx_flags & NV_SETUP_RX_RING) {
971 writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr); 889 writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr);
972 } 890 }
@@ -989,7 +907,7 @@ static void free_rings(struct net_device *dev)
989{ 907{
990 struct fe_priv *np = get_nvpriv(dev); 908 struct fe_priv *np = get_nvpriv(dev);
991 909
992 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 910 if (!nv_optimized(np)) {
993 if (np->rx_ring.orig) 911 if (np->rx_ring.orig)
994 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size), 912 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
995 np->rx_ring.orig, np->ring_addr); 913 np->rx_ring.orig, np->ring_addr);
@@ -1435,6 +1353,18 @@ static void nv_stop_tx(struct net_device *dev)
1435 base + NvRegTransmitPoll); 1353 base + NvRegTransmitPoll);
1436} 1354}
1437 1355
1356static void nv_start_rxtx(struct net_device *dev)
1357{
1358 nv_start_rx(dev);
1359 nv_start_tx(dev);
1360}
1361
1362static void nv_stop_rxtx(struct net_device *dev)
1363{
1364 nv_stop_rx(dev);
1365 nv_stop_tx(dev);
1366}
1367
1438static void nv_txrx_reset(struct net_device *dev) 1368static void nv_txrx_reset(struct net_device *dev)
1439{ 1369{
1440 struct fe_priv *np = netdev_priv(dev); 1370 struct fe_priv *np = netdev_priv(dev);
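nv_start_rxtx() and nv_stop_rxtx() (together with nv_drain_rxtx() further down) exist so the many stop/reset/drain/reinit/restart sequences elsewhere in the driver collapse to single calls, which is what most of the remaining forcedeth hunks do. A condensed sketch of the resulting call pattern, with the ring-reinit details elided; the wrapper name is made up, and the locking mirrors nv_change_mtu()/nv_do_nic_poll():

/* Hypothetical condensed restart path (dev/np as in the driver). */
static void example_restart_engines(struct net_device *dev, struct fe_priv *np)
{
	netif_tx_lock_bh(dev);
	spin_lock(&np->lock);
	nv_stop_rxtx(dev);	/* was: nv_stop_rx(dev); nv_stop_tx(dev); */
	nv_txrx_reset(dev);
	nv_drain_rxtx(dev);	/* was: nv_drain_rx(dev); nv_drain_tx(dev); */
	/* ... reinitialise rings and buffer sizes here ... */
	nv_start_rxtx(dev);	/* was: nv_start_rx(dev); nv_start_tx(dev); */
	spin_unlock(&np->lock);
	netif_tx_unlock_bh(dev);
}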
@@ -1657,7 +1587,7 @@ static void nv_do_rx_refill(unsigned long data)
1657 } else { 1587 } else {
1658 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector); 1588 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
1659 } 1589 }
1660 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) 1590 if (!nv_optimized(np))
1661 retcode = nv_alloc_rx(dev); 1591 retcode = nv_alloc_rx(dev);
1662 else 1592 else
1663 retcode = nv_alloc_rx_optimized(dev); 1593 retcode = nv_alloc_rx_optimized(dev);
@@ -1682,8 +1612,10 @@ static void nv_init_rx(struct net_device *dev)
1682{ 1612{
1683 struct fe_priv *np = netdev_priv(dev); 1613 struct fe_priv *np = netdev_priv(dev);
1684 int i; 1614 int i;
1615
1685 np->get_rx = np->put_rx = np->first_rx = np->rx_ring; 1616 np->get_rx = np->put_rx = np->first_rx = np->rx_ring;
1686 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) 1617
1618 if (!nv_optimized(np))
1687 np->last_rx.orig = &np->rx_ring.orig[np->rx_ring_size-1]; 1619 np->last_rx.orig = &np->rx_ring.orig[np->rx_ring_size-1];
1688 else 1620 else
1689 np->last_rx.ex = &np->rx_ring.ex[np->rx_ring_size-1]; 1621 np->last_rx.ex = &np->rx_ring.ex[np->rx_ring_size-1];
@@ -1691,7 +1623,7 @@ static void nv_init_rx(struct net_device *dev)
1691 np->last_rx_ctx = &np->rx_skb[np->rx_ring_size-1]; 1623 np->last_rx_ctx = &np->rx_skb[np->rx_ring_size-1];
1692 1624
1693 for (i = 0; i < np->rx_ring_size; i++) { 1625 for (i = 0; i < np->rx_ring_size; i++) {
1694 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 1626 if (!nv_optimized(np)) {
1695 np->rx_ring.orig[i].flaglen = 0; 1627 np->rx_ring.orig[i].flaglen = 0;
1696 np->rx_ring.orig[i].buf = 0; 1628 np->rx_ring.orig[i].buf = 0;
1697 } else { 1629 } else {
@@ -1709,8 +1641,10 @@ static void nv_init_tx(struct net_device *dev)
1709{ 1641{
1710 struct fe_priv *np = netdev_priv(dev); 1642 struct fe_priv *np = netdev_priv(dev);
1711 int i; 1643 int i;
1644
1712 np->get_tx = np->put_tx = np->first_tx = np->tx_ring; 1645 np->get_tx = np->put_tx = np->first_tx = np->tx_ring;
1713 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) 1646
1647 if (!nv_optimized(np))
1714 np->last_tx.orig = &np->tx_ring.orig[np->tx_ring_size-1]; 1648 np->last_tx.orig = &np->tx_ring.orig[np->tx_ring_size-1];
1715 else 1649 else
1716 np->last_tx.ex = &np->tx_ring.ex[np->tx_ring_size-1]; 1650 np->last_tx.ex = &np->tx_ring.ex[np->tx_ring_size-1];
@@ -1721,7 +1655,7 @@ static void nv_init_tx(struct net_device *dev)
1721 np->tx_end_flip = NULL; 1655 np->tx_end_flip = NULL;
1722 1656
1723 for (i = 0; i < np->tx_ring_size; i++) { 1657 for (i = 0; i < np->tx_ring_size; i++) {
1724 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 1658 if (!nv_optimized(np)) {
1725 np->tx_ring.orig[i].flaglen = 0; 1659 np->tx_ring.orig[i].flaglen = 0;
1726 np->tx_ring.orig[i].buf = 0; 1660 np->tx_ring.orig[i].buf = 0;
1727 } else { 1661 } else {
@@ -1744,7 +1678,8 @@ static int nv_init_ring(struct net_device *dev)
1744 1678
1745 nv_init_tx(dev); 1679 nv_init_tx(dev);
1746 nv_init_rx(dev); 1680 nv_init_rx(dev);
1747 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) 1681
1682 if (!nv_optimized(np))
1748 return nv_alloc_rx(dev); 1683 return nv_alloc_rx(dev);
1749 else 1684 else
1750 return nv_alloc_rx_optimized(dev); 1685 return nv_alloc_rx_optimized(dev);
@@ -1775,7 +1710,7 @@ static void nv_drain_tx(struct net_device *dev)
1775 unsigned int i; 1710 unsigned int i;
1776 1711
1777 for (i = 0; i < np->tx_ring_size; i++) { 1712 for (i = 0; i < np->tx_ring_size; i++) {
1778 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 1713 if (!nv_optimized(np)) {
1779 np->tx_ring.orig[i].flaglen = 0; 1714 np->tx_ring.orig[i].flaglen = 0;
1780 np->tx_ring.orig[i].buf = 0; 1715 np->tx_ring.orig[i].buf = 0;
1781 } else { 1716 } else {
@@ -1802,7 +1737,7 @@ static void nv_drain_rx(struct net_device *dev)
1802 int i; 1737 int i;
1803 1738
1804 for (i = 0; i < np->rx_ring_size; i++) { 1739 for (i = 0; i < np->rx_ring_size; i++) {
1805 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 1740 if (!nv_optimized(np)) {
1806 np->rx_ring.orig[i].flaglen = 0; 1741 np->rx_ring.orig[i].flaglen = 0;
1807 np->rx_ring.orig[i].buf = 0; 1742 np->rx_ring.orig[i].buf = 0;
1808 } else { 1743 } else {
@@ -1823,7 +1758,7 @@ static void nv_drain_rx(struct net_device *dev)
1823 } 1758 }
1824} 1759}
1825 1760
1826static void drain_ring(struct net_device *dev) 1761static void nv_drain_rxtx(struct net_device *dev)
1827{ 1762{
1828 nv_drain_tx(dev); 1763 nv_drain_tx(dev);
1829 nv_drain_rx(dev); 1764 nv_drain_rx(dev);
@@ -2260,7 +2195,7 @@ static void nv_tx_timeout(struct net_device *dev)
2260 } 2195 }
2261 printk(KERN_INFO "%s: Dumping tx ring\n", dev->name); 2196 printk(KERN_INFO "%s: Dumping tx ring\n", dev->name);
2262 for (i=0;i<np->tx_ring_size;i+= 4) { 2197 for (i=0;i<np->tx_ring_size;i+= 4) {
2263 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 2198 if (!nv_optimized(np)) {
2264 printk(KERN_INFO "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n", 2199 printk(KERN_INFO "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n",
2265 i, 2200 i,
2266 le32_to_cpu(np->tx_ring.orig[i].buf), 2201 le32_to_cpu(np->tx_ring.orig[i].buf),
@@ -2296,7 +2231,7 @@ static void nv_tx_timeout(struct net_device *dev)
2296 nv_stop_tx(dev); 2231 nv_stop_tx(dev);
2297 2232
2298 /* 2) check that the packets were not sent already: */ 2233 /* 2) check that the packets were not sent already: */
2299 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) 2234 if (!nv_optimized(np))
2300 nv_tx_done(dev); 2235 nv_tx_done(dev);
2301 else 2236 else
2302 nv_tx_done_optimized(dev, np->tx_ring_size); 2237 nv_tx_done_optimized(dev, np->tx_ring_size);
@@ -2663,12 +2598,10 @@ static int nv_change_mtu(struct net_device *dev, int new_mtu)
2663 netif_tx_lock_bh(dev); 2598 netif_tx_lock_bh(dev);
2664 spin_lock(&np->lock); 2599 spin_lock(&np->lock);
2665 /* stop engines */ 2600 /* stop engines */
2666 nv_stop_rx(dev); 2601 nv_stop_rxtx(dev);
2667 nv_stop_tx(dev);
2668 nv_txrx_reset(dev); 2602 nv_txrx_reset(dev);
2669 /* drain rx queue */ 2603 /* drain rx queue */
2670 nv_drain_rx(dev); 2604 nv_drain_rxtx(dev);
2671 nv_drain_tx(dev);
2672 /* reinit driver view of the rx queue */ 2605 /* reinit driver view of the rx queue */
2673 set_bufsize(dev); 2606 set_bufsize(dev);
2674 if (nv_init_ring(dev)) { 2607 if (nv_init_ring(dev)) {
@@ -2685,8 +2618,7 @@ static int nv_change_mtu(struct net_device *dev, int new_mtu)
2685 pci_push(base); 2618 pci_push(base);
2686 2619
2687 /* restart rx engine */ 2620 /* restart rx engine */
2688 nv_start_rx(dev); 2621 nv_start_rxtx(dev);
2689 nv_start_tx(dev);
2690 spin_unlock(&np->lock); 2622 spin_unlock(&np->lock);
2691 netif_tx_unlock_bh(dev); 2623 netif_tx_unlock_bh(dev);
2692 nv_enable_irq(dev); 2624 nv_enable_irq(dev);
@@ -3393,7 +3325,7 @@ static int nv_napi_poll(struct napi_struct *napi, int budget)
3393 unsigned long flags; 3325 unsigned long flags;
3394 int pkts, retcode; 3326 int pkts, retcode;
3395 3327
3396 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 3328 if (!nv_optimized(np)) {
3397 pkts = nv_rx_process(dev, budget); 3329 pkts = nv_rx_process(dev, budget);
3398 retcode = nv_alloc_rx(dev); 3330 retcode = nv_alloc_rx(dev);
3399 } else { 3331 } else {
@@ -3634,7 +3566,7 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
3634 if (intr_test) { 3566 if (intr_test) {
3635 handler = nv_nic_irq_test; 3567 handler = nv_nic_irq_test;
3636 } else { 3568 } else {
3637 if (np->desc_ver == DESC_VER_3) 3569 if (nv_optimized(np))
3638 handler = nv_nic_irq_optimized; 3570 handler = nv_nic_irq_optimized;
3639 else 3571 else
3640 handler = nv_nic_irq; 3572 handler = nv_nic_irq;
@@ -3787,12 +3719,10 @@ static void nv_do_nic_poll(unsigned long data)
3787 netif_tx_lock_bh(dev); 3719 netif_tx_lock_bh(dev);
3788 spin_lock(&np->lock); 3720 spin_lock(&np->lock);
3789 /* stop engines */ 3721 /* stop engines */
3790 nv_stop_rx(dev); 3722 nv_stop_rxtx(dev);
3791 nv_stop_tx(dev);
3792 nv_txrx_reset(dev); 3723 nv_txrx_reset(dev);
3793 /* drain rx queue */ 3724 /* drain rx queue */
3794 nv_drain_rx(dev); 3725 nv_drain_rxtx(dev);
3795 nv_drain_tx(dev);
3796 /* reinit driver view of the rx queue */ 3726 /* reinit driver view of the rx queue */
3797 set_bufsize(dev); 3727 set_bufsize(dev);
3798 if (nv_init_ring(dev)) { 3728 if (nv_init_ring(dev)) {
@@ -3809,8 +3739,7 @@ static void nv_do_nic_poll(unsigned long data)
3809 pci_push(base); 3739 pci_push(base);
3810 3740
3811 /* restart rx engine */ 3741 /* restart rx engine */
3812 nv_start_rx(dev); 3742 nv_start_rxtx(dev);
3813 nv_start_tx(dev);
3814 spin_unlock(&np->lock); 3743 spin_unlock(&np->lock);
3815 netif_tx_unlock_bh(dev); 3744 netif_tx_unlock_bh(dev);
3816 } 3745 }
@@ -3821,7 +3750,7 @@ static void nv_do_nic_poll(unsigned long data)
3821 pci_push(base); 3750 pci_push(base);
3822 3751
3823 if (!using_multi_irqs(dev)) { 3752 if (!using_multi_irqs(dev)) {
3824 if (np->desc_ver == DESC_VER_3) 3753 if (nv_optimized(np))
3825 nv_nic_irq_optimized(0, dev); 3754 nv_nic_irq_optimized(0, dev);
3826 else 3755 else
3827 nv_nic_irq(0, dev); 3756 nv_nic_irq(0, dev);
@@ -3860,7 +3789,8 @@ static void nv_do_stats_poll(unsigned long data)
3860 nv_get_hw_stats(dev); 3789 nv_get_hw_stats(dev);
3861 3790
3862 if (!np->in_shutdown) 3791 if (!np->in_shutdown)
3863 mod_timer(&np->stats_poll, jiffies + STATS_INTERVAL); 3792 mod_timer(&np->stats_poll,
3793 round_jiffies(jiffies + STATS_INTERVAL));
3864} 3794}
3865 3795
3866static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) 3796static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
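round_jiffies() rounds the expiry to a whole-second boundary so periodic timers from different subsystems tend to fire in the same wakeup; for a statistics poll the exact interval does not matter. The re-arm pattern used here and again in nv_open() below, wrapped in a hypothetical helper:

#include <linux/jiffies.h>
#include <linux/timer.h>

/* np and STATS_INTERVAL are the driver's own; the wrapper is illustrative. */
static void example_rearm_stats_timer(struct fe_priv *np)
{
	if (!np->in_shutdown)
		mod_timer(&np->stats_poll,
			  round_jiffies(jiffies + STATS_INTERVAL));
}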
@@ -4018,8 +3948,7 @@ static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
4018 netif_tx_lock_bh(dev); 3948 netif_tx_lock_bh(dev);
4019 spin_lock(&np->lock); 3949 spin_lock(&np->lock);
4020 /* stop engines */ 3950 /* stop engines */
4021 nv_stop_rx(dev); 3951 nv_stop_rxtx(dev);
4022 nv_stop_tx(dev);
4023 spin_unlock(&np->lock); 3952 spin_unlock(&np->lock);
4024 netif_tx_unlock_bh(dev); 3953 netif_tx_unlock_bh(dev);
4025 } 3954 }
@@ -4125,8 +4054,7 @@ static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
4125 } 4054 }
4126 4055
4127 if (netif_running(dev)) { 4056 if (netif_running(dev)) {
4128 nv_start_rx(dev); 4057 nv_start_rxtx(dev);
4129 nv_start_tx(dev);
4130 nv_enable_irq(dev); 4058 nv_enable_irq(dev);
4131 } 4059 }
4132 4060
@@ -4169,8 +4097,7 @@ static int nv_nway_reset(struct net_device *dev)
4169 netif_tx_lock_bh(dev); 4097 netif_tx_lock_bh(dev);
4170 spin_lock(&np->lock); 4098 spin_lock(&np->lock);
4171 /* stop engines */ 4099 /* stop engines */
4172 nv_stop_rx(dev); 4100 nv_stop_rxtx(dev);
4173 nv_stop_tx(dev);
4174 spin_unlock(&np->lock); 4101 spin_unlock(&np->lock);
4175 netif_tx_unlock_bh(dev); 4102 netif_tx_unlock_bh(dev);
4176 printk(KERN_INFO "%s: link down.\n", dev->name); 4103 printk(KERN_INFO "%s: link down.\n", dev->name);
@@ -4190,8 +4117,7 @@ static int nv_nway_reset(struct net_device *dev)
4190 } 4117 }
4191 4118
4192 if (netif_running(dev)) { 4119 if (netif_running(dev)) {
4193 nv_start_rx(dev); 4120 nv_start_rxtx(dev);
4194 nv_start_tx(dev);
4195 nv_enable_irq(dev); 4121 nv_enable_irq(dev);
4196 } 4122 }
4197 ret = 0; 4123 ret = 0;
@@ -4248,7 +4174,7 @@ static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ri
4248 } 4174 }
4249 4175
4250 /* allocate new rings */ 4176 /* allocate new rings */
4251 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 4177 if (!nv_optimized(np)) {
4252 rxtx_ring = pci_alloc_consistent(np->pci_dev, 4178 rxtx_ring = pci_alloc_consistent(np->pci_dev,
4253 sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending), 4179 sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending),
4254 &ring_addr); 4180 &ring_addr);
@@ -4261,7 +4187,7 @@ static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ri
4261 tx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->tx_pending, GFP_KERNEL); 4187 tx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->tx_pending, GFP_KERNEL);
4262 if (!rxtx_ring || !rx_skbuff || !tx_skbuff) { 4188 if (!rxtx_ring || !rx_skbuff || !tx_skbuff) {
4263 /* fall back to old rings */ 4189 /* fall back to old rings */
4264 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 4190 if (!nv_optimized(np)) {
4265 if (rxtx_ring) 4191 if (rxtx_ring)
4266 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending), 4192 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending),
4267 rxtx_ring, ring_addr); 4193 rxtx_ring, ring_addr);
@@ -4282,12 +4208,10 @@ static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ri
4282 netif_tx_lock_bh(dev); 4208 netif_tx_lock_bh(dev);
4283 spin_lock(&np->lock); 4209 spin_lock(&np->lock);
4284 /* stop engines */ 4210 /* stop engines */
4285 nv_stop_rx(dev); 4211 nv_stop_rxtx(dev);
4286 nv_stop_tx(dev);
4287 nv_txrx_reset(dev); 4212 nv_txrx_reset(dev);
4288 /* drain queues */ 4213 /* drain queues */
4289 nv_drain_rx(dev); 4214 nv_drain_rxtx(dev);
4290 nv_drain_tx(dev);
4291 /* delete queues */ 4215 /* delete queues */
4292 free_rings(dev); 4216 free_rings(dev);
4293 } 4217 }
@@ -4295,7 +4219,8 @@ static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ri
4295 /* set new values */ 4219 /* set new values */
4296 np->rx_ring_size = ring->rx_pending; 4220 np->rx_ring_size = ring->rx_pending;
4297 np->tx_ring_size = ring->tx_pending; 4221 np->tx_ring_size = ring->tx_pending;
4298 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 4222
4223 if (!nv_optimized(np)) {
4299 np->rx_ring.orig = (struct ring_desc*)rxtx_ring; 4224 np->rx_ring.orig = (struct ring_desc*)rxtx_ring;
4300 np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size]; 4225 np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
4301 } else { 4226 } else {
@@ -4327,8 +4252,7 @@ static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ri
4327 pci_push(base); 4252 pci_push(base);
4328 4253
4329 /* restart engines */ 4254 /* restart engines */
4330 nv_start_rx(dev); 4255 nv_start_rxtx(dev);
4331 nv_start_tx(dev);
4332 spin_unlock(&np->lock); 4256 spin_unlock(&np->lock);
4333 netif_tx_unlock_bh(dev); 4257 netif_tx_unlock_bh(dev);
4334 nv_enable_irq(dev); 4258 nv_enable_irq(dev);
@@ -4369,8 +4293,7 @@ static int nv_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam*
4369 netif_tx_lock_bh(dev); 4293 netif_tx_lock_bh(dev);
4370 spin_lock(&np->lock); 4294 spin_lock(&np->lock);
4371 /* stop engines */ 4295 /* stop engines */
4372 nv_stop_rx(dev); 4296 nv_stop_rxtx(dev);
4373 nv_stop_tx(dev);
4374 spin_unlock(&np->lock); 4297 spin_unlock(&np->lock);
4375 netif_tx_unlock_bh(dev); 4298 netif_tx_unlock_bh(dev);
4376 } 4299 }
@@ -4411,8 +4334,7 @@ static int nv_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam*
4411 } 4334 }
4412 4335
4413 if (netif_running(dev)) { 4336 if (netif_running(dev)) {
4414 nv_start_rx(dev); 4337 nv_start_rxtx(dev);
4415 nv_start_tx(dev);
4416 nv_enable_irq(dev); 4338 nv_enable_irq(dev);
4417 } 4339 }
4418 return 0; 4340 return 0;
@@ -4648,8 +4570,7 @@ static int nv_loopback_test(struct net_device *dev)
4648 pci_push(base); 4570 pci_push(base);
4649 4571
4650 /* restart rx engine */ 4572 /* restart rx engine */
4651 nv_start_rx(dev); 4573 nv_start_rxtx(dev);
4652 nv_start_tx(dev);
4653 4574
4654 /* setup packet for tx */ 4575 /* setup packet for tx */
4655 pkt_len = ETH_DATA_LEN; 4576 pkt_len = ETH_DATA_LEN;
@@ -4667,7 +4588,7 @@ static int nv_loopback_test(struct net_device *dev)
4667 for (i = 0; i < pkt_len; i++) 4588 for (i = 0; i < pkt_len; i++)
4668 pkt_data[i] = (u8)(i & 0xff); 4589 pkt_data[i] = (u8)(i & 0xff);
4669 4590
4670 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 4591 if (!nv_optimized(np)) {
4671 np->tx_ring.orig[0].buf = cpu_to_le32(test_dma_addr); 4592 np->tx_ring.orig[0].buf = cpu_to_le32(test_dma_addr);
4672 np->tx_ring.orig[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra); 4593 np->tx_ring.orig[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
4673 } else { 4594 } else {
@@ -4681,7 +4602,7 @@ static int nv_loopback_test(struct net_device *dev)
4681 msleep(500); 4602 msleep(500);
4682 4603
4683 /* check for rx of the packet */ 4604 /* check for rx of the packet */
4684 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 4605 if (!nv_optimized(np)) {
4685 flags = le32_to_cpu(np->rx_ring.orig[0].flaglen); 4606 flags = le32_to_cpu(np->rx_ring.orig[0].flaglen);
4686 len = nv_descr_getlength(&np->rx_ring.orig[0], np->desc_ver); 4607 len = nv_descr_getlength(&np->rx_ring.orig[0], np->desc_ver);
4687 4608
@@ -4727,12 +4648,10 @@ static int nv_loopback_test(struct net_device *dev)
4727 dev_kfree_skb_any(tx_skb); 4648 dev_kfree_skb_any(tx_skb);
4728 out: 4649 out:
4729 /* stop engines */ 4650 /* stop engines */
4730 nv_stop_rx(dev); 4651 nv_stop_rxtx(dev);
4731 nv_stop_tx(dev);
4732 nv_txrx_reset(dev); 4652 nv_txrx_reset(dev);
4733 /* drain rx queue */ 4653 /* drain rx queue */
4734 nv_drain_rx(dev); 4654 nv_drain_rxtx(dev);
4735 nv_drain_tx(dev);
4736 4655
4737 if (netif_running(dev)) { 4656 if (netif_running(dev)) {
4738 writel(misc1_flags, base + NvRegMisc1); 4657 writel(misc1_flags, base + NvRegMisc1);
@@ -4770,12 +4689,10 @@ static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64
4770 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus); 4689 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
4771 } 4690 }
4772 /* stop engines */ 4691 /* stop engines */
4773 nv_stop_rx(dev); 4692 nv_stop_rxtx(dev);
4774 nv_stop_tx(dev);
4775 nv_txrx_reset(dev); 4693 nv_txrx_reset(dev);
4776 /* drain rx queue */ 4694 /* drain rx queue */
4777 nv_drain_rx(dev); 4695 nv_drain_rxtx(dev);
4778 nv_drain_tx(dev);
4779 spin_unlock_irq(&np->lock); 4696 spin_unlock_irq(&np->lock);
4780 netif_tx_unlock_bh(dev); 4697 netif_tx_unlock_bh(dev);
4781 } 4698 }
@@ -4816,8 +4733,7 @@ static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64
4816 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 4733 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
4817 pci_push(base); 4734 pci_push(base);
4818 /* restart rx engine */ 4735 /* restart rx engine */
4819 nv_start_rx(dev); 4736 nv_start_rxtx(dev);
4820 nv_start_tx(dev);
4821 netif_start_queue(dev); 4737 netif_start_queue(dev);
4822#ifdef CONFIG_FORCEDETH_NAPI 4738#ifdef CONFIG_FORCEDETH_NAPI
4823 napi_enable(&np->napi); 4739 napi_enable(&np->napi);
@@ -5046,8 +4962,7 @@ static int nv_open(struct net_device *dev)
5046 * to init hw */ 4962 * to init hw */
5047 np->linkspeed = 0; 4963 np->linkspeed = 0;
5048 ret = nv_update_linkspeed(dev); 4964 ret = nv_update_linkspeed(dev);
5049 nv_start_rx(dev); 4965 nv_start_rxtx(dev);
5050 nv_start_tx(dev);
5051 netif_start_queue(dev); 4966 netif_start_queue(dev);
5052#ifdef CONFIG_FORCEDETH_NAPI 4967#ifdef CONFIG_FORCEDETH_NAPI
5053 napi_enable(&np->napi); 4968 napi_enable(&np->napi);
@@ -5064,13 +4979,14 @@ static int nv_open(struct net_device *dev)
5064 4979
5065 /* start statistics timer */ 4980 /* start statistics timer */
5066 if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2)) 4981 if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2))
5067 mod_timer(&np->stats_poll, jiffies + STATS_INTERVAL); 4982 mod_timer(&np->stats_poll,
4983 round_jiffies(jiffies + STATS_INTERVAL));
5068 4984
5069 spin_unlock_irq(&np->lock); 4985 spin_unlock_irq(&np->lock);
5070 4986
5071 return 0; 4987 return 0;
5072out_drain: 4988out_drain:
5073 drain_ring(dev); 4989 nv_drain_rxtx(dev);
5074 return ret; 4990 return ret;
5075} 4991}
5076 4992
@@ -5093,8 +5009,7 @@ static int nv_close(struct net_device *dev)
5093 5009
5094 netif_stop_queue(dev); 5010 netif_stop_queue(dev);
5095 spin_lock_irq(&np->lock); 5011 spin_lock_irq(&np->lock);
5096 nv_stop_tx(dev); 5012 nv_stop_rxtx(dev);
5097 nv_stop_rx(dev);
5098 nv_txrx_reset(dev); 5013 nv_txrx_reset(dev);
5099 5014
5100 /* disable interrupts on the nic or we will lock up */ 5015 /* disable interrupts on the nic or we will lock up */
@@ -5107,7 +5022,7 @@ static int nv_close(struct net_device *dev)
5107 5022
5108 nv_free_irq(dev); 5023 nv_free_irq(dev);
5109 5024
5110 drain_ring(dev); 5025 nv_drain_rxtx(dev);
5111 5026
5112 if (np->wolenabled) { 5027 if (np->wolenabled) {
5113 writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags); 5028 writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags);
@@ -5267,7 +5182,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
5267 np->rx_ring_size = RX_RING_DEFAULT; 5182 np->rx_ring_size = RX_RING_DEFAULT;
5268 np->tx_ring_size = TX_RING_DEFAULT; 5183 np->tx_ring_size = TX_RING_DEFAULT;
5269 5184
5270 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 5185 if (!nv_optimized(np)) {
5271 np->rx_ring.orig = pci_alloc_consistent(pci_dev, 5186 np->rx_ring.orig = pci_alloc_consistent(pci_dev,
5272 sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size), 5187 sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
5273 &np->ring_addr); 5188 &np->ring_addr);
@@ -5289,7 +5204,8 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
5289 5204
5290 dev->open = nv_open; 5205 dev->open = nv_open;
5291 dev->stop = nv_close; 5206 dev->stop = nv_close;
5292 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) 5207
5208 if (!nv_optimized(np))
5293 dev->hard_start_xmit = nv_start_xmit; 5209 dev->hard_start_xmit = nv_start_xmit;
5294 else 5210 else
5295 dev->hard_start_xmit = nv_start_xmit_optimized; 5211 dev->hard_start_xmit = nv_start_xmit_optimized;
diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c
index 940e2041ba38..67b4b0728fce 100644
--- a/drivers/net/fs_enet/fs_enet-main.c
+++ b/drivers/net/fs_enet/fs_enet-main.c
@@ -1178,7 +1178,7 @@ static int __devinit find_phy(struct device_node *np,
1178 1178
1179 data = of_get_property(np, "fixed-link", NULL); 1179 data = of_get_property(np, "fixed-link", NULL);
1180 if (data) { 1180 if (data) {
1181 snprintf(fpi->bus_id, 16, PHY_ID_FMT, 0, *data); 1181 snprintf(fpi->bus_id, 16, "%x:%02x", 0, *data);
1182 return 0; 1182 return 0;
1183 } 1183 }
1184 1184
@@ -1202,7 +1202,7 @@ static int __devinit find_phy(struct device_node *np,
1202 if (!data || len != 4) 1202 if (!data || len != 4)
1203 goto out_put_mdio; 1203 goto out_put_mdio;
1204 1204
1205 snprintf(fpi->bus_id, 16, PHY_ID_FMT, res.start, *data); 1205 snprintf(fpi->bus_id, 16, "%x:%02x", res.start, *data);
1206 1206
1207out_put_mdio: 1207out_put_mdio:
1208 of_node_put(mdionode); 1208 of_node_put(mdionode);
diff --git a/drivers/net/fs_enet/mii-bitbang.c b/drivers/net/fs_enet/mii-bitbang.c
index b8e4a736a130..1620030cd33c 100644
--- a/drivers/net/fs_enet/mii-bitbang.c
+++ b/drivers/net/fs_enet/mii-bitbang.c
@@ -130,7 +130,7 @@ static int __devinit fs_mii_bitbang_init(struct mii_bus *bus,
130 * we get is an int, and the odds of multiple bitbang mdio buses 130 * we get is an int, and the odds of multiple bitbang mdio buses
131 * is low enough that it's not worth going too crazy. 131 * is low enough that it's not worth going too crazy.
132 */ 132 */
133 bus->id = res.start; 133 snprintf(bus->id, MII_BUS_ID_SIZE, "%x", res.start);
134 134
135 data = of_get_property(np, "fsl,mdio-pin", &len); 135 data = of_get_property(np, "fsl,mdio-pin", &len);
136 if (!data || len != 4) 136 if (!data || len != 4)
@@ -307,7 +307,7 @@ static int __devinit fs_enet_mdio_probe(struct device *dev)
307 return -ENOMEM; 307 return -ENOMEM;
308 308
309 new_bus->name = "BB MII Bus", 309 new_bus->name = "BB MII Bus",
310 new_bus->id = pdev->id; 310 snprintf(new_bus->id, MII_BUS_ID_SIZE, "%x", pdev->id);
311 311
312 new_bus->phy_mask = ~0x9; 312 new_bus->phy_mask = ~0x9;
313 pdata = (struct fs_mii_bb_platform_info *)pdev->dev.platform_data; 313 pdata = (struct fs_mii_bb_platform_info *)pdev->dev.platform_data;
diff --git a/drivers/net/fs_enet/mii-fec.c b/drivers/net/fs_enet/mii-fec.c
index a89cf15090b8..ba75efc9f5b5 100644
--- a/drivers/net/fs_enet/mii-fec.c
+++ b/drivers/net/fs_enet/mii-fec.c
@@ -196,7 +196,7 @@ static int __devinit fs_enet_mdio_probe(struct of_device *ofdev,
196 if (ret) 196 if (ret)
197 return ret; 197 return ret;
198 198
199 new_bus->id = res.start; 199 snprintf(new_bus->id, MII_BUS_ID_SIZE, "%x", res.start);
200 200
201 fec->fecp = ioremap(res.start, res.end - res.start + 1); 201 fec->fecp = ioremap(res.start, res.end - res.start + 1);
202 if (!fec->fecp) 202 if (!fec->fecp)
@@ -309,7 +309,7 @@ static int __devinit fs_enet_fec_mdio_probe(struct device *dev)
309 new_bus->read = &fs_enet_fec_mii_read, 309 new_bus->read = &fs_enet_fec_mii_read,
310 new_bus->write = &fs_enet_fec_mii_write, 310 new_bus->write = &fs_enet_fec_mii_write,
311 new_bus->reset = &fs_enet_fec_mii_reset, 311 new_bus->reset = &fs_enet_fec_mii_reset,
312 new_bus->id = pdev->id; 312 snprintf(new_bus->id, MII_BUS_ID_SIZE, "%x", pdev->id);
313 313
314 pdata = (struct fs_mii_fec_platform_info *)pdev->dev.platform_data; 314 pdata = (struct fs_mii_fec_platform_info *)pdev->dev.platform_data;
315 315
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 718cf77e345a..c8c3df737d73 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -1185,7 +1185,7 @@ static int gfar_change_mtu(struct net_device *dev, int new_mtu)
1185 int frame_size = new_mtu + ETH_HLEN; 1185 int frame_size = new_mtu + ETH_HLEN;
1186 1186
1187 if (priv->vlan_enable) 1187 if (priv->vlan_enable)
1188 frame_size += VLAN_ETH_HLEN; 1188 frame_size += VLAN_HLEN;
1189 1189
1190 if (gfar_uses_fcb(priv)) 1190 if (gfar_uses_fcb(priv))
1191 frame_size += GMAC_FCB_LEN; 1191 frame_size += GMAC_FCB_LEN;
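The one-line gianfar change fixes an over-count: ETH_HLEN (14 bytes) is already part of frame_size, and VLAN_ETH_HLEN is ETH_HLEN + VLAN_HLEN (18 bytes), so the old code charged the Ethernet header twice on VLAN-enabled ports; only the 4-byte 802.1Q tag is extra. A small restatement of the corrected arithmetic in a hypothetical helper (fcb_len stands in for GMAC_FCB_LEN when the frame-control block is in use):

#include <linux/types.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>

static int example_frame_size(int new_mtu, bool vlan_enabled, int fcb_len)
{
	int frame_size = new_mtu + ETH_HLEN;	/* 14-byte Ethernet header */

	if (vlan_enabled)
		frame_size += VLAN_HLEN;	/* 4-byte 802.1Q tag only */

	return frame_size + fcb_len;
}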
@@ -1250,17 +1250,12 @@ static void gfar_timeout(struct net_device *dev)
1250} 1250}
1251 1251
1252/* Interrupt Handler for Transmit complete */ 1252/* Interrupt Handler for Transmit complete */
1253static irqreturn_t gfar_transmit(int irq, void *dev_id) 1253int gfar_clean_tx_ring(struct net_device *dev)
1254{ 1254{
1255 struct net_device *dev = (struct net_device *) dev_id;
1256 struct gfar_private *priv = netdev_priv(dev);
1257 struct txbd8 *bdp; 1255 struct txbd8 *bdp;
1256 struct gfar_private *priv = netdev_priv(dev);
1257 int howmany = 0;
1258 1258
1259 /* Clear IEVENT */
1260 gfar_write(&priv->regs->ievent, IEVENT_TX_MASK);
1261
1262 /* Lock priv */
1263 spin_lock(&priv->txlock);
1264 bdp = priv->dirty_tx; 1259 bdp = priv->dirty_tx;
1265 while ((bdp->status & TXBD_READY) == 0) { 1260 while ((bdp->status & TXBD_READY) == 0) {
1266 /* If dirty_tx and cur_tx are the same, then either the */ 1261 /* If dirty_tx and cur_tx are the same, then either the */
@@ -1269,7 +1264,7 @@ static irqreturn_t gfar_transmit(int irq, void *dev_id)
1269 if ((bdp == priv->cur_tx) && (netif_queue_stopped(dev) == 0)) 1264 if ((bdp == priv->cur_tx) && (netif_queue_stopped(dev) == 0))
1270 break; 1265 break;
1271 1266
1272 dev->stats.tx_packets++; 1267 howmany++;
1273 1268
1274 /* Deferred means some collisions occurred during transmit, */ 1269 /* Deferred means some collisions occurred during transmit, */
1275 /* but we eventually sent the packet. */ 1270 /* but we eventually sent the packet. */
@@ -1278,11 +1273,15 @@ static irqreturn_t gfar_transmit(int irq, void *dev_id)
1278 1273
1279 /* Free the sk buffer associated with this TxBD */ 1274 /* Free the sk buffer associated with this TxBD */
1280 dev_kfree_skb_irq(priv->tx_skbuff[priv->skb_dirtytx]); 1275 dev_kfree_skb_irq(priv->tx_skbuff[priv->skb_dirtytx]);
1276
1281 priv->tx_skbuff[priv->skb_dirtytx] = NULL; 1277 priv->tx_skbuff[priv->skb_dirtytx] = NULL;
1282 priv->skb_dirtytx = 1278 priv->skb_dirtytx =
1283 (priv->skb_dirtytx + 1279 (priv->skb_dirtytx +
1284 1) & TX_RING_MOD_MASK(priv->tx_ring_size); 1280 1) & TX_RING_MOD_MASK(priv->tx_ring_size);
1285 1281
1282 /* Clean BD length for empty detection */
1283 bdp->length = 0;
1284
1286 /* update bdp to point at next bd in the ring (wrapping if necessary) */ 1285 /* update bdp to point at next bd in the ring (wrapping if necessary) */
1287 if (bdp->status & TXBD_WRAP) 1286 if (bdp->status & TXBD_WRAP)
1288 bdp = priv->tx_bd_base; 1287 bdp = priv->tx_bd_base;
@@ -1297,13 +1296,32 @@ static irqreturn_t gfar_transmit(int irq, void *dev_id)
1297 netif_wake_queue(dev); 1296 netif_wake_queue(dev);
1298 } /* while ((bdp->status & TXBD_READY) == 0) */ 1297 } /* while ((bdp->status & TXBD_READY) == 0) */
1299 1298
1299 dev->stats.tx_packets += howmany;
1300
1301 return howmany;
1302}
1303
1304/* Interrupt Handler for Transmit complete */
1305static irqreturn_t gfar_transmit(int irq, void *dev_id)
1306{
1307 struct net_device *dev = (struct net_device *) dev_id;
1308 struct gfar_private *priv = netdev_priv(dev);
1309
1310 /* Clear IEVENT */
1311 gfar_write(&priv->regs->ievent, IEVENT_TX_MASK);
1312
1313 /* Lock priv */
1314 spin_lock(&priv->txlock);
1315
1316 gfar_clean_tx_ring(dev);
1317
1300 /* If we are coalescing the interrupts, reset the timer */ 1318 /* If we are coalescing the interrupts, reset the timer */
1301 /* Otherwise, clear it */ 1319 /* Otherwise, clear it */
1302 if (priv->txcoalescing) 1320 if (likely(priv->txcoalescing)) {
1321 gfar_write(&priv->regs->txic, 0);
1303 gfar_write(&priv->regs->txic, 1322 gfar_write(&priv->regs->txic,
1304 mk_ic_value(priv->txcount, priv->txtime)); 1323 mk_ic_value(priv->txcount, priv->txtime));
1305 else 1324 }
1306 gfar_write(&priv->regs->txic, 0);
1307 1325
1308 spin_unlock(&priv->txlock); 1326 spin_unlock(&priv->txlock);
1309 1327
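Two things happen in this hunk: the descriptor walk moves into gfar_clean_tx_ring() so the NAPI poll routine below can reuse it, and the coalescing register is cleared before being re-armed. A condensed sketch of how the TX interrupt handler reads after the split; everything is taken from the hunk except the trailing return, which is assumed:

static irqreturn_t gfar_transmit(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct gfar_private *priv = netdev_priv(dev);

	gfar_write(&priv->regs->ievent, IEVENT_TX_MASK);	/* acknowledge TX events */

	spin_lock(&priv->txlock);
	gfar_clean_tx_ring(dev);	/* reclaim completed BDs, update tx_packets */

	if (likely(priv->txcoalescing)) {
		gfar_write(&priv->regs->txic, 0);	/* restart the coalescing timer */
		gfar_write(&priv->regs->txic,
			   mk_ic_value(priv->txcount, priv->txtime));
	}
	spin_unlock(&priv->txlock);

	return IRQ_HANDLED;
}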
@@ -1392,15 +1410,15 @@ irqreturn_t gfar_receive(int irq, void *dev_id)
1392 unsigned long flags; 1410 unsigned long flags;
1393#endif 1411#endif
1394 1412
1395 /* Clear IEVENT, so rx interrupt isn't called again
1396 * because of this interrupt */
1397 gfar_write(&priv->regs->ievent, IEVENT_RX_MASK);
1398
1399 /* support NAPI */ 1413 /* support NAPI */
1400#ifdef CONFIG_GFAR_NAPI 1414#ifdef CONFIG_GFAR_NAPI
1415 /* Clear IEVENT, so interrupts aren't called again
1416 * because of the packets that have already arrived */
1417 gfar_write(&priv->regs->ievent, IEVENT_RTX_MASK);
1418
1401 if (netif_rx_schedule_prep(dev, &priv->napi)) { 1419 if (netif_rx_schedule_prep(dev, &priv->napi)) {
1402 tempval = gfar_read(&priv->regs->imask); 1420 tempval = gfar_read(&priv->regs->imask);
1403 tempval &= IMASK_RX_DISABLED; 1421 tempval &= IMASK_RTX_DISABLED;
1404 gfar_write(&priv->regs->imask, tempval); 1422 gfar_write(&priv->regs->imask, tempval);
1405 1423
1406 __netif_rx_schedule(dev, &priv->napi); 1424 __netif_rx_schedule(dev, &priv->napi);
@@ -1411,17 +1429,20 @@ irqreturn_t gfar_receive(int irq, void *dev_id)
1411 gfar_read(&priv->regs->imask)); 1429 gfar_read(&priv->regs->imask));
1412 } 1430 }
1413#else 1431#else
1432 /* Clear IEVENT, so rx interrupt isn't called again
1433 * because of this interrupt */
1434 gfar_write(&priv->regs->ievent, IEVENT_RX_MASK);
1414 1435
1415 spin_lock_irqsave(&priv->rxlock, flags); 1436 spin_lock_irqsave(&priv->rxlock, flags);
1416 gfar_clean_rx_ring(dev, priv->rx_ring_size); 1437 gfar_clean_rx_ring(dev, priv->rx_ring_size);
1417 1438
1418 /* If we are coalescing interrupts, update the timer */ 1439 /* If we are coalescing interrupts, update the timer */
1419 /* Otherwise, clear it */ 1440 /* Otherwise, clear it */
1420 if (priv->rxcoalescing) 1441 if (likely(priv->rxcoalescing)) {
1442 gfar_write(&priv->regs->rxic, 0);
1421 gfar_write(&priv->regs->rxic, 1443 gfar_write(&priv->regs->rxic,
1422 mk_ic_value(priv->rxcount, priv->rxtime)); 1444 mk_ic_value(priv->rxcount, priv->rxtime));
1423 else 1445 }
1424 gfar_write(&priv->regs->rxic, 0);
1425 1446
1426 spin_unlock_irqrestore(&priv->rxlock, flags); 1447 spin_unlock_irqrestore(&priv->rxlock, flags);
1427#endif 1448#endif
@@ -1526,9 +1547,7 @@ int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
1526 rmb(); 1547 rmb();
1527 skb = priv->rx_skbuff[priv->skb_currx]; 1548 skb = priv->rx_skbuff[priv->skb_currx];
1528 1549
1529 if (!(bdp->status & 1550 if ((bdp->status & RXBD_LAST) && !(bdp->status & RXBD_ERR)) {
1530 (RXBD_LARGE | RXBD_SHORT | RXBD_NONOCTET
1531 | RXBD_CRCERR | RXBD_OVERRUN | RXBD_TRUNCATED))) {
1532 /* Increment the number of packets */ 1551 /* Increment the number of packets */
1533 dev->stats.rx_packets++; 1552 dev->stats.rx_packets++;
1534 howmany++; 1553 howmany++;
@@ -1582,6 +1601,13 @@ static int gfar_poll(struct napi_struct *napi, int budget)
1582 struct gfar_private *priv = container_of(napi, struct gfar_private, napi); 1601 struct gfar_private *priv = container_of(napi, struct gfar_private, napi);
1583 struct net_device *dev = priv->dev; 1602 struct net_device *dev = priv->dev;
1584 int howmany; 1603 int howmany;
1604 unsigned long flags;
1605
1606 /* If we fail to get the lock, don't bother with the TX BDs */
1607 if (spin_trylock_irqsave(&priv->txlock, flags)) {
1608 gfar_clean_tx_ring(dev);
1609 spin_unlock_irqrestore(&priv->txlock, flags);
1610 }
1585 1611
1586 howmany = gfar_clean_rx_ring(dev, budget); 1612 howmany = gfar_clean_rx_ring(dev, budget);
1587 1613
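The TX cleanup added to gfar_poll() deliberately uses spin_trylock_irqsave(): if the TX interrupt handler (or another CPU) already holds txlock, the poll routine skips the reclaim rather than spinning with interrupts disabled, and the RX budget is processed either way; a later poll or TX interrupt picks up the deferred work. A small sketch of that idiom outside the driver:

#include <linux/spinlock.h>

/* Hypothetical helper: do optional cleanup only if the lock is free. */
static void example_optional_cleanup(spinlock_t *lock)
{
	unsigned long flags;

	if (spin_trylock_irqsave(lock, flags)) {
		/* ... reclaim completed descriptors ... */
		spin_unlock_irqrestore(lock, flags);
	}
	/* If the lock was contended, simply try again next time around. */
}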
@@ -1595,11 +1621,11 @@ static int gfar_poll(struct napi_struct *napi, int budget)
1595 1621
1596 /* If we are coalescing interrupts, update the timer */ 1622 /* If we are coalescing interrupts, update the timer */
1597 /* Otherwise, clear it */ 1623 /* Otherwise, clear it */
1598 if (priv->rxcoalescing) 1624 if (likely(priv->rxcoalescing)) {
1625 gfar_write(&priv->regs->rxic, 0);
1599 gfar_write(&priv->regs->rxic, 1626 gfar_write(&priv->regs->rxic,
1600 mk_ic_value(priv->rxcount, priv->rxtime)); 1627 mk_ic_value(priv->rxcount, priv->rxtime));
1601 else 1628 }
1602 gfar_write(&priv->regs->rxic, 0);
1603 } 1629 }
1604 1630
1605 return howmany; 1631 return howmany;
diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h
index 46cd7735e6fe..0d0883609469 100644
--- a/drivers/net/gianfar.h
+++ b/drivers/net/gianfar.h
@@ -102,7 +102,7 @@ extern const char gfar_driver_version[];
102#define DEFAULT_FIFO_TX_STARVE 0x40 102#define DEFAULT_FIFO_TX_STARVE 0x40
103#define DEFAULT_FIFO_TX_STARVE_OFF 0x80 103#define DEFAULT_FIFO_TX_STARVE_OFF 0x80
104#define DEFAULT_BD_STASH 1 104#define DEFAULT_BD_STASH 1
105#define DEFAULT_STASH_LENGTH 64 105#define DEFAULT_STASH_LENGTH 96
106#define DEFAULT_STASH_INDEX 0 106#define DEFAULT_STASH_INDEX 0
107 107
108/* The number of Exact Match registers */ 108/* The number of Exact Match registers */
@@ -124,11 +124,18 @@ extern const char gfar_driver_version[];
124 124
125#define DEFAULT_TX_COALESCE 1 125#define DEFAULT_TX_COALESCE 1
126#define DEFAULT_TXCOUNT 16 126#define DEFAULT_TXCOUNT 16
127#define DEFAULT_TXTIME 4 127#define DEFAULT_TXTIME 21
128 128
129#define DEFAULT_RXTIME 21
130
131/* Non NAPI Case */
132#ifndef CONFIG_GFAR_NAPI
129#define DEFAULT_RX_COALESCE 1 133#define DEFAULT_RX_COALESCE 1
130#define DEFAULT_RXCOUNT 16 134#define DEFAULT_RXCOUNT 16
131#define DEFAULT_RXTIME 4 135#else
136#define DEFAULT_RX_COALESCE 0
137#define DEFAULT_RXCOUNT 0
138#endif /* CONFIG_GFAR_NAPI */
132 139
133#define TBIPA_VALUE 0x1f 140#define TBIPA_VALUE 0x1f
134#define MIIMCFG_INIT_VALUE 0x00000007 141#define MIIMCFG_INIT_VALUE 0x00000007
@@ -242,6 +249,7 @@ extern const char gfar_driver_version[];
242#define IEVENT_PERR 0x00000001 249#define IEVENT_PERR 0x00000001
243#define IEVENT_RX_MASK (IEVENT_RXB0 | IEVENT_RXF0) 250#define IEVENT_RX_MASK (IEVENT_RXB0 | IEVENT_RXF0)
244#define IEVENT_TX_MASK (IEVENT_TXB | IEVENT_TXF) 251#define IEVENT_TX_MASK (IEVENT_TXB | IEVENT_TXF)
252#define IEVENT_RTX_MASK (IEVENT_RX_MASK | IEVENT_TX_MASK)
245#define IEVENT_ERR_MASK \ 253#define IEVENT_ERR_MASK \
246(IEVENT_RXC | IEVENT_BSY | IEVENT_EBERR | IEVENT_MSRO | \ 254(IEVENT_RXC | IEVENT_BSY | IEVENT_EBERR | IEVENT_MSRO | \
247 IEVENT_BABT | IEVENT_TXC | IEVENT_TXE | IEVENT_LC \ 255 IEVENT_BABT | IEVENT_TXC | IEVENT_TXE | IEVENT_LC \
@@ -269,11 +277,12 @@ extern const char gfar_driver_version[];
269#define IMASK_FIQ 0x00000004 277#define IMASK_FIQ 0x00000004
270#define IMASK_DPE 0x00000002 278#define IMASK_DPE 0x00000002
271#define IMASK_PERR 0x00000001 279#define IMASK_PERR 0x00000001
272#define IMASK_RX_DISABLED ~(IMASK_RXFEN0 | IMASK_BSY)
273#define IMASK_DEFAULT (IMASK_TXEEN | IMASK_TXFEN | IMASK_TXBEN | \ 280#define IMASK_DEFAULT (IMASK_TXEEN | IMASK_TXFEN | IMASK_TXBEN | \
274 IMASK_RXFEN0 | IMASK_BSY | IMASK_EBERR | IMASK_BABR | \ 281 IMASK_RXFEN0 | IMASK_BSY | IMASK_EBERR | IMASK_BABR | \
275 IMASK_XFUN | IMASK_RXC | IMASK_BABT | IMASK_DPE \ 282 IMASK_XFUN | IMASK_RXC | IMASK_BABT | IMASK_DPE \
276 | IMASK_PERR) 283 | IMASK_PERR)
284#define IMASK_RTX_DISABLED ((~(IMASK_RXFEN0 | IMASK_TXFEN | IMASK_BSY)) \
285 & IMASK_DEFAULT)
277 286
278/* Fifo management */ 287/* Fifo management */
279#define FIFO_TX_THR_MASK 0x01ff 288#define FIFO_TX_THR_MASK 0x01ff
@@ -340,6 +349,9 @@ extern const char gfar_driver_version[];
340#define RXBD_OVERRUN 0x0002 349#define RXBD_OVERRUN 0x0002
341#define RXBD_TRUNCATED 0x0001 350#define RXBD_TRUNCATED 0x0001
342#define RXBD_STATS 0x01ff 351#define RXBD_STATS 0x01ff
352#define RXBD_ERR (RXBD_LARGE | RXBD_SHORT | RXBD_NONOCTET \
353 | RXBD_CRCERR | RXBD_OVERRUN \
354 | RXBD_TRUNCATED)
343 355
344/* Rx FCB status field bits */ 356/* Rx FCB status field bits */
345#define RXFCB_VLN 0x8000 357#define RXFCB_VLN 0x8000
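RXBD_ERR just ORs the individual receive error bits together so the receive path (see the gfar_clean_rx_ring() hunk above) can test them in one go. The resulting check, restated as a hypothetical predicate; RXBD_LAST/RXBD_ERR come from this header and status is the 16-bit BD status word:

#include <linux/types.h>

static bool example_rx_frame_ok(u16 status)
{
	/* Complete, error-free frame: last BD of the frame, no error bits. */
	return (status & RXBD_LAST) && !(status & RXBD_ERR);
}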
diff --git a/drivers/net/gianfar_mii.c b/drivers/net/gianfar_mii.c
index 24327629bf03..b8898927236a 100644
--- a/drivers/net/gianfar_mii.c
+++ b/drivers/net/gianfar_mii.c
@@ -173,7 +173,7 @@ int gfar_mdio_probe(struct device *dev)
173 new_bus->read = &gfar_mdio_read, 173 new_bus->read = &gfar_mdio_read,
174 new_bus->write = &gfar_mdio_write, 174 new_bus->write = &gfar_mdio_write,
175 new_bus->reset = &gfar_mdio_reset, 175 new_bus->reset = &gfar_mdio_reset,
176 new_bus->id = pdev->id; 176 snprintf(new_bus->id, MII_BUS_ID_SIZE, "%x", pdev->id);
177 177
178 pdata = (struct gianfar_mdio_data *)pdev->dev.platform_data; 178 pdata = (struct gianfar_mdio_data *)pdev->dev.platform_data;
179 179
diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c
index 5ddf8b0c34f9..5f4b4c6c9f76 100644
--- a/drivers/net/hamradio/bpqether.c
+++ b/drivers/net/hamradio/bpqether.c
@@ -172,7 +172,7 @@ static int bpq_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_ty
172 struct ethhdr *eth; 172 struct ethhdr *eth;
173 struct bpqdev *bpq; 173 struct bpqdev *bpq;
174 174
175 if (dev->nd_net != &init_net) 175 if (dev_net(dev) != &init_net)
176 goto drop; 176 goto drop;
177 177
178 if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) 178 if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
@@ -553,7 +553,7 @@ static int bpq_device_event(struct notifier_block *this,unsigned long event, voi
553{ 553{
554 struct net_device *dev = (struct net_device *)ptr; 554 struct net_device *dev = (struct net_device *)ptr;
555 555
556 if (dev->nd_net != &init_net) 556 if (dev_net(dev) != &init_net)
557 return NOTIFY_DONE; 557 return NOTIFY_DONE;
558 558
559 if (!dev_is_ethdev(dev)) 559 if (!dev_is_ethdev(dev))
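The bpqether hunks switch from poking dev->nd_net directly to the dev_net() accessor that came in with network namespaces; when CONFIG_NET_NS is off the accessor simply returns &init_net, so the check costs nothing there. The idiom for a driver that only serves the initial namespace, shown in a hypothetical packet-type handler:

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>

static int example_rcv(struct sk_buff *skb, struct net_device *dev,
		       struct packet_type *pt, struct net_device *orig_dev)
{
	if (dev_net(dev) != &init_net)
		goto drop;

	/* ... normal receive handling ... */
	return 0;

drop:
	kfree_skb(skb);
	return 0;
}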
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index 57772bebff56..bb31e09899fc 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -1259,26 +1259,7 @@ static void ibmveth_proc_unregister_driver(void)
1259 remove_proc_entry(IBMVETH_PROC_DIR, init_net.proc_net); 1259 remove_proc_entry(IBMVETH_PROC_DIR, init_net.proc_net);
1260} 1260}
1261 1261
1262static void *ibmveth_seq_start(struct seq_file *seq, loff_t *pos) 1262static int ibmveth_show(struct seq_file *seq, void *v)
1263{
1264 if (*pos == 0) {
1265 return (void *)1;
1266 } else {
1267 return NULL;
1268 }
1269}
1270
1271static void *ibmveth_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1272{
1273 ++*pos;
1274 return NULL;
1275}
1276
1277static void ibmveth_seq_stop(struct seq_file *seq, void *v)
1278{
1279}
1280
1281static int ibmveth_seq_show(struct seq_file *seq, void *v)
1282{ 1263{
1283 struct ibmveth_adapter *adapter = seq->private; 1264 struct ibmveth_adapter *adapter = seq->private;
1284 char *current_mac = ((char*) &adapter->netdev->dev_addr); 1265 char *current_mac = ((char*) &adapter->netdev->dev_addr);
@@ -1302,27 +1283,10 @@ static int ibmveth_seq_show(struct seq_file *seq, void *v)
1302 1283
1303 return 0; 1284 return 0;
1304} 1285}
1305static struct seq_operations ibmveth_seq_ops = {
1306 .start = ibmveth_seq_start,
1307 .next = ibmveth_seq_next,
1308 .stop = ibmveth_seq_stop,
1309 .show = ibmveth_seq_show,
1310};
1311 1286
1312static int ibmveth_proc_open(struct inode *inode, struct file *file) 1287static int ibmveth_proc_open(struct inode *inode, struct file *file)
1313{ 1288{
1314 struct seq_file *seq; 1289 return single_open(file, ibmveth_show, PDE(inode)->data);
1315 struct proc_dir_entry *proc;
1316 int rc;
1317
1318 rc = seq_open(file, &ibmveth_seq_ops);
1319 if (!rc) {
1320 /* recover the pointer buried in proc_dir_entry data */
1321 seq = file->private_data;
1322 proc = PDE(inode);
1323 seq->private = proc->data;
1324 }
1325 return rc;
1326} 1290}
1327 1291
1328static const struct file_operations ibmveth_proc_fops = { 1292static const struct file_operations ibmveth_proc_fops = {
@@ -1330,7 +1294,7 @@ static const struct file_operations ibmveth_proc_fops = {
1330 .open = ibmveth_proc_open, 1294 .open = ibmveth_proc_open,
1331 .read = seq_read, 1295 .read = seq_read,
1332 .llseek = seq_lseek, 1296 .llseek = seq_lseek,
1333 .release = seq_release, 1297 .release = single_release,
1334}; 1298};
1335 1299
1336static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter) 1300static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter)
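
The ibmveth conversion is the standard simplification for one-shot /proc files: when the whole output fits in a single show() call, single_open() replaces the start/next/stop boilerplate and stashes the private pointer, and single_release() pairs with it in the file_operations. A self-contained sketch of that pattern as a hypothetical module of the same era; the names and the data printed are illustrative, only the single_open()/single_release() calls mirror the diff:

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int example_show(struct seq_file *seq, void *v)
{
	const char *label = seq->private;	/* recovered from PDE(inode)->data */

	seq_printf(seq, "hello from %s\n", label);
	return 0;				/* one call renders the whole file */
}

static int example_open(struct inode *inode, struct file *file)
{
	return single_open(file, example_show, PDE(inode)->data);
}

static const struct file_operations example_proc_fops = {
	.owner   = THIS_MODULE,
	.open    = example_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,		/* pairs with single_open() */
};

static int __init example_init(void)
{
	struct proc_dir_entry *entry = create_proc_entry("example", 0444, NULL);

	if (!entry)
		return -ENOMEM;
	entry->proc_fops = &example_proc_fops;
	entry->data = "example";		/* what example_show() will see */
	return 0;
}

static void __exit example_exit(void)
{
	remove_proc_entry("example", NULL);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
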
diff --git a/drivers/net/ixgb/ixgb.h b/drivers/net/ixgb/ixgb.h
index 3d2e7217e9af..16f9c756aa46 100644
--- a/drivers/net/ixgb/ixgb.h
+++ b/drivers/net/ixgb/ixgb.h
@@ -117,8 +117,8 @@ struct ixgb_buffer {
117 struct sk_buff *skb; 117 struct sk_buff *skb;
118 dma_addr_t dma; 118 dma_addr_t dma;
119 unsigned long time_stamp; 119 unsigned long time_stamp;
120 uint16_t length; 120 u16 length;
121 uint16_t next_to_watch; 121 u16 next_to_watch;
122}; 122};
123 123
124struct ixgb_desc_ring { 124struct ixgb_desc_ring {
@@ -152,13 +152,12 @@ struct ixgb_desc_ring {
152struct ixgb_adapter { 152struct ixgb_adapter {
153 struct timer_list watchdog_timer; 153 struct timer_list watchdog_timer;
154 struct vlan_group *vlgrp; 154 struct vlan_group *vlgrp;
155 uint32_t bd_number; 155 u32 bd_number;
156 uint32_t rx_buffer_len; 156 u32 rx_buffer_len;
157 uint32_t part_num; 157 u32 part_num;
158 uint16_t link_speed; 158 u16 link_speed;
159 uint16_t link_duplex; 159 u16 link_duplex;
160 spinlock_t tx_lock; 160 spinlock_t tx_lock;
161 atomic_t irq_sem;
162 struct work_struct tx_timeout_task; 161 struct work_struct tx_timeout_task;
163 162
164 struct timer_list blink_timer; 163 struct timer_list blink_timer;
@@ -168,20 +167,20 @@ struct ixgb_adapter {
168 struct ixgb_desc_ring tx_ring ____cacheline_aligned_in_smp; 167 struct ixgb_desc_ring tx_ring ____cacheline_aligned_in_smp;
169 unsigned int restart_queue; 168 unsigned int restart_queue;
170 unsigned long timeo_start; 169 unsigned long timeo_start;
171 uint32_t tx_cmd_type; 170 u32 tx_cmd_type;
172 uint64_t hw_csum_tx_good; 171 u64 hw_csum_tx_good;
173 uint64_t hw_csum_tx_error; 172 u64 hw_csum_tx_error;
174 uint32_t tx_int_delay; 173 u32 tx_int_delay;
175 uint32_t tx_timeout_count; 174 u32 tx_timeout_count;
176 boolean_t tx_int_delay_enable; 175 bool tx_int_delay_enable;
177 boolean_t detect_tx_hung; 176 bool detect_tx_hung;
178 177
179 /* RX */ 178 /* RX */
180 struct ixgb_desc_ring rx_ring; 179 struct ixgb_desc_ring rx_ring;
181 uint64_t hw_csum_rx_error; 180 u64 hw_csum_rx_error;
182 uint64_t hw_csum_rx_good; 181 u64 hw_csum_rx_good;
183 uint32_t rx_int_delay; 182 u32 rx_int_delay;
184 boolean_t rx_csum; 183 bool rx_csum;
185 184
186 /* OS defined structs */ 185 /* OS defined structs */
187 struct napi_struct napi; 186 struct napi_struct napi;
@@ -193,8 +192,17 @@ struct ixgb_adapter {
193 struct ixgb_hw hw; 192 struct ixgb_hw hw;
194 u16 msg_enable; 193 u16 msg_enable;
195 struct ixgb_hw_stats stats; 194 struct ixgb_hw_stats stats;
196 uint32_t alloc_rx_buff_failed; 195 u32 alloc_rx_buff_failed;
197 boolean_t have_msi; 196 bool have_msi;
197 unsigned long flags;
198};
199
200enum ixgb_state_t {
201 /* TBD
202 __IXGB_TESTING,
203 __IXGB_RESETTING,
204 */
205 __IXGB_DOWN
198}; 206};
199 207
200/* Exported from other modules */ 208/* Exported from other modules */
@@ -203,4 +211,14 @@ extern void ixgb_set_ethtool_ops(struct net_device *netdev);
203extern char ixgb_driver_name[]; 211extern char ixgb_driver_name[];
204extern const char ixgb_driver_version[]; 212extern const char ixgb_driver_version[];
205 213
214extern int ixgb_up(struct ixgb_adapter *adapter);
215extern void ixgb_down(struct ixgb_adapter *adapter, bool kill_watchdog);
216extern void ixgb_reset(struct ixgb_adapter *adapter);
217extern int ixgb_setup_rx_resources(struct ixgb_adapter *adapter);
218extern int ixgb_setup_tx_resources(struct ixgb_adapter *adapter);
219extern void ixgb_free_rx_resources(struct ixgb_adapter *adapter);
220extern void ixgb_free_tx_resources(struct ixgb_adapter *adapter);
221extern void ixgb_update_stats(struct ixgb_adapter *adapter);
222
223
206#endif /* _IXGB_H_ */ 224#endif /* _IXGB_H_ */
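
Alongside the uintXX_t to uXX and boolean_t to bool conversions, the ixgb.h hunks drop irq_sem in favour of a flags word with an __IXGB_DOWN state bit, the atomic-bitops pattern used elsewhere in the Intel drivers to serialise up/down transitions. A hedged sketch of how such a bit is typically consumed; the helper names are illustrative:

#include <linux/bitops.h>

/* Mirrors the new fields in struct ixgb_adapter: a state bit in a flags word. */
struct example_adapter {
	unsigned long flags;
};

enum example_state_t {
	__EXAMPLE_DOWN,
};

static void example_down(struct example_adapter *adapter)
{
	/* test_and_set_bit() is atomic, so concurrent callers cannot both win
	 * the transition; only the first one proceeds to tear things down. */
	if (test_and_set_bit(__EXAMPLE_DOWN, &adapter->flags))
		return;
	/* ... stop queues, mask interrupts, free resources ... */
}

static void example_up(struct example_adapter *adapter)
{
	/* ... allocate resources, enable interrupts ... */
	clear_bit(__EXAMPLE_DOWN, &adapter->flags);
}
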
diff --git a/drivers/net/ixgb/ixgb_ee.c b/drivers/net/ixgb/ixgb_ee.c
index e8eb0fd6c576..2f7ed52c7502 100644
--- a/drivers/net/ixgb/ixgb_ee.c
+++ b/drivers/net/ixgb/ixgb_ee.c
@@ -29,14 +29,14 @@
29#include "ixgb_hw.h" 29#include "ixgb_hw.h"
30#include "ixgb_ee.h" 30#include "ixgb_ee.h"
31/* Local prototypes */ 31/* Local prototypes */
32static uint16_t ixgb_shift_in_bits(struct ixgb_hw *hw); 32static u16 ixgb_shift_in_bits(struct ixgb_hw *hw);
33 33
34static void ixgb_shift_out_bits(struct ixgb_hw *hw, 34static void ixgb_shift_out_bits(struct ixgb_hw *hw,
35 uint16_t data, 35 u16 data,
36 uint16_t count); 36 u16 count);
37static void ixgb_standby_eeprom(struct ixgb_hw *hw); 37static void ixgb_standby_eeprom(struct ixgb_hw *hw);
38 38
39static boolean_t ixgb_wait_eeprom_command(struct ixgb_hw *hw); 39static bool ixgb_wait_eeprom_command(struct ixgb_hw *hw);
40 40
41static void ixgb_cleanup_eeprom(struct ixgb_hw *hw); 41static void ixgb_cleanup_eeprom(struct ixgb_hw *hw);
42 42
@@ -48,7 +48,7 @@ static void ixgb_cleanup_eeprom(struct ixgb_hw *hw);
48 *****************************************************************************/ 48 *****************************************************************************/
49static void 49static void
50ixgb_raise_clock(struct ixgb_hw *hw, 50ixgb_raise_clock(struct ixgb_hw *hw,
51 uint32_t *eecd_reg) 51 u32 *eecd_reg)
52{ 52{
53 /* Raise the clock input to the EEPROM (by setting the SK bit), and then 53 /* Raise the clock input to the EEPROM (by setting the SK bit), and then
54 * wait 50 microseconds. 54 * wait 50 microseconds.
@@ -67,7 +67,7 @@ ixgb_raise_clock(struct ixgb_hw *hw,
67 *****************************************************************************/ 67 *****************************************************************************/
68static void 68static void
69ixgb_lower_clock(struct ixgb_hw *hw, 69ixgb_lower_clock(struct ixgb_hw *hw,
70 uint32_t *eecd_reg) 70 u32 *eecd_reg)
71{ 71{
72 /* Lower the clock input to the EEPROM (by clearing the SK bit), and then 72 /* Lower the clock input to the EEPROM (by clearing the SK bit), and then
73 * wait 50 microseconds. 73 * wait 50 microseconds.
@@ -87,11 +87,11 @@ ixgb_lower_clock(struct ixgb_hw *hw,
87 *****************************************************************************/ 87 *****************************************************************************/
88static void 88static void
89ixgb_shift_out_bits(struct ixgb_hw *hw, 89ixgb_shift_out_bits(struct ixgb_hw *hw,
90 uint16_t data, 90 u16 data,
91 uint16_t count) 91 u16 count)
92{ 92{
93 uint32_t eecd_reg; 93 u32 eecd_reg;
94 uint32_t mask; 94 u32 mask;
95 95
96 /* We need to shift "count" bits out to the EEPROM. So, value in the 96 /* We need to shift "count" bits out to the EEPROM. So, value in the
97 * "data" parameter will be shifted out to the EEPROM one bit at a time. 97 * "data" parameter will be shifted out to the EEPROM one bit at a time.
@@ -133,12 +133,12 @@ ixgb_shift_out_bits(struct ixgb_hw *hw,
133 * 133 *
134 * hw - Struct containing variables accessed by shared code 134 * hw - Struct containing variables accessed by shared code
135 *****************************************************************************/ 135 *****************************************************************************/
136static uint16_t 136static u16
137ixgb_shift_in_bits(struct ixgb_hw *hw) 137ixgb_shift_in_bits(struct ixgb_hw *hw)
138{ 138{
139 uint32_t eecd_reg; 139 u32 eecd_reg;
140 uint32_t i; 140 u32 i;
141 uint16_t data; 141 u16 data;
142 142
143 /* In order to read a register from the EEPROM, we need to shift 16 bits 143 /* In order to read a register from the EEPROM, we need to shift 16 bits
144 * in from the EEPROM. Bits are "shifted in" by raising the clock input to 144 * in from the EEPROM. Bits are "shifted in" by raising the clock input to
@@ -179,7 +179,7 @@ ixgb_shift_in_bits(struct ixgb_hw *hw)
179static void 179static void
180ixgb_setup_eeprom(struct ixgb_hw *hw) 180ixgb_setup_eeprom(struct ixgb_hw *hw)
181{ 181{
182 uint32_t eecd_reg; 182 u32 eecd_reg;
183 183
184 eecd_reg = IXGB_READ_REG(hw, EECD); 184 eecd_reg = IXGB_READ_REG(hw, EECD);
185 185
@@ -201,7 +201,7 @@ ixgb_setup_eeprom(struct ixgb_hw *hw)
201static void 201static void
202ixgb_standby_eeprom(struct ixgb_hw *hw) 202ixgb_standby_eeprom(struct ixgb_hw *hw)
203{ 203{
204 uint32_t eecd_reg; 204 u32 eecd_reg;
205 205
206 eecd_reg = IXGB_READ_REG(hw, EECD); 206 eecd_reg = IXGB_READ_REG(hw, EECD);
207 207
@@ -235,7 +235,7 @@ ixgb_standby_eeprom(struct ixgb_hw *hw)
235static void 235static void
236ixgb_clock_eeprom(struct ixgb_hw *hw) 236ixgb_clock_eeprom(struct ixgb_hw *hw)
237{ 237{
238 uint32_t eecd_reg; 238 u32 eecd_reg;
239 239
240 eecd_reg = IXGB_READ_REG(hw, EECD); 240 eecd_reg = IXGB_READ_REG(hw, EECD);
241 241
@@ -259,7 +259,7 @@ ixgb_clock_eeprom(struct ixgb_hw *hw)
259static void 259static void
260ixgb_cleanup_eeprom(struct ixgb_hw *hw) 260ixgb_cleanup_eeprom(struct ixgb_hw *hw)
261{ 261{
262 uint32_t eecd_reg; 262 u32 eecd_reg;
263 263
264 eecd_reg = IXGB_READ_REG(hw, EECD); 264 eecd_reg = IXGB_READ_REG(hw, EECD);
265 265
@@ -279,14 +279,14 @@ ixgb_cleanup_eeprom(struct ixgb_hw *hw)
279 * The command is done when the EEPROM's data out pin goes high. 279 * The command is done when the EEPROM's data out pin goes high.
280 * 280 *
281 * Returns: 281 * Returns:
282 * TRUE: EEPROM data pin is high before timeout. 282 * true: EEPROM data pin is high before timeout.
283 * FALSE: Time expired. 283 * false: Time expired.
284 *****************************************************************************/ 284 *****************************************************************************/
285static boolean_t 285static bool
286ixgb_wait_eeprom_command(struct ixgb_hw *hw) 286ixgb_wait_eeprom_command(struct ixgb_hw *hw)
287{ 287{
288 uint32_t eecd_reg; 288 u32 eecd_reg;
289 uint32_t i; 289 u32 i;
290 290
291 /* Toggle the CS line. This in effect tells to EEPROM to actually execute 291 /* Toggle the CS line. This in effect tells to EEPROM to actually execute
292 * the command in question. 292 * the command in question.
@@ -301,12 +301,12 @@ ixgb_wait_eeprom_command(struct ixgb_hw *hw)
301 eecd_reg = IXGB_READ_REG(hw, EECD); 301 eecd_reg = IXGB_READ_REG(hw, EECD);
302 302
303 if(eecd_reg & IXGB_EECD_DO) 303 if(eecd_reg & IXGB_EECD_DO)
304 return (TRUE); 304 return (true);
305 305
306 udelay(50); 306 udelay(50);
307 } 307 }
308 ASSERT(0); 308 ASSERT(0);
309 return (FALSE); 309 return (false);
310} 310}
311 311
312/****************************************************************************** 312/******************************************************************************
@@ -319,22 +319,22 @@ ixgb_wait_eeprom_command(struct ixgb_hw *hw)
319 * valid. 319 * valid.
320 * 320 *
321 * Returns: 321 * Returns:
322 * TRUE: Checksum is valid 322 * true: Checksum is valid
323 * FALSE: Checksum is not valid. 323 * false: Checksum is not valid.
324 *****************************************************************************/ 324 *****************************************************************************/
325boolean_t 325bool
326ixgb_validate_eeprom_checksum(struct ixgb_hw *hw) 326ixgb_validate_eeprom_checksum(struct ixgb_hw *hw)
327{ 327{
328 uint16_t checksum = 0; 328 u16 checksum = 0;
329 uint16_t i; 329 u16 i;
330 330
331 for(i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) 331 for(i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++)
332 checksum += ixgb_read_eeprom(hw, i); 332 checksum += ixgb_read_eeprom(hw, i);
333 333
334 if(checksum == (uint16_t) EEPROM_SUM) 334 if(checksum == (u16) EEPROM_SUM)
335 return (TRUE); 335 return (true);
336 else 336 else
337 return (FALSE); 337 return (false);
338} 338}
339 339
340/****************************************************************************** 340/******************************************************************************
@@ -348,13 +348,13 @@ ixgb_validate_eeprom_checksum(struct ixgb_hw *hw)
348void 348void
349ixgb_update_eeprom_checksum(struct ixgb_hw *hw) 349ixgb_update_eeprom_checksum(struct ixgb_hw *hw)
350{ 350{
351 uint16_t checksum = 0; 351 u16 checksum = 0;
352 uint16_t i; 352 u16 i;
353 353
354 for(i = 0; i < EEPROM_CHECKSUM_REG; i++) 354 for(i = 0; i < EEPROM_CHECKSUM_REG; i++)
355 checksum += ixgb_read_eeprom(hw, i); 355 checksum += ixgb_read_eeprom(hw, i);
356 356
357 checksum = (uint16_t) EEPROM_SUM - checksum; 357 checksum = (u16) EEPROM_SUM - checksum;
358 358
359 ixgb_write_eeprom(hw, EEPROM_CHECKSUM_REG, checksum); 359 ixgb_write_eeprom(hw, EEPROM_CHECKSUM_REG, checksum);
360 return; 360 return;
@@ -372,7 +372,7 @@ ixgb_update_eeprom_checksum(struct ixgb_hw *hw)
372 * 372 *
373 *****************************************************************************/ 373 *****************************************************************************/
374void 374void
375ixgb_write_eeprom(struct ixgb_hw *hw, uint16_t offset, uint16_t data) 375ixgb_write_eeprom(struct ixgb_hw *hw, u16 offset, u16 data)
376{ 376{
377 struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom; 377 struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
378 378
@@ -425,11 +425,11 @@ ixgb_write_eeprom(struct ixgb_hw *hw, uint16_t offset, uint16_t data)
425 * Returns: 425 * Returns:
426 * The 16-bit value read from the eeprom 426 * The 16-bit value read from the eeprom
427 *****************************************************************************/ 427 *****************************************************************************/
428uint16_t 428u16
429ixgb_read_eeprom(struct ixgb_hw *hw, 429ixgb_read_eeprom(struct ixgb_hw *hw,
430 uint16_t offset) 430 u16 offset)
431{ 431{
432 uint16_t data; 432 u16 data;
433 433
434 /* Prepare the EEPROM for reading */ 434 /* Prepare the EEPROM for reading */
435 ixgb_setup_eeprom(hw); 435 ixgb_setup_eeprom(hw);
@@ -457,14 +457,14 @@ ixgb_read_eeprom(struct ixgb_hw *hw,
457 * hw - Struct containing variables accessed by shared code 457 * hw - Struct containing variables accessed by shared code
458 * 458 *
459 * Returns: 459 * Returns:
460 * TRUE: if eeprom read is successful 460 * true: if eeprom read is successful
461 * FALSE: otherwise. 461 * false: otherwise.
462 *****************************************************************************/ 462 *****************************************************************************/
463boolean_t 463bool
464ixgb_get_eeprom_data(struct ixgb_hw *hw) 464ixgb_get_eeprom_data(struct ixgb_hw *hw)
465{ 465{
466 uint16_t i; 466 u16 i;
467 uint16_t checksum = 0; 467 u16 checksum = 0;
468 struct ixgb_ee_map_type *ee_map; 468 struct ixgb_ee_map_type *ee_map;
469 469
470 DEBUGFUNC("ixgb_get_eeprom_data"); 470 DEBUGFUNC("ixgb_get_eeprom_data");
@@ -473,27 +473,27 @@ ixgb_get_eeprom_data(struct ixgb_hw *hw)
473 473
474 DEBUGOUT("ixgb_ee: Reading eeprom data\n"); 474 DEBUGOUT("ixgb_ee: Reading eeprom data\n");
475 for(i = 0; i < IXGB_EEPROM_SIZE ; i++) { 475 for(i = 0; i < IXGB_EEPROM_SIZE ; i++) {
476 uint16_t ee_data; 476 u16 ee_data;
477 ee_data = ixgb_read_eeprom(hw, i); 477 ee_data = ixgb_read_eeprom(hw, i);
478 checksum += ee_data; 478 checksum += ee_data;
479 hw->eeprom[i] = cpu_to_le16(ee_data); 479 hw->eeprom[i] = cpu_to_le16(ee_data);
480 } 480 }
481 481
482 if (checksum != (uint16_t) EEPROM_SUM) { 482 if (checksum != (u16) EEPROM_SUM) {
483 DEBUGOUT("ixgb_ee: Checksum invalid.\n"); 483 DEBUGOUT("ixgb_ee: Checksum invalid.\n");
484 /* clear the init_ctrl_reg_1 to signify that the cache is 484 /* clear the init_ctrl_reg_1 to signify that the cache is
485 * invalidated */ 485 * invalidated */
486 ee_map->init_ctrl_reg_1 = cpu_to_le16(EEPROM_ICW1_SIGNATURE_CLEAR); 486 ee_map->init_ctrl_reg_1 = cpu_to_le16(EEPROM_ICW1_SIGNATURE_CLEAR);
487 return (FALSE); 487 return (false);
488 } 488 }
489 489
490 if ((ee_map->init_ctrl_reg_1 & cpu_to_le16(EEPROM_ICW1_SIGNATURE_MASK)) 490 if ((ee_map->init_ctrl_reg_1 & cpu_to_le16(EEPROM_ICW1_SIGNATURE_MASK))
491 != cpu_to_le16(EEPROM_ICW1_SIGNATURE_VALID)) { 491 != cpu_to_le16(EEPROM_ICW1_SIGNATURE_VALID)) {
492 DEBUGOUT("ixgb_ee: Signature invalid.\n"); 492 DEBUGOUT("ixgb_ee: Signature invalid.\n");
493 return(FALSE); 493 return(false);
494 } 494 }
495 495
496 return(TRUE); 496 return(true);
497} 497}
498 498
499/****************************************************************************** 499/******************************************************************************
@@ -503,17 +503,17 @@ ixgb_get_eeprom_data(struct ixgb_hw *hw)
503 * hw - Struct containing variables accessed by shared code 503 * hw - Struct containing variables accessed by shared code
504 * 504 *
505 * Returns: 505 * Returns:
506 * TRUE: eeprom signature was good and the eeprom read was successful 506 * true: eeprom signature was good and the eeprom read was successful
507 * FALSE: otherwise. 507 * false: otherwise.
508 ******************************************************************************/ 508 ******************************************************************************/
509static boolean_t 509static bool
510ixgb_check_and_get_eeprom_data (struct ixgb_hw* hw) 510ixgb_check_and_get_eeprom_data (struct ixgb_hw* hw)
511{ 511{
512 struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom; 512 struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
513 513
514 if ((ee_map->init_ctrl_reg_1 & cpu_to_le16(EEPROM_ICW1_SIGNATURE_MASK)) 514 if ((ee_map->init_ctrl_reg_1 & cpu_to_le16(EEPROM_ICW1_SIGNATURE_MASK))
515 == cpu_to_le16(EEPROM_ICW1_SIGNATURE_VALID)) { 515 == cpu_to_le16(EEPROM_ICW1_SIGNATURE_VALID)) {
516 return (TRUE); 516 return (true);
517 } else { 517 } else {
518 return ixgb_get_eeprom_data(hw); 518 return ixgb_get_eeprom_data(hw);
519 } 519 }
@@ -529,11 +529,11 @@ ixgb_check_and_get_eeprom_data (struct ixgb_hw* hw)
529 * Word at indexed offset in eeprom, if valid, 0 otherwise. 529 * Word at indexed offset in eeprom, if valid, 0 otherwise.
530 ******************************************************************************/ 530 ******************************************************************************/
531__le16 531__le16
532ixgb_get_eeprom_word(struct ixgb_hw *hw, uint16_t index) 532ixgb_get_eeprom_word(struct ixgb_hw *hw, u16 index)
533{ 533{
534 534
535 if ((index < IXGB_EEPROM_SIZE) && 535 if ((index < IXGB_EEPROM_SIZE) &&
536 (ixgb_check_and_get_eeprom_data(hw) == TRUE)) { 536 (ixgb_check_and_get_eeprom_data(hw) == true)) {
537 return(hw->eeprom[index]); 537 return(hw->eeprom[index]);
538 } 538 }
539 539
@@ -550,14 +550,14 @@ ixgb_get_eeprom_word(struct ixgb_hw *hw, uint16_t index)
550 ******************************************************************************/ 550 ******************************************************************************/
551void 551void
552ixgb_get_ee_mac_addr(struct ixgb_hw *hw, 552ixgb_get_ee_mac_addr(struct ixgb_hw *hw,
553 uint8_t *mac_addr) 553 u8 *mac_addr)
554{ 554{
555 int i; 555 int i;
556 struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom; 556 struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
557 557
558 DEBUGFUNC("ixgb_get_ee_mac_addr"); 558 DEBUGFUNC("ixgb_get_ee_mac_addr");
559 559
560 if (ixgb_check_and_get_eeprom_data(hw) == TRUE) { 560 if (ixgb_check_and_get_eeprom_data(hw) == true) {
561 for (i = 0; i < IXGB_ETH_LENGTH_OF_ADDRESS; i++) { 561 for (i = 0; i < IXGB_ETH_LENGTH_OF_ADDRESS; i++) {
562 mac_addr[i] = ee_map->mac_addr[i]; 562 mac_addr[i] = ee_map->mac_addr[i];
563 DEBUGOUT2("mac(%d) = %.2X\n", i, mac_addr[i]); 563 DEBUGOUT2("mac(%d) = %.2X\n", i, mac_addr[i]);
@@ -574,10 +574,10 @@ ixgb_get_ee_mac_addr(struct ixgb_hw *hw,
574 * Returns: 574 * Returns:
575 * PBA number if EEPROM contents are valid, 0 otherwise 575 * PBA number if EEPROM contents are valid, 0 otherwise
576 ******************************************************************************/ 576 ******************************************************************************/
577uint32_t 577u32
578ixgb_get_ee_pba_number(struct ixgb_hw *hw) 578ixgb_get_ee_pba_number(struct ixgb_hw *hw)
579{ 579{
580 if(ixgb_check_and_get_eeprom_data(hw) == TRUE) 580 if (ixgb_check_and_get_eeprom_data(hw) == true)
581 return (le16_to_cpu(hw->eeprom[EEPROM_PBA_1_2_REG]) 581 return (le16_to_cpu(hw->eeprom[EEPROM_PBA_1_2_REG])
582 | (le16_to_cpu(hw->eeprom[EEPROM_PBA_3_4_REG])<<16)); 582 | (le16_to_cpu(hw->eeprom[EEPROM_PBA_3_4_REG])<<16));
583 583
@@ -593,12 +593,12 @@ ixgb_get_ee_pba_number(struct ixgb_hw *hw)
593 * Returns: 593 * Returns:
594 * Device Id if EEPROM contents are valid, 0 otherwise 594 * Device Id if EEPROM contents are valid, 0 otherwise
595 ******************************************************************************/ 595 ******************************************************************************/
596uint16_t 596u16
597ixgb_get_ee_device_id(struct ixgb_hw *hw) 597ixgb_get_ee_device_id(struct ixgb_hw *hw)
598{ 598{
599 struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom; 599 struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
600 600
601 if(ixgb_check_and_get_eeprom_data(hw) == TRUE) 601 if (ixgb_check_and_get_eeprom_data(hw) == true)
602 return (le16_to_cpu(ee_map->device_id)); 602 return (le16_to_cpu(ee_map->device_id));
603 603
604 return (0); 604 return (0);
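
The checksum logic carried through these hunks is the usual Intel scheme: every 16-bit EEPROM word, including the checksum word itself, must sum modulo 2^16 to the fixed constant EEPROM_SUM, so the stored checksum is that constant minus the sum of the other words. A runnable userspace sketch of the arithmetic; the 0xBABA value is the conventional constant and is an assumption here, it does not appear in the hunks:

#include <stdint.h>
#include <stdio.h>

#define EEPROM_SUM	0xBABA	/* conventional Intel value; assumed for the sketch */
#define NWORDS		8	/* tiny pretend EEPROM: 7 data words + 1 checksum word */

static uint16_t compute_checksum(const uint16_t *ee, int checksum_reg)
{
	uint16_t sum = 0;
	int i;

	for (i = 0; i < checksum_reg; i++)	/* sum everything before the checksum word */
		sum += ee[i];
	return (uint16_t)(EEPROM_SUM - sum);	/* what ixgb_update_eeprom_checksum() stores */
}

static int checksum_valid(const uint16_t *ee, int checksum_reg)
{
	uint16_t sum = 0;
	int i;

	for (i = 0; i < checksum_reg + 1; i++)	/* include the checksum word this time */
		sum += ee[i];
	return sum == (uint16_t)EEPROM_SUM;	/* what ixgb_validate_eeprom_checksum() tests */
}

int main(void)
{
	uint16_t ee[NWORDS] = { 0x1234, 0xabcd, 0x0042, 0xffff, 0x0001, 0x8000, 0x00aa, 0 };

	ee[NWORDS - 1] = compute_checksum(ee, NWORDS - 1);
	printf("checksum word 0x%04x, valid=%d\n", ee[NWORDS - 1],
	       checksum_valid(ee, NWORDS - 1));
	return 0;
}
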
diff --git a/drivers/net/ixgb/ixgb_ee.h b/drivers/net/ixgb/ixgb_ee.h
index 7908bf3005ed..4b7bd0d4a8a9 100644
--- a/drivers/net/ixgb/ixgb_ee.h
+++ b/drivers/net/ixgb/ixgb_ee.h
@@ -75,7 +75,7 @@
75 75
76/* EEPROM structure */ 76/* EEPROM structure */
77struct ixgb_ee_map_type { 77struct ixgb_ee_map_type {
78 uint8_t mac_addr[IXGB_ETH_LENGTH_OF_ADDRESS]; 78 u8 mac_addr[IXGB_ETH_LENGTH_OF_ADDRESS];
79 __le16 compatibility; 79 __le16 compatibility;
80 __le16 reserved1[4]; 80 __le16 reserved1[4];
81 __le32 pba_number; 81 __le32 pba_number;
@@ -88,19 +88,19 @@ struct ixgb_ee_map_type {
88 __le16 oem_reserved[16]; 88 __le16 oem_reserved[16];
89 __le16 swdpins_reg; 89 __le16 swdpins_reg;
90 __le16 circuit_ctrl_reg; 90 __le16 circuit_ctrl_reg;
91 uint8_t d3_power; 91 u8 d3_power;
92 uint8_t d0_power; 92 u8 d0_power;
93 __le16 reserved2[28]; 93 __le16 reserved2[28];
94 __le16 checksum; 94 __le16 checksum;
95}; 95};
96 96
97/* EEPROM Functions */ 97/* EEPROM Functions */
98uint16_t ixgb_read_eeprom(struct ixgb_hw *hw, uint16_t reg); 98u16 ixgb_read_eeprom(struct ixgb_hw *hw, u16 reg);
99 99
100boolean_t ixgb_validate_eeprom_checksum(struct ixgb_hw *hw); 100bool ixgb_validate_eeprom_checksum(struct ixgb_hw *hw);
101 101
102void ixgb_update_eeprom_checksum(struct ixgb_hw *hw); 102void ixgb_update_eeprom_checksum(struct ixgb_hw *hw);
103 103
104void ixgb_write_eeprom(struct ixgb_hw *hw, uint16_t reg, uint16_t data); 104void ixgb_write_eeprom(struct ixgb_hw *hw, u16 reg, u16 data);
105 105
106#endif /* IXGB_EE_H */ 106#endif /* IXGB_EE_H */
diff --git a/drivers/net/ixgb/ixgb_ethtool.c b/drivers/net/ixgb/ixgb_ethtool.c
index 75f3a68ee354..8464d8a013b0 100644
--- a/drivers/net/ixgb/ixgb_ethtool.c
+++ b/drivers/net/ixgb/ixgb_ethtool.c
@@ -32,15 +32,6 @@
32 32
33#include <asm/uaccess.h> 33#include <asm/uaccess.h>
34 34
35extern int ixgb_up(struct ixgb_adapter *adapter);
36extern void ixgb_down(struct ixgb_adapter *adapter, boolean_t kill_watchdog);
37extern void ixgb_reset(struct ixgb_adapter *adapter);
38extern int ixgb_setup_rx_resources(struct ixgb_adapter *adapter);
39extern int ixgb_setup_tx_resources(struct ixgb_adapter *adapter);
40extern void ixgb_free_rx_resources(struct ixgb_adapter *adapter);
41extern void ixgb_free_tx_resources(struct ixgb_adapter *adapter);
42extern void ixgb_update_stats(struct ixgb_adapter *adapter);
43
44#define IXGB_ALL_RAR_ENTRIES 16 35#define IXGB_ALL_RAR_ENTRIES 16
45 36
46struct ixgb_stats { 37struct ixgb_stats {
@@ -136,7 +127,7 @@ ixgb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
136 return -EINVAL; 127 return -EINVAL;
137 128
138 if(netif_running(adapter->netdev)) { 129 if(netif_running(adapter->netdev)) {
139 ixgb_down(adapter, TRUE); 130 ixgb_down(adapter, true);
140 ixgb_reset(adapter); 131 ixgb_reset(adapter);
141 ixgb_up(adapter); 132 ixgb_up(adapter);
142 ixgb_set_speed_duplex(netdev); 133 ixgb_set_speed_duplex(netdev);
@@ -185,7 +176,7 @@ ixgb_set_pauseparam(struct net_device *netdev,
185 hw->fc.type = ixgb_fc_none; 176 hw->fc.type = ixgb_fc_none;
186 177
187 if(netif_running(adapter->netdev)) { 178 if(netif_running(adapter->netdev)) {
188 ixgb_down(adapter, TRUE); 179 ixgb_down(adapter, true);
189 ixgb_up(adapter); 180 ixgb_up(adapter);
190 ixgb_set_speed_duplex(netdev); 181 ixgb_set_speed_duplex(netdev);
191 } else 182 } else
@@ -194,7 +185,7 @@ ixgb_set_pauseparam(struct net_device *netdev,
194 return 0; 185 return 0;
195} 186}
196 187
197static uint32_t 188static u32
198ixgb_get_rx_csum(struct net_device *netdev) 189ixgb_get_rx_csum(struct net_device *netdev)
199{ 190{
200 struct ixgb_adapter *adapter = netdev_priv(netdev); 191 struct ixgb_adapter *adapter = netdev_priv(netdev);
@@ -203,14 +194,14 @@ ixgb_get_rx_csum(struct net_device *netdev)
203} 194}
204 195
205static int 196static int
206ixgb_set_rx_csum(struct net_device *netdev, uint32_t data) 197ixgb_set_rx_csum(struct net_device *netdev, u32 data)
207{ 198{
208 struct ixgb_adapter *adapter = netdev_priv(netdev); 199 struct ixgb_adapter *adapter = netdev_priv(netdev);
209 200
210 adapter->rx_csum = data; 201 adapter->rx_csum = data;
211 202
212 if(netif_running(netdev)) { 203 if(netif_running(netdev)) {
213 ixgb_down(adapter,TRUE); 204 ixgb_down(adapter, true);
214 ixgb_up(adapter); 205 ixgb_up(adapter);
215 ixgb_set_speed_duplex(netdev); 206 ixgb_set_speed_duplex(netdev);
216 } else 207 } else
@@ -218,14 +209,14 @@ ixgb_set_rx_csum(struct net_device *netdev, uint32_t data)
218 return 0; 209 return 0;
219} 210}
220 211
221static uint32_t 212static u32
222ixgb_get_tx_csum(struct net_device *netdev) 213ixgb_get_tx_csum(struct net_device *netdev)
223{ 214{
224 return (netdev->features & NETIF_F_HW_CSUM) != 0; 215 return (netdev->features & NETIF_F_HW_CSUM) != 0;
225} 216}
226 217
227static int 218static int
228ixgb_set_tx_csum(struct net_device *netdev, uint32_t data) 219ixgb_set_tx_csum(struct net_device *netdev, u32 data)
229{ 220{
230 if (data) 221 if (data)
231 netdev->features |= NETIF_F_HW_CSUM; 222 netdev->features |= NETIF_F_HW_CSUM;
@@ -236,7 +227,7 @@ ixgb_set_tx_csum(struct net_device *netdev, uint32_t data)
236} 227}
237 228
238static int 229static int
239ixgb_set_tso(struct net_device *netdev, uint32_t data) 230ixgb_set_tso(struct net_device *netdev, u32 data)
240{ 231{
241 if(data) 232 if(data)
242 netdev->features |= NETIF_F_TSO; 233 netdev->features |= NETIF_F_TSO;
@@ -245,7 +236,7 @@ ixgb_set_tso(struct net_device *netdev, uint32_t data)
245 return 0; 236 return 0;
246} 237}
247 238
248static uint32_t 239static u32
249ixgb_get_msglevel(struct net_device *netdev) 240ixgb_get_msglevel(struct net_device *netdev)
250{ 241{
251 struct ixgb_adapter *adapter = netdev_priv(netdev); 242 struct ixgb_adapter *adapter = netdev_priv(netdev);
@@ -253,7 +244,7 @@ ixgb_get_msglevel(struct net_device *netdev)
253} 244}
254 245
255static void 246static void
256ixgb_set_msglevel(struct net_device *netdev, uint32_t data) 247ixgb_set_msglevel(struct net_device *netdev, u32 data)
257{ 248{
258 struct ixgb_adapter *adapter = netdev_priv(netdev); 249 struct ixgb_adapter *adapter = netdev_priv(netdev);
259 adapter->msg_enable = data; 250 adapter->msg_enable = data;
@@ -263,7 +254,7 @@ ixgb_set_msglevel(struct net_device *netdev, uint32_t data)
263static int 254static int
264ixgb_get_regs_len(struct net_device *netdev) 255ixgb_get_regs_len(struct net_device *netdev)
265{ 256{
266#define IXGB_REG_DUMP_LEN 136*sizeof(uint32_t) 257#define IXGB_REG_DUMP_LEN 136*sizeof(u32)
267 return IXGB_REG_DUMP_LEN; 258 return IXGB_REG_DUMP_LEN;
268} 259}
269 260
@@ -273,9 +264,9 @@ ixgb_get_regs(struct net_device *netdev,
273{ 264{
274 struct ixgb_adapter *adapter = netdev_priv(netdev); 265 struct ixgb_adapter *adapter = netdev_priv(netdev);
275 struct ixgb_hw *hw = &adapter->hw; 266 struct ixgb_hw *hw = &adapter->hw;
276 uint32_t *reg = p; 267 u32 *reg = p;
277 uint32_t *reg_start = reg; 268 u32 *reg_start = reg;
278 uint8_t i; 269 u8 i;
279 270
280 /* the 1 (one) below indicates an attempt at versioning, if the 271 /* the 1 (one) below indicates an attempt at versioning, if the
281 * interface in ethtool or the driver changes, this 1 should be 272 * interface in ethtool or the driver changes, this 1 should be
@@ -404,7 +395,7 @@ ixgb_get_regs(struct net_device *netdev,
404 *reg++ = IXGB_GET_STAT(adapter, xofftxc); /* 134 */ 395 *reg++ = IXGB_GET_STAT(adapter, xofftxc); /* 134 */
405 *reg++ = IXGB_GET_STAT(adapter, rjc); /* 135 */ 396 *reg++ = IXGB_GET_STAT(adapter, rjc); /* 135 */
406 397
407 regs->len = (reg - reg_start) * sizeof(uint32_t); 398 regs->len = (reg - reg_start) * sizeof(u32);
408} 399}
409 400
410static int 401static int
@@ -416,7 +407,7 @@ ixgb_get_eeprom_len(struct net_device *netdev)
416 407
417static int 408static int
418ixgb_get_eeprom(struct net_device *netdev, 409ixgb_get_eeprom(struct net_device *netdev,
419 struct ethtool_eeprom *eeprom, uint8_t *bytes) 410 struct ethtool_eeprom *eeprom, u8 *bytes)
420{ 411{
421 struct ixgb_adapter *adapter = netdev_priv(netdev); 412 struct ixgb_adapter *adapter = netdev_priv(netdev);
422 struct ixgb_hw *hw = &adapter->hw; 413 struct ixgb_hw *hw = &adapter->hw;
@@ -454,7 +445,7 @@ ixgb_get_eeprom(struct net_device *netdev,
454 eeprom_buff[i] = ixgb_get_eeprom_word(hw, (first_word + i)); 445 eeprom_buff[i] = ixgb_get_eeprom_word(hw, (first_word + i));
455 } 446 }
456 447
457 memcpy(bytes, (uint8_t *)eeprom_buff + (eeprom->offset & 1), 448 memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1),
458 eeprom->len); 449 eeprom->len);
459 kfree(eeprom_buff); 450 kfree(eeprom_buff);
460 451
@@ -464,14 +455,14 @@ geeprom_error:
464 455
465static int 456static int
466ixgb_set_eeprom(struct net_device *netdev, 457ixgb_set_eeprom(struct net_device *netdev,
467 struct ethtool_eeprom *eeprom, uint8_t *bytes) 458 struct ethtool_eeprom *eeprom, u8 *bytes)
468{ 459{
469 struct ixgb_adapter *adapter = netdev_priv(netdev); 460 struct ixgb_adapter *adapter = netdev_priv(netdev);
470 struct ixgb_hw *hw = &adapter->hw; 461 struct ixgb_hw *hw = &adapter->hw;
471 uint16_t *eeprom_buff; 462 u16 *eeprom_buff;
472 void *ptr; 463 void *ptr;
473 int max_len, first_word, last_word; 464 int max_len, first_word, last_word;
474 uint16_t i; 465 u16 i;
475 466
476 if(eeprom->len == 0) 467 if(eeprom->len == 0)
477 return -EINVAL; 468 return -EINVAL;
@@ -570,14 +561,14 @@ ixgb_set_ringparam(struct net_device *netdev,
570 return -EINVAL; 561 return -EINVAL;
571 562
572 if(netif_running(adapter->netdev)) 563 if(netif_running(adapter->netdev))
573 ixgb_down(adapter,TRUE); 564 ixgb_down(adapter, true);
574 565
575 rxdr->count = max(ring->rx_pending,(uint32_t)MIN_RXD); 566 rxdr->count = max(ring->rx_pending,(u32)MIN_RXD);
576 rxdr->count = min(rxdr->count,(uint32_t)MAX_RXD); 567 rxdr->count = min(rxdr->count,(u32)MAX_RXD);
577 rxdr->count = ALIGN(rxdr->count, IXGB_REQ_RX_DESCRIPTOR_MULTIPLE); 568 rxdr->count = ALIGN(rxdr->count, IXGB_REQ_RX_DESCRIPTOR_MULTIPLE);
578 569
579 txdr->count = max(ring->tx_pending,(uint32_t)MIN_TXD); 570 txdr->count = max(ring->tx_pending,(u32)MIN_TXD);
580 txdr->count = min(txdr->count,(uint32_t)MAX_TXD); 571 txdr->count = min(txdr->count,(u32)MAX_TXD);
581 txdr->count = ALIGN(txdr->count, IXGB_REQ_TX_DESCRIPTOR_MULTIPLE); 572 txdr->count = ALIGN(txdr->count, IXGB_REQ_TX_DESCRIPTOR_MULTIPLE);
582 573
583 if(netif_running(adapter->netdev)) { 574 if(netif_running(adapter->netdev)) {
@@ -633,7 +624,7 @@ ixgb_led_blink_callback(unsigned long data)
633} 624}
634 625
635static int 626static int
636ixgb_phys_id(struct net_device *netdev, uint32_t data) 627ixgb_phys_id(struct net_device *netdev, u32 data)
637{ 628{
638 struct ixgb_adapter *adapter = netdev_priv(netdev); 629 struct ixgb_adapter *adapter = netdev_priv(netdev);
639 630
@@ -669,7 +660,7 @@ ixgb_get_sset_count(struct net_device *netdev, int sset)
669 660
670static void 661static void
671ixgb_get_ethtool_stats(struct net_device *netdev, 662ixgb_get_ethtool_stats(struct net_device *netdev,
672 struct ethtool_stats *stats, uint64_t *data) 663 struct ethtool_stats *stats, u64 *data)
673{ 664{
674 struct ixgb_adapter *adapter = netdev_priv(netdev); 665 struct ixgb_adapter *adapter = netdev_priv(netdev);
675 int i; 666 int i;
@@ -678,12 +669,12 @@ ixgb_get_ethtool_stats(struct net_device *netdev,
678 for(i = 0; i < IXGB_STATS_LEN; i++) { 669 for(i = 0; i < IXGB_STATS_LEN; i++) {
679 char *p = (char *)adapter+ixgb_gstrings_stats[i].stat_offset; 670 char *p = (char *)adapter+ixgb_gstrings_stats[i].stat_offset;
680 data[i] = (ixgb_gstrings_stats[i].sizeof_stat == 671 data[i] = (ixgb_gstrings_stats[i].sizeof_stat ==
681 sizeof(uint64_t)) ? *(uint64_t *)p : *(uint32_t *)p; 672 sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
682 } 673 }
683} 674}
684 675
685static void 676static void
686ixgb_get_strings(struct net_device *netdev, uint32_t stringset, uint8_t *data) 677ixgb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
687{ 678{
688 int i; 679 int i;
689 680
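
The get_ethtool_stats hunk keeps the common offset-table idiom: each ixgb_gstrings_stats entry records the size and byte offset of a counter inside the adapter struct, so one loop can export mixed 32- and 64-bit counters without a switch. A runnable sketch of that idiom with an invented stats layout; the struct and field names are illustrative:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Invented adapter layout, standing in for struct ixgb_adapter. */
struct example_adapter {
	uint64_t tx_packets;
	uint32_t restart_queue;
	uint64_t rx_bytes;
};

struct example_stat {
	const char *name;
	int sizeof_stat;
	int stat_offset;
};

#define EXAMPLE_STAT(m) { #m, sizeof(((struct example_adapter *)0)->m), \
			  offsetof(struct example_adapter, m) }

static const struct example_stat example_gstrings_stats[] = {
	EXAMPLE_STAT(tx_packets),
	EXAMPLE_STAT(restart_queue),
	EXAMPLE_STAT(rx_bytes),
};

int main(void)
{
	struct example_adapter adapter = { 100, 3, 654321 };
	size_t i;

	for (i = 0; i < sizeof(example_gstrings_stats) / sizeof(example_gstrings_stats[0]); i++) {
		const char *p = (const char *)&adapter + example_gstrings_stats[i].stat_offset;
		/* same shape as ixgb_get_ethtool_stats(): the recorded size decides the load width */
		uint64_t v = (example_gstrings_stats[i].sizeof_stat == sizeof(uint64_t))
				? *(const uint64_t *)p : *(const uint32_t *)p;

		printf("%-15s %llu\n", example_gstrings_stats[i].name,
		       (unsigned long long)v);
	}
	return 0;
}
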
diff --git a/drivers/net/ixgb/ixgb_hw.c b/drivers/net/ixgb/ixgb_hw.c
index 80a8b9888225..04d2003e24e1 100644
--- a/drivers/net/ixgb/ixgb_hw.c
+++ b/drivers/net/ixgb/ixgb_hw.c
@@ -35,13 +35,13 @@
35 35
36/* Local function prototypes */ 36/* Local function prototypes */
37 37
38static uint32_t ixgb_hash_mc_addr(struct ixgb_hw *hw, uint8_t * mc_addr); 38static u32 ixgb_hash_mc_addr(struct ixgb_hw *hw, u8 * mc_addr);
39 39
40static void ixgb_mta_set(struct ixgb_hw *hw, uint32_t hash_value); 40static void ixgb_mta_set(struct ixgb_hw *hw, u32 hash_value);
41 41
42static void ixgb_get_bus_info(struct ixgb_hw *hw); 42static void ixgb_get_bus_info(struct ixgb_hw *hw);
43 43
44static boolean_t ixgb_link_reset(struct ixgb_hw *hw); 44static bool ixgb_link_reset(struct ixgb_hw *hw);
45 45
46static void ixgb_optics_reset(struct ixgb_hw *hw); 46static void ixgb_optics_reset(struct ixgb_hw *hw);
47 47
@@ -55,18 +55,18 @@ static void ixgb_clear_vfta(struct ixgb_hw *hw);
55 55
56static void ixgb_init_rx_addrs(struct ixgb_hw *hw); 56static void ixgb_init_rx_addrs(struct ixgb_hw *hw);
57 57
58static uint16_t ixgb_read_phy_reg(struct ixgb_hw *hw, 58static u16 ixgb_read_phy_reg(struct ixgb_hw *hw,
59 uint32_t reg_address, 59 u32 reg_address,
60 uint32_t phy_address, 60 u32 phy_address,
61 uint32_t device_type); 61 u32 device_type);
62 62
63static boolean_t ixgb_setup_fc(struct ixgb_hw *hw); 63static bool ixgb_setup_fc(struct ixgb_hw *hw);
64 64
65static boolean_t mac_addr_valid(uint8_t *mac_addr); 65static bool mac_addr_valid(u8 *mac_addr);
66 66
67static uint32_t ixgb_mac_reset(struct ixgb_hw *hw) 67static u32 ixgb_mac_reset(struct ixgb_hw *hw)
68{ 68{
69 uint32_t ctrl_reg; 69 u32 ctrl_reg;
70 70
71 ctrl_reg = IXGB_CTRL0_RST | 71 ctrl_reg = IXGB_CTRL0_RST |
72 IXGB_CTRL0_SDP3_DIR | /* All pins are Output=1 */ 72 IXGB_CTRL0_SDP3_DIR | /* All pins are Output=1 */
@@ -114,11 +114,11 @@ static uint32_t ixgb_mac_reset(struct ixgb_hw *hw)
114 * 114 *
115 * hw - Struct containing variables accessed by shared code 115 * hw - Struct containing variables accessed by shared code
116 *****************************************************************************/ 116 *****************************************************************************/
117boolean_t 117bool
118ixgb_adapter_stop(struct ixgb_hw *hw) 118ixgb_adapter_stop(struct ixgb_hw *hw)
119{ 119{
120 uint32_t ctrl_reg; 120 u32 ctrl_reg;
121 uint32_t icr_reg; 121 u32 icr_reg;
122 122
123 DEBUGFUNC("ixgb_adapter_stop"); 123 DEBUGFUNC("ixgb_adapter_stop");
124 124
@@ -127,13 +127,13 @@ ixgb_adapter_stop(struct ixgb_hw *hw)
127 */ 127 */
128 if(hw->adapter_stopped) { 128 if(hw->adapter_stopped) {
129 DEBUGOUT("Exiting because the adapter is already stopped!!!\n"); 129 DEBUGOUT("Exiting because the adapter is already stopped!!!\n");
130 return FALSE; 130 return false;
131 } 131 }
132 132
133 /* Set the Adapter Stopped flag so other driver functions stop 133 /* Set the Adapter Stopped flag so other driver functions stop
134 * touching the Hardware. 134 * touching the Hardware.
135 */ 135 */
136 hw->adapter_stopped = TRUE; 136 hw->adapter_stopped = true;
137 137
138 /* Clear interrupt mask to stop board from generating interrupts */ 138 /* Clear interrupt mask to stop board from generating interrupts */
139 DEBUGOUT("Masking off all interrupts\n"); 139 DEBUGOUT("Masking off all interrupts\n");
@@ -179,8 +179,8 @@ ixgb_adapter_stop(struct ixgb_hw *hw)
179static ixgb_xpak_vendor 179static ixgb_xpak_vendor
180ixgb_identify_xpak_vendor(struct ixgb_hw *hw) 180ixgb_identify_xpak_vendor(struct ixgb_hw *hw)
181{ 181{
182 uint32_t i; 182 u32 i;
183 uint16_t vendor_name[5]; 183 u16 vendor_name[5];
184 ixgb_xpak_vendor xpak_vendor; 184 ixgb_xpak_vendor xpak_vendor;
185 185
186 DEBUGFUNC("ixgb_identify_xpak_vendor"); 186 DEBUGFUNC("ixgb_identify_xpak_vendor");
@@ -286,15 +286,15 @@ ixgb_identify_phy(struct ixgb_hw *hw)
286 * Leaves the transmit and receive units disabled and uninitialized. 286 * Leaves the transmit and receive units disabled and uninitialized.
287 * 287 *
288 * Returns: 288 * Returns:
289 * TRUE if successful, 289 * true if successful,
290 * FALSE if unrecoverable problems were encountered. 290 * false if unrecoverable problems were encountered.
291 *****************************************************************************/ 291 *****************************************************************************/
292boolean_t 292bool
293ixgb_init_hw(struct ixgb_hw *hw) 293ixgb_init_hw(struct ixgb_hw *hw)
294{ 294{
295 uint32_t i; 295 u32 i;
296 uint32_t ctrl_reg; 296 u32 ctrl_reg;
297 boolean_t status; 297 bool status;
298 298
299 DEBUGFUNC("ixgb_init_hw"); 299 DEBUGFUNC("ixgb_init_hw");
300 300
@@ -318,9 +318,8 @@ ixgb_init_hw(struct ixgb_hw *hw)
318 /* Delay a few ms just to allow the reset to complete */ 318 /* Delay a few ms just to allow the reset to complete */
319 msleep(IXGB_DELAY_AFTER_EE_RESET); 319 msleep(IXGB_DELAY_AFTER_EE_RESET);
320 320
321 if (ixgb_get_eeprom_data(hw) == FALSE) { 321 if (!ixgb_get_eeprom_data(hw))
322 return(FALSE); 322 return false;
323 }
324 323
325 /* Use the device id to determine the type of phy/transceiver. */ 324 /* Use the device id to determine the type of phy/transceiver. */
326 hw->device_id = ixgb_get_ee_device_id(hw); 325 hw->device_id = ixgb_get_ee_device_id(hw);
@@ -337,11 +336,11 @@ ixgb_init_hw(struct ixgb_hw *hw)
337 */ 336 */
338 if (!mac_addr_valid(hw->curr_mac_addr)) { 337 if (!mac_addr_valid(hw->curr_mac_addr)) {
339 DEBUGOUT("MAC address invalid after ixgb_init_rx_addrs\n"); 338 DEBUGOUT("MAC address invalid after ixgb_init_rx_addrs\n");
340 return(FALSE); 339 return(false);
341 } 340 }
342 341
343 /* tell the routines in this file they can access hardware again */ 342 /* tell the routines in this file they can access hardware again */
344 hw->adapter_stopped = FALSE; 343 hw->adapter_stopped = false;
345 344
346 /* Fill in the bus_info structure */ 345 /* Fill in the bus_info structure */
347 ixgb_get_bus_info(hw); 346 ixgb_get_bus_info(hw);
@@ -378,7 +377,7 @@ ixgb_init_hw(struct ixgb_hw *hw)
378static void 377static void
379ixgb_init_rx_addrs(struct ixgb_hw *hw) 378ixgb_init_rx_addrs(struct ixgb_hw *hw)
380{ 379{
381 uint32_t i; 380 u32 i;
382 381
383 DEBUGFUNC("ixgb_init_rx_addrs"); 382 DEBUGFUNC("ixgb_init_rx_addrs");
384 383
@@ -438,13 +437,13 @@ ixgb_init_rx_addrs(struct ixgb_hw *hw)
438 *****************************************************************************/ 437 *****************************************************************************/
439void 438void
440ixgb_mc_addr_list_update(struct ixgb_hw *hw, 439ixgb_mc_addr_list_update(struct ixgb_hw *hw,
441 uint8_t *mc_addr_list, 440 u8 *mc_addr_list,
442 uint32_t mc_addr_count, 441 u32 mc_addr_count,
443 uint32_t pad) 442 u32 pad)
444{ 443{
445 uint32_t hash_value; 444 u32 hash_value;
446 uint32_t i; 445 u32 i;
447 uint32_t rar_used_count = 1; /* RAR[0] is used for our MAC address */ 446 u32 rar_used_count = 1; /* RAR[0] is used for our MAC address */
448 447
449 DEBUGFUNC("ixgb_mc_addr_list_update"); 448 DEBUGFUNC("ixgb_mc_addr_list_update");
450 449
@@ -516,11 +515,11 @@ ixgb_mc_addr_list_update(struct ixgb_hw *hw,
516 * Returns: 515 * Returns:
517 * The hash value 516 * The hash value
518 *****************************************************************************/ 517 *****************************************************************************/
519static uint32_t 518static u32
520ixgb_hash_mc_addr(struct ixgb_hw *hw, 519ixgb_hash_mc_addr(struct ixgb_hw *hw,
521 uint8_t *mc_addr) 520 u8 *mc_addr)
522{ 521{
523 uint32_t hash_value = 0; 522 u32 hash_value = 0;
524 523
525 DEBUGFUNC("ixgb_hash_mc_addr"); 524 DEBUGFUNC("ixgb_hash_mc_addr");
526 525
@@ -534,18 +533,18 @@ ixgb_hash_mc_addr(struct ixgb_hw *hw,
534 case 0: 533 case 0:
535 /* [47:36] i.e. 0x563 for above example address */ 534 /* [47:36] i.e. 0x563 for above example address */
536 hash_value = 535 hash_value =
537 ((mc_addr[4] >> 4) | (((uint16_t) mc_addr[5]) << 4)); 536 ((mc_addr[4] >> 4) | (((u16) mc_addr[5]) << 4));
538 break; 537 break;
539 case 1: /* [46:35] i.e. 0xAC6 for above example address */ 538 case 1: /* [46:35] i.e. 0xAC6 for above example address */
540 hash_value = 539 hash_value =
541 ((mc_addr[4] >> 3) | (((uint16_t) mc_addr[5]) << 5)); 540 ((mc_addr[4] >> 3) | (((u16) mc_addr[5]) << 5));
542 break; 541 break;
543 case 2: /* [45:34] i.e. 0x5D8 for above example address */ 542 case 2: /* [45:34] i.e. 0x5D8 for above example address */
544 hash_value = 543 hash_value =
545 ((mc_addr[4] >> 2) | (((uint16_t) mc_addr[5]) << 6)); 544 ((mc_addr[4] >> 2) | (((u16) mc_addr[5]) << 6));
546 break; 545 break;
547 case 3: /* [43:32] i.e. 0x634 for above example address */ 546 case 3: /* [43:32] i.e. 0x634 for above example address */
548 hash_value = ((mc_addr[4]) | (((uint16_t) mc_addr[5]) << 8)); 547 hash_value = ((mc_addr[4]) | (((u16) mc_addr[5]) << 8));
549 break; 548 break;
550 default: 549 default:
551 /* Invalid mc_filter_type, what should we do? */ 550 /* Invalid mc_filter_type, what should we do? */
@@ -566,10 +565,10 @@ ixgb_hash_mc_addr(struct ixgb_hw *hw,
566 *****************************************************************************/ 565 *****************************************************************************/
567static void 566static void
568ixgb_mta_set(struct ixgb_hw *hw, 567ixgb_mta_set(struct ixgb_hw *hw,
569 uint32_t hash_value) 568 u32 hash_value)
570{ 569{
571 uint32_t hash_bit, hash_reg; 570 u32 hash_bit, hash_reg;
572 uint32_t mta_reg; 571 u32 mta_reg;
573 572
574 /* The MTA is a register array of 128 32-bit registers. 573 /* The MTA is a register array of 128 32-bit registers.
575 * It is treated like an array of 4096 bits. We want to set 574 * It is treated like an array of 4096 bits. We want to set
@@ -600,23 +599,23 @@ ixgb_mta_set(struct ixgb_hw *hw,
600 *****************************************************************************/ 599 *****************************************************************************/
601void 600void
602ixgb_rar_set(struct ixgb_hw *hw, 601ixgb_rar_set(struct ixgb_hw *hw,
603 uint8_t *addr, 602 u8 *addr,
604 uint32_t index) 603 u32 index)
605{ 604{
606 uint32_t rar_low, rar_high; 605 u32 rar_low, rar_high;
607 606
608 DEBUGFUNC("ixgb_rar_set"); 607 DEBUGFUNC("ixgb_rar_set");
609 608
610 /* HW expects these in little endian so we reverse the byte order 609 /* HW expects these in little endian so we reverse the byte order
611 * from network order (big endian) to little endian 610 * from network order (big endian) to little endian
612 */ 611 */
613 rar_low = ((uint32_t) addr[0] | 612 rar_low = ((u32) addr[0] |
614 ((uint32_t)addr[1] << 8) | 613 ((u32)addr[1] << 8) |
615 ((uint32_t)addr[2] << 16) | 614 ((u32)addr[2] << 16) |
616 ((uint32_t)addr[3] << 24)); 615 ((u32)addr[3] << 24));
617 616
618 rar_high = ((uint32_t) addr[4] | 617 rar_high = ((u32) addr[4] |
619 ((uint32_t)addr[5] << 8) | 618 ((u32)addr[5] << 8) |
620 IXGB_RAH_AV); 619 IXGB_RAH_AV);
621 620
622 IXGB_WRITE_REG_ARRAY(hw, RA, (index << 1), rar_low); 621 IXGB_WRITE_REG_ARRAY(hw, RA, (index << 1), rar_low);
@@ -633,8 +632,8 @@ ixgb_rar_set(struct ixgb_hw *hw,
633 *****************************************************************************/ 632 *****************************************************************************/
634void 633void
635ixgb_write_vfta(struct ixgb_hw *hw, 634ixgb_write_vfta(struct ixgb_hw *hw,
636 uint32_t offset, 635 u32 offset,
637 uint32_t value) 636 u32 value)
638{ 637{
639 IXGB_WRITE_REG_ARRAY(hw, VFTA, offset, value); 638 IXGB_WRITE_REG_ARRAY(hw, VFTA, offset, value);
640 return; 639 return;
@@ -648,7 +647,7 @@ ixgb_write_vfta(struct ixgb_hw *hw,
648static void 647static void
649ixgb_clear_vfta(struct ixgb_hw *hw) 648ixgb_clear_vfta(struct ixgb_hw *hw)
650{ 649{
651 uint32_t offset; 650 u32 offset;
652 651
653 for(offset = 0; offset < IXGB_VLAN_FILTER_TBL_SIZE; offset++) 652 for(offset = 0; offset < IXGB_VLAN_FILTER_TBL_SIZE; offset++)
654 IXGB_WRITE_REG_ARRAY(hw, VFTA, offset, 0); 653 IXGB_WRITE_REG_ARRAY(hw, VFTA, offset, 0);
@@ -661,12 +660,12 @@ ixgb_clear_vfta(struct ixgb_hw *hw)
661 * hw - Struct containing variables accessed by shared code 660 * hw - Struct containing variables accessed by shared code
662 *****************************************************************************/ 661 *****************************************************************************/
663 662
664static boolean_t 663static bool
665ixgb_setup_fc(struct ixgb_hw *hw) 664ixgb_setup_fc(struct ixgb_hw *hw)
666{ 665{
667 uint32_t ctrl_reg; 666 u32 ctrl_reg;
668 uint32_t pap_reg = 0; /* by default, assume no pause time */ 667 u32 pap_reg = 0; /* by default, assume no pause time */
669 boolean_t status = TRUE; 668 bool status = true;
670 669
671 DEBUGFUNC("ixgb_setup_fc"); 670 DEBUGFUNC("ixgb_setup_fc");
672 671
@@ -763,15 +762,15 @@ ixgb_setup_fc(struct ixgb_hw *hw)
763 * This requires that first an address cycle command is sent, followed by a 762 * This requires that first an address cycle command is sent, followed by a
764 * read command. 763 * read command.
765 *****************************************************************************/ 764 *****************************************************************************/
766static uint16_t 765static u16
767ixgb_read_phy_reg(struct ixgb_hw *hw, 766ixgb_read_phy_reg(struct ixgb_hw *hw,
768 uint32_t reg_address, 767 u32 reg_address,
769 uint32_t phy_address, 768 u32 phy_address,
770 uint32_t device_type) 769 u32 device_type)
771{ 770{
772 uint32_t i; 771 u32 i;
773 uint32_t data; 772 u32 data;
774 uint32_t command = 0; 773 u32 command = 0;
775 774
776 ASSERT(reg_address <= IXGB_MAX_PHY_REG_ADDRESS); 775 ASSERT(reg_address <= IXGB_MAX_PHY_REG_ADDRESS);
777 ASSERT(phy_address <= IXGB_MAX_PHY_ADDRESS); 776 ASSERT(phy_address <= IXGB_MAX_PHY_ADDRESS);
@@ -836,7 +835,7 @@ ixgb_read_phy_reg(struct ixgb_hw *hw,
836 */ 835 */
837 data = IXGB_READ_REG(hw, MSRWD); 836 data = IXGB_READ_REG(hw, MSRWD);
838 data >>= IXGB_MSRWD_READ_DATA_SHIFT; 837 data >>= IXGB_MSRWD_READ_DATA_SHIFT;
839 return((uint16_t) data); 838 return((u16) data);
840} 839}
841 840
842/****************************************************************************** 841/******************************************************************************
@@ -858,20 +857,20 @@ ixgb_read_phy_reg(struct ixgb_hw *hw,
858 *****************************************************************************/ 857 *****************************************************************************/
859static void 858static void
860ixgb_write_phy_reg(struct ixgb_hw *hw, 859ixgb_write_phy_reg(struct ixgb_hw *hw,
861 uint32_t reg_address, 860 u32 reg_address,
862 uint32_t phy_address, 861 u32 phy_address,
863 uint32_t device_type, 862 u32 device_type,
864 uint16_t data) 863 u16 data)
865{ 864{
866 uint32_t i; 865 u32 i;
867 uint32_t command = 0; 866 u32 command = 0;
868 867
869 ASSERT(reg_address <= IXGB_MAX_PHY_REG_ADDRESS); 868 ASSERT(reg_address <= IXGB_MAX_PHY_REG_ADDRESS);
870 ASSERT(phy_address <= IXGB_MAX_PHY_ADDRESS); 869 ASSERT(phy_address <= IXGB_MAX_PHY_ADDRESS);
871 ASSERT(device_type <= IXGB_MAX_PHY_DEV_TYPE); 870 ASSERT(device_type <= IXGB_MAX_PHY_DEV_TYPE);
872 871
873 /* Put the data in the MDIO Read/Write Data register */ 872 /* Put the data in the MDIO Read/Write Data register */
874 IXGB_WRITE_REG(hw, MSRWD, (uint32_t)data); 873 IXGB_WRITE_REG(hw, MSRWD, (u32)data);
875 874
876 /* Setup and write the address cycle command */ 875 /* Setup and write the address cycle command */
877 command = ((reg_address << IXGB_MSCA_NP_ADDR_SHIFT) | 876 command = ((reg_address << IXGB_MSCA_NP_ADDR_SHIFT) |
@@ -940,8 +939,8 @@ ixgb_write_phy_reg(struct ixgb_hw *hw,
940void 939void
941ixgb_check_for_link(struct ixgb_hw *hw) 940ixgb_check_for_link(struct ixgb_hw *hw)
942{ 941{
943 uint32_t status_reg; 942 u32 status_reg;
944 uint32_t xpcss_reg; 943 u32 xpcss_reg;
945 944
946 DEBUGFUNC("ixgb_check_for_link"); 945 DEBUGFUNC("ixgb_check_for_link");
947 946
@@ -950,7 +949,7 @@ ixgb_check_for_link(struct ixgb_hw *hw)
950 949
951 if ((xpcss_reg & IXGB_XPCSS_ALIGN_STATUS) && 950 if ((xpcss_reg & IXGB_XPCSS_ALIGN_STATUS) &&
952 (status_reg & IXGB_STATUS_LU)) { 951 (status_reg & IXGB_STATUS_LU)) {
953 hw->link_up = TRUE; 952 hw->link_up = true;
954 } else if (!(xpcss_reg & IXGB_XPCSS_ALIGN_STATUS) && 953 } else if (!(xpcss_reg & IXGB_XPCSS_ALIGN_STATUS) &&
955 (status_reg & IXGB_STATUS_LU)) { 954 (status_reg & IXGB_STATUS_LU)) {
956 DEBUGOUT("XPCSS Not Aligned while Status:LU is set.\n"); 955 DEBUGOUT("XPCSS Not Aligned while Status:LU is set.\n");
@@ -974,10 +973,10 @@ ixgb_check_for_link(struct ixgb_hw *hw)
974 * 973 *
975 * Called by any function that needs to check the link status of the adapter. 974 * Called by any function that needs to check the link status of the adapter.
976 *****************************************************************************/ 975 *****************************************************************************/
977boolean_t ixgb_check_for_bad_link(struct ixgb_hw *hw) 976bool ixgb_check_for_bad_link(struct ixgb_hw *hw)
978{ 977{
979 uint32_t newLFC, newRFC; 978 u32 newLFC, newRFC;
980 boolean_t bad_link_returncode = FALSE; 979 bool bad_link_returncode = false;
981 980
982 if (hw->phy_type == ixgb_phy_type_txn17401) { 981 if (hw->phy_type == ixgb_phy_type_txn17401) {
983 newLFC = IXGB_READ_REG(hw, LFC); 982 newLFC = IXGB_READ_REG(hw, LFC);
@@ -986,7 +985,7 @@ boolean_t ixgb_check_for_bad_link(struct ixgb_hw *hw)
986 || (hw->lastRFC + 250 < newRFC)) { 985 || (hw->lastRFC + 250 < newRFC)) {
987 DEBUGOUT 986 DEBUGOUT
988 ("BAD LINK! too many LFC/RFC since last check\n"); 987 ("BAD LINK! too many LFC/RFC since last check\n");
989 bad_link_returncode = TRUE; 988 bad_link_returncode = true;
990 } 989 }
991 hw->lastLFC = newLFC; 990 hw->lastLFC = newLFC;
992 hw->lastRFC = newRFC; 991 hw->lastRFC = newRFC;
@@ -1003,7 +1002,7 @@ boolean_t ixgb_check_for_bad_link(struct ixgb_hw *hw)
1003static void 1002static void
1004ixgb_clear_hw_cntrs(struct ixgb_hw *hw) 1003ixgb_clear_hw_cntrs(struct ixgb_hw *hw)
1005{ 1004{
1006 volatile uint32_t temp_reg; 1005 volatile u32 temp_reg;
1007 1006
1008 DEBUGFUNC("ixgb_clear_hw_cntrs"); 1007 DEBUGFUNC("ixgb_clear_hw_cntrs");
1009 1008
@@ -1084,7 +1083,7 @@ ixgb_clear_hw_cntrs(struct ixgb_hw *hw)
1084void 1083void
1085ixgb_led_on(struct ixgb_hw *hw) 1084ixgb_led_on(struct ixgb_hw *hw)
1086{ 1085{
1087 uint32_t ctrl0_reg = IXGB_READ_REG(hw, CTRL0); 1086 u32 ctrl0_reg = IXGB_READ_REG(hw, CTRL0);
1088 1087
1089 /* To turn on the LED, clear software-definable pin 0 (SDP0). */ 1088 /* To turn on the LED, clear software-definable pin 0 (SDP0). */
1090 ctrl0_reg &= ~IXGB_CTRL0_SDP0; 1089 ctrl0_reg &= ~IXGB_CTRL0_SDP0;
@@ -1100,7 +1099,7 @@ ixgb_led_on(struct ixgb_hw *hw)
1100void 1099void
1101ixgb_led_off(struct ixgb_hw *hw) 1100ixgb_led_off(struct ixgb_hw *hw)
1102{ 1101{
1103 uint32_t ctrl0_reg = IXGB_READ_REG(hw, CTRL0); 1102 u32 ctrl0_reg = IXGB_READ_REG(hw, CTRL0);
1104 1103
1105 /* To turn off the LED, set software-definable pin 0 (SDP0). */ 1104 /* To turn off the LED, set software-definable pin 0 (SDP0). */
1106 ctrl0_reg |= IXGB_CTRL0_SDP0; 1105 ctrl0_reg |= IXGB_CTRL0_SDP0;
@@ -1116,7 +1115,7 @@ ixgb_led_off(struct ixgb_hw *hw)
1116static void 1115static void
1117ixgb_get_bus_info(struct ixgb_hw *hw) 1116ixgb_get_bus_info(struct ixgb_hw *hw)
1118{ 1117{
1119 uint32_t status_reg; 1118 u32 status_reg;
1120 1119
1121 status_reg = IXGB_READ_REG(hw, STATUS); 1120 status_reg = IXGB_READ_REG(hw, STATUS);
1122 1121
@@ -1155,21 +1154,21 @@ ixgb_get_bus_info(struct ixgb_hw *hw)
1155 * mac_addr - pointer to MAC address. 1154 * mac_addr - pointer to MAC address.
1156 * 1155 *
1157 *****************************************************************************/ 1156 *****************************************************************************/
1158static boolean_t 1157static bool
1159mac_addr_valid(uint8_t *mac_addr) 1158mac_addr_valid(u8 *mac_addr)
1160{ 1159{
1161 boolean_t is_valid = TRUE; 1160 bool is_valid = true;
1162 DEBUGFUNC("mac_addr_valid"); 1161 DEBUGFUNC("mac_addr_valid");
1163 1162
1164 /* Make sure it is not a multicast address */ 1163 /* Make sure it is not a multicast address */
1165 if (IS_MULTICAST(mac_addr)) { 1164 if (IS_MULTICAST(mac_addr)) {
1166 DEBUGOUT("MAC address is multicast\n"); 1165 DEBUGOUT("MAC address is multicast\n");
1167 is_valid = FALSE; 1166 is_valid = false;
1168 } 1167 }
1169 /* Not a broadcast address */ 1168 /* Not a broadcast address */
1170 else if (IS_BROADCAST(mac_addr)) { 1169 else if (IS_BROADCAST(mac_addr)) {
1171 DEBUGOUT("MAC address is broadcast\n"); 1170 DEBUGOUT("MAC address is broadcast\n");
1172 is_valid = FALSE; 1171 is_valid = false;
1173 } 1172 }
1174 /* Reject the zero address */ 1173 /* Reject the zero address */
1175 else if (mac_addr[0] == 0 && 1174 else if (mac_addr[0] == 0 &&
@@ -1179,7 +1178,7 @@ mac_addr_valid(uint8_t *mac_addr)
1179 mac_addr[4] == 0 && 1178 mac_addr[4] == 0 &&
1180 mac_addr[5] == 0) { 1179 mac_addr[5] == 0) {
1181 DEBUGOUT("MAC address is all zeros\n"); 1180 DEBUGOUT("MAC address is all zeros\n");
1182 is_valid = FALSE; 1181 is_valid = false;
1183 } 1182 }
1184 return (is_valid); 1183 return (is_valid);
1185} 1184}
@@ -1190,12 +1189,12 @@ mac_addr_valid(uint8_t *mac_addr)
1190 * 1189 *
1191 * hw - Struct containing variables accessed by shared code 1190 * hw - Struct containing variables accessed by shared code
1192 *****************************************************************************/ 1191 *****************************************************************************/
1193static boolean_t 1192static bool
1194ixgb_link_reset(struct ixgb_hw *hw) 1193ixgb_link_reset(struct ixgb_hw *hw)
1195{ 1194{
1196 boolean_t link_status = FALSE; 1195 bool link_status = false;
1197 uint8_t wait_retries = MAX_RESET_ITERATIONS; 1196 u8 wait_retries = MAX_RESET_ITERATIONS;
1198 uint8_t lrst_retries = MAX_RESET_ITERATIONS; 1197 u8 lrst_retries = MAX_RESET_ITERATIONS;
1199 1198
1200 do { 1199 do {
1201 /* Reset the link */ 1200 /* Reset the link */
@@ -1208,7 +1207,7 @@ ixgb_link_reset(struct ixgb_hw *hw)
1208 link_status = 1207 link_status =
1209 ((IXGB_READ_REG(hw, STATUS) & IXGB_STATUS_LU) 1208 ((IXGB_READ_REG(hw, STATUS) & IXGB_STATUS_LU)
1210 && (IXGB_READ_REG(hw, XPCSS) & 1209 && (IXGB_READ_REG(hw, XPCSS) &
1211 IXGB_XPCSS_ALIGN_STATUS)) ? TRUE : FALSE; 1210 IXGB_XPCSS_ALIGN_STATUS)) ? true : false;
1212 } while (!link_status && --wait_retries); 1211 } while (!link_status && --wait_retries);
1213 1212
1214 } while (!link_status && --lrst_retries); 1213 } while (!link_status && --lrst_retries);
@@ -1225,7 +1224,7 @@ static void
1225ixgb_optics_reset(struct ixgb_hw *hw) 1224ixgb_optics_reset(struct ixgb_hw *hw)
1226{ 1225{
1227 if (hw->phy_type == ixgb_phy_type_txn17401) { 1226 if (hw->phy_type == ixgb_phy_type_txn17401) {
1228 uint16_t mdio_reg; 1227 u16 mdio_reg;
1229 1228
1230 ixgb_write_phy_reg(hw, 1229 ixgb_write_phy_reg(hw,
1231 MDIO_PMA_PMD_CR1, 1230 MDIO_PMA_PMD_CR1,
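The ixgb_hw.c hunks above are mostly mechanical (uint32_t/uint8_t/boolean_t become u32/u8/bool), with ixgb_link_reset() keeping its nested retry loop. Below is a minimal userspace sketch of that retry pattern; the register reads are mocked and every name in it is a local stand-in, not a driver symbol.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_RESET_ITERATIONS 8

/* stand-ins for the IXGB_READ_REG(hw, STATUS) / (hw, XPCSS) bit tests */
static bool read_link_up(void)       { static int polls; return ++polls > 5; }
static bool read_xpcss_aligned(void) { return true; }
static void reset_link(void)         { /* would write CTRL0 on real hardware */ }

static bool link_reset(void)
{
	bool link_status = false;
	uint8_t lrst_retries = MAX_RESET_ITERATIONS;

	do {
		uint8_t wait_retries = MAX_RESET_ITERATIONS;

		reset_link();
		do {
			/* link is good only when both status bits read back set */
			link_status = read_link_up() && read_xpcss_aligned();
		} while (!link_status && --wait_retries);
	} while (!link_status && --lrst_retries);

	return link_status;
}

int main(void)
{
	printf("link %s\n", link_reset() ? "up" : "down");
	return 0;
}
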
diff --git a/drivers/net/ixgb/ixgb_hw.h b/drivers/net/ixgb/ixgb_hw.h
index 4f176ff2b786..39cfa47bea69 100644
--- a/drivers/net/ixgb/ixgb_hw.h
+++ b/drivers/net/ixgb/ixgb_hw.h
@@ -538,8 +538,8 @@ struct ixgb_rx_desc {
538 __le64 buff_addr; 538 __le64 buff_addr;
539 __le16 length; 539 __le16 length;
540 __le16 reserved; 540 __le16 reserved;
541 uint8_t status; 541 u8 status;
542 uint8_t errors; 542 u8 errors;
543 __le16 special; 543 __le16 special;
544}; 544};
545 545
@@ -570,8 +570,8 @@ struct ixgb_rx_desc {
570struct ixgb_tx_desc { 570struct ixgb_tx_desc {
571 __le64 buff_addr; 571 __le64 buff_addr;
572 __le32 cmd_type_len; 572 __le32 cmd_type_len;
573 uint8_t status; 573 u8 status;
574 uint8_t popts; 574 u8 popts;
575 __le16 vlan; 575 __le16 vlan;
576}; 576};
577 577
@@ -595,15 +595,15 @@ struct ixgb_tx_desc {
595#define IXGB_TX_DESC_SPECIAL_PRI_SHIFT IXGB_RX_DESC_SPECIAL_PRI_SHIFT /* Priority is in upper 3 of 16 */ 595#define IXGB_TX_DESC_SPECIAL_PRI_SHIFT IXGB_RX_DESC_SPECIAL_PRI_SHIFT /* Priority is in upper 3 of 16 */
596 596
597struct ixgb_context_desc { 597struct ixgb_context_desc {
598 uint8_t ipcss; 598 u8 ipcss;
599 uint8_t ipcso; 599 u8 ipcso;
600 __le16 ipcse; 600 __le16 ipcse;
601 uint8_t tucss; 601 u8 tucss;
602 uint8_t tucso; 602 u8 tucso;
603 __le16 tucse; 603 __le16 tucse;
604 __le32 cmd_type_len; 604 __le32 cmd_type_len;
605 uint8_t status; 605 u8 status;
606 uint8_t hdr_len; 606 u8 hdr_len;
607 __le16 mss; 607 __le16 mss;
608}; 608};
609 609
@@ -637,33 +637,33 @@ struct ixgb_context_desc {
637 637
638/* This structure takes a 64k flash and maps it for identification commands */ 638/* This structure takes a 64k flash and maps it for identification commands */
639struct ixgb_flash_buffer { 639struct ixgb_flash_buffer {
640 uint8_t manufacturer_id; 640 u8 manufacturer_id;
641 uint8_t device_id; 641 u8 device_id;
642 uint8_t filler1[0x2AA8]; 642 u8 filler1[0x2AA8];
643 uint8_t cmd2; 643 u8 cmd2;
644 uint8_t filler2[0x2AAA]; 644 u8 filler2[0x2AAA];
645 uint8_t cmd1; 645 u8 cmd1;
646 uint8_t filler3[0xAAAA]; 646 u8 filler3[0xAAAA];
647}; 647};
648 648
649/* 649/*
650 * This is a little-endian specific check. 650 * This is a little-endian specific check.
651 */ 651 */
652#define IS_MULTICAST(Address) \ 652#define IS_MULTICAST(Address) \
653 (boolean_t)(((uint8_t *)(Address))[0] & ((uint8_t)0x01)) 653 (bool)(((u8 *)(Address))[0] & ((u8)0x01))
654 654
655/* 655/*
656 * Check whether an address is broadcast. 656 * Check whether an address is broadcast.
657 */ 657 */
658#define IS_BROADCAST(Address) \ 658#define IS_BROADCAST(Address) \
659 ((((uint8_t *)(Address))[0] == ((uint8_t)0xff)) && (((uint8_t *)(Address))[1] == ((uint8_t)0xff))) 659 ((((u8 *)(Address))[0] == ((u8)0xff)) && (((u8 *)(Address))[1] == ((u8)0xff)))
660 660
661/* Flow control parameters */ 661/* Flow control parameters */
662struct ixgb_fc { 662struct ixgb_fc {
663 uint32_t high_water; /* Flow Control High-water */ 663 u32 high_water; /* Flow Control High-water */
664 uint32_t low_water; /* Flow Control Low-water */ 664 u32 low_water; /* Flow Control Low-water */
665 uint16_t pause_time; /* Flow Control Pause timer */ 665 u16 pause_time; /* Flow Control Pause timer */
666 boolean_t send_xon; /* Flow control send XON */ 666 bool send_xon; /* Flow control send XON */
667 ixgb_fc_type type; /* Type of flow control */ 667 ixgb_fc_type type; /* Type of flow control */
668}; 668};
669 669
@@ -685,139 +685,139 @@ struct ixgb_bus {
685}; 685};
686 686
687struct ixgb_hw { 687struct ixgb_hw {
688 uint8_t __iomem *hw_addr;/* Base Address of the hardware */ 688 u8 __iomem *hw_addr;/* Base Address of the hardware */
689 void *back; /* Pointer to OS-dependent struct */ 689 void *back; /* Pointer to OS-dependent struct */
690 struct ixgb_fc fc; /* Flow control parameters */ 690 struct ixgb_fc fc; /* Flow control parameters */
691 struct ixgb_bus bus; /* Bus parameters */ 691 struct ixgb_bus bus; /* Bus parameters */
692 uint32_t phy_id; /* Phy Identifier */ 692 u32 phy_id; /* Phy Identifier */
693 uint32_t phy_addr; /* XGMII address of Phy */ 693 u32 phy_addr; /* XGMII address of Phy */
694 ixgb_mac_type mac_type; /* Identifier for MAC controller */ 694 ixgb_mac_type mac_type; /* Identifier for MAC controller */
695 ixgb_phy_type phy_type; /* Transceiver/phy identifier */ 695 ixgb_phy_type phy_type; /* Transceiver/phy identifier */
696 uint32_t max_frame_size; /* Maximum frame size supported */ 696 u32 max_frame_size; /* Maximum frame size supported */
697 uint32_t mc_filter_type; /* Multicast filter hash type */ 697 u32 mc_filter_type; /* Multicast filter hash type */
698 uint32_t num_mc_addrs; /* Number of current Multicast addrs */ 698 u32 num_mc_addrs; /* Number of current Multicast addrs */
699 uint8_t curr_mac_addr[IXGB_ETH_LENGTH_OF_ADDRESS]; /* Individual address currently programmed in MAC */ 699 u8 curr_mac_addr[IXGB_ETH_LENGTH_OF_ADDRESS]; /* Individual address currently programmed in MAC */
700 uint32_t num_tx_desc; /* Number of Transmit descriptors */ 700 u32 num_tx_desc; /* Number of Transmit descriptors */
701 uint32_t num_rx_desc; /* Number of Receive descriptors */ 701 u32 num_rx_desc; /* Number of Receive descriptors */
702 uint32_t rx_buffer_size; /* Size of Receive buffer */ 702 u32 rx_buffer_size; /* Size of Receive buffer */
703 boolean_t link_up; /* TRUE if link is valid */ 703 bool link_up; /* true if link is valid */
704 boolean_t adapter_stopped; /* State of adapter */ 704 bool adapter_stopped; /* State of adapter */
705 uint16_t device_id; /* device id from PCI configuration space */ 705 u16 device_id; /* device id from PCI configuration space */
706 uint16_t vendor_id; /* vendor id from PCI configuration space */ 706 u16 vendor_id; /* vendor id from PCI configuration space */
707 uint8_t revision_id; /* revision id from PCI configuration space */ 707 u8 revision_id; /* revision id from PCI configuration space */
708 uint16_t subsystem_vendor_id; /* subsystem vendor id from PCI configuration space */ 708 u16 subsystem_vendor_id; /* subsystem vendor id from PCI configuration space */
709 uint16_t subsystem_id; /* subsystem id from PCI configuration space */ 709 u16 subsystem_id; /* subsystem id from PCI configuration space */
710 uint32_t bar0; /* Base Address registers */ 710 u32 bar0; /* Base Address registers */
711 uint32_t bar1; 711 u32 bar1;
712 uint32_t bar2; 712 u32 bar2;
713 uint32_t bar3; 713 u32 bar3;
714 uint16_t pci_cmd_word; /* PCI command register id from PCI configuration space */ 714 u16 pci_cmd_word; /* PCI command register id from PCI configuration space */
715 __le16 eeprom[IXGB_EEPROM_SIZE]; /* EEPROM contents read at init time */ 715 __le16 eeprom[IXGB_EEPROM_SIZE]; /* EEPROM contents read at init time */
716 unsigned long io_base; /* Our I/O mapped location */ 716 unsigned long io_base; /* Our I/O mapped location */
717 uint32_t lastLFC; 717 u32 lastLFC;
718 uint32_t lastRFC; 718 u32 lastRFC;
719}; 719};
720 720
721/* Statistics reported by the hardware */ 721/* Statistics reported by the hardware */
722struct ixgb_hw_stats { 722struct ixgb_hw_stats {
723 uint64_t tprl; 723 u64 tprl;
724 uint64_t tprh; 724 u64 tprh;
725 uint64_t gprcl; 725 u64 gprcl;
726 uint64_t gprch; 726 u64 gprch;
727 uint64_t bprcl; 727 u64 bprcl;
728 uint64_t bprch; 728 u64 bprch;
729 uint64_t mprcl; 729 u64 mprcl;
730 uint64_t mprch; 730 u64 mprch;
731 uint64_t uprcl; 731 u64 uprcl;
732 uint64_t uprch; 732 u64 uprch;
733 uint64_t vprcl; 733 u64 vprcl;
734 uint64_t vprch; 734 u64 vprch;
735 uint64_t jprcl; 735 u64 jprcl;
736 uint64_t jprch; 736 u64 jprch;
737 uint64_t gorcl; 737 u64 gorcl;
738 uint64_t gorch; 738 u64 gorch;
739 uint64_t torl; 739 u64 torl;
740 uint64_t torh; 740 u64 torh;
741 uint64_t rnbc; 741 u64 rnbc;
742 uint64_t ruc; 742 u64 ruc;
743 uint64_t roc; 743 u64 roc;
744 uint64_t rlec; 744 u64 rlec;
745 uint64_t crcerrs; 745 u64 crcerrs;
746 uint64_t icbc; 746 u64 icbc;
747 uint64_t ecbc; 747 u64 ecbc;
748 uint64_t mpc; 748 u64 mpc;
749 uint64_t tptl; 749 u64 tptl;
750 uint64_t tpth; 750 u64 tpth;
751 uint64_t gptcl; 751 u64 gptcl;
752 uint64_t gptch; 752 u64 gptch;
753 uint64_t bptcl; 753 u64 bptcl;
754 uint64_t bptch; 754 u64 bptch;
755 uint64_t mptcl; 755 u64 mptcl;
756 uint64_t mptch; 756 u64 mptch;
757 uint64_t uptcl; 757 u64 uptcl;
758 uint64_t uptch; 758 u64 uptch;
759 uint64_t vptcl; 759 u64 vptcl;
760 uint64_t vptch; 760 u64 vptch;
761 uint64_t jptcl; 761 u64 jptcl;
762 uint64_t jptch; 762 u64 jptch;
763 uint64_t gotcl; 763 u64 gotcl;
764 uint64_t gotch; 764 u64 gotch;
765 uint64_t totl; 765 u64 totl;
766 uint64_t toth; 766 u64 toth;
767 uint64_t dc; 767 u64 dc;
768 uint64_t plt64c; 768 u64 plt64c;
769 uint64_t tsctc; 769 u64 tsctc;
770 uint64_t tsctfc; 770 u64 tsctfc;
771 uint64_t ibic; 771 u64 ibic;
772 uint64_t rfc; 772 u64 rfc;
773 uint64_t lfc; 773 u64 lfc;
774 uint64_t pfrc; 774 u64 pfrc;
775 uint64_t pftc; 775 u64 pftc;
776 uint64_t mcfrc; 776 u64 mcfrc;
777 uint64_t mcftc; 777 u64 mcftc;
778 uint64_t xonrxc; 778 u64 xonrxc;
779 uint64_t xontxc; 779 u64 xontxc;
780 uint64_t xoffrxc; 780 u64 xoffrxc;
781 uint64_t xofftxc; 781 u64 xofftxc;
782 uint64_t rjc; 782 u64 rjc;
783}; 783};
784 784
785/* Function Prototypes */ 785/* Function Prototypes */
786extern boolean_t ixgb_adapter_stop(struct ixgb_hw *hw); 786extern bool ixgb_adapter_stop(struct ixgb_hw *hw);
787extern boolean_t ixgb_init_hw(struct ixgb_hw *hw); 787extern bool ixgb_init_hw(struct ixgb_hw *hw);
788extern boolean_t ixgb_adapter_start(struct ixgb_hw *hw); 788extern bool ixgb_adapter_start(struct ixgb_hw *hw);
789extern void ixgb_check_for_link(struct ixgb_hw *hw); 789extern void ixgb_check_for_link(struct ixgb_hw *hw);
790extern boolean_t ixgb_check_for_bad_link(struct ixgb_hw *hw); 790extern bool ixgb_check_for_bad_link(struct ixgb_hw *hw);
791 791
792extern void ixgb_rar_set(struct ixgb_hw *hw, 792extern void ixgb_rar_set(struct ixgb_hw *hw,
793 uint8_t *addr, 793 u8 *addr,
794 uint32_t index); 794 u32 index);
795 795
796 796
797/* Filters (multicast, vlan, receive) */ 797/* Filters (multicast, vlan, receive) */
798extern void ixgb_mc_addr_list_update(struct ixgb_hw *hw, 798extern void ixgb_mc_addr_list_update(struct ixgb_hw *hw,
799 uint8_t *mc_addr_list, 799 u8 *mc_addr_list,
800 uint32_t mc_addr_count, 800 u32 mc_addr_count,
801 uint32_t pad); 801 u32 pad);
802 802
803/* Vfta functions */ 803/* Vfta functions */
804extern void ixgb_write_vfta(struct ixgb_hw *hw, 804extern void ixgb_write_vfta(struct ixgb_hw *hw,
805 uint32_t offset, 805 u32 offset,
806 uint32_t value); 806 u32 value);
807 807
808/* Access functions to eeprom data */ 808/* Access functions to eeprom data */
809void ixgb_get_ee_mac_addr(struct ixgb_hw *hw, uint8_t *mac_addr); 809void ixgb_get_ee_mac_addr(struct ixgb_hw *hw, u8 *mac_addr);
810uint32_t ixgb_get_ee_pba_number(struct ixgb_hw *hw); 810u32 ixgb_get_ee_pba_number(struct ixgb_hw *hw);
811uint16_t ixgb_get_ee_device_id(struct ixgb_hw *hw); 811u16 ixgb_get_ee_device_id(struct ixgb_hw *hw);
812boolean_t ixgb_get_eeprom_data(struct ixgb_hw *hw); 812bool ixgb_get_eeprom_data(struct ixgb_hw *hw);
813__le16 ixgb_get_eeprom_word(struct ixgb_hw *hw, uint16_t index); 813__le16 ixgb_get_eeprom_word(struct ixgb_hw *hw, u16 index);
814 814
815/* Everything else */ 815/* Everything else */
816void ixgb_led_on(struct ixgb_hw *hw); 816void ixgb_led_on(struct ixgb_hw *hw);
817void ixgb_led_off(struct ixgb_hw *hw); 817void ixgb_led_off(struct ixgb_hw *hw);
818void ixgb_write_pci_cfg(struct ixgb_hw *hw, 818void ixgb_write_pci_cfg(struct ixgb_hw *hw,
819 uint32_t reg, 819 u32 reg,
820 uint16_t * value); 820 u16 * value);
821 821
822 822
823#endif /* _IXGB_HW_H_ */ 823#endif /* _IXGB_HW_H_ */
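ixgb_hw.h above keeps the IS_MULTICAST()/IS_BROADCAST() helpers and only changes the types they cast to, and the mac_addr_valid() hunk earlier rejects multicast, broadcast and all-zero addresses in that order. A small userspace sketch of the same checks follows, with uint8_t in place of u8 and the DEBUGOUT() logging dropped; the macro and function names here are local copies, not the driver's.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define IS_MULTICAST(addr)  (bool)(((const uint8_t *)(addr))[0] & 0x01)
#define IS_BROADCAST(addr)  ((((const uint8_t *)(addr))[0] == 0xff) && \
                             (((const uint8_t *)(addr))[1] == 0xff))

static bool mac_addr_valid(const uint8_t *mac)
{
	static const uint8_t zero[6];

	if (IS_MULTICAST(mac))
		return false;		/* group (multicast/broadcast) bit set */
	if (IS_BROADCAST(mac))
		return false;		/* first two bytes 0xff, as in the macro */
	if (!memcmp(mac, zero, 6))
		return false;		/* all zeros */
	return true;
}

int main(void)
{
	const uint8_t good[6]  = { 0x00, 0x1b, 0x21, 0x00, 0x00, 0x01 };
	const uint8_t mcast[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };

	printf("good=%d multicast=%d\n", mac_addr_valid(good), mac_addr_valid(mcast));
	return 0;
}
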
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index 6738b4d097fe..cb8daddafa29 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -67,7 +67,7 @@ MODULE_DEVICE_TABLE(pci, ixgb_pci_tbl);
67/* Local Function Prototypes */ 67/* Local Function Prototypes */
68 68
69int ixgb_up(struct ixgb_adapter *adapter); 69int ixgb_up(struct ixgb_adapter *adapter);
70void ixgb_down(struct ixgb_adapter *adapter, boolean_t kill_watchdog); 70void ixgb_down(struct ixgb_adapter *adapter, bool kill_watchdog);
71void ixgb_reset(struct ixgb_adapter *adapter); 71void ixgb_reset(struct ixgb_adapter *adapter);
72int ixgb_setup_tx_resources(struct ixgb_adapter *adapter); 72int ixgb_setup_tx_resources(struct ixgb_adapter *adapter);
73int ixgb_setup_rx_resources(struct ixgb_adapter *adapter); 73int ixgb_setup_rx_resources(struct ixgb_adapter *adapter);
@@ -94,22 +94,22 @@ static struct net_device_stats *ixgb_get_stats(struct net_device *netdev);
94static int ixgb_change_mtu(struct net_device *netdev, int new_mtu); 94static int ixgb_change_mtu(struct net_device *netdev, int new_mtu);
95static int ixgb_set_mac(struct net_device *netdev, void *p); 95static int ixgb_set_mac(struct net_device *netdev, void *p);
96static irqreturn_t ixgb_intr(int irq, void *data); 96static irqreturn_t ixgb_intr(int irq, void *data);
97static boolean_t ixgb_clean_tx_irq(struct ixgb_adapter *adapter); 97static bool ixgb_clean_tx_irq(struct ixgb_adapter *adapter);
98 98
99#ifdef CONFIG_IXGB_NAPI 99#ifdef CONFIG_IXGB_NAPI
100static int ixgb_clean(struct napi_struct *napi, int budget); 100static int ixgb_clean(struct napi_struct *napi, int budget);
101static boolean_t ixgb_clean_rx_irq(struct ixgb_adapter *adapter, 101static bool ixgb_clean_rx_irq(struct ixgb_adapter *adapter,
102 int *work_done, int work_to_do); 102 int *work_done, int work_to_do);
103#else 103#else
104static boolean_t ixgb_clean_rx_irq(struct ixgb_adapter *adapter); 104static bool ixgb_clean_rx_irq(struct ixgb_adapter *adapter);
105#endif 105#endif
106static void ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter); 106static void ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter);
107static void ixgb_tx_timeout(struct net_device *dev); 107static void ixgb_tx_timeout(struct net_device *dev);
108static void ixgb_tx_timeout_task(struct work_struct *work); 108static void ixgb_tx_timeout_task(struct work_struct *work);
109static void ixgb_vlan_rx_register(struct net_device *netdev, 109static void ixgb_vlan_rx_register(struct net_device *netdev,
110 struct vlan_group *grp); 110 struct vlan_group *grp);
111static void ixgb_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid); 111static void ixgb_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
112static void ixgb_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid); 112static void ixgb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
113static void ixgb_restore_vlan(struct ixgb_adapter *adapter); 113static void ixgb_restore_vlan(struct ixgb_adapter *adapter);
114 114
115#ifdef CONFIG_NET_POLL_CONTROLLER 115#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -197,7 +197,6 @@ module_exit(ixgb_exit_module);
197static void 197static void
198ixgb_irq_disable(struct ixgb_adapter *adapter) 198ixgb_irq_disable(struct ixgb_adapter *adapter)
199{ 199{
200 atomic_inc(&adapter->irq_sem);
201 IXGB_WRITE_REG(&adapter->hw, IMC, ~0); 200 IXGB_WRITE_REG(&adapter->hw, IMC, ~0);
202 IXGB_WRITE_FLUSH(&adapter->hw); 201 IXGB_WRITE_FLUSH(&adapter->hw);
203 synchronize_irq(adapter->pdev->irq); 202 synchronize_irq(adapter->pdev->irq);
@@ -211,14 +210,12 @@ ixgb_irq_disable(struct ixgb_adapter *adapter)
211static void 210static void
212ixgb_irq_enable(struct ixgb_adapter *adapter) 211ixgb_irq_enable(struct ixgb_adapter *adapter)
213{ 212{
214 if(atomic_dec_and_test(&adapter->irq_sem)) { 213 u32 val = IXGB_INT_RXT0 | IXGB_INT_RXDMT0 |
215 u32 val = IXGB_INT_RXT0 | IXGB_INT_RXDMT0 | 214 IXGB_INT_TXDW | IXGB_INT_LSC;
216 IXGB_INT_TXDW | IXGB_INT_LSC; 215 if (adapter->hw.subsystem_vendor_id == SUN_SUBVENDOR_ID)
217 if (adapter->hw.subsystem_vendor_id == SUN_SUBVENDOR_ID) 216 val |= IXGB_INT_GPI0;
218 val |= IXGB_INT_GPI0; 217 IXGB_WRITE_REG(&adapter->hw, IMS, val);
219 IXGB_WRITE_REG(&adapter->hw, IMS, val); 218 IXGB_WRITE_FLUSH(&adapter->hw);
220 IXGB_WRITE_FLUSH(&adapter->hw);
221 }
222} 219}
223 220
224int 221int
@@ -274,7 +271,7 @@ ixgb_up(struct ixgb_adapter *adapter)
274 271
275 if(hw->max_frame_size > 272 if(hw->max_frame_size >
276 IXGB_MAX_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH) { 273 IXGB_MAX_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH) {
277 uint32_t ctrl0 = IXGB_READ_REG(hw, CTRL0); 274 u32 ctrl0 = IXGB_READ_REG(hw, CTRL0);
278 275
279 if(!(ctrl0 & IXGB_CTRL0_JFE)) { 276 if(!(ctrl0 & IXGB_CTRL0_JFE)) {
280 ctrl0 |= IXGB_CTRL0_JFE; 277 ctrl0 |= IXGB_CTRL0_JFE;
@@ -283,26 +280,30 @@ ixgb_up(struct ixgb_adapter *adapter)
283 } 280 }
284 } 281 }
285 282
286 mod_timer(&adapter->watchdog_timer, jiffies); 283 clear_bit(__IXGB_DOWN, &adapter->flags);
287 284
288#ifdef CONFIG_IXGB_NAPI 285#ifdef CONFIG_IXGB_NAPI
289 napi_enable(&adapter->napi); 286 napi_enable(&adapter->napi);
290#endif 287#endif
291 ixgb_irq_enable(adapter); 288 ixgb_irq_enable(adapter);
292 289
290 mod_timer(&adapter->watchdog_timer, jiffies);
291
293 return 0; 292 return 0;
294} 293}
295 294
296void 295void
297ixgb_down(struct ixgb_adapter *adapter, boolean_t kill_watchdog) 296ixgb_down(struct ixgb_adapter *adapter, bool kill_watchdog)
298{ 297{
299 struct net_device *netdev = adapter->netdev; 298 struct net_device *netdev = adapter->netdev;
300 299
300 /* prevent the interrupt handler from restarting watchdog */
301 set_bit(__IXGB_DOWN, &adapter->flags);
302
301#ifdef CONFIG_IXGB_NAPI 303#ifdef CONFIG_IXGB_NAPI
302 napi_disable(&adapter->napi); 304 napi_disable(&adapter->napi);
303 atomic_set(&adapter->irq_sem, 0);
304#endif 305#endif
305 306 /* waiting for NAPI to complete can re-enable interrupts */
306 ixgb_irq_disable(adapter); 307 ixgb_irq_disable(adapter);
307 free_irq(adapter->pdev->irq, netdev); 308 free_irq(adapter->pdev->irq, netdev);
308 309
@@ -589,9 +590,9 @@ ixgb_sw_init(struct ixgb_adapter *adapter)
589 /* enable flow control to be programmed */ 590 /* enable flow control to be programmed */
590 hw->fc.send_xon = 1; 591 hw->fc.send_xon = 1;
591 592
592 atomic_set(&adapter->irq_sem, 1);
593 spin_lock_init(&adapter->tx_lock); 593 spin_lock_init(&adapter->tx_lock);
594 594
595 set_bit(__IXGB_DOWN, &adapter->flags);
595 return 0; 596 return 0;
596} 597}
597 598
@@ -656,7 +657,7 @@ ixgb_close(struct net_device *netdev)
656{ 657{
657 struct ixgb_adapter *adapter = netdev_priv(netdev); 658 struct ixgb_adapter *adapter = netdev_priv(netdev);
658 659
659 ixgb_down(adapter, TRUE); 660 ixgb_down(adapter, true);
660 661
661 ixgb_free_tx_resources(adapter); 662 ixgb_free_tx_resources(adapter);
662 ixgb_free_rx_resources(adapter); 663 ixgb_free_rx_resources(adapter);
@@ -717,9 +718,9 @@ ixgb_setup_tx_resources(struct ixgb_adapter *adapter)
717static void 718static void
718ixgb_configure_tx(struct ixgb_adapter *adapter) 719ixgb_configure_tx(struct ixgb_adapter *adapter)
719{ 720{
720 uint64_t tdba = adapter->tx_ring.dma; 721 u64 tdba = adapter->tx_ring.dma;
721 uint32_t tdlen = adapter->tx_ring.count * sizeof(struct ixgb_tx_desc); 722 u32 tdlen = adapter->tx_ring.count * sizeof(struct ixgb_tx_desc);
722 uint32_t tctl; 723 u32 tctl;
723 struct ixgb_hw *hw = &adapter->hw; 724 struct ixgb_hw *hw = &adapter->hw;
724 725
725 /* Setup the Base and Length of the Tx Descriptor Ring 726 /* Setup the Base and Length of the Tx Descriptor Ring
@@ -805,7 +806,7 @@ ixgb_setup_rx_resources(struct ixgb_adapter *adapter)
805static void 806static void
806ixgb_setup_rctl(struct ixgb_adapter *adapter) 807ixgb_setup_rctl(struct ixgb_adapter *adapter)
807{ 808{
808 uint32_t rctl; 809 u32 rctl;
809 810
810 rctl = IXGB_READ_REG(&adapter->hw, RCTL); 811 rctl = IXGB_READ_REG(&adapter->hw, RCTL);
811 812
@@ -840,12 +841,12 @@ ixgb_setup_rctl(struct ixgb_adapter *adapter)
840static void 841static void
841ixgb_configure_rx(struct ixgb_adapter *adapter) 842ixgb_configure_rx(struct ixgb_adapter *adapter)
842{ 843{
843 uint64_t rdba = adapter->rx_ring.dma; 844 u64 rdba = adapter->rx_ring.dma;
844 uint32_t rdlen = adapter->rx_ring.count * sizeof(struct ixgb_rx_desc); 845 u32 rdlen = adapter->rx_ring.count * sizeof(struct ixgb_rx_desc);
845 struct ixgb_hw *hw = &adapter->hw; 846 struct ixgb_hw *hw = &adapter->hw;
846 uint32_t rctl; 847 u32 rctl;
847 uint32_t rxcsum; 848 u32 rxcsum;
848 uint32_t rxdctl; 849 u32 rxdctl;
849 850
850 /* make sure receives are disabled while setting up the descriptors */ 851 /* make sure receives are disabled while setting up the descriptors */
851 852
@@ -881,7 +882,7 @@ ixgb_configure_rx(struct ixgb_adapter *adapter)
881 IXGB_WRITE_REG(hw, RXDCTL, rxdctl); 882 IXGB_WRITE_REG(hw, RXDCTL, rxdctl);
882 883
883 /* Enable Receive Checksum Offload for TCP and UDP */ 884 /* Enable Receive Checksum Offload for TCP and UDP */
884 if(adapter->rx_csum == TRUE) { 885 if (adapter->rx_csum) {
885 rxcsum = IXGB_READ_REG(hw, RXCSUM); 886 rxcsum = IXGB_READ_REG(hw, RXCSUM);
886 rxcsum |= IXGB_RXCSUM_TUOFL; 887 rxcsum |= IXGB_RXCSUM_TUOFL;
887 IXGB_WRITE_REG(hw, RXCSUM, rxcsum); 888 IXGB_WRITE_REG(hw, RXCSUM, rxcsum);
@@ -1078,7 +1079,7 @@ ixgb_set_multi(struct net_device *netdev)
1078 struct ixgb_adapter *adapter = netdev_priv(netdev); 1079 struct ixgb_adapter *adapter = netdev_priv(netdev);
1079 struct ixgb_hw *hw = &adapter->hw; 1080 struct ixgb_hw *hw = &adapter->hw;
1080 struct dev_mc_list *mc_ptr; 1081 struct dev_mc_list *mc_ptr;
1081 uint32_t rctl; 1082 u32 rctl;
1082 int i; 1083 int i;
1083 1084
1084 /* Check for Promiscuous and All Multicast modes */ 1085 /* Check for Promiscuous and All Multicast modes */
@@ -1098,7 +1099,7 @@ ixgb_set_multi(struct net_device *netdev)
1098 rctl |= IXGB_RCTL_MPE; 1099 rctl |= IXGB_RCTL_MPE;
1099 IXGB_WRITE_REG(hw, RCTL, rctl); 1100 IXGB_WRITE_REG(hw, RCTL, rctl);
1100 } else { 1101 } else {
1101 uint8_t mta[IXGB_MAX_NUM_MULTICAST_ADDRESSES * 1102 u8 mta[IXGB_MAX_NUM_MULTICAST_ADDRESSES *
1102 IXGB_ETH_LENGTH_OF_ADDRESS]; 1103 IXGB_ETH_LENGTH_OF_ADDRESS];
1103 1104
1104 IXGB_WRITE_REG(hw, RCTL, rctl); 1105 IXGB_WRITE_REG(hw, RCTL, rctl);
@@ -1164,7 +1165,7 @@ ixgb_watchdog(unsigned long data)
1164 } 1165 }
1165 1166
1166 /* Force detection of hung controller every watchdog period */ 1167 /* Force detection of hung controller every watchdog period */
1167 adapter->detect_tx_hung = TRUE; 1168 adapter->detect_tx_hung = true;
1168 1169
1169 /* generate an interrupt to force clean up of any stragglers */ 1170 /* generate an interrupt to force clean up of any stragglers */
1170 IXGB_WRITE_REG(&adapter->hw, ICS, IXGB_INT_TXDW); 1171 IXGB_WRITE_REG(&adapter->hw, ICS, IXGB_INT_TXDW);
@@ -1182,8 +1183,8 @@ ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb)
1182{ 1183{
1183 struct ixgb_context_desc *context_desc; 1184 struct ixgb_context_desc *context_desc;
1184 unsigned int i; 1185 unsigned int i;
1185 uint8_t ipcss, ipcso, tucss, tucso, hdr_len; 1186 u8 ipcss, ipcso, tucss, tucso, hdr_len;
1186 uint16_t ipcse, tucse, mss; 1187 u16 ipcse, tucse, mss;
1187 int err; 1188 int err;
1188 1189
1189 if (likely(skb_is_gso(skb))) { 1190 if (likely(skb_is_gso(skb))) {
@@ -1243,12 +1244,12 @@ ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb)
1243 return 0; 1244 return 0;
1244} 1245}
1245 1246
1246static boolean_t 1247static bool
1247ixgb_tx_csum(struct ixgb_adapter *adapter, struct sk_buff *skb) 1248ixgb_tx_csum(struct ixgb_adapter *adapter, struct sk_buff *skb)
1248{ 1249{
1249 struct ixgb_context_desc *context_desc; 1250 struct ixgb_context_desc *context_desc;
1250 unsigned int i; 1251 unsigned int i;
1251 uint8_t css, cso; 1252 u8 css, cso;
1252 1253
1253 if(likely(skb->ip_summed == CHECKSUM_PARTIAL)) { 1254 if(likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
1254 struct ixgb_buffer *buffer_info; 1255 struct ixgb_buffer *buffer_info;
@@ -1264,7 +1265,7 @@ ixgb_tx_csum(struct ixgb_adapter *adapter, struct sk_buff *skb)
1264 context_desc->tucso = cso; 1265 context_desc->tucso = cso;
1265 context_desc->tucse = 0; 1266 context_desc->tucse = 0;
1266 /* zero out any previously existing data in one instruction */ 1267 /* zero out any previously existing data in one instruction */
1267 *(uint32_t *)&(context_desc->ipcss) = 0; 1268 *(u32 *)&(context_desc->ipcss) = 0;
1268 context_desc->status = 0; 1269 context_desc->status = 0;
1269 context_desc->hdr_len = 0; 1270 context_desc->hdr_len = 0;
1270 context_desc->mss = 0; 1271 context_desc->mss = 0;
@@ -1275,10 +1276,10 @@ ixgb_tx_csum(struct ixgb_adapter *adapter, struct sk_buff *skb)
1275 if(++i == adapter->tx_ring.count) i = 0; 1276 if(++i == adapter->tx_ring.count) i = 0;
1276 adapter->tx_ring.next_to_use = i; 1277 adapter->tx_ring.next_to_use = i;
1277 1278
1278 return TRUE; 1279 return true;
1279 } 1280 }
1280 1281
1281 return FALSE; 1282 return false;
1282} 1283}
1283 1284
1284#define IXGB_MAX_TXD_PWR 14 1285#define IXGB_MAX_TXD_PWR 14
@@ -1371,9 +1372,9 @@ ixgb_tx_queue(struct ixgb_adapter *adapter, int count, int vlan_id,int tx_flags)
1371 struct ixgb_desc_ring *tx_ring = &adapter->tx_ring; 1372 struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
1372 struct ixgb_tx_desc *tx_desc = NULL; 1373 struct ixgb_tx_desc *tx_desc = NULL;
1373 struct ixgb_buffer *buffer_info; 1374 struct ixgb_buffer *buffer_info;
1374 uint32_t cmd_type_len = adapter->tx_cmd_type; 1375 u32 cmd_type_len = adapter->tx_cmd_type;
1375 uint8_t status = 0; 1376 u8 status = 0;
1376 uint8_t popts = 0; 1377 u8 popts = 0;
1377 unsigned int i; 1378 unsigned int i;
1378 1379
1379 if(tx_flags & IXGB_TX_FLAGS_TSO) { 1380 if(tx_flags & IXGB_TX_FLAGS_TSO) {
@@ -1464,14 +1465,18 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1464 int vlan_id = 0; 1465 int vlan_id = 0;
1465 int tso; 1466 int tso;
1466 1467
1468 if (test_bit(__IXGB_DOWN, &adapter->flags)) {
1469 dev_kfree_skb(skb);
1470 return NETDEV_TX_OK;
1471 }
1472
1467 if(skb->len <= 0) { 1473 if(skb->len <= 0) {
1468 dev_kfree_skb_any(skb); 1474 dev_kfree_skb_any(skb);
1469 return 0; 1475 return 0;
1470 } 1476 }
1471 1477
1472#ifdef NETIF_F_LLTX 1478#ifdef NETIF_F_LLTX
1473 local_irq_save(flags); 1479 if (!spin_trylock_irqsave(&adapter->tx_lock, flags)) {
1474 if (!spin_trylock(&adapter->tx_lock)) {
1475 /* Collision - tell upper layer to requeue */ 1480 /* Collision - tell upper layer to requeue */
1476 local_irq_restore(flags); 1481 local_irq_restore(flags);
1477 return NETDEV_TX_LOCKED; 1482 return NETDEV_TX_LOCKED;
@@ -1548,7 +1553,7 @@ ixgb_tx_timeout_task(struct work_struct *work)
1548 container_of(work, struct ixgb_adapter, tx_timeout_task); 1553 container_of(work, struct ixgb_adapter, tx_timeout_task);
1549 1554
1550 adapter->tx_timeout_count++; 1555 adapter->tx_timeout_count++;
1551 ixgb_down(adapter, TRUE); 1556 ixgb_down(adapter, true);
1552 ixgb_up(adapter); 1557 ixgb_up(adapter);
1553} 1558}
1554 1559
@@ -1595,7 +1600,7 @@ ixgb_change_mtu(struct net_device *netdev, int new_mtu)
1595 netdev->mtu = new_mtu; 1600 netdev->mtu = new_mtu;
1596 1601
1597 if ((old_max_frame != max_frame) && netif_running(netdev)) { 1602 if ((old_max_frame != max_frame) && netif_running(netdev)) {
1598 ixgb_down(adapter, TRUE); 1603 ixgb_down(adapter, true);
1599 ixgb_up(adapter); 1604 ixgb_up(adapter);
1600 } 1605 }
1601 1606
@@ -1745,7 +1750,7 @@ ixgb_intr(int irq, void *data)
1745 struct net_device *netdev = data; 1750 struct net_device *netdev = data;
1746 struct ixgb_adapter *adapter = netdev_priv(netdev); 1751 struct ixgb_adapter *adapter = netdev_priv(netdev);
1747 struct ixgb_hw *hw = &adapter->hw; 1752 struct ixgb_hw *hw = &adapter->hw;
1748 uint32_t icr = IXGB_READ_REG(hw, ICR); 1753 u32 icr = IXGB_READ_REG(hw, ICR);
1749#ifndef CONFIG_IXGB_NAPI 1754#ifndef CONFIG_IXGB_NAPI
1750 unsigned int i; 1755 unsigned int i;
1751#endif 1756#endif
@@ -1753,9 +1758,9 @@ ixgb_intr(int irq, void *data)
1753 if(unlikely(!icr)) 1758 if(unlikely(!icr))
1754 return IRQ_NONE; /* Not our interrupt */ 1759 return IRQ_NONE; /* Not our interrupt */
1755 1760
1756 if(unlikely(icr & (IXGB_INT_RXSEQ | IXGB_INT_LSC))) { 1761 if (unlikely(icr & (IXGB_INT_RXSEQ | IXGB_INT_LSC)))
1757 mod_timer(&adapter->watchdog_timer, jiffies); 1762 if (!test_bit(__IXGB_DOWN, &adapter->flags))
1758 } 1763 mod_timer(&adapter->watchdog_timer, jiffies);
1759 1764
1760#ifdef CONFIG_IXGB_NAPI 1765#ifdef CONFIG_IXGB_NAPI
1761 if (netif_rx_schedule_prep(netdev, &adapter->napi)) { 1766 if (netif_rx_schedule_prep(netdev, &adapter->napi)) {
@@ -1764,7 +1769,6 @@ ixgb_intr(int irq, void *data)
1764 of the posted write is intentionally left out. 1769 of the posted write is intentionally left out.
1765 */ 1770 */
1766 1771
1767 atomic_inc(&adapter->irq_sem);
1768 IXGB_WRITE_REG(&adapter->hw, IMC, ~0); 1772 IXGB_WRITE_REG(&adapter->hw, IMC, ~0);
1769 __netif_rx_schedule(netdev, &adapter->napi); 1773 __netif_rx_schedule(netdev, &adapter->napi);
1770 } 1774 }
@@ -1812,7 +1816,7 @@ ixgb_clean(struct napi_struct *napi, int budget)
1812 * @adapter: board private structure 1816 * @adapter: board private structure
1813 **/ 1817 **/
1814 1818
1815static boolean_t 1819static bool
1816ixgb_clean_tx_irq(struct ixgb_adapter *adapter) 1820ixgb_clean_tx_irq(struct ixgb_adapter *adapter)
1817{ 1821{
1818 struct ixgb_desc_ring *tx_ring = &adapter->tx_ring; 1822 struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
@@ -1820,7 +1824,7 @@ ixgb_clean_tx_irq(struct ixgb_adapter *adapter)
1820 struct ixgb_tx_desc *tx_desc, *eop_desc; 1824 struct ixgb_tx_desc *tx_desc, *eop_desc;
1821 struct ixgb_buffer *buffer_info; 1825 struct ixgb_buffer *buffer_info;
1822 unsigned int i, eop; 1826 unsigned int i, eop;
1823 boolean_t cleaned = FALSE; 1827 bool cleaned = false;
1824 1828
1825 i = tx_ring->next_to_clean; 1829 i = tx_ring->next_to_clean;
1826 eop = tx_ring->buffer_info[i].next_to_watch; 1830 eop = tx_ring->buffer_info[i].next_to_watch;
@@ -1828,7 +1832,7 @@ ixgb_clean_tx_irq(struct ixgb_adapter *adapter)
1828 1832
1829 while(eop_desc->status & IXGB_TX_DESC_STATUS_DD) { 1833 while(eop_desc->status & IXGB_TX_DESC_STATUS_DD) {
1830 1834
1831 for(cleaned = FALSE; !cleaned; ) { 1835 for (cleaned = false; !cleaned; ) {
1832 tx_desc = IXGB_TX_DESC(*tx_ring, i); 1836 tx_desc = IXGB_TX_DESC(*tx_ring, i);
1833 buffer_info = &tx_ring->buffer_info[i]; 1837 buffer_info = &tx_ring->buffer_info[i];
1834 1838
@@ -1839,7 +1843,7 @@ ixgb_clean_tx_irq(struct ixgb_adapter *adapter)
1839 1843
1840 ixgb_unmap_and_free_tx_resource(adapter, buffer_info); 1844 ixgb_unmap_and_free_tx_resource(adapter, buffer_info);
1841 1845
1842 *(uint32_t *)&(tx_desc->status) = 0; 1846 *(u32 *)&(tx_desc->status) = 0;
1843 1847
1844 cleaned = (i == eop); 1848 cleaned = (i == eop);
1845 if(++i == tx_ring->count) i = 0; 1849 if(++i == tx_ring->count) i = 0;
@@ -1862,7 +1866,7 @@ ixgb_clean_tx_irq(struct ixgb_adapter *adapter)
1862 if(adapter->detect_tx_hung) { 1866 if(adapter->detect_tx_hung) {
1863 /* detect a transmit hang in hardware, this serializes the 1867 /* detect a transmit hang in hardware, this serializes the
1864 * check with the clearing of time_stamp and movement of i */ 1868 * check with the clearing of time_stamp and movement of i */
1865 adapter->detect_tx_hung = FALSE; 1869 adapter->detect_tx_hung = false;
1866 if (tx_ring->buffer_info[eop].dma && 1870 if (tx_ring->buffer_info[eop].dma &&
1867 time_after(jiffies, tx_ring->buffer_info[eop].time_stamp + HZ) 1871 time_after(jiffies, tx_ring->buffer_info[eop].time_stamp + HZ)
1868 && !(IXGB_READ_REG(&adapter->hw, STATUS) & 1872 && !(IXGB_READ_REG(&adapter->hw, STATUS) &
@@ -1932,7 +1936,7 @@ ixgb_rx_checksum(struct ixgb_adapter *adapter,
1932 * @adapter: board private structure 1936 * @adapter: board private structure
1933 **/ 1937 **/
1934 1938
1935static boolean_t 1939static bool
1936#ifdef CONFIG_IXGB_NAPI 1940#ifdef CONFIG_IXGB_NAPI
1937ixgb_clean_rx_irq(struct ixgb_adapter *adapter, int *work_done, int work_to_do) 1941ixgb_clean_rx_irq(struct ixgb_adapter *adapter, int *work_done, int work_to_do)
1938#else 1942#else
@@ -1944,9 +1948,9 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
1944 struct pci_dev *pdev = adapter->pdev; 1948 struct pci_dev *pdev = adapter->pdev;
1945 struct ixgb_rx_desc *rx_desc, *next_rxd; 1949 struct ixgb_rx_desc *rx_desc, *next_rxd;
1946 struct ixgb_buffer *buffer_info, *next_buffer, *next2_buffer; 1950 struct ixgb_buffer *buffer_info, *next_buffer, *next2_buffer;
1947 uint32_t length; 1951 u32 length;
1948 unsigned int i, j; 1952 unsigned int i, j;
1949 boolean_t cleaned = FALSE; 1953 bool cleaned = false;
1950 1954
1951 i = rx_ring->next_to_clean; 1955 i = rx_ring->next_to_clean;
1952 rx_desc = IXGB_RX_DESC(*rx_ring, i); 1956 rx_desc = IXGB_RX_DESC(*rx_ring, i);
@@ -1980,7 +1984,7 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
1980 next_skb = next_buffer->skb; 1984 next_skb = next_buffer->skb;
1981 prefetch(next_skb); 1985 prefetch(next_skb);
1982 1986
1983 cleaned = TRUE; 1987 cleaned = true;
1984 1988
1985 pci_unmap_single(pdev, 1989 pci_unmap_single(pdev,
1986 buffer_info->dma, 1990 buffer_info->dma,
@@ -2162,7 +2166,7 @@ static void
2162ixgb_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp) 2166ixgb_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
2163{ 2167{
2164 struct ixgb_adapter *adapter = netdev_priv(netdev); 2168 struct ixgb_adapter *adapter = netdev_priv(netdev);
2165 uint32_t ctrl, rctl; 2169 u32 ctrl, rctl;
2166 2170
2167 ixgb_irq_disable(adapter); 2171 ixgb_irq_disable(adapter);
2168 adapter->vlgrp = grp; 2172 adapter->vlgrp = grp;
@@ -2193,14 +2197,16 @@ ixgb_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
2193 IXGB_WRITE_REG(&adapter->hw, RCTL, rctl); 2197 IXGB_WRITE_REG(&adapter->hw, RCTL, rctl);
2194 } 2198 }
2195 2199
2196 ixgb_irq_enable(adapter); 2200 /* don't enable interrupts unless we are UP */
2201 if (adapter->netdev->flags & IFF_UP)
2202 ixgb_irq_enable(adapter);
2197} 2203}
2198 2204
2199static void 2205static void
2200ixgb_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid) 2206ixgb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
2201{ 2207{
2202 struct ixgb_adapter *adapter = netdev_priv(netdev); 2208 struct ixgb_adapter *adapter = netdev_priv(netdev);
2203 uint32_t vfta, index; 2209 u32 vfta, index;
2204 2210
2205 /* add VID to filter table */ 2211 /* add VID to filter table */
2206 2212
@@ -2211,18 +2217,20 @@ ixgb_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid)
2211} 2217}
2212 2218
2213static void 2219static void
2214ixgb_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid) 2220ixgb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
2215{ 2221{
2216 struct ixgb_adapter *adapter = netdev_priv(netdev); 2222 struct ixgb_adapter *adapter = netdev_priv(netdev);
2217 uint32_t vfta, index; 2223 u32 vfta, index;
2218 2224
2219 ixgb_irq_disable(adapter); 2225 ixgb_irq_disable(adapter);
2220 2226
2221 vlan_group_set_device(adapter->vlgrp, vid, NULL); 2227 vlan_group_set_device(adapter->vlgrp, vid, NULL);
2222 2228
2223 ixgb_irq_enable(adapter); 2229 /* don't enable interrupts unless we are UP */
2230 if (adapter->netdev->flags & IFF_UP)
2231 ixgb_irq_enable(adapter);
2224 2232
2225 /* remove VID from filter table*/ 2233 /* remove VID from filter table */
2226 2234
2227 index = (vid >> 5) & 0x7F; 2235 index = (vid >> 5) & 0x7F;
2228 vfta = IXGB_READ_REG_ARRAY(&adapter->hw, VFTA, index); 2236 vfta = IXGB_READ_REG_ARRAY(&adapter->hw, VFTA, index);
@@ -2236,7 +2244,7 @@ ixgb_restore_vlan(struct ixgb_adapter *adapter)
2236 ixgb_vlan_rx_register(adapter->netdev, adapter->vlgrp); 2244 ixgb_vlan_rx_register(adapter->netdev, adapter->vlgrp);
2237 2245
2238 if(adapter->vlgrp) { 2246 if(adapter->vlgrp) {
2239 uint16_t vid; 2247 u16 vid;
2240 for(vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) { 2248 for(vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
2241 if(!vlan_group_get_device(adapter->vlgrp, vid)) 2249 if(!vlan_group_get_device(adapter->vlgrp, vid))
2242 continue; 2250 continue;
@@ -2277,7 +2285,7 @@ static pci_ers_result_t ixgb_io_error_detected (struct pci_dev *pdev,
2277 struct ixgb_adapter *adapter = netdev_priv(netdev); 2285 struct ixgb_adapter *adapter = netdev_priv(netdev);
2278 2286
2279 if(netif_running(netdev)) 2287 if(netif_running(netdev))
2280 ixgb_down(adapter, TRUE); 2288 ixgb_down(adapter, true);
2281 2289
2282 pci_disable_device(pdev); 2290 pci_disable_device(pdev);
2283 2291
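In ixgb_main.c the irq_sem counter is gone: ixgb_sw_init() now starts with __IXGB_DOWN set, ixgb_down() sets it before tearing anything down, and the transmit and interrupt paths simply test it. A sketch of that gating pattern is below, using a C11 atomic flag in place of set_bit()/test_bit() on adapter->flags; everything named here is a stand-in.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct adapter {
	atomic_bool down;	/* plays the role of __IXGB_DOWN in ->flags */
};

static void adapter_down(struct adapter *a)
{
	/* set the flag first so interrupt/watchdog paths stop rearming work */
	atomic_store(&a->down, true);
	/* ...then disable irqs and free resources... */
}

static void adapter_up(struct adapter *a)
{
	/* ixgb_up() clears the flag before re-enabling interrupts */
	atomic_store(&a->down, false);
	/* ...enable irqs, kick the watchdog... */
}

static bool xmit_frame(struct adapter *a)
{
	if (atomic_load(&a->down))
		return false;	/* mirrors dropping the skb while DOWN is set */
	/* ...queue the frame... */
	return true;
}

int main(void)
{
	struct adapter a;

	atomic_init(&a.down, true);	/* starts down, as in ixgb_sw_init() */
	printf("tx while down: %d\n", xmit_frame(&a));
	adapter_up(&a);
	printf("tx while up:   %d\n", xmit_frame(&a));
	adapter_down(&a);
	return 0;
}
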
diff --git a/drivers/net/ixgb/ixgb_osdep.h b/drivers/net/ixgb/ixgb_osdep.h
index 9e04a6b3ae0d..4be1b273e1b8 100644
--- a/drivers/net/ixgb/ixgb_osdep.h
+++ b/drivers/net/ixgb/ixgb_osdep.h
@@ -39,13 +39,6 @@
39#include <linux/interrupt.h> 39#include <linux/interrupt.h>
40#include <linux/sched.h> 40#include <linux/sched.h>
41 41
42typedef enum {
43#undef FALSE
44 FALSE = 0,
45#undef TRUE
46 TRUE = 1
47} boolean_t;
48
49#undef ASSERT 42#undef ASSERT
50#define ASSERT(x) if(!(x)) BUG() 43#define ASSERT(x) if(!(x)) BUG()
51#define MSGOUT(S, A, B) printk(KERN_DEBUG S "\n", A, B) 44#define MSGOUT(S, A, B) printk(KERN_DEBUG S "\n", A, B)
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index d0bf206632ca..d98113472a89 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -36,6 +36,9 @@
36#include "ixgbe_type.h" 36#include "ixgbe_type.h"
37#include "ixgbe_common.h" 37#include "ixgbe_common.h"
38 38
39#ifdef CONFIG_DCA
40#include <linux/dca.h>
41#endif
39 42
40#define IXGBE_ERR(args...) printk(KERN_ERR "ixgbe: " args) 43#define IXGBE_ERR(args...) printk(KERN_ERR "ixgbe: " args)
41 44
@@ -120,7 +123,6 @@ struct ixgbe_queue_stats {
120}; 123};
121 124
122struct ixgbe_ring { 125struct ixgbe_ring {
123 struct ixgbe_adapter *adapter; /* backlink */
124 void *desc; /* descriptor ring memory */ 126 void *desc; /* descriptor ring memory */
125 dma_addr_t dma; /* phys. address of descriptor ring */ 127 dma_addr_t dma; /* phys. address of descriptor ring */
126 unsigned int size; /* length in bytes */ 128 unsigned int size; /* length in bytes */
@@ -128,6 +130,7 @@ struct ixgbe_ring {
128 unsigned int next_to_use; 130 unsigned int next_to_use;
129 unsigned int next_to_clean; 131 unsigned int next_to_clean;
130 132
133 int queue_index; /* needed for multiqueue queue management */
131 union { 134 union {
132 struct ixgbe_tx_buffer *tx_buffer_info; 135 struct ixgbe_tx_buffer *tx_buffer_info;
133 struct ixgbe_rx_buffer *rx_buffer_info; 136 struct ixgbe_rx_buffer *rx_buffer_info;
@@ -136,8 +139,21 @@ struct ixgbe_ring {
136 u16 head; 139 u16 head;
137 u16 tail; 140 u16 tail;
138 141
142 unsigned int total_bytes;
143 unsigned int total_packets;
139 144
145 u16 reg_idx; /* holds the special value that gets the hardware register
146 * offset associated with this ring, which is different
147 * for DCE and RSS modes */
148
149#ifdef CONFIG_DCA
150 /* cpu for tx queue */
151 int cpu;
152#endif
140 struct ixgbe_queue_stats stats; 153 struct ixgbe_queue_stats stats;
154 u8 v_idx; /* maps directly to the index for this ring in the hardware
155 * vector array, can also be used for finding the bit in EICR
156 * and friends that represents the vector for this ring */
141 157
142 u32 eims_value; 158 u32 eims_value;
143 u16 itr_register; 159 u16 itr_register;
@@ -146,6 +162,33 @@ struct ixgbe_ring {
146 u16 work_limit; /* max work per interrupt */ 162 u16 work_limit; /* max work per interrupt */
147}; 163};
148 164
165#define RING_F_VMDQ 1
166#define RING_F_RSS 2
167#define IXGBE_MAX_RSS_INDICES 16
168#define IXGBE_MAX_VMDQ_INDICES 16
169struct ixgbe_ring_feature {
170 int indices;
171 int mask;
172};
173
174#define MAX_RX_QUEUES 64
175#define MAX_TX_QUEUES 32
176
177/* MAX_MSIX_Q_VECTORS of these are allocated,
178 * but we only use one per queue-specific vector.
179 */
180struct ixgbe_q_vector {
181 struct ixgbe_adapter *adapter;
182 struct napi_struct napi;
183 DECLARE_BITMAP(rxr_idx, MAX_RX_QUEUES); /* Rx ring indices */
184 DECLARE_BITMAP(txr_idx, MAX_TX_QUEUES); /* Tx ring indices */
185 u8 rxr_count; /* Rx ring count assigned to this vector */
186 u8 txr_count; /* Tx ring count assigned to this vector */
187 u8 tx_eitr;
188 u8 rx_eitr;
189 u32 eitr;
190};
191
149/* Helper macros to switch between ints/sec and what the register uses. 192/* Helper macros to switch between ints/sec and what the register uses.
150 * And yes, it's the same math going both ways. 193 * And yes, it's the same math going both ways.
151 */ 194 */
@@ -166,6 +209,14 @@ struct ixgbe_ring {
166 209
167#define IXGBE_MAX_JUMBO_FRAME_SIZE 16128 210#define IXGBE_MAX_JUMBO_FRAME_SIZE 16128
168 211
212#define OTHER_VECTOR 1
213#define NON_Q_VECTORS (OTHER_VECTOR)
214
215#define MAX_MSIX_Q_VECTORS 16
216#define MIN_MSIX_Q_VECTORS 2
217#define MAX_MSIX_COUNT (MAX_MSIX_Q_VECTORS + NON_Q_VECTORS)
218#define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NON_Q_VECTORS)
219
169/* board specific private data structure */ 220/* board specific private data structure */
170struct ixgbe_adapter { 221struct ixgbe_adapter {
171 struct timer_list watchdog_timer; 222 struct timer_list watchdog_timer;
@@ -173,10 +224,16 @@ struct ixgbe_adapter {
173 u16 bd_number; 224 u16 bd_number;
174 u16 rx_buf_len; 225 u16 rx_buf_len;
175 struct work_struct reset_task; 226 struct work_struct reset_task;
227 struct ixgbe_q_vector q_vector[MAX_MSIX_Q_VECTORS];
228 char name[MAX_MSIX_COUNT][IFNAMSIZ + 5];
229
230 /* Interrupt Throttle Rate */
231 u32 itr_setting;
232 u16 eitr_low;
233 u16 eitr_high;
176 234
177 /* TX */ 235 /* TX */
178 struct ixgbe_ring *tx_ring; /* One per active queue */ 236 struct ixgbe_ring *tx_ring; /* One per active queue */
179 struct napi_struct napi;
180 u64 restart_queue; 237 u64 restart_queue;
181 u64 lsc_int; 238 u64 lsc_int;
182 u64 hw_tso_ctxt; 239 u64 hw_tso_ctxt;
@@ -192,22 +249,27 @@ struct ixgbe_adapter {
192 u64 non_eop_descs; 249 u64 non_eop_descs;
193 int num_tx_queues; 250 int num_tx_queues;
194 int num_rx_queues; 251 int num_rx_queues;
252 int num_msix_vectors;
253 struct ixgbe_ring_feature ring_feature[3];
195 struct msix_entry *msix_entries; 254 struct msix_entry *msix_entries;
196 255
197 u64 rx_hdr_split; 256 u64 rx_hdr_split;
198 u32 alloc_rx_page_failed; 257 u32 alloc_rx_page_failed;
199 u32 alloc_rx_buff_failed; 258 u32 alloc_rx_buff_failed;
200 259
260 /* Some features need tri-state capability,
261 * thus the additional *_CAPABLE flags.
262 */
201 u32 flags; 263 u32 flags;
202#define IXGBE_FLAG_RX_CSUM_ENABLED (u32)(1) 264#define IXGBE_FLAG_RX_CSUM_ENABLED (u32)(1 << 0)
203#define IXGBE_FLAG_MSI_ENABLED (u32)(1 << 1) 265#define IXGBE_FLAG_MSI_ENABLED (u32)(1 << 1)
204#define IXGBE_FLAG_MSIX_ENABLED (u32)(1 << 2) 266#define IXGBE_FLAG_MSIX_ENABLED (u32)(1 << 2)
205#define IXGBE_FLAG_RX_PS_ENABLED (u32)(1 << 3) 267#define IXGBE_FLAG_RX_PS_ENABLED (u32)(1 << 3)
206#define IXGBE_FLAG_IN_NETPOLL (u32)(1 << 4) 268#define IXGBE_FLAG_IN_NETPOLL (u32)(1 << 4)
207 269#define IXGBE_FLAG_IMIR_ENABLED (u32)(1 << 5)
208 /* Interrupt Throttle Rate */ 270#define IXGBE_FLAG_RSS_ENABLED (u32)(1 << 6)
209 u32 rx_eitr; 271#define IXGBE_FLAG_VMDQ_ENABLED (u32)(1 << 7)
210 u32 tx_eitr; 272#define IXGBE_FLAG_DCA_ENABLED (u32)(1 << 8)
211 273
212 /* OS defined structs */ 274 /* OS defined structs */
213 struct net_device *netdev; 275 struct net_device *netdev;
@@ -218,7 +280,10 @@ struct ixgbe_adapter {
218 struct ixgbe_hw hw; 280 struct ixgbe_hw hw;
219 u16 msg_enable; 281 u16 msg_enable;
220 struct ixgbe_hw_stats stats; 282 struct ixgbe_hw_stats stats;
221 char lsc_name[IFNAMSIZ + 5]; 283
284 /* Interrupt Throttle Rate */
285 u32 rx_eitr;
286 u32 tx_eitr;
222 287
223 unsigned long state; 288 unsigned long state;
224 u64 tx_busy; 289 u64 tx_busy;
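The new ixgbe_q_vector above records the rings a vector owns as bitmaps (rxr_idx/txr_idx) plus counts, and ixgbe_configure_msix() further down walks them with find_first_bit()/find_next_bit(). The sketch below shows the same walk in plain C, with a 64-bit word and a hand-rolled scan standing in for DECLARE_BITMAP() and the kernel bitmap helpers.

#include <stdint.h>
#include <stdio.h>

#define MAX_RX_QUEUES 64

/* return the index of the next set bit at or after 'start', or 'size' if none */
static unsigned int next_bit(uint64_t map, unsigned int start, unsigned int size)
{
	for (unsigned int i = start; i < size; i++)
		if (map & (UINT64_C(1) << i))
			return i;
	return size;
}

int main(void)
{
	uint64_t rxr_idx = 0;
	unsigned int r;

	/* pretend this vector owns rx rings 1 and 5 */
	rxr_idx |= UINT64_C(1) << 1;
	rxr_idx |= UINT64_C(1) << 5;

	for (r = next_bit(rxr_idx, 0, MAX_RX_QUEUES);
	     r < MAX_RX_QUEUES;
	     r = next_bit(rxr_idx, r + 1, MAX_RX_QUEUES))
		printf("program IVAR entry for rx ring %u\n", r);

	return 0;
}
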
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index a119cbd8dbb8..4e463778bcfd 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -246,13 +246,26 @@ static int ixgbe_set_tx_csum(struct net_device *netdev, u32 data)
246 246
247static int ixgbe_set_tso(struct net_device *netdev, u32 data) 247static int ixgbe_set_tso(struct net_device *netdev, u32 data)
248{ 248{
249
250 if (data) { 249 if (data) {
251 netdev->features |= NETIF_F_TSO; 250 netdev->features |= NETIF_F_TSO;
252 netdev->features |= NETIF_F_TSO6; 251 netdev->features |= NETIF_F_TSO6;
253 } else { 252 } else {
253#ifdef CONFIG_NETDEVICES_MULTIQUEUE
254 struct ixgbe_adapter *adapter = netdev_priv(netdev);
255 int i;
256#endif
257 netif_stop_queue(netdev);
258#ifdef CONFIG_NETDEVICES_MULTIQUEUE
259 for (i = 0; i < adapter->num_tx_queues; i++)
260 netif_stop_subqueue(netdev, i);
261#endif
254 netdev->features &= ~NETIF_F_TSO; 262 netdev->features &= ~NETIF_F_TSO;
255 netdev->features &= ~NETIF_F_TSO6; 263 netdev->features &= ~NETIF_F_TSO6;
264#ifdef CONFIG_NETDEVICES_MULTIQUEUE
265 for (i = 0; i < adapter->num_tx_queues; i++)
266 netif_start_subqueue(netdev, i);
267#endif
268 netif_start_queue(netdev);
256 } 269 }
257 return 0; 270 return 0;
258} 271}
@@ -873,13 +886,13 @@ static int ixgbe_get_coalesce(struct net_device *netdev,
873{ 886{
874 struct ixgbe_adapter *adapter = netdev_priv(netdev); 887 struct ixgbe_adapter *adapter = netdev_priv(netdev);
875 888
876 if (adapter->rx_eitr == 0) 889 if (adapter->rx_eitr < IXGBE_MIN_ITR_USECS)
877 ec->rx_coalesce_usecs = 0; 890 ec->rx_coalesce_usecs = adapter->rx_eitr;
878 else 891 else
879 ec->rx_coalesce_usecs = 1000000 / adapter->rx_eitr; 892 ec->rx_coalesce_usecs = 1000000 / adapter->rx_eitr;
880 893
881 if (adapter->tx_eitr == 0) 894 if (adapter->tx_eitr < IXGBE_MIN_ITR_USECS)
882 ec->tx_coalesce_usecs = 0; 895 ec->tx_coalesce_usecs = adapter->tx_eitr;
883 else 896 else
884 ec->tx_coalesce_usecs = 1000000 / adapter->tx_eitr; 897 ec->tx_coalesce_usecs = 1000000 / adapter->tx_eitr;
885 898
@@ -893,22 +906,26 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
893 struct ixgbe_adapter *adapter = netdev_priv(netdev); 906 struct ixgbe_adapter *adapter = netdev_priv(netdev);
894 907
895 if ((ec->rx_coalesce_usecs > IXGBE_MAX_ITR_USECS) || 908 if ((ec->rx_coalesce_usecs > IXGBE_MAX_ITR_USECS) ||
896 ((ec->rx_coalesce_usecs > 0) && 909 ((ec->rx_coalesce_usecs != 0) &&
910 (ec->rx_coalesce_usecs != 1) &&
911 (ec->rx_coalesce_usecs != 3) &&
897 (ec->rx_coalesce_usecs < IXGBE_MIN_ITR_USECS))) 912 (ec->rx_coalesce_usecs < IXGBE_MIN_ITR_USECS)))
898 return -EINVAL; 913 return -EINVAL;
899 if ((ec->tx_coalesce_usecs > IXGBE_MAX_ITR_USECS) || 914 if ((ec->tx_coalesce_usecs > IXGBE_MAX_ITR_USECS) ||
900 ((ec->tx_coalesce_usecs > 0) && 915 ((ec->tx_coalesce_usecs != 0) &&
916 (ec->tx_coalesce_usecs != 1) &&
917 (ec->tx_coalesce_usecs != 3) &&
901 (ec->tx_coalesce_usecs < IXGBE_MIN_ITR_USECS))) 918 (ec->tx_coalesce_usecs < IXGBE_MIN_ITR_USECS)))
902 return -EINVAL; 919 return -EINVAL;
903 920
904 /* convert to rate of irq's per second */ 921 /* convert to rate of irq's per second */
905 if (ec->rx_coalesce_usecs == 0) 922 if (ec->rx_coalesce_usecs < IXGBE_MIN_ITR_USECS)
906 adapter->rx_eitr = 0; 923 adapter->rx_eitr = ec->rx_coalesce_usecs;
907 else 924 else
908 adapter->rx_eitr = (1000000 / ec->rx_coalesce_usecs); 925 adapter->rx_eitr = (1000000 / ec->rx_coalesce_usecs);
909 926
910 if (ec->tx_coalesce_usecs == 0) 927 if (ec->tx_coalesce_usecs < IXGBE_MIN_ITR_USECS)
 911 adapter->tx_eitr = 0; 928 adapter->tx_eitr = ec->tx_coalesce_usecs;
912 else 929 else
913 adapter->tx_eitr = (1000000 / ec->tx_coalesce_usecs); 930 adapter->tx_eitr = (1000000 / ec->tx_coalesce_usecs);
914 931
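The reworked coalesce handlers above store sub-minimum settings verbatim (0, 1 and 3 are accepted and appear to select the driver's dynamic throttling modes, an inference not spelled out in this hunk) and convert everything else from microseconds into an interrupts-per-second EITR value. A userspace sketch of that mapping; the MIN/MAX constants are illustrative placeholders, not the real IXGBE_MIN/MAX_ITR_USECS values.

#include <stdbool.h>
#include <stdio.h>

#define ITR_MIN_USECS 10	/* placeholder for IXGBE_MIN_ITR_USECS */
#define ITR_MAX_USECS 10000	/* placeholder for IXGBE_MAX_ITR_USECS */

static bool coalesce_usecs_valid(unsigned int usecs)
{
	if (usecs > ITR_MAX_USECS)
		return false;
	/* 0, 1 and 3 pass through as special settings */
	if (usecs != 0 && usecs != 1 && usecs != 3 && usecs < ITR_MIN_USECS)
		return false;
	return true;
}

static unsigned int usecs_to_eitr(unsigned int usecs)
{
	if (usecs < ITR_MIN_USECS)
		return usecs;		/* special value is stored as-is */
	return 1000000 / usecs;		/* otherwise: interrupts per second */
}

int main(void)
{
	const unsigned int samples[] = { 0, 1, 3, 5, 125, 20000 };

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		bool ok = coalesce_usecs_valid(samples[i]);

		printf("%5u usecs -> valid=%d eitr=%u\n",
		       samples[i], ok, ok ? usecs_to_eitr(samples[i]) : 0);
	}
	return 0;
}
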
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index c2095ce531c9..cb371a8c24a7 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -48,7 +48,7 @@ char ixgbe_driver_name[] = "ixgbe";
48static const char ixgbe_driver_string[] = 48static const char ixgbe_driver_string[] =
49 "Intel(R) 10 Gigabit PCI Express Network Driver"; 49 "Intel(R) 10 Gigabit PCI Express Network Driver";
50 50
51#define DRV_VERSION "1.1.18" 51#define DRV_VERSION "1.3.18-k2"
52const char ixgbe_driver_version[] = DRV_VERSION; 52const char ixgbe_driver_version[] = DRV_VERSION;
53static const char ixgbe_copyright[] = 53static const char ixgbe_copyright[] =
54 "Copyright (c) 1999-2007 Intel Corporation."; 54 "Copyright (c) 1999-2007 Intel Corporation.";
@@ -80,6 +80,16 @@ static struct pci_device_id ixgbe_pci_tbl[] = {
80}; 80};
81MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl); 81MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl);
82 82
83#ifdef CONFIG_DCA
84static int ixgbe_notify_dca(struct notifier_block *, unsigned long event,
85 void *p);
86static struct notifier_block dca_notifier = {
87 .notifier_call = ixgbe_notify_dca,
88 .next = NULL,
89 .priority = 0
90};
91#endif
92
83MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); 93MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
84MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver"); 94MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
85MODULE_LICENSE("GPL"); 95MODULE_LICENSE("GPL");
@@ -256,26 +266,125 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_adapter *adapter,
256 * sees the new next_to_clean. 266 * sees the new next_to_clean.
257 */ 267 */
258 smp_mb(); 268 smp_mb();
269#ifdef CONFIG_NETDEVICES_MULTIQUEUE
270 if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
271 !test_bit(__IXGBE_DOWN, &adapter->state)) {
272 netif_wake_subqueue(netdev, tx_ring->queue_index);
273 adapter->restart_queue++;
274 }
275#else
259 if (netif_queue_stopped(netdev) && 276 if (netif_queue_stopped(netdev) &&
260 !test_bit(__IXGBE_DOWN, &adapter->state)) { 277 !test_bit(__IXGBE_DOWN, &adapter->state)) {
261 netif_wake_queue(netdev); 278 netif_wake_queue(netdev);
262 adapter->restart_queue++; 279 adapter->restart_queue++;
263 } 280 }
281#endif
264 } 282 }
265 283
266 if (adapter->detect_tx_hung) 284 if (adapter->detect_tx_hung)
267 if (ixgbe_check_tx_hang(adapter, tx_ring, eop, eop_desc)) 285 if (ixgbe_check_tx_hang(adapter, tx_ring, eop, eop_desc))
286#ifdef CONFIG_NETDEVICES_MULTIQUEUE
287 netif_stop_subqueue(netdev, tx_ring->queue_index);
288#else
268 netif_stop_queue(netdev); 289 netif_stop_queue(netdev);
290#endif
269 291
270 if (total_tx_packets >= tx_ring->work_limit) 292 if (total_tx_packets >= tx_ring->work_limit)
271 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, tx_ring->eims_value); 293 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, tx_ring->eims_value);
272 294
295 tx_ring->total_bytes += total_tx_bytes;
296 tx_ring->total_packets += total_tx_packets;
273 adapter->net_stats.tx_bytes += total_tx_bytes; 297 adapter->net_stats.tx_bytes += total_tx_bytes;
274 adapter->net_stats.tx_packets += total_tx_packets; 298 adapter->net_stats.tx_packets += total_tx_packets;
275 cleaned = total_tx_packets ? true : false; 299 cleaned = total_tx_packets ? true : false;
276 return cleaned; 300 return cleaned;
277} 301}
278 302
303#ifdef CONFIG_DCA
304static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
305 struct ixgbe_ring *rxr)
306{
307 u32 rxctrl;
308 int cpu = get_cpu();
309 int q = rxr - adapter->rx_ring;
310
311 if (rxr->cpu != cpu) {
312 rxctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q));
313 rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK;
314 rxctrl |= dca_get_tag(cpu);
315 rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN;
316 rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN;
317 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q), rxctrl);
318 rxr->cpu = cpu;
319 }
320 put_cpu();
321}
322
323static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
324 struct ixgbe_ring *txr)
325{
326 u32 txctrl;
327 int cpu = get_cpu();
328 int q = txr - adapter->tx_ring;
329
330 if (txr->cpu != cpu) {
331 txctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_TXCTRL(q));
332 txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK;
333 txctrl |= dca_get_tag(cpu);
334 txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
335 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_TXCTRL(q), txctrl);
336 txr->cpu = cpu;
337 }
338 put_cpu();
339}
340
341static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
342{
343 int i;
344
345 if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED))
346 return;
347
348 for (i = 0; i < adapter->num_tx_queues; i++) {
349 adapter->tx_ring[i].cpu = -1;
350 ixgbe_update_tx_dca(adapter, &adapter->tx_ring[i]);
351 }
352 for (i = 0; i < adapter->num_rx_queues; i++) {
353 adapter->rx_ring[i].cpu = -1;
354 ixgbe_update_rx_dca(adapter, &adapter->rx_ring[i]);
355 }
356}
357
358static int __ixgbe_notify_dca(struct device *dev, void *data)
359{
360 struct net_device *netdev = dev_get_drvdata(dev);
361 struct ixgbe_adapter *adapter = netdev_priv(netdev);
362 unsigned long event = *(unsigned long *)data;
363
364 switch (event) {
365 case DCA_PROVIDER_ADD:
366 adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
367 /* Always use CB2 mode, difference is masked
368 * in the CB driver. */
369 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2);
370 if (dca_add_requester(dev) == 0) {
371 ixgbe_setup_dca(adapter);
372 break;
373 }
374 /* Fall Through since DCA is disabled. */
375 case DCA_PROVIDER_REMOVE:
376 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
377 dca_remove_requester(dev);
378 adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
379 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1);
380 }
381 break;
382 }
383
384 return 0;
385}
386
387#endif /* CONFIG_DCA */
279/** 388/**
280 * ixgbe_receive_skb - Send a completed packet up the stack 389 * ixgbe_receive_skb - Send a completed packet up the stack
281 * @adapter: board private structure 390 * @adapter: board private structure
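The DCA hook added in the hunk above enables IXGBE_FLAG_DCA_ENABLED on DCA_PROVIDER_ADD and, when dca_add_requester() fails, deliberately falls through into the DCA_PROVIDER_REMOVE handling to undo the enable. That control flow is easy to miss in the diff, so here it is as a standalone userspace sketch with the dca_* calls mocked out and the event names redefined locally.

#include <stdbool.h>
#include <stdio.h>

enum dca_event { DCA_PROVIDER_ADD, DCA_PROVIDER_REMOVE };

struct adapter {
	bool dca_enabled;	/* stands in for IXGBE_FLAG_DCA_ENABLED */
};

static int mock_dca_add_requester(void)
{
	return -1;		/* pretend registration fails */
}

static void notify_dca(struct adapter *a, enum dca_event event)
{
	switch (event) {
	case DCA_PROVIDER_ADD:
		a->dca_enabled = true;	/* the driver also writes IXGBE_DCA_CTRL */
		if (mock_dca_add_requester() == 0)
			break;		/* requester registered: stay enabled */
		/* fall through: registration failed, treat it as a remove */
	case DCA_PROVIDER_REMOVE:
		if (a->dca_enabled) {
			/* the driver calls dca_remove_requester() here */
			a->dca_enabled = false;
		}
		break;
	}
}

int main(void)
{
	struct adapter a = { .dca_enabled = false };

	notify_dca(&a, DCA_PROVIDER_ADD);
	printf("dca enabled after failed add: %d\n", a.dca_enabled);
	return 0;
}
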
@@ -556,10 +665,15 @@ next_desc:
556 adapter->net_stats.rx_bytes += total_rx_bytes; 665 adapter->net_stats.rx_bytes += total_rx_bytes;
557 adapter->net_stats.rx_packets += total_rx_packets; 666 adapter->net_stats.rx_packets += total_rx_packets;
558 667
668 rx_ring->total_packets += total_rx_packets;
669 rx_ring->total_bytes += total_rx_bytes;
670 adapter->net_stats.rx_bytes += total_rx_bytes;
671 adapter->net_stats.rx_packets += total_rx_packets;
672
559 return cleaned; 673 return cleaned;
560} 674}
561 675
562#define IXGBE_MAX_INTR 10 676static int ixgbe_clean_rxonly(struct napi_struct *, int);
563/** 677/**
564 * ixgbe_configure_msix - Configure MSI-X hardware 678 * ixgbe_configure_msix - Configure MSI-X hardware
565 * @adapter: board private structure 679 * @adapter: board private structure
@@ -569,28 +683,195 @@ next_desc:
569 **/ 683 **/
570static void ixgbe_configure_msix(struct ixgbe_adapter *adapter) 684static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
571{ 685{
572 int i, vector = 0; 686 struct ixgbe_q_vector *q_vector;
687 int i, j, q_vectors, v_idx, r_idx;
688 u32 mask;
573 689
574 for (i = 0; i < adapter->num_tx_queues; i++) { 690 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
575 ixgbe_set_ivar(adapter, IXGBE_IVAR_TX_QUEUE(i),
576 IXGBE_MSIX_VECTOR(vector));
577 writel(EITR_INTS_PER_SEC_TO_REG(adapter->tx_eitr),
578 adapter->hw.hw_addr + adapter->tx_ring[i].itr_register);
579 vector++;
580 }
581 691
582 for (i = 0; i < adapter->num_rx_queues; i++) { 692 /* Populate the IVAR table and set the ITR values to the
583 ixgbe_set_ivar(adapter, IXGBE_IVAR_RX_QUEUE(i), 693 * corresponding register.
584 IXGBE_MSIX_VECTOR(vector)); 694 */
585 writel(EITR_INTS_PER_SEC_TO_REG(adapter->rx_eitr), 695 for (v_idx = 0; v_idx < q_vectors; v_idx++) {
586 adapter->hw.hw_addr + adapter->rx_ring[i].itr_register); 696 q_vector = &adapter->q_vector[v_idx];
587 vector++; 697 /* XXX for_each_bit(...) */
698 r_idx = find_first_bit(q_vector->rxr_idx,
699 adapter->num_rx_queues);
700
701 for (i = 0; i < q_vector->rxr_count; i++) {
702 j = adapter->rx_ring[r_idx].reg_idx;
703 ixgbe_set_ivar(adapter, IXGBE_IVAR_RX_QUEUE(j), v_idx);
704 r_idx = find_next_bit(q_vector->rxr_idx,
705 adapter->num_rx_queues,
706 r_idx + 1);
707 }
708 r_idx = find_first_bit(q_vector->txr_idx,
709 adapter->num_tx_queues);
710
711 for (i = 0; i < q_vector->txr_count; i++) {
712 j = adapter->tx_ring[r_idx].reg_idx;
713 ixgbe_set_ivar(adapter, IXGBE_IVAR_TX_QUEUE(j), v_idx);
714 r_idx = find_next_bit(q_vector->txr_idx,
715 adapter->num_tx_queues,
716 r_idx + 1);
717 }
718
719 /* if this is a tx only vector use half the irq (tx) rate */
720 if (q_vector->txr_count && !q_vector->rxr_count)
721 q_vector->eitr = adapter->tx_eitr;
722 else
723 /* rx only or mixed */
724 q_vector->eitr = adapter->rx_eitr;
725
726 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx),
727 EITR_INTS_PER_SEC_TO_REG(q_vector->eitr));
588 } 728 }
589 729
590 vector = adapter->num_tx_queues + adapter->num_rx_queues; 730 ixgbe_set_ivar(adapter, IXGBE_IVAR_OTHER_CAUSES_INDEX, v_idx);
591 ixgbe_set_ivar(adapter, IXGBE_IVAR_OTHER_CAUSES_INDEX, 731 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950);
592 IXGBE_MSIX_VECTOR(vector)); 732
593 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(vector), 1950); 733 /* set up to autoclear timer, lsc, and the vectors */
734 mask = IXGBE_EIMS_ENABLE_MASK;
735 mask &= ~IXGBE_EIMS_OTHER;
736 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask);
737}
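
The new MSI-X configuration no longer assumes one ring per vector: each q_vector carries rxr_idx/txr_idx bitmaps, and the setup walks the set bits with find_first_bit()/find_next_bit() to program one IVAR entry per owned ring. Below is a self-contained sketch of that bitmap walk, with local helpers standing in for the kernel bit-search routines.

#include <stdio.h>
#include <stdint.h>

/* local stand-ins for the kernel's find_first_bit()/find_next_bit() */
static int find_next_set(uint64_t map, int nbits, int start)
{
    for (int i = start; i < nbits; i++)
        if (map & (1ULL << i))
            return i;
    return nbits;               /* "not found" */
}

static int find_first_set(uint64_t map, int nbits)
{
    return find_next_set(map, nbits, 0);
}

int main(void)
{
    int num_rx_queues = 8;
    uint64_t rxr_idx = (1ULL << 1) | (1ULL << 3) | (1ULL << 6); /* rings owned by this vector */

    for (int r = find_first_set(rxr_idx, num_rx_queues);
         r < num_rx_queues;
         r = find_next_set(rxr_idx, num_rx_queues, r + 1))
        printf("program IVAR entry for rx ring %d\n", r);

    return 0;
}

The driver runs the same walk over txr_idx, then picks the tx or rx EITR default depending on whether the vector ended up tx-only or carrying rx traffic.
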
738
739enum latency_range {
740 lowest_latency = 0,
741 low_latency = 1,
742 bulk_latency = 2,
743 latency_invalid = 255
744};
745
746/**
747 * ixgbe_update_itr - update the dynamic ITR value based on statistics
748 * @adapter: pointer to adapter
749 * @eitr: eitr setting (ints per sec) to give last timeslice
750 * @itr_setting: current throttle rate in ints/second
751 * @packets: the number of packets during this measurement interval
752 * @bytes: the number of bytes during this measurement interval
753 *
754 * Stores a new ITR value based on packets and byte
755 * counts during the last interrupt. The advantage of per interrupt
756 * computation is faster updates and more accurate ITR for the current
757 * traffic pattern. Constants in this function were computed
758 * based on theoretical maximum wire speed and thresholds were set based
759 * on testing data as well as attempting to minimize response time
760 * while increasing bulk throughput.
761 * this functionality is controlled by the InterruptThrottleRate module
762 * parameter (see ixgbe_param.c)
763 **/
764static u8 ixgbe_update_itr(struct ixgbe_adapter *adapter,
765 u32 eitr, u8 itr_setting,
766 int packets, int bytes)
767{
768 unsigned int retval = itr_setting;
769 u32 timepassed_us;
770 u64 bytes_perint;
771
772 if (packets == 0)
773 goto update_itr_done;
774
775
776 /* simple throttlerate management
777 * 0-20MB/s lowest (100000 ints/s)
778 * 20-100MB/s low (20000 ints/s)
779 * 100-1249MB/s bulk (8000 ints/s)
780 */
781 /* what was last interrupt timeslice? */
782 timepassed_us = 1000000/eitr;
783 bytes_perint = bytes / timepassed_us; /* bytes/usec */
784
785 switch (itr_setting) {
786 case lowest_latency:
787 if (bytes_perint > adapter->eitr_low)
788 retval = low_latency;
789 break;
790 case low_latency:
791 if (bytes_perint > adapter->eitr_high)
792 retval = bulk_latency;
793 else if (bytes_perint <= adapter->eitr_low)
794 retval = lowest_latency;
795 break;
796 case bulk_latency:
797 if (bytes_perint <= adapter->eitr_high)
798 retval = low_latency;
799 break;
800 }
801
802update_itr_done:
803 return retval;
804}
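
ixgbe_update_itr() turns the traffic seen in the last interrupt interval into a latency class: the interval length is 1000000/eitr microseconds, and bytes divided by that interval gives roughly MB/s, which is compared against the low/high thresholds to step between lowest, low and bulk latency. A stand-alone sketch of that classification follows; the threshold values 20 and 100 are assumptions taken from the bands in the comment above (the driver reads adapter->eitr_low/eitr_high).

#include <stdio.h>
#include <stdint.h>

enum latency { LOWEST, LOW, BULK };

static enum latency classify(enum latency cur, uint32_t eitr_ints_per_sec,
                             uint64_t bytes, int packets)
{
    const uint64_t eitr_low = 20, eitr_high = 100;  /* assumed thresholds, ~MB/s */
    uint32_t timepassed_us;
    uint64_t bytes_perint;

    if (packets == 0 || eitr_ints_per_sec == 0)
        return cur;

    timepassed_us = 1000000 / eitr_ints_per_sec;    /* length of last timeslice */
    bytes_perint = bytes / timepassed_us;           /* bytes per microsecond */

    switch (cur) {
    case LOWEST:
        if (bytes_perint > eitr_low)
            return LOW;
        break;
    case LOW:
        if (bytes_perint > eitr_high)
            return BULK;
        else if (bytes_perint <= eitr_low)
            return LOWEST;
        break;
    case BULK:
        if (bytes_perint <= eitr_high)
            return LOW;
        break;
    }
    return cur;
}

int main(void)
{
    /* 150000 bytes in a 50us slice (20000 ints/s) is bulk-class traffic */
    printf("new class: %d\n", classify(LOW, 20000, 150000, 100));
    return 0;
}
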
805
806static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
807{
808 struct ixgbe_adapter *adapter = q_vector->adapter;
809 struct ixgbe_hw *hw = &adapter->hw;
810 u32 new_itr;
811 u8 current_itr, ret_itr;
812 int i, r_idx, v_idx = ((void *)q_vector - (void *)(adapter->q_vector)) /
813 sizeof(struct ixgbe_q_vector);
814 struct ixgbe_ring *rx_ring, *tx_ring;
815
816 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
817 for (i = 0; i < q_vector->txr_count; i++) {
818 tx_ring = &(adapter->tx_ring[r_idx]);
819 ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
820 q_vector->tx_eitr,
821 tx_ring->total_packets,
822 tx_ring->total_bytes);
823 /* if the result for this queue would decrease interrupt
824 * rate for this vector then use that result */
825 q_vector->tx_eitr = ((q_vector->tx_eitr > ret_itr) ?
826 q_vector->tx_eitr - 1 : ret_itr);
827 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
828 r_idx + 1);
829 }
830
831 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
832 for (i = 0; i < q_vector->rxr_count; i++) {
833 rx_ring = &(adapter->rx_ring[r_idx]);
834 ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
835 q_vector->rx_eitr,
836 rx_ring->total_packets,
837 rx_ring->total_bytes);
838 /* if the result for this queue would decrease interrupt
839 * rate for this vector then use that result */
840 q_vector->rx_eitr = ((q_vector->rx_eitr > ret_itr) ?
841 q_vector->rx_eitr - 1 : ret_itr);
842 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
843 r_idx + 1);
844 }
845
846 current_itr = max(q_vector->rx_eitr, q_vector->tx_eitr);
847
848 switch (current_itr) {
849 /* counts and packets in update_itr are dependent on these numbers */
850 case lowest_latency:
851 new_itr = 100000;
852 break;
853 case low_latency:
854 new_itr = 20000; /* aka hwitr = ~200 */
855 break;
856 case bulk_latency:
857 default:
858 new_itr = 8000;
859 break;
860 }
861
862 if (new_itr != q_vector->eitr) {
863 u32 itr_reg;
864 /* do an exponential smoothing */
865 new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
866 q_vector->eitr = new_itr;
867 itr_reg = EITR_INTS_PER_SEC_TO_REG(new_itr);
868 /* must write high and low 16 bits to reset counter */
869 DPRINTK(TX_ERR, DEBUG, "writing eitr(%d): %08X\n", v_idx,
870 itr_reg);
871 IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg | (itr_reg)<<16);
872 }
873
874 return;
594} 875}
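
Once every queue on the vector has voted, ixgbe_set_itr_msix() maps the worst class back to a target interrupt rate and moves the vector's EITR 10% of the way there each pass, then writes the encoded value into both 16-bit halves of the register (per the code's comment, both halves must be written to reset the counter). Here is a sketch of just that arithmetic; ints_per_sec_to_reg() below is a placeholder, not the driver's EITR_INTS_PER_SEC_TO_REG() formula.

#include <stdio.h>
#include <stdint.h>

/* placeholder encoding, NOT the real EITR_INTS_PER_SEC_TO_REG() */
static uint32_t ints_per_sec_to_reg(uint32_t ints_per_sec)
{
    return ints_per_sec & 0xFFFF;
}

int main(void)
{
    uint32_t eitr = 8000;       /* current rate (bulk) */
    uint32_t target = 20000;    /* classifier asked for low latency */

    if (target != eitr) {
        /* exponential smoothing: keep 90% of the old rate, take 10% of the new */
        eitr = (eitr * 90) / 100 + (target * 10) / 100;

        uint32_t itr_reg = ints_per_sec_to_reg(eitr);
        uint32_t regval = itr_reg | (itr_reg << 16);    /* both halves written */

        printf("eitr=%u reg=0x%08x\n", (unsigned)eitr, (unsigned)regval);
    }
    return 0;
}
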
595 876
596static irqreturn_t ixgbe_msix_lsc(int irq, void *data) 877static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
@@ -614,153 +895,302 @@ static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
614 895
615static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data) 896static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data)
616{ 897{
617 struct ixgbe_ring *txr = data; 898 struct ixgbe_q_vector *q_vector = data;
618 struct ixgbe_adapter *adapter = txr->adapter; 899 struct ixgbe_adapter *adapter = q_vector->adapter;
900 struct ixgbe_ring *txr;
901 int i, r_idx;
619 902
620 ixgbe_clean_tx_irq(adapter, txr); 903 if (!q_vector->txr_count)
904 return IRQ_HANDLED;
905
906 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
907 for (i = 0; i < q_vector->txr_count; i++) {
908 txr = &(adapter->tx_ring[r_idx]);
909#ifdef CONFIG_DCA
910 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
911 ixgbe_update_tx_dca(adapter, txr);
912#endif
913 txr->total_bytes = 0;
914 txr->total_packets = 0;
915 ixgbe_clean_tx_irq(adapter, txr);
916 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
917 r_idx + 1);
918 }
621 919
622 return IRQ_HANDLED; 920 return IRQ_HANDLED;
623} 921}
624 922
923/**
924 * ixgbe_msix_clean_rx - single unshared vector rx clean (all queues)
925 * @irq: unused
926 * @data: pointer to our q_vector struct for this interrupt vector
927 **/
625static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data) 928static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
626{ 929{
627 struct ixgbe_ring *rxr = data; 930 struct ixgbe_q_vector *q_vector = data;
628 struct ixgbe_adapter *adapter = rxr->adapter; 931 struct ixgbe_adapter *adapter = q_vector->adapter;
932 struct ixgbe_ring *rxr;
933 int r_idx;
934
935 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
936 if (!q_vector->rxr_count)
937 return IRQ_HANDLED;
938
939 rxr = &(adapter->rx_ring[r_idx]);
940 /* disable interrupts on this vector only */
941 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, rxr->v_idx);
942 rxr->total_bytes = 0;
943 rxr->total_packets = 0;
944 netif_rx_schedule(adapter->netdev, &q_vector->napi);
945
946 return IRQ_HANDLED;
947}
948
949static irqreturn_t ixgbe_msix_clean_many(int irq, void *data)
950{
951 ixgbe_msix_clean_rx(irq, data);
952 ixgbe_msix_clean_tx(irq, data);
629 953
630 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, rxr->eims_value);
631 netif_rx_schedule(adapter->netdev, &adapter->napi);
632 return IRQ_HANDLED; 954 return IRQ_HANDLED;
633} 955}
634 956
957/**
958 * ixgbe_clean_rxonly - msix (aka one shot) rx clean routine
959 * @napi: napi struct with our devices info in it
960 * @budget: amount of work driver is allowed to do this pass, in packets
961 *
962 **/
635static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget) 963static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
636{ 964{
637 struct ixgbe_adapter *adapter = container_of(napi, 965 struct ixgbe_q_vector *q_vector =
638 struct ixgbe_adapter, napi); 966 container_of(napi, struct ixgbe_q_vector, napi);
639 struct net_device *netdev = adapter->netdev; 967 struct ixgbe_adapter *adapter = q_vector->adapter;
968 struct ixgbe_ring *rxr;
640 int work_done = 0; 969 int work_done = 0;
641 struct ixgbe_ring *rxr = adapter->rx_ring; 970 long r_idx;
642 971
643 /* Keep link state information with original netdev */ 972 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
644 if (!netif_carrier_ok(netdev)) 973 rxr = &(adapter->rx_ring[r_idx]);
645 goto quit_polling; 974#ifdef CONFIG_DCA
975 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
976 ixgbe_update_rx_dca(adapter, rxr);
977#endif
646 978
647 ixgbe_clean_rx_irq(adapter, rxr, &work_done, budget); 979 ixgbe_clean_rx_irq(adapter, rxr, &work_done, budget);
648 980
649 /* If no Tx and not enough Rx work done, exit the polling mode */ 981 /* If all Rx work done, exit the polling mode */
650 if ((work_done < budget) || !netif_running(netdev)) { 982 if (work_done < budget) {
651quit_polling: 983 netif_rx_complete(adapter->netdev, napi);
652 netif_rx_complete(netdev, napi); 984 if (adapter->rx_eitr < IXGBE_MIN_ITR_USECS)
985 ixgbe_set_itr_msix(q_vector);
653 if (!test_bit(__IXGBE_DOWN, &adapter->state)) 986 if (!test_bit(__IXGBE_DOWN, &adapter->state))
654 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, 987 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, rxr->v_idx);
655 rxr->eims_value);
656 } 988 }
657 989
658 return work_done; 990 return work_done;
659} 991}
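
ixgbe_clean_rxonly() follows the standard NAPI contract: clean at most `budget` packets, and only when the ring runs dry before the budget is spent does the vector leave polling mode and re-arm its interrupt (plus an optional ITR update when dynamic throttling is on). A toy user-space model of that contract, with plain variables standing in for the ring and the EIMS write:

#include <stdio.h>

static int pending = 37;        /* packets waiting on the pretend ring */
static int irq_enabled;

static int clean_rx(int budget)
{
    int done = 0;

    while (done < budget && pending > 0) {
        pending--;
        done++;
    }
    return done;
}

static int poll(int budget)
{
    int work_done = clean_rx(budget);

    /* ring ran dry before the budget: leave polling, re-arm the interrupt */
    if (work_done < budget)
        irq_enabled = 1;        /* stands in for netif_rx_complete() + EIMS write */
    return work_done;
}

int main(void)
{
    int done = poll(64);

    printf("cleaned %d packets, irq re-enabled: %d\n", done, irq_enabled);
    return 0;
}
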
660 992
993static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx,
994 int r_idx)
995{
996 a->q_vector[v_idx].adapter = a;
997 set_bit(r_idx, a->q_vector[v_idx].rxr_idx);
998 a->q_vector[v_idx].rxr_count++;
999 a->rx_ring[r_idx].v_idx = 1 << v_idx;
1000}
1001
1002static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
1003 int r_idx)
1004{
1005 a->q_vector[v_idx].adapter = a;
1006 set_bit(r_idx, a->q_vector[v_idx].txr_idx);
1007 a->q_vector[v_idx].txr_count++;
1008 a->tx_ring[r_idx].v_idx = 1 << v_idx;
1009}
1010
661/** 1011/**
662 * ixgbe_setup_msix - Initialize MSI-X interrupts 1012 * ixgbe_map_rings_to_vectors - Maps descriptor rings to vectors
1013 * @adapter: board private structure to initialize
1014 * @vectors: allotted vector count for descriptor rings
663 * 1015 *
664 * ixgbe_setup_msix allocates MSI-X vectors and requests 1016 * This function maps descriptor rings to the queue-specific vectors
665 * interrutps from the kernel. 1017 * we were allotted through the MSI-X enabling code. Ideally, we'd have
1018 * one vector per ring/queue, but on a constrained vector budget, we
1019 * group the rings as "efficiently" as possible. You would add new
1020 * mapping configurations in here.
666 **/ 1021 **/
667static int ixgbe_setup_msix(struct ixgbe_adapter *adapter) 1022static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter,
668{ 1023 int vectors)
669 struct net_device *netdev = adapter->netdev; 1024{
670 int i, int_vector = 0, err = 0; 1025 int v_start = 0;
671 int max_msix_count; 1026 int rxr_idx = 0, txr_idx = 0;
1027 int rxr_remaining = adapter->num_rx_queues;
1028 int txr_remaining = adapter->num_tx_queues;
1029 int i, j;
1030 int rqpv, tqpv;
1031 int err = 0;
1032
1033 /* No mapping required if MSI-X is disabled. */
1034 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
1035 goto out;
672 1036
673 /* +1 for the LSC interrupt */ 1037 /*
674 max_msix_count = adapter->num_rx_queues + adapter->num_tx_queues + 1; 1038 * The ideal configuration...
675 adapter->msix_entries = kcalloc(max_msix_count, 1039 * We have enough vectors to map one per queue.
676 sizeof(struct msix_entry), GFP_KERNEL); 1040 */
677 if (!adapter->msix_entries) 1041 if (vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
678 return -ENOMEM; 1042 for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
1043 map_vector_to_rxq(adapter, v_start, rxr_idx);
679 1044
680 for (i = 0; i < max_msix_count; i++) 1045 for (; txr_idx < txr_remaining; v_start++, txr_idx++)
681 adapter->msix_entries[i].entry = i; 1046 map_vector_to_txq(adapter, v_start, txr_idx);
682 1047
683 err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
684 max_msix_count);
685 if (err)
686 goto out; 1048 goto out;
1049 }
687 1050
688 for (i = 0; i < adapter->num_tx_queues; i++) { 1051 /*
689 sprintf(adapter->tx_ring[i].name, "%s-tx%d", netdev->name, i); 1052 * If we don't have enough vectors for a 1-to-1
690 err = request_irq(adapter->msix_entries[int_vector].vector, 1053 * mapping, we'll have to group them so there are
691 &ixgbe_msix_clean_tx, 1054 * multiple queues per vector.
692 0, 1055 */
693 adapter->tx_ring[i].name, 1056 /* Re-adjusting *qpv takes care of the remainder. */
694 &(adapter->tx_ring[i])); 1057 for (i = v_start; i < vectors; i++) {
695 if (err) { 1058 rqpv = DIV_ROUND_UP(rxr_remaining, vectors - i);
696 DPRINTK(PROBE, ERR, 1059 for (j = 0; j < rqpv; j++) {
697 "request_irq failed for MSIX interrupt " 1060 map_vector_to_rxq(adapter, i, rxr_idx);
698 "Error: %d\n", err); 1061 rxr_idx++;
699 goto release_irqs; 1062 rxr_remaining--;
1063 }
1064 }
1065 for (i = v_start; i < vectors; i++) {
1066 tqpv = DIV_ROUND_UP(txr_remaining, vectors - i);
1067 for (j = 0; j < tqpv; j++) {
1068 map_vector_to_txq(adapter, i, txr_idx);
1069 txr_idx++;
1070 txr_remaining--;
700 } 1071 }
701 adapter->tx_ring[i].eims_value =
702 (1 << IXGBE_MSIX_VECTOR(int_vector));
703 adapter->tx_ring[i].itr_register = IXGBE_EITR(int_vector);
704 int_vector++;
705 } 1072 }
706 1073
707 for (i = 0; i < adapter->num_rx_queues; i++) { 1074out:
708 if (strlen(netdev->name) < (IFNAMSIZ - 5)) 1075 return err;
709 sprintf(adapter->rx_ring[i].name, 1076}
710 "%s-rx%d", netdev->name, i); 1077
711 else 1078/**
712 memcpy(adapter->rx_ring[i].name, 1079 * ixgbe_request_msix_irqs - Initialize MSI-X interrupts
713 netdev->name, IFNAMSIZ); 1080 * @adapter: board private structure
714 err = request_irq(adapter->msix_entries[int_vector].vector, 1081 *
715 &ixgbe_msix_clean_rx, 0, 1082 * ixgbe_request_msix_irqs allocates MSI-X vectors and requests
716 adapter->rx_ring[i].name, 1083 * interrupts from the kernel.
717 &(adapter->rx_ring[i])); 1084 **/
1085static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
1086{
1087 struct net_device *netdev = adapter->netdev;
1088 irqreturn_t (*handler)(int, void *);
1089 int i, vector, q_vectors, err;
1090
1091 /* Decrement for Other and TCP Timer vectors */
1092 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1093
1094 /* Map the Tx/Rx rings to the vectors we were allotted. */
1095 err = ixgbe_map_rings_to_vectors(adapter, q_vectors);
1096 if (err)
1097 goto out;
1098
1099#define SET_HANDLER(_v) ((!(_v)->rxr_count) ? &ixgbe_msix_clean_tx : \
1100 (!(_v)->txr_count) ? &ixgbe_msix_clean_rx : \
1101 &ixgbe_msix_clean_many)
1102 for (vector = 0; vector < q_vectors; vector++) {
1103 handler = SET_HANDLER(&adapter->q_vector[vector]);
1104 sprintf(adapter->name[vector], "%s:v%d-%s",
1105 netdev->name, vector,
1106 (handler == &ixgbe_msix_clean_rx) ? "Rx" :
1107 ((handler == &ixgbe_msix_clean_tx) ? "Tx" : "TxRx"));
1108 err = request_irq(adapter->msix_entries[vector].vector,
1109 handler, 0, adapter->name[vector],
1110 &(adapter->q_vector[vector]));
718 if (err) { 1111 if (err) {
719 DPRINTK(PROBE, ERR, 1112 DPRINTK(PROBE, ERR,
720 "request_irq failed for MSIX interrupt " 1113 "request_irq failed for MSIX interrupt "
721 "Error: %d\n", err); 1114 "Error: %d\n", err);
722 goto release_irqs; 1115 goto free_queue_irqs;
723 } 1116 }
724
725 adapter->rx_ring[i].eims_value =
726 (1 << IXGBE_MSIX_VECTOR(int_vector));
727 adapter->rx_ring[i].itr_register = IXGBE_EITR(int_vector);
728 int_vector++;
729 } 1117 }
730 1118
731 sprintf(adapter->lsc_name, "%s-lsc", netdev->name); 1119 sprintf(adapter->name[vector], "%s:lsc", netdev->name);
732 err = request_irq(adapter->msix_entries[int_vector].vector, 1120 err = request_irq(adapter->msix_entries[vector].vector,
733 &ixgbe_msix_lsc, 0, adapter->lsc_name, netdev); 1121 &ixgbe_msix_lsc, 0, adapter->name[vector], netdev);
734 if (err) { 1122 if (err) {
735 DPRINTK(PROBE, ERR, 1123 DPRINTK(PROBE, ERR,
736 "request_irq for msix_lsc failed: %d\n", err); 1124 "request_irq for msix_lsc failed: %d\n", err);
737 goto release_irqs; 1125 goto free_queue_irqs;
738 } 1126 }
739 1127
740 /* FIXME: implement netif_napi_remove() instead */
741 adapter->napi.poll = ixgbe_clean_rxonly;
742 adapter->flags |= IXGBE_FLAG_MSIX_ENABLED;
743 return 0; 1128 return 0;
744 1129
745release_irqs: 1130free_queue_irqs:
746 int_vector--; 1131 for (i = vector - 1; i >= 0; i--)
747 for (; int_vector >= adapter->num_tx_queues; int_vector--) 1132 free_irq(adapter->msix_entries[--vector].vector,
748 free_irq(adapter->msix_entries[int_vector].vector, 1133 &(adapter->q_vector[i]));
749 &(adapter->rx_ring[int_vector - 1134 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
750 adapter->num_tx_queues])); 1135 pci_disable_msix(adapter->pdev);
751
752 for (; int_vector >= 0; int_vector--)
753 free_irq(adapter->msix_entries[int_vector].vector,
754 &(adapter->tx_ring[int_vector]));
755out:
756 kfree(adapter->msix_entries); 1136 kfree(adapter->msix_entries);
757 adapter->msix_entries = NULL; 1137 adapter->msix_entries = NULL;
758 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; 1138out:
759 return err; 1139 return err;
760} 1140}
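
When MSI-X grants fewer vectors than there are rings, ixgbe_map_rings_to_vectors() above hands each remaining vector DIV_ROUND_UP(remaining rings, remaining vectors) rings, so the remainder is absorbed by the earlier vectors and the load stays roughly balanced. A small model of that grouping:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

int main(void)
{
    int vectors = 3, num_rx_queues = 8;
    int rxr_remaining = num_rx_queues, rxr_idx = 0;

    for (int v = 0; v < vectors; v++) {
        int rqpv = DIV_ROUND_UP(rxr_remaining, vectors - v);

        printf("vector %d gets %d rx ring(s):", v, rqpv);
        for (int j = 0; j < rqpv; j++) {
            printf(" %d", rxr_idx++);
            rxr_remaining--;
        }
        printf("\n");
    }
    return 0;
}

With 8 rings over 3 vectors this assigns 3, 3 and 2 rings per vector; the driver runs the same loop a second time for the Tx rings.
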
761 1141
1142static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
1143{
1144 struct ixgbe_hw *hw = &adapter->hw;
1145 struct ixgbe_q_vector *q_vector = adapter->q_vector;
1146 u8 current_itr;
1147 u32 new_itr = q_vector->eitr;
1148 struct ixgbe_ring *rx_ring = &adapter->rx_ring[0];
1149 struct ixgbe_ring *tx_ring = &adapter->tx_ring[0];
1150
1151 q_vector->tx_eitr = ixgbe_update_itr(adapter, new_itr,
1152 q_vector->tx_eitr,
1153 tx_ring->total_packets,
1154 tx_ring->total_bytes);
1155 q_vector->rx_eitr = ixgbe_update_itr(adapter, new_itr,
1156 q_vector->rx_eitr,
1157 rx_ring->total_packets,
1158 rx_ring->total_bytes);
1159
1160 current_itr = max(q_vector->rx_eitr, q_vector->tx_eitr);
1161
1162 switch (current_itr) {
1163 /* counts and packets in update_itr are dependent on these numbers */
1164 case lowest_latency:
1165 new_itr = 100000;
1166 break;
1167 case low_latency:
1168 new_itr = 20000; /* aka hwitr = ~200 */
1169 break;
1170 case bulk_latency:
1171 new_itr = 8000;
1172 break;
1173 default:
1174 break;
1175 }
1176
1177 if (new_itr != q_vector->eitr) {
1178 u32 itr_reg;
1179 /* do an exponential smoothing */
1180 new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
1181 q_vector->eitr = new_itr;
1182 itr_reg = EITR_INTS_PER_SEC_TO_REG(new_itr);
1183 /* must write high and low 16 bits to reset counter */
1184 IXGBE_WRITE_REG(hw, IXGBE_EITR(0), itr_reg | (itr_reg)<<16);
1185 }
1186
1187 return;
1188}
1189
1190static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter);
1191
762/** 1192/**
763 * ixgbe_intr - Interrupt Handler 1193 * ixgbe_intr - legacy mode Interrupt Handler
764 * @irq: interrupt number 1194 * @irq: interrupt number
765 * @data: pointer to a network interface device structure 1195 * @data: pointer to a network interface device structure
766 * @pt_regs: CPU registers structure 1196 * @pt_regs: CPU registers structure
@@ -772,8 +1202,10 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
772 struct ixgbe_hw *hw = &adapter->hw; 1202 struct ixgbe_hw *hw = &adapter->hw;
773 u32 eicr; 1203 u32 eicr;
774 1204
775 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
776 1205
1206 /* for NAPI, using EIAM to auto-mask tx/rx interrupt bits on read
 1207	 * therefore no explicit interrupt disable is necessary */
1208 eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
777 if (!eicr) 1209 if (!eicr)
778 return IRQ_NONE; /* Not our interrupt */ 1210 return IRQ_NONE; /* Not our interrupt */
779 1211
@@ -782,16 +1214,33 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
782 if (!test_bit(__IXGBE_DOWN, &adapter->state)) 1214 if (!test_bit(__IXGBE_DOWN, &adapter->state))
783 mod_timer(&adapter->watchdog_timer, jiffies); 1215 mod_timer(&adapter->watchdog_timer, jiffies);
784 } 1216 }
785 if (netif_rx_schedule_prep(netdev, &adapter->napi)) { 1217
786 /* Disable interrupts and register for poll. The flush of the 1218
787 * posted write is intentionally left out. */ 1219 if (netif_rx_schedule_prep(netdev, &adapter->q_vector[0].napi)) {
788 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0); 1220 adapter->tx_ring[0].total_packets = 0;
789 __netif_rx_schedule(netdev, &adapter->napi); 1221 adapter->tx_ring[0].total_bytes = 0;
1222 adapter->rx_ring[0].total_packets = 0;
1223 adapter->rx_ring[0].total_bytes = 0;
1224 /* would disable interrupts here but EIAM disabled it */
1225 __netif_rx_schedule(netdev, &adapter->q_vector[0].napi);
790 } 1226 }
791 1227
792 return IRQ_HANDLED; 1228 return IRQ_HANDLED;
793} 1229}
794 1230
1231static inline void ixgbe_reset_q_vectors(struct ixgbe_adapter *adapter)
1232{
1233 int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1234
1235 for (i = 0; i < q_vectors; i++) {
1236 struct ixgbe_q_vector *q_vector = &adapter->q_vector[i];
1237 bitmap_zero(q_vector->rxr_idx, MAX_RX_QUEUES);
1238 bitmap_zero(q_vector->txr_idx, MAX_TX_QUEUES);
1239 q_vector->rxr_count = 0;
1240 q_vector->txr_count = 0;
1241 }
1242}
1243
795/** 1244/**
796 * ixgbe_request_irq - initialize interrupts 1245 * ixgbe_request_irq - initialize interrupts
797 * @adapter: board private structure 1246 * @adapter: board private structure
@@ -799,40 +1248,24 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
799 * Attempts to configure interrupts using the best available 1248 * Attempts to configure interrupts using the best available
800 * capabilities of the hardware and kernel. 1249 * capabilities of the hardware and kernel.
801 **/ 1250 **/
802static int ixgbe_request_irq(struct ixgbe_adapter *adapter, u32 *num_rx_queues) 1251static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
803{ 1252{
804 struct net_device *netdev = adapter->netdev; 1253 struct net_device *netdev = adapter->netdev;
805 int flags, err; 1254 int err;
806 irq_handler_t handler = ixgbe_intr;
807
808 flags = IRQF_SHARED;
809
810 err = ixgbe_setup_msix(adapter);
811 if (!err)
812 goto request_done;
813
814 /*
815 * if we can't do MSI-X, fall through and try MSI
816 * No need to reallocate memory since we're decreasing the number of
817 * queues. We just won't use the other ones, also it is freed correctly
818 * on ixgbe_remove.
819 */
820 *num_rx_queues = 1;
821 1255
822 /* do MSI */ 1256 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
823 err = pci_enable_msi(adapter->pdev); 1257 err = ixgbe_request_msix_irqs(adapter);
824 if (!err) { 1258 } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
825 adapter->flags |= IXGBE_FLAG_MSI_ENABLED; 1259 err = request_irq(adapter->pdev->irq, &ixgbe_intr, 0,
826 flags &= ~IRQF_SHARED; 1260 netdev->name, netdev);
827 handler = &ixgbe_intr; 1261 } else {
1262 err = request_irq(adapter->pdev->irq, &ixgbe_intr, IRQF_SHARED,
1263 netdev->name, netdev);
828 } 1264 }
829 1265
830 err = request_irq(adapter->pdev->irq, handler, flags,
831 netdev->name, netdev);
832 if (err) 1266 if (err)
833 DPRINTK(PROBE, ERR, "request_irq failed, Error %d\n", err); 1267 DPRINTK(PROBE, ERR, "request_irq failed, Error %d\n", err);
834 1268
835request_done:
836 return err; 1269 return err;
837} 1270}
838 1271
@@ -841,28 +1274,22 @@ static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
841 struct net_device *netdev = adapter->netdev; 1274 struct net_device *netdev = adapter->netdev;
842 1275
843 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { 1276 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
844 int i; 1277 int i, q_vectors;
845 1278
846 for (i = 0; i < adapter->num_tx_queues; i++) 1279 q_vectors = adapter->num_msix_vectors;
847 free_irq(adapter->msix_entries[i].vector, 1280
848 &(adapter->tx_ring[i])); 1281 i = q_vectors - 1;
849 for (i = 0; i < adapter->num_rx_queues; i++)
850 free_irq(adapter->msix_entries[i +
851 adapter->num_tx_queues].vector,
852 &(adapter->rx_ring[i]));
853 i = adapter->num_rx_queues + adapter->num_tx_queues;
854 free_irq(adapter->msix_entries[i].vector, netdev); 1282 free_irq(adapter->msix_entries[i].vector, netdev);
855 pci_disable_msix(adapter->pdev);
856 kfree(adapter->msix_entries);
857 adapter->msix_entries = NULL;
858 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
859 return;
860 }
861 1283
862 free_irq(adapter->pdev->irq, netdev); 1284 i--;
863 if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) { 1285 for (; i >= 0; i--) {
864 pci_disable_msi(adapter->pdev); 1286 free_irq(adapter->msix_entries[i].vector,
865 adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED; 1287 &(adapter->q_vector[i]));
1288 }
1289
1290 ixgbe_reset_q_vectors(adapter);
1291 } else {
1292 free_irq(adapter->pdev->irq, netdev);
866 } 1293 }
867} 1294}
868 1295
@@ -874,7 +1301,13 @@ static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
874{ 1301{
875 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0); 1302 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
876 IXGBE_WRITE_FLUSH(&adapter->hw); 1303 IXGBE_WRITE_FLUSH(&adapter->hw);
877 synchronize_irq(adapter->pdev->irq); 1304 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
1305 int i;
1306 for (i = 0; i < adapter->num_msix_vectors; i++)
1307 synchronize_irq(adapter->msix_entries[i].vector);
1308 } else {
1309 synchronize_irq(adapter->pdev->irq);
1310 }
878} 1311}
879 1312
880/** 1313/**
@@ -883,12 +1316,9 @@ static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
883 **/ 1316 **/
884static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter) 1317static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter)
885{ 1318{
886 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) 1319 u32 mask;
887 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, 1320 mask = IXGBE_EIMS_ENABLE_MASK;
888 (IXGBE_EIMS_ENABLE_MASK & 1321 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
889 ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC)));
890 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS,
891 IXGBE_EIMS_ENABLE_MASK);
892 IXGBE_WRITE_FLUSH(&adapter->hw); 1322 IXGBE_WRITE_FLUSH(&adapter->hw);
893} 1323}
894 1324
@@ -898,20 +1328,18 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter)
898 **/ 1328 **/
899static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter) 1329static void ixgbe_configure_msi_and_legacy(struct ixgbe_adapter *adapter)
900{ 1330{
901 int i;
902 struct ixgbe_hw *hw = &adapter->hw; 1331 struct ixgbe_hw *hw = &adapter->hw;
903 1332
904 if (adapter->rx_eitr) 1333 IXGBE_WRITE_REG(hw, IXGBE_EITR(0),
905 IXGBE_WRITE_REG(hw, IXGBE_EITR(0), 1334 EITR_INTS_PER_SEC_TO_REG(adapter->rx_eitr));
906 EITR_INTS_PER_SEC_TO_REG(adapter->rx_eitr));
907
908 /* for re-triggering the interrupt in non-NAPI mode */
909 adapter->rx_ring[0].eims_value = (1 << IXGBE_MSIX_VECTOR(0));
910 adapter->tx_ring[0].eims_value = (1 << IXGBE_MSIX_VECTOR(0));
911 1335
912 ixgbe_set_ivar(adapter, IXGBE_IVAR_RX_QUEUE(0), 0); 1336 ixgbe_set_ivar(adapter, IXGBE_IVAR_RX_QUEUE(0), 0);
913 for (i = 0; i < adapter->num_tx_queues; i++) 1337 ixgbe_set_ivar(adapter, IXGBE_IVAR_TX_QUEUE(0), 0);
914 ixgbe_set_ivar(adapter, IXGBE_IVAR_TX_QUEUE(i), i); 1338
1339 map_vector_to_rxq(adapter, 0, 0);
1340 map_vector_to_txq(adapter, 0, 0);
1341
1342 DPRINTK(HW, INFO, "Legacy interrupt IVAR setup done\n");
915} 1343}
916 1344
917/** 1345/**
@@ -924,23 +1352,29 @@ static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
924{ 1352{
925 u64 tdba; 1353 u64 tdba;
926 struct ixgbe_hw *hw = &adapter->hw; 1354 struct ixgbe_hw *hw = &adapter->hw;
927 u32 i, tdlen; 1355 u32 i, j, tdlen, txctrl;
928 1356
929 /* Setup the HW Tx Head and Tail descriptor pointers */ 1357 /* Setup the HW Tx Head and Tail descriptor pointers */
930 for (i = 0; i < adapter->num_tx_queues; i++) { 1358 for (i = 0; i < adapter->num_tx_queues; i++) {
1359 j = adapter->tx_ring[i].reg_idx;
931 tdba = adapter->tx_ring[i].dma; 1360 tdba = adapter->tx_ring[i].dma;
932 tdlen = adapter->tx_ring[i].count * 1361 tdlen = adapter->tx_ring[i].count *
933 sizeof(union ixgbe_adv_tx_desc); 1362 sizeof(union ixgbe_adv_tx_desc);
934 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(i), (tdba & DMA_32BIT_MASK)); 1363 IXGBE_WRITE_REG(hw, IXGBE_TDBAL(j),
935 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(i), (tdba >> 32)); 1364 (tdba & DMA_32BIT_MASK));
936 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(i), tdlen); 1365 IXGBE_WRITE_REG(hw, IXGBE_TDBAH(j), (tdba >> 32));
937 IXGBE_WRITE_REG(hw, IXGBE_TDH(i), 0); 1366 IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j), tdlen);
938 IXGBE_WRITE_REG(hw, IXGBE_TDT(i), 0); 1367 IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
939 adapter->tx_ring[i].head = IXGBE_TDH(i); 1368 IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
940 adapter->tx_ring[i].tail = IXGBE_TDT(i); 1369 adapter->tx_ring[i].head = IXGBE_TDH(j);
1370 adapter->tx_ring[i].tail = IXGBE_TDT(j);
1371 /* Disable Tx Head Writeback RO bit, since this hoses
1372 * bookkeeping if things aren't delivered in order.
1373 */
1374 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
1375 txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
1376 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), txctrl);
941 } 1377 }
942
943 IXGBE_WRITE_REG(hw, IXGBE_TIPG, IXGBE_TIPG_FIBER_DEFAULT);
944} 1378}
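
Each ring's descriptor base is a 64-bit DMA address, but TDBAL/TDBAH are 32-bit registers, so the Tx setup above masks off the low word and shifts down the high word. The split itself is trivial; a two-line sketch with an example address:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint64_t tdba = 0x0000000123456000ULL;              /* example descriptor base */
    uint32_t tdbal = (uint32_t)(tdba & 0xFFFFFFFFULL);  /* low 32 bits -> TDBAL */
    uint32_t tdbah = (uint32_t)(tdba >> 32);            /* high 32 bits -> TDBAH */

    printf("TDBAL=0x%08x TDBAH=0x%08x\n", (unsigned)tdbal, (unsigned)tdbah);
    return 0;
}
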
945 1379
946#define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \ 1380#define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
@@ -959,13 +1393,12 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
959 struct ixgbe_hw *hw = &adapter->hw; 1393 struct ixgbe_hw *hw = &adapter->hw;
960 struct net_device *netdev = adapter->netdev; 1394 struct net_device *netdev = adapter->netdev;
961 int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; 1395 int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
1396 int i, j;
962 u32 rdlen, rxctrl, rxcsum; 1397 u32 rdlen, rxctrl, rxcsum;
963 u32 random[10]; 1398 u32 random[10];
964 u32 reta, mrqc;
965 int i;
966 u32 fctrl, hlreg0; 1399 u32 fctrl, hlreg0;
967 u32 srrctl;
968 u32 pages; 1400 u32 pages;
1401 u32 reta = 0, mrqc, srrctl;
969 1402
970 /* Decide whether to use packet split mode or not */ 1403 /* Decide whether to use packet split mode or not */
971 if (netdev->mtu > ETH_DATA_LEN) 1404 if (netdev->mtu > ETH_DATA_LEN)
@@ -985,6 +1418,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
985 1418
986 fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL); 1419 fctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
987 fctrl |= IXGBE_FCTRL_BAM; 1420 fctrl |= IXGBE_FCTRL_BAM;
1421 fctrl |= IXGBE_FCTRL_DPF; /* discard pause frames when FC enabled */
988 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl); 1422 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, fctrl);
989 1423
990 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); 1424 hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
@@ -1036,37 +1470,23 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
1036 adapter->rx_ring[i].tail = IXGBE_RDT(i); 1470 adapter->rx_ring[i].tail = IXGBE_RDT(i);
1037 } 1471 }
1038 1472
1039 if (adapter->num_rx_queues > 1) { 1473 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
1040 /* Random 40bytes used as random key in RSS hash function */
1041 get_random_bytes(&random[0], 40);
1042
1043 switch (adapter->num_rx_queues) {
1044 case 8:
1045 case 4:
1046 /* Bits [3:0] in each byte refers the Rx queue no */
1047 reta = 0x00010203;
1048 break;
1049 case 2:
1050 reta = 0x00010001;
1051 break;
1052 default:
1053 reta = 0x00000000;
1054 break;
1055 }
1056
1057 /* Fill out redirection table */ 1474 /* Fill out redirection table */
1058 for (i = 0; i < 32; i++) { 1475 for (i = 0, j = 0; i < 128; i++, j++) {
1059 IXGBE_WRITE_REG_ARRAY(hw, IXGBE_RETA(0), i, reta); 1476 if (j == adapter->ring_feature[RING_F_RSS].indices)
1060 if (adapter->num_rx_queues > 4) { 1477 j = 0;
1061 i++; 1478 /* reta = 4-byte sliding window of
1062 IXGBE_WRITE_REG_ARRAY(hw, IXGBE_RETA(0), i, 1479 * 0x00..(indices-1)(indices-1)00..etc. */
1063 0x04050607); 1480 reta = (reta << 8) | (j * 0x11);
1064 } 1481 if ((i & 3) == 3)
1482 IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta);
1065 } 1483 }
1066 1484
1067 /* Fill out hash function seeds */ 1485 /* Fill out hash function seeds */
1486 /* XXX use a random constant here to glue certain flows */
1487 get_random_bytes(&random[0], 40);
1068 for (i = 0; i < 10; i++) 1488 for (i = 0; i < 10; i++)
1069 IXGBE_WRITE_REG_ARRAY(hw, IXGBE_RSSRK(0), i, random[i]); 1489 IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), random[i]);
1070 1490
1071 mrqc = IXGBE_MRQC_RSSEN 1491 mrqc = IXGBE_MRQC_RSSEN
1072 /* Perform hash on these packet types */ 1492 /* Perform hash on these packet types */
@@ -1080,26 +1500,23 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
1080 | IXGBE_MRQC_RSS_FIELD_IPV6_UDP 1500 | IXGBE_MRQC_RSS_FIELD_IPV6_UDP
1081 | IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP; 1501 | IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP;
1082 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); 1502 IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
1503 }
1504
1505 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
1083 1506
1084 /* Multiqueue and packet checksumming are mutually exclusive. */ 1507 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED ||
1085 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM); 1508 adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED) {
1509 /* Disable indicating checksum in descriptor, enables
1510 * RSS hash */
1086 rxcsum |= IXGBE_RXCSUM_PCSD; 1511 rxcsum |= IXGBE_RXCSUM_PCSD;
1087 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
1088 } else {
1089 /* Enable Receive Checksum Offload for TCP and UDP */
1090 rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
1091 if (adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED) {
1092 /* Enable IPv4 payload checksum for UDP fragments
1093 * Must be used in conjunction with packet-split. */
1094 rxcsum |= IXGBE_RXCSUM_IPPCSE;
1095 } else {
1096 /* don't need to clear IPPCSE as it defaults to 0 */
1097 }
1098 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
1099 } 1512 }
1100 /* Enable Receives */ 1513 if (!(rxcsum & IXGBE_RXCSUM_PCSD)) {
1101 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl); 1514 /* Enable IPv4 payload checksum for UDP fragments
1102 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); 1515 * if PCSD is not set */
1516 rxcsum |= IXGBE_RXCSUM_IPPCSE;
1517 }
1518
1519 IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum);
1103} 1520}
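
The comment in the Rx setup describes the redirection table as a "4-byte sliding window": 128 one-byte entries cycle through the enabled RSS queue indices (each byte is j * 0x11, putting the index in both nibbles), and every fourth entry flushes the accumulated 32-bit value into the next RETA register. The sketch below reproduces that fill into an array instead of the hardware; with 4 queues the first register comes out as 0x00112233.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    int indices = 4;            /* RSS queues in use */
    uint32_t reta = 0, reta_regs[32];

    for (int i = 0, j = 0; i < 128; i++, j++) {
        if (j == indices)
            j = 0;
        /* shift the window left one byte, append the next queue index */
        reta = (reta << 8) | (uint32_t)(j * 0x11);
        if ((i & 3) == 3)                   /* every 4th entry: flush a register */
            reta_regs[i >> 2] = reta;
    }

    printf("RETA[0] = 0x%08x\n", (unsigned)reta_regs[0]);
    return 0;
}
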
1104 1521
1105static void ixgbe_vlan_rx_register(struct net_device *netdev, 1522static void ixgbe_vlan_rx_register(struct net_device *netdev,
@@ -1219,6 +1636,42 @@ static void ixgbe_set_multi(struct net_device *netdev)
1219 1636
1220} 1637}
1221 1638
1639static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
1640{
1641 int q_idx;
1642 struct ixgbe_q_vector *q_vector;
1643 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1644
1645 /* legacy and MSI only use one vector */
1646 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
1647 q_vectors = 1;
1648
1649 for (q_idx = 0; q_idx < q_vectors; q_idx++) {
1650 q_vector = &adapter->q_vector[q_idx];
1651 if (!q_vector->rxr_count)
1652 continue;
1653 napi_enable(&q_vector->napi);
1654 }
1655}
1656
1657static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
1658{
1659 int q_idx;
1660 struct ixgbe_q_vector *q_vector;
1661 int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1662
1663 /* legacy and MSI only use one vector */
1664 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
1665 q_vectors = 1;
1666
1667 for (q_idx = 0; q_idx < q_vectors; q_idx++) {
1668 q_vector = &adapter->q_vector[q_idx];
1669 if (!q_vector->rxr_count)
1670 continue;
1671 napi_disable(&q_vector->napi);
1672 }
1673}
1674
1222static void ixgbe_configure(struct ixgbe_adapter *adapter) 1675static void ixgbe_configure(struct ixgbe_adapter *adapter)
1223{ 1676{
1224 struct net_device *netdev = adapter->netdev; 1677 struct net_device *netdev = adapter->netdev;
@@ -1238,30 +1691,35 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
1238static int ixgbe_up_complete(struct ixgbe_adapter *adapter) 1691static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
1239{ 1692{
1240 struct net_device *netdev = adapter->netdev; 1693 struct net_device *netdev = adapter->netdev;
1241 int i;
1242 u32 gpie = 0;
1243 struct ixgbe_hw *hw = &adapter->hw; 1694 struct ixgbe_hw *hw = &adapter->hw;
1244 u32 txdctl, rxdctl, mhadd; 1695 int i, j = 0;
1245 int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; 1696 int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
1697 u32 txdctl, rxdctl, mhadd;
1698 u32 gpie;
1246 1699
1247 ixgbe_get_hw_control(adapter); 1700 ixgbe_get_hw_control(adapter);
1248 1701
1249 if (adapter->flags & (IXGBE_FLAG_MSIX_ENABLED | 1702 if ((adapter->flags & IXGBE_FLAG_MSIX_ENABLED) ||
1250 IXGBE_FLAG_MSI_ENABLED)) { 1703 (adapter->flags & IXGBE_FLAG_MSI_ENABLED)) {
1251 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { 1704 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
1252 gpie = (IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_EIAME | 1705 gpie = (IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_EIAME |
1253 IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD); 1706 IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD);
1254 } else { 1707 } else {
1255 /* MSI only */ 1708 /* MSI only */
1256 gpie = (IXGBE_GPIE_EIAME | 1709 gpie = 0;
1257 IXGBE_GPIE_PBA_SUPPORT);
1258 } 1710 }
1259 IXGBE_WRITE_REG(&adapter->hw, IXGBE_GPIE, gpie); 1711 /* XXX: to interrupt immediately for EICS writes, enable this */
1260 gpie = IXGBE_READ_REG(&adapter->hw, IXGBE_GPIE); 1712 /* gpie |= IXGBE_GPIE_EIMEN; */
1713 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
1261 } 1714 }
1262 1715
1263 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD); 1716 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
1717 /* legacy interrupts, use EIAM to auto-mask when reading EICR,
1718 * specifically only auto mask tx and rx interrupts */
1719 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
1720 }
1264 1721
1722 mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
1265 if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) { 1723 if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) {
1266 mhadd &= ~IXGBE_MHADD_MFS_MASK; 1724 mhadd &= ~IXGBE_MHADD_MFS_MASK;
1267 mhadd |= max_frame << IXGBE_MHADD_MFS_SHIFT; 1725 mhadd |= max_frame << IXGBE_MHADD_MFS_SHIFT;
@@ -1270,15 +1728,21 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
1270 } 1728 }
1271 1729
1272 for (i = 0; i < adapter->num_tx_queues; i++) { 1730 for (i = 0; i < adapter->num_tx_queues; i++) {
1273 txdctl = IXGBE_READ_REG(&adapter->hw, IXGBE_TXDCTL(i)); 1731 j = adapter->tx_ring[i].reg_idx;
1732 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
1274 txdctl |= IXGBE_TXDCTL_ENABLE; 1733 txdctl |= IXGBE_TXDCTL_ENABLE;
1275 IXGBE_WRITE_REG(&adapter->hw, IXGBE_TXDCTL(i), txdctl); 1734 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
1276 } 1735 }
1277 1736
1278 for (i = 0; i < adapter->num_rx_queues; i++) { 1737 for (i = 0; i < adapter->num_rx_queues; i++) {
1279 rxdctl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXDCTL(i)); 1738 j = adapter->rx_ring[i].reg_idx;
1739 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
1740 /* enable PTHRESH=32 descriptors (half the internal cache)
1741 * and HTHRESH=0 descriptors (to minimize latency on fetch),
1742 * this also removes a pesky rx_no_buffer_count increment */
1743 rxdctl |= 0x0020;
1280 rxdctl |= IXGBE_RXDCTL_ENABLE; 1744 rxdctl |= IXGBE_RXDCTL_ENABLE;
1281 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXDCTL(i), rxdctl); 1745 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), rxdctl);
1282 } 1746 }
1283 /* enable all receives */ 1747 /* enable all receives */
1284 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); 1748 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
@@ -1291,7 +1755,11 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
1291 ixgbe_configure_msi_and_legacy(adapter); 1755 ixgbe_configure_msi_and_legacy(adapter);
1292 1756
1293 clear_bit(__IXGBE_DOWN, &adapter->state); 1757 clear_bit(__IXGBE_DOWN, &adapter->state);
1294 napi_enable(&adapter->napi); 1758 ixgbe_napi_enable_all(adapter);
1759
1760 /* clear any pending interrupts, may auto mask */
1761 IXGBE_READ_REG(hw, IXGBE_EICR);
1762
1295 ixgbe_irq_enable(adapter); 1763 ixgbe_irq_enable(adapter);
1296 1764
1297 /* bring the link up in the watchdog, this could race with our first 1765 /* bring the link up in the watchdog, this could race with our first
@@ -1333,7 +1801,7 @@ static int ixgbe_resume(struct pci_dev *pdev)
1333{ 1801{
1334 struct net_device *netdev = pci_get_drvdata(pdev); 1802 struct net_device *netdev = pci_get_drvdata(pdev);
1335 struct ixgbe_adapter *adapter = netdev_priv(netdev); 1803 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1336 u32 err, num_rx_queues = adapter->num_rx_queues; 1804 u32 err;
1337 1805
1338 pci_set_power_state(pdev, PCI_D0); 1806 pci_set_power_state(pdev, PCI_D0);
1339 pci_restore_state(pdev); 1807 pci_restore_state(pdev);
@@ -1349,7 +1817,7 @@ static int ixgbe_resume(struct pci_dev *pdev)
1349 pci_enable_wake(pdev, PCI_D3cold, 0); 1817 pci_enable_wake(pdev, PCI_D3cold, 0);
1350 1818
1351 if (netif_running(netdev)) { 1819 if (netif_running(netdev)) {
1352 err = ixgbe_request_irq(adapter, &num_rx_queues); 1820 err = ixgbe_request_irq(adapter);
1353 if (err) 1821 if (err)
1354 return err; 1822 return err;
1355 } 1823 }
@@ -1449,27 +1917,27 @@ static void ixgbe_clean_tx_ring(struct ixgbe_adapter *adapter,
1449} 1917}
1450 1918
1451/** 1919/**
1452 * ixgbe_clean_all_tx_rings - Free Tx Buffers for all queues 1920 * ixgbe_clean_all_rx_rings - Free Rx Buffers for all queues
1453 * @adapter: board private structure 1921 * @adapter: board private structure
1454 **/ 1922 **/
1455static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter) 1923static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter)
1456{ 1924{
1457 int i; 1925 int i;
1458 1926
1459 for (i = 0; i < adapter->num_tx_queues; i++) 1927 for (i = 0; i < adapter->num_rx_queues; i++)
1460 ixgbe_clean_tx_ring(adapter, &adapter->tx_ring[i]); 1928 ixgbe_clean_rx_ring(adapter, &adapter->rx_ring[i]);
1461} 1929}
1462 1930
1463/** 1931/**
1464 * ixgbe_clean_all_rx_rings - Free Rx Buffers for all queues 1932 * ixgbe_clean_all_tx_rings - Free Tx Buffers for all queues
1465 * @adapter: board private structure 1933 * @adapter: board private structure
1466 **/ 1934 **/
1467static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter) 1935static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
1468{ 1936{
1469 int i; 1937 int i;
1470 1938
1471 for (i = 0; i < adapter->num_rx_queues; i++) 1939 for (i = 0; i < adapter->num_tx_queues; i++)
1472 ixgbe_clean_rx_ring(adapter, &adapter->rx_ring[i]); 1940 ixgbe_clean_tx_ring(adapter, &adapter->tx_ring[i]);
1473} 1941}
1474 1942
1475void ixgbe_down(struct ixgbe_adapter *adapter) 1943void ixgbe_down(struct ixgbe_adapter *adapter)
@@ -1493,10 +1961,9 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
1493 IXGBE_WRITE_FLUSH(&adapter->hw); 1961 IXGBE_WRITE_FLUSH(&adapter->hw);
1494 msleep(10); 1962 msleep(10);
1495 1963
1496 napi_disable(&adapter->napi);
1497
1498 ixgbe_irq_disable(adapter); 1964 ixgbe_irq_disable(adapter);
1499 1965
1966 ixgbe_napi_disable_all(adapter);
1500 del_timer_sync(&adapter->watchdog_timer); 1967 del_timer_sync(&adapter->watchdog_timer);
1501 1968
1502 netif_carrier_off(netdev); 1969 netif_carrier_off(netdev);
@@ -1547,27 +2014,37 @@ static void ixgbe_shutdown(struct pci_dev *pdev)
1547} 2014}
1548 2015
1549/** 2016/**
1550 * ixgbe_clean - NAPI Rx polling callback 2017 * ixgbe_poll - NAPI Rx polling callback
1551 * @adapter: board private structure 2018 * @napi: structure for representing this polling device
2019 * @budget: how many packets driver is allowed to clean
2020 *
2021 * This function is used for legacy and MSI, NAPI mode
1552 **/ 2022 **/
1553static int ixgbe_clean(struct napi_struct *napi, int budget) 2023static int ixgbe_poll(struct napi_struct *napi, int budget)
1554{ 2024{
1555 struct ixgbe_adapter *adapter = container_of(napi, 2025 struct ixgbe_q_vector *q_vector = container_of(napi,
1556 struct ixgbe_adapter, napi); 2026 struct ixgbe_q_vector, napi);
1557 struct net_device *netdev = adapter->netdev; 2027 struct ixgbe_adapter *adapter = q_vector->adapter;
1558 int tx_cleaned = 0, work_done = 0; 2028 int tx_cleaned = 0, work_done = 0;
1559 2029
1560 /* In non-MSIX case, there is no multi-Tx/Rx queue */ 2030#ifdef CONFIG_DCA
2031 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
2032 ixgbe_update_tx_dca(adapter, adapter->tx_ring);
2033 ixgbe_update_rx_dca(adapter, adapter->rx_ring);
2034 }
2035#endif
2036
1561 tx_cleaned = ixgbe_clean_tx_irq(adapter, adapter->tx_ring); 2037 tx_cleaned = ixgbe_clean_tx_irq(adapter, adapter->tx_ring);
1562 ixgbe_clean_rx_irq(adapter, &adapter->rx_ring[0], &work_done, 2038 ixgbe_clean_rx_irq(adapter, adapter->rx_ring, &work_done, budget);
1563 budget);
1564 2039
1565 if (tx_cleaned) 2040 if (tx_cleaned)
1566 work_done = budget; 2041 work_done = budget;
1567 2042
1568 /* If budget not fully consumed, exit the polling mode */ 2043 /* If budget not fully consumed, exit the polling mode */
1569 if (work_done < budget) { 2044 if (work_done < budget) {
1570 netif_rx_complete(netdev, napi); 2045 netif_rx_complete(adapter->netdev, napi);
2046 if (adapter->rx_eitr < IXGBE_MIN_ITR_USECS)
2047 ixgbe_set_itr(adapter);
1571 if (!test_bit(__IXGBE_DOWN, &adapter->state)) 2048 if (!test_bit(__IXGBE_DOWN, &adapter->state))
1572 ixgbe_irq_enable(adapter); 2049 ixgbe_irq_enable(adapter);
1573 } 2050 }
@@ -1597,6 +2074,136 @@ static void ixgbe_reset_task(struct work_struct *work)
1597 ixgbe_reinit_locked(adapter); 2074 ixgbe_reinit_locked(adapter);
1598} 2075}
1599 2076
2077static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
2078 int vectors)
2079{
2080 int err, vector_threshold;
2081
2082 /* We'll want at least 3 (vector_threshold):
2083 * 1) TxQ[0] Cleanup
2084 * 2) RxQ[0] Cleanup
2085 * 3) Other (Link Status Change, etc.)
2086 * 4) TCP Timer (optional)
2087 */
2088 vector_threshold = MIN_MSIX_COUNT;
2089
2090 /* The more we get, the more we will assign to Tx/Rx Cleanup
2091 * for the separate queues...where Rx Cleanup >= Tx Cleanup.
2092 * Right now, we simply care about how many we'll get; we'll
2093 * set them up later while requesting irq's.
2094 */
2095 while (vectors >= vector_threshold) {
2096 err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
2097 vectors);
2098 if (!err) /* Success in acquiring all requested vectors. */
2099 break;
2100 else if (err < 0)
2101 vectors = 0; /* Nasty failure, quit now */
2102 else /* err == number of vectors we should try again with */
2103 vectors = err;
2104 }
2105
2106 if (vectors < vector_threshold) {
2107 /* Can't allocate enough MSI-X interrupts? Oh well.
2108 * This just means we'll go with either a single MSI
2109 * vector or fall back to legacy interrupts.
2110 */
2111 DPRINTK(HW, DEBUG, "Unable to allocate MSI-X interrupts\n");
2112 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
2113 kfree(adapter->msix_entries);
2114 adapter->msix_entries = NULL;
2115 adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
2116 adapter->num_tx_queues = 1;
2117 adapter->num_rx_queues = 1;
2118 } else {
2119 adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; /* Woot! */
2120 adapter->num_msix_vectors = vectors;
2121 }
2122}
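
pci_enable_msix() in kernels of this vintage returns 0 on success, a negative errno on hard failure, or a positive count of vectors actually available; the loop above keeps retrying with that count until it either succeeds or drops below the three-vector minimum. A user-space model of the same negotiation, with a fake probe standing in for pci_enable_msix():

#include <stdio.h>

/* stand-in for pci_enable_msix(): 0, negative, or the available count */
static int fake_enable_msix(int requested, int available)
{
    if (available <= 0)
        return -1;              /* hard failure */
    if (requested <= available)
        return 0;               /* everything we asked for */
    return available;           /* retry hint */
}

int main(void)
{
    int vectors = 18, available = 10;
    const int vector_threshold = 3;     /* TxQ[0] + RxQ[0] + other */

    while (vectors >= vector_threshold) {
        int err = fake_enable_msix(vectors, available);

        if (!err)
            break;              /* acquired all `vectors` entries */
        else if (err < 0)
            vectors = 0;        /* give up, MSI/legacy fallback */
        else
            vectors = err;      /* try again with what is available */
    }

    if (vectors >= vector_threshold)
        printf("acquired %d MSI-X vectors\n", vectors);
    else
        printf("falling back to MSI or legacy interrupts\n");
    return 0;
}
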
2123
2124static void __devinit ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
2125{
2126 int nrq, ntq;
2127 int feature_mask = 0, rss_i, rss_m;
2128
2129 /* Number of supported queues */
2130 switch (adapter->hw.mac.type) {
2131 case ixgbe_mac_82598EB:
2132 rss_i = adapter->ring_feature[RING_F_RSS].indices;
2133 rss_m = 0;
2134 feature_mask |= IXGBE_FLAG_RSS_ENABLED;
2135
2136 switch (adapter->flags & feature_mask) {
2137 case (IXGBE_FLAG_RSS_ENABLED):
2138 rss_m = 0xF;
2139 nrq = rss_i;
2140#ifdef CONFIG_NETDEVICES_MULTIQUEUE
2141 ntq = rss_i;
2142#else
2143 ntq = 1;
2144#endif
2145 break;
2146 case 0:
2147 default:
2148 rss_i = 0;
2149 rss_m = 0;
2150 nrq = 1;
2151 ntq = 1;
2152 break;
2153 }
2154
2155 adapter->ring_feature[RING_F_RSS].indices = rss_i;
2156 adapter->ring_feature[RING_F_RSS].mask = rss_m;
2157 break;
2158 default:
2159 nrq = 1;
2160 ntq = 1;
2161 break;
2162 }
2163
2164 adapter->num_rx_queues = nrq;
2165 adapter->num_tx_queues = ntq;
2166}
2167
2168/**
2169 * ixgbe_cache_ring_register - Descriptor ring to register mapping
2170 * @adapter: board private structure to initialize
2171 *
2172 * Once we know the feature-set enabled for the device, we'll cache
2173 * the register offset the descriptor ring is assigned to.
2174 **/
2175static void __devinit ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
2176{
2177 /* TODO: Remove all uses of the indices in the cases where multiple
2178 * features are OR'd together, if the feature set makes sense.
2179 */
2180 int feature_mask = 0, rss_i;
2181 int i, txr_idx, rxr_idx;
2182
2183 /* Number of supported queues */
2184 switch (adapter->hw.mac.type) {
2185 case ixgbe_mac_82598EB:
2186 rss_i = adapter->ring_feature[RING_F_RSS].indices;
2187 txr_idx = 0;
2188 rxr_idx = 0;
2189 feature_mask |= IXGBE_FLAG_RSS_ENABLED;
2190 switch (adapter->flags & feature_mask) {
2191 case (IXGBE_FLAG_RSS_ENABLED):
2192 for (i = 0; i < adapter->num_rx_queues; i++)
2193 adapter->rx_ring[i].reg_idx = i;
2194 for (i = 0; i < adapter->num_tx_queues; i++)
2195 adapter->tx_ring[i].reg_idx = i;
2196 break;
2197 case 0:
2198 default:
2199 break;
2200 }
2201 break;
2202 default:
2203 break;
2204 }
2205}
2206
1600/** 2207/**
1601 * ixgbe_alloc_queues - Allocate memory for all rings 2208 * ixgbe_alloc_queues - Allocate memory for all rings
1602 * @adapter: board private structure to initialize 2209 * @adapter: board private structure to initialize
@@ -1612,25 +2219,167 @@ static int __devinit ixgbe_alloc_queues(struct ixgbe_adapter *adapter)
1612 adapter->tx_ring = kcalloc(adapter->num_tx_queues, 2219 adapter->tx_ring = kcalloc(adapter->num_tx_queues,
1613 sizeof(struct ixgbe_ring), GFP_KERNEL); 2220 sizeof(struct ixgbe_ring), GFP_KERNEL);
1614 if (!adapter->tx_ring) 2221 if (!adapter->tx_ring)
1615 return -ENOMEM; 2222 goto err_tx_ring_allocation;
1616
1617 for (i = 0; i < adapter->num_tx_queues; i++)
1618 adapter->tx_ring[i].count = IXGBE_DEFAULT_TXD;
1619 2223
1620 adapter->rx_ring = kcalloc(adapter->num_rx_queues, 2224 adapter->rx_ring = kcalloc(adapter->num_rx_queues,
1621 sizeof(struct ixgbe_ring), GFP_KERNEL); 2225 sizeof(struct ixgbe_ring), GFP_KERNEL);
1622 if (!adapter->rx_ring) { 2226 if (!adapter->rx_ring)
1623 kfree(adapter->tx_ring); 2227 goto err_rx_ring_allocation;
1624 return -ENOMEM;
1625 }
1626 2228
2229 for (i = 0; i < adapter->num_tx_queues; i++) {
2230 adapter->tx_ring[i].count = IXGBE_DEFAULT_TXD;
2231 adapter->tx_ring[i].queue_index = i;
2232 }
1627 for (i = 0; i < adapter->num_rx_queues; i++) { 2233 for (i = 0; i < adapter->num_rx_queues; i++) {
1628 adapter->rx_ring[i].adapter = adapter;
1629 adapter->rx_ring[i].itr_register = IXGBE_EITR(i);
1630 adapter->rx_ring[i].count = IXGBE_DEFAULT_RXD; 2234 adapter->rx_ring[i].count = IXGBE_DEFAULT_RXD;
2235 adapter->rx_ring[i].queue_index = i;
2236 }
2237
2238 ixgbe_cache_ring_register(adapter);
2239
2240 return 0;
2241
2242err_rx_ring_allocation:
2243 kfree(adapter->tx_ring);
2244err_tx_ring_allocation:
2245 return -ENOMEM;
2246}
2247
2248/**
2249 * ixgbe_set_interrupt_capability - set MSI-X or MSI if supported
2250 * @adapter: board private structure to initialize
2251 *
2252 * Attempt to configure the interrupts using the best available
2253 * capabilities of the hardware and the kernel.
2254 **/
2255static int __devinit ixgbe_set_interrupt_capability(struct ixgbe_adapter
2256 *adapter)
2257{
2258 int err = 0;
2259 int vector, v_budget;
2260
2261 /*
2262 * It's easy to be greedy for MSI-X vectors, but it really
2263 * doesn't do us much good if we have a lot more vectors
2264 * than CPU's. So let's be conservative and only ask for
2265 * (roughly) twice the number of vectors as there are CPU's.
2266 */
2267 v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues,
2268 (int)(num_online_cpus() * 2)) + NON_Q_VECTORS;
2269
2270 /*
2271 * At the same time, hardware can only support a maximum of
2272 * MAX_MSIX_COUNT vectors. With features such as RSS and VMDq,
2273 * we can easily reach upwards of 64 Rx descriptor queues and
2274 * 32 Tx queues. Thus, we cap it off in those rare cases where
2275 * the cpu count also exceeds our vector limit.
2276 */
2277 v_budget = min(v_budget, MAX_MSIX_COUNT);
2278
2279 /* A failure in MSI-X entry allocation isn't fatal, but it does
2280 * mean we disable MSI-X capabilities of the adapter. */
2281 adapter->msix_entries = kcalloc(v_budget,
2282 sizeof(struct msix_entry), GFP_KERNEL);
2283 if (!adapter->msix_entries) {
2284 adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
2285 ixgbe_set_num_queues(adapter);
2286 kfree(adapter->tx_ring);
2287 kfree(adapter->rx_ring);
2288 err = ixgbe_alloc_queues(adapter);
2289 if (err) {
2290 DPRINTK(PROBE, ERR, "Unable to allocate memory "
2291 "for queues\n");
2292 goto out;
2293 }
2294
2295 goto try_msi;
2296 }
2297
2298 for (vector = 0; vector < v_budget; vector++)
2299 adapter->msix_entries[vector].entry = vector;
2300
2301 ixgbe_acquire_msix_vectors(adapter, v_budget);
2302
2303 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
2304 goto out;
2305
2306try_msi:
2307 err = pci_enable_msi(adapter->pdev);
2308 if (!err) {
2309 adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
2310 } else {
2311 DPRINTK(HW, DEBUG, "Unable to allocate MSI interrupt, "
2312 "falling back to legacy. Error: %d\n", err);
2313 /* reset err */
2314 err = 0;
2315 }
2316
2317out:
2318#ifdef CONFIG_NETDEVICES_MULTIQUEUE
2319 /* Notify the stack of the (possibly) reduced Tx Queue count. */
2320 adapter->netdev->egress_subqueue_count = adapter->num_tx_queues;
2321#endif
2322
2323 return err;
2324}
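
The vector budget computed above is simply "one per queue, but no more than about twice the CPU count, plus the non-queue vectors, capped at the hardware maximum". The arithmetic is shown below; NON_Q_VECTORS and MAX_MSIX_COUNT are replaced with assumed stand-in values since their defines are not part of this diff.

#include <stdio.h>

#define MIN(a, b)   ((a) < (b) ? (a) : (b))

int main(void)
{
    int num_rx_queues = 16, num_tx_queues = 16;
    int online_cpus = 8;
    int non_q_vectors = 2;      /* "other" + TCP timer; assumed value */
    int max_msix_count = 64;    /* hardware ceiling; assumed value */

    int v_budget = MIN(num_rx_queues + num_tx_queues, online_cpus * 2)
                   + non_q_vectors;
    v_budget = MIN(v_budget, max_msix_count);

    printf("requesting %d MSI-X vectors\n", v_budget);   /* 18 with these numbers */
    return 0;
}
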
2325
2326static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
2327{
2328 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
2329 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
2330 pci_disable_msix(adapter->pdev);
2331 kfree(adapter->msix_entries);
2332 adapter->msix_entries = NULL;
2333 } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
2334 adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED;
2335 pci_disable_msi(adapter->pdev);
1631 } 2336 }
2337 return;
2338}
2339
2340/**
2341 * ixgbe_init_interrupt_scheme - Determine proper interrupt scheme
2342 * @adapter: board private structure to initialize
2343 *
2344 * We determine which interrupt scheme to use based on...
2345 * - Kernel support (MSI, MSI-X)
2346 * - which can be user-defined (via MODULE_PARAM)
2347 * - Hardware queue count (num_*_queues)
2348 * - defined by miscellaneous hardware support/features (RSS, etc.)
2349 **/
2350static int __devinit ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
2351{
2352 int err;
2353
2354 /* Number of supported queues */
2355 ixgbe_set_num_queues(adapter);
2356
2357 err = ixgbe_alloc_queues(adapter);
2358 if (err) {
2359 DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n");
2360 goto err_alloc_queues;
2361 }
2362
2363 err = ixgbe_set_interrupt_capability(adapter);
2364 if (err) {
2365 DPRINTK(PROBE, ERR, "Unable to setup interrupt capabilities\n");
2366 goto err_set_interrupt;
2367 }
2368
2369 DPRINTK(DRV, INFO, "Multiqueue %s: Rx Queue count = %u, "
2370 "Tx Queue count = %u\n",
2371 (adapter->num_rx_queues > 1) ? "Enabled" :
2372 "Disabled", adapter->num_rx_queues, adapter->num_tx_queues);
2373
2374 set_bit(__IXGBE_DOWN, &adapter->state);
1632 2375
1633 return 0; 2376 return 0;
2377
2378err_set_interrupt:
2379 kfree(adapter->tx_ring);
2380 kfree(adapter->rx_ring);
2381err_alloc_queues:
2382 return err;
1634} 2383}
1635 2384
1636/** 2385/**
@@ -1645,11 +2394,22 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
1645{ 2394{
1646 struct ixgbe_hw *hw = &adapter->hw; 2395 struct ixgbe_hw *hw = &adapter->hw;
1647 struct pci_dev *pdev = adapter->pdev; 2396 struct pci_dev *pdev = adapter->pdev;
2397 unsigned int rss;
2398
2399 /* Set capability flags */
2400 rss = min(IXGBE_MAX_RSS_INDICES, (int)num_online_cpus());
2401 adapter->ring_feature[RING_F_RSS].indices = rss;
2402 adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
2403
2404 /* Enable Dynamic interrupt throttling by default */
2405 adapter->rx_eitr = 1;
2406 adapter->tx_eitr = 1;
1648 2407
1649 /* default flow control settings */ 2408 /* default flow control settings */
1650 hw->fc.original_type = ixgbe_fc_full; 2409 hw->fc.original_type = ixgbe_fc_full;
1651 hw->fc.type = ixgbe_fc_full; 2410 hw->fc.type = ixgbe_fc_full;
1652 2411
2412 /* select 10G link by default */
1653 hw->mac.link_mode_select = IXGBE_AUTOC_LMS_10G_LINK_NO_AN; 2413 hw->mac.link_mode_select = IXGBE_AUTOC_LMS_10G_LINK_NO_AN;
1654 if (hw->mac.ops.reset(hw)) { 2414 if (hw->mac.ops.reset(hw)) {
1655 dev_err(&pdev->dev, "HW Init failed\n"); 2415 dev_err(&pdev->dev, "HW Init failed\n");
@@ -1667,16 +2427,9 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
1667 return -EIO; 2427 return -EIO;
1668 } 2428 }
1669 2429
1670 /* Set the default values */ 2430 /* enable rx csum by default */
1671 adapter->num_rx_queues = IXGBE_DEFAULT_RXQ;
1672 adapter->num_tx_queues = 1;
1673 adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED; 2431 adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;
1674 2432
1675 if (ixgbe_alloc_queues(adapter)) {
1676 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
1677 return -ENOMEM;
1678 }
1679
1680 set_bit(__IXGBE_DOWN, &adapter->state); 2433 set_bit(__IXGBE_DOWN, &adapter->state);
1681 2434
1682 return 0; 2435 return 0;
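Worked example for the defaults set in ixgbe_sw_init above, assuming IXGBE_MAX_RSS_INDICES is 16: an 8-CPU system gets min(16, 8) = 8 RSS indices, while a 32-CPU system is capped at 16. The rx_eitr/tx_eitr value of 1 selects the dynamic interrupt-throttling mode rather than a fixed interrupt rate.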
@@ -1716,7 +2469,6 @@ int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter,
1716 return -ENOMEM; 2469 return -ENOMEM;
1717 } 2470 }
1718 2471
1719 txdr->adapter = adapter;
1720 txdr->next_to_use = 0; 2472 txdr->next_to_use = 0;
1721 txdr->next_to_clean = 0; 2473 txdr->next_to_clean = 0;
1722 txdr->work_limit = txdr->count; 2474 txdr->work_limit = txdr->count;
@@ -1735,7 +2487,7 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
1735 struct ixgbe_ring *rxdr) 2487 struct ixgbe_ring *rxdr)
1736{ 2488{
1737 struct pci_dev *pdev = adapter->pdev; 2489 struct pci_dev *pdev = adapter->pdev;
1738 int size, desc_len; 2490 int size;
1739 2491
1740 size = sizeof(struct ixgbe_rx_buffer) * rxdr->count; 2492 size = sizeof(struct ixgbe_rx_buffer) * rxdr->count;
1741 rxdr->rx_buffer_info = vmalloc(size); 2493 rxdr->rx_buffer_info = vmalloc(size);
@@ -1746,10 +2498,8 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
1746 } 2498 }
1747 memset(rxdr->rx_buffer_info, 0, size); 2499 memset(rxdr->rx_buffer_info, 0, size);
1748 2500
1749 desc_len = sizeof(union ixgbe_adv_rx_desc);
1750
1751 /* Round up to nearest 4K */ 2501 /* Round up to nearest 4K */
1752 rxdr->size = rxdr->count * desc_len; 2502 rxdr->size = rxdr->count * sizeof(union ixgbe_adv_rx_desc);
1753 rxdr->size = ALIGN(rxdr->size, 4096); 2503 rxdr->size = ALIGN(rxdr->size, 4096);
1754 2504
1755 rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma); 2505 rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
@@ -1763,7 +2513,6 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
1763 2513
1764 rxdr->next_to_clean = 0; 2514 rxdr->next_to_clean = 0;
1765 rxdr->next_to_use = 0; 2515 rxdr->next_to_use = 0;
1766 rxdr->adapter = adapter;
1767 2516
1768 return 0; 2517 return 0;
1769} 2518}
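Worked example of the 4 KiB rounding above, assuming union ixgbe_adv_rx_desc is 16 bytes: 512 descriptors need 8192 bytes, already a multiple of 4096, while 400 descriptors need 6400 bytes, which ALIGN() rounds up to 8192.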
@@ -1841,8 +2590,7 @@ static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
1841} 2590}
1842 2591
1843/** 2592/**
1844 * ixgbe_setup_all_tx_resources - wrapper to allocate Tx resources 2593 * ixgbe_setup_all_tx_resources - allocate all queues Tx resources
1845 * (Descriptors) for all queues
1846 * @adapter: board private structure 2594 * @adapter: board private structure
1847 * 2595 *
1848 * If this function returns with an error, then it's possible one or 2596 * If this function returns with an error, then it's possible one or
@@ -1868,8 +2616,7 @@ static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
1868} 2616}
1869 2617
1870/** 2618/**
1871 * ixgbe_setup_all_rx_resources - wrapper to allocate Rx resources 2619 * ixgbe_setup_all_rx_resources - allocate all queues Rx resources
1872 * (Descriptors) for all queues
1873 * @adapter: board private structure 2620 * @adapter: board private structure
1874 * 2621 *
1875 * If this function returns with an error, then it's possible one or 2622 * If this function returns with an error, then it's possible one or
@@ -1911,6 +2658,9 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
1911 (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE)) 2658 (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE))
1912 return -EINVAL; 2659 return -EINVAL;
1913 2660
2661 DPRINTK(PROBE, INFO, "changing MTU from %d to %d\n",
2662 netdev->mtu, new_mtu);
2663 /* must set new MTU before calling down or up */
1914 netdev->mtu = new_mtu; 2664 netdev->mtu = new_mtu;
1915 2665
1916 if (netif_running(netdev)) 2666 if (netif_running(netdev))
@@ -1935,23 +2685,16 @@ static int ixgbe_open(struct net_device *netdev)
1935{ 2685{
1936 struct ixgbe_adapter *adapter = netdev_priv(netdev); 2686 struct ixgbe_adapter *adapter = netdev_priv(netdev);
1937 int err; 2687 int err;
1938 u32 num_rx_queues = adapter->num_rx_queues;
1939 2688
1940 /* disallow open during test */ 2689 /* disallow open during test */
1941 if (test_bit(__IXGBE_TESTING, &adapter->state)) 2690 if (test_bit(__IXGBE_TESTING, &adapter->state))
1942 return -EBUSY; 2691 return -EBUSY;
1943 2692
1944try_intr_reinit:
1945 /* allocate transmit descriptors */ 2693 /* allocate transmit descriptors */
1946 err = ixgbe_setup_all_tx_resources(adapter); 2694 err = ixgbe_setup_all_tx_resources(adapter);
1947 if (err) 2695 if (err)
1948 goto err_setup_tx; 2696 goto err_setup_tx;
1949 2697
1950 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
1951 num_rx_queues = 1;
1952 adapter->num_rx_queues = num_rx_queues;
1953 }
1954
1955 /* allocate receive descriptors */ 2698 /* allocate receive descriptors */
1956 err = ixgbe_setup_all_rx_resources(adapter); 2699 err = ixgbe_setup_all_rx_resources(adapter);
1957 if (err) 2700 if (err)
@@ -1959,31 +2702,10 @@ try_intr_reinit:
1959 2702
1960 ixgbe_configure(adapter); 2703 ixgbe_configure(adapter);
1961 2704
1962 err = ixgbe_request_irq(adapter, &num_rx_queues); 2705 err = ixgbe_request_irq(adapter);
1963 if (err) 2706 if (err)
1964 goto err_req_irq; 2707 goto err_req_irq;
1965 2708
1966 /* ixgbe_request might have reduced num_rx_queues */
1967 if (num_rx_queues < adapter->num_rx_queues) {
1968 /* We didn't get MSI-X, so we need to release everything,
1969 * set our Rx queue count to num_rx_queues, and redo the
1970 * whole init process.
1971 */
1972 ixgbe_free_irq(adapter);
1973 if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
1974 pci_disable_msi(adapter->pdev);
1975 adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED;
1976 }
1977 ixgbe_free_all_rx_resources(adapter);
1978 ixgbe_free_all_tx_resources(adapter);
1979 adapter->num_rx_queues = num_rx_queues;
1980
1981 /* Reset the hardware, and start over. */
1982 ixgbe_reset(adapter);
1983
1984 goto try_intr_reinit;
1985 }
1986
1987 err = ixgbe_up_complete(adapter); 2709 err = ixgbe_up_complete(adapter);
1988 if (err) 2710 if (err)
1989 goto err_up; 2711 goto err_up;
@@ -2119,6 +2841,9 @@ static void ixgbe_watchdog(unsigned long data)
2119 struct net_device *netdev = adapter->netdev; 2841 struct net_device *netdev = adapter->netdev;
2120 bool link_up; 2842 bool link_up;
2121 u32 link_speed = 0; 2843 u32 link_speed = 0;
2844#ifdef CONFIG_NETDEVICES_MULTIQUEUE
2845 int i;
2846#endif
2122 2847
2123 adapter->hw.mac.ops.check_link(&adapter->hw, &(link_speed), &link_up); 2848 adapter->hw.mac.ops.check_link(&adapter->hw, &(link_speed), &link_up);
2124 2849
@@ -2140,6 +2865,10 @@ static void ixgbe_watchdog(unsigned long data)
2140 2865
2141 netif_carrier_on(netdev); 2866 netif_carrier_on(netdev);
2142 netif_wake_queue(netdev); 2867 netif_wake_queue(netdev);
2868#ifdef CONFIG_NETDEVICES_MULTIQUEUE
2869 for (i = 0; i < adapter->num_tx_queues; i++)
2870 netif_wake_subqueue(netdev, i);
2871#endif
2143 } else { 2872 } else {
2144 /* Force detection of hung controller */ 2873 /* Force detection of hung controller */
2145 adapter->detect_tx_hung = true; 2874 adapter->detect_tx_hung = true;
@@ -2154,10 +2883,23 @@ static void ixgbe_watchdog(unsigned long data)
2154 2883
2155 ixgbe_update_stats(adapter); 2884 ixgbe_update_stats(adapter);
2156 2885
2157 /* Reset the timer */ 2886 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
2158 if (!test_bit(__IXGBE_DOWN, &adapter->state)) 2887 /* Cause software interrupt to ensure rx rings are cleaned */
2888 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
2889 u32 eics =
2890 (1 << (adapter->num_msix_vectors - NON_Q_VECTORS)) - 1;
2891 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, eics);
2892 } else {
2893 /* for legacy and MSI interrupts don't set any bits that
2894 * are enabled for EIAM, because this operation would
2895 * set *both* EIMS and EICS for any bit in EIAM */
2896 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
2897 (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
2898 }
2899 /* Reset the timer */
2159 mod_timer(&adapter->watchdog_timer, 2900 mod_timer(&adapter->watchdog_timer,
2160 round_jiffies(jiffies + 2 * HZ)); 2901 round_jiffies(jiffies + 2 * HZ));
2902 }
2161} 2903}
2162 2904
2163static int ixgbe_tso(struct ixgbe_adapter *adapter, 2905static int ixgbe_tso(struct ixgbe_adapter *adapter,
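The EICS value written in the watchdog above is simply one bit per queue vector: with, say, 10 MSI-X vectors and one non-queue vector, (1 << (10 - 1)) - 1 = 0x1ff raises a software interrupt on all nine queue vectors, so their Rx rings get cleaned even when no new packet fires the hardware interrupt.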
@@ -2170,7 +2912,6 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter,
2170 struct ixgbe_tx_buffer *tx_buffer_info; 2912 struct ixgbe_tx_buffer *tx_buffer_info;
2171 u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0; 2913 u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
2172 u32 mss_l4len_idx = 0, l4len; 2914 u32 mss_l4len_idx = 0, l4len;
2173 *hdr_len = 0;
2174 2915
2175 if (skb_is_gso(skb)) { 2916 if (skb_is_gso(skb)) {
2176 if (skb_header_cloned(skb)) { 2917 if (skb_header_cloned(skb)) {
@@ -2454,7 +3195,11 @@ static int __ixgbe_maybe_stop_tx(struct net_device *netdev,
2454{ 3195{
2455 struct ixgbe_adapter *adapter = netdev_priv(netdev); 3196 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2456 3197
3198#ifdef CONFIG_NETDEVICES_MULTIQUEUE
3199 netif_stop_subqueue(netdev, tx_ring->queue_index);
3200#else
2457 netif_stop_queue(netdev); 3201 netif_stop_queue(netdev);
3202#endif
2458 /* Herbert's original patch had: 3203 /* Herbert's original patch had:
2459 * smp_mb__after_netif_stop_queue(); 3204 * smp_mb__after_netif_stop_queue();
2460 * but since that doesn't exist yet, just open code it. */ 3205 * but since that doesn't exist yet, just open code it. */
@@ -2466,7 +3211,11 @@ static int __ixgbe_maybe_stop_tx(struct net_device *netdev,
2466 return -EBUSY; 3211 return -EBUSY;
2467 3212
2468 /* A reprieve! - use start_queue because it doesn't call schedule */ 3213 /* A reprieve! - use start_queue because it doesn't call schedule */
3214#ifdef CONFIG_NETDEVICES_MULTIQUEUE
3215 netif_wake_subqueue(netdev, tx_ring->queue_index);
3216#else
2469 netif_wake_queue(netdev); 3217 netif_wake_queue(netdev);
3218#endif
2470 ++adapter->restart_queue; 3219 ++adapter->restart_queue;
2471 return 0; 3220 return 0;
2472} 3221}
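The __ixgbe_maybe_stop_tx() hunk above uses the standard stop/re-check/wake idiom to close the race against the Tx cleanup path. A generic sketch of the idiom with illustrative names and a placeholder free-descriptor count, not ixgbe's exact code:

	static int example_maybe_stop_tx(struct net_device *netdev,
					 unsigned int free_desc,
					 unsigned int needed)
	{
		netif_stop_queue(netdev);
		smp_mb();		/* order the stop against the re-check */
		if (free_desc < needed)
			return -EBUSY;	/* really full: stay stopped */
		/* cleanup freed descriptors between the check and the stop */
		netif_wake_queue(netdev);
		return 0;
	}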
@@ -2487,15 +3236,18 @@ static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2487 unsigned int len = skb->len; 3236 unsigned int len = skb->len;
2488 unsigned int first; 3237 unsigned int first;
2489 unsigned int tx_flags = 0; 3238 unsigned int tx_flags = 0;
2490 u8 hdr_len; 3239 u8 hdr_len = 0;
2491 int tso; 3240 int r_idx = 0, tso;
2492 unsigned int mss = 0; 3241 unsigned int mss = 0;
2493 int count = 0; 3242 int count = 0;
2494 unsigned int f; 3243 unsigned int f;
2495 unsigned int nr_frags = skb_shinfo(skb)->nr_frags; 3244 unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
2496 len -= skb->data_len; 3245 len -= skb->data_len;
3246#ifdef CONFIG_NETDEVICES_MULTIQUEUE
3247 r_idx = (adapter->num_tx_queues - 1) & skb->queue_mapping;
3248#endif
3249 tx_ring = &adapter->tx_ring[r_idx];
2497 3250
2498 tx_ring = adapter->tx_ring;
2499 3251
2500 if (skb->len <= 0) { 3252 if (skb->len <= 0) {
2501 dev_kfree_skb(skb); 3253 dev_kfree_skb(skb);
@@ -2604,6 +3356,31 @@ static void ixgbe_netpoll(struct net_device *netdev)
2604#endif 3356#endif
2605 3357
2606/** 3358/**
3359 * ixgbe_napi_add_all - prep napi structs for use
3360 * @adapter: private struct
3361 * helper function to napi_add each possible q_vector->napi
3362 */
3363static void ixgbe_napi_add_all(struct ixgbe_adapter *adapter)
3364{
3365 int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
3366 int (*poll)(struct napi_struct *, int);
3367
3368 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
3369 poll = &ixgbe_clean_rxonly;
3370 } else {
3371 poll = &ixgbe_poll;
3372 /* only one q_vector for legacy modes */
3373 q_vectors = 1;
3374 }
3375
3376 for (i = 0; i < q_vectors; i++) {
3377 struct ixgbe_q_vector *q_vector = &adapter->q_vector[i];
3378 netif_napi_add(adapter->netdev, &q_vector->napi,
3379 (*poll), 64);
3380 }
3381}
3382
3383/**
2607 * ixgbe_probe - Device Initialization Routine 3384 * ixgbe_probe - Device Initialization Routine
2608 * @pdev: PCI device information struct 3385 * @pdev: PCI device information struct
2609 * @ent: entry in ixgbe_pci_tbl 3386 * @ent: entry in ixgbe_pci_tbl
@@ -2655,7 +3432,11 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
2655 3432
2656 pci_set_master(pdev); 3433 pci_set_master(pdev);
2657 3434
3435#ifdef CONFIG_NETDEVICES_MULTIQUEUE
3436 netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), MAX_TX_QUEUES);
3437#else
2658 netdev = alloc_etherdev(sizeof(struct ixgbe_adapter)); 3438 netdev = alloc_etherdev(sizeof(struct ixgbe_adapter));
3439#endif
2659 if (!netdev) { 3440 if (!netdev) {
2660 err = -ENOMEM; 3441 err = -ENOMEM;
2661 goto err_alloc_etherdev; 3442 goto err_alloc_etherdev;
@@ -2696,7 +3477,6 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
2696 ixgbe_set_ethtool_ops(netdev); 3477 ixgbe_set_ethtool_ops(netdev);
2697 netdev->tx_timeout = &ixgbe_tx_timeout; 3478 netdev->tx_timeout = &ixgbe_tx_timeout;
2698 netdev->watchdog_timeo = 5 * HZ; 3479 netdev->watchdog_timeo = 5 * HZ;
2699 netif_napi_add(netdev, &adapter->napi, ixgbe_clean, 64);
2700 netdev->vlan_rx_register = ixgbe_vlan_rx_register; 3480 netdev->vlan_rx_register = ixgbe_vlan_rx_register;
2701 netdev->vlan_rx_add_vid = ixgbe_vlan_rx_add_vid; 3481 netdev->vlan_rx_add_vid = ixgbe_vlan_rx_add_vid;
2702 netdev->vlan_rx_kill_vid = ixgbe_vlan_rx_kill_vid; 3482 netdev->vlan_rx_kill_vid = ixgbe_vlan_rx_kill_vid;
@@ -2719,6 +3499,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
2719 3499
2720 /* Setup hw api */ 3500 /* Setup hw api */
2721 memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops)); 3501 memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
3502 hw->mac.type = ii->mac;
2722 3503
2723 err = ii->get_invariants(hw); 3504 err = ii->get_invariants(hw);
2724 if (err) 3505 if (err)
@@ -2741,6 +3522,9 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
2741 if (pci_using_dac) 3522 if (pci_using_dac)
2742 netdev->features |= NETIF_F_HIGHDMA; 3523 netdev->features |= NETIF_F_HIGHDMA;
2743 3524
3525#ifdef CONFIG_NETDEVICES_MULTIQUEUE
3526 netdev->features |= NETIF_F_MULTI_QUEUE;
3527#endif
2744 3528
2745 /* make sure the EEPROM is good */ 3529 /* make sure the EEPROM is good */
2746 if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) { 3530 if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) {
@@ -2770,9 +3554,9 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
2770 hw->fc.low_water = IXGBE_DEFAULT_FCRTL; 3554 hw->fc.low_water = IXGBE_DEFAULT_FCRTL;
2771 hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE; 3555 hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
2772 3556
2773 /* Interrupt Throttle Rate */ 3557 err = ixgbe_init_interrupt_scheme(adapter);
2774 adapter->rx_eitr = (1000000 / IXGBE_DEFAULT_ITR_RX_USECS); 3558 if (err)
2775 adapter->tx_eitr = (1000000 / IXGBE_DEFAULT_ITR_TX_USECS); 3559 goto err_sw_init;
2776 3560
2777 /* print bus type/speed/width info */ 3561 /* print bus type/speed/width info */
2778 pci_read_config_word(pdev, IXGBE_PCI_LINK_STATUS, &link_status); 3562 pci_read_config_word(pdev, IXGBE_PCI_LINK_STATUS, &link_status);
@@ -2808,12 +3592,27 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
2808 3592
2809 netif_carrier_off(netdev); 3593 netif_carrier_off(netdev);
2810 netif_stop_queue(netdev); 3594 netif_stop_queue(netdev);
3595#ifdef CONFIG_NETDEVICES_MULTIQUEUE
3596 for (i = 0; i < adapter->num_tx_queues; i++)
3597 netif_stop_subqueue(netdev, i);
3598#endif
3599
3600 ixgbe_napi_add_all(adapter);
2811 3601
2812 strcpy(netdev->name, "eth%d"); 3602 strcpy(netdev->name, "eth%d");
2813 err = register_netdev(netdev); 3603 err = register_netdev(netdev);
2814 if (err) 3604 if (err)
2815 goto err_register; 3605 goto err_register;
2816 3606
3607#ifdef CONFIG_DCA
3608 if (dca_add_requester(&pdev->dev) == 0) {
3609 adapter->flags |= IXGBE_FLAG_DCA_ENABLED;
3610 /* always use CB2 mode, difference is masked
3611 * in the CB driver */
3612 IXGBE_WRITE_REG(hw, IXGBE_DCA_CTRL, 2);
3613 ixgbe_setup_dca(adapter);
3614 }
3615#endif
2817 3616
2818 dev_info(&pdev->dev, "Intel(R) 10 Gigabit Network Connection\n"); 3617 dev_info(&pdev->dev, "Intel(R) 10 Gigabit Network Connection\n");
2819 cards_found++; 3618 cards_found++;
@@ -2823,6 +3622,7 @@ err_register:
2823 ixgbe_release_hw_control(adapter); 3622 ixgbe_release_hw_control(adapter);
2824err_hw_init: 3623err_hw_init:
2825err_sw_init: 3624err_sw_init:
3625 ixgbe_reset_interrupt_capability(adapter);
2826err_eeprom: 3626err_eeprom:
2827 iounmap(hw->hw_addr); 3627 iounmap(hw->hw_addr);
2828err_ioremap: 3628err_ioremap:
@@ -2854,16 +3654,27 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
2854 3654
2855 flush_scheduled_work(); 3655 flush_scheduled_work();
2856 3656
3657#ifdef CONFIG_DCA
3658 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
3659 adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED;
3660 dca_remove_requester(&pdev->dev);
3661 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1);
3662 }
3663
3664#endif
2857 unregister_netdev(netdev); 3665 unregister_netdev(netdev);
2858 3666
2859 ixgbe_release_hw_control(adapter); 3667 ixgbe_reset_interrupt_capability(adapter);
2860 3668
2861 kfree(adapter->tx_ring); 3669 ixgbe_release_hw_control(adapter);
2862 kfree(adapter->rx_ring);
2863 3670
2864 iounmap(adapter->hw.hw_addr); 3671 iounmap(adapter->hw.hw_addr);
2865 pci_release_regions(pdev); 3672 pci_release_regions(pdev);
2866 3673
3674 DPRINTK(PROBE, INFO, "complete\n");
3675 kfree(adapter->tx_ring);
3676 kfree(adapter->rx_ring);
3677
2867 free_netdev(netdev); 3678 free_netdev(netdev);
2868 3679
2869 pci_disable_device(pdev); 3680 pci_disable_device(pdev);
@@ -2975,6 +3786,10 @@ static int __init ixgbe_init_module(void)
2975 3786
2976 printk(KERN_INFO "%s: %s\n", ixgbe_driver_name, ixgbe_copyright); 3787 printk(KERN_INFO "%s: %s\n", ixgbe_driver_name, ixgbe_copyright);
2977 3788
3789#ifdef CONFIG_DCA
3790 dca_register_notify(&dca_notifier);
3791
3792#endif
2978 ret = pci_register_driver(&ixgbe_driver); 3793 ret = pci_register_driver(&ixgbe_driver);
2979 return ret; 3794 return ret;
2980} 3795}
@@ -2988,8 +3803,25 @@ module_init(ixgbe_init_module);
2988 **/ 3803 **/
2989static void __exit ixgbe_exit_module(void) 3804static void __exit ixgbe_exit_module(void)
2990{ 3805{
3806#ifdef CONFIG_DCA
3807 dca_unregister_notify(&dca_notifier);
3808#endif
2991 pci_unregister_driver(&ixgbe_driver); 3809 pci_unregister_driver(&ixgbe_driver);
2992} 3810}
3811
3812#ifdef CONFIG_DCA
3813static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event,
3814 void *p)
3815{
3816 int ret_val;
3817
3818 ret_val = driver_for_each_device(&ixgbe_driver.driver, NULL, &event,
3819 __ixgbe_notify_dca);
3820
3821 return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
3822}
3823#endif /* CONFIG_DCA */
3824
2993module_exit(ixgbe_exit_module); 3825module_exit(ixgbe_exit_module);
2994 3826
2995/* ixgbe_main.c */ 3827/* ixgbe_main.c */
diff --git a/drivers/net/korina.c b/drivers/net/korina.c
new file mode 100644
index 000000000000..1d24a73a0e1a
--- /dev/null
+++ b/drivers/net/korina.c
@@ -0,0 +1,1233 @@
1/*
2 * Driver for the IDT RC32434 (Korina) on-chip ethernet controller.
3 *
4 * Copyright 2004 IDT Inc. (rischelp@idt.com)
5 * Copyright 2006 Felix Fietkau <nbd@openwrt.org>
6 * Copyright 2008 Florian Fainelli <florian@openwrt.org>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 *
13 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
14 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
15 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
16 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
17 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
18 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
19 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
20 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
21 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
22 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
23 *
24 * You should have received a copy of the GNU General Public License along
25 * with this program; if not, write to the Free Software Foundation, Inc.,
26 * 675 Mass Ave, Cambridge, MA 02139, USA.
27 *
28 * Writing to a DMA status register:
29 *
30 * When writing to the status register, you should mask the bit you have
31 * been testing the status register with. Both Tx and Rx DMA registers
32 * should stick to this procedure.
33 */
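/*
 * Example of the rule above, as done in korina_rx() below: after seeing
 * DMA_STAT_DONE set in the Rx channel's dmas register, acknowledge only that
 * bit by writing its complement rather than clearing the whole register:
 *
 *	writel(~DMA_STAT_DONE, &lp->rx_dma_regs->dmas);
 */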
34
35#include <linux/module.h>
36#include <linux/kernel.h>
37#include <linux/moduleparam.h>
38#include <linux/sched.h>
39#include <linux/ctype.h>
40#include <linux/types.h>
41#include <linux/interrupt.h>
42#include <linux/init.h>
43#include <linux/ioport.h>
44#include <linux/in.h>
45#include <linux/slab.h>
46#include <linux/string.h>
47#include <linux/delay.h>
48#include <linux/netdevice.h>
49#include <linux/etherdevice.h>
50#include <linux/skbuff.h>
51#include <linux/errno.h>
52#include <linux/platform_device.h>
53#include <linux/mii.h>
54#include <linux/ethtool.h>
55#include <linux/crc32.h>
56
57#include <asm/bootinfo.h>
58#include <asm/system.h>
59#include <asm/bitops.h>
60#include <asm/pgtable.h>
61#include <asm/segment.h>
62#include <asm/io.h>
63#include <asm/dma.h>
64
65#include <asm/mach-rc32434/rb.h>
66#include <asm/mach-rc32434/rc32434.h>
67#include <asm/mach-rc32434/eth.h>
68#include <asm/mach-rc32434/dma_v.h>
69
70#define DRV_NAME "korina"
71#define DRV_VERSION "0.10"
72#define DRV_RELDATE "04Mar2008"
73
74#define STATION_ADDRESS_HIGH(dev) (((dev)->dev_addr[0] << 8) | \
75 ((dev)->dev_addr[1]))
76#define STATION_ADDRESS_LOW(dev) (((dev)->dev_addr[2] << 24) | \
77 ((dev)->dev_addr[3] << 16) | \
78 ((dev)->dev_addr[4] << 8) | \
79 ((dev)->dev_addr[5]))
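/*
 * Example: for MAC address 00:11:22:33:44:55, STATION_ADDRESS_HIGH yields
 * 0x0011 (the first two octets) and STATION_ADDRESS_LOW 0x22334455 (the
 * remaining four), matching the split written to the ethsah/ethsal registers.
 */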
80
81#define MII_CLOCK 1250000 /* no more than 2.5MHz */
82
83/* the following must be powers of two */
84#define KORINA_NUM_RDS 64 /* number of receive descriptors */
85#define KORINA_NUM_TDS 64 /* number of transmit descriptors */
86
87#define KORINA_RBSIZE 536 /* size of one resource buffer = Ether MTU */
88#define KORINA_RDS_MASK (KORINA_NUM_RDS - 1)
89#define KORINA_TDS_MASK (KORINA_NUM_TDS - 1)
90#define RD_RING_SIZE (KORINA_NUM_RDS * sizeof(struct dma_desc))
91#define TD_RING_SIZE (KORINA_NUM_TDS * sizeof(struct dma_desc))
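/*
 * Because the ring sizes above are powers of two, ring indices wrap with a
 * cheap AND instead of a modulo, e.g. (63 + 1) & KORINA_TDS_MASK == 0; this
 * is how tx_next_done and rx_next_done are advanced further down.
 */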
92
93#define TX_TIMEOUT (6000 * HZ / 1000)
94
95enum chain_status { desc_filled, desc_empty };
96#define IS_DMA_FINISHED(X) (((X) & (DMA_DESC_FINI)) != 0)
97#define IS_DMA_DONE(X) (((X) & (DMA_DESC_DONE)) != 0)
98#define RCVPKT_LENGTH(X) (((X) & ETH_RX_LEN) >> ETH_RX_LEN_BIT)
99
100/* Information that needs to be kept for each board. */
101struct korina_private {
102 struct eth_regs *eth_regs;
103 struct dma_reg *rx_dma_regs;
104 struct dma_reg *tx_dma_regs;
105 struct dma_desc *td_ring; /* transmit descriptor ring */
106 struct dma_desc *rd_ring; /* receive descriptor ring */
107
108 struct sk_buff *tx_skb[KORINA_NUM_TDS];
109 struct sk_buff *rx_skb[KORINA_NUM_RDS];
110
111 int rx_next_done;
112 int rx_chain_head;
113 int rx_chain_tail;
114 enum chain_status rx_chain_status;
115
116 int tx_next_done;
117 int tx_chain_head;
118 int tx_chain_tail;
119 enum chain_status tx_chain_status;
120 int tx_count;
121 int tx_full;
122
123 int rx_irq;
124 int tx_irq;
125 int ovr_irq;
126 int und_irq;
127
128 spinlock_t lock; /* NIC xmit lock */
129
130 int dma_halt_cnt;
131 int dma_run_cnt;
132 struct napi_struct napi;
133 struct mii_if_info mii_if;
134 struct net_device *dev;
135 int phy_addr;
136};
137
138extern unsigned int idt_cpu_freq;
139
140static inline void korina_start_dma(struct dma_reg *ch, u32 dma_addr)
141{
142 writel(0, &ch->dmandptr);
143 writel(dma_addr, &ch->dmadptr);
144}
145
146static inline void korina_abort_dma(struct net_device *dev,
147 struct dma_reg *ch)
148{
149 if (readl(&ch->dmac) & DMA_CHAN_RUN_BIT) {
150 writel(0x10, &ch->dmac);
151
152 while (!(readl(&ch->dmas) & DMA_STAT_HALT))
153 dev->trans_start = jiffies;
154
155 writel(0, &ch->dmas);
156 }
157
158 writel(0, &ch->dmadptr);
159 writel(0, &ch->dmandptr);
160}
161
162static inline void korina_chain_dma(struct dma_reg *ch, u32 dma_addr)
163{
164 writel(dma_addr, &ch->dmandptr);
165}
166
167static void korina_abort_tx(struct net_device *dev)
168{
169 struct korina_private *lp = netdev_priv(dev);
170
171 korina_abort_dma(dev, lp->tx_dma_regs);
172}
173
174static void korina_abort_rx(struct net_device *dev)
175{
176 struct korina_private *lp = netdev_priv(dev);
177
178 korina_abort_dma(dev, lp->rx_dma_regs);
179}
180
181static void korina_start_rx(struct korina_private *lp,
182 struct dma_desc *rd)
183{
184 korina_start_dma(lp->rx_dma_regs, CPHYSADDR(rd));
185}
186
187static void korina_chain_rx(struct korina_private *lp,
188 struct dma_desc *rd)
189{
190 korina_chain_dma(lp->rx_dma_regs, CPHYSADDR(rd));
191}
192
193/* transmit packet */
194static int korina_send_packet(struct sk_buff *skb, struct net_device *dev)
195{
196 struct korina_private *lp = netdev_priv(dev);
197 unsigned long flags;
198 u32 length;
199 u32 chain_index;
200 struct dma_desc *td;
201
202 spin_lock_irqsave(&lp->lock, flags);
203
204 td = &lp->td_ring[lp->tx_chain_tail];
205
206 /* stop queue when full, drop pkts if queue already full */
207 if (lp->tx_count >= (KORINA_NUM_TDS - 2)) {
208 lp->tx_full = 1;
209
210 if (lp->tx_count == (KORINA_NUM_TDS - 2))
211 netif_stop_queue(dev);
212 else {
213 dev->stats.tx_dropped++;
214 dev_kfree_skb_any(skb);
215 spin_unlock_irqrestore(&lp->lock, flags);
216
217 return NETDEV_TX_BUSY;
218 }
219 }
220
221 lp->tx_count++;
222
223 lp->tx_skb[lp->tx_chain_tail] = skb;
224
225 length = skb->len;
226 dma_cache_wback((u32)skb->data, skb->len);
227
228 /* Setup the transmit descriptor. */
229 dma_cache_inv((u32) td, sizeof(*td));
230 td->ca = CPHYSADDR(skb->data);
231 chain_index = (lp->tx_chain_tail - 1) &
232 KORINA_TDS_MASK;
233
234 if (readl(&(lp->tx_dma_regs->dmandptr)) == 0) {
235 if (lp->tx_chain_status == desc_empty) {
236 /* Update tail */
237 td->control = DMA_COUNT(length) |
238 DMA_DESC_COF | DMA_DESC_IOF;
239 /* Move tail */
240 lp->tx_chain_tail = chain_index;
241 /* Write to NDPTR */
242 writel(CPHYSADDR(&lp->td_ring[lp->tx_chain_head]),
243 &lp->tx_dma_regs->dmandptr);
244 /* Move head to tail */
245 lp->tx_chain_head = lp->tx_chain_tail;
246 } else {
247 /* Update tail */
248 td->control = DMA_COUNT(length) |
249 DMA_DESC_COF | DMA_DESC_IOF;
250 /* Link to prev */
251 lp->td_ring[chain_index].control &=
252 ~DMA_DESC_COF;
253 /* Link to prev */
254 lp->td_ring[chain_index].link = CPHYSADDR(td);
255 /* Move tail */
256 lp->tx_chain_tail = chain_index;
257 /* Write to NDPTR */
258 writel(CPHYSADDR(&lp->td_ring[lp->tx_chain_head]),
259 &(lp->tx_dma_regs->dmandptr));
260 /* Move head to tail */
261 lp->tx_chain_head = lp->tx_chain_tail;
262 lp->tx_chain_status = desc_empty;
263 }
264 } else {
265 if (lp->tx_chain_status == desc_empty) {
266 /* Update tail */
267 td->control = DMA_COUNT(length) |
268 DMA_DESC_COF | DMA_DESC_IOF;
269 /* Move tail */
270 lp->tx_chain_tail = chain_index;
271 lp->tx_chain_status = desc_filled;
272 netif_stop_queue(dev);
273 } else {
274 /* Update tail */
275 td->control = DMA_COUNT(length) |
276 DMA_DESC_COF | DMA_DESC_IOF;
277 lp->td_ring[chain_index].control &=
278 ~DMA_DESC_COF;
279 lp->td_ring[chain_index].link = CPHYSADDR(td);
280 lp->tx_chain_tail = chain_index;
281 }
282 }
283 dma_cache_wback((u32) td, sizeof(*td));
284
285 dev->trans_start = jiffies;
286 spin_unlock_irqrestore(&lp->lock, flags);
287
288 return NETDEV_TX_OK;
289}
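/*
 * korina_send_packet() above handles two cases: when the Tx DMA engine is
 * idle (dmandptr reads 0) the new chain is kicked off immediately by writing
 * the chain head to dmandptr; otherwise the descriptor is only linked onto
 * the pending chain (clearing DMA_DESC_COF on the previous tail) and the Tx
 * DMA interrupt handler restarts the engine once the running chain finishes.
 */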
290
291static int mdio_read(struct net_device *dev, int mii_id, int reg)
292{
293 struct korina_private *lp = netdev_priv(dev);
294 int ret;
295
296 mii_id = ((lp->rx_irq == 0x2c ? 1 : 0) << 8);
297
298 writel(0, &lp->eth_regs->miimcfg);
299 writel(0, &lp->eth_regs->miimcmd);
300 writel(mii_id | reg, &lp->eth_regs->miimaddr);
301 writel(ETH_MII_CMD_SCN, &lp->eth_regs->miimcmd);
302
303 ret = (int)(readl(&lp->eth_regs->miimrdd));
304 return ret;
305}
306
307static void mdio_write(struct net_device *dev, int mii_id, int reg, int val)
308{
309 struct korina_private *lp = netdev_priv(dev);
310
311 mii_id = ((lp->rx_irq == 0x2c ? 1 : 0) << 8);
312
313 writel(0, &lp->eth_regs->miimcfg);
314 writel(1, &lp->eth_regs->miimcmd);
315 writel(mii_id | reg, &lp->eth_regs->miimaddr);
316 writel(ETH_MII_CMD_SCN, &lp->eth_regs->miimcmd);
317 writel(val, &lp->eth_regs->miimwtd);
318}
319
320/* Ethernet Rx DMA interrupt */
321static irqreturn_t korina_rx_dma_interrupt(int irq, void *dev_id)
322{
323 struct net_device *dev = dev_id;
324 struct korina_private *lp = netdev_priv(dev);
325 u32 dmas, dmasm;
326 irqreturn_t retval;
327
328 dmas = readl(&lp->rx_dma_regs->dmas);
329 if (dmas & (DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR)) {
330 netif_rx_schedule(dev, &lp->napi);
331
332 dmasm = readl(&lp->rx_dma_regs->dmasm);
333 writel(dmasm | (DMA_STAT_DONE |
334 DMA_STAT_HALT | DMA_STAT_ERR),
335 &lp->rx_dma_regs->dmasm);
336
337 if (dmas & DMA_STAT_ERR)
338 printk(KERN_ERR DRV_NAME "%s: DMA error\n", dev->name);
339
340 retval = IRQ_HANDLED;
341 } else
342 retval = IRQ_NONE;
343
344 return retval;
345}
346
347static int korina_rx(struct net_device *dev, int limit)
348{
349 struct korina_private *lp = netdev_priv(dev);
350 struct dma_desc *rd = &lp->rd_ring[lp->rx_next_done];
351 struct sk_buff *skb, *skb_new;
352 u8 *pkt_buf;
353 u32 devcs, pkt_len, dmas, rx_free_desc;
354 int count;
355
356 dma_cache_inv((u32)rd, sizeof(*rd));
357
358 for (count = 0; count < limit; count++) {
359
360 devcs = rd->devcs;
361
362 /* Update statistics counters */
363 if (devcs & ETH_RX_CRC)
364 dev->stats.rx_crc_errors++;
365 if (devcs & ETH_RX_LOR)
366 dev->stats.rx_length_errors++;
367 if (devcs & ETH_RX_LE)
368 dev->stats.rx_length_errors++;
369 if (devcs & ETH_RX_OVR)
370 dev->stats.rx_over_errors++;
371 if (devcs & ETH_RX_CV)
372 dev->stats.rx_frame_errors++;
373 if (devcs & ETH_RX_CES)
374 dev->stats.rx_length_errors++;
375 if (devcs & ETH_RX_MP)
376 dev->stats.multicast++;
377
378 if ((devcs & ETH_RX_LD) != ETH_RX_LD) {
379 /* check that this is a whole packet
380 * WARNING: DMA_FD bit incorrectly set
381 * in Rc32434 (errata ref #077) */
382 dev->stats.rx_errors++;
383 dev->stats.rx_dropped++;
384 }
385
386 while ((rx_free_desc = KORINA_RBSIZE - (u32)DMA_COUNT(rd->control)) != 0) {
387 /* init the var. used for the later
388 * operations within the while loop */
389 skb_new = NULL;
390 pkt_len = RCVPKT_LENGTH(devcs);
391 skb = lp->rx_skb[lp->rx_next_done];
392
393 if ((devcs & ETH_RX_ROK)) {
394 /* must be the (first and) last
395 * descriptor then */
396 pkt_buf = (u8 *)lp->rx_skb[lp->rx_next_done]->data;
397
398 /* invalidate the cache */
399 dma_cache_inv((unsigned long)pkt_buf, pkt_len - 4);
400
401 /* Malloc up new buffer. */
402 skb_new = netdev_alloc_skb(dev, KORINA_RBSIZE + 2);
403
404 if (!skb_new)
405 break;
406 /* Do not count the CRC */
407 skb_put(skb, pkt_len - 4);
408 skb->protocol = eth_type_trans(skb, dev);
409
410 /* Pass the packet to upper layers */
411 netif_receive_skb(skb);
412 dev->last_rx = jiffies;
413 dev->stats.rx_packets++;
414 dev->stats.rx_bytes += pkt_len;
415
416 /* Update the mcast stats */
417 if (devcs & ETH_RX_MP)
418 dev->stats.multicast++;
419
420 lp->rx_skb[lp->rx_next_done] = skb_new;
421 }
422
423 rd->devcs = 0;
424
425 /* Restore descriptor's curr_addr */
426 if (skb_new)
427 rd->ca = CPHYSADDR(skb_new->data);
428 else
429 rd->ca = CPHYSADDR(skb->data);
430
431 rd->control = DMA_COUNT(KORINA_RBSIZE) |
432 DMA_DESC_COD | DMA_DESC_IOD;
433 lp->rd_ring[(lp->rx_next_done - 1) &
434 KORINA_RDS_MASK].control &=
435 ~DMA_DESC_COD;
436
437 lp->rx_next_done = (lp->rx_next_done + 1) & KORINA_RDS_MASK;
438 dma_cache_wback((u32)rd, sizeof(*rd));
439 rd = &lp->rd_ring[lp->rx_next_done];
440 writel(~DMA_STAT_DONE, &lp->rx_dma_regs->dmas);
441 }
442 }
443
444 dmas = readl(&lp->rx_dma_regs->dmas);
445
446 if (dmas & DMA_STAT_HALT) {
447 writel(~(DMA_STAT_HALT | DMA_STAT_ERR),
448 &lp->rx_dma_regs->dmas);
449
450 lp->dma_halt_cnt++;
451 rd->devcs = 0;
452 skb = lp->rx_skb[lp->rx_next_done];
453 rd->ca = CPHYSADDR(skb->data);
454 dma_cache_wback((u32)rd, sizeof(*rd));
455 korina_chain_rx(lp, rd);
456 }
457
458 return count;
459}
460
461static int korina_poll(struct napi_struct *napi, int budget)
462{
463 struct korina_private *lp =
464 container_of(napi, struct korina_private, napi);
465 struct net_device *dev = lp->dev;
466 int work_done;
467
468 work_done = korina_rx(dev, budget);
469 if (work_done < budget) {
470 netif_rx_complete(dev, napi);
471
472 writel(readl(&lp->rx_dma_regs->dmasm) &
473 ~(DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR),
474 &lp->rx_dma_regs->dmasm);
475 }
476 return work_done;
477}
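/*
 * korina_rx_dma_interrupt() and korina_poll() above split the work in the
 * usual NAPI way: the hard interrupt masks the Rx DMA status bits in dmasm
 * and schedules NAPI; the poll routine handles at most `budget` packets and
 * re-enables (unmasks) those bits only when it has done less than a full
 * budget of work.
 */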
478
479/*
480 * Set or clear the multicast filter for this adaptor.
481 */
482static void korina_multicast_list(struct net_device *dev)
483{
484 struct korina_private *lp = netdev_priv(dev);
485 unsigned long flags;
486 struct dev_mc_list *dmi = dev->mc_list;
487 u32 recognise = ETH_ARC_AB; /* always accept broadcasts */
488 int i;
489
490 /* Set promiscuous mode */
491 if (dev->flags & IFF_PROMISC)
492 recognise |= ETH_ARC_PRO;
493
494 else if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 4))
495 /* All multicast and broadcast */
496 recognise |= ETH_ARC_AM;
497
498 /* Build the hash table */
499 if (dev->mc_count > 4) {
500 u16 hash_table[4];
501 u32 crc;
502
503 for (i = 0; i < 4; i++)
504 hash_table[i] = 0;
505
506 for (i = 0; i < dev->mc_count; i++) {
507 char *addrs = dmi->dmi_addr;
508
509 dmi = dmi->next;
510
511 if (!(*addrs & 1))
512 continue;
513
514 crc = ether_crc_le(6, addrs);
515 crc >>= 26;
516 hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf));
517 }
518 /* Accept filtered multicast */
519 recognise |= ETH_ARC_AFM;
520
521 /* Fill the MAC hash tables with their values */
522 writel((u32)(hash_table[1] << 16 | hash_table[0]),
523 &lp->eth_regs->ethhash0);
524 writel((u32)(hash_table[3] << 16 | hash_table[2]),
525 &lp->eth_regs->ethhash1);
526 }
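	/*
	 * Placement rule used above: the top six bits of the little-endian
	 * CRC select one of 64 filter bits spread over four 16-bit hash
	 * registers (crc >> 4 picks the register, and bit 15 - (crc & 0xf)
	 * is set within it).
	 */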
527
528 spin_lock_irqsave(&lp->lock, flags);
529 writel(recognise, &lp->eth_regs->etharc);
530 spin_unlock_irqrestore(&lp->lock, flags);
531}
532
533static void korina_tx(struct net_device *dev)
534{
535 struct korina_private *lp = netdev_priv(dev);
536 struct dma_desc *td = &lp->td_ring[lp->tx_next_done];
537 u32 devcs;
538 u32 dmas;
539
540 spin_lock(&lp->lock);
541
542 /* Process all desc that are done */
543 while (IS_DMA_FINISHED(td->control)) {
544 if (lp->tx_full == 1) {
545 netif_wake_queue(dev);
546 lp->tx_full = 0;
547 }
548
549 devcs = lp->td_ring[lp->tx_next_done].devcs;
550 if ((devcs & (ETH_TX_FD | ETH_TX_LD)) !=
551 (ETH_TX_FD | ETH_TX_LD)) {
552 dev->stats.tx_errors++;
553 dev->stats.tx_dropped++;
554
555 /* Should never happen */
556 printk(KERN_ERR DRV_NAME "%s: split tx ignored\n",
557 dev->name);
558 } else if (devcs & ETH_TX_TOK) {
559 dev->stats.tx_packets++;
560 dev->stats.tx_bytes +=
561 lp->tx_skb[lp->tx_next_done]->len;
562 } else {
563 dev->stats.tx_errors++;
564 dev->stats.tx_dropped++;
565
566 /* Underflow */
567 if (devcs & ETH_TX_UND)
568 dev->stats.tx_fifo_errors++;
569
570 /* Oversized frame */
571 if (devcs & ETH_TX_OF)
572 dev->stats.tx_aborted_errors++;
573
574 /* Excessive deferrals */
575 if (devcs & ETH_TX_ED)
576 dev->stats.tx_carrier_errors++;
577
578 /* Collisions: medium busy */
579 if (devcs & ETH_TX_EC)
580 dev->stats.collisions++;
581
582 /* Late collision */
583 if (devcs & ETH_TX_LC)
584 dev->stats.tx_window_errors++;
585 }
586
587 /* We must always free the original skb */
588 if (lp->tx_skb[lp->tx_next_done]) {
589 dev_kfree_skb_any(lp->tx_skb[lp->tx_next_done]);
590 lp->tx_skb[lp->tx_next_done] = NULL;
591 }
592
593 lp->td_ring[lp->tx_next_done].control = DMA_DESC_IOF;
594 lp->td_ring[lp->tx_next_done].devcs = ETH_TX_FD | ETH_TX_LD;
595 lp->td_ring[lp->tx_next_done].link = 0;
596 lp->td_ring[lp->tx_next_done].ca = 0;
597 lp->tx_count--;
598
599 /* Go on to next transmission */
600 lp->tx_next_done = (lp->tx_next_done + 1) & KORINA_TDS_MASK;
601 td = &lp->td_ring[lp->tx_next_done];
602
603 }
604
605 /* Clear the DMA status register */
606 dmas = readl(&lp->tx_dma_regs->dmas);
607 writel(~dmas, &lp->tx_dma_regs->dmas);
608
609 writel(readl(&lp->tx_dma_regs->dmasm) &
610 ~(DMA_STAT_FINI | DMA_STAT_ERR),
611 &lp->tx_dma_regs->dmasm);
612
613 spin_unlock(&lp->lock);
614}
615
616static irqreturn_t
617korina_tx_dma_interrupt(int irq, void *dev_id)
618{
619 struct net_device *dev = dev_id;
620 struct korina_private *lp = netdev_priv(dev);
621 u32 dmas, dmasm;
622 irqreturn_t retval;
623
624 dmas = readl(&lp->tx_dma_regs->dmas);
625
626 if (dmas & (DMA_STAT_FINI | DMA_STAT_ERR)) {
627 korina_tx(dev);
628
629 dmasm = readl(&lp->tx_dma_regs->dmasm);
630 writel(dmasm | (DMA_STAT_FINI | DMA_STAT_ERR),
631 &lp->tx_dma_regs->dmasm);
632
633 if (lp->tx_chain_status == desc_filled &&
634 (readl(&(lp->tx_dma_regs->dmandptr)) == 0)) {
635 writel(CPHYSADDR(&lp->td_ring[lp->tx_chain_head]),
636 &(lp->tx_dma_regs->dmandptr));
637 lp->tx_chain_status = desc_empty;
638 lp->tx_chain_head = lp->tx_chain_tail;
639 dev->trans_start = jiffies;
640 }
641 if (dmas & DMA_STAT_ERR)
642 printk(KERN_ERR DRV_NAME "%s: DMA error\n", dev->name);
643
644 retval = IRQ_HANDLED;
645 } else
646 retval = IRQ_NONE;
647
648 return retval;
649}
650
651
652static void korina_check_media(struct net_device *dev, unsigned int init_media)
653{
654 struct korina_private *lp = netdev_priv(dev);
655
656 mii_check_media(&lp->mii_if, 0, init_media);
657
658 if (lp->mii_if.full_duplex)
659 writel(readl(&lp->eth_regs->ethmac2) | ETH_MAC2_FD,
660 &lp->eth_regs->ethmac2);
661 else
662 writel(readl(&lp->eth_regs->ethmac2) & ~ETH_MAC2_FD,
663 &lp->eth_regs->ethmac2);
664}
665
666static void korina_set_carrier(struct mii_if_info *mii)
667{
668 if (mii->force_media) {
669 /* autoneg is off: Link is always assumed to be up */
670 if (!netif_carrier_ok(mii->dev))
671 netif_carrier_on(mii->dev);
672 } else /* Let MII library update carrier status */
673 korina_check_media(mii->dev, 0);
674}
675
676static int korina_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
677{
678 struct korina_private *lp = netdev_priv(dev);
679 struct mii_ioctl_data *data = if_mii(rq);
680 int rc;
681
682 if (!netif_running(dev))
683 return -EINVAL;
684 spin_lock_irq(&lp->lock);
685 rc = generic_mii_ioctl(&lp->mii_if, data, cmd, NULL);
686 spin_unlock_irq(&lp->lock);
687 korina_set_carrier(&lp->mii_if);
688
689 return rc;
690}
691
692/* ethtool helpers */
693static void netdev_get_drvinfo(struct net_device *dev,
694 struct ethtool_drvinfo *info)
695{
696 struct korina_private *lp = netdev_priv(dev);
697
698 strcpy(info->driver, DRV_NAME);
699 strcpy(info->version, DRV_VERSION);
700 strcpy(info->bus_info, lp->dev->name);
701}
702
703static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
704{
705 struct korina_private *lp = netdev_priv(dev);
706 int rc;
707
708 spin_lock_irq(&lp->lock);
709 rc = mii_ethtool_gset(&lp->mii_if, cmd);
710 spin_unlock_irq(&lp->lock);
711
712 return rc;
713}
714
715static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
716{
717 struct korina_private *lp = netdev_priv(dev);
718 int rc;
719
720 spin_lock_irq(&lp->lock);
721 rc = mii_ethtool_sset(&lp->mii_if, cmd);
722 spin_unlock_irq(&lp->lock);
723 korina_set_carrier(&lp->mii_if);
724
725 return rc;
726}
727
728static u32 netdev_get_link(struct net_device *dev)
729{
730 struct korina_private *lp = netdev_priv(dev);
731
732 return mii_link_ok(&lp->mii_if);
733}
734
735static struct ethtool_ops netdev_ethtool_ops = {
736 .get_drvinfo = netdev_get_drvinfo,
737 .get_settings = netdev_get_settings,
738 .set_settings = netdev_set_settings,
739 .get_link = netdev_get_link,
740};
741
742static void korina_alloc_ring(struct net_device *dev)
743{
744 struct korina_private *lp = netdev_priv(dev);
745 int i;
746
747 /* Initialize the transmit descriptors */
748 for (i = 0; i < KORINA_NUM_TDS; i++) {
749 lp->td_ring[i].control = DMA_DESC_IOF;
750 lp->td_ring[i].devcs = ETH_TX_FD | ETH_TX_LD;
751 lp->td_ring[i].ca = 0;
752 lp->td_ring[i].link = 0;
753 }
754 lp->tx_next_done = lp->tx_chain_head = lp->tx_chain_tail =
755 lp->tx_full = lp->tx_count = 0;
756 lp->tx_chain_status = desc_empty;
757
758 /* Initialize the receive descriptors */
759 for (i = 0; i < KORINA_NUM_RDS; i++) {
760 struct sk_buff *skb = lp->rx_skb[i];
761
762 skb = dev_alloc_skb(KORINA_RBSIZE + 2);
763 if (!skb)
764 break;
765 skb_reserve(skb, 2);
766 lp->rx_skb[i] = skb;
767 lp->rd_ring[i].control = DMA_DESC_IOD |
768 DMA_COUNT(KORINA_RBSIZE);
769 lp->rd_ring[i].devcs = 0;
770 lp->rd_ring[i].ca = CPHYSADDR(skb->data);
771 lp->rd_ring[i].link = CPHYSADDR(&lp->rd_ring[i+1]);
772 }
773
774 /* loop back: the last descriptor points back to the first one */
775 lp->rd_ring[i - 1].link = CPHYSADDR(&lp->rd_ring[0]);
776 lp->rx_next_done = 0;
777
778 lp->rd_ring[i - 1].control |= DMA_DESC_COD;
779 lp->rx_chain_head = 0;
780 lp->rx_chain_tail = 0;
781 lp->rx_chain_status = desc_empty;
782}
783
784static void korina_free_ring(struct net_device *dev)
785{
786 struct korina_private *lp = netdev_priv(dev);
787 int i;
788
789 for (i = 0; i < KORINA_NUM_RDS; i++) {
790 lp->rd_ring[i].control = 0;
791 if (lp->rx_skb[i])
792 dev_kfree_skb_any(lp->rx_skb[i]);
793 lp->rx_skb[i] = NULL;
794 }
795
796 for (i = 0; i < KORINA_NUM_TDS; i++) {
797 lp->td_ring[i].control = 0;
798 if (lp->tx_skb[i])
799 dev_kfree_skb_any(lp->tx_skb[i]);
800 lp->tx_skb[i] = NULL;
801 }
802}
803
804/*
805 * Initialize the RC32434 ethernet controller.
806 */
807static int korina_init(struct net_device *dev)
808{
809 struct korina_private *lp = netdev_priv(dev);
810
811 /* Disable DMA */
812 korina_abort_tx(dev);
813 korina_abort_rx(dev);
814
815 /* reset ethernet logic */
816 writel(0, &lp->eth_regs->ethintfc);
817 while ((readl(&lp->eth_regs->ethintfc) & ETH_INT_FC_RIP))
818 dev->trans_start = jiffies;
819
820 /* Enable Ethernet Interface */
821 writel(ETH_INT_FC_EN, &lp->eth_regs->ethintfc);
822
823 /* Allocate rings */
824 korina_alloc_ring(dev);
825
826 writel(0, &lp->rx_dma_regs->dmas);
827 /* Start Rx DMA */
828 korina_start_rx(lp, &lp->rd_ring[0]);
829
830 writel(readl(&lp->tx_dma_regs->dmasm) &
831 ~(DMA_STAT_FINI | DMA_STAT_ERR),
832 &lp->tx_dma_regs->dmasm);
833 writel(readl(&lp->rx_dma_regs->dmasm) &
834 ~(DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR),
835 &lp->rx_dma_regs->dmasm);
836
837 /* Accept only packets destined for this Ethernet device address */
838 writel(ETH_ARC_AB, &lp->eth_regs->etharc);
839
840 /* Set all Ether station address registers to their initial values */
841 writel(STATION_ADDRESS_LOW(dev), &lp->eth_regs->ethsal0);
842 writel(STATION_ADDRESS_HIGH(dev), &lp->eth_regs->ethsah0);
843
844 writel(STATION_ADDRESS_LOW(dev), &lp->eth_regs->ethsal1);
845 writel(STATION_ADDRESS_HIGH(dev), &lp->eth_regs->ethsah1);
846
847 writel(STATION_ADDRESS_LOW(dev), &lp->eth_regs->ethsal2);
848 writel(STATION_ADDRESS_HIGH(dev), &lp->eth_regs->ethsah2);
849
850 writel(STATION_ADDRESS_LOW(dev), &lp->eth_regs->ethsal3);
851 writel(STATION_ADDRESS_HIGH(dev), &lp->eth_regs->ethsah3);
852
853
854 /* Frame Length Checking, Pad Enable, CRC Enable, Full Duplex set */
855 writel(ETH_MAC2_PE | ETH_MAC2_CEN | ETH_MAC2_FD,
856 &lp->eth_regs->ethmac2);
857
858 /* Back to back inter-packet-gap */
859 writel(0x15, &lp->eth_regs->ethipgt);
860 /* Non - Back to back inter-packet-gap */
861 writel(0x12, &lp->eth_regs->ethipgr);
862
863 /* Management Clock Prescaler Divisor
864 * Clock independent setting */
865 writel(((idt_cpu_freq) / MII_CLOCK + 1) & ~1,
866 &lp->eth_regs->ethmcp);
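	/*
	 * Example, assuming a 330 MHz IDT CPU clock: 330000000 / 1250000 + 1
	 * = 265, forced even to 264, which keeps the resulting management
	 * clock well under the 2.5 MHz limit noted at MII_CLOCK.
	 */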
867
868 /* don't transmit until fifo contains 48b */
869 writel(48, &lp->eth_regs->ethfifott);
870
871 writel(ETH_MAC1_RE, &lp->eth_regs->ethmac1);
872
873 napi_enable(&lp->napi);
874 netif_start_queue(dev);
875
876 return 0;
877}
878
879/*
880 * Restart the RC32434 ethernet controller.
881 * FIXME: check the return status where we call it
882 */
883static int korina_restart(struct net_device *dev)
884{
885 struct korina_private *lp = netdev_priv(dev);
886 int ret = 0;
887
888 /*
889 * Disable interrupts
890 */
891 disable_irq(lp->rx_irq);
892 disable_irq(lp->tx_irq);
893 disable_irq(lp->ovr_irq);
894 disable_irq(lp->und_irq);
895
896 writel(readl(&lp->tx_dma_regs->dmasm) |
897 DMA_STAT_FINI | DMA_STAT_ERR,
898 &lp->tx_dma_regs->dmasm);
899 writel(readl(&lp->rx_dma_regs->dmasm) |
900 DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR,
901 &lp->rx_dma_regs->dmasm);
902
903 korina_free_ring(dev);
904
905 ret = korina_init(dev);
906 if (ret < 0) {
907 printk(KERN_ERR DRV_NAME "%s: cannot restart device\n",
908 dev->name);
909 return ret;
910 }
911 korina_multicast_list(dev);
912
913 enable_irq(lp->und_irq);
914 enable_irq(lp->ovr_irq);
915 enable_irq(lp->tx_irq);
916 enable_irq(lp->rx_irq);
917
918 return ret;
919}
920
921static void korina_clear_and_restart(struct net_device *dev, u32 value)
922{
923 struct korina_private *lp = netdev_priv(dev);
924
925 netif_stop_queue(dev);
926 writel(value, &lp->eth_regs->ethintfc);
927 korina_restart(dev);
928}
929
930/* Ethernet Tx Underflow interrupt */
931static irqreturn_t korina_und_interrupt(int irq, void *dev_id)
932{
933 struct net_device *dev = dev_id;
934 struct korina_private *lp = netdev_priv(dev);
935 unsigned int und;
936
937 spin_lock(&lp->lock);
938
939 und = readl(&lp->eth_regs->ethintfc);
940
941 if (und & ETH_INT_FC_UND)
942 korina_clear_and_restart(dev, und & ~ETH_INT_FC_UND);
943
944 spin_unlock(&lp->lock);
945
946 return IRQ_HANDLED;
947}
948
949static void korina_tx_timeout(struct net_device *dev)
950{
951 struct korina_private *lp = netdev_priv(dev);
952 unsigned long flags;
953
954 spin_lock_irqsave(&lp->lock, flags);
955 korina_restart(dev);
956 spin_unlock_irqrestore(&lp->lock, flags);
957}
958
959/* Ethernet Rx Overflow interrupt */
960static irqreturn_t
961korina_ovr_interrupt(int irq, void *dev_id)
962{
963 struct net_device *dev = dev_id;
964 struct korina_private *lp = netdev_priv(dev);
965 unsigned int ovr;
966
967 spin_lock(&lp->lock);
968 ovr = readl(&lp->eth_regs->ethintfc);
969
970 if (ovr & ETH_INT_FC_OVR)
971 korina_clear_and_restart(dev, ovr & ~ETH_INT_FC_OVR);
972
973 spin_unlock(&lp->lock);
974
975 return IRQ_HANDLED;
976}
977
978#ifdef CONFIG_NET_POLL_CONTROLLER
979static void korina_poll_controller(struct net_device *dev)
980{
981 disable_irq(dev->irq);
982 korina_tx_dma_interrupt(dev->irq, dev);
983 enable_irq(dev->irq);
984}
985#endif
986
987static int korina_open(struct net_device *dev)
988{
989 struct korina_private *lp = netdev_priv(dev);
990 int ret = 0;
991
992 /* Initialize */
993 ret = korina_init(dev);
994 if (ret < 0) {
995 printk(KERN_ERR DRV_NAME "%s: cannot open device\n", dev->name);
996 goto out;
997 }
998
999 /* Install the interrupt handler
1000 * that handles the Done Finished
1001 * Ovr and Und Events */
1002 ret = request_irq(lp->rx_irq, &korina_rx_dma_interrupt,
1003 IRQF_SHARED | IRQF_DISABLED, "Korina ethernet Rx", dev);
1004 if (ret < 0) {
1005 printk(KERN_ERR DRV_NAME "%s: unable to get Rx DMA IRQ %d\n",
1006 dev->name, lp->rx_irq);
1007 goto err_release;
1008 }
1009 ret = request_irq(lp->tx_irq, &korina_tx_dma_interrupt,
1010 IRQF_SHARED | IRQF_DISABLED, "Korina ethernet Tx", dev);
1011 if (ret < 0) {
1012 printk(KERN_ERR DRV_NAME "%s: unable to get Tx DMA IRQ %d\n",
1013 dev->name, lp->tx_irq);
1014 goto err_free_rx_irq;
1015 }
1016
1017 /* Install handler for overrun error. */
1018 ret = request_irq(lp->ovr_irq, &korina_ovr_interrupt,
1019 IRQF_SHARED | IRQF_DISABLED, "Ethernet Overflow", dev);
1020 if (ret < 0) {
1021 printk(KERN_ERR DRV_NAME"%s: unable to get OVR IRQ %d\n",
1022 dev->name, lp->ovr_irq);
1023 goto err_free_tx_irq;
1024 }
1025
1026 /* Install handler for underflow error. */
1027 ret = request_irq(lp->und_irq, &korina_und_interrupt,
1028 IRQF_SHARED | IRQF_DISABLED, "Ethernet Underflow", dev);
1029 if (ret < 0) {
1030 printk(KERN_ERR DRV_NAME "%s: unable to get UND IRQ %d\n",
1031 dev->name, lp->und_irq);
1032 goto err_free_ovr_irq;
1033 }
1034
1035err_free_ovr_irq:
1036 free_irq(lp->ovr_irq, dev);
1037err_free_tx_irq:
1038 free_irq(lp->tx_irq, dev);
1039err_free_rx_irq:
1040 free_irq(lp->rx_irq, dev);
1041err_release:
1042 korina_free_ring(dev);
1043 goto out;
1044out:
1045 return ret;
1046}
1047
1048static int korina_close(struct net_device *dev)
1049{
1050 struct korina_private *lp = netdev_priv(dev);
1051 u32 tmp;
1052
1053 /* Disable interrupts */
1054 disable_irq(lp->rx_irq);
1055 disable_irq(lp->tx_irq);
1056 disable_irq(lp->ovr_irq);
1057 disable_irq(lp->und_irq);
1058
1059 korina_abort_tx(dev);
1060 tmp = readl(&lp->tx_dma_regs->dmasm);
1061 tmp = tmp | DMA_STAT_FINI | DMA_STAT_ERR;
1062 writel(tmp, &lp->tx_dma_regs->dmasm);
1063
1064 korina_abort_rx(dev);
1065 tmp = readl(&lp->rx_dma_regs->dmasm);
1066 tmp = tmp | DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR;
1067 writel(tmp, &lp->rx_dma_regs->dmasm);
1068
1069 korina_free_ring(dev);
1070
1071 free_irq(lp->rx_irq, dev);
1072 free_irq(lp->tx_irq, dev);
1073 free_irq(lp->ovr_irq, dev);
1074 free_irq(lp->und_irq, dev);
1075
1076 return 0;
1077}
1078
1079static int korina_probe(struct platform_device *pdev)
1080{
1081 struct korina_device *bif = platform_get_drvdata(pdev);
1082 struct korina_private *lp;
1083 struct net_device *dev;
1084 struct resource *r;
1085 int retval, err;
1086
1087 dev = alloc_etherdev(sizeof(struct korina_private));
1088 if (!dev) {
1089 printk(KERN_ERR DRV_NAME ": alloc_etherdev failed\n");
1090 return -ENOMEM;
1091 }
1092 SET_NETDEV_DEV(dev, &pdev->dev);
1093 platform_set_drvdata(pdev, dev);
1094 lp = netdev_priv(dev);
1095
1096 bif->dev = dev;
1097 memcpy(dev->dev_addr, bif->mac, 6);
1098
1099 lp->rx_irq = platform_get_irq_byname(pdev, "korina_rx");
1100 lp->tx_irq = platform_get_irq_byname(pdev, "korina_tx");
1101 lp->ovr_irq = platform_get_irq_byname(pdev, "korina_ovr");
1102 lp->und_irq = platform_get_irq_byname(pdev, "korina_und");
1103
1104 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "korina_regs");
1105 dev->base_addr = r->start;
1106 lp->eth_regs = ioremap_nocache(r->start, r->end - r->start);
1107 if (!lp->eth_regs) {
1108 printk(KERN_ERR DRV_NAME "cannot remap registers\n");
1109 retval = -ENXIO;
1110 goto probe_err_out;
1111 }
1112
1113 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "korina_dma_rx");
1114 lp->rx_dma_regs = ioremap_nocache(r->start, r->end - r->start);
1115 if (!lp->rx_dma_regs) {
1116 printk(KERN_ERR DRV_NAME "cannot remap Rx DMA registers\n");
1117 retval = -ENXIO;
1118 goto probe_err_dma_rx;
1119 }
1120
1121 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "korina_dma_tx");
1122 lp->tx_dma_regs = ioremap_nocache(r->start, r->end - r->start);
1123 if (!lp->tx_dma_regs) {
1124 printk(KERN_ERR DRV_NAME "cannot remap Tx DMA registers\n");
1125 retval = -ENXIO;
1126 goto probe_err_dma_tx;
1127 }
1128
1129 lp->td_ring = kmalloc(TD_RING_SIZE + RD_RING_SIZE, GFP_KERNEL);
1130 if (!lp->td_ring) {
1131 printk(KERN_ERR DRV_NAME "cannot allocate descriptors\n");
1132 retval = -ENOMEM;
1133 goto probe_err_td_ring;
1134 }
1135
1136 dma_cache_inv((unsigned long)(lp->td_ring),
1137 TD_RING_SIZE + RD_RING_SIZE);
1138
1139 /* now convert TD_RING pointer to KSEG1 */
1140 lp->td_ring = (struct dma_desc *)KSEG1ADDR(lp->td_ring);
1141 lp->rd_ring = &lp->td_ring[KORINA_NUM_TDS];
1142
1143 spin_lock_init(&lp->lock);
1144 /* just use the rx dma irq */
1145 dev->irq = lp->rx_irq;
1146 lp->dev = dev;
1147
1148 dev->open = korina_open;
1149 dev->stop = korina_close;
1150 dev->hard_start_xmit = korina_send_packet;
1151 dev->set_multicast_list = &korina_multicast_list;
1152 dev->ethtool_ops = &netdev_ethtool_ops;
1153 dev->tx_timeout = korina_tx_timeout;
1154 dev->watchdog_timeo = TX_TIMEOUT;
1155 dev->do_ioctl = &korina_ioctl;
1156#ifdef CONFIG_NET_POLL_CONTROLLER
1157 dev->poll_controller = korina_poll_controller;
1158#endif
1159 netif_napi_add(dev, &lp->napi, korina_poll, 64);
1160
1161 lp->phy_addr = (((lp->rx_irq == 0x2c? 1:0) << 8) | 0x05);
1162 lp->mii_if.dev = dev;
1163 lp->mii_if.mdio_read = mdio_read;
1164 lp->mii_if.mdio_write = mdio_write;
1165 lp->mii_if.phy_id = lp->phy_addr;
1166 lp->mii_if.phy_id_mask = 0x1f;
1167 lp->mii_if.reg_num_mask = 0x1f;
1168
1169 err = register_netdev(dev);
1170 if (err) {
1171 printk(KERN_ERR DRV_NAME
1172 ": cannot register net device %d\n", err);
1173 retval = -EINVAL;
1174 goto probe_err_register;
1175 }
1176 return 0;
1177
1178probe_err_register:
1179 kfree(lp->td_ring);
1180probe_err_td_ring:
1181 iounmap(lp->tx_dma_regs);
1182probe_err_dma_tx:
1183 iounmap(lp->rx_dma_regs);
1184probe_err_dma_rx:
1185 iounmap(lp->eth_regs);
1186probe_err_out:
1187 free_netdev(dev);
1188 return retval;
1189}
1190
1191static int korina_remove(struct platform_device *pdev)
1192{
1193 struct korina_device *bif = platform_get_drvdata(pdev);
1194 struct korina_private *lp = netdev_priv(bif->dev);
1195
1196 if (lp->eth_regs)
1197 iounmap(lp->eth_regs);
1198 if (lp->rx_dma_regs)
1199 iounmap(lp->rx_dma_regs);
1200 if (lp->tx_dma_regs)
1201 iounmap(lp->tx_dma_regs);
1202
1203 platform_set_drvdata(pdev, NULL);
1204 unregister_netdev(bif->dev);
1205 free_netdev(bif->dev);
1206
1207 return 0;
1208}
1209
1210static struct platform_driver korina_driver = {
1211 .driver.name = "korina",
1212 .probe = korina_probe,
1213 .remove = korina_remove,
1214};
1215
1216static int __init korina_init_module(void)
1217{
1218 return platform_driver_register(&korina_driver);
1219}
1220
1221static void korina_cleanup_module(void)
1222{
1223 return platform_driver_unregister(&korina_driver);
1224}
1225
1226module_init(korina_init_module);
1227module_exit(korina_cleanup_module);
1228
1229MODULE_AUTHOR("Philip Rischel <rischelp@idt.com>");
1230MODULE_AUTHOR("Felix Fietkau <nbd@openwrt.org>");
1231MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>");
1232MODULE_DESCRIPTION("IDT RC32434 (Korina) Ethernet driver");
1233MODULE_LICENSE("GPL");
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index f2a6e7132241..41b774baac4d 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -258,7 +258,7 @@ static __net_init int loopback_net_init(struct net *net)
258 if (!dev) 258 if (!dev)
259 goto out; 259 goto out;
260 260
261 dev->nd_net = net; 261 dev_net_set(dev, net);
262 err = register_netdev(dev); 262 err = register_netdev(dev);
263 if (err) 263 if (err)
264 goto out_free_netdev; 264 goto out_free_netdev;
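The accessor used here wraps the net namespace pointer so it can be compiled out when CONFIG_NET_NS is disabled; roughly, simplified from include/linux/netdevice.h of this kernel (dev_net_set() is the matching setter):

	static inline struct net *dev_net(const struct net_device *dev)
	{
	#ifdef CONFIG_NET_NS
		return dev->nd_net;
	#else
		return &init_net;
	#endif
	}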
diff --git a/drivers/net/macb.c b/drivers/net/macb.c
index 489c7c3b90d9..d513bb8a4902 100644
--- a/drivers/net/macb.c
+++ b/drivers/net/macb.c
@@ -246,7 +246,7 @@ static int macb_mii_init(struct macb *bp)
246 bp->mii_bus.read = &macb_mdio_read; 246 bp->mii_bus.read = &macb_mdio_read;
247 bp->mii_bus.write = &macb_mdio_write; 247 bp->mii_bus.write = &macb_mdio_write;
248 bp->mii_bus.reset = &macb_mdio_reset; 248 bp->mii_bus.reset = &macb_mdio_reset;
249 bp->mii_bus.id = bp->pdev->id; 249 snprintf(bp->mii_bus.id, MII_BUS_ID_SIZE, "%x", bp->pdev->id);
250 bp->mii_bus.priv = bp; 250 bp->mii_bus.priv = bp;
251 bp->mii_bus.dev = &bp->dev->dev; 251 bp->mii_bus.dev = &bp->dev->dev;
252 pdata = bp->pdev->dev.platform_data; 252 pdata = bp->pdev->dev.platform_data;
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index f651a816b280..2056cfc624dc 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -402,7 +402,7 @@ static int macvlan_newlink(struct net_device *dev,
402 if (!tb[IFLA_LINK]) 402 if (!tb[IFLA_LINK])
403 return -EINVAL; 403 return -EINVAL;
404 404
405 lowerdev = __dev_get_by_index(dev->nd_net, nla_get_u32(tb[IFLA_LINK])); 405 lowerdev = __dev_get_by_index(dev_net(dev), nla_get_u32(tb[IFLA_LINK]));
406 if (lowerdev == NULL) 406 if (lowerdev == NULL)
407 return -ENODEV; 407 return -ENODEV;
408 408
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 771139e283af..601ffd69ebc8 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -3,7 +3,8 @@
3 * Copyright (C) 2002 Matthew Dharm <mdharm@momenco.com> 3 * Copyright (C) 2002 Matthew Dharm <mdharm@momenco.com>
4 * 4 *
5 * Based on the 64360 driver from: 5 * Based on the 64360 driver from:
6 * Copyright (C) 2002 rabeeh@galileo.co.il 6 * Copyright (C) 2002 Rabeeh Khoury <rabeeh@galileo.co.il>
7 * Rabeeh Khoury <rabeeh@marvell.com>
7 * 8 *
8 * Copyright (C) 2003 PMC-Sierra, Inc., 9 * Copyright (C) 2003 PMC-Sierra, Inc.,
9 * written by Manish Lachwani 10 * written by Manish Lachwani
@@ -16,6 +17,9 @@
16 * Copyright (C) 2004 Steven J. Hill <sjhill1@rockwellcollins.com> 17 * Copyright (C) 2004 Steven J. Hill <sjhill1@rockwellcollins.com>
17 * <sjhill@realitydiluted.com> 18 * <sjhill@realitydiluted.com>
18 * 19 *
20 * Copyright (C) 2007-2008 Marvell Semiconductor
21 * Lennert Buytenhek <buytenh@marvell.com>
22 *
19 * This program is free software; you can redistribute it and/or 23 * This program is free software; you can redistribute it and/or
20 * modify it under the terms of the GNU General Public License 24 * modify it under the terms of the GNU General Public License
21 * as published by the Free Software Foundation; either version 2 25 * as published by the Free Software Foundation; either version 2
@@ -63,20 +67,6 @@
63#define MV643XX_TX_FAST_REFILL 67#define MV643XX_TX_FAST_REFILL
64#undef MV643XX_COAL 68#undef MV643XX_COAL
65 69
66/*
67 * Number of RX / TX descriptors on RX / TX rings.
68 * Note that allocating RX descriptors is done by allocating the RX
69 * ring AND a preallocated RX buffers (skb's) for each descriptor.
70 * The TX descriptors only allocates the TX descriptors ring,
71 * with no pre allocated TX buffers (skb's are allocated by higher layers.
72 */
73
74/* Default TX ring size is 1000 descriptors */
75#define MV643XX_DEFAULT_TX_QUEUE_SIZE 1000
76
77/* Default RX ring size is 400 descriptors */
78#define MV643XX_DEFAULT_RX_QUEUE_SIZE 400
79
80#define MV643XX_TX_COAL 100 70#define MV643XX_TX_COAL 100
81#ifdef MV643XX_COAL 71#ifdef MV643XX_COAL
82#define MV643XX_RX_COAL 100 72#define MV643XX_RX_COAL 100
@@ -434,14 +424,6 @@ typedef enum _eth_func_ret_status {
434 ETH_QUEUE_LAST_RESOURCE /* Ring resources about to exhaust. */ 424 ETH_QUEUE_LAST_RESOURCE /* Ring resources about to exhaust. */
435} ETH_FUNC_RET_STATUS; 425} ETH_FUNC_RET_STATUS;
436 426
437typedef enum _eth_target {
438 ETH_TARGET_DRAM,
439 ETH_TARGET_DEVICE,
440 ETH_TARGET_CBS,
441 ETH_TARGET_PCI0,
442 ETH_TARGET_PCI1
443} ETH_TARGET;
444
445/* These are for big-endian machines. Little endian needs different 427/* These are for big-endian machines. Little endian needs different
446 * definitions. 428 * definitions.
447 */ 429 */
@@ -586,43 +568,44 @@ struct mv643xx_private {
586 568
587/* Static function declarations */ 569/* Static function declarations */
588static void eth_port_init(struct mv643xx_private *mp); 570static void eth_port_init(struct mv643xx_private *mp);
589static void eth_port_reset(unsigned int eth_port_num); 571static void eth_port_reset(struct mv643xx_private *mp);
590static void eth_port_start(struct net_device *dev); 572static void eth_port_start(struct net_device *dev);
591 573
592static void ethernet_phy_reset(unsigned int eth_port_num); 574static void ethernet_phy_reset(struct mv643xx_private *mp);
593 575
594static void eth_port_write_smi_reg(unsigned int eth_port_num, 576static void eth_port_write_smi_reg(struct mv643xx_private *mp,
595 unsigned int phy_reg, unsigned int value); 577 unsigned int phy_reg, unsigned int value);
596 578
597static void eth_port_read_smi_reg(unsigned int eth_port_num, 579static void eth_port_read_smi_reg(struct mv643xx_private *mp,
598 unsigned int phy_reg, unsigned int *value); 580 unsigned int phy_reg, unsigned int *value);
599 581
600static void eth_clear_mib_counters(unsigned int eth_port_num); 582static void eth_clear_mib_counters(struct mv643xx_private *mp);
601 583
602static ETH_FUNC_RET_STATUS eth_port_receive(struct mv643xx_private *mp, 584static ETH_FUNC_RET_STATUS eth_port_receive(struct mv643xx_private *mp,
603 struct pkt_info *p_pkt_info); 585 struct pkt_info *p_pkt_info);
604static ETH_FUNC_RET_STATUS eth_rx_return_buff(struct mv643xx_private *mp, 586static ETH_FUNC_RET_STATUS eth_rx_return_buff(struct mv643xx_private *mp,
605 struct pkt_info *p_pkt_info); 587 struct pkt_info *p_pkt_info);
606 588
607static void eth_port_uc_addr_get(unsigned int port_num, unsigned char *p_addr); 589static void eth_port_uc_addr_get(struct mv643xx_private *mp,
608static void eth_port_uc_addr_set(unsigned int port_num, unsigned char *p_addr); 590 unsigned char *p_addr);
591static void eth_port_uc_addr_set(struct mv643xx_private *mp,
592 unsigned char *p_addr);
609static void eth_port_set_multicast_list(struct net_device *); 593static void eth_port_set_multicast_list(struct net_device *);
610static void mv643xx_eth_port_enable_tx(unsigned int port_num, 594static void mv643xx_eth_port_enable_tx(struct mv643xx_private *mp,
611 unsigned int queues); 595 unsigned int queues);
612static void mv643xx_eth_port_enable_rx(unsigned int port_num, 596static void mv643xx_eth_port_enable_rx(struct mv643xx_private *mp,
613 unsigned int queues); 597 unsigned int queues);
614static unsigned int mv643xx_eth_port_disable_tx(unsigned int port_num); 598static unsigned int mv643xx_eth_port_disable_tx(struct mv643xx_private *mp);
615static unsigned int mv643xx_eth_port_disable_rx(unsigned int port_num); 599static unsigned int mv643xx_eth_port_disable_rx(struct mv643xx_private *mp);
616static int mv643xx_eth_open(struct net_device *); 600static int mv643xx_eth_open(struct net_device *);
617static int mv643xx_eth_stop(struct net_device *); 601static int mv643xx_eth_stop(struct net_device *);
618static int mv643xx_eth_change_mtu(struct net_device *, int); 602static void eth_port_init_mac_tables(struct mv643xx_private *mp);
619static void eth_port_init_mac_tables(unsigned int eth_port_num);
620#ifdef MV643XX_NAPI 603#ifdef MV643XX_NAPI
621static int mv643xx_poll(struct napi_struct *napi, int budget); 604static int mv643xx_poll(struct napi_struct *napi, int budget);
622#endif 605#endif
623static int ethernet_phy_get(unsigned int eth_port_num); 606static int ethernet_phy_get(struct mv643xx_private *mp);
624static void ethernet_phy_set(unsigned int eth_port_num, int phy_addr); 607static void ethernet_phy_set(struct mv643xx_private *mp, int phy_addr);
625static int ethernet_phy_detect(unsigned int eth_port_num); 608static int ethernet_phy_detect(struct mv643xx_private *mp);
626static int mv643xx_mdio_read(struct net_device *dev, int phy_id, int location); 609static int mv643xx_mdio_read(struct net_device *dev, int phy_id, int location);
627static void mv643xx_mdio_write(struct net_device *dev, int phy_id, int location, int val); 610static void mv643xx_mdio_write(struct net_device *dev, int phy_id, int location, int val);
628static int mv643xx_eth_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd); 611static int mv643xx_eth_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
@@ -636,12 +619,12 @@ static void __iomem *mv643xx_eth_base;
636/* used to protect SMI_REG, which is shared across ports */ 619/* used to protect SMI_REG, which is shared across ports */
637static DEFINE_SPINLOCK(mv643xx_eth_phy_lock); 620static DEFINE_SPINLOCK(mv643xx_eth_phy_lock);
638 621
639static inline u32 mv_read(int offset) 622static inline u32 rdl(struct mv643xx_private *mp, int offset)
640{ 623{
641 return readl(mv643xx_eth_base + offset); 624 return readl(mv643xx_eth_base + offset);
642} 625}
643 626
644static inline void mv_write(int offset, u32 data) 627static inline void wrl(struct mv643xx_private *mp, int offset, u32 data)
645{ 628{
646 writel(data, mv643xx_eth_base + offset); 629 writel(data, mv643xx_eth_base + offset);
647} 630}
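
mv_read()/mv_write() become rdl()/wrl() and now take the private struct, even though at this stage they still dereference the single global mv643xx_eth_base. Routing every register access through helpers that receive mp means the base pointer can later migrate into mv643xx_private without touching any call site again. A rough user-space model of that per-device accessor pattern; the names and the fake register array are illustrative only:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

struct sample_port {
	volatile uint32_t *base;	/* per-device register window */
	int port_num;
};

/* All register access funnels through two helpers that take the device. */
static uint32_t sample_rdl(struct sample_port *p, int offset)
{
	return p->base[offset / 4];
}

static void sample_wrl(struct sample_port *p, int offset, uint32_t data)
{
	p->base[offset / 4] = data;
}

int main(void)
{
	static uint32_t regs[64];	/* stands in for an ioremap()ed window */
	struct sample_port port = { .base = regs, .port_num = 0 };

	sample_wrl(&port, 0x10, 0x12345678);
	printf("reg 0x10 = 0x%08" PRIx32 "\n", sample_rdl(&port, 0x10));
	return 0;
}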
@@ -659,18 +642,19 @@ static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu)
659 return -EINVAL; 642 return -EINVAL;
660 643
661 dev->mtu = new_mtu; 644 dev->mtu = new_mtu;
645 if (!netif_running(dev))
646 return 0;
647
662 /* 648 /*
663 * Stop then re-open the interface. This will allocate RX skb's with 649 * Stop and then re-open the interface. This will allocate RX
664 * the new MTU. 650 * skbs of the new MTU.
665 * There is a possible danger that the open will not successed, due 651 * There is a possible danger that the open will not succeed,
666 * to memory is full, which might fail the open function. 652 * due to memory being full, which might fail the open function.
667 */ 653 */
668 if (netif_running(dev)) { 654 mv643xx_eth_stop(dev);
669 mv643xx_eth_stop(dev); 655 if (mv643xx_eth_open(dev)) {
670 if (mv643xx_eth_open(dev)) 656 printk(KERN_ERR "%s: Fatal error on opening device\n",
671 printk(KERN_ERR 657 dev->name);
672 "%s: Fatal error on opening device\n",
673 dev->name);
674 } 658 }
675 659
676 return 0; 660 return 0;
@@ -748,10 +732,9 @@ static inline void mv643xx_eth_rx_refill_descs_timer_wrapper(unsigned long data)
748static void mv643xx_eth_update_mac_address(struct net_device *dev) 732static void mv643xx_eth_update_mac_address(struct net_device *dev)
749{ 733{
750 struct mv643xx_private *mp = netdev_priv(dev); 734 struct mv643xx_private *mp = netdev_priv(dev);
751 unsigned int port_num = mp->port_num;
752 735
753 eth_port_init_mac_tables(port_num); 736 eth_port_init_mac_tables(mp);
754 eth_port_uc_addr_set(port_num, dev->dev_addr); 737 eth_port_uc_addr_set(mp, dev->dev_addr);
755} 738}
756 739
757/* 740/*
@@ -767,12 +750,12 @@ static void mv643xx_eth_set_rx_mode(struct net_device *dev)
767 struct mv643xx_private *mp = netdev_priv(dev); 750 struct mv643xx_private *mp = netdev_priv(dev);
768 u32 config_reg; 751 u32 config_reg;
769 752
770 config_reg = mv_read(PORT_CONFIG_REG(mp->port_num)); 753 config_reg = rdl(mp, PORT_CONFIG_REG(mp->port_num));
771 if (dev->flags & IFF_PROMISC) 754 if (dev->flags & IFF_PROMISC)
772 config_reg |= (u32) UNICAST_PROMISCUOUS_MODE; 755 config_reg |= (u32) UNICAST_PROMISCUOUS_MODE;
773 else 756 else
774 config_reg &= ~(u32) UNICAST_PROMISCUOUS_MODE; 757 config_reg &= ~(u32) UNICAST_PROMISCUOUS_MODE;
775 mv_write(PORT_CONFIG_REG(mp->port_num), config_reg); 758 wrl(mp, PORT_CONFIG_REG(mp->port_num), config_reg);
776 759
777 eth_port_set_multicast_list(dev); 760 eth_port_set_multicast_list(dev);
778} 761}
@@ -826,14 +809,14 @@ static void mv643xx_eth_tx_timeout_task(struct work_struct *ugly)
826{ 809{
827 struct mv643xx_private *mp = container_of(ugly, struct mv643xx_private, 810 struct mv643xx_private *mp = container_of(ugly, struct mv643xx_private,
828 tx_timeout_task); 811 tx_timeout_task);
829 struct net_device *dev = mp->mii.dev; /* yuck */ 812 struct net_device *dev = mp->dev;
830 813
831 if (!netif_running(dev)) 814 if (!netif_running(dev))
832 return; 815 return;
833 816
834 netif_stop_queue(dev); 817 netif_stop_queue(dev);
835 818
836 eth_port_reset(mp->port_num); 819 eth_port_reset(mp);
837 eth_port_start(dev); 820 eth_port_start(dev);
838 821
839 if (mp->tx_ring_size - mp->tx_desc_count >= MAX_DESCS_PER_SKB) 822 if (mp->tx_ring_size - mp->tx_desc_count >= MAX_DESCS_PER_SKB)
@@ -845,7 +828,7 @@ static void mv643xx_eth_tx_timeout_task(struct work_struct *ugly)
845 * 828 *
846 * If force is non-zero, frees uncompleted descriptors as well 829 * If force is non-zero, frees uncompleted descriptors as well
847 */ 830 */
848int mv643xx_eth_free_tx_descs(struct net_device *dev, int force) 831static int mv643xx_eth_free_tx_descs(struct net_device *dev, int force)
849{ 832{
850 struct mv643xx_private *mp = netdev_priv(dev); 833 struct mv643xx_private *mp = netdev_priv(dev);
851 struct eth_tx_desc *desc; 834 struct eth_tx_desc *desc;
@@ -1008,7 +991,7 @@ static void mv643xx_eth_update_pscr(struct net_device *dev,
1008 u32 o_pscr, n_pscr; 991 u32 o_pscr, n_pscr;
1009 unsigned int queues; 992 unsigned int queues;
1010 993
1011 o_pscr = mv_read(PORT_SERIAL_CONTROL_REG(port_num)); 994 o_pscr = rdl(mp, PORT_SERIAL_CONTROL_REG(port_num));
1012 n_pscr = o_pscr; 995 n_pscr = o_pscr;
1013 996
1014 /* clear speed, duplex and rx buffer size fields */ 997 /* clear speed, duplex and rx buffer size fields */
@@ -1031,16 +1014,16 @@ static void mv643xx_eth_update_pscr(struct net_device *dev,
1031 1014
1032 if (n_pscr != o_pscr) { 1015 if (n_pscr != o_pscr) {
1033 if ((o_pscr & SERIAL_PORT_ENABLE) == 0) 1016 if ((o_pscr & SERIAL_PORT_ENABLE) == 0)
1034 mv_write(PORT_SERIAL_CONTROL_REG(port_num), n_pscr); 1017 wrl(mp, PORT_SERIAL_CONTROL_REG(port_num), n_pscr);
1035 else { 1018 else {
1036 queues = mv643xx_eth_port_disable_tx(port_num); 1019 queues = mv643xx_eth_port_disable_tx(mp);
1037 1020
1038 o_pscr &= ~SERIAL_PORT_ENABLE; 1021 o_pscr &= ~SERIAL_PORT_ENABLE;
1039 mv_write(PORT_SERIAL_CONTROL_REG(port_num), o_pscr); 1022 wrl(mp, PORT_SERIAL_CONTROL_REG(port_num), o_pscr);
1040 mv_write(PORT_SERIAL_CONTROL_REG(port_num), n_pscr); 1023 wrl(mp, PORT_SERIAL_CONTROL_REG(port_num), n_pscr);
1041 mv_write(PORT_SERIAL_CONTROL_REG(port_num), n_pscr); 1024 wrl(mp, PORT_SERIAL_CONTROL_REG(port_num), n_pscr);
1042 if (queues) 1025 if (queues)
1043 mv643xx_eth_port_enable_tx(port_num, queues); 1026 mv643xx_eth_port_enable_tx(mp, queues);
1044 } 1027 }
1045 } 1028 }
1046} 1029}
@@ -1064,13 +1047,13 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id)
1064 unsigned int port_num = mp->port_num; 1047 unsigned int port_num = mp->port_num;
1065 1048
1066 /* Read interrupt cause registers */ 1049 /* Read interrupt cause registers */
1067 eth_int_cause = mv_read(INTERRUPT_CAUSE_REG(port_num)) & 1050 eth_int_cause = rdl(mp, INTERRUPT_CAUSE_REG(port_num)) &
1068 ETH_INT_UNMASK_ALL; 1051 ETH_INT_UNMASK_ALL;
1069 if (eth_int_cause & ETH_INT_CAUSE_EXT) { 1052 if (eth_int_cause & ETH_INT_CAUSE_EXT) {
1070 eth_int_cause_ext = mv_read( 1053 eth_int_cause_ext = rdl(mp,
1071 INTERRUPT_CAUSE_EXTEND_REG(port_num)) & 1054 INTERRUPT_CAUSE_EXTEND_REG(port_num)) &
1072 ETH_INT_UNMASK_ALL_EXT; 1055 ETH_INT_UNMASK_ALL_EXT;
1073 mv_write(INTERRUPT_CAUSE_EXTEND_REG(port_num), 1056 wrl(mp, INTERRUPT_CAUSE_EXTEND_REG(port_num),
1074 ~eth_int_cause_ext); 1057 ~eth_int_cause_ext);
1075 } 1058 }
1076 1059
@@ -1081,8 +1064,7 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id)
1081 if (mii_link_ok(&mp->mii)) { 1064 if (mii_link_ok(&mp->mii)) {
1082 mii_ethtool_gset(&mp->mii, &cmd); 1065 mii_ethtool_gset(&mp->mii, &cmd);
1083 mv643xx_eth_update_pscr(dev, &cmd); 1066 mv643xx_eth_update_pscr(dev, &cmd);
1084 mv643xx_eth_port_enable_tx(port_num, 1067 mv643xx_eth_port_enable_tx(mp, ETH_TX_QUEUES_ENABLED);
1085 ETH_TX_QUEUES_ENABLED);
1086 if (!netif_carrier_ok(dev)) { 1068 if (!netif_carrier_ok(dev)) {
1087 netif_carrier_on(dev); 1069 netif_carrier_on(dev);
1088 if (mp->tx_ring_size - mp->tx_desc_count >= 1070 if (mp->tx_ring_size - mp->tx_desc_count >=
@@ -1098,10 +1080,10 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id)
1098#ifdef MV643XX_NAPI 1080#ifdef MV643XX_NAPI
1099 if (eth_int_cause & ETH_INT_CAUSE_RX) { 1081 if (eth_int_cause & ETH_INT_CAUSE_RX) {
1100 /* schedule the NAPI poll routine to maintain port */ 1082 /* schedule the NAPI poll routine to maintain port */
1101 mv_write(INTERRUPT_MASK_REG(port_num), ETH_INT_MASK_ALL); 1083 wrl(mp, INTERRUPT_MASK_REG(port_num), ETH_INT_MASK_ALL);
1102 1084
1103 /* wait for previous write to complete */ 1085 /* wait for previous write to complete */
1104 mv_read(INTERRUPT_MASK_REG(port_num)); 1086 rdl(mp, INTERRUPT_MASK_REG(port_num));
1105 1087
1106 netif_rx_schedule(dev, &mp->napi); 1088 netif_rx_schedule(dev, &mp->napi);
1107 } 1089 }
@@ -1136,7 +1118,7 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id)
1136 * , and the required delay of the interrupt in usec. 1118 * , and the required delay of the interrupt in usec.
1137 * 1119 *
1138 * INPUT: 1120 * INPUT:
1139 * unsigned int eth_port_num Ethernet port number 1121 * struct mv643xx_private *mp Ethernet port
1140 * unsigned int t_clk t_clk of the MV-643xx chip in HZ units 1122 * unsigned int t_clk t_clk of the MV-643xx chip in HZ units
1141 * unsigned int delay Delay in usec 1123 * unsigned int delay Delay in usec
1142 * 1124 *
@@ -1147,15 +1129,16 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id)
1147 * The interrupt coalescing value set in the gigE port. 1129 * The interrupt coalescing value set in the gigE port.
1148 * 1130 *
1149 */ 1131 */
1150static unsigned int eth_port_set_rx_coal(unsigned int eth_port_num, 1132static unsigned int eth_port_set_rx_coal(struct mv643xx_private *mp,
1151 unsigned int t_clk, unsigned int delay) 1133 unsigned int t_clk, unsigned int delay)
1152{ 1134{
1135 unsigned int port_num = mp->port_num;
1153 unsigned int coal = ((t_clk / 1000000) * delay) / 64; 1136 unsigned int coal = ((t_clk / 1000000) * delay) / 64;
1154 1137
1155 /* Set RX Coalescing mechanism */ 1138 /* Set RX Coalescing mechanism */
1156 mv_write(SDMA_CONFIG_REG(eth_port_num), 1139 wrl(mp, SDMA_CONFIG_REG(port_num),
1157 ((coal & 0x3fff) << 8) | 1140 ((coal & 0x3fff) << 8) |
1158 (mv_read(SDMA_CONFIG_REG(eth_port_num)) 1141 (rdl(mp, SDMA_CONFIG_REG(port_num))
1159 & 0xffc000ff)); 1142 & 0xffc000ff));
1160 1143
1161 return coal; 1144 return coal;
@@ -1174,7 +1157,7 @@ static unsigned int eth_port_set_rx_coal(unsigned int eth_port_num,
1174 * MV-643xx chip and the required delay in the interrupt in uSec 1157 * MV-643xx chip and the required delay in the interrupt in uSec
1175 * 1158 *
1176 * INPUT: 1159 * INPUT:
1177 * unsigned int eth_port_num Ethernet port number 1160 * struct mv643xx_private *mp Ethernet port
1178 * unsigned int t_clk t_clk of the MV-643xx chip in HZ units 1161 * unsigned int t_clk t_clk of the MV-643xx chip in HZ units
1179 * unsigned int delay Delay in uSeconds 1162 * unsigned int delay Delay in uSeconds
1180 * 1163 *
@@ -1185,13 +1168,14 @@ static unsigned int eth_port_set_rx_coal(unsigned int eth_port_num,
1185 * The interrupt coalescing value set in the gigE port. 1168 * The interrupt coalescing value set in the gigE port.
1186 * 1169 *
1187 */ 1170 */
1188static unsigned int eth_port_set_tx_coal(unsigned int eth_port_num, 1171static unsigned int eth_port_set_tx_coal(struct mv643xx_private *mp,
1189 unsigned int t_clk, unsigned int delay) 1172 unsigned int t_clk, unsigned int delay)
1190{ 1173{
1191 unsigned int coal; 1174 unsigned int coal = ((t_clk / 1000000) * delay) / 64;
1192 coal = ((t_clk / 1000000) * delay) / 64; 1175
1193 /* Set TX Coalescing mechanism */ 1176 /* Set TX Coalescing mechanism */
1194 mv_write(TX_FIFO_URGENT_THRESHOLD_REG(eth_port_num), coal << 4); 1177 wrl(mp, TX_FIFO_URGENT_THRESHOLD_REG(mp->port_num), coal << 4);
1178
1195 return coal; 1179 return coal;
1196} 1180}
1197 1181
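
Both coalescing helpers use the same conversion, coal = ((t_clk / 1000000) * delay) / 64, i.e. the requested delay in microseconds expressed in units of 64 t_clk cycles. With the values passed at open time in this driver (t_clk = 133000000 and a 100 usec delay), integer arithmetic gives (133 * 100) / 64 = 207. A tiny standalone check of that arithmetic:

#include <stdio.h>

/* Same integer math as eth_port_set_rx_coal()/eth_port_set_tx_coal(). */
static unsigned int coal_value(unsigned int t_clk, unsigned int delay_usec)
{
	return ((t_clk / 1000000) * delay_usec) / 64;
}

int main(void)
{
	/* 133 MHz t_clk, 100 usec delay, as used in mv643xx_eth_open(). */
	printf("coal = %u\n", coal_value(133000000, 100));	/* prints 207 */
	return 0;
}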
@@ -1327,16 +1311,15 @@ static int mv643xx_eth_open(struct net_device *dev)
1327 int err; 1311 int err;
1328 1312
1329 /* Clear any pending ethernet port interrupts */ 1313 /* Clear any pending ethernet port interrupts */
1330 mv_write(INTERRUPT_CAUSE_REG(port_num), 0); 1314 wrl(mp, INTERRUPT_CAUSE_REG(port_num), 0);
1331 mv_write(INTERRUPT_CAUSE_EXTEND_REG(port_num), 0); 1315 wrl(mp, INTERRUPT_CAUSE_EXTEND_REG(port_num), 0);
1332 /* wait for previous write to complete */ 1316 /* wait for previous write to complete */
1333 mv_read (INTERRUPT_CAUSE_EXTEND_REG(port_num)); 1317 rdl(mp, INTERRUPT_CAUSE_EXTEND_REG(port_num));
1334 1318
1335 err = request_irq(dev->irq, mv643xx_eth_int_handler, 1319 err = request_irq(dev->irq, mv643xx_eth_int_handler,
1336 IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev); 1320 IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
1337 if (err) { 1321 if (err) {
1338 printk(KERN_ERR "Can not assign IRQ number to MV643XX_eth%d\n", 1322 printk(KERN_ERR "%s: Can not assign IRQ\n", dev->name);
1339 port_num);
1340 return -EAGAIN; 1323 return -EAGAIN;
1341 } 1324 }
1342 1325
@@ -1430,17 +1413,17 @@ static int mv643xx_eth_open(struct net_device *dev)
1430 1413
1431#ifdef MV643XX_COAL 1414#ifdef MV643XX_COAL
1432 mp->rx_int_coal = 1415 mp->rx_int_coal =
1433 eth_port_set_rx_coal(port_num, 133000000, MV643XX_RX_COAL); 1416 eth_port_set_rx_coal(mp, 133000000, MV643XX_RX_COAL);
1434#endif 1417#endif
1435 1418
1436 mp->tx_int_coal = 1419 mp->tx_int_coal =
1437 eth_port_set_tx_coal(port_num, 133000000, MV643XX_TX_COAL); 1420 eth_port_set_tx_coal(mp, 133000000, MV643XX_TX_COAL);
1438 1421
1439 /* Unmask phy and link status changes interrupts */ 1422 /* Unmask phy and link status changes interrupts */
1440 mv_write(INTERRUPT_EXTEND_MASK_REG(port_num), ETH_INT_UNMASK_ALL_EXT); 1423 wrl(mp, INTERRUPT_EXTEND_MASK_REG(port_num), ETH_INT_UNMASK_ALL_EXT);
1441 1424
1442 /* Unmask RX buffer and TX end interrupt */ 1425 /* Unmask RX buffer and TX end interrupt */
1443 mv_write(INTERRUPT_MASK_REG(port_num), ETH_INT_UNMASK_ALL); 1426 wrl(mp, INTERRUPT_MASK_REG(port_num), ETH_INT_UNMASK_ALL);
1444 1427
1445 return 0; 1428 return 0;
1446 1429
@@ -1459,7 +1442,7 @@ static void mv643xx_eth_free_tx_rings(struct net_device *dev)
1459 struct mv643xx_private *mp = netdev_priv(dev); 1442 struct mv643xx_private *mp = netdev_priv(dev);
1460 1443
1461 /* Stop Tx Queues */ 1444 /* Stop Tx Queues */
1462 mv643xx_eth_port_disable_tx(mp->port_num); 1445 mv643xx_eth_port_disable_tx(mp);
1463 1446
1464 /* Free outstanding skb's on TX ring */ 1447 /* Free outstanding skb's on TX ring */
1465 mv643xx_eth_free_all_tx_descs(dev); 1448 mv643xx_eth_free_all_tx_descs(dev);
@@ -1477,11 +1460,10 @@ static void mv643xx_eth_free_tx_rings(struct net_device *dev)
1477static void mv643xx_eth_free_rx_rings(struct net_device *dev) 1460static void mv643xx_eth_free_rx_rings(struct net_device *dev)
1478{ 1461{
1479 struct mv643xx_private *mp = netdev_priv(dev); 1462 struct mv643xx_private *mp = netdev_priv(dev);
1480 unsigned int port_num = mp->port_num;
1481 int curr; 1463 int curr;
1482 1464
1483 /* Stop RX Queues */ 1465 /* Stop RX Queues */
1484 mv643xx_eth_port_disable_rx(port_num); 1466 mv643xx_eth_port_disable_rx(mp);
1485 1467
1486 /* Free preallocated skb's on RX rings */ 1468 /* Free preallocated skb's on RX rings */
1487 for (curr = 0; mp->rx_desc_count && curr < mp->rx_ring_size; curr++) { 1469 for (curr = 0; mp->rx_desc_count && curr < mp->rx_ring_size; curr++) {
@@ -1520,9 +1502,9 @@ static int mv643xx_eth_stop(struct net_device *dev)
1520 unsigned int port_num = mp->port_num; 1502 unsigned int port_num = mp->port_num;
1521 1503
1522 /* Mask all interrupts on ethernet port */ 1504 /* Mask all interrupts on ethernet port */
1523 mv_write(INTERRUPT_MASK_REG(port_num), ETH_INT_MASK_ALL); 1505 wrl(mp, INTERRUPT_MASK_REG(port_num), ETH_INT_MASK_ALL);
1524 /* wait for previous write to complete */ 1506 /* wait for previous write to complete */
1525 mv_read(INTERRUPT_MASK_REG(port_num)); 1507 rdl(mp, INTERRUPT_MASK_REG(port_num));
1526 1508
1527#ifdef MV643XX_NAPI 1509#ifdef MV643XX_NAPI
1528 napi_disable(&mp->napi); 1510 napi_disable(&mp->napi);
@@ -1530,7 +1512,7 @@ static int mv643xx_eth_stop(struct net_device *dev)
1530 netif_carrier_off(dev); 1512 netif_carrier_off(dev);
1531 netif_stop_queue(dev); 1513 netif_stop_queue(dev);
1532 1514
1533 eth_port_reset(mp->port_num); 1515 eth_port_reset(mp);
1534 1516
1535 mv643xx_eth_free_tx_rings(dev); 1517 mv643xx_eth_free_tx_rings(dev);
1536 mv643xx_eth_free_rx_rings(dev); 1518 mv643xx_eth_free_rx_rings(dev);
@@ -1561,15 +1543,15 @@ static int mv643xx_poll(struct napi_struct *napi, int budget)
1561#endif 1543#endif
1562 1544
1563 work_done = 0; 1545 work_done = 0;
1564 if ((mv_read(RX_CURRENT_QUEUE_DESC_PTR_0(port_num))) 1546 if ((rdl(mp, RX_CURRENT_QUEUE_DESC_PTR_0(port_num)))
1565 != (u32) mp->rx_used_desc_q) 1547 != (u32) mp->rx_used_desc_q)
1566 work_done = mv643xx_eth_receive_queue(dev, budget); 1548 work_done = mv643xx_eth_receive_queue(dev, budget);
1567 1549
1568 if (work_done < budget) { 1550 if (work_done < budget) {
1569 netif_rx_complete(dev, napi); 1551 netif_rx_complete(dev, napi);
1570 mv_write(INTERRUPT_CAUSE_REG(port_num), 0); 1552 wrl(mp, INTERRUPT_CAUSE_REG(port_num), 0);
1571 mv_write(INTERRUPT_CAUSE_EXTEND_REG(port_num), 0); 1553 wrl(mp, INTERRUPT_CAUSE_EXTEND_REG(port_num), 0);
1572 mv_write(INTERRUPT_MASK_REG(port_num), ETH_INT_UNMASK_ALL); 1554 wrl(mp, INTERRUPT_MASK_REG(port_num), ETH_INT_UNMASK_ALL);
1573 } 1555 }
1574 1556
1575 return work_done; 1557 return work_done;
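
The interrupt handler and mv643xx_poll() hunks above together form the usual NAPI handshake: on an RX interrupt the handler masks the port's interrupts, reads the mask register back to flush the posted write, and schedules the poll routine; when poll() finishes under budget it acknowledges the causes and unmasks again. A condensed sketch of that flow, meant to be read in the context of this driver and reusing only calls and symbols that appear in the hunks above (cause checking and budget bookkeeping are abbreviated, so this is not the full handler):

/* Interrupt side: mask, flush the posted write, hand off to NAPI. */
static irqreturn_t sample_int_handler(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct mv643xx_private *mp = netdev_priv(dev);

	wrl(mp, INTERRUPT_MASK_REG(mp->port_num), ETH_INT_MASK_ALL);
	rdl(mp, INTERRUPT_MASK_REG(mp->port_num));	/* wait for the write to land */
	netif_rx_schedule(dev, &mp->napi);		/* run the poll routine later */

	return IRQ_HANDLED;
}

/* Poll side: do bounded work, then re-enable interrupts when done. */
static int sample_poll(struct napi_struct *napi, int budget)
{
	struct mv643xx_private *mp = container_of(napi, struct mv643xx_private, napi);
	struct net_device *dev = mp->dev;
	int work_done = mv643xx_eth_receive_queue(dev, budget);

	if (work_done < budget) {
		netif_rx_complete(dev, napi);
		wrl(mp, INTERRUPT_CAUSE_REG(mp->port_num), 0);
		wrl(mp, INTERRUPT_MASK_REG(mp->port_num), ETH_INT_UNMASK_ALL);
	}
	return work_done;
}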
@@ -1723,7 +1705,7 @@ static void eth_tx_submit_descs_for_skb(struct mv643xx_private *mp,
1723 1705
1724 /* ensure all descriptors are written before poking hardware */ 1706 /* ensure all descriptors are written before poking hardware */
1725 wmb(); 1707 wmb();
1726 mv643xx_eth_port_enable_tx(mp->port_num, ETH_TX_QUEUES_ENABLED); 1708 mv643xx_eth_port_enable_tx(mp, ETH_TX_QUEUES_ENABLED);
1727 1709
1728 mp->tx_desc_count += nr_frags + 1; 1710 mp->tx_desc_count += nr_frags + 1;
1729} 1711}
@@ -1739,25 +1721,23 @@ static int mv643xx_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)
1739 unsigned long flags; 1721 unsigned long flags;
1740 1722
1741 BUG_ON(netif_queue_stopped(dev)); 1723 BUG_ON(netif_queue_stopped(dev));
1742 BUG_ON(skb == NULL); 1724
1725 if (has_tiny_unaligned_frags(skb) && __skb_linearize(skb)) {
1726 stats->tx_dropped++;
1727 printk(KERN_DEBUG "%s: failed to linearize tiny "
1728 "unaligned fragment\n", dev->name);
1729 return NETDEV_TX_BUSY;
1730 }
1731
1732 spin_lock_irqsave(&mp->lock, flags);
1743 1733
1744 if (mp->tx_ring_size - mp->tx_desc_count < MAX_DESCS_PER_SKB) { 1734 if (mp->tx_ring_size - mp->tx_desc_count < MAX_DESCS_PER_SKB) {
1745 printk(KERN_ERR "%s: transmit with queue full\n", dev->name); 1735 printk(KERN_ERR "%s: transmit with queue full\n", dev->name);
1746 netif_stop_queue(dev); 1736 netif_stop_queue(dev);
1747 return 1; 1737 spin_unlock_irqrestore(&mp->lock, flags);
1748 } 1738 return NETDEV_TX_BUSY;
1749
1750 if (has_tiny_unaligned_frags(skb)) {
1751 if (__skb_linearize(skb)) {
1752 stats->tx_dropped++;
1753 printk(KERN_DEBUG "%s: failed to linearize tiny "
1754 "unaligned fragment\n", dev->name);
1755 return 1;
1756 }
1757 } 1739 }
1758 1740
1759 spin_lock_irqsave(&mp->lock, flags);
1760
1761 eth_tx_submit_descs_for_skb(mp, skb); 1741 eth_tx_submit_descs_for_skb(mp, skb);
1762 stats->tx_bytes += skb->len; 1742 stats->tx_bytes += skb->len;
1763 stats->tx_packets++; 1743 stats->tx_packets++;
@@ -1768,7 +1748,7 @@ static int mv643xx_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)
1768 1748
1769 spin_unlock_irqrestore(&mp->lock, flags); 1749 spin_unlock_irqrestore(&mp->lock, flags);
1770 1750
1771 return 0; /* success */ 1751 return NETDEV_TX_OK;
1772} 1752}
1773 1753
1774#ifdef CONFIG_NET_POLL_CONTROLLER 1754#ifdef CONFIG_NET_POLL_CONTROLLER
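
The transmit path now returns the NETDEV_TX_* constants instead of bare 0/1, and the reordering takes the lock once and drops it on the ring-full path before returning NETDEV_TX_BUSY, which tells the core to requeue the skb. Read straight through, the shape of the handler after this change is roughly the following sketch, using only symbols from the hunk above; the drop accounting, byte/packet counters, and the post-submit queue-stop check are elided:

/* Condensed shape of the transmit hook after the conversion. */
static int sample_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mv643xx_private *mp = netdev_priv(dev);
	unsigned long flags;

	if (has_tiny_unaligned_frags(skb) && __skb_linearize(skb))
		return NETDEV_TX_BUSY;		/* could not fix up the skb */

	spin_lock_irqsave(&mp->lock, flags);

	if (mp->tx_ring_size - mp->tx_desc_count < MAX_DESCS_PER_SKB) {
		netif_stop_queue(dev);
		spin_unlock_irqrestore(&mp->lock, flags);
		return NETDEV_TX_BUSY;		/* core will requeue the skb */
	}

	eth_tx_submit_descs_for_skb(mp, skb);	/* hand the frame to the hardware */

	spin_unlock_irqrestore(&mp->lock, flags);
	return NETDEV_TX_OK;
}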
@@ -1777,13 +1757,13 @@ static void mv643xx_netpoll(struct net_device *netdev)
1777 struct mv643xx_private *mp = netdev_priv(netdev); 1757 struct mv643xx_private *mp = netdev_priv(netdev);
1778 int port_num = mp->port_num; 1758 int port_num = mp->port_num;
1779 1759
1780 mv_write(INTERRUPT_MASK_REG(port_num), ETH_INT_MASK_ALL); 1760 wrl(mp, INTERRUPT_MASK_REG(port_num), ETH_INT_MASK_ALL);
1781 /* wait for previous write to complete */ 1761 /* wait for previous write to complete */
1782 mv_read(INTERRUPT_MASK_REG(port_num)); 1762 rdl(mp, INTERRUPT_MASK_REG(port_num));
1783 1763
1784 mv643xx_eth_int_handler(netdev->irq, netdev); 1764 mv643xx_eth_int_handler(netdev->irq, netdev);
1785 1765
1786 mv_write(INTERRUPT_MASK_REG(port_num), ETH_INT_UNMASK_ALL); 1766 wrl(mp, INTERRUPT_MASK_REG(port_num), ETH_INT_UNMASK_ALL);
1787} 1767}
1788#endif 1768#endif
1789 1769
@@ -1900,7 +1880,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
1900 port_num = mp->port_num = pd->port_number; 1880 port_num = mp->port_num = pd->port_number;
1901 1881
1902 /* set default config values */ 1882 /* set default config values */
1903 eth_port_uc_addr_get(port_num, dev->dev_addr); 1883 eth_port_uc_addr_get(mp, dev->dev_addr);
1904 mp->rx_ring_size = PORT_DEFAULT_RECEIVE_QUEUE_SIZE; 1884 mp->rx_ring_size = PORT_DEFAULT_RECEIVE_QUEUE_SIZE;
1905 mp->tx_ring_size = PORT_DEFAULT_TRANSMIT_QUEUE_SIZE; 1885 mp->tx_ring_size = PORT_DEFAULT_TRANSMIT_QUEUE_SIZE;
1906 1886
@@ -1908,7 +1888,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
1908 memcpy(dev->dev_addr, pd->mac_addr, 6); 1888 memcpy(dev->dev_addr, pd->mac_addr, 6);
1909 1889
1910 if (pd->phy_addr || pd->force_phy_addr) 1890 if (pd->phy_addr || pd->force_phy_addr)
1911 ethernet_phy_set(port_num, pd->phy_addr); 1891 ethernet_phy_set(mp, pd->phy_addr);
1912 1892
1913 if (pd->rx_queue_size) 1893 if (pd->rx_queue_size)
1914 mp->rx_ring_size = pd->rx_queue_size; 1894 mp->rx_ring_size = pd->rx_queue_size;
@@ -1933,19 +1913,18 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
1933 mp->mii.dev = dev; 1913 mp->mii.dev = dev;
1934 mp->mii.mdio_read = mv643xx_mdio_read; 1914 mp->mii.mdio_read = mv643xx_mdio_read;
1935 mp->mii.mdio_write = mv643xx_mdio_write; 1915 mp->mii.mdio_write = mv643xx_mdio_write;
1936 mp->mii.phy_id = ethernet_phy_get(port_num); 1916 mp->mii.phy_id = ethernet_phy_get(mp);
1937 mp->mii.phy_id_mask = 0x3f; 1917 mp->mii.phy_id_mask = 0x3f;
1938 mp->mii.reg_num_mask = 0x1f; 1918 mp->mii.reg_num_mask = 0x1f;
1939 1919
1940 err = ethernet_phy_detect(port_num); 1920 err = ethernet_phy_detect(mp);
1941 if (err) { 1921 if (err) {
1942 pr_debug("MV643xx ethernet port %d: " 1922 pr_debug("%s: No PHY detected at addr %d\n",
1943 "No PHY detected at addr %d\n", 1923 dev->name, ethernet_phy_get(mp));
1944 port_num, ethernet_phy_get(port_num));
1945 goto out; 1924 goto out;
1946 } 1925 }
1947 1926
1948 ethernet_phy_reset(port_num); 1927 ethernet_phy_reset(mp);
1949 mp->mii.supports_gmii = mii_check_gmii_support(&mp->mii); 1928 mp->mii.supports_gmii = mii_check_gmii_support(&mp->mii);
1950 mv643xx_init_ethtool_cmd(dev, mp->mii.phy_id, speed, duplex, &cmd); 1929 mv643xx_init_ethtool_cmd(dev, mp->mii.phy_id, speed, duplex, &cmd);
1951 mv643xx_eth_update_pscr(dev, &cmd); 1930 mv643xx_eth_update_pscr(dev, &cmd);
@@ -2006,9 +1985,11 @@ static int mv643xx_eth_remove(struct platform_device *pdev)
2006 1985
2007static int mv643xx_eth_shared_probe(struct platform_device *pdev) 1986static int mv643xx_eth_shared_probe(struct platform_device *pdev)
2008{ 1987{
1988 static int mv643xx_version_printed = 0;
2009 struct resource *res; 1989 struct resource *res;
2010 1990
2011 printk(KERN_NOTICE "MV-643xx 10/100/1000 Ethernet Driver\n"); 1991 if (!mv643xx_version_printed++)
1992 printk(KERN_NOTICE "MV-643xx 10/100/1000 Ethernet Driver\n");
2012 1993
2013 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1994 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2014 if (res == NULL) 1995 if (res == NULL)
@@ -2037,10 +2018,10 @@ static void mv643xx_eth_shutdown(struct platform_device *pdev)
2037 unsigned int port_num = mp->port_num; 2018 unsigned int port_num = mp->port_num;
2038 2019
2039 /* Mask all interrupts on ethernet port */ 2020 /* Mask all interrupts on ethernet port */
2040 mv_write(INTERRUPT_MASK_REG(port_num), 0); 2021 wrl(mp, INTERRUPT_MASK_REG(port_num), 0);
2041 mv_read (INTERRUPT_MASK_REG(port_num)); 2022 rdl(mp, INTERRUPT_MASK_REG(port_num));
2042 2023
2043 eth_port_reset(port_num); 2024 eth_port_reset(mp);
2044} 2025}
2045 2026
2046static struct platform_driver mv643xx_eth_driver = { 2027static struct platform_driver mv643xx_eth_driver = {
@@ -2229,12 +2210,9 @@ MODULE_ALIAS("platform:mv643xx_eth");
2229 * return_info Tx/Rx user resource return information. 2210 * return_info Tx/Rx user resource return information.
2230 */ 2211 */
2231 2212
2232/* PHY routines */
2233static int ethernet_phy_get(unsigned int eth_port_num);
2234static void ethernet_phy_set(unsigned int eth_port_num, int phy_addr);
2235
2236/* Ethernet Port routines */ 2213/* Ethernet Port routines */
2237static void eth_port_set_filter_table_entry(int table, unsigned char entry); 2214static void eth_port_set_filter_table_entry(struct mv643xx_private *mp,
2215 int table, unsigned char entry);
2238 2216
2239/* 2217/*
2240 * eth_port_init - Initialize the Ethernet port driver 2218 * eth_port_init - Initialize the Ethernet port driver
@@ -2264,9 +2242,9 @@ static void eth_port_init(struct mv643xx_private *mp)
2264{ 2242{
2265 mp->rx_resource_err = 0; 2243 mp->rx_resource_err = 0;
2266 2244
2267 eth_port_reset(mp->port_num); 2245 eth_port_reset(mp);
2268 2246
2269 eth_port_init_mac_tables(mp->port_num); 2247 eth_port_init_mac_tables(mp);
2270} 2248}
2271 2249
2272/* 2250/*
@@ -2306,28 +2284,28 @@ static void eth_port_start(struct net_device *dev)
2306 2284
2307 /* Assignment of Tx CTRP of given queue */ 2285 /* Assignment of Tx CTRP of given queue */
2308 tx_curr_desc = mp->tx_curr_desc_q; 2286 tx_curr_desc = mp->tx_curr_desc_q;
2309 mv_write(TX_CURRENT_QUEUE_DESC_PTR_0(port_num), 2287 wrl(mp, TX_CURRENT_QUEUE_DESC_PTR_0(port_num),
2310 (u32)((struct eth_tx_desc *)mp->tx_desc_dma + tx_curr_desc)); 2288 (u32)((struct eth_tx_desc *)mp->tx_desc_dma + tx_curr_desc));
2311 2289
2312 /* Assignment of Rx CRDP of given queue */ 2290 /* Assignment of Rx CRDP of given queue */
2313 rx_curr_desc = mp->rx_curr_desc_q; 2291 rx_curr_desc = mp->rx_curr_desc_q;
2314 mv_write(RX_CURRENT_QUEUE_DESC_PTR_0(port_num), 2292 wrl(mp, RX_CURRENT_QUEUE_DESC_PTR_0(port_num),
2315 (u32)((struct eth_rx_desc *)mp->rx_desc_dma + rx_curr_desc)); 2293 (u32)((struct eth_rx_desc *)mp->rx_desc_dma + rx_curr_desc));
2316 2294
2317 /* Add the assigned Ethernet address to the port's address table */ 2295 /* Add the assigned Ethernet address to the port's address table */
2318 eth_port_uc_addr_set(port_num, dev->dev_addr); 2296 eth_port_uc_addr_set(mp, dev->dev_addr);
2319 2297
2320 /* Assign port configuration and command. */ 2298 /* Assign port configuration and command. */
2321 mv_write(PORT_CONFIG_REG(port_num), 2299 wrl(mp, PORT_CONFIG_REG(port_num),
2322 PORT_CONFIG_DEFAULT_VALUE); 2300 PORT_CONFIG_DEFAULT_VALUE);
2323 2301
2324 mv_write(PORT_CONFIG_EXTEND_REG(port_num), 2302 wrl(mp, PORT_CONFIG_EXTEND_REG(port_num),
2325 PORT_CONFIG_EXTEND_DEFAULT_VALUE); 2303 PORT_CONFIG_EXTEND_DEFAULT_VALUE);
2326 2304
2327 pscr = mv_read(PORT_SERIAL_CONTROL_REG(port_num)); 2305 pscr = rdl(mp, PORT_SERIAL_CONTROL_REG(port_num));
2328 2306
2329 pscr &= ~(SERIAL_PORT_ENABLE | FORCE_LINK_PASS); 2307 pscr &= ~(SERIAL_PORT_ENABLE | FORCE_LINK_PASS);
2330 mv_write(PORT_SERIAL_CONTROL_REG(port_num), pscr); 2308 wrl(mp, PORT_SERIAL_CONTROL_REG(port_num), pscr);
2331 2309
2332 pscr |= DISABLE_AUTO_NEG_FOR_FLOW_CTRL | 2310 pscr |= DISABLE_AUTO_NEG_FOR_FLOW_CTRL |
2333 DISABLE_AUTO_NEG_SPEED_GMII | 2311 DISABLE_AUTO_NEG_SPEED_GMII |
@@ -2335,32 +2313,34 @@ static void eth_port_start(struct net_device *dev)
2335 DO_NOT_FORCE_LINK_FAIL | 2313 DO_NOT_FORCE_LINK_FAIL |
2336 SERIAL_PORT_CONTROL_RESERVED; 2314 SERIAL_PORT_CONTROL_RESERVED;
2337 2315
2338 mv_write(PORT_SERIAL_CONTROL_REG(port_num), pscr); 2316 wrl(mp, PORT_SERIAL_CONTROL_REG(port_num), pscr);
2339 2317
2340 pscr |= SERIAL_PORT_ENABLE; 2318 pscr |= SERIAL_PORT_ENABLE;
2341 mv_write(PORT_SERIAL_CONTROL_REG(port_num), pscr); 2319 wrl(mp, PORT_SERIAL_CONTROL_REG(port_num), pscr);
2342 2320
2343 /* Assign port SDMA configuration */ 2321 /* Assign port SDMA configuration */
2344 mv_write(SDMA_CONFIG_REG(port_num), 2322 wrl(mp, SDMA_CONFIG_REG(port_num),
2345 PORT_SDMA_CONFIG_DEFAULT_VALUE); 2323 PORT_SDMA_CONFIG_DEFAULT_VALUE);
2346 2324
2347 /* Enable port Rx. */ 2325 /* Enable port Rx. */
2348 mv643xx_eth_port_enable_rx(port_num, ETH_RX_QUEUES_ENABLED); 2326 mv643xx_eth_port_enable_rx(mp, ETH_RX_QUEUES_ENABLED);
2349 2327
2350 /* Disable port bandwidth limits by clearing MTU register */ 2328 /* Disable port bandwidth limits by clearing MTU register */
2351 mv_write(MAXIMUM_TRANSMIT_UNIT(port_num), 0); 2329 wrl(mp, MAXIMUM_TRANSMIT_UNIT(port_num), 0);
2352 2330
2353 /* save phy settings across reset */ 2331 /* save phy settings across reset */
2354 mv643xx_get_settings(dev, &ethtool_cmd); 2332 mv643xx_get_settings(dev, &ethtool_cmd);
2355 ethernet_phy_reset(mp->port_num); 2333 ethernet_phy_reset(mp);
2356 mv643xx_set_settings(dev, &ethtool_cmd); 2334 mv643xx_set_settings(dev, &ethtool_cmd);
2357} 2335}
2358 2336
2359/* 2337/*
2360 * eth_port_uc_addr_set - Write a MAC address into the port's hw registers 2338 * eth_port_uc_addr_set - Write a MAC address into the port's hw registers
2361 */ 2339 */
2362static void eth_port_uc_addr_set(unsigned int port_num, unsigned char *p_addr) 2340static void eth_port_uc_addr_set(struct mv643xx_private *mp,
2341 unsigned char *p_addr)
2363{ 2342{
2343 unsigned int port_num = mp->port_num;
2364 unsigned int mac_h; 2344 unsigned int mac_h;
2365 unsigned int mac_l; 2345 unsigned int mac_l;
2366 int table; 2346 int table;
@@ -2369,24 +2349,26 @@ static void eth_port_uc_addr_set(unsigned int port_num, unsigned char *p_addr)
2369 mac_h = (p_addr[0] << 24) | (p_addr[1] << 16) | (p_addr[2] << 8) | 2349 mac_h = (p_addr[0] << 24) | (p_addr[1] << 16) | (p_addr[2] << 8) |
2370 (p_addr[3] << 0); 2350 (p_addr[3] << 0);
2371 2351
2372 mv_write(MAC_ADDR_LOW(port_num), mac_l); 2352 wrl(mp, MAC_ADDR_LOW(port_num), mac_l);
2373 mv_write(MAC_ADDR_HIGH(port_num), mac_h); 2353 wrl(mp, MAC_ADDR_HIGH(port_num), mac_h);
2374 2354
2375 /* Accept frames with this address */ 2355 /* Accept frames with this address */
2376 table = DA_FILTER_UNICAST_TABLE_BASE(port_num); 2356 table = DA_FILTER_UNICAST_TABLE_BASE(port_num);
2377 eth_port_set_filter_table_entry(table, p_addr[5] & 0x0f); 2357 eth_port_set_filter_table_entry(mp, table, p_addr[5] & 0x0f);
2378} 2358}
2379 2359
2380/* 2360/*
2381 * eth_port_uc_addr_get - Read the MAC address from the port's hw registers 2361 * eth_port_uc_addr_get - Read the MAC address from the port's hw registers
2382 */ 2362 */
2383static void eth_port_uc_addr_get(unsigned int port_num, unsigned char *p_addr) 2363static void eth_port_uc_addr_get(struct mv643xx_private *mp,
2364 unsigned char *p_addr)
2384{ 2365{
2366 unsigned int port_num = mp->port_num;
2385 unsigned int mac_h; 2367 unsigned int mac_h;
2386 unsigned int mac_l; 2368 unsigned int mac_l;
2387 2369
2388 mac_h = mv_read(MAC_ADDR_HIGH(port_num)); 2370 mac_h = rdl(mp, MAC_ADDR_HIGH(port_num));
2389 mac_l = mv_read(MAC_ADDR_LOW(port_num)); 2371 mac_l = rdl(mp, MAC_ADDR_LOW(port_num));
2390 2372
2391 p_addr[0] = (mac_h >> 24) & 0xff; 2373 p_addr[0] = (mac_h >> 24) & 0xff;
2392 p_addr[1] = (mac_h >> 16) & 0xff; 2374 p_addr[1] = (mac_h >> 16) & 0xff;
@@ -2405,7 +2387,8 @@ static void eth_port_uc_addr_get(unsigned int port_num, unsigned char *p_addr)
2405 * 3-1 Queue (ETH_Q0=0) 2387 * 3-1 Queue (ETH_Q0=0)
2406 * 7-4 Reserved = 0; 2388 * 7-4 Reserved = 0;
2407 */ 2389 */
2408static void eth_port_set_filter_table_entry(int table, unsigned char entry) 2390static void eth_port_set_filter_table_entry(struct mv643xx_private *mp,
2391 int table, unsigned char entry)
2409{ 2392{
2410 unsigned int table_reg; 2393 unsigned int table_reg;
2411 unsigned int tbl_offset; 2394 unsigned int tbl_offset;
@@ -2415,9 +2398,9 @@ static void eth_port_set_filter_table_entry(int table, unsigned char entry)
2415 reg_offset = entry % 4; /* Entry offset within the register */ 2398 reg_offset = entry % 4; /* Entry offset within the register */
2416 2399
2417 /* Set "accepts frame bit" at specified table entry */ 2400 /* Set "accepts frame bit" at specified table entry */
2418 table_reg = mv_read(table + tbl_offset); 2401 table_reg = rdl(mp, table + tbl_offset);
2419 table_reg |= 0x01 << (8 * reg_offset); 2402 table_reg |= 0x01 << (8 * reg_offset);
2420 mv_write(table + tbl_offset, table_reg); 2403 wrl(mp, table + tbl_offset, table_reg);
2421} 2404}
2422 2405
2423/* 2406/*

@@ -2434,8 +2417,9 @@ static void eth_port_set_filter_table_entry(int table, unsigned char entry)
2434 * In either case, eth_port_set_filter_table_entry() is then called 2417 * In either case, eth_port_set_filter_table_entry() is then called
2435 * to set the actual table entry.					2418 * to set the actual table entry.
2436 */ 2419 */
2437static void eth_port_mc_addr(unsigned int eth_port_num, unsigned char *p_addr) 2420static void eth_port_mc_addr(struct mv643xx_private *mp, unsigned char *p_addr)
2438{ 2421{
2422 unsigned int port_num = mp->port_num;
2439 unsigned int mac_h; 2423 unsigned int mac_h;
2440 unsigned int mac_l; 2424 unsigned int mac_l;
2441 unsigned char crc_result = 0; 2425 unsigned char crc_result = 0;
@@ -2446,9 +2430,8 @@ static void eth_port_mc_addr(unsigned int eth_port_num, unsigned char *p_addr)
2446 2430
2447 if ((p_addr[0] == 0x01) && (p_addr[1] == 0x00) && 2431 if ((p_addr[0] == 0x01) && (p_addr[1] == 0x00) &&
2448 (p_addr[2] == 0x5E) && (p_addr[3] == 0x00) && (p_addr[4] == 0x00)) { 2432 (p_addr[2] == 0x5E) && (p_addr[3] == 0x00) && (p_addr[4] == 0x00)) {
2449 table = DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE 2433 table = DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE(port_num);
2450 (eth_port_num); 2434 eth_port_set_filter_table_entry(mp, table, p_addr[5]);
2451 eth_port_set_filter_table_entry(table, p_addr[5]);
2452 return; 2435 return;
2453 } 2436 }
2454 2437
@@ -2520,8 +2503,8 @@ static void eth_port_mc_addr(unsigned int eth_port_num, unsigned char *p_addr)
2520 for (i = 0; i < 8; i++) 2503 for (i = 0; i < 8; i++)
2521 crc_result = crc_result | (crc[i] << i); 2504 crc_result = crc_result | (crc[i] << i);
2522 2505
2523 table = DA_FILTER_OTHER_MULTICAST_TABLE_BASE(eth_port_num); 2506 table = DA_FILTER_OTHER_MULTICAST_TABLE_BASE(port_num);
2524 eth_port_set_filter_table_entry(table, crc_result); 2507 eth_port_set_filter_table_entry(mp, table, crc_result);
2525} 2508}
2526 2509
2527/* 2510/*
@@ -2550,7 +2533,7 @@ static void eth_port_set_multicast_list(struct net_device *dev)
2550 * 3-1 Queue ETH_Q0=0 2533 * 3-1 Queue ETH_Q0=0
2551 * 7-4 Reserved = 0; 2534 * 7-4 Reserved = 0;
2552 */ 2535 */
2553 mv_write(DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE(eth_port_num) + table_index, 0x01010101); 2536 wrl(mp, DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE(eth_port_num) + table_index, 0x01010101);
2554 2537
2555 /* Set all entries in DA filter other multicast 2538 /* Set all entries in DA filter other multicast
2556 * table (Ex_dFOMT) 2539 * table (Ex_dFOMT)
@@ -2560,7 +2543,7 @@ static void eth_port_set_multicast_list(struct net_device *dev)
2560 * 3-1 Queue ETH_Q0=0 2543 * 3-1 Queue ETH_Q0=0
2561 * 7-4 Reserved = 0; 2544 * 7-4 Reserved = 0;
2562 */ 2545 */
2563 mv_write(DA_FILTER_OTHER_MULTICAST_TABLE_BASE(eth_port_num) + table_index, 0x01010101); 2546 wrl(mp, DA_FILTER_OTHER_MULTICAST_TABLE_BASE(eth_port_num) + table_index, 0x01010101);
2564 } 2547 }
2565 return; 2548 return;
2566 } 2549 }
@@ -2570,11 +2553,11 @@ static void eth_port_set_multicast_list(struct net_device *dev)
2570 */ 2553 */
2571 for (table_index = 0; table_index <= 0xFC; table_index += 4) { 2554 for (table_index = 0; table_index <= 0xFC; table_index += 4) {
2572 /* Clear DA filter special multicast table (Ex_dFSMT) */ 2555 /* Clear DA filter special multicast table (Ex_dFSMT) */
2573 mv_write(DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE 2556 wrl(mp, DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE
2574 (eth_port_num) + table_index, 0); 2557 (eth_port_num) + table_index, 0);
2575 2558
2576 /* Clear DA filter other multicast table (Ex_dFOMT) */ 2559 /* Clear DA filter other multicast table (Ex_dFOMT) */
2577 mv_write(DA_FILTER_OTHER_MULTICAST_TABLE_BASE 2560 wrl(mp, DA_FILTER_OTHER_MULTICAST_TABLE_BASE
2578 (eth_port_num) + table_index, 0); 2561 (eth_port_num) + table_index, 0);
2579 } 2562 }
2580 2563
@@ -2583,7 +2566,7 @@ static void eth_port_set_multicast_list(struct net_device *dev)
2583 (i < 256) && (mc_list != NULL) && (i < dev->mc_count); 2566 (i < 256) && (mc_list != NULL) && (i < dev->mc_count);
2584 i++, mc_list = mc_list->next) 2567 i++, mc_list = mc_list->next)
2585 if (mc_list->dmi_addrlen == 6) 2568 if (mc_list->dmi_addrlen == 6)
2586 eth_port_mc_addr(eth_port_num, mc_list->dmi_addr); 2569 eth_port_mc_addr(mp, mc_list->dmi_addr);
2587} 2570}
2588 2571
2589/* 2572/*
@@ -2594,7 +2577,7 @@ static void eth_port_set_multicast_list(struct net_device *dev)
2594 * Other Multicast) and set each entry to 0. 2577 * Other Multicast) and set each entry to 0.
2595 * 2578 *
2596 * INPUT: 2579 * INPUT:
2597 * unsigned int eth_port_num Ethernet Port number. 2580 * struct mv643xx_private *mp Ethernet Port.
2598 * 2581 *
2599 * OUTPUT: 2582 * OUTPUT:
2600 * Multicast and Unicast packets are rejected. 2583 * Multicast and Unicast packets are rejected.
@@ -2602,22 +2585,23 @@ static void eth_port_set_multicast_list(struct net_device *dev)
2602 * RETURN: 2585 * RETURN:
2603 * None. 2586 * None.
2604 */ 2587 */
2605static void eth_port_init_mac_tables(unsigned int eth_port_num) 2588static void eth_port_init_mac_tables(struct mv643xx_private *mp)
2606{ 2589{
2590 unsigned int port_num = mp->port_num;
2607 int table_index; 2591 int table_index;
2608 2592
2609 /* Clear DA filter unicast table (Ex_dFUT) */ 2593 /* Clear DA filter unicast table (Ex_dFUT) */
2610 for (table_index = 0; table_index <= 0xC; table_index += 4) 2594 for (table_index = 0; table_index <= 0xC; table_index += 4)
2611 mv_write(DA_FILTER_UNICAST_TABLE_BASE 2595 wrl(mp, DA_FILTER_UNICAST_TABLE_BASE(port_num) +
2612 (eth_port_num) + table_index, 0); 2596 table_index, 0);
2613 2597
2614 for (table_index = 0; table_index <= 0xFC; table_index += 4) { 2598 for (table_index = 0; table_index <= 0xFC; table_index += 4) {
2615 /* Clear DA filter special multicast table (Ex_dFSMT) */ 2599 /* Clear DA filter special multicast table (Ex_dFSMT) */
2616 mv_write(DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE 2600 wrl(mp, DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE(port_num) +
2617 (eth_port_num) + table_index, 0); 2601 table_index, 0);
2618 /* Clear DA filter other multicast table (Ex_dFOMT) */ 2602 /* Clear DA filter other multicast table (Ex_dFOMT) */
2619 mv_write(DA_FILTER_OTHER_MULTICAST_TABLE_BASE 2603 wrl(mp, DA_FILTER_OTHER_MULTICAST_TABLE_BASE(port_num) +
2620 (eth_port_num) + table_index, 0); 2604 table_index, 0);
2621 } 2605 }
2622} 2606}
2623 2607
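
The address-filter tables cleared here are arrays of 32-bit registers in which each register holds four one-byte entries, which is why eth_port_set_filter_table_entry() computes tbl_offset = (entry / 4) * 4 and shifts by 8 * reg_offset. The unicast table spans offsets 0x0-0xC (4 registers, 16 entries, indexed by the low nibble of the last address octet), and each multicast table spans 0x0-0xFC (64 registers, 256 entries). A small standalone check of that index arithmetic:

#include <stdio.h>

/* Same entry-to-register math as eth_port_set_filter_table_entry(). */
static void locate_entry(unsigned int entry)
{
	unsigned int tbl_offset = (entry / 4) * 4;	/* byte offset of the register */
	unsigned int reg_offset = entry % 4;		/* byte lane inside the register */
	unsigned int bit = 0x01u << (8 * reg_offset);	/* "accept frame" bit */

	printf("entry %3u -> register +0x%02x, mask 0x%08x\n", entry, tbl_offset, bit);
}

int main(void)
{
	locate_entry(0x0f);	/* last unicast entry: register +0x0c, mask 0x01000000 */
	locate_entry(0xff);	/* last multicast entry: register +0xfc, mask 0x01000000 */
	return 0;
}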
@@ -2629,7 +2613,7 @@ static void eth_port_init_mac_tables(unsigned int eth_port_num)
2629 * A read from the MIB counter will reset the counter. 2613 * A read from the MIB counter will reset the counter.
2630 * 2614 *
2631 * INPUT: 2615 * INPUT:
2632 * unsigned int eth_port_num Ethernet Port number. 2616 * struct mv643xx_private *mp Ethernet Port.
2633 * 2617 *
2634 * OUTPUT: 2618 * OUTPUT:
2635 * After reading all MIB counters, the counters resets. 2619 * After reading all MIB counters, the counters resets.
@@ -2638,19 +2622,20 @@ static void eth_port_init_mac_tables(unsigned int eth_port_num)
2638 * MIB counter value. 2622 * MIB counter value.
2639 * 2623 *
2640 */ 2624 */
2641static void eth_clear_mib_counters(unsigned int eth_port_num) 2625static void eth_clear_mib_counters(struct mv643xx_private *mp)
2642{ 2626{
2627 unsigned int port_num = mp->port_num;
2643 int i; 2628 int i;
2644 2629
2645 /* Perform dummy reads from MIB counters */ 2630 /* Perform dummy reads from MIB counters */
2646 for (i = ETH_MIB_GOOD_OCTETS_RECEIVED_LOW; i < ETH_MIB_LATE_COLLISION; 2631 for (i = ETH_MIB_GOOD_OCTETS_RECEIVED_LOW; i < ETH_MIB_LATE_COLLISION;
2647 i += 4) 2632 i += 4)
2648 mv_read(MIB_COUNTERS_BASE(eth_port_num) + i); 2633 rdl(mp, MIB_COUNTERS_BASE(port_num) + i);
2649} 2634}
2650 2635
2651static inline u32 read_mib(struct mv643xx_private *mp, int offset) 2636static inline u32 read_mib(struct mv643xx_private *mp, int offset)
2652{ 2637{
2653 return mv_read(MIB_COUNTERS_BASE(mp->port_num) + offset); 2638 return rdl(mp, MIB_COUNTERS_BASE(mp->port_num) + offset);
2654} 2639}
2655 2640
2656static void eth_update_mib_counters(struct mv643xx_private *mp) 2641static void eth_update_mib_counters(struct mv643xx_private *mp)
@@ -2686,7 +2671,7 @@ static void eth_update_mib_counters(struct mv643xx_private *mp)
2686 * the specified port. 2671 * the specified port.
2687 * 2672 *
2688 * INPUT: 2673 * INPUT:
2689 * unsigned int eth_port_num Ethernet Port number. 2674 * struct mv643xx_private *mp Ethernet Port.
2690 * 2675 *
2691 * OUTPUT: 2676 * OUTPUT:
2692 * None 2677 * None
@@ -2696,22 +2681,22 @@ static void eth_update_mib_counters(struct mv643xx_private *mp)
2696 * -ENODEV on failure 2681 * -ENODEV on failure
2697 * 2682 *
2698 */ 2683 */
2699static int ethernet_phy_detect(unsigned int port_num) 2684static int ethernet_phy_detect(struct mv643xx_private *mp)
2700{ 2685{
2701 unsigned int phy_reg_data0; 2686 unsigned int phy_reg_data0;
2702 int auto_neg; 2687 int auto_neg;
2703 2688
2704 eth_port_read_smi_reg(port_num, 0, &phy_reg_data0); 2689 eth_port_read_smi_reg(mp, 0, &phy_reg_data0);
2705 auto_neg = phy_reg_data0 & 0x1000; 2690 auto_neg = phy_reg_data0 & 0x1000;
2706 phy_reg_data0 ^= 0x1000; /* invert auto_neg */ 2691 phy_reg_data0 ^= 0x1000; /* invert auto_neg */
2707 eth_port_write_smi_reg(port_num, 0, phy_reg_data0); 2692 eth_port_write_smi_reg(mp, 0, phy_reg_data0);
2708 2693
2709 eth_port_read_smi_reg(port_num, 0, &phy_reg_data0); 2694 eth_port_read_smi_reg(mp, 0, &phy_reg_data0);
2710 if ((phy_reg_data0 & 0x1000) == auto_neg) 2695 if ((phy_reg_data0 & 0x1000) == auto_neg)
2711 return -ENODEV; /* change didn't take */ 2696 return -ENODEV; /* change didn't take */
2712 2697
2713 phy_reg_data0 ^= 0x1000; 2698 phy_reg_data0 ^= 0x1000;
2714 eth_port_write_smi_reg(port_num, 0, phy_reg_data0); 2699 eth_port_write_smi_reg(mp, 0, phy_reg_data0);
2715 return 0; 2700 return 0;
2716} 2701}
2717 2702
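
ethernet_phy_detect() probes for a PHY by flipping the auto-negotiation bit (0x1000) in register 0, reading it back, and treating an unchanged value as "nothing there"; it then flips the bit back so the PHY is left as it was found. A standalone model of that toggle-and-verify probe, where the fake register below stands in for the SMI read/write pair:

#include <stdio.h>

static unsigned int fake_bmcr = 0x3100;	/* pretend PHY control register */
static int phy_present = 1;		/* set to 0 to simulate an empty address */

static unsigned int smi_read(void)	{ return phy_present ? fake_bmcr : 0xffff; }
static void smi_write(unsigned int v)	{ if (phy_present) fake_bmcr = v; }

/* Toggle a harmless bit; if the change does not stick, no PHY answered. */
static int detect_phy(void)
{
	unsigned int val = smi_read();
	unsigned int autoneg = val & 0x1000;

	smi_write(val ^ 0x1000);
	if ((smi_read() & 0x1000) == autoneg)
		return -1;			/* change didn't take */

	smi_write(smi_read() ^ 0x1000);		/* restore the original setting */
	return 0;
}

int main(void)
{
	printf("phy %s\n", detect_phy() ? "missing" : "found");
	return 0;
}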
@@ -2722,7 +2707,7 @@ static int ethernet_phy_detect(unsigned int port_num)
2722 * This routine returns the given ethernet port PHY address. 2707 * This routine returns the given ethernet port PHY address.
2723 * 2708 *
2724 * INPUT: 2709 * INPUT:
2725 * unsigned int eth_port_num Ethernet Port number. 2710 * struct mv643xx_private *mp Ethernet Port.
2726 * 2711 *
2727 * OUTPUT: 2712 * OUTPUT:
2728 * None. 2713 * None.
@@ -2731,13 +2716,13 @@ static int ethernet_phy_detect(unsigned int port_num)
2731 * PHY address. 2716 * PHY address.
2732 * 2717 *
2733 */ 2718 */
2734static int ethernet_phy_get(unsigned int eth_port_num) 2719static int ethernet_phy_get(struct mv643xx_private *mp)
2735{ 2720{
2736 unsigned int reg_data; 2721 unsigned int reg_data;
2737 2722
2738 reg_data = mv_read(PHY_ADDR_REG); 2723 reg_data = rdl(mp, PHY_ADDR_REG);
2739 2724
2740 return ((reg_data >> (5 * eth_port_num)) & 0x1f); 2725 return ((reg_data >> (5 * mp->port_num)) & 0x1f);
2741} 2726}
2742 2727
2743/* 2728/*
@@ -2747,7 +2732,7 @@ static int ethernet_phy_get(unsigned int eth_port_num)
2747 * This routine sets the given ethernet port PHY address. 2732 * This routine sets the given ethernet port PHY address.
2748 * 2733 *
2749 * INPUT: 2734 * INPUT:
2750 * unsigned int eth_port_num Ethernet Port number. 2735 * struct mv643xx_private *mp Ethernet Port.
2751 * int phy_addr PHY address. 2736 * int phy_addr PHY address.
2752 * 2737 *
2753 * OUTPUT: 2738 * OUTPUT:
@@ -2757,15 +2742,15 @@ static int ethernet_phy_get(unsigned int eth_port_num)
2757 * None. 2742 * None.
2758 * 2743 *
2759 */ 2744 */
2760static void ethernet_phy_set(unsigned int eth_port_num, int phy_addr) 2745static void ethernet_phy_set(struct mv643xx_private *mp, int phy_addr)
2761{ 2746{
2762 u32 reg_data; 2747 u32 reg_data;
2763 int addr_shift = 5 * eth_port_num; 2748 int addr_shift = 5 * mp->port_num;
2764 2749
2765 reg_data = mv_read(PHY_ADDR_REG); 2750 reg_data = rdl(mp, PHY_ADDR_REG);
2766 reg_data &= ~(0x1f << addr_shift); 2751 reg_data &= ~(0x1f << addr_shift);
2767 reg_data |= (phy_addr & 0x1f) << addr_shift; 2752 reg_data |= (phy_addr & 0x1f) << addr_shift;
2768 mv_write(PHY_ADDR_REG, reg_data); 2753 wrl(mp, PHY_ADDR_REG, reg_data);
2769} 2754}
2770 2755
2771/* 2756/*
@@ -2775,7 +2760,7 @@ static void ethernet_phy_set(unsigned int eth_port_num, int phy_addr)
2775 * This routine utilizes the SMI interface to reset the ethernet port PHY. 2760 * This routine utilizes the SMI interface to reset the ethernet port PHY.
2776 * 2761 *
2777 * INPUT: 2762 * INPUT:
2778 * unsigned int eth_port_num Ethernet Port number. 2763 * struct mv643xx_private *mp Ethernet Port.
2779 * 2764 *
2780 * OUTPUT: 2765 * OUTPUT:
2781 * The PHY is reset. 2766 * The PHY is reset.
@@ -2784,51 +2769,52 @@ static void ethernet_phy_set(unsigned int eth_port_num, int phy_addr)
2784 * None. 2769 * None.
2785 * 2770 *
2786 */ 2771 */
2787static void ethernet_phy_reset(unsigned int eth_port_num) 2772static void ethernet_phy_reset(struct mv643xx_private *mp)
2788{ 2773{
2789 unsigned int phy_reg_data; 2774 unsigned int phy_reg_data;
2790 2775
2791 /* Reset the PHY */ 2776 /* Reset the PHY */
2792 eth_port_read_smi_reg(eth_port_num, 0, &phy_reg_data); 2777 eth_port_read_smi_reg(mp, 0, &phy_reg_data);
2793 phy_reg_data |= 0x8000; /* Set bit 15 to reset the PHY */ 2778 phy_reg_data |= 0x8000; /* Set bit 15 to reset the PHY */
2794 eth_port_write_smi_reg(eth_port_num, 0, phy_reg_data); 2779 eth_port_write_smi_reg(mp, 0, phy_reg_data);
2795 2780
2796 /* wait for PHY to come out of reset */ 2781 /* wait for PHY to come out of reset */
2797 do { 2782 do {
2798 udelay(1); 2783 udelay(1);
2799 eth_port_read_smi_reg(eth_port_num, 0, &phy_reg_data); 2784 eth_port_read_smi_reg(mp, 0, &phy_reg_data);
2800 } while (phy_reg_data & 0x8000); 2785 } while (phy_reg_data & 0x8000);
2801} 2786}
2802 2787
2803static void mv643xx_eth_port_enable_tx(unsigned int port_num, 2788static void mv643xx_eth_port_enable_tx(struct mv643xx_private *mp,
2804 unsigned int queues) 2789 unsigned int queues)
2805{ 2790{
2806 mv_write(TRANSMIT_QUEUE_COMMAND_REG(port_num), queues); 2791 wrl(mp, TRANSMIT_QUEUE_COMMAND_REG(mp->port_num), queues);
2807} 2792}
2808 2793
2809static void mv643xx_eth_port_enable_rx(unsigned int port_num, 2794static void mv643xx_eth_port_enable_rx(struct mv643xx_private *mp,
2810 unsigned int queues) 2795 unsigned int queues)
2811{ 2796{
2812 mv_write(RECEIVE_QUEUE_COMMAND_REG(port_num), queues); 2797 wrl(mp, RECEIVE_QUEUE_COMMAND_REG(mp->port_num), queues);
2813} 2798}
2814 2799
2815static unsigned int mv643xx_eth_port_disable_tx(unsigned int port_num) 2800static unsigned int mv643xx_eth_port_disable_tx(struct mv643xx_private *mp)
2816{ 2801{
2802 unsigned int port_num = mp->port_num;
2817 u32 queues; 2803 u32 queues;
2818 2804
2819 /* Stop Tx port activity. Check port Tx activity. */ 2805 /* Stop Tx port activity. Check port Tx activity. */
2820 queues = mv_read(TRANSMIT_QUEUE_COMMAND_REG(port_num)) & 0xFF; 2806 queues = rdl(mp, TRANSMIT_QUEUE_COMMAND_REG(port_num)) & 0xFF;
2821 if (queues) { 2807 if (queues) {
2822 /* Issue stop command for active queues only */ 2808 /* Issue stop command for active queues only */
2823 mv_write(TRANSMIT_QUEUE_COMMAND_REG(port_num), (queues << 8)); 2809 wrl(mp, TRANSMIT_QUEUE_COMMAND_REG(port_num), (queues << 8));
2824 2810
2825 /* Wait for all Tx activity to terminate. */ 2811 /* Wait for all Tx activity to terminate. */
2826 /* Check port cause register that all Tx queues are stopped */ 2812 /* Check port cause register that all Tx queues are stopped */
2827 while (mv_read(TRANSMIT_QUEUE_COMMAND_REG(port_num)) & 0xFF) 2813 while (rdl(mp, TRANSMIT_QUEUE_COMMAND_REG(port_num)) & 0xFF)
2828 udelay(PHY_WAIT_MICRO_SECONDS); 2814 udelay(PHY_WAIT_MICRO_SECONDS);
2829 2815
2830 /* Wait for Tx FIFO to empty */ 2816 /* Wait for Tx FIFO to empty */
2831 while (mv_read(PORT_STATUS_REG(port_num)) & 2817 while (rdl(mp, PORT_STATUS_REG(port_num)) &
2832 ETH_PORT_TX_FIFO_EMPTY) 2818 ETH_PORT_TX_FIFO_EMPTY)
2833 udelay(PHY_WAIT_MICRO_SECONDS); 2819 udelay(PHY_WAIT_MICRO_SECONDS);
2834 } 2820 }
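
The PHY-reset wait above and the queue-disable waits around it all spin on a status bit with udelay() between reads and no upper bound, so a wedged PHY or port would stall the caller indefinitely. Bounding such a poll is a common hardening; a standalone sketch of the bounded form, with an arbitrary iteration limit and a simulated status register chosen purely for illustration:

#include <stdio.h>
#include <unistd.h>

#define RESET_BIT	0x8000u
#define MAX_POLLS	1000		/* arbitrary bound for the sketch */

static int polls_left = 3;		/* pretend the bit clears after 3 reads */

static unsigned int read_status(void)
{
	return polls_left-- > 0 ? RESET_BIT : 0;
}

/* Poll for a bit to clear, but give up instead of spinning forever. */
static int wait_for_clear(void)
{
	int i;

	for (i = 0; i < MAX_POLLS; i++) {
		if (!(read_status() & RESET_BIT))
			return 0;
		usleep(1);		/* stands in for udelay(1) */
	}
	return -1;			/* timed out */
}

int main(void)
{
	printf("wait %s\n", wait_for_clear() ? "timed out" : "done");
	return 0;
}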
@@ -2836,19 +2822,20 @@ static unsigned int mv643xx_eth_port_disable_tx(unsigned int port_num)
2836 return queues; 2822 return queues;
2837} 2823}
2838 2824
2839static unsigned int mv643xx_eth_port_disable_rx(unsigned int port_num) 2825static unsigned int mv643xx_eth_port_disable_rx(struct mv643xx_private *mp)
2840{ 2826{
2827 unsigned int port_num = mp->port_num;
2841 u32 queues; 2828 u32 queues;
2842 2829
2843 /* Stop Rx port activity. Check port Rx activity. */ 2830 /* Stop Rx port activity. Check port Rx activity. */
2844 queues = mv_read(RECEIVE_QUEUE_COMMAND_REG(port_num)) & 0xFF; 2831 queues = rdl(mp, RECEIVE_QUEUE_COMMAND_REG(port_num)) & 0xFF;
2845 if (queues) { 2832 if (queues) {
2846 /* Issue stop command for active queues only */ 2833 /* Issue stop command for active queues only */
2847 mv_write(RECEIVE_QUEUE_COMMAND_REG(port_num), (queues << 8)); 2834 wrl(mp, RECEIVE_QUEUE_COMMAND_REG(port_num), (queues << 8));
2848 2835
2849 /* Wait for all Rx activity to terminate. */ 2836 /* Wait for all Rx activity to terminate. */
2850 /* Check port cause register that all Rx queues are stopped */ 2837 /* Check port cause register that all Rx queues are stopped */
2851 while (mv_read(RECEIVE_QUEUE_COMMAND_REG(port_num)) & 0xFF) 2838 while (rdl(mp, RECEIVE_QUEUE_COMMAND_REG(port_num)) & 0xFF)
2852 udelay(PHY_WAIT_MICRO_SECONDS); 2839 udelay(PHY_WAIT_MICRO_SECONDS);
2853 } 2840 }
2854 2841
@@ -2864,7 +2851,7 @@ static unsigned int mv643xx_eth_port_disable_rx(unsigned int port_num)
2864 * idle state after this command is performed and the port is disabled. 2851 * idle state after this command is performed and the port is disabled.
2865 * 2852 *
2866 * INPUT: 2853 * INPUT:
2867 * unsigned int eth_port_num Ethernet Port number. 2854 * struct mv643xx_private *mp Ethernet Port.
2868 * 2855 *
2869 * OUTPUT: 2856 * OUTPUT:
2870 * Channel activity is halted. 2857 * Channel activity is halted.
@@ -2873,22 +2860,23 @@ static unsigned int mv643xx_eth_port_disable_rx(unsigned int port_num)
2873 * None. 2860 * None.
2874 * 2861 *
2875 */ 2862 */
2876static void eth_port_reset(unsigned int port_num) 2863static void eth_port_reset(struct mv643xx_private *mp)
2877{ 2864{
2865 unsigned int port_num = mp->port_num;
2878 unsigned int reg_data; 2866 unsigned int reg_data;
2879 2867
2880 mv643xx_eth_port_disable_tx(port_num); 2868 mv643xx_eth_port_disable_tx(mp);
2881 mv643xx_eth_port_disable_rx(port_num); 2869 mv643xx_eth_port_disable_rx(mp);
2882 2870
2883 /* Clear all MIB counters */ 2871 /* Clear all MIB counters */
2884 eth_clear_mib_counters(port_num); 2872 eth_clear_mib_counters(mp);
2885 2873
2886 /* Reset the Enable bit in the Configuration Register */ 2874 /* Reset the Enable bit in the Configuration Register */
2887 reg_data = mv_read(PORT_SERIAL_CONTROL_REG(port_num)); 2875 reg_data = rdl(mp, PORT_SERIAL_CONTROL_REG(port_num));
2888 reg_data &= ~(SERIAL_PORT_ENABLE | 2876 reg_data &= ~(SERIAL_PORT_ENABLE |
2889 DO_NOT_FORCE_LINK_FAIL | 2877 DO_NOT_FORCE_LINK_FAIL |
2890 FORCE_LINK_PASS); 2878 FORCE_LINK_PASS);
2891 mv_write(PORT_SERIAL_CONTROL_REG(port_num), reg_data); 2879 wrl(mp, PORT_SERIAL_CONTROL_REG(port_num), reg_data);
2892} 2880}
2893 2881
2894 2882
@@ -2900,7 +2888,7 @@ static void eth_port_reset(unsigned int port_num)
2900 * order to perform PHY register read. 2888 * order to perform PHY register read.
2901 * 2889 *
2902 * INPUT: 2890 * INPUT:
2903 * unsigned int port_num Ethernet Port number. 2891 * struct mv643xx_private *mp Ethernet Port.
2904 * unsigned int phy_reg PHY register address offset. 2892 * unsigned int phy_reg PHY register address offset.
2905 * unsigned int *value Register value buffer. 2893 * unsigned int *value Register value buffer.
2906 * 2894 *
@@ -2912,10 +2900,10 @@ static void eth_port_reset(unsigned int port_num)
2912 * true otherwise. 2900 * true otherwise.
2913 * 2901 *
2914 */ 2902 */
2915static void eth_port_read_smi_reg(unsigned int port_num, 2903static void eth_port_read_smi_reg(struct mv643xx_private *mp,
2916 unsigned int phy_reg, unsigned int *value) 2904 unsigned int phy_reg, unsigned int *value)
2917{ 2905{
2918 int phy_addr = ethernet_phy_get(port_num); 2906 int phy_addr = ethernet_phy_get(mp);
2919 unsigned long flags; 2907 unsigned long flags;
2920 int i; 2908 int i;
2921 2909
@@ -2923,27 +2911,27 @@ static void eth_port_read_smi_reg(unsigned int port_num,
2923 spin_lock_irqsave(&mv643xx_eth_phy_lock, flags); 2911 spin_lock_irqsave(&mv643xx_eth_phy_lock, flags);
2924 2912
2925 /* wait for the SMI register to become available */ 2913 /* wait for the SMI register to become available */
2926 for (i = 0; mv_read(SMI_REG) & ETH_SMI_BUSY; i++) { 2914 for (i = 0; rdl(mp, SMI_REG) & ETH_SMI_BUSY; i++) {
2927 if (i == PHY_WAIT_ITERATIONS) { 2915 if (i == PHY_WAIT_ITERATIONS) {
2928 printk("mv643xx PHY busy timeout, port %d\n", port_num); 2916 printk("%s: PHY busy timeout\n", mp->dev->name);
2929 goto out; 2917 goto out;
2930 } 2918 }
2931 udelay(PHY_WAIT_MICRO_SECONDS); 2919 udelay(PHY_WAIT_MICRO_SECONDS);
2932 } 2920 }
2933 2921
2934 mv_write(SMI_REG, 2922 wrl(mp, SMI_REG,
2935 (phy_addr << 16) | (phy_reg << 21) | ETH_SMI_OPCODE_READ); 2923 (phy_addr << 16) | (phy_reg << 21) | ETH_SMI_OPCODE_READ);
2936 2924
2937 /* now wait for the data to be valid */ 2925 /* now wait for the data to be valid */
2938 for (i = 0; !(mv_read(SMI_REG) & ETH_SMI_READ_VALID); i++) { 2926 for (i = 0; !(rdl(mp, SMI_REG) & ETH_SMI_READ_VALID); i++) {
2939 if (i == PHY_WAIT_ITERATIONS) { 2927 if (i == PHY_WAIT_ITERATIONS) {
2940 printk("mv643xx PHY read timeout, port %d\n", port_num); 2928 printk("%s: PHY read timeout\n", mp->dev->name);
2941 goto out; 2929 goto out;
2942 } 2930 }
2943 udelay(PHY_WAIT_MICRO_SECONDS); 2931 udelay(PHY_WAIT_MICRO_SECONDS);
2944 } 2932 }
2945 2933
2946 *value = mv_read(SMI_REG) & 0xffff; 2934 *value = rdl(mp, SMI_REG) & 0xffff;
2947out: 2935out:
2948 spin_unlock_irqrestore(&mv643xx_eth_phy_lock, flags); 2936 spin_unlock_irqrestore(&mv643xx_eth_phy_lock, flags);
2949} 2937}
@@ -2956,7 +2944,7 @@ out:
2956 * order to perform writes to PHY registers. 2944 * order to perform writes to PHY registers.
2957 * 2945 *
2958 * INPUT: 2946 * INPUT:
2959 * unsigned int eth_port_num Ethernet Port number. 2947 * struct mv643xx_private *mp Ethernet Port.
2960 * unsigned int phy_reg PHY register address offset. 2948 * unsigned int phy_reg PHY register address offset.
2961 * unsigned int value Register value. 2949 * unsigned int value Register value.
2962 * 2950 *
@@ -2968,29 +2956,28 @@ out:
2968 * true otherwise. 2956 * true otherwise.
2969 * 2957 *
2970 */ 2958 */
2971static void eth_port_write_smi_reg(unsigned int eth_port_num, 2959static void eth_port_write_smi_reg(struct mv643xx_private *mp,
2972 unsigned int phy_reg, unsigned int value) 2960 unsigned int phy_reg, unsigned int value)
2973{ 2961{
2974 int phy_addr; 2962 int phy_addr;
2975 int i; 2963 int i;
2976 unsigned long flags; 2964 unsigned long flags;
2977 2965
2978 phy_addr = ethernet_phy_get(eth_port_num); 2966 phy_addr = ethernet_phy_get(mp);
2979 2967
2980 /* the SMI register is a shared resource */ 2968 /* the SMI register is a shared resource */
2981 spin_lock_irqsave(&mv643xx_eth_phy_lock, flags); 2969 spin_lock_irqsave(&mv643xx_eth_phy_lock, flags);
2982 2970
2983 /* wait for the SMI register to become available */ 2971 /* wait for the SMI register to become available */
2984 for (i = 0; mv_read(SMI_REG) & ETH_SMI_BUSY; i++) { 2972 for (i = 0; rdl(mp, SMI_REG) & ETH_SMI_BUSY; i++) {
2985 if (i == PHY_WAIT_ITERATIONS) { 2973 if (i == PHY_WAIT_ITERATIONS) {
2986 printk("mv643xx PHY busy timeout, port %d\n", 2974 printk("%s: PHY busy timeout\n", mp->dev->name);
2987 eth_port_num);
2988 goto out; 2975 goto out;
2989 } 2976 }
2990 udelay(PHY_WAIT_MICRO_SECONDS); 2977 udelay(PHY_WAIT_MICRO_SECONDS);
2991 } 2978 }
2992 2979
2993 mv_write(SMI_REG, (phy_addr << 16) | (phy_reg << 21) | 2980 wrl(mp, SMI_REG, (phy_addr << 16) | (phy_reg << 21) |
2994 ETH_SMI_OPCODE_WRITE | (value & 0xffff)); 2981 ETH_SMI_OPCODE_WRITE | (value & 0xffff));
2995out: 2982out:
2996 spin_unlock_irqrestore(&mv643xx_eth_phy_lock, flags); 2983 spin_unlock_irqrestore(&mv643xx_eth_phy_lock, flags);
@@ -3001,17 +2988,17 @@ out:
3001 */ 2988 */
3002static int mv643xx_mdio_read(struct net_device *dev, int phy_id, int location) 2989static int mv643xx_mdio_read(struct net_device *dev, int phy_id, int location)
3003{ 2990{
3004 int val;
3005 struct mv643xx_private *mp = netdev_priv(dev); 2991 struct mv643xx_private *mp = netdev_priv(dev);
2992 int val;
3006 2993
3007 eth_port_read_smi_reg(mp->port_num, location, &val); 2994 eth_port_read_smi_reg(mp, location, &val);
3008 return val; 2995 return val;
3009} 2996}
3010 2997
3011static void mv643xx_mdio_write(struct net_device *dev, int phy_id, int location, int val) 2998static void mv643xx_mdio_write(struct net_device *dev, int phy_id, int location, int val)
3012{ 2999{
3013 struct mv643xx_private *mp = netdev_priv(dev); 3000 struct mv643xx_private *mp = netdev_priv(dev);
3014 eth_port_write_smi_reg(mp->port_num, location, val); 3001 eth_port_write_smi_reg(mp, location, val);
3015} 3002}
3016 3003
3017/* 3004/*
@@ -3156,7 +3143,7 @@ struct mv643xx_stats {
3156 int stat_offset; 3143 int stat_offset;
3157}; 3144};
3158 3145
3159#define MV643XX_STAT(m) sizeof(((struct mv643xx_private *)0)->m), \ 3146#define MV643XX_STAT(m) FIELD_SIZEOF(struct mv643xx_private, m), \
3160 offsetof(struct mv643xx_private, m) 3147 offsetof(struct mv643xx_private, m)
3161 3148
3162static const struct mv643xx_stats mv643xx_gstrings_stats[] = { 3149static const struct mv643xx_stats mv643xx_gstrings_stats[] = {
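
Note: the mv643xx_eth hunks above convert the driver from global mv_read()/mv_write() accessors keyed on a bare port number to per-device rdl()/wrl() helpers that take the driver's private struct, and the SMI/PHY helpers now busy-wait on the shared SMI register through those accessors. The sketch below illustrates that pattern under stated assumptions: only the function and macro names come from the hunks; the struct layout and the placeholder register offset and bit values are invented for illustration and are not the real driver's.

    #include <linux/kernel.h>
    #include <linux/io.h>
    #include <linux/delay.h>
    #include <linux/errno.h>
    #include <linux/netdevice.h>

    /* Placeholder values -- the real ones live in the driver's headers. */
    #define SMI_REG                 0x0004
    #define ETH_SMI_BUSY            (1 << 28)
    #define PHY_WAIT_ITERATIONS     1000
    #define PHY_WAIT_MICRO_SECONDS  10

    struct mv643xx_private_sketch {
            void __iomem *base;     /* assumed: mapped per-port register window */
            unsigned int port_num;
            struct net_device *dev;
    };

    /* Per-device register accessors, as introduced by the hunks above. */
    static inline u32 rdl(struct mv643xx_private_sketch *mp, int offset)
    {
            return readl(mp->base + offset);
    }

    static inline void wrl(struct mv643xx_private_sketch *mp, int offset, u32 data)
    {
            writel(data, mp->base + offset);
    }

    /* Busy-wait for the shared SMI unit, as in eth_port_read_smi_reg(). */
    static int smi_wait_ready(struct mv643xx_private_sketch *mp)
    {
            int i;

            for (i = 0; rdl(mp, SMI_REG) & ETH_SMI_BUSY; i++) {
                    if (i == PHY_WAIT_ITERATIONS) {
                            printk(KERN_WARNING "%s: PHY busy timeout\n",
                                   mp->dev->name);
                            return -ETIMEDOUT;
                    }
                    udelay(PHY_WAIT_MICRO_SECONDS);
            }
            return 0;
    }

Passing mp instead of port_num is also what lets the timeout messages in the hunks print mp->dev->name rather than a bare port index.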
diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c
index 385f69c14387..46119bb3770a 100644
--- a/drivers/net/natsemi.c
+++ b/drivers/net/natsemi.c
@@ -511,10 +511,10 @@ enum PhyCtrl_bits {
511/* Note that using only 32 bit fields simplifies conversion to big-endian 511/* Note that using only 32 bit fields simplifies conversion to big-endian
512 architectures. */ 512 architectures. */
513struct netdev_desc { 513struct netdev_desc {
514 u32 next_desc; 514 __le32 next_desc;
515 s32 cmd_status; 515 __le32 cmd_status;
516 u32 addr; 516 __le32 addr;
517 u32 software_use; 517 __le32 software_use;
518}; 518};
519 519
520/* Bits in network_desc.status */ 520/* Bits in network_desc.status */
@@ -786,7 +786,8 @@ static int __devinit natsemi_probe1 (struct pci_dev *pdev,
786 struct netdev_private *np; 786 struct netdev_private *np;
787 int i, option, irq, chip_idx = ent->driver_data; 787 int i, option, irq, chip_idx = ent->driver_data;
788 static int find_cnt = -1; 788 static int find_cnt = -1;
789 unsigned long iostart, iosize; 789 resource_size_t iostart;
790 unsigned long iosize;
790 void __iomem *ioaddr; 791 void __iomem *ioaddr;
791 const int pcibar = 1; /* PCI base address register */ 792 const int pcibar = 1; /* PCI base address register */
792 int prev_eedata; 793 int prev_eedata;
@@ -946,10 +947,11 @@ static int __devinit natsemi_probe1 (struct pci_dev *pdev,
946 goto err_create_file; 947 goto err_create_file;
947 948
948 if (netif_msg_drv(np)) { 949 if (netif_msg_drv(np)) {
949 printk(KERN_INFO "natsemi %s: %s at %#08lx " 950 printk(KERN_INFO "natsemi %s: %s at %#08llx "
950 "(%s), %s, IRQ %d", 951 "(%s), %s, IRQ %d",
951 dev->name, natsemi_pci_info[chip_idx].name, iostart, 952 dev->name, natsemi_pci_info[chip_idx].name,
952 pci_name(np->pci_dev), print_mac(mac, dev->dev_addr), irq); 953 (unsigned long long)iostart, pci_name(np->pci_dev),
954 print_mac(mac, dev->dev_addr), irq);
953 if (dev->if_port == PORT_TP) 955 if (dev->if_port == PORT_TP)
954 printk(", port TP.\n"); 956 printk(", port TP.\n");
955 else if (np->ignore_phy) 957 else if (np->ignore_phy)
@@ -2018,7 +2020,7 @@ static void drain_rx(struct net_device *dev)
2018 /* Free all the skbuffs in the Rx queue. */ 2020 /* Free all the skbuffs in the Rx queue. */
2019 for (i = 0; i < RX_RING_SIZE; i++) { 2021 for (i = 0; i < RX_RING_SIZE; i++) {
2020 np->rx_ring[i].cmd_status = 0; 2022 np->rx_ring[i].cmd_status = 0;
2021 np->rx_ring[i].addr = 0xBADF00D0; /* An invalid address. */ 2023 np->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
2022 if (np->rx_skbuff[i]) { 2024 if (np->rx_skbuff[i]) {
2023 pci_unmap_single(np->pci_dev, 2025 pci_unmap_single(np->pci_dev,
2024 np->rx_dma[i], buflen, 2026 np->rx_dma[i], buflen,
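
Note: the natsemi hunk above marks the DMA descriptor fields as __le32 and wraps raw constants in cpu_to_le32(), so sparse can catch endianness bugs and the descriptors stay little-endian on big-endian hosts; it also widens iostart to resource_size_t and prints it via an (unsigned long long) cast. A minimal sketch of the endianness-annotation pattern follows; the descriptor fields mirror the hunk, while the helper functions are hypothetical.

    #include <linux/types.h>
    #include <asm/byteorder.h>

    /* In-memory descriptor shared with the device: fields are always
     * little-endian, independent of host byte order. */
    struct netdev_desc_sketch {
            __le32 next_desc;
            __le32 cmd_status;
            __le32 addr;
            __le32 software_use;
    };

    /* Hypothetical helper mirroring drain_rx() above. */
    static void invalidate_rx_desc(struct netdev_desc_sketch *desc)
    {
            desc->cmd_status = 0;
            desc->addr = cpu_to_le32(0xBADF00D0);   /* an invalid address */
    }

    /* Hypothetical helper: convert back before the CPU uses the value. */
    static u32 desc_cmd_status(const struct netdev_desc_sketch *desc)
    {
            return le32_to_cpu(desc->cmd_status);
    }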
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h
index 7f20a03623a0..8cb29f5b1038 100644
--- a/drivers/net/netxen/netxen_nic.h
+++ b/drivers/net/netxen/netxen_nic.h
@@ -95,23 +95,6 @@
95 95
96#define ADDR_IN_WINDOW1(off) \ 96#define ADDR_IN_WINDOW1(off) \
97 ((off > NETXEN_CRB_PCIX_HOST2) && (off < NETXEN_CRB_MAX)) ? 1 : 0 97 ((off > NETXEN_CRB_PCIX_HOST2) && (off < NETXEN_CRB_MAX)) ? 1 : 0
98/*
99 * In netxen_nic_down(), we must wait for any pending callback requests into
100 * netxen_watchdog_task() to complete; eg otherwise the watchdog_timer could be
101 * reenabled right after it is deleted in netxen_nic_down(). FLUSH_SCHEDULED_WORK()
102 * does this synchronization.
103 *
104 * Normally, schedule_work()/flush_scheduled_work() could have worked, but
105 * netxen_nic_close() is invoked with kernel rtnl lock held. netif_carrier_off()
106 * call in netxen_nic_close() triggers a schedule_work(&linkwatch_work), and a
107 * subsequent call to flush_scheduled_work() in netxen_nic_down() would cause
108 * linkwatch_event() to be executed which also attempts to acquire the rtnl
109 * lock thus causing a deadlock.
110 */
111
112#define SCHEDULE_WORK(tp) queue_work(netxen_workq, tp)
113#define FLUSH_SCHEDULED_WORK() flush_workqueue(netxen_workq)
114extern struct workqueue_struct *netxen_workq;
115 98
116/* 99/*
117 * normalize a 64MB crb address to 32MB PCI window 100 * normalize a 64MB crb address to 32MB PCI window
@@ -1050,7 +1033,6 @@ void netxen_halt_pegs(struct netxen_adapter *adapter);
1050int netxen_rom_se(struct netxen_adapter *adapter, int addr); 1033int netxen_rom_se(struct netxen_adapter *adapter, int addr);
1051 1034
1052/* Functions from netxen_nic_isr.c */ 1035/* Functions from netxen_nic_isr.c */
1053int netxen_nic_link_ok(struct netxen_adapter *adapter);
1054void netxen_initialize_adapter_sw(struct netxen_adapter *adapter); 1036void netxen_initialize_adapter_sw(struct netxen_adapter *adapter);
1055void netxen_initialize_adapter_hw(struct netxen_adapter *adapter); 1037void netxen_initialize_adapter_hw(struct netxen_adapter *adapter);
1056void *netxen_alloc(struct pci_dev *pdev, size_t sz, dma_addr_t * ptr, 1038void *netxen_alloc(struct pci_dev *pdev, size_t sz, dma_addr_t * ptr,
diff --git a/drivers/net/netxen/netxen_nic_isr.c b/drivers/net/netxen/netxen_nic_isr.c
index c81313b717bd..f487615f4063 100644
--- a/drivers/net/netxen/netxen_nic_isr.c
+++ b/drivers/net/netxen/netxen_nic_isr.c
@@ -172,6 +172,7 @@ void netxen_nic_gbe_handle_phy_intr(struct netxen_adapter *adapter)
172 netxen_nic_isr_other(adapter); 172 netxen_nic_isr_other(adapter);
173} 173}
174 174
175#if 0
175int netxen_nic_link_ok(struct netxen_adapter *adapter) 176int netxen_nic_link_ok(struct netxen_adapter *adapter)
176{ 177{
177 switch (adapter->ahw.board_type) { 178 switch (adapter->ahw.board_type) {
@@ -189,6 +190,7 @@ int netxen_nic_link_ok(struct netxen_adapter *adapter)
189 190
190 return 0; 191 return 0;
191} 192}
193#endif /* 0 */
192 194
193void netxen_nic_xgbe_handle_phy_intr(struct netxen_adapter *adapter) 195void netxen_nic_xgbe_handle_phy_intr(struct netxen_adapter *adapter)
194{ 196{
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index a8fb439a4d03..7144c255ce54 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -86,7 +86,24 @@ static struct pci_device_id netxen_pci_tbl[] __devinitdata = {
86 86
87MODULE_DEVICE_TABLE(pci, netxen_pci_tbl); 87MODULE_DEVICE_TABLE(pci, netxen_pci_tbl);
88 88
89struct workqueue_struct *netxen_workq; 89/*
90 * In netxen_nic_down(), we must wait for any pending callback requests into
91 * netxen_watchdog_task() to complete; eg otherwise the watchdog_timer could be
92 * reenabled right after it is deleted in netxen_nic_down().
93 * FLUSH_SCHEDULED_WORK() does this synchronization.
94 *
95 * Normally, schedule_work()/flush_scheduled_work() could have worked, but
96 * netxen_nic_close() is invoked with kernel rtnl lock held. netif_carrier_off()
97 * call in netxen_nic_close() triggers a schedule_work(&linkwatch_work), and a
98 * subsequent call to flush_scheduled_work() in netxen_nic_down() would cause
99 * linkwatch_event() to be executed which also attempts to acquire the rtnl
100 * lock thus causing a deadlock.
101 */
102
103static struct workqueue_struct *netxen_workq;
104#define SCHEDULE_WORK(tp) queue_work(netxen_workq, tp)
105#define FLUSH_SCHEDULED_WORK() flush_workqueue(netxen_workq)
106
90static void netxen_watchdog(unsigned long); 107static void netxen_watchdog(unsigned long);
91 108
92static void netxen_nic_update_cmd_producer(struct netxen_adapter *adapter, 109static void netxen_nic_update_cmd_producer(struct netxen_adapter *adapter,
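
Note: the comment block moved into netxen_nic_main.c above explains the reason for a driver-private workqueue: flush_scheduled_work() would run linkwatch_event(), which needs the rtnl lock already held on the netxen_nic_close() path, so the driver flushes only its own queue. A minimal sketch of that pattern, with hypothetical names (sketch_wq, sketch_work) standing in for the driver's:

    #include <linux/workqueue.h>
    #include <linux/errno.h>

    static struct workqueue_struct *sketch_wq;      /* hypothetical */
    static struct work_struct sketch_work;          /* hypothetical */

    static void sketch_work_fn(struct work_struct *work)
    {
            /* watchdog-style deferred work would run here */
    }

    static int sketch_up(void)
    {
            sketch_wq = create_singlethread_workqueue("sketch_wq");
            if (!sketch_wq)
                    return -ENOMEM;
            INIT_WORK(&sketch_work, sketch_work_fn);
            queue_work(sketch_wq, &sketch_work);    /* i.e. SCHEDULE_WORK() */
            return 0;
    }

    static void sketch_down(void)
    {
            /* Flush only our own queue (FLUSH_SCHEDULED_WORK() above);
             * flush_scheduled_work() could deadlock on the rtnl lock via
             * linkwatch_event(), as the moved comment explains. */
            flush_workqueue(sketch_wq);
            destroy_workqueue(sketch_wq);
    }

Making netxen_workq static and keeping the SCHEDULE_WORK()/FLUSH_SCHEDULED_WORK() macros next to it, as the hunk does, keeps the whole mechanism local to netxen_nic_main.c.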
diff --git a/drivers/net/ni52.c b/drivers/net/ni52.c
index 26aa8fe1fb2d..a316dcc8a06d 100644
--- a/drivers/net/ni52.c
+++ b/drivers/net/ni52.c
@@ -134,10 +134,10 @@ static int fifo = 0x8; /* don't change */
134#define ni_disint() { outb(0, dev->base_addr + NI52_INTDIS); } 134#define ni_disint() { outb(0, dev->base_addr + NI52_INTDIS); }
135#define ni_enaint() { outb(0, dev->base_addr + NI52_INTENA); } 135#define ni_enaint() { outb(0, dev->base_addr + NI52_INTENA); }
136 136
137#define make32(ptr16) (p->memtop + (short) (ptr16)) 137#define make32(ptr16) ((void __iomem *)(p->memtop + (short) (ptr16)))
138#define make24(ptr32) ((unsigned long)(ptr32)) - p->base 138#define make24(ptr32) ((char __iomem *)(ptr32)) - p->base
139#define make16(ptr32) ((unsigned short) ((unsigned long)(ptr32)\ 139#define make16(ptr32) ((unsigned short) ((char __iomem *)(ptr32)\
140 - (unsigned long) p->memtop)) 140 - p->memtop))
141 141
142/******************* how to calculate the buffers ***************************** 142/******************* how to calculate the buffers *****************************
143 143
@@ -179,34 +179,35 @@ static void ni52_timeout(struct net_device *dev);
179 179
180/* helper-functions */ 180/* helper-functions */
181static int init586(struct net_device *dev); 181static int init586(struct net_device *dev);
182static int check586(struct net_device *dev, char *where, unsigned size); 182static int check586(struct net_device *dev, unsigned size);
183static void alloc586(struct net_device *dev); 183static void alloc586(struct net_device *dev);
184static void startrecv586(struct net_device *dev); 184static void startrecv586(struct net_device *dev);
185static void *alloc_rfa(struct net_device *dev, void *ptr); 185static void __iomem *alloc_rfa(struct net_device *dev, void __iomem *ptr);
186static void ni52_rcv_int(struct net_device *dev); 186static void ni52_rcv_int(struct net_device *dev);
187static void ni52_xmt_int(struct net_device *dev); 187static void ni52_xmt_int(struct net_device *dev);
188static void ni52_rnr_int(struct net_device *dev); 188static void ni52_rnr_int(struct net_device *dev);
189 189
190struct priv { 190struct priv {
191 struct net_device_stats stats; 191 struct net_device_stats stats;
192 unsigned long base; 192 char __iomem *base;
193 char *memtop; 193 char __iomem *mapped;
194 char __iomem *memtop;
194 spinlock_t spinlock; 195 spinlock_t spinlock;
195 int reset; 196 int reset;
196 struct rfd_struct *rfd_last, *rfd_top, *rfd_first; 197 struct rfd_struct __iomem *rfd_last, *rfd_top, *rfd_first;
197 struct scp_struct *scp; 198 struct scp_struct __iomem *scp;
198 struct iscp_struct *iscp; 199 struct iscp_struct __iomem *iscp;
199 struct scb_struct *scb; 200 struct scb_struct __iomem *scb;
200 struct tbd_struct *xmit_buffs[NUM_XMIT_BUFFS]; 201 struct tbd_struct __iomem *xmit_buffs[NUM_XMIT_BUFFS];
201#if (NUM_XMIT_BUFFS == 1) 202#if (NUM_XMIT_BUFFS == 1)
202 struct transmit_cmd_struct *xmit_cmds[2]; 203 struct transmit_cmd_struct __iomem *xmit_cmds[2];
203 struct nop_cmd_struct *nop_cmds[2]; 204 struct nop_cmd_struct __iomem *nop_cmds[2];
204#else 205#else
205 struct transmit_cmd_struct *xmit_cmds[NUM_XMIT_BUFFS]; 206 struct transmit_cmd_struct __iomem *xmit_cmds[NUM_XMIT_BUFFS];
206 struct nop_cmd_struct *nop_cmds[NUM_XMIT_BUFFS]; 207 struct nop_cmd_struct __iomem *nop_cmds[NUM_XMIT_BUFFS];
207#endif 208#endif
208 int nop_point, num_recv_buffs; 209 int nop_point, num_recv_buffs;
209 char *xmit_cbuffs[NUM_XMIT_BUFFS]; 210 char __iomem *xmit_cbuffs[NUM_XMIT_BUFFS];
210 int xmit_count, xmit_last; 211 int xmit_count, xmit_last;
211}; 212};
212 213
@@ -240,7 +241,8 @@ static void wait_for_scb_cmd_ruc(struct net_device *dev)
240 udelay(4); 241 udelay(4);
241 if (i == 16383) { 242 if (i == 16383) {
242 printk(KERN_ERR "%s: scb_cmd (ruc) timed out: %04x,%04x .. disabling i82586!!\n", 243 printk(KERN_ERR "%s: scb_cmd (ruc) timed out: %04x,%04x .. disabling i82586!!\n",
243 dev->name, p->scb->cmd_ruc, p->scb->rus); 244 dev->name, readb(&p->scb->cmd_ruc),
245 readb(&p->scb->rus));
244 if (!p->reset) { 246 if (!p->reset) {
245 p->reset = 1; 247 p->reset = 1;
246 ni_reset586(); 248 ni_reset586();
@@ -249,9 +251,9 @@ static void wait_for_scb_cmd_ruc(struct net_device *dev)
249 } 251 }
250} 252}
251 253
252static void wait_for_stat_compl(void *p) 254static void wait_for_stat_compl(void __iomem *p)
253{ 255{
254 struct nop_cmd_struct *addr = p; 256 struct nop_cmd_struct __iomem *addr = p;
255 int i; 257 int i;
256 for (i = 0; i < 32767; i++) { 258 for (i = 0; i < 32767; i++) {
257 if (readw(&((addr)->cmd_status)) & STAT_COMPL) 259 if (readw(&((addr)->cmd_status)) & STAT_COMPL)
@@ -293,47 +295,58 @@ static int ni52_open(struct net_device *dev)
293 return 0; /* most done by init */ 295 return 0; /* most done by init */
294} 296}
295 297
298static int check_iscp(struct net_device *dev, void __iomem *addr)
299{
300 struct iscp_struct __iomem *iscp = addr;
301 struct priv *p = dev->priv;
302 memset_io(iscp, 0, sizeof(struct iscp_struct));
303
304 writel(make24(iscp), &p->scp->iscp);
305 writeb(1, &iscp->busy);
306
307 ni_reset586();
308 ni_attn586();
309 mdelay(32); /* wait a while... */
310 /* i82586 clears 'busy' after successful init */
311 if (readb(&iscp->busy))
312 return 0;
313 return 1;
314}
315
296/********************************************** 316/**********************************************
297 * Check to see if there's an 82586 out there. 317 * Check to see if there's an 82586 out there.
298 */ 318 */
299static int check586(struct net_device *dev, char *where, unsigned size) 319static int check586(struct net_device *dev, unsigned size)
300{ 320{
301 struct priv pb; 321 struct priv *p = dev->priv;
302 struct priv *p = /* (struct priv *) dev->priv*/ &pb;
303 char *iscp_addrs[2];
304 int i; 322 int i;
305 323
306 p->base = (unsigned long) isa_bus_to_virt((unsigned long)where) 324 p->mapped = ioremap(dev->mem_start, size);
307 + size - 0x01000000; 325 if (!p->mapped)
308 p->memtop = isa_bus_to_virt((unsigned long)where) + size; 326 return 0;
309 p->scp = (struct scp_struct *)(p->base + SCP_DEFAULT_ADDRESS); 327
310 memset_io((char *)p->scp, 0, sizeof(struct scp_struct)); 328 p->base = p->mapped + size - 0x01000000;
329 p->memtop = p->mapped + size;
330 p->scp = (struct scp_struct __iomem *)(p->base + SCP_DEFAULT_ADDRESS);
331 p->scb = (struct scb_struct __iomem *) p->mapped;
332 p->iscp = (struct iscp_struct __iomem *)p->scp - 1;
333 memset_io(p->scp, 0, sizeof(struct scp_struct));
311 for (i = 0; i < sizeof(struct scp_struct); i++) 334 for (i = 0; i < sizeof(struct scp_struct); i++)
312 /* memory was writeable? */ 335 /* memory was writeable? */
313 if (readb((char *)p->scp + i)) 336 if (readb((char __iomem *)p->scp + i))
314 return 0; 337 goto Enodev;
315 writeb(SYSBUSVAL, &p->scp->sysbus); /* 1 = 8Bit-Bus, 0 = 16 Bit */ 338 writeb(SYSBUSVAL, &p->scp->sysbus); /* 1 = 8Bit-Bus, 0 = 16 Bit */
316 if (readb(&p->scp->sysbus) != SYSBUSVAL) 339 if (readb(&p->scp->sysbus) != SYSBUSVAL)
317 return 0; 340 goto Enodev;
318
319 iscp_addrs[0] = isa_bus_to_virt((unsigned long)where);
320 iscp_addrs[1] = (char *) p->scp - sizeof(struct iscp_struct);
321 341
322 for (i = 0; i < 2; i++) { 342 if (!check_iscp(dev, p->mapped))
323 p->iscp = (struct iscp_struct *) iscp_addrs[i]; 343 goto Enodev;
324 memset_io((char *)p->iscp, 0, sizeof(struct iscp_struct)); 344 if (!check_iscp(dev, p->iscp))
325 345 goto Enodev;
326 writel(make24(p->iscp), &p->scp->iscp);
327 writeb(1, &p->iscp->busy);
328
329 ni_reset586();
330 ni_attn586();
331 mdelay(32); /* wait a while... */
332 /* i82586 clears 'busy' after successful init */
333 if (readb(&p->iscp->busy))
334 return 0;
335 }
336 return 1; 346 return 1;
347Enodev:
348 iounmap(p->mapped);
349 return 0;
337} 350}
338 351
339/****************************************************************** 352/******************************************************************
@@ -346,13 +359,6 @@ static void alloc586(struct net_device *dev)
346 ni_reset586(); 359 ni_reset586();
347 mdelay(32); 360 mdelay(32);
348 361
349 spin_lock_init(&p->spinlock);
350
351 p->scp = (struct scp_struct *) (p->base + SCP_DEFAULT_ADDRESS);
352 p->scb = (struct scb_struct *) isa_bus_to_virt(dev->mem_start);
353 p->iscp = (struct iscp_struct *)
354 ((char *)p->scp - sizeof(struct iscp_struct));
355
356 memset_io(p->iscp, 0, sizeof(struct iscp_struct)); 362 memset_io(p->iscp, 0, sizeof(struct iscp_struct));
357 memset_io(p->scp , 0, sizeof(struct scp_struct)); 363 memset_io(p->scp , 0, sizeof(struct scp_struct));
358 364
@@ -371,7 +377,7 @@ static void alloc586(struct net_device *dev)
371 377
372 p->reset = 0; 378 p->reset = 0;
373 379
374 memset_io((char *)p->scb, 0, sizeof(struct scb_struct)); 380 memset_io(p->scb, 0, sizeof(struct scb_struct));
375} 381}
376 382
377/* set: io,irq,memstart,memend or set it when calling insmod */ 383/* set: io,irq,memstart,memend or set it when calling insmod */
@@ -387,12 +393,15 @@ struct net_device * __init ni52_probe(int unit)
387{ 393{
388 struct net_device *dev = alloc_etherdev(sizeof(struct priv)); 394 struct net_device *dev = alloc_etherdev(sizeof(struct priv));
389 static int ports[] = {0x300, 0x280, 0x360 , 0x320 , 0x340, 0}; 395 static int ports[] = {0x300, 0x280, 0x360 , 0x320 , 0x340, 0};
396 struct priv *p;
390 int *port; 397 int *port;
391 int err = 0; 398 int err = 0;
392 399
393 if (!dev) 400 if (!dev)
394 return ERR_PTR(-ENOMEM); 401 return ERR_PTR(-ENOMEM);
395 402
403 p = dev->priv;
404
396 if (unit >= 0) { 405 if (unit >= 0) {
397 sprintf(dev->name, "eth%d", unit); 406 sprintf(dev->name, "eth%d", unit);
398 netdev_boot_setup_check(dev); 407 netdev_boot_setup_check(dev);
@@ -427,6 +436,7 @@ got_it:
427 goto out1; 436 goto out1;
428 return dev; 437 return dev;
429out1: 438out1:
439 iounmap(p->mapped);
430 release_region(dev->base_addr, NI52_TOTAL_SIZE); 440 release_region(dev->base_addr, NI52_TOTAL_SIZE);
431out: 441out:
432 free_netdev(dev); 442 free_netdev(dev);
@@ -436,12 +446,15 @@ out:
436static int __init ni52_probe1(struct net_device *dev, int ioaddr) 446static int __init ni52_probe1(struct net_device *dev, int ioaddr)
437{ 447{
438 int i, size, retval; 448 int i, size, retval;
449 struct priv *priv = dev->priv;
439 450
440 dev->base_addr = ioaddr; 451 dev->base_addr = ioaddr;
441 dev->irq = irq; 452 dev->irq = irq;
442 dev->mem_start = memstart; 453 dev->mem_start = memstart;
443 dev->mem_end = memend; 454 dev->mem_end = memend;
444 455
456 spin_lock_init(&priv->spinlock);
457
445 if (!request_region(ioaddr, NI52_TOTAL_SIZE, DRV_NAME)) 458 if (!request_region(ioaddr, NI52_TOTAL_SIZE, DRV_NAME))
446 return -EBUSY; 459 return -EBUSY;
447 460
@@ -474,7 +487,7 @@ static int __init ni52_probe1(struct net_device *dev, int ioaddr)
474 retval = -ENODEV; 487 retval = -ENODEV;
475 goto out; 488 goto out;
476 } 489 }
477 if (!check586(dev, (char *)dev->mem_start, size)) { 490 if (!check586(dev, size)) {
478 printk(KERN_ERR "?memcheck, Can't find memory at 0x%lx with size %d!\n", dev->mem_start, size); 491 printk(KERN_ERR "?memcheck, Can't find memory at 0x%lx with size %d!\n", dev->mem_start, size);
479 retval = -ENODEV; 492 retval = -ENODEV;
480 goto out; 493 goto out;
@@ -483,9 +496,9 @@ static int __init ni52_probe1(struct net_device *dev, int ioaddr)
483 if (dev->mem_start != 0) { 496 if (dev->mem_start != 0) {
484 /* no auto-mem-probe */ 497 /* no auto-mem-probe */
485 size = 0x4000; /* check for 16K mem */ 498 size = 0x4000; /* check for 16K mem */
486 if (!check586(dev, (char *) dev->mem_start, size)) { 499 if (!check586(dev, size)) {
487 size = 0x2000; /* check for 8K mem */ 500 size = 0x2000; /* check for 8K mem */
488 if (!check586(dev, (char *)dev->mem_start, size)) { 501 if (!check586(dev, size)) {
489 printk(KERN_ERR "?memprobe, Can't find memory at 0x%lx!\n", dev->mem_start); 502 printk(KERN_ERR "?memprobe, Can't find memory at 0x%lx!\n", dev->mem_start);
490 retval = -ENODEV; 503 retval = -ENODEV;
491 goto out; 504 goto out;
@@ -504,11 +517,11 @@ static int __init ni52_probe1(struct net_device *dev, int ioaddr)
504 } 517 }
505 dev->mem_start = memaddrs[i]; 518 dev->mem_start = memaddrs[i];
506 size = 0x2000; /* check for 8K mem */ 519 size = 0x2000; /* check for 8K mem */
507 if (check586(dev, (char *)dev->mem_start, size)) 520 if (check586(dev, size))
508 /* 8K-check */ 521 /* 8K-check */
509 break; 522 break;
510 size = 0x4000; /* check for 16K mem */ 523 size = 0x4000; /* check for 16K mem */
511 if (check586(dev, (char *)dev->mem_start, size)) 524 if (check586(dev, size))
512 /* 16K-check */ 525 /* 16K-check */
513 break; 526 break;
514 } 527 }
@@ -517,19 +530,13 @@ static int __init ni52_probe1(struct net_device *dev, int ioaddr)
517 dev->mem_end = dev->mem_start + size; 530 dev->mem_end = dev->mem_start + size;
518#endif 531#endif
519 532
520 memset((char *)dev->priv, 0, sizeof(struct priv));
521
522 ((struct priv *)(dev->priv))->memtop =
523 isa_bus_to_virt(dev->mem_start) + size;
524 ((struct priv *)(dev->priv))->base = (unsigned long)
525 isa_bus_to_virt(dev->mem_start) + size - 0x01000000;
526 alloc586(dev); 533 alloc586(dev);
527 534
528 /* set number of receive-buffs according to memsize */ 535 /* set number of receive-buffs according to memsize */
529 if (size == 0x2000) 536 if (size == 0x2000)
530 ((struct priv *) dev->priv)->num_recv_buffs = NUM_RECV_BUFFS_8; 537 priv->num_recv_buffs = NUM_RECV_BUFFS_8;
531 else 538 else
532 ((struct priv *) dev->priv)->num_recv_buffs = NUM_RECV_BUFFS_16; 539 priv->num_recv_buffs = NUM_RECV_BUFFS_16;
533 540
534 printk(KERN_DEBUG "Memaddr: 0x%lx, Memsize: %d, ", 541 printk(KERN_DEBUG "Memaddr: 0x%lx, Memsize: %d, ",
535 dev->mem_start, size); 542 dev->mem_start, size);
@@ -546,6 +553,7 @@ static int __init ni52_probe1(struct net_device *dev, int ioaddr)
546 if (!dev->irq) { 553 if (!dev->irq) {
547 printk("?autoirq, Failed to detect IRQ line!\n"); 554 printk("?autoirq, Failed to detect IRQ line!\n");
548 retval = -EAGAIN; 555 retval = -EAGAIN;
556 iounmap(priv->mapped);
549 goto out; 557 goto out;
550 } 558 }
551 printk("IRQ %d (autodetected).\n", dev->irq); 559 printk("IRQ %d (autodetected).\n", dev->irq);
@@ -578,19 +586,19 @@ out:
578 586
579static int init586(struct net_device *dev) 587static int init586(struct net_device *dev)
580{ 588{
581 void *ptr; 589 void __iomem *ptr;
582 int i, result = 0; 590 int i, result = 0;
583 struct priv *p = (struct priv *)dev->priv; 591 struct priv *p = (struct priv *)dev->priv;
584 struct configure_cmd_struct *cfg_cmd; 592 struct configure_cmd_struct __iomem *cfg_cmd;
585 struct iasetup_cmd_struct *ias_cmd; 593 struct iasetup_cmd_struct __iomem *ias_cmd;
586 struct tdr_cmd_struct *tdr_cmd; 594 struct tdr_cmd_struct __iomem *tdr_cmd;
587 struct mcsetup_cmd_struct *mc_cmd; 595 struct mcsetup_cmd_struct __iomem *mc_cmd;
588 struct dev_mc_list *dmi = dev->mc_list; 596 struct dev_mc_list *dmi = dev->mc_list;
589 int num_addrs = dev->mc_count; 597 int num_addrs = dev->mc_count;
590 598
591 ptr = (void *) ((char *)p->scb + sizeof(struct scb_struct)); 599 ptr = p->scb + 1;
592 600
593 cfg_cmd = (struct configure_cmd_struct *)ptr; /* configure-command */ 601 cfg_cmd = ptr; /* configure-command */
594 writew(0, &cfg_cmd->cmd_status); 602 writew(0, &cfg_cmd->cmd_status);
595 writew(CMD_CONFIGURE | CMD_LAST, &cfg_cmd->cmd_cmd); 603 writew(CMD_CONFIGURE | CMD_LAST, &cfg_cmd->cmd_cmd);
596 writew(0xFFFF, &cfg_cmd->cmd_link); 604 writew(0xFFFF, &cfg_cmd->cmd_link);
@@ -609,7 +617,7 @@ static int init586(struct net_device *dev)
609 writeb(0xf2, &cfg_cmd->time_high); 617 writeb(0xf2, &cfg_cmd->time_high);
610 writeb(0x00, &cfg_cmd->promisc);; 618 writeb(0x00, &cfg_cmd->promisc);;
611 if (dev->flags & IFF_ALLMULTI) { 619 if (dev->flags & IFF_ALLMULTI) {
612 int len = ((char *) p->iscp - (char *) ptr - 8) / 6; 620 int len = ((char __iomem *)p->iscp - (char __iomem *)ptr - 8) / 6;
613 if (num_addrs > len) { 621 if (num_addrs > len) {
614 printk(KERN_ERR "%s: switching to promisc. mode\n", 622 printk(KERN_ERR "%s: switching to promisc. mode\n",
615 dev->name); 623 dev->name);
@@ -620,7 +628,7 @@ static int init586(struct net_device *dev)
620 writeb(0x01, &cfg_cmd->promisc); 628 writeb(0x01, &cfg_cmd->promisc);
621 writeb(0x00, &cfg_cmd->carr_coll); 629 writeb(0x00, &cfg_cmd->carr_coll);
622 writew(make16(cfg_cmd), &p->scb->cbl_offset); 630 writew(make16(cfg_cmd), &p->scb->cbl_offset);
623 writew(0, &p->scb->cmd_ruc); 631 writeb(0, &p->scb->cmd_ruc);
624 632
625 writeb(CUC_START, &p->scb->cmd_cuc); /* cmd.-unit start */ 633 writeb(CUC_START, &p->scb->cmd_cuc); /* cmd.-unit start */
626 ni_attn586(); 634 ni_attn586();
@@ -638,13 +646,13 @@ static int init586(struct net_device *dev)
638 * individual address setup 646 * individual address setup
639 */ 647 */
640 648
641 ias_cmd = (struct iasetup_cmd_struct *)ptr; 649 ias_cmd = ptr;
642 650
643 writew(0, &ias_cmd->cmd_status); 651 writew(0, &ias_cmd->cmd_status);
644 writew(CMD_IASETUP | CMD_LAST, &ias_cmd->cmd_cmd); 652 writew(CMD_IASETUP | CMD_LAST, &ias_cmd->cmd_cmd);
645 writew(0xffff, &ias_cmd->cmd_link); 653 writew(0xffff, &ias_cmd->cmd_link);
646 654
647 memcpy_toio((char *)&ias_cmd->iaddr, (char *)dev->dev_addr, ETH_ALEN); 655 memcpy_toio(&ias_cmd->iaddr, (char *)dev->dev_addr, ETH_ALEN);
648 656
649 writew(make16(ias_cmd), &p->scb->cbl_offset); 657 writew(make16(ias_cmd), &p->scb->cbl_offset);
650 658
@@ -663,7 +671,7 @@ static int init586(struct net_device *dev)
663 * TDR, wire check .. e.g. no resistor e.t.c 671 * TDR, wire check .. e.g. no resistor e.t.c
664 */ 672 */
665 673
666 tdr_cmd = (struct tdr_cmd_struct *)ptr; 674 tdr_cmd = ptr;
667 675
668 writew(0, &tdr_cmd->cmd_status); 676 writew(0, &tdr_cmd->cmd_status);
669 writew(CMD_TDR | CMD_LAST, &tdr_cmd->cmd_cmd); 677 writew(CMD_TDR | CMD_LAST, &tdr_cmd->cmd_cmd);
@@ -707,14 +715,14 @@ static int init586(struct net_device *dev)
707 * Multicast setup 715 * Multicast setup
708 */ 716 */
709 if (num_addrs && !(dev->flags & IFF_PROMISC)) { 717 if (num_addrs && !(dev->flags & IFF_PROMISC)) {
710 mc_cmd = (struct mcsetup_cmd_struct *) ptr; 718 mc_cmd = ptr;
711 writew(0, &mc_cmd->cmd_status); 719 writew(0, &mc_cmd->cmd_status);
712 writew(CMD_MCSETUP | CMD_LAST, &mc_cmd->cmd_cmd); 720 writew(CMD_MCSETUP | CMD_LAST, &mc_cmd->cmd_cmd);
713 writew(0xffff, &mc_cmd->cmd_link); 721 writew(0xffff, &mc_cmd->cmd_link);
714 writew(num_addrs * 6, &mc_cmd->mc_cnt); 722 writew(num_addrs * 6, &mc_cmd->mc_cnt);
715 723
716 for (i = 0; i < num_addrs; i++, dmi = dmi->next) 724 for (i = 0; i < num_addrs; i++, dmi = dmi->next)
717 memcpy_toio((char *) mc_cmd->mc_list[i], 725 memcpy_toio(mc_cmd->mc_list[i],
718 dmi->dmi_addr, 6); 726 dmi->dmi_addr, 6);
719 727
720 writew(make16(mc_cmd), &p->scb->cbl_offset); 728 writew(make16(mc_cmd), &p->scb->cbl_offset);
@@ -733,43 +741,43 @@ static int init586(struct net_device *dev)
733 */ 741 */
734#if (NUM_XMIT_BUFFS == 1) 742#if (NUM_XMIT_BUFFS == 1)
735 for (i = 0; i < 2; i++) { 743 for (i = 0; i < 2; i++) {
736 p->nop_cmds[i] = (struct nop_cmd_struct *)ptr; 744 p->nop_cmds[i] = ptr;
737 writew(CMD_NOP, &p->nop_cmds[i]->cmd_cmd); 745 writew(CMD_NOP, &p->nop_cmds[i]->cmd_cmd);
738 writew(0, &p->nop_cmds[i]->cmd_status); 746 writew(0, &p->nop_cmds[i]->cmd_status);
739 writew(make16(p->nop_cmds[i]), &p->nop_cmds[i]->cmd_link); 747 writew(make16(p->nop_cmds[i]), &p->nop_cmds[i]->cmd_link);
740 ptr = (char *) ptr + sizeof(struct nop_cmd_struct); 748 ptr = ptr + sizeof(struct nop_cmd_struct);
741 } 749 }
742#else 750#else
743 for (i = 0; i < NUM_XMIT_BUFFS; i++) { 751 for (i = 0; i < NUM_XMIT_BUFFS; i++) {
744 p->nop_cmds[i] = (struct nop_cmd_struct *)ptr; 752 p->nop_cmds[i] = ptr;
745 writew(CMD_NOP, &p->nop_cmds[i]->cmd_cmd); 753 writew(CMD_NOP, &p->nop_cmds[i]->cmd_cmd);
746 writew(0, &p->nop_cmds[i]->cmd_status); 754 writew(0, &p->nop_cmds[i]->cmd_status);
747 writew(make16(p->nop_cmds[i]), &p->nop_cmds[i]->cmd_link); 755 writew(make16(p->nop_cmds[i]), &p->nop_cmds[i]->cmd_link);
748 ptr = (char *) ptr + sizeof(struct nop_cmd_struct); 756 ptr = ptr + sizeof(struct nop_cmd_struct);
749 } 757 }
750#endif 758#endif
751 759
752 ptr = alloc_rfa(dev, (void *)ptr); /* init receive-frame-area */ 760 ptr = alloc_rfa(dev, ptr); /* init receive-frame-area */
753 761
754 /* 762 /*
755 * alloc xmit-buffs / init xmit_cmds 763 * alloc xmit-buffs / init xmit_cmds
756 */ 764 */
757 for (i = 0; i < NUM_XMIT_BUFFS; i++) { 765 for (i = 0; i < NUM_XMIT_BUFFS; i++) {
758 /* Transmit cmd/buff 0 */ 766 /* Transmit cmd/buff 0 */
759 p->xmit_cmds[i] = (struct transmit_cmd_struct *)ptr; 767 p->xmit_cmds[i] = ptr;
760 ptr = (char *) ptr + sizeof(struct transmit_cmd_struct); 768 ptr = ptr + sizeof(struct transmit_cmd_struct);
761 p->xmit_cbuffs[i] = (char *)ptr; /* char-buffs */ 769 p->xmit_cbuffs[i] = ptr; /* char-buffs */
762 ptr = (char *) ptr + XMIT_BUFF_SIZE; 770 ptr = ptr + XMIT_BUFF_SIZE;
763 p->xmit_buffs[i] = (struct tbd_struct *)ptr; /* TBD */ 771 p->xmit_buffs[i] = ptr; /* TBD */
764 ptr = (char *) ptr + sizeof(struct tbd_struct); 772 ptr = ptr + sizeof(struct tbd_struct);
765 if ((void *)ptr > (void *)p->iscp) { 773 if ((void __iomem *)ptr > (void __iomem *)p->iscp) {
766 printk(KERN_ERR "%s: not enough shared-mem for your configuration!\n", 774 printk(KERN_ERR "%s: not enough shared-mem for your configuration!\n",
767 dev->name); 775 dev->name);
768 return 1; 776 return 1;
769 } 777 }
770 memset_io((char *)(p->xmit_cmds[i]), 0, 778 memset_io(p->xmit_cmds[i], 0,
771 sizeof(struct transmit_cmd_struct)); 779 sizeof(struct transmit_cmd_struct));
772 memset_io((char *)(p->xmit_buffs[i]), 0, 780 memset_io(p->xmit_buffs[i], 0,
773 sizeof(struct tbd_struct)); 781 sizeof(struct tbd_struct));
774 writew(make16(p->nop_cmds[(i+1)%NUM_XMIT_BUFFS]), 782 writew(make16(p->nop_cmds[(i+1)%NUM_XMIT_BUFFS]),
775 &p->xmit_cmds[i]->cmd_link); 783 &p->xmit_cmds[i]->cmd_link);
@@ -816,14 +824,14 @@ static int init586(struct net_device *dev)
816 * It sets up the Receive Frame Area (RFA). 824 * It sets up the Receive Frame Area (RFA).
817 */ 825 */
818 826
819static void *alloc_rfa(struct net_device *dev, void *ptr) 827static void __iomem *alloc_rfa(struct net_device *dev, void __iomem *ptr)
820{ 828{
821 struct rfd_struct *rfd = (struct rfd_struct *)ptr; 829 struct rfd_struct __iomem *rfd = ptr;
822 struct rbd_struct *rbd; 830 struct rbd_struct __iomem *rbd;
823 int i; 831 int i;
824 struct priv *p = (struct priv *) dev->priv; 832 struct priv *p = (struct priv *) dev->priv;
825 833
826 memset_io((char *) rfd, 0, 834 memset_io(rfd, 0,
827 sizeof(struct rfd_struct) * (p->num_recv_buffs + rfdadd)); 835 sizeof(struct rfd_struct) * (p->num_recv_buffs + rfdadd));
828 p->rfd_first = rfd; 836 p->rfd_first = rfd;
829 837
@@ -835,20 +843,19 @@ static void *alloc_rfa(struct net_device *dev, void *ptr)
835 /* RU suspend */ 843 /* RU suspend */
836 writeb(RFD_SUSP, &rfd[p->num_recv_buffs-1+rfdadd].last); 844 writeb(RFD_SUSP, &rfd[p->num_recv_buffs-1+rfdadd].last);
837 845
838 ptr = (void *) (rfd + (p->num_recv_buffs + rfdadd)); 846 ptr = rfd + (p->num_recv_buffs + rfdadd);
839 847
840 rbd = (struct rbd_struct *) ptr; 848 rbd = ptr;
841 ptr = (void *) (rbd + p->num_recv_buffs); 849 ptr = rbd + p->num_recv_buffs;
842 850
843 /* clr descriptors */ 851 /* clr descriptors */
844 memset_io((char *)rbd, 0, 852 memset_io(rbd, 0, sizeof(struct rbd_struct) * (p->num_recv_buffs));
845 sizeof(struct rbd_struct) * (p->num_recv_buffs));
846 853
847 for (i = 0; i < p->num_recv_buffs; i++) { 854 for (i = 0; i < p->num_recv_buffs; i++) {
848 writew(make16(rbd + (i+1) % p->num_recv_buffs), &rbd[i].next); 855 writew(make16(rbd + (i+1) % p->num_recv_buffs), &rbd[i].next);
849 writew(RECV_BUFF_SIZE, &rbd[i].size); 856 writew(RECV_BUFF_SIZE, &rbd[i].size);
850 writel(make24(ptr), &rbd[i].buffer); 857 writel(make24(ptr), &rbd[i].buffer);
851 ptr = (char *) ptr + RECV_BUFF_SIZE; 858 ptr = ptr + RECV_BUFF_SIZE;
852 } 859 }
853 p->rfd_top = p->rfd_first; 860 p->rfd_top = p->rfd_first;
854 p->rfd_last = p->rfd_first + (p->num_recv_buffs - 1 + rfdadd); 861 p->rfd_last = p->rfd_first + (p->num_recv_buffs - 1 + rfdadd);
@@ -892,7 +899,7 @@ static irqreturn_t ni52_interrupt(int irq, void *dev_id)
892 if (readb(&p->scb->rus) & RU_SUSPEND) { 899 if (readb(&p->scb->rus) & RU_SUSPEND) {
893 /* special case: RU_SUSPEND */ 900 /* special case: RU_SUSPEND */
894 wait_for_scb_cmd(dev); 901 wait_for_scb_cmd(dev);
895 p->scb->cmd_ruc = RUC_RESUME; 902 writeb(RUC_RESUME, &p->scb->cmd_ruc);
896 ni_attn586(); 903 ni_attn586();
897 wait_for_scb_cmd_ruc(dev); 904 wait_for_scb_cmd_ruc(dev);
898 } else { 905 } else {
@@ -919,7 +926,7 @@ static irqreturn_t ni52_interrupt(int irq, void *dev_id)
919 926
920 /* Wait for ack. (ni52_xmt_int can be faster than ack!!) */ 927 /* Wait for ack. (ni52_xmt_int can be faster than ack!!) */
921 wait_for_scb_cmd(dev); 928 wait_for_scb_cmd(dev);
922 if (p->scb->cmd_cuc) { /* timed out? */ 929 if (readb(&p->scb->cmd_cuc)) { /* timed out? */
923 printk(KERN_ERR "%s: Acknowledge timed out.\n", 930 printk(KERN_ERR "%s: Acknowledge timed out.\n",
924 dev->name); 931 dev->name);
925 ni_disint(); 932 ni_disint();
@@ -942,14 +949,14 @@ static void ni52_rcv_int(struct net_device *dev)
942 int status, cnt = 0; 949 int status, cnt = 0;
943 unsigned short totlen; 950 unsigned short totlen;
944 struct sk_buff *skb; 951 struct sk_buff *skb;
945 struct rbd_struct *rbd; 952 struct rbd_struct __iomem *rbd;
946 struct priv *p = (struct priv *)dev->priv; 953 struct priv *p = (struct priv *)dev->priv;
947 954
948 if (debuglevel > 0) 955 if (debuglevel > 0)
949 printk("R"); 956 printk("R");
950 957
951 for (; (status = readb(&p->rfd_top->stat_high)) & RFD_COMPL;) { 958 for (; (status = readb(&p->rfd_top->stat_high)) & RFD_COMPL;) {
952 rbd = (struct rbd_struct *) make32(p->rfd_top->rbd_offset); 959 rbd = make32(readw(&p->rfd_top->rbd_offset));
953 if (status & RFD_OK) { /* frame received without error? */ 960 if (status & RFD_OK) { /* frame received without error? */
954 totlen = readw(&rbd->status); 961 totlen = readw(&rbd->status);
955 if (totlen & RBD_LAST) { 962 if (totlen & RBD_LAST) {
@@ -960,7 +967,7 @@ static void ni52_rcv_int(struct net_device *dev)
960 if (skb != NULL) { 967 if (skb != NULL) {
961 skb_reserve(skb, 2); 968 skb_reserve(skb, 2);
962 skb_put(skb, totlen); 969 skb_put(skb, totlen);
963 skb_copy_to_linear_data(skb, (char *)p->base + (unsigned long) rbd->buffer, totlen); 970 memcpy_fromio(skb->data, p->base + readl(&rbd->buffer), totlen);
964 skb->protocol = eth_type_trans(skb, dev); 971 skb->protocol = eth_type_trans(skb, dev);
965 netif_rx(skb); 972 netif_rx(skb);
966 dev->last_rx = jiffies; 973 dev->last_rx = jiffies;
@@ -979,7 +986,7 @@ static void ni52_rcv_int(struct net_device *dev)
979 break; 986 break;
980 } 987 }
981 writew(0, &rbd->status); 988 writew(0, &rbd->status);
982 rbd = (struct rbd_struct *) make32(readl(&rbd->next)); 989 rbd = make32(readw(&rbd->next));
983 } 990 }
984 totlen += rstat & RBD_MASK; 991 totlen += rstat & RBD_MASK;
985 writew(0, &rbd->status); 992 writew(0, &rbd->status);
@@ -997,7 +1004,7 @@ static void ni52_rcv_int(struct net_device *dev)
997 writew(0xffff, &p->rfd_top->rbd_offset); 1004 writew(0xffff, &p->rfd_top->rbd_offset);
998 writeb(0, &p->rfd_last->last); /* delete RFD_SUSP */ 1005 writeb(0, &p->rfd_last->last); /* delete RFD_SUSP */
999 p->rfd_last = p->rfd_top; 1006 p->rfd_last = p->rfd_top;
1000 p->rfd_top = (struct rfd_struct *) make32(p->rfd_top->next); /* step to next RFD */ 1007 p->rfd_top = make32(readw(&p->rfd_top->next)); /* step to next RFD */
1001 writew(make16(p->rfd_top), &p->scb->rfa_offset); 1008 writew(make16(p->rfd_top), &p->scb->rfa_offset);
1002 1009
1003 if (debuglevel > 0) 1010 if (debuglevel > 0)
@@ -1042,11 +1049,12 @@ static void ni52_rnr_int(struct net_device *dev)
1042 ni_attn586(); 1049 ni_attn586();
1043 wait_for_scb_cmd_ruc(dev); /* wait for accept cmd. */ 1050 wait_for_scb_cmd_ruc(dev); /* wait for accept cmd. */
1044 1051
1045 alloc_rfa(dev, (char *)p->rfd_first); 1052 alloc_rfa(dev, p->rfd_first);
1046 /* maybe add a check here, before restarting the RU */ 1053 /* maybe add a check here, before restarting the RU */
1047 startrecv586(dev); /* restart RU */ 1054 startrecv586(dev); /* restart RU */
1048 1055
1049 printk(KERN_ERR "%s: Receive-Unit restarted. Status: %04x\n", dev->name, p->scb->rus); 1056 printk(KERN_ERR "%s: Receive-Unit restarted. Status: %04x\n",
1057 dev->name, readb(&p->scb->rus));
1050 1058
1051} 1059}
1052 1060
@@ -1178,12 +1186,11 @@ static int ni52_send_packet(struct sk_buff *skb, struct net_device *dev)
1178 1186
1179 netif_stop_queue(dev); 1187 netif_stop_queue(dev);
1180 1188
1181 skb_copy_from_linear_data(skb, (char *)p->xmit_cbuffs[p->xmit_count], 1189 memcpy_toio(p->xmit_cbuffs[p->xmit_count], skb->data, skb->len);
1182 skb->len);
1183 len = skb->len; 1190 len = skb->len;
1184 if (len < ETH_ZLEN) { 1191 if (len < ETH_ZLEN) {
1185 len = ETH_ZLEN; 1192 len = ETH_ZLEN;
1186 memset((char *)p->xmit_cbuffs[p->xmit_count]+skb->len, 0, 1193 memset_io(p->xmit_cbuffs[p->xmit_count]+skb->len, 0,
1187 len - skb->len); 1194 len - skb->len);
1188 } 1195 }
1189 1196
@@ -1191,14 +1198,14 @@ static int ni52_send_packet(struct sk_buff *skb, struct net_device *dev)
1191# ifdef NO_NOPCOMMANDS 1198# ifdef NO_NOPCOMMANDS
1192 1199
1193#ifdef DEBUG 1200#ifdef DEBUG
1194 if (p->scb->cus & CU_ACTIVE) { 1201 if (readb(&p->scb->cus) & CU_ACTIVE) {
1195 printk(KERN_ERR "%s: Hmmm .. CU is still running and we wanna send a new packet.\n", dev->name); 1202 printk(KERN_ERR "%s: Hmmm .. CU is still running and we wanna send a new packet.\n", dev->name);
1196 printk(KERN_ERR "%s: stat: %04x %04x\n", 1203 printk(KERN_ERR "%s: stat: %04x %04x\n",
1197 dev->name, readb(&p->scb->cus), 1204 dev->name, readb(&p->scb->cus),
1198 readw(&p->xmit_cmds[0]->cmd_status)); 1205 readw(&p->xmit_cmds[0]->cmd_status));
1199 } 1206 }
1200#endif 1207#endif
1201 writew(TBD_LAST | len, &p->xmit_buffs[0]->size);; 1208 writew(TBD_LAST | len, &p->xmit_buffs[0]->size);
1202 for (i = 0; i < 16; i++) { 1209 for (i = 0; i < 16; i++) {
1203 writew(0, &p->xmit_cmds[0]->cmd_status); 1210 writew(0, &p->xmit_cmds[0]->cmd_status);
1204 wait_for_scb_cmd(dev); 1211 wait_for_scb_cmd(dev);
@@ -1330,7 +1337,9 @@ int __init init_module(void)
1330 1337
1331void __exit cleanup_module(void) 1338void __exit cleanup_module(void)
1332{ 1339{
1340 struct priv *p = dev_ni52->priv;
1333 unregister_netdev(dev_ni52); 1341 unregister_netdev(dev_ni52);
1342 iounmap(p->mapped);
1334 release_region(dev_ni52->base_addr, NI52_TOTAL_SIZE); 1343 release_region(dev_ni52->base_addr, NI52_TOTAL_SIZE);
1335 free_netdev(dev_ni52); 1344 free_netdev(dev_ni52);
1336} 1345}
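
Note: the ni52 rework above replaces isa_bus_to_virt() pointer arithmetic with an ioremap()ed shared-memory window and consistent __iomem accessors (readb/readw, writeb/writew, memset_io, memcpy_toio/memcpy_fromio), storing the mapping in priv->mapped so the error and unload paths can iounmap() it. A compact sketch of that access discipline; the descriptor struct and helper names here are hypothetical, not the real i82586 layouts.

    #include <linux/io.h>
    #include <linux/types.h>

    struct ring_desc_sketch {       /* hypothetical on-card descriptor */
            u16 status;
            u16 next;
            u32 buffer;
    };

    /* Map the card's shared memory once; pair with iounmap() on teardown. */
    static void __iomem *shmem_map(unsigned long phys, unsigned long size)
    {
            return ioremap(phys, size);
    }

    /* Never dereference an __iomem pointer directly -- use the accessors. */
    static void desc_init(struct ring_desc_sketch __iomem *desc)
    {
            memset_io(desc, 0, sizeof(*desc));
            writew(0xffff, &desc->next);
    }

    static u16 desc_status(struct ring_desc_sketch __iomem *desc)
    {
            return readw(&desc->status);
    }

    /* Copy a received frame out of card memory, as ni52_rcv_int() now does. */
    static void copy_frame(void *dst, const void __iomem *src, int len)
    {
            memcpy_fromio(dst, src, len);
    }

This discipline is also why check586() now takes only a size: it maps dev->mem_start itself and jumps to an iounmap() label on failure instead of leaking the mapping.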
diff --git a/drivers/net/ni52.h b/drivers/net/ni52.h
index 1f28a4d1a319..0a03b2883327 100644
--- a/drivers/net/ni52.h
+++ b/drivers/net/ni52.h
@@ -39,8 +39,8 @@ struct scp_struct
39 u16 zero_dum0; /* has to be zero */ 39 u16 zero_dum0; /* has to be zero */
40 u8 sysbus; /* 0=16Bit,1=8Bit */ 40 u8 sysbus; /* 0=16Bit,1=8Bit */
41 u8 zero_dum1; /* has to be zero for 586 */ 41 u8 zero_dum1; /* has to be zero for 586 */
42 u8 zero_dum2; 42 u16 zero_dum2;
43 u8 zero_dum3; 43 u16 zero_dum3;
44 u32 iscp; /* pointer to the iscp-block */ 44 u32 iscp; /* pointer to the iscp-block */
45}; 45};
46 46
diff --git a/drivers/net/niu.c b/drivers/net/niu.c
index d11ba61baa4f..7565c2d7f30e 100644
--- a/drivers/net/niu.c
+++ b/drivers/net/niu.c
@@ -113,6 +113,8 @@ do { if ((np)->msg_enable & NETIF_MSG_##TYPE) \
113#define niu_unlock_parent(np, flags) \ 113#define niu_unlock_parent(np, flags) \
114 spin_unlock_irqrestore(&np->parent->lock, flags) 114 spin_unlock_irqrestore(&np->parent->lock, flags)
115 115
116static int serdes_init_10g_serdes(struct niu *np);
117
116static int __niu_wait_bits_clear_mac(struct niu *np, unsigned long reg, 118static int __niu_wait_bits_clear_mac(struct niu *np, unsigned long reg,
117 u64 bits, int limit, int delay) 119 u64 bits, int limit, int delay)
118{ 120{
@@ -706,6 +708,251 @@ static int serdes_init_1g(struct niu *np)
706 return 0; 708 return 0;
707} 709}
708 710
711static int serdes_init_1g_serdes(struct niu *np)
712{
713 struct niu_link_config *lp = &np->link_config;
714 unsigned long ctrl_reg, test_cfg_reg, pll_cfg, i;
715 u64 ctrl_val, test_cfg_val, sig, mask, val;
716 int err;
717 u64 reset_val, val_rd;
718
719 val = ENET_SERDES_PLL_HRATE0 | ENET_SERDES_PLL_HRATE1 |
720 ENET_SERDES_PLL_HRATE2 | ENET_SERDES_PLL_HRATE3 |
721 ENET_SERDES_PLL_FBDIV0;
722 switch (np->port) {
723 case 0:
724 reset_val = ENET_SERDES_RESET_0;
725 ctrl_reg = ENET_SERDES_0_CTRL_CFG;
726 test_cfg_reg = ENET_SERDES_0_TEST_CFG;
727 pll_cfg = ENET_SERDES_0_PLL_CFG;
728 break;
729 case 1:
730 reset_val = ENET_SERDES_RESET_1;
731 ctrl_reg = ENET_SERDES_1_CTRL_CFG;
732 test_cfg_reg = ENET_SERDES_1_TEST_CFG;
733 pll_cfg = ENET_SERDES_1_PLL_CFG;
734 break;
735
736 default:
737 return -EINVAL;
738 }
739 ctrl_val = (ENET_SERDES_CTRL_SDET_0 |
740 ENET_SERDES_CTRL_SDET_1 |
741 ENET_SERDES_CTRL_SDET_2 |
742 ENET_SERDES_CTRL_SDET_3 |
743 (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT) |
744 (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT) |
745 (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT) |
746 (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT) |
747 (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT) |
748 (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT) |
749 (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT) |
750 (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT));
751 test_cfg_val = 0;
752
753 if (lp->loopback_mode == LOOPBACK_PHY) {
754 test_cfg_val |= ((ENET_TEST_MD_PAD_LOOPBACK <<
755 ENET_SERDES_TEST_MD_0_SHIFT) |
756 (ENET_TEST_MD_PAD_LOOPBACK <<
757 ENET_SERDES_TEST_MD_1_SHIFT) |
758 (ENET_TEST_MD_PAD_LOOPBACK <<
759 ENET_SERDES_TEST_MD_2_SHIFT) |
760 (ENET_TEST_MD_PAD_LOOPBACK <<
761 ENET_SERDES_TEST_MD_3_SHIFT));
762 }
763
764 nw64(ENET_SERDES_RESET, reset_val);
765 mdelay(20);
766 val_rd = nr64(ENET_SERDES_RESET);
767 val_rd &= ~reset_val;
768 nw64(pll_cfg, val);
769 nw64(ctrl_reg, ctrl_val);
770 nw64(test_cfg_reg, test_cfg_val);
771 nw64(ENET_SERDES_RESET, val_rd);
772 mdelay(2000);
773
774 /* Initialize all 4 lanes of the SERDES. */
775 for (i = 0; i < 4; i++) {
776 u32 rxtx_ctrl, glue0;
777
778 err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl);
779 if (err)
780 return err;
781 err = esr_read_glue0(np, i, &glue0);
782 if (err)
783 return err;
784
785 rxtx_ctrl &= ~(ESR_RXTX_CTRL_VMUXLO);
786 rxtx_ctrl |= (ESR_RXTX_CTRL_ENSTRETCH |
787 (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT));
788
789 glue0 &= ~(ESR_GLUE_CTRL0_SRATE |
790 ESR_GLUE_CTRL0_THCNT |
791 ESR_GLUE_CTRL0_BLTIME);
792 glue0 |= (ESR_GLUE_CTRL0_RXLOSENAB |
793 (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT) |
794 (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT) |
795 (BLTIME_300_CYCLES <<
796 ESR_GLUE_CTRL0_BLTIME_SHIFT));
797
798 err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl);
799 if (err)
800 return err;
801 err = esr_write_glue0(np, i, glue0);
802 if (err)
803 return err;
804 }
805
806
807 sig = nr64(ESR_INT_SIGNALS);
808 switch (np->port) {
809 case 0:
810 val = (ESR_INT_SRDY0_P0 | ESR_INT_DET0_P0);
811 mask = val;
812 break;
813
814 case 1:
815 val = (ESR_INT_SRDY0_P1 | ESR_INT_DET0_P1);
816 mask = val;
817 break;
818
819 default:
820 return -EINVAL;
821 }
822
823 if ((sig & mask) != val) {
824 dev_err(np->device, PFX "Port %u signal bits [%08x] are not "
825 "[%08x]\n", np->port, (int) (sig & mask), (int) val);
826 return -ENODEV;
827 }
828
829 return 0;
830}
831
832static int link_status_1g_serdes(struct niu *np, int *link_up_p)
833{
834 struct niu_link_config *lp = &np->link_config;
835 int link_up;
836 u64 val;
837 u16 current_speed;
838 unsigned long flags;
839 u8 current_duplex;
840
841 link_up = 0;
842 current_speed = SPEED_INVALID;
843 current_duplex = DUPLEX_INVALID;
844
845 spin_lock_irqsave(&np->lock, flags);
846
847 val = nr64_pcs(PCS_MII_STAT);
848
849 if (val & PCS_MII_STAT_LINK_STATUS) {
850 link_up = 1;
851 current_speed = SPEED_1000;
852 current_duplex = DUPLEX_FULL;
853 }
854
855 lp->active_speed = current_speed;
856 lp->active_duplex = current_duplex;
857 spin_unlock_irqrestore(&np->lock, flags);
858
859 *link_up_p = link_up;
860 return 0;
861}
862
863
864static int link_status_10g_serdes(struct niu *np, int *link_up_p)
865{
866 unsigned long flags;
867 struct niu_link_config *lp = &np->link_config;
868 int link_up = 0;
869 int link_ok = 1;
870 u64 val, val2;
871 u16 current_speed;
872 u8 current_duplex;
873
874 if (!(np->flags & NIU_FLAGS_10G))
875 return link_status_1g_serdes(np, link_up_p);
876
877 current_speed = SPEED_INVALID;
878 current_duplex = DUPLEX_INVALID;
879 spin_lock_irqsave(&np->lock, flags);
880
881 val = nr64_xpcs(XPCS_STATUS(0));
882 val2 = nr64_mac(XMAC_INTER2);
883 if (val2 & 0x01000000)
884 link_ok = 0;
885
886 if ((val & 0x1000ULL) && link_ok) {
887 link_up = 1;
888 current_speed = SPEED_10000;
889 current_duplex = DUPLEX_FULL;
890 }
891 lp->active_speed = current_speed;
892 lp->active_duplex = current_duplex;
893 spin_unlock_irqrestore(&np->lock, flags);
894 *link_up_p = link_up;
895 return 0;
896}
897
898
899static int link_status_1g_rgmii(struct niu *np, int *link_up_p)
900{
901 struct niu_link_config *lp = &np->link_config;
902 u16 current_speed, bmsr;
903 unsigned long flags;
904 u8 current_duplex;
905 int err, link_up;
906
907 link_up = 0;
908 current_speed = SPEED_INVALID;
909 current_duplex = DUPLEX_INVALID;
910
911 spin_lock_irqsave(&np->lock, flags);
912
913 err = -EINVAL;
914
915 err = mii_read(np, np->phy_addr, MII_BMSR);
916 if (err < 0)
917 goto out;
918
919 bmsr = err;
920 if (bmsr & BMSR_LSTATUS) {
921 u16 adv, lpa, common, estat;
922
923 err = mii_read(np, np->phy_addr, MII_ADVERTISE);
924 if (err < 0)
925 goto out;
926 adv = err;
927
928 err = mii_read(np, np->phy_addr, MII_LPA);
929 if (err < 0)
930 goto out;
931 lpa = err;
932
933 common = adv & lpa;
934
935 err = mii_read(np, np->phy_addr, MII_ESTATUS);
936 if (err < 0)
937 goto out;
938 estat = err;
939 link_up = 1;
940 current_speed = SPEED_1000;
941 current_duplex = DUPLEX_FULL;
942
943 }
944 lp->active_speed = current_speed;
945 lp->active_duplex = current_duplex;
946 err = 0;
947
948out:
949 spin_unlock_irqrestore(&np->lock, flags);
950
951 *link_up_p = link_up;
952 return err;
953}
954
955
709static int bcm8704_reset(struct niu *np) 956static int bcm8704_reset(struct niu *np)
710{ 957{
711 int err, limit; 958 int err, limit;
@@ -1022,6 +1269,69 @@ static int mii_reset(struct niu *np)
1022 return 0; 1269 return 0;
1023} 1270}
1024 1271
1272
1273
1274static int xcvr_init_1g_rgmii(struct niu *np)
1275{
1276 int err;
1277 u64 val;
1278 u16 bmcr, bmsr, estat;
1279
1280 val = nr64(MIF_CONFIG);
1281 val &= ~MIF_CONFIG_INDIRECT_MODE;
1282 nw64(MIF_CONFIG, val);
1283
1284 err = mii_reset(np);
1285 if (err)
1286 return err;
1287
1288 err = mii_read(np, np->phy_addr, MII_BMSR);
1289 if (err < 0)
1290 return err;
1291 bmsr = err;
1292
1293 estat = 0;
1294 if (bmsr & BMSR_ESTATEN) {
1295 err = mii_read(np, np->phy_addr, MII_ESTATUS);
1296 if (err < 0)
1297 return err;
1298 estat = err;
1299 }
1300
1301 bmcr = 0;
1302 err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
1303 if (err)
1304 return err;
1305
1306 if (bmsr & BMSR_ESTATEN) {
1307 u16 ctrl1000 = 0;
1308
1309 if (estat & ESTATUS_1000_TFULL)
1310 ctrl1000 |= ADVERTISE_1000FULL;
1311 err = mii_write(np, np->phy_addr, MII_CTRL1000, ctrl1000);
1312 if (err)
1313 return err;
1314 }
1315
1316 bmcr = (BMCR_SPEED1000 | BMCR_FULLDPLX);
1317
1318 err = mii_write(np, np->phy_addr, MII_BMCR, bmcr);
1319 if (err)
1320 return err;
1321
1322 err = mii_read(np, np->phy_addr, MII_BMCR);
1323 if (err < 0)
1324 return err;
1325 bmcr = mii_read(np, np->phy_addr, MII_BMCR);
1326
1327 err = mii_read(np, np->phy_addr, MII_BMSR);
1328 if (err < 0)
1329 return err;
1330
1331 return 0;
1332}
1333
1334
1025static int mii_init_common(struct niu *np) 1335static int mii_init_common(struct niu *np)
1026{ 1336{
1027 struct niu_link_config *lp = &np->link_config; 1337 struct niu_link_config *lp = &np->link_config;
@@ -1429,6 +1739,16 @@ static void niu_timer(unsigned long __opaque)
1429 add_timer(&np->timer); 1739 add_timer(&np->timer);
1430} 1740}
1431 1741
1742static const struct niu_phy_ops phy_ops_10g_serdes = {
1743 .serdes_init = serdes_init_10g_serdes,
1744 .link_status = link_status_10g_serdes,
1745};
1746
1747static const struct niu_phy_ops phy_ops_1g_rgmii = {
1748 .xcvr_init = xcvr_init_1g_rgmii,
1749 .link_status = link_status_1g_rgmii,
1750};
1751
1432static const struct niu_phy_ops phy_ops_10g_fiber_niu = { 1752static const struct niu_phy_ops phy_ops_10g_fiber_niu = {
1433 .serdes_init = serdes_init_niu, 1753 .serdes_init = serdes_init_niu,
1434 .xcvr_init = xcvr_init_10g, 1754 .xcvr_init = xcvr_init_10g,
@@ -1487,6 +1807,152 @@ static const struct niu_phy_template phy_template_1g_copper = {
1487 .phy_addr_base = 0, 1807 .phy_addr_base = 0,
1488}; 1808};
1489 1809
1810static const struct niu_phy_template phy_template_1g_rgmii = {
1811 .ops = &phy_ops_1g_rgmii,
1812 .phy_addr_base = 0,
1813};
1814
1815static const struct niu_phy_template phy_template_10g_serdes = {
1816 .ops = &phy_ops_10g_serdes,
1817 .phy_addr_base = 0,
1818};
1819
1820static int niu_atca_port_num[4] = {
1821 0, 0, 11, 10
1822};
1823
1824static int serdes_init_10g_serdes(struct niu *np)
1825{
1826 struct niu_link_config *lp = &np->link_config;
1827 unsigned long ctrl_reg, test_cfg_reg, pll_cfg, i;
1828 u64 ctrl_val, test_cfg_val, sig, mask, val;
1829 int err;
1830 u64 reset_val;
1831
1832 switch (np->port) {
1833 case 0:
1834 reset_val = ENET_SERDES_RESET_0;
1835 ctrl_reg = ENET_SERDES_0_CTRL_CFG;
1836 test_cfg_reg = ENET_SERDES_0_TEST_CFG;
1837 pll_cfg = ENET_SERDES_0_PLL_CFG;
1838 break;
1839 case 1:
1840 reset_val = ENET_SERDES_RESET_1;
1841 ctrl_reg = ENET_SERDES_1_CTRL_CFG;
1842 test_cfg_reg = ENET_SERDES_1_TEST_CFG;
1843 pll_cfg = ENET_SERDES_1_PLL_CFG;
1844 break;
1845
1846 default:
1847 return -EINVAL;
1848 }
1849 ctrl_val = (ENET_SERDES_CTRL_SDET_0 |
1850 ENET_SERDES_CTRL_SDET_1 |
1851 ENET_SERDES_CTRL_SDET_2 |
1852 ENET_SERDES_CTRL_SDET_3 |
1853 (0x5 << ENET_SERDES_CTRL_EMPH_0_SHIFT) |
1854 (0x5 << ENET_SERDES_CTRL_EMPH_1_SHIFT) |
1855 (0x5 << ENET_SERDES_CTRL_EMPH_2_SHIFT) |
1856 (0x5 << ENET_SERDES_CTRL_EMPH_3_SHIFT) |
1857 (0x1 << ENET_SERDES_CTRL_LADJ_0_SHIFT) |
1858 (0x1 << ENET_SERDES_CTRL_LADJ_1_SHIFT) |
1859 (0x1 << ENET_SERDES_CTRL_LADJ_2_SHIFT) |
1860 (0x1 << ENET_SERDES_CTRL_LADJ_3_SHIFT));
1861 test_cfg_val = 0;
1862
1863 if (lp->loopback_mode == LOOPBACK_PHY) {
1864 test_cfg_val |= ((ENET_TEST_MD_PAD_LOOPBACK <<
1865 ENET_SERDES_TEST_MD_0_SHIFT) |
1866 (ENET_TEST_MD_PAD_LOOPBACK <<
1867 ENET_SERDES_TEST_MD_1_SHIFT) |
1868 (ENET_TEST_MD_PAD_LOOPBACK <<
1869 ENET_SERDES_TEST_MD_2_SHIFT) |
1870 (ENET_TEST_MD_PAD_LOOPBACK <<
1871 ENET_SERDES_TEST_MD_3_SHIFT));
1872 }
1873
1874 esr_reset(np);
1875 nw64(pll_cfg, ENET_SERDES_PLL_FBDIV2);
1876 nw64(ctrl_reg, ctrl_val);
1877 nw64(test_cfg_reg, test_cfg_val);
1878
1879 /* Initialize all 4 lanes of the SERDES. */
1880 for (i = 0; i < 4; i++) {
1881 u32 rxtx_ctrl, glue0;
1882
1883 err = esr_read_rxtx_ctrl(np, i, &rxtx_ctrl);
1884 if (err)
1885 return err;
1886 err = esr_read_glue0(np, i, &glue0);
1887 if (err)
1888 return err;
1889
1890 rxtx_ctrl &= ~(ESR_RXTX_CTRL_VMUXLO);
1891 rxtx_ctrl |= (ESR_RXTX_CTRL_ENSTRETCH |
1892 (2 << ESR_RXTX_CTRL_VMUXLO_SHIFT));
1893
1894 glue0 &= ~(ESR_GLUE_CTRL0_SRATE |
1895 ESR_GLUE_CTRL0_THCNT |
1896 ESR_GLUE_CTRL0_BLTIME);
1897 glue0 |= (ESR_GLUE_CTRL0_RXLOSENAB |
1898 (0xf << ESR_GLUE_CTRL0_SRATE_SHIFT) |
1899 (0xff << ESR_GLUE_CTRL0_THCNT_SHIFT) |
1900 (BLTIME_300_CYCLES <<
1901 ESR_GLUE_CTRL0_BLTIME_SHIFT));
1902
1903 err = esr_write_rxtx_ctrl(np, i, rxtx_ctrl);
1904 if (err)
1905 return err;
1906 err = esr_write_glue0(np, i, glue0);
1907 if (err)
1908 return err;
1909 }
1910
1911
1912 sig = nr64(ESR_INT_SIGNALS);
1913 switch (np->port) {
1914 case 0:
1915 mask = ESR_INT_SIGNALS_P0_BITS;
1916 val = (ESR_INT_SRDY0_P0 |
1917 ESR_INT_DET0_P0 |
1918 ESR_INT_XSRDY_P0 |
1919 ESR_INT_XDP_P0_CH3 |
1920 ESR_INT_XDP_P0_CH2 |
1921 ESR_INT_XDP_P0_CH1 |
1922 ESR_INT_XDP_P0_CH0);
1923 break;
1924
1925 case 1:
1926 mask = ESR_INT_SIGNALS_P1_BITS;
1927 val = (ESR_INT_SRDY0_P1 |
1928 ESR_INT_DET0_P1 |
1929 ESR_INT_XSRDY_P1 |
1930 ESR_INT_XDP_P1_CH3 |
1931 ESR_INT_XDP_P1_CH2 |
1932 ESR_INT_XDP_P1_CH1 |
1933 ESR_INT_XDP_P1_CH0);
1934 break;
1935
1936 default:
1937 return -EINVAL;
1938 }
1939
1940 if ((sig & mask) != val) {
1941 int err;
1942 err = serdes_init_1g_serdes(np);
1943 if (!err) {
1944 np->flags &= ~NIU_FLAGS_10G;
1945 np->mac_xcvr = MAC_XCVR_PCS;
1946 } else {
1947 dev_err(np->device, PFX "Port %u 10G/1G SERDES Link Failed \n",
1948 np->port);
1949 return -ENODEV;
1950 }
1951 }
1952
1953 return 0;
1954}
1955
1490static int niu_determine_phy_disposition(struct niu *np) 1956static int niu_determine_phy_disposition(struct niu *np)
1491{ 1957{
1492 struct niu_parent *parent = np->parent; 1958 struct niu_parent *parent = np->parent;
@@ -1498,7 +1964,10 @@ static int niu_determine_phy_disposition(struct niu *np)
1498 tp = &phy_template_niu; 1964 tp = &phy_template_niu;
1499 phy_addr_off += np->port; 1965 phy_addr_off += np->port;
1500 } else { 1966 } else {
1501 switch (np->flags & (NIU_FLAGS_10G | NIU_FLAGS_FIBER)) { 1967 switch (np->flags &
1968 (NIU_FLAGS_10G |
1969 NIU_FLAGS_FIBER |
1970 NIU_FLAGS_XCVR_SERDES)) {
1502 case 0: 1971 case 0:
1503 /* 1G copper */ 1972 /* 1G copper */
1504 tp = &phy_template_1g_copper; 1973 tp = &phy_template_1g_copper;
@@ -1529,6 +1998,25 @@ static int niu_determine_phy_disposition(struct niu *np)
1529 phy_addr_off += np->port; 1998 phy_addr_off += np->port;
1530 break; 1999 break;
1531 2000
2001 case NIU_FLAGS_10G | NIU_FLAGS_XCVR_SERDES:
2002 case NIU_FLAGS_XCVR_SERDES | NIU_FLAGS_FIBER:
2003 case NIU_FLAGS_XCVR_SERDES:
2004 switch(np->port) {
2005 case 0:
2006 case 1:
2007 tp = &phy_template_10g_serdes;
2008 break;
2009 case 2:
2010 case 3:
2011 tp = &phy_template_1g_rgmii;
2012 break;
2013 default:
2014 return -EINVAL;
2015 break;
2016 }
2017 phy_addr_off = niu_atca_port_num[np->port];
2018 break;
2019
1532 default: 2020 default:
1533 return -EINVAL; 2021 return -EINVAL;
1534 } 2022 }
@@ -4139,6 +4627,12 @@ static void niu_init_xif_xmac(struct niu *np)
4139 struct niu_link_config *lp = &np->link_config; 4627 struct niu_link_config *lp = &np->link_config;
4140 u64 val; 4628 u64 val;
4141 4629
4630 if (np->flags & NIU_FLAGS_XCVR_SERDES) {
4631 val = nr64(MIF_CONFIG);
4632 val |= MIF_CONFIG_ATCA_GE;
4633 nw64(MIF_CONFIG, val);
4634 }
4635
4142 val = nr64_mac(XMAC_CONFIG); 4636 val = nr64_mac(XMAC_CONFIG);
4143 val &= ~XMAC_CONFIG_SEL_POR_CLK_SRC; 4637 val &= ~XMAC_CONFIG_SEL_POR_CLK_SRC;
4144 4638
@@ -4155,7 +4649,8 @@ static void niu_init_xif_xmac(struct niu *np)
4155 val &= ~XMAC_CONFIG_LFS_DISABLE; 4649 val &= ~XMAC_CONFIG_LFS_DISABLE;
4156 } else { 4650 } else {
4157 val |= XMAC_CONFIG_LFS_DISABLE; 4651 val |= XMAC_CONFIG_LFS_DISABLE;
4158 if (!(np->flags & NIU_FLAGS_FIBER)) 4652 if (!(np->flags & NIU_FLAGS_FIBER) &&
4653 !(np->flags & NIU_FLAGS_XCVR_SERDES))
4159 val |= XMAC_CONFIG_1G_PCS_BYPASS; 4654 val |= XMAC_CONFIG_1G_PCS_BYPASS;
4160 else 4655 else
4161 val &= ~XMAC_CONFIG_1G_PCS_BYPASS; 4656 val &= ~XMAC_CONFIG_1G_PCS_BYPASS;
@@ -4224,16 +4719,26 @@ static void niu_init_xif(struct niu *np)
4224 4719
4225static void niu_pcs_mii_reset(struct niu *np) 4720static void niu_pcs_mii_reset(struct niu *np)
4226{ 4721{
4722 int limit = 1000;
4227 u64 val = nr64_pcs(PCS_MII_CTL); 4723 u64 val = nr64_pcs(PCS_MII_CTL);
4228 val |= PCS_MII_CTL_RST; 4724 val |= PCS_MII_CTL_RST;
4229 nw64_pcs(PCS_MII_CTL, val); 4725 nw64_pcs(PCS_MII_CTL, val);
4726 while ((--limit >= 0) && (val & PCS_MII_CTL_RST)) {
4727 udelay(100);
4728 val = nr64_pcs(PCS_MII_CTL);
4729 }
4230} 4730}
4231 4731
4232static void niu_xpcs_reset(struct niu *np) 4732static void niu_xpcs_reset(struct niu *np)
4233{ 4733{
4734 int limit = 1000;
4234 u64 val = nr64_xpcs(XPCS_CONTROL1); 4735 u64 val = nr64_xpcs(XPCS_CONTROL1);
4235 val |= XPCS_CONTROL1_RESET; 4736 val |= XPCS_CONTROL1_RESET;
4236 nw64_xpcs(XPCS_CONTROL1, val); 4737 nw64_xpcs(XPCS_CONTROL1, val);
4738 while ((--limit >= 0) && (val & XPCS_CONTROL1_RESET)) {
4739 udelay(100);
4740 val = nr64_xpcs(XPCS_CONTROL1);
4741 }
4237} 4742}
4238 4743
4239static int niu_init_pcs(struct niu *np) 4744static int niu_init_pcs(struct niu *np)
@@ -4241,7 +4746,9 @@ static int niu_init_pcs(struct niu *np)
4241 struct niu_link_config *lp = &np->link_config; 4746 struct niu_link_config *lp = &np->link_config;
4242 u64 val; 4747 u64 val;
4243 4748
4244 switch (np->flags & (NIU_FLAGS_10G | NIU_FLAGS_FIBER)) { 4749 switch (np->flags & (NIU_FLAGS_10G |
4750 NIU_FLAGS_FIBER |
4751 NIU_FLAGS_XCVR_SERDES)) {
4245 case NIU_FLAGS_FIBER: 4752 case NIU_FLAGS_FIBER:
4246 /* 1G fiber */ 4753 /* 1G fiber */
4247 nw64_pcs(PCS_CONF, PCS_CONF_MASK | PCS_CONF_ENABLE); 4754 nw64_pcs(PCS_CONF, PCS_CONF_MASK | PCS_CONF_ENABLE);
@@ -4251,6 +4758,8 @@ static int niu_init_pcs(struct niu *np)
4251 4758
4252 case NIU_FLAGS_10G: 4759 case NIU_FLAGS_10G:
4253 case NIU_FLAGS_10G | NIU_FLAGS_FIBER: 4760 case NIU_FLAGS_10G | NIU_FLAGS_FIBER:
4761 case NIU_FLAGS_10G | NIU_FLAGS_XCVR_SERDES:
4762 /* 10G SERDES */
4254 if (!(np->flags & NIU_FLAGS_XMAC)) 4763 if (!(np->flags & NIU_FLAGS_XMAC))
4255 return -EINVAL; 4764 return -EINVAL;
4256 4765
@@ -4273,8 +4782,18 @@ static int niu_init_pcs(struct niu *np)
4273 (void) nr64_xpcs(XPCS_SYMERR_CNT23); 4782 (void) nr64_xpcs(XPCS_SYMERR_CNT23);
4274 break; 4783 break;
4275 4784
4785
4786 case NIU_FLAGS_XCVR_SERDES:
4787 /* 1G SERDES */
4788 niu_pcs_mii_reset(np);
4789 nw64_pcs(PCS_CONF, PCS_CONF_MASK | PCS_CONF_ENABLE);
4790 nw64_pcs(PCS_DPATH_MODE, 0);
4791 break;
4792
4276 case 0: 4793 case 0:
4277 /* 1G copper */ 4794 /* 1G copper */
4795 case NIU_FLAGS_XCVR_SERDES | NIU_FLAGS_FIBER:
4796 /* 1G RGMII FIBER */
4278 nw64_pcs(PCS_DPATH_MODE, PCS_DPATH_MODE_MII); 4797 nw64_pcs(PCS_DPATH_MODE, PCS_DPATH_MODE_MII);
4279 niu_pcs_mii_reset(np); 4798 niu_pcs_mii_reset(np);
4280 break; 4799 break;
@@ -6268,7 +6787,19 @@ static void __devinit niu_pci_vpd_validate(struct niu *np)
6268 return; 6787 return;
6269 } 6788 }
6270 6789
6271 if (niu_phy_type_prop_decode(np, np->vpd.phy_type)) { 6790 if (!strcmp(np->vpd.model, "SUNW,CP3220") ||
6791 !strcmp(np->vpd.model, "SUNW,CP3260")) {
6792 np->flags |= NIU_FLAGS_10G;
6793 np->flags &= ~NIU_FLAGS_FIBER;
6794 np->flags |= NIU_FLAGS_XCVR_SERDES;
6795 np->mac_xcvr = MAC_XCVR_PCS;
6796 if (np->port > 1) {
6797 np->flags |= NIU_FLAGS_FIBER;
6798 np->flags &= ~NIU_FLAGS_10G;
6799 }
6800 if (np->flags & NIU_FLAGS_10G)
6801 np->mac_xcvr = MAC_XCVR_XPCS;
6802 } else if (niu_phy_type_prop_decode(np, np->vpd.phy_type)) {
6272 dev_err(np->device, PFX "Illegal phy string [%s].\n", 6803 dev_err(np->device, PFX "Illegal phy string [%s].\n",
6273 np->vpd.phy_type); 6804 np->vpd.phy_type);
6274 dev_err(np->device, PFX "Falling back to SPROM.\n"); 6805 dev_err(np->device, PFX "Falling back to SPROM.\n");
@@ -6731,80 +7262,93 @@ static int __devinit walk_phys(struct niu *np, struct niu_parent *parent)
6731 u32 val; 7262 u32 val;
6732 int err; 7263 int err;
6733 7264
6734 err = fill_phy_probe_info(np, parent, info);
6735 if (err)
6736 return err;
6737 7265
6738 num_10g = count_10g_ports(info, &lowest_10g); 7266 if (!strcmp(np->vpd.model, "SUNW,CP3220") ||
6739 num_1g = count_1g_ports(info, &lowest_1g); 7267 !strcmp(np->vpd.model, "SUNW,CP3260")) {
6740 7268 num_10g = 0;
6741 switch ((num_10g << 4) | num_1g) { 7269 num_1g = 2;
6742 case 0x24: 7270 parent->plat_type = PLAT_TYPE_ATCA_CP3220;
6743 if (lowest_1g == 10) 7271 parent->num_ports = 4;
6744 parent->plat_type = PLAT_TYPE_VF_P0; 7272 val = (phy_encode(PORT_TYPE_1G, 0) |
6745 else if (lowest_1g == 26) 7273 phy_encode(PORT_TYPE_1G, 1) |
6746 parent->plat_type = PLAT_TYPE_VF_P1;
6747 else
6748 goto unknown_vg_1g_port;
6749
6750 /* fallthru */
6751 case 0x22:
6752 val = (phy_encode(PORT_TYPE_10G, 0) |
6753 phy_encode(PORT_TYPE_10G, 1) |
6754 phy_encode(PORT_TYPE_1G, 2) | 7274 phy_encode(PORT_TYPE_1G, 2) |
6755 phy_encode(PORT_TYPE_1G, 3)); 7275 phy_encode(PORT_TYPE_1G, 3));
6756 break; 7276 } else {
6757 7277 err = fill_phy_probe_info(np, parent, info);
6758 case 0x20: 7278 if (err)
6759 val = (phy_encode(PORT_TYPE_10G, 0) | 7279 return err;
6760 phy_encode(PORT_TYPE_10G, 1));
6761 break;
6762 7280
6763 case 0x10: 7281 num_10g = count_10g_ports(info, &lowest_10g);
6764 val = phy_encode(PORT_TYPE_10G, np->port); 7282 num_1g = count_1g_ports(info, &lowest_1g);
6765 break;
6766 7283
6767 case 0x14: 7284 switch ((num_10g << 4) | num_1g) {
6768 if (lowest_1g == 10) 7285 case 0x24:
6769 parent->plat_type = PLAT_TYPE_VF_P0; 7286 if (lowest_1g == 10)
6770 else if (lowest_1g == 26) 7287 parent->plat_type = PLAT_TYPE_VF_P0;
6771 parent->plat_type = PLAT_TYPE_VF_P1; 7288 else if (lowest_1g == 26)
6772 else 7289 parent->plat_type = PLAT_TYPE_VF_P1;
6773 goto unknown_vg_1g_port; 7290 else
7291 goto unknown_vg_1g_port;
6774 7292
6775 /* fallthru */ 7293 /* fallthru */
6776 case 0x13: 7294 case 0x22:
6777 if ((lowest_10g & 0x7) == 0)
6778 val = (phy_encode(PORT_TYPE_10G, 0) | 7295 val = (phy_encode(PORT_TYPE_10G, 0) |
6779 phy_encode(PORT_TYPE_1G, 1) |
6780 phy_encode(PORT_TYPE_1G, 2) |
6781 phy_encode(PORT_TYPE_1G, 3));
6782 else
6783 val = (phy_encode(PORT_TYPE_1G, 0) |
6784 phy_encode(PORT_TYPE_10G, 1) | 7296 phy_encode(PORT_TYPE_10G, 1) |
6785 phy_encode(PORT_TYPE_1G, 2) | 7297 phy_encode(PORT_TYPE_1G, 2) |
6786 phy_encode(PORT_TYPE_1G, 3)); 7298 phy_encode(PORT_TYPE_1G, 3));
6787 break; 7299 break;
6788 7300
6789 case 0x04: 7301 case 0x20:
6790 if (lowest_1g == 10) 7302 val = (phy_encode(PORT_TYPE_10G, 0) |
6791 parent->plat_type = PLAT_TYPE_VF_P0; 7303 phy_encode(PORT_TYPE_10G, 1));
6792 else if (lowest_1g == 26) 7304 break;
6793 parent->plat_type = PLAT_TYPE_VF_P1;
6794 else
6795 goto unknown_vg_1g_port;
6796 7305
6797 val = (phy_encode(PORT_TYPE_1G, 0) | 7306 case 0x10:
6798 phy_encode(PORT_TYPE_1G, 1) | 7307 val = phy_encode(PORT_TYPE_10G, np->port);
6799 phy_encode(PORT_TYPE_1G, 2) | 7308 break;
6800 phy_encode(PORT_TYPE_1G, 3));
6801 break;
6802 7309
6803 default: 7310 case 0x14:
6804 printk(KERN_ERR PFX "Unsupported port config " 7311 if (lowest_1g == 10)
6805 "10G[%d] 1G[%d]\n", 7312 parent->plat_type = PLAT_TYPE_VF_P0;
6806 num_10g, num_1g); 7313 else if (lowest_1g == 26)
6807 return -EINVAL; 7314 parent->plat_type = PLAT_TYPE_VF_P1;
7315 else
7316 goto unknown_vg_1g_port;
7317
7318 /* fallthru */
7319 case 0x13:
7320 if ((lowest_10g & 0x7) == 0)
7321 val = (phy_encode(PORT_TYPE_10G, 0) |
7322 phy_encode(PORT_TYPE_1G, 1) |
7323 phy_encode(PORT_TYPE_1G, 2) |
7324 phy_encode(PORT_TYPE_1G, 3));
7325 else
7326 val = (phy_encode(PORT_TYPE_1G, 0) |
7327 phy_encode(PORT_TYPE_10G, 1) |
7328 phy_encode(PORT_TYPE_1G, 2) |
7329 phy_encode(PORT_TYPE_1G, 3));
7330 break;
7331
7332 case 0x04:
7333 if (lowest_1g == 10)
7334 parent->plat_type = PLAT_TYPE_VF_P0;
7335 else if (lowest_1g == 26)
7336 parent->plat_type = PLAT_TYPE_VF_P1;
7337 else
7338 goto unknown_vg_1g_port;
7339
7340 val = (phy_encode(PORT_TYPE_1G, 0) |
7341 phy_encode(PORT_TYPE_1G, 1) |
7342 phy_encode(PORT_TYPE_1G, 2) |
7343 phy_encode(PORT_TYPE_1G, 3));
7344 break;
7345
7346 default:
7347 printk(KERN_ERR PFX "Unsupported port config "
7348 "10G[%d] 1G[%d]\n",
7349 num_10g, num_1g);
7350 return -EINVAL;
7351 }
6808 } 7352 }
6809 7353
6810 parent->port_phy = val; 7354 parent->port_phy = val;
@@ -7599,14 +8143,25 @@ static void __devinit niu_device_announce(struct niu *np)
7599 pr_info("%s: NIU Ethernet %s\n", 8143 pr_info("%s: NIU Ethernet %s\n",
7600 dev->name, print_mac(mac, dev->dev_addr)); 8144 dev->name, print_mac(mac, dev->dev_addr));
7601 8145
7602 pr_info("%s: Port type[%s] mode[%s:%s] XCVR[%s] phy[%s]\n", 8146 if (np->parent->plat_type == PLAT_TYPE_ATCA_CP3220) {
7603 dev->name, 8147 pr_info("%s: Port type[%s] mode[%s:%s] XCVR[%s] phy[%s]\n",
7604 (np->flags & NIU_FLAGS_XMAC ? "XMAC" : "BMAC"), 8148 dev->name,
7605 (np->flags & NIU_FLAGS_10G ? "10G" : "1G"), 8149 (np->flags & NIU_FLAGS_XMAC ? "XMAC" : "BMAC"),
7606 (np->flags & NIU_FLAGS_FIBER ? "FIBER" : "COPPER"), 8150 (np->flags & NIU_FLAGS_10G ? "10G" : "1G"),
7607 (np->mac_xcvr == MAC_XCVR_MII ? "MII" : 8151 (np->flags & NIU_FLAGS_FIBER ? "RGMII FIBER" : "SERDES"),
7608 (np->mac_xcvr == MAC_XCVR_PCS ? "PCS" : "XPCS")), 8152 (np->mac_xcvr == MAC_XCVR_MII ? "MII" :
7609 np->vpd.phy_type); 8153 (np->mac_xcvr == MAC_XCVR_PCS ? "PCS" : "XPCS")),
8154 np->vpd.phy_type);
8155 } else {
8156 pr_info("%s: Port type[%s] mode[%s:%s] XCVR[%s] phy[%s]\n",
8157 dev->name,
8158 (np->flags & NIU_FLAGS_XMAC ? "XMAC" : "BMAC"),
8159 (np->flags & NIU_FLAGS_10G ? "10G" : "1G"),
8160 (np->flags & NIU_FLAGS_FIBER ? "FIBER" : "COPPER"),
8161 (np->mac_xcvr == MAC_XCVR_MII ? "MII" :
8162 (np->mac_xcvr == MAC_XCVR_PCS ? "PCS" : "XPCS")),
8163 np->vpd.phy_type);
8164 }
7610} 8165}
7611 8166
7612static int __devinit niu_pci_init_one(struct pci_dev *pdev, 8167static int __devinit niu_pci_init_one(struct pci_dev *pdev,
diff --git a/drivers/net/niu.h b/drivers/net/niu.h
index 59dc05fcd371..336aed08b275 100644
--- a/drivers/net/niu.h
+++ b/drivers/net/niu.h
@@ -3061,6 +3061,7 @@ struct niu_parent {
3061#define PLAT_TYPE_NIU 0x02 3061#define PLAT_TYPE_NIU 0x02
3062#define PLAT_TYPE_VF_P0 0x03 3062#define PLAT_TYPE_VF_P0 0x03
3063#define PLAT_TYPE_VF_P1 0x04 3063#define PLAT_TYPE_VF_P1 0x04
3064#define PLAT_TYPE_ATCA_CP3220 0x08
3064 3065
3065 u8 num_ports; 3066 u8 num_ports;
3066 3067
@@ -3198,10 +3199,11 @@ struct niu {
3198 struct niu_parent *parent; 3199 struct niu_parent *parent;
3199 3200
3200 u32 flags; 3201 u32 flags;
3202#define NIU_FLAGS_VPD_VALID 0x00800000 /* VPD has valid version */
3201#define NIU_FLAGS_MSIX 0x00400000 /* MSI-X in use */ 3203#define NIU_FLAGS_MSIX 0x00400000 /* MSI-X in use */
3202#define NIU_FLAGS_MCAST 0x00200000 /* multicast filter enabled */ 3204#define NIU_FLAGS_MCAST 0x00200000 /* multicast filter enabled */
3203#define NIU_FLAGS_PROMISC 0x00100000 /* PROMISC enabled */ 3205#define NIU_FLAGS_PROMISC 0x00100000 /* PROMISC enabled */
3204#define NIU_FLAGS_VPD_VALID 0x00080000 /* VPD has valid version */ 3206#define NIU_FLAGS_XCVR_SERDES 0x00080000 /* 0=PHY 1=SERDES */
3205#define NIU_FLAGS_10G 0x00040000 /* 0=1G 1=10G */ 3207#define NIU_FLAGS_10G 0x00040000 /* 0=1G 1=10G */
3206#define NIU_FLAGS_FIBER 0x00020000 /* 0=COPPER 1=FIBER */ 3208#define NIU_FLAGS_FIBER 0x00020000 /* 0=COPPER 1=FIBER */
3207#define NIU_FLAGS_XMAC 0x00010000 /* 0=BMAC 1=XMAC */ 3209#define NIU_FLAGS_XMAC 0x00010000 /* 0=BMAC 1=XMAC */
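
The niu.c hunks above key several decisions off the combined NIU_FLAGS_10G / NIU_FLAGS_FIBER / NIU_FLAGS_XCVR_SERDES bits that this niu.h hunk reshuffles. Below is a minimal stand-alone sketch of that dispatch, using the flag values copied from the niu.h hunk and mirroring the case grouping of niu_init_pcs(); the niu_port_kind() helper and its strings are illustrative only, not driver code:

#include <stdio.h>

/* Flag values copied from the niu.h hunk above. */
#define NIU_FLAGS_XCVR_SERDES	0x00080000	/* 0=PHY 1=SERDES */
#define NIU_FLAGS_10G		0x00040000	/* 0=1G 1=10G */
#define NIU_FLAGS_FIBER		0x00020000	/* 0=COPPER 1=FIBER */

/* Illustrative: mirrors the case grouping used by niu_init_pcs(). */
static const char *niu_port_kind(unsigned int flags)
{
	switch (flags & (NIU_FLAGS_10G | NIU_FLAGS_FIBER | NIU_FLAGS_XCVR_SERDES)) {
	case NIU_FLAGS_FIBER:
		return "1G fiber";
	case NIU_FLAGS_10G:
	case NIU_FLAGS_10G | NIU_FLAGS_FIBER:
	case NIU_FLAGS_10G | NIU_FLAGS_XCVR_SERDES:
		return "10G (XPCS)";
	case NIU_FLAGS_XCVR_SERDES:
		return "1G SERDES";
	case 0:
	case NIU_FLAGS_XCVR_SERDES | NIU_FLAGS_FIBER:
		return "1G copper / 1G RGMII fiber (MII datapath)";
	default:
		return "unsupported";
	}
}

int main(void)
{
	printf("%s\n", niu_port_kind(NIU_FLAGS_10G | NIU_FLAGS_XCVR_SERDES));
	return 0;
}

The same mask-and-switch shape appears in niu_determine_phy_disposition() above, where the SERDES cases additionally split by port number: ports 0/1 get the 10G SERDES template and ports 2/3 the 1G RGMII template.
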
diff --git a/drivers/net/pasemi_mac.c b/drivers/net/pasemi_mac.c
index 2e39e0285d8f..bcd7f9814ed8 100644
--- a/drivers/net/pasemi_mac.c
+++ b/drivers/net/pasemi_mac.c
@@ -1012,7 +1012,7 @@ static int pasemi_mac_phy_init(struct net_device *dev)
1012 goto err; 1012 goto err;
1013 1013
1014 phy_id = *prop; 1014 phy_id = *prop;
1015 snprintf(mac->phy_id, BUS_ID_SIZE, PHY_ID_FMT, (int)r.start, phy_id); 1015 snprintf(mac->phy_id, BUS_ID_SIZE, "%x:%02x", (int)r.start, phy_id);
1016 1016
1017 of_node_put(phy_dn); 1017 of_node_put(phy_dn);
1018 1018
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index 5b80358af658..60c5cfe96918 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -99,6 +99,41 @@ static int bcm54xx_config_intr(struct phy_device *phydev)
99 return err; 99 return err;
100} 100}
101 101
102static int bcm5481_config_aneg(struct phy_device *phydev)
103{
104 int ret;
105
106	/* Aneg firstly. */
107 ret = genphy_config_aneg(phydev);
108
109 /* Then we can set up the delay. */
110 if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) {
111 u16 reg;
112
113 /*
114 * There is no BCM5481 specification available, so down
115 * here is everything we know about "register 0x18". This
116	 * at least helps BCM5481 to successfully receive packets
117 * on MPC8360E-RDK board. Peter Barada <peterb@logicpd.com>
118 * says: "This sets delay between the RXD and RXC signals
119 * instead of using trace lengths to achieve timing".
120 */
121
122	/* Set RXD clk delay. */
123 reg = 0x7 | (0x7 << 12);
124 phy_write(phydev, 0x18, reg);
125
126 reg = phy_read(phydev, 0x18);
127	/* Set RXD-RXC skew. */
128 reg |= (1 << 8);
129 /* Write bits 14:0. */
130 reg |= (1 << 15);
131 phy_write(phydev, 0x18, reg);
132 }
133
134 return ret;
135}
136
102static struct phy_driver bcm5411_driver = { 137static struct phy_driver bcm5411_driver = {
103 .phy_id = 0x00206070, 138 .phy_id = 0x00206070,
104 .phy_id_mask = 0xfffffff0, 139 .phy_id_mask = 0xfffffff0,
@@ -141,8 +176,36 @@ static struct phy_driver bcm5461_driver = {
141 .driver = { .owner = THIS_MODULE }, 176 .driver = { .owner = THIS_MODULE },
142}; 177};
143 178
179static struct phy_driver bcm5464_driver = {
180 .phy_id = 0x002060b0,
181 .phy_id_mask = 0xfffffff0,
182 .name = "Broadcom BCM5464",
183 .features = PHY_GBIT_FEATURES,
184 .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
185 .config_init = bcm54xx_config_init,
186 .config_aneg = genphy_config_aneg,
187 .read_status = genphy_read_status,
188 .ack_interrupt = bcm54xx_ack_interrupt,
189 .config_intr = bcm54xx_config_intr,
190 .driver = { .owner = THIS_MODULE },
191};
192
193static struct phy_driver bcm5481_driver = {
194 .phy_id = 0x0143bca0,
195 .phy_id_mask = 0xfffffff0,
196 .name = "Broadcom BCM5481",
197 .features = PHY_GBIT_FEATURES,
198 .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
199 .config_init = bcm54xx_config_init,
200 .config_aneg = bcm5481_config_aneg,
201 .read_status = genphy_read_status,
202 .ack_interrupt = bcm54xx_ack_interrupt,
203 .config_intr = bcm54xx_config_intr,
204 .driver = { .owner = THIS_MODULE },
205};
206
144static struct phy_driver bcm5482_driver = { 207static struct phy_driver bcm5482_driver = {
145 .phy_id = 0x0143bcb0, 208 .phy_id = 0x0143bcb0,
146 .phy_id_mask = 0xfffffff0, 209 .phy_id_mask = 0xfffffff0,
147 .name = "Broadcom BCM5482", 210 .name = "Broadcom BCM5482",
148 .features = PHY_GBIT_FEATURES, 211 .features = PHY_GBIT_FEATURES,
@@ -168,12 +231,22 @@ static int __init broadcom_init(void)
168 ret = phy_driver_register(&bcm5461_driver); 231 ret = phy_driver_register(&bcm5461_driver);
169 if (ret) 232 if (ret)
170 goto out_5461; 233 goto out_5461;
234 ret = phy_driver_register(&bcm5464_driver);
235 if (ret)
236 goto out_5464;
237 ret = phy_driver_register(&bcm5481_driver);
238 if (ret)
239 goto out_5481;
171 ret = phy_driver_register(&bcm5482_driver); 240 ret = phy_driver_register(&bcm5482_driver);
172 if (ret) 241 if (ret)
173 goto out_5482; 242 goto out_5482;
174 return ret; 243 return ret;
175 244
176out_5482: 245out_5482:
246 phy_driver_unregister(&bcm5481_driver);
247out_5481:
248 phy_driver_unregister(&bcm5464_driver);
249out_5464:
177 phy_driver_unregister(&bcm5461_driver); 250 phy_driver_unregister(&bcm5461_driver);
178out_5461: 251out_5461:
179 phy_driver_unregister(&bcm5421_driver); 252 phy_driver_unregister(&bcm5421_driver);
@@ -186,6 +259,8 @@ out_5411:
186static void __exit broadcom_exit(void) 259static void __exit broadcom_exit(void)
187{ 260{
188 phy_driver_unregister(&bcm5482_driver); 261 phy_driver_unregister(&bcm5482_driver);
262 phy_driver_unregister(&bcm5481_driver);
263 phy_driver_unregister(&bcm5464_driver);
189 phy_driver_unregister(&bcm5461_driver); 264 phy_driver_unregister(&bcm5461_driver);
190 phy_driver_unregister(&bcm5421_driver); 265 phy_driver_unregister(&bcm5421_driver);
191 phy_driver_unregister(&bcm5411_driver); 266 phy_driver_unregister(&bcm5411_driver);
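
For context on how the new BCM5464/BCM5481 entries above get selected: phylib matches a probed device against each registered phy_driver by comparing the 32-bit PHY ID under the driver's phy_id_mask, so the 0xfffffff0 mask makes the match ignore the silicon revision nibble. A small stand-alone illustration of that comparison; the IDs are the ones added above, while the 0x0143bca3 device ID is a made-up revision value used only for the demonstration:

#include <stdio.h>
#include <stdbool.h>

/* IDs taken from the bcm5464/bcm5481 entries added above. */
#define BCM5464_PHY_ID   0x002060b0u
#define BCM5481_PHY_ID   0x0143bca0u
#define BCM54XX_ID_MASK  0xfffffff0u

/* Illustrative: phylib accepts a driver when the masked IDs are equal. */
static bool phy_id_matches(unsigned int dev_id, unsigned int drv_id, unsigned int mask)
{
	return (dev_id & mask) == (drv_id & mask);
}

int main(void)
{
	/* Hypothetical device ID with a nonzero revision nibble. */
	unsigned int dev_id = 0x0143bca3u;

	printf("BCM5481 match: %d\n", phy_id_matches(dev_id, BCM5481_PHY_ID, BCM54XX_ID_MASK));
	printf("BCM5464 match: %d\n", phy_id_matches(dev_id, BCM5464_PHY_ID, BCM54XX_ID_MASK));
	return 0;
}

Once matched, the phy core invokes the entry's .config_aneg hook, which is how bcm5481_config_aneg() gets to apply the RGMII RXID delay described in its comment above.
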
diff --git a/drivers/net/phy/fixed.c b/drivers/net/phy/fixed.c
index ca9b040f9ad9..4e07956a483b 100644
--- a/drivers/net/phy/fixed.c
+++ b/drivers/net/phy/fixed.c
@@ -213,7 +213,7 @@ static int __init fixed_mdio_bus_init(void)
213 goto err_pdev; 213 goto err_pdev;
214 } 214 }
215 215
216 fmb->mii_bus.id = 0; 216 snprintf(fmb->mii_bus.id, MII_BUS_ID_SIZE, "0");
217 fmb->mii_bus.name = "Fixed MDIO Bus"; 217 fmb->mii_bus.name = "Fixed MDIO Bus";
218 fmb->mii_bus.dev = &pdev->dev; 218 fmb->mii_bus.dev = &pdev->dev;
219 fmb->mii_bus.read = &fixed_mdio_read; 219 fmb->mii_bus.read = &fixed_mdio_read;
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index f4c4fd85425f..8b1121b02f98 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -86,35 +86,55 @@ struct phy_device* phy_device_create(struct mii_bus *bus, int addr, int phy_id)
86EXPORT_SYMBOL(phy_device_create); 86EXPORT_SYMBOL(phy_device_create);
87 87
88/** 88/**
89 * get_phy_device - reads the specified PHY device and returns its @phy_device struct 89 * get_phy_id - reads the specified addr for its ID.
90 * @bus: the target MII bus 90 * @bus: the target MII bus
91 * @addr: PHY address on the MII bus 91 * @addr: PHY address on the MII bus
92 * @phy_id: where to store the ID retrieved.
92 * 93 *
93 * Description: Reads the ID registers of the PHY at @addr on the 94 * Description: Reads the ID registers of the PHY at @addr on the
94 * @bus, then allocates and returns the phy_device to represent it. 95 * @bus, stores it in @phy_id and returns zero on success.
95 */ 96 */
96struct phy_device * get_phy_device(struct mii_bus *bus, int addr) 97int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id)
97{ 98{
98 int phy_reg; 99 int phy_reg;
99 u32 phy_id;
100 struct phy_device *dev = NULL;
101 100
102 /* Grab the bits from PHYIR1, and put them 101 /* Grab the bits from PHYIR1, and put them
103 * in the upper half */ 102 * in the upper half */
104 phy_reg = bus->read(bus, addr, MII_PHYSID1); 103 phy_reg = bus->read(bus, addr, MII_PHYSID1);
105 104
106 if (phy_reg < 0) 105 if (phy_reg < 0)
107 return ERR_PTR(phy_reg); 106 return -EIO;
108 107
109 phy_id = (phy_reg & 0xffff) << 16; 108 *phy_id = (phy_reg & 0xffff) << 16;
110 109
111 /* Grab the bits from PHYIR2, and put them in the lower half */ 110 /* Grab the bits from PHYIR2, and put them in the lower half */
112 phy_reg = bus->read(bus, addr, MII_PHYSID2); 111 phy_reg = bus->read(bus, addr, MII_PHYSID2);
113 112
114 if (phy_reg < 0) 113 if (phy_reg < 0)
115 return ERR_PTR(phy_reg); 114 return -EIO;
115
116 *phy_id |= (phy_reg & 0xffff);
117
118 return 0;
119}
120
121/**
122 * get_phy_device - reads the specified PHY device and returns its @phy_device struct
123 * @bus: the target MII bus
124 * @addr: PHY address on the MII bus
125 *
126 * Description: Reads the ID registers of the PHY at @addr on the
127 * @bus, then allocates and returns the phy_device to represent it.
128 */
129struct phy_device * get_phy_device(struct mii_bus *bus, int addr)
130{
131 struct phy_device *dev = NULL;
132 u32 phy_id;
133 int r;
116 134
117 phy_id |= (phy_reg & 0xffff); 135 r = get_phy_id(bus, addr, &phy_id);
136 if (r)
137 return ERR_PTR(r);
118 138
119 /* If the phy_id is all Fs, there is no device there */ 139 /* If the phy_id is all Fs, there is no device there */
120 if (0xffffffff == phy_id) 140 if (0xffffffff == phy_id)
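
The phy_device.c hunk above splits the raw ID read out of get_phy_device() into the new get_phy_id() helper, so callers can fetch a PHY's 32-bit ID without allocating a phy_device. A rough usage sketch against the signatures shown above; the bus pointer, the address, and the surrounding error handling are assumptions for illustration, and the helper's header declaration is outside this hunk:

#include <linux/phy.h>
#include <linux/err.h>
#include <linux/errno.h>

/* Illustrative probe helper; 'bus' and 'addr' come from the caller. */
static int example_probe_phy(struct mii_bus *bus, int addr)
{
	struct phy_device *phydev;
	u32 id;
	int err;

	/* New helper: just fetch the ID (PHYSID1 lands in the upper half). */
	err = get_phy_id(bus, addr, &id);
	if (err)
		return err;

	if (id == 0xffffffff)	/* all Fs: nothing at this address */
		return -ENODEV;

	/* Existing entry point, now built on top of get_phy_id(). */
	phydev = get_phy_device(bus, addr);
	if (IS_ERR(phydev))
		return PTR_ERR(phydev);
	if (!phydev)
		return -ENODEV;

	return 0;
}
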
diff --git a/drivers/net/pppoe.c b/drivers/net/pppoe.c
index ac0ac98b19cd..4fad4ddb3504 100644
--- a/drivers/net/pppoe.c
+++ b/drivers/net/pppoe.c
@@ -301,7 +301,7 @@ static int pppoe_device_event(struct notifier_block *this,
301{ 301{
302 struct net_device *dev = (struct net_device *) ptr; 302 struct net_device *dev = (struct net_device *) ptr;
303 303
304 if (dev->nd_net != &init_net) 304 if (dev_net(dev) != &init_net)
305 return NOTIFY_DONE; 305 return NOTIFY_DONE;
306 306
307 /* Only look at sockets that are using this specific device. */ 307 /* Only look at sockets that are using this specific device. */
@@ -392,7 +392,7 @@ static int pppoe_rcv(struct sk_buff *skb,
392 if (!(skb = skb_share_check(skb, GFP_ATOMIC))) 392 if (!(skb = skb_share_check(skb, GFP_ATOMIC)))
393 goto out; 393 goto out;
394 394
395 if (dev->nd_net != &init_net) 395 if (dev_net(dev) != &init_net)
396 goto drop; 396 goto drop;
397 397
398 if (!pskb_may_pull(skb, sizeof(struct pppoe_hdr))) 398 if (!pskb_may_pull(skb, sizeof(struct pppoe_hdr)))
@@ -424,7 +424,7 @@ static int pppoe_disc_rcv(struct sk_buff *skb,
424 struct pppoe_hdr *ph; 424 struct pppoe_hdr *ph;
425 struct pppox_sock *po; 425 struct pppox_sock *po;
426 426
427 if (dev->nd_net != &init_net) 427 if (dev_net(dev) != &init_net)
428 goto abort; 428 goto abort;
429 429
430 if (!pskb_may_pull(skb, sizeof(struct pppoe_hdr))) 430 if (!pskb_may_pull(skb, sizeof(struct pppoe_hdr)))
diff --git a/drivers/net/ps3_gelic_wireless.c b/drivers/net/ps3_gelic_wireless.c
index c16de5129a71..0d32123085e9 100644
--- a/drivers/net/ps3_gelic_wireless.c
+++ b/drivers/net/ps3_gelic_wireless.c
@@ -87,7 +87,7 @@ static inline int wpa2_capable(void)
87 87
88static inline int precise_ie(void) 88static inline int precise_ie(void)
89{ 89{
90 return 0; /* FIXME */ 90 return (0 <= ps3_compare_firmware_version(2, 2, 0));
91} 91}
92/* 92/*
93 * post_eurus_cmd helpers 93 * post_eurus_cmd helpers
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c
index a6aeb9d60443..b7f7b2227d56 100644
--- a/drivers/net/qla3xxx.c
+++ b/drivers/net/qla3xxx.c
@@ -2472,8 +2472,7 @@ static int ql_send_map(struct ql3_adapter *qdev,
2472 2472
2473 if (seg_cnt == 1) { 2473 if (seg_cnt == 1) {
2474 /* Terminate the last segment. */ 2474 /* Terminate the last segment. */
2475 oal_entry->len = 2475 oal_entry->len |= cpu_to_le32(OAL_LAST_ENTRY);
2476 cpu_to_le32(le32_to_cpu(oal_entry->len) | OAL_LAST_ENTRY);
2477 } else { 2476 } else {
2478 oal = tx_cb->oal; 2477 oal = tx_cb->oal;
2479 for (completed_segs=0; completed_segs<frag_cnt; completed_segs++,seg++) { 2478 for (completed_segs=0; completed_segs<frag_cnt; completed_segs++,seg++) {
@@ -2530,8 +2529,7 @@ static int ql_send_map(struct ql3_adapter *qdev,
2530 frag->size); 2529 frag->size);
2531 } 2530 }
2532 /* Terminate the last segment. */ 2531 /* Terminate the last segment. */
2533 oal_entry->len = 2532 oal_entry->len |= cpu_to_le32(OAL_LAST_ENTRY);
2534 cpu_to_le32(le32_to_cpu(oal_entry->len) | OAL_LAST_ENTRY);
2535 } 2533 }
2536 2534
2537 return NETDEV_TX_OK; 2535 return NETDEV_TX_OK;
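
The qla3xxx change above replaces a read-swap-OR-swap sequence with a direct OR of a pre-swapped constant. The two are equivalent because bitwise OR operates per byte and therefore commutes with the byte swap. A quick stand-alone check of that identity; the helpers below emulate cpu_to_le32()/le32_to_cpu() and are not the kernel macros, and the OAL_LAST_ENTRY value is assumed:

#include <stdio.h>
#include <stdint.h>

/* Portable stand-ins for cpu_to_le32()/le32_to_cpu(): swap iff the host is big-endian. */
static uint32_t swap32(uint32_t v)
{
	return (v >> 24) | ((v >> 8) & 0x0000ff00u) |
	       ((v << 8) & 0x00ff0000u) | (v << 24);
}

static int host_is_big_endian(void)
{
	const uint16_t probe = 1;
	return *(const uint8_t *)&probe == 0;
}

static uint32_t cpu_to_le32_emul(uint32_t v)
{
	return host_is_big_endian() ? swap32(v) : v;
}

#define le32_to_cpu_emul cpu_to_le32_emul	/* the swap is its own inverse */

int main(void)
{
	uint32_t len_le = cpu_to_le32_emul(0x1234);	/* field already in LE byte order */
	uint32_t flag = 0x80000000u;			/* e.g. OAL_LAST_ENTRY (assumed value) */

	uint32_t old_way = cpu_to_le32_emul(le32_to_cpu_emul(len_le) | flag);
	uint32_t new_way = len_le | cpu_to_le32_emul(flag);

	printf("old 0x%08x, new 0x%08x, equal: %d\n", old_way, new_way, old_way == new_way);
	return 0;
}
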
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index c082cf0b1ac6..dcbe01b0ca0d 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -50,6 +50,8 @@
50 * Possible values '1' for enable , '0' for disable. 50 * Possible values '1' for enable , '0' for disable.
51 * Default is '2' - which means disable in promisc mode 51 * Default is '2' - which means disable in promisc mode
52 * and enable in non-promiscuous mode. 52 * and enable in non-promiscuous mode.
53 * multiq: This parameter is used to enable/disable MULTIQUEUE support.
54 * Possible values '1' for enable and '0' for disable. Default is '0'.
53 ************************************************************************/ 55 ************************************************************************/
54 56
55#include <linux/module.h> 57#include <linux/module.h>
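
The 'multiq' knob documented above is declared further down through the driver's own S2IO_PARM_INT() wrapper (S2IO_PARM_INT(multiq, 0)). The wrapper's expansion is not shown in this diff; in plain kernel terms an integer parameter like this looks roughly as follows, with the permission bits here being an assumption:

#include <linux/module.h>
#include <linux/moduleparam.h>

/* Approximate stand-in for S2IO_PARM_INT(multiq, 0): an int knob, default 0. */
static int multiq = 0;
module_param(multiq, int, 0444);
MODULE_PARM_DESC(multiq, "Enable (1) or disable (0) multiqueue Tx support");

At load time it would then be set with something like 'modprobe s2io multiq=1', subject to the CONFIG_NETDEVICES_MULTIQUEUE checks added later in this patch.
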
@@ -386,6 +388,26 @@ static void s2io_vlan_rx_register(struct net_device *dev,
386/* A flag indicating whether 'RX_PA_CFG_STRIP_VLAN_TAG' bit is set or not */ 388/* A flag indicating whether 'RX_PA_CFG_STRIP_VLAN_TAG' bit is set or not */
387static int vlan_strip_flag; 389static int vlan_strip_flag;
388 390
391/* Unregister the vlan */
392static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned long vid)
393{
394 int i;
395 struct s2io_nic *nic = dev->priv;
396 unsigned long flags[MAX_TX_FIFOS];
397 struct mac_info *mac_control = &nic->mac_control;
398 struct config_param *config = &nic->config;
399
400 for (i = 0; i < config->tx_fifo_num; i++)
401 spin_lock_irqsave(&mac_control->fifos[i].tx_lock, flags[i]);
402
403 if (nic->vlgrp)
404 vlan_group_set_device(nic->vlgrp, vid, NULL);
405
406 for (i = config->tx_fifo_num - 1; i >= 0; i--)
407 spin_unlock_irqrestore(&mac_control->fifos[i].tx_lock,
408 flags[i]);
409}
410
389/* 411/*
390 * Constants to be programmed into the Xena's registers, to configure 412 * Constants to be programmed into the Xena's registers, to configure
391 * the XAUI. 413 * the XAUI.
@@ -456,10 +478,9 @@ MODULE_VERSION(DRV_VERSION);
456 478
457 479
458/* Module Loadable parameters. */ 480/* Module Loadable parameters. */
459S2IO_PARM_INT(tx_fifo_num, 1); 481S2IO_PARM_INT(tx_fifo_num, FIFO_DEFAULT_NUM);
460S2IO_PARM_INT(rx_ring_num, 1); 482S2IO_PARM_INT(rx_ring_num, 1);
461 483S2IO_PARM_INT(multiq, 0);
462
463S2IO_PARM_INT(rx_ring_mode, 1); 484S2IO_PARM_INT(rx_ring_mode, 1);
464S2IO_PARM_INT(use_continuous_tx_intrs, 1); 485S2IO_PARM_INT(use_continuous_tx_intrs, 1);
465S2IO_PARM_INT(rmac_pause_time, 0x100); 486S2IO_PARM_INT(rmac_pause_time, 0x100);
@@ -469,6 +490,8 @@ S2IO_PARM_INT(shared_splits, 0);
469S2IO_PARM_INT(tmac_util_period, 5); 490S2IO_PARM_INT(tmac_util_period, 5);
470S2IO_PARM_INT(rmac_util_period, 5); 491S2IO_PARM_INT(rmac_util_period, 5);
471S2IO_PARM_INT(l3l4hdr_size, 128); 492S2IO_PARM_INT(l3l4hdr_size, 128);
493/* 0 is no steering, 1 is Priority steering, 2 is Default steering */
494S2IO_PARM_INT(tx_steering_type, TX_DEFAULT_STEERING);
472/* Frequency of Rx desc syncs expressed as power of 2 */ 495/* Frequency of Rx desc syncs expressed as power of 2 */
473S2IO_PARM_INT(rxsync_frequency, 3); 496S2IO_PARM_INT(rxsync_frequency, 3);
474/* Interrupt type. Values can be 0(INTA), 2(MSI_X) */ 497/* Interrupt type. Values can be 0(INTA), 2(MSI_X) */
@@ -533,6 +556,101 @@ static struct pci_driver s2io_driver = {
533/* A simplifier macro used both by init and free shared_mem Fns(). */ 556/* A simplifier macro used both by init and free shared_mem Fns(). */
534#define TXD_MEM_PAGE_CNT(len, per_each) ((len+per_each - 1) / per_each) 557#define TXD_MEM_PAGE_CNT(len, per_each) ((len+per_each - 1) / per_each)
535 558
559/* netqueue manipulation helper functions */
560static inline void s2io_stop_all_tx_queue(struct s2io_nic *sp)
561{
562 int i;
563#ifdef CONFIG_NETDEVICES_MULTIQUEUE
564 if (sp->config.multiq) {
565 for (i = 0; i < sp->config.tx_fifo_num; i++)
566 netif_stop_subqueue(sp->dev, i);
567 } else
568#endif
569 {
570 for (i = 0; i < sp->config.tx_fifo_num; i++)
571 sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_STOP;
572 netif_stop_queue(sp->dev);
573 }
574}
575
576static inline void s2io_stop_tx_queue(struct s2io_nic *sp, int fifo_no)
577{
578#ifdef CONFIG_NETDEVICES_MULTIQUEUE
579 if (sp->config.multiq)
580 netif_stop_subqueue(sp->dev, fifo_no);
581 else
582#endif
583 {
584 sp->mac_control.fifos[fifo_no].queue_state =
585 FIFO_QUEUE_STOP;
586 netif_stop_queue(sp->dev);
587 }
588}
589
590static inline void s2io_start_all_tx_queue(struct s2io_nic *sp)
591{
592 int i;
593#ifdef CONFIG_NETDEVICES_MULTIQUEUE
594 if (sp->config.multiq) {
595 for (i = 0; i < sp->config.tx_fifo_num; i++)
596 netif_start_subqueue(sp->dev, i);
597 } else
598#endif
599 {
600 for (i = 0; i < sp->config.tx_fifo_num; i++)
601 sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
602 netif_start_queue(sp->dev);
603 }
604}
605
606static inline void s2io_start_tx_queue(struct s2io_nic *sp, int fifo_no)
607{
608#ifdef CONFIG_NETDEVICES_MULTIQUEUE
609 if (sp->config.multiq)
610 netif_start_subqueue(sp->dev, fifo_no);
611 else
612#endif
613 {
614 sp->mac_control.fifos[fifo_no].queue_state =
615 FIFO_QUEUE_START;
616 netif_start_queue(sp->dev);
617 }
618}
619
620static inline void s2io_wake_all_tx_queue(struct s2io_nic *sp)
621{
622 int i;
623#ifdef CONFIG_NETDEVICES_MULTIQUEUE
624 if (sp->config.multiq) {
625 for (i = 0; i < sp->config.tx_fifo_num; i++)
626 netif_wake_subqueue(sp->dev, i);
627 } else
628#endif
629 {
630 for (i = 0; i < sp->config.tx_fifo_num; i++)
631 sp->mac_control.fifos[i].queue_state = FIFO_QUEUE_START;
632 netif_wake_queue(sp->dev);
633 }
634}
635
636static inline void s2io_wake_tx_queue(
637 struct fifo_info *fifo, int cnt, u8 multiq)
638{
639
640#ifdef CONFIG_NETDEVICES_MULTIQUEUE
641 if (multiq) {
642 if (cnt && __netif_subqueue_stopped(fifo->dev, fifo->fifo_no))
643 netif_wake_subqueue(fifo->dev, fifo->fifo_no);
644 } else
645#endif
646 if (cnt && (fifo->queue_state == FIFO_QUEUE_STOP)) {
647 if (netif_queue_stopped(fifo->dev)) {
648 fifo->queue_state = FIFO_QUEUE_START;
649 netif_wake_queue(fifo->dev);
650 }
651 }
652}
653
536/** 654/**
537 * init_shared_mem - Allocation and Initialization of Memory 655 * init_shared_mem - Allocation and Initialization of Memory
538 * @nic: Device private variable. 656 * @nic: Device private variable.
@@ -614,6 +732,7 @@ static int init_shared_mem(struct s2io_nic *nic)
614 mac_control->fifos[i].fifo_no = i; 732 mac_control->fifos[i].fifo_no = i;
615 mac_control->fifos[i].nic = nic; 733 mac_control->fifos[i].nic = nic;
616 mac_control->fifos[i].max_txds = MAX_SKB_FRAGS + 2; 734 mac_control->fifos[i].max_txds = MAX_SKB_FRAGS + 2;
735 mac_control->fifos[i].dev = dev;
617 736
618 for (j = 0; j < page_num; j++) { 737 for (j = 0; j < page_num; j++) {
619 int k = 0; 738 int k = 0;
@@ -2948,7 +3067,7 @@ static void rx_intr_handler(struct ring_info *ring_data)
2948 struct lro *lro = &nic->lro0_n[i]; 3067 struct lro *lro = &nic->lro0_n[i];
2949 if (lro->in_use) { 3068 if (lro->in_use) {
2950 update_L3L4_header(nic, lro); 3069 update_L3L4_header(nic, lro);
2951 queue_rx_frame(lro->parent); 3070 queue_rx_frame(lro->parent, lro->vlan_tag);
2952 clear_lro_session(lro); 3071 clear_lro_session(lro);
2953 } 3072 }
2954 } 3073 }
@@ -2972,10 +3091,10 @@ static void rx_intr_handler(struct ring_info *ring_data)
2972static void tx_intr_handler(struct fifo_info *fifo_data) 3091static void tx_intr_handler(struct fifo_info *fifo_data)
2973{ 3092{
2974 struct s2io_nic *nic = fifo_data->nic; 3093 struct s2io_nic *nic = fifo_data->nic;
2975 struct net_device *dev = (struct net_device *) nic->dev;
2976 struct tx_curr_get_info get_info, put_info; 3094 struct tx_curr_get_info get_info, put_info;
2977 struct sk_buff *skb; 3095 struct sk_buff *skb = NULL;
2978 struct TxD *txdlp; 3096 struct TxD *txdlp;
3097 int pkt_cnt = 0;
2979 unsigned long flags = 0; 3098 unsigned long flags = 0;
2980 u8 err_mask; 3099 u8 err_mask;
2981 3100
@@ -3036,6 +3155,7 @@ static void tx_intr_handler(struct fifo_info *fifo_data)
3036 DBG_PRINT(ERR_DBG, "in Tx Free Intr\n"); 3155 DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
3037 return; 3156 return;
3038 } 3157 }
3158 pkt_cnt++;
3039 3159
3040 /* Updating the statistics block */ 3160 /* Updating the statistics block */
3041 nic->stats.tx_bytes += skb->len; 3161 nic->stats.tx_bytes += skb->len;
@@ -3051,8 +3171,7 @@ static void tx_intr_handler(struct fifo_info *fifo_data)
3051 get_info.offset; 3171 get_info.offset;
3052 } 3172 }
3053 3173
3054 if (netif_queue_stopped(dev)) 3174 s2io_wake_tx_queue(fifo_data, pkt_cnt, nic->config.multiq);
3055 netif_wake_queue(dev);
3056 3175
3057 spin_unlock_irqrestore(&fifo_data->tx_lock, flags); 3176 spin_unlock_irqrestore(&fifo_data->tx_lock, flags);
3058} 3177}
@@ -3933,8 +4052,7 @@ static int s2io_open(struct net_device *dev)
3933 err = -ENODEV; 4052 err = -ENODEV;
3934 goto hw_init_failed; 4053 goto hw_init_failed;
3935 } 4054 }
3936 4055 s2io_start_all_tx_queue(sp);
3937 netif_start_queue(dev);
3938 return 0; 4056 return 0;
3939 4057
3940hw_init_failed: 4058hw_init_failed:
@@ -3979,8 +4097,7 @@ static int s2io_close(struct net_device *dev)
3979 if (!is_s2io_card_up(sp)) 4097 if (!is_s2io_card_up(sp))
3980 return 0; 4098 return 0;
3981 4099
3982 netif_stop_queue(dev); 4100 s2io_stop_all_tx_queue(sp);
3983
3984 /* delete all populated mac entries */ 4101 /* delete all populated mac entries */
3985 for (offset = 1; offset < config->max_mc_addr; offset++) { 4102 for (offset = 1; offset < config->max_mc_addr; offset++) {
3986 tmp64 = do_s2io_read_unicast_mc(sp, offset); 4103 tmp64 = do_s2io_read_unicast_mc(sp, offset);
@@ -4016,11 +4133,12 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
4016 struct TxFIFO_element __iomem *tx_fifo; 4133 struct TxFIFO_element __iomem *tx_fifo;
4017 unsigned long flags = 0; 4134 unsigned long flags = 0;
4018 u16 vlan_tag = 0; 4135 u16 vlan_tag = 0;
4019 int vlan_priority = 0;
4020 struct fifo_info *fifo = NULL; 4136 struct fifo_info *fifo = NULL;
4021 struct mac_info *mac_control; 4137 struct mac_info *mac_control;
4022 struct config_param *config; 4138 struct config_param *config;
4139 int do_spin_lock = 1;
4023 int offload_type; 4140 int offload_type;
4141 int enable_per_list_interrupt = 0;
4024 struct swStat *stats = &sp->mac_control.stats_info->sw_stat; 4142 struct swStat *stats = &sp->mac_control.stats_info->sw_stat;
4025 4143
4026 mac_control = &sp->mac_control; 4144 mac_control = &sp->mac_control;
@@ -4042,15 +4160,67 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
4042 } 4160 }
4043 4161
4044 queue = 0; 4162 queue = 0;
4045 /* Get Fifo number to Transmit based on vlan priority */ 4163 if (sp->vlgrp && vlan_tx_tag_present(skb))
4046 if (sp->vlgrp && vlan_tx_tag_present(skb)) {
4047 vlan_tag = vlan_tx_tag_get(skb); 4164 vlan_tag = vlan_tx_tag_get(skb);
4048 vlan_priority = vlan_tag >> 13; 4165 if (sp->config.tx_steering_type == TX_DEFAULT_STEERING) {
4049 queue = config->fifo_mapping[vlan_priority]; 4166 if (skb->protocol == htons(ETH_P_IP)) {
4167 struct iphdr *ip;
4168 struct tcphdr *th;
4169 ip = ip_hdr(skb);
4170
4171 if ((ip->frag_off & htons(IP_OFFSET|IP_MF)) == 0) {
4172 th = (struct tcphdr *)(((unsigned char *)ip) +
4173 ip->ihl*4);
4174
4175 if (ip->protocol == IPPROTO_TCP) {
4176 queue_len = sp->total_tcp_fifos;
4177 queue = (ntohs(th->source) +
4178 ntohs(th->dest)) &
4179 sp->fifo_selector[queue_len - 1];
4180 if (queue >= queue_len)
4181 queue = queue_len - 1;
4182 } else if (ip->protocol == IPPROTO_UDP) {
4183 queue_len = sp->total_udp_fifos;
4184 queue = (ntohs(th->source) +
4185 ntohs(th->dest)) &
4186 sp->fifo_selector[queue_len - 1];
4187 if (queue >= queue_len)
4188 queue = queue_len - 1;
4189 queue += sp->udp_fifo_idx;
4190 if (skb->len > 1024)
4191 enable_per_list_interrupt = 1;
4192 do_spin_lock = 0;
4193 }
4194 }
4195 }
4196 } else if (sp->config.tx_steering_type == TX_PRIORITY_STEERING)
4197 /* get fifo number based on skb->priority value */
4198 queue = config->fifo_mapping
4199 [skb->priority & (MAX_TX_FIFOS - 1)];
4200 fifo = &mac_control->fifos[queue];
4201
4202 if (do_spin_lock)
4203 spin_lock_irqsave(&fifo->tx_lock, flags);
4204 else {
4205 if (unlikely(!spin_trylock_irqsave(&fifo->tx_lock, flags)))
4206 return NETDEV_TX_LOCKED;
4207 }
4208
4209#ifdef CONFIG_NETDEVICES_MULTIQUEUE
4210 if (sp->config.multiq) {
4211 if (__netif_subqueue_stopped(dev, fifo->fifo_no)) {
4212 spin_unlock_irqrestore(&fifo->tx_lock, flags);
4213 return NETDEV_TX_BUSY;
4214 }
4215 } else
4216#endif
4217 if (unlikely(fifo->queue_state == FIFO_QUEUE_STOP)) {
4218 if (netif_queue_stopped(dev)) {
4219 spin_unlock_irqrestore(&fifo->tx_lock, flags);
4220 return NETDEV_TX_BUSY;
4221 }
4050 } 4222 }
4051 4223
4052 fifo = &mac_control->fifos[queue];
4053 spin_lock_irqsave(&fifo->tx_lock, flags);
4054 put_off = (u16) fifo->tx_curr_put_info.offset; 4224 put_off = (u16) fifo->tx_curr_put_info.offset;
4055 get_off = (u16) fifo->tx_curr_get_info.offset; 4225 get_off = (u16) fifo->tx_curr_get_info.offset;
4056 txdp = (struct TxD *) fifo->list_info[put_off].list_virt_addr; 4226 txdp = (struct TxD *) fifo->list_info[put_off].list_virt_addr;
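
With TX_DEFAULT_STEERING, the hunk above spreads TCP/UDP flows across Tx fifos by summing the two port numbers and masking the sum with a per-size selector. A stand-alone sketch of that arithmetic; the selector mask value and the example ports are assumptions, while the driver takes the mask from sp->fifo_selector[]:

#include <stdio.h>
#include <arpa/inet.h>	/* ntohs(), htons() */

/*
 * Mirrors the default-steering computation in s2io_xmit():
 *   queue = (ntohs(source) + ntohs(dest)) & fifo_selector[queue_len - 1];
 *   if (queue >= queue_len) queue = queue_len - 1;
 */
static int pick_tx_fifo(unsigned short source_be, unsigned short dest_be,
			int queue_len, unsigned int selector_mask)
{
	int queue = (ntohs(source_be) + ntohs(dest_be)) & selector_mask;

	if (queue >= queue_len)
		queue = queue_len - 1;
	return queue;
}

int main(void)
{
	/* Assumed values: 7 TCP fifos, a 0x7 selector mask, example ports. */
	int fifo = pick_tx_fifo(htons(45678), htons(80), 7, 0x7);

	printf("flow 45678->80 lands on fifo %d\n", fifo);
	return 0;
}

In the driver the UDP case additionally offsets the result by sp->udp_fifo_idx and switches to spin_trylock_irqsave(), as shown in the hunk above.
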
@@ -4060,7 +4230,7 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
4060 if (txdp->Host_Control || 4230 if (txdp->Host_Control ||
4061 ((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) { 4231 ((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
4062 DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n"); 4232 DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
4063 netif_stop_queue(dev); 4233 s2io_stop_tx_queue(sp, fifo->fifo_no);
4064 dev_kfree_skb(skb); 4234 dev_kfree_skb(skb);
4065 spin_unlock_irqrestore(&fifo->tx_lock, flags); 4235 spin_unlock_irqrestore(&fifo->tx_lock, flags);
4066 return 0; 4236 return 0;
@@ -4079,8 +4249,10 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
4079 txdp->Control_1 |= TXD_GATHER_CODE_FIRST; 4249 txdp->Control_1 |= TXD_GATHER_CODE_FIRST;
4080 txdp->Control_1 |= TXD_LIST_OWN_XENA; 4250 txdp->Control_1 |= TXD_LIST_OWN_XENA;
4081 txdp->Control_2 |= TXD_INT_NUMBER(fifo->fifo_no); 4251 txdp->Control_2 |= TXD_INT_NUMBER(fifo->fifo_no);
4082 4252 if (enable_per_list_interrupt)
4083 if (sp->vlgrp && vlan_tx_tag_present(skb)) { 4253 if (put_off & (queue_len >> 5))
4254 txdp->Control_2 |= TXD_INT_TYPE_PER_LIST;
4255 if (vlan_tag) {
4084 txdp->Control_2 |= TXD_VLAN_ENABLE; 4256 txdp->Control_2 |= TXD_VLAN_ENABLE;
4085 txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag); 4257 txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
4086 } 4258 }
@@ -4095,11 +4267,12 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
4095 txdp->Control_1 |= TXD_UFO_MSS(ufo_size); 4267 txdp->Control_1 |= TXD_UFO_MSS(ufo_size);
4096 txdp->Control_1 |= TXD_BUFFER0_SIZE(8); 4268 txdp->Control_1 |= TXD_BUFFER0_SIZE(8);
4097#ifdef __BIG_ENDIAN 4269#ifdef __BIG_ENDIAN
4270 /* both variants do cpu_to_be64(be32_to_cpu(...)) */
4098 fifo->ufo_in_band_v[put_off] = 4271 fifo->ufo_in_band_v[put_off] =
4099 (u64)skb_shinfo(skb)->ip6_frag_id; 4272 (__force u64)skb_shinfo(skb)->ip6_frag_id;
4100#else 4273#else
4101 fifo->ufo_in_band_v[put_off] = 4274 fifo->ufo_in_band_v[put_off] =
4102 (u64)skb_shinfo(skb)->ip6_frag_id << 32; 4275 (__force u64)skb_shinfo(skb)->ip6_frag_id << 32;
4103#endif 4276#endif
4104 txdp->Host_Control = (unsigned long)fifo->ufo_in_band_v; 4277 txdp->Host_Control = (unsigned long)fifo->ufo_in_band_v;
4105 txdp->Buffer_Pointer = pci_map_single(sp->pdev, 4278 txdp->Buffer_Pointer = pci_map_single(sp->pdev,
@@ -4166,7 +4339,7 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
4166 DBG_PRINT(TX_DBG, 4339 DBG_PRINT(TX_DBG,
4167 "No free TxDs for xmit, Put: 0x%x Get:0x%x\n", 4340 "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
4168 put_off, get_off); 4341 put_off, get_off);
4169 netif_stop_queue(dev); 4342 s2io_stop_tx_queue(sp, fifo->fifo_no);
4170 } 4343 }
4171 mac_control->stats_info->sw_stat.mem_allocated += skb->truesize; 4344 mac_control->stats_info->sw_stat.mem_allocated += skb->truesize;
4172 dev->trans_start = jiffies; 4345 dev->trans_start = jiffies;
@@ -4178,7 +4351,7 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
4178 return 0; 4351 return 0;
4179pci_map_failed: 4352pci_map_failed:
4180 stats->pci_map_fail_cnt++; 4353 stats->pci_map_fail_cnt++;
4181 netif_stop_queue(dev); 4354 s2io_stop_tx_queue(sp, fifo->fifo_no);
4182 stats->mem_freed += skb->truesize; 4355 stats->mem_freed += skb->truesize;
4183 dev_kfree_skb(skb); 4356 dev_kfree_skb(skb);
4184 spin_unlock_irqrestore(&fifo->tx_lock, flags); 4357 spin_unlock_irqrestore(&fifo->tx_lock, flags);
@@ -4590,7 +4763,7 @@ static void s2io_handle_errors(void * dev_id)
4590 return; 4763 return;
4591 4764
4592reset: 4765reset:
4593 netif_stop_queue(dev); 4766 s2io_stop_all_tx_queue(sp);
4594 schedule_work(&sp->rst_timer_task); 4767 schedule_work(&sp->rst_timer_task);
4595 sw_stat->soft_reset_cnt++; 4768 sw_stat->soft_reset_cnt++;
4596 return; 4769 return;
@@ -6577,16 +6750,15 @@ static int s2io_change_mtu(struct net_device *dev, int new_mtu)
6577 6750
6578 dev->mtu = new_mtu; 6751 dev->mtu = new_mtu;
6579 if (netif_running(dev)) { 6752 if (netif_running(dev)) {
6753 s2io_stop_all_tx_queue(sp);
6580 s2io_card_down(sp); 6754 s2io_card_down(sp);
6581 netif_stop_queue(dev);
6582 ret = s2io_card_up(sp); 6755 ret = s2io_card_up(sp);
6583 if (ret) { 6756 if (ret) {
6584 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n", 6757 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
6585 __FUNCTION__); 6758 __FUNCTION__);
6586 return ret; 6759 return ret;
6587 } 6760 }
6588 if (netif_queue_stopped(dev)) 6761 s2io_wake_all_tx_queue(sp);
6589 netif_wake_queue(dev);
6590 } else { /* Device is down */ 6762 } else { /* Device is down */
6591 struct XENA_dev_config __iomem *bar0 = sp->bar0; 6763 struct XENA_dev_config __iomem *bar0 = sp->bar0;
6592 u64 val64 = new_mtu; 6764 u64 val64 = new_mtu;
@@ -6694,7 +6866,7 @@ static void s2io_set_link(struct work_struct *work)
6694 } else { 6866 } else {
6695 DBG_PRINT(ERR_DBG, "%s: Error: ", dev->name); 6867 DBG_PRINT(ERR_DBG, "%s: Error: ", dev->name);
6696 DBG_PRINT(ERR_DBG, "device is not Quiescent\n"); 6868 DBG_PRINT(ERR_DBG, "device is not Quiescent\n");
6697 netif_stop_queue(dev); 6869 s2io_stop_all_tx_queue(nic);
6698 } 6870 }
6699 } 6871 }
6700 val64 = readq(&bar0->adapter_control); 6872 val64 = readq(&bar0->adapter_control);
@@ -6921,11 +7093,11 @@ static int s2io_add_isr(struct s2io_nic * sp)
6921 if(!(sp->msix_info[i].addr && 7093 if(!(sp->msix_info[i].addr &&
6922 sp->msix_info[i].data)) { 7094 sp->msix_info[i].data)) {
6923 DBG_PRINT(ERR_DBG, "%s @ Addr:0x%llx " 7095 DBG_PRINT(ERR_DBG, "%s @ Addr:0x%llx "
6924 "Data:0x%lx\n",sp->desc[i], 7096 "Data:0x%llx\n",sp->desc[i],
6925 (unsigned long long) 7097 (unsigned long long)
6926 sp->msix_info[i].addr, 7098 sp->msix_info[i].addr,
6927 (unsigned long) 7099 (unsigned long long)
6928 ntohl(sp->msix_info[i].data)); 7100 sp->msix_info[i].data);
6929 } else { 7101 } else {
6930 msix_tx_cnt++; 7102 msix_tx_cnt++;
6931 } 7103 }
@@ -6939,11 +7111,11 @@ static int s2io_add_isr(struct s2io_nic * sp)
6939 if(!(sp->msix_info[i].addr && 7111 if(!(sp->msix_info[i].addr &&
6940 sp->msix_info[i].data)) { 7112 sp->msix_info[i].data)) {
6941 DBG_PRINT(ERR_DBG, "%s @ Addr:0x%llx " 7113 DBG_PRINT(ERR_DBG, "%s @ Addr:0x%llx "
6942 "Data:0x%lx\n",sp->desc[i], 7114 "Data:0x%llx\n",sp->desc[i],
6943 (unsigned long long) 7115 (unsigned long long)
6944 sp->msix_info[i].addr, 7116 sp->msix_info[i].addr,
6945 (unsigned long) 7117 (unsigned long long)
6946 ntohl(sp->msix_info[i].data)); 7118 sp->msix_info[i].data);
6947 } else { 7119 } else {
6948 msix_rx_cnt++; 7120 msix_rx_cnt++;
6949 } 7121 }
@@ -7184,7 +7356,7 @@ static void s2io_restart_nic(struct work_struct *work)
7184 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n", 7356 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
7185 dev->name); 7357 dev->name);
7186 } 7358 }
7187 netif_wake_queue(dev); 7359 s2io_wake_all_tx_queue(sp);
7188 DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n", 7360 DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n",
7189 dev->name); 7361 dev->name);
7190out_unlock: 7362out_unlock:
@@ -7374,7 +7546,8 @@ static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
7374 { 7546 {
7375 lro_append_pkt(sp, lro, 7547 lro_append_pkt(sp, lro,
7376 skb, tcp_len); 7548 skb, tcp_len);
7377 queue_rx_frame(lro->parent); 7549 queue_rx_frame(lro->parent,
7550 lro->vlan_tag);
7378 clear_lro_session(lro); 7551 clear_lro_session(lro);
7379 sp->mac_control.stats_info-> 7552 sp->mac_control.stats_info->
7380 sw_stat.flush_max_pkts++; 7553 sw_stat.flush_max_pkts++;
@@ -7385,7 +7558,8 @@ static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
7385 lro->frags_len; 7558 lro->frags_len;
7386 sp->mac_control.stats_info-> 7559 sp->mac_control.stats_info->
7387 sw_stat.sending_both++; 7560 sw_stat.sending_both++;
7388 queue_rx_frame(lro->parent); 7561 queue_rx_frame(lro->parent,
7562 lro->vlan_tag);
7389 clear_lro_session(lro); 7563 clear_lro_session(lro);
7390 goto send_up; 7564 goto send_up;
7391 case 0: /* sessions exceeded */ 7565 case 0: /* sessions exceeded */
@@ -7411,31 +7585,12 @@ static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
7411 */ 7585 */
7412 skb->ip_summed = CHECKSUM_NONE; 7586 skb->ip_summed = CHECKSUM_NONE;
7413 } 7587 }
7414 } else { 7588 } else
7415 skb->ip_summed = CHECKSUM_NONE; 7589 skb->ip_summed = CHECKSUM_NONE;
7416 } 7590
7417 sp->mac_control.stats_info->sw_stat.mem_freed += skb->truesize; 7591 sp->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
7418 if (!sp->lro) {
7419 skb->protocol = eth_type_trans(skb, dev);
7420 if ((sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2) &&
7421 vlan_strip_flag)) {
7422 /* Queueing the vlan frame to the upper layer */
7423 if (napi)
7424 vlan_hwaccel_receive_skb(skb, sp->vlgrp,
7425 RXD_GET_VLAN_TAG(rxdp->Control_2));
7426 else
7427 vlan_hwaccel_rx(skb, sp->vlgrp,
7428 RXD_GET_VLAN_TAG(rxdp->Control_2));
7429 } else {
7430 if (napi)
7431 netif_receive_skb(skb);
7432 else
7433 netif_rx(skb);
7434 }
7435 } else {
7436send_up: 7592send_up:
7437 queue_rx_frame(skb); 7593 queue_rx_frame(skb, RXD_GET_VLAN_TAG(rxdp->Control_2));
7438 }
7439 dev->last_rx = jiffies; 7594 dev->last_rx = jiffies;
7440aggregate: 7595aggregate:
7441 atomic_dec(&sp->rx_bufs_left[ring_no]); 7596 atomic_dec(&sp->rx_bufs_left[ring_no]);
@@ -7463,6 +7618,7 @@ static void s2io_link(struct s2io_nic * sp, int link)
7463 init_tti(sp, link); 7618 init_tti(sp, link);
7464 if (link == LINK_DOWN) { 7619 if (link == LINK_DOWN) {
7465 DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name); 7620 DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
7621 s2io_stop_all_tx_queue(sp);
7466 netif_carrier_off(dev); 7622 netif_carrier_off(dev);
7467 if(sp->mac_control.stats_info->sw_stat.link_up_cnt) 7623 if(sp->mac_control.stats_info->sw_stat.link_up_cnt)
7468 sp->mac_control.stats_info->sw_stat.link_up_time = 7624 sp->mac_control.stats_info->sw_stat.link_up_time =
@@ -7475,6 +7631,7 @@ static void s2io_link(struct s2io_nic * sp, int link)
7475 jiffies - sp->start_time; 7631 jiffies - sp->start_time;
7476 sp->mac_control.stats_info->sw_stat.link_up_cnt++; 7632 sp->mac_control.stats_info->sw_stat.link_up_cnt++;
7477 netif_carrier_on(dev); 7633 netif_carrier_on(dev);
7634 s2io_wake_all_tx_queue(sp);
7478 } 7635 }
7479 } 7636 }
7480 sp->last_link_state = link; 7637 sp->last_link_state = link;
@@ -7511,20 +7668,48 @@ static void s2io_init_pci(struct s2io_nic * sp)
7511 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd); 7668 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
7512} 7669}
7513 7670
7514static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type) 7671static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type,
7672 u8 *dev_multiq)
7515{ 7673{
7516 if ((tx_fifo_num > MAX_TX_FIFOS) || 7674 if ((tx_fifo_num > MAX_TX_FIFOS) ||
7517 (tx_fifo_num < FIFO_DEFAULT_NUM)) { 7675 (tx_fifo_num < 1)) {
7518 DBG_PRINT(ERR_DBG, "s2io: Requested number of tx fifos " 7676 DBG_PRINT(ERR_DBG, "s2io: Requested number of tx fifos "
7519 "(%d) not supported\n", tx_fifo_num); 7677 "(%d) not supported\n", tx_fifo_num);
7520 tx_fifo_num = 7678
7521 ((tx_fifo_num > MAX_TX_FIFOS)? MAX_TX_FIFOS : 7679 if (tx_fifo_num < 1)
7522 ((tx_fifo_num < FIFO_DEFAULT_NUM) ? FIFO_DEFAULT_NUM : 7680 tx_fifo_num = 1;
7523 tx_fifo_num)); 7681 else
7682 tx_fifo_num = MAX_TX_FIFOS;
7683
7524 DBG_PRINT(ERR_DBG, "s2io: Default to %d ", tx_fifo_num); 7684 DBG_PRINT(ERR_DBG, "s2io: Default to %d ", tx_fifo_num);
7525 DBG_PRINT(ERR_DBG, "tx fifos\n"); 7685 DBG_PRINT(ERR_DBG, "tx fifos\n");
7526 } 7686 }
7527 7687
7688#ifndef CONFIG_NETDEVICES_MULTIQUEUE
7689 if (multiq) {
7690 DBG_PRINT(ERR_DBG, "s2io: Multiqueue support not enabled\n");
7691 multiq = 0;
7692 }
7693#endif
7694 if (multiq)
7695 *dev_multiq = multiq;
7696
7697 if (tx_steering_type && (1 == tx_fifo_num)) {
7698 if (tx_steering_type != TX_DEFAULT_STEERING)
7699 DBG_PRINT(ERR_DBG,
7700 "s2io: Tx steering is not supported with "
7701 "one fifo. Disabling Tx steering.\n");
7702 tx_steering_type = NO_STEERING;
7703 }
7704
7705 if ((tx_steering_type < NO_STEERING) ||
7706 (tx_steering_type > TX_DEFAULT_STEERING)) {
7707 DBG_PRINT(ERR_DBG, "s2io: Requested transmit steering not "
7708 "supported\n");
7709 DBG_PRINT(ERR_DBG, "s2io: Disabling transmit steering\n");
7710 tx_steering_type = NO_STEERING;
7711 }
7712
7528 if ( rx_ring_num > 8) { 7713 if ( rx_ring_num > 8) {
7529 DBG_PRINT(ERR_DBG, "s2io: Requested number of Rx rings not " 7714 DBG_PRINT(ERR_DBG, "s2io: Requested number of Rx rings not "
7530 "supported\n"); 7715 "supported\n");
@@ -7616,9 +7801,11 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7616 struct config_param *config; 7801 struct config_param *config;
7617 int mode; 7802 int mode;
7618 u8 dev_intr_type = intr_type; 7803 u8 dev_intr_type = intr_type;
7804 u8 dev_multiq = 0;
7619 DECLARE_MAC_BUF(mac); 7805 DECLARE_MAC_BUF(mac);
7620 7806
7621 if ((ret = s2io_verify_parm(pdev, &dev_intr_type))) 7807 ret = s2io_verify_parm(pdev, &dev_intr_type, &dev_multiq);
7808 if (ret)
7622 return ret; 7809 return ret;
7623 7810
7624 if ((ret = pci_enable_device(pdev))) { 7811 if ((ret = pci_enable_device(pdev))) {
@@ -7649,7 +7836,11 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7649 pci_disable_device(pdev); 7836 pci_disable_device(pdev);
7650 return -ENODEV; 7837 return -ENODEV;
7651 } 7838 }
7652 7839#ifdef CONFIG_NETDEVICES_MULTIQUEUE
7840 if (dev_multiq)
7841 dev = alloc_etherdev_mq(sizeof(struct s2io_nic), tx_fifo_num);
7842 else
7843#endif
7653 dev = alloc_etherdev(sizeof(struct s2io_nic)); 7844 dev = alloc_etherdev(sizeof(struct s2io_nic));
7654 if (dev == NULL) { 7845 if (dev == NULL) {
7655 DBG_PRINT(ERR_DBG, "Device allocation failed\n"); 7846 DBG_PRINT(ERR_DBG, "Device allocation failed\n");
@@ -7698,17 +7889,45 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7698 config = &sp->config; 7889 config = &sp->config;
7699 7890
7700 config->napi = napi; 7891 config->napi = napi;
7892 config->tx_steering_type = tx_steering_type;
7701 7893
7702 /* Tx side parameters. */ 7894 /* Tx side parameters. */
7703 config->tx_fifo_num = tx_fifo_num; 7895 if (config->tx_steering_type == TX_PRIORITY_STEERING)
7704 for (i = 0; i < MAX_TX_FIFOS; i++) { 7896 config->tx_fifo_num = MAX_TX_FIFOS;
7897 else
7898 config->tx_fifo_num = tx_fifo_num;
7899
7900 /* Initialize the fifos used for tx steering */
7901 if (config->tx_fifo_num < 5) {
7902 if (config->tx_fifo_num == 1)
7903 sp->total_tcp_fifos = 1;
7904 else
7905 sp->total_tcp_fifos = config->tx_fifo_num - 1;
7906 sp->udp_fifo_idx = config->tx_fifo_num - 1;
7907 sp->total_udp_fifos = 1;
7908 sp->other_fifo_idx = sp->total_tcp_fifos - 1;
7909 } else {
7910 sp->total_tcp_fifos = (tx_fifo_num - FIFO_UDP_MAX_NUM -
7911 FIFO_OTHER_MAX_NUM);
7912 sp->udp_fifo_idx = sp->total_tcp_fifos;
7913 sp->total_udp_fifos = FIFO_UDP_MAX_NUM;
7914 sp->other_fifo_idx = sp->udp_fifo_idx + FIFO_UDP_MAX_NUM;
7915 }
7916
7917 config->multiq = dev_multiq;
7918 for (i = 0; i < config->tx_fifo_num; i++) {
7705 config->tx_cfg[i].fifo_len = tx_fifo_len[i]; 7919 config->tx_cfg[i].fifo_len = tx_fifo_len[i];
7706 config->tx_cfg[i].fifo_priority = i; 7920 config->tx_cfg[i].fifo_priority = i;
7707 } 7921 }
7708 7922
7709 /* mapping the QoS priority to the configured fifos */ 7923 /* mapping the QoS priority to the configured fifos */
7710 for (i = 0; i < MAX_TX_FIFOS; i++) 7924 for (i = 0; i < MAX_TX_FIFOS; i++)
7711 config->fifo_mapping[i] = fifo_map[config->tx_fifo_num][i]; 7925 config->fifo_mapping[i] = fifo_map[config->tx_fifo_num - 1][i];
7926
7927 /* map the hashing selector table to the configured fifos */
7928 for (i = 0; i < config->tx_fifo_num; i++)
7929 sp->fifo_selector[i] = fifo_selector[i];
7930
7712 7931
7713 config->tx_intr_type = TXD_INT_TYPE_UTILZ; 7932 config->tx_intr_type = TXD_INT_TYPE_UTILZ;
7714 for (i = 0; i < config->tx_fifo_num; i++) { 7933 for (i = 0; i < config->tx_fifo_num; i++) {
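With steering in use, the fifos are now partitioned by traffic class: below five fifos the last one is reserved for UDP and the rest go to TCP, while from five fifos upward the final FIFO_OTHER_MAX_NUM (1) fifo handles "other" traffic, the FIFO_UDP_MAX_NUM (2) before it handle UDP, and whatever remains is TCP. The userspace reconstruction below only echoes the expressions added above and prints the resulting layout for every legal fifo count.

#include <stdio.h>

#define FIFO_UDP_MAX_NUM	2	/* one fifo for even, one for odd ports */
#define FIFO_OTHER_MAX_NUM	1

struct fifo_layout {
	int total_tcp_fifos;
	int udp_fifo_idx;
	int total_udp_fifos;
	int other_fifo_idx;
};

/* Reproduces the partitioning arithmetic added to s2io_init_nic(). */
static struct fifo_layout partition_fifos(int tx_fifo_num)
{
	struct fifo_layout l;

	if (tx_fifo_num < 5) {
		l.total_tcp_fifos = (tx_fifo_num == 1) ? 1 : tx_fifo_num - 1;
		l.udp_fifo_idx    = tx_fifo_num - 1;
		l.total_udp_fifos = 1;
		l.other_fifo_idx  = l.total_tcp_fifos - 1;
	} else {
		l.total_tcp_fifos = tx_fifo_num - FIFO_UDP_MAX_NUM -
				    FIFO_OTHER_MAX_NUM;
		l.udp_fifo_idx    = l.total_tcp_fifos;
		l.total_udp_fifos = FIFO_UDP_MAX_NUM;
		l.other_fifo_idx  = l.udp_fifo_idx + FIFO_UDP_MAX_NUM;
	}
	return l;
}

int main(void)
{
	printf("fifos  tcp  udp_idx  udp  other_idx\n");
	for (int n = 1; n <= 8; n++) {
		struct fifo_layout l = partition_fifos(n);

		printf("%5d %4d %8d %4d %10d\n", n, l.total_tcp_fifos,
		       l.udp_fifo_idx, l.total_udp_fifos, l.other_fifo_idx);
	}
	return 0;
}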
@@ -7793,6 +8012,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7793 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops); 8012 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
7794 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; 8013 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
7795 dev->vlan_rx_register = s2io_vlan_rx_register; 8014 dev->vlan_rx_register = s2io_vlan_rx_register;
8015 dev->vlan_rx_kill_vid = (void *)s2io_vlan_rx_kill_vid;
7796 8016
7797 /* 8017 /*
7798 * will use eth_mac_addr() for dev->set_mac_address 8018 * will use eth_mac_addr() for dev->set_mac_address
@@ -7813,7 +8033,10 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7813 dev->features |= NETIF_F_UFO; 8033 dev->features |= NETIF_F_UFO;
7814 dev->features |= NETIF_F_HW_CSUM; 8034 dev->features |= NETIF_F_HW_CSUM;
7815 } 8035 }
7816 8036#ifdef CONFIG_NETDEVICES_MULTIQUEUE
8037 if (config->multiq)
8038 dev->features |= NETIF_F_MULTI_QUEUE;
8039#endif
7817 dev->tx_timeout = &s2io_tx_watchdog; 8040 dev->tx_timeout = &s2io_tx_watchdog;
7818 dev->watchdog_timeo = WATCH_DOG_TIMEOUT; 8041 dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
7819 INIT_WORK(&sp->rst_timer_task, s2io_restart_nic); 8042 INIT_WORK(&sp->rst_timer_task, s2io_restart_nic);
@@ -7962,6 +8185,10 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7962 8185
7963 if (napi) 8186 if (napi)
7964 DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name); 8187 DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name);
8188
8189 DBG_PRINT(ERR_DBG, "%s: Using %d Tx fifo(s)\n", dev->name,
8190 sp->config.tx_fifo_num);
8191
7965 switch(sp->config.intr_type) { 8192 switch(sp->config.intr_type) {
7966 case INTA: 8193 case INTA:
7967 DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name); 8194 DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name);
@@ -7970,6 +8197,29 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
7970 DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI-X\n", dev->name); 8197 DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI-X\n", dev->name);
7971 break; 8198 break;
7972 } 8199 }
8200 if (sp->config.multiq) {
8201 for (i = 0; i < sp->config.tx_fifo_num; i++)
8202 mac_control->fifos[i].multiq = config->multiq;
8203 DBG_PRINT(ERR_DBG, "%s: Multiqueue support enabled\n",
8204 dev->name);
8205 } else
8206 DBG_PRINT(ERR_DBG, "%s: Multiqueue support disabled\n",
8207 dev->name);
8208
8209 switch (sp->config.tx_steering_type) {
8210 case NO_STEERING:
8211 DBG_PRINT(ERR_DBG, "%s: No steering enabled for"
8212 " transmit\n", dev->name);
8213 break;
8214 case TX_PRIORITY_STEERING:
8215 DBG_PRINT(ERR_DBG, "%s: Priority steering enabled for"
8216 " transmit\n", dev->name);
8217 break;
8218 case TX_DEFAULT_STEERING:
8219 DBG_PRINT(ERR_DBG, "%s: Default steering enabled for"
8220 " transmit\n", dev->name);
8221 }
8222
7973 if (sp->lro) 8223 if (sp->lro)
7974 DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n", 8224 DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n",
7975 dev->name); 8225 dev->name);
@@ -8064,7 +8314,8 @@ module_init(s2io_starter);
8064module_exit(s2io_closer); 8314module_exit(s2io_closer);
8065 8315
8066static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip, 8316static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
8067 struct tcphdr **tcp, struct RxD_t *rxdp) 8317 struct tcphdr **tcp, struct RxD_t *rxdp,
8318 struct s2io_nic *sp)
8068{ 8319{
8069 int ip_off; 8320 int ip_off;
8070 u8 l2_type = (u8)((rxdp->Control_1 >> 37) & 0x7), ip_len; 8321 u8 l2_type = (u8)((rxdp->Control_1 >> 37) & 0x7), ip_len;
@@ -8075,19 +8326,20 @@ static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
8075 return -1; 8326 return -1;
8076 } 8327 }
8077 8328
8078 /* TODO: 8329 /* Checking for DIX type or DIX type with VLAN */
8079 * By default the VLAN field in the MAC is stripped by the card, if this 8330 if ((l2_type == 0)
8080 * feature is turned off in rx_pa_cfg register, then the ip_off field 8331 || (l2_type == 4)) {
8081 * has to be shifted by a further 2 bytes 8332 ip_off = HEADER_ETHERNET_II_802_3_SIZE;
8082 */ 8333 /*
8083 switch (l2_type) { 8334 * If vlan stripping is disabled and the frame is VLAN tagged,
8084 case 0: /* DIX type */ 8335 * shift the offset by the VLAN header size bytes.
8085 case 4: /* DIX type with VLAN */ 8336 */
8086 ip_off = HEADER_ETHERNET_II_802_3_SIZE; 8337 if ((!vlan_strip_flag) &&
8087 break; 8338 (rxdp->Control_1 & RXD_FRAME_VLAN_TAG))
8339 ip_off += HEADER_VLAN_SIZE;
8340 } else {
8088 /* LLC, SNAP etc are considered non-mergeable */ 8341 /* LLC, SNAP etc are considered non-mergeable */
8089 default: 8342 return -1;
8090 return -1;
8091 } 8343 }
8092 8344
8093 *ip = (struct iphdr *)((u8 *)buffer + ip_off); 8345 *ip = (struct iphdr *)((u8 *)buffer + ip_off);
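The rewrite above drops the old switch on l2_type together with its TODO about stripped VLAN tags and makes the rule explicit: DIX frames (l2_type 0 or 4) place the IP header at HEADER_ETHERNET_II_802_3_SIZE, the offset grows by HEADER_VLAN_SIZE when stripping is disabled and the descriptor flags a tagged frame, and LLC/SNAP frames stay non-mergeable. A standalone sketch of that offset computation follows; the 14- and 4-byte header sizes are the usual Ethernet/802.1Q values, assumed here rather than copied from s2io.h.

#include <stdio.h>

#define HEADER_ETHERNET_II_802_3_SIZE	14	/* assumed DIX header size */
#define HEADER_VLAN_SIZE		4	/* assumed 802.1Q tag size */

/* Returns the IP header offset for an LRO candidate, or -1 when the frame
 * is not mergeable (LLC/SNAP). Mirrors the new check_L2_lro_capable() path. */
static int lro_ip_offset(unsigned int l2_type, int vlan_strip_enabled,
			 int frame_has_vlan_tag)
{
	int ip_off;

	if (l2_type != 0 && l2_type != 4)	/* neither DIX nor DIX+VLAN */
		return -1;

	ip_off = HEADER_ETHERNET_II_802_3_SIZE;
	if (!vlan_strip_enabled && frame_has_vlan_tag)
		ip_off += HEADER_VLAN_SIZE;
	return ip_off;
}

int main(void)
{
	printf("plain DIX:           %d\n", lro_ip_offset(0, 1, 0));
	printf("tagged, strip off:   %d\n", lro_ip_offset(4, 0, 1));
	printf("LLC/SNAP (rejected): %d\n", lro_ip_offset(2, 1, 0));
	return 0;
}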
@@ -8114,7 +8366,7 @@ static inline int get_l4_pyld_length(struct iphdr *ip, struct tcphdr *tcp)
8114} 8366}
8115 8367
8116static void initiate_new_session(struct lro *lro, u8 *l2h, 8368static void initiate_new_session(struct lro *lro, u8 *l2h,
8117 struct iphdr *ip, struct tcphdr *tcp, u32 tcp_pyld_len) 8369 struct iphdr *ip, struct tcphdr *tcp, u32 tcp_pyld_len, u16 vlan_tag)
8118{ 8370{
8119 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__); 8371 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
8120 lro->l2h = l2h; 8372 lro->l2h = l2h;
@@ -8125,6 +8377,7 @@ static void initiate_new_session(struct lro *lro, u8 *l2h,
8125 lro->sg_num = 1; 8377 lro->sg_num = 1;
8126 lro->total_len = ntohs(ip->tot_len); 8378 lro->total_len = ntohs(ip->tot_len);
8127 lro->frags_len = 0; 8379 lro->frags_len = 0;
8380 lro->vlan_tag = vlan_tag;
8128 /* 8381 /*
8129 * check if we saw TCP timestamp. Other consistency checks have 8382 * check if we saw TCP timestamp. Other consistency checks have
8130 * already been done. 8383 * already been done.
@@ -8256,15 +8509,16 @@ s2io_club_tcp_session(u8 *buffer, u8 **tcp, u32 *tcp_len, struct lro **lro,
8256 struct iphdr *ip; 8509 struct iphdr *ip;
8257 struct tcphdr *tcph; 8510 struct tcphdr *tcph;
8258 int ret = 0, i; 8511 int ret = 0, i;
8512 u16 vlan_tag = 0;
8259 8513
8260 if (!(ret = check_L2_lro_capable(buffer, &ip, (struct tcphdr **)tcp, 8514 if (!(ret = check_L2_lro_capable(buffer, &ip, (struct tcphdr **)tcp,
8261 rxdp))) { 8515 rxdp, sp))) {
8262 DBG_PRINT(INFO_DBG,"IP Saddr: %x Daddr: %x\n", 8516 DBG_PRINT(INFO_DBG,"IP Saddr: %x Daddr: %x\n",
8263 ip->saddr, ip->daddr); 8517 ip->saddr, ip->daddr);
8264 } else { 8518 } else
8265 return ret; 8519 return ret;
8266 }
8267 8520
8521 vlan_tag = RXD_GET_VLAN_TAG(rxdp->Control_2);
8268 tcph = (struct tcphdr *)*tcp; 8522 tcph = (struct tcphdr *)*tcp;
8269 *tcp_len = get_l4_pyld_length(ip, tcph); 8523 *tcp_len = get_l4_pyld_length(ip, tcph);
8270 for (i=0; i<MAX_LRO_SESSIONS; i++) { 8524 for (i=0; i<MAX_LRO_SESSIONS; i++) {
@@ -8324,7 +8578,8 @@ s2io_club_tcp_session(u8 *buffer, u8 **tcp, u32 *tcp_len, struct lro **lro,
8324 8578
8325 switch (ret) { 8579 switch (ret) {
8326 case 3: 8580 case 3:
8327 initiate_new_session(*lro, buffer, ip, tcph, *tcp_len); 8581 initiate_new_session(*lro, buffer, ip, tcph, *tcp_len,
8582 vlan_tag);
8328 break; 8583 break;
8329 case 2: 8584 case 2:
8330 update_L3L4_header(sp, *lro); 8585 update_L3L4_header(sp, *lro);
@@ -8352,15 +8607,25 @@ static void clear_lro_session(struct lro *lro)
8352 memset(lro, 0, lro_struct_size); 8607 memset(lro, 0, lro_struct_size);
8353} 8608}
8354 8609
8355static void queue_rx_frame(struct sk_buff *skb) 8610static void queue_rx_frame(struct sk_buff *skb, u16 vlan_tag)
8356{ 8611{
8357 struct net_device *dev = skb->dev; 8612 struct net_device *dev = skb->dev;
8613 struct s2io_nic *sp = dev->priv;
8358 8614
8359 skb->protocol = eth_type_trans(skb, dev); 8615 skb->protocol = eth_type_trans(skb, dev);
8360 if (napi) 8616 if (sp->vlgrp && vlan_tag
8361 netif_receive_skb(skb); 8617 && (vlan_strip_flag)) {
8362 else 8618 /* Queueing the vlan frame to the upper layer */
8363 netif_rx(skb); 8619 if (sp->config.napi)
8620 vlan_hwaccel_receive_skb(skb, sp->vlgrp, vlan_tag);
8621 else
8622 vlan_hwaccel_rx(skb, sp->vlgrp, vlan_tag);
8623 } else {
8624 if (sp->config.napi)
8625 netif_receive_skb(skb);
8626 else
8627 netif_rx(skb);
8628 }
8364} 8629}
8365 8630
8366static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro, 8631static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro,
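queue_rx_frame() now receives the VLAN tag pulled from the descriptor and, when a vlan group is registered and stripping is on, hands tagged frames to the VLAN-acceleration receive paths (vlan_hwaccel_receive_skb() under NAPI, vlan_hwaccel_rx() otherwise); untagged traffic keeps going through netif_receive_skb()/netif_rx(). The decision table below is a userspace sketch of that dispatch with the four kernel receive calls reduced to printable names.

#include <stdio.h>

/* Which receive path a frame would take; names stand in for kernel calls. */
static const char *rx_path(int napi, int have_vlgrp, int vlan_strip_flag,
			   unsigned int vlan_tag)
{
	if (have_vlgrp && vlan_tag && vlan_strip_flag)
		return napi ? "vlan_hwaccel_receive_skb()"
			    : "vlan_hwaccel_rx()";
	return napi ? "netif_receive_skb()" : "netif_rx()";
}

int main(void)
{
	printf("napi, tagged, strip on : %s\n", rx_path(1, 1, 1, 100));
	printf("irq,  tagged, strip on : %s\n", rx_path(0, 1, 1, 100));
	printf("napi, untagged         : %s\n", rx_path(1, 1, 1, 0));
	printf("napi, tagged, no vlgrp : %s\n", rx_path(1, 0, 1, 100));
	return 0;
}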
diff --git a/drivers/net/s2io.h b/drivers/net/s2io.h
index 64b88eb48287..e68fdf7e4260 100644
--- a/drivers/net/s2io.h
+++ b/drivers/net/s2io.h
@@ -360,7 +360,10 @@ struct stat_block {
360#define MAX_TX_FIFOS 8 360#define MAX_TX_FIFOS 8
361#define MAX_RX_RINGS 8 361#define MAX_RX_RINGS 8
362 362
363#define FIFO_DEFAULT_NUM 1 363#define FIFO_DEFAULT_NUM 5
 364#define FIFO_UDP_MAX_NUM 2 /* 0 - even, 1 - odd ports */
365#define FIFO_OTHER_MAX_NUM 1
366
364 367
365#define MAX_RX_DESC_1 (MAX_RX_RINGS * MAX_RX_BLOCKS_PER_RING * 127 ) 368#define MAX_RX_DESC_1 (MAX_RX_RINGS * MAX_RX_BLOCKS_PER_RING * 127 )
366#define MAX_RX_DESC_2 (MAX_RX_RINGS * MAX_RX_BLOCKS_PER_RING * 85 ) 369#define MAX_RX_DESC_2 (MAX_RX_RINGS * MAX_RX_BLOCKS_PER_RING * 85 )
@@ -379,6 +382,8 @@ static int fifo_map[][MAX_TX_FIFOS] = {
379 {0, 1, 2, 3, 4, 5, 6, 7}, 382 {0, 1, 2, 3, 4, 5, 6, 7},
380}; 383};
381 384
385static u16 fifo_selector[MAX_TX_FIFOS] = {0, 1, 3, 3, 7, 7, 7, 7};
386
382/* Maintains Per FIFO related information. */ 387/* Maintains Per FIFO related information. */
383struct tx_fifo_config { 388struct tx_fifo_config {
384#define MAX_AVAILABLE_TXDS 8192 389#define MAX_AVAILABLE_TXDS 8192
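fifo_selector[] is copied into sp->fifo_selector[] at probe time; each entry (0, 1, 3, 3, 7, 7, 7, 7) is the largest all-ones mask that does not exceed the corresponding fifo count, so a flow value can be reduced to a fifo index with an AND instead of a modulo. The transmit-side hash that feeds it lies outside the hunks shown here, so the sketch below only demonstrates the mask arithmetic on made-up port sums, including the clamp needed when the fifo count is not a power of two.

#include <stdio.h>

/* Copy of the mask table added to s2io.h. */
static unsigned short fifo_selector[8] = {0, 1, 3, 3, 7, 7, 7, 7};

/* Reduce a flow value to a fifo index for 'nfifos' configured fifos. */
static unsigned int pick_fifo(unsigned int flow, unsigned int nfifos)
{
	unsigned int q = flow & fifo_selector[nfifos - 1];

	if (q >= nfifos)	/* mask can exceed the count for 3, 5, 6, 7 */
		q = nfifos - 1;
	return q;
}

int main(void)
{
	unsigned int flows[] = {80 + 34567, 443 + 51234, 53 + 1024};

	for (unsigned int n = 1; n <= 8; n++) {
		printf("nfifos=%u:", n);
		for (int i = 0; i < 3; i++)
			printf(" %u", pick_fifo(flows[i], n));
		printf("\n");
	}
	return 0;
}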
@@ -431,6 +436,12 @@ struct config_param {
431/* Tx Side */ 436/* Tx Side */
432 u32 tx_fifo_num; /*Number of Tx FIFOs */ 437 u32 tx_fifo_num; /*Number of Tx FIFOs */
433 438
439 /* 0-No steering, 1-Priority steering, 2-Default fifo map */
440#define NO_STEERING 0
441#define TX_PRIORITY_STEERING 0x1
442#define TX_DEFAULT_STEERING 0x2
443 u8 tx_steering_type;
444
434 u8 fifo_mapping[MAX_TX_FIFOS]; 445 u8 fifo_mapping[MAX_TX_FIFOS];
435 struct tx_fifo_config tx_cfg[MAX_TX_FIFOS]; /*Per-Tx FIFO config */ 446 struct tx_fifo_config tx_cfg[MAX_TX_FIFOS]; /*Per-Tx FIFO config */
436 u32 max_txds; /*Max no. of Tx buffer descriptor per TxDL */ 447 u32 max_txds; /*Max no. of Tx buffer descriptor per TxDL */
@@ -464,6 +475,7 @@ struct config_param {
464 int max_mc_addr; /* xena=64 herc=256 */ 475 int max_mc_addr; /* xena=64 herc=256 */
465 int max_mac_addr; /* xena=16 herc=64 */ 476 int max_mac_addr; /* xena=16 herc=64 */
466 int mc_start_offset; /* xena=16 herc=64 */ 477 int mc_start_offset; /* xena=16 herc=64 */
478 u8 multiq;
467}; 479};
468 480
469/* Structure representing MAC Addrs */ 481/* Structure representing MAC Addrs */
@@ -534,6 +546,7 @@ struct RxD_t {
534#define RXD_OWN_XENA s2BIT(7) 546#define RXD_OWN_XENA s2BIT(7)
535#define RXD_T_CODE (s2BIT(12)|s2BIT(13)|s2BIT(14)|s2BIT(15)) 547#define RXD_T_CODE (s2BIT(12)|s2BIT(13)|s2BIT(14)|s2BIT(15))
536#define RXD_FRAME_PROTO vBIT(0xFFFF,24,8) 548#define RXD_FRAME_PROTO vBIT(0xFFFF,24,8)
549#define RXD_FRAME_VLAN_TAG s2BIT(24)
537#define RXD_FRAME_PROTO_IPV4 s2BIT(27) 550#define RXD_FRAME_PROTO_IPV4 s2BIT(27)
538#define RXD_FRAME_PROTO_IPV6 s2BIT(28) 551#define RXD_FRAME_PROTO_IPV6 s2BIT(28)
539#define RXD_FRAME_IP_FRAG s2BIT(29) 552#define RXD_FRAME_IP_FRAG s2BIT(29)
@@ -720,6 +733,15 @@ struct fifo_info {
720 * the buffers 733 * the buffers
721 */ 734 */
722 struct tx_curr_get_info tx_curr_get_info; 735 struct tx_curr_get_info tx_curr_get_info;
736#define FIFO_QUEUE_START 0
737#define FIFO_QUEUE_STOP 1
738 int queue_state;
739
740 /* copy of sp->dev pointer */
741 struct net_device *dev;
742
743 /* copy of multiq status */
744 u8 multiq;
723 745
724 /* Per fifo lock */ 746 /* Per fifo lock */
725 spinlock_t tx_lock; 747 spinlock_t tx_lock;
@@ -808,10 +830,11 @@ struct lro {
808 int sg_num; 830 int sg_num;
809 int in_use; 831 int in_use;
810 __be16 window; 832 __be16 window;
833 u16 vlan_tag;
811 u32 cur_tsval; 834 u32 cur_tsval;
812 __be32 cur_tsecr; 835 __be32 cur_tsecr;
813 u8 saw_ts; 836 u8 saw_ts;
814}; 837} ____cacheline_aligned;
815 838
816/* These flags represent the devices temporary state */ 839/* These flags represent the devices temporary state */
817enum s2io_device_state_t 840enum s2io_device_state_t
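Besides recording the frame's VLAN tag, the lro structure is now declared ____cacheline_aligned, so each LRO session slot starts on its own cache line and updates to neighbouring sessions do not bounce the same line. The toy program below shows the effect of that attribute on array stride, assuming a 64-byte line (the kernel macro expands to the architecture's L1 cache line size) and using a trimmed, made-up subset of the fields.

#include <stdio.h>
#include <stdalign.h>

#define CACHELINE 64	/* assumed; the kernel uses L1_CACHE_BYTES */

struct lro_plain {
	void *l2h, *iph, *tcph;
	unsigned int tcp_next_seq;
	unsigned short vlan_tag;
};

struct lro_aligned {
	void *l2h, *iph, *tcph;
	unsigned int tcp_next_seq;
	unsigned short vlan_tag;
} __attribute__((aligned(CACHELINE)));

int main(void)
{
	struct lro_aligned sessions[4];

	printf("plain:   size=%zu align=%zu\n",
	       sizeof(struct lro_plain), alignof(struct lro_plain));
	printf("aligned: size=%zu align=%zu\n",
	       sizeof(struct lro_aligned), alignof(struct lro_aligned));
	printf("array stride with alignment: %zu bytes\n",
	       (size_t)((char *)&sessions[1] - (char *)&sessions[0]));
	return 0;
}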
@@ -885,6 +908,27 @@ struct s2io_nic {
885 */ 908 */
886 int rx_csum; 909 int rx_csum;
887 910
911 /* Below variables are used for fifo selection to transmit a packet */
912 u16 fifo_selector[MAX_TX_FIFOS];
913
914 /* Total fifos for tcp packets */
915 u8 total_tcp_fifos;
916
917 /*
918 * Beginning index of udp for udp packets
919 * Value will be equal to
920 * (tx_fifo_num - FIFO_UDP_MAX_NUM - FIFO_OTHER_MAX_NUM)
921 */
922 u8 udp_fifo_idx;
923
924 u8 total_udp_fifos;
925
926 /*
927 * Beginning index of fifo for all other packets
928 * Value will be equal to (tx_fifo_num - FIFO_OTHER_MAX_NUM)
929 */
930 u8 other_fifo_idx;
931
888 /* after blink, the adapter must be restored with original 932 /* after blink, the adapter must be restored with original
889 * values. 933 * values.
890 */ 934 */
@@ -1087,7 +1131,7 @@ static int
1087s2io_club_tcp_session(u8 *buffer, u8 **tcp, u32 *tcp_len, struct lro **lro, 1131s2io_club_tcp_session(u8 *buffer, u8 **tcp, u32 *tcp_len, struct lro **lro,
1088 struct RxD_t *rxdp, struct s2io_nic *sp); 1132 struct RxD_t *rxdp, struct s2io_nic *sp);
1089static void clear_lro_session(struct lro *lro); 1133static void clear_lro_session(struct lro *lro);
1090static void queue_rx_frame(struct sk_buff *skb); 1134static void queue_rx_frame(struct sk_buff *skb, u16 vlan_tag);
1091static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro); 1135static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro);
1092static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro, 1136static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro,
1093 struct sk_buff *skb, u32 tcp_len); 1137 struct sk_buff *skb, u32 tcp_len);
diff --git a/drivers/net/sb1000.c b/drivers/net/sb1000.c
index 487f9d2ac5b4..5986cec17f19 100644
--- a/drivers/net/sb1000.c
+++ b/drivers/net/sb1000.c
@@ -88,31 +88,31 @@ static int sb1000_close(struct net_device *dev);
88 88
89 89
90/* SB1000 hardware routines to be used during open/configuration phases */ 90/* SB1000 hardware routines to be used during open/configuration phases */
91static inline int card_wait_for_busy_clear(const int ioaddr[], 91static int card_wait_for_busy_clear(const int ioaddr[],
92 const char* name); 92 const char* name);
93static inline int card_wait_for_ready(const int ioaddr[], const char* name, 93static int card_wait_for_ready(const int ioaddr[], const char* name,
94 unsigned char in[]); 94 unsigned char in[]);
95static int card_send_command(const int ioaddr[], const char* name, 95static int card_send_command(const int ioaddr[], const char* name,
96 const unsigned char out[], unsigned char in[]); 96 const unsigned char out[], unsigned char in[]);
97 97
98/* SB1000 hardware routines to be used during frame rx interrupt */ 98/* SB1000 hardware routines to be used during frame rx interrupt */
99static inline int sb1000_wait_for_ready(const int ioaddr[], const char* name); 99static int sb1000_wait_for_ready(const int ioaddr[], const char* name);
100static inline int sb1000_wait_for_ready_clear(const int ioaddr[], 100static int sb1000_wait_for_ready_clear(const int ioaddr[],
101 const char* name); 101 const char* name);
102static inline void sb1000_send_command(const int ioaddr[], const char* name, 102static void sb1000_send_command(const int ioaddr[], const char* name,
103 const unsigned char out[]); 103 const unsigned char out[]);
104static inline void sb1000_read_status(const int ioaddr[], unsigned char in[]); 104static void sb1000_read_status(const int ioaddr[], unsigned char in[]);
105static inline void sb1000_issue_read_command(const int ioaddr[], 105static void sb1000_issue_read_command(const int ioaddr[],
106 const char* name); 106 const char* name);
107 107
108/* SB1000 commands for open/configuration */ 108/* SB1000 commands for open/configuration */
109static inline int sb1000_reset(const int ioaddr[], const char* name); 109static int sb1000_reset(const int ioaddr[], const char* name);
110static inline int sb1000_check_CRC(const int ioaddr[], const char* name); 110static int sb1000_check_CRC(const int ioaddr[], const char* name);
111static inline int sb1000_start_get_set_command(const int ioaddr[], 111static inline int sb1000_start_get_set_command(const int ioaddr[],
112 const char* name); 112 const char* name);
113static inline int sb1000_end_get_set_command(const int ioaddr[], 113static int sb1000_end_get_set_command(const int ioaddr[],
114 const char* name); 114 const char* name);
115static inline int sb1000_activate(const int ioaddr[], const char* name); 115static int sb1000_activate(const int ioaddr[], const char* name);
116static int sb1000_get_firmware_version(const int ioaddr[], 116static int sb1000_get_firmware_version(const int ioaddr[],
117 const char* name, unsigned char version[], int do_end); 117 const char* name, unsigned char version[], int do_end);
118static int sb1000_get_frequency(const int ioaddr[], const char* name, 118static int sb1000_get_frequency(const int ioaddr[], const char* name,
@@ -125,8 +125,8 @@ static int sb1000_set_PIDs(const int ioaddr[], const char* name,
125 const short PID[]); 125 const short PID[]);
126 126
127/* SB1000 commands for frame rx interrupt */ 127/* SB1000 commands for frame rx interrupt */
128static inline int sb1000_rx(struct net_device *dev); 128static int sb1000_rx(struct net_device *dev);
129static inline void sb1000_error_dpc(struct net_device *dev); 129static void sb1000_error_dpc(struct net_device *dev);
130 130
131static const struct pnp_device_id sb1000_pnp_ids[] = { 131static const struct pnp_device_id sb1000_pnp_ids[] = {
132 { "GIC1000", 0 }, 132 { "GIC1000", 0 },
@@ -250,7 +250,7 @@ static struct pnp_driver sb1000_driver = {
250static const int TimeOutJiffies = (875 * HZ) / 100; 250static const int TimeOutJiffies = (875 * HZ) / 100;
251 251
252/* Card Wait For Busy Clear (cannot be used during an interrupt) */ 252/* Card Wait For Busy Clear (cannot be used during an interrupt) */
253static inline int 253static int
254card_wait_for_busy_clear(const int ioaddr[], const char* name) 254card_wait_for_busy_clear(const int ioaddr[], const char* name)
255{ 255{
256 unsigned char a; 256 unsigned char a;
@@ -274,7 +274,7 @@ card_wait_for_busy_clear(const int ioaddr[], const char* name)
274} 274}
275 275
276/* Card Wait For Ready (cannot be used during an interrupt) */ 276/* Card Wait For Ready (cannot be used during an interrupt) */
277static inline int 277static int
278card_wait_for_ready(const int ioaddr[], const char* name, unsigned char in[]) 278card_wait_for_ready(const int ioaddr[], const char* name, unsigned char in[])
279{ 279{
280 unsigned char a; 280 unsigned char a;
@@ -354,7 +354,7 @@ card_send_command(const int ioaddr[], const char* name,
354static const int Sb1000TimeOutJiffies = 7 * HZ; 354static const int Sb1000TimeOutJiffies = 7 * HZ;
355 355
356/* Card Wait For Ready (to be used during frame rx) */ 356/* Card Wait For Ready (to be used during frame rx) */
357static inline int 357static int
358sb1000_wait_for_ready(const int ioaddr[], const char* name) 358sb1000_wait_for_ready(const int ioaddr[], const char* name)
359{ 359{
360 unsigned long timeout; 360 unsigned long timeout;
@@ -380,7 +380,7 @@ sb1000_wait_for_ready(const int ioaddr[], const char* name)
380} 380}
381 381
382/* Card Wait For Ready Clear (to be used during frame rx) */ 382/* Card Wait For Ready Clear (to be used during frame rx) */
383static inline int 383static int
384sb1000_wait_for_ready_clear(const int ioaddr[], const char* name) 384sb1000_wait_for_ready_clear(const int ioaddr[], const char* name)
385{ 385{
386 unsigned long timeout; 386 unsigned long timeout;
@@ -405,7 +405,7 @@ sb1000_wait_for_ready_clear(const int ioaddr[], const char* name)
405} 405}
406 406
407/* Card Send Command (to be used during frame rx) */ 407/* Card Send Command (to be used during frame rx) */
408static inline void 408static void
409sb1000_send_command(const int ioaddr[], const char* name, 409sb1000_send_command(const int ioaddr[], const char* name,
410 const unsigned char out[]) 410 const unsigned char out[])
411{ 411{
@@ -422,7 +422,7 @@ sb1000_send_command(const int ioaddr[], const char* name,
422} 422}
423 423
424/* Card Read Status (to be used during frame rx) */ 424/* Card Read Status (to be used during frame rx) */
425static inline void 425static void
426sb1000_read_status(const int ioaddr[], unsigned char in[]) 426sb1000_read_status(const int ioaddr[], unsigned char in[])
427{ 427{
428 in[1] = inb(ioaddr[0] + 1); 428 in[1] = inb(ioaddr[0] + 1);
@@ -434,10 +434,10 @@ sb1000_read_status(const int ioaddr[], unsigned char in[])
434} 434}
435 435
436/* Issue Read Command (to be used during frame rx) */ 436/* Issue Read Command (to be used during frame rx) */
437static inline void 437static void
438sb1000_issue_read_command(const int ioaddr[], const char* name) 438sb1000_issue_read_command(const int ioaddr[], const char* name)
439{ 439{
440 const unsigned char Command0[6] = {0x20, 0x00, 0x00, 0x01, 0x00, 0x00}; 440 static const unsigned char Command0[6] = {0x20, 0x00, 0x00, 0x01, 0x00, 0x00};
441 441
442 sb1000_wait_for_ready_clear(ioaddr, name); 442 sb1000_wait_for_ready_clear(ioaddr, name);
443 outb(0xa0, ioaddr[0] + 6); 443 outb(0xa0, ioaddr[0] + 6);
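Across sb1000.c the fixed 6-byte command templates move from function-local const arrays to static const at the top of each function, and most of the explicit inline hints are dropped. A function-local const array is rebuilt on the stack on every call, whereas a static const one lives once in read-only data; the before/after sketch below shows the same transformation on a stand-in send routine (send_command here is just a printf stub, not the driver's card_send_command).

#include <stdio.h>

static void send_command(const unsigned char out[6])
{
	for (int i = 0; i < 6; i++)
		printf("%02x ", out[i]);
	printf("\n");
}

/* Before: the template is copied onto the stack on every invocation. */
static void issue_read_command_old(void)
{
	const unsigned char Command0[6] = {0x20, 0x00, 0x00, 0x01, 0x00, 0x00};

	send_command(Command0);
}

/* After: a single read-only copy shared by all calls, as in the patch. */
static void issue_read_command_new(void)
{
	static const unsigned char Command0[6] =
		{0x20, 0x00, 0x00, 0x01, 0x00, 0x00};

	send_command(Command0);
}

int main(void)
{
	issue_read_command_old();
	issue_read_command_new();
	return 0;
}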
@@ -450,12 +450,13 @@ sb1000_issue_read_command(const int ioaddr[], const char* name)
450 * SB1000 commands for open/configuration 450 * SB1000 commands for open/configuration
451 */ 451 */
452/* reset SB1000 card */ 452/* reset SB1000 card */
453static inline int 453static int
454sb1000_reset(const int ioaddr[], const char* name) 454sb1000_reset(const int ioaddr[], const char* name)
455{ 455{
456 static const unsigned char Command0[6] = {0x80, 0x16, 0x00, 0x00, 0x00, 0x00};
457
456 unsigned char st[7]; 458 unsigned char st[7];
457 int port, status; 459 int port, status;
458 const unsigned char Command0[6] = {0x80, 0x16, 0x00, 0x00, 0x00, 0x00};
459 460
460 port = ioaddr[1] + 6; 461 port = ioaddr[1] + 6;
461 outb(0x4, port); 462 outb(0x4, port);
@@ -479,12 +480,13 @@ sb1000_reset(const int ioaddr[], const char* name)
479} 480}
480 481
481/* check SB1000 firmware CRC */ 482/* check SB1000 firmware CRC */
482static inline int 483static int
483sb1000_check_CRC(const int ioaddr[], const char* name) 484sb1000_check_CRC(const int ioaddr[], const char* name)
484{ 485{
486 static const unsigned char Command0[6] = {0x80, 0x1f, 0x00, 0x00, 0x00, 0x00};
487
485 unsigned char st[7]; 488 unsigned char st[7];
486 int crc, status; 489 int crc, status;
487 const unsigned char Command0[6] = {0x80, 0x1f, 0x00, 0x00, 0x00, 0x00};
488 490
489 /* check CRC */ 491 /* check CRC */
490 if ((status = card_send_command(ioaddr, name, Command0, st))) 492 if ((status = card_send_command(ioaddr, name, Command0, st)))
@@ -498,32 +500,35 @@ sb1000_check_CRC(const int ioaddr[], const char* name)
498static inline int 500static inline int
499sb1000_start_get_set_command(const int ioaddr[], const char* name) 501sb1000_start_get_set_command(const int ioaddr[], const char* name)
500{ 502{
503 static const unsigned char Command0[6] = {0x80, 0x1b, 0x00, 0x00, 0x00, 0x00};
504
501 unsigned char st[7]; 505 unsigned char st[7];
502 const unsigned char Command0[6] = {0x80, 0x1b, 0x00, 0x00, 0x00, 0x00};
503 506
504 return card_send_command(ioaddr, name, Command0, st); 507 return card_send_command(ioaddr, name, Command0, st);
505} 508}
506 509
507static inline int 510static int
508sb1000_end_get_set_command(const int ioaddr[], const char* name) 511sb1000_end_get_set_command(const int ioaddr[], const char* name)
509{ 512{
513 static const unsigned char Command0[6] = {0x80, 0x1b, 0x02, 0x00, 0x00, 0x00};
514 static const unsigned char Command1[6] = {0x20, 0x00, 0x00, 0x00, 0x00, 0x00};
515
510 unsigned char st[7]; 516 unsigned char st[7];
511 int status; 517 int status;
512 const unsigned char Command0[6] = {0x80, 0x1b, 0x02, 0x00, 0x00, 0x00};
513 const unsigned char Command1[6] = {0x20, 0x00, 0x00, 0x00, 0x00, 0x00};
514 518
515 if ((status = card_send_command(ioaddr, name, Command0, st))) 519 if ((status = card_send_command(ioaddr, name, Command0, st)))
516 return status; 520 return status;
517 return card_send_command(ioaddr, name, Command1, st); 521 return card_send_command(ioaddr, name, Command1, st);
518} 522}
519 523
520static inline int 524static int
521sb1000_activate(const int ioaddr[], const char* name) 525sb1000_activate(const int ioaddr[], const char* name)
522{ 526{
527 static const unsigned char Command0[6] = {0x80, 0x11, 0x00, 0x00, 0x00, 0x00};
528 static const unsigned char Command1[6] = {0x80, 0x16, 0x00, 0x00, 0x00, 0x00};
529
523 unsigned char st[7]; 530 unsigned char st[7];
524 int status; 531 int status;
525 const unsigned char Command0[6] = {0x80, 0x11, 0x00, 0x00, 0x00, 0x00};
526 const unsigned char Command1[6] = {0x80, 0x16, 0x00, 0x00, 0x00, 0x00};
527 532
528 ssleep(1); 533 ssleep(1);
529 if ((status = card_send_command(ioaddr, name, Command0, st))) 534 if ((status = card_send_command(ioaddr, name, Command0, st)))
@@ -544,9 +549,10 @@ static int
544sb1000_get_firmware_version(const int ioaddr[], const char* name, 549sb1000_get_firmware_version(const int ioaddr[], const char* name,
545 unsigned char version[], int do_end) 550 unsigned char version[], int do_end)
546{ 551{
552 static const unsigned char Command0[6] = {0x80, 0x23, 0x00, 0x00, 0x00, 0x00};
553
547 unsigned char st[7]; 554 unsigned char st[7];
548 int status; 555 int status;
549 const unsigned char Command0[6] = {0x80, 0x23, 0x00, 0x00, 0x00, 0x00};
550 556
551 if ((status = sb1000_start_get_set_command(ioaddr, name))) 557 if ((status = sb1000_start_get_set_command(ioaddr, name)))
552 return status; 558 return status;
@@ -566,9 +572,10 @@ sb1000_get_firmware_version(const int ioaddr[], const char* name,
566static int 572static int
567sb1000_get_frequency(const int ioaddr[], const char* name, int* frequency) 573sb1000_get_frequency(const int ioaddr[], const char* name, int* frequency)
568{ 574{
575 static const unsigned char Command0[6] = {0x80, 0x44, 0x00, 0x00, 0x00, 0x00};
576
569 unsigned char st[7]; 577 unsigned char st[7];
570 int status; 578 int status;
571 const unsigned char Command0[6] = {0x80, 0x44, 0x00, 0x00, 0x00, 0x00};
572 579
573 udelay(1000); 580 udelay(1000);
574 if ((status = sb1000_start_get_set_command(ioaddr, name))) 581 if ((status = sb1000_start_get_set_command(ioaddr, name)))
@@ -613,12 +620,13 @@ sb1000_set_frequency(const int ioaddr[], const char* name, int frequency)
613static int 620static int
614sb1000_get_PIDs(const int ioaddr[], const char* name, short PID[]) 621sb1000_get_PIDs(const int ioaddr[], const char* name, short PID[])
615{ 622{
623 static const unsigned char Command0[6] = {0x80, 0x40, 0x00, 0x00, 0x00, 0x00};
624 static const unsigned char Command1[6] = {0x80, 0x41, 0x00, 0x00, 0x00, 0x00};
625 static const unsigned char Command2[6] = {0x80, 0x42, 0x00, 0x00, 0x00, 0x00};
626 static const unsigned char Command3[6] = {0x80, 0x43, 0x00, 0x00, 0x00, 0x00};
627
616 unsigned char st[7]; 628 unsigned char st[7];
617 int status; 629 int status;
618 const unsigned char Command0[6] = {0x80, 0x40, 0x00, 0x00, 0x00, 0x00};
619 const unsigned char Command1[6] = {0x80, 0x41, 0x00, 0x00, 0x00, 0x00};
620 const unsigned char Command2[6] = {0x80, 0x42, 0x00, 0x00, 0x00, 0x00};
621 const unsigned char Command3[6] = {0x80, 0x43, 0x00, 0x00, 0x00, 0x00};
622 630
623 udelay(1000); 631 udelay(1000);
624 if ((status = sb1000_start_get_set_command(ioaddr, name))) 632 if ((status = sb1000_start_get_set_command(ioaddr, name)))
@@ -647,6 +655,8 @@ sb1000_get_PIDs(const int ioaddr[], const char* name, short PID[])
647static int 655static int
648sb1000_set_PIDs(const int ioaddr[], const char* name, const short PID[]) 656sb1000_set_PIDs(const int ioaddr[], const char* name, const short PID[])
649{ 657{
658 static const unsigned char Command4[6] = {0x80, 0x2e, 0x00, 0x00, 0x00, 0x00};
659
650 unsigned char st[7]; 660 unsigned char st[7];
651 short p; 661 short p;
652 int status; 662 int status;
@@ -654,7 +664,6 @@ sb1000_set_PIDs(const int ioaddr[], const char* name, const short PID[])
654 unsigned char Command1[6] = {0x80, 0x32, 0x00, 0x00, 0x00, 0x00}; 664 unsigned char Command1[6] = {0x80, 0x32, 0x00, 0x00, 0x00, 0x00};
655 unsigned char Command2[6] = {0x80, 0x33, 0x00, 0x00, 0x00, 0x00}; 665 unsigned char Command2[6] = {0x80, 0x33, 0x00, 0x00, 0x00, 0x00};
656 unsigned char Command3[6] = {0x80, 0x34, 0x00, 0x00, 0x00, 0x00}; 666 unsigned char Command3[6] = {0x80, 0x34, 0x00, 0x00, 0x00, 0x00};
657 const unsigned char Command4[6] = {0x80, 0x2e, 0x00, 0x00, 0x00, 0x00};
658 667
659 udelay(1000); 668 udelay(1000);
660 if ((status = sb1000_start_get_set_command(ioaddr, name))) 669 if ((status = sb1000_start_get_set_command(ioaddr, name)))
@@ -694,7 +703,7 @@ sb1000_set_PIDs(const int ioaddr[], const char* name, const short PID[])
694} 703}
695 704
696 705
697static inline void 706static void
698sb1000_print_status_buffer(const char* name, unsigned char st[], 707sb1000_print_status_buffer(const char* name, unsigned char st[],
699 unsigned char buffer[], int size) 708 unsigned char buffer[], int size)
700{ 709{
@@ -725,7 +734,7 @@ sb1000_print_status_buffer(const char* name, unsigned char st[],
725/* receive a single frame and assemble datagram 734/* receive a single frame and assemble datagram
726 * (this is the heart of the interrupt routine) 735 * (this is the heart of the interrupt routine)
727 */ 736 */
728static inline int 737static int
729sb1000_rx(struct net_device *dev) 738sb1000_rx(struct net_device *dev)
730{ 739{
731 740
@@ -888,14 +897,15 @@ dropped_frame:
888 return -1; 897 return -1;
889} 898}
890 899
891static inline void 900static void
892sb1000_error_dpc(struct net_device *dev) 901sb1000_error_dpc(struct net_device *dev)
893{ 902{
903 static const unsigned char Command0[6] = {0x80, 0x26, 0x00, 0x00, 0x00, 0x00};
904
894 char *name; 905 char *name;
895 unsigned char st[5]; 906 unsigned char st[5];
896 int ioaddr[2]; 907 int ioaddr[2];
897 struct sb1000_private *lp = netdev_priv(dev); 908 struct sb1000_private *lp = netdev_priv(dev);
898 const unsigned char Command0[6] = {0x80, 0x26, 0x00, 0x00, 0x00, 0x00};
899 const int ErrorDpcCounterInitialize = 200; 909 const int ErrorDpcCounterInitialize = 200;
900 910
901 ioaddr[0] = dev->base_addr; 911 ioaddr[0] = dev->base_addr;
@@ -1077,14 +1087,15 @@ sb1000_start_xmit(struct sk_buff *skb, struct net_device *dev)
1077/* SB1000 interrupt handler. */ 1087/* SB1000 interrupt handler. */
1078static irqreturn_t sb1000_interrupt(int irq, void *dev_id) 1088static irqreturn_t sb1000_interrupt(int irq, void *dev_id)
1079{ 1089{
1090 static const unsigned char Command0[6] = {0x80, 0x2c, 0x00, 0x00, 0x00, 0x00};
1091 static const unsigned char Command1[6] = {0x80, 0x2e, 0x00, 0x00, 0x00, 0x00};
1092
1080 char *name; 1093 char *name;
1081 unsigned char st; 1094 unsigned char st;
1082 int ioaddr[2]; 1095 int ioaddr[2];
1083 struct net_device *dev = dev_id; 1096 struct net_device *dev = dev_id;
1084 struct sb1000_private *lp = netdev_priv(dev); 1097 struct sb1000_private *lp = netdev_priv(dev);
1085 1098
1086 const unsigned char Command0[6] = {0x80, 0x2c, 0x00, 0x00, 0x00, 0x00};
1087 const unsigned char Command1[6] = {0x80, 0x2e, 0x00, 0x00, 0x00, 0x00};
1088 const int MaxRxErrorCount = 6; 1099 const int MaxRxErrorCount = 6;
1089 1100
1090 ioaddr[0] = dev->base_addr; 1101 ioaddr[0] = dev->base_addr;
diff --git a/drivers/net/sb1250-mac.c b/drivers/net/sb1250-mac.c
index 7b53d658e337..888b7dec9866 100644
--- a/drivers/net/sb1250-mac.c
+++ b/drivers/net/sb1250-mac.c
@@ -2374,7 +2374,7 @@ static int sbmac_init(struct platform_device *pldev, long long base)
2374 dev->name, base, print_mac(mac, eaddr)); 2374 dev->name, base, print_mac(mac, eaddr));
2375 2375
2376 sc->mii_bus.name = sbmac_mdio_string; 2376 sc->mii_bus.name = sbmac_mdio_string;
2377 sc->mii_bus.id = idx; 2377 snprintf(sc->mii_bus.id, MII_BUS_ID_SIZE, "%x", idx);
2378 sc->mii_bus.priv = sc; 2378 sc->mii_bus.priv = sc;
2379 sc->mii_bus.read = sbmac_mii_read; 2379 sc->mii_bus.read = sbmac_mii_read;
2380 sc->mii_bus.write = sbmac_mii_write; 2380 sc->mii_bus.write = sbmac_mii_write;
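This one-line change tracks the phylib rework pulled in by the same merge: mii_bus.id is now a fixed-size string rather than an integer, so the bus index is formatted with snprintf() into an MII_BUS_ID_SIZE buffer instead of being assigned directly. The equivalent userspace one-liner is below; the buffer size is an assumption taken from that era's include/linux/phy.h rather than from this diff.

#include <stdio.h>

#define MII_BUS_ID_SIZE	(20 - 3)	/* assumed value from phy.h */

int main(void)
{
	char bus_id[MII_BUS_ID_SIZE];
	unsigned int idx = 1;		/* MAC/MDIO instance number */

	/* Equivalent of: snprintf(sc->mii_bus.id, MII_BUS_ID_SIZE, "%x", idx) */
	snprintf(bus_id, sizeof(bus_id), "%x", idx);
	printf("mii bus id: \"%s\"\n", bus_id);
	return 0;
}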
diff --git a/drivers/net/sc92031.c b/drivers/net/sc92031.c
index 15fcee55284e..f64a860029b7 100644
--- a/drivers/net/sc92031.c
+++ b/drivers/net/sc92031.c
@@ -311,7 +311,6 @@ struct sc92031_priv {
311 311
312 /* for dev->get_stats */ 312 /* for dev->get_stats */
313 long rx_value; 313 long rx_value;
314 struct net_device_stats stats;
315}; 314};
316 315
317/* I don't know which registers can be safely read; however, I can guess 316/* I don't know which registers can be safely read; however, I can guess
@@ -421,7 +420,7 @@ static void _sc92031_tx_clear(struct net_device *dev)
421 420
422 while (priv->tx_head - priv->tx_tail > 0) { 421 while (priv->tx_head - priv->tx_tail > 0) {
423 priv->tx_tail++; 422 priv->tx_tail++;
424 priv->stats.tx_dropped++; 423 dev->stats.tx_dropped++;
425 } 424 }
426 priv->tx_head = priv->tx_tail = 0; 425 priv->tx_head = priv->tx_tail = 0;
427} 426}
@@ -676,27 +675,27 @@ static void _sc92031_tx_tasklet(struct net_device *dev)
676 priv->tx_tail++; 675 priv->tx_tail++;
677 676
678 if (tx_status & TxStatOK) { 677 if (tx_status & TxStatOK) {
679 priv->stats.tx_bytes += tx_status & 0x1fff; 678 dev->stats.tx_bytes += tx_status & 0x1fff;
680 priv->stats.tx_packets++; 679 dev->stats.tx_packets++;
681 /* Note: TxCarrierLost is always asserted at 100mbps. */ 680 /* Note: TxCarrierLost is always asserted at 100mbps. */
682 priv->stats.collisions += (tx_status >> 22) & 0xf; 681 dev->stats.collisions += (tx_status >> 22) & 0xf;
683 } 682 }
684 683
685 if (tx_status & (TxOutOfWindow | TxAborted)) { 684 if (tx_status & (TxOutOfWindow | TxAborted)) {
686 priv->stats.tx_errors++; 685 dev->stats.tx_errors++;
687 686
688 if (tx_status & TxAborted) 687 if (tx_status & TxAborted)
689 priv->stats.tx_aborted_errors++; 688 dev->stats.tx_aborted_errors++;
690 689
691 if (tx_status & TxCarrierLost) 690 if (tx_status & TxCarrierLost)
692 priv->stats.tx_carrier_errors++; 691 dev->stats.tx_carrier_errors++;
693 692
694 if (tx_status & TxOutOfWindow) 693 if (tx_status & TxOutOfWindow)
695 priv->stats.tx_window_errors++; 694 dev->stats.tx_window_errors++;
696 } 695 }
697 696
698 if (tx_status & TxUnderrun) 697 if (tx_status & TxUnderrun)
699 priv->stats.tx_fifo_errors++; 698 dev->stats.tx_fifo_errors++;
700 } 699 }
701 700
702 if (priv->tx_tail != old_tx_tail) 701 if (priv->tx_tail != old_tx_tail)
@@ -704,27 +703,29 @@ static void _sc92031_tx_tasklet(struct net_device *dev)
704 netif_wake_queue(dev); 703 netif_wake_queue(dev);
705} 704}
706 705
707static void _sc92031_rx_tasklet_error(u32 rx_status, 706static void _sc92031_rx_tasklet_error(struct net_device *dev,
708 struct sc92031_priv *priv, unsigned rx_size) 707 u32 rx_status, unsigned rx_size)
709{ 708{
710 if(rx_size > (MAX_ETH_FRAME_SIZE + 4) || rx_size < 16) { 709 if(rx_size > (MAX_ETH_FRAME_SIZE + 4) || rx_size < 16) {
711 priv->stats.rx_errors++; 710 dev->stats.rx_errors++;
712 priv->stats.rx_length_errors++; 711 dev->stats.rx_length_errors++;
713 } 712 }
714 713
715 if (!(rx_status & RxStatesOK)) { 714 if (!(rx_status & RxStatesOK)) {
716 priv->stats.rx_errors++; 715 dev->stats.rx_errors++;
717 716
718 if (rx_status & (RxHugeFrame | RxSmallFrame)) 717 if (rx_status & (RxHugeFrame | RxSmallFrame))
719 priv->stats.rx_length_errors++; 718 dev->stats.rx_length_errors++;
720 719
721 if (rx_status & RxBadAlign) 720 if (rx_status & RxBadAlign)
722 priv->stats.rx_frame_errors++; 721 dev->stats.rx_frame_errors++;
723 722
724 if (!(rx_status & RxCRCOK)) 723 if (!(rx_status & RxCRCOK))
725 priv->stats.rx_crc_errors++; 724 dev->stats.rx_crc_errors++;
726 } else 725 } else {
726 struct sc92031_priv *priv = netdev_priv(dev);
727 priv->rx_loss++; 727 priv->rx_loss++;
728 }
728} 729}
729 730
730static void _sc92031_rx_tasklet(struct net_device *dev) 731static void _sc92031_rx_tasklet(struct net_device *dev)
@@ -783,7 +784,7 @@ static void _sc92031_rx_tasklet(struct net_device *dev)
783 || rx_size > (MAX_ETH_FRAME_SIZE + 4) 784 || rx_size > (MAX_ETH_FRAME_SIZE + 4)
784 || rx_size < 16 785 || rx_size < 16
785 || !(rx_status & RxStatesOK))) { 786 || !(rx_status & RxStatesOK))) {
786 _sc92031_rx_tasklet_error(rx_status, priv, rx_size); 787 _sc92031_rx_tasklet_error(dev, rx_status, rx_size);
787 break; 788 break;
788 } 789 }
789 790
@@ -795,7 +796,7 @@ static void _sc92031_rx_tasklet(struct net_device *dev)
795 796
796 rx_len -= rx_size_align + 4; 797 rx_len -= rx_size_align + 4;
797 798
798 skb = dev_alloc_skb(pkt_size + NET_IP_ALIGN); 799 skb = netdev_alloc_skb(dev, pkt_size + NET_IP_ALIGN);
799 if (unlikely(!skb)) { 800 if (unlikely(!skb)) {
800 if (printk_ratelimit()) 801 if (printk_ratelimit())
801 printk(KERN_ERR "%s: Couldn't allocate a skb_buff for a packet of size %u\n", 802 printk(KERN_ERR "%s: Couldn't allocate a skb_buff for a packet of size %u\n",
@@ -818,11 +819,11 @@ static void _sc92031_rx_tasklet(struct net_device *dev)
818 dev->last_rx = jiffies; 819 dev->last_rx = jiffies;
819 netif_rx(skb); 820 netif_rx(skb);
820 821
821 priv->stats.rx_bytes += pkt_size; 822 dev->stats.rx_bytes += pkt_size;
822 priv->stats.rx_packets++; 823 dev->stats.rx_packets++;
823 824
824 if (rx_status & Rx_Multicast) 825 if (rx_status & Rx_Multicast)
825 priv->stats.multicast++; 826 dev->stats.multicast++;
826 827
827 next: 828 next:
828 rx_ring_offset = (rx_ring_offset + rx_size_align) % RX_BUF_LEN; 829 rx_ring_offset = (rx_ring_offset + rx_size_align) % RX_BUF_LEN;
@@ -835,13 +836,11 @@ static void _sc92031_rx_tasklet(struct net_device *dev)
835 836
836static void _sc92031_link_tasklet(struct net_device *dev) 837static void _sc92031_link_tasklet(struct net_device *dev)
837{ 838{
838 struct sc92031_priv *priv = netdev_priv(dev);
839
840 if (_sc92031_check_media(dev)) 839 if (_sc92031_check_media(dev))
841 netif_wake_queue(dev); 840 netif_wake_queue(dev);
842 else { 841 else {
843 netif_stop_queue(dev); 842 netif_stop_queue(dev);
844 priv->stats.tx_carrier_errors++; 843 dev->stats.tx_carrier_errors++;
845 } 844 }
846} 845}
847 846
@@ -866,11 +865,11 @@ static void sc92031_tasklet(unsigned long data)
866 _sc92031_rx_tasklet(dev); 865 _sc92031_rx_tasklet(dev);
867 866
868 if (intr_status & RxOverflow) 867 if (intr_status & RxOverflow)
869 priv->stats.rx_errors++; 868 dev->stats.rx_errors++;
870 869
871 if (intr_status & TimeOut) { 870 if (intr_status & TimeOut) {
872 priv->stats.rx_errors++; 871 dev->stats.rx_errors++;
873 priv->stats.rx_length_errors++; 872 dev->stats.rx_length_errors++;
874 } 873 }
875 874
876 if (intr_status & (LinkFail | LinkOK)) 875 if (intr_status & (LinkFail | LinkOK))
@@ -936,38 +935,36 @@ static struct net_device_stats *sc92031_get_stats(struct net_device *dev)
936 935
937 if (temp == 0xffff) { 936 if (temp == 0xffff) {
938 priv->rx_value += temp; 937 priv->rx_value += temp;
939 priv->stats.rx_fifo_errors = priv->rx_value; 938 dev->stats.rx_fifo_errors = priv->rx_value;
940 } else { 939 } else
941 priv->stats.rx_fifo_errors = temp + priv->rx_value; 940 dev->stats.rx_fifo_errors = temp + priv->rx_value;
942 }
943 941
944 spin_unlock_bh(&priv->lock); 942 spin_unlock_bh(&priv->lock);
945 } 943 }
946 944
947 return &priv->stats; 945 return &dev->stats;
948} 946}
949 947
950static int sc92031_start_xmit(struct sk_buff *skb, struct net_device *dev) 948static int sc92031_start_xmit(struct sk_buff *skb, struct net_device *dev)
951{ 949{
952 int err = 0;
953 struct sc92031_priv *priv = netdev_priv(dev); 950 struct sc92031_priv *priv = netdev_priv(dev);
954 void __iomem *port_base = priv->port_base; 951 void __iomem *port_base = priv->port_base;
955
956 unsigned len; 952 unsigned len;
957 unsigned entry; 953 unsigned entry;
958 u32 tx_status; 954 u32 tx_status;
959 955
956 if (skb_padto(skb, ETH_ZLEN))
957 return NETDEV_TX_OK;
958
960 if (unlikely(skb->len > TX_BUF_SIZE)) { 959 if (unlikely(skb->len > TX_BUF_SIZE)) {
961 err = -EMSGSIZE; 960 dev->stats.tx_dropped++;
962 priv->stats.tx_dropped++;
963 goto out; 961 goto out;
964 } 962 }
965 963
966 spin_lock(&priv->lock); 964 spin_lock(&priv->lock);
967 965
968 if (unlikely(!netif_carrier_ok(dev))) { 966 if (unlikely(!netif_carrier_ok(dev))) {
969 err = -ENOLINK; 967 dev->stats.tx_dropped++;
970 priv->stats.tx_dropped++;
971 goto out_unlock; 968 goto out_unlock;
972 } 969 }
973 970
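The transmit path above stops hand-padding short frames inside the copy loop and instead calls skb_padto(skb, ETH_ZLEN) at the top of hard_start_xmit; it also returns NETDEV_TX_OK after dropping an oversized or link-down frame rather than freeing the skb and still handing an error code back to the stack. The sketch below models only the length handling with a fake skb and an assumed 1536-byte per-slot buffer; it is a shape illustration, not the driver's ring management.

#include <stdio.h>
#include <string.h>

#define ETH_ZLEN	60	/* minimum Ethernet frame length, without FCS */
#define TX_BUF_SIZE	1536	/* assumed per-slot buffer size */

struct fake_skb { unsigned char data[TX_BUF_SIZE]; unsigned int len; };

/* Stand-in for skb_padto(): zero-extend short frames to 'len' bytes. */
static int fake_skb_padto(struct fake_skb *skb, unsigned int len)
{
	if (skb->len >= len)
		return 0;
	memset(skb->data + skb->len, 0, len - skb->len);
	skb->len = len;
	return 0;
}

/* New-style xmit: pad first, one length check, and the frame is always
 * reported as consumed (NETDEV_TX_OK in the driver). */
static int xmit(struct fake_skb *skb, unsigned long *tx_dropped)
{
	if (fake_skb_padto(skb, ETH_ZLEN))
		return 0;		/* skb already consumed */
	if (skb->len > TX_BUF_SIZE) {
		(*tx_dropped)++;
		return 0;		/* dropped, but still "TX_OK" */
	}
	printf("queued %u byte frame\n", skb->len);
	return 0;
}

int main(void)
{
	struct fake_skb skb = { .len = 18 };
	unsigned long dropped = 0;

	xmit(&skb, &dropped);
	printf("dropped=%lu\n", dropped);
	return 0;
}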
@@ -978,11 +975,6 @@ static int sc92031_start_xmit(struct sk_buff *skb, struct net_device *dev)
978 skb_copy_and_csum_dev(skb, priv->tx_bufs + entry * TX_BUF_SIZE); 975 skb_copy_and_csum_dev(skb, priv->tx_bufs + entry * TX_BUF_SIZE);
979 976
980 len = skb->len; 977 len = skb->len;
981 if (unlikely(len < ETH_ZLEN)) {
982 memset(priv->tx_bufs + entry * TX_BUF_SIZE + len,
983 0, ETH_ZLEN - len);
984 len = ETH_ZLEN;
985 }
986 978
987 wmb(); 979 wmb();
988 980
@@ -1009,7 +1001,7 @@ out_unlock:
1009out: 1001out:
1010 dev_kfree_skb(skb); 1002 dev_kfree_skb(skb);
1011 1003
1012 return err; 1004 return NETDEV_TX_OK;
1013} 1005}
1014 1006
1015static int sc92031_open(struct net_device *dev) 1007static int sc92031_open(struct net_device *dev)
diff --git a/drivers/net/sk98lin/Makefile b/drivers/net/sk98lin/Makefile
deleted file mode 100644
index afd900d5d730..000000000000
--- a/drivers/net/sk98lin/Makefile
+++ /dev/null
@@ -1,87 +0,0 @@
1#
2# Makefile for the SysKonnect SK-98xx device driver.
3#
4
5
6#
7# Standalone driver params
8# SKPARAM += -DSK_KERNEL_24
9# SKPARAM += -DSK_KERNEL_24_26
10# SKPARAM += -DSK_KERNEL_26
11# SKPARAM += -DSK_KERNEL_22_24
12
13obj-$(CONFIG_SK98LIN) += sk98lin.o
14sk98lin-objs := \
15 skge.o \
16 skethtool.o \
17 skdim.o \
18 skaddr.o \
19 skgehwt.o \
20 skgeinit.o \
21 skgepnmi.o \
22 skgesirq.o \
23 ski2c.o \
24 sklm80.o \
25 skqueue.o \
26 skrlmt.o \
27 sktimer.o \
28 skvpd.o \
29 skxmac2.o
30
31# DBGDEF = \
32# -DDEBUG
33
34ifdef DEBUG
35DBGDEF += \
36-DSK_DEBUG_CHKMOD=0x00000000L \
37-DSK_DEBUG_CHKCAT=0x00000000L
38endif
39
40
41# **** possible debug modules for SK_DEBUG_CHKMOD *****************
42# SK_DBGMOD_MERR 0x00000001L /* general module error indication */
43# SK_DBGMOD_HWM 0x00000002L /* Hardware init module */
44# SK_DBGMOD_RLMT 0x00000004L /* RLMT module */
45# SK_DBGMOD_VPD 0x00000008L /* VPD module */
46# SK_DBGMOD_I2C 0x00000010L /* I2C module */
47# SK_DBGMOD_PNMI 0x00000020L /* PNMI module */
48# SK_DBGMOD_CSUM 0x00000040L /* CSUM module */
49# SK_DBGMOD_ADDR 0x00000080L /* ADDR module */
50# SK_DBGMOD_DRV 0x00010000L /* DRV module */
51
52# **** possible debug categories for SK_DEBUG_CHKCAT **************
53# *** common modules ***
54# SK_DBGCAT_INIT 0x00000001L module/driver initialization
55# SK_DBGCAT_CTRL 0x00000002L controlling: add/rmv MCA/MAC and other controls (IOCTL)
56# SK_DBGCAT_ERR 0x00000004L error handling paths
57# SK_DBGCAT_TX 0x00000008L transmit path
58# SK_DBGCAT_RX 0x00000010L receive path
59# SK_DBGCAT_IRQ 0x00000020L general IRQ handling
60# SK_DBGCAT_QUEUE 0x00000040L any queue management
61# SK_DBGCAT_DUMP 0x00000080L large data output e.g. hex dump
62# SK_DBGCAT_FATAL 0x00000100L large data output e.g. hex dump
63
64# *** driver (file skge.c) ***
65# SK_DBGCAT_DRV_ENTRY 0x00010000 entry points
66# SK_DBGCAT_DRV_??? 0x00020000 not used
67# SK_DBGCAT_DRV_MCA 0x00040000 multicast
68# SK_DBGCAT_DRV_TX_PROGRESS 0x00080000 tx path
69# SK_DBGCAT_DRV_RX_PROGRESS 0x00100000 rx path
70# SK_DBGCAT_DRV_PROGRESS 0x00200000 general runtime
71# SK_DBGCAT_DRV_??? 0x00400000 not used
72# SK_DBGCAT_DRV_PROM 0x00800000 promiscuous mode
73# SK_DBGCAT_DRV_TX_FRAME 0x01000000 display tx frames
74# SK_DBGCAT_DRV_ERROR 0x02000000 error conditions
75# SK_DBGCAT_DRV_INT_SRC 0x04000000 interrupts sources
76# SK_DBGCAT_DRV_EVENT 0x08000000 driver events
77
78EXTRA_CFLAGS += -Idrivers/net/sk98lin -DSK_DIAG_SUPPORT -DGENESIS -DYUKON $(DBGDEF) $(SKPARAM)
79
80clean:
81 rm -f core *.o *.a *.s
82
83
84
85
86
87
diff --git a/drivers/net/sk98lin/h/lm80.h b/drivers/net/sk98lin/h/lm80.h
deleted file mode 100644
index 4e2dbbf78000..000000000000
--- a/drivers/net/sk98lin/h/lm80.h
+++ /dev/null
@@ -1,179 +0,0 @@
1/******************************************************************************
2 *
3 * Name: lm80.h
4 * Project: Gigabit Ethernet Adapters, Common Modules
5 * Version: $Revision: 1.6 $
6 * Date: $Date: 2003/05/13 17:26:52 $
7 * Purpose: Contains all defines for the LM80 Chip
8 * (National Semiconductor).
9 *
10 ******************************************************************************/
11
12/******************************************************************************
13 *
14 * (C)Copyright 1998-2002 SysKonnect.
15 * (C)Copyright 2002-2003 Marvell.
16 *
17 * This program is free software; you can redistribute it and/or modify
18 * it under the terms of the GNU General Public License as published by
19 * the Free Software Foundation; either version 2 of the License, or
20 * (at your option) any later version.
21 *
22 * The information in this file is provided "AS IS" without warranty.
23 *
24 ******************************************************************************/
25
26#ifndef __INC_LM80_H
27#define __INC_LM80_H
28
29#ifdef __cplusplus
30extern "C" {
31#endif /* __cplusplus */
32
33/* defines ********************************************************************/
34
35/*
36 * LM80 register definition
37 *
38 * All registers are 8 bit wide
39 */
40#define LM80_CFG 0x00 /* Configuration Register */
41#define LM80_ISRC_1 0x01 /* Interrupt Status Register 1 */
42#define LM80_ISRC_2 0x02 /* Interrupt Status Register 2 */
43#define LM80_IMSK_1 0x03 /* Interrupt Mask Register 1 */
44#define LM80_IMSK_2 0x04 /* Interrupt Mask Register 2 */
45#define LM80_FAN_CTRL 0x05 /* Fan Devisor/RST#/OS# Register */
46#define LM80_TEMP_CTRL 0x06 /* OS# Config, Temp Res. Reg */
47 /* 0x07 - 0x1f reserved */
48 /* current values */
49#define LM80_VT0_IN 0x20 /* current Voltage 0 value */
50#define LM80_VT1_IN 0x21 /* current Voltage 1 value */
51#define LM80_VT2_IN 0x22 /* current Voltage 2 value */
52#define LM80_VT3_IN 0x23 /* current Voltage 3 value */
53#define LM80_VT4_IN 0x24 /* current Voltage 4 value */
54#define LM80_VT5_IN 0x25 /* current Voltage 5 value */
55#define LM80_VT6_IN 0x26 /* current Voltage 6 value */
56#define LM80_TEMP_IN 0x27 /* current Temperature value */
57#define LM80_FAN1_IN 0x28 /* current Fan 1 count */
58#define LM80_FAN2_IN 0x29 /* current Fan 2 count */
59 /* limit values */
60#define LM80_VT0_HIGH_LIM 0x2a /* high limit val for Voltage 0 */
61#define LM80_VT0_LOW_LIM 0x2b /* low limit val for Voltage 0 */
62#define LM80_VT1_HIGH_LIM 0x2c /* high limit val for Voltage 1 */
63#define LM80_VT1_LOW_LIM 0x2d /* low limit val for Voltage 1 */
64#define LM80_VT2_HIGH_LIM 0x2e /* high limit val for Voltage 2 */
65#define LM80_VT2_LOW_LIM 0x2f /* low limit val for Voltage 2 */
66#define LM80_VT3_HIGH_LIM 0x30 /* high limit val for Voltage 3 */
67#define LM80_VT3_LOW_LIM 0x31 /* low limit val for Voltage 3 */
68#define LM80_VT4_HIGH_LIM 0x32 /* high limit val for Voltage 4 */
69#define LM80_VT4_LOW_LIM 0x33 /* low limit val for Voltage 4 */
70#define LM80_VT5_HIGH_LIM 0x34 /* high limit val for Voltage 5 */
71#define LM80_VT5_LOW_LIM 0x35 /* low limit val for Voltage 5 */
72#define LM80_VT6_HIGH_LIM 0x36 /* high limit val for Voltage 6 */
73#define LM80_VT6_LOW_LIM 0x37 /* low limit val for Voltage 6 */
74#define LM80_THOT_LIM_UP 0x38 /* hot temperature limit (high) */
75#define LM80_THOT_LIM_LO 0x39 /* hot temperature limit (low) */
76#define LM80_TOS_LIM_UP 0x3a /* OS temperature limit (high) */
77#define LM80_TOS_LIM_LO 0x3b /* OS temperature limit (low) */
78#define LM80_FAN1_COUNT_LIM 0x3c /* Fan 1 count limit (high) */
79#define LM80_FAN2_COUNT_LIM 0x3d /* Fan 2 count limit (low) */
80 /* 0x3e - 0x3f reserved */
81
82/*
83 * LM80 bit definitions
84 */
85
86/* LM80_CFG Configuration Register */
87#define LM80_CFG_START (1<<0) /* start monitoring operation */
88#define LM80_CFG_INT_ENA (1<<1) /* enables the INT# Interrupt output */
89#define LM80_CFG_INT_POL (1<<2) /* INT# pol: 0 act low, 1 act high */
90#define LM80_CFG_INT_CLR (1<<3) /* disables INT#/RST_OUT#/OS# outputs */
91#define LM80_CFG_RESET (1<<4) /* signals a reset */
92#define LM80_CFG_CHASS_CLR (1<<5) /* clears Chassis Intrusion (CI) pin */
93#define LM80_CFG_GPO (1<<6) /* drives the GPO# pin */
94#define LM80_CFG_INIT (1<<7) /* restore power on defaults */
95
96/* LM80_ISRC_1 Interrupt Status Register 1 */
97/* LM80_IMSK_1 Interrupt Mask Register 1 */
98#define LM80_IS_VT0 (1<<0) /* limit exceeded for Voltage 0 */
99#define LM80_IS_VT1 (1<<1) /* limit exceeded for Voltage 1 */
100#define LM80_IS_VT2 (1<<2) /* limit exceeded for Voltage 2 */
101#define LM80_IS_VT3 (1<<3) /* limit exceeded for Voltage 3 */
102#define LM80_IS_VT4 (1<<4) /* limit exceeded for Voltage 4 */
103#define LM80_IS_VT5 (1<<5) /* limit exceeded for Voltage 5 */
104#define LM80_IS_VT6 (1<<6) /* limit exceeded for Voltage 6 */
105#define LM80_IS_INT_IN (1<<7) /* state of INT_IN# */
106
107/* LM80_ISRC_2 Interrupt Status Register 2 */
108/* LM80_IMSK_2 Interrupt Mask Register 2 */
109#define LM80_IS_TEMP (1<<0) /* HOT temperature limit exceeded */
110#define LM80_IS_BTI (1<<1) /* state of BTI# pin */
111#define LM80_IS_FAN1 (1<<2) /* count limit exceeded for Fan 1 */
112#define LM80_IS_FAN2 (1<<3) /* count limit exceeded for Fan 2 */
113#define LM80_IS_CI (1<<4) /* Chassis Intrusion occured */
114#define LM80_IS_OS (1<<5) /* OS temperature limit exceeded */
115 /* bit 6 and 7 are reserved in LM80_ISRC_2 */
116#define LM80_IS_HT_IRQ_MD (1<<6) /* Hot temperature interrupt mode */
117#define LM80_IS_OT_IRQ_MD (1<<7) /* OS temperature interrupt mode */
118
119/* LM80_FAN_CTRL Fan Devisor/RST#/OS# Register */
120#define LM80_FAN1_MD_SEL (1<<0) /* Fan 1 mode select */
121#define LM80_FAN2_MD_SEL (1<<1) /* Fan 2 mode select */
122#define LM80_FAN1_PRM_CTL (3<<2) /* Fan 1 speed control */
123#define LM80_FAN2_PRM_CTL (3<<4) /* Fan 2 speed control */
124#define LM80_FAN_OS_ENA (1<<6) /* enable OS mode on RST_OUT#/OS# pins*/
125#define LM80_FAN_RST_ENA (1<<7) /* sets RST_OUT#/OS# pins in RST mode */
126
127/* LM80_TEMP_CTRL OS# Config, Temp Res. Reg */
128#define LM80_TEMP_OS_STAT (1<<0) /* mirrors the state of RST_OUT#/OS# */
129#define LM80_TEMP_OS_POL (1<<1) /* select OS# polarity */
130#define LM80_TEMP_OS_MODE (1<<2) /* selects Interrupt mode */
131#define LM80_TEMP_RES (1<<3) /* selects 9 or 11 bit temp resulution*/
132#define LM80_TEMP_LSB (0xf<<4)/* 4 LSBs of 11 bit temp data */
133#define LM80_TEMP_LSB_9 (1<<7) /* LSB of 9 bit temperature data */
134
135 /* 0x07 - 0x1f reserved */
136/* LM80_VT0_IN current Voltage 0 value */
137/* LM80_VT1_IN current Voltage 1 value */
138/* LM80_VT2_IN current Voltage 2 value */
139/* LM80_VT3_IN current Voltage 3 value */
140/* LM80_VT4_IN current Voltage 4 value */
141/* LM80_VT5_IN current Voltage 5 value */
142/* LM80_VT6_IN current Voltage 6 value */
143/* LM80_TEMP_IN current temperature value */
144/* LM80_FAN1_IN current Fan 1 count */
145/* LM80_FAN2_IN current Fan 2 count */
146/* LM80_VT0_HIGH_LIM high limit val for Voltage 0 */
147/* LM80_VT0_LOW_LIM low limit val for Voltage 0 */
148/* LM80_VT1_HIGH_LIM high limit val for Voltage 1 */
149/* LM80_VT1_LOW_LIM low limit val for Voltage 1 */
150/* LM80_VT2_HIGH_LIM high limit val for Voltage 2 */
151/* LM80_VT2_LOW_LIM low limit val for Voltage 2 */
152/* LM80_VT3_HIGH_LIM high limit val for Voltage 3 */
153/* LM80_VT3_LOW_LIM low limit val for Voltage 3 */
154/* LM80_VT4_HIGH_LIM high limit val for Voltage 4 */
155/* LM80_VT4_LOW_LIM low limit val for Voltage 4 */
156/* LM80_VT5_HIGH_LIM high limit val for Voltage 5 */
157/* LM80_VT5_LOW_LIM low limit val for Voltage 5 */
158/* LM80_VT6_HIGH_LIM high limit val for Voltage 6 */
159/* LM80_VT6_LOW_LIM low limit val for Voltage 6 */
160/* LM80_THOT_LIM_UP hot temperature limit (high) */
161/* LM80_THOT_LIM_LO hot temperature limit (low) */
162/* LM80_TOS_LIM_UP OS temperature limit (high) */
163/* LM80_TOS_LIM_LO OS temperature limit (low) */
164/* LM80_FAN1_COUNT_LIM Fan 1 count limit (high) */
165/* LM80_FAN2_COUNT_LIM Fan 2 count limit (low) */
166 /* 0x3e - 0x3f reserved */
167
168#define LM80_ADDR 0x28 /* LM80 default addr */
169
170/* typedefs *******************************************************************/
171
172
173/* function prototypes ********************************************************/
174
175#ifdef __cplusplus
176}
177#endif /* __cplusplus */
178
179#endif /* __INC_LM80_H */
diff --git a/drivers/net/sk98lin/h/skaddr.h b/drivers/net/sk98lin/h/skaddr.h
deleted file mode 100644
index 423ad063d09b..000000000000
--- a/drivers/net/sk98lin/h/skaddr.h
+++ /dev/null
@@ -1,285 +0,0 @@
1/******************************************************************************
2 *
3 * Name: skaddr.h
4 * Project: Gigabit Ethernet Adapters, ADDR-Modul
5 * Version: $Revision: 1.29 $
6 * Date: $Date: 2003/05/13 16:57:24 $
7 * Purpose: Header file for Address Management (MC, UC, Prom).
8 *
9 ******************************************************************************/
10
11/******************************************************************************
12 *
13 * (C)Copyright 1998-2002 SysKonnect GmbH.
14 * (C)Copyright 2002-2003 Marvell.
15 *
16 * This program is free software; you can redistribute it and/or modify
17 * it under the terms of the GNU General Public License as published by
18 * the Free Software Foundation; either version 2 of the License, or
19 * (at your option) any later version.
20 *
21 * The information in this file is provided "AS IS" without warranty.
22 *
23 ******************************************************************************/
24
25/******************************************************************************
26 *
27 * Description:
28 *
29 * This module is intended to manage multicast addresses and promiscuous mode
30 * on GEnesis adapters.
31 *
32 * Include File Hierarchy:
33 *
34 * "skdrv1st.h"
35 * ...
36 * "sktypes.h"
37 * "skqueue.h"
38 * "skaddr.h"
39 * ...
40 * "skdrv2nd.h"
41 *
42 ******************************************************************************/
43
44#ifndef __INC_SKADDR_H
45#define __INC_SKADDR_H
46
47#ifdef __cplusplus
48extern "C" {
49#endif /* cplusplus */
50
51/* defines ********************************************************************/
52
53#define SK_MAC_ADDR_LEN 6 /* Length of MAC address. */
54#define SK_MAX_ADDRS 14 /* #Addrs for exact match. */
55
56/* ----- Common return values ----- */
57
58#define SK_ADDR_SUCCESS 0 /* Function returned successfully. */
59#define SK_ADDR_ILLEGAL_PORT 100 /* Port number too high. */
60#define SK_ADDR_TOO_EARLY 101 /* Function called too early. */
61
62/* ----- Clear/Add flag bits ----- */
63
64#define SK_ADDR_PERMANENT 1 /* RLMT Address */
65
66/* ----- Additional Clear flag bits ----- */
67
68#define SK_MC_SW_ONLY 2 /* Do not update HW when clearing. */
69
70/* ----- Override flag bits ----- */
71
72#define SK_ADDR_LOGICAL_ADDRESS 0
73#define SK_ADDR_VIRTUAL_ADDRESS (SK_ADDR_LOGICAL_ADDRESS) /* old */
74#define SK_ADDR_PHYSICAL_ADDRESS 1
75#define SK_ADDR_CLEAR_LOGICAL 2
76#define SK_ADDR_SET_LOGICAL 4
77
78/* ----- Override return values ----- */
79
80#define SK_ADDR_OVERRIDE_SUCCESS (SK_ADDR_SUCCESS)
81#define SK_ADDR_DUPLICATE_ADDRESS 1
82#define SK_ADDR_MULTICAST_ADDRESS 2
83
 84/* ----- Partitioning of exact match table ----- */
85
86#define SK_ADDR_EXACT_MATCHES 16 /* #Exact match entries. */
87
88#define SK_ADDR_FIRST_MATCH_RLMT 1
89#define SK_ADDR_LAST_MATCH_RLMT 2
90#define SK_ADDR_FIRST_MATCH_DRV 3
91#define SK_ADDR_LAST_MATCH_DRV (SK_ADDR_EXACT_MATCHES - 1)
92
93/* ----- SkAddrMcAdd/SkAddrMcUpdate return values ----- */
94
95#define SK_MC_FILTERING_EXACT 0 /* Exact filtering. */
96#define SK_MC_FILTERING_INEXACT 1 /* Inexact filtering. */
97
98/* ----- Additional SkAddrMcAdd return values ----- */
99
100#define SK_MC_ILLEGAL_ADDRESS 2 /* Illegal address. */
101#define SK_MC_ILLEGAL_PORT 3 /* Illegal port (not the active one). */
102#define SK_MC_RLMT_OVERFLOW 4 /* Too many RLMT mc addresses. */
103
 104/* ----- Promiscuous mode bits ----- */
105
106#define SK_PROM_MODE_NONE 0 /* Normal receive. */
107#define SK_PROM_MODE_LLC 1 /* Receive all LLC frames. */
108#define SK_PROM_MODE_ALL_MC 2 /* Receive all multicast frames. */
109/* #define SK_PROM_MODE_NON_LLC 4 */ /* Receive all non-LLC frames. */
110
111/* Macros */
112
113#ifdef OLD_STUFF
114#ifndef SK_ADDR_EQUAL
115/*
116 * "&" instead of "&&" allows better optimization on IA-64.
117 * The replacement is safe here, as all bytes exist.
118 */
119#ifndef SK_ADDR_DWORD_COMPARE
120#define SK_ADDR_EQUAL(A1,A2) ( \
121 (((SK_U8 *)(A1))[5] == ((SK_U8 *)(A2))[5]) & \
122 (((SK_U8 *)(A1))[4] == ((SK_U8 *)(A2))[4]) & \
123 (((SK_U8 *)(A1))[3] == ((SK_U8 *)(A2))[3]) & \
124 (((SK_U8 *)(A1))[2] == ((SK_U8 *)(A2))[2]) & \
125 (((SK_U8 *)(A1))[1] == ((SK_U8 *)(A2))[1]) & \
126 (((SK_U8 *)(A1))[0] == ((SK_U8 *)(A2))[0]))
127#else /* SK_ADDR_DWORD_COMPARE */
128#define SK_ADDR_EQUAL(A1,A2) ( \
129 (*(SK_U32 *)&(((SK_U8 *)(A1))[2]) == *(SK_U32 *)&(((SK_U8 *)(A2))[2])) & \
130 (*(SK_U32 *)&(((SK_U8 *)(A1))[0]) == *(SK_U32 *)&(((SK_U8 *)(A2))[0])))
131#endif /* SK_ADDR_DWORD_COMPARE */
132#endif /* SK_ADDR_EQUAL */
 133#endif /* OLD_STUFF */
134
135#ifndef SK_ADDR_EQUAL
136#ifndef SK_ADDR_DWORD_COMPARE
137#define SK_ADDR_EQUAL(A1,A2) ( \
138 (((SK_U8 SK_FAR *)(A1))[5] == ((SK_U8 SK_FAR *)(A2))[5]) & \
139 (((SK_U8 SK_FAR *)(A1))[4] == ((SK_U8 SK_FAR *)(A2))[4]) & \
140 (((SK_U8 SK_FAR *)(A1))[3] == ((SK_U8 SK_FAR *)(A2))[3]) & \
141 (((SK_U8 SK_FAR *)(A1))[2] == ((SK_U8 SK_FAR *)(A2))[2]) & \
142 (((SK_U8 SK_FAR *)(A1))[1] == ((SK_U8 SK_FAR *)(A2))[1]) & \
143 (((SK_U8 SK_FAR *)(A1))[0] == ((SK_U8 SK_FAR *)(A2))[0]))
144#else /* SK_ADDR_DWORD_COMPARE */
145#define SK_ADDR_EQUAL(A1,A2) ( \
146 (*(SK_U16 SK_FAR *)&(((SK_U8 SK_FAR *)(A1))[4]) == \
147 *(SK_U16 SK_FAR *)&(((SK_U8 SK_FAR *)(A2))[4])) && \
148 (*(SK_U32 SK_FAR *)&(((SK_U8 SK_FAR *)(A1))[0]) == \
149 *(SK_U32 SK_FAR *)&(((SK_U8 SK_FAR *)(A2))[0])))
150#endif /* SK_ADDR_DWORD_COMPARE */
151#endif /* SK_ADDR_EQUAL */
152
153/* typedefs *******************************************************************/
154
155typedef struct s_MacAddr {
156 SK_U8 a[SK_MAC_ADDR_LEN];
157} SK_MAC_ADDR;
158
159
160/* SK_FILTER is used to ensure alignment of the filter. */
161typedef union s_InexactFilter {
162 SK_U8 Bytes[8];
163 SK_U64 Val; /* Dummy entry for alignment only. */
164} SK_FILTER64;
165
166
167typedef struct s_AddrNet SK_ADDR_NET;
168
169
170typedef struct s_AddrPort {
171
172/* ----- Public part (read-only) ----- */
173
174 SK_MAC_ADDR CurrentMacAddress; /* Current physical MAC Address. */
175 SK_MAC_ADDR PermanentMacAddress; /* Permanent physical MAC Address. */
176 int PromMode; /* Promiscuous Mode. */
177
178/* ----- Private part ----- */
179
180 SK_MAC_ADDR PreviousMacAddress; /* Prev. phys. MAC Address. */
181 SK_BOOL CurrentMacAddressSet; /* CurrentMacAddress is set. */
182 SK_U8 Align01;
183
184 SK_U32 FirstExactMatchRlmt;
185 SK_U32 NextExactMatchRlmt;
186 SK_U32 FirstExactMatchDrv;
187 SK_U32 NextExactMatchDrv;
188 SK_MAC_ADDR Exact[SK_ADDR_EXACT_MATCHES];
189 SK_FILTER64 InexactFilter; /* For 64-bit hash register. */
190 SK_FILTER64 InexactRlmtFilter; /* For 64-bit hash register. */
191 SK_FILTER64 InexactDrvFilter; /* For 64-bit hash register. */
192} SK_ADDR_PORT;
193
194
195struct s_AddrNet {
196/* ----- Public part (read-only) ----- */
197
198 SK_MAC_ADDR CurrentMacAddress; /* Logical MAC Address. */
199 SK_MAC_ADDR PermanentMacAddress; /* Logical MAC Address. */
200
201/* ----- Private part ----- */
202
203 SK_U32 ActivePort; /* View of module ADDR. */
204 SK_BOOL CurrentMacAddressSet; /* CurrentMacAddress is set. */
205 SK_U8 Align01;
206 SK_U16 Align02;
207};
208
209
210typedef struct s_Addr {
211
212/* ----- Public part (read-only) ----- */
213
214 SK_ADDR_NET Net[SK_MAX_NETS];
215 SK_ADDR_PORT Port[SK_MAX_MACS];
216
217/* ----- Private part ----- */
218} SK_ADDR;
219
220/* function prototypes ********************************************************/
221
222#ifndef SK_KR_PROTO
223
224/* Functions provided by SkAddr */
225
226/* ANSI/C++ compliant function prototypes */
227
228extern int SkAddrInit(
229 SK_AC *pAC,
230 SK_IOC IoC,
231 int Level);
232
233extern int SkAddrMcClear(
234 SK_AC *pAC,
235 SK_IOC IoC,
236 SK_U32 PortNumber,
237 int Flags);
238
239extern int SkAddrMcAdd(
240 SK_AC *pAC,
241 SK_IOC IoC,
242 SK_U32 PortNumber,
243 SK_MAC_ADDR *pMc,
244 int Flags);
245
246extern int SkAddrMcUpdate(
247 SK_AC *pAC,
248 SK_IOC IoC,
249 SK_U32 PortNumber);
250
251extern int SkAddrOverride(
252 SK_AC *pAC,
253 SK_IOC IoC,
254 SK_U32 PortNumber,
255 SK_MAC_ADDR SK_FAR *pNewAddr,
256 int Flags);
257
258extern int SkAddrPromiscuousChange(
259 SK_AC *pAC,
260 SK_IOC IoC,
261 SK_U32 PortNumber,
262 int NewPromMode);
263
264#ifndef SK_SLIM
265extern int SkAddrSwap(
266 SK_AC *pAC,
267 SK_IOC IoC,
268 SK_U32 FromPortNumber,
269 SK_U32 ToPortNumber);
270#endif
271
 272#else /* defined(SK_KR_PROTO) */
273
274/* Non-ANSI/C++ compliant function prototypes */
275
276#error KR-style prototypes are not yet provided.
277
 278#endif /* defined(SK_KR_PROTO) */
279
280
281#ifdef __cplusplus
282}
283#endif /* __cplusplus */
284
285#endif /* __INC_SKADDR_H */
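[Editor's note: illustrative only, not part of the removed skaddr.h.] The byte-wise SK_ADDR_EQUAL() variant above compares all six MAC address bytes and combines the results with "&" rather than "&&" so the compiler need not generate short-circuit branches. A minimal user-space sketch of the same idea (mac_equal is a hypothetical helper, not a driver function):

/* Sketch of the byte-wise MAC comparison performed by SK_ADDR_EQUAL(). */
#include <stdio.h>

#define MAC_LEN 6

static int mac_equal(const unsigned char *a1, const unsigned char *a2)
{
	/* "&" instead of "&&": all six byte comparisons are evaluated. */
	return (a1[5] == a2[5]) & (a1[4] == a2[4]) &
	       (a1[3] == a2[3]) & (a1[2] == a2[2]) &
	       (a1[1] == a2[1]) & (a1[0] == a2[0]);
}

int main(void)
{
	unsigned char a[MAC_LEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	unsigned char b[MAC_LEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

	printf("equal: %d\n", mac_equal(a, b));	/* prints "equal: 1" */
	return 0;
}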
diff --git a/drivers/net/sk98lin/h/skcsum.h b/drivers/net/sk98lin/h/skcsum.h
deleted file mode 100644
index 6e256bd9a28c..000000000000
--- a/drivers/net/sk98lin/h/skcsum.h
+++ /dev/null
@@ -1,213 +0,0 @@
1/******************************************************************************
2 *
3 * Name: skcsum.h
4 * Project: GEnesis - SysKonnect SK-NET Gigabit Ethernet (SK-98xx)
5 * Version: $Revision: 1.10 $
6 * Date: $Date: 2003/08/20 13:59:57 $
7 * Purpose: Store/verify Internet checksum in send/receive packets.
8 *
9 ******************************************************************************/
10
11/******************************************************************************
12 *
13 * (C)Copyright 1998-2001 SysKonnect GmbH.
14 *
15 * This program is free software; you can redistribute it and/or modify
16 * it under the terms of the GNU General Public License as published by
17 * the Free Software Foundation; either version 2 of the License, or
18 * (at your option) any later version.
19 *
20 * The information in this file is provided "AS IS" without warranty.
21 *
22 ******************************************************************************/
23
24/******************************************************************************
25 *
26 * Description:
27 *
28 * Public header file for the "GEnesis" common module "CSUM".
29 *
30 * "GEnesis" is an abbreviation of "Gigabit Ethernet Network System in Silicon"
31 * and is the code name of this SysKonnect project.
32 *
33 * Compilation Options:
34 *
35 * SK_USE_CSUM - Define if CSUM is to be used. Otherwise, CSUM will be an
36 * empty module.
37 *
38 * SKCS_OVERWRITE_PROTO - Define to overwrite the default protocol id
39 * definitions. In this case, all SKCS_PROTO_xxx definitions must be made
40 * external.
41 *
42 * SKCS_OVERWRITE_STATUS - Define to overwrite the default return status
43 * definitions. In this case, all SKCS_STATUS_xxx definitions must be made
44 * external.
45 *
46 * Include File Hierarchy:
47 *
48 * "h/skcsum.h"
49 * "h/sktypes.h"
50 * "h/skqueue.h"
51 *
52 ******************************************************************************/
53
54#ifndef __INC_SKCSUM_H
55#define __INC_SKCSUM_H
56
57#include "h/sktypes.h"
58#include "h/skqueue.h"
59
60/* defines ********************************************************************/
61
62/*
63 * Define the default bit flags for 'SKCS_PACKET_INFO.ProtocolFlags' if no user
64 * overwrite.
65 */
66#ifndef SKCS_OVERWRITE_PROTO /* User overwrite? */
67#define SKCS_PROTO_IP 0x1 /* IP (Internet Protocol version 4) */
68#define SKCS_PROTO_TCP 0x2 /* TCP (Transmission Control Protocol) */
69#define SKCS_PROTO_UDP 0x4 /* UDP (User Datagram Protocol) */
70
71/* Indices for protocol statistics. */
72#define SKCS_PROTO_STATS_IP 0
73#define SKCS_PROTO_STATS_UDP 1
74#define SKCS_PROTO_STATS_TCP 2
75#define SKCS_NUM_PROTOCOLS 3 /* Number of supported protocols. */
76#endif /* !SKCS_OVERWRITE_PROTO */
77
78/*
79 * Define the default SKCS_STATUS type and values if no user overwrite.
80 *
81 * SKCS_STATUS_UNKNOWN_IP_VERSION - Not an IP v4 frame.
82 * SKCS_STATUS_IP_CSUM_ERROR - IP checksum error.
83 * SKCS_STATUS_IP_CSUM_ERROR_TCP - IP checksum error in TCP frame.
84 * SKCS_STATUS_IP_CSUM_ERROR_UDP - IP checksum error in UDP frame
85 * SKCS_STATUS_IP_FRAGMENT - IP fragment (IP checksum ok).
86 * SKCS_STATUS_IP_CSUM_OK - IP checksum ok (not a TCP or UDP frame).
87 * SKCS_STATUS_TCP_CSUM_ERROR - TCP checksum error (IP checksum ok).
88 * SKCS_STATUS_UDP_CSUM_ERROR - UDP checksum error (IP checksum ok).
89 * SKCS_STATUS_TCP_CSUM_OK - IP and TCP checksum ok.
90 * SKCS_STATUS_UDP_CSUM_OK - IP and UDP checksum ok.
91 * SKCS_STATUS_IP_CSUM_OK_NO_UDP - IP checksum OK and no UDP checksum.
92 */
93#ifndef SKCS_OVERWRITE_STATUS /* User overwrite? */
94#define SKCS_STATUS int /* Define status type. */
95
96#define SKCS_STATUS_UNKNOWN_IP_VERSION 1
97#define SKCS_STATUS_IP_CSUM_ERROR 2
98#define SKCS_STATUS_IP_FRAGMENT 3
99#define SKCS_STATUS_IP_CSUM_OK 4
100#define SKCS_STATUS_TCP_CSUM_ERROR 5
101#define SKCS_STATUS_UDP_CSUM_ERROR 6
102#define SKCS_STATUS_TCP_CSUM_OK 7
103#define SKCS_STATUS_UDP_CSUM_OK 8
104/* needed for Microsoft */
105#define SKCS_STATUS_IP_CSUM_ERROR_UDP 9
106#define SKCS_STATUS_IP_CSUM_ERROR_TCP 10
107/* UDP checksum may be omitted */
108#define SKCS_STATUS_IP_CSUM_OK_NO_UDP 11
109#endif /* !SKCS_OVERWRITE_STATUS */
110
111/* Clear protocol statistics event. */
112#define SK_CSUM_EVENT_CLEAR_PROTO_STATS 1
113
114/*
115 * Add two values in one's complement.
116 *
117 * Note: One of the two input values may be "longer" than 16-bit, but then the
118 * resulting sum may be 17 bits long. In this case, add zero to the result using
119 * SKCS_OC_ADD() again.
120 *
121 * Result = Value1 + Value2
122 */
123#define SKCS_OC_ADD(Result, Value1, Value2) { \
124 unsigned long Sum; \
125 \
126 Sum = (unsigned long) (Value1) + (unsigned long) (Value2); \
127 /* Add-in any carry. */ \
128 (Result) = (Sum & 0xffff) + (Sum >> 16); \
129}
130
131/*
132 * Subtract two values in one's complement.
133 *
134 * Result = Value1 - Value2
135 */
136#define SKCS_OC_SUB(Result, Value1, Value2) \
137 SKCS_OC_ADD((Result), (Value1), ~(Value2) & 0xffff)
138
139/* typedefs *******************************************************************/
140
141/*
142 * SKCS_PROTO_STATS - The CSUM protocol statistics structure.
143 *
144 * There is one instance of this structure for each protocol supported.
145 */
146typedef struct s_CsProtocolStatistics {
147 SK_U64 RxOkCts; /* Receive checksum ok. */
148 SK_U64 RxUnableCts; /* Unable to verify receive checksum. */
149 SK_U64 RxErrCts; /* Receive checksum error. */
150 SK_U64 TxOkCts; /* Transmit checksum ok. */
151 SK_U64 TxUnableCts; /* Unable to calculate checksum in hw. */
152} SKCS_PROTO_STATS;
153
154/*
155 * s_Csum - The CSUM module context structure.
156 */
157typedef struct s_Csum {
158 /* Enabled receive SK_PROTO_XXX bit flags. */
159 unsigned ReceiveFlags[SK_MAX_NETS];
160#ifdef TX_CSUM
161 unsigned TransmitFlags[SK_MAX_NETS];
162#endif /* TX_CSUM */
163
164 /* The protocol statistics structure; one per supported protocol. */
165 SKCS_PROTO_STATS ProtoStats[SK_MAX_NETS][SKCS_NUM_PROTOCOLS];
166} SK_CSUM;
167
168/*
169 * SKCS_PACKET_INFO - The packet information structure.
170 */
171typedef struct s_CsPacketInfo {
 172	/* Bit field specifying the desired/found protocols. */
173 unsigned ProtocolFlags;
174
175 /* Length of complete IP header, including any option fields. */
176 unsigned IpHeaderLength;
177
178 /* IP header checksum. */
179 unsigned IpHeaderChecksum;
180
181 /* TCP/UDP pseudo header checksum. */
182 unsigned PseudoHeaderChecksum;
183} SKCS_PACKET_INFO;
184
185/* function prototypes ********************************************************/
186
187#ifndef SK_CS_CALCULATE_CHECKSUM
188extern unsigned SkCsCalculateChecksum(
189 void *pData,
190 unsigned Length);
191#endif /* SK_CS_CALCULATE_CHECKSUM */
192
193extern int SkCsEvent(
194 SK_AC *pAc,
195 SK_IOC Ioc,
196 SK_U32 Event,
197 SK_EVPARA Param);
198
199extern SKCS_STATUS SkCsGetReceiveInfo(
200 SK_AC *pAc,
201 void *pIpHeader,
202 unsigned Checksum1,
203 unsigned Checksum2,
204 int NetNumber);
205
206extern void SkCsSetReceiveFlags(
207 SK_AC *pAc,
208 unsigned ReceiveFlags,
209 unsigned *pChecksum1Offset,
210 unsigned *pChecksum2Offset,
211 int NetNumber);
212
213#endif /* __INC_SKCSUM_H */
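[Editor's note: illustrative only, not part of the removed skcsum.h.] The SKCS_OC_ADD()/SKCS_OC_SUB() macros above implement one's-complement arithmetic by folding the carry out of bit 15 back into the low 16 bits, and subtraction is simply addition of the one's complement. A function-form sketch (oc_add/oc_sub are hypothetical names):

/* Sketch of one's-complement add/subtract with carry folding. */
#include <stdio.h>

static unsigned oc_add(unsigned v1, unsigned v2)
{
	unsigned long sum = (unsigned long)v1 + (unsigned long)v2;

	/* Fold any carry out of bit 15 back into the low word. */
	return (unsigned)((sum & 0xffff) + (sum >> 16));
}

static unsigned oc_sub(unsigned v1, unsigned v2)
{
	/* Subtraction = addition of the one's complement. */
	return oc_add(v1, ~v2 & 0xffff);
}

int main(void)
{
	printf("0x%04x\n", oc_add(0xffff, 0x0001));	/* prints 0x0001 */
	printf("0x%04x\n", oc_sub(0x1234, 0x0234));	/* prints 0x1000 */
	return 0;
}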
diff --git a/drivers/net/sk98lin/h/skdebug.h b/drivers/net/sk98lin/h/skdebug.h
deleted file mode 100644
index 3cba171d74b2..000000000000
--- a/drivers/net/sk98lin/h/skdebug.h
+++ /dev/null
@@ -1,74 +0,0 @@
1/******************************************************************************
2 *
3 * Name: skdebug.h
4 * Project: Gigabit Ethernet Adapters, Common Modules
5 * Version: $Revision: 1.14 $
6 * Date: $Date: 2003/05/13 17:26:00 $
7 * Purpose: SK specific DEBUG support
8 *
9 ******************************************************************************/
10
11/******************************************************************************
12 *
13 * (C)Copyright 1998-2002 SysKonnect.
14 * (C)Copyright 2002-2003 Marvell.
15 *
16 * This program is free software; you can redistribute it and/or modify
17 * it under the terms of the GNU General Public License as published by
18 * the Free Software Foundation; either version 2 of the License, or
19 * (at your option) any later version.
20 *
21 * The information in this file is provided "AS IS" without warranty.
22 *
23 ******************************************************************************/
24
25#ifndef __INC_SKDEBUG_H
26#define __INC_SKDEBUG_H
27
28#ifdef DEBUG
29#ifndef SK_DBG_MSG
30#define SK_DBG_MSG(pAC,comp,cat,arg) \
31 if ( ((comp) & SK_DBG_CHKMOD(pAC)) && \
32 ((cat) & SK_DBG_CHKCAT(pAC)) ) { \
33 SK_DBG_PRINTF arg ; \
34 }
35#endif
36#else
37#define SK_DBG_MSG(pAC,comp,lev,arg)
38#endif
39
 40/* PLEASE NOTE:
 41 * ============
 42 * Because of restrictions in some kernel printf routines, use only the
 43 * basic format identifiers %x, %d, %c and %s.
 44 * Never use combined format identifiers such as %lx or %ld in the
 45 * printf argument (arg), because some OS-specific kernel printfs
 46 * support only the basic identifiers.
 47 */
48
49/* Debug modules */
50
51#define SK_DBGMOD_MERR 0x00000001L /* general module error indication */
52#define SK_DBGMOD_HWM 0x00000002L /* Hardware init module */
53#define SK_DBGMOD_RLMT 0x00000004L /* RLMT module */
54#define SK_DBGMOD_VPD 0x00000008L /* VPD module */
55#define SK_DBGMOD_I2C 0x00000010L /* I2C module */
56#define SK_DBGMOD_PNMI 0x00000020L /* PNMI module */
57#define SK_DBGMOD_CSUM 0x00000040L /* CSUM module */
58#define SK_DBGMOD_ADDR 0x00000080L /* ADDR module */
59#define SK_DBGMOD_PECP 0x00000100L /* PECP module */
60#define SK_DBGMOD_POWM 0x00000200L /* Power Management module */
61
62/* Debug events */
63
64#define SK_DBGCAT_INIT 0x00000001L /* module/driver initialization */
65#define SK_DBGCAT_CTRL 0x00000002L /* controlling devices */
66#define SK_DBGCAT_ERR 0x00000004L /* error handling paths */
67#define SK_DBGCAT_TX 0x00000008L /* transmit path */
68#define SK_DBGCAT_RX 0x00000010L /* receive path */
69#define SK_DBGCAT_IRQ 0x00000020L /* general IRQ handling */
70#define SK_DBGCAT_QUEUE 0x00000040L /* any queue management */
71#define SK_DBGCAT_DUMP 0x00000080L /* large data output e.g. hex dump */
72#define SK_DBGCAT_FATAL 0x00000100L /* fatal error */
73
74#endif /* __INC_SKDEBUG_H */
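[Editor's note: illustrative only, not part of the removed skdebug.h.] SK_DBG_MSG() above emits a message only when both the module bit and the category bit are set in the adapter's debug masks. A user-space sketch of that filtering (DBG_MSG, dbg_chkmod and dbg_chkcat are hypothetical stand-ins for SK_DBG_MSG, SK_DBG_CHKMOD and SK_DBG_CHKCAT; the do/while(0) wrapper is a common hardening against the dangling-if problem of the original macro):

/* Sketch of bitmask-gated debug output. */
#include <stdio.h>

#define DBGMOD_ADDR	0x00000080L	/* ADDR module */
#define DBGCAT_ERR	0x00000004L	/* error handling paths */
#define DBGCAT_TX	0x00000008L	/* transmit path */

static long dbg_chkmod = DBGMOD_ADDR;	/* enabled modules */
static long dbg_chkcat = DBGCAT_ERR;	/* enabled categories */

#define DBG_MSG(comp, cat, ...)						\
	do {								\
		if (((comp) & dbg_chkmod) && ((cat) & dbg_chkcat))	\
			printf(__VA_ARGS__);				\
	} while (0)

int main(void)
{
	DBG_MSG(DBGMOD_ADDR, DBGCAT_ERR, "addr error path hit\n");	/* printed */
	DBG_MSG(DBGMOD_ADDR, DBGCAT_TX, "tx path\n");			/* filtered out */
	return 0;
}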
diff --git a/drivers/net/sk98lin/h/skdrv1st.h b/drivers/net/sk98lin/h/skdrv1st.h
deleted file mode 100644
index 91b8d4f45904..000000000000
--- a/drivers/net/sk98lin/h/skdrv1st.h
+++ /dev/null
@@ -1,188 +0,0 @@
1/******************************************************************************
2 *
3 * Name: skdrv1st.h
4 * Project: GEnesis, PCI Gigabit Ethernet Adapter
5 * Version: $Revision: 1.4 $
6 * Date: $Date: 2003/11/12 14:28:14 $
7 * Purpose: First header file for driver and all other modules
8 *
9 ******************************************************************************/
10
11/******************************************************************************
12 *
13 * (C)Copyright 1998-2002 SysKonnect GmbH.
14 * (C)Copyright 2002-2003 Marvell.
15 *
16 * This program is free software; you can redistribute it and/or modify
17 * it under the terms of the GNU General Public License as published by
18 * the Free Software Foundation; either version 2 of the License, or
19 * (at your option) any later version.
20 *
21 * The information in this file is provided "AS IS" without warranty.
22 *
23 ******************************************************************************/
24
25/******************************************************************************
26 *
27 * Description:
28 *
29 * This is the first include file of the driver, which includes all
 30 * necessary system header files and some of the GEnesis header files.
31 * It also defines some basic items.
32 *
33 * Include File Hierarchy:
34 *
35 * see skge.c
36 *
37 ******************************************************************************/
38
39#ifndef __INC_SKDRV1ST_H
40#define __INC_SKDRV1ST_H
41
42typedef struct s_AC SK_AC;
43
44/* Set card versions */
45#define SK_FAR
46
47/* override some default functions with optimized linux functions */
48
49#define SK_PNMI_STORE_U16(p,v) memcpy((char*)(p),(char*)&(v),2)
50#define SK_PNMI_STORE_U32(p,v) memcpy((char*)(p),(char*)&(v),4)
51#define SK_PNMI_STORE_U64(p,v) memcpy((char*)(p),(char*)&(v),8)
52#define SK_PNMI_READ_U16(p,v) memcpy((char*)&(v),(char*)(p),2)
53#define SK_PNMI_READ_U32(p,v) memcpy((char*)&(v),(char*)(p),4)
54#define SK_PNMI_READ_U64(p,v) memcpy((char*)&(v),(char*)(p),8)
55
56#define SK_ADDR_EQUAL(a1,a2) (!memcmp(a1,a2,6))
57
58#include <linux/types.h>
59#include <linux/kernel.h>
60#include <linux/string.h>
61#include <linux/errno.h>
62#include <linux/ioport.h>
63#include <linux/slab.h>
64#include <linux/interrupt.h>
65#include <linux/pci.h>
66#include <linux/bitops.h>
67#include <asm/byteorder.h>
68#include <asm/io.h>
69#include <asm/irq.h>
70#include <linux/netdevice.h>
71#include <linux/etherdevice.h>
72#include <linux/skbuff.h>
73
74#include <linux/init.h>
75#include <asm/uaccess.h>
76#include <net/checksum.h>
77
78#define SK_CS_CALCULATE_CHECKSUM
79#ifndef CONFIG_X86_64
80#define SkCsCalculateChecksum(p,l) ((~ip_compute_csum(p, l)) & 0xffff)
81#else
82#define SkCsCalculateChecksum(p,l) ((~ip_fast_csum(p, l)) & 0xffff)
83#endif
84
85#include "h/sktypes.h"
86#include "h/skerror.h"
87#include "h/skdebug.h"
88#include "h/lm80.h"
89#include "h/xmac_ii.h"
90
91#ifdef __LITTLE_ENDIAN
92#define SK_LITTLE_ENDIAN
93#else
94#define SK_BIG_ENDIAN
95#endif
96
97#define SK_NET_DEVICE net_device
98
99
 100/* time is kept in hundredths of a second (see SkOsGetTimeCurrent) */
101#define SK_TICKS_PER_SEC 100
102
103#define SK_MEM_MAPPED_IO
104
105// #define SK_RLMT_SLOW_LOOKAHEAD
106
107#define SK_MAX_MACS 2
108#define SK_MAX_NETS 2
109
110#define SK_IOC char __iomem *
111
112typedef struct s_DrvRlmtMbuf SK_MBUF;
113
114#define SK_CONST64 INT64_C
115#define SK_CONSTU64 UINT64_C
116
117#define SK_MEMCPY(dest,src,size) memcpy(dest,src,size)
118#define SK_MEMCMP(s1,s2,size) memcmp(s1,s2,size)
119#define SK_MEMSET(dest,val,size) memset(dest,val,size)
120#define SK_STRLEN(pStr) strlen((char*)(pStr))
121#define SK_STRNCPY(pDest,pSrc,size) strncpy((char*)(pDest),(char*)(pSrc),size)
122#define SK_STRCMP(pStr1,pStr2) strcmp((char*)(pStr1),(char*)(pStr2))
123
124/* macros to access the adapter */
125#define SK_OUT8(b,a,v) writeb((v), ((b)+(a)))
126#define SK_OUT16(b,a,v) writew((v), ((b)+(a)))
127#define SK_OUT32(b,a,v) writel((v), ((b)+(a)))
128#define SK_IN8(b,a,pv) (*(pv) = readb((b)+(a)))
129#define SK_IN16(b,a,pv) (*(pv) = readw((b)+(a)))
130#define SK_IN32(b,a,pv) (*(pv) = readl((b)+(a)))
131
132#define int8_t char
133#define int16_t short
134#define int32_t long
135#define int64_t long long
136#define uint8_t u_char
137#define uint16_t u_short
138#define uint32_t u_long
139#define uint64_t unsigned long long
140#define t_scalar_t int
141#define t_uscalar_t unsigned int
142#define uintptr_t unsigned long
143
144#define __CONCAT__(A,B) A##B
145
146#define INT32_C(a) __CONCAT__(a,L)
147#define INT64_C(a) __CONCAT__(a,LL)
148#define UINT32_C(a) __CONCAT__(a,UL)
149#define UINT64_C(a) __CONCAT__(a,ULL)
150
151#ifdef DEBUG
152#define SK_DBG_PRINTF printk
153#ifndef SK_DEBUG_CHKMOD
154#define SK_DEBUG_CHKMOD 0
155#endif
156#ifndef SK_DEBUG_CHKCAT
157#define SK_DEBUG_CHKCAT 0
158#endif
159/* those come from the makefile */
160#define SK_DBG_CHKMOD(pAC) (SK_DEBUG_CHKMOD)
161#define SK_DBG_CHKCAT(pAC) (SK_DEBUG_CHKCAT)
162
163extern void SkDbgPrintf(const char *format,...);
164
165#define SK_DBGMOD_DRV 0x00010000
166
167/**** possible driver debug categories ********************************/
168#define SK_DBGCAT_DRV_ENTRY 0x00010000
169#define SK_DBGCAT_DRV_SAP 0x00020000
170#define SK_DBGCAT_DRV_MCA 0x00040000
171#define SK_DBGCAT_DRV_TX_PROGRESS 0x00080000
172#define SK_DBGCAT_DRV_RX_PROGRESS 0x00100000
173#define SK_DBGCAT_DRV_PROGRESS 0x00200000
174#define SK_DBGCAT_DRV_MSG 0x00400000
175#define SK_DBGCAT_DRV_PROM 0x00800000
176#define SK_DBGCAT_DRV_TX_FRAME 0x01000000
177#define SK_DBGCAT_DRV_ERROR 0x02000000
178#define SK_DBGCAT_DRV_INT_SRC 0x04000000
179#define SK_DBGCAT_DRV_EVENT 0x08000000
180
181#endif
182
183#define SK_ERR_LOG SkErrorLog
184
185extern void SkErrorLog(SK_AC*, int, int, char*);
186
187#endif
188
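[Editor's note: illustrative only, not part of the removed skdrv1st.h.] The SK_PNMI_STORE_Uxx/SK_PNMI_READ_Uxx macros above go through memcpy() so that 16/32/64-bit values can be written to and read from buffer offsets that may not be naturally aligned. A minimal sketch of the 32-bit case (store_u32/read_u32 are hypothetical names):

/* Sketch of alignment-safe stores/loads via memcpy(). */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

static void store_u32(void *p, uint32_t v)
{
	memcpy(p, &v, sizeof(v));	/* safe even if p is unaligned */
}

static uint32_t read_u32(const void *p)
{
	uint32_t v;

	memcpy(&v, p, sizeof(v));
	return v;
}

int main(void)
{
	unsigned char buf[7];

	store_u32(buf + 1, 0x12345678u);	/* deliberately misaligned offset */
	printf("0x%08x\n", (unsigned)read_u32(buf + 1));	/* 0x12345678 */
	return 0;
}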
diff --git a/drivers/net/sk98lin/h/skdrv2nd.h b/drivers/net/sk98lin/h/skdrv2nd.h
deleted file mode 100644
index 3fa67171e832..000000000000
--- a/drivers/net/sk98lin/h/skdrv2nd.h
+++ /dev/null
@@ -1,447 +0,0 @@
1/******************************************************************************
2 *
3 * Name: skdrv2nd.h
4 * Project: GEnesis, PCI Gigabit Ethernet Adapter
5 * Version: $Revision: 1.10 $
6 * Date: $Date: 2003/12/11 16:04:45 $
7 * Purpose: Second header file for driver and all other modules
8 *
9 ******************************************************************************/
10
11/******************************************************************************
12 *
13 * (C)Copyright 1998-2002 SysKonnect GmbH.
14 * (C)Copyright 2002-2003 Marvell.
15 *
16 * This program is free software; you can redistribute it and/or modify
17 * it under the terms of the GNU General Public License as published by
18 * the Free Software Foundation; either version 2 of the License, or
19 * (at your option) any later version.
20 *
21 * The information in this file is provided "AS IS" without warranty.
22 *
23 ******************************************************************************/
24
25/******************************************************************************
26 *
27 * Description:
28 *
29 * This is the second include file of the driver, which includes all other
 30 * necessary files and defines all structures and constants used by the
31 * driver and the common modules.
32 *
33 * Include File Hierarchy:
34 *
35 * see skge.c
36 *
37 ******************************************************************************/
38
39#ifndef __INC_SKDRV2ND_H
40#define __INC_SKDRV2ND_H
41
42#include "h/skqueue.h"
43#include "h/skgehwt.h"
44#include "h/sktimer.h"
45#include "h/ski2c.h"
46#include "h/skgepnmi.h"
47#include "h/skvpd.h"
48#include "h/skgehw.h"
49#include "h/skgeinit.h"
50#include "h/skaddr.h"
51#include "h/skgesirq.h"
52#include "h/skcsum.h"
53#include "h/skrlmt.h"
54#include "h/skgedrv.h"
55
56
57extern SK_MBUF *SkDrvAllocRlmtMbuf(SK_AC*, SK_IOC, unsigned);
58extern void SkDrvFreeRlmtMbuf(SK_AC*, SK_IOC, SK_MBUF*);
59extern SK_U64 SkOsGetTime(SK_AC*);
60extern int SkPciReadCfgDWord(SK_AC*, int, SK_U32*);
61extern int SkPciReadCfgWord(SK_AC*, int, SK_U16*);
62extern int SkPciReadCfgByte(SK_AC*, int, SK_U8*);
63extern int SkPciWriteCfgWord(SK_AC*, int, SK_U16);
64extern int SkPciWriteCfgByte(SK_AC*, int, SK_U8);
65extern int SkDrvEvent(SK_AC*, SK_IOC IoC, SK_U32, SK_EVPARA);
66
67#ifdef SK_DIAG_SUPPORT
68extern int SkDrvEnterDiagMode(SK_AC *pAc);
69extern int SkDrvLeaveDiagMode(SK_AC *pAc);
70#endif
71
72struct s_DrvRlmtMbuf {
73 SK_MBUF *pNext; /* Pointer to next RLMT Mbuf. */
74 SK_U8 *pData; /* Data buffer (virtually contig.). */
75 unsigned Size; /* Data buffer size. */
76 unsigned Length; /* Length of packet (<= Size). */
77 SK_U32 PortIdx; /* Receiving/transmitting port. */
78#ifdef SK_RLMT_MBUF_PRIVATE
79 SK_RLMT_MBUF Rlmt; /* Private part for RLMT. */
80#endif /* SK_RLMT_MBUF_PRIVATE */
81 struct sk_buff *pOs; /* Pointer to message block */
82};
83
84
85/*
86 * Time macros
87 */
88#if SK_TICKS_PER_SEC == 100
89#define SK_PNMI_HUNDREDS_SEC(t) (t)
90#else
91#define SK_PNMI_HUNDREDS_SEC(t) ((((unsigned long)t) * 100) / \
92 (SK_TICKS_PER_SEC))
93#endif
94
95/*
96 * New SkOsGetTime
97 */
98#define SkOsGetTimeCurrent(pAC, pUsec) {\
99 struct timeval t;\
100 do_gettimeofday(&t);\
101 *pUsec = ((((t.tv_sec) * 1000000L)+t.tv_usec)/10000);\
102}
103
104
105/*
106 * ioctl definitions
107 */
108#define SK_IOCTL_BASE (SIOCDEVPRIVATE)
109#define SK_IOCTL_GETMIB (SK_IOCTL_BASE + 0)
110#define SK_IOCTL_SETMIB (SK_IOCTL_BASE + 1)
111#define SK_IOCTL_PRESETMIB (SK_IOCTL_BASE + 2)
112#define SK_IOCTL_GEN (SK_IOCTL_BASE + 3)
113#define SK_IOCTL_DIAG (SK_IOCTL_BASE + 4)
114
115typedef struct s_IOCTL SK_GE_IOCTL;
116
117struct s_IOCTL {
118 char __user * pData;
119 unsigned int Len;
120};
121
122
123/*
124 * define sizes of descriptor rings in bytes
125 */
126
127#define TX_RING_SIZE (8*1024)
128#define RX_RING_SIZE (24*1024)
129
130/*
131 * Buffer size for ethernet packets
132 */
133#define ETH_BUF_SIZE 1540
134#define ETH_MAX_MTU 1514
135#define ETH_MIN_MTU 60
136#define ETH_MULTICAST_BIT 0x01
137#define SK_JUMBO_MTU 9000
138
139/*
 140 * transmit priority selects the queue: LOW = asynchronous, HIGH = synchronous
141 */
142#define TX_PRIO_LOW 0
143#define TX_PRIO_HIGH 1
144
145/*
146 * alignment of rx/tx descriptors
147 */
148#define DESCR_ALIGN 64
149
150/*
151 * definitions for pnmi. TODO
152 */
153#define SK_DRIVER_RESET(pAC, IoC) 0
154#define SK_DRIVER_SENDEVENT(pAC, IoC) 0
155#define SK_DRIVER_SELFTEST(pAC, IoC) 0
 156/* To get the MTU you must add your own function */
157#define SK_DRIVER_GET_MTU(pAc,IoC,i) 0
158#define SK_DRIVER_SET_MTU(pAc,IoC,i,v) 0
159#define SK_DRIVER_PRESET_MTU(pAc,IoC,i,v) 0
160
161/*
162** Interim definition of SK_DRV_TIMER placed in this file until
163** common modules have been finalized
164*/
165#define SK_DRV_TIMER 11
166#define SK_DRV_MODERATION_TIMER 1
167#define SK_DRV_MODERATION_TIMER_LENGTH 1000000 /* 1 second */
168#define SK_DRV_RX_CLEANUP_TIMER 2
169#define SK_DRV_RX_CLEANUP_TIMER_LENGTH 1000000 /* 100 millisecs */
170
171/*
172** Definitions regarding transmitting frames
 173** and calculating their checksums.
174*/
175#define C_LEN_ETHERMAC_HEADER_DEST_ADDR 6
176#define C_LEN_ETHERMAC_HEADER_SRC_ADDR 6
177#define C_LEN_ETHERMAC_HEADER_LENTYPE 2
178#define C_LEN_ETHERMAC_HEADER ( (C_LEN_ETHERMAC_HEADER_DEST_ADDR) + \
179 (C_LEN_ETHERMAC_HEADER_SRC_ADDR) + \
180 (C_LEN_ETHERMAC_HEADER_LENTYPE) )
181
182#define C_LEN_ETHERMTU_MINSIZE 46
183#define C_LEN_ETHERMTU_MAXSIZE_STD 1500
184#define C_LEN_ETHERMTU_MAXSIZE_JUMBO 9000
185
186#define C_LEN_ETHERNET_MINSIZE ( (C_LEN_ETHERMAC_HEADER) + \
187 (C_LEN_ETHERMTU_MINSIZE) )
188
189#define C_OFFSET_IPHEADER C_LEN_ETHERMAC_HEADER
190#define C_OFFSET_IPHEADER_IPPROTO 9
191#define C_OFFSET_TCPHEADER_TCPCS 16
192#define C_OFFSET_UDPHEADER_UDPCS 6
193
194#define C_OFFSET_IPPROTO ( (C_LEN_ETHERMAC_HEADER) + \
195 (C_OFFSET_IPHEADER_IPPROTO) )
196
197#define C_PROTO_ID_UDP 17 /* refer to RFC 790 or Stevens' */
198#define C_PROTO_ID_TCP 6 /* TCP/IP illustrated for details */
199
200/* TX and RX descriptors *****************************************************/
201
202typedef struct s_RxD RXD; /* the receive descriptor */
203
204struct s_RxD {
205 volatile SK_U32 RBControl; /* Receive Buffer Control */
206 SK_U32 VNextRxd; /* Next receive descriptor,low dword */
207 SK_U32 VDataLow; /* Receive buffer Addr, low dword */
208 SK_U32 VDataHigh; /* Receive buffer Addr, high dword */
209 SK_U32 FrameStat; /* Receive Frame Status word */
210 SK_U32 TimeStamp; /* Time stamp from XMAC */
211 SK_U32 TcpSums; /* TCP Sum 2 / TCP Sum 1 */
212 SK_U32 TcpSumStarts; /* TCP Sum Start 2 / TCP Sum Start 1 */
213 RXD *pNextRxd; /* Pointer to next Rxd */
214 struct sk_buff *pMBuf; /* Pointer to Linux' socket buffer */
215};
216
217typedef struct s_TxD TXD; /* the transmit descriptor */
218
219struct s_TxD {
220 volatile SK_U32 TBControl; /* Transmit Buffer Control */
221 SK_U32 VNextTxd; /* Next transmit descriptor,low dword */
222 SK_U32 VDataLow; /* Transmit Buffer Addr, low dword */
223 SK_U32 VDataHigh; /* Transmit Buffer Addr, high dword */
224 SK_U32 FrameStat; /* Transmit Frame Status Word */
225 SK_U32 TcpSumOfs; /* Reserved / TCP Sum Offset */
226 SK_U16 TcpSumSt; /* TCP Sum Start */
227 SK_U16 TcpSumWr; /* TCP Sum Write */
228 SK_U32 TcpReserved; /* not used */
229 TXD *pNextTxd; /* Pointer to next Txd */
230 struct sk_buff *pMBuf; /* Pointer to Linux' socket buffer */
231};
232
233/* Used interrupt bits in the interrupts source register *********************/
234
235#define DRIVER_IRQS ((IS_IRQ_SW) | \
236 (IS_R1_F) |(IS_R2_F) | \
237 (IS_XS1_F) |(IS_XA1_F) | \
238 (IS_XS2_F) |(IS_XA2_F))
239
240#define SPECIAL_IRQS ((IS_HW_ERR) |(IS_I2C_READY) | \
241 (IS_EXT_REG) |(IS_TIMINT) | \
242 (IS_PA_TO_RX1) |(IS_PA_TO_RX2) | \
243 (IS_PA_TO_TX1) |(IS_PA_TO_TX2) | \
244 (IS_MAC1) |(IS_LNK_SYNC_M1)| \
245 (IS_MAC2) |(IS_LNK_SYNC_M2)| \
246 (IS_R1_C) |(IS_R2_C) | \
247 (IS_XS1_C) |(IS_XA1_C) | \
248 (IS_XS2_C) |(IS_XA2_C))
249
250#define IRQ_MASK ((IS_IRQ_SW) | \
251 (IS_R1_B) |(IS_R1_F) |(IS_R2_B) |(IS_R2_F) | \
252 (IS_XS1_B) |(IS_XS1_F) |(IS_XA1_B)|(IS_XA1_F)| \
253 (IS_XS2_B) |(IS_XS2_F) |(IS_XA2_B)|(IS_XA2_F)| \
254 (IS_HW_ERR) |(IS_I2C_READY)| \
255 (IS_EXT_REG) |(IS_TIMINT) | \
256 (IS_PA_TO_RX1) |(IS_PA_TO_RX2)| \
257 (IS_PA_TO_TX1) |(IS_PA_TO_TX2)| \
258 (IS_MAC1) |(IS_MAC2) | \
259 (IS_R1_C) |(IS_R2_C) | \
260 (IS_XS1_C) |(IS_XA1_C) | \
261 (IS_XS2_C) |(IS_XA2_C))
262
263#define IRQ_HWE_MASK (IS_ERR_MSK) /* enable all HW irqs */
264
265typedef struct s_DevNet DEV_NET;
266
267struct s_DevNet {
268 int PortNr;
269 int NetNr;
270 SK_AC *pAC;
271};
272
273typedef struct s_TxPort TX_PORT;
274
275struct s_TxPort {
276 /* the transmit descriptor rings */
277 caddr_t pTxDescrRing; /* descriptor area memory */
278 SK_U64 VTxDescrRing; /* descr. area bus virt. addr. */
279 TXD *pTxdRingHead; /* Head of Tx rings */
280 TXD *pTxdRingTail; /* Tail of Tx rings */
281 TXD *pTxdRingPrev; /* descriptor sent previously */
 282	int		TxdRingFree;	/* # of free entries */
283 spinlock_t TxDesRingLock; /* serialize descriptor accesses */
284 SK_IOC HwAddr; /* bmu registers address */
285 int PortIndex; /* index number of port (0 or 1) */
286};
287
288typedef struct s_RxPort RX_PORT;
289
290struct s_RxPort {
291 /* the receive descriptor rings */
292 caddr_t pRxDescrRing; /* descriptor area memory */
293 SK_U64 VRxDescrRing; /* descr. area bus virt. addr. */
294 RXD *pRxdRingHead; /* Head of Rx rings */
295 RXD *pRxdRingTail; /* Tail of Rx rings */
296 RXD *pRxdRingPrev; /* descriptor given to BMU previously */
 297	int		RxdRingFree;	/* # of free entries */
298 int RxCsum; /* use receive checksum hardware */
299 spinlock_t RxDesRingLock; /* serialize descriptor accesses */
300 int RxFillLimit; /* limit for buffers in ring */
301 SK_IOC HwAddr; /* bmu registers address */
302 int PortIndex; /* index number of port (0 or 1) */
303};
304
305/* Definitions needed for interrupt moderation *******************************/
306
307#define IRQ_EOF_AS_TX ((IS_XA1_F) | (IS_XA2_F))
308#define IRQ_EOF_SY_TX ((IS_XS1_F) | (IS_XS2_F))
309#define IRQ_MASK_TX_ONLY ((IRQ_EOF_AS_TX)| (IRQ_EOF_SY_TX))
310#define IRQ_MASK_RX_ONLY ((IS_R1_F) | (IS_R2_F))
311#define IRQ_MASK_SP_ONLY (SPECIAL_IRQS)
312#define IRQ_MASK_TX_RX ((IRQ_MASK_TX_ONLY)| (IRQ_MASK_RX_ONLY))
313#define IRQ_MASK_SP_RX ((SPECIAL_IRQS) | (IRQ_MASK_RX_ONLY))
314#define IRQ_MASK_SP_TX ((SPECIAL_IRQS) | (IRQ_MASK_TX_ONLY))
315#define IRQ_MASK_RX_TX_SP ((SPECIAL_IRQS) | (IRQ_MASK_TX_RX))
316
317#define C_INT_MOD_NONE 1
318#define C_INT_MOD_STATIC 2
319#define C_INT_MOD_DYNAMIC 4
320
321#define C_CLK_FREQ_GENESIS 53215000 /* shorter: 53.125 MHz */
322#define C_CLK_FREQ_YUKON 78215000 /* shorter: 78.125 MHz */
323
324#define C_INTS_PER_SEC_DEFAULT 2000
 325#define C_INT_MOD_ENABLE_PERCENTAGE 50 /* enable if above 50% */
 326#define C_INT_MOD_DISABLE_PERCENTAGE 50 /* disable if below 50% */
327#define C_INT_MOD_IPS_LOWER_RANGE 30
328#define C_INT_MOD_IPS_UPPER_RANGE 40000
329
330
331typedef struct s_DynIrqModInfo DIM_INFO;
332struct s_DynIrqModInfo {
333 unsigned long PrevTimeVal;
334 unsigned int PrevSysLoad;
335 unsigned int PrevUsedTime;
336 unsigned int PrevTotalTime;
337 int PrevUsedDescrRatio;
338 int NbrProcessedDescr;
339 SK_U64 PrevPort0RxIntrCts;
340 SK_U64 PrevPort1RxIntrCts;
341 SK_U64 PrevPort0TxIntrCts;
342 SK_U64 PrevPort1TxIntrCts;
343 SK_BOOL ModJustEnabled; /* Moderation just enabled yes/no */
344
345 int MaxModIntsPerSec; /* Moderation Threshold */
346 int MaxModIntsPerSecUpperLimit; /* Upper limit for DIM */
347 int MaxModIntsPerSecLowerLimit; /* Lower limit for DIM */
348
349 long MaskIrqModeration; /* ModIrqType (eg. 'TxRx') */
350 SK_BOOL DisplayStats; /* Stats yes/no */
351 SK_BOOL AutoSizing; /* Resize DIM-timer on/off */
352 int IntModTypeSelect; /* EnableIntMod (eg. 'dynamic') */
353
354 SK_TIMER ModTimer; /* just some timer */
355};
356
357typedef struct s_PerStrm PER_STRM;
358
359#define SK_ALLOC_IRQ 0x00000001
360
361#ifdef SK_DIAG_SUPPORT
362#define DIAG_ACTIVE 1
363#define DIAG_NOTACTIVE 0
364#endif
365
366/****************************************************************************
367 * Per board structure / Adapter Context structure:
368 * Allocated within attach(9e) and freed within detach(9e).
369 * Contains all 'per device' necessary handles, flags, locks etc.:
370 */
371struct s_AC {
372 SK_GEINIT GIni; /* GE init struct */
373 SK_PNMI Pnmi; /* PNMI data struct */
374 SK_VPD vpd; /* vpd data struct */
375 SK_QUEUE Event; /* Event queue */
376 SK_HWT Hwt; /* Hardware Timer control struct */
377 SK_TIMCTRL Tim; /* Software Timer control struct */
378 SK_I2C I2c; /* I2C relevant data structure */
379 SK_ADDR Addr; /* for Address module */
380 SK_CSUM Csum; /* for checksum module */
381 SK_RLMT Rlmt; /* for rlmt module */
382 spinlock_t SlowPathLock; /* Normal IRQ lock */
383 struct timer_list BlinkTimer; /* for LED blinking */
384 int LedsOn;
385 SK_PNMI_STRUCT_DATA PnmiStruct; /* structure to get all Pnmi-Data */
386 int RlmtMode; /* link check mode to set */
387 int RlmtNets; /* Number of nets */
388
389 SK_IOC IoBase; /* register set of adapter */
390 int BoardLevel; /* level of active hw init (0-2) */
391
392 SK_U32 AllocFlag; /* flag allocation of resources */
393 struct pci_dev *PciDev; /* for access to pci config space */
394 struct SK_NET_DEVICE *dev[2]; /* pointer to device struct */
395
396 int RxBufSize; /* length of receive buffers */
397 struct net_device_stats stats; /* linux 'netstat -i' statistics */
398 int Index; /* internal board index number */
399
400 /* adapter RAM sizes for queues of active port */
401 int RxQueueSize; /* memory used for receive queue */
402 int TxSQueueSize; /* memory used for sync. tx queue */
403 int TxAQueueSize; /* memory used for async. tx queue */
404
405 int PromiscCount; /* promiscuous mode counter */
406 int AllMultiCount; /* allmulticast mode counter */
407 int MulticCount; /* number of different MC */
408 /* addresses for this board */
409 /* (may be more than HW can)*/
410
411 int HWRevision; /* Hardware revision */
412 int ActivePort; /* the active XMAC port */
413 int MaxPorts; /* number of activated ports */
414 int TxDescrPerRing; /* # of descriptors per tx ring */
415 int RxDescrPerRing; /* # of descriptors per rx ring */
416
417 caddr_t pDescrMem; /* Pointer to the descriptor area */
418 dma_addr_t pDescrMemDMA; /* PCI DMA address of area */
419
420 /* the port structures with descriptor rings */
421 TX_PORT TxPort[SK_MAX_MACS][2];
422 RX_PORT RxPort[SK_MAX_MACS];
423
424 SK_BOOL CheckQueue; /* check event queue soon */
425 SK_TIMER DrvCleanupTimer;/* to check for pending descriptors */
426 DIM_INFO DynIrqModInfo; /* all data related to DIM */
427
428 /* Only for tests */
429 int PortDown;
430 int ChipsetType; /* Chipset family type
431 * 0 == Genesis family support
432 * 1 == Yukon family support
433 */
434#ifdef SK_DIAG_SUPPORT
435 SK_U32 DiagModeActive; /* is diag active? */
436 SK_BOOL DiagFlowCtrl; /* for control purposes */
437 SK_PNMI_STRUCT_DATA PnmiBackup; /* backup structure for all Pnmi-Data */
438 SK_BOOL WasIfUp[SK_MAX_MACS]; /* for OpenClose while
439 * DIAG is busy with NIC
440 */
441#endif
442
443};
444
445
446#endif /* __INC_SKDRV2ND_H */
447
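[Editor's note: illustrative only, not part of the removed skdrv2nd.h.] The SkOsGetTimeCurrent() macro above folds a (seconds, microseconds) pair into hundredths of a second, matching SK_TICKS_PER_SEC == 100. A user-space sketch of the same conversion (hundredths_now is a hypothetical helper; the 64-bit intermediate avoids overflow that the original macro risks on 32-bit longs):

/* Sketch of converting gettimeofday() output to hundredths of a second. */
#include <stdio.h>
#include <sys/time.h>

static unsigned long hundredths_now(void)
{
	struct timeval t;

	gettimeofday(&t, NULL);
	/* (sec * 1e6 + usec) microseconds, divided by 10000 = hundredths. */
	return (unsigned long)(((long long)t.tv_sec * 1000000LL + t.tv_usec) / 10000);
}

int main(void)
{
	printf("hundredths of a second since the epoch: %lu\n", hundredths_now());
	return 0;
}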
diff --git a/drivers/net/sk98lin/h/skerror.h b/drivers/net/sk98lin/h/skerror.h
deleted file mode 100644
index da062f766238..000000000000
--- a/drivers/net/sk98lin/h/skerror.h
+++ /dev/null
@@ -1,55 +0,0 @@
1/******************************************************************************
2 *
3 * Name: skerror.h
4 * Project: Gigabit Ethernet Adapters, Common Modules
5 * Version: $Revision: 1.7 $
6 * Date: $Date: 2003/05/13 17:25:13 $
7 * Purpose: SK specific Error log support
8 *
9 ******************************************************************************/
10
11/******************************************************************************
12 *
13 * (C)Copyright 1998-2002 SysKonnect.
14 * (C)Copyright 2002-2003 Marvell.
15 *
16 * This program is free software; you can redistribute it and/or modify
17 * it under the terms of the GNU General Public License as published by
18 * the Free Software Foundation; either version 2 of the License, or
19 * (at your option) any later version.
20 *
21 * The information in this file is provided "AS IS" without warranty.
22 *
23 ******************************************************************************/
24
25#ifndef _INC_SKERROR_H_
26#define _INC_SKERROR_H_
27
28/*
29 * Define Error Classes
30 */
31#define SK_ERRCL_OTHER (0) /* Other error */
32#define SK_ERRCL_CONFIG (1L<<0) /* Configuration error */
33#define SK_ERRCL_INIT (1L<<1) /* Initialization error */
34#define SK_ERRCL_NORES (1L<<2) /* Out of Resources error */
35#define SK_ERRCL_SW (1L<<3) /* Internal Software error */
36#define SK_ERRCL_HW (1L<<4) /* Hardware Failure */
37#define SK_ERRCL_COMM (1L<<5) /* Communication error */
38
39
40/*
41 * Define Error Code Bases
42 */
43#define SK_ERRBASE_RLMT 100 /* Base Error number for RLMT */
44#define SK_ERRBASE_HWINIT 200 /* Base Error number for HWInit */
45#define SK_ERRBASE_VPD 300 /* Base Error number for VPD */
46#define SK_ERRBASE_PNMI 400 /* Base Error number for PNMI */
47#define SK_ERRBASE_CSUM 500 /* Base Error number for Checksum */
48#define SK_ERRBASE_SIRQ 600 /* Base Error number for Special IRQ */
49#define SK_ERRBASE_I2C 700 /* Base Error number for I2C module */
50#define SK_ERRBASE_QUEUE 800 /* Base Error number for Scheduler */
51#define SK_ERRBASE_ADDR 900 /* Base Error number for Address module */
52#define SK_ERRBASE_PECP 1000 /* Base Error number for PECP */
53#define SK_ERRBASE_DRV 1100 /* Base Error number for Driver */
54
55#endif /* _INC_SKERROR_H_ */
diff --git a/drivers/net/sk98lin/h/skgedrv.h b/drivers/net/sk98lin/h/skgedrv.h
deleted file mode 100644
index 44fd4c3de818..000000000000
--- a/drivers/net/sk98lin/h/skgedrv.h
+++ /dev/null
@@ -1,51 +0,0 @@
1/******************************************************************************
2 *
3 * Name: skgedrv.h
4 * Project: Gigabit Ethernet Adapters, Common Modules
5 * Version: $Revision: 1.10 $
6 * Date: $Date: 2003/07/04 12:25:01 $
7 * Purpose: Interface with the driver
8 *
9 ******************************************************************************/
10
11/******************************************************************************
12 *
13 * (C)Copyright 1998-2002 SysKonnect.
14 * (C)Copyright 2002-2003 Marvell.
15 *
16 * This program is free software; you can redistribute it and/or modify
17 * it under the terms of the GNU General Public License as published by
18 * the Free Software Foundation; either version 2 of the License, or
19 * (at your option) any later version.
20 *
21 * The information in this file is provided "AS IS" without warranty.
22 *
23 ******************************************************************************/
24
25#ifndef __INC_SKGEDRV_H_
26#define __INC_SKGEDRV_H_
27
28/* defines ********************************************************************/
29
30/*
31 * Define the driver events.
32 * Usually the events are defined by the destination module.
33 * In case of the driver we put the definition of the events here.
34 */
35#define SK_DRV_PORT_RESET 1 /* The port needs to be reset */
36#define SK_DRV_NET_UP 2 /* The net is operational */
37#define SK_DRV_NET_DOWN 3 /* The net is down */
38#define SK_DRV_SWITCH_SOFT 4 /* Ports switch with both links connected */
39#define SK_DRV_SWITCH_HARD 5 /* Port switch due to link failure */
40#define SK_DRV_RLMT_SEND 6 /* Send a RLMT packet */
41#define SK_DRV_ADAP_FAIL 7 /* The whole adapter fails */
42#define SK_DRV_PORT_FAIL 8 /* One port fails */
43#define SK_DRV_SWITCH_INTERN 9 /* Port switch by the driver itself */
44#define SK_DRV_POWER_DOWN 10 /* Power down mode */
45#define SK_DRV_TIMER 11 /* Timer for free use */
46#ifdef SK_NO_RLMT
47#define SK_DRV_LINK_UP 12 /* Link Up event for driver */
48#define SK_DRV_LINK_DOWN 13 /* Link Down event for driver */
49#endif
50#define SK_DRV_DOWNSHIFT_DET 14 /* Downshift 4-Pair / 2-Pair (YUKON only) */
51#endif /* __INC_SKGEDRV_H_ */
diff --git a/drivers/net/sk98lin/h/skgehw.h b/drivers/net/sk98lin/h/skgehw.h
deleted file mode 100644
index f6282b7956db..000000000000
--- a/drivers/net/sk98lin/h/skgehw.h
+++ /dev/null
@@ -1,2126 +0,0 @@
1/******************************************************************************
2 *
3 * Name: skgehw.h
4 * Project: Gigabit Ethernet Adapters, Common Modules
5 * Version: $Revision: 1.56 $
6 * Date: $Date: 2003/09/23 09:01:00 $
7 * Purpose: Defines and Macros for the Gigabit Ethernet Adapter Product Family
8 *
9 ******************************************************************************/
10
11/******************************************************************************
12 *
13 * (C)Copyright 1998-2002 SysKonnect.
14 * (C)Copyright 2002-2003 Marvell.
15 *
16 * This program is free software; you can redistribute it and/or modify
17 * it under the terms of the GNU General Public License as published by
18 * the Free Software Foundation; either version 2 of the License, or
19 * (at your option) any later version.
20 *
21 * The information in this file is provided "AS IS" without warranty.
22 *
23 ******************************************************************************/
24
25#ifndef __INC_SKGEHW_H
26#define __INC_SKGEHW_H
27
28#ifdef __cplusplus
29extern "C" {
30#endif /* __cplusplus */
31
32/* defines ********************************************************************/
33
34#define BIT_31 (1UL << 31)
35#define BIT_30 (1L << 30)
36#define BIT_29 (1L << 29)
37#define BIT_28 (1L << 28)
38#define BIT_27 (1L << 27)
39#define BIT_26 (1L << 26)
40#define BIT_25 (1L << 25)
41#define BIT_24 (1L << 24)
42#define BIT_23 (1L << 23)
43#define BIT_22 (1L << 22)
44#define BIT_21 (1L << 21)
45#define BIT_20 (1L << 20)
46#define BIT_19 (1L << 19)
47#define BIT_18 (1L << 18)
48#define BIT_17 (1L << 17)
49#define BIT_16 (1L << 16)
50#define BIT_15 (1L << 15)
51#define BIT_14 (1L << 14)
52#define BIT_13 (1L << 13)
53#define BIT_12 (1L << 12)
54#define BIT_11 (1L << 11)
55#define BIT_10 (1L << 10)
56#define BIT_9 (1L << 9)
57#define BIT_8 (1L << 8)
58#define BIT_7 (1L << 7)
59#define BIT_6 (1L << 6)
60#define BIT_5 (1L << 5)
61#define BIT_4 (1L << 4)
62#define BIT_3 (1L << 3)
63#define BIT_2 (1L << 2)
64#define BIT_1 (1L << 1)
65#define BIT_0 1L
66
67#define BIT_15S (1U << 15)
68#define BIT_14S (1 << 14)
69#define BIT_13S (1 << 13)
70#define BIT_12S (1 << 12)
71#define BIT_11S (1 << 11)
72#define BIT_10S (1 << 10)
73#define BIT_9S (1 << 9)
74#define BIT_8S (1 << 8)
75#define BIT_7S (1 << 7)
76#define BIT_6S (1 << 6)
77#define BIT_5S (1 << 5)
78#define BIT_4S (1 << 4)
79#define BIT_3S (1 << 3)
80#define BIT_2S (1 << 2)
81#define BIT_1S (1 << 1)
82#define BIT_0S 1
83
84#define SHIFT31(x) ((x) << 31)
85#define SHIFT30(x) ((x) << 30)
86#define SHIFT29(x) ((x) << 29)
87#define SHIFT28(x) ((x) << 28)
88#define SHIFT27(x) ((x) << 27)
89#define SHIFT26(x) ((x) << 26)
90#define SHIFT25(x) ((x) << 25)
91#define SHIFT24(x) ((x) << 24)
92#define SHIFT23(x) ((x) << 23)
93#define SHIFT22(x) ((x) << 22)
94#define SHIFT21(x) ((x) << 21)
95#define SHIFT20(x) ((x) << 20)
96#define SHIFT19(x) ((x) << 19)
97#define SHIFT18(x) ((x) << 18)
98#define SHIFT17(x) ((x) << 17)
99#define SHIFT16(x) ((x) << 16)
100#define SHIFT15(x) ((x) << 15)
101#define SHIFT14(x) ((x) << 14)
102#define SHIFT13(x) ((x) << 13)
103#define SHIFT12(x) ((x) << 12)
104#define SHIFT11(x) ((x) << 11)
105#define SHIFT10(x) ((x) << 10)
106#define SHIFT9(x) ((x) << 9)
107#define SHIFT8(x) ((x) << 8)
108#define SHIFT7(x) ((x) << 7)
109#define SHIFT6(x) ((x) << 6)
110#define SHIFT5(x) ((x) << 5)
111#define SHIFT4(x) ((x) << 4)
112#define SHIFT3(x) ((x) << 3)
113#define SHIFT2(x) ((x) << 2)
114#define SHIFT1(x) ((x) << 1)
115#define SHIFT0(x) ((x) << 0)
116
117/*
118 * Configuration Space header
 119 * Since this module is used for different OSes, some of these
 120 * definitions may be duplicated there (e.g. on Linux). But to keep
 121 * the common source, we have to live with this...
122 */
123#define PCI_VENDOR_ID 0x00 /* 16 bit Vendor ID */
124#define PCI_DEVICE_ID 0x02 /* 16 bit Device ID */
125#define PCI_COMMAND 0x04 /* 16 bit Command */
126#define PCI_STATUS 0x06 /* 16 bit Status */
127#define PCI_REV_ID 0x08 /* 8 bit Revision ID */
128#define PCI_CLASS_CODE 0x09 /* 24 bit Class Code */
129#define PCI_CACHE_LSZ 0x0c /* 8 bit Cache Line Size */
130#define PCI_LAT_TIM 0x0d /* 8 bit Latency Timer */
131#define PCI_HEADER_T 0x0e /* 8 bit Header Type */
132#define PCI_BIST 0x0f /* 8 bit Built-in selftest */
133#define PCI_BASE_1ST 0x10 /* 32 bit 1st Base address */
134#define PCI_BASE_2ND 0x14 /* 32 bit 2nd Base address */
135 /* Byte 0x18..0x2b: reserved */
136#define PCI_SUB_VID 0x2c /* 16 bit Subsystem Vendor ID */
137#define PCI_SUB_ID 0x2e /* 16 bit Subsystem ID */
138#define PCI_BASE_ROM 0x30 /* 32 bit Expansion ROM Base Address */
139#define PCI_CAP_PTR 0x34 /* 8 bit Capabilities Ptr */
140 /* Byte 0x35..0x3b: reserved */
141#define PCI_IRQ_LINE 0x3c /* 8 bit Interrupt Line */
142#define PCI_IRQ_PIN 0x3d /* 8 bit Interrupt Pin */
143#define PCI_MIN_GNT 0x3e /* 8 bit Min_Gnt */
144#define PCI_MAX_LAT 0x3f /* 8 bit Max_Lat */
145 /* Device Dependent Region */
146#define PCI_OUR_REG_1 0x40 /* 32 bit Our Register 1 */
147#define PCI_OUR_REG_2 0x44 /* 32 bit Our Register 2 */
148 /* Power Management Region */
149#define PCI_PM_CAP_ID 0x48 /* 8 bit Power Management Cap. ID */
150#define PCI_PM_NITEM 0x49 /* 8 bit Next Item Ptr */
151#define PCI_PM_CAP_REG 0x4a /* 16 bit Power Management Capabilities */
152#define PCI_PM_CTL_STS 0x4c /* 16 bit Power Manag. Control/Status */
153 /* Byte 0x4e: reserved */
154#define PCI_PM_DAT_REG 0x4f /* 8 bit Power Manag. Data Register */
155 /* VPD Region */
156#define PCI_VPD_CAP_ID 0x50 /* 8 bit VPD Cap. ID */
157#define PCI_VPD_NITEM 0x51 /* 8 bit Next Item Ptr */
158#define PCI_VPD_ADR_REG 0x52 /* 16 bit VPD Address Register */
159#define PCI_VPD_DAT_REG 0x54 /* 32 bit VPD Data Register */
160 /* Byte 0x58..0x59: reserved */
161#define PCI_SER_LD_CTRL 0x5a /* 16 bit SEEPROM Loader Ctrl (YUKON only) */
162 /* Byte 0x5c..0xff: reserved */
163
164/*
165 * I2C Address (PCI Config)
166 *
167 * Note: The temperature and voltage sensors are relocated on a different
168 * I2C bus.
169 */
170#define I2C_ADDR_VPD 0xa0 /* I2C address for the VPD EEPROM */
171
172/*
173 * Define Bits and Values of the registers
174 */
175/* PCI_COMMAND 16 bit Command */
176 /* Bit 15..11: reserved */
177#define PCI_INT_DIS BIT_10S /* Interrupt INTx# disable (PCI 2.3) */
178#define PCI_FBTEN BIT_9S /* Fast Back-To-Back enable */
179#define PCI_SERREN BIT_8S /* SERR enable */
180#define PCI_ADSTEP BIT_7S /* Address Stepping */
181#define PCI_PERREN BIT_6S /* Parity Report Response enable */
182#define PCI_VGA_SNOOP BIT_5S /* VGA palette snoop */
 183#define PCI_MWIEN BIT_4S /* Memory Write and Invalidate cycle enable */
184#define PCI_SCYCEN BIT_3S /* Special Cycle enable */
185#define PCI_BMEN BIT_2S /* Bus Master enable */
186#define PCI_MEMEN BIT_1S /* Memory Space Access enable */
187#define PCI_IOEN BIT_0S /* I/O Space Access enable */
188
189#define PCI_COMMAND_VAL (PCI_FBTEN | PCI_SERREN | PCI_PERREN | PCI_MWIEN |\
190 PCI_BMEN | PCI_MEMEN | PCI_IOEN)
191
192/* PCI_STATUS 16 bit Status */
193#define PCI_PERR BIT_15S /* Parity Error */
194#define PCI_SERR BIT_14S /* Signaled SERR */
195#define PCI_RMABORT BIT_13S /* Received Master Abort */
196#define PCI_RTABORT BIT_12S /* Received Target Abort */
197 /* Bit 11: reserved */
198#define PCI_DEVSEL (3<<9) /* Bit 10.. 9: DEVSEL Timing */
199#define PCI_DEV_FAST (0<<9) /* fast */
200#define PCI_DEV_MEDIUM (1<<9) /* medium */
201#define PCI_DEV_SLOW (2<<9) /* slow */
202#define PCI_DATAPERR BIT_8S /* DATA Parity error detected */
203#define PCI_FB2BCAP BIT_7S /* Fast Back-to-Back Capability */
204#define PCI_UDF BIT_6S /* User Defined Features */
205#define PCI_66MHZCAP BIT_5S /* 66 MHz PCI bus clock capable */
206#define PCI_NEWCAP BIT_4S /* New cap. list implemented */
207#define PCI_INT_STAT BIT_3S /* Interrupt INTx# Status (PCI 2.3) */
208 /* Bit 2.. 0: reserved */
209
210#define PCI_ERRBITS (PCI_PERR | PCI_SERR | PCI_RMABORT | PCI_RTABORT |\
211 PCI_DATAPERR)
212
213/* PCI_CLASS_CODE 24 bit Class Code */
214/* Byte 2: Base Class (02) */
215/* Byte 1: SubClass (00) */
216/* Byte 0: Programming Interface (00) */
217
218/* PCI_CACHE_LSZ 8 bit Cache Line Size */
219/* Possible values: 0,2,4,8,16,32,64,128 */
220
221/* PCI_HEADER_T 8 bit Header Type */
222#define PCI_HD_MF_DEV BIT_7S /* 0= single, 1= multi-func dev */
223#define PCI_HD_TYPE 0x7f /* Bit 6..0: Header Layout 0= normal */
224
225/* PCI_BIST 8 bit Built-in selftest */
226/* Built-in Self test not supported (optional) */
227
228/* PCI_BASE_1ST 32 bit 1st Base address */
229#define PCI_MEMSIZE 0x4000L /* use 16 kB Memory Base */
230#define PCI_MEMBASE_MSK 0xffffc000L /* Bit 31..14: Memory Base Address */
231#define PCI_MEMSIZE_MSK 0x00003ff0L /* Bit 13.. 4: Memory Size Req. */
232#define PCI_PREFEN BIT_3 /* Prefetchable */
233#define PCI_MEM_TYP (3L<<2) /* Bit 2.. 1: Memory Type */
234#define PCI_MEM32BIT (0L<<1) /* Base addr anywhere in 32 Bit range */
235#define PCI_MEM1M (1L<<1) /* Base addr below 1 MegaByte */
236#define PCI_MEM64BIT (2L<<1) /* Base addr anywhere in 64 Bit range */
237#define PCI_MEMSPACE BIT_0 /* Memory Space Indicator */
238
239/* PCI_BASE_2ND 32 bit 2nd Base address */
240#define PCI_IOBASE 0xffffff00L /* Bit 31.. 8: I/O Base address */
241#define PCI_IOSIZE 0x000000fcL /* Bit 7.. 2: I/O Size Requirements */
242 /* Bit 1: reserved */
243#define PCI_IOSPACE BIT_0 /* I/O Space Indicator */
244
245/* PCI_BASE_ROM 32 bit Expansion ROM Base Address */
246#define PCI_ROMBASE_MSK 0xfffe0000L /* Bit 31..17: ROM Base address */
247#define PCI_ROMBASE_SIZ (0x1cL<<14) /* Bit 16..14: Treat as Base or Size */
248#define PCI_ROMSIZE (0x38L<<11) /* Bit 13..11: ROM Size Requirements */
249 /* Bit 10.. 1: reserved */
250#define PCI_ROMEN BIT_0 /* Address Decode enable */
251
252/* Device Dependent Region */
253/* PCI_OUR_REG_1 32 bit Our Register 1 */
254 /* Bit 31..29: reserved */
255#define PCI_PHY_COMA BIT_28 /* Set PHY to Coma Mode (YUKON only) */
256#define PCI_TEST_CAL BIT_27 /* Test PCI buffer calib. (YUKON only) */
257#define PCI_EN_CAL BIT_26 /* Enable PCI buffer calib. (YUKON only) */
258#define PCI_VIO BIT_25 /* PCI I/O Voltage, 0 = 3.3V, 1 = 5V */
259#define PCI_DIS_BOOT BIT_24 /* Disable BOOT via ROM */
260#define PCI_EN_IO BIT_23 /* Mapping to I/O space */
261#define PCI_EN_FPROM BIT_22 /* Enable FLASH mapping to memory */
262 /* 1 = Map Flash to memory */
263 /* 0 = Disable addr. dec */
264#define PCI_PAGESIZE (3L<<20) /* Bit 21..20: FLASH Page Size */
265#define PCI_PAGE_16 (0L<<20) /* 16 k pages */
266#define PCI_PAGE_32K (1L<<20) /* 32 k pages */
267#define PCI_PAGE_64K (2L<<20) /* 64 k pages */
268#define PCI_PAGE_128K (3L<<20) /* 128 k pages */
269 /* Bit 19: reserved */
270#define PCI_PAGEREG (7L<<16) /* Bit 18..16: Page Register */
271#define PCI_NOTAR BIT_15 /* No turnaround cycle */
272#define PCI_FORCE_BE BIT_14 /* Assert all BEs on MR */
273#define PCI_DIS_MRL BIT_13 /* Disable Mem Read Line */
274#define PCI_DIS_MRM BIT_12 /* Disable Mem Read Multiple */
275#define PCI_DIS_MWI BIT_11 /* Disable Mem Write & Invalidate */
276#define PCI_DISC_CLS BIT_10 /* Disc: cacheLsz bound */
277#define PCI_BURST_DIS BIT_9 /* Burst Disable */
278#define PCI_DIS_PCI_CLK BIT_8 /* Disable PCI clock driving */
279#define PCI_SKEW_DAS (0xfL<<4) /* Bit 7.. 4: Skew Ctrl, DAS Ext */
280#define PCI_SKEW_BASE 0xfL /* Bit 3.. 0: Skew Ctrl, Base */
281
282
283/* PCI_OUR_REG_2 32 bit Our Register 2 */
284#define PCI_VPD_WR_THR (0xffL<<24) /* Bit 31..24: VPD Write Threshold */
285#define PCI_DEV_SEL (0x7fL<<17) /* Bit 23..17: EEPROM Device Select */
286#define PCI_VPD_ROM_SZ (7L<<14) /* Bit 16..14: VPD ROM Size */
287 /* Bit 13..12: reserved */
288#define PCI_PATCH_DIR (0xfL<<8) /* Bit 11.. 8: Ext Patches dir 3..0 */
289#define PCI_PATCH_DIR_3 BIT_11
290#define PCI_PATCH_DIR_2 BIT_10
291#define PCI_PATCH_DIR_1 BIT_9
292#define PCI_PATCH_DIR_0 BIT_8
293#define PCI_EXT_PATCHS (0xfL<<4) /* Bit 7.. 4: Extended Patches 3..0 */
294#define PCI_EXT_PATCH_3 BIT_7
295#define PCI_EXT_PATCH_2 BIT_6
296#define PCI_EXT_PATCH_1 BIT_5
297#define PCI_EXT_PATCH_0 BIT_4
298#define PCI_EN_DUMMY_RD BIT_3 /* Enable Dummy Read */
299#define PCI_REV_DESC BIT_2 /* Reverse Desc. Bytes */
300 /* Bit 1: reserved */
301#define PCI_USEDATA64 BIT_0 /* Use 64Bit Data bus ext */
302
303
304/* Power Management Region */
305/* PCI_PM_CAP_REG 16 bit Power Management Capabilities */
306#define PCI_PME_SUP_MSK (0x1f<<11) /* Bit 15..11: PM Event Support Mask */
307#define PCI_PME_D3C_SUP BIT_15S /* PME from D3cold Support (if Vaux) */
308#define PCI_PME_D3H_SUP BIT_14S /* PME from D3hot Support */
309#define PCI_PME_D2_SUP BIT_13S /* PME from D2 Support */
310#define PCI_PME_D1_SUP BIT_12S /* PME from D1 Support */
311#define PCI_PME_D0_SUP BIT_11S /* PME from D0 Support */
312#define PCI_PM_D2_SUP BIT_10S /* D2 Support in 33 MHz mode */
313#define PCI_PM_D1_SUP BIT_9S /* D1 Support */
314 /* Bit 8.. 6: reserved */
315#define PCI_PM_DSI BIT_5S /* Device Specific Initialization */
 316#define PCI_PM_APS BIT_4S /* Auxiliary Power Source */
317#define PCI_PME_CLOCK BIT_3S /* PM Event Clock */
318#define PCI_PM_VER_MSK 7 /* Bit 2.. 0: PM PCI Spec. version */
319
320/* PCI_PM_CTL_STS 16 bit Power Management Control/Status */
321#define PCI_PME_STATUS BIT_15S /* PME Status (YUKON only) */
322#define PCI_PM_DAT_SCL (3<<13) /* Bit 14..13: Data Reg. scaling factor */
323#define PCI_PM_DAT_SEL (0xf<<9) /* Bit 12.. 9: PM data selector field */
324#define PCI_PME_EN BIT_8S /* Enable PME# generation (YUKON only) */
325 /* Bit 7.. 2: reserved */
326#define PCI_PM_STATE_MSK 3 /* Bit 1.. 0: Power Management State */
327
328#define PCI_PM_STATE_D0 0 /* D0: Operational (default) */
329#define PCI_PM_STATE_D1 1 /* D1: (YUKON only) */
330#define PCI_PM_STATE_D2 2 /* D2: (YUKON only) */
331#define PCI_PM_STATE_D3 3 /* D3: HOT, Power Down and Reset */
332
333/* VPD Region */
334/* PCI_VPD_ADR_REG 16 bit VPD Address Register */
335#define PCI_VPD_FLAG BIT_15S /* starts VPD rd/wr cycle */
336#define PCI_VPD_ADR_MSK 0x7fffL /* Bit 14.. 0: VPD address mask */
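
/*
 * Illustrative sketch of a VPD read using the bits above, assuming the
 * standard PCI VPD handshake (write the address with the flag cleared to
 * start a read; hardware sets PCI_VPD_FLAG when the data word is valid).
 * 'pdev' and 'vpd_adr_off' (the config space offset of PCI_VPD_ADR_REG)
 * are illustrative assumptions only.
 */
#if 0
static int example_vpd_read_start(struct pci_dev *pdev, int vpd_adr_off, u16 addr)
{
	u16 val;
	int retries = 100;

	/* start the read cycle at 'addr' (flag bit left cleared) */
	pci_write_config_word(pdev, vpd_adr_off, addr & PCI_VPD_ADR_MSK);

	/* poll until the hardware signals that the data word is ready */
	do {
		pci_read_config_word(pdev, vpd_adr_off, &val);
		if (val & PCI_VPD_FLAG)
			return 0;
		udelay(10);
	} while (--retries);

	return -1;	/* timed out */
}
#endif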
337
338/* Control Register File (Address Map) */
339
340/*
341 * Bank 0
342 */
343#define B0_RAP 0x0000 /* 8 bit Register Address Port */
344 /* 0x0001 - 0x0003: reserved */
345#define B0_CTST 0x0004 /* 16 bit Control/Status register */
346#define B0_LED 0x0006 /* 8 Bit LED register */
347#define B0_POWER_CTRL 0x0007 /* 8 Bit Power Control reg (YUKON only) */
348#define B0_ISRC 0x0008 /* 32 bit Interrupt Source Register */
349#define B0_IMSK 0x000c /* 32 bit Interrupt Mask Register */
350#define B0_HWE_ISRC 0x0010 /* 32 bit HW Error Interrupt Src Reg */
351#define B0_HWE_IMSK 0x0014 /* 32 bit HW Error Interrupt Mask Reg */
352#define B0_SP_ISRC 0x0018 /* 32 bit Special Interrupt Source Reg */
353 /* 0x001c: reserved */
354
355/* B0 XMAC 1 registers (GENESIS only) */
356#define B0_XM1_IMSK 0x0020 /* 16 bit r/w XMAC 1 Interrupt Mask Register*/
357 /* 0x0022 - 0x0027: reserved */
358#define B0_XM1_ISRC 0x0028 /* 16 bit ro XMAC 1 Interrupt Status Reg */
359 /* 0x002a - 0x002f: reserved */
360#define B0_XM1_PHY_ADDR 0x0030 /* 16 bit r/w XMAC 1 PHY Address Register */
361 /* 0x0032 - 0x0033: reserved */
362#define B0_XM1_PHY_DATA 0x0034 /* 16 bit r/w XMAC 1 PHY Data Register */
363 /* 0x0036 - 0x003f: reserved */
364
365/* B0 XMAC 2 registers (GENESIS only) */
366#define B0_XM2_IMSK 0x0040 /* 16 bit r/w XMAC 2 Interrupt Mask Register*/
367 /* 0x0042 - 0x0047: reserved */
368#define B0_XM2_ISRC 0x0048 /* 16 bit ro XMAC 2 Interrupt Status Reg */
369 /* 0x004a - 0x004f: reserved */
370#define B0_XM2_PHY_ADDR 0x0050 /* 16 bit r/w XMAC 2 PHY Address Register */
371 /* 0x0052 - 0x0053: reserved */
372#define B0_XM2_PHY_DATA 0x0054 /* 16 bit r/w XMAC 2 PHY Data Register */
373 /* 0x0056 - 0x005f: reserved */
374
375/* BMU Control Status Registers */
376#define B0_R1_CSR 0x0060 /* 32 bit BMU Ctrl/Stat Rx Queue 1 */
377#define B0_R2_CSR 0x0064 /* 32 bit BMU Ctrl/Stat Rx Queue 2 */
378#define B0_XS1_CSR 0x0068 /* 32 bit BMU Ctrl/Stat Sync Tx Queue 1 */
379#define B0_XA1_CSR 0x006c /* 32 bit BMU Ctrl/Stat Async Tx Queue 1*/
380#define B0_XS2_CSR 0x0070 /* 32 bit BMU Ctrl/Stat Sync Tx Queue 2 */
381#define B0_XA2_CSR 0x0074 /* 32 bit BMU Ctrl/Stat Async Tx Queue 2*/
382 /* 0x0078 - 0x007f: reserved */
383
384/*
385 * Bank 1
386 * - completely empty (this is the RAP Block window)
387 * Note: if RAP = 1 this page is reserved
388 */
389
390/*
391 * Bank 2
392 */
393/* NA reg = 48 bit Network Address Register, 3x16 or 8x8 bit readable */
394#define B2_MAC_1 0x0100 /* NA reg MAC Address 1 */
395 /* 0x0106 - 0x0107: reserved */
396#define B2_MAC_2 0x0108 /* NA reg MAC Address 2 */
397 /* 0x010e - 0x010f: reserved */
398#define B2_MAC_3 0x0110 /* NA reg MAC Address 3 */
399 /* 0x0116 - 0x0117: reserved */
400#define B2_CONN_TYP 0x0118 /* 8 bit Connector type */
401#define B2_PMD_TYP 0x0119 /* 8 bit PMD type */
402#define B2_MAC_CFG 0x011a /* 8 bit MAC Configuration / Chip Revision */
403#define B2_CHIP_ID 0x011b /* 8 bit Chip Identification Number */
404 /* Eprom registers are currently of no use */
405#define B2_E_0 0x011c /* 8 bit EPROM Byte 0 (ext. SRAM size) */
406#define B2_E_1 0x011d /* 8 bit EPROM Byte 1 (PHY type) */
407#define B2_E_2 0x011e /* 8 bit EPROM Byte 2 */
408#define B2_E_3 0x011f /* 8 bit EPROM Byte 3 */
409#define B2_FAR 0x0120 /* 32 bit Flash-Prom Addr Reg/Cnt */
410#define B2_FDP 0x0124 /* 8 bit Flash-Prom Data Port */
411 /* 0x0125 - 0x0127: reserved */
412#define B2_LD_CTRL 0x0128 /* 8 bit EPROM loader control register */
413#define B2_LD_TEST 0x0129 /* 8 bit EPROM loader test register */
414 /* 0x012a - 0x012f: reserved */
415#define B2_TI_INI 0x0130 /* 32 bit Timer Init Value */
416#define B2_TI_VAL 0x0134 /* 32 bit Timer Value */
417#define B2_TI_CTRL 0x0138 /* 8 bit Timer Control */
418#define B2_TI_TEST 0x0139 /* 8 Bit Timer Test */
419 /* 0x013a - 0x013f: reserved */
420#define B2_IRQM_INI 0x0140 /* 32 bit IRQ Moderation Timer Init Reg.*/
421#define B2_IRQM_VAL 0x0144 /* 32 bit IRQ Moderation Timer Value */
422#define B2_IRQM_CTRL 0x0148 /* 8 bit IRQ Moderation Timer Control */
423#define B2_IRQM_TEST 0x0149 /* 8 bit IRQ Moderation Timer Test */
424#define B2_IRQM_MSK 0x014c /* 32 bit IRQ Moderation Mask */
425#define B2_IRQM_HWE_MSK 0x0150 /* 32 bit IRQ Moderation HW Error Mask */
426 /* 0x0154 - 0x0157: reserved */
427#define B2_TST_CTRL1 0x0158 /* 8 bit Test Control Register 1 */
428#define B2_TST_CTRL2 0x0159 /* 8 bit Test Control Register 2 */
429 /* 0x015a - 0x015b: reserved */
430#define B2_GP_IO 0x015c /* 32 bit General Purpose I/O Register */
431#define B2_I2C_CTRL 0x0160 /* 32 bit I2C HW Control Register */
432#define B2_I2C_DATA 0x0164 /* 32 bit I2C HW Data Register */
433#define B2_I2C_IRQ 0x0168 /* 32 bit I2C HW IRQ Register */
434#define B2_I2C_SW 0x016c /* 32 bit I2C SW Port Register */
435
436/* Blink Source Counter (GENESIS only) */
437#define B2_BSC_INI 0x0170 /* 32 bit Blink Source Counter Init Val */
438#define B2_BSC_VAL 0x0174 /* 32 bit Blink Source Counter Value */
439#define B2_BSC_CTRL 0x0178 /* 8 bit Blink Source Counter Control */
440#define B2_BSC_STAT 0x0179 /* 8 bit Blink Source Counter Status */
441#define B2_BSC_TST 0x017a /* 16 bit Blink Source Counter Test Reg */
442 /* 0x017c - 0x017f: reserved */
443
444/*
445 * Bank 3
446 */
447/* RAM Random Registers */
448#define B3_RAM_ADDR 0x0180 /* 32 bit RAM Address, to read or write */
449#define B3_RAM_DATA_LO 0x0184 /* 32 bit RAM Data Word (low dWord) */
450#define B3_RAM_DATA_HI 0x0188 /* 32 bit RAM Data Word (high dWord) */
451 /* 0x018c - 0x018f: reserved */
452
453/* RAM Interface Registers */
454/*
455 * The HW spec calls these registers Timeout Value 0..11, but those names are
456 * not usable in SW. Note that these are NOT real timeouts; they are
457 * the number of qWords transferred continuously.
458 */
459#define B3_RI_WTO_R1 0x0190 /* 8 bit WR Timeout Queue R1 (TO0) */
460#define B3_RI_WTO_XA1 0x0191 /* 8 bit WR Timeout Queue XA1 (TO1) */
461#define B3_RI_WTO_XS1 0x0192 /* 8 bit WR Timeout Queue XS1 (TO2) */
462#define B3_RI_RTO_R1 0x0193 /* 8 bit RD Timeout Queue R1 (TO3) */
463#define B3_RI_RTO_XA1 0x0194 /* 8 bit RD Timeout Queue XA1 (TO4) */
464#define B3_RI_RTO_XS1 0x0195 /* 8 bit RD Timeout Queue XS1 (TO5) */
465#define B3_RI_WTO_R2 0x0196 /* 8 bit WR Timeout Queue R2 (TO6) */
466#define B3_RI_WTO_XA2 0x0197 /* 8 bit WR Timeout Queue XA2 (TO7) */
467#define B3_RI_WTO_XS2 0x0198 /* 8 bit WR Timeout Queue XS2 (TO8) */
468#define B3_RI_RTO_R2 0x0199 /* 8 bit RD Timeout Queue R2 (TO9) */
469#define B3_RI_RTO_XA2 0x019a /* 8 bit RD Timeout Queue XA2 (TO10)*/
470#define B3_RI_RTO_XS2 0x019b /* 8 bit RD Timeout Queue XS2 (TO11)*/
471#define B3_RI_TO_VAL 0x019c /* 8 bit Current Timeout Count Val */
472 /* 0x019d - 0x019f: reserved */
473#define B3_RI_CTRL 0x01a0 /* 16 bit RAM Interface Control Register */
474#define B3_RI_TEST 0x01a2 /* 8 bit RAM Interface Test Register */
475 /* 0x01a3 - 0x01af: reserved */
476
477/* MAC Arbiter Registers (GENESIS only) */
478/* these are the number of qWords transferred continuously and NOT real timeouts */
479#define B3_MA_TOINI_RX1 0x01b0 /* 8 bit Timeout Init Val Rx Path MAC 1 */
480#define B3_MA_TOINI_RX2 0x01b1 /* 8 bit Timeout Init Val Rx Path MAC 2 */
481#define B3_MA_TOINI_TX1 0x01b2 /* 8 bit Timeout Init Val Tx Path MAC 1 */
482#define B3_MA_TOINI_TX2 0x01b3 /* 8 bit Timeout Init Val Tx Path MAC 2 */
483#define B3_MA_TOVAL_RX1 0x01b4 /* 8 bit Timeout Value Rx Path MAC 1 */
484#define B3_MA_TOVAL_RX2 0x01b5 /* 8 bit Timeout Value Rx Path MAC 2 */
485#define B3_MA_TOVAL_TX1 0x01b6 /* 8 bit Timeout Value Tx Path MAC 1 */
486#define B3_MA_TOVAL_TX2 0x01b7 /* 8 bit Timeout Value Tx Path MAC 2 */
487#define B3_MA_TO_CTRL 0x01b8 /* 16 bit MAC Arbiter Timeout Ctrl Reg */
488#define B3_MA_TO_TEST 0x01ba /* 16 bit MAC Arbiter Timeout Test Reg */
489 /* 0x01bc - 0x01bf: reserved */
490#define B3_MA_RCINI_RX1 0x01c0 /* 8 bit Recovery Init Val Rx Path MAC 1 */
491#define B3_MA_RCINI_RX2 0x01c1 /* 8 bit Recovery Init Val Rx Path MAC 2 */
492#define B3_MA_RCINI_TX1 0x01c2 /* 8 bit Recovery Init Val Tx Path MAC 1 */
493#define B3_MA_RCINI_TX2 0x01c3 /* 8 bit Recovery Init Val Tx Path MAC 2 */
494#define B3_MA_RCVAL_RX1 0x01c4 /* 8 bit Recovery Value Rx Path MAC 1 */
495#define B3_MA_RCVAL_RX2 0x01c5 /* 8 bit Recovery Value Rx Path MAC 2 */
496#define B3_MA_RCVAL_TX1 0x01c6 /* 8 bit Recovery Value Tx Path MAC 1 */
497#define B3_MA_RCVAL_TX2 0x01c7 /* 8 bit Recovery Value Tx Path MAC 2 */
498#define B3_MA_RC_CTRL 0x01c8 /* 16 bit MAC Arbiter Recovery Ctrl Reg */
499#define B3_MA_RC_TEST 0x01ca /* 16 bit MAC Arbiter Recovery Test Reg */
500 /* 0x01cc - 0x01cf: reserved */
501
502/* Packet Arbiter Registers (GENESIS only) */
503/* these are real timeouts */
504#define B3_PA_TOINI_RX1 0x01d0 /* 16 bit Timeout Init Val Rx Path MAC 1 */
505 /* 0x01d2 - 0x01d3: reserved */
506#define B3_PA_TOINI_RX2 0x01d4 /* 16 bit Timeout Init Val Rx Path MAC 2 */
507 /* 0x01d6 - 0x01d7: reserved */
508#define B3_PA_TOINI_TX1 0x01d8 /* 16 bit Timeout Init Val Tx Path MAC 1 */
509 /* 0x01da - 0x01db: reserved */
510#define B3_PA_TOINI_TX2 0x01dc /* 16 bit Timeout Init Val Tx Path MAC 2 */
511 /* 0x01de - 0x01df: reserved */
512#define B3_PA_TOVAL_RX1 0x01e0 /* 16 bit Timeout Val Rx Path MAC 1 */
513 /* 0x01e2 - 0x01e3: reserved */
514#define B3_PA_TOVAL_RX2 0x01e4 /* 16 bit Timeout Val Rx Path MAC 2 */
515 /* 0x01e6 - 0x01e7: reserved */
516#define B3_PA_TOVAL_TX1 0x01e8 /* 16 bit Timeout Val Tx Path MAC 1 */
517 /* 0x01ea - 0x01eb: reserved */
518#define B3_PA_TOVAL_TX2 0x01ec /* 16 bit Timeout Val Tx Path MAC 2 */
519 /* 0x01ee - 0x01ef: reserved */
520#define B3_PA_CTRL 0x01f0 /* 16 bit Packet Arbiter Ctrl Register */
521#define B3_PA_TEST 0x01f2 /* 16 bit Packet Arbiter Test Register */
522 /* 0x01f4 - 0x01ff: reserved */
523
524/*
525 * Bank 4 - 5
526 */
527/* Transmit Arbiter Registers MAC 1 and 2, use MR_ADDR() to access */
528#define TXA_ITI_INI 0x0200 /* 32 bit Tx Arb Interval Timer Init Val*/
529#define TXA_ITI_VAL 0x0204 /* 32 bit Tx Arb Interval Timer Value */
530#define TXA_LIM_INI 0x0208 /* 32 bit Tx Arb Limit Counter Init Val */
531#define TXA_LIM_VAL 0x020c /* 32 bit Tx Arb Limit Counter Value */
532#define TXA_CTRL 0x0210 /* 8 bit Tx Arbiter Control Register */
533#define TXA_TEST 0x0211 /* 8 bit Tx Arbiter Test Register */
534#define TXA_STAT 0x0212 /* 8 bit Tx Arbiter Status Register */
535 /* 0x0213 - 0x027f: reserved */
536 /* 0x0280 - 0x0292: MAC 2 */
537 /* 0x0293 - 0x02ff: reserved */
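
/*
 * Illustrative sketch: the Tx Arbiter registers above are duplicated per
 * MAC and reached through MR_ADDR() (defined elsewhere in this file).
 * 'io' (an ioremapped register base) and the bare writeb() accessor are
 * assumptions for illustration; the driver's own I/O wrappers would be
 * used in practice.
 */
#if 0
static void example_txa_enable(void __iomem *io, int mac)
{
	/* enable the Tx arbiter of MAC 'mac' (0 or 1) */
	writeb(TXA_ENA_ARB, io + MR_ADDR(mac, TXA_CTRL));
}
#endif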
538
539/*
540 * Bank 6
541 */
542/* External registers (GENESIS only) */
543#define B6_EXT_REG 0x0300
544
545/*
546 * Bank 7
547 */
548/* This is a copy of the Configuration register file (lower half) */
549#define B7_CFG_SPC 0x0380
550
551/*
552 * Bank 8 - 15
553 */
554/* Receive and Transmit Queue Registers, use Q_ADDR() to access */
555#define B8_Q_REGS 0x0400
556
557/* Queue Register Offsets, use Q_ADDR() to access */
558#define Q_D 0x00 /* 8*32 bit Current Descriptor */
559#define Q_DA_L 0x20 /* 32 bit Current Descriptor Address Low dWord */
560#define Q_DA_H 0x24 /* 32 bit Current Descriptor Address High dWord */
561#define Q_AC_L 0x28 /* 32 bit Current Address Counter Low dWord */
562#define Q_AC_H 0x2c /* 32 bit Current Address Counter High dWord */
563#define Q_BC 0x30 /* 32 bit Current Byte Counter */
564#define Q_CSR 0x34 /* 32 bit BMU Control/Status Register */
565#define Q_F 0x38 /* 32 bit Flag Register */
566#define Q_T1 0x3c /* 32 bit Test Register 1 */
567#define Q_T1_TR 0x3c /* 8 bit Test Register 1 Transfer SM */
568#define Q_T1_WR 0x3d /* 8 bit Test Register 1 Write Descriptor SM */
569#define Q_T1_RD 0x3e /* 8 bit Test Register 1 Read Descriptor SM */
570#define Q_T1_SV 0x3f /* 8 bit Test Register 1 Supervisor SM */
571#define Q_T2 0x40 /* 32 bit Test Register 2 */
572#define Q_T3 0x44 /* 32 bit Test Register 3 */
573 /* 0x48 - 0x7f: reserved */
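
/*
 * Illustrative sketch: a queue register is reached by adding its offset
 * above to the queue's slot via Q_ADDR() (defined elsewhere in this
 * file).  'io' (an ioremapped register base) and the bare readl()
 * accessor are assumptions for illustration.
 */
#if 0
static u32 example_read_q_csr(void __iomem *io, int queue)
{
	/* fetch the BMU control/status word of 'queue' */
	return readl(io + Q_ADDR(queue, Q_CSR));
}
#endif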
574
575/*
576 * Bank 16 - 23
577 */
578/* RAM Buffer Registers */
579#define B16_RAM_REGS 0x0800
580
581/* RAM Buffer Register Offsets, use RB_ADDR() to access */
582#define RB_START 0x00 /* 32 bit RAM Buffer Start Address */
583#define RB_END 0x04 /* 32 bit RAM Buffer End Address */
584#define RB_WP 0x08 /* 32 bit RAM Buffer Write Pointer */
585#define RB_RP 0x0c /* 32 bit RAM Buffer Read Pointer */
586#define RB_RX_UTPP 0x10 /* 32 bit Rx Upper Threshold, Pause Pack */
587#define RB_RX_LTPP 0x14 /* 32 bit Rx Lower Threshold, Pause Pack */
588#define RB_RX_UTHP 0x18 /* 32 bit Rx Upper Threshold, High Prio */
589#define RB_RX_LTHP 0x1c /* 32 bit Rx Lower Threshold, High Prio */
590 /* 0x10 - 0x1f: reserved at Tx RAM Buffer Registers */
591#define RB_PC 0x20 /* 32 bit RAM Buffer Packet Counter */
592#define RB_LEV 0x24 /* 32 bit RAM Buffer Level Register */
593#define RB_CTRL 0x28 /* 8 bit RAM Buffer Control Register */
594#define RB_TST1 0x29 /* 8 bit RAM Buffer Test Register 1 */
595#define RB_TST2 0x2A /* 8 bit RAM Buffer Test Register 2 */
596 /* 0x2c - 0x7f: reserved */
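
/*
 * Illustrative sketch: programming the RAM buffer window of one queue
 * through RB_ADDR() (defined elsewhere in this file).  'io', 'start' and
 * 'end' are assumptions for illustration; the values must be RAM
 * addresses in the form the hardware expects.
 */
#if 0
static void example_ram_buf_window(void __iomem *io, int queue, u32 start, u32 end)
{
	writel(start, io + RB_ADDR(queue, RB_START));	/* buffer start */
	writel(end, io + RB_ADDR(queue, RB_END));	/* buffer end */
	writel(start, io + RB_ADDR(queue, RB_WP));	/* write ptr at start */
	writel(start, io + RB_ADDR(queue, RB_RP));	/* read ptr at start */
}
#endif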
597
598/*
599 * Bank 24
600 */
601/*
602 * Receive MAC FIFO, Receive LED, and Link_Sync regs (GENESIS only)
603 * use MR_ADDR() to access
604 */
605#define RX_MFF_EA 0x0c00 /* 32 bit Receive MAC FIFO End Address */
606#define RX_MFF_WP 0x0c04 /* 32 bit Receive MAC FIFO Write Pointer */
607 /* 0x0c08 - 0x0c0b: reserved */
608#define RX_MFF_RP 0x0c0c /* 32 bit Receive MAC FIFO Read Pointer */
609#define RX_MFF_PC 0x0c10 /* 32 bit Receive MAC FIFO Packet Cnt */
610#define RX_MFF_LEV 0x0c14 /* 32 bit Receive MAC FIFO Level */
611#define RX_MFF_CTRL1 0x0c18 /* 16 bit Receive MAC FIFO Control Reg 1*/
612#define RX_MFF_STAT_TO 0x0c1a /* 8 bit Receive MAC Status Timeout */
613#define RX_MFF_TIST_TO 0x0c1b /* 8 bit Receive MAC Time Stamp Timeout */
614#define RX_MFF_CTRL2 0x0c1c /* 8 bit Receive MAC FIFO Control Reg 2*/
615#define RX_MFF_TST1 0x0c1d /* 8 bit Receive MAC FIFO Test Reg 1 */
616#define RX_MFF_TST2 0x0c1e /* 8 bit Receive MAC FIFO Test Reg 2 */
617 /* 0x0c1f: reserved */
618#define RX_LED_INI 0x0c20 /* 32 bit Receive LED Cnt Init Value */
619#define RX_LED_VAL 0x0c24 /* 32 bit Receive LED Cnt Current Value */
620#define RX_LED_CTRL 0x0c28 /* 8 bit Receive LED Cnt Control Reg */
621#define RX_LED_TST 0x0c29 /* 8 bit Receive LED Cnt Test Register */
622 /* 0x0c2a - 0x0c2f: reserved */
623#define LNK_SYNC_INI 0x0c30 /* 32 bit Link Sync Cnt Init Value */
624#define LNK_SYNC_VAL 0x0c34 /* 32 bit Link Sync Cnt Current Value */
625#define LNK_SYNC_CTRL 0x0c38 /* 8 bit Link Sync Cnt Control Register */
626#define LNK_SYNC_TST 0x0c39 /* 8 bit Link Sync Cnt Test Register */
627 /* 0x0c3a - 0x0c3b: reserved */
628#define LNK_LED_REG 0x0c3c /* 8 bit Link LED Register */
629 /* 0x0c3d - 0x0c3f: reserved */
630
631/* Receive GMAC FIFO (YUKON only), use MR_ADDR() to access */
632#define RX_GMF_EA 0x0c40 /* 32 bit Rx GMAC FIFO End Address */
633#define RX_GMF_AF_THR 0x0c44 /* 32 bit Rx GMAC FIFO Almost Full Thresh. */
634#define RX_GMF_CTRL_T 0x0c48 /* 32 bit Rx GMAC FIFO Control/Test */
635#define RX_GMF_FL_MSK 0x0c4c /* 32 bit Rx GMAC FIFO Flush Mask */
636#define RX_GMF_FL_THR 0x0c50 /* 32 bit Rx GMAC FIFO Flush Threshold */
637 /* 0x0c54 - 0x0c5f: reserved */
638#define RX_GMF_WP 0x0c60 /* 32 bit Rx GMAC FIFO Write Pointer */
639 /* 0x0c64 - 0x0c67: reserved */
640#define RX_GMF_WLEV 0x0c68 /* 32 bit Rx GMAC FIFO Write Level */
641 /* 0x0c6c - 0x0c6f: reserved */
642#define RX_GMF_RP 0x0c70 /* 32 bit Rx GMAC FIFO Read Pointer */
643 /* 0x0c74 - 0x0c77: reserved */
644#define RX_GMF_RLEV 0x0c78 /* 32 bit Rx GMAC FIFO Read Level */
645 /* 0x0c7c - 0x0c7f: reserved */
646
647/*
648 * Bank 25
649 */
650 /* 0x0c80 - 0x0cbf: MAC 2 */
651 /* 0x0cc0 - 0x0cff: reserved */
652
653/*
654 * Bank 26
655 */
656/*
657 * Transmit MAC FIFO and Transmit LED Registers (GENESIS only),
658 * use MR_ADDR() to access
659 */
660#define TX_MFF_EA 0x0d00 /* 32 bit Transmit MAC FIFO End Address */
661#define TX_MFF_WP 0x0d04 /* 32 bit Transmit MAC FIFO WR Pointer */
662#define TX_MFF_WSP 0x0d08 /* 32 bit Transmit MAC FIFO WR Shadow Ptr */
663#define TX_MFF_RP 0x0d0c /* 32 bit Transmit MAC FIFO RD Pointer */
664#define TX_MFF_PC 0x0d10 /* 32 bit Transmit MAC FIFO Packet Cnt */
665#define TX_MFF_LEV 0x0d14 /* 32 bit Transmit MAC FIFO Level */
666#define TX_MFF_CTRL1 0x0d18 /* 16 bit Transmit MAC FIFO Ctrl Reg 1 */
667#define TX_MFF_WAF 0x0d1a /* 8 bit Transmit MAC Wait after flush */
668 /* 0x0d1b: reserved */
669#define TX_MFF_CTRL2 0x0d1c /* 8 bit Transmit MAC FIFO Ctrl Reg 2 */
670#define TX_MFF_TST1 0x0d1d /* 8 bit Transmit MAC FIFO Test Reg 1 */
671#define TX_MFF_TST2 0x0d1e /* 8 bit Transmit MAC FIFO Test Reg 2 */
672 /* 0x0d1f: reserved */
673#define TX_LED_INI 0x0d20 /* 32 bit Transmit LED Cnt Init Value */
674#define TX_LED_VAL 0x0d24 /* 32 bit Transmit LED Cnt Current Val */
675#define TX_LED_CTRL 0x0d28 /* 8 bit Transmit LED Cnt Control Reg */
676#define TX_LED_TST 0x0d29 /* 8 bit Transmit LED Cnt Test Reg */
677 /* 0x0d2a - 0x0d3f: reserved */
678
679/* Transmit GMAC FIFO (YUKON only), use MR_ADDR() to access */
680#define TX_GMF_EA 0x0d40 /* 32 bit Tx GMAC FIFO End Address */
681#define TX_GMF_AE_THR 0x0d44 /* 32 bit Tx GMAC FIFO Almost Empty Thresh.*/
682#define TX_GMF_CTRL_T 0x0d48 /* 32 bit Tx GMAC FIFO Control/Test */
683 /* 0x0d4c - 0x0d5f: reserved */
684#define TX_GMF_WP 0x0d60 /* 32 bit Tx GMAC FIFO Write Pointer */
685#define TX_GMF_WSP 0x0d64 /* 32 bit Tx GMAC FIFO Write Shadow Ptr. */
686#define TX_GMF_WLEV 0x0d68 /* 32 bit Tx GMAC FIFO Write Level */
687 /* 0x0d6c - 0x0d6f: reserved */
688#define TX_GMF_RP 0x0d70 /* 32 bit Tx GMAC FIFO Read Pointer */
689#define TX_GMF_RSTP 0x0d74 /* 32 bit Tx GMAC FIFO Restart Pointer */
690#define TX_GMF_RLEV 0x0d78 /* 32 bit Tx GMAC FIFO Read Level */
691 /* 0x0d7c - 0x0d7f: reserved */
692
693/*
694 * Bank 27
695 */
696 /* 0x0d80 - 0x0dbf: MAC 2 */
697 /* 0x0dc0 - 0x0dff: reserved */
698
699/*
700 * Bank 28
701 */
702/* Descriptor Poll Timer Registers */
703#define B28_DPT_INI 0x0e00 /* 24 bit Descriptor Poll Timer Init Val */
704#define B28_DPT_VAL 0x0e04 /* 24 bit Descriptor Poll Timer Curr Val */
705#define B28_DPT_CTRL 0x0e08 /* 8 bit Descriptor Poll Timer Ctrl Reg */
706 /* 0x0e09: reserved */
707#define B28_DPT_TST 0x0e0a /* 8 bit Descriptor Poll Timer Test Reg */
708 /* 0x0e0b: reserved */
709
710/* Time Stamp Timer Registers (YUKON only) */
711 /* 0x0e10: reserved */
712#define GMAC_TI_ST_VAL 0x0e14 /* 32 bit Time Stamp Timer Curr Val */
713#define GMAC_TI_ST_CTRL 0x0e18 /* 8 bit Time Stamp Timer Ctrl Reg */
714 /* 0x0e19: reserved */
715#define GMAC_TI_ST_TST 0x0e1a /* 8 bit Time Stamp Timer Test Reg */
716 /* 0x0e1b - 0x0e7f: reserved */
717
718/*
719 * Bank 29
720 */
721 /* 0x0e80 - 0x0efc: reserved */
722
723/*
724 * Bank 30
725 */
726/* GMAC and GPHY Control Registers (YUKON only) */
727#define GMAC_CTRL 0x0f00 /* 32 bit GMAC Control Reg */
728#define GPHY_CTRL 0x0f04 /* 32 bit GPHY Control Reg */
729#define GMAC_IRQ_SRC 0x0f08 /* 8 bit GMAC Interrupt Source Reg */
730 /* 0x0f09 - 0x0f0b: reserved */
731#define GMAC_IRQ_MSK 0x0f0c /* 8 bit GMAC Interrupt Mask Reg */
732 /* 0x0f0d - 0x0f0f: reserved */
733#define GMAC_LINK_CTRL 0x0f10 /* 16 bit Link Control Reg */
734 /* 0x0f14 - 0x0f1f: reserved */
735
736/* Wake-up Frame Pattern Match Control Registers (YUKON only) */
737
738#define WOL_REG_OFFS 0x20 /* HW bug: addresses are offset by +0x20 from the spec */
739
740#define WOL_CTRL_STAT 0x0f20 /* 16 bit WOL Control/Status Reg */
741#define WOL_MATCH_CTL 0x0f22 /* 8 bit WOL Match Control Reg */
742#define WOL_MATCH_RES 0x0f23 /* 8 bit WOL Match Result Reg */
743#define WOL_MAC_ADDR_LO 0x0f24 /* 32 bit WOL MAC Address Low */
744#define WOL_MAC_ADDR_HI 0x0f28 /* 16 bit WOL MAC Address High */
745#define WOL_PATT_RPTR 0x0f2c /* 8 bit WOL Pattern Read Ptr */
746
747/* use this macro to access above registers */
748#define WOL_REG(Reg) ((Reg) + (pAC->GIni.GIWolOffs))
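
/*
 * Illustrative sketch: reading the WOL control/status word with the
 * WOL_REG() correction applied.  Here the per-adapter offset (0 or
 * WOL_REG_OFFS, normally taken from pAC->GIni.GIWolOffs) is passed in
 * directly; 'io' and the bare readw() accessor are assumptions for
 * illustration.
 */
#if 0
static u16 example_read_wol_ctrl(void __iomem *io, unsigned int wol_offs)
{
	return readw(io + WOL_CTRL_STAT + wol_offs);
}
#endif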
749
750
751/* WOL Pattern Length Registers (YUKON only) */
752
753#define WOL_PATT_LEN_LO 0x0f30 /* 32 bit WOL Pattern Length 3..0 */
754#define WOL_PATT_LEN_HI 0x0f34 /* 24 bit WOL Pattern Length 6..4 */
755
756/* WOL Pattern Counter Registers (YUKON only) */
757
758#define WOL_PATT_CNT_0 0x0f38 /* 32 bit WOL Pattern Counter 3..0 */
759#define WOL_PATT_CNT_4 0x0f3c /* 24 bit WOL Pattern Counter 6..4 */
760 /* 0x0f40 - 0x0f7f: reserved */
761
762/*
763 * Bank 31
764 */
765/* 0x0f80 - 0x0fff: reserved */
766
767/*
768 * Bank 32 - 33
769 */
770#define WOL_PATT_RAM_1 0x1000 /* WOL Pattern RAM Link 1 */
771
772/*
773 * Bank 0x22 - 0x3f
774 */
775/* 0x1100 - 0x1fff: reserved */
776
777/*
778 * Bank 0x40 - 0x4f
779 */
780#define BASE_XMAC_1 0x2000 /* XMAC 1 registers */
781
782/*
783 * Bank 0x50 - 0x5f
784 */
785
786#define BASE_GMAC_1 0x2800 /* GMAC 1 registers */
787
788/*
789 * Bank 0x60 - 0x6f
790 */
791#define BASE_XMAC_2 0x3000 /* XMAC 2 registers */
792
793/*
794 * Bank 0x70 - 0x7f
795 */
796#define BASE_GMAC_2 0x3800 /* GMAC 2 registers */
797
798/*
799 * Control Register Bit Definitions:
800 */
801/* B0_RAP 8 bit Register Address Port */
802 /* Bit 7: reserved */
803#define RAP_RAP 0x3f /* Bit 6..0: 0 = block 0,..,6f = block 6f */
804
805/* B0_CTST 16 bit Control/Status register */
806 /* Bit 15..14: reserved */
807#define CS_CLK_RUN_HOT BIT_13S /* CLK_RUN hot m. (YUKON-Lite only) */
808#define CS_CLK_RUN_RST BIT_12S /* CLK_RUN reset (YUKON-Lite only) */
809#define CS_CLK_RUN_ENA BIT_11S /* CLK_RUN enable (YUKON-Lite only) */
810#define CS_VAUX_AVAIL BIT_10S /* VAUX available (YUKON only) */
811#define CS_BUS_CLOCK BIT_9S /* Bus Clock 0/1 = 33/66 MHz */
812#define CS_BUS_SLOT_SZ BIT_8S /* Slot Size 0/1 = 32/64 bit slot */
813#define CS_ST_SW_IRQ BIT_7S /* Set IRQ SW Request */
814#define CS_CL_SW_IRQ BIT_6S /* Clear IRQ SW Request */
815#define CS_STOP_DONE BIT_5S /* Stop Master is finished */
816#define CS_STOP_MAST BIT_4S /* Command Bit to stop the master */
817#define CS_MRST_CLR BIT_3S /* Clear Master reset */
818#define CS_MRST_SET BIT_2S /* Set Master reset */
819#define CS_RST_CLR BIT_1S /* Clear Software reset */
820#define CS_RST_SET BIT_0S /* Set Software reset */
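
/*
 * Illustrative sketch: the reset controls above come in set/clear pairs,
 * so a software reset pulse would look roughly like the following.
 * 'io' and the bare writew() accessor are assumptions for illustration.
 */
#if 0
static void example_sw_reset_pulse(void __iomem *io)
{
	writew(CS_RST_SET, io + B0_CTST);	/* assert software reset */
	writew(CS_RST_CLR, io + B0_CTST);	/* release software reset */
}
#endif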
821
822/* B0_LED 8 Bit LED register */
823 /* Bit 7.. 2: reserved */
824#define LED_STAT_ON BIT_1S /* Status LED on */
825#define LED_STAT_OFF BIT_0S /* Status LED off */
826
827/* B0_POWER_CTRL 8 Bit Power Control reg (YUKON only) */
828#define PC_VAUX_ENA BIT_7 /* Switch VAUX Enable */
829#define PC_VAUX_DIS BIT_6 /* Switch VAUX Disable */
830#define PC_VCC_ENA BIT_5 /* Switch VCC Enable */
831#define PC_VCC_DIS BIT_4 /* Switch VCC Disable */
832#define PC_VAUX_ON BIT_3 /* Switch VAUX On */
833#define PC_VAUX_OFF BIT_2 /* Switch VAUX Off */
834#define PC_VCC_ON BIT_1 /* Switch VCC On */
835#define PC_VCC_OFF BIT_0 /* Switch VCC Off */
836
837/* B0_ISRC 32 bit Interrupt Source Register */
838/* B0_IMSK 32 bit Interrupt Mask Register */
839/* B0_SP_ISRC 32 bit Special Interrupt Source Reg */
840/* B2_IRQM_MSK 32 bit IRQ Moderation Mask */
841#define IS_ALL_MSK 0xbfffffffUL /* All Interrupt bits */
842#define IS_HW_ERR BIT_31 /* Interrupt HW Error */
843 /* Bit 30: reserved */
844#define IS_PA_TO_RX1 BIT_29 /* Packet Arb Timeout Rx1 */
845#define IS_PA_TO_RX2 BIT_28 /* Packet Arb Timeout Rx2 */
846#define IS_PA_TO_TX1 BIT_27 /* Packet Arb Timeout Tx1 */
847#define IS_PA_TO_TX2 BIT_26 /* Packet Arb Timeout Tx2 */
848#define IS_I2C_READY BIT_25 /* IRQ on end of I2C Tx */
849#define IS_IRQ_SW BIT_24 /* SW forced IRQ */
850#define IS_EXT_REG BIT_23 /* IRQ from LM80 or PHY (GENESIS only) */
851 /* IRQ from PHY (YUKON only) */
852#define IS_TIMINT BIT_22 /* IRQ from Timer */
853#define IS_MAC1 BIT_21 /* IRQ from MAC 1 */
854#define IS_LNK_SYNC_M1 BIT_20 /* Link Sync Cnt wrap MAC 1 */
855#define IS_MAC2 BIT_19 /* IRQ from MAC 2 */
856#define IS_LNK_SYNC_M2 BIT_18 /* Link Sync Cnt wrap MAC 2 */
857/* Receive Queue 1 */
858#define IS_R1_B BIT_17 /* Q_R1 End of Buffer */
859#define IS_R1_F BIT_16 /* Q_R1 End of Frame */
860#define IS_R1_C BIT_15 /* Q_R1 Encoding Error */
861/* Receive Queue 2 */
862#define IS_R2_B BIT_14 /* Q_R2 End of Buffer */
863#define IS_R2_F BIT_13 /* Q_R2 End of Frame */
864#define IS_R2_C BIT_12 /* Q_R2 Encoding Error */
865/* Synchronous Transmit Queue 1 */
866#define IS_XS1_B BIT_11 /* Q_XS1 End of Buffer */
867#define IS_XS1_F BIT_10 /* Q_XS1 End of Frame */
868#define IS_XS1_C BIT_9 /* Q_XS1 Encoding Error */
869/* Asynchronous Transmit Queue 1 */
870#define IS_XA1_B BIT_8 /* Q_XA1 End of Buffer */
871#define IS_XA1_F BIT_7 /* Q_XA1 End of Frame */
872#define IS_XA1_C BIT_6 /* Q_XA1 Encoding Error */
873/* Synchronous Transmit Queue 2 */
874#define IS_XS2_B BIT_5 /* Q_XS2 End of Buffer */
875#define IS_XS2_F BIT_4 /* Q_XS2 End of Frame */
876#define IS_XS2_C BIT_3 /* Q_XS2 Encoding Error */
877/* Asynchronous Transmit Queue 2 */
878#define IS_XA2_B BIT_2 /* Q_XA2 End of Buffer */
879#define IS_XA2_F BIT_1 /* Q_XA2 End of Frame */
880#define IS_XA2_C BIT_0 /* Q_XA2 Encoding Error */
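
/*
 * Illustrative sketch: B0_IMSK selects which of the B0_ISRC bits may
 * raise an interrupt, so an ISR would typically mask the source register
 * against the bits it enabled.  'io' and 'imask' are assumptions for
 * illustration.
 */
#if 0
static u32 example_pending_irqs(void __iomem *io, u32 imask)
{
	return readl(io + B0_ISRC) & imask;
}
#endif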
881
882
883/* B0_HWE_ISRC 32 bit HW Error Interrupt Src Reg */
884/* B0_HWE_IMSK 32 bit HW Error Interrupt Mask Reg */
885/* B2_IRQM_HWE_MSK 32 bit IRQ Moderation HW Error Mask */
886#define IS_ERR_MSK 0x00000fffL /* All Error bits */
887 /* Bit 31..14: reserved */
888#define IS_IRQ_TIST_OV BIT_13 /* Time Stamp Timer Overflow (YUKON only) */
889#define IS_IRQ_SENSOR BIT_12 /* IRQ from Sensor (YUKON only) */
890#define IS_IRQ_MST_ERR BIT_11 /* IRQ master error detected */
891#define IS_IRQ_STAT BIT_10 /* IRQ status exception */
892#define IS_NO_STAT_M1 BIT_9 /* No Rx Status from MAC 1 */
893#define IS_NO_STAT_M2 BIT_8 /* No Rx Status from MAC 2 */
894#define IS_NO_TIST_M1 BIT_7 /* No Time Stamp from MAC 1 */
895#define IS_NO_TIST_M2 BIT_6 /* No Time Stamp from MAC 2 */
896#define IS_RAM_RD_PAR BIT_5 /* RAM Read Parity Error */
897#define IS_RAM_WR_PAR BIT_4 /* RAM Write Parity Error */
898#define IS_M1_PAR_ERR BIT_3 /* MAC 1 Parity Error */
899#define IS_M2_PAR_ERR BIT_2 /* MAC 2 Parity Error */
900#define IS_R1_PAR_ERR BIT_1 /* Queue R1 Parity Error */
901#define IS_R2_PAR_ERR BIT_0 /* Queue R2 Parity Error */
902
903/* B2_CONN_TYP 8 bit Connector type */
904/* B2_PMD_TYP 8 bit PMD type */
905/* Values of connector and PMD type comply with SysKonnect internal std */
906
907/* B2_MAC_CFG 8 bit MAC Configuration / Chip Revision */
908#define CFG_CHIP_R_MSK (0xf<<4) /* Bit 7.. 4: Chip Revision */
909 /* Bit 3.. 2: reserved */
910#define CFG_DIS_M2_CLK BIT_1S /* Disable Clock for 2nd MAC */
911#define CFG_SNG_MAC BIT_0S /* MAC Config: 0=2 MACs / 1=1 MAC*/
912
913/* B2_CHIP_ID 8 bit Chip Identification Number */
914#define CHIP_ID_GENESIS 0x0a /* Chip ID for GENESIS */
915#define CHIP_ID_YUKON 0xb0 /* Chip ID for YUKON */
916#define CHIP_ID_YUKON_LITE 0xb1 /* Chip ID for YUKON-Lite (Rev. A1-A3) */
917#define CHIP_ID_YUKON_LP 0xb2 /* Chip ID for YUKON-LP */
918
919#define CHIP_REV_YU_LITE_A1 3 /* Chip Rev. for YUKON-Lite A1,A2 */
920#define CHIP_REV_YU_LITE_A3 7 /* Chip Rev. for YUKON-Lite A3 */
921
922/* B2_FAR 32 bit Flash-Prom Addr Reg/Cnt */
923#define FAR_ADDR 0x1ffffL /* Bit 16.. 0: FPROM Address mask */
924
925/* B2_LD_CTRL 8 bit EPROM loader control register */
926/* Bits are currently reserved */
927
928/* B2_LD_TEST 8 bit EPROM loader test register */
929 /* Bit 7.. 4: reserved */
930#define LD_T_ON BIT_3S /* Loader Test mode on */
931#define LD_T_OFF BIT_2S /* Loader Test mode off */
932#define LD_T_STEP BIT_1S /* Decrement FPROM addr. Counter */
933#define LD_START BIT_0S /* Start loading FPROM */
934
935/*
936 * Timer Section
937 */
938/* B2_TI_CTRL 8 bit Timer control */
939/* B2_IRQM_CTRL 8 bit IRQ Moderation Timer Control */
940 /* Bit 7.. 3: reserved */
941#define TIM_START BIT_2S /* Start Timer */
942#define TIM_STOP BIT_1S /* Stop Timer */
943#define TIM_CLR_IRQ BIT_0S /* Clear Timer IRQ (!IRQM) */
944
945/* B2_TI_TEST 8 Bit Timer Test */
946/* B2_IRQM_TEST 8 bit IRQ Moderation Timer Test */
947/* B28_DPT_TST 8 bit Descriptor Poll Timer Test Reg */
948 /* Bit 7.. 3: reserved */
949#define TIM_T_ON BIT_2S /* Test mode on */
950#define TIM_T_OFF BIT_1S /* Test mode off */
951#define TIM_T_STEP BIT_0S /* Test step */
952
953/* B28_DPT_INI 24 bit Descriptor Poll Timer Init Val */
954/* B28_DPT_VAL 24 bit Descriptor Poll Timer Curr Val */
955 /* Bit 31..24: reserved */
956#define DPT_MSK 0x00ffffffL /* Bit 23.. 0: Desc Poll Timer Bits */
957
958/* B28_DPT_CTRL 8 bit Descriptor Poll Timer Ctrl Reg */
959 /* Bit 7.. 2: reserved */
960#define DPT_START BIT_1S /* Start Descriptor Poll Timer */
961#define DPT_STOP BIT_0S /* Stop Descriptor Poll Timer */
962
963/* B2_E_3 8 bit lower 4 bits used for HW self test result */
964#define B2_E3_RES_MASK 0x0f
965
966/* B2_TST_CTRL1 8 bit Test Control Register 1 */
967#define TST_FRC_DPERR_MR BIT_7S /* force DATAPERR on MST RD */
968#define TST_FRC_DPERR_MW BIT_6S /* force DATAPERR on MST WR */
969#define TST_FRC_DPERR_TR BIT_5S /* force DATAPERR on TRG RD */
970#define TST_FRC_DPERR_TW BIT_4S /* force DATAPERR on TRG WR */
971#define TST_FRC_APERR_M BIT_3S /* force ADDRPERR on MST */
972#define TST_FRC_APERR_T BIT_2S /* force ADDRPERR on TRG */
973#define TST_CFG_WRITE_ON BIT_1S /* Enable Config Reg WR */
974#define TST_CFG_WRITE_OFF BIT_0S /* Disable Config Reg WR */
975
976/* B2_TST_CTRL2 8 bit Test Control Register 2 */
977 /* Bit 7.. 4: reserved */
978 /* force the following error on the next master read/write */
979#define TST_FRC_DPERR_MR64 BIT_3S /* DataPERR RD 64 */
980#define TST_FRC_DPERR_MW64 BIT_2S /* DataPERR WR 64 */
981#define TST_FRC_APERR_1M64 BIT_1S /* AddrPERR on 1st phase */
982#define TST_FRC_APERR_2M64 BIT_0S /* AddrPERR on 2nd phase */
983
984/* B2_GP_IO 32 bit General Purpose I/O Register */
985 /* Bit 31..26: reserved */
986#define GP_DIR_9 BIT_25 /* IO_9 direct, 0=In/1=Out */
987#define GP_DIR_8 BIT_24 /* IO_8 direct, 0=In/1=Out */
988#define GP_DIR_7 BIT_23 /* IO_7 direct, 0=In/1=Out */
989#define GP_DIR_6 BIT_22 /* IO_6 direct, 0=In/1=Out */
990#define GP_DIR_5 BIT_21 /* IO_5 direct, 0=In/1=Out */
991#define GP_DIR_4 BIT_20 /* IO_4 direct, 0=In/1=Out */
992#define GP_DIR_3 BIT_19 /* IO_3 direct, 0=In/1=Out */
993#define GP_DIR_2 BIT_18 /* IO_2 direct, 0=In/1=Out */
994#define GP_DIR_1 BIT_17 /* IO_1 direct, 0=In/1=Out */
995#define GP_DIR_0 BIT_16 /* IO_0 direct, 0=In/1=Out */
996 /* Bit 15..10: reserved */
997#define GP_IO_9 BIT_9 /* IO_9 pin */
998#define GP_IO_8 BIT_8 /* IO_8 pin */
999#define GP_IO_7 BIT_7 /* IO_7 pin */
1000#define GP_IO_6 BIT_6 /* IO_6 pin */
1001#define GP_IO_5 BIT_5 /* IO_5 pin */
1002#define GP_IO_4 BIT_4 /* IO_4 pin */
1003#define GP_IO_3 BIT_3 /* IO_3 pin */
1004#define GP_IO_2 BIT_2 /* IO_2 pin */
1005#define GP_IO_1 BIT_1 /* IO_1 pin */
1006#define GP_IO_0 BIT_0 /* IO_0 pin */
1007
1008/* B2_I2C_CTRL 32 bit I2C HW Control Register */
1009#define I2C_FLAG BIT_31 /* Start read/write if WR */
1010#define I2C_ADDR (0x7fffL<<16) /* Bit 30..16: Addr to be RD/WR */
1011#define I2C_DEV_SEL (0x7fL<<9) /* Bit 15.. 9: I2C Device Select */
1012 /* Bit 8.. 5: reserved */
1013#define I2C_BURST_LEN BIT_4 /* Burst Len, 1/4 bytes */
1014#define I2C_DEV_SIZE (7<<1) /* Bit 3.. 1: I2C Device Size */
1015#define I2C_025K_DEV (0<<1) /* 0: 256 Bytes or smaller */
1016#define I2C_05K_DEV (1<<1) /* 1: 512 Bytes */
1017#define I2C_1K_DEV (2<<1) /* 2: 1024 Bytes */
1018#define I2C_2K_DEV (3<<1) /* 3: 2048 Bytes */
1019#define I2C_4K_DEV (4<<1) /* 4: 4096 Bytes */
1020#define I2C_8K_DEV (5<<1) /* 5: 8192 Bytes */
1021#define I2C_16K_DEV (6<<1) /* 6: 16384 Bytes */
1022#define I2C_32K_DEV (7<<1) /* 7: 32768 Bytes */
1023#define I2C_STOP BIT_0 /* Interrupt I2C transfer */
1024
1025/* B2_I2C_IRQ 32 bit I2C HW IRQ Register */
1026 /* Bit 31.. 1 reserved */
1027#define I2C_CLR_IRQ BIT_0 /* Clear I2C IRQ */
1028
1029/* B2_I2C_SW 32 bit (8 bit access) I2C HW SW Port Register */
1030 /* Bit 7.. 3: reserved */
1031#define I2C_DATA_DIR BIT_2S /* direction of I2C_DATA */
1032#define I2C_DATA BIT_1S /* I2C Data Port */
1033#define I2C_CLK BIT_0S /* I2C Clock Port */
1034
1035/*
1036 * I2C Address
1037 */
1038#define I2C_SENS_ADDR LM80_ADDR /* I2C Sensor Address, (Volt and Temp)*/
1039
1040
1041/* B2_BSC_CTRL 8 bit Blink Source Counter Control */
1042 /* Bit 7.. 2: reserved */
1043#define BSC_START BIT_1S /* Start Blink Source Counter */
1044#define BSC_STOP BIT_0S /* Stop Blink Source Counter */
1045
1046/* B2_BSC_STAT 8 bit Blink Source Counter Status */
1047 /* Bit 7.. 1: reserved */
1048#define BSC_SRC BIT_0S /* Blink Source, 0=Off / 1=On */
1049
1050/* B2_BSC_TST 16 bit Blink Source Counter Test Reg */
1051#define BSC_T_ON BIT_2S /* Test mode on */
1052#define BSC_T_OFF BIT_1S /* Test mode off */
1053#define BSC_T_STEP BIT_0S /* Test step */
1054
1055
1056/* B3_RAM_ADDR 32 bit RAM Address, to read or write */
1057 /* Bit 31..19: reserved */
1058#define RAM_ADR_RAN 0x0007ffffL /* Bit 18.. 0: RAM Address Range */
1059
1060/* RAM Interface Registers */
1061/* B3_RI_CTRL 16 bit RAM Iface Control Register */
1062 /* Bit 15..10: reserved */
1063#define RI_CLR_RD_PERR BIT_9S /* Clear IRQ RAM Read Parity Err */
1064#define RI_CLR_WR_PERR BIT_8S /* Clear IRQ RAM Write Parity Err*/
1065 /* Bit 7.. 2: reserved */
1066#define RI_RST_CLR BIT_1S /* Clear RAM Interface Reset */
1067#define RI_RST_SET BIT_0S /* Set RAM Interface Reset */
1068
1069/* B3_RI_TEST 8 bit RAM Iface Test Register */
1070 /* Bit 7.. 4: reserved */
1071#define RI_T_EV BIT_3S /* Timeout Event occurred */
1072#define RI_T_ON BIT_2S /* Timeout Timer Test On */
1073#define RI_T_OFF BIT_1S /* Timeout Timer Test Off */
1074#define RI_T_STEP BIT_0S /* Timeout Timer Step */
1075
1076/* MAC Arbiter Registers */
1077/* B3_MA_TO_CTRL 16 bit MAC Arbiter Timeout Ctrl Reg */
1078 /* Bit 15.. 4: reserved */
1079#define MA_FOE_ON BIT_3S /* XMAC Fast Output Enable ON */
1080#define MA_FOE_OFF BIT_2S /* XMAC Fast Output Enable OFF */
1081#define MA_RST_CLR BIT_1S /* Clear MAC Arbiter Reset */
1082#define MA_RST_SET BIT_0S /* Set MAC Arbiter Reset */
1083
1084/* B3_MA_RC_CTRL 16 bit MAC Arbiter Recovery Ctrl Reg */
1085 /* Bit 15.. 8: reserved */
1086#define MA_ENA_REC_TX2 BIT_7S /* Enable Recovery Timer TX2 */
1087#define MA_DIS_REC_TX2 BIT_6S /* Disable Recovery Timer TX2 */
1088#define MA_ENA_REC_TX1 BIT_5S /* Enable Recovery Timer TX1 */
1089#define MA_DIS_REC_TX1 BIT_4S /* Disable Recovery Timer TX1 */
1090#define MA_ENA_REC_RX2 BIT_3S /* Enable Recovery Timer RX2 */
1091#define MA_DIS_REC_RX2 BIT_2S /* Disable Recovery Timer RX2 */
1092#define MA_ENA_REC_RX1 BIT_1S /* Enable Recovery Timer RX1 */
1093#define MA_DIS_REC_RX1 BIT_0S /* Disable Recovery Timer RX1 */
1094
1095/* Packet Arbiter Registers */
1096/* B3_PA_CTRL 16 bit Packet Arbiter Ctrl Register */
1097 /* Bit 15..14: reserved */
1098#define PA_CLR_TO_TX2 BIT_13S /* Clear IRQ Packet Timeout TX2 */
1099#define PA_CLR_TO_TX1 BIT_12S /* Clear IRQ Packet Timeout TX1 */
1100#define PA_CLR_TO_RX2 BIT_11S /* Clear IRQ Packet Timeout RX2 */
1101#define PA_CLR_TO_RX1 BIT_10S /* Clear IRQ Packet Timeout RX1 */
1102#define PA_ENA_TO_TX2 BIT_9S /* Enable Timeout Timer TX2 */
1103#define PA_DIS_TO_TX2 BIT_8S /* Disable Timeout Timer TX2 */
1104#define PA_ENA_TO_TX1 BIT_7S /* Enable Timeout Timer TX1 */
1105#define PA_DIS_TO_TX1 BIT_6S /* Disable Timeout Timer TX1 */
1106#define PA_ENA_TO_RX2 BIT_5S /* Enable Timeout Timer RX2 */
1107#define PA_DIS_TO_RX2 BIT_4S /* Disable Timeout Timer RX2 */
1108#define PA_ENA_TO_RX1 BIT_3S /* Enable Timeout Timer RX1 */
1109#define PA_DIS_TO_RX1 BIT_2S /* Disable Timeout Timer RX1 */
1110#define PA_RST_CLR BIT_1S /* Clear Packet Arbiter Reset */
1111#define PA_RST_SET BIT_0S /* Set Packet Arbiter Reset */
1112
1113#define PA_ENA_TO_ALL (PA_ENA_TO_RX1 | PA_ENA_TO_RX2 |\
1114 PA_ENA_TO_TX1 | PA_ENA_TO_TX2)
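
/*
 * Illustrative sketch: releasing the packet arbiter reset and enabling
 * all four packet timeout timers with the composite mask above.  'io'
 * and the bare writew() accessor are assumptions for illustration.
 */
#if 0
static void example_pa_init(void __iomem *io)
{
	writew(PA_RST_CLR, io + B3_PA_CTRL);	/* release arbiter reset */
	writew(PA_ENA_TO_ALL, io + B3_PA_CTRL);	/* enable all timeout timers */
}
#endif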
1115
1116/* Rx/Tx Path related Arbiter Test Registers */
1117/* B3_MA_TO_TEST 16 bit MAC Arbiter Timeout Test Reg */
1118/* B3_MA_RC_TEST 16 bit MAC Arbiter Recovery Test Reg */
1119/* B3_PA_TEST 16 bit Packet Arbiter Test Register */
1120/* Bit 15, 11, 7, and 3 are reserved in B3_PA_TEST */
1121#define TX2_T_EV BIT_15S /* TX2 Timeout/Recv Event occurred */
1122#define TX2_T_ON BIT_14S /* TX2 Timeout/Recv Timer Test On */
1123#define TX2_T_OFF BIT_13S /* TX2 Timeout/Recv Timer Tst Off */
1124#define TX2_T_STEP BIT_12S /* TX2 Timeout/Recv Timer Step */
1125#define TX1_T_EV BIT_11S /* TX1 Timeout/Recv Event occurred */
1126#define TX1_T_ON BIT_10S /* TX1 Timeout/Recv Timer Test On */
1127#define TX1_T_OFF BIT_9S /* TX1 Timeout/Recv Timer Tst Off */
1128#define TX1_T_STEP BIT_8S /* TX1 Timeout/Recv Timer Step */
1129#define RX2_T_EV BIT_7S /* RX2 Timeout/Recv Event occurred */
1130#define RX2_T_ON BIT_6S /* RX2 Timeout/Recv Timer Test On */
1131#define RX2_T_OFF BIT_5S /* RX2 Timeout/Recv Timer Tst Off */
1132#define RX2_T_STEP BIT_4S /* RX2 Timeout/Recv Timer Step */
1133#define RX1_T_EV BIT_3S /* RX1 Timeout/Recv Event occurred */
1134#define RX1_T_ON BIT_2S /* RX1 Timeout/Recv Timer Test On */
1135#define RX1_T_OFF BIT_1S /* RX1 Timeout/Recv Timer Tst Off */
1136#define RX1_T_STEP BIT_0S /* RX1 Timeout/Recv Timer Step */
1137
1138
1139/* Transmit Arbiter Registers MAC 1 and 2, use MR_ADDR() to access */
1140/* TXA_ITI_INI 32 bit Tx Arb Interval Timer Init Val */
1141/* TXA_ITI_VAL 32 bit Tx Arb Interval Timer Value */
1142/* TXA_LIM_INI 32 bit Tx Arb Limit Counter Init Val */
1143/* TXA_LIM_VAL 32 bit Tx Arb Limit Counter Value */
1144 /* Bit 31..24: reserved */
1145#define TXA_MAX_VAL 0x00ffffffUL/* Bit 23.. 0: Max TXA Timer/Cnt Val */
1146
1147/* TXA_CTRL 8 bit Tx Arbiter Control Register */
1148#define TXA_ENA_FSYNC BIT_7S /* Enable force of sync Tx queue */
1149#define TXA_DIS_FSYNC BIT_6S /* Disable force of sync Tx queue */
1150#define TXA_ENA_ALLOC BIT_5S /* Enable alloc of free bandwidth */
1151#define TXA_DIS_ALLOC BIT_4S /* Disable alloc of free bandwidth */
1152#define TXA_START_RC BIT_3S /* Start sync Rate Control */
1153#define TXA_STOP_RC BIT_2S /* Stop sync Rate Control */
1154#define TXA_ENA_ARB BIT_1S /* Enable Tx Arbiter */
1155#define TXA_DIS_ARB BIT_0S /* Disable Tx Arbiter */
1156
1157/* TXA_TEST 8 bit Tx Arbiter Test Register */
1158 /* Bit 7.. 6: reserved */
1159#define TXA_INT_T_ON BIT_5S /* Tx Arb Interval Timer Test On */
1160#define TXA_INT_T_OFF BIT_4S /* Tx Arb Interval Timer Test Off */
1161#define TXA_INT_T_STEP BIT_3S /* Tx Arb Interval Timer Step */
1162#define TXA_LIM_T_ON BIT_2S /* Tx Arb Limit Timer Test On */
1163#define TXA_LIM_T_OFF BIT_1S /* Tx Arb Limit Timer Test Off */
1164#define TXA_LIM_T_STEP BIT_0S /* Tx Arb Limit Timer Step */
1165
1166/* TXA_STAT 8 bit Tx Arbiter Status Register */
1167 /* Bit 7.. 1: reserved */
1168#define TXA_PRIO_XS BIT_0S /* sync queue has prio to send */
1169
1170/* Q_BC 32 bit Current Byte Counter */
1171 /* Bit 31..16: reserved */
1172#define BC_MAX 0xffff /* Bit 15.. 0: Byte counter */
1173
1174/* BMU Control Status Registers */
1175/* B0_R1_CSR 32 bit BMU Ctrl/Stat Rx Queue 1 */
1176/* B0_R2_CSR 32 bit BMU Ctrl/Stat Rx Queue 2 */
1177/* B0_XA1_CSR 32 bit BMU Ctrl/Stat Async Tx Queue 1 */
1178/* B0_XS1_CSR 32 bit BMU Ctrl/Stat Sync Tx Queue 1 */
1179/* B0_XA2_CSR 32 bit BMU Ctrl/Stat Async Tx Queue 2 */
1180/* B0_XS2_CSR 32 bit BMU Ctrl/Stat Sync Tx Queue 2 */
1181/* Q_CSR 32 bit BMU Control/Status Register */
1182 /* Bit 31..25: reserved */
1183#define CSR_SV_IDLE BIT_24 /* BMU SM Idle */
1184 /* Bit 23..22: reserved */
1185#define CSR_DESC_CLR BIT_21 /* Clear Reset for Descr */
1186#define CSR_DESC_SET BIT_20 /* Set Reset for Descr */
1187#define CSR_FIFO_CLR BIT_19 /* Clear Reset for FIFO */
1188#define CSR_FIFO_SET BIT_18 /* Set Reset for FIFO */
1189#define CSR_HPI_RUN BIT_17 /* Release HPI SM */
1190#define CSR_HPI_RST BIT_16 /* Reset HPI SM to Idle */
1191#define CSR_SV_RUN BIT_15 /* Release Supervisor SM */
1192#define CSR_SV_RST BIT_14 /* Reset Supervisor SM */
1193#define CSR_DREAD_RUN BIT_13 /* Release Descr Read SM */
1194#define CSR_DREAD_RST BIT_12 /* Reset Descr Read SM */
1195#define CSR_DWRITE_RUN BIT_11 /* Release Descr Write SM */
1196#define CSR_DWRITE_RST BIT_10 /* Reset Descr Write SM */
1197#define CSR_TRANS_RUN BIT_9 /* Release Transfer SM */
1198#define CSR_TRANS_RST BIT_8 /* Reset Transfer SM */
1199#define CSR_ENA_POL BIT_7 /* Enable Descr Polling */
1200#define CSR_DIS_POL BIT_6 /* Disable Descr Polling */
1201#define CSR_STOP BIT_5 /* Stop Rx/Tx Queue */
1202#define CSR_START BIT_4 /* Start Rx/Tx Queue */
1203#define CSR_IRQ_CL_P BIT_3 /* (Rx) Clear Parity IRQ */
1204#define CSR_IRQ_CL_B BIT_2 /* Clear EOB IRQ */
1205#define CSR_IRQ_CL_F BIT_1 /* Clear EOF IRQ */
1206#define CSR_IRQ_CL_C BIT_0 /* Clear ERR IRQ */
1207
1208#define CSR_SET_RESET (CSR_DESC_SET | CSR_FIFO_SET | CSR_HPI_RST |\
1209 CSR_SV_RST | CSR_DREAD_RST | CSR_DWRITE_RST |\
1210 CSR_TRANS_RST)
1211#define CSR_CLR_RESET (CSR_DESC_CLR | CSR_FIFO_CLR | CSR_HPI_RUN |\
1212 CSR_SV_RUN | CSR_DREAD_RUN | CSR_DWRITE_RUN |\
1213 CSR_TRANS_RUN)
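
/*
 * Illustrative sketch: the composite masks above reset or release every
 * state machine of a BMU in one write, so starting a queue could look
 * roughly like the following.  'io' and 'queue' are assumptions for
 * illustration; Q_ADDR() is defined elsewhere in this file.
 */
#if 0
static void example_bmu_start(void __iomem *io, int queue)
{
	writel(CSR_CLR_RESET, io + Q_ADDR(queue, Q_CSR));	/* release SMs */
	writel(CSR_START, io + Q_ADDR(queue, Q_CSR));		/* start queue */
}
#endif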
1214
1215/* Q_F 32 bit Flag Register */
1216 /* Bit 31..28: reserved */
1217#define F_ALM_FULL BIT_27 /* Rx FIFO: almost full */
1218#define F_EMPTY BIT_27 /* Tx FIFO: empty flag */
1219#define F_FIFO_EOF BIT_26 /* Tag (EOF Flag) bit in FIFO */
1220#define F_WM_REACHED BIT_25 /* Watermark reached */
1221 /* Bit 24: reserved */
1222#define F_FIFO_LEVEL (0x1fL<<16) /* Bit 23..16: # of Qwords in FIFO */
1223 /* Bit 15..11: reserved */
1224#define F_WATER_MARK 0x0007ffL /* Bit 10.. 0: Watermark */
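
/*
 * Illustrative sketch: the FIFO level field sits above bit 16, so it is
 * extracted by masking with F_FIFO_LEVEL and shifting right by 16.
 * 'io' and 'queue' are assumptions for illustration.
 */
#if 0
static u32 example_fifo_level(void __iomem *io, int queue)
{
	return (readl(io + Q_ADDR(queue, Q_F)) & F_FIFO_LEVEL) >> 16;
}
#endif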
1225
1226/* Q_T1 32 bit Test Register 1 */
1227/* Holds four State Machine control Bytes */
1228#define SM_CTRL_SV_MSK (0xffL<<24) /* Bit 31..24: Control Supervisor SM */
1229#define SM_CTRL_RD_MSK (0xffL<<16) /* Bit 23..16: Control Read Desc SM */
1230#define SM_CTRL_WR_MSK (0xffL<<8) /* Bit 15.. 8: Control Write Desc SM */
1231#define SM_CTRL_TR_MSK 0xffL /* Bit 7.. 0: Control Transfer SM */
1232
1233/* Q_T1_TR 8 bit Test Register 1 Transfer SM */
1234/* Q_T1_WR 8 bit Test Register 1 Write Descriptor SM */
1235/* Q_T1_RD 8 bit Test Register 1 Read Descriptor SM */
1236/* Q_T1_SV 8 bit Test Register 1 Supervisor SM */
1237
1238/* The control status byte of each machine looks like ... */
1239#define SM_STATE 0xf0 /* Bit 7.. 4: State which shall be loaded */
1240#define SM_LOAD BIT_3S /* Load the SM with SM_STATE */
1241#define SM_TEST_ON BIT_2S /* Switch on SM Test Mode */
1242#define SM_TEST_OFF BIT_1S /* Switch off SM Test Mode */
1243#define SM_STEP BIT_0S /* Step the State Machine */
1244/* The encoding of the states is not supported by the Diagnostics Tool */
1245
1246/* Q_T2 32 bit Test Register 2 */
1247 /* Bit 31.. 8: reserved */
1248#define T2_AC_T_ON BIT_7 /* Address Counter Test Mode on */
1249#define T2_AC_T_OFF BIT_6 /* Address Counter Test Mode off */
1250#define T2_BC_T_ON BIT_5 /* Byte Counter Test Mode on */
1251#define T2_BC_T_OFF BIT_4 /* Byte Counter Test Mode off */
1252#define T2_STEP04 BIT_3 /* Inc AC/Dec BC by 4 */
1253#define T2_STEP03 BIT_2 /* Inc AC/Dec BC by 3 */
1254#define T2_STEP02 BIT_1 /* Inc AC/Dec BC by 2 */
1255#define T2_STEP01 BIT_0 /* Inc AC/Dec BC by 1 */
1256
1257/* Q_T3 32 bit Test Register 3 */
1258 /* Bit 31.. 7: reserved */
1259#define T3_MUX_MSK (7<<4) /* Bit 6.. 4: Mux Position */
1260 /* Bit 3: reserved */
1261#define T3_VRAM_MSK 7 /* Bit 2.. 0: Virtual RAM Buffer Address */
1262
1263/* RAM Buffer Register Offsets, use RB_ADDR(Queue, Offs) to access */
1264/* RB_START 32 bit RAM Buffer Start Address */
1265/* RB_END 32 bit RAM Buffer End Address */
1266/* RB_WP 32 bit RAM Buffer Write Pointer */
1267/* RB_RP 32 bit RAM Buffer Read Pointer */
1268/* RB_RX_UTPP 32 bit Rx Upper Threshold, Pause Pack */
1269/* RB_RX_LTPP 32 bit Rx Lower Threshold, Pause Pack */
1270/* RB_RX_UTHP 32 bit Rx Upper Threshold, High Prio */
1271/* RB_RX_LTHP 32 bit Rx Lower Threshold, High Prio */
1272/* RB_PC 32 bit RAM Buffer Packet Counter */
1273/* RB_LEV 32 bit RAM Buffer Level Register */
1274 /* Bit 31..19: reserved */
1275#define RB_MSK 0x0007ffff /* Bit 18.. 0: RAM Buffer Pointer Bits */
1276
1277/* RB_TST2 8 bit RAM Buffer Test Register 2 */
1278 /* Bit 7.. 4: reserved */
1279#define RB_PC_DEC BIT_3S /* Packet Counter Decrem */
1280#define RB_PC_T_ON BIT_2S /* Packet Counter Test On */
1281#define RB_PC_T_OFF BIT_1S /* Packet Counter Tst Off */
1282#define RB_PC_INC BIT_0S /* Packet Counter Increm */
1283
1284/* RB_TST1 8 bit RAM Buffer Test Register 1 */
1285 /* Bit 7: reserved */
1286#define RB_WP_T_ON BIT_6S /* Write Pointer Test On */
1287#define RB_WP_T_OFF BIT_5S /* Write Pointer Test Off */
1288#define RB_WP_INC BIT_4S /* Write Pointer Increm */
1289 /* Bit 3: reserved */
1290#define RB_RP_T_ON BIT_2S /* Read Pointer Test On */
1291#define RB_RP_T_OFF BIT_1S /* Read Pointer Test Off */
1292#define RB_RP_DEC BIT_0S /* Read Pointer Decrement */
1293
1294/* RB_CTRL 8 bit RAM Buffer Control Register */
1295 /* Bit 7.. 6: reserved */
1296#define RB_ENA_STFWD BIT_5S /* Enable Store & Forward */
1297#define RB_DIS_STFWD BIT_4S /* Disable Store & Forward */
1298#define RB_ENA_OP_MD BIT_3S /* Enable Operation Mode */
1299#define RB_DIS_OP_MD BIT_2S /* Disable Operation Mode */
1300#define RB_RST_CLR BIT_1S /* Clear RAM Buf STM Reset */
1301#define RB_RST_SET BIT_0S /* Set RAM Buf STM Reset */
1302
1303
1304/* Receive and Transmit MAC FIFO Registers (GENESIS only) */
1305
1306/* RX_MFF_EA 32 bit Receive MAC FIFO End Address */
1307/* RX_MFF_WP 32 bit Receive MAC FIFO Write Pointer */
1308/* RX_MFF_RP 32 bit Receive MAC FIFO Read Pointer */
1309/* RX_MFF_PC 32 bit Receive MAC FIFO Packet Counter */
1310/* RX_MFF_LEV 32 bit Receive MAC FIFO Level */
1311/* TX_MFF_EA 32 bit Transmit MAC FIFO End Address */
1312/* TX_MFF_WP 32 bit Transmit MAC FIFO Write Pointer */
1313/* TX_MFF_WSP 32 bit Transmit MAC FIFO WR Shadow Pointer */
1314/* TX_MFF_RP 32 bit Transmit MAC FIFO Read Pointer */
1315/* TX_MFF_PC 32 bit Transmit MAC FIFO Packet Cnt */
1316/* TX_MFF_LEV 32 bit Transmit MAC FIFO Level */
1317 /* Bit 31.. 6: reserved */
1318#define MFF_MSK 0x007fL /* Bit 5.. 0: MAC FIFO Address/Ptr Bits */
1319
1320/* RX_MFF_CTRL1 16 bit Receive MAC FIFO Control Reg 1 */
1321 /* Bit 15..14: reserved */
1322#define MFF_ENA_RDY_PAT BIT_13S /* Enable Ready Patch */
1323#define MFF_DIS_RDY_PAT BIT_12S /* Disable Ready Patch */
1324#define MFF_ENA_TIM_PAT BIT_11S /* Enable Timing Patch */
1325#define MFF_DIS_TIM_PAT BIT_10S /* Disable Timing Patch */
1326#define MFF_ENA_ALM_FUL BIT_9S /* Enable AlmostFull Sign */
1327#define MFF_DIS_ALM_FUL BIT_8S /* Disable AlmostFull Sign */
1328#define MFF_ENA_PAUSE BIT_7S /* Enable Pause Signaling */
1329#define MFF_DIS_PAUSE BIT_6S /* Disable Pause Signaling */
1330#define MFF_ENA_FLUSH BIT_5S /* Enable Frame Flushing */
1331#define MFF_DIS_FLUSH BIT_4S /* Disable Frame Flushing */
1332#define MFF_ENA_TIST BIT_3S /* Enable Time Stamp Gener */
1333#define MFF_DIS_TIST BIT_2S /* Disable Time Stamp Gener */
1334#define MFF_CLR_INTIST BIT_1S /* Clear IRQ No Time Stamp */
1335#define MFF_CLR_INSTAT BIT_0S /* Clear IRQ No Status */
1336
1337#define MFF_RX_CTRL_DEF MFF_ENA_TIM_PAT
1338
1339/* TX_MFF_CTRL1 16 bit Transmit MAC FIFO Control Reg 1 */
1340#define MFF_CLR_PERR BIT_15S /* Clear Parity Error IRQ */
1341 /* Bit 14: reserved */
1342#define MFF_ENA_PKT_REC BIT_13S /* Enable Packet Recovery */
1343#define MFF_DIS_PKT_REC BIT_12S /* Disable Packet Recovery */
1344/* MFF_ENA_TIM_PAT (see RX_MFF_CTRL1) Bit 11: Enable Timing Patch */
1345/* MFF_DIS_TIM_PAT (see RX_MFF_CTRL1) Bit 10: Disable Timing Patch */
1346/* MFF_ENA_ALM_FUL (see RX_MFF_CTRL1) Bit 9: Enable Almost Full Sign */
1347/* MFF_DIS_ALM_FUL (see RX_MFF_CTRL1) Bit 8: Disable Almost Full Sign */
1348#define MFF_ENA_W4E BIT_7S /* Enable Wait for Empty */
1349#define MFF_DIS_W4E BIT_6S /* Disable Wait for Empty */
1350/* MFF_ENA_FLUSH (see RX_MFF_CTRL1) Bit 5: Enable Frame Flushing */
1351/* MFF_DIS_FLUSH (see RX_MFF_CTRL1) Bit 4: Disable Frame Flushing */
1352#define MFF_ENA_LOOPB BIT_3S /* Enable Loopback */
1353#define MFF_DIS_LOOPB BIT_2S /* Disable Loopback */
1354#define MFF_CLR_MAC_RST BIT_1S /* Clear XMAC Reset */
1355#define MFF_SET_MAC_RST BIT_0S /* Set XMAC Reset */
1356
1357#define MFF_TX_CTRL_DEF (MFF_ENA_PKT_REC | MFF_ENA_TIM_PAT | MFF_ENA_FLUSH)
1358
1359/* RX_MFF_TST2 8 bit Receive MAC FIFO Test Register 2 */
1360/* TX_MFF_TST2 8 bit Transmit MAC FIFO Test Register 2 */
1361 /* Bit 7: reserved */
1362#define MFF_WSP_T_ON BIT_6S /* Tx: Write Shadow Ptr TestOn */
1363#define MFF_WSP_T_OFF BIT_5S /* Tx: Write Shadow Ptr TstOff */
1364#define MFF_WSP_INC BIT_4S /* Tx: Write Shadow Ptr Increment */
1365#define MFF_PC_DEC BIT_3S /* Packet Counter Decrement */
1366#define MFF_PC_T_ON BIT_2S /* Packet Counter Test On */
1367#define MFF_PC_T_OFF BIT_1S /* Packet Counter Test Off */
1368#define MFF_PC_INC BIT_0S /* Packet Counter Increment */
1369
1370/* RX_MFF_TST1 8 bit Receive MAC FIFO Test Register 1 */
1371/* TX_MFF_TST1 8 bit Transmit MAC FIFO Test Register 1 */
1372 /* Bit 7: reserved */
1373#define MFF_WP_T_ON BIT_6S /* Write Pointer Test On */
1374#define MFF_WP_T_OFF BIT_5S /* Write Pointer Test Off */
1375#define MFF_WP_INC BIT_4S /* Write Pointer Increm */
1376 /* Bit 3: reserved */
1377#define MFF_RP_T_ON BIT_2S /* Read Pointer Test On */
1378#define MFF_RP_T_OFF BIT_1S /* Read Pointer Test Off */
1379#define MFF_RP_DEC BIT_0S /* Read Pointer Decrement */
1380
1381/* RX_MFF_CTRL2 8 bit Receive MAC FIFO Control Reg 2 */
1382/* TX_MFF_CTRL2 8 bit Transmit MAC FIFO Control Reg 2 */
1383 /* Bit 7..4: reserved */
1384#define MFF_ENA_OP_MD BIT_3S /* Enable Operation Mode */
1385#define MFF_DIS_OP_MD BIT_2S /* Disable Operation Mode */
1386#define MFF_RST_CLR BIT_1S /* Clear MAC FIFO Reset */
1387#define MFF_RST_SET BIT_0S /* Set MAC FIFO Reset */
1388
1389
1390/* Link LED Counter Registers (GENESIS only) */
1391
1392/* RX_LED_CTRL 8 bit Receive LED Cnt Control Reg */
1393/* TX_LED_CTRL 8 bit Transmit LED Cnt Control Reg */
1394/* LNK_SYNC_CTRL 8 bit Link Sync Cnt Control Register */
1395 /* Bit 7.. 3: reserved */
1396#define LED_START BIT_2S /* Start Timer */
1397#define LED_STOP BIT_1S /* Stop Timer */
1398#define LED_STATE BIT_0S /* Rx/Tx: LED State, 1=LED on */
1399#define LED_CLR_IRQ BIT_0S /* Lnk: Clear Link IRQ */
1400
1401/* RX_LED_TST 8 bit Receive LED Cnt Test Register */
1402/* TX_LED_TST 8 bit Transmit LED Cnt Test Register */
1403/* LNK_SYNC_TST 8 bit Link Sync Cnt Test Register */
1404 /* Bit 7.. 3: reserved */
1405#define LED_T_ON BIT_2S /* LED Counter Test mode On */
1406#define LED_T_OFF BIT_1S /* LED Counter Test mode Off */
1407#define LED_T_STEP BIT_0S /* LED Counter Step */
1408
1409/* LNK_LED_REG 8 bit Link LED Register */
1410 /* Bit 7.. 6: reserved */
1411#define LED_BLK_ON BIT_5S /* Link LED Blinking On */
1412#define LED_BLK_OFF BIT_4S /* Link LED Blinking Off */
1413#define LED_SYNC_ON BIT_3S /* Use Sync Wire to switch LED */
1414#define LED_SYNC_OFF BIT_2S /* Disable Sync Wire Input */
1415#define LED_ON BIT_1S /* switch LED on */
1416#define LED_OFF BIT_0S /* switch LED off */
1417
1418/* Receive and Transmit GMAC FIFO Registers (YUKON only) */
1419
1420/* RX_GMF_EA 32 bit Rx GMAC FIFO End Address */
1421/* RX_GMF_AF_THR 32 bit Rx GMAC FIFO Almost Full Thresh. */
1422/* RX_GMF_WP 32 bit Rx GMAC FIFO Write Pointer */
1423/* RX_GMF_WLEV 32 bit Rx GMAC FIFO Write Level */
1424/* RX_GMF_RP 32 bit Rx GMAC FIFO Read Pointer */
1425/* RX_GMF_RLEV 32 bit Rx GMAC FIFO Read Level */
1426/* TX_GMF_EA 32 bit Tx GMAC FIFO End Address */
1427/* TX_GMF_AE_THR 32 bit Tx GMAC FIFO Almost Empty Thresh.*/
1428/* TX_GMF_WP 32 bit Tx GMAC FIFO Write Pointer */
1429/* TX_GMF_WSP 32 bit Tx GMAC FIFO Write Shadow Ptr. */
1430/* TX_GMF_WLEV 32 bit Tx GMAC FIFO Write Level */
1431/* TX_GMF_RP 32 bit Tx GMAC FIFO Read Pointer */
1432/* TX_GMF_RSTP 32 bit Tx GMAC FIFO Restart Pointer */
1433/* TX_GMF_RLEV 32 bit Tx GMAC FIFO Read Level */
1434
1435/* RX_GMF_CTRL_T 32 bit Rx GMAC FIFO Control/Test */
1436 /* Bits 31..15: reserved */
1437#define GMF_WP_TST_ON BIT_14 /* Write Pointer Test On */
1438#define GMF_WP_TST_OFF BIT_13 /* Write Pointer Test Off */
1439#define GMF_WP_STEP BIT_12 /* Write Pointer Step/Increment */
1440 /* Bit 11: reserved */
1441#define GMF_RP_TST_ON BIT_10 /* Read Pointer Test On */
1442#define GMF_RP_TST_OFF BIT_9 /* Read Pointer Test Off */
1443#define GMF_RP_STEP BIT_8 /* Read Pointer Step/Increment */
1444#define GMF_RX_F_FL_ON BIT_7 /* Rx FIFO Flush Mode On */
1445#define GMF_RX_F_FL_OFF BIT_6 /* Rx FIFO Flush Mode Off */
1446#define GMF_CLI_RX_FO BIT_5 /* Clear IRQ Rx FIFO Overrun */
1447#define GMF_CLI_RX_FC BIT_4 /* Clear IRQ Rx Frame Complete */
1448#define GMF_OPER_ON BIT_3 /* Operational Mode On */
1449#define GMF_OPER_OFF BIT_2 /* Operational Mode Off */
1450#define GMF_RST_CLR BIT_1 /* Clear GMAC FIFO Reset */
1451#define GMF_RST_SET BIT_0 /* Set GMAC FIFO Reset */
1452
1453/* TX_GMF_CTRL_T 32 bit Tx GMAC FIFO Control/Test */
1454 /* Bits 31..19: reserved */
1455#define GMF_WSP_TST_ON BIT_18 /* Write Shadow Pointer Test On */
1456#define GMF_WSP_TST_OFF BIT_17 /* Write Shadow Pointer Test Off */
1457#define GMF_WSP_STEP BIT_16 /* Write Shadow Pointer Step/Increment */
1458 /* Bits 15..7: same as for RX_GMF_CTRL_T */
1459#define GMF_CLI_TX_FU BIT_6 /* Clear IRQ Tx FIFO Underrun */
1460#define GMF_CLI_TX_FC BIT_5 /* Clear IRQ Tx Frame Complete */
1461#define GMF_CLI_TX_PE BIT_4 /* Clear IRQ Tx Parity Error */
1462 /* Bits 3..0: same as for RX_GMF_CTRL_T */
1463
1464#define GMF_RX_CTRL_DEF (GMF_OPER_ON | GMF_RX_F_FL_ON)
1465#define GMF_TX_CTRL_DEF GMF_OPER_ON
1466
1467#define RX_GMF_FL_THR_DEF 0x0a /* Rx GMAC FIFO Flush Threshold default */
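
/*
 * Illustrative sketch: YUKON GMAC FIFO bring-up along the lines of the
 * defaults above (release the FIFO reset, apply the default control bits
 * and the default Rx flush threshold).  'io', 'port' and the bare
 * writel() accessor are assumptions for illustration; MR_ADDR() is
 * defined elsewhere in this file.
 */
#if 0
static void example_gmac_fifo_init(void __iomem *io, int port)
{
	/* Rx GMAC FIFO */
	writel(GMF_RST_CLR, io + MR_ADDR(port, RX_GMF_CTRL_T));
	writel(GMF_RX_CTRL_DEF, io + MR_ADDR(port, RX_GMF_CTRL_T));
	writel(RX_GMF_FL_THR_DEF, io + MR_ADDR(port, RX_GMF_FL_THR));

	/* Tx GMAC FIFO */
	writel(GMF_RST_CLR, io + MR_ADDR(port, TX_GMF_CTRL_T));
	writel(GMF_TX_CTRL_DEF, io + MR_ADDR(port, TX_GMF_CTRL_T));
}
#endif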
1468
1469/* GMAC_TI_ST_CTRL 8 bit Time Stamp Timer Ctrl Reg (YUKON only) */
1470 /* Bit 7.. 3: reserved */
1471#define GMT_ST_START BIT_2S /* Start Time Stamp Timer */
1472#define GMT_ST_STOP BIT_1S /* Stop Time Stamp Timer */
1473#define GMT_ST_CLR_IRQ BIT_0S /* Clear Time Stamp Timer IRQ */
1474
1475/* GMAC_CTRL 32 bit GMAC Control Reg (YUKON only) */
1476 /* Bits 31.. 8: reserved */
1477#define GMC_H_BURST_ON BIT_7 /* Half Duplex Burst Mode On */
1478#define GMC_H_BURST_OFF BIT_6 /* Half Duplex Burst Mode Off */
1479#define GMC_F_LOOPB_ON BIT_5 /* FIFO Loopback On */
1480#define GMC_F_LOOPB_OFF BIT_4 /* FIFO Loopback Off */
1481#define GMC_PAUSE_ON BIT_3 /* Pause On */
1482#define GMC_PAUSE_OFF BIT_2 /* Pause Off */
1483#define GMC_RST_CLR BIT_1 /* Clear GMAC Reset */
1484#define GMC_RST_SET BIT_0 /* Set GMAC Reset */
1485
1486/* GPHY_CTRL 32 bit GPHY Control Reg (YUKON only) */
1487 /* Bits 31..29: reserved */
1488#define GPC_SEL_BDT BIT_28 /* Select Bi-Dir. Transfer for MDC/MDIO */
1489#define GPC_INT_POL_HI BIT_27 /* IRQ Polarity is Active HIGH */
1490#define GPC_75_OHM BIT_26 /* Use 75 Ohm Termination instead of 50 */
1491#define GPC_DIS_FC BIT_25 /* Disable Automatic Fiber/Copper Detection */
1492#define GPC_DIS_SLEEP BIT_24 /* Disable Energy Detect */
1493#define GPC_HWCFG_M_3 BIT_23 /* HWCFG_MODE[3] */
1494#define GPC_HWCFG_M_2 BIT_22 /* HWCFG_MODE[2] */
1495#define GPC_HWCFG_M_1 BIT_21 /* HWCFG_MODE[1] */
1496#define GPC_HWCFG_M_0 BIT_20 /* HWCFG_MODE[0] */
1497#define GPC_ANEG_0 BIT_19 /* ANEG[0] */
1498#define GPC_ENA_XC BIT_18 /* Enable MDI crossover */
1499#define GPC_DIS_125 BIT_17 /* Disable 125 MHz clock */
1500#define GPC_ANEG_3 BIT_16 /* ANEG[3] */
1501#define GPC_ANEG_2 BIT_15 /* ANEG[2] */
1502#define GPC_ANEG_1 BIT_14 /* ANEG[1] */
1503#define GPC_ENA_PAUSE BIT_13 /* Enable Pause (SYM_OR_REM) */
1504#define GPC_PHYADDR_4 BIT_12 /* Bit 4 of Phy Addr */
1505#define GPC_PHYADDR_3 BIT_11 /* Bit 3 of Phy Addr */
1506#define GPC_PHYADDR_2 BIT_10 /* Bit 2 of Phy Addr */
1507#define GPC_PHYADDR_1 BIT_9 /* Bit 1 of Phy Addr */
1508#define GPC_PHYADDR_0 BIT_8 /* Bit 0 of Phy Addr */
1509 /* Bits 7..2: reserved */
1510#define GPC_RST_CLR BIT_1 /* Clear GPHY Reset */
1511#define GPC_RST_SET BIT_0 /* Set GPHY Reset */
1512
1513#define GPC_HWCFG_GMII_COP (GPC_HWCFG_M_3 | GPC_HWCFG_M_2 | \
1514 GPC_HWCFG_M_1 | GPC_HWCFG_M_0)
1515
1516#define GPC_HWCFG_GMII_FIB ( GPC_HWCFG_M_2 | \
1517 GPC_HWCFG_M_1 | GPC_HWCFG_M_0)
1518
1519#define GPC_ANEG_ADV_ALL_M (GPC_ANEG_3 | GPC_ANEG_2 | \
1520 GPC_ANEG_1 | GPC_ANEG_0)
1521
1522/* forced speed and duplex mode (don't mix with other ANEG bits) */
1523#define GPC_FRC10MBIT_HALF 0
1524#define GPC_FRC10MBIT_FULL GPC_ANEG_0
1525#define GPC_FRC100MBIT_HALF GPC_ANEG_1
1526#define GPC_FRC100MBIT_FULL (GPC_ANEG_0 | GPC_ANEG_1)
1527
1528/* auto-negotiation with limited advertised speeds */
1529/* mix only with master/slave settings (for copper) */
1530#define GPC_ADV_1000_HALF GPC_ANEG_2
1531#define GPC_ADV_1000_FULL GPC_ANEG_3
1532#define GPC_ADV_ALL (GPC_ANEG_2 | GPC_ANEG_3)
1533
1534/* master/slave settings */
1535/* only for copper with 1000 Mbps */
1536#define GPC_FORCE_MASTER 0
1537#define GPC_FORCE_SLAVE GPC_ANEG_0
1538#define GPC_PREF_MASTER GPC_ANEG_1
1539#define GPC_PREF_SLAVE (GPC_ANEG_1 | GPC_ANEG_0)
1540
1541/* GMAC_IRQ_SRC 8 bit GMAC Interrupt Source Reg (YUKON only) */
1542/* GMAC_IRQ_MSK 8 bit GMAC Interrupt Mask Reg (YUKON only) */
1543#define GM_IS_TX_CO_OV BIT_5 /* Transmit Counter Overflow IRQ */
1544#define GM_IS_RX_CO_OV BIT_4 /* Receive Counter Overflow IRQ */
1545#define GM_IS_TX_FF_UR BIT_3 /* Transmit FIFO Underrun */
1546#define GM_IS_TX_COMPL BIT_2 /* Frame Transmission Complete */
1547#define GM_IS_RX_FF_OR BIT_1 /* Receive FIFO Overrun */
1548#define GM_IS_RX_COMPL BIT_0 /* Frame Reception Complete */
1549
1550#define GMAC_DEF_MSK (GM_IS_TX_CO_OV | GM_IS_RX_CO_OV | \
1551 GM_IS_TX_FF_UR)
1552
1553/* GMAC_LINK_CTRL 16 bit GMAC Link Control Reg (YUKON only) */
1554 /* Bits 15.. 2: reserved */
1555#define GMLC_RST_CLR BIT_1S /* Clear GMAC Link Reset */
1556#define GMLC_RST_SET BIT_0S /* Set GMAC Link Reset */
1557
1558
1559/* WOL_CTRL_STAT 16 bit WOL Control/Status Reg */
1560#define WOL_CTL_LINK_CHG_OCC BIT_15S
1561#define WOL_CTL_MAGIC_PKT_OCC BIT_14S
1562#define WOL_CTL_PATTERN_OCC BIT_13S
1563
1564#define WOL_CTL_CLEAR_RESULT BIT_12S
1565
1566#define WOL_CTL_ENA_PME_ON_LINK_CHG BIT_11S
1567#define WOL_CTL_DIS_PME_ON_LINK_CHG BIT_10S
1568#define WOL_CTL_ENA_PME_ON_MAGIC_PKT BIT_9S
1569#define WOL_CTL_DIS_PME_ON_MAGIC_PKT BIT_8S
1570#define WOL_CTL_ENA_PME_ON_PATTERN BIT_7S
1571#define WOL_CTL_DIS_PME_ON_PATTERN BIT_6S
1572
1573#define WOL_CTL_ENA_LINK_CHG_UNIT BIT_5S
1574#define WOL_CTL_DIS_LINK_CHG_UNIT BIT_4S
1575#define WOL_CTL_ENA_MAGIC_PKT_UNIT BIT_3S
1576#define WOL_CTL_DIS_MAGIC_PKT_UNIT BIT_2S
1577#define WOL_CTL_ENA_PATTERN_UNIT BIT_1S
1578#define WOL_CTL_DIS_PATTERN_UNIT BIT_0S
1579
1580#define WOL_CTL_DEFAULT \
1581 (WOL_CTL_DIS_PME_ON_LINK_CHG | \
1582 WOL_CTL_DIS_PME_ON_PATTERN | \
1583 WOL_CTL_DIS_PME_ON_MAGIC_PKT | \
1584 WOL_CTL_DIS_LINK_CHG_UNIT | \
1585 WOL_CTL_DIS_PATTERN_UNIT | \
1586 WOL_CTL_DIS_MAGIC_PKT_UNIT)
1587
1588/* WOL_MATCH_CTL 8 bit WOL Match Control Reg */
1589#define WOL_CTL_PATT_ENA(x) (BIT_0 << (x))
1590
1591#define SK_NUM_WOL_PATTERN 7
1592#define SK_PATTERN_PER_WORD 4
1593#define SK_BITMASK_PATTERN 7
1594#define SK_POW_PATTERN_LENGTH 128
1595
1596#define WOL_LENGTH_MSK 0x7f
1597#define WOL_LENGTH_SHIFT 8
1598
1599
1600/* Receive and Transmit Descriptors ******************************************/
1601
1602/* Transmit Descriptor struct */
1603typedef struct s_HwTxd {
1604 SK_U32 volatile TxCtrl; /* Transmit Buffer Control Field */
1605 SK_U32 TxNext; /* Physical Address Pointer to the next TxD */
1606 SK_U32 TxAdrLo; /* Physical Tx Buffer Address lower dword */
1607 SK_U32 TxAdrHi; /* Physical Tx Buffer Address upper dword */
1608 SK_U32 TxStat; /* Transmit Frame Status Word */
1609#ifndef SK_USE_REV_DESC
1610 SK_U16 TxTcpOffs; /* TCP Checksum Calculation Start Value */
1611 SK_U16 TxRes1; /* 16 bit reserved field */
1612 SK_U16 TxTcpWp; /* TCP Checksum Write Position */
1613 SK_U16 TxTcpSp; /* TCP Checksum Calculation Start Position */
1614#else /* SK_USE_REV_DESC */
1615 SK_U16 TxRes1; /* 16 bit reserved field */
1616 SK_U16 TxTcpOffs; /* TCP Checksum Calculation Start Value */
1617 SK_U16 TxTcpSp; /* TCP Checksum Calculation Start Position */
1618 SK_U16 TxTcpWp; /* TCP Checksum Write Position */
1619#endif /* SK_USE_REV_DESC */
1620 SK_U32 TxRes2; /* 32 bit reserved field */
1621} SK_HWTXD;
1622
1623/* Receive Descriptor struct */
1624typedef struct s_HwRxd {
1625 SK_U32 volatile RxCtrl; /* Receive Buffer Control Field */
1626 SK_U32 RxNext; /* Physical Address Pointer to the next RxD */
1627 SK_U32 RxAdrLo; /* Physical Rx Buffer Address lower dword */
1628 SK_U32 RxAdrHi; /* Physical Rx Buffer Address upper dword */
1629 SK_U32 RxStat; /* Receive Frame Status Word */
1630 SK_U32 RxTiSt; /* Receive Time Stamp (from XMAC on GENESIS) */
1631#ifndef SK_USE_REV_DESC
1632 SK_U16 RxTcpSum1; /* TCP Checksum 1 */
1633 SK_U16 RxTcpSum2; /* TCP Checksum 2 */
1634 SK_U16 RxTcpSp1; /* TCP Checksum Calculation Start Position 1 */
1635 SK_U16 RxTcpSp2; /* TCP Checksum Calculation Start Position 2 */
1636#else /* SK_USE_REV_DESC */
1637 SK_U16 RxTcpSum2; /* TCP Checksum 2 */
1638 SK_U16 RxTcpSum1; /* TCP Checksum 1 */
1639 SK_U16 RxTcpSp2; /* TCP Checksum Calculation Start Position 2 */
1640 SK_U16 RxTcpSp1; /* TCP Checksum Calculation Start Position 1 */
1641#endif /* SK_USE_REV_DESC */
1642} SK_HWRXD;
1643
1644/*
1645 * Drivers which use the reverse descriptor feature (PCI_OUR_REG_2)
1646 * should set the define SK_USE_REV_DESC.
1647 * Structures are normally not endianness dependent. But in
1648 * this case the SK_U16 fields are bound to bit positions inside the
1649 * descriptor. RxTcpSum1, for example, must start at bit 0 within the 6th DWord.
1650 * The bit positions inside a DWord are, of course, endianness dependent and
1651 * swap if the DWord is swapped by the hardware.
1652 */
1653
1654
1655/* Descriptor Bit Definition */
1656/* TxCtrl Transmit Buffer Control Field */
1657/* RxCtrl Receive Buffer Control Field */
1658#define BMU_OWN BIT_31 /* OWN bit: 0=host/1=BMU */
1659#define BMU_STF BIT_30 /* Start of Frame */
1660#define BMU_EOF BIT_29 /* End of Frame */
1661#define BMU_IRQ_EOB BIT_28 /* Req "End of Buffer" IRQ */
1662#define BMU_IRQ_EOF BIT_27 /* Req "End of Frame" IRQ */
1663/* TxCtrl specific bits */
1664#define BMU_STFWD BIT_26 /* (Tx) Store & Forward Frame */
1665#define BMU_NO_FCS BIT_25 /* (Tx) Disable MAC FCS (CRC) generation */
1666#define BMU_SW BIT_24 /* (Tx) 1 bit res. for SW use */
1667/* RxCtrl specific bits */
1668#define BMU_DEV_0 BIT_26 /* (Rx) Transfer data to Dev0 */
1669#define BMU_STAT_VAL BIT_25 /* (Rx) Rx Status Valid */
1670#define BMU_TIST_VAL BIT_24 /* (Rx) Rx TimeStamp Valid */
1671 /* Bit 23..16: BMU Check Opcodes */
1672#define BMU_CHECK (0x55L<<16) /* Default BMU check */
1673#define BMU_TCP_CHECK (0x56L<<16) /* Descr with TCP ext */
1674#define BMU_UDP_CHECK (0x57L<<16) /* Descr with UDP ext (YUKON only) */
1675#define BMU_BBC 0xffffL /* Bit 15.. 0: Buffer Byte Counter */
1676
1677/* TxStat Transmit Frame Status Word */
1678/* RxStat Receive Frame Status Word */
1679/*
1680 * Note: TxStat is reserved for ASIC loopback mode only
1681 *
1682 * The Bits of the Status words are defined in xmac_ii.h
1683 * (see XMR_FS bits)
1684 */
1685
1686/* macros ********************************************************************/
1687
1688/* Receive and Transmit Queues */
1689#define Q_R1 0x0000 /* Receive Queue 1 */
1690#define Q_R2 0x0080 /* Receive Queue 2 */
1691#define Q_XS1 0x0200 /* Synchronous Transmit Queue 1 */
1692#define Q_XA1 0x0280 /* Asynchronous Transmit Queue 1 */
1693#define Q_XS2 0x0300 /* Synchronous Transmit Queue 2 */
1694#define Q_XA2 0x0380 /* Asynchronous Transmit Queue 2 */
1695
1696/*
1697 * Macro Q_ADDR()
1698 *
1699 * Use this macro to access the Receive and Transmit Queue Registers.
1700 *
1701 * para:
1702 * Queue Queue to access.
1703 * Values: Q_R1, Q_R2, Q_XS1, Q_XA1, Q_XS2, and Q_XA2
1704 * Offs Queue register offset.
1705 * Values: Q_D, Q_DA_L ... Q_T2, Q_T3
1706 *
1707 * usage SK_IN32(pAC, Q_ADDR(Q_R2, Q_BC), pVal)
1708 */
1709#define Q_ADDR(Queue, Offs) (B8_Q_REGS + (Queue) + (Offs))
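
/*
 * Illustrative sketch only: reading a queue register through Q_ADDR().
 * It assumes the SK_IN32() I/O macro, the SK_AC adapter context and the
 * Q_BC offset referenced in the usage note above; the helper name is
 * invented for the example.
 */
static inline SK_U32 ExampleReadRxQ2ByteCount(SK_AC *pAC)
{
	SK_U32	Val;

	/* B8_Q_REGS + Q_R2 + Q_BC addresses the Byte Counter of Rx Queue 2 */
	SK_IN32(pAC, Q_ADDR(Q_R2, Q_BC), &Val);
	return Val;
}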
1710
1711/*
1712 * Macro RB_ADDR()
1713 *
1714 * Use this macro to access the RAM Buffer Registers.
1715 *
1716 * para:
1717 * Queue Queue to access.
1718 * Values: Q_R1, Q_R2, Q_XS1, Q_XA1, Q_XS2, and Q_XA2
1719 * Offs Queue register offset.
1720 * Values: RB_START, RB_END ... RB_LEV, RB_CTRL
1721 *
1722 * usage SK_IN32(pAC, RB_ADDR(Q_R2, RB_RP), pVal)
1723 */
1724#define RB_ADDR(Queue, Offs) (B16_RAM_REGS + (Queue) + (Offs))
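
/*
 * Illustrative sketch only: reading a RAM Buffer register through
 * RB_ADDR(), assuming the SK_IN32() macro and the RB_RP offset shown
 * in the usage note above (the helper name is invented).
 */
static inline SK_U32 ExampleReadRxQ2RamReadPtr(SK_AC *pAC)
{
	SK_U32	Val;

	SK_IN32(pAC, RB_ADDR(Q_R2, RB_RP), &Val);
	return Val;
}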
1725
1726
1727/* MAC Related Registers */
1728#define MAC_1 0 /* belongs to the port near the slot */
1729#define MAC_2 1 /* belongs to the port far away from the slot */
1730
1731/*
1732 * Macro MR_ADDR()
1733 *
1734 * Use this macro to access the MAC Related Registers inside the ASIC.
1735 *
1736 * para:
1737 * Mac MAC to access.
1738 * Values: MAC_1, MAC_2
1739 * Offs MAC register offset.
1740 * Values: RX_MFF_EA, RX_MFF_WP ... LNK_LED_REG,
1741 * TX_MFF_EA, TX_MFF_WP ... TX_LED_TST
1742 *
1743 * usage SK_IN32(pAC, MR_ADDR(MAC_1, TX_MFF_EA), pVal)
1744 */
1745#define MR_ADDR(Mac, Offs) (((Mac) << 7) + (Offs))
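
/*
 * Illustrative sketch only: MR_ADDR() shifts the MAC index into bits 7
 * and above of the register offset. The example assumes SK_IN32(), SK_AC
 * and the TX_MFF_EA offset from the usage note above; the helper is
 * invented for the example.
 */
static inline SK_U32 ExampleReadTxFifoEndAddr(SK_AC *pAC, int Mac)
{
	SK_U32	Val;

	/* Mac is MAC_1 or MAC_2 */
	SK_IN32(pAC, MR_ADDR(Mac, TX_MFF_EA), &Val);
	return Val;
}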
1746
1747#ifdef SK_LITTLE_ENDIAN
1748#define XM_WORD_LO 0
1749#define XM_WORD_HI 1
1750#else /* !SK_LITTLE_ENDIAN */
1751#define XM_WORD_LO 1
1752#define XM_WORD_HI 0
1753#endif /* !SK_LITTLE_ENDIAN */
1754
1755
1756/*
1757 * macros to access the XMAC (GENESIS only)
1758 *
1759 * XM_IN16(), to read a 16 bit register (e.g. XM_MMU_CMD)
1760 * XM_OUT16(), to write a 16 bit register (e.g. XM_MMU_CMD)
1761 * XM_IN32(), to read a 32 bit register (e.g. XM_TX_EV_CNT)
1762 * XM_OUT32(), to write a 32 bit register (e.g. XM_TX_EV_CNT)
1763 * XM_INADDR(), to read a network address register (e.g. XM_SRC_CHK)
1764 * XM_OUTADDR(), to write a network address register (e.g. XM_SRC_CHK)
1765 * XM_INHASH(), to read the XM_HSM_CHK register
1766 * XM_OUTHASH() to write the XM_HSM_CHK register
1767 *
1768 * para:
1769 * Mac XMAC to access values: MAC_1 or MAC_2
1770 * IoC I/O context needed for SK I/O macros
1771 * Reg XMAC Register to read or write
1772 * (p)Val Value or pointer to the value which should be read or written
1773 *
1774 * usage: XM_OUT16(IoC, MAC_1, XM_MMU_CMD, Value);
1775 */
1776
1777#define XMA(Mac, Reg) \
1778 ((BASE_XMAC_1 + (Mac) * (BASE_XMAC_2 - BASE_XMAC_1)) | ((Reg) << 1))
1779
1780#define XM_IN16(IoC, Mac, Reg, pVal) \
1781 SK_IN16((IoC), XMA((Mac), (Reg)), (pVal))
1782
1783#define XM_OUT16(IoC, Mac, Reg, Val) \
1784 SK_OUT16((IoC), XMA((Mac), (Reg)), (Val))
1785
1786#define XM_IN32(IoC, Mac, Reg, pVal) { \
1787 SK_IN16((IoC), XMA((Mac), (Reg)), \
1788 (SK_U16 SK_FAR*)&((SK_U16 SK_FAR*)(pVal))[XM_WORD_LO]); \
1789 SK_IN16((IoC), XMA((Mac), (Reg+2)), \
1790 (SK_U16 SK_FAR*)&((SK_U16 SK_FAR*)(pVal))[XM_WORD_HI]); \
1791}
1792
1793#define XM_OUT32(IoC, Mac, Reg, Val) { \
1794 SK_OUT16((IoC), XMA((Mac), (Reg)), (SK_U16)((Val) & 0xffffL)); \
1795 SK_OUT16((IoC), XMA((Mac), (Reg+2)), (SK_U16)(((Val) >> 16) & 0xffffL));\
1796}
1797
1798/* Remember: we are always writing to / reading from LITTLE ENDIAN memory */
1799
1800#define XM_INADDR(IoC, Mac, Reg, pVal) { \
1801 SK_U16 Word; \
1802 SK_U8 *pByte; \
1803 pByte = (SK_U8 *)&((SK_U8 *)(pVal))[0]; \
1804 SK_IN16((IoC), XMA((Mac), (Reg)), &Word); \
1805 pByte[0] = (SK_U8)(Word & 0x00ff); \
1806 pByte[1] = (SK_U8)((Word >> 8) & 0x00ff); \
1807 SK_IN16((IoC), XMA((Mac), (Reg+2)), &Word); \
1808 pByte[2] = (SK_U8)(Word & 0x00ff); \
1809 pByte[3] = (SK_U8)((Word >> 8) & 0x00ff); \
1810 SK_IN16((IoC), XMA((Mac), (Reg+4)), &Word); \
1811 pByte[4] = (SK_U8)(Word & 0x00ff); \
1812 pByte[5] = (SK_U8)((Word >> 8) & 0x00ff); \
1813}
1814
1815#define XM_OUTADDR(IoC, Mac, Reg, pVal) { \
1816 SK_U8 SK_FAR *pByte; \
1817 pByte = (SK_U8 SK_FAR *)&((SK_U8 SK_FAR *)(pVal))[0]; \
1818 SK_OUT16((IoC), XMA((Mac), (Reg)), (SK_U16) \
1819 (((SK_U16)(pByte[0]) & 0x00ff) | \
1820 (((SK_U16)(pByte[1]) << 8) & 0xff00))); \
1821 SK_OUT16((IoC), XMA((Mac), (Reg+2)), (SK_U16) \
1822 (((SK_U16)(pByte[2]) & 0x00ff) | \
1823 (((SK_U16)(pByte[3]) << 8) & 0xff00))); \
1824 SK_OUT16((IoC), XMA((Mac), (Reg+4)), (SK_U16) \
1825 (((SK_U16)(pByte[4]) & 0x00ff) | \
1826 (((SK_U16)(pByte[5]) << 8) & 0xff00))); \
1827}
1828
1829#define XM_INHASH(IoC, Mac, Reg, pVal) { \
1830 SK_U16 Word; \
1831 SK_U8 SK_FAR *pByte; \
1832 pByte = (SK_U8 SK_FAR *)&((SK_U8 SK_FAR *)(pVal))[0]; \
1833 SK_IN16((IoC), XMA((Mac), (Reg)), &Word); \
1834 pByte[0] = (SK_U8)(Word & 0x00ff); \
1835 pByte[1] = (SK_U8)((Word >> 8) & 0x00ff); \
1836 SK_IN16((IoC), XMA((Mac), (Reg+2)), &Word); \
1837 pByte[2] = (SK_U8)(Word & 0x00ff); \
1838 pByte[3] = (SK_U8)((Word >> 8) & 0x00ff); \
1839 SK_IN16((IoC), XMA((Mac), (Reg+4)), &Word); \
1840 pByte[4] = (SK_U8)(Word & 0x00ff); \
1841 pByte[5] = (SK_U8)((Word >> 8) & 0x00ff); \
1842 SK_IN16((IoC), XMA((Mac), (Reg+6)), &Word); \
1843 pByte[6] = (SK_U8)(Word & 0x00ff); \
1844 pByte[7] = (SK_U8)((Word >> 8) & 0x00ff); \
1845}
1846
1847#define XM_OUTHASH(IoC, Mac, Reg, pVal) { \
1848 SK_U8 SK_FAR *pByte; \
1849 pByte = (SK_U8 SK_FAR *)&((SK_U8 SK_FAR *)(pVal))[0]; \
1850 SK_OUT16((IoC), XMA((Mac), (Reg)), (SK_U16) \
1851 (((SK_U16)(pByte[0]) & 0x00ff)| \
1852 (((SK_U16)(pByte[1]) << 8) & 0xff00))); \
1853 SK_OUT16((IoC), XMA((Mac), (Reg+2)), (SK_U16) \
1854 (((SK_U16)(pByte[2]) & 0x00ff)| \
1855 (((SK_U16)(pByte[3]) << 8) & 0xff00))); \
1856 SK_OUT16((IoC), XMA((Mac), (Reg+4)), (SK_U16) \
1857 (((SK_U16)(pByte[4]) & 0x00ff)| \
1858 (((SK_U16)(pByte[5]) << 8) & 0xff00))); \
1859 SK_OUT16((IoC), XMA((Mac), (Reg+6)), (SK_U16) \
1860 (((SK_U16)(pByte[6]) & 0x00ff)| \
1861 (((SK_U16)(pByte[7]) << 8) & 0xff00))); \
1862}
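
/*
 * Illustrative sketch only: XM_IN32() reads a 32 bit XMAC register as two
 * 16 bit accesses and places them according to XM_WORD_LO/XM_WORD_HI.
 * Assumes SK_IOC and the XM_TX_EV_CNT register named above; the helper
 * is invented for the example.
 */
static inline SK_U32 ExampleReadXmacTxEventCount(SK_IOC IoC, int Mac)
{
	SK_U32	Cnt;

	XM_IN32(IoC, Mac, XM_TX_EV_CNT, &Cnt);
	return Cnt;
}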
1863
1864/*
1865 * macros to access the GMAC (YUKON only)
1866 *
1867 * GM_IN16(), to read a 16 bit register (e.g. GM_GP_STAT)
1868 * GM_OUT16(), to write a 16 bit register (e.g. GM_GP_CTRL)
1869 * GM_IN32(), to read a 32 bit register (e.g. GM_)
1870 * GM_OUT32(), to write a 32 bit register (e.g. GM_)
1871 * GM_INADDR(), to read a network address register (e.g. GM_SRC_ADDR_1L)
1872 * GM_OUTADDR(), to write a network address register (e.g. GM_SRC_ADDR_2L)
1873 * GM_INHASH(), to read the GM_MC_ADDR_H1 register
1874 * GM_OUTHASH() to write the GM_MC_ADDR_H1 register
1875 *
1876 * para:
1877 * Mac GMAC to access values: MAC_1 or MAC_2
1878 * IoC I/O context needed for SK I/O macros
1879 * Reg GMAC Register to read or write
1880 * (p)Val Value or pointer to the value which should be read or written
1881 *
1882 * usage: GM_OUT16(IoC, MAC_1, GM_GP_CTRL, Value);
1883 */
1884
1885#define GMA(Mac, Reg) \
1886 ((BASE_GMAC_1 + (Mac) * (BASE_GMAC_2 - BASE_GMAC_1)) | (Reg))
1887
1888#define GM_IN16(IoC, Mac, Reg, pVal) \
1889 SK_IN16((IoC), GMA((Mac), (Reg)), (pVal))
1890
1891#define GM_OUT16(IoC, Mac, Reg, Val) \
1892 SK_OUT16((IoC), GMA((Mac), (Reg)), (Val))
1893
1894#define GM_IN32(IoC, Mac, Reg, pVal) { \
1895 SK_IN16((IoC), GMA((Mac), (Reg)), \
1896 (SK_U16 SK_FAR*)&((SK_U16 SK_FAR*)(pVal))[XM_WORD_LO]); \
1897 SK_IN16((IoC), GMA((Mac), (Reg+4)), \
1898 (SK_U16 SK_FAR*)&((SK_U16 SK_FAR*)(pVal))[XM_WORD_HI]); \
1899}
1900
1901#define GM_OUT32(IoC, Mac, Reg, Val) { \
1902 SK_OUT16((IoC), GMA((Mac), (Reg)), (SK_U16)((Val) & 0xffffL)); \
1903 SK_OUT16((IoC), GMA((Mac), (Reg+4)), (SK_U16)(((Val) >> 16) & 0xffffL));\
1904}
1905
1906#define GM_INADDR(IoC, Mac, Reg, pVal) { \
1907 SK_U16 Word; \
1908 SK_U8 *pByte; \
1909 pByte = (SK_U8 *)&((SK_U8 *)(pVal))[0]; \
1910 SK_IN16((IoC), GMA((Mac), (Reg)), &Word); \
1911 pByte[0] = (SK_U8)(Word & 0x00ff); \
1912 pByte[1] = (SK_U8)((Word >> 8) & 0x00ff); \
1913 SK_IN16((IoC), GMA((Mac), (Reg+4)), &Word); \
1914 pByte[2] = (SK_U8)(Word & 0x00ff); \
1915 pByte[3] = (SK_U8)((Word >> 8) & 0x00ff); \
1916 SK_IN16((IoC), GMA((Mac), (Reg+8)), &Word); \
1917 pByte[4] = (SK_U8)(Word & 0x00ff); \
1918 pByte[5] = (SK_U8)((Word >> 8) & 0x00ff); \
1919}
1920
1921#define GM_OUTADDR(IoC, Mac, Reg, pVal) { \
1922 SK_U8 SK_FAR *pByte; \
1923 pByte = (SK_U8 SK_FAR *)&((SK_U8 SK_FAR *)(pVal))[0]; \
1924 SK_OUT16((IoC), GMA((Mac), (Reg)), (SK_U16) \
1925 (((SK_U16)(pByte[0]) & 0x00ff) | \
1926 (((SK_U16)(pByte[1]) << 8) & 0xff00))); \
1927 SK_OUT16((IoC), GMA((Mac), (Reg+4)), (SK_U16) \
1928 (((SK_U16)(pByte[2]) & 0x00ff) | \
1929 (((SK_U16)(pByte[3]) << 8) & 0xff00))); \
1930 SK_OUT16((IoC), GMA((Mac), (Reg+8)), (SK_U16) \
1931 (((SK_U16)(pByte[4]) & 0x00ff) | \
1932 (((SK_U16)(pByte[5]) << 8) & 0xff00))); \
1933}
1934
1935#define GM_INHASH(IoC, Mac, Reg, pVal) { \
1936 SK_U16 Word; \
1937 SK_U8 *pByte; \
1938 pByte = (SK_U8 *)&((SK_U8 *)(pVal))[0]; \
1939 SK_IN16((IoC), GMA((Mac), (Reg)), &Word); \
1940 pByte[0] = (SK_U8)(Word & 0x00ff); \
1941 pByte[1] = (SK_U8)((Word >> 8) & 0x00ff); \
1942 SK_IN16((IoC), GMA((Mac), (Reg+4)), &Word); \
1943 pByte[2] = (SK_U8)(Word & 0x00ff); \
1944 pByte[3] = (SK_U8)((Word >> 8) & 0x00ff); \
1945 SK_IN16((IoC), GMA((Mac), (Reg+8)), &Word); \
1946 pByte[4] = (SK_U8)(Word & 0x00ff); \
1947 pByte[5] = (SK_U8)((Word >> 8) & 0x00ff); \
1948 SK_IN16((IoC), GMA((Mac), (Reg+12)), &Word); \
1949 pByte[6] = (SK_U8)(Word & 0x00ff); \
1950 pByte[7] = (SK_U8)((Word >> 8) & 0x00ff); \
1951}
1952
1953#define GM_OUTHASH(IoC, Mac, Reg, pVal) { \
1954 SK_U8 *pByte; \
1955 pByte = (SK_U8 *)&((SK_U8 *)(pVal))[0]; \
1956 SK_OUT16((IoC), GMA((Mac), (Reg)), (SK_U16) \
1957 (((SK_U16)(pByte[0]) & 0x00ff)| \
1958 (((SK_U16)(pByte[1]) << 8) & 0xff00))); \
1959 SK_OUT16((IoC), GMA((Mac), (Reg+4)), (SK_U16) \
1960 (((SK_U16)(pByte[2]) & 0x00ff)| \
1961 (((SK_U16)(pByte[3]) << 8) & 0xff00))); \
1962 SK_OUT16((IoC), GMA((Mac), (Reg+8)), (SK_U16) \
1963 (((SK_U16)(pByte[4]) & 0x00ff)| \
1964 (((SK_U16)(pByte[5]) << 8) & 0xff00))); \
1965 SK_OUT16((IoC), GMA((Mac), (Reg+12)), (SK_U16) \
1966 (((SK_U16)(pByte[6]) & 0x00ff)| \
1967 (((SK_U16)(pByte[7]) << 8) & 0xff00))); \
1968}
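
/*
 * Illustrative sketch only: reading the GMAC General Purpose Status
 * register through GM_IN16(). Assumes SK_IOC and the GM_GP_STAT register
 * named above; the helper is invented for the example.
 */
static inline SK_U16 ExampleReadGmacGpStatus(SK_IOC IoC, int Mac)
{
	SK_U16	GpStat;

	GM_IN16(IoC, Mac, GM_GP_STAT, &GpStat);
	return GpStat;
}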
1969
1970/*
1971 * Different MAC Types
1972 */
1973#define SK_MAC_XMAC 0 /* Xaqti XMAC II */
1974#define SK_MAC_GMAC 1 /* Marvell GMAC */
1975
1976/*
1977 * Different PHY Types
1978 */
1979#define SK_PHY_XMAC 0 /* integrated in XMAC II */
1980#define SK_PHY_BCOM 1 /* Broadcom BCM5400 */
1981#define SK_PHY_LONE 2 /* Level One LXT1000 */
1982#define SK_PHY_NAT 3 /* National DP83891 */
1983#define SK_PHY_MARV_COPPER 4 /* Marvell 88E1011S */
1984#define SK_PHY_MARV_FIBER 5 /* Marvell 88E1011S working on fiber */
1985
1986/*
1987 * PHY addresses (bits 12..8 of PHY address reg)
1988 */
1989#define PHY_ADDR_XMAC (0<<8)
1990#define PHY_ADDR_BCOM (1<<8)
1991#define PHY_ADDR_LONE (3<<8)
1992#define PHY_ADDR_NAT (0<<8)
1993
1994/* GPHY address (bits 15..11 of SMI control reg) */
1995#define PHY_ADDR_MARV 0
1996
1997/*
1998 * macros to access the PHY
1999 *
2000 * PHY_READ() read a 16 bit value from the PHY
2001 * PHY_WRITE() write a 16 bit value to the PHY
2002 *
2003 * para:
2004 * IoC I/O context needed for SK I/O macros
2005 * pPort Pointer to port struct for PhyAddr
2006 * Mac XMAC to access values: MAC_1 or MAC_2
2007 * PhyReg PHY Register to read or write
2008 * (p)Val Value or pointer to the value which should be read or
2009 * written.
2010 *
2011 * usage: PHY_READ(IoC, pPort, MAC_1, PHY_CTRL, Value);
2012 * Warning: a PHY_READ on an uninitialized PHY (PHY still in reset) never
2013 * comes back. This is checked in DEBUG mode.
2014 */
2015#ifndef DEBUG
2016#define PHY_READ(IoC, pPort, Mac, PhyReg, pVal) { \
2017 SK_U16 Mmu; \
2018 \
2019 XM_OUT16((IoC), (Mac), XM_PHY_ADDR, (PhyReg) | (pPort)->PhyAddr); \
2020 XM_IN16((IoC), (Mac), XM_PHY_DATA, (pVal)); \
2021 if ((pPort)->PhyType != SK_PHY_XMAC) { \
2022 do { \
2023 XM_IN16((IoC), (Mac), XM_MMU_CMD, &Mmu); \
2024 } while ((Mmu & XM_MMU_PHY_RDY) == 0); \
2025 XM_IN16((IoC), (Mac), XM_PHY_DATA, (pVal)); \
2026 } \
2027}
2028#else
2029#define PHY_READ(IoC, pPort, Mac, PhyReg, pVal) { \
2030 SK_U16 Mmu; \
2031 int __i = 0; \
2032 \
2033 XM_OUT16((IoC), (Mac), XM_PHY_ADDR, (PhyReg) | (pPort)->PhyAddr); \
2034 XM_IN16((IoC), (Mac), XM_PHY_DATA, (pVal)); \
2035 if ((pPort)->PhyType != SK_PHY_XMAC) { \
2036 do { \
2037 XM_IN16((IoC), (Mac), XM_MMU_CMD, &Mmu); \
2038 __i++; \
2039 if (__i > 100000) { \
2040 SK_DBG_PRINTF("*****************************\n"); \
2041 SK_DBG_PRINTF("PHY_READ on uninitialized PHY\n"); \
2042 SK_DBG_PRINTF("*****************************\n"); \
2043 break; \
2044 } \
2045 } while ((Mmu & XM_MMU_PHY_RDY) == 0); \
2046 XM_IN16((IoC), (Mac), XM_PHY_DATA, (pVal)); \
2047 } \
2048}
2049#endif /* DEBUG */
2050
2051#define PHY_WRITE(IoC, pPort, Mac, PhyReg, Val) { \
2052 SK_U16 Mmu; \
2053 \
2054 if ((pPort)->PhyType != SK_PHY_XMAC) { \
2055 do { \
2056 XM_IN16((IoC), (Mac), XM_MMU_CMD, &Mmu); \
2057 } while ((Mmu & XM_MMU_PHY_BUSY) != 0); \
2058 } \
2059 XM_OUT16((IoC), (Mac), XM_PHY_ADDR, (PhyReg) | (pPort)->PhyAddr); \
2060 XM_OUT16((IoC), (Mac), XM_PHY_DATA, (Val)); \
2061 if ((pPort)->PhyType != SK_PHY_XMAC) { \
2062 do { \
2063 XM_IN16((IoC), (Mac), XM_MMU_CMD, &Mmu); \
2064 } while ((Mmu & XM_MMU_PHY_BUSY) != 0); \
2065 } \
2066}
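
/*
 * Illustrative sketch only: a PHY register read as described above.
 * Assumes SK_IOC, a port structure providing PhyAddr/PhyType (SK_GEPORT
 * from skgeinit.h) and the PHY_CTRL register from the usage note; the
 * helper is invented. Note that the last parameter must be a pointer.
 */
static inline void ExampleReadPhyCtrl(SK_IOC IoC, SK_GEPORT *pPort, SK_U16 *pVal)
{
	/* must not be called before the PHY has been taken out of reset */
	PHY_READ(IoC, pPort, MAC_1, PHY_CTRL, pVal);
}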
2067
2068/*
2069 * Macro PCI_C()
2070 *
2071 * Use this macro to access PCI config register from the I/O space.
2072 *
2073 * para:
2074 * Addr PCI configuration register to access.
2075 * Values: PCI_VENDOR_ID ... PCI_VPD_ADR_REG,
2076 *
2077 * usage SK_IN16(pAC, PCI_C(PCI_VENDOR_ID), pVal);
2078 */
2079#define PCI_C(Addr) (B7_CFG_SPC + (Addr)) /* PCI Config Space */
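
/*
 * Illustrative sketch only: reading a PCI config register through the
 * I/O window, as in the usage note above. Assumes SK_IN16(), SK_AC and
 * the PCI_VENDOR_ID offset; the helper name is invented.
 */
static inline SK_U16 ExampleReadPciVendorId(SK_AC *pAC)
{
	SK_U16	VendorId;

	SK_IN16(pAC, PCI_C(PCI_VENDOR_ID), &VendorId);
	return VendorId;
}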
2080
2081/*
2082 * Macro SK_HW_ADDR(Base, Addr)
2083 *
2084 * Calculates the effective HW address
2085 *
2086 * para:
2087 * Base I/O or memory base address
2088 * Addr Address offset
2089 *
2090 * usage: May be used in SK_INxx and SK_OUTxx macros
2091 * #define SK_IN8(pAC, Addr, pVal) ...\
2092 * *pVal = (SK_U8)inp(SK_HW_ADDR(pAC->Hw.Iop, Addr))
2093 */
2094#ifdef SK_MEM_MAPPED_IO
2095#define SK_HW_ADDR(Base, Addr) ((Base) + (Addr))
2096#else /* SK_MEM_MAPPED_IO */
2097#define SK_HW_ADDR(Base, Addr) \
2098 ((Base) + (((Addr) & 0x7f) | (((Addr) >> 7 > 0) ? 0x80 : 0)))
2099#endif /* SK_MEM_MAPPED_IO */
2100
2101#define SZ_LONG (sizeof(SK_U32))
2102
2103/*
2104 * Macro SK_HWAC_LINK_LED()
2105 *
2106 * Use this macro to set the link LED mode.
2107 * para:
2108 * pAC Pointer to adapter context struct
2109 * IoC I/O context needed for SK I/O macros
2110 * Port Port number
2111 * Mode Mode to set for this LED
2112 */
2113#define SK_HWAC_LINK_LED(pAC, IoC, Port, Mode) \
2114 SK_OUT8(IoC, MR_ADDR(Port, LNK_LED_REG), Mode);
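
/*
 * Illustrative sketch only: switching the link LED of the first port to
 * the "active" mode. SK_LED_ACTIVE is one of the 'Mode' values defined
 * in skgeinit.h; pAC and IoC are the usual adapter and I/O contexts, and
 * the helper name is invented for the example.
 */
static inline void ExampleLinkLedActive(SK_AC *pAC, SK_IOC IoC)
{
	SK_HWAC_LINK_LED(pAC, IoC, MAC_1, SK_LED_ACTIVE);
}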
2115
2116
2117/* typedefs *******************************************************************/
2118
2119
2120/* function prototypes ********************************************************/
2121
2122#ifdef __cplusplus
2123}
2124#endif /* __cplusplus */
2125
2126#endif /* __INC_SKGEHW_H */
diff --git a/drivers/net/sk98lin/h/skgehwt.h b/drivers/net/sk98lin/h/skgehwt.h
deleted file mode 100644
index e6b0016a695c..000000000000
--- a/drivers/net/sk98lin/h/skgehwt.h
+++ /dev/null
@@ -1,48 +0,0 @@
1/******************************************************************************
2 *
3 * Name: skhwt.h
4 * Project: Gigabit Ethernet Adapters, Event Scheduler Module
5 * Version: $Revision: 1.7 $
6 * Date: $Date: 2003/09/16 12:55:08 $
7 * Purpose: Defines for the hardware timer functions
8 *
9 ******************************************************************************/
10
11/******************************************************************************
12 *
13 * (C)Copyright 1998-2002 SysKonnect GmbH.
14 * (C)Copyright 2002-2003 Marvell.
15 *
16 * This program is free software; you can redistribute it and/or modify
17 * it under the terms of the GNU General Public License as published by
18 * the Free Software Foundation; either version 2 of the License, or
19 * (at your option) any later version.
20 *
21 * The information in this file is provided "AS IS" without warranty.
22 *
23 ******************************************************************************/
24
25/*
26 * SKGEHWT.H contains all defines and types for the timer functions
27 */
28
29#ifndef _SKGEHWT_H_
30#define _SKGEHWT_H_
31
32/*
33 * SK Hardware Timer
34 * - needed wherever the HWT module is used
35 * - use in Adapters context name pAC->Hwt
36 */
37typedef struct s_Hwt {
38 SK_U32 TStart; /* HWT start */
39 SK_U32 TStop; /* HWT stop */
40 int TActive; /* HWT: flag : active/inactive */
41} SK_HWT;
42
43extern void SkHwtInit(SK_AC *pAC, SK_IOC Ioc);
44extern void SkHwtStart(SK_AC *pAC, SK_IOC Ioc, SK_U32 Time);
45extern void SkHwtStop(SK_AC *pAC, SK_IOC Ioc);
46extern SK_U32 SkHwtRead(SK_AC *pAC, SK_IOC Ioc);
47extern void SkHwtIsr(SK_AC *pAC, SK_IOC Ioc);
48#endif /* _SKGEHWT_H_ */
diff --git a/drivers/net/sk98lin/h/skgei2c.h b/drivers/net/sk98lin/h/skgei2c.h
deleted file mode 100644
index d9b6f6d8dfe2..000000000000
--- a/drivers/net/sk98lin/h/skgei2c.h
+++ /dev/null
@@ -1,210 +0,0 @@
1/******************************************************************************
2 *
3 * Name: skgei2c.h
4 * Project: Gigabit Ethernet Adapters, TWSI-Module
5 * Version: $Revision: 1.25 $
6 * Date: $Date: 2003/10/20 09:06:05 $
7 * Purpose: Special defines for TWSI
8 *
9 ******************************************************************************/
10
11/******************************************************************************
12 *
13 * (C)Copyright 1998-2002 SysKonnect.
14 * (C)Copyright 2002-2003 Marvell.
15 *
16 * This program is free software; you can redistribute it and/or modify
17 * it under the terms of the GNU General Public License as published by
18 * the Free Software Foundation; either version 2 of the License, or
19 * (at your option) any later version.
20 *
21 * The information in this file is provided "AS IS" without warranty.
22 *
23 ******************************************************************************/
24
25/*
26 * SKGEI2C.H contains all SK-98xx specific defines for the TWSI handling
27 */
28
29#ifndef _INC_SKGEI2C_H_
30#define _INC_SKGEI2C_H_
31
32/*
33 * Macros to access the B2_I2C_CTRL
34 */
35#define SK_I2C_CTL(IoC, flag, dev, dev_size, reg, burst) \
36 SK_OUT32(IoC, B2_I2C_CTRL,\
37 (flag ? 0x80000000UL : 0x0L) | \
38 (((SK_U32)reg << 16) & I2C_ADDR) | \
39 (((SK_U32)dev << 9) & I2C_DEV_SEL) | \
40 (dev_size & I2C_DEV_SIZE) | \
41 ((burst << 4) & I2C_BURST_LEN))
42
43#define SK_I2C_STOP(IoC) { \
44 SK_U32 I2cCtrl; \
45 SK_IN32(IoC, B2_I2C_CTRL, &I2cCtrl); \
46 SK_OUT32(IoC, B2_I2C_CTRL, I2cCtrl | I2C_STOP); \
47}
48
49#define SK_I2C_GET_CTL(IoC, pI2cCtrl) SK_IN32(IoC, B2_I2C_CTRL, pI2cCtrl)
50
51/*
52 * Macros to access the TWSI SW Registers
53 */
54#define SK_I2C_SET_BIT(IoC, SetBits) { \
55 SK_U8 OrgBits; \
56 SK_IN8(IoC, B2_I2C_SW, &OrgBits); \
57 SK_OUT8(IoC, B2_I2C_SW, OrgBits | (SK_U8)(SetBits)); \
58}
59
60#define SK_I2C_CLR_BIT(IoC, ClrBits) { \
61 SK_U8 OrgBits; \
62 SK_IN8(IoC, B2_I2C_SW, &OrgBits); \
63 SK_OUT8(IoC, B2_I2C_SW, OrgBits & ~((SK_U8)(ClrBits))); \
64}
65
66#define SK_I2C_GET_SW(IoC, pI2cSw) SK_IN8(IoC, B2_I2C_SW, pI2cSw)
67
68/*
69 * define the possible sensor states
70 */
71#define SK_SEN_IDLE 0 /* Idle: sensor not read */
72#define SK_SEN_VALUE 1 /* Value Read cycle */
73#define SK_SEN_VALEXT 2 /* Extended Value Read cycle */
74
75/*
76 * Conversion factor to convert read Voltage sensor to milli Volt
77 * Conversion factor to convert read Temperature sensor to 10th degree Celsius
78 */
79#define SK_LM80_VT_LSB 22 /* 22mV LSB resolution */
80#define SK_LM80_TEMP_LSB 10 /* 1 degree LSB resolution */
81#define SK_LM80_TEMPEXT_LSB 5 /* 0.5 degree LSB resolution for ext. val. */
82
83/*
84 * formula: counter = (22500*60)/(rpm * divisor * pulses/2)
85 * assuming: 6500rpm, 4 pulses, divisor 1
86 */
87#define SK_LM80_FAN_FAKTOR ((22500L*60)/(1*2))
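
/*
 * Worked example with the values assumed above (6500 rpm, 4 pulses,
 * divisor 1): counter = (22500 * 60) / (6500 * 1 * 4/2)
 *                     = 1350000 / 13000 ~= 104.
 * SK_LM80_FAN_FAKTOR keeps only the rpm independent part,
 * (22500 * 60) / (1 * 2) = 675000, so dividing it by the fan speed in
 * rpm is expected to yield the counter value (675000 / 6500 ~= 104).
 */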
88
89/*
90 * Define sensor management data
91 * Maximum is reached on Genesis copper dual port and Yukon-64
92 * Board specific maximum is in pAC->I2c.MaxSens
93 */
94#define SK_MAX_SENSORS 8 /* maximal no. of installed sensors */
95#define SK_MIN_SENSORS 5 /* minimal no. of installed sensors */
96
97/*
98 * To watch the state machine (SM), the timer is used in two ways
99 * instead of just one as before
100 */
101#define SK_TIMER_WATCH_SM 0 /* Watch the SM to finish in a spec. time */
102#define SK_TIMER_NEW_GAUGING 1 /* Start a new gauging when timer expires */
103
104/*
105 * Defines for the individual thresholds
106 */
107
108/* Temperature sensor */
109#define SK_SEN_TEMP_HIGH_ERR 800 /* Temperature High Err Threshold */
110#define SK_SEN_TEMP_HIGH_WARN 700 /* Temperature High Warn Threshold */
111#define SK_SEN_TEMP_LOW_WARN 100 /* Temperature Low Warn Threshold */
112#define SK_SEN_TEMP_LOW_ERR 0 /* Temperature Low Err Threshold */
113
114/* VCC which should be 5 V */
115#define SK_SEN_PCI_5V_HIGH_ERR 5588 /* Voltage PCI High Err Threshold */
116#define SK_SEN_PCI_5V_HIGH_WARN 5346 /* Voltage PCI High Warn Threshold */
117#define SK_SEN_PCI_5V_LOW_WARN 4664 /* Voltage PCI Low Warn Threshold */
118#define SK_SEN_PCI_5V_LOW_ERR 4422 /* Voltage PCI Low Err Threshold */
119
120/*
121 * VIO may be 5 V or 3.3 V. Initialization takes two parts:
122 * 1. Initialize with the lowest lower limit and the highest upper limit.
123 * 2. After the first value is read, correct the upper or the lower limit to
124 * the appropriate C constant.
125 *
126 * Warning limits are +-5% of the expected voltage.
127 * Error limits are +-10% of the expected voltage.
128 */
129
130/* Bug fix AF: 16.Aug.2001: Correct the init base of LM80 sensor */
131
132#define SK_SEN_PCI_IO_5V_HIGH_ERR 5566 /* + 10% V PCI-IO High Err Threshold */
133#define SK_SEN_PCI_IO_5V_HIGH_WARN 5324 /* + 5% V PCI-IO High Warn Threshold */
134 /* 5000 mVolt */
135#define SK_SEN_PCI_IO_5V_LOW_WARN 4686 /* - 5% V PCI-IO Low Warn Threshold */
136#define SK_SEN_PCI_IO_5V_LOW_ERR 4444 /* - 10% V PCI-IO Low Err Threshold */
137
138#define SK_SEN_PCI_IO_RANGE_LIMITER 4000 /* 4000 mV range delimiter */
139
140/* correction values for the second pass */
141#define SK_SEN_PCI_IO_3V3_HIGH_ERR 3850 /* + 15% V PCI-IO High Err Threshold */
142#define SK_SEN_PCI_IO_3V3_HIGH_WARN 3674 /* + 10% V PCI-IO High Warn Threshold */
143 /* 3300 mVolt */
144#define SK_SEN_PCI_IO_3V3_LOW_WARN 2926 /* - 10% V PCI-IO Low Warn Threshold */
145#define SK_SEN_PCI_IO_3V3_LOW_ERR 2772 /* - 15% V PCI-IO Low Err Threshold */
146
147/*
148 * VDD voltage
149 */
150#define SK_SEN_VDD_HIGH_ERR 3630 /* Voltage ASIC High Err Threshold */
151#define SK_SEN_VDD_HIGH_WARN 3476 /* Voltage ASIC High Warn Threshold */
152#define SK_SEN_VDD_LOW_WARN 3146 /* Voltage ASIC Low Warn Threshold */
153#define SK_SEN_VDD_LOW_ERR 2970 /* Voltage ASIC Low Err Threshold */
154
155/*
156 * PHY PLL 3V3 voltage
157 */
158#define SK_SEN_PLL_3V3_HIGH_ERR 3630 /* Voltage PMA High Err Threshold */
159#define SK_SEN_PLL_3V3_HIGH_WARN 3476 /* Voltage PMA High Warn Threshold */
160#define SK_SEN_PLL_3V3_LOW_WARN 3146 /* Voltage PMA Low Warn Threshold */
161#define SK_SEN_PLL_3V3_LOW_ERR 2970 /* Voltage PMA Low Err Threshold */
162
163/*
164 * VAUX (YUKON only)
165 */
166#define SK_SEN_VAUX_3V3_HIGH_ERR 3630 /* Voltage VAUX High Err Threshold */
167#define SK_SEN_VAUX_3V3_HIGH_WARN 3476 /* Voltage VAUX High Warn Threshold */
168#define SK_SEN_VAUX_3V3_LOW_WARN 3146 /* Voltage VAUX Low Warn Threshold */
169#define SK_SEN_VAUX_3V3_LOW_ERR 2970 /* Voltage VAUX Low Err Threshold */
170#define SK_SEN_VAUX_0V_WARN_ERR 0 /* if VAUX not present */
171#define SK_SEN_VAUX_RANGE_LIMITER 1000 /* 1000 mV range delimiter */
172
173/*
174 * PHY 2V5 voltage
175 */
176#define SK_SEN_PHY_2V5_HIGH_ERR 2750 /* Voltage PHY High Err Threshold */
177#define SK_SEN_PHY_2V5_HIGH_WARN 2640 /* Voltage PHY High Warn Threshold */
178#define SK_SEN_PHY_2V5_LOW_WARN 2376 /* Voltage PHY Low Warn Threshold */
179#define SK_SEN_PHY_2V5_LOW_ERR 2222 /* Voltage PHY Low Err Threshold */
180
181/*
182 * ASIC Core 1V5 voltage (YUKON only)
183 */
184#define SK_SEN_CORE_1V5_HIGH_ERR 1650 /* Voltage ASIC Core High Err Threshold */
185#define SK_SEN_CORE_1V5_HIGH_WARN 1575 /* Voltage ASIC Core High Warn Threshold */
186#define SK_SEN_CORE_1V5_LOW_WARN 1425 /* Voltage ASIC Core Low Warn Threshold */
187#define SK_SEN_CORE_1V5_LOW_ERR 1350 /* Voltage ASIC Core Low Err Threshold */
188
189/*
190 * FAN 1 speed
191 */
192/* assuming: 6500rpm +-15%, 4 pulses,
193 * warning at: 80 %
194 * error at: 70 %
195 * no upper limit
196 */
197#define SK_SEN_FAN_HIGH_ERR 20000 /* FAN Speed High Err Threshold */
198#define SK_SEN_FAN_HIGH_WARN 20000 /* FAN Speed High Warn Threshold */
199#define SK_SEN_FAN_LOW_WARN 5200 /* FAN Speed Low Warn Threshold */
200#define SK_SEN_FAN_LOW_ERR 4550 /* FAN Speed Low Err Threshold */
201
202/*
203 * Some Voltages need dynamic thresholds
204 */
205#define SK_SEN_DYN_INIT_NONE 0 /* No dynamic init of thresholds */
206#define SK_SEN_DYN_INIT_PCI_IO 10 /* Init PCI-IO with new thresholds */
207#define SK_SEN_DYN_INIT_VAUX 11 /* Init VAUX with new thresholds */
208
209extern int SkLm80ReadSensor(SK_AC *pAC, SK_IOC IoC, SK_SENSOR *pSen);
209#endif /* _INC_SKGEI2C_H_ */
diff --git a/drivers/net/sk98lin/h/skgeinit.h b/drivers/net/sk98lin/h/skgeinit.h
deleted file mode 100644
index 143e635ec24d..000000000000
--- a/drivers/net/sk98lin/h/skgeinit.h
+++ /dev/null
@@ -1,797 +0,0 @@
1/******************************************************************************
2 *
3 * Name: skgeinit.h
4 * Project: Gigabit Ethernet Adapters, Common Modules
5 * Version: $Revision: 1.83 $
6 * Date: $Date: 2003/09/16 14:07:37 $
7 * Purpose: Structures and prototypes for the GE Init Module
8 *
9 ******************************************************************************/
10
11/******************************************************************************
12 *
13 * (C)Copyright 1998-2002 SysKonnect.
14 * (C)Copyright 2002-2003 Marvell.
15 *
16 * This program is free software; you can redistribute it and/or modify
17 * it under the terms of the GNU General Public License as published by
18 * the Free Software Foundation; either version 2 of the License, or
19 * (at your option) any later version.
20 *
21 * The information in this file is provided "AS IS" without warranty.
22 *
23 ******************************************************************************/
24
25#ifndef __INC_SKGEINIT_H_
26#define __INC_SKGEINIT_H_
27
28#ifdef __cplusplus
29extern "C" {
30#endif /* __cplusplus */
31
32/* defines ********************************************************************/
33
34#define SK_TEST_VAL 0x11335577UL
35
36/* modifying Link LED behaviour (used with SkGeLinkLED()) */
37#define SK_LNK_OFF LED_OFF
38#define SK_LNK_ON (LED_ON | LED_BLK_OFF | LED_SYNC_OFF)
39#define SK_LNK_BLINK (LED_ON | LED_BLK_ON | LED_SYNC_ON)
40#define SK_LNK_PERM (LED_ON | LED_BLK_OFF | LED_SYNC_ON)
41#define SK_LNK_TST (LED_ON | LED_BLK_ON | LED_SYNC_OFF)
42
43/* parameter 'Mode' when calling SK_HWAC_LINK_LED() */
44#define SK_LED_OFF LED_OFF
45#define SK_LED_ACTIVE (LED_ON | LED_BLK_OFF | LED_SYNC_OFF)
46#define SK_LED_STANDBY (LED_ON | LED_BLK_ON | LED_SYNC_OFF)
47
48/* addressing LED Registers in SkGeXmitLED() */
49#define XMIT_LED_INI 0
50#define XMIT_LED_CNT (RX_LED_VAL - RX_LED_INI)
51#define XMIT_LED_CTRL (RX_LED_CTRL- RX_LED_INI)
52#define XMIT_LED_TST (RX_LED_TST - RX_LED_INI)
53
54/* parameter 'Mode' when calling SkGeXmitLED() */
55#define SK_LED_DIS 0
56#define SK_LED_ENA 1
57#define SK_LED_TST 2
58
59/* Counter and Timer constants, for a host clock of 62.5 MHz */
60#define SK_XMIT_DUR 0x002faf08UL /* 50 ms */
61#define SK_BLK_DUR 0x01dcd650UL /* 500 ms */
62
63#define SK_DPOLL_DEF 0x00ee6b28UL /* 250 ms at 62.5 MHz */
64
65#define SK_DPOLL_MAX 0x00ffffffUL /* 268 ms at 62.5 MHz */
66 /* 215 ms at 78.12 MHz */
67
68#define SK_FACT_62 100 /* is given in percent */
69#define SK_FACT_53 85 /* on GENESIS: 53.12 MHz */
70#define SK_FACT_78 125 /* on YUKON: 78.12 MHz */
71
72/* Timeout values */
73#define SK_MAC_TO_53 72 /* MAC arbiter timeout */
74#define SK_PKT_TO_53 0x2000 /* Packet arbiter timeout */
75#define SK_PKT_TO_MAX 0xffff /* Maximum value */
76#define SK_RI_TO_53 36 /* RAM interface timeout */
77
78#define SK_PHY_ACC_TO 600000 /* PHY access timeout */
79
80/* RAM Buffer High Pause Threshold values */
81#define SK_RB_ULPP ( 8 * 1024) /* Upper Level in kB/8 */
82#define SK_RB_LLPP_S (10 * 1024) /* Lower Level for small Queues */
83#define SK_RB_LLPP_B (16 * 1024) /* Lower Level for big Queues */
84
85#ifndef SK_BMU_RX_WM
86#define SK_BMU_RX_WM 0x600 /* BMU Rx Watermark */
87#endif
88#ifndef SK_BMU_TX_WM
89#define SK_BMU_TX_WM 0x600 /* BMU Tx Watermark */
90#endif
91
92/* XMAC II Rx High Watermark */
93#define SK_XM_RX_HI_WM 0x05aa /* 1450 */
94
95/* XMAC II Tx Threshold */
96#define SK_XM_THR_REDL 0x01fb /* .. for redundant link usage */
97#define SK_XM_THR_SL 0x01fb /* .. for single link adapters */
98#define SK_XM_THR_MULL 0x01fb /* .. for multiple link usage */
99#define SK_XM_THR_JUMBO 0x03fc /* .. for jumbo frame usage */
100
101/* values for GIPortUsage */
102#define SK_RED_LINK 1 /* redundant link usage */
103#define SK_MUL_LINK 2 /* multiple link usage */
104#define SK_JUMBO_LINK 3 /* driver uses jumbo frames */
105
106/* Minimum RAM Buffer Rx Queue Size */
107#define SK_MIN_RXQ_SIZE 16 /* 16 kB */
108
109/* Minimum RAM Buffer Tx Queue Size */
110#define SK_MIN_TXQ_SIZE 16 /* 16 kB */
111
112/* Queue Size units */
113#define QZ_UNITS 0x7
114#define QZ_STEP 8
115
116/* Percentage of queue size from whole memory */
117/* 80 % for receive */
118#define RAM_QUOTA_RX 80L
119/* 0% for sync transfer */
120#define RAM_QUOTA_SYNC 0L
121/* the rest (20%) is taken for async transfer */
122
123/* Get the rounded queue size in Bytes in 8k steps */
124#define ROUND_QUEUE_SIZE(SizeInBytes) \
125 ((((unsigned long) (SizeInBytes) + (QZ_STEP*1024L)-1) / 1024) & \
126 ~(QZ_STEP-1))
127
128/* Get the rounded queue size in KBytes in 8k steps */
129#define ROUND_QUEUE_SIZE_KB(Kilobytes) \
130 ROUND_QUEUE_SIZE((Kilobytes) * 1024L)
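
/*
 * Worked example (illustration only): ROUND_QUEUE_SIZE(20000) evaluates
 * to ((20000 + 8*1024 - 1) / 1024) & ~7 = 27 & ~7 = 24, i.e. a request
 * for 20000 bytes is rounded to the next 8 kB step that holds it (24 kB).
 */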
131
132/* Types of RAM Buffer Queues */
133#define SK_RX_SRAM_Q 1 /* small receive queue */
134#define SK_RX_BRAM_Q 2 /* big receive queue */
135#define SK_TX_RAM_Q 3 /* small or big transmit queue */
136
137/* parameter 'Dir' when calling SkGeStopPort() */
138#define SK_STOP_TX 1 /* Stops the transmit path, resets the XMAC */
139#define SK_STOP_RX 2 /* Stops the receive path */
140#define SK_STOP_ALL 3 /* Stops Rx and Tx path, resets the XMAC */
141
142/* parameter 'RstMode' when calling SkGeStopPort() */
143#define SK_SOFT_RST 1 /* perform a software reset */
144#define SK_HARD_RST 2 /* perform a hardware reset */
145
146/* Init Levels */
147#define SK_INIT_DATA 0 /* Init level 0: init data structures */
148#define SK_INIT_IO 1 /* Init level 1: init with IOs */
149#define SK_INIT_RUN 2 /* Init level 2: init for run time */
150
151/* Link Mode Parameter */
152#define SK_LMODE_HALF 1 /* Half Duplex Mode */
153#define SK_LMODE_FULL 2 /* Full Duplex Mode */
154#define SK_LMODE_AUTOHALF 3 /* AutoHalf Duplex Mode */
155#define SK_LMODE_AUTOFULL 4 /* AutoFull Duplex Mode */
156#define SK_LMODE_AUTOBOTH 5 /* AutoBoth Duplex Mode */
157#define SK_LMODE_AUTOSENSE 6 /* configured mode auto sensing */
158#define SK_LMODE_INDETERMINATED 7 /* indeterminated */
159
160/* Auto-negotiation timeout in 100ms granularity */
161#define SK_AND_MAX_TO 6 /* Wait 600 msec before link comes up */
162
163/* Auto-negotiation error codes */
164#define SK_AND_OK 0 /* no error */
165#define SK_AND_OTHER 1 /* other error than below */
166#define SK_AND_DUP_CAP 2 /* Duplex capabilities error */
167
168
169/* Link Speed Capabilities */
170#define SK_LSPEED_CAP_AUTO (1<<0) /* Automatic resolution */
171#define SK_LSPEED_CAP_10MBPS (1<<1) /* 10 Mbps */
172#define SK_LSPEED_CAP_100MBPS (1<<2) /* 100 Mbps */
173#define SK_LSPEED_CAP_1000MBPS (1<<3) /* 1000 Mbps */
174#define SK_LSPEED_CAP_INDETERMINATED (1<<4) /* indeterminated */
175
176/* Link Speed Parameter */
177#define SK_LSPEED_AUTO 1 /* Automatic resolution */
178#define SK_LSPEED_10MBPS 2 /* 10 Mbps */
179#define SK_LSPEED_100MBPS 3 /* 100 Mbps */
180#define SK_LSPEED_1000MBPS 4 /* 1000 Mbps */
181#define SK_LSPEED_INDETERMINATED 5 /* indeterminated */
182
183/* Link Speed Current State */
184#define SK_LSPEED_STAT_UNKNOWN 1
185#define SK_LSPEED_STAT_10MBPS 2
186#define SK_LSPEED_STAT_100MBPS 3
187#define SK_LSPEED_STAT_1000MBPS 4
188#define SK_LSPEED_STAT_INDETERMINATED 5
189
190
191/* Link Capability Parameter */
192#define SK_LMODE_CAP_HALF (1<<0) /* Half Duplex Mode */
193#define SK_LMODE_CAP_FULL (1<<1) /* Full Duplex Mode */
194#define SK_LMODE_CAP_AUTOHALF (1<<2) /* AutoHalf Duplex Mode */
195#define SK_LMODE_CAP_AUTOFULL (1<<3) /* AutoFull Duplex Mode */
196#define SK_LMODE_CAP_INDETERMINATED (1<<4) /* indeterminated */
197
198/* Link Mode Current State */
199#define SK_LMODE_STAT_UNKNOWN 1 /* Unknown Duplex Mode */
200#define SK_LMODE_STAT_HALF 2 /* Half Duplex Mode */
201#define SK_LMODE_STAT_FULL 3 /* Full Duplex Mode */
202#define SK_LMODE_STAT_AUTOHALF 4 /* Half Duplex Mode obtained by Auto-Neg */
203#define SK_LMODE_STAT_AUTOFULL 5 /* Full Duplex Mode obtained by Auto-Neg */
204#define SK_LMODE_STAT_INDETERMINATED 6 /* indeterminated */
205
206/* Flow Control Mode Parameter (and capabilities) */
207#define SK_FLOW_MODE_NONE 1 /* No Flow-Control */
208#define SK_FLOW_MODE_LOC_SEND 2 /* Local station sends PAUSE */
209#define SK_FLOW_MODE_SYMMETRIC 3 /* Both stations may send PAUSE */
210#define SK_FLOW_MODE_SYM_OR_REM 4 /* Both stations may send PAUSE or
211 * just the remote station may send PAUSE
212 */
213#define SK_FLOW_MODE_INDETERMINATED 5 /* indeterminated */
214
215/* Flow Control Status Parameter */
216#define SK_FLOW_STAT_NONE 1 /* No Flow Control */
217#define SK_FLOW_STAT_REM_SEND 2 /* Remote Station sends PAUSE */
218#define SK_FLOW_STAT_LOC_SEND 3 /* Local station sends PAUSE */
219#define SK_FLOW_STAT_SYMMETRIC 4 /* Both station may send PAUSE */
220#define SK_FLOW_STAT_INDETERMINATED 5 /* indeterminated */
221
222/* Master/Slave Mode Capabilities */
223#define SK_MS_CAP_AUTO (1<<0) /* Automatic resolution */
224#define SK_MS_CAP_MASTER (1<<1) /* This station is master */
225#define SK_MS_CAP_SLAVE (1<<2) /* This station is slave */
226#define SK_MS_CAP_INDETERMINATED (1<<3) /* indeterminated */
227
228/* Set Master/Slave Mode Parameter (and capabilities) */
229#define SK_MS_MODE_AUTO 1 /* Automatic resolution */
230#define SK_MS_MODE_MASTER 2 /* This station is master */
231#define SK_MS_MODE_SLAVE 3 /* This station is slave */
232#define SK_MS_MODE_INDETERMINATED 4 /* indeterminated */
233
234/* Master/Slave Status Parameter */
235#define SK_MS_STAT_UNSET 1 /* The M/S status is not set */
236#define SK_MS_STAT_MASTER 2 /* This station is master */
237#define SK_MS_STAT_SLAVE 3 /* This station is slave */
238#define SK_MS_STAT_FAULT 4 /* M/S resolution failed */
239#define SK_MS_STAT_INDETERMINATED 5 /* indeterminated */
240
241/* parameter 'Mode' when calling SkXmSetRxCmd() */
242#define SK_STRIP_FCS_ON (1<<0) /* Enable FCS stripping of Rx frames */
243#define SK_STRIP_FCS_OFF (1<<1) /* Disable FCS stripping of Rx frames */
244#define SK_STRIP_PAD_ON (1<<2) /* Enable pad byte stripping of Rx fr */
245#define SK_STRIP_PAD_OFF (1<<3) /* Disable pad byte stripping of Rx fr */
246#define SK_LENERR_OK_ON (1<<4) /* Don't chk fr for in range len error */
247#define SK_LENERR_OK_OFF (1<<5) /* Check frames for in range len error */
248#define SK_BIG_PK_OK_ON (1<<6) /* Don't set Rx Error bit for big frames */
249#define SK_BIG_PK_OK_OFF (1<<7) /* Set Rx Error bit for big frames */
250#define SK_SELF_RX_ON (1<<8) /* Enable Rx of own packets */
251#define SK_SELF_RX_OFF (1<<9) /* Disable Rx of own packets */
252
253/* parameter 'Para' when calling SkMacSetRxTxEn() */
254#define SK_MAC_LOOPB_ON (1<<0) /* Enable MAC Loopback Mode */
255#define SK_MAC_LOOPB_OFF (1<<1) /* Disable MAC Loopback Mode */
256#define SK_PHY_LOOPB_ON (1<<2) /* Enable PHY Loopback Mode */
257#define SK_PHY_LOOPB_OFF (1<<3) /* Disable PHY Loopback Mode */
258#define SK_PHY_FULLD_ON (1<<4) /* Enable GMII Full Duplex */
259#define SK_PHY_FULLD_OFF (1<<5) /* Disable GMII Full Duplex */
260
261/* States of PState */
262#define SK_PRT_RESET 0 /* the port is reset */
263#define SK_PRT_STOP 1 /* the port is stopped (similar to SW reset) */
264#define SK_PRT_INIT 2 /* the port is initialized */
265#define SK_PRT_RUN 3 /* the port has an active link */
266
267/* PHY power down modes */
268#define PHY_PM_OPERATIONAL_MODE 0 /* PHY operational mode */
269#define PHY_PM_DEEP_SLEEP 1 /* coma mode --> minimal power */
270#define PHY_PM_IEEE_POWER_DOWN 2 /* IEEE 22.2.4.1.5 compl. power down */
271#define PHY_PM_ENERGY_DETECT 3 /* energy detect */
272#define PHY_PM_ENERGY_DETECT_PLUS 4 /* energy detect plus */
273
274/* Default receive frame limit for Workaround of XMAC Errata */
275#define SK_DEF_RX_WA_LIM SK_CONSTU64(100)
276
277/* values for GILedBlinkCtrl (LED Blink Control) */
278#define SK_ACT_LED_BLINK (1<<0) /* Active LED blinking */
279#define SK_DUP_LED_NORMAL (1<<1) /* Duplex LED normal */
280#define SK_LED_LINK100_ON (1<<2) /* Link 100M LED on */
281
282/* Link Partner Status */
283#define SK_LIPA_UNKNOWN 0 /* Link partner is in unknown state */
284#define SK_LIPA_MANUAL 1 /* Link partner is in detected manual state */
285#define SK_LIPA_AUTO 2 /* Link partner is in auto-negotiation state */
286
287/* Maximum Restarts before restart is ignored (3Com WA) */
288#define SK_MAX_LRESTART 3 /* Max. 3 times the link is restarted */
289
290/* Max. Auto-neg. timeouts before link detection in sense mode is reset */
291#define SK_MAX_ANEG_TO 10 /* Max. 10 times the sense mode is reset */
292
293/* structures *****************************************************************/
294
295/*
296 * MAC specific functions
297 */
298typedef struct s_GeMacFunc {
299 int (*pFnMacUpdateStats)(SK_AC *pAC, SK_IOC IoC, unsigned int Port);
300 int (*pFnMacStatistic)(SK_AC *pAC, SK_IOC IoC, unsigned int Port,
301 SK_U16 StatAddr, SK_U32 SK_FAR *pVal);
302 int (*pFnMacResetCounter)(SK_AC *pAC, SK_IOC IoC, unsigned int Port);
303 int (*pFnMacOverflow)(SK_AC *pAC, SK_IOC IoC, unsigned int Port,
304 SK_U16 IStatus, SK_U64 SK_FAR *pVal);
305} SK_GEMACFUNC;
306
307/*
308 * Port Structure
309 */
310typedef struct s_GePort {
311#ifndef SK_DIAG
312 SK_TIMER PWaTimer; /* Workaround Timer */
313 SK_TIMER HalfDupChkTimer;
314#endif /* SK_DIAG */
315 SK_U32 PPrevShorts; /* Previous Short Counter checking */
316 SK_U32 PPrevFcs; /* Previous FCS Error Counter checking */
317 SK_U64 PPrevRx; /* Previous RxOk Counter checking */
318 SK_U64 PRxLim; /* Previous RxOk Counter checking */
319 SK_U64 LastOctets; /* For half duplex hang check */
320 int PLinkResCt; /* Link Restart Counter */
321 int PAutoNegTimeOut;/* Auto-negotiation timeout current value */
322 int PAutoNegTOCt; /* Auto-negotiation Timeout Counter */
323 int PRxQSize; /* Port Rx Queue Size in kB */
324 int PXSQSize; /* Port Synchronous Transmit Queue Size in kB */
325 int PXAQSize; /* Port Asynchronous Transmit Queue Size in kB */
326 SK_U32 PRxQRamStart; /* Receive Queue RAM Buffer Start Address */
327 SK_U32 PRxQRamEnd; /* Receive Queue RAM Buffer End Address */
328 SK_U32 PXsQRamStart; /* Sync Tx Queue RAM Buffer Start Address */
329 SK_U32 PXsQRamEnd; /* Sync Tx Queue RAM Buffer End Address */
330 SK_U32 PXaQRamStart; /* Async Tx Queue RAM Buffer Start Address */
331 SK_U32 PXaQRamEnd; /* Async Tx Queue RAM Buffer End Address */
332 SK_U32 PRxOverCnt; /* Receive Overflow Counter */
333 int PRxQOff; /* Rx Queue Address Offset */
334 int PXsQOff; /* Synchronous Tx Queue Address Offset */
335 int PXaQOff; /* Asynchronous Tx Queue Address Offset */
336 int PhyType; /* PHY used on this port */
337 int PState; /* Port status (reset, stop, init, run) */
338 SK_U16 PhyId1; /* PHY Id1 on this port */
339 SK_U16 PhyAddr; /* MDIO/MDC PHY address */
340 SK_U16 PIsave; /* Saved Interrupt status word */
341 SK_U16 PSsave; /* Saved PHY status word */
342	SK_U16	PGmANegAdv;	/* Saved GPhy AutoNegAdvertisement register */
343 SK_BOOL PHWLinkUp; /* The hardware Link is up (wiring) */
344 SK_BOOL PLinkBroken; /* Is Link broken ? */
345 SK_BOOL PCheckPar; /* Do we check for parity errors ? */
346 SK_BOOL HalfDupTimerActive;
347 SK_U8 PLinkCap; /* Link Capabilities */
348 SK_U8 PLinkModeConf; /* Link Mode configured */
349 SK_U8 PLinkMode; /* Link Mode currently used */
350 SK_U8 PLinkModeStatus;/* Link Mode Status */
351 SK_U8 PLinkSpeedCap; /* Link Speed Capabilities(10/100/1000 Mbps) */
352 SK_U8 PLinkSpeed; /* configured Link Speed (10/100/1000 Mbps) */
353 SK_U8 PLinkSpeedUsed; /* current Link Speed (10/100/1000 Mbps) */
354 SK_U8 PFlowCtrlCap; /* Flow Control Capabilities */
355 SK_U8 PFlowCtrlMode; /* Flow Control Mode */
356 SK_U8 PFlowCtrlStatus;/* Flow Control Status */
357 SK_U8 PMSCap; /* Master/Slave Capabilities */
358 SK_U8 PMSMode; /* Master/Slave Mode */
359 SK_U8 PMSStatus; /* Master/Slave Status */
360 SK_BOOL PAutoNegFail; /* Auto-negotiation fail flag */
361 SK_U8 PLipaAutoNeg; /* Auto-negotiation possible with Link Partner */
362 SK_U8 PCableLen; /* Cable Length */
363 SK_U8 PMdiPairLen[4]; /* MDI[0..3] Pair Length */
364 SK_U8 PMdiPairSts[4]; /* MDI[0..3] Pair Diagnostic Status */
365 SK_U8 PPhyPowerState; /* PHY current power state */
366 int PMacColThres; /* MAC Collision Threshold */
367 int PMacJamLen; /* MAC Jam length */
368 int PMacJamIpgVal; /* MAC Jam IPG */
369 int PMacJamIpgData; /* MAC IPG Jam to Data */
370 int PMacIpgData; /* MAC Data IPG */
371 SK_BOOL PMacLimit4; /* reset collision counter and backoff algorithm */
372} SK_GEPORT;
373
374/*
375 * Gigabit Ethernet Initialization Struct
376 * (has to be included in the adapter context)
377 */
378typedef struct s_GeInit {
379 int GIChipId; /* Chip Identification Number */
380 int GIChipRev; /* Chip Revision Number */
381 SK_U8 GIPciHwRev; /* PCI HW Revision Number */
382 SK_BOOL GIGenesis; /* Genesis adapter ? */
383 SK_BOOL GIYukon; /* YUKON-A1/Bx chip */
384 SK_BOOL GIYukonLite; /* YUKON-Lite chip */
385 SK_BOOL GICopperType; /* Copper Type adapter ? */
386 SK_BOOL GIPciSlot64; /* 64-bit PCI Slot */
387 SK_BOOL GIPciClock66; /* 66 MHz PCI Clock */
388 SK_BOOL GIVauxAvail; /* VAUX available (YUKON) */
389 SK_BOOL GIYukon32Bit; /* 32-Bit YUKON adapter */
390 SK_U16 GILedBlinkCtrl; /* LED Blink Control */
391 int GIMacsFound; /* Number of MACs found on this adapter */
392 int GIMacType; /* MAC Type used on this adapter */
393 int GIHstClkFact; /* Host Clock Factor (62.5 / HstClk * 100) */
394 int GIPortUsage; /* Driver Port Usage */
395 int GILevel; /* Initialization Level completed */
396 int GIRamSize; /* The RAM size of the adapter in kB */
397 int GIWolOffs; /* WOL Register Offset (HW-Bug in Rev. A) */
398 SK_U32 GIRamOffs; /* RAM Address Offset for addr calculation */
399 SK_U32 GIPollTimerVal; /* Descr. Poll Timer Init Val (HstClk ticks) */
400 SK_U32 GIValIrqMask; /* Value for Interrupt Mask */
401 SK_U32 GITimeStampCnt; /* Time Stamp High Counter (YUKON only) */
402 SK_GEPORT GP[SK_MAX_MACS];/* Port Dependent Information */
403	SK_GEMACFUNC GIFunc;	/* MAC dependent functions */
404} SK_GEINIT;
405
406/*
407 * Error numbers and messages for skxmac2.c and skgeinit.c
408 */
409#define SKERR_HWI_E001 (SK_ERRBASE_HWINIT)
410#define SKERR_HWI_E001MSG "SkXmClrExactAddr() has got illegal parameters"
411#define SKERR_HWI_E002 (SKERR_HWI_E001+1)
412#define SKERR_HWI_E002MSG "SkGeInit(): Level 1 call missing"
413#define SKERR_HWI_E003 (SKERR_HWI_E002+1)
414#define SKERR_HWI_E003MSG "SkGeInit() called with illegal init Level"
415#define SKERR_HWI_E004 (SKERR_HWI_E003+1)
416#define SKERR_HWI_E004MSG "SkGeInitPort(): Queue Size illegal configured"
417#define SKERR_HWI_E005 (SKERR_HWI_E004+1)
418#define SKERR_HWI_E005MSG "SkGeInitPort(): cannot init running ports"
419#define SKERR_HWI_E006 (SKERR_HWI_E005+1)
420#define SKERR_HWI_E006MSG "SkGeMacInit(): PState does not match HW state"
421#define SKERR_HWI_E007 (SKERR_HWI_E006+1)
422#define SKERR_HWI_E007MSG "SkXmInitDupMd() called with invalid Dup Mode"
423#define SKERR_HWI_E008 (SKERR_HWI_E007+1)
424#define SKERR_HWI_E008MSG "SkXmSetRxCmd() called with invalid Mode"
425#define SKERR_HWI_E009 (SKERR_HWI_E008+1)
426#define SKERR_HWI_E009MSG "SkGeCfgSync() called although PXSQSize zero"
427#define SKERR_HWI_E010 (SKERR_HWI_E009+1)
428#define SKERR_HWI_E010MSG "SkGeCfgSync() called with invalid parameters"
429#define SKERR_HWI_E011 (SKERR_HWI_E010+1)
430#define SKERR_HWI_E011MSG "SkGeInitPort(): Receive Queue Size too small"
431#define SKERR_HWI_E012 (SKERR_HWI_E011+1)
432#define SKERR_HWI_E012MSG "SkGeInitPort(): invalid Queue Size specified"
433#define SKERR_HWI_E013 (SKERR_HWI_E012+1)
434#define SKERR_HWI_E013MSG "SkGeInitPort(): cfg changed for running queue"
435#define SKERR_HWI_E014 (SKERR_HWI_E013+1)
436#define SKERR_HWI_E014MSG "SkGeInitPort(): unknown GIPortUsage specified"
437#define SKERR_HWI_E015 (SKERR_HWI_E014+1)
438#define SKERR_HWI_E015MSG "Illegal Link mode parameter"
439#define SKERR_HWI_E016 (SKERR_HWI_E015+1)
440#define SKERR_HWI_E016MSG "Illegal Flow control mode parameter"
441#define SKERR_HWI_E017 (SKERR_HWI_E016+1)
442#define SKERR_HWI_E017MSG "Illegal value specified for GIPollTimerVal"
443#define SKERR_HWI_E018 (SKERR_HWI_E017+1)
444#define SKERR_HWI_E018MSG "FATAL: SkGeStopPort() does not terminate (Tx)"
445#define SKERR_HWI_E019 (SKERR_HWI_E018+1)
446#define SKERR_HWI_E019MSG "Illegal Speed parameter"
447#define SKERR_HWI_E020 (SKERR_HWI_E019+1)
448#define SKERR_HWI_E020MSG "Illegal Master/Slave parameter"
449#define SKERR_HWI_E021 (SKERR_HWI_E020+1)
450#define SKERR_HWI_E021MSG "MacUpdateStats(): cannot update statistic counter"
451#define SKERR_HWI_E022 (SKERR_HWI_E021+1)
452#define SKERR_HWI_E022MSG "MacStatistic(): illegal statistic base address"
453#define SKERR_HWI_E023 (SKERR_HWI_E022+1)
454#define SKERR_HWI_E023MSG "SkGeInitPort(): Transmit Queue Size too small"
455#define SKERR_HWI_E024 (SKERR_HWI_E023+1)
456#define SKERR_HWI_E024MSG "FATAL: SkGeStopPort() does not terminate (Rx)"
457#define SKERR_HWI_E025 (SKERR_HWI_E024+1)
458#define SKERR_HWI_E025MSG ""
459
460/* function prototypes ********************************************************/
461
462#ifndef SK_KR_PROTO
463
464/*
465 * public functions in skgeinit.c
466 */
467extern void SkGePollTxD(
468 SK_AC *pAC,
469 SK_IOC IoC,
470 int Port,
471 SK_BOOL PollTxD);
472
473extern void SkGeYellowLED(
474 SK_AC *pAC,
475 SK_IOC IoC,
476 int State);
477
478extern int SkGeCfgSync(
479 SK_AC *pAC,
480 SK_IOC IoC,
481 int Port,
482 SK_U32 IntTime,
483 SK_U32 LimCount,
484 int SyncMode);
485
486extern void SkGeLoadLnkSyncCnt(
487 SK_AC *pAC,
488 SK_IOC IoC,
489 int Port,
490 SK_U32 CntVal);
491
492extern void SkGeStopPort(
493 SK_AC *pAC,
494 SK_IOC IoC,
495 int Port,
496 int Dir,
497 int RstMode);
498
499extern int SkGeInit(
500 SK_AC *pAC,
501 SK_IOC IoC,
502 int Level);
503
504extern void SkGeDeInit(
505 SK_AC *pAC,
506 SK_IOC IoC);
507
508extern int SkGeInitPort(
509 SK_AC *pAC,
510 SK_IOC IoC,
511 int Port);
512
513extern void SkGeXmitLED(
514 SK_AC *pAC,
515 SK_IOC IoC,
516 int Led,
517 int Mode);
518
519extern int SkGeInitAssignRamToQueues(
520 SK_AC *pAC,
521 int ActivePort,
522 SK_BOOL DualNet);
523
524/*
525 * public functions in skxmac2.c
526 */
527extern void SkMacRxTxDisable(
528 SK_AC *pAC,
529 SK_IOC IoC,
530 int Port);
531
532extern void SkMacSoftRst(
533 SK_AC *pAC,
534 SK_IOC IoC,
535 int Port);
536
537extern void SkMacHardRst(
538 SK_AC *pAC,
539 SK_IOC IoC,
540 int Port);
541
542extern void SkXmInitMac(
543 SK_AC *pAC,
544 SK_IOC IoC,
545 int Port);
546
547extern void SkGmInitMac(
548 SK_AC *pAC,
549 SK_IOC IoC,
550 int Port);
551
552extern void SkMacInitPhy(
553 SK_AC *pAC,
554 SK_IOC IoC,
555 int Port,
556 SK_BOOL DoLoop);
557
558extern void SkMacIrqDisable(
559 SK_AC *pAC,
560 SK_IOC IoC,
561 int Port);
562
563extern void SkMacFlushTxFifo(
564 SK_AC *pAC,
565 SK_IOC IoC,
566 int Port);
567
568extern void SkMacIrq(
569 SK_AC *pAC,
570 SK_IOC IoC,
571 int Port);
572
573extern int SkMacAutoNegDone(
574 SK_AC *pAC,
575 SK_IOC IoC,
576 int Port);
577
578extern void SkMacAutoNegLipaPhy(
579 SK_AC *pAC,
580 SK_IOC IoC,
581 int Port,
582 SK_U16 IStatus);
583
584extern int SkMacRxTxEnable(
585 SK_AC *pAC,
586 SK_IOC IoC,
587 int Port);
588
589extern void SkMacPromiscMode(
590 SK_AC *pAC,
591 SK_IOC IoC,
592 int Port,
593 SK_BOOL Enable);
594
595extern void SkMacHashing(
596 SK_AC *pAC,
597 SK_IOC IoC,
598 int Port,
599 SK_BOOL Enable);
600
601extern void SkXmPhyRead(
602 SK_AC *pAC,
603 SK_IOC IoC,
604 int Port,
605 int Addr,
606 SK_U16 SK_FAR *pVal);
607
608extern void SkXmPhyWrite(
609 SK_AC *pAC,
610 SK_IOC IoC,
611 int Port,
612 int Addr,
613 SK_U16 Val);
614
615extern void SkGmPhyRead(
616 SK_AC *pAC,
617 SK_IOC IoC,
618 int Port,
619 int Addr,
620 SK_U16 SK_FAR *pVal);
621
622extern void SkGmPhyWrite(
623 SK_AC *pAC,
624 SK_IOC IoC,
625 int Port,
626 int Addr,
627 SK_U16 Val);
628
629extern void SkXmClrExactAddr(
630 SK_AC *pAC,
631 SK_IOC IoC,
632 int Port,
633 int StartNum,
634 int StopNum);
635
636extern void SkXmAutoNegLipaXmac(
637 SK_AC *pAC,
638 SK_IOC IoC,
639 int Port,
640 SK_U16 IStatus);
641
642extern int SkXmUpdateStats(
643 SK_AC *pAC,
644 SK_IOC IoC,
645 unsigned int Port);
646
647extern int SkGmUpdateStats(
648 SK_AC *pAC,
649 SK_IOC IoC,
650 unsigned int Port);
651
652extern int SkXmMacStatistic(
653 SK_AC *pAC,
654 SK_IOC IoC,
655 unsigned int Port,
656 SK_U16 StatAddr,
657 SK_U32 SK_FAR *pVal);
658
659extern int SkGmMacStatistic(
660 SK_AC *pAC,
661 SK_IOC IoC,
662 unsigned int Port,
663 SK_U16 StatAddr,
664 SK_U32 SK_FAR *pVal);
665
666extern int SkXmResetCounter(
667 SK_AC *pAC,
668 SK_IOC IoC,
669 unsigned int Port);
670
671extern int SkGmResetCounter(
672 SK_AC *pAC,
673 SK_IOC IoC,
674 unsigned int Port);
675
676extern int SkXmOverflowStatus(
677 SK_AC *pAC,
678 SK_IOC IoC,
679 unsigned int Port,
680 SK_U16 IStatus,
681 SK_U64 SK_FAR *pStatus);
682
683extern int SkGmOverflowStatus(
684 SK_AC *pAC,
685 SK_IOC IoC,
686 unsigned int Port,
687 SK_U16 MacStatus,
688 SK_U64 SK_FAR *pStatus);
689
690extern int SkGmCableDiagStatus(
691 SK_AC *pAC,
692 SK_IOC IoC,
693 int Port,
694 SK_BOOL StartTest);
695
696#ifdef SK_DIAG
697extern void SkGePhyRead(
698 SK_AC *pAC,
699 SK_IOC IoC,
700 int Port,
701 int Addr,
702 SK_U16 *pVal);
703
704extern void SkGePhyWrite(
705 SK_AC *pAC,
706 SK_IOC IoC,
707 int Port,
708 int Addr,
709 SK_U16 Val);
710
711extern void SkMacSetRxCmd(
712 SK_AC *pAC,
713 SK_IOC IoC,
714 int Port,
715 int Mode);
716extern void SkMacCrcGener(
717 SK_AC *pAC,
718 SK_IOC IoC,
719 int Port,
720 SK_BOOL Enable);
721extern void SkMacTimeStamp(
722 SK_AC *pAC,
723 SK_IOC IoC,
724 int Port,
725 SK_BOOL Enable);
726extern void SkXmSendCont(
727 SK_AC *pAC,
728 SK_IOC IoC,
729 int Port,
730 SK_BOOL Enable);
731#endif /* SK_DIAG */
732
733#else /* SK_KR_PROTO */
734
735/*
736 * public functions in skgeinit.c
737 */
738extern void SkGePollTxD();
739extern void SkGeYellowLED();
740extern int SkGeCfgSync();
741extern void SkGeLoadLnkSyncCnt();
742extern void SkGeStopPort();
743extern int SkGeInit();
744extern void SkGeDeInit();
745extern int SkGeInitPort();
746extern void SkGeXmitLED();
747extern int SkGeInitAssignRamToQueues();
748
749/*
750 * public functions in skxmac2.c
751 */
752extern void SkMacRxTxDisable();
753extern void SkMacSoftRst();
754extern void SkMacHardRst();
755extern void SkMacInitPhy();
756extern int SkMacRxTxEnable();
757extern void SkMacPromiscMode();
758extern void SkMacHashing();
759extern void SkMacIrqDisable();
760extern void SkMacFlushTxFifo();
761extern void SkMacIrq();
762extern int SkMacAutoNegDone();
763extern void SkMacAutoNegLipaPhy();
764extern void SkXmInitMac();
765extern void SkXmPhyRead();
766extern void SkXmPhyWrite();
767extern void SkGmInitMac();
768extern void SkGmPhyRead();
769extern void SkGmPhyWrite();
770extern void SkXmClrExactAddr();
771extern void SkXmAutoNegLipaXmac();
772extern int SkXmUpdateStats();
773extern int SkGmUpdateStats();
774extern int SkXmMacStatistic();
775extern int SkGmMacStatistic();
776extern int SkXmResetCounter();
777extern int SkGmResetCounter();
778extern int SkXmOverflowStatus();
779extern int SkGmOverflowStatus();
780extern int SkGmCableDiagStatus();
781
782#ifdef SK_DIAG
783extern void SkGePhyRead();
784extern void SkGePhyWrite();
785extern void SkMacSetRxCmd();
786extern void SkMacCrcGener();
787extern void SkMacTimeStamp();
788extern void SkXmSendCont();
789#endif /* SK_DIAG */
790
791#endif /* SK_KR_PROTO */
792
793#ifdef __cplusplus
794}
795#endif /* __cplusplus */
796
797#endif /* __INC_SKGEINIT_H_ */
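
For orientation, a hypothetical bring-up/teardown sketch based only on the skgeinit.c prototypes above. The SK_AC/SK_IOC stand-ins, the level values, and the call order are assumptions, stubbed out so the sequence compiles on its own; it is not the driver's actual usage.

#include <stdio.h>

typedef struct { int dummy; } SK_AC;   /* stand-in for the adapter context */
typedef int SK_IOC;                    /* stand-in for the I/O context */

static int SkGeInit(SK_AC *pAC, SK_IOC IoC, int Level)
{
	(void)pAC; (void)IoC;
	printf("init, level %d\n", Level);
	return 0;
}

static int SkGeInitPort(SK_AC *pAC, SK_IOC IoC, int Port)
{
	(void)pAC; (void)IoC;
	printf("init port %d\n", Port);
	return 0;
}

static void SkGeStopPort(SK_AC *pAC, SK_IOC IoC, int Port, int Dir, int RstMode)
{
	(void)pAC; (void)IoC; (void)Dir; (void)RstMode;
	printf("stop port %d\n", Port);
}

static void SkGeDeInit(SK_AC *pAC, SK_IOC IoC)
{
	(void)pAC; (void)IoC;
	printf("deinit\n");
}

int main(void)
{
	SK_AC ac = { 0 };
	SK_IOC ioc = 0;
	int port;

	/* Assumed order: raise the init level stepwise, then bring up each port. */
	SkGeInit(&ac, ioc, 1);
	SkGeInit(&ac, ioc, 2);
	for (port = 0; port < 2; port++)
		SkGeInitPort(&ac, ioc, port);

	/* Teardown mirrors bring-up: stop the ports, then release common parts. */
	for (port = 0; port < 2; port++)
		SkGeStopPort(&ac, ioc, port, 0 /* Dir */, 0 /* RstMode */);
	SkGeDeInit(&ac, ioc);
	return 0;
}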
diff --git a/drivers/net/sk98lin/h/skgepnm2.h b/drivers/net/sk98lin/h/skgepnm2.h
deleted file mode 100644
index ddd304f1a48b..000000000000
--- a/drivers/net/sk98lin/h/skgepnm2.h
+++ /dev/null
@@ -1,334 +0,0 @@
1/*****************************************************************************
2 *
3 * Name: skgepnm2.h
4 * Project: GEnesis, PCI Gigabit Ethernet Adapter
5 * Version: $Revision: 1.36 $
6 * Date: $Date: 2003/05/23 12:45:13 $
7 * Purpose: Defines for Private Network Management Interface
8 *
9 ****************************************************************************/
10
11/******************************************************************************
12 *
13 * (C)Copyright 1998-2002 SysKonnect GmbH.
14 * (C)Copyright 2002-2003 Marvell.
15 *
16 * This program is free software; you can redistribute it and/or modify
17 * it under the terms of the GNU General Public License as published by
18 * the Free Software Foundation; either version 2 of the License, or
19 * (at your option) any later version.
20 *
21 * The information in this file is provided "AS IS" without warranty.
22 *
23 ******************************************************************************/
24
25#ifndef _SKGEPNM2_H_
26#define _SKGEPNM2_H_
27
28/*
29 * General definitions
30 */
31#define SK_PNMI_CHIPSET_XMAC 1 /* XMAC11800FP */
32#define SK_PNMI_CHIPSET_YUKON 2 /* YUKON */
33
34#define SK_PNMI_BUS_PCI 1 /* PCI bus*/
35
36/*
37 * Actions
38 */
39#define SK_PNMI_ACT_IDLE 1
40#define SK_PNMI_ACT_RESET 2
41#define SK_PNMI_ACT_SELFTEST 3
42#define SK_PNMI_ACT_RESETCNT 4
43
44/*
45 * VPD related defines
46 */
47
48#define SK_PNMI_VPD_RW 1
49#define SK_PNMI_VPD_RO 2
50
51#define SK_PNMI_VPD_OK 0
52#define SK_PNMI_VPD_NOTFOUND 1
53#define SK_PNMI_VPD_CUT 2
54#define SK_PNMI_VPD_TIMEOUT 3
55#define SK_PNMI_VPD_FULL 4
56#define SK_PNMI_VPD_NOWRITE 5
57#define SK_PNMI_VPD_FATAL 6
58
59#define SK_PNMI_VPD_IGNORE 0
60#define SK_PNMI_VPD_CREATE 1
61#define SK_PNMI_VPD_DELETE 2
62
63
64/*
65 * RLMT related defines
66 */
67#define SK_PNMI_DEF_RLMT_CHG_THRES 240 /* 4 changes per minute */
68
69
70/*
71 * VCT internal status values
72 */
73#define SK_PNMI_VCT_PENDING 32
74#define SK_PNMI_VCT_TEST_DONE 64
75#define SK_PNMI_VCT_LINK 128
76
77/*
78 * Internal table definitions
79 */
80#define SK_PNMI_GET 0
81#define SK_PNMI_PRESET 1
82#define SK_PNMI_SET 2
83
84#define SK_PNMI_RO 0
85#define SK_PNMI_RW 1
86#define SK_PNMI_WO 2
87
88typedef struct s_OidTabEntry {
89 SK_U32 Id;
90 SK_U32 InstanceNo;
91 unsigned int StructSize;
92 unsigned int Offset;
93 int Access;
94 int (* Func)(SK_AC *pAc, SK_IOC pIo, int action,
95 SK_U32 Id, char* pBuf, unsigned int* pLen,
96 SK_U32 Instance, unsigned int TableIndex,
97 SK_U32 NetNumber);
98 SK_U16 Param;
99} SK_PNMI_TAB_ENTRY;
100
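
SK_PNMI_TAB_ENTRY describes one row of an OID dispatch table: an OID identifier, its access rights, and a handler callback. Below is a minimal standalone sketch of that pattern; the simplified types, the handler, and the single-entry table are illustrative stand-ins rather than the real sk98lin definitions (only the OID value and the MDB version number are taken from the headers in this patch).

#include <stdio.h>
#include <string.h>

enum { ACC_RO = 0, ACC_RW = 1 };

typedef int (*oid_func_t)(unsigned int Oid, int Action,
                          char *pBuf, unsigned int *pLen);

struct oid_tab_entry {
	unsigned int Id;        /* OID */
	int          Access;    /* ACC_RO / ACC_RW */
	oid_func_t   Func;      /* handler */
};

/* Illustrative handler: returns the MDB version for a "get" request. */
static int GetMdbVersion(unsigned int Oid, int Action, char *pBuf, unsigned int *pLen)
{
	unsigned int Version = 0x00030001;  /* value of SK_PNMI_MDB_VERSION (3.1) */

	(void)Oid; (void)Action;
	if (*pLen < sizeof(Version))
		return 1;                   /* caller's buffer too short */
	memcpy(pBuf, &Version, sizeof(Version));
	*pLen = sizeof(Version);
	return 0;
}

static const struct oid_tab_entry OidTable[] = {
	{ 0xFF010100u /* OID_SKGE_MDB_VERSION */, ACC_RO, GetMdbVersion },
};

int main(void)
{
	unsigned int Value = 0;
	unsigned int Len = sizeof(Value);

	/* Real code would look the OID up in the table; one entry is enough here. */
	if (OidTable[0].Func(OidTable[0].Id, 0 /* get */, (char *)&Value, &Len) == 0)
		printf("MDB version: 0x%08x\n", Value);
	return 0;
}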
101
102/*
103 * Trap lengths
104 */
105#define SK_PNMI_TRAP_SIMPLE_LEN 17
106#define SK_PNMI_TRAP_SENSOR_LEN_BASE 46
107#define SK_PNMI_TRAP_RLMT_CHANGE_LEN 23
108#define SK_PNMI_TRAP_RLMT_PORT_LEN 23
109
110/*
111 * Number of MAC types supported
112 */
113#define SK_PNMI_MAC_TYPES (SK_MAC_GMAC + 1)
114
115/*
116 * MAC statistic data list (overall set for MAC types used)
117 */
118enum SK_MACSTATS {
119 SK_PNMI_HTX = 0,
120 SK_PNMI_HTX_OCTET,
121 SK_PNMI_HTX_OCTETHIGH = SK_PNMI_HTX_OCTET,
122 SK_PNMI_HTX_OCTETLOW,
123 SK_PNMI_HTX_BROADCAST,
124 SK_PNMI_HTX_MULTICAST,
125 SK_PNMI_HTX_UNICAST,
126 SK_PNMI_HTX_BURST,
127 SK_PNMI_HTX_PMACC,
128 SK_PNMI_HTX_MACC,
129 SK_PNMI_HTX_COL,
130 SK_PNMI_HTX_SINGLE_COL,
131 SK_PNMI_HTX_MULTI_COL,
132 SK_PNMI_HTX_EXCESS_COL,
133 SK_PNMI_HTX_LATE_COL,
134 SK_PNMI_HTX_DEFFERAL,
135 SK_PNMI_HTX_EXCESS_DEF,
136 SK_PNMI_HTX_UNDERRUN,
137 SK_PNMI_HTX_CARRIER,
138 SK_PNMI_HTX_UTILUNDER,
139 SK_PNMI_HTX_UTILOVER,
140 SK_PNMI_HTX_64,
141 SK_PNMI_HTX_127,
142 SK_PNMI_HTX_255,
143 SK_PNMI_HTX_511,
144 SK_PNMI_HTX_1023,
145 SK_PNMI_HTX_MAX,
146 SK_PNMI_HTX_LONGFRAMES,
147 SK_PNMI_HTX_SYNC,
148 SK_PNMI_HTX_SYNC_OCTET,
149 SK_PNMI_HTX_RESERVED,
150
151 SK_PNMI_HRX,
152 SK_PNMI_HRX_OCTET,
153 SK_PNMI_HRX_OCTETHIGH = SK_PNMI_HRX_OCTET,
154 SK_PNMI_HRX_OCTETLOW,
155 SK_PNMI_HRX_BADOCTET,
156 SK_PNMI_HRX_BADOCTETHIGH = SK_PNMI_HRX_BADOCTET,
157 SK_PNMI_HRX_BADOCTETLOW,
158 SK_PNMI_HRX_BROADCAST,
159 SK_PNMI_HRX_MULTICAST,
160 SK_PNMI_HRX_UNICAST,
161 SK_PNMI_HRX_PMACC,
162 SK_PNMI_HRX_MACC,
163 SK_PNMI_HRX_PMACC_ERR,
164 SK_PNMI_HRX_MACC_UNKWN,
165 SK_PNMI_HRX_BURST,
166 SK_PNMI_HRX_MISSED,
167 SK_PNMI_HRX_FRAMING,
168 SK_PNMI_HRX_UNDERSIZE,
169 SK_PNMI_HRX_OVERFLOW,
170 SK_PNMI_HRX_JABBER,
171 SK_PNMI_HRX_CARRIER,
172 SK_PNMI_HRX_IRLENGTH,
173 SK_PNMI_HRX_SYMBOL,
174 SK_PNMI_HRX_SHORTS,
175 SK_PNMI_HRX_RUNT,
176 SK_PNMI_HRX_TOO_LONG,
177 SK_PNMI_HRX_FCS,
178 SK_PNMI_HRX_CEXT,
179 SK_PNMI_HRX_UTILUNDER,
180 SK_PNMI_HRX_UTILOVER,
181 SK_PNMI_HRX_64,
182 SK_PNMI_HRX_127,
183 SK_PNMI_HRX_255,
184 SK_PNMI_HRX_511,
185 SK_PNMI_HRX_1023,
186 SK_PNMI_HRX_MAX,
187 SK_PNMI_HRX_LONGFRAMES,
188
189 SK_PNMI_HRX_RESERVED,
190
191 SK_PNMI_MAX_IDX /* NOTE: Ensure SK_PNMI_CNT_NO is set to this value */
192};
193
194/*
195 * MAC specific data
196 */
197typedef struct s_PnmiStatAddr {
198 SK_U16 Reg; /* MAC register containing the value */
199 SK_BOOL GetOffset; /* TRUE: Offset managed by PNMI (call GetStatVal())*/
200} SK_PNMI_STATADDR;
201
202
203/*
204 * SK_PNMI_STRUCT_DATA copy offset evaluation macros
205 */
206#define SK_PNMI_OFF(e) ((SK_U32)(SK_UPTR)&(((SK_PNMI_STRUCT_DATA *)0)->e))
207#define SK_PNMI_MAI_OFF(e) ((SK_U32)(SK_UPTR)&(((SK_PNMI_STRUCT_DATA *)0)->e))
208#define SK_PNMI_VPD_OFF(e) ((SK_U32)(SK_UPTR)&(((SK_PNMI_VPD *)0)->e))
209#define SK_PNMI_SEN_OFF(e) ((SK_U32)(SK_UPTR)&(((SK_PNMI_SENSOR *)0)->e))
210#define SK_PNMI_CHK_OFF(e) ((SK_U32)(SK_UPTR)&(((SK_PNMI_CHECKSUM *)0)->e))
211#define SK_PNMI_STA_OFF(e) ((SK_U32)(SK_UPTR)&(((SK_PNMI_STAT *)0)->e))
212#define SK_PNMI_CNF_OFF(e) ((SK_U32)(SK_UPTR)&(((SK_PNMI_CONF *)0)->e))
213#define SK_PNMI_RLM_OFF(e) ((SK_U32)(SK_UPTR)&(((SK_PNMI_RLMT *)0)->e))
214#define SK_PNMI_MON_OFF(e) ((SK_U32)(SK_UPTR)&(((SK_PNMI_RLMT_MONITOR *)0)->e))
215#define SK_PNMI_TRP_OFF(e) ((SK_U32)(SK_UPTR)&(((SK_PNMI_TRAP *)0)->e))
216
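
The SK_PNMI_*_OFF() macros compute a member's byte offset by taking the member's address relative to a null struct pointer, i.e. the classic hand-rolled form of offsetof(). A standalone sketch over a made-up struct (the struct and names are illustrative only):

#include <stddef.h>
#include <stdio.h>

struct demo {
	unsigned int Version;
	unsigned int Status;
	char         Name[16];
};

/* Same trick as SK_PNMI_OFF(): member address relative to a null pointer.
 * offsetof() from <stddef.h> is the portable way to get the same value. */
#define DEMO_OFF(e) ((unsigned long)(size_t)&(((struct demo *)0)->e))

int main(void)
{
	printf("Status: %lu (offsetof says %zu)\n",
	       DEMO_OFF(Status), offsetof(struct demo, Status));
	printf("Name:   %lu (offsetof says %zu)\n",
	       DEMO_OFF(Name), offsetof(struct demo, Name));
	return 0;
}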
217#define SK_PNMI_SET_STAT(b,s,o) {SK_U32 Val32; char *pVal; \
218 Val32 = (s); \
219 pVal = (char *)(b) + ((SK_U32)(SK_UPTR) \
220 &(((SK_PNMI_STRUCT_DATA *)0)-> \
221 ReturnStatus.ErrorStatus)); \
222 SK_PNMI_STORE_U32(pVal, Val32); \
223 Val32 = (o); \
224 pVal = (char *)(b) + ((SK_U32)(SK_UPTR) \
225 &(((SK_PNMI_STRUCT_DATA *)0)-> \
226 ReturnStatus.ErrorOffset)); \
227 SK_PNMI_STORE_U32(pVal, Val32);}
228
229/*
230 * Time macros
231 */
232#ifndef SK_PNMI_HUNDREDS_SEC
233#if SK_TICKS_PER_SEC == 100
234#define SK_PNMI_HUNDREDS_SEC(t) (t)
235#else
236#define SK_PNMI_HUNDREDS_SEC(t) (((t) * 100) / (SK_TICKS_PER_SEC))
237#endif /* SK_TICKS_PER_SEC == 100 */
238#endif /* !SK_PNMI_HUNDREDS_SEC */
239
240/*
241 * Macros to work around alignment problems
242 */
243#ifndef SK_PNMI_STORE_U16
244#define SK_PNMI_STORE_U16(p,v) {*(char *)(p) = *((char *)&(v)); \
245 *((char *)(p) + 1) = \
246 *(((char *)&(v)) + 1);}
247#endif
248
249#ifndef SK_PNMI_STORE_U32
250#define SK_PNMI_STORE_U32(p,v) {*(char *)(p) = *((char *)&(v)); \
251 *((char *)(p) + 1) = \
252 *(((char *)&(v)) + 1); \
253 *((char *)(p) + 2) = \
254 *(((char *)&(v)) + 2); \
255 *((char *)(p) + 3) = \
256 *(((char *)&(v)) + 3);}
257#endif
258
259#ifndef SK_PNMI_STORE_U64
260#define SK_PNMI_STORE_U64(p,v) {*(char *)(p) = *((char *)&(v)); \
261 *((char *)(p) + 1) = \
262 *(((char *)&(v)) + 1); \
263 *((char *)(p) + 2) = \
264 *(((char *)&(v)) + 2); \
265 *((char *)(p) + 3) = \
266 *(((char *)&(v)) + 3); \
267 *((char *)(p) + 4) = \
268 *(((char *)&(v)) + 4); \
269 *((char *)(p) + 5) = \
270 *(((char *)&(v)) + 5); \
271 *((char *)(p) + 6) = \
272 *(((char *)&(v)) + 6); \
273 *((char *)(p) + 7) = \
274 *(((char *)&(v)) + 7);}
275#endif
276
277#ifndef SK_PNMI_READ_U16
278#define SK_PNMI_READ_U16(p,v) {*((char *)&(v)) = *(char *)(p); \
279 *(((char *)&(v)) + 1) = \
280 *((char *)(p) + 1);}
281#endif
282
283#ifndef SK_PNMI_READ_U32
284#define SK_PNMI_READ_U32(p,v) {*((char *)&(v)) = *(char *)(p); \
285 *(((char *)&(v)) + 1) = \
286 *((char *)(p) + 1); \
287 *(((char *)&(v)) + 2) = \
288 *((char *)(p) + 2); \
289 *(((char *)&(v)) + 3) = \
290 *((char *)(p) + 3);}
291#endif
292
293#ifndef SK_PNMI_READ_U64
294#define SK_PNMI_READ_U64(p,v) {*((char *)&(v)) = *(char *)(p); \
295 *(((char *)&(v)) + 1) = \
296 *((char *)(p) + 1); \
297 *(((char *)&(v)) + 2) = \
298 *((char *)(p) + 2); \
299 *(((char *)&(v)) + 3) = \
300 *((char *)(p) + 3); \
301 *(((char *)&(v)) + 4) = \
302 *((char *)(p) + 4); \
303 *(((char *)&(v)) + 5) = \
304 *((char *)(p) + 5); \
305 *(((char *)&(v)) + 6) = \
306 *((char *)(p) + 6); \
307 *(((char *)&(v)) + 7) = \
308 *((char *)(p) + 7);}
309#endif
310
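
The SK_PNMI_STORE_*/SK_PNMI_READ_* macros copy values one byte at a time through char pointers, so a field can be written to or read from an arbitrary, possibly unaligned, offset in the management buffer. A minimal standalone illustration of the same idea, using uint32_t in place of SK_U32 (that substitution is an assumption of this sketch):

#include <stdint.h>
#include <stdio.h>

/* Byte-wise 32-bit store/read, as in SK_PNMI_STORE_U32/SK_PNMI_READ_U32;
 * going through char pointers keeps unaligned offsets safe. */
#define STORE_U32(p, v) do { \
		*((char *)(p) + 0) = *(((char *)&(v)) + 0); \
		*((char *)(p) + 1) = *(((char *)&(v)) + 1); \
		*((char *)(p) + 2) = *(((char *)&(v)) + 2); \
		*((char *)(p) + 3) = *(((char *)&(v)) + 3); \
	} while (0)

#define READ_U32(p, v) do { \
		*(((char *)&(v)) + 0) = *((char *)(p) + 0); \
		*(((char *)&(v)) + 1) = *((char *)(p) + 1); \
		*(((char *)&(v)) + 2) = *((char *)(p) + 2); \
		*(((char *)&(v)) + 3) = *((char *)(p) + 3); \
	} while (0)

int main(void)
{
	char Buf[16];
	uint32_t In = 0x12345678u, Out = 0;

	STORE_U32(Buf + 1, In);   /* offset 1 is deliberately unaligned */
	READ_U32(Buf + 1, Out);
	printf("0x%08x\n", (unsigned)Out);   /* prints 0x12345678 */
	return 0;
}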
311/*
312 * Macros for Debug
313 */
314#ifdef DEBUG
315
316#define SK_PNMI_CHECKFLAGS(vSt) {if (pAC->Pnmi.MacUpdatedFlag > 0 || \
317 pAC->Pnmi.RlmtUpdatedFlag > 0 || \
318 pAC->Pnmi.SirqUpdatedFlag > 0) { \
319 SK_DBG_MSG(pAC, \
320 SK_DBGMOD_PNMI, \
321 SK_DBGCAT_CTRL, \
322 ("PNMI: ERR: %s MacUFlag=%d, RlmtUFlag=%d, SirqUFlag=%d\n", \
323 vSt, \
324 pAC->Pnmi.MacUpdatedFlag, \
325 pAC->Pnmi.RlmtUpdatedFlag, \
326 pAC->Pnmi.SirqUpdatedFlag))}}
327
328#else /* !DEBUG */
329
330#define SK_PNMI_CHECKFLAGS(vSt) /* Nothing */
331
332#endif /* !DEBUG */
333
334#endif /* _SKGEPNM2_H_ */
diff --git a/drivers/net/sk98lin/h/skgepnmi.h b/drivers/net/sk98lin/h/skgepnmi.h
deleted file mode 100644
index 1ed214ccb253..000000000000
--- a/drivers/net/sk98lin/h/skgepnmi.h
+++ /dev/null
@@ -1,962 +0,0 @@
1/*****************************************************************************
2 *
3 * Name: skgepnmi.h
4 * Project: GEnesis, PCI Gigabit Ethernet Adapter
5 * Version: $Revision: 1.62 $
6 * Date: $Date: 2003/08/15 12:31:52 $
7 * Purpose: Defines for Private Network Management Interface
8 *
9 ****************************************************************************/
10
11/******************************************************************************
12 *
13 * (C)Copyright 1998-2002 SysKonnect GmbH.
14 * (C)Copyright 2002-2003 Marvell.
15 *
16 * This program is free software; you can redistribute it and/or modify
17 * it under the terms of the GNU General Public License as published by
18 * the Free Software Foundation; either version 2 of the License, or
19 * (at your option) any later version.
20 *
21 * The information in this file is provided "AS IS" without warranty.
22 *
23 ******************************************************************************/
24
25#ifndef _SKGEPNMI_H_
26#define _SKGEPNMI_H_
27
28/*
29 * Include dependencies
30 */
31#include "h/sktypes.h"
32#include "h/skerror.h"
33#include "h/sktimer.h"
34#include "h/ski2c.h"
35#include "h/skaddr.h"
36#include "h/skrlmt.h"
37#include "h/skvpd.h"
38
39/*
40 * Management Database Version
41 */
42#define SK_PNMI_MDB_VERSION 0x00030001 /* 3.1 */
43
44
45/*
46 * Event definitions
47 */
48#define SK_PNMI_EVT_SIRQ_OVERFLOW 1 /* Counter overflow */
49#define SK_PNMI_EVT_SEN_WAR_LOW 2 /* Lower war thres exceeded */
50#define SK_PNMI_EVT_SEN_WAR_UPP 3 /* Upper war thres exceeded */
51#define SK_PNMI_EVT_SEN_ERR_LOW 4 /* Lower err thres exceeded */
52#define SK_PNMI_EVT_SEN_ERR_UPP 5 /* Upper err thres exceeded */
53#define SK_PNMI_EVT_CHG_EST_TIMER 6 /* Timer event for RLMT Chg */
54#define SK_PNMI_EVT_UTILIZATION_TIMER 7 /* Timer event for Utiliza. */
55#define SK_PNMI_EVT_CLEAR_COUNTER 8 /* Clear statistic counters */
56#define SK_PNMI_EVT_XMAC_RESET 9 /* XMAC will be reset */
57
58#define SK_PNMI_EVT_RLMT_PORT_UP 10 /* Port came logically up */
59#define SK_PNMI_EVT_RLMT_PORT_DOWN 11 /* Port went logically down */
60#define SK_PNMI_EVT_RLMT_SEGMENTATION 13 /* Two SP root bridges found */
61#define SK_PNMI_EVT_RLMT_ACTIVE_DOWN 14 /* Port went logically down */
62#define SK_PNMI_EVT_RLMT_ACTIVE_UP 15 /* Port came logically up */
63#define SK_PNMI_EVT_RLMT_SET_NETS 16 /* 1. Parameter is number of nets
64 1 = single net; 2 = dual net */
65#define SK_PNMI_EVT_VCT_RESET 17 /* VCT port reset timer event started with SET. */
66
67
68/*
69 * Return values
70 */
71#define SK_PNMI_ERR_OK 0
72#define SK_PNMI_ERR_GENERAL 1
73#define SK_PNMI_ERR_TOO_SHORT 2
74#define SK_PNMI_ERR_BAD_VALUE 3
75#define SK_PNMI_ERR_READ_ONLY 4
76#define SK_PNMI_ERR_UNKNOWN_OID 5
77#define SK_PNMI_ERR_UNKNOWN_INST 6
78#define SK_PNMI_ERR_UNKNOWN_NET 7
79#define SK_PNMI_ERR_NOT_SUPPORTED 10
80
81
82/*
83 * Return values of driver reset function SK_DRIVER_RESET() and
84 * driver event function SK_DRIVER_EVENT()
85 */
86#define SK_PNMI_ERR_OK 0
87#define SK_PNMI_ERR_FAIL 1
88
89
90/*
91 * Return values of driver test function SK_DRIVER_SELFTEST()
92 */
93#define SK_PNMI_TST_UNKNOWN (1 << 0)
94#define SK_PNMI_TST_TRANCEIVER (1 << 1)
95#define SK_PNMI_TST_ASIC (1 << 2)
96#define SK_PNMI_TST_SENSOR (1 << 3)
97#define SK_PNMI_TST_POWERMGMT (1 << 4)
98#define SK_PNMI_TST_PCI (1 << 5)
99#define SK_PNMI_TST_MAC (1 << 6)
100
101
102/*
103 * RLMT specific definitions
104 */
105#define SK_PNMI_RLMT_STATUS_STANDBY 1
106#define SK_PNMI_RLMT_STATUS_ACTIVE 2
107#define SK_PNMI_RLMT_STATUS_ERROR 3
108
109#define SK_PNMI_RLMT_LSTAT_PHY_DOWN 1
110#define SK_PNMI_RLMT_LSTAT_AUTONEG 2
111#define SK_PNMI_RLMT_LSTAT_LOG_DOWN 3
112#define SK_PNMI_RLMT_LSTAT_LOG_UP 4
113#define SK_PNMI_RLMT_LSTAT_INDETERMINATED 5
114
115#define SK_PNMI_RLMT_MODE_CHK_LINK (SK_RLMT_CHECK_LINK)
116#define SK_PNMI_RLMT_MODE_CHK_RX (SK_RLMT_CHECK_LOC_LINK)
117#define SK_PNMI_RLMT_MODE_CHK_SPT (SK_RLMT_CHECK_SEG)
118/* #define SK_PNMI_RLMT_MODE_CHK_EX */
119
120/*
121 * OID definition
122 */
123#ifndef _NDIS_ /* Check, whether NDIS already included OIDs */
124
125#define OID_GEN_XMIT_OK 0x00020101
126#define OID_GEN_RCV_OK 0x00020102
127#define OID_GEN_XMIT_ERROR 0x00020103
128#define OID_GEN_RCV_ERROR 0x00020104
129#define OID_GEN_RCV_NO_BUFFER 0x00020105
130
131/* #define OID_GEN_DIRECTED_BYTES_XMIT 0x00020201 */
132#define OID_GEN_DIRECTED_FRAMES_XMIT 0x00020202
133/* #define OID_GEN_MULTICAST_BYTES_XMIT 0x00020203 */
134#define OID_GEN_MULTICAST_FRAMES_XMIT 0x00020204
135/* #define OID_GEN_BROADCAST_BYTES_XMIT 0x00020205 */
136#define OID_GEN_BROADCAST_FRAMES_XMIT 0x00020206
137/* #define OID_GEN_DIRECTED_BYTES_RCV 0x00020207 */
138#define OID_GEN_DIRECTED_FRAMES_RCV 0x00020208
139/* #define OID_GEN_MULTICAST_BYTES_RCV 0x00020209 */
140#define OID_GEN_MULTICAST_FRAMES_RCV 0x0002020A
141/* #define OID_GEN_BROADCAST_BYTES_RCV 0x0002020B */
142#define OID_GEN_BROADCAST_FRAMES_RCV 0x0002020C
143#define OID_GEN_RCV_CRC_ERROR 0x0002020D
144#define OID_GEN_TRANSMIT_QUEUE_LENGTH 0x0002020E
145
146#define OID_802_3_PERMANENT_ADDRESS 0x01010101
147#define OID_802_3_CURRENT_ADDRESS 0x01010102
148/* #define OID_802_3_MULTICAST_LIST 0x01010103 */
149/* #define OID_802_3_MAXIMUM_LIST_SIZE 0x01010104 */
150/* #define OID_802_3_MAC_OPTIONS 0x01010105 */
151
152#define OID_802_3_RCV_ERROR_ALIGNMENT 0x01020101
153#define OID_802_3_XMIT_ONE_COLLISION 0x01020102
154#define OID_802_3_XMIT_MORE_COLLISIONS 0x01020103
155#define OID_802_3_XMIT_DEFERRED 0x01020201
156#define OID_802_3_XMIT_MAX_COLLISIONS 0x01020202
157#define OID_802_3_RCV_OVERRUN 0x01020203
158#define OID_802_3_XMIT_UNDERRUN 0x01020204
159#define OID_802_3_XMIT_TIMES_CRS_LOST 0x01020206
160#define OID_802_3_XMIT_LATE_COLLISIONS 0x01020207
161
162/*
163 * PnP and PM OIDs
164 */
165#ifdef SK_POWER_MGMT
166#define OID_PNP_CAPABILITIES 0xFD010100
167#define OID_PNP_SET_POWER 0xFD010101
168#define OID_PNP_QUERY_POWER 0xFD010102
169#define OID_PNP_ADD_WAKE_UP_PATTERN 0xFD010103
170#define OID_PNP_REMOVE_WAKE_UP_PATTERN 0xFD010104
171#define OID_PNP_ENABLE_WAKE_UP 0xFD010106
172#endif /* SK_POWER_MGMT */
173
174#endif /* _NDIS_ */
175
176#define OID_SKGE_MDB_VERSION 0xFF010100
177#define OID_SKGE_SUPPORTED_LIST 0xFF010101
178#define OID_SKGE_VPD_FREE_BYTES 0xFF010102
179#define OID_SKGE_VPD_ENTRIES_LIST 0xFF010103
180#define OID_SKGE_VPD_ENTRIES_NUMBER 0xFF010104
181#define OID_SKGE_VPD_KEY 0xFF010105
182#define OID_SKGE_VPD_VALUE 0xFF010106
183#define OID_SKGE_VPD_ACCESS 0xFF010107
184#define OID_SKGE_VPD_ACTION 0xFF010108
185
186#define OID_SKGE_PORT_NUMBER 0xFF010110
187#define OID_SKGE_DEVICE_TYPE 0xFF010111
188#define OID_SKGE_DRIVER_DESCR 0xFF010112
189#define OID_SKGE_DRIVER_VERSION 0xFF010113
190#define OID_SKGE_HW_DESCR 0xFF010114
191#define OID_SKGE_HW_VERSION 0xFF010115
192#define OID_SKGE_CHIPSET 0xFF010116
193#define OID_SKGE_ACTION 0xFF010117
194#define OID_SKGE_RESULT 0xFF010118
195#define OID_SKGE_BUS_TYPE 0xFF010119
196#define OID_SKGE_BUS_SPEED 0xFF01011A
197#define OID_SKGE_BUS_WIDTH 0xFF01011B
198/* 0xFF01011C unused */
199#define OID_SKGE_DIAG_ACTION 0xFF01011D
200#define OID_SKGE_DIAG_RESULT 0xFF01011E
201#define OID_SKGE_MTU 0xFF01011F
202#define OID_SKGE_PHYS_CUR_ADDR 0xFF010120
203#define OID_SKGE_PHYS_FAC_ADDR 0xFF010121
204#define OID_SKGE_PMD 0xFF010122
205#define OID_SKGE_CONNECTOR 0xFF010123
206#define OID_SKGE_LINK_CAP 0xFF010124
207#define OID_SKGE_LINK_MODE 0xFF010125
208#define OID_SKGE_LINK_MODE_STATUS 0xFF010126
209#define OID_SKGE_LINK_STATUS 0xFF010127
210#define OID_SKGE_FLOWCTRL_CAP 0xFF010128
211#define OID_SKGE_FLOWCTRL_MODE 0xFF010129
212#define OID_SKGE_FLOWCTRL_STATUS 0xFF01012A
213#define OID_SKGE_PHY_OPERATION_CAP 0xFF01012B
214#define OID_SKGE_PHY_OPERATION_MODE 0xFF01012C
215#define OID_SKGE_PHY_OPERATION_STATUS 0xFF01012D
216#define OID_SKGE_MULTICAST_LIST 0xFF01012E
217#define OID_SKGE_CURRENT_PACKET_FILTER 0xFF01012F
218
219#define OID_SKGE_TRAP 0xFF010130
220#define OID_SKGE_TRAP_NUMBER 0xFF010131
221
222#define OID_SKGE_RLMT_MODE 0xFF010140
223#define OID_SKGE_RLMT_PORT_NUMBER 0xFF010141
224#define OID_SKGE_RLMT_PORT_ACTIVE 0xFF010142
225#define OID_SKGE_RLMT_PORT_PREFERRED 0xFF010143
226#define OID_SKGE_INTERMEDIATE_SUPPORT 0xFF010160
227
228#define OID_SKGE_SPEED_CAP 0xFF010170
229#define OID_SKGE_SPEED_MODE 0xFF010171
230#define OID_SKGE_SPEED_STATUS 0xFF010172
231
232#define OID_SKGE_BOARDLEVEL 0xFF010180
233
234#define OID_SKGE_SENSOR_NUMBER 0xFF020100
235#define OID_SKGE_SENSOR_INDEX 0xFF020101
236#define OID_SKGE_SENSOR_DESCR 0xFF020102
237#define OID_SKGE_SENSOR_TYPE 0xFF020103
238#define OID_SKGE_SENSOR_VALUE 0xFF020104
239#define OID_SKGE_SENSOR_WAR_THRES_LOW 0xFF020105
240#define OID_SKGE_SENSOR_WAR_THRES_UPP 0xFF020106
241#define OID_SKGE_SENSOR_ERR_THRES_LOW 0xFF020107
242#define OID_SKGE_SENSOR_ERR_THRES_UPP 0xFF020108
243#define OID_SKGE_SENSOR_STATUS 0xFF020109
244#define OID_SKGE_SENSOR_WAR_CTS 0xFF02010A
245#define OID_SKGE_SENSOR_ERR_CTS 0xFF02010B
246#define OID_SKGE_SENSOR_WAR_TIME 0xFF02010C
247#define OID_SKGE_SENSOR_ERR_TIME 0xFF02010D
248
249#define OID_SKGE_CHKSM_NUMBER 0xFF020110
250#define OID_SKGE_CHKSM_RX_OK_CTS 0xFF020111
251#define OID_SKGE_CHKSM_RX_UNABLE_CTS 0xFF020112
252#define OID_SKGE_CHKSM_RX_ERR_CTS 0xFF020113
253#define OID_SKGE_CHKSM_TX_OK_CTS 0xFF020114
254#define OID_SKGE_CHKSM_TX_UNABLE_CTS 0xFF020115
255
256#define OID_SKGE_STAT_TX 0xFF020120
257#define OID_SKGE_STAT_TX_OCTETS 0xFF020121
258#define OID_SKGE_STAT_TX_BROADCAST 0xFF020122
259#define OID_SKGE_STAT_TX_MULTICAST 0xFF020123
260#define OID_SKGE_STAT_TX_UNICAST 0xFF020124
261#define OID_SKGE_STAT_TX_LONGFRAMES 0xFF020125
262#define OID_SKGE_STAT_TX_BURST 0xFF020126
263#define OID_SKGE_STAT_TX_PFLOWC 0xFF020127
264#define OID_SKGE_STAT_TX_FLOWC 0xFF020128
265#define OID_SKGE_STAT_TX_SINGLE_COL 0xFF020129
266#define OID_SKGE_STAT_TX_MULTI_COL 0xFF02012A
267#define OID_SKGE_STAT_TX_EXCESS_COL 0xFF02012B
268#define OID_SKGE_STAT_TX_LATE_COL 0xFF02012C
269#define OID_SKGE_STAT_TX_DEFFERAL 0xFF02012D
270#define OID_SKGE_STAT_TX_EXCESS_DEF 0xFF02012E
271#define OID_SKGE_STAT_TX_UNDERRUN 0xFF02012F
272#define OID_SKGE_STAT_TX_CARRIER 0xFF020130
273/* #define OID_SKGE_STAT_TX_UTIL 0xFF020131 */
274#define OID_SKGE_STAT_TX_64 0xFF020132
275#define OID_SKGE_STAT_TX_127 0xFF020133
276#define OID_SKGE_STAT_TX_255 0xFF020134
277#define OID_SKGE_STAT_TX_511 0xFF020135
278#define OID_SKGE_STAT_TX_1023 0xFF020136
279#define OID_SKGE_STAT_TX_MAX 0xFF020137
280#define OID_SKGE_STAT_TX_SYNC 0xFF020138
281#define OID_SKGE_STAT_TX_SYNC_OCTETS 0xFF020139
282#define OID_SKGE_STAT_RX 0xFF02013A
283#define OID_SKGE_STAT_RX_OCTETS 0xFF02013B
284#define OID_SKGE_STAT_RX_BROADCAST 0xFF02013C
285#define OID_SKGE_STAT_RX_MULTICAST 0xFF02013D
286#define OID_SKGE_STAT_RX_UNICAST 0xFF02013E
287#define OID_SKGE_STAT_RX_PFLOWC 0xFF02013F
288#define OID_SKGE_STAT_RX_FLOWC 0xFF020140
289#define OID_SKGE_STAT_RX_PFLOWC_ERR 0xFF020141
290#define OID_SKGE_STAT_RX_FLOWC_UNKWN 0xFF020142
291#define OID_SKGE_STAT_RX_BURST 0xFF020143
292#define OID_SKGE_STAT_RX_MISSED 0xFF020144
293#define OID_SKGE_STAT_RX_FRAMING 0xFF020145
294#define OID_SKGE_STAT_RX_OVERFLOW 0xFF020146
295#define OID_SKGE_STAT_RX_JABBER 0xFF020147
296#define OID_SKGE_STAT_RX_CARRIER 0xFF020148
297#define OID_SKGE_STAT_RX_IR_LENGTH 0xFF020149
298#define OID_SKGE_STAT_RX_SYMBOL 0xFF02014A
299#define OID_SKGE_STAT_RX_SHORTS 0xFF02014B
300#define OID_SKGE_STAT_RX_RUNT 0xFF02014C
301#define OID_SKGE_STAT_RX_CEXT 0xFF02014D
302#define OID_SKGE_STAT_RX_TOO_LONG 0xFF02014E
303#define OID_SKGE_STAT_RX_FCS 0xFF02014F
304/* #define OID_SKGE_STAT_RX_UTIL 0xFF020150 */
305#define OID_SKGE_STAT_RX_64 0xFF020151
306#define OID_SKGE_STAT_RX_127 0xFF020152
307#define OID_SKGE_STAT_RX_255 0xFF020153
308#define OID_SKGE_STAT_RX_511 0xFF020154
309#define OID_SKGE_STAT_RX_1023 0xFF020155
310#define OID_SKGE_STAT_RX_MAX 0xFF020156
311#define OID_SKGE_STAT_RX_LONGFRAMES 0xFF020157
312
313#define OID_SKGE_RLMT_CHANGE_CTS 0xFF020160
314#define OID_SKGE_RLMT_CHANGE_TIME 0xFF020161
315#define OID_SKGE_RLMT_CHANGE_ESTIM 0xFF020162
316#define OID_SKGE_RLMT_CHANGE_THRES 0xFF020163
317
318#define OID_SKGE_RLMT_PORT_INDEX 0xFF020164
319#define OID_SKGE_RLMT_STATUS 0xFF020165
320#define OID_SKGE_RLMT_TX_HELLO_CTS 0xFF020166
321#define OID_SKGE_RLMT_RX_HELLO_CTS 0xFF020167
322#define OID_SKGE_RLMT_TX_SP_REQ_CTS 0xFF020168
323#define OID_SKGE_RLMT_RX_SP_CTS 0xFF020169
324
325#define OID_SKGE_RLMT_MONITOR_NUMBER 0xFF010150
326#define OID_SKGE_RLMT_MONITOR_INDEX 0xFF010151
327#define OID_SKGE_RLMT_MONITOR_ADDR 0xFF010152
328#define OID_SKGE_RLMT_MONITOR_ERRS 0xFF010153
329#define OID_SKGE_RLMT_MONITOR_TIMESTAMP 0xFF010154
330#define OID_SKGE_RLMT_MONITOR_ADMIN 0xFF010155
331
332#define OID_SKGE_TX_SW_QUEUE_LEN 0xFF020170
333#define OID_SKGE_TX_SW_QUEUE_MAX 0xFF020171
334#define OID_SKGE_TX_RETRY 0xFF020172
335#define OID_SKGE_RX_INTR_CTS 0xFF020173
336#define OID_SKGE_TX_INTR_CTS 0xFF020174
337#define OID_SKGE_RX_NO_BUF_CTS 0xFF020175
338#define OID_SKGE_TX_NO_BUF_CTS 0xFF020176
339#define OID_SKGE_TX_USED_DESCR_NO 0xFF020177
340#define OID_SKGE_RX_DELIVERED_CTS 0xFF020178
341#define OID_SKGE_RX_OCTETS_DELIV_CTS 0xFF020179
342#define OID_SKGE_RX_HW_ERROR_CTS 0xFF02017A
343#define OID_SKGE_TX_HW_ERROR_CTS 0xFF02017B
344#define OID_SKGE_IN_ERRORS_CTS 0xFF02017C
345#define OID_SKGE_OUT_ERROR_CTS 0xFF02017D
346#define OID_SKGE_ERR_RECOVERY_CTS 0xFF02017E
347#define OID_SKGE_SYSUPTIME 0xFF02017F
348
349#define OID_SKGE_ALL_DATA 0xFF020190
350
351/* Defines for VCT. */
352#define OID_SKGE_VCT_GET 0xFF020200
353#define OID_SKGE_VCT_SET 0xFF020201
354#define OID_SKGE_VCT_STATUS 0xFF020202
355
356#ifdef SK_DIAG_SUPPORT
357/* Defines for driver DIAG mode. */
358#define OID_SKGE_DIAG_MODE 0xFF020204
359#endif /* SK_DIAG_SUPPORT */
360
361/* New OIDs */
362#define OID_SKGE_DRIVER_RELDATE 0xFF020210
363#define OID_SKGE_DRIVER_FILENAME 0xFF020211
364#define OID_SKGE_CHIPID 0xFF020212
365#define OID_SKGE_RAMSIZE 0xFF020213
366#define OID_SKGE_VAUXAVAIL 0xFF020214
367#define OID_SKGE_PHY_TYPE 0xFF020215
368#define OID_SKGE_PHY_LP_MODE 0xFF020216
369
370/* VCT struct to store a backup copy of VCT data after a port reset. */
371typedef struct s_PnmiVct {
372 SK_U8 VctStatus;
373 SK_U8 PCableLen;
374 SK_U32 PMdiPairLen[4];
375 SK_U8 PMdiPairSts[4];
376} SK_PNMI_VCT;
377
378
379/* VCT status values (to be given to CPA via OID_SKGE_VCT_STATUS). */
380#define SK_PNMI_VCT_NONE 0
381#define SK_PNMI_VCT_OLD_VCT_DATA 1
382#define SK_PNMI_VCT_NEW_VCT_DATA 2
383#define SK_PNMI_VCT_OLD_DSP_DATA 4
384#define SK_PNMI_VCT_NEW_DSP_DATA 8
385#define SK_PNMI_VCT_RUNNING 16
386
387
388/* VCT cable test status. */
389#define SK_PNMI_VCT_NORMAL_CABLE 0
390#define SK_PNMI_VCT_SHORT_CABLE 1
391#define SK_PNMI_VCT_OPEN_CABLE 2
392#define SK_PNMI_VCT_TEST_FAIL 3
393#define SK_PNMI_VCT_IMPEDANCE_MISMATCH 4
394
395#define OID_SKGE_TRAP_SEN_WAR_LOW 500
396#define OID_SKGE_TRAP_SEN_WAR_UPP 501
397#define OID_SKGE_TRAP_SEN_ERR_LOW 502
398#define OID_SKGE_TRAP_SEN_ERR_UPP 503
399#define OID_SKGE_TRAP_RLMT_CHANGE_THRES 520
400#define OID_SKGE_TRAP_RLMT_CHANGE_PORT 521
401#define OID_SKGE_TRAP_RLMT_PORT_DOWN 522
402#define OID_SKGE_TRAP_RLMT_PORT_UP 523
403#define OID_SKGE_TRAP_RLMT_SEGMENTATION 524
404
405#ifdef SK_DIAG_SUPPORT
406/* Defines for driver DIAG mode. */
407#define SK_DIAG_ATTACHED 2
408#define SK_DIAG_RUNNING 1
409#define SK_DIAG_IDLE 0
410#endif /* SK_DIAG_SUPPORT */
411
412/*
413 * Generic PNMI IOCTL subcommand definitions.
414 */
415#define SK_GET_SINGLE_VAR 1
416#define SK_SET_SINGLE_VAR 2
417#define SK_PRESET_SINGLE_VAR 3
418#define SK_GET_FULL_MIB 4
419#define SK_SET_FULL_MIB 5
420#define SK_PRESET_FULL_MIB 6
421
422
423/*
424 * Define error numbers and messages for syslog
425 */
426#define SK_PNMI_ERR001 (SK_ERRBASE_PNMI + 1)
427#define SK_PNMI_ERR001MSG "SkPnmiGetStruct: Unknown OID"
428#define SK_PNMI_ERR002 (SK_ERRBASE_PNMI + 2)
429#define SK_PNMI_ERR002MSG "SkPnmiGetStruct: Cannot read VPD keys"
430#define SK_PNMI_ERR003 (SK_ERRBASE_PNMI + 3)
431#define SK_PNMI_ERR003MSG "OidStruct: Called with wrong OID"
432#define SK_PNMI_ERR004 (SK_ERRBASE_PNMI + 4)
433#define SK_PNMI_ERR004MSG "OidStruct: Called with wrong action"
434#define SK_PNMI_ERR005 (SK_ERRBASE_PNMI + 5)
435#define SK_PNMI_ERR005MSG "Perform: Cannot reset driver"
436#define SK_PNMI_ERR006 (SK_ERRBASE_PNMI + 6)
437#define SK_PNMI_ERR006MSG "Perform: Unknown OID action command"
438#define SK_PNMI_ERR007 (SK_ERRBASE_PNMI + 7)
439#define SK_PNMI_ERR007MSG "General: Driver description not initialized"
440#define SK_PNMI_ERR008 (SK_ERRBASE_PNMI + 8)
441#define SK_PNMI_ERR008MSG "Addr: Tried to get unknown OID"
442#define SK_PNMI_ERR009 (SK_ERRBASE_PNMI + 9)
443#define SK_PNMI_ERR009MSG "Addr: Unknown OID"
444#define SK_PNMI_ERR010 (SK_ERRBASE_PNMI + 10)
445#define SK_PNMI_ERR010MSG "CsumStat: Unknown OID"
446#define SK_PNMI_ERR011 (SK_ERRBASE_PNMI + 11)
447#define SK_PNMI_ERR011MSG "SensorStat: Sensor descr string too long"
448#define SK_PNMI_ERR012 (SK_ERRBASE_PNMI + 12)
449#define SK_PNMI_ERR012MSG "SensorStat: Unknown OID"
450#define SK_PNMI_ERR013 (SK_ERRBASE_PNMI + 13)
451#define SK_PNMI_ERR013MSG ""
452#define SK_PNMI_ERR014 (SK_ERRBASE_PNMI + 14)
453#define SK_PNMI_ERR014MSG "Vpd: Cannot read VPD keys"
454#define SK_PNMI_ERR015 (SK_ERRBASE_PNMI + 15)
455#define SK_PNMI_ERR015MSG	"Vpd: Internal array for VPD keys too small"
456#define SK_PNMI_ERR016 (SK_ERRBASE_PNMI + 16)
457#define SK_PNMI_ERR016MSG "Vpd: Key string too long"
458#define SK_PNMI_ERR017 (SK_ERRBASE_PNMI + 17)
459#define SK_PNMI_ERR017MSG "Vpd: Invalid VPD status pointer"
460#define SK_PNMI_ERR018 (SK_ERRBASE_PNMI + 18)
461#define SK_PNMI_ERR018MSG "Vpd: VPD data not valid"
462#define SK_PNMI_ERR019 (SK_ERRBASE_PNMI + 19)
463#define SK_PNMI_ERR019MSG "Vpd: VPD entries list string too long"
464#define SK_PNMI_ERR021 (SK_ERRBASE_PNMI + 21)
465#define SK_PNMI_ERR021MSG "Vpd: VPD data string too long"
466#define SK_PNMI_ERR022 (SK_ERRBASE_PNMI + 22)
467#define SK_PNMI_ERR022MSG	"Vpd: VPD data string too long; should have been caught earlier"
468#define SK_PNMI_ERR023 (SK_ERRBASE_PNMI + 23)
469#define SK_PNMI_ERR023MSG "Vpd: Unknown OID in get action"
470#define SK_PNMI_ERR024 (SK_ERRBASE_PNMI + 24)
471#define SK_PNMI_ERR024MSG "Vpd: Unknown OID in preset/set action"
472#define SK_PNMI_ERR025 (SK_ERRBASE_PNMI + 25)
473#define SK_PNMI_ERR025MSG "Vpd: Cannot write VPD after modify entry"
474#define SK_PNMI_ERR026 (SK_ERRBASE_PNMI + 26)
475#define SK_PNMI_ERR026MSG "Vpd: Cannot update VPD"
476#define SK_PNMI_ERR027 (SK_ERRBASE_PNMI + 27)
477#define SK_PNMI_ERR027MSG "Vpd: Cannot delete VPD entry"
478#define SK_PNMI_ERR028 (SK_ERRBASE_PNMI + 28)
479#define SK_PNMI_ERR028MSG "Vpd: Cannot update VPD after delete entry"
480#define SK_PNMI_ERR029 (SK_ERRBASE_PNMI + 29)
481#define SK_PNMI_ERR029MSG "General: Driver description string too long"
482#define SK_PNMI_ERR030 (SK_ERRBASE_PNMI + 30)
483#define SK_PNMI_ERR030MSG "General: Driver version not initialized"
484#define SK_PNMI_ERR031 (SK_ERRBASE_PNMI + 31)
485#define SK_PNMI_ERR031MSG "General: Driver version string too long"
486#define SK_PNMI_ERR032 (SK_ERRBASE_PNMI + 32)
487#define SK_PNMI_ERR032MSG "General: Cannot read VPD Name for HW descr"
488#define SK_PNMI_ERR033 (SK_ERRBASE_PNMI + 33)
489#define SK_PNMI_ERR033MSG "General: HW description string too long"
490#define SK_PNMI_ERR034 (SK_ERRBASE_PNMI + 34)
491#define SK_PNMI_ERR034MSG "General: Unknown OID"
492#define SK_PNMI_ERR035 (SK_ERRBASE_PNMI + 35)
493#define SK_PNMI_ERR035MSG "Rlmt: Unknown OID"
494#define SK_PNMI_ERR036 (SK_ERRBASE_PNMI + 36)
495#define SK_PNMI_ERR036MSG ""
496#define SK_PNMI_ERR037 (SK_ERRBASE_PNMI + 37)
497#define SK_PNMI_ERR037MSG "Rlmt: SK_RLMT_MODE_CHANGE event return not 0"
498#define SK_PNMI_ERR038 (SK_ERRBASE_PNMI + 38)
499#define SK_PNMI_ERR038MSG "Rlmt: SK_RLMT_PREFPORT_CHANGE event return not 0"
500#define SK_PNMI_ERR039 (SK_ERRBASE_PNMI + 39)
501#define SK_PNMI_ERR039MSG "RlmtStat: Unknown OID"
502#define SK_PNMI_ERR040 (SK_ERRBASE_PNMI + 40)
503#define SK_PNMI_ERR040MSG "PowerManagement: Unknown OID"
504#define SK_PNMI_ERR041 (SK_ERRBASE_PNMI + 41)
505#define SK_PNMI_ERR041MSG "MacPrivateConf: Unknown OID"
506#define SK_PNMI_ERR042 (SK_ERRBASE_PNMI + 42)
507#define SK_PNMI_ERR042MSG "MacPrivateConf: SK_HWEV_SET_ROLE returned not 0"
508#define SK_PNMI_ERR043 (SK_ERRBASE_PNMI + 43)
509#define SK_PNMI_ERR043MSG "MacPrivateConf: SK_HWEV_SET_LMODE returned not 0"
510#define SK_PNMI_ERR044 (SK_ERRBASE_PNMI + 44)
511#define SK_PNMI_ERR044MSG "MacPrivateConf: SK_HWEV_SET_FLOWMODE returned not 0"
512#define SK_PNMI_ERR045 (SK_ERRBASE_PNMI + 45)
513#define SK_PNMI_ERR045MSG "MacPrivateConf: SK_HWEV_SET_SPEED returned not 0"
514#define SK_PNMI_ERR046 (SK_ERRBASE_PNMI + 46)
515#define SK_PNMI_ERR046MSG "Monitor: Unknown OID"
516#define SK_PNMI_ERR047 (SK_ERRBASE_PNMI + 47)
517#define SK_PNMI_ERR047MSG "SirqUpdate: Event function returns not 0"
518#define SK_PNMI_ERR048 (SK_ERRBASE_PNMI + 48)
519#define SK_PNMI_ERR048MSG "RlmtUpdate: Event function returns not 0"
520#define SK_PNMI_ERR049 (SK_ERRBASE_PNMI + 49)
521#define SK_PNMI_ERR049MSG "SkPnmiInit: Invalid size of 'CounterOffset' struct!!"
522#define SK_PNMI_ERR050 (SK_ERRBASE_PNMI + 50)
523#define SK_PNMI_ERR050MSG "SkPnmiInit: Invalid size of 'StatAddr' table!!"
524#define SK_PNMI_ERR051 (SK_ERRBASE_PNMI + 51)
525#define SK_PNMI_ERR051MSG "SkPnmiEvent: Port switch suspicious"
526#define SK_PNMI_ERR052 (SK_ERRBASE_PNMI + 52)
527#define SK_PNMI_ERR052MSG ""
528#define SK_PNMI_ERR053 (SK_ERRBASE_PNMI + 53)
529#define SK_PNMI_ERR053MSG "General: Driver release date not initialized"
530#define SK_PNMI_ERR054 (SK_ERRBASE_PNMI + 54)
531#define SK_PNMI_ERR054MSG "General: Driver release date string too long"
532#define SK_PNMI_ERR055 (SK_ERRBASE_PNMI + 55)
533#define SK_PNMI_ERR055MSG "General: Driver file name not initialized"
534#define SK_PNMI_ERR056 (SK_ERRBASE_PNMI + 56)
535#define SK_PNMI_ERR056MSG "General: Driver file name string too long"
536
537/*
538 * Management counter macros called by the driver
539 */
540#define SK_PNMI_SET_DRIVER_DESCR(pAC,v) ((pAC)->Pnmi.pDriverDescription = \
541 (char *)(v))
542
543#define SK_PNMI_SET_DRIVER_VER(pAC,v) ((pAC)->Pnmi.pDriverVersion = \
544 (char *)(v))
545
546#define SK_PNMI_SET_DRIVER_RELDATE(pAC,v) ((pAC)->Pnmi.pDriverReleaseDate = \
547 (char *)(v))
548
549#define SK_PNMI_SET_DRIVER_FILENAME(pAC,v) ((pAC)->Pnmi.pDriverFileName = \
550 (char *)(v))
551
552#define SK_PNMI_CNT_TX_QUEUE_LEN(pAC,v,p) \
553 { \
554 (pAC)->Pnmi.Port[p].TxSwQueueLen = (SK_U64)(v); \
555 if ((pAC)->Pnmi.Port[p].TxSwQueueLen > (pAC)->Pnmi.Port[p].TxSwQueueMax) { \
556 (pAC)->Pnmi.Port[p].TxSwQueueMax = (pAC)->Pnmi.Port[p].TxSwQueueLen; \
557 } \
558 }
559#define SK_PNMI_CNT_TX_RETRY(pAC,p) (((pAC)->Pnmi.Port[p].TxRetryCts)++)
560#define SK_PNMI_CNT_RX_INTR(pAC,p) (((pAC)->Pnmi.Port[p].RxIntrCts)++)
561#define SK_PNMI_CNT_TX_INTR(pAC,p) (((pAC)->Pnmi.Port[p].TxIntrCts)++)
562#define SK_PNMI_CNT_NO_RX_BUF(pAC,p) (((pAC)->Pnmi.Port[p].RxNoBufCts)++)
563#define SK_PNMI_CNT_NO_TX_BUF(pAC,p) (((pAC)->Pnmi.Port[p].TxNoBufCts)++)
564#define SK_PNMI_CNT_USED_TX_DESCR(pAC,v,p) \
565 ((pAC)->Pnmi.Port[p].TxUsedDescrNo=(SK_U64)(v));
566#define SK_PNMI_CNT_RX_OCTETS_DELIVERED(pAC,v,p) \
567 { \
568 ((pAC)->Pnmi.Port[p].RxDeliveredCts)++; \
569 (pAC)->Pnmi.Port[p].RxOctetsDeliveredCts += (SK_U64)(v); \
570 }
571#define SK_PNMI_CNT_ERR_RECOVERY(pAC,p) (((pAC)->Pnmi.Port[p].ErrRecoveryCts)++);
572
573#define SK_PNMI_CNT_SYNC_OCTETS(pAC,p,v) \
574 { \
575 if ((p) < SK_MAX_MACS) { \
576 ((pAC)->Pnmi.Port[p].StatSyncCts)++; \
577 (pAC)->Pnmi.Port[p].StatSyncOctetsCts += (SK_U64)(v); \
578 } \
579 }
580
581#define SK_PNMI_CNT_RX_LONGFRAMES(pAC,p) \
582 { \
583 if ((p) < SK_MAX_MACS) { \
584 ((pAC)->Pnmi.Port[p].StatRxLongFrameCts++); \
585 } \
586 }
587
588#define SK_PNMI_CNT_RX_FRAMETOOLONG(pAC,p) \
589 { \
590 if ((p) < SK_MAX_MACS) { \
591 ((pAC)->Pnmi.Port[p].StatRxFrameTooLongCts++); \
592 } \
593 }
594
595#define SK_PNMI_CNT_RX_PMACC_ERR(pAC,p) \
596 { \
597 if ((p) < SK_MAX_MACS) { \
598 ((pAC)->Pnmi.Port[p].StatRxPMaccErr++); \
599 } \
600 }
601
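
A hypothetical sketch of how a driver's fast path would feed the counter macros above, with simplified stand-ins for SK_AC and the per-port PNMI block; the field names mirror the macros, but everything here is illustrative rather than the real sk98lin code:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SK_MAX_MACS 2   /* assumption for this sketch */

struct pnmi_port {      /* cut-down stand-in for the per-port PNMI counters */
	uint64_t TxSwQueueLen;
	uint64_t TxSwQueueMax;
	uint64_t RxIntrCts;
	uint64_t RxNoBufCts;
};

struct adapter {        /* stand-in for SK_AC with just the Pnmi.Port[] part */
	struct { struct pnmi_port Port[SK_MAX_MACS]; } Pnmi;
};

#define CNT_TX_QUEUE_LEN(pAC, v, p) do { \
		(pAC)->Pnmi.Port[p].TxSwQueueLen = (uint64_t)(v); \
		if ((pAC)->Pnmi.Port[p].TxSwQueueLen > (pAC)->Pnmi.Port[p].TxSwQueueMax) \
			(pAC)->Pnmi.Port[p].TxSwQueueMax = (pAC)->Pnmi.Port[p].TxSwQueueLen; \
	} while (0)

#define CNT_RX_INTR(pAC, p)   ((pAC)->Pnmi.Port[p].RxIntrCts++)
#define CNT_NO_RX_BUF(pAC, p) ((pAC)->Pnmi.Port[p].RxNoBufCts++)

int main(void)
{
	struct adapter Ac;
	int Port = 0;

	memset(&Ac, 0, sizeof(Ac));

	CNT_RX_INTR(&Ac, Port);           /* one receive interrupt */
	CNT_NO_RX_BUF(&Ac, Port);         /* one frame dropped for lack of buffers */
	CNT_TX_QUEUE_LEN(&Ac, 7, Port);   /* queue-length sample; the maximum is tracked */

	printf("rx irq=%llu  no-buf=%llu  tx queue max=%llu\n",
	       (unsigned long long)Ac.Pnmi.Port[Port].RxIntrCts,
	       (unsigned long long)Ac.Pnmi.Port[Port].RxNoBufCts,
	       (unsigned long long)Ac.Pnmi.Port[Port].TxSwQueueMax);
	return 0;
}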
602/*
603 * Conversion Macros
604 */
605#define SK_PNMI_PORT_INST2LOG(i) ((unsigned int)(i) - 1)
606#define SK_PNMI_PORT_LOG2INST(l) ((unsigned int)(l) + 1)
607#define SK_PNMI_PORT_PHYS2LOG(p) ((unsigned int)(p) + 1)
608#define SK_PNMI_PORT_LOG2PHYS(pAC,l) ((unsigned int)(l) - 1)
609#define SK_PNMI_PORT_PHYS2INST(pAC,p) \
610 (pAC->Pnmi.DualNetActiveFlag ? 2 : ((unsigned int)(p) + 2))
611#define SK_PNMI_PORT_INST2PHYS(pAC,i) ((unsigned int)(i) - 2)
612
613/*
614 * Structure definition for SkPnmiGetStruct and SkPnmiSetStruct
615 */
616#define SK_PNMI_VPD_KEY_SIZE 5
617#define SK_PNMI_VPD_BUFSIZE (VPD_SIZE)
618#define SK_PNMI_VPD_ENTRIES (VPD_SIZE / 4)
619#define SK_PNMI_VPD_DATALEN 128 /* Number of data bytes */
620
621#define SK_PNMI_MULTICAST_LISTLEN 64
622#define SK_PNMI_SENSOR_ENTRIES (SK_MAX_SENSORS)
623#define SK_PNMI_CHECKSUM_ENTRIES 3
624#define SK_PNMI_MAC_ENTRIES (SK_MAX_MACS + 1)
625#define SK_PNMI_MONITOR_ENTRIES 20
626#define SK_PNMI_TRAP_ENTRIES 10
627#define SK_PNMI_TRAPLEN 128
628#define SK_PNMI_STRINGLEN1 80
629#define SK_PNMI_STRINGLEN2 25
630#define SK_PNMI_TRAP_QUEUE_LEN 512
631
632typedef struct s_PnmiVpd {
633 char VpdKey[SK_PNMI_VPD_KEY_SIZE];
634 char VpdValue[SK_PNMI_VPD_DATALEN];
635 SK_U8 VpdAccess;
636 SK_U8 VpdAction;
637} SK_PNMI_VPD;
638
639typedef struct s_PnmiSensor {
640 SK_U8 SensorIndex;
641 char SensorDescr[SK_PNMI_STRINGLEN2];
642 SK_U8 SensorType;
643 SK_U32 SensorValue;
644 SK_U32 SensorWarningThresholdLow;
645 SK_U32 SensorWarningThresholdHigh;
646 SK_U32 SensorErrorThresholdLow;
647 SK_U32 SensorErrorThresholdHigh;
648 SK_U8 SensorStatus;
649 SK_U64 SensorWarningCts;
650 SK_U64 SensorErrorCts;
651 SK_U64 SensorWarningTimestamp;
652 SK_U64 SensorErrorTimestamp;
653} SK_PNMI_SENSOR;
654
655typedef struct s_PnmiChecksum {
656 SK_U64 ChecksumRxOkCts;
657 SK_U64 ChecksumRxUnableCts;
658 SK_U64 ChecksumRxErrCts;
659 SK_U64 ChecksumTxOkCts;
660 SK_U64 ChecksumTxUnableCts;
661} SK_PNMI_CHECKSUM;
662
663typedef struct s_PnmiStat {
664 SK_U64 StatTxOkCts;
665 SK_U64 StatTxOctetsOkCts;
666 SK_U64 StatTxBroadcastOkCts;
667 SK_U64 StatTxMulticastOkCts;
668 SK_U64 StatTxUnicastOkCts;
669 SK_U64 StatTxLongFramesCts;
670 SK_U64 StatTxBurstCts;
671 SK_U64 StatTxPauseMacCtrlCts;
672 SK_U64 StatTxMacCtrlCts;
673 SK_U64 StatTxSingleCollisionCts;
674 SK_U64 StatTxMultipleCollisionCts;
675 SK_U64 StatTxExcessiveCollisionCts;
676 SK_U64 StatTxLateCollisionCts;
677 SK_U64 StatTxDeferralCts;
678 SK_U64 StatTxExcessiveDeferralCts;
679 SK_U64 StatTxFifoUnderrunCts;
680 SK_U64 StatTxCarrierCts;
681 SK_U64 Dummy1; /* StatTxUtilization */
682 SK_U64 StatTx64Cts;
683 SK_U64 StatTx127Cts;
684 SK_U64 StatTx255Cts;
685 SK_U64 StatTx511Cts;
686 SK_U64 StatTx1023Cts;
687 SK_U64 StatTxMaxCts;
688 SK_U64 StatTxSyncCts;
689 SK_U64 StatTxSyncOctetsCts;
690 SK_U64 StatRxOkCts;
691 SK_U64 StatRxOctetsOkCts;
692 SK_U64 StatRxBroadcastOkCts;
693 SK_U64 StatRxMulticastOkCts;
694 SK_U64 StatRxUnicastOkCts;
695 SK_U64 StatRxLongFramesCts;
696 SK_U64 StatRxPauseMacCtrlCts;
697 SK_U64 StatRxMacCtrlCts;
698 SK_U64 StatRxPauseMacCtrlErrorCts;
699 SK_U64 StatRxMacCtrlUnknownCts;
700 SK_U64 StatRxBurstCts;
701 SK_U64 StatRxMissedCts;
702 SK_U64 StatRxFramingCts;
703 SK_U64 StatRxFifoOverflowCts;
704 SK_U64 StatRxJabberCts;
705 SK_U64 StatRxCarrierCts;
706 SK_U64 StatRxIRLengthCts;
707 SK_U64 StatRxSymbolCts;
708 SK_U64 StatRxShortsCts;
709 SK_U64 StatRxRuntCts;
710 SK_U64 StatRxCextCts;
711 SK_U64 StatRxTooLongCts;
712 SK_U64 StatRxFcsCts;
713 SK_U64 Dummy2; /* StatRxUtilization */
714 SK_U64 StatRx64Cts;
715 SK_U64 StatRx127Cts;
716 SK_U64 StatRx255Cts;
717 SK_U64 StatRx511Cts;
718 SK_U64 StatRx1023Cts;
719 SK_U64 StatRxMaxCts;
720} SK_PNMI_STAT;
721
722typedef struct s_PnmiConf {
723 char ConfMacCurrentAddr[6];
724 char ConfMacFactoryAddr[6];
725 SK_U8 ConfPMD;
726 SK_U8 ConfConnector;
727 SK_U32 ConfPhyType;
728 SK_U32 ConfPhyMode;
729 SK_U8 ConfLinkCapability;
730 SK_U8 ConfLinkMode;
731 SK_U8 ConfLinkModeStatus;
732 SK_U8 ConfLinkStatus;
733 SK_U8 ConfFlowCtrlCapability;
734 SK_U8 ConfFlowCtrlMode;
735 SK_U8 ConfFlowCtrlStatus;
736 SK_U8 ConfPhyOperationCapability;
737 SK_U8 ConfPhyOperationMode;
738 SK_U8 ConfPhyOperationStatus;
739 SK_U8 ConfSpeedCapability;
740 SK_U8 ConfSpeedMode;
741 SK_U8 ConfSpeedStatus;
742} SK_PNMI_CONF;
743
744typedef struct s_PnmiRlmt {
745 SK_U32 RlmtIndex;
746 SK_U32 RlmtStatus;
747 SK_U64 RlmtTxHelloCts;
748 SK_U64 RlmtRxHelloCts;
749 SK_U64 RlmtTxSpHelloReqCts;
750 SK_U64 RlmtRxSpHelloCts;
751} SK_PNMI_RLMT;
752
753typedef struct s_PnmiRlmtMonitor {
754 SK_U32 RlmtMonitorIndex;
755 char RlmtMonitorAddr[6];
756 SK_U64 RlmtMonitorErrorCts;
757 SK_U64 RlmtMonitorTimestamp;
758 SK_U8 RlmtMonitorAdmin;
759} SK_PNMI_RLMT_MONITOR;
760
761typedef struct s_PnmiRequestStatus {
762 SK_U32 ErrorStatus;
763 SK_U32 ErrorOffset;
764} SK_PNMI_REQUEST_STATUS;
765
766typedef struct s_PnmiStrucData {
767 SK_U32 MgmtDBVersion;
768 SK_PNMI_REQUEST_STATUS ReturnStatus;
769 SK_U32 VpdFreeBytes;
770 char VpdEntriesList[SK_PNMI_VPD_ENTRIES * SK_PNMI_VPD_KEY_SIZE];
771 SK_U32 VpdEntriesNumber;
772 SK_PNMI_VPD Vpd[SK_PNMI_VPD_ENTRIES];
773 SK_U32 PortNumber;
774 SK_U32 DeviceType;
775 char DriverDescr[SK_PNMI_STRINGLEN1];
776 char DriverVersion[SK_PNMI_STRINGLEN2];
777 char DriverReleaseDate[SK_PNMI_STRINGLEN1];
778 char DriverFileName[SK_PNMI_STRINGLEN1];
779 char HwDescr[SK_PNMI_STRINGLEN1];
780 char HwVersion[SK_PNMI_STRINGLEN2];
781 SK_U16 Chipset;
782 SK_U32 ChipId;
783 SK_U8 VauxAvail;
784 SK_U32 RamSize;
785 SK_U32 MtuSize;
786 SK_U32 Action;
787 SK_U32 TestResult;
788 SK_U8 BusType;
789 SK_U8 BusSpeed;
790 SK_U8 BusWidth;
791 SK_U8 SensorNumber;
792 SK_PNMI_SENSOR Sensor[SK_PNMI_SENSOR_ENTRIES];
793 SK_U8 ChecksumNumber;
794 SK_PNMI_CHECKSUM Checksum[SK_PNMI_CHECKSUM_ENTRIES];
795 SK_PNMI_STAT Stat[SK_PNMI_MAC_ENTRIES];
796 SK_PNMI_CONF Conf[SK_PNMI_MAC_ENTRIES];
797 SK_U8 RlmtMode;
798 SK_U32 RlmtPortNumber;
799 SK_U8 RlmtPortActive;
800 SK_U8 RlmtPortPreferred;
801 SK_U64 RlmtChangeCts;
802 SK_U64 RlmtChangeTime;
803 SK_U64 RlmtChangeEstimate;
804 SK_U64 RlmtChangeThreshold;
805 SK_PNMI_RLMT Rlmt[SK_MAX_MACS];
806 SK_U32 RlmtMonitorNumber;
807 SK_PNMI_RLMT_MONITOR RlmtMonitor[SK_PNMI_MONITOR_ENTRIES];
808 SK_U32 TrapNumber;
809 SK_U8 Trap[SK_PNMI_TRAP_QUEUE_LEN];
810 SK_U64 TxSwQueueLen;
811 SK_U64 TxSwQueueMax;
812 SK_U64 TxRetryCts;
813 SK_U64 RxIntrCts;
814 SK_U64 TxIntrCts;
815 SK_U64 RxNoBufCts;
816 SK_U64 TxNoBufCts;
817 SK_U64 TxUsedDescrNo;
818 SK_U64 RxDeliveredCts;
819 SK_U64 RxOctetsDeliveredCts;
820 SK_U64 RxHwErrorsCts;
821 SK_U64 TxHwErrorsCts;
822 SK_U64 InErrorsCts;
823 SK_U64 OutErrorsCts;
824 SK_U64 ErrRecoveryCts;
825 SK_U64 SysUpTime;
826} SK_PNMI_STRUCT_DATA;
827
828#define SK_PNMI_STRUCT_SIZE (sizeof(SK_PNMI_STRUCT_DATA))
829#define SK_PNMI_MIN_STRUCT_SIZE ((unsigned int)(SK_UPTR)\
830 &(((SK_PNMI_STRUCT_DATA *)0)->VpdFreeBytes))
831 /*
832 * ReturnStatus field
833 * must be located
834 * before VpdFreeBytes
835 */
836
837/*
838 * Various definitions
839 */
840#define SK_PNMI_MAX_PROTOS 3
841
842#define SK_PNMI_CNT_NO 66 /* Must have the value of the enum
843 * SK_PNMI_MAX_IDX. Define SK_PNMI_CHECK
844 * for check while init phase 1
845 */
846
847/*
848 * Estimate data structure
849 */
850typedef struct s_PnmiEstimate {
851 unsigned int EstValueIndex;
852 SK_U64 EstValue[7];
853 SK_U64 Estimate;
854 SK_TIMER EstTimer;
855} SK_PNMI_ESTIMATE;
856
857
858/*
859 * VCT timer data structure
860 */
861typedef struct s_VctTimer {
862 SK_TIMER VctTimer;
863} SK_PNMI_VCT_TIMER;
864
865
866/*
867 * PNMI specific adapter context structure
868 */
869typedef struct s_PnmiPort {
870 SK_U64 StatSyncCts;
871 SK_U64 StatSyncOctetsCts;
872 SK_U64 StatRxLongFrameCts;
873 SK_U64 StatRxFrameTooLongCts;
874 SK_U64 StatRxPMaccErr;
875 SK_U64 TxSwQueueLen;
876 SK_U64 TxSwQueueMax;
877 SK_U64 TxRetryCts;
878 SK_U64 RxIntrCts;
879 SK_U64 TxIntrCts;
880 SK_U64 RxNoBufCts;
881 SK_U64 TxNoBufCts;
882 SK_U64 TxUsedDescrNo;
883 SK_U64 RxDeliveredCts;
884 SK_U64 RxOctetsDeliveredCts;
885 SK_U64 RxHwErrorsCts;
886 SK_U64 TxHwErrorsCts;
887 SK_U64 InErrorsCts;
888 SK_U64 OutErrorsCts;
889 SK_U64 ErrRecoveryCts;
890 SK_U64 RxShortZeroMark;
891 SK_U64 CounterOffset[SK_PNMI_CNT_NO];
892 SK_U32 CounterHigh[SK_PNMI_CNT_NO];
893 SK_BOOL ActiveFlag;
894 SK_U8 Align[3];
895} SK_PNMI_PORT;
896
897
898typedef struct s_PnmiData {
899 SK_PNMI_PORT Port [SK_MAX_MACS];
900 SK_PNMI_PORT BufPort [SK_MAX_MACS]; /* 2002-09-13 pweber */
901 SK_U64 VirtualCounterOffset[SK_PNMI_CNT_NO];
902 SK_U32 TestResult;
903 char HwVersion[10];
904 SK_U16 Align01;
905
906 char *pDriverDescription;
907 char *pDriverVersion;
908 char *pDriverReleaseDate;
909 char *pDriverFileName;
910
911 int MacUpdatedFlag;
912 int RlmtUpdatedFlag;
913 int SirqUpdatedFlag;
914
915 SK_U64 RlmtChangeCts;
916 SK_U64 RlmtChangeTime;
917 SK_PNMI_ESTIMATE RlmtChangeEstimate;
918 SK_U64 RlmtChangeThreshold;
919
920 SK_U64 StartUpTime;
921 SK_U32 DeviceType;
922 char PciBusSpeed;
923 char PciBusWidth;
924 char Chipset;
925 char PMD;
926 char Connector;
927 SK_BOOL DualNetActiveFlag;
928 SK_U16 Align02;
929
930 char TrapBuf[SK_PNMI_TRAP_QUEUE_LEN];
931 unsigned int TrapBufFree;
932 unsigned int TrapQueueBeg;
933 unsigned int TrapQueueEnd;
934 unsigned int TrapBufPad;
935 unsigned int TrapUnique;
936 SK_U8 VctStatus[SK_MAX_MACS];
937 SK_PNMI_VCT VctBackup[SK_MAX_MACS];
938 SK_PNMI_VCT_TIMER VctTimeout[SK_MAX_MACS];
939#ifdef SK_DIAG_SUPPORT
940 SK_U32 DiagAttached;
941#endif /* SK_DIAG_SUPPORT */
942} SK_PNMI;
943
944
945/*
946 * Function prototypes
947 */
948extern int SkPnmiInit(SK_AC *pAC, SK_IOC IoC, int Level);
949extern int SkPnmiSetVar(SK_AC *pAC, SK_IOC IoC, SK_U32 Id, void* pBuf,
950 unsigned int *pLen, SK_U32 Instance, SK_U32 NetIndex);
951extern int SkPnmiGetStruct(SK_AC *pAC, SK_IOC IoC, void* pBuf,
952 unsigned int *pLen, SK_U32 NetIndex);
953extern int SkPnmiPreSetStruct(SK_AC *pAC, SK_IOC IoC, void* pBuf,
954 unsigned int *pLen, SK_U32 NetIndex);
955extern int SkPnmiSetStruct(SK_AC *pAC, SK_IOC IoC, void* pBuf,
956 unsigned int *pLen, SK_U32 NetIndex);
957extern int SkPnmiEvent(SK_AC *pAC, SK_IOC IoC, SK_U32 Event,
958 SK_EVPARA Param);
959extern int SkPnmiGenIoctl(SK_AC *pAC, SK_IOC IoC, void * pBuf,
960 unsigned int * pLen, SK_U32 NetIndex);
961
962#endif
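
A hypothetical caller sketch for SkPnmiGetStruct(): the buffer-plus-in/out-length calling convention and the SK_PNMI_ERR_* checks come from the declarations above, while the stubbed types, the stub body, its 128-byte placeholder size, and the query-then-retry pattern are assumptions made only so the snippet is self-contained.

#include <stdio.h>
#include <stdlib.h>

#define SK_PNMI_ERR_OK        0     /* from the return values above */
#define SK_PNMI_ERR_TOO_SHORT 2

typedef struct { int dummy; } SK_AC;   /* stand-in for the adapter context */
typedef int SK_IOC;                    /* stand-in for the I/O context */

/* Stub with the declared calling convention; the real function fills a
 * SK_PNMI_STRUCT_DATA image.  The 128-byte size is a placeholder. */
static int SkPnmiGetStruct(SK_AC *pAC, SK_IOC IoC, void *pBuf,
                           unsigned int *pLen, unsigned int NetIndex)
{
	(void)pAC; (void)IoC; (void)NetIndex;
	if (*pLen < 128) {
		*pLen = 128;                 /* report the required size */
		return SK_PNMI_ERR_TOO_SHORT;
	}
	((char *)pBuf)[0] = 1;               /* pretend the structure was filled */
	return SK_PNMI_ERR_OK;
}

int main(void)
{
	SK_AC Ac = { 0 };
	unsigned int Len = 0;
	void *pBuf;

	/* Assumed query-then-retry pattern: the first call reports the needed size. */
	if (SkPnmiGetStruct(&Ac, 0, NULL, &Len, 0) == SK_PNMI_ERR_TOO_SHORT) {
		pBuf = malloc(Len);
		if (pBuf != NULL &&
		    SkPnmiGetStruct(&Ac, 0, pBuf, &Len, 0) == SK_PNMI_ERR_OK)
			printf("got %u bytes of PNMI data\n", Len);
		free(pBuf);
	}
	return 0;
}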
diff --git a/drivers/net/sk98lin/h/skgesirq.h b/drivers/net/sk98lin/h/skgesirq.h
deleted file mode 100644
index 3eec6274e413..000000000000
--- a/drivers/net/sk98lin/h/skgesirq.h
+++ /dev/null
@@ -1,110 +0,0 @@
1/******************************************************************************
2 *
3 * Name: skgesirq.h
4 * Project: Gigabit Ethernet Adapters, Common Modules
5 * Version: $Revision: 1.30 $
6 * Date: $Date: 2003/07/04 12:34:13 $
7 * Purpose: SK specific Gigabit Ethernet special IRQ functions
8 *
9 ******************************************************************************/
10
11/******************************************************************************
12 *
13 * (C)Copyright 1998-2002 SysKonnect.
14 * (C)Copyright 2002-2003 Marvell.
15 *
16 * This program is free software; you can redistribute it and/or modify
17 * it under the terms of the GNU General Public License as published by
18 * the Free Software Foundation; either version 2 of the License, or
19 * (at your option) any later version.
20 *
21 * The information in this file is provided "AS IS" without warranty.
22 *
23 ******************************************************************************/
24
25#ifndef _INC_SKGESIRQ_H_
26#define _INC_SKGESIRQ_H_
27
28/* Define return codes of SkGePortCheckUp and CheckShort */
29#define SK_HW_PS_NONE 0 /* No action needed */
30#define SK_HW_PS_RESTART 1 /* Restart needed */
31#define SK_HW_PS_LINK 2 /* Link Up actions needed */
32
33/*
34 * Define the Event the special IRQ/INI module can handle
35 */
36#define SK_HWEV_WATIM 1 /* Timeout for WA Errata #2 XMAC */
37#define SK_HWEV_PORT_START 2 /* Port Start Event by RLMT */
38#define SK_HWEV_PORT_STOP 3 /* Port Stop Event by RLMT */
39#define SK_HWEV_CLEAR_STAT 4 /* Clear Statistics by PNMI */
40#define SK_HWEV_UPDATE_STAT 5 /* Update Statistics by PNMI */
41#define SK_HWEV_SET_LMODE 6 /* Set Link Mode by PNMI */
42#define SK_HWEV_SET_FLOWMODE 7 /* Set Flow Control Mode by PNMI */
43#define SK_HWEV_SET_ROLE 8 /* Set Master/Slave (Role) by PNMI */
44#define SK_HWEV_SET_SPEED 9 /* Set Link Speed by PNMI */
45#define SK_HWEV_HALFDUP_CHK 10 /* Half Duplex Hangup Workaround */
46
47#define SK_WA_ACT_TIME (5000000UL) /* 5 sec */
48#define SK_WA_INA_TIME (100000UL) /* 100 msec */
49
50#define SK_HALFDUP_CHK_TIME (10000UL) /* 10 msec */
51
52/*
53 * Define the error numbers and messages
54 */
55#define SKERR_SIRQ_E001 (SK_ERRBASE_SIRQ+0)
56#define SKERR_SIRQ_E001MSG "Unknown event"
57#define SKERR_SIRQ_E002 (SKERR_SIRQ_E001+1)
58#define SKERR_SIRQ_E002MSG "Packet timeout RX1"
59#define SKERR_SIRQ_E003 (SKERR_SIRQ_E002+1)
60#define SKERR_SIRQ_E003MSG "Packet timeout RX2"
61#define SKERR_SIRQ_E004 (SKERR_SIRQ_E003+1)
62#define SKERR_SIRQ_E004MSG "MAC 1 not correctly initialized"
63#define SKERR_SIRQ_E005 (SKERR_SIRQ_E004+1)
64#define SKERR_SIRQ_E005MSG "MAC 2 not correctly initialized"
65#define SKERR_SIRQ_E006 (SKERR_SIRQ_E005+1)
66#define SKERR_SIRQ_E006MSG "CHECK failure R1"
67#define SKERR_SIRQ_E007 (SKERR_SIRQ_E006+1)
68#define SKERR_SIRQ_E007MSG "CHECK failure R2"
69#define SKERR_SIRQ_E008 (SKERR_SIRQ_E007+1)
70#define SKERR_SIRQ_E008MSG "CHECK failure XS1"
71#define SKERR_SIRQ_E009 (SKERR_SIRQ_E008+1)
72#define SKERR_SIRQ_E009MSG "CHECK failure XA1"
73#define SKERR_SIRQ_E010 (SKERR_SIRQ_E009+1)
74#define SKERR_SIRQ_E010MSG "CHECK failure XS2"
75#define SKERR_SIRQ_E011 (SKERR_SIRQ_E010+1)
76#define SKERR_SIRQ_E011MSG "CHECK failure XA2"
77#define SKERR_SIRQ_E012 (SKERR_SIRQ_E011+1)
78#define SKERR_SIRQ_E012MSG "unexpected IRQ Master error"
79#define SKERR_SIRQ_E013 (SKERR_SIRQ_E012+1)
80#define SKERR_SIRQ_E013MSG "unexpected IRQ Status error"
81#define SKERR_SIRQ_E014 (SKERR_SIRQ_E013+1)
82#define SKERR_SIRQ_E014MSG "Parity error on RAM (read)"
83#define SKERR_SIRQ_E015 (SKERR_SIRQ_E014+1)
84#define SKERR_SIRQ_E015MSG "Parity error on RAM (write)"
85#define SKERR_SIRQ_E016 (SKERR_SIRQ_E015+1)
86#define SKERR_SIRQ_E016MSG "Parity error MAC 1"
87#define SKERR_SIRQ_E017 (SKERR_SIRQ_E016+1)
88#define SKERR_SIRQ_E017MSG "Parity error MAC 2"
89#define SKERR_SIRQ_E018 (SKERR_SIRQ_E017+1)
90#define SKERR_SIRQ_E018MSG "Parity error RX 1"
91#define SKERR_SIRQ_E019 (SKERR_SIRQ_E018+1)
92#define SKERR_SIRQ_E019MSG "Parity error RX 2"
93#define SKERR_SIRQ_E020 (SKERR_SIRQ_E019+1)
94#define SKERR_SIRQ_E020MSG "MAC transmit FIFO underrun"
95#define SKERR_SIRQ_E021 (SKERR_SIRQ_E020+1)
96#define SKERR_SIRQ_E021MSG "Spurious TWSI interrupt"
97#define SKERR_SIRQ_E022 (SKERR_SIRQ_E021+1)
98#define SKERR_SIRQ_E022MSG "Cable pair swap error"
99#define SKERR_SIRQ_E023 (SKERR_SIRQ_E022+1)
100#define SKERR_SIRQ_E023MSG "Auto-negotiation error"
101#define SKERR_SIRQ_E024 (SKERR_SIRQ_E023+1)
102#define SKERR_SIRQ_E024MSG "FIFO overflow error"
103#define SKERR_SIRQ_E025 (SKERR_SIRQ_E024+1)
104#define SKERR_SIRQ_E025MSG "2 Pair Downshift detected"
105
106extern void SkGeSirqIsr(SK_AC *pAC, SK_IOC IoC, SK_U32 Istatus);
107extern int SkGeSirqEvent(SK_AC *pAC, SK_IOC IoC, SK_U32 Event, SK_EVPARA Para);
108extern void SkHWLinkDown(SK_AC *pAC, SK_IOC IoC, int Port);
109
110#endif /* _INC_SKGESIRQ_H_ */
diff --git a/drivers/net/sk98lin/h/ski2c.h b/drivers/net/sk98lin/h/ski2c.h
deleted file mode 100644
index 6a63f4a15de6..000000000000
--- a/drivers/net/sk98lin/h/ski2c.h
+++ /dev/null
@@ -1,174 +0,0 @@
1/******************************************************************************
2 *
3 * Name: ski2c.h
4 * Project: Gigabit Ethernet Adapters, TWSI-Module
5 * Version: $Revision: 1.35 $
6 * Date: $Date: 2003/10/20 09:06:30 $
7 * Purpose: Defines to access Voltage and Temperature Sensor
8 *
9 ******************************************************************************/
10
11/******************************************************************************
12 *
13 * (C)Copyright 1998-2002 SysKonnect.
14 * (C)Copyright 2002-2003 Marvell.
15 *
16 * This program is free software; you can redistribute it and/or modify
17 * it under the terms of the GNU General Public License as published by
18 * the Free Software Foundation; either version 2 of the License, or
19 * (at your option) any later version.
20 *
21 * The information in this file is provided "AS IS" without warranty.
22 *
23 ******************************************************************************/
24
25/*
26 * SKI2C.H contains all I2C specific defines
27 */
28
29#ifndef _SKI2C_H_
30#define _SKI2C_H_
31
32typedef struct s_Sensor SK_SENSOR;
33
34#include "h/skgei2c.h"
35
36/*
37 * Define the I2C events.
38 */
39#define SK_I2CEV_IRQ 1 /* IRQ happened Event */
40#define SK_I2CEV_TIM 2 /* Timeout event */
41#define SK_I2CEV_CLEAR 3 /* Clear MIB Values */
42
43/*
44 * Define READ and WRITE Constants.
45 */
46#define I2C_READ 0
47#define I2C_WRITE 1
48#define I2C_BURST 1
49#define I2C_SINGLE 0
50
51#define SKERR_I2C_E001 (SK_ERRBASE_I2C+0)
52#define SKERR_I2C_E001MSG "Sensor index unknown"
53#define SKERR_I2C_E002 (SKERR_I2C_E001+1)
54#define SKERR_I2C_E002MSG "TWSI: transfer does not complete"
55#define SKERR_I2C_E003 (SKERR_I2C_E002+1)
56#define SKERR_I2C_E003MSG "LM80: NAK on device send"
57#define SKERR_I2C_E004 (SKERR_I2C_E003+1)
58#define SKERR_I2C_E004MSG "LM80: NAK on register send"
59#define SKERR_I2C_E005 (SKERR_I2C_E004+1)
60#define SKERR_I2C_E005MSG "LM80: NAK on device (2) send"
61#define SKERR_I2C_E006 (SKERR_I2C_E005+1)
62#define SKERR_I2C_E006MSG "Unknown event"
63#define SKERR_I2C_E007 (SKERR_I2C_E006+1)
64#define SKERR_I2C_E007MSG "LM80 read out of state"
65#define SKERR_I2C_E008 (SKERR_I2C_E007+1)
66#define SKERR_I2C_E008MSG "Unexpected sensor read completed"
67#define SKERR_I2C_E009 (SKERR_I2C_E008+1)
68#define SKERR_I2C_E009MSG "WARNING: temperature sensor out of range"
69#define SKERR_I2C_E010 (SKERR_I2C_E009+1)
70#define SKERR_I2C_E010MSG "WARNING: voltage sensor out of range"
71#define SKERR_I2C_E011 (SKERR_I2C_E010+1)
72#define SKERR_I2C_E011MSG "ERROR: temperature sensor out of range"
73#define SKERR_I2C_E012 (SKERR_I2C_E011+1)
74#define SKERR_I2C_E012MSG "ERROR: voltage sensor out of range"
75#define SKERR_I2C_E013 (SKERR_I2C_E012+1)
76#define SKERR_I2C_E013MSG "ERROR: couldn't init sensor"
77#define SKERR_I2C_E014 (SKERR_I2C_E013+1)
78#define SKERR_I2C_E014MSG "WARNING: fan sensor out of range"
79#define SKERR_I2C_E015 (SKERR_I2C_E014+1)
80#define SKERR_I2C_E015MSG "ERROR: fan sensor out of range"
81#define SKERR_I2C_E016 (SKERR_I2C_E015+1)
82#define SKERR_I2C_E016MSG "TWSI: active transfer does not complete"
83
84/*
85 * Define Timeout values
86 */
87#define SK_I2C_TIM_LONG 2000000L /* 2 seconds */
88#define SK_I2C_TIM_SHORT 100000L /* 100 milliseconds */
89#define SK_I2C_TIM_WATCH 1000000L /* 1 second */
90
91/*
92 * Define trap and error log hold times
93 */
94#ifndef SK_SEN_ERR_TR_HOLD
95#define SK_SEN_ERR_TR_HOLD (4*SK_TICKS_PER_SEC)
96#endif
97#ifndef SK_SEN_ERR_LOG_HOLD
98#define SK_SEN_ERR_LOG_HOLD (60*SK_TICKS_PER_SEC)
99#endif
100#ifndef SK_SEN_WARN_TR_HOLD
101#define SK_SEN_WARN_TR_HOLD (15*SK_TICKS_PER_SEC)
102#endif
103#ifndef SK_SEN_WARN_LOG_HOLD
104#define SK_SEN_WARN_LOG_HOLD (15*60*SK_TICKS_PER_SEC)
105#endif
106
107/*
108 * Defines for SenType
109 */
110#define SK_SEN_UNKNOWN 0
111#define SK_SEN_TEMP 1
112#define SK_SEN_VOLT 2
113#define SK_SEN_FAN 3
114
115/*
116 * Define for the SenErrorFlag
117 */
118#define SK_SEN_ERR_NOT_PRESENT 0 /* Error Flag: Sensor not present */
119#define SK_SEN_ERR_OK 1 /* Error Flag: O.K. */
120#define SK_SEN_ERR_WARN 2 /* Error Flag: Warning */
121#define SK_SEN_ERR_ERR 3 /* Error Flag: Error */
122#define SK_SEN_ERR_FAULTY 4 /* Error Flag: Faulty */
123
124/*
125 * Define the Sensor struct
126 */
127struct s_Sensor {
128 char *SenDesc; /* Description */
129 int SenType; /* Voltage or Temperature */
130 SK_I32 SenValue; /* Current value of the sensor */
131	SK_I32	SenThreErrHigh;	/* High error Threshold of this sensor */
132	SK_I32	SenThreWarnHigh;	/* High warning Threshold of this sensor */
133 SK_I32 SenThreErrLow; /* Lower error Threshold of the sensor */
134 SK_I32 SenThreWarnLow; /* Lower warning Threshold of the sensor */
135 int SenErrFlag; /* Sensor indicated an error */
136 SK_BOOL SenInit; /* Is sensor initialized ? */
137 SK_U64 SenErrCts; /* Error trap counter */
138 SK_U64 SenWarnCts; /* Warning trap counter */
139 SK_U64 SenBegErrTS; /* Begin error timestamp */
140 SK_U64 SenBegWarnTS; /* Begin warning timestamp */
141 SK_U64 SenLastErrTrapTS; /* Last error trap timestamp */
142 SK_U64 SenLastErrLogTS; /* Last error log timestamp */
143 SK_U64 SenLastWarnTrapTS; /* Last warning trap timestamp */
144 SK_U64 SenLastWarnLogTS; /* Last warning log timestamp */
145 int SenState; /* Sensor State (see HW specific include) */
146 int (*SenRead)(SK_AC *pAC, SK_IOC IoC, struct s_Sensor *pSen);
147 /* Sensors read function */
148 SK_U16 SenReg; /* Register Address for this sensor */
149 SK_U8 SenDev; /* Device Selection for this sensor */
150};
151
152typedef struct s_I2c {
153 SK_SENSOR SenTable[SK_MAX_SENSORS]; /* Sensor Table */
154 int CurrSens; /* Which sensor is currently queried */
155 int MaxSens; /* Max. number of sensors */
156 int TimerMode; /* Use the timer also to watch the state machine */
157 int InitLevel; /* Initialized Level */
158#ifndef SK_DIAG
159 int DummyReads; /* Number of non-checked dummy reads */
160 SK_TIMER SenTimer; /* Sensors timer */
161#endif /* !SK_DIAG */
162} SK_I2C;
163
164extern int SkI2cInit(SK_AC *pAC, SK_IOC IoC, int Level);
165#ifdef SK_DIAG
166extern SK_U32 SkI2cRead(SK_AC *pAC, SK_IOC IoC, int Dev, int Size, int Reg,
167 int Burst);
168#else /* !SK_DIAG */
169extern int SkI2cEvent(SK_AC *pAC, SK_IOC IoC, SK_U32 Event, SK_EVPARA Para);
170extern void SkI2cWaitIrq(SK_AC *pAC, SK_IOC IoC);
171extern void SkI2cIsr(SK_AC *pAC, SK_IOC IoC);
172#endif /* !SK_DIAG */
173#endif	/* _SKI2C_H_ */
174
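For orientation, here is a minimal sketch of how the sensor table above could be polled, using only what this header declares (the SK_SENSOR fields and the SK_SEN_* flags) plus an SK_I2C instance reachable from the adapter context; the member name pAC->I2c and the helper name are illustrative, not part of the driver.

/*
 * Illustrative only: walk the sensor table and classify each reading
 * against its warning/error thresholds. Assumes pAC->I2c is the SK_I2C
 * instance kept in the adapter context (member name is hypothetical).
 */
static void SampleAllSensors(SK_AC *pAC, SK_IOC IoC)
{
	SK_I2C	*pI2c = &pAC->I2c;
	int	i;

	for (i = 0; i < pI2c->MaxSens; i++) {
		SK_SENSOR *pSen = &pI2c->SenTable[i];

		if (!pSen->SenInit || pSen->SenRead == NULL)
			continue;

		/* SenRead() refreshes pSen->SenValue over the TWSI bus. */
		(void)pSen->SenRead(pAC, IoC, pSen);

		if (pSen->SenValue > pSen->SenThreErrHigh ||
		    pSen->SenValue < pSen->SenThreErrLow) {
			pSen->SenErrFlag = SK_SEN_ERR_ERR;
		} else if (pSen->SenValue > pSen->SenThreWarnHigh ||
			   pSen->SenValue < pSen->SenThreWarnLow) {
			pSen->SenErrFlag = SK_SEN_ERR_WARN;
		} else {
			pSen->SenErrFlag = SK_SEN_ERR_OK;
		}
	}
}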
diff --git a/drivers/net/sk98lin/h/skqueue.h b/drivers/net/sk98lin/h/skqueue.h
deleted file mode 100644
index 2ec40d4fdf60..000000000000
--- a/drivers/net/sk98lin/h/skqueue.h
+++ /dev/null
@@ -1,94 +0,0 @@
1/******************************************************************************
2 *
3 * Name: skqueue.h
4 * Project: Gigabit Ethernet Adapters, Event Scheduler Module
5 * Version: $Revision: 1.16 $
6 * Date: $Date: 2003/09/16 12:50:32 $
7 * Purpose: Defines for the Event queue
8 *
9 ******************************************************************************/
10
11/******************************************************************************
12 *
13 * (C)Copyright 1998-2002 SysKonnect GmbH.
14 * (C)Copyright 2002-2003 Marvell.
15 *
16 * This program is free software; you can redistribute it and/or modify
17 * it under the terms of the GNU General Public License as published by
18 * the Free Software Foundation; either version 2 of the License, or
19 * (at your option) any later version.
20 *
21 * The information in this file is provided "AS IS" without warranty.
22 *
23 ******************************************************************************/
24
25/*
26 * SKQUEUE.H contains all defines and types for the event queue
27 */
28
29#ifndef _SKQUEUE_H_
30#define _SKQUEUE_H_
31
32
33/*
34 * define the event classes to be served
35 */
36#define SKGE_DRV 1 /* Driver Event Class */
37#define SKGE_RLMT 2 /* RLMT Event Class */
38#define SKGE_I2C 3 /* I2C Event Class */
39#define SKGE_PNMI 4 /* PNMI Event Class */
40#define SKGE_CSUM 5 /* Checksum Event Class */
41#define SKGE_HWAC 6 /* Hardware Access Event Class */
42
43#define SKGE_SWT 9 /* Software Timer Event Class */
44#define SKGE_LACP 10 /* LACP Aggregation Event Class */
45#define SKGE_RSF 11 /* RSF Aggregation Event Class */
46#define SKGE_MARKER 12 /* MARKER Aggregation Event Class */
47#define SKGE_FD 13 /* FD Distributor Event Class */
48
49/*
50 * define event queue as circular buffer
51 */
52#define SK_MAX_EVENT 64
53
54/*
55 * Parameter union for the Para stuff
56 */
57typedef union u_EvPara {
58 void *pParaPtr; /* Parameter Pointer */
59 SK_U64 Para64; /* Parameter 64bit version */
60 SK_U32 Para32[2]; /* Parameter Array of 32bit parameters */
61} SK_EVPARA;
62
63/*
64 * Event Queue
65 * skqueue.c
66 * events are class/value pairs
67 * class is addressee, e.g. RLMT, PNMI etc.
68 * value is command, e.g. line state change, ring op change etc.
69 */
70typedef struct s_EventElem {
71 SK_U32 Class; /* Event class */
72 SK_U32 Event; /* Event value */
73 SK_EVPARA Para; /* Event parameter */
74} SK_EVENTELEM;
75
76typedef struct s_Queue {
77 SK_EVENTELEM EvQueue[SK_MAX_EVENT];
78 SK_EVENTELEM *EvPut;
79 SK_EVENTELEM *EvGet;
80} SK_QUEUE;
81
82extern void SkEventInit(SK_AC *pAC, SK_IOC Ioc, int Level);
83extern void SkEventQueue(SK_AC *pAC, SK_U32 Class, SK_U32 Event,
84 SK_EVPARA Para);
85extern int SkEventDispatcher(SK_AC *pAC, SK_IOC Ioc);
86
87
88/* Define Error Numbers and messages */
89#define SKERR_Q_E001 (SK_ERRBASE_QUEUE+0)
90#define SKERR_Q_E001MSG "Event queue overflow"
91#define SKERR_Q_E002 (SKERR_Q_E001+1)
92#define SKERR_Q_E002MSG "Undefined event class"
93#endif /* _SKQUEUE_H_ */
94
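As a usage sketch of the API above: events are queued as class/value pairs and later drained by the dispatcher. The example below posts an I2C timeout event (SK_I2CEV_TIM from ski2c.h); the calling context and the choice of event are purely illustrative.

/*
 * Illustrative only: post an event for the I2C class and run the
 * dispatcher. Overflowing the 64-entry circular buffer is reported
 * by the queue module itself via SKERR_Q_E001.
 */
static void PostI2cTimeout(SK_AC *pAC, SK_IOC IoC)
{
	SK_EVPARA Para;

	Para.Para64 = 0;	/* this event carries no parameter */
	SkEventQueue(pAC, SKGE_I2C, SK_I2CEV_TIM, Para);

	(void)SkEventDispatcher(pAC, IoC);
}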
diff --git a/drivers/net/sk98lin/h/skrlmt.h b/drivers/net/sk98lin/h/skrlmt.h
deleted file mode 100644
index ca75dfdcf2d6..000000000000
--- a/drivers/net/sk98lin/h/skrlmt.h
+++ /dev/null
@@ -1,438 +0,0 @@
1/******************************************************************************
2 *
3 * Name: skrlmt.h
4 * Project: GEnesis, PCI Gigabit Ethernet Adapter
5 * Version: $Revision: 1.37 $
6 * Date: $Date: 2003/04/15 09:43:43 $
7 * Purpose: Header file for Redundant Link ManagemenT.
8 *
9 ******************************************************************************/
10
11/******************************************************************************
12 *
13 * (C)Copyright 1998-2002 SysKonnect GmbH.
14 * (C)Copyright 2002-2003 Marvell.
15 *
16 * This program is free software; you can redistribute it and/or modify
17 * it under the terms of the GNU General Public License as published by
18 * the Free Software Foundation; either version 2 of the License, or
19 * (at your option) any later version.
20 *
21 * The information in this file is provided "AS IS" without warranty.
22 *
23 ******************************************************************************/
24
25/******************************************************************************
26 *
27 * Description:
28 *
29 * This is the header file for Redundant Link ManagemenT.
30 *
31 * Include File Hierarchy:
32 *
33 * "skdrv1st.h"
34 * ...
35 * "sktypes.h"
36 * "skqueue.h"
37 * "skaddr.h"
38 * "skrlmt.h"
39 * ...
40 * "skdrv2nd.h"
41 *
42 ******************************************************************************/
43
44#ifndef __INC_SKRLMT_H
45#define __INC_SKRLMT_H
46
47#ifdef __cplusplus
48extern "C" {
49#endif /* cplusplus */
50
51/* defines ********************************************************************/
52
53#define SK_RLMT_NET_DOWN_TEMP 1 /* NET_DOWN due to last port down. */
54#define SK_RLMT_NET_DOWN_FINAL 2 /* NET_DOWN due to RLMT_STOP. */
55
56/* ----- Default queue sizes - must be multiples of 8 KB ----- */
57
58/* Less than 8 KB free in RX queue => pause frames. */
59#define SK_RLMT_STANDBY_QRXSIZE 128 /* Size of rx standby queue in KB. */
60#define SK_RLMT_STANDBY_QXASIZE 32 /* Size of async standby queue in KB. */
61#define SK_RLMT_STANDBY_QXSSIZE 0 /* Size of sync standby queue in KB. */
62
63#define SK_RLMT_MAX_TX_BUF_SIZE 60 /* Maximum RLMT transmit size. */
64
65/* ----- PORT states ----- */
66
67#define SK_RLMT_PS_INIT 0 /* Port state: Init. */
68#define SK_RLMT_PS_LINK_DOWN 1 /* Port state: Link down. */
69#define SK_RLMT_PS_DOWN 2 /* Port state: Port down. */
70#define SK_RLMT_PS_GOING_UP 3 /* Port state: Going up. */
71#define SK_RLMT_PS_UP 4 /* Port state: Up. */
72
73/* ----- RLMT states ----- */
74
75#define SK_RLMT_RS_INIT 0 /* RLMT state: Init. */
76#define SK_RLMT_RS_NET_DOWN 1 /* RLMT state: Net down. */
77#define SK_RLMT_RS_NET_UP 2 /* RLMT state: Net up. */
78
79/* ----- PORT events ----- */
80
81#define SK_RLMT_LINK_UP 1001 /* Link came up. */
82#define SK_RLMT_LINK_DOWN 1002 /* Link went down. */
83#define SK_RLMT_PORT_ADDR 1003 /* Port address changed. */
84
85/* ----- RLMT events ----- */
86
87#define SK_RLMT_START 2001 /* Start RLMT. */
88#define SK_RLMT_STOP 2002 /* Stop RLMT. */
89#define SK_RLMT_PACKET_RECEIVED 2003 /* Packet was received for RLMT. */
90#define SK_RLMT_STATS_CLEAR 2004 /* Clear statistics. */
91#define SK_RLMT_STATS_UPDATE 2005 /* Update statistics. */
92#define SK_RLMT_PREFPORT_CHANGE 2006 /* Change preferred port. */
93#define SK_RLMT_MODE_CHANGE 2007 /* New RlmtMode. */
94#define SK_RLMT_SET_NETS 2008 /* Number of Nets (1 or 2). */
95
96/* ----- RLMT mode bits ----- */
97
98/*
99 * CAUTION: These defines are private to RLMT.
100 * Please use the RLMT mode defines below.
101 */
102
103#define SK_RLMT_CHECK_LINK 1 /* Check Link. */
104#define SK_RLMT_CHECK_LOC_LINK 2 /* Check other link on same adapter. */
105#define SK_RLMT_CHECK_SEG 4 /* Check segmentation. */
106
107#ifndef RLMT_CHECK_REMOTE
108#define SK_RLMT_CHECK_OTHERS SK_RLMT_CHECK_LOC_LINK
109#else /* RLMT_CHECK_REMOTE */
110#define SK_RLMT_CHECK_REM_LINK 8 /* Check link(s) on other adapter(s). */
111#define SK_RLMT_MAX_REMOTE_PORTS_CHECKED 3
112#define SK_RLMT_CHECK_OTHERS \
113 (SK_RLMT_CHECK_LOC_LINK | SK_RLMT_CHECK_REM_LINK)
114#endif /* RLMT_CHECK_REMOTE */
115
116#ifndef SK_RLMT_ENABLE_TRANSPARENT
117#define SK_RLMT_TRANSPARENT 0 /* RLMT transparent - inactive. */
118#else /* SK_RLMT_ENABLE_TRANSPARENT */
119#define SK_RLMT_TRANSPARENT 128 /* RLMT transparent. */
120#endif /* SK_RLMT_ENABLE_TRANSPARENT */
121
122/* ----- RLMT modes ----- */
123
124/* Check Link State. */
125#define SK_RLMT_MODE_CLS (SK_RLMT_CHECK_LINK)
126
127/* Check Local Ports: check other links on the same adapter. */
128#define SK_RLMT_MODE_CLP (SK_RLMT_CHECK_LINK | SK_RLMT_CHECK_LOC_LINK)
129
130/* Check Local Ports and Segmentation Status. */
131#define SK_RLMT_MODE_CLPSS \
132 (SK_RLMT_CHECK_LINK | SK_RLMT_CHECK_LOC_LINK | SK_RLMT_CHECK_SEG)
133
134#ifdef RLMT_CHECK_REMOTE
135/* Check Local and Remote Ports: check links (local or remote). */
136 Name of define TBD!
137#define SK_RLMT_MODE_CRP \
138 (SK_RLMT_CHECK_LINK | SK_RLMT_CHECK_LOC_LINK | SK_RLMT_CHECK_REM_LINK)
139
140/* Check Local and Remote Ports and Segmentation Status. */
141 Name of define TBD!
142#define SK_RLMT_MODE_CRPSS \
143 (SK_RLMT_CHECK_LINK | SK_RLMT_CHECK_LOC_LINK | \
144 SK_RLMT_CHECK_REM_LINK | SK_RLMT_CHECK_SEG)
145#endif /* RLMT_CHECK_REMOTE */
146
147/* ----- RLMT lookahead result bits ----- */
148
149#define SK_RLMT_RX_RLMT 1 /* Give packet to RLMT. */
150#define SK_RLMT_RX_PROTOCOL 2 /* Give packet to protocol. */
151
152/* Macros */
153
154#if 0
155SK_AC *pAC /* adapter context */
156SK_U32 PortNum /* receiving port */
157unsigned PktLen /* received packet's length */
158SK_BOOL IsBc /* Flag: packet is broadcast */
159unsigned *pOffset /* offs. of bytes to present to SK_RLMT_LOOKAHEAD */
160unsigned *pNumBytes /* #Bytes to present to SK_RLMT_LOOKAHEAD */
161#endif /* 0 */
162
163#define SK_RLMT_PRE_LOOKAHEAD(pAC,PortNum,PktLen,IsBc,pOffset,pNumBytes) { \
164 SK_AC *_pAC; \
165 SK_U32 _PortNum; \
166 _pAC = (pAC); \
167 _PortNum = (SK_U32)(PortNum); \
168 /* _pAC->Rlmt.Port[_PortNum].PacketsRx++; */ \
169 _pAC->Rlmt.Port[_PortNum].PacketsPerTimeSlot++; \
170 if (_pAC->Rlmt.RlmtOff) { \
171 *(pNumBytes) = 0; \
172 } \
173 else {\
174 if ((_pAC->Rlmt.Port[_PortNum].Net->RlmtMode & SK_RLMT_TRANSPARENT) != 0) { \
175 *(pNumBytes) = 0; \
176 } \
177 else if (IsBc) { \
178 if (_pAC->Rlmt.Port[_PortNum].Net->RlmtMode != SK_RLMT_MODE_CLS) { \
179 *(pNumBytes) = 6; \
180 *(pOffset) = 6; \
181 } \
182 else { \
183 *(pNumBytes) = 0; \
184 } \
185 } \
186 else { \
187 if ((PktLen) > SK_RLMT_MAX_TX_BUF_SIZE) { \
188 /* _pAC->Rlmt.Port[_PortNum].DataPacketsPerTimeSlot++; */ \
189 *(pNumBytes) = 0; \
190 } \
191 else { \
192 *(pNumBytes) = 6; \
193 *(pOffset) = 0; \
194 } \
195 } \
196 } \
197}
198
199#if 0
200SK_AC *pAC /* adapter context */
201SK_U32 PortNum /* receiving port */
202SK_U8 *pLaPacket, /* received packet's data (points to pOffset) */
203SK_BOOL IsBc /* Flag: packet is broadcast */
204SK_BOOL IsMc /* Flag: packet is multicast */
205unsigned *pForRlmt /* Result: bits SK_RLMT_RX_RLMT, SK_RLMT_RX_PROTOCOL */
206SK_RLMT_LOOKAHEAD() expects *pNumBytes bytes, starting at
207packet offset *pOffset (see above), to be available at *pLaPacket.
208
209If you use SK_RLMT_LOOKAHEAD in a path where you already know whether the packet
210is BC, MC, or UC, you should use constants for IsBc and IsMc, so that your
211compiler can optimize away the unneeded branches of the if construct.
212#endif /* 0 */
213
214#define SK_RLMT_LOOKAHEAD(pAC,PortNum,pLaPacket,IsBc,IsMc,pForRlmt) { \
215 SK_AC *_pAC; \
216 SK_U32 _PortNum; \
217 SK_U8 *_pLaPacket; \
218 _pAC = (pAC); \
219 _PortNum = (SK_U32)(PortNum); \
220 _pLaPacket = (SK_U8 *)(pLaPacket); \
221 if (IsBc) {\
222 if (!SK_ADDR_EQUAL(_pLaPacket, _pAC->Addr.Net[_pAC->Rlmt.Port[ \
223 _PortNum].Net->NetNumber].CurrentMacAddress.a)) { \
224 _pAC->Rlmt.Port[_PortNum].BcTimeStamp = SkOsGetTime(_pAC); \
225 _pAC->Rlmt.CheckSwitch = SK_TRUE; \
226 } \
227 /* _pAC->Rlmt.Port[_PortNum].DataPacketsPerTimeSlot++; */ \
228 *(pForRlmt) = SK_RLMT_RX_PROTOCOL; \
229 } \
230 else if (IsMc) { \
231 if (SK_ADDR_EQUAL(_pLaPacket, BridgeMcAddr.a)) { \
232 _pAC->Rlmt.Port[_PortNum].BpduPacketsPerTimeSlot++; \
233 if (_pAC->Rlmt.Port[_PortNum].Net->RlmtMode & SK_RLMT_CHECK_SEG) { \
234 *(pForRlmt) = SK_RLMT_RX_RLMT | SK_RLMT_RX_PROTOCOL; \
235 } \
236 else { \
237 *(pForRlmt) = SK_RLMT_RX_PROTOCOL; \
238 } \
239 } \
240 else if (SK_ADDR_EQUAL(_pLaPacket, SkRlmtMcAddr.a)) { \
241 *(pForRlmt) = SK_RLMT_RX_RLMT; \
242 } \
243 else { \
244 /* _pAC->Rlmt.Port[_PortNum].DataPacketsPerTimeSlot++; */ \
245 *(pForRlmt) = SK_RLMT_RX_PROTOCOL; \
246 } \
247 } \
248 else { \
249 if (SK_ADDR_EQUAL( \
250 _pLaPacket, \
251 _pAC->Addr.Port[_PortNum].CurrentMacAddress.a)) { \
252 *(pForRlmt) = SK_RLMT_RX_RLMT; \
253 } \
254 else { \
255 /* _pAC->Rlmt.Port[_PortNum].DataPacketsPerTimeSlot++; */ \
256 *(pForRlmt) = SK_RLMT_RX_PROTOCOL; \
257 } \
258 } \
259}
260
261#ifdef SK_RLMT_FAST_LOOKAHEAD
262Error: SK_RLMT_FAST_LOOKAHEAD no longer used. Use new macros for lookahead.
263#endif /* SK_RLMT_FAST_LOOKAHEAD */
264#ifdef SK_RLMT_SLOW_LOOKAHEAD
265Error: SK_RLMT_SLOW_LOOKAHEAD no longer used. Use new macros for lookahead.
266#endif /* SK_RLMT_SLOW_LOOKAHEAD */
267
268/* typedefs *******************************************************************/
269
270#ifdef SK_RLMT_MBUF_PRIVATE
271typedef struct s_RlmtMbuf {
272 some content
273} SK_RLMT_MBUF;
274#endif /* SK_RLMT_MBUF_PRIVATE */
275
276
277#ifdef SK_LA_INFO
278typedef struct s_Rlmt_PacketInfo {
279 unsigned PacketLength; /* Length of packet. */
280 unsigned PacketType; /* Directed/Multicast/Broadcast. */
281} SK_RLMT_PINFO;
282#endif /* SK_LA_INFO */
283
284
285typedef struct s_RootId {
286 SK_U8 Id[8]; /* Root Bridge Id. */
287} SK_RLMT_ROOT_ID;
288
289
290typedef struct s_port {
291 SK_MAC_ADDR CheckAddr;
292 SK_BOOL SuspectTx;
293} SK_PORT_CHECK;
294
295
296typedef struct s_RlmtNet SK_RLMT_NET;
297
298
299typedef struct s_RlmtPort {
300
301/* ----- Public part (read-only) ----- */
302
303 SK_U8 PortState; /* Current state of this port. */
304
305 /* For PNMI */
306 SK_BOOL LinkDown;
307 SK_BOOL PortDown;
308 SK_U8 Align01;
309
310 SK_U32 PortNumber; /* Number of port on adapter. */
311 SK_RLMT_NET * Net; /* Net port belongs to. */
312
313 SK_U64 TxHelloCts;
314 SK_U64 RxHelloCts;
315 SK_U64 TxSpHelloReqCts;
316 SK_U64 RxSpHelloCts;
317
318/* ----- Private part ----- */
319
320/* SK_U64 PacketsRx; */ /* Total packets received. */
321 SK_U32 PacketsPerTimeSlot; /* Packets rxed between TOs. */
322/* SK_U32 DataPacketsPerTimeSlot; */ /* Data packets ... */
323 SK_U32 BpduPacketsPerTimeSlot; /* BPDU packets rxed in TS. */
324 SK_U64 BcTimeStamp; /* Time of last BC receive. */
325 SK_U64 GuTimeStamp; /* Time of entering GOING_UP. */
326
327 SK_TIMER UpTimer; /* Timer struct Link/Port up. */
328 SK_TIMER DownRxTimer; /* Timer struct down rx. */
329 SK_TIMER DownTxTimer; /* Timer struct down tx. */
330
331 SK_U32 CheckingState; /* Checking State. */
332
333 SK_ADDR_PORT * AddrPort;
334
335 SK_U8 Random[4]; /* Random value. */
336 unsigned PortsChecked; /* #ports checked. */
337	unsigned	PortsSuspect;	/* #ports checked that are suspect */
338 SK_PORT_CHECK PortCheck[1];
339/* SK_PORT_CHECK PortCheck[SK_MAX_MACS - 1]; */
340
341 SK_BOOL PortStarted; /* Port is started. */
342 SK_BOOL PortNoRx; /* NoRx for >= 1 time slot. */
343 SK_BOOL RootIdSet;
344 SK_RLMT_ROOT_ID Root; /* Root Bridge Id. */
345} SK_RLMT_PORT;
346
347
348struct s_RlmtNet {
349
350/* ----- Public part (read-only) ----- */
351
352 SK_U32 NetNumber; /* Number of net. */
353
354 SK_RLMT_PORT * Port[SK_MAX_MACS]; /* Ports that belong to this net. */
355 SK_U32 NumPorts; /* Number of ports. */
356 SK_U32 PrefPort; /* Preferred port. */
357
358 /* For PNMI */
359
360 SK_U32 ChgBcPrio; /* Change Priority of last broadcast received */
361 SK_U32 RlmtMode; /* Check ... */
362 SK_U32 ActivePort; /* Active port. */
363 SK_U32 Preference; /* 0xFFFFFFFF: Automatic. */
364
365 SK_U8 RlmtState; /* Current RLMT state. */
366
367/* ----- Private part ----- */
368 SK_BOOL RootIdSet;
369 SK_U16 Align01;
370
371 int LinksUp; /* #Links up. */
372 int PortsUp; /* #Ports up. */
373 SK_U32 TimeoutValue; /* RLMT timeout value. */
374
375 SK_U32 CheckingState; /* Checking State. */
376 SK_RLMT_ROOT_ID Root; /* Root Bridge Id. */
377
378 SK_TIMER LocTimer; /* Timer struct. */
379 SK_TIMER SegTimer; /* Timer struct. */
380};
381
382
383typedef struct s_Rlmt {
384
385/* ----- Public part (read-only) ----- */
386
387 SK_U32 NumNets; /* Number of nets. */
388 SK_U32 NetsStarted; /* Number of nets started. */
389 SK_RLMT_NET Net[SK_MAX_NETS]; /* Array of available nets. */
390 SK_RLMT_PORT Port[SK_MAX_MACS]; /* Array of available ports. */
391
392/* ----- Private part ----- */
393 SK_BOOL CheckSwitch;
394 SK_BOOL RlmtOff; /* set to zero if the Mac addresses
395 are equal or the second one
396 is zero */
397 SK_U16 Align01;
398
399} SK_RLMT;
400
401
402extern SK_MAC_ADDR BridgeMcAddr;
403extern SK_MAC_ADDR SkRlmtMcAddr;
404
405/* function prototypes ********************************************************/
406
407
408#ifndef SK_KR_PROTO
409
410/* Functions provided by SkRlmt */
411
412/* ANSI/C++ compliant function prototypes */
413
414extern void SkRlmtInit(
415 SK_AC *pAC,
416 SK_IOC IoC,
417 int Level);
418
419extern int SkRlmtEvent(
420 SK_AC *pAC,
421 SK_IOC IoC,
422 SK_U32 Event,
423 SK_EVPARA Para);
424
425#else /* defined(SK_KR_PROTO) */
426
427/* Non-ANSI/C++ compliant function prototypes */
428
429#error KR-style function prototypes are not yet provided.
430
431#endif	/* defined(SK_KR_PROTO) */
432
433
434#ifdef __cplusplus
435}
436#endif /* __cplusplus */
437
438#endif /* __INC_SKRLMT_H */
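To illustrate the two lookahead macros documented above, a hypothetical receive-path helper might look like the sketch below; the buffer, length, and port arguments would come from the driver's receive descriptor handling and are not defined by this header.

/*
 * Illustrative only: classify a received frame for RLMT. RxBuf points
 * at the destination address of the frame; IsBc/IsMc would be derived
 * from the receive descriptor in a real driver.
 */
static void RlmtClassifyRx(SK_AC *pAC, SK_U32 PortIdx, SK_U8 *RxBuf,
	unsigned Len, SK_BOOL IsBc, SK_BOOL IsMc)
{
	unsigned Offset = 0;
	unsigned NumBytes = 0;
	unsigned ForRlmt = SK_RLMT_RX_PROTOCOL;

	SK_RLMT_PRE_LOOKAHEAD(pAC, PortIdx, Len, IsBc, &Offset, &NumBytes);

	if (NumBytes != 0) {
		/* RLMT wants to inspect NumBytes bytes at offset Offset. */
		SK_RLMT_LOOKAHEAD(pAC, PortIdx, RxBuf + Offset,
			IsBc, IsMc, &ForRlmt);
	}

	if (ForRlmt & SK_RLMT_RX_RLMT) {
		/* hand the frame to RLMT, e.g. via SK_RLMT_PACKET_RECEIVED */
	}
	if (ForRlmt & SK_RLMT_RX_PROTOCOL) {
		/* pass the frame up the regular protocol path */
	}
}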
diff --git a/drivers/net/sk98lin/h/sktimer.h b/drivers/net/sk98lin/h/sktimer.h
deleted file mode 100644
index 04e6d7c1ec33..000000000000
--- a/drivers/net/sk98lin/h/sktimer.h
+++ /dev/null
@@ -1,63 +0,0 @@
1/******************************************************************************
2 *
3 * Name: sktimer.h
4 * Project: Gigabit Ethernet Adapters, Event Scheduler Module
5 * Version: $Revision: 1.11 $
6 * Date: $Date: 2003/09/16 12:58:18 $
7 * Purpose: Defines for the timer functions
8 *
9 ******************************************************************************/
10
11/******************************************************************************
12 *
13 * (C)Copyright 1998-2002 SysKonnect GmbH.
14 * (C)Copyright 2002-2003 Marvell.
15 *
16 * This program is free software; you can redistribute it and/or modify
17 * it under the terms of the GNU General Public License as published by
18 * the Free Software Foundation; either version 2 of the License, or
19 * (at your option) any later version.
20 *
21 * The information in this file is provided "AS IS" without warranty.
22 *
23 ******************************************************************************/
24
25/*
26 * SKTIMER.H contains all defines and types for the timer functions
27 */
28
29#ifndef _SKTIMER_H_
30#define _SKTIMER_H_
31
32#include "h/skqueue.h"
33
34/*
35 * SK timer
36 * - needed wherever a timer is used. Put this in your data structure
37 * wherever you want.
38 */
39typedef struct s_Timer SK_TIMER;
40
41struct s_Timer {
42 SK_TIMER *TmNext; /* linked list */
43 SK_U32 TmClass; /* Timer Event class */
44 SK_U32 TmEvent; /* Timer Event value */
45 SK_EVPARA TmPara; /* Timer Event parameter */
46 SK_U32 TmDelta; /* delta time */
47 int TmActive; /* flag: active/inactive */
48};
49
50/*
51 * Timer control struct.
52 * - use in Adapters context name pAC->Tim
53 */
54typedef struct s_TimCtrl {
55 SK_TIMER *StQueue; /* Head of Timer queue */
56} SK_TIMCTRL;
57
58extern void SkTimerInit(SK_AC *pAC, SK_IOC Ioc, int Level);
59extern void SkTimerStop(SK_AC *pAC, SK_IOC Ioc, SK_TIMER *pTimer);
60extern void SkTimerStart(SK_AC *pAC, SK_IOC Ioc, SK_TIMER *pTimer,
61 SK_U32 Time, SK_U32 Class, SK_U32 Event, SK_EVPARA Para);
62extern void SkTimerDone(SK_AC *pAC, SK_IOC Ioc);
63#endif /* _SKTIMER_H_ */
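A short sketch of arming one of these timers follows, assuming microsecond units for the Time argument (as suggested by the timeout comments in ski2c.h); the static timer instance, the event class, and the event value chosen here are illustrative only.

/*
 * Illustrative only: arm a software timer that posts SK_RLMT_LINK_UP to
 * the SKGE_RLMT event class when it expires. In a real driver the
 * SK_TIMER would live inside a per-port structure, not a static.
 */
static SK_TIMER ExampleTimer;

static void StartExampleTimer(SK_AC *pAC, SK_IOC IoC, SK_U32 PortNum)
{
	SK_EVPARA Para;

	Para.Para32[0] = PortNum;
	Para.Para32[1] = 0;

	/* 1000000 = one second, assuming microsecond resolution. */
	SkTimerStart(pAC, IoC, &ExampleTimer, 1000000L,
		SKGE_RLMT, SK_RLMT_LINK_UP, Para);
}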
diff --git a/drivers/net/sk98lin/h/sktypes.h b/drivers/net/sk98lin/h/sktypes.h
deleted file mode 100644
index 40edc96e1055..000000000000
--- a/drivers/net/sk98lin/h/sktypes.h
+++ /dev/null
@@ -1,69 +0,0 @@
1/******************************************************************************
2 *
3 * Name: sktypes.h
4 * Project: GEnesis, PCI Gigabit Ethernet Adapter
5 * Version: $Revision: 1.2 $
6 * Date: $Date: 2003/10/07 08:16:51 $
7 * Purpose: Define data types for Linux
8 *
9 ******************************************************************************/
10
11/******************************************************************************
12 *
13 * (C)Copyright 1998-2002 SysKonnect GmbH.
14 * (C)Copyright 2002-2003 Marvell.
15 *
16 * This program is free software; you can redistribute it and/or modify
17 * it under the terms of the GNU General Public License as published by
18 * the Free Software Foundation; either version 2 of the License, or
19 * (at your option) any later version.
20 *
21 * The information in this file is provided "AS IS" without warranty.
22 *
23 ******************************************************************************/
24
25/******************************************************************************
26 *
27 * Description:
28 *
29 * In this file, all data types that are needed by the common modules
30 * are mapped to Linux data types.
31 *
32 *
33 * Include File Hierarchy:
34 *
35 *
36 ******************************************************************************/
37
38#ifndef __INC_SKTYPES_H
39#define __INC_SKTYPES_H
40
41
42/* defines *******************************************************************/
43
44/*
45 * Data types with a specific size. 'I' = signed, 'U' = unsigned.
46 */
47#define SK_I8 s8
48#define SK_U8 u8
49#define SK_I16 s16
50#define SK_U16 u16
51#define SK_I32 s32
52#define SK_U32 u32
53#define SK_I64 s64
54#define SK_U64 u64
55
56#define SK_UPTR ulong /* casting pointer <-> integral */
57
58/*
59 * Boolean type.
60 */
61#define SK_BOOL SK_U8
62#define SK_FALSE 0
63#define SK_TRUE (!SK_FALSE)
64
65/* typedefs *******************************************************************/
66
67/* function prototypes ********************************************************/
68
69#endif /* __INC_SKTYPES_H */
diff --git a/drivers/net/sk98lin/h/skversion.h b/drivers/net/sk98lin/h/skversion.h
deleted file mode 100644
index a1a7294828e5..000000000000
--- a/drivers/net/sk98lin/h/skversion.h
+++ /dev/null
@@ -1,38 +0,0 @@
1/******************************************************************************
2 *
3 * Name: version.h
4 * Project: GEnesis, PCI Gigabit Ethernet Adapter
5 * Version: $Revision: 1.5 $
6 * Date: $Date: 2003/10/07 08:16:51 $
7 * Purpose: SK specific Error log support
8 *
9 ******************************************************************************/
10
11/******************************************************************************
12 *
13 * (C)Copyright 1998-2002 SysKonnect GmbH.
14 * (C)Copyright 2002-2003 Marvell.
15 *
16 * This program is free software; you can redistribute it and/or modify
17 * it under the terms of the GNU General Public License as published by
18 * the Free Software Foundation; either version 2 of the License, or
19 * (at your option) any later version.
20 *
21 * The information in this file is provided "AS IS" without warranty.
22 *
23 ******************************************************************************/
24
25#ifdef lint
26static const char SysKonnectFileId[] = "@(#) (C) SysKonnect GmbH.";
27static const char SysKonnectBuildNumber[] =
28 "@(#)SK-BUILD: 6.23 PL: 01";
29#endif	/* lint */
30
31#define BOOT_STRING "sk98lin: Network Device Driver v6.23\n" \
32 "(C)Copyright 1999-2004 Marvell(R)."
33
34#define VER_STRING "6.23"
35#define DRIVER_FILE_NAME "sk98lin"
36#define DRIVER_REL_DATE "Feb-13-2004"
37
38
diff --git a/drivers/net/sk98lin/h/skvpd.h b/drivers/net/sk98lin/h/skvpd.h
deleted file mode 100644
index fdd9e48e8040..000000000000
--- a/drivers/net/sk98lin/h/skvpd.h
+++ /dev/null
@@ -1,248 +0,0 @@
1/******************************************************************************
2 *
3 * Name: skvpd.h
4 * Project: GEnesis, PCI Gigabit Ethernet Adapter
5 * Version: $Revision: 1.15 $
6 * Date: $Date: 2003/01/13 10:39:38 $
7 * Purpose: Defines and Macros for VPD handling
8 *
9 ******************************************************************************/
10
11/******************************************************************************
12 *
13 * (C)Copyright 1998-2003 SysKonnect GmbH.
14 *
15 * This program is free software; you can redistribute it and/or modify
16 * it under the terms of the GNU General Public License as published by
17 * the Free Software Foundation; either version 2 of the License, or
18 * (at your option) any later version.
19 *
20 * The information in this file is provided "AS IS" without warranty.
21 *
22 ******************************************************************************/
23
24/*
25 * skvpd.h contains Diagnostic specific defines for VPD handling
26 */
27
28#ifndef __INC_SKVPD_H_
29#define __INC_SKVPD_H_
30
31/*
32 * Define Resource Type Identifiers and VPD keywords
33 */
34#define RES_ID 0x82 /* Resource Type ID String (Product Name) */
35#define RES_VPD_R 0x90 /* start of VPD read only area */
36#define RES_VPD_W 0x91 /* start of VPD read/write area */
37#define RES_END 0x78 /* Resource Type End Tag */
38
39#ifndef VPD_NAME
40#define VPD_NAME "Name" /* Product Name, VPD name of RES_ID */
41#endif /* VPD_NAME */
42#define VPD_PN "PN" /* Adapter Part Number */
43#define VPD_EC "EC" /* Adapter Engineering Level */
44#define VPD_MN "MN" /* Manufacture ID */
45#define VPD_SN "SN" /* Serial Number */
46#define VPD_CP "CP" /* Extended Capability */
47#define VPD_RV "RV" /* Checksum and Reserved */
48#define VPD_YA "YA" /* Asset Tag Identifier */
49#define VPD_VL "VL" /* First Error Log Message (SK specific) */
50#define VPD_VF "VF" /* Second Error Log Message (SK specific) */
51#define VPD_RW "RW" /* Remaining Read / Write Area */
52
53/* 'type' values for vpd_setup_para() */
54#define VPD_RO_KEY 1 /* RO keys are "PN", "EC", "MN", "SN", "RV" */
55#define VPD_RW_KEY 2 /* RW keys are "Yx", "Vx", and "RW" */
56
57/* 'op' values for vpd_setup_para() */
58#define ADD_KEY 1 /* add the key at the pos "RV" or "RW" */
59#define OWR_KEY 2 /* overwrite key if already exists */
60
61/*
62 * Define READ and WRITE Constants.
63 */
64
65#define VPD_DEV_ID_GENESIS 0x4300
66
67#define VPD_SIZE_YUKON 256
68#define VPD_SIZE_GENESIS 512
69#define VPD_SIZE 512
70#define VPD_READ 0x0000
71#define VPD_WRITE 0x8000
72
73#define VPD_STOP(pAC,IoC) VPD_OUT16(pAC,IoC,PCI_VPD_ADR_REG,VPD_WRITE)
74
75#define VPD_GET_RES_LEN(p) ((unsigned int) \
76 (* (SK_U8 *)&(p)[1]) |\
77 ((* (SK_U8 *)&(p)[2]) << 8))
78#define VPD_GET_VPD_LEN(p) ((unsigned int)(* (SK_U8 *)&(p)[2]))
79#define VPD_GET_VAL(p) ((char *)&(p)[3])
80
81#define VPD_MAX_LEN 50
82
83/* VPD status */
84 /* bit 7..1 reserved */
85#define VPD_VALID (1<<0) /* VPD data buffer, vpd_free_ro, */
86 /* and vpd_free_rw valid */
87
88/*
89 * VPD structs
90 */
91typedef struct s_vpd_status {
92 unsigned short Align01; /* Alignment */
93 unsigned short vpd_status; /* VPD status, description see above */
94 int vpd_free_ro; /* unused bytes in read only area */
95 int vpd_free_rw; /* bytes available in read/write area */
96} SK_VPD_STATUS;
97
98typedef struct s_vpd {
99 SK_VPD_STATUS v; /* VPD status structure */
100 char vpd_buf[VPD_SIZE]; /* VPD buffer */
101 int rom_size; /* VPD ROM Size from PCI_OUR_REG_2 */
102 int vpd_size; /* saved VPD-size */
103} SK_VPD;
104
105typedef struct s_vpd_para {
106 unsigned int p_len; /* parameter length */
107 char *p_val; /* points to the value */
108} SK_VPD_PARA;
109
110/*
111 * structure of Large Resource Type Identifiers
112 */
113
114/* was removed because of alignment problems */
115
116/*
117 * structure of VPD keywords
118 */
119typedef struct s_vpd_key {
120 char p_key[2]; /* 2 bytes ID string */
121 unsigned char p_len; /* 1 byte length */
122 char p_val; /* start of the value string */
123} SK_VPD_KEY;
124
125
126/*
127 * System specific VPD macros
128 */
129#ifndef SKDIAG
130#ifndef VPD_DO_IO
131#define VPD_OUT8(pAC,IoC,Addr,Val) (void)SkPciWriteCfgByte(pAC,Addr,Val)
132#define VPD_OUT16(pAC,IoC,Addr,Val) (void)SkPciWriteCfgWord(pAC,Addr,Val)
133#define VPD_IN8(pAC,IoC,Addr,pVal) (void)SkPciReadCfgByte(pAC,Addr,pVal)
134#define VPD_IN16(pAC,IoC,Addr,pVal) (void)SkPciReadCfgWord(pAC,Addr,pVal)
135#define VPD_IN32(pAC,IoC,Addr,pVal) (void)SkPciReadCfgDWord(pAC,Addr,pVal)
136#else /* VPD_DO_IO */
137#define VPD_OUT8(pAC,IoC,Addr,Val) SK_OUT8(IoC,PCI_C(Addr),Val)
138#define VPD_OUT16(pAC,IoC,Addr,Val) SK_OUT16(IoC,PCI_C(Addr),Val)
139#define VPD_IN8(pAC,IoC,Addr,pVal) SK_IN8(IoC,PCI_C(Addr),pVal)
140#define VPD_IN16(pAC,IoC,Addr,pVal) SK_IN16(IoC,PCI_C(Addr),pVal)
141#define VPD_IN32(pAC,IoC,Addr,pVal) SK_IN32(IoC,PCI_C(Addr),pVal)
142#endif /* VPD_DO_IO */
143#else /* SKDIAG */
144#define VPD_OUT8(pAC,Ioc,Addr,Val) { \
145 if ((pAC)->DgT.DgUseCfgCycle) \
146 SkPciWriteCfgByte(pAC,Addr,Val); \
147 else \
148 SK_OUT8(pAC,PCI_C(Addr),Val); \
149 }
150#define VPD_OUT16(pAC,Ioc,Addr,Val) { \
151 if ((pAC)->DgT.DgUseCfgCycle) \
152 SkPciWriteCfgWord(pAC,Addr,Val); \
153 else \
154 SK_OUT16(pAC,PCI_C(Addr),Val); \
155 }
156#define VPD_IN8(pAC,Ioc,Addr,pVal) { \
157 if ((pAC)->DgT.DgUseCfgCycle) \
158 SkPciReadCfgByte(pAC,Addr,pVal); \
159 else \
160 SK_IN8(pAC,PCI_C(Addr),pVal); \
161 }
162#define VPD_IN16(pAC,Ioc,Addr,pVal) { \
163 if ((pAC)->DgT.DgUseCfgCycle) \
164 SkPciReadCfgWord(pAC,Addr,pVal); \
165 else \
166 SK_IN16(pAC,PCI_C(Addr),pVal); \
167 }
168#define VPD_IN32(pAC,Ioc,Addr,pVal) { \
169 if ((pAC)->DgT.DgUseCfgCycle) \
170 SkPciReadCfgDWord(pAC,Addr,pVal); \
171 else \
172 SK_IN32(pAC,PCI_C(Addr),pVal); \
173 }
174#endif /* nSKDIAG */
175
176/* function prototypes ********************************************************/
177
178#ifndef SK_KR_PROTO
179#ifdef SKDIAG
180extern SK_U32 VpdReadDWord(
181 SK_AC *pAC,
182 SK_IOC IoC,
183 int addr);
184#endif /* SKDIAG */
185
186extern SK_VPD_STATUS *VpdStat(
187 SK_AC *pAC,
188 SK_IOC IoC);
189
190extern int VpdKeys(
191 SK_AC *pAC,
192 SK_IOC IoC,
193 char *buf,
194 int *len,
195 int *elements);
196
197extern int VpdRead(
198 SK_AC *pAC,
199 SK_IOC IoC,
200 const char *key,
201 char *buf,
202 int *len);
203
204extern SK_BOOL VpdMayWrite(
205 char *key);
206
207extern int VpdWrite(
208 SK_AC *pAC,
209 SK_IOC IoC,
210 const char *key,
211 const char *buf);
212
213extern int VpdDelete(
214 SK_AC *pAC,
215 SK_IOC IoC,
216 char *key);
217
218extern int VpdUpdate(
219 SK_AC *pAC,
220 SK_IOC IoC);
221
222#ifdef SKDIAG
223extern int VpdReadBlock(
224 SK_AC *pAC,
225 SK_IOC IoC,
226 char *buf,
227 int addr,
228 int len);
229
230extern int VpdWriteBlock(
231 SK_AC *pAC,
232 SK_IOC IoC,
233 char *buf,
234 int addr,
235 int len);
236#endif /* SKDIAG */
237#else /* SK_KR_PROTO */
238extern SK_U32 VpdReadDWord();
239extern SK_VPD_STATUS *VpdStat();
240extern int VpdKeys();
241extern int VpdRead();
242extern SK_BOOL VpdMayWrite();
243extern int VpdWrite();
244extern int VpdDelete();
245extern int VpdUpdate();
246#endif /* SK_KR_PROTO */
247
248#endif /* __INC_SKVPD_H_ */
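A sketch of reading one read-only keyword through the VpdRead() prototype above; it assumes the conventional 0-on-success return value, which this header does not spell out, and the helper name is hypothetical.

/*
 * Illustrative only: fetch the adapter serial number ("SN") from the
 * VPD read-only area. VPD_MAX_LEN bounds the value length.
 */
static void ReadSerialNumber(SK_AC *pAC, SK_IOC IoC)
{
	char	Buf[VPD_MAX_LEN + 1];
	int	Len = VPD_MAX_LEN;

	if (VpdRead(pAC, IoC, VPD_SN, Buf, &Len) == 0 && Len >= 0) {
		Buf[Len] = '\0';
		/* Buf now holds the serial number string, e.g. for logging. */
	}
}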
diff --git a/drivers/net/sk98lin/h/xmac_ii.h b/drivers/net/sk98lin/h/xmac_ii.h
deleted file mode 100644
index 7f8e6d0084c7..000000000000
--- a/drivers/net/sk98lin/h/xmac_ii.h
+++ /dev/null
@@ -1,1579 +0,0 @@
1/******************************************************************************
2 *
3 * Name: xmac_ii.h
4 * Project: Gigabit Ethernet Adapters, Common Modules
5 * Version: $Revision: 1.52 $
6 * Date: $Date: 2003/10/02 16:35:50 $
7 * Purpose: Defines and Macros for Gigabit Ethernet Controller
8 *
9 ******************************************************************************/
10
11/******************************************************************************
12 *
13 * (C)Copyright 1998-2002 SysKonnect.
14 * (C)Copyright 2002-2003 Marvell.
15 *
16 * This program is free software; you can redistribute it and/or modify
17 * it under the terms of the GNU General Public License as published by
18 * the Free Software Foundation; either version 2 of the License, or
19 * (at your option) any later version.
20 *
21 * The information in this file is provided "AS IS" without warranty.
22 *
23 ******************************************************************************/
24
25#ifndef __INC_XMAC_H
26#define __INC_XMAC_H
27
28#ifdef __cplusplus
29extern "C" {
30#endif /* __cplusplus */
31
32/* defines ********************************************************************/
33
34/*
35 * XMAC II registers
36 *
37 * The XMAC registers are 16 or 32 bits wide.
38 * The XMACs host processor interface is set to 16 bit mode,
39 * therefore ALL registers will be addressed with 16 bit accesses.
40 *
41 * The following macros are provided to access the XMAC registers
42 * XM_IN16(), XM_OUT16, XM_IN32(), XM_OUT32(), XM_INADR(), XM_OUTADR(),
43 * XM_INHASH(), and XM_OUTHASH().
44 * The macros are defined in SkGeHw.h.
45 *
46 * Note: NA reg = Network Address, e.g. DA, SA, etc.
47 *
48 */
49#define XM_MMU_CMD 0x0000 /* 16 bit r/w MMU Command Register */
50 /* 0x0004: reserved */
51#define XM_POFF 0x0008 /* 32 bit r/w Packet Offset Register */
52#define XM_BURST 0x000c /* 32 bit r/w Burst Register for half duplex*/
53#define XM_1L_VLAN_TAG 0x0010 /* 16 bit r/w One Level VLAN Tag ID */
54#define XM_2L_VLAN_TAG 0x0014 /* 16 bit r/w Two Level VLAN Tag ID */
55 /* 0x0018 - 0x001e: reserved */
56#define XM_TX_CMD 0x0020 /* 16 bit r/w Transmit Command Register */
57#define XM_TX_RT_LIM 0x0024 /* 16 bit r/w Transmit Retry Limit Register */
58#define XM_TX_STIME 0x0028 /* 16 bit r/w Transmit Slottime Register */
59#define XM_TX_IPG 0x002c /* 16 bit r/w Transmit Inter Packet Gap */
60#define XM_RX_CMD 0x0030 /* 16 bit r/w Receive Command Register */
61#define XM_PHY_ADDR 0x0034 /* 16 bit r/w PHY Address Register */
62#define XM_PHY_DATA 0x0038 /* 16 bit r/w PHY Data Register */
63 /* 0x003c: reserved */
64#define XM_GP_PORT 0x0040 /* 32 bit r/w General Purpose Port Register */
65#define XM_IMSK 0x0044 /* 16 bit r/w Interrupt Mask Register */
66#define XM_ISRC 0x0048 /* 16 bit r/o Interrupt Status Register */
67#define XM_HW_CFG 0x004c /* 16 bit r/w Hardware Config Register */
68 /* 0x0050 - 0x005e: reserved */
69#define XM_TX_LO_WM 0x0060 /* 16 bit r/w Tx FIFO Low Water Mark */
70#define XM_TX_HI_WM 0x0062 /* 16 bit r/w Tx FIFO High Water Mark */
71#define XM_TX_THR 0x0064 /* 16 bit r/w Tx Request Threshold */
72#define XM_HT_THR 0x0066 /* 16 bit r/w Host Request Threshold */
73#define XM_PAUSE_DA 0x0068 /* NA reg r/w Pause Destination Address */
74 /* 0x006e: reserved */
75#define XM_CTL_PARA 0x0070 /* 32 bit r/w Control Parameter Register */
76#define XM_MAC_OPCODE 0x0074 /* 16 bit r/w Opcode for MAC control frames */
77#define XM_MAC_PTIME 0x0076 /* 16 bit r/w Pause time for MAC ctrl frames*/
78#define XM_TX_STAT 0x0078 /* 32 bit r/o Tx Status LIFO Register */
79
80 /* 0x0080 - 0x00fc: 16 NA reg r/w Exact Match Address Registers */
81 /* use the XM_EXM() macro to address */
82#define XM_EXM_START 0x0080 /* r/w Start Address of the EXM Regs */
83
84 /*
85 * XM_EXM(Reg)
86 *
87 * returns the XMAC address offset of specified Exact Match Addr Reg
88 *
89 * para: Reg EXM register to addr (0 .. 15)
90 *
91 * usage: XM_INADDR(IoC, MAC_1, XM_EXM(i), &val[i]);
92 */
93#define XM_EXM(Reg) (XM_EXM_START + ((Reg) << 3))
94
95#define XM_SRC_CHK 0x0100 /* NA reg r/w Source Check Address Register */
96#define XM_SA 0x0108 /* NA reg r/w Station Address Register */
97#define XM_HSM 0x0110 /* 64 bit r/w Hash Match Address Registers */
98#define XM_RX_LO_WM 0x0118 /* 16 bit r/w Receive Low Water Mark */
99#define XM_RX_HI_WM 0x011a /* 16 bit r/w Receive High Water Mark */
100#define XM_RX_THR 0x011c /* 32 bit r/w Receive Request Threshold */
101#define XM_DEV_ID 0x0120 /* 32 bit r/o Device ID Register */
102#define XM_MODE 0x0124 /* 32 bit r/w Mode Register */
103#define XM_LSA 0x0128 /* NA reg r/o Last Source Register */
104 /* 0x012e: reserved */
105#define XM_TS_READ 0x0130 /* 32 bit r/o Time Stamp Read Register */
106#define XM_TS_LOAD 0x0134 /* 32 bit r/o Time Stamp Load Value */
107 /* 0x0138 - 0x01fe: reserved */
108#define XM_STAT_CMD 0x0200 /* 16 bit r/w Statistics Command Register */
109#define XM_RX_CNT_EV 0x0204 /* 32 bit r/o Rx Counter Event Register */
110#define XM_TX_CNT_EV 0x0208 /* 32 bit r/o Tx Counter Event Register */
111#define XM_RX_EV_MSK 0x020c /* 32 bit r/w Rx Counter Event Mask */
112#define XM_TX_EV_MSK 0x0210 /* 32 bit r/w Tx Counter Event Mask */
113 /* 0x0204 - 0x027e: reserved */
114#define XM_TXF_OK	0x0280	/* 32 bit r/o Frames Transmitted OK Counter */
115#define XM_TXO_OK_HI 0x0284 /* 32 bit r/o Octets Transmitted OK High Cnt*/
116#define XM_TXO_OK_LO 0x0288 /* 32 bit r/o Octets Transmitted OK Low Cnt */
117#define XM_TXF_BC_OK 0x028c /* 32 bit r/o Broadcast Frames Xmitted OK */
118#define XM_TXF_MC_OK 0x0290 /* 32 bit r/o Multicast Frames Xmitted OK */
119#define XM_TXF_UC_OK 0x0294 /* 32 bit r/o Unicast Frames Xmitted OK */
120#define XM_TXF_LONG 0x0298 /* 32 bit r/o Tx Long Frame Counter */
121#define XM_TXE_BURST 0x029c /* 32 bit r/o Tx Burst Event Counter */
122#define XM_TXF_MPAUSE 0x02a0 /* 32 bit r/o Tx Pause MAC Ctrl Frame Cnt */
123#define XM_TXF_MCTRL 0x02a4 /* 32 bit r/o Tx MAC Ctrl Frame Counter */
124#define XM_TXF_SNG_COL 0x02a8 /* 32 bit r/o Tx Single Collision Counter */
125#define XM_TXF_MUL_COL 0x02ac /* 32 bit r/o Tx Multiple Collision Counter */
126#define XM_TXF_ABO_COL 0x02b0 /* 32 bit r/o Tx aborted due to Exces. Col. */
127#define XM_TXF_LAT_COL 0x02b4 /* 32 bit r/o Tx Late Collision Counter */
128#define XM_TXF_DEF 0x02b8 /* 32 bit r/o Tx Deferred Frame Counter */
129#define XM_TXF_EX_DEF	0x02bc	/* 32 bit r/o Tx Excessive Deferral Counter */
130#define XM_TXE_FIFO_UR 0x02c0 /* 32 bit r/o Tx FIFO Underrun Event Cnt */
131#define XM_TXE_CS_ERR 0x02c4 /* 32 bit r/o Tx Carrier Sense Error Cnt */
132#define XM_TXP_UTIL 0x02c8 /* 32 bit r/o Tx Utilization in % */
133 /* 0x02cc - 0x02ce: reserved */
134#define XM_TXF_64B 0x02d0 /* 32 bit r/o 64 Byte Tx Frame Counter */
135#define XM_TXF_127B 0x02d4 /* 32 bit r/o 65-127 Byte Tx Frame Counter */
136#define XM_TXF_255B 0x02d8 /* 32 bit r/o 128-255 Byte Tx Frame Counter */
137#define XM_TXF_511B 0x02dc /* 32 bit r/o 256-511 Byte Tx Frame Counter */
138#define XM_TXF_1023B 0x02e0 /* 32 bit r/o 512-1023 Byte Tx Frame Counter*/
139#define XM_TXF_MAX_SZ 0x02e4 /* 32 bit r/o 1024-MaxSize Byte Tx Frame Cnt*/
140 /* 0x02e8 - 0x02fe: reserved */
141#define XM_RXF_OK 0x0300 /* 32 bit r/o Frames Received OK */
142#define XM_RXO_OK_HI 0x0304 /* 32 bit r/o Octets Received OK High Cnt */
143#define XM_RXO_OK_LO 0x0308 /* 32 bit r/o Octets Received OK Low Counter*/
144#define XM_RXF_BC_OK 0x030c /* 32 bit r/o Broadcast Frames Received OK */
145#define XM_RXF_MC_OK 0x0310 /* 32 bit r/o Multicast Frames Received OK */
146#define XM_RXF_UC_OK 0x0314 /* 32 bit r/o Unicast Frames Received OK */
147#define XM_RXF_MPAUSE 0x0318 /* 32 bit r/o Rx Pause MAC Ctrl Frame Cnt */
148#define XM_RXF_MCTRL 0x031c /* 32 bit r/o Rx MAC Ctrl Frame Counter */
149#define XM_RXF_INV_MP 0x0320 /* 32 bit r/o Rx invalid Pause Frame Cnt */
150#define XM_RXF_INV_MOC 0x0324 /* 32 bit r/o Rx Frames with inv. MAC Opcode*/
151#define XM_RXE_BURST 0x0328 /* 32 bit r/o Rx Burst Event Counter */
152#define XM_RXE_FMISS 0x032c /* 32 bit r/o Rx Missed Frames Event Cnt */
153#define XM_RXF_FRA_ERR 0x0330 /* 32 bit r/o Rx Framing Error Counter */
154#define XM_RXE_FIFO_OV 0x0334 /* 32 bit r/o Rx FIFO overflow Event Cnt */
155#define XM_RXF_JAB_PKT 0x0338 /* 32 bit r/o Rx Jabber Packet Frame Cnt */
156#define XM_RXE_CAR_ERR 0x033c /* 32 bit r/o Rx Carrier Event Error Cnt */
157#define XM_RXF_LEN_ERR 0x0340 /* 32 bit r/o Rx in Range Length Error */
158#define XM_RXE_SYM_ERR 0x0344 /* 32 bit r/o Rx Symbol Error Counter */
159#define XM_RXE_SHT_ERR 0x0348 /* 32 bit r/o Rx Short Event Error Cnt */
160#define XM_RXE_RUNT 0x034c /* 32 bit r/o Rx Runt Event Counter */
161#define XM_RXF_LNG_ERR 0x0350 /* 32 bit r/o Rx Frame too Long Error Cnt */
162#define XM_RXF_FCS_ERR 0x0354 /* 32 bit r/o Rx Frame Check Seq. Error Cnt */
163 /* 0x0358 - 0x035a: reserved */
164#define XM_RXF_CEX_ERR 0x035c /* 32 bit r/o Rx Carrier Ext Error Frame Cnt*/
165#define XM_RXP_UTIL 0x0360 /* 32 bit r/o Rx Utilization in % */
166 /* 0x0364 - 0x0366: reserved */
167#define XM_RXF_64B 0x0368 /* 32 bit r/o 64 Byte Rx Frame Counter */
168#define XM_RXF_127B 0x036c /* 32 bit r/o 65-127 Byte Rx Frame Counter */
169#define XM_RXF_255B 0x0370 /* 32 bit r/o 128-255 Byte Rx Frame Counter */
170#define XM_RXF_511B 0x0374 /* 32 bit r/o 256-511 Byte Rx Frame Counter */
171#define XM_RXF_1023B 0x0378 /* 32 bit r/o 512-1023 Byte Rx Frame Counter*/
172#define XM_RXF_MAX_SZ 0x037c /* 32 bit r/o 1024-MaxSize Byte Rx Frame Cnt*/
173 /* 0x02e8 - 0x02fe: reserved */
174
175
176/*----------------------------------------------------------------------------*/
177/*
178 * XMAC Bit Definitions
179 *
180 * If the bit access behaviour differs from the register access behaviour
181 * (r/w, r/o) this is documented after the bit number.
182 * The following bit access behaviours are used:
183 * (sc) self clearing
184 * (ro) read only
185 */
186
187/* XM_MMU_CMD 16 bit r/w MMU Command Register */
188 /* Bit 15..13: reserved */
189#define XM_MMU_PHY_RDY (1<<12) /* Bit 12: PHY Read Ready */
190#define XM_MMU_PHY_BUSY (1<<11) /* Bit 11: PHY Busy */
191#define XM_MMU_IGN_PF (1<<10) /* Bit 10: Ignore Pause Frame */
192#define XM_MMU_MAC_LB (1<<9) /* Bit 9: Enable MAC Loopback */
193 /* Bit 8: reserved */
194#define XM_MMU_FRC_COL (1<<7) /* Bit 7: Force Collision */
195#define XM_MMU_SIM_COL (1<<6) /* Bit 6: Simulate Collision */
196#define XM_MMU_NO_PRE (1<<5) /* Bit 5: No MDIO Preamble */
197#define XM_MMU_GMII_FD (1<<4) /* Bit 4: GMII uses Full Duplex */
198#define XM_MMU_RAT_CTRL (1<<3) /* Bit 3: Enable Rate Control */
199#define XM_MMU_GMII_LOOP (1<<2) /* Bit 2: PHY is in Loopback Mode */
200#define XM_MMU_ENA_RX (1<<1) /* Bit 1: Enable Receiver */
201#define XM_MMU_ENA_TX (1<<0) /* Bit 0: Enable Transmitter */
202
203
204/* XM_TX_CMD 16 bit r/w Transmit Command Register */
205 /* Bit 15..7: reserved */
206#define XM_TX_BK2BK	(1<<6)	/* Bit 6:	Ignore Carrier Sense (Tx Bk2Bk)*/
207#define XM_TX_ENC_BYP (1<<5) /* Bit 5: Set Encoder in Bypass Mode */
208#define XM_TX_SAM_LINE (1<<4) /* Bit 4: (sc) Start utilization calculation */
209#define XM_TX_NO_GIG_MD (1<<3) /* Bit 3: Disable Carrier Extension */
210#define XM_TX_NO_PRE (1<<2) /* Bit 2: Disable Preamble Generation */
211#define XM_TX_NO_CRC (1<<1) /* Bit 1: Disable CRC Generation */
212#define XM_TX_AUTO_PAD (1<<0) /* Bit 0: Enable Automatic Padding */
213
214
215/* XM_TX_RT_LIM 16 bit r/w Transmit Retry Limit Register */
216 /* Bit 15..5: reserved */
217#define XM_RT_LIM_MSK 0x1f /* Bit 4..0: Tx Retry Limit */
218
219
220/* XM_TX_STIME 16 bit r/w Transmit Slottime Register */
221 /* Bit 15..7: reserved */
222#define XM_STIME_MSK 0x7f /* Bit 6..0: Tx Slottime bits */
223
224
225/* XM_TX_IPG 16 bit r/w Transmit Inter Packet Gap */
226 /* Bit 15..8: reserved */
227#define XM_IPG_MSK 0xff /* Bit 7..0: IPG value bits */
228
229
230/* XM_RX_CMD 16 bit r/w Receive Command Register */
231 /* Bit 15..9: reserved */
232#define XM_RX_LENERR_OK (1<<8) /* Bit 8 don't set Rx Err bit for */
233					/* in-range error packets */
234#define XM_RX_BIG_PK_OK (1<<7) /* Bit 7 don't set Rx Err bit for */
235 /* jumbo packets */
236#define XM_RX_IPG_CAP (1<<6) /* Bit 6 repl. type field with IPG */
237#define XM_RX_TP_MD (1<<5) /* Bit 5: Enable transparent Mode */
238#define XM_RX_STRIP_FCS (1<<4) /* Bit 4: Enable FCS Stripping */
239#define XM_RX_SELF_RX (1<<3) /* Bit 3: Enable Rx of own packets */
240#define XM_RX_SAM_LINE (1<<2) /* Bit 2: (sc) Start utilization calculation */
241#define XM_RX_STRIP_PAD (1<<1) /* Bit 1: Strip pad bytes of Rx frames */
242#define XM_RX_DIS_CEXT (1<<0) /* Bit 0: Disable carrier ext. check */
243
244
245/* XM_PHY_ADDR 16 bit r/w PHY Address Register */
246 /* Bit 15..5: reserved */
247#define XM_PHY_ADDR_SZ 0x1f /* Bit 4..0: PHY Address bits */
248
249
250/* XM_GP_PORT 32 bit r/w General Purpose Port Register */
251 /* Bit 31..7: reserved */
252#define XM_GP_ANIP (1L<<6) /* Bit 6: (ro) Auto-Neg. in progress */
253#define XM_GP_FRC_INT (1L<<5) /* Bit 5: (sc) Force Interrupt */
254 /* Bit 4: reserved */
255#define XM_GP_RES_MAC (1L<<3) /* Bit 3: (sc) Reset MAC and FIFOs */
256#define XM_GP_RES_STAT (1L<<2) /* Bit 2: (sc) Reset the statistics module */
257 /* Bit 1: reserved */
258#define XM_GP_INP_ASS (1L<<0) /* Bit 0: (ro) GP Input Pin asserted */
259
260
261/* XM_IMSK 16 bit r/w Interrupt Mask Register */
262/* XM_ISRC 16 bit r/o Interrupt Status Register */
263 /* Bit 15: reserved */
264#define XM_IS_LNK_AE (1<<14) /* Bit 14: Link Asynchronous Event */
265#define XM_IS_TX_ABORT (1<<13) /* Bit 13: Transmit Abort, late Col. etc */
266#define XM_IS_FRC_INT (1<<12) /* Bit 12: Force INT bit set in GP */
267#define XM_IS_INP_ASS (1<<11) /* Bit 11: Input Asserted, GP bit 0 set */
268#define XM_IS_LIPA_RC (1<<10) /* Bit 10: Link Partner requests config */
269#define XM_IS_RX_PAGE (1<<9) /* Bit 9: Page Received */
270#define XM_IS_TX_PAGE (1<<8) /* Bit 8: Next Page Loaded for Transmit */
271#define XM_IS_AND (1<<7) /* Bit 7: Auto-Negotiation Done */
272#define XM_IS_TSC_OV (1<<6) /* Bit 6: Time Stamp Counter Overflow */
273#define XM_IS_RXC_OV (1<<5) /* Bit 5: Rx Counter Event Overflow */
274#define XM_IS_TXC_OV (1<<4) /* Bit 4: Tx Counter Event Overflow */
275#define XM_IS_RXF_OV (1<<3) /* Bit 3: Receive FIFO Overflow */
276#define XM_IS_TXF_UR (1<<2) /* Bit 2: Transmit FIFO Underrun */
277#define XM_IS_TX_COMP (1<<1) /* Bit 1: Frame Tx Complete */
278#define XM_IS_RX_COMP (1<<0) /* Bit 0: Frame Rx Complete */
279
280#define XM_DEF_MSK (~(XM_IS_INP_ASS | XM_IS_LIPA_RC | XM_IS_RX_PAGE |\
281 XM_IS_AND | XM_IS_RXC_OV | XM_IS_TXC_OV | XM_IS_TXF_UR))
282
283
284/* XM_HW_CFG 16 bit r/w Hardware Config Register */
285 /* Bit 15.. 4: reserved */
286#define XM_HW_GEN_EOP (1<<3) /* Bit 3: generate End of Packet pulse */
287#define XM_HW_COM4SIG (1<<2) /* Bit 2: use Comma Detect for Sig. Det.*/
288 /* Bit 1: reserved */
289#define XM_HW_GMII_MD (1<<0) /* Bit 0: GMII Interface selected */
290
291
292/* XM_TX_LO_WM 16 bit r/w Tx FIFO Low Water Mark */
293/* XM_TX_HI_WM 16 bit r/w Tx FIFO High Water Mark */
294 /* Bit 15..10 reserved */
295#define XM_TX_WM_MSK 0x01ff /* Bit 9.. 0 Tx FIFO Watermark bits */
296
297/* XM_TX_THR 16 bit r/w Tx Request Threshold */
298/* XM_HT_THR 16 bit r/w Host Request Threshold */
299/* XM_RX_THR 16 bit r/w Rx Request Threshold */
300 /* Bit 15..11 reserved */
301#define XM_THR_MSK 0x03ff /* Bit 10.. 0 Rx/Tx Request Threshold bits */
302
303
304/* XM_TX_STAT 32 bit r/o Tx Status LIFO Register */
305#define XM_ST_VALID (1UL<<31) /* Bit 31: Status Valid */
306#define XM_ST_BYTE_CNT (0x3fffL<<17) /* Bit 30..17: Tx frame Length */
307#define XM_ST_RETRY_CNT (0x1fL<<12) /* Bit 16..12: Retry Count */
308#define XM_ST_EX_COL (1L<<11) /* Bit 11: Excessive Collisions */
309#define XM_ST_EX_DEF (1L<<10) /* Bit 10: Excessive Deferral */
310#define XM_ST_BURST (1L<<9) /* Bit 9: p. xmitted in burst md*/
311#define XM_ST_DEFER	(1L<<8)	/* Bit  8:	packet was deferred */
312#define XM_ST_BC (1L<<7) /* Bit 7: Broadcast packet */
313#define XM_ST_MC (1L<<6) /* Bit 6: Multicast packet */
314#define XM_ST_UC (1L<<5) /* Bit 5: Unicast packet */
315#define XM_ST_TX_UR	(1L<<4)	/* Bit  4:	FIFO Underrun occurred */
316#define XM_ST_CS_ERR (1L<<3) /* Bit 3: Carrier Sense Error */
317#define XM_ST_LAT_COL (1L<<2) /* Bit 2: Late Collision Error */
318#define XM_ST_MUL_COL (1L<<1) /* Bit 1: Multiple Collisions */
319#define XM_ST_SGN_COL (1L<<0) /* Bit 0: Single Collision */
320
321/* XM_RX_LO_WM 16 bit r/w Receive Low Water Mark */
322/* XM_RX_HI_WM 16 bit r/w Receive High Water Mark */
323 /* Bit 15..11: reserved */
324#define XM_RX_WM_MSK 0x03ff /* Bit 11.. 0: Rx FIFO Watermark bits */
325
326
327/* XM_DEV_ID 32 bit r/o Device ID Register */
328#define XM_DEV_OUI (0x00ffffffUL<<8) /* Bit 31..8: Device OUI */
329#define XM_DEV_REV (0x07L << 5) /* Bit 7..5: Chip Rev Num */
330
331
332/* XM_MODE 32 bit r/w Mode Register */
333 /* Bit 31..27: reserved */
334#define XM_MD_ENA_REJ (1L<<26) /* Bit 26: Enable Frame Reject */
335#define XM_MD_SPOE_E (1L<<25) /* Bit 25: Send Pause on Edge */
336 /* extern generated */
337#define XM_MD_TX_REP (1L<<24) /* Bit 24: Transmit Repeater Mode */
338#define XM_MD_SPOFF_I (1L<<23) /* Bit 23: Send Pause on FIFO full */
339 /* intern generated */
340#define XM_MD_LE_STW (1L<<22) /* Bit 22: Rx Stat Word in Little Endian */
341#define XM_MD_TX_CONT (1L<<21) /* Bit 21: Send Continuous */
342#define XM_MD_TX_PAUSE (1L<<20) /* Bit 20: (sc) Send Pause Frame */
343#define XM_MD_ATS (1L<<19) /* Bit 19: Append Time Stamp */
344#define XM_MD_SPOL_I (1L<<18) /* Bit 18: Send Pause on Low */
345 /* intern generated */
346#define XM_MD_SPOH_I (1L<<17) /* Bit 17: Send Pause on High */
347 /* intern generated */
348#define XM_MD_CAP (1L<<16) /* Bit 16: Check Address Pair */
349#define XM_MD_ENA_HASH (1L<<15) /* Bit 15: Enable Hashing */
350#define XM_MD_CSA (1L<<14) /* Bit 14: Check Station Address */
351#define XM_MD_CAA (1L<<13) /* Bit 13: Check Address Array */
352#define XM_MD_RX_MCTRL (1L<<12) /* Bit 12: Rx MAC Control Frame */
353#define XM_MD_RX_RUNT (1L<<11) /* Bit 11: Rx Runt Frames */
354#define XM_MD_RX_IRLE (1L<<10) /* Bit 10: Rx in Range Len Err Frame */
355#define XM_MD_RX_LONG (1L<<9) /* Bit 9: Rx Long Frame */
356#define XM_MD_RX_CRCE (1L<<8) /* Bit 8: Rx CRC Error Frame */
357#define XM_MD_RX_ERR (1L<<7) /* Bit 7: Rx Error Frame */
358#define XM_MD_DIS_UC (1L<<6) /* Bit 6: Disable Rx Unicast */
359#define XM_MD_DIS_MC (1L<<5) /* Bit 5: Disable Rx Multicast */
360#define XM_MD_DIS_BC (1L<<4) /* Bit 4: Disable Rx Broadcast */
361#define XM_MD_ENA_PROM (1L<<3) /* Bit 3: Enable Promiscuous */
362#define XM_MD_ENA_BE (1L<<2) /* Bit 2: Enable Big Endian */
363#define XM_MD_FTF (1L<<1) /* Bit 1: (sc) Flush Tx FIFO */
364#define XM_MD_FRF (1L<<0) /* Bit 0: (sc) Flush Rx FIFO */
365
366#define XM_PAUSE_MODE (XM_MD_SPOE_E | XM_MD_SPOL_I | XM_MD_SPOH_I)
367#define XM_DEF_MODE (XM_MD_RX_RUNT | XM_MD_RX_IRLE | XM_MD_RX_LONG |\
368 XM_MD_RX_CRCE | XM_MD_RX_ERR | XM_MD_CSA | XM_MD_CAA)
369
370/* XM_STAT_CMD 16 bit r/w Statistics Command Register */
371					/* Bit 15..6:	reserved */
372#define XM_SC_SNP_RXC (1<<5) /* Bit 5: (sc) Snap Rx Counters */
373#define XM_SC_SNP_TXC (1<<4) /* Bit 4: (sc) Snap Tx Counters */
374#define XM_SC_CP_RXC (1<<3) /* Bit 3: Copy Rx Counters Continuously */
375#define XM_SC_CP_TXC (1<<2) /* Bit 2: Copy Tx Counters Continuously */
376#define XM_SC_CLR_RXC (1<<1) /* Bit 1: (sc) Clear Rx Counters */
377#define XM_SC_CLR_TXC (1<<0) /* Bit 0: (sc) Clear Tx Counters */
378
379
380/* XM_RX_CNT_EV 32 bit r/o Rx Counter Event Register */
381/* XM_RX_EV_MSK 32 bit r/w Rx Counter Event Mask */
382#define XMR_MAX_SZ_OV (1UL<<31) /* Bit 31: 1024-MaxSize Rx Cnt Ov*/
383#define XMR_1023B_OV (1L<<30) /* Bit 30: 512-1023Byte Rx Cnt Ov*/
384#define XMR_511B_OV (1L<<29) /* Bit 29: 256-511 Byte Rx Cnt Ov*/
385#define XMR_255B_OV (1L<<28) /* Bit 28: 128-255 Byte Rx Cnt Ov*/
386#define XMR_127B_OV (1L<<27) /* Bit 27: 65-127 Byte Rx Cnt Ov */
387#define XMR_64B_OV (1L<<26) /* Bit 26: 64 Byte Rx Cnt Ov */
388#define XMR_UTIL_OV (1L<<25) /* Bit 25: Rx Util Cnt Overflow */
389#define XMR_UTIL_UR (1L<<24) /* Bit 24: Rx Util Cnt Underrun */
390#define XMR_CEX_ERR_OV (1L<<23) /* Bit 23: CEXT Err Cnt Ov */
391 /* Bit 22: reserved */
392#define XMR_FCS_ERR_OV (1L<<21) /* Bit 21: Rx FCS Error Cnt Ov */
393#define XMR_LNG_ERR_OV (1L<<20) /* Bit 20: Rx too Long Err Cnt Ov*/
394#define XMR_RUNT_OV (1L<<19) /* Bit 19: Runt Event Cnt Ov */
395#define XMR_SHT_ERR_OV (1L<<18) /* Bit 18: Rx Short Ev Err Cnt Ov*/
396#define XMR_SYM_ERR_OV (1L<<17) /* Bit 17: Rx Sym Err Cnt Ov */
397 /* Bit 16: reserved */
398#define XMR_CAR_ERR_OV (1L<<15) /* Bit 15: Rx Carr Ev Err Cnt Ov */
399#define XMR_JAB_PKT_OV (1L<<14) /* Bit 14: Rx Jabb Packet Cnt Ov */
400#define XMR_FIFO_OV (1L<<13) /* Bit 13: Rx FIFO Ov Ev Cnt Ov */
401#define XMR_FRA_ERR_OV (1L<<12) /* Bit 12: Rx Framing Err Cnt Ov */
402#define XMR_FMISS_OV (1L<<11) /* Bit 11: Rx Missed Ev Cnt Ov */
403#define XMR_BURST (1L<<10) /* Bit 10: Rx Burst Event Cnt Ov */
404#define XMR_INV_MOC (1L<<9) /* Bit 9: Rx with inv. MAC OC Ov*/
405#define XMR_INV_MP (1L<<8) /* Bit 8: Rx inv Pause Frame Ov */
406#define XMR_MCTRL_OV (1L<<7) /* Bit 7: Rx MAC Ctrl-F Cnt Ov */
407#define XMR_MPAUSE_OV (1L<<6) /* Bit 6: Rx Pause MAC Ctrl-F Ov*/
408#define XMR_UC_OK_OV (1L<<5) /* Bit 5: Rx Unicast Frame CntOv*/
409#define XMR_MC_OK_OV (1L<<4) /* Bit 4: Rx Multicast Cnt Ov */
410#define XMR_BC_OK_OV (1L<<3) /* Bit 3: Rx Broadcast Cnt Ov */
411#define XMR_OK_LO_OV (1L<<2) /* Bit 2: Octets Rx OK Low CntOv*/
412#define XMR_OK_HI_OV (1L<<1) /* Bit 1: Octets Rx OK Hi Cnt Ov*/
413#define XMR_OK_OV (1L<<0) /* Bit 0: Frames Received Ok Ov */
414
415#define XMR_DEF_MSK (XMR_OK_LO_OV | XMR_OK_HI_OV)
416
417/* XM_TX_CNT_EV 32 bit r/o Tx Counter Event Register */
418/* XM_TX_EV_MSK 32 bit r/w Tx Counter Event Mask */
419 /* Bit 31..26: reserved */
420#define XMT_MAX_SZ_OV (1L<<25) /* Bit 25: 1024-MaxSize Tx Cnt Ov*/
421#define XMT_1023B_OV (1L<<24) /* Bit 24: 512-1023Byte Tx Cnt Ov*/
422#define XMT_511B_OV (1L<<23) /* Bit 23: 256-511 Byte Tx Cnt Ov*/
423#define XMT_255B_OV (1L<<22) /* Bit 22: 128-255 Byte Tx Cnt Ov*/
424#define XMT_127B_OV (1L<<21) /* Bit 21: 65-127 Byte Tx Cnt Ov */
425#define XMT_64B_OV (1L<<20) /* Bit 20: 64 Byte Tx Cnt Ov */
426#define XMT_UTIL_OV (1L<<19) /* Bit 19: Tx Util Cnt Overflow */
427#define XMT_UTIL_UR (1L<<18) /* Bit 18: Tx Util Cnt Underrun */
428#define XMT_CS_ERR_OV (1L<<17) /* Bit 17: Tx Carr Sen Err Cnt Ov*/
429#define XMT_FIFO_UR_OV (1L<<16) /* Bit 16: Tx FIFO Ur Ev Cnt Ov */
430#define XMT_EX_DEF_OV	(1L<<15)	/* Bit 15:  Tx Ex Deferral Cnt Ov */
431#define XMT_DEF (1L<<14) /* Bit 14: Tx Deferred Cnt Ov */
432#define XMT_LAT_COL_OV (1L<<13) /* Bit 13: Tx Late Col Cnt Ov */
433#define XMT_ABO_COL_OV	(1L<<12)	/* Bit 12:  Tx abo due to Ex Col Ov*/
434#define XMT_MUL_COL_OV (1L<<11) /* Bit 11: Tx Mult Col Cnt Ov */
435#define XMT_SNG_COL (1L<<10) /* Bit 10: Tx Single Col Cnt Ov */
436#define XMT_MCTRL_OV (1L<<9) /* Bit 9: Tx MAC Ctrl Counter Ov*/
437#define XMT_MPAUSE (1L<<8) /* Bit 8: Tx Pause MAC Ctrl-F Ov*/
438#define XMT_BURST (1L<<7) /* Bit 7: Tx Burst Event Cnt Ov */
439#define XMT_LONG (1L<<6) /* Bit 6: Tx Long Frame Cnt Ov */
440#define XMT_UC_OK_OV (1L<<5) /* Bit 5: Tx Unicast Cnt Ov */
441#define XMT_MC_OK_OV (1L<<4) /* Bit 4: Tx Multicast Cnt Ov */
442#define XMT_BC_OK_OV (1L<<3) /* Bit 3: Tx Broadcast Cnt Ov */
443#define XMT_OK_LO_OV (1L<<2) /* Bit 2: Octets Tx OK Low CntOv*/
444#define XMT_OK_HI_OV (1L<<1) /* Bit 1: Octets Tx OK Hi Cnt Ov*/
445#define XMT_OK_OV (1L<<0) /* Bit 0: Frames Tx Ok Ov */
446
447#define XMT_DEF_MSK (XMT_OK_LO_OV | XMT_OK_HI_OV)
448
449/*
450 * Receive Frame Status Encoding
451 */
452#define XMR_FS_LEN (0x3fffUL<<18) /* Bit 31..18: Rx Frame Length */
453#define XMR_FS_2L_VLAN (1L<<17) /* Bit 17: tagged with 2-Lev VLAN ID*/
454#define XMR_FS_1L_VLAN (1L<<16) /* Bit 16: tagged with 1-Lev VLAN ID*/
455#define XMR_FS_BC (1L<<15) /* Bit 15: Broadcast Frame */
456#define XMR_FS_MC (1L<<14) /* Bit 14: Multicast Frame */
457#define XMR_FS_UC (1L<<13) /* Bit 13: Unicast Frame */
458 /* Bit 12: reserved */
459#define XMR_FS_BURST (1L<<11) /* Bit 11: Burst Mode */
460#define XMR_FS_CEX_ERR (1L<<10) /* Bit 10: Carrier Ext. Error */
461#define XMR_FS_802_3 (1L<<9) /* Bit 9: 802.3 Frame */
462#define XMR_FS_COL_ERR (1L<<8) /* Bit 8: Collision Error */
463#define XMR_FS_CAR_ERR (1L<<7) /* Bit 7: Carrier Event Error */
464#define XMR_FS_LEN_ERR (1L<<6) /* Bit 6: In-Range Length Error */
465#define XMR_FS_FRA_ERR (1L<<5) /* Bit 5: Framing Error */
466#define XMR_FS_RUNT (1L<<4) /* Bit 4: Runt Frame */
467#define XMR_FS_LNG_ERR (1L<<3) /* Bit 3: Giant (Jumbo) Frame */
468#define XMR_FS_FCS_ERR (1L<<2) /* Bit 2: Frame Check Sequ Err */
469#define XMR_FS_ERR (1L<<1) /* Bit 1: Frame Error */
470#define XMR_FS_MCTRL (1L<<0) /* Bit 0: MAC Control Packet */
471
472/*
473 * XMR_FS_ERR will be set if
474 * XMR_FS_FCS_ERR, XMR_FS_LNG_ERR, XMR_FS_RUNT,
475 * XMR_FS_FRA_ERR, XMR_FS_LEN_ERR, or XMR_FS_CEX_ERR
476 * is set. XMR_FS_LNG_ERR and XMR_FS_LEN_ERR will issue
477 * XMR_FS_ERR unless the corresponding bit in the Receive Command
478 * Register is set.
479 */
480#define XMR_FS_ANY_ERR XMR_FS_ERR
481
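/*
 * Editor's note -- illustrative sketch only, not part of the original
 * sk98lin header.  A driver would typically test the status word of a
 * received frame against XMR_FS_ANY_ERR before handing the frame up.
 * The helper name below is hypothetical; only the definitions above
 * are assumed.
 */
static inline int xm_rx_status_bad(unsigned long rx_status)
{
	/* XMR_FS_ERR summarizes the FCS, length, runt, framing,
	 * in-range length and carrier-extension error bits */
	return (rx_status & XMR_FS_ANY_ERR) != 0;
}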
482/*----------------------------------------------------------------------------*/
483/*
484 * XMAC-PHY Registers, indirectly addressed over the XMAC
485 */
486#define PHY_XMAC_CTRL 0x00 /* 16 bit r/w PHY Control Register */
487#define PHY_XMAC_STAT 0x01 /* 16 bit r/w PHY Status Register */
488#define PHY_XMAC_ID0 0x02 /* 16 bit r/o PHY ID0 Register */
489#define PHY_XMAC_ID1 0x03 /* 16 bit r/o PHY ID1 Register */
490#define PHY_XMAC_AUNE_ADV 0x04 /* 16 bit r/w Auto-Neg. Advertisement */
491#define PHY_XMAC_AUNE_LP 0x05 /* 16 bit r/o Link Partner Ability Reg */
492#define PHY_XMAC_AUNE_EXP 0x06 /* 16 bit r/o Auto-Neg. Expansion Reg */
493#define PHY_XMAC_NEPG 0x07 /* 16 bit r/w Next Page Register */
494#define PHY_XMAC_NEPG_LP 0x08 /* 16 bit r/o Next Page Link Partner */
495 /* 0x09 - 0x0e: reserved */
496#define PHY_XMAC_EXT_STAT 0x0f /* 16 bit r/o Ext Status Register */
497#define PHY_XMAC_RES_ABI 0x10 /* 16 bit r/o PHY Resolved Ability */
498
499/*----------------------------------------------------------------------------*/
500/*
501 * Broadcom-PHY Registers, indirectly addressed over XMAC
502 */
503#define PHY_BCOM_CTRL 0x00 /* 16 bit r/w PHY Control Register */
504#define PHY_BCOM_STAT 0x01 /* 16 bit r/o PHY Status Register */
505#define PHY_BCOM_ID0 0x02 /* 16 bit r/o PHY ID0 Register */
506#define PHY_BCOM_ID1 0x03 /* 16 bit r/o PHY ID1 Register */
507#define PHY_BCOM_AUNE_ADV 0x04 /* 16 bit r/w Auto-Neg. Advertisement */
508#define PHY_BCOM_AUNE_LP 0x05 /* 16 bit r/o Link Part Ability Reg */
509#define PHY_BCOM_AUNE_EXP 0x06 /* 16 bit r/o Auto-Neg. Expansion Reg */
510#define PHY_BCOM_NEPG 0x07 /* 16 bit r/w Next Page Register */
511#define PHY_BCOM_NEPG_LP 0x08 /* 16 bit r/o Next Page Link Partner */
512 /* Broadcom-specific registers */
513#define PHY_BCOM_1000T_CTRL 0x09 /* 16 bit r/w 1000Base-T Ctrl Reg */
514#define PHY_BCOM_1000T_STAT 0x0a /* 16 bit r/o 1000Base-T Status Reg */
515 /* 0x0b - 0x0e: reserved */
516#define PHY_BCOM_EXT_STAT 0x0f /* 16 bit r/o Extended Status Reg */
517#define PHY_BCOM_P_EXT_CTRL 0x10 /* 16 bit r/w PHY Extended Ctrl Reg */
518#define PHY_BCOM_P_EXT_STAT 0x11 /* 16 bit r/o PHY Extended Stat Reg */
519#define PHY_BCOM_RE_CTR 0x12 /* 16 bit r/w Receive Error Counter */
520#define PHY_BCOM_FC_CTR 0x13 /* 16 bit r/w False Carrier Sense Cnt */
521#define PHY_BCOM_RNO_CTR 0x14 /* 16 bit r/w Receiver NOT_OK Cnt */
522 /* 0x15 - 0x17: reserved */
523#define PHY_BCOM_AUX_CTRL 0x18 /* 16 bit r/w Auxiliary Control Reg */
524#define PHY_BCOM_AUX_STAT 0x19 /* 16 bit r/o Auxiliary Stat Summary */
525#define PHY_BCOM_INT_STAT 0x1a /* 16 bit r/o Interrupt Status Reg */
526#define PHY_BCOM_INT_MASK 0x1b /* 16 bit r/w Interrupt Mask Reg */
527 /* 0x1c: reserved */
528 /* 0x1d - 0x1f: test registers */
529
530/*----------------------------------------------------------------------------*/
531/*
532 * Marvell-PHY Registers, indirectly addressed over GMAC
533 */
534#define PHY_MARV_CTRL 0x00 /* 16 bit r/w PHY Control Register */
535#define PHY_MARV_STAT 0x01 /* 16 bit r/o PHY Status Register */
536#define PHY_MARV_ID0 0x02 /* 16 bit r/o PHY ID0 Register */
537#define PHY_MARV_ID1 0x03 /* 16 bit r/o PHY ID1 Register */
538#define PHY_MARV_AUNE_ADV 0x04 /* 16 bit r/w Auto-Neg. Advertisement */
539#define PHY_MARV_AUNE_LP 0x05 /* 16 bit r/o Link Part Ability Reg */
540#define PHY_MARV_AUNE_EXP 0x06 /* 16 bit r/o Auto-Neg. Expansion Reg */
541#define PHY_MARV_NEPG 0x07 /* 16 bit r/w Next Page Register */
542#define PHY_MARV_NEPG_LP 0x08 /* 16 bit r/o Next Page Link Partner */
543 /* Marvell-specific registers */
544#define PHY_MARV_1000T_CTRL 0x09 /* 16 bit r/w 1000Base-T Ctrl Reg */
545#define PHY_MARV_1000T_STAT 0x0a /* 16 bit r/o 1000Base-T Status Reg */
546 /* 0x0b - 0x0e: reserved */
547#define PHY_MARV_EXT_STAT 0x0f /* 16 bit r/o Extended Status Reg */
548#define PHY_MARV_PHY_CTRL 0x10 /* 16 bit r/w PHY Specific Ctrl Reg */
549#define PHY_MARV_PHY_STAT 0x11 /* 16 bit r/o PHY Specific Stat Reg */
550#define PHY_MARV_INT_MASK 0x12 /* 16 bit r/w Interrupt Mask Reg */
551#define PHY_MARV_INT_STAT 0x13 /* 16 bit r/o Interrupt Status Reg */
552#define PHY_MARV_EXT_CTRL 0x14 /* 16 bit r/w Ext. PHY Specific Ctrl */
553#define PHY_MARV_RXE_CNT 0x15 /* 16 bit r/w Receive Error Counter */
554#define PHY_MARV_EXT_ADR 0x16 /* 16 bit r/w Ext. Ad. for Cable Diag. */
555 /* 0x17: reserved */
556#define PHY_MARV_LED_CTRL 0x18 /* 16 bit r/w LED Control Reg */
557#define PHY_MARV_LED_OVER 0x19 /* 16 bit r/w Manual LED Override Reg */
558#define PHY_MARV_EXT_CTRL_2 0x1a /* 16 bit r/w Ext. PHY Specific Ctrl 2 */
559#define PHY_MARV_EXT_P_STAT 0x1b /* 16 bit r/w Ext. PHY Spec. Stat Reg */
560#define PHY_MARV_CABLE_DIAG 0x1c /* 16 bit r/o Cable Diagnostic Reg */
561 /* 0x1d - 0x1f: reserved */
562
563/*----------------------------------------------------------------------------*/
564/*
565 * Level One-PHY Registers, indirectly addressed over XMAC
566 */
567#define PHY_LONE_CTRL 0x00 /* 16 bit r/w PHY Control Register */
568#define PHY_LONE_STAT 0x01 /* 16 bit r/o PHY Status Register */
569#define PHY_LONE_ID0 0x02 /* 16 bit r/o PHY ID0 Register */
570#define PHY_LONE_ID1 0x03 /* 16 bit r/o PHY ID1 Register */
571#define PHY_LONE_AUNE_ADV 0x04 /* 16 bit r/w Auto-Neg. Advertisement */
572#define PHY_LONE_AUNE_LP 0x05 /* 16 bit r/o Link Part Ability Reg */
573#define PHY_LONE_AUNE_EXP 0x06 /* 16 bit r/o Auto-Neg. Expansion Reg */
574#define PHY_LONE_NEPG 0x07 /* 16 bit r/w Next Page Register */
575#define PHY_LONE_NEPG_LP 0x08 /* 16 bit r/o Next Page Link Partner */
576 /* Level One-specific registers */
577#define PHY_LONE_1000T_CTRL 0x09 /* 16 bit r/w 1000Base-T Control Reg*/
578#define PHY_LONE_1000T_STAT 0x0a /* 16 bit r/o 1000Base-T Status Reg */
579 /* 0x0b - 0x0e: reserved */
580#define PHY_LONE_EXT_STAT 0x0f /* 16 bit r/o Extended Status Reg */
581#define PHY_LONE_PORT_CFG 0x10 /* 16 bit r/w Port Configuration Reg*/
582#define PHY_LONE_Q_STAT 0x11 /* 16 bit r/o Quick Status Reg */
583#define PHY_LONE_INT_ENAB 0x12 /* 16 bit r/w Interrupt Enable Reg */
584#define PHY_LONE_INT_STAT 0x13 /* 16 bit r/o Interrupt Status Reg */
585#define PHY_LONE_LED_CFG 0x14 /* 16 bit r/w LED Configuration Reg */
586#define PHY_LONE_PORT_CTRL 0x15 /* 16 bit r/w Port Control Reg */
587#define PHY_LONE_CIM 0x16 /* 16 bit r/o CIM Reg */
588 /* 0x17 - 0x1c: reserved */
589
590/*----------------------------------------------------------------------------*/
591/*
592 * National-PHY Registers, indirect addressed over XMAC
593 */
594#define PHY_NAT_CTRL 0x00 /* 16 bit r/w PHY Control Register */
595#define PHY_NAT_STAT 0x01 /* 16 bit r/w PHY Status Register */
596#define PHY_NAT_ID0 0x02 /* 16 bit r/o PHY ID0 Register */
597#define PHY_NAT_ID1 0x03 /* 16 bit r/o PHY ID1 Register */
598#define PHY_NAT_AUNE_ADV 0x04 /* 16 bit r/w Auto-Neg. Advertisement */
599#define PHY_NAT_AUNE_LP 0x05 /* 16 bit r/o Link Partner Ability Reg */
600#define PHY_NAT_AUNE_EXP 0x06 /* 16 bit r/o Auto-Neg. Expansion Reg */
601#define PHY_NAT_NEPG 0x07 /* 16 bit r/w Next Page Register */
602#define PHY_NAT_NEPG_LP 0x08 /* 16 bit r/o Next Page Link Partner Reg */
603 /* National-specific registers */
604#define PHY_NAT_1000T_CTRL 0x09 /* 16 bit r/w 1000Base-T Control Reg */
605#define PHY_NAT_1000T_STAT 0x0a /* 16 bit r/o 1000Base-T Status Reg */
606 /* 0x0b - 0x0e: reserved */
607#define PHY_NAT_EXT_STAT 0x0f /* 16 bit r/o Extended Status Register */
608#define PHY_NAT_EXT_CTRL1 0x10 /* 16 bit r/o Extended Control Reg1 */
609#define PHY_NAT_Q_STAT1 0x11 /* 16 bit r/o Quick Status Reg1 */
610#define PHY_NAT_10B_OP 0x12 /* 16 bit r/o 10Base-T Operations Reg */
611#define PHY_NAT_EXT_CTRL2 0x13 /* 16 bit r/o Extended Control Reg2 */
612#define PHY_NAT_Q_STAT2 0x14 /* 16 bit r/o Quick Status Reg2 */
613 /* 0x15 - 0x18: reserved */
614#define PHY_NAT_PHY_ADDR 0x19 /* 16 bit r/o PHY Address Register */
615
616
617/*----------------------------------------------------------------------------*/
618
619/*
620 * PHY bit definitions
621 * Bits defined as PHY_X_..., PHY_B_..., PHY_L_..., PHY_N_..., or PHY_M_...
622 * are XMAC/Broadcom/LevelOne/National/Marvell-specific.
623 * All others are general.
624 */
625
626/***** PHY_XMAC_CTRL 16 bit r/w PHY Control Register *****/
627/***** PHY_BCOM_CTRL 16 bit r/w PHY Control Register *****/
628/***** PHY_MARV_CTRL 16 bit r/w PHY Control Register *****/
629/***** PHY_LONE_CTRL 16 bit r/w PHY Control Register *****/
630#define PHY_CT_RESET (1<<15) /* Bit 15: (sc) clear all PHY related regs */
631#define PHY_CT_LOOP (1<<14) /* Bit 14: enable Loopback over PHY */
632#define PHY_CT_SPS_LSB (1<<13) /* Bit 13: (BC,L1) Speed select, lower bit */
633#define PHY_CT_ANE (1<<12) /* Bit 12: Auto-Negotiation Enabled */
634#define PHY_CT_PDOWN (1<<11) /* Bit 11: (BC,L1) Power Down Mode */
635#define PHY_CT_ISOL (1<<10) /* Bit 10: (BC,L1) Isolate Mode */
636#define PHY_CT_RE_CFG (1<<9) /* Bit 9: (sc) Restart Auto-Negotiation */
637#define PHY_CT_DUP_MD (1<<8) /* Bit 8: Duplex Mode */
638#define PHY_CT_COL_TST (1<<7) /* Bit 7: (BC,L1) Collision Test enabled */
639#define PHY_CT_SPS_MSB (1<<6) /* Bit 6: (BC,L1) Speed select, upper bit */
640 /* Bit 5..0: reserved */
641
642#define PHY_CT_SP1000 PHY_CT_SPS_MSB /* enable speed of 1000 Mbps */
643#define PHY_CT_SP100 PHY_CT_SPS_LSB /* enable speed of 100 Mbps */
644#define PHY_CT_SP10 (0) /* enable speed of 10 Mbps */
645
646
647/***** PHY_XMAC_STAT 16 bit r/w PHY Status Register *****/
648/***** PHY_BCOM_STAT 16 bit r/o PHY Status Register *****/
649/***** PHY_MARV_STAT 16 bit r/o PHY Status Register *****/
650/***** PHY_LONE_STAT 16 bit r/o PHY Status Register *****/
651 /* Bit 15..9: reserved */
652 /* (BC/L1) 100/10 Mbps cap bits ignored*/
653#define PHY_ST_EXT_ST (1<<8) /* Bit 8: Extended Status Present */
654 /* Bit 7: reserved */
655#define PHY_ST_PRE_SUP (1<<6) /* Bit 6: (BC/L1) preamble suppression */
656#define PHY_ST_AN_OVER (1<<5) /* Bit 5: Auto-Negotiation Over */
657#define PHY_ST_REM_FLT (1<<4) /* Bit 4: Remote Fault Condition Occurred */
658#define PHY_ST_AN_CAP (1<<3) /* Bit 3: Auto-Negotiation Capability */
659#define PHY_ST_LSYNC (1<<2) /* Bit 2: Link Synchronized */
660#define PHY_ST_JAB_DET (1<<1) /* Bit 1: (BC/L1) Jabber Detected */
661#define PHY_ST_EXT_REG (1<<0) /* Bit 0: Extended Register available */
662
663
664/***** PHY_XMAC_ID1 16 bit r/o PHY ID1 Register *****/
665/***** PHY_BCOM_ID1 16 bit r/o PHY ID1 Register *****/
666/***** PHY_MARV_ID1 16 bit r/o PHY ID1 Register *****/
667/***** PHY_LONE_ID1 16 bit r/o PHY ID1 Register *****/
668#define PHY_I1_OUI_MSK (0x3f<<10) /* Bit 15..10: Organization Unique ID */
669#define PHY_I1_MOD_NUM (0x3f<<4) /* Bit 9.. 4: Model Number */
670#define PHY_I1_REV_MSK 0x0f /* Bit 3.. 0: Revision Number */
671
672/* different Broadcom PHY Ids */
673#define PHY_BCOM_ID1_A1 0x6041
674#define PHY_BCOM_ID1_B2 0x6043
675#define PHY_BCOM_ID1_C0 0x6044
676#define PHY_BCOM_ID1_C5 0x6047
677
678
679/***** PHY_XMAC_AUNE_ADV 16 bit r/w Auto-Negotiation Advertisement *****/
680/***** PHY_XMAC_AUNE_LP 16 bit r/o Link Partner Ability Reg *****/
681#define PHY_AN_NXT_PG (1<<15) /* Bit 15: Request Next Page */
682#define PHY_X_AN_ACK (1<<14) /* Bit 14: (ro) Acknowledge Received */
683#define PHY_X_AN_RFB (3<<12) /* Bit 13..12: Remote Fault Bits */
684 /* Bit 11.. 9: reserved */
685#define PHY_X_AN_PAUSE (3<<7) /* Bit 8.. 7: Pause Bits */
686#define PHY_X_AN_HD (1<<6) /* Bit 6: Half Duplex */
687#define PHY_X_AN_FD (1<<5) /* Bit 5: Full Duplex */
688 /* Bit 4.. 0: reserved */
689
690/***** PHY_BCOM_AUNE_ADV 16 bit r/w Auto-Negotiation Advertisement *****/
691/***** PHY_BCOM_AUNE_LP 16 bit r/o Link Partner Ability Reg *****/
692/* PHY_AN_NXT_PG (see XMAC) Bit 15: Request Next Page */
693 /* Bit 14: reserved */
694#define PHY_B_AN_RF (1<<13) /* Bit 13: Remote Fault */
695 /* Bit 12: reserved */
696#define PHY_B_AN_ASP (1<<11) /* Bit 11: Asymmetric Pause */
697#define PHY_B_AN_PC (1<<10) /* Bit 10: Pause Capable */
698 /* Bit 9..5: 100/10 BT cap bits ignored */
699#define PHY_B_AN_SEL 0x1f /* Bit 4..0: Selector Field, 00001=Ethernet*/
700
701/***** PHY_LONE_AUNE_ADV 16 bit r/w Auto-Negotiation Advertisement *****/
702/***** PHY_LONE_AUNE_LP 16 bit r/o Link Partner Ability Reg *****/
703/* PHY_AN_NXT_PG (see XMAC) Bit 15: Request Next Page */
704 /* Bit 14: reserved */
705#define PHY_L_AN_RF (1<<13) /* Bit 13: Remote Fault */
706 /* Bit 12: reserved */
707#define PHY_L_AN_ASP (1<<11) /* Bit 11: Asymmetric Pause */
708#define PHY_L_AN_PC (1<<10) /* Bit 10: Pause Capable */
709 /* Bit 9..5: 100/10 BT cap bits ignored */
710#define PHY_L_AN_SEL 0x1f /* Bit 4..0: Selector Field, 00001=Ethernet*/
711
712/***** PHY_NAT_AUNE_ADV 16 bit r/w Auto-Negotiation Advertisement *****/
713/***** PHY_NAT_AUNE_LP 16 bit r/o Link Partner Ability Reg *****/
714/* PHY_AN_NXT_PG (see XMAC) Bit 15: Request Next Page */
715 /* Bit 14: reserved */
716#define PHY_N_AN_RF (1<<13) /* Bit 13: Remote Fault */
717 /* Bit 12: reserved */
718#define PHY_N_AN_100F (1<<11) /* Bit 11: 100Base-T2 FD Support */
719#define PHY_N_AN_100H (1<<10) /* Bit 10: 100Base-T2 HD Support */
720 /* Bit 9..5: 100/10 BT cap bits ignored */
721#define PHY_N_AN_SEL 0x1f /* Bit 4..0: Selector Field, 00001=Ethernet*/
722
723/* field type definition for PHY_x_AN_SEL */
724#define PHY_SEL_TYPE 0x01 /* 00001 = Ethernet */
725
726/***** PHY_XMAC_AUNE_EXP 16 bit r/o Auto-Negotiation Expansion Reg *****/
727 /* Bit 15..4: reserved */
728#define PHY_ANE_LP_NP (1<<3) /* Bit 3: Link Partner can Next Page */
729#define PHY_ANE_LOC_NP (1<<2) /* Bit 2: Local PHY can Next Page */
730#define PHY_ANE_RX_PG (1<<1) /* Bit 1: Page Received */
731 /* Bit 0: reserved */
732
733/***** PHY_BCOM_AUNE_EXP 16 bit r/o Auto-Negotiation Expansion Reg *****/
734/***** PHY_LONE_AUNE_EXP 16 bit r/o Auto-Negotiation Expansion Reg *****/
735/***** PHY_MARV_AUNE_EXP 16 bit r/o Auto-Negotiation Expansion Reg *****/
736 /* Bit 15..5: reserved */
737#define PHY_ANE_PAR_DF (1<<4) /* Bit 4: Parallel Detection Fault */
738/* PHY_ANE_LP_NP (see XMAC) Bit 3: Link Partner can Next Page */
739/* PHY_ANE_LOC_NP (see XMAC) Bit 2: Local PHY can Next Page */
740/* PHY_ANE_RX_PG (see XMAC) Bit 1: Page Received */
741#define PHY_ANE_LP_CAP (1<<0) /* Bit 0: Link Partner Auto-Neg. Cap. */
742
743/***** PHY_XMAC_NEPG 16 bit r/w Next Page Register *****/
744/***** PHY_BCOM_NEPG 16 bit r/w Next Page Register *****/
745/***** PHY_LONE_NEPG 16 bit r/w Next Page Register *****/
746/***** PHY_XMAC_NEPG_LP 16 bit r/o Next Page Link Partner *****/
747/***** PHY_BCOM_NEPG_LP 16 bit r/o Next Page Link Partner *****/
748/***** PHY_LONE_NEPG_LP 16 bit r/o Next Page Link Partner *****/
749#define PHY_NP_MORE (1<<15) /* Bit 15: More, Next Pages to follow */
750#define PHY_NP_ACK1 (1<<14) /* Bit 14: (ro) Ack1, for receiving a message */
751#define PHY_NP_MSG_VAL (1<<13) /* Bit 13: Message Page valid */
752#define PHY_NP_ACK2 (1<<12) /* Bit 12: Ack2, comply with msg content */
753#define PHY_NP_TOG (1<<11) /* Bit 11: Toggle Bit, ensure sync */
754#define PHY_NP_MSG 0x07ff /* Bit 10..0: Message from/to Link Partner */
755
756/*
757 * XMAC-Specific
758 */
759/***** PHY_XMAC_EXT_STAT 16 bit r/w Extended Status Register *****/
760#define PHY_X_EX_FD (1<<15) /* Bit 15: Device Supports Full Duplex */
761#define PHY_X_EX_HD (1<<14) /* Bit 14: Device Supports Half Duplex */
762 /* Bit 13..0: reserved */
763
764/***** PHY_XMAC_RES_ABI 16 bit r/o PHY Resolved Ability *****/
765 /* Bit 15..9: reserved */
766#define PHY_X_RS_PAUSE (3<<7) /* Bit 8..7: selected Pause Mode */
767#define PHY_X_RS_HD (1<<6) /* Bit 6: Half Duplex Mode selected */
768#define PHY_X_RS_FD (1<<5) /* Bit 5: Full Duplex Mode selected */
769#define PHY_X_RS_ABLMIS (1<<4) /* Bit 4: duplex or pause cap mismatch */
770#define PHY_X_RS_PAUMIS (1<<3) /* Bit 3: pause capability mismatch */
771 /* Bit 2..0: reserved */
772/*
773 * Remote Fault Bits (PHY_X_AN_RFB) encoding
774 */
775#define X_RFB_OK (0<<12) /* Bit 13..12 No errors, Link OK */
776#define X_RFB_LF (1<<12) /* Bit 13..12 Link Failure */
777#define X_RFB_OFF (2<<12) /* Bit 13..12 Offline */
778#define X_RFB_AN_ERR (3<<12) /* Bit 13..12 Auto-Negotiation Error */
779
780/*
781 * Pause Bits (PHY_X_AN_PAUSE and PHY_X_RS_PAUSE) encoding
782 */
783#define PHY_X_P_NO_PAUSE (0<<7) /* Bit 8..7: no Pause Mode */
784#define PHY_X_P_SYM_MD (1<<7) /* Bit 8..7: symmetric Pause Mode */
785#define PHY_X_P_ASYM_MD (2<<7) /* Bit 8..7: asymmetric Pause Mode */
786#define PHY_X_P_BOTH_MD (3<<7) /* Bit 8..7: both Pause Mode */
787
788
789/*
790 * Broadcom-Specific
791 */
792/***** PHY_BCOM_1000T_CTRL 16 bit r/w 1000Base-T Control Reg *****/
793#define PHY_B_1000C_TEST (7<<13) /* Bit 15..13: Test Modes */
794#define PHY_B_1000C_MSE (1<<12) /* Bit 12: Master/Slave Enable */
795#define PHY_B_1000C_MSC (1<<11) /* Bit 11: M/S Configuration */
796#define PHY_B_1000C_RD (1<<10) /* Bit 10: Repeater/DTE */
797#define PHY_B_1000C_AFD (1<<9) /* Bit 9: Advertise Full Duplex */
798#define PHY_B_1000C_AHD (1<<8) /* Bit 8: Advertise Half Duplex */
799 /* Bit 7..0: reserved */
800
801/***** PHY_BCOM_1000T_STAT 16 bit r/o 1000Base-T Status Reg *****/
802/***** PHY_MARV_1000T_STAT 16 bit r/o 1000Base-T Status Reg *****/
803#define PHY_B_1000S_MSF (1<<15) /* Bit 15: Master/Slave Fault */
804#define PHY_B_1000S_MSR (1<<14) /* Bit 14: Master/Slave Result */
805#define PHY_B_1000S_LRS (1<<13) /* Bit 13: Local Receiver Status */
806#define PHY_B_1000S_RRS (1<<12) /* Bit 12: Remote Receiver Status */
807#define PHY_B_1000S_LP_FD (1<<11) /* Bit 11: Link Partner can FD */
808#define PHY_B_1000S_LP_HD (1<<10) /* Bit 10: Link Partner can HD */
809 /* Bit 9..8: reserved */
810#define PHY_B_1000S_IEC 0xff /* Bit 7..0: Idle Error Count */
811
812/***** PHY_BCOM_EXT_STAT 16 bit r/o Extended Status Register *****/
813#define PHY_B_ES_X_FD_CAP (1<<15) /* Bit 15: 1000Base-X FD capable */
814#define PHY_B_ES_X_HD_CAP (1<<14) /* Bit 14: 1000Base-X HD capable */
815#define PHY_B_ES_T_FD_CAP (1<<13) /* Bit 13: 1000Base-T FD capable */
816#define PHY_B_ES_T_HD_CAP (1<<12) /* Bit 12: 1000Base-T HD capable */
817 /* Bit 11..0: reserved */
818
819/***** PHY_BCOM_P_EXT_CTRL 16 bit r/w PHY Extended Control Reg *****/
820#define PHY_B_PEC_MAC_PHY (1<<15) /* Bit 15: 10BIT/GMII-Interface */
821#define PHY_B_PEC_DIS_CROSS (1<<14) /* Bit 14: Disable MDI Crossover */
822#define PHY_B_PEC_TX_DIS (1<<13) /* Bit 13: Tx output Disabled */
823#define PHY_B_PEC_INT_DIS (1<<12) /* Bit 12: Interrupts Disabled */
824#define PHY_B_PEC_F_INT (1<<11) /* Bit 11: Force Interrupt */
825#define PHY_B_PEC_BY_45 (1<<10) /* Bit 10: Bypass 4B5B-Decoder */
826#define PHY_B_PEC_BY_SCR (1<<9) /* Bit 9: Bypass Scrambler */
827#define PHY_B_PEC_BY_MLT3 (1<<8) /* Bit 8: Bypass MLT3 Encoder */
828#define PHY_B_PEC_BY_RXA (1<<7) /* Bit 7: Bypass Rx Alignm. */
829#define PHY_B_PEC_RES_SCR (1<<6) /* Bit 6: Reset Scrambler */
830#define PHY_B_PEC_EN_LTR (1<<5) /* Bit 5: Ena LED Traffic Mode */
831#define PHY_B_PEC_LED_ON (1<<4) /* Bit 4: Force LED's on */
832#define PHY_B_PEC_LED_OFF (1<<3) /* Bit 3: Force LED's off */
833#define PHY_B_PEC_EX_IPG (1<<2) /* Bit 2: Extend Tx IPG Mode */
834#define PHY_B_PEC_3_LED (1<<1) /* Bit 1: Three Link LED mode */
835#define PHY_B_PEC_HIGH_LA (1<<0) /* Bit 0: GMII FIFO Elasticity */
836
837/***** PHY_BCOM_P_EXT_STAT 16 bit r/o PHY Extended Status Reg *****/
838 /* Bit 15..14: reserved */
839#define PHY_B_PES_CROSS_STAT (1<<13) /* Bit 13: MDI Crossover Status */
840#define PHY_B_PES_INT_STAT (1<<12) /* Bit 12: Interrupt Status */
841#define PHY_B_PES_RRS (1<<11) /* Bit 11: Remote Receiver Stat. */
842#define PHY_B_PES_LRS (1<<10) /* Bit 10: Local Receiver Stat. */
843#define PHY_B_PES_LOCKED (1<<9) /* Bit 9: Locked */
844#define PHY_B_PES_LS (1<<8) /* Bit 8: Link Status */
845#define PHY_B_PES_RF (1<<7) /* Bit 7: Remote Fault */
846#define PHY_B_PES_CE_ER (1<<6) /* Bit 6: Carrier Ext Error */
847#define PHY_B_PES_BAD_SSD (1<<5) /* Bit 5: Bad SSD */
848#define PHY_B_PES_BAD_ESD (1<<4) /* Bit 4: Bad ESD */
849#define PHY_B_PES_RX_ER (1<<3) /* Bit 3: Receive Error */
850#define PHY_B_PES_TX_ER (1<<2) /* Bit 2: Transmit Error */
851#define PHY_B_PES_LOCK_ER (1<<1) /* Bit 1: Lock Error */
852#define PHY_B_PES_MLT3_ER (1<<0) /* Bit 0: MLT3 code Error */
853
854/***** PHY_BCOM_FC_CTR 16 bit r/w False Carrier Counter *****/
855 /* Bit 15..8: reserved */
856#define PHY_B_FC_CTR 0xff /* Bit 7..0: False Carrier Counter */
857
858/***** PHY_BCOM_RNO_CTR 16 bit r/w Receive NOT_OK Counter *****/
859#define PHY_B_RC_LOC_MSK 0xff00 /* Bit 15..8: Local Rx NOT_OK cnt */
860#define PHY_B_RC_REM_MSK 0x00ff /* Bit 7..0: Remote Rx NOT_OK cnt */
861
862/***** PHY_BCOM_AUX_CTRL 16 bit r/w Auxiliary Control Reg *****/
863#define PHY_B_AC_L_SQE (1<<15) /* Bit 15: Low Squelch */
864#define PHY_B_AC_LONG_PACK (1<<14) /* Bit 14: Rx Long Packets */
865#define PHY_B_AC_ER_CTRL (3<<12) /* Bit 13..12: Edgerate Control */
866 /* Bit 11: reserved */
867#define PHY_B_AC_TX_TST (1<<10) /* Bit 10: Tx test bit, always 1 */
868 /* Bit 9.. 8: reserved */
869#define PHY_B_AC_DIS_PRF (1<<7) /* Bit 7: dis part resp filter */
870 /* Bit 6: reserved */
871#define PHY_B_AC_DIS_PM (1<<5) /* Bit 5: dis power management */
872 /* Bit 4: reserved */
873#define PHY_B_AC_DIAG (1<<3) /* Bit 3: Diagnostic Mode */
874 /* Bit 2.. 0: reserved */
875
876/***** PHY_BCOM_AUX_STAT 16 bit r/o Auxiliary Status Reg *****/
877#define PHY_B_AS_AN_C (1<<15) /* Bit 15: AutoNeg complete */
878#define PHY_B_AS_AN_CA (1<<14) /* Bit 14: AN Complete Ack */
879#define PHY_B_AS_ANACK_D (1<<13) /* Bit 13: AN Ack Detect */
880#define PHY_B_AS_ANAB_D (1<<12) /* Bit 12: AN Ability Detect */
881#define PHY_B_AS_NPW (1<<11) /* Bit 11: AN Next Page Wait */
882#define PHY_B_AS_AN_RES_MSK (7<<8) /* Bit 10..8: AN HCD */
883#define PHY_B_AS_PDF (1<<7) /* Bit 7: Parallel Detect. Fault */
884#define PHY_B_AS_RF (1<<6) /* Bit 6: Remote Fault */
885#define PHY_B_AS_ANP_R (1<<5) /* Bit 5: AN Page Received */
886#define PHY_B_AS_LP_ANAB (1<<4) /* Bit 4: LP AN Ability */
887#define PHY_B_AS_LP_NPAB (1<<3) /* Bit 3: LP Next Page Ability */
888#define PHY_B_AS_LS (1<<2) /* Bit 2: Link Status */
889#define PHY_B_AS_PRR (1<<1) /* Bit 1: Pause Resolution-Rx */
890#define PHY_B_AS_PRT (1<<0) /* Bit 0: Pause Resolution-Tx */
891
892#define PHY_B_AS_PAUSE_MSK (PHY_B_AS_PRR | PHY_B_AS_PRT)
893
894/***** PHY_BCOM_INT_STAT 16 bit r/o Interrupt Status Reg *****/
895/***** PHY_BCOM_INT_MASK 16 bit r/w Interrupt Mask Reg *****/
896 /* Bit 15: reserved */
897#define PHY_B_IS_PSE (1<<14) /* Bit 14: Pair Swap Error */
898#define PHY_B_IS_MDXI_SC (1<<13) /* Bit 13: MDIX Status Change */
899#define PHY_B_IS_HCT (1<<12) /* Bit 12: counter above 32k */
900#define PHY_B_IS_LCT (1<<11) /* Bit 11: counter above 128 */
901#define PHY_B_IS_AN_PR (1<<10) /* Bit 10: Page Received */
902#define PHY_B_IS_NO_HDCL (1<<9) /* Bit 9: No HCD Link */
903#define PHY_B_IS_NO_HDC (1<<8) /* Bit 8: No HCD */
904#define PHY_B_IS_NEG_USHDC (1<<7) /* Bit 7: Negotiated Unsup. HCD */
905#define PHY_B_IS_SCR_S_ER (1<<6) /* Bit 6: Scrambler Sync Error */
906#define PHY_B_IS_RRS_CHANGE (1<<5) /* Bit 5: Remote Rx Stat Change */
907#define PHY_B_IS_LRS_CHANGE (1<<4) /* Bit 4: Local Rx Stat Change */
908#define PHY_B_IS_DUP_CHANGE (1<<3) /* Bit 3: Duplex Mode Change */
909#define PHY_B_IS_LSP_CHANGE (1<<2) /* Bit 2: Link Speed Change */
910#define PHY_B_IS_LST_CHANGE (1<<1) /* Bit 1: Link Status Changed */
911#define PHY_B_IS_CRC_ER (1<<0) /* Bit 0: CRC Error */
912
913#define PHY_B_DEF_MSK (~(PHY_B_IS_AN_PR | PHY_B_IS_LST_CHANGE))
914
915/* Pause Bits (PHY_B_AN_ASP and PHY_B_AN_PC) encoding */
916#define PHY_B_P_NO_PAUSE (0<<10) /* Bit 11..10: no Pause Mode */
917#define PHY_B_P_SYM_MD (1<<10) /* Bit 11..10: symmetric Pause Mode */
918#define PHY_B_P_ASYM_MD (2<<10) /* Bit 11..10: asymmetric Pause Mode */
919#define PHY_B_P_BOTH_MD (3<<10) /* Bit 11..10: both Pause Mode */
920
921/*
922 * Resolved Duplex mode and Capabilities (Aux Status Summary Reg)
923 */
924#define PHY_B_RES_1000FD (7<<8) /* Bit 10..8: 1000Base-T Full Dup. */
925#define PHY_B_RES_1000HD (6<<8) /* Bit 10..8: 1000Base-T Half Dup. */
926/* others: 100/10: invalid for us */
927
928/*
929 * Level One-Specific
930 */
931/***** PHY_LONE_1000T_CTRL 16 bit r/w 1000Base-T Control Reg *****/
932#define PHY_L_1000C_TEST (7<<13) /* Bit 15..13: Test Modes */
933#define PHY_L_1000C_MSE (1<<12) /* Bit 12: Master/Slave Enable */
934#define PHY_L_1000C_MSC (1<<11) /* Bit 11: M/S Configuration */
935#define PHY_L_1000C_RD (1<<10) /* Bit 10: Repeater/DTE */
936#define PHY_L_1000C_AFD (1<<9) /* Bit 9: Advertise Full Duplex */
937#define PHY_L_1000C_AHD (1<<8) /* Bit 8: Advertise Half Duplex */
938 /* Bit 7..0: reserved */
939
940/***** PHY_LONE_1000T_STAT 16 bit r/o 1000Base-T Status Reg *****/
941#define PHY_L_1000S_MSF (1<<15) /* Bit 15: Master/Slave Fault */
942#define PHY_L_1000S_MSR (1<<14) /* Bit 14: Master/Slave Result */
943#define PHY_L_1000S_LRS (1<<13) /* Bit 13: Local Receiver Status */
944#define PHY_L_1000S_RRS (1<<12) /* Bit 12: Remote Receiver Status */
945#define PHY_L_1000S_LP_FD (1<<11) /* Bit 11: Link Partner can FD */
946#define PHY_L_1000S_LP_HD (1<<10) /* Bit 10: Link Partner can HD */
947 /* Bit 9..8: reserved */
948#define PHY_B_1000S_IEC 0xff /* Bit 7..0: Idle Error Count */
949
950/***** PHY_LONE_EXT_STAT 16 bit r/o Extended Status Register *****/
951#define PHY_L_ES_X_FD_CAP (1<<15) /* Bit 15: 1000Base-X FD capable */
952#define PHY_L_ES_X_HD_CAP (1<<14) /* Bit 14: 1000Base-X HD capable */
953#define PHY_L_ES_T_FD_CAP (1<<13) /* Bit 13: 1000Base-T FD capable */
954#define PHY_L_ES_T_HD_CAP (1<<12) /* Bit 12: 1000Base-T HD capable */
955 /* Bit 11..0: reserved */
956
957/***** PHY_LONE_PORT_CFG 16 bit r/w Port Configuration Reg *****/
958#define PHY_L_PC_REP_MODE (1<<15) /* Bit 15: Repeater Mode */
959 /* Bit 14: reserved */
960#define PHY_L_PC_TX_DIS (1<<13) /* Bit 13: Tx output Disabled */
961#define PHY_L_PC_BY_SCR (1<<12) /* Bit 12: Bypass Scrambler */
962#define PHY_L_PC_BY_45 (1<<11) /* Bit 11: Bypass 4B5B-Decoder */
963#define PHY_L_PC_JAB_DIS (1<<10) /* Bit 10: Jabber Disabled */
964#define PHY_L_PC_SQE (1<<9) /* Bit 9: Enable Heartbeat */
965#define PHY_L_PC_TP_LOOP (1<<8) /* Bit 8: TP Loopback */
966#define PHY_L_PC_SSS (1<<7) /* Bit 7: Smart Speed Selection */
967#define PHY_L_PC_FIFO_SIZE (1<<6) /* Bit 6: FIFO Size */
968#define PHY_L_PC_PRE_EN (1<<5) /* Bit 5: Preamble Enable */
969#define PHY_L_PC_CIM (1<<4) /* Bit 4: Carrier Integrity Mon */
970#define PHY_L_PC_10_SER (1<<3) /* Bit 3: Use Serial Output */
971#define PHY_L_PC_ANISOL (1<<2) /* Bit 2: Unisolate Port */
972#define PHY_L_PC_TEN_BIT (1<<1) /* Bit 1: 10bit iface mode on */
973#define PHY_L_PC_ALTCLOCK (1<<0) /* Bit 0: (ro) ALTCLOCK Mode on */
974
975/***** PHY_LONE_Q_STAT 16 bit r/o Quick Status Reg *****/
976#define PHY_L_QS_D_RATE (3<<14) /* Bit 15..14: Data Rate */
977#define PHY_L_QS_TX_STAT (1<<13) /* Bit 13: Transmitting */
978#define PHY_L_QS_RX_STAT (1<<12) /* Bit 12: Receiving */
979#define PHY_L_QS_COL_STAT (1<<11) /* Bit 11: Collision */
980#define PHY_L_QS_L_STAT (1<<10) /* Bit 10: Link is up */
981#define PHY_L_QS_DUP_MOD (1<<9) /* Bit 9: Full/Half Duplex */
982#define PHY_L_QS_AN (1<<8) /* Bit 8: AutoNeg is On */
983#define PHY_L_QS_AN_C (1<<7) /* Bit 7: AN is Complete */
984#define PHY_L_QS_LLE (7<<4) /* Bit 6..4: Line Length Estim. */
985#define PHY_L_QS_PAUSE (1<<3) /* Bit 3: LP advertised Pause */
986#define PHY_L_QS_AS_PAUSE (1<<2) /* Bit 2: LP adv. asym. Pause */
987#define PHY_L_QS_ISOLATE (1<<1) /* Bit 1: CIM Isolated */
988#define PHY_L_QS_EVENT (1<<0) /* Bit 0: Event has occurred */
989
990/***** PHY_LONE_INT_ENAB 16 bit r/w Interrupt Enable Reg *****/
991/***** PHY_LONE_INT_STAT 16 bit r/o Interrupt Status Reg *****/
992 /* Bit 15..14: reserved */
993#define PHY_L_IS_AN_F (1<<13) /* Bit 13: Auto-Negotiation fault */
994 /* Bit 12: not described */
995#define PHY_L_IS_CROSS (1<<11) /* Bit 11: Crossover used */
996#define PHY_L_IS_POL (1<<10) /* Bit 10: Polarity correct. used */
997#define PHY_L_IS_SS (1<<9) /* Bit 9: Smart Speed Downgrade */
998#define PHY_L_IS_CFULL (1<<8) /* Bit 8: Counter Full */
999#define PHY_L_IS_AN_C (1<<7) /* Bit 7: AutoNeg Complete */
1000#define PHY_L_IS_SPEED (1<<6) /* Bit 6: Speed Changed */
1001#define PHY_L_IS_DUP (1<<5) /* Bit 5: Duplex Changed */
1002#define PHY_L_IS_LS (1<<4) /* Bit 4: Link Status Changed */
1003#define PHY_L_IS_ISOL (1<<3) /* Bit 3: Isolate Occurred */
1004#define PHY_L_IS_MDINT (1<<2) /* Bit 2: (ro) STAT: MII Int Pending */
1005#define PHY_L_IS_INTEN (1<<1) /* Bit 1: ENAB: Enable IRQs */
1006#define PHY_L_IS_FORCE (1<<0) /* Bit 0: ENAB: Force Interrupt */
1007
1008/* int. mask */
1009#define PHY_L_DEF_MSK (PHY_L_IS_LS | PHY_L_IS_ISOL | PHY_L_IS_INTEN)
1010
1011/***** PHY_LONE_LED_CFG 16 bit r/w LED Configuration Reg *****/
1012#define PHY_L_LC_LEDC (3<<14) /* Bit 15..14: Col/Blink/On/Off */
1013#define PHY_L_LC_LEDR (3<<12) /* Bit 13..12: Rx/Blink/On/Off */
1014#define PHY_L_LC_LEDT (3<<10) /* Bit 11..10: Tx/Blink/On/Off */
1015#define PHY_L_LC_LEDG (3<<8) /* Bit 9..8: Giga/Blink/On/Off */
1016#define PHY_L_LC_LEDS (3<<6) /* Bit 7..6: 10-100/Blink/On/Off */
1017#define PHY_L_LC_LEDL (3<<4) /* Bit 5..4: Link/Blink/On/Off */
1018#define PHY_L_LC_LEDF (3<<2) /* Bit 3..2: Duplex/Blink/On/Off */
1019#define PHY_L_LC_PSTRECH (1<<1) /* Bit 1: Stretch LED Pulses */
1020#define PHY_L_LC_FREQ (1<<0) /* Bit 0: 30/100 ms */
1021
1022/***** PHY_LONE_PORT_CTRL 16 bit r/w Port Control Reg *****/
1023#define PHY_L_PC_TX_TCLK (1<<15) /* Bit 15: Enable TX_TCLK */
1024 /* Bit 14: reserved */
1025#define PHY_L_PC_ALT_NP (1<<13) /* Bit 13: Alternate Next Page */
1026#define PHY_L_PC_GMII_ALT (1<<12) /* Bit 12: Alternate GMII driver */
1027 /* Bit 11: reserved */
1028#define PHY_L_PC_TEN_CRS (1<<10) /* Bit 10: Extend CRS*/
1029 /* Bit 9..0: not described */
1030
1031/***** PHY_LONE_CIM 16 bit r/o CIM Reg *****/
1032#define PHY_L_CIM_ISOL (255<<8)/* Bit 15..8: Isolate Count */
1033#define PHY_L_CIM_FALSE_CAR (255<<0)/* Bit 7..0: False Carrier Count */
1034
1035
1036/*
1037 * Pause Bits (PHY_L_AN_ASP and PHY_L_AN_PC) encoding
1038 */
1039#define PHY_L_P_NO_PAUSE (0<<10) /* Bit 11..10: no Pause Mode */
1040#define PHY_L_P_SYM_MD (1<<10) /* Bit 11..10: symmetric Pause Mode */
1041#define PHY_L_P_ASYM_MD (2<<10) /* Bit 11..10: asymmetric Pause Mode */
1042#define PHY_L_P_BOTH_MD (3<<10) /* Bit 11..10: both Pause Mode */
1043
1044
1045/*
1046 * National-Specific
1047 */
1048/***** PHY_NAT_1000T_CTRL 16 bit r/w 1000Base-T Control Reg *****/
1049#define PHY_N_1000C_TEST (7<<13) /* Bit 15..13: Test Modes */
1050#define PHY_N_1000C_MSE (1<<12) /* Bit 12: Master/Slave Enable */
1051#define PHY_N_1000C_MSC (1<<11) /* Bit 11: M/S Configuration */
1052#define PHY_N_1000C_RD (1<<10) /* Bit 10: Repeater/DTE */
1053#define PHY_N_1000C_AFD (1<<9) /* Bit 9: Advertise Full Duplex */
1054#define PHY_N_1000C_AHD (1<<8) /* Bit 8: Advertise Half Duplex */
1055#define PHY_N_1000C_APC (1<<7) /* Bit 7: Asymmetric Pause Cap. */
1056 /* Bit 6..0: reserved */
1057
1058/***** PHY_NAT_1000T_STAT 16 bit r/o 1000Base-T Status Reg *****/
1059#define PHY_N_1000S_MSF (1<<15) /* Bit 15: Master/Slave Fault */
1060#define PHY_N_1000S_MSR (1<<14) /* Bit 14: Master/Slave Result */
1061#define PHY_N_1000S_LRS (1<<13) /* Bit 13: Local Receiver Status */
1062#define PHY_N_1000S_RRS (1<<12) /* Bit 12: Remote Receiver Status*/
1063#define PHY_N_1000S_LP_FD (1<<11) /* Bit 11: Link Partner can FD */
1064#define PHY_N_1000S_LP_HD (1<<10) /* Bit 10: Link Partner can HD */
1065#define PHY_N_1000C_LP_APC (1<<9) /* Bit 9: LP Asym. Pause Cap. */
1066 /* Bit 8: reserved */
1067#define PHY_N_1000S_IEC 0xff /* Bit 7..0: Idle Error Count */
1068
1069/***** PHY_NAT_EXT_STAT 16 bit r/o Extended Status Register *****/
1070#define PHY_N_ES_X_FD_CAP (1<<15) /* Bit 15: 1000Base-X FD capable */
1071#define PHY_N_ES_X_HD_CAP (1<<14) /* Bit 14: 1000Base-X HD capable */
1072#define PHY_N_ES_T_FD_CAP (1<<13) /* Bit 13: 1000Base-T FD capable */
1073#define PHY_N_ES_T_HD_CAP (1<<12) /* Bit 12: 1000Base-T HD capable */
1074 /* Bit 11..0: reserved */
1075
1076/* todo: these are still missing */
1077/***** PHY_NAT_EXT_CTRL1 16 bit r/o Extended Control Reg1 *****/
1078/***** PHY_NAT_Q_STAT1 16 bit r/o Quick Status Reg1 *****/
1079/***** PHY_NAT_10B_OP 16 bit r/o 10Base-T Operations Reg *****/
1080/***** PHY_NAT_EXT_CTRL2 16 bit r/o Extended Control Reg2 *****/
1081/***** PHY_NAT_Q_STAT2 16 bit r/o Quick Status Reg2 *****/
1082/***** PHY_NAT_PHY_ADDR 16 bit r/o PHY Address Register *****/
1083
1084/*
1085 * Marvell-Specific
1086 */
1087/***** PHY_MARV_AUNE_ADV 16 bit r/w Auto-Negotiation Advertisement *****/
1088/***** PHY_MARV_AUNE_LP 16 bit r/o Link Part Ability Reg *****/
1089#define PHY_M_AN_NXT_PG BIT_15 /* Request Next Page */
1090#define PHY_M_AN_ACK BIT_14 /* (ro) Acknowledge Received */
1091#define PHY_M_AN_RF BIT_13 /* Remote Fault */
1092 /* Bit 12: reserved */
1093#define PHY_M_AN_ASP BIT_11 /* Asymmetric Pause */
1094#define PHY_M_AN_PC BIT_10 /* MAC Pause implemented */
1095#define PHY_M_AN_100_FD BIT_8 /* Advertise 100Base-TX Full Duplex */
1096#define PHY_M_AN_100_HD BIT_7 /* Advertise 100Base-TX Half Duplex */
1097#define PHY_M_AN_10_FD BIT_6 /* Advertise 10Base-T Full Duplex */
1098#define PHY_M_AN_10_HD BIT_5 /* Advertise 10Base-T Half Duplex */
1099
1100/* special defines for FIBER (88E1011S only) */
1101#define PHY_M_AN_ASP_X BIT_8 /* Asymmetric Pause */
1102#define PHY_M_AN_PC_X BIT_7 /* MAC Pause implemented */
1103#define PHY_M_AN_1000X_AHD BIT_6 /* Advertise 1000Base-X Half Duplex */
1104#define PHY_M_AN_1000X_AFD BIT_5 /* Advertise 1000Base-X Full Duplex */
1105
1106/* Pause Bits (PHY_M_AN_ASP_X and PHY_M_AN_PC_X) encoding */
1107#define PHY_M_P_NO_PAUSE_X (0<<7) /* Bit 8.. 7: no Pause Mode */
1108#define PHY_M_P_SYM_MD_X (1<<7) /* Bit 8.. 7: symmetric Pause Mode */
1109#define PHY_M_P_ASYM_MD_X (2<<7) /* Bit 8.. 7: asymmetric Pause Mode */
1110#define PHY_M_P_BOTH_MD_X (3<<7) /* Bit 8.. 7: both Pause Mode */
1111
1112/***** PHY_MARV_1000T_CTRL 16 bit r/w 1000Base-T Control Reg *****/
1113#define PHY_M_1000C_TEST (7<<13) /* Bit 15..13: Test Modes */
1114#define PHY_M_1000C_MSE (1<<12) /* Bit 12: Manual Master/Slave Enable */
1115#define PHY_M_1000C_MSC (1<<11) /* Bit 11: M/S Configuration (1=Master) */
1116#define PHY_M_1000C_MPD (1<<10) /* Bit 10: Multi-Port Device */
1117#define PHY_M_1000C_AFD (1<<9) /* Bit 9: Advertise Full Duplex */
1118#define PHY_M_1000C_AHD (1<<8) /* Bit 8: Advertise Half Duplex */
1119 /* Bit 7..0: reserved */
1120
1121/***** PHY_MARV_PHY_CTRL 16 bit r/w PHY Specific Ctrl Reg *****/
1122#define PHY_M_PC_TX_FFD_MSK (3<<14) /* Bit 15..14: Tx FIFO Depth Mask */
1123#define PHY_M_PC_RX_FFD_MSK (3<<12) /* Bit 13..12: Rx FIFO Depth Mask */
1124#define PHY_M_PC_ASS_CRS_TX (1<<11) /* Bit 11: Assert CRS on Transmit */
1125#define PHY_M_PC_FL_GOOD (1<<10) /* Bit 10: Force Link Good */
1126#define PHY_M_PC_EN_DET_MSK (3<<8) /* Bit 9.. 8: Energy Detect Mask */
1127#define PHY_M_PC_ENA_EXT_D (1<<7) /* Bit 7: Enable Ext. Distance (10BT) */
1128#define PHY_M_PC_MDIX_MSK (3<<5) /* Bit 6.. 5: MDI/MDIX Config. Mask */
1129#define PHY_M_PC_DIS_125CLK (1<<4) /* Bit 4: Disable 125 CLK */
1130#define PHY_M_PC_MAC_POW_UP (1<<3) /* Bit 3: MAC Power up */
1131#define PHY_M_PC_SQE_T_ENA (1<<2) /* Bit 2: SQE Test Enabled */
1132#define PHY_M_PC_POL_R_DIS (1<<1) /* Bit 1: Polarity Reversal Disabled */
1133#define PHY_M_PC_DIS_JABBER (1<<0) /* Bit 0: Disable Jabber */
1134
1135#define PHY_M_PC_EN_DET SHIFT8(2) /* Energy Detect (Mode 1) */
1136#define PHY_M_PC_EN_DET_PLUS SHIFT8(3) /* Energy Detect Plus (Mode 2) */
1137
1138#define PHY_M_PC_MDI_XMODE(x) SHIFT5(x)
1139#define PHY_M_PC_MAN_MDI 0 /* 00 = Manual MDI configuration */
1140#define PHY_M_PC_MAN_MDIX 1 /* 01 = Manual MDIX configuration */
1141#define PHY_M_PC_ENA_AUTO 3 /* 11 = Enable Automatic Crossover */
1142
1143/***** PHY_MARV_PHY_STAT 16 bit r/o PHY Specific Status Reg *****/
1144#define PHY_M_PS_SPEED_MSK (3<<14) /* Bit 15..14: Speed Mask */
1145#define PHY_M_PS_SPEED_1000 (1<<15) /* 10 = 1000 Mbps */
1146#define PHY_M_PS_SPEED_100 (1<<14) /* 01 = 100 Mbps */
1147#define PHY_M_PS_SPEED_10 0 /* 00 = 10 Mbps */
1148#define PHY_M_PS_FULL_DUP (1<<13) /* Bit 13: Full Duplex */
1149#define PHY_M_PS_PAGE_REC (1<<12) /* Bit 12: Page Received */
1150#define PHY_M_PS_SPDUP_RES (1<<11) /* Bit 11: Speed & Duplex Resolved */
1151#define PHY_M_PS_LINK_UP (1<<10) /* Bit 10: Link Up */
1152#define PHY_M_PS_CABLE_MSK (3<<7) /* Bit 9.. 7: Cable Length Mask */
1153#define PHY_M_PS_MDI_X_STAT (1<<6) /* Bit 6: MDI Crossover Stat (1=MDIX) */
1154#define PHY_M_PS_DOWNS_STAT (1<<5) /* Bit 5: Downshift Status (1=downsh.) */
1155#define PHY_M_PS_ENDET_STAT (1<<4) /* Bit 4: Energy Detect Status (1=act) */
1156#define PHY_M_PS_TX_P_EN (1<<3) /* Bit 3: Tx Pause Enabled */
1157#define PHY_M_PS_RX_P_EN (1<<2) /* Bit 2: Rx Pause Enabled */
1158#define PHY_M_PS_POL_REV (1<<1) /* Bit 1: Polarity Reversed */
1159#define PHY_M_PC_JABBER (1<<0) /* Bit 0: Jabber */
1160
1161#define PHY_M_PS_PAUSE_MSK (PHY_M_PS_TX_P_EN | PHY_M_PS_RX_P_EN)
1162
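/*
 * Editor's note -- illustrative sketch only, not part of the original
 * sk98lin header.  Decoding the resolved link speed from
 * PHY_MARV_PHY_STAT (meaningful once PHY_M_PS_SPDUP_RES is set).
 * The helper name is hypothetical; only the masks above are assumed.
 */
static inline int marv_phy_stat_speed(unsigned int phy_stat)
{
	switch (phy_stat & PHY_M_PS_SPEED_MSK) {
	case PHY_M_PS_SPEED_1000:
		return 1000;
	case PHY_M_PS_SPEED_100:
		return 100;
	default:	/* PHY_M_PS_SPEED_10 */
		return 10;
	}
}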
1163/***** PHY_MARV_INT_MASK 16 bit r/w Interrupt Mask Reg *****/
1164/***** PHY_MARV_INT_STAT 16 bit r/o Interrupt Status Reg *****/
1165#define PHY_M_IS_AN_ERROR (1<<15) /* Bit 15: Auto-Negotiation Error */
1166#define PHY_M_IS_LSP_CHANGE (1<<14) /* Bit 14: Link Speed Changed */
1167#define PHY_M_IS_DUP_CHANGE (1<<13) /* Bit 13: Duplex Mode Changed */
1168#define PHY_M_IS_AN_PR (1<<12) /* Bit 12: Page Received */
1169#define PHY_M_IS_AN_COMPL (1<<11) /* Bit 11: Auto-Negotiation Completed */
1170#define PHY_M_IS_LST_CHANGE (1<<10) /* Bit 10: Link Status Changed */
1171#define PHY_M_IS_SYMB_ERROR (1<<9) /* Bit 9: Symbol Error */
1172#define PHY_M_IS_FALSE_CARR (1<<8) /* Bit 8: False Carrier */
1173#define PHY_M_IS_FIFO_ERROR (1<<7) /* Bit 7: FIFO Overflow/Underrun Error */
1174#define PHY_M_IS_MDI_CHANGE (1<<6) /* Bit 6: MDI Crossover Changed */
1175#define PHY_M_IS_DOWNSH_DET (1<<5) /* Bit 5: Downshift Detected */
1176#define PHY_M_IS_END_CHANGE (1<<4) /* Bit 4: Energy Detect Changed */
1177 /* Bit 3..2: reserved */
1178#define PHY_M_IS_POL_CHANGE (1<<1) /* Bit 1: Polarity Changed */
1179#define PHY_M_IS_JABBER (1<<0) /* Bit 0: Jabber */
1180
1181#define PHY_M_DEF_MSK (PHY_M_IS_AN_ERROR | PHY_M_IS_AN_PR | \
1182 PHY_M_IS_LST_CHANGE | PHY_M_IS_FIFO_ERROR)
1183
1184/***** PHY_MARV_EXT_CTRL 16 bit r/w Ext. PHY Specific Ctrl *****/
1185#define PHY_M_EC_M_DSC_MSK (3<<10) /* Bit 11..10: Master downshift counter */
1186#define PHY_M_EC_S_DSC_MSK (3<<8) /* Bit 9.. 8: Slave downshift counter */
1187#define PHY_M_EC_MAC_S_MSK (7<<4) /* Bit 6.. 4: Def. MAC interface speed */
1188#define PHY_M_EC_FIB_AN_ENA (1<<3) /* Bit 3: Fiber Auto-Neg. Enable */
1189
1190#define PHY_M_EC_M_DSC(x) SHIFT10(x) /* 00=1x; 01=2x; 10=3x; 11=4x */
1191#define PHY_M_EC_S_DSC(x) SHIFT8(x) /* 00=dis; 01=1x; 10=2x; 11=3x */
1192#define PHY_M_EC_MAC_S(x) SHIFT4(x) /* 01X=0; 110=2.5; 111=25 (MHz) */
1193
1194#define MAC_TX_CLK_0_MHZ 2
1195#define MAC_TX_CLK_2_5_MHZ 6
1196#define MAC_TX_CLK_25_MHZ 7
1197
1198/***** PHY_MARV_LED_CTRL 16 bit r/w LED Control Reg *****/
1199#define PHY_M_LEDC_DIS_LED (1<<15) /* Bit 15: Disable LED */
1200#define PHY_M_LEDC_PULS_MSK (7<<12) /* Bit 14..12: Pulse Stretch Mask */
1201#define PHY_M_LEDC_F_INT (1<<11) /* Bit 11: Force Interrupt */
1202#define PHY_M_LEDC_BL_R_MSK (7<<8) /* Bit 10.. 8: Blink Rate Mask */
1203 /* Bit 7.. 5: reserved */
1204#define PHY_M_LEDC_LINK_MSK (3<<3) /* Bit 4.. 3: Link Control Mask */
1205#define PHY_M_LEDC_DP_CTRL (1<<2) /* Bit 2: Duplex Control */
1206#define PHY_M_LEDC_RX_CTRL (1<<1) /* Bit 1: Rx activity / Link */
1207#define PHY_M_LEDC_TX_CTRL (1<<0) /* Bit 0: Tx activity / Link */
1208
1209#define PHY_M_LED_PULS_DUR(x) SHIFT12(x) /* Pulse Stretch Duration */
1210
1211#define PULS_NO_STR 0 /* no pulse stretching */
1212#define PULS_21MS 1 /* 21 ms to 42 ms */
1213#define PULS_42MS 2 /* 42 ms to 84 ms */
1214#define PULS_84MS 3 /* 84 ms to 170 ms */
1215#define PULS_170MS 4 /* 170 ms to 340 ms */
1216#define PULS_340MS 5 /* 340 ms to 670 ms */
1217#define PULS_670MS 6 /* 670 ms to 1.3 s */
1218#define PULS_1300MS 7 /* 1.3 s to 2.7 s */
1219
1220#define PHY_M_LED_BLINK_RT(x) SHIFT8(x) /* Blink Rate */
1221
1222#define BLINK_42MS 0 /* 42 ms */
1223#define BLINK_84MS 1 /* 84 ms */
1224#define BLINK_170MS 2 /* 170 ms */
1225#define BLINK_340MS 3 /* 340 ms */
1226#define BLINK_670MS 4 /* 670 ms */
1227 /* values 5 - 7: reserved */
1228
1229/***** PHY_MARV_LED_OVER 16 bit r/w Manual LED Override Reg *****/
1230#define PHY_M_LED_MO_DUP(x) SHIFT10(x) /* Bit 11..10: Duplex */
1231#define PHY_M_LED_MO_10(x) SHIFT8(x) /* Bit 9.. 8: Link 10 */
1232#define PHY_M_LED_MO_100(x) SHIFT6(x) /* Bit 7.. 6: Link 100 */
1233#define PHY_M_LED_MO_1000(x) SHIFT4(x) /* Bit 5.. 4: Link 1000 */
1234#define PHY_M_LED_MO_RX(x) SHIFT2(x) /* Bit 3.. 2: Rx */
1235#define PHY_M_LED_MO_TX(x) SHIFT0(x) /* Bit 1.. 0: Tx */
1236
1237#define MO_LED_NORM 0
1238#define MO_LED_BLINK 1
1239#define MO_LED_OFF 2
1240#define MO_LED_ON 3
1241
1242/***** PHY_MARV_EXT_CTRL_2 16 bit r/w Ext. PHY Specific Ctrl 2 *****/
1243 /* Bit 15.. 7: reserved */
1244#define PHY_M_EC2_FI_IMPED (1<<6) /* Bit 6: Fiber Input Impedance */
1245#define PHY_M_EC2_FO_IMPED (1<<5) /* Bit 5: Fiber Output Impedance */
1246#define PHY_M_EC2_FO_M_CLK (1<<4) /* Bit 4: Fiber Mode Clock Enable */
1247#define PHY_M_EC2_FO_BOOST (1<<3) /* Bit 3: Fiber Output Boost */
1248#define PHY_M_EC2_FO_AM_MSK 7 /* Bit 2.. 0: Fiber Output Amplitude */
1249
1250/***** PHY_MARV_EXT_P_STAT 16 bit r/w Ext. PHY Specific Status *****/
1251#define PHY_M_FC_AUTO_SEL (1<<15) /* Bit 15: Fiber/Copper Auto Sel. dis. */
1252#define PHY_M_FC_AN_REG_ACC (1<<14) /* Bit 14: Fiber/Copper Autoneg. reg acc */
1253#define PHY_M_FC_RESULUTION (1<<13) /* Bit 13: Fiber/Copper Resolution */
1254#define PHY_M_SER_IF_AN_BP (1<<12) /* Bit 12: Ser IF autoneg. bypass enable */
1255#define PHY_M_SER_IF_BP_ST (1<<11) /* Bit 11: Ser IF autoneg. bypass status */
1256#define PHY_M_IRQ_POLARITY (1<<10) /* Bit 10: IRQ polarity */
1257 /* Bit 9..8, 6..4: reserved */
1258#define PHY_M_UNDOC1 (1<<7) /* Bit 7: undocumented bit !! */
1259#define PHY_M_MODE_MASK (0xf<<0) /* Bit 3..0: copy of HWCFG MODE[3:0] */
1260
1261
1262/***** PHY_MARV_CABLE_DIAG 16 bit r/o Cable Diagnostic Reg *****/
1263#define PHY_M_CABD_ENA_TEST (1<<15) /* Bit 15: Enable Test */
1264#define PHY_M_CABD_STAT_MSK (3<<13) /* Bit 14..13: Status */
1265 /* Bit 12.. 8: reserved */
1266#define PHY_M_CABD_DIST_MSK 0xff /* Bit 7.. 0: Distance */
1267
1268/* values for Cable Diagnostic Status (11=fail; 00=OK; 10=open; 01=short) */
1269#define CABD_STAT_NORMAL 0
1270#define CABD_STAT_SHORT 1
1271#define CABD_STAT_OPEN 2
1272#define CABD_STAT_FAIL 3
1273
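/*
 * Editor's note -- illustrative sketch only, not part of the original
 * sk98lin header.  Splitting PHY_MARV_CABLE_DIAG into its status
 * (CABD_STAT_*) and distance fields.  The helper names are
 * hypothetical; only the masks above are assumed.
 */
static inline unsigned int marv_cabd_status(unsigned int cable_diag)
{
	return (cable_diag & PHY_M_CABD_STAT_MSK) >> 13;
}

static inline unsigned int marv_cabd_distance(unsigned int cable_diag)
{
	return cable_diag & PHY_M_CABD_DIST_MSK;
}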
1274
1275/*
1276 * GMAC registers
1277 *
1278 * The GMAC registers are 16 or 32 bits wide.
1279 * The GMAC's host processor interface is 16 bits wide,
1280 * therefore ALL registers are addressed with 16 bit accesses.
1281 *
1282 * The following macros are provided to access the GMAC registers:
1283 * GM_IN16(), GM_OUT16(), GM_IN32(), GM_OUT32(), GM_INADR(), GM_OUTADR(),
1284 * GM_INHASH(), and GM_OUTHASH().
1285 * The macros are defined in SkGeHw.h.
1286 *
1287 * Note: NA reg = Network Address, e.g. DA, SA, etc.
1288 *
1289 */
1290
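/*
 * Editor's note -- illustrative sketch only, not part of the original
 * sk98lin header.  Because the host interface is 16 bits wide, a
 * 32 bit GMAC register is read as two 16 bit halves.  gm_read16() is
 * a hypothetical placeholder for a single 16 bit register read, and
 * the "low half at reg, high half at reg + 2" layout is an assumption
 * made for this sketch -- the real accessors are the GM_IN16()/GM_IN32()
 * macros from SkGeHw.h.
 */
extern unsigned int gm_read16(unsigned int port, unsigned int reg);	/* hypothetical */

static inline unsigned long gm_read32_sketch(unsigned int port, unsigned int reg)
{
	unsigned long lo = gm_read16(port, reg);	/* lower 16 bits */
	unsigned long hi = gm_read16(port, reg + 2);	/* upper 16 bits (assumed offset) */

	return lo | (hi << 16);
}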
1291/* Port Registers */
1292#define GM_GP_STAT 0x0000 /* 16 bit r/o General Purpose Status */
1293#define GM_GP_CTRL 0x0004 /* 16 bit r/w General Purpose Control */
1294#define GM_TX_CTRL 0x0008 /* 16 bit r/w Transmit Control Reg. */
1295#define GM_RX_CTRL 0x000c /* 16 bit r/w Receive Control Reg. */
1296#define GM_TX_FLOW_CTRL 0x0010 /* 16 bit r/w Transmit Flow-Control */
1297#define GM_TX_PARAM 0x0014 /* 16 bit r/w Transmit Parameter Reg. */
1298#define GM_SERIAL_MODE 0x0018 /* 16 bit r/w Serial Mode Register */
1299
1300/* Source Address Registers */
1301#define GM_SRC_ADDR_1L 0x001c /* 16 bit r/w Source Address 1 (low) */
1302#define GM_SRC_ADDR_1M 0x0020 /* 16 bit r/w Source Address 1 (middle) */
1303#define GM_SRC_ADDR_1H 0x0024 /* 16 bit r/w Source Address 1 (high) */
1304#define GM_SRC_ADDR_2L 0x0028 /* 16 bit r/w Source Address 2 (low) */
1305#define GM_SRC_ADDR_2M 0x002c /* 16 bit r/w Source Address 2 (middle) */
1306#define GM_SRC_ADDR_2H 0x0030 /* 16 bit r/w Source Address 2 (high) */
1307
1308/* Multicast Address Hash Registers */
1309#define GM_MC_ADDR_H1 0x0034 /* 16 bit r/w Multicast Address Hash 1 */
1310#define GM_MC_ADDR_H2 0x0038 /* 16 bit r/w Multicast Address Hash 2 */
1311#define GM_MC_ADDR_H3 0x003c /* 16 bit r/w Multicast Address Hash 3 */
1312#define GM_MC_ADDR_H4 0x0040 /* 16 bit r/w Multicast Address Hash 4 */
1313
1314/* Interrupt Source Registers */
1315#define GM_TX_IRQ_SRC 0x0044 /* 16 bit r/o Tx Overflow IRQ Source */
1316#define GM_RX_IRQ_SRC 0x0048 /* 16 bit r/o Rx Overflow IRQ Source */
1317#define GM_TR_IRQ_SRC 0x004c /* 16 bit r/o Tx/Rx Over. IRQ Source */
1318
1319/* Interrupt Mask Registers */
1320#define GM_TX_IRQ_MSK 0x0050 /* 16 bit r/w Tx Overflow IRQ Mask */
1321#define GM_RX_IRQ_MSK 0x0054 /* 16 bit r/w Rx Overflow IRQ Mask */
1322#define GM_TR_IRQ_MSK 0x0058 /* 16 bit r/w Tx/Rx Over. IRQ Mask */
1323
1324/* Serial Management Interface (SMI) Registers */
1325#define GM_SMI_CTRL 0x0080 /* 16 bit r/w SMI Control Register */
1326#define GM_SMI_DATA 0x0084 /* 16 bit r/w SMI Data Register */
1327#define GM_PHY_ADDR 0x0088 /* 16 bit r/w GPHY Address Register */
1328
1329/* MIB Counters */
1330#define GM_MIB_CNT_BASE 0x0100 /* Base Address of MIB Counters */
1331#define GM_MIB_CNT_SIZE 44 /* Number of MIB Counters */
1332
1333/*
1334 * MIB Counters base address definitions (low word) -
1335 * use offset 4 for access to the high word (32 bit r/o); an illustrative read helper follows the counter list below
1336 */
1337#define GM_RXF_UC_OK \
1338 (GM_MIB_CNT_BASE + 0) /* Unicast Frames Received OK */
1339#define GM_RXF_BC_OK \
1340 (GM_MIB_CNT_BASE + 8) /* Broadcast Frames Received OK */
1341#define GM_RXF_MPAUSE \
1342 (GM_MIB_CNT_BASE + 16) /* Pause MAC Ctrl Frames Received */
1343#define GM_RXF_MC_OK \
1344 (GM_MIB_CNT_BASE + 24) /* Multicast Frames Received OK */
1345#define GM_RXF_FCS_ERR \
1346 (GM_MIB_CNT_BASE + 32) /* Rx Frame Check Seq. Error */
1347 /* GM_MIB_CNT_BASE + 40: reserved */
1348#define GM_RXO_OK_LO \
1349 (GM_MIB_CNT_BASE + 48) /* Octets Received OK Low */
1350#define GM_RXO_OK_HI \
1351 (GM_MIB_CNT_BASE + 56) /* Octets Received OK High */
1352#define GM_RXO_ERR_LO \
1353 (GM_MIB_CNT_BASE + 64) /* Octets Received Invalid Low */
1354#define GM_RXO_ERR_HI \
1355 (GM_MIB_CNT_BASE + 72) /* Octets Received Invalid High */
1356#define GM_RXF_SHT \
1357 (GM_MIB_CNT_BASE + 80) /* Frames <64 Byte Received OK */
1358#define GM_RXE_FRAG \
1359 (GM_MIB_CNT_BASE + 88) /* Frames <64 Byte Received with FCS Err */
1360#define GM_RXF_64B \
1361 (GM_MIB_CNT_BASE + 96) /* 64 Byte Rx Frame */
1362#define GM_RXF_127B \
1363 (GM_MIB_CNT_BASE + 104) /* 65-127 Byte Rx Frame */
1364#define GM_RXF_255B \
1365 (GM_MIB_CNT_BASE + 112) /* 128-255 Byte Rx Frame */
1366#define GM_RXF_511B \
1367 (GM_MIB_CNT_BASE + 120) /* 256-511 Byte Rx Frame */
1368#define GM_RXF_1023B \
1369 (GM_MIB_CNT_BASE + 128) /* 512-1023 Byte Rx Frame */
1370#define GM_RXF_1518B \
1371 (GM_MIB_CNT_BASE + 136) /* 1024-1518 Byte Rx Frame */
1372#define GM_RXF_MAX_SZ \
1373 (GM_MIB_CNT_BASE + 144) /* 1519-MaxSize Byte Rx Frame */
1374#define GM_RXF_LNG_ERR \
1375 (GM_MIB_CNT_BASE + 152) /* Rx Frame too Long Error */
1376#define GM_RXF_JAB_PKT \
1377 (GM_MIB_CNT_BASE + 160) /* Rx Jabber Packet Frame */
1378 /* GM_MIB_CNT_BASE + 168: reserved */
1379#define GM_RXE_FIFO_OV \
1380 (GM_MIB_CNT_BASE + 176) /* Rx FIFO overflow Event */
1381 /* GM_MIB_CNT_BASE + 184: reserved */
1382#define GM_TXF_UC_OK \
1383 (GM_MIB_CNT_BASE + 192) /* Unicast Frames Xmitted OK */
1384#define GM_TXF_BC_OK \
1385 (GM_MIB_CNT_BASE + 200) /* Broadcast Frames Xmitted OK */
1386#define GM_TXF_MPAUSE \
1387 (GM_MIB_CNT_BASE + 208) /* Pause MAC Ctrl Frames Xmitted */
1388#define GM_TXF_MC_OK \
1389 (GM_MIB_CNT_BASE + 216) /* Multicast Frames Xmitted OK */
1390#define GM_TXO_OK_LO \
1391 (GM_MIB_CNT_BASE + 224) /* Octets Transmitted OK Low */
1392#define GM_TXO_OK_HI \
1393 (GM_MIB_CNT_BASE + 232) /* Octets Transmitted OK High */
1394#define GM_TXF_64B \
1395 (GM_MIB_CNT_BASE + 240) /* 64 Byte Tx Frame */
1396#define GM_TXF_127B \
1397 (GM_MIB_CNT_BASE + 248) /* 65-127 Byte Tx Frame */
1398#define GM_TXF_255B \
1399 (GM_MIB_CNT_BASE + 256) /* 128-255 Byte Tx Frame */
1400#define GM_TXF_511B \
1401 (GM_MIB_CNT_BASE + 264) /* 256-511 Byte Tx Frame */
1402#define GM_TXF_1023B \
1403 (GM_MIB_CNT_BASE + 272) /* 512-1023 Byte Tx Frame */
1404#define GM_TXF_1518B \
1405 (GM_MIB_CNT_BASE + 280) /* 1024-1518 Byte Tx Frame */
1406#define GM_TXF_MAX_SZ \
1407 (GM_MIB_CNT_BASE + 288) /* 1519-MaxSize Byte Tx Frame */
1408 /* GM_MIB_CNT_BASE + 296: reserved */
1409#define GM_TXF_COL \
1410 (GM_MIB_CNT_BASE + 304) /* Tx Collision */
1411#define GM_TXF_LAT_COL \
1412 (GM_MIB_CNT_BASE + 312) /* Tx Late Collision */
1413#define GM_TXF_ABO_COL \
1414 (GM_MIB_CNT_BASE + 320) /* Tx aborted due to Exces. Col. */
1415#define GM_TXF_MUL_COL \
1416 (GM_MIB_CNT_BASE + 328) /* Tx Multiple Collision */
1417#define GM_TXF_SNG_COL \
1418 (GM_MIB_CNT_BASE + 336) /* Tx Single Collision */
1419#define GM_TXE_FIFO_UR \
1420 (GM_MIB_CNT_BASE + 344) /* Tx FIFO Underrun Event */
1421
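/*
 * Editor's note -- illustrative sketch only, not part of the original
 * sk98lin header.  A MIB counter value is spread over a low 32 bit
 * word at the offsets above and a high 32 bit word 4 bytes further on.
 * gm_read32_sketch() is the hypothetical 32 bit read helper sketched
 * after the GMAC register comment above; e.g.
 * gm_read_mib_sketch(port, GM_RXF_UC_OK) would yield the unicast Rx
 * frame count under these assumptions.
 */
static inline unsigned long long gm_read_mib_sketch(unsigned int port, unsigned int cnt)
{
	unsigned long long lo = gm_read32_sketch(port, cnt);
	unsigned long long hi = gm_read32_sketch(port, cnt + 4);

	return lo | (hi << 32);
}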
1422/*----------------------------------------------------------------------------*/
1423/*
1424 * GMAC Bit Definitions
1425 *
1426 * If the bit access behaviour differs from the register access behaviour
1427 * (r/w, r/o) this is documented after the bit number.
1428 * The following bit access behaviours are used:
1429 * (sc) self clearing
1430 * (r/o) read only
1431 */
1432
1433/* GM_GP_STAT 16 bit r/o General Purpose Status Register */
1434#define GM_GPSR_SPEED (1<<15) /* Bit 15: Port Speed (1 = 100 Mbps) */
1435#define GM_GPSR_DUPLEX (1<<14) /* Bit 14: Duplex Mode (1 = Full) */
1436#define GM_GPSR_FC_TX_DIS (1<<13) /* Bit 13: Tx Flow-Control Mode Disabled */
1437#define GM_GPSR_LINK_UP (1<<12) /* Bit 12: Link Up Status */
1438#define GM_GPSR_PAUSE (1<<11) /* Bit 11: Pause State */
1439#define GM_GPSR_TX_ACTIVE (1<<10) /* Bit 10: Tx in Progress */
1440#define GM_GPSR_EXC_COL (1<<9) /* Bit 9: Excessive Collisions Occurred */
1441#define GM_GPSR_LAT_COL (1<<8) /* Bit 8: Late Collisions Occurred */
1442 /* Bit 7..6: reserved */
1443#define GM_GPSR_PHY_ST_CH (1<<5) /* Bit 5: PHY Status Change */
1444#define GM_GPSR_GIG_SPEED (1<<4) /* Bit 4: Gigabit Speed (1 = 1000 Mbps) */
1445#define GM_GPSR_PART_MODE (1<<3) /* Bit 3: Partition mode */
1446#define GM_GPSR_FC_RX_DIS (1<<2) /* Bit 2: Rx Flow-Control Mode Disabled */
1447#define GM_GPSR_PROM_EN (1<<1) /* Bit 1: Promiscuous Mode Enabled */
1448 /* Bit 0: reserved */
1449
1450/* GM_GP_CTRL 16 bit r/w General Purpose Control Register */
1451 /* Bit 15: reserved */
1452#define GM_GPCR_PROM_ENA (1<<14) /* Bit 14: Enable Promiscuous Mode */
1453#define GM_GPCR_FC_TX_DIS (1<<13) /* Bit 13: Disable Tx Flow-Control Mode */
1454#define GM_GPCR_TX_ENA (1<<12) /* Bit 12: Enable Transmit */
1455#define GM_GPCR_RX_ENA (1<<11) /* Bit 11: Enable Receive */
1456#define GM_GPCR_BURST_ENA (1<<10) /* Bit 10: Enable Burst Mode */
1457#define GM_GPCR_LOOP_ENA (1<<9) /* Bit 9: Enable MAC Loopback Mode */
1458#define GM_GPCR_PART_ENA (1<<8) /* Bit 8: Enable Partition Mode */
1459#define GM_GPCR_GIGS_ENA (1<<7) /* Bit 7: Gigabit Speed (1000 Mbps) */
1460#define GM_GPCR_FL_PASS (1<<6) /* Bit 6: Force Link Pass */
1461#define GM_GPCR_DUP_FULL (1<<5) /* Bit 5: Full Duplex Mode */
1462#define GM_GPCR_FC_RX_DIS (1<<4) /* Bit 4: Disable Rx Flow-Control Mode */
1463#define GM_GPCR_SPEED_100 (1<<3) /* Bit 3: Port Speed 100 Mbps */
1464#define GM_GPCR_AU_DUP_DIS (1<<2) /* Bit 2: Disable Auto-Update Duplex */
1465#define GM_GPCR_AU_FCT_DIS (1<<1) /* Bit 1: Disable Auto-Update Flow-C. */
1466#define GM_GPCR_AU_SPD_DIS (1<<0) /* Bit 0: Disable Auto-Update Speed */
1467
1468#define GM_GPCR_SPEED_1000 (GM_GPCR_GIGS_ENA | GM_GPCR_SPEED_100)
1469#define GM_GPCR_AU_ALL_DIS (GM_GPCR_AU_DUP_DIS | GM_GPCR_AU_FCT_DIS |\
1470 GM_GPCR_AU_SPD_DIS)
1471
1472/* GM_TX_CTRL 16 bit r/w Transmit Control Register */
1473#define GM_TXCR_FORCE_JAM (1<<15) /* Bit 15: Force Jam / Flow-Control */
1474#define GM_TXCR_CRC_DIS (1<<14) /* Bit 14: Disable insertion of CRC */
1475#define GM_TXCR_PAD_DIS (1<<13) /* Bit 13: Disable padding of packets */
1476#define GM_TXCR_COL_THR_MSK (7<<10) /* Bit 12..10: Collision Threshold */
1477
1478#define TX_COL_THR(x) (SHIFT10(x) & GM_TXCR_COL_THR_MSK)
1479
1480#define TX_COL_DEF 0x04
1481
1482/* GM_RX_CTRL 16 bit r/w Receive Control Register */
1483#define GM_RXCR_UCF_ENA (1<<15) /* Bit 15: Enable Unicast filtering */
1484#define GM_RXCR_MCF_ENA (1<<14) /* Bit 14: Enable Multicast filtering */
1485#define GM_RXCR_CRC_DIS (1<<13) /* Bit 13: Remove 4-byte CRC */
1486#define GM_RXCR_PASS_FC (1<<12) /* Bit 12: Pass FC packets to FIFO */
1487
1488/* GM_TX_PARAM 16 bit r/w Transmit Parameter Register */
1489#define GM_TXPA_JAMLEN_MSK (0x03<<14) /* Bit 15..14: Jam Length */
1490#define GM_TXPA_JAMIPG_MSK (0x1f<<9) /* Bit 13..9: Jam IPG */
1491#define GM_TXPA_JAMDAT_MSK (0x1f<<4) /* Bit 8..4: IPG Jam to Data */
1492 /* Bit 3..0: reserved */
1493
1494#define TX_JAM_LEN_VAL(x) (SHIFT14(x) & GM_TXPA_JAMLEN_MSK)
1495#define TX_JAM_IPG_VAL(x) (SHIFT9(x) & GM_TXPA_JAMIPG_MSK)
1496#define TX_IPG_JAM_DATA(x) (SHIFT4(x) & GM_TXPA_JAMDAT_MSK)
1497
1498#define TX_JAM_LEN_DEF 0x03
1499#define TX_JAM_IPG_DEF 0x0b
1500#define TX_IPG_JAM_DEF 0x1c
1501
1502/* GM_SERIAL_MODE 16 bit r/w Serial Mode Register */
1503#define GM_SMOD_DATABL_MSK (0x1f<<11) /* Bit 15..11: Data Blinder (r/o) */
1504#define GM_SMOD_LIMIT_4 (1<<10) /* Bit 10: 4 consecutive Tx trials */
1505#define GM_SMOD_VLAN_ENA (1<<9) /* Bit 9: Enable VLAN (Max. Frame Len) */
1506#define GM_SMOD_JUMBO_ENA (1<<8) /* Bit 8: Enable Jumbo (Max. Frame Len) */
1507 /* Bit 7..5: reserved */
1508#define GM_SMOD_IPG_MSK 0x1f /* Bit 4..0: Inter-Packet Gap (IPG) */
1509
1510#define DATA_BLIND_VAL(x) (SHIFT11(x) & GM_SMOD_DATABL_MSK)
1511#define DATA_BLIND_DEF 0x04
1512
1513#define IPG_DATA_VAL(x) ((x) & GM_SMOD_IPG_MSK)
1514#define IPG_DATA_DEF 0x1e
1515
1516/* GM_SMI_CTRL 16 bit r/w SMI Control Register */
1517#define GM_SMI_CT_PHY_A_MSK (0x1f<<11) /* Bit 15..11: PHY Device Address */
1518#define GM_SMI_CT_REG_A_MSK (0x1f<<6) /* Bit 10.. 6: PHY Register Address */
1519#define GM_SMI_CT_OP_RD (1<<5) /* Bit 5: OpCode Read (0=Write)*/
1520#define GM_SMI_CT_RD_VAL (1<<4) /* Bit 4: Read Valid (Read completed) */
1521#define GM_SMI_CT_BUSY (1<<3) /* Bit 3: Busy (Operation in progress) */
1522 /* Bit 2..0: reserved */
1523
1524#define GM_SMI_CT_PHY_AD(x) (SHIFT11(x) & GM_SMI_CT_PHY_A_MSK)
1525#define GM_SMI_CT_REG_AD(x) (SHIFT6(x) & GM_SMI_CT_REG_A_MSK)
1526
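/*
 * Editor's note -- illustrative sketch only, not part of the original
 * sk98lin header.  A PHY register read over the SMI composes the PHY
 * and register address together with GM_SMI_CT_OP_RD in GM_SMI_CTRL,
 * polls GM_SMI_CT_RD_VAL, and then fetches the result from
 * GM_SMI_DATA.  gm_read16()/gm_write16() are hypothetical placeholders
 * for single 16 bit register accesses; the real sequence lives in the
 * driver's PHY helper code, not in this header.
 */
extern void gm_write16(unsigned int port, unsigned int reg, unsigned int val);	/* hypothetical */

static inline int gm_smi_read_sketch(unsigned int port, unsigned int phy,
				     unsigned int reg, unsigned int *val)
{
	int timeout = 1000;	/* arbitrary poll budget for this sketch */

	gm_write16(port, GM_SMI_CTRL,
		   GM_SMI_CT_PHY_AD(phy) | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD);

	while (!(gm_read16(port, GM_SMI_CTRL) & GM_SMI_CT_RD_VAL)) {
		if (--timeout == 0)
			return -1;	/* read did not complete */
	}

	*val = gm_read16(port, GM_SMI_DATA);
	return 0;
}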
1527 /* GM_PHY_ADDR 16 bit r/w GPHY Address Register */
1528 /* Bit 15..6: reserved */
1529#define GM_PAR_MIB_CLR (1<<5) /* Bit 5: Set MIB Clear Counter Mode */
1530#define GM_PAR_MIB_TST (1<<4) /* Bit 4: MIB Load Counter (Test Mode) */
1531 /* Bit 3..0: reserved */
1532
1533/* Receive Frame Status Encoding */
1534#define GMR_FS_LEN (0xffffUL<<16) /* Bit 31..16: Rx Frame Length */
1535 /* Bit 15..14: reserved */
1536#define GMR_FS_VLAN (1L<<13) /* Bit 13: VLAN Packet */
1537#define GMR_FS_JABBER (1L<<12) /* Bit 12: Jabber Packet */
1538#define GMR_FS_UN_SIZE (1L<<11) /* Bit 11: Undersize Packet */
1539#define GMR_FS_MC (1L<<10) /* Bit 10: Multicast Packet */
1540#define GMR_FS_BC (1L<<9) /* Bit 9: Broadcast Packet */
1541#define GMR_FS_RX_OK (1L<<8) /* Bit 8: Receive OK (Good Packet) */
1542#define GMR_FS_GOOD_FC (1L<<7) /* Bit 7: Good Flow-Control Packet */
1543#define GMR_FS_BAD_FC (1L<<6) /* Bit 6: Bad Flow-Control Packet */
1544#define GMR_FS_MII_ERR (1L<<5) /* Bit 5: MII Error */
1545#define GMR_FS_LONG_ERR (1L<<4) /* Bit 4: Too Long Packet */
1546#define GMR_FS_FRAGMENT (1L<<3) /* Bit 3: Fragment */
1547 /* Bit 2: reserved */
1548#define GMR_FS_CRC_ERR (1L<<1) /* Bit 1: CRC Error */
1549#define GMR_FS_RX_FF_OV (1L<<0) /* Bit 0: Rx FIFO Overflow */
1550
1551/*
1552 * GMR_FS_ANY_ERR (analogous to XMR_FS_ANY_ERR)
1553 */
1554#define GMR_FS_ANY_ERR (GMR_FS_CRC_ERR | \
1555 GMR_FS_LONG_ERR | \
1556 GMR_FS_MII_ERR | \
1557 GMR_FS_BAD_FC | \
1558 GMR_FS_GOOD_FC | \
1559 GMR_FS_JABBER)
1560
1561/* Rx GMAC FIFO Flush Mask (default) */
1562#define RX_FF_FL_DEF_MSK (GMR_FS_CRC_ERR | \
1563 GMR_FS_RX_FF_OV | \
1564 GMR_FS_MII_ERR | \
1565 GMR_FS_BAD_FC | \
1566 GMR_FS_GOOD_FC | \
1567 GMR_FS_UN_SIZE | \
1568 GMR_FS_JABBER)
1569
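/*
 * Usage sketch: evaluating a 32-bit receive status word with the
 * encodings above. The status word is assumed to come from the receive
 * descriptor; only the length extraction and the good/bad decision are
 * shown.
 */
#if 0	/* illustrative only */
static int RxFrameIsGood(SK_U32 FrameStat, unsigned int *pLen)
{
	/* Frame length lives in the upper 16 bits of the status word. */
	*pLen = (unsigned int)((FrameStat & GMR_FS_LEN) >> 16);

	return ((FrameStat & GMR_FS_RX_OK) != 0 &&
		(FrameStat & GMR_FS_ANY_ERR) == 0);
}
#endif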
1570/* typedefs *******************************************************************/
1571
1572
1573/* function prototypes ********************************************************/
1574
1575#ifdef __cplusplus
1576}
1577#endif /* __cplusplus */
1578
1579#endif /* __INC_XMAC_H */
diff --git a/drivers/net/sk98lin/skaddr.c b/drivers/net/sk98lin/skaddr.c
deleted file mode 100644
index 6e6c56aa6d6f..000000000000
--- a/drivers/net/sk98lin/skaddr.c
+++ /dev/null
@@ -1,1788 +0,0 @@
1/******************************************************************************
2 *
3 * Name: skaddr.c
4 * Project: Gigabit Ethernet Adapters, ADDR-Module
5 * Version: $Revision: 1.52 $
6 * Date: $Date: 2003/06/02 13:46:15 $
7 * Purpose: Manage Addresses (Multicast and Unicast) and Promiscuous Mode.
8 *
9 ******************************************************************************/
10
11/******************************************************************************
12 *
13 * (C)Copyright 1998-2002 SysKonnect GmbH.
14 * (C)Copyright 2002-2003 Marvell.
15 *
16 * This program is free software; you can redistribute it and/or modify
17 * it under the terms of the GNU General Public License as published by
18 * the Free Software Foundation; either version 2 of the License, or
19 * (at your option) any later version.
20 *
21 * The information in this file is provided "AS IS" without warranty.
22 *
23 ******************************************************************************/
24
25/******************************************************************************
26 *
27 * Description:
28 *
29 * This module is intended to manage multicast addresses, address override,
30 * and promiscuous mode on GEnesis and Yukon adapters.
31 *
32 * Address Layout:
33 * port address: physical MAC address
34 * 1st exact match: logical MAC address (GEnesis only)
35 * 2nd exact match: RLMT multicast (GEnesis only)
36 * exact match 3-13: OS-specific multicasts (GEnesis only)
37 *
38 * Include File Hierarchy:
39 *
40 * "skdrv1st.h"
41 * "skdrv2nd.h"
42 *
43 ******************************************************************************/
44
45#if (defined(DEBUG) || ((!defined(LINT)) && (!defined(SK_SLIM))))
46static const char SysKonnectFileId[] =
47 "@(#) $Id: skaddr.c,v 1.52 2003/06/02 13:46:15 tschilli Exp $ (C) Marvell.";
 48#endif /* DEBUG || !LINT || !SK_SLIM */
49
50#define __SKADDR_C
51
52#ifdef __cplusplus
53extern "C" {
54#endif /* cplusplus */
55
56#include "h/skdrv1st.h"
57#include "h/skdrv2nd.h"
58
59/* defines ********************************************************************/
60
61
62#define XMAC_POLY 0xEDB88320UL /* CRC32-Poly - XMAC: Little Endian */
 63#define GMAC_POLY 0x04C11DB7L /* CRC32-Poly - GMAC: Big Endian */
64#define HASH_BITS 6 /* #bits in hash */
65#define SK_MC_BIT 0x01
66
67/* Error numbers and messages. */
68
69#define SKERR_ADDR_E001 (SK_ERRBASE_ADDR + 0)
70#define SKERR_ADDR_E001MSG "Bad Flags."
71#define SKERR_ADDR_E002 (SKERR_ADDR_E001 + 1)
72#define SKERR_ADDR_E002MSG "New Error."
73
74/* typedefs *******************************************************************/
75
76/* None. */
77
78/* global variables ***********************************************************/
79
80/* 64-bit hash values with all bits set. */
81
82static const SK_U16 OnesHash[4] = {0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF};
83
84/* local variables ************************************************************/
85
86#ifdef DEBUG
87static int Next0[SK_MAX_MACS] = {0};
88#endif /* DEBUG */
89
90static int SkAddrGmacMcAdd(SK_AC *pAC, SK_IOC IoC, SK_U32 PortNumber,
91 SK_MAC_ADDR *pMc, int Flags);
92static int SkAddrGmacMcClear(SK_AC *pAC, SK_IOC IoC, SK_U32 PortNumber,
93 int Flags);
94static int SkAddrGmacMcUpdate(SK_AC *pAC, SK_IOC IoC, SK_U32 PortNumber);
95static int SkAddrGmacPromiscuousChange(SK_AC *pAC, SK_IOC IoC,
96 SK_U32 PortNumber, int NewPromMode);
97static int SkAddrXmacMcAdd(SK_AC *pAC, SK_IOC IoC, SK_U32 PortNumber,
98 SK_MAC_ADDR *pMc, int Flags);
99static int SkAddrXmacMcClear(SK_AC *pAC, SK_IOC IoC, SK_U32 PortNumber,
100 int Flags);
101static int SkAddrXmacMcUpdate(SK_AC *pAC, SK_IOC IoC, SK_U32 PortNumber);
102static int SkAddrXmacPromiscuousChange(SK_AC *pAC, SK_IOC IoC,
103 SK_U32 PortNumber, int NewPromMode);
104
105/* functions ******************************************************************/
106
107/******************************************************************************
108 *
109 * SkAddrInit - initialize data, set state to init
110 *
111 * Description:
112 *
113 * SK_INIT_DATA
114 * ============
115 *
116 * This routine clears the multicast tables and resets promiscuous mode.
117 * Some entries are reserved for the "logical MAC address", the
118 * SK-RLMT multicast address, and the BPDU multicast address.
119 *
120 *
121 * SK_INIT_IO
122 * ==========
123 *
124 * All permanent MAC addresses are read from EPROM.
125 * If the current MAC addresses are not already set in software,
126 * they are set to the values of the permanent addresses.
127 * The current addresses are written to the corresponding MAC.
128 *
129 *
130 * SK_INIT_RUN
131 * ===========
132 *
133 * Nothing.
134 *
135 * Context:
136 * init, pageable
137 *
138 * Returns:
139 * SK_ADDR_SUCCESS
140 */
141int SkAddrInit(
142SK_AC *pAC, /* the adapter context */
143SK_IOC IoC, /* I/O context */
144int Level) /* initialization level */
145{
146 int j;
147 SK_U32 i;
148 SK_U8 *InAddr;
149 SK_U16 *OutAddr;
150 SK_ADDR_PORT *pAPort;
151
152 switch (Level) {
153 case SK_INIT_DATA:
154 SK_MEMSET((char *) &pAC->Addr, (SK_U8) 0,
155 (SK_U16) sizeof(SK_ADDR));
156
157 for (i = 0; i < SK_MAX_MACS; i++) {
158 pAPort = &pAC->Addr.Port[i];
159 pAPort->PromMode = SK_PROM_MODE_NONE;
160
161 pAPort->FirstExactMatchRlmt = SK_ADDR_FIRST_MATCH_RLMT;
162 pAPort->FirstExactMatchDrv = SK_ADDR_FIRST_MATCH_DRV;
163 pAPort->NextExactMatchRlmt = SK_ADDR_FIRST_MATCH_RLMT;
164 pAPort->NextExactMatchDrv = SK_ADDR_FIRST_MATCH_DRV;
165 }
166#ifdef xDEBUG
167 for (i = 0; i < SK_MAX_MACS; i++) {
168 if (pAC->Addr.Port[i].NextExactMatchRlmt <
169 SK_ADDR_FIRST_MATCH_RLMT) {
170 Next0[i] |= 4;
171 }
172 }
173#endif /* DEBUG */
174 /* pAC->Addr.InitDone = SK_INIT_DATA; */
175 break;
176
177 case SK_INIT_IO:
178#ifndef SK_NO_RLMT
179 for (i = 0; i < SK_MAX_NETS; i++) {
180 pAC->Addr.Net[i].ActivePort = pAC->Rlmt.Net[i].ActivePort;
181 }
182#endif /* !SK_NO_RLMT */
183#ifdef xDEBUG
184 for (i = 0; i < SK_MAX_MACS; i++) {
185 if (pAC->Addr.Port[i].NextExactMatchRlmt <
186 SK_ADDR_FIRST_MATCH_RLMT) {
187 Next0[i] |= 8;
188 }
189 }
190#endif /* DEBUG */
191
192 /* Read permanent logical MAC address from Control Register File. */
193 for (j = 0; j < SK_MAC_ADDR_LEN; j++) {
194 InAddr = (SK_U8 *) &pAC->Addr.Net[0].PermanentMacAddress.a[j];
195 SK_IN8(IoC, B2_MAC_1 + j, InAddr);
196 }
197
198 if (!pAC->Addr.Net[0].CurrentMacAddressSet) {
199 /* Set the current logical MAC address to the permanent one. */
200 pAC->Addr.Net[0].CurrentMacAddress =
201 pAC->Addr.Net[0].PermanentMacAddress;
202 pAC->Addr.Net[0].CurrentMacAddressSet = SK_TRUE;
203 }
204
205 /* Set the current logical MAC address. */
206 pAC->Addr.Port[pAC->Addr.Net[0].ActivePort].Exact[0] =
207 pAC->Addr.Net[0].CurrentMacAddress;
208#if SK_MAX_NETS > 1
209 /* Set logical MAC address for net 2 to (log | 3). */
210 if (!pAC->Addr.Net[1].CurrentMacAddressSet) {
211 pAC->Addr.Net[1].PermanentMacAddress =
212 pAC->Addr.Net[0].PermanentMacAddress;
213 pAC->Addr.Net[1].PermanentMacAddress.a[5] |= 3;
214 /* Set the current logical MAC address to the permanent one. */
215 pAC->Addr.Net[1].CurrentMacAddress =
216 pAC->Addr.Net[1].PermanentMacAddress;
217 pAC->Addr.Net[1].CurrentMacAddressSet = SK_TRUE;
218 }
219#endif /* SK_MAX_NETS > 1 */
220
221#ifdef DEBUG
222 for (i = 0; i < (SK_U32) pAC->GIni.GIMacsFound; i++) {
223 SK_DBG_MSG(pAC, SK_DBGMOD_ADDR, SK_DBGCAT_INIT,
224 ("Permanent MAC Address (Net%d): %02X %02X %02X %02X %02X %02X\n",
225 i,
226 pAC->Addr.Net[i].PermanentMacAddress.a[0],
227 pAC->Addr.Net[i].PermanentMacAddress.a[1],
228 pAC->Addr.Net[i].PermanentMacAddress.a[2],
229 pAC->Addr.Net[i].PermanentMacAddress.a[3],
230 pAC->Addr.Net[i].PermanentMacAddress.a[4],
231 pAC->Addr.Net[i].PermanentMacAddress.a[5]))
232
233 SK_DBG_MSG(pAC, SK_DBGMOD_ADDR, SK_DBGCAT_INIT,
234 ("Logical MAC Address (Net%d): %02X %02X %02X %02X %02X %02X\n",
235 i,
236 pAC->Addr.Net[i].CurrentMacAddress.a[0],
237 pAC->Addr.Net[i].CurrentMacAddress.a[1],
238 pAC->Addr.Net[i].CurrentMacAddress.a[2],
239 pAC->Addr.Net[i].CurrentMacAddress.a[3],
240 pAC->Addr.Net[i].CurrentMacAddress.a[4],
241 pAC->Addr.Net[i].CurrentMacAddress.a[5]))
242 }
243#endif /* DEBUG */
244
245 for (i = 0; i < (SK_U32) pAC->GIni.GIMacsFound; i++) {
246 pAPort = &pAC->Addr.Port[i];
247
248 /* Read permanent port addresses from Control Register File. */
249 for (j = 0; j < SK_MAC_ADDR_LEN; j++) {
250 InAddr = (SK_U8 *) &pAPort->PermanentMacAddress.a[j];
251 SK_IN8(IoC, B2_MAC_2 + 8 * i + j, InAddr);
252 }
253
254 if (!pAPort->CurrentMacAddressSet) {
255 /*
256 * Set the current and previous physical MAC address
257 * of this port to its permanent MAC address.
258 */
259 pAPort->CurrentMacAddress = pAPort->PermanentMacAddress;
260 pAPort->PreviousMacAddress = pAPort->PermanentMacAddress;
261 pAPort->CurrentMacAddressSet = SK_TRUE;
262 }
263
264 /* Set port's current physical MAC address. */
265 OutAddr = (SK_U16 *) &pAPort->CurrentMacAddress.a[0];
266#ifdef GENESIS
267 if (pAC->GIni.GIGenesis) {
268 XM_OUTADDR(IoC, i, XM_SA, OutAddr);
269 }
270#endif /* GENESIS */
271#ifdef YUKON
272 if (!pAC->GIni.GIGenesis) {
273 GM_OUTADDR(IoC, i, GM_SRC_ADDR_1L, OutAddr);
274 }
275#endif /* YUKON */
276#ifdef DEBUG
277 SK_DBG_MSG(pAC, SK_DBGMOD_ADDR, SK_DBGCAT_INIT,
278 ("SkAddrInit: Permanent Physical MAC Address: %02X %02X %02X %02X %02X %02X\n",
279 pAPort->PermanentMacAddress.a[0],
280 pAPort->PermanentMacAddress.a[1],
281 pAPort->PermanentMacAddress.a[2],
282 pAPort->PermanentMacAddress.a[3],
283 pAPort->PermanentMacAddress.a[4],
284 pAPort->PermanentMacAddress.a[5]))
285
286 SK_DBG_MSG(pAC, SK_DBGMOD_ADDR, SK_DBGCAT_INIT,
287 ("SkAddrInit: Physical MAC Address: %02X %02X %02X %02X %02X %02X\n",
288 pAPort->CurrentMacAddress.a[0],
289 pAPort->CurrentMacAddress.a[1],
290 pAPort->CurrentMacAddress.a[2],
291 pAPort->CurrentMacAddress.a[3],
292 pAPort->CurrentMacAddress.a[4],
293 pAPort->CurrentMacAddress.a[5]))
294#endif /* DEBUG */
295 }
296 /* pAC->Addr.InitDone = SK_INIT_IO; */
297 break;
298
299 case SK_INIT_RUN:
300#ifdef xDEBUG
301 for (i = 0; i < SK_MAX_MACS; i++) {
302 if (pAC->Addr.Port[i].NextExactMatchRlmt <
303 SK_ADDR_FIRST_MATCH_RLMT) {
304 Next0[i] |= 16;
305 }
306 }
307#endif /* DEBUG */
308
309 /* pAC->Addr.InitDone = SK_INIT_RUN; */
310 break;
311
312 default: /* error */
313 break;
314 }
315
316 return (SK_ADDR_SUCCESS);
317
318} /* SkAddrInit */
319
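/*
 * Usage sketch: the initialization levels described above are meant to
 * be run in this order during driver bring-up. Error handling is
 * omitted and the surrounding probe/init code is an assumption of this
 * sketch.
 */
#if 0	/* illustrative only */
	(void) SkAddrInit(pAC, IoC, SK_INIT_DATA);	/* clear tables, no HW access */
	(void) SkAddrInit(pAC, IoC, SK_INIT_IO);	/* read permanent addresses, program MACs */
	(void) SkAddrInit(pAC, IoC, SK_INIT_RUN);	/* nothing to do in this module */
#endif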
320#ifndef SK_SLIM
321
322/******************************************************************************
323 *
324 * SkAddrMcClear - clear the multicast table
325 *
326 * Description:
327 * This routine clears the multicast table.
328 *
329 * If not suppressed by Flag SK_MC_SW_ONLY, the hardware is updated
330 * immediately.
331 *
332 * It calls either SkAddrXmacMcClear or SkAddrGmacMcClear, according
333 * to the adapter in use. The real work is done there.
334 *
335 * Context:
336 * runtime, pageable
337 * may be called starting with SK_INIT_DATA with flag SK_MC_SW_ONLY
338 * may be called after SK_INIT_IO without limitation
339 *
340 * Returns:
341 * SK_ADDR_SUCCESS
342 * SK_ADDR_ILLEGAL_PORT
343 */
344int SkAddrMcClear(
345SK_AC *pAC, /* adapter context */
346SK_IOC IoC, /* I/O context */
347SK_U32 PortNumber, /* Index of affected port */
348int Flags) /* permanent/non-perm, sw-only */
349{
350 int ReturnCode;
351
352 if (PortNumber >= (SK_U32) pAC->GIni.GIMacsFound) {
353 return (SK_ADDR_ILLEGAL_PORT);
354 }
355
356 if (pAC->GIni.GIGenesis) {
357 ReturnCode = SkAddrXmacMcClear(pAC, IoC, PortNumber, Flags);
358 }
359 else {
360 ReturnCode = SkAddrGmacMcClear(pAC, IoC, PortNumber, Flags);
361 }
362
363 return (ReturnCode);
364
365} /* SkAddrMcClear */
366
367#endif /* !SK_SLIM */
368
369#ifndef SK_SLIM
370
371/******************************************************************************
372 *
373 * SkAddrXmacMcClear - clear the multicast table
374 *
375 * Description:
376 * This routine clears the multicast table
377 * (either entry 2 or entries 3-16 and InexactFilter) of the given port.
378 * If not suppressed by Flag SK_MC_SW_ONLY, the hardware is updated
379 * immediately.
380 *
381 * Context:
382 * runtime, pageable
383 * may be called starting with SK_INIT_DATA with flag SK_MC_SW_ONLY
384 * may be called after SK_INIT_IO without limitation
385 *
386 * Returns:
387 * SK_ADDR_SUCCESS
388 * SK_ADDR_ILLEGAL_PORT
389 */
390static int SkAddrXmacMcClear(
391SK_AC *pAC, /* adapter context */
392SK_IOC IoC, /* I/O context */
393SK_U32 PortNumber, /* Index of affected port */
394int Flags) /* permanent/non-perm, sw-only */
395{
396 int i;
397
398 if (Flags & SK_ADDR_PERMANENT) { /* permanent => RLMT */
399
400 /* Clear RLMT multicast addresses. */
401 pAC->Addr.Port[PortNumber].NextExactMatchRlmt = SK_ADDR_FIRST_MATCH_RLMT;
402 }
403 else { /* not permanent => DRV */
404
405 /* Clear InexactFilter */
406 for (i = 0; i < 8; i++) {
407 pAC->Addr.Port[PortNumber].InexactFilter.Bytes[i] = 0;
408 }
409
410 /* Clear DRV multicast addresses. */
411
412 pAC->Addr.Port[PortNumber].NextExactMatchDrv = SK_ADDR_FIRST_MATCH_DRV;
413 }
414
415 if (!(Flags & SK_MC_SW_ONLY)) {
416 (void) SkAddrXmacMcUpdate(pAC, IoC, PortNumber);
417 }
418
419 return (SK_ADDR_SUCCESS);
420
421} /* SkAddrXmacMcClear */
422
423#endif /* !SK_SLIM */
424
425#ifndef SK_SLIM
426
427/******************************************************************************
428 *
429 * SkAddrGmacMcClear - clear the multicast table
430 *
431 * Description:
432 * This routine clears the multicast hashing table (InexactFilter)
433 * (either the RLMT or the driver bits) of the given port.
434 *
435 * If not suppressed by Flag SK_MC_SW_ONLY, the hardware is updated
436 * immediately.
437 *
438 * Context:
439 * runtime, pageable
440 * may be called starting with SK_INIT_DATA with flag SK_MC_SW_ONLY
441 * may be called after SK_INIT_IO without limitation
442 *
443 * Returns:
444 * SK_ADDR_SUCCESS
445 * SK_ADDR_ILLEGAL_PORT
446 */
447static int SkAddrGmacMcClear(
448SK_AC *pAC, /* adapter context */
449SK_IOC IoC, /* I/O context */
450SK_U32 PortNumber, /* Index of affected port */
451int Flags) /* permanent/non-perm, sw-only */
452{
453 int i;
454
455#ifdef DEBUG
456 SK_DBG_MSG(pAC, SK_DBGMOD_ADDR, SK_DBGCAT_CTRL,
457 ("GMAC InexactFilter (not cleared): %02X %02X %02X %02X %02X %02X %02X %02X\n",
458 pAC->Addr.Port[PortNumber].InexactFilter.Bytes[0],
459 pAC->Addr.Port[PortNumber].InexactFilter.Bytes[1],
460 pAC->Addr.Port[PortNumber].InexactFilter.Bytes[2],
461 pAC->Addr.Port[PortNumber].InexactFilter.Bytes[3],
462 pAC->Addr.Port[PortNumber].InexactFilter.Bytes[4],
463 pAC->Addr.Port[PortNumber].InexactFilter.Bytes[5],
464 pAC->Addr.Port[PortNumber].InexactFilter.Bytes[6],
465 pAC->Addr.Port[PortNumber].InexactFilter.Bytes[7]))
466#endif /* DEBUG */
467
468 /* Clear InexactFilter */
469 for (i = 0; i < 8; i++) {
470 pAC->Addr.Port[PortNumber].InexactFilter.Bytes[i] = 0;
471 }
472
473 if (Flags & SK_ADDR_PERMANENT) { /* permanent => RLMT */
474
475 /* Copy DRV bits to InexactFilter. */
476 for (i = 0; i < 8; i++) {
477 pAC->Addr.Port[PortNumber].InexactFilter.Bytes[i] |=
478 pAC->Addr.Port[PortNumber].InexactDrvFilter.Bytes[i];
479
480 /* Clear InexactRlmtFilter. */
481 pAC->Addr.Port[PortNumber].InexactRlmtFilter.Bytes[i] = 0;
482
483 }
484 }
485 else { /* not permanent => DRV */
486
487 /* Copy RLMT bits to InexactFilter. */
488 for (i = 0; i < 8; i++) {
489 pAC->Addr.Port[PortNumber].InexactFilter.Bytes[i] |=
490 pAC->Addr.Port[PortNumber].InexactRlmtFilter.Bytes[i];
491
492 /* Clear InexactDrvFilter. */
493 pAC->Addr.Port[PortNumber].InexactDrvFilter.Bytes[i] = 0;
494 }
495 }
496
497#ifdef DEBUG
498 SK_DBG_MSG(pAC, SK_DBGMOD_ADDR, SK_DBGCAT_CTRL,
499 ("GMAC InexactFilter (cleared): %02X %02X %02X %02X %02X %02X %02X %02X\n",
500 pAC->Addr.Port[PortNumber].InexactFilter.Bytes[0],
501 pAC->Addr.Port[PortNumber].InexactFilter.Bytes[1],
502 pAC->Addr.Port[PortNumber].InexactFilter.Bytes[2],
503 pAC->Addr.Port[PortNumber].InexactFilter.Bytes[3],
504 pAC->Addr.Port[PortNumber].InexactFilter.Bytes[4],
505 pAC->Addr.Port[PortNumber].InexactFilter.Bytes[5],
506 pAC->Addr.Port[PortNumber].InexactFilter.Bytes[6],
507 pAC->Addr.Port[PortNumber].InexactFilter.Bytes[7]))
508#endif /* DEBUG */
509
510 if (!(Flags & SK_MC_SW_ONLY)) {
511 (void) SkAddrGmacMcUpdate(pAC, IoC, PortNumber);
512 }
513
514 return (SK_ADDR_SUCCESS);
515
516} /* SkAddrGmacMcClear */
517
518#ifndef SK_ADDR_CHEAT
519
520/******************************************************************************
521 *
522 * SkXmacMcHash - hash multicast address
523 *
524 * Description:
525 * This routine computes the hash value for a multicast address.
526 * A CRC32 algorithm is used.
527 *
528 * Notes:
529 * The code was adapted from the XaQti data sheet.
530 *
531 * Context:
532 * runtime, pageable
533 *
534 * Returns:
535 * Hash value of multicast address.
536 */
537static SK_U32 SkXmacMcHash(
538unsigned char *pMc) /* Multicast address */
539{
540 SK_U32 Idx;
541 SK_U32 Bit;
542 SK_U32 Data;
543 SK_U32 Crc;
544
545 Crc = 0xFFFFFFFFUL;
546 for (Idx = 0; Idx < SK_MAC_ADDR_LEN; Idx++) {
547 Data = *pMc++;
548 for (Bit = 0; Bit < 8; Bit++, Data >>= 1) {
549 Crc = (Crc >> 1) ^ (((Crc ^ Data) & 1) ? XMAC_POLY : 0);
550 }
551 }
552
553 return (Crc & ((1 << HASH_BITS) - 1));
554
555} /* SkXmacMcHash */
556
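/*
 * Usage sketch: how the hash value selects a bit in the 64-bit inexact
 * filter, mirroring what SkAddrXmacMcAdd() below does for the XMAC.
 * pMc and the pFilter byte array are assumptions of this sketch.
 */
#if 0	/* illustrative only */
{
	SK_U32	HashBit;

	HashBit = 63 - SkXmacMcHash(&pMc->a[0]);
	pFilter[HashBit / 8] |= (SK_U8) (1 << (HashBit % 8));
}
#endif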
557
558/******************************************************************************
559 *
560 * SkGmacMcHash - hash multicast address
561 *
562 * Description:
563 * This routine computes the hash value for a multicast address.
 564 *	A CRC32 algorithm is used.
565 *
566 * Notes:
567 *
568 *
569 * Context:
570 * runtime, pageable
571 *
572 * Returns:
573 * Hash value of multicast address.
574 */
575static SK_U32 SkGmacMcHash(
576unsigned char *pMc) /* Multicast address */
577{
578 SK_U32 Data;
579 SK_U32 TmpData;
580 SK_U32 Crc;
581 int Byte;
582 int Bit;
583
584 Crc = 0xFFFFFFFFUL;
585 for (Byte = 0; Byte < 6; Byte++) {
586 /* Get next byte. */
587 Data = (SK_U32) pMc[Byte];
588
589 /* Change bit order in byte. */
590 TmpData = Data;
591 for (Bit = 0; Bit < 8; Bit++) {
592 if (TmpData & 1L) {
593 Data |= 1L << (7 - Bit);
594 }
595 else {
596 Data &= ~(1L << (7 - Bit));
597 }
598 TmpData >>= 1;
599 }
600
601 Crc ^= (Data << 24);
602 for (Bit = 0; Bit < 8; Bit++) {
603 if (Crc & 0x80000000) {
604 Crc = (Crc << 1) ^ GMAC_POLY;
605 }
606 else {
607 Crc <<= 1;
608 }
609 }
610 }
611
612 return (Crc & ((1 << HASH_BITS) - 1));
613
614} /* SkGmacMcHash */
615
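/*
 * The inner loop above reverses the bit order of each address byte
 * before it enters the CRC. An equivalent helper, shown only to make
 * that step easier to follow (not used by this module):
 */
#if 0	/* illustrative only */
static SK_U32 ReflectByte(SK_U32 Data)
{
	SK_U32	Out = 0;
	int	Bit;

	for (Bit = 0; Bit < 8; Bit++) {
		/* Shift the next low-order input bit into the output. */
		Out = (Out << 1) | (Data & 1);
		Data >>= 1;
	}

	return (Out);
}
#endif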
616#endif /* !SK_ADDR_CHEAT */
617
618/******************************************************************************
619 *
620 * SkAddrMcAdd - add a multicast address to a port
621 *
622 * Description:
623 * This routine enables reception for a given address on the given port.
624 *
625 * It calls either SkAddrXmacMcAdd or SkAddrGmacMcAdd, according to the
626 * adapter in use. The real work is done there.
627 *
628 * Notes:
629 * The return code is only valid for SK_PROM_MODE_NONE.
630 *
631 * Context:
632 * runtime, pageable
633 * may be called after SK_INIT_DATA
634 *
635 * Returns:
636 * SK_MC_FILTERING_EXACT
637 * SK_MC_FILTERING_INEXACT
638 * SK_MC_ILLEGAL_ADDRESS
639 * SK_MC_ILLEGAL_PORT
640 * SK_MC_RLMT_OVERFLOW
641 */
642int SkAddrMcAdd(
643SK_AC *pAC, /* adapter context */
644SK_IOC IoC, /* I/O context */
645SK_U32 PortNumber, /* Port Number */
646SK_MAC_ADDR *pMc, /* multicast address to be added */
647int Flags) /* permanent/non-permanent */
648{
649 int ReturnCode;
650
651 if (PortNumber >= (SK_U32) pAC->GIni.GIMacsFound) {
652 return (SK_ADDR_ILLEGAL_PORT);
653 }
654
655 if (pAC->GIni.GIGenesis) {
656 ReturnCode = SkAddrXmacMcAdd(pAC, IoC, PortNumber, pMc, Flags);
657 }
658 else {
659 ReturnCode = SkAddrGmacMcAdd(pAC, IoC, PortNumber, pMc, Flags);
660 }
661
662 return (ReturnCode);
663
664} /* SkAddrMcAdd */
665
666
667/******************************************************************************
668 *
669 * SkAddrXmacMcAdd - add a multicast address to a port
670 *
671 * Description:
672 * This routine enables reception for a given address on the given port.
673 *
674 * Notes:
675 * The return code is only valid for SK_PROM_MODE_NONE.
676 *
677 * The multicast bit is only checked if there are no free exact match
678 * entries.
679 *
680 * Context:
681 * runtime, pageable
682 * may be called after SK_INIT_DATA
683 *
684 * Returns:
685 * SK_MC_FILTERING_EXACT
686 * SK_MC_FILTERING_INEXACT
687 * SK_MC_ILLEGAL_ADDRESS
688 * SK_MC_RLMT_OVERFLOW
689 */
690static int SkAddrXmacMcAdd(
691SK_AC *pAC, /* adapter context */
692SK_IOC IoC, /* I/O context */
693SK_U32 PortNumber, /* Port Number */
694SK_MAC_ADDR *pMc, /* multicast address to be added */
695int Flags) /* permanent/non-permanent */
696{
697 int i;
698 SK_U8 Inexact;
699#ifndef SK_ADDR_CHEAT
700 SK_U32 HashBit;
701#endif /* !defined(SK_ADDR_CHEAT) */
702
703 if (Flags & SK_ADDR_PERMANENT) { /* permanent => RLMT */
704#ifdef xDEBUG
705 if (pAC->Addr.Port[PortNumber].NextExactMatchRlmt <
706 SK_ADDR_FIRST_MATCH_RLMT) {
707 Next0[PortNumber] |= 1;
708 return (SK_MC_RLMT_OVERFLOW);
709 }
710#endif /* DEBUG */
711
712 if (pAC->Addr.Port[PortNumber].NextExactMatchRlmt >
713 SK_ADDR_LAST_MATCH_RLMT) {
714 return (SK_MC_RLMT_OVERFLOW);
715 }
716
717 /* Set a RLMT multicast address. */
718
719 pAC->Addr.Port[PortNumber].Exact[
720 pAC->Addr.Port[PortNumber].NextExactMatchRlmt++] = *pMc;
721
722 return (SK_MC_FILTERING_EXACT);
723 }
724
725#ifdef xDEBUG
726 if (pAC->Addr.Port[PortNumber].NextExactMatchDrv <
727 SK_ADDR_FIRST_MATCH_DRV) {
728 Next0[PortNumber] |= 2;
729 return (SK_MC_RLMT_OVERFLOW);
730 }
731#endif /* DEBUG */
732
733 if (pAC->Addr.Port[PortNumber].NextExactMatchDrv <= SK_ADDR_LAST_MATCH_DRV) {
734
735 /* Set exact match entry. */
736 pAC->Addr.Port[PortNumber].Exact[
737 pAC->Addr.Port[PortNumber].NextExactMatchDrv++] = *pMc;
738
739 /* Clear InexactFilter */
740 for (i = 0; i < 8; i++) {
741 pAC->Addr.Port[PortNumber].InexactFilter.Bytes[i] = 0;
742 }
743 }
744 else {
745 if (!(pMc->a[0] & SK_MC_BIT)) {
746 /* Hashing only possible with multicast addresses */
747 return (SK_MC_ILLEGAL_ADDRESS);
748 }
749#ifndef SK_ADDR_CHEAT
750 /* Compute hash value of address. */
751 HashBit = 63 - SkXmacMcHash(&pMc->a[0]);
752
753 /* Add bit to InexactFilter. */
754 pAC->Addr.Port[PortNumber].InexactFilter.Bytes[HashBit / 8] |=
755 1 << (HashBit % 8);
756#else /* SK_ADDR_CHEAT */
757 /* Set all bits in InexactFilter. */
758 for (i = 0; i < 8; i++) {
759 pAC->Addr.Port[PortNumber].InexactFilter.Bytes[i] = 0xFF;
760 }
761#endif /* SK_ADDR_CHEAT */
762 }
763
764 for (Inexact = 0, i = 0; i < 8; i++) {
765 Inexact |= pAC->Addr.Port[PortNumber].InexactFilter.Bytes[i];
766 }
767
768 if (Inexact == 0 && pAC->Addr.Port[PortNumber].PromMode == 0) {
769 return (SK_MC_FILTERING_EXACT);
770 }
771 else {
772 return (SK_MC_FILTERING_INEXACT);
773 }
774
775} /* SkAddrXmacMcAdd */
776
777
778/******************************************************************************
779 *
780 * SkAddrGmacMcAdd - add a multicast address to a port
781 *
782 * Description:
783 * This routine enables reception for a given address on the given port.
784 *
785 * Notes:
786 * The return code is only valid for SK_PROM_MODE_NONE.
787 *
788 * Context:
789 * runtime, pageable
790 * may be called after SK_INIT_DATA
791 *
792 * Returns:
793 * SK_MC_FILTERING_INEXACT
794 * SK_MC_ILLEGAL_ADDRESS
795 */
796static int SkAddrGmacMcAdd(
797SK_AC *pAC, /* adapter context */
798SK_IOC IoC, /* I/O context */
799SK_U32 PortNumber, /* Port Number */
800SK_MAC_ADDR *pMc, /* multicast address to be added */
801int Flags) /* permanent/non-permanent */
802{
803 int i;
804#ifndef SK_ADDR_CHEAT
805 SK_U32 HashBit;
806#endif /* !defined(SK_ADDR_CHEAT) */
807
808 if (!(pMc->a[0] & SK_MC_BIT)) {
809 /* Hashing only possible with multicast addresses */
810 return (SK_MC_ILLEGAL_ADDRESS);
811 }
812
813#ifndef SK_ADDR_CHEAT
814
815 /* Compute hash value of address. */
816 HashBit = SkGmacMcHash(&pMc->a[0]);
817
818 if (Flags & SK_ADDR_PERMANENT) { /* permanent => RLMT */
819
820 /* Add bit to InexactRlmtFilter. */
821 pAC->Addr.Port[PortNumber].InexactRlmtFilter.Bytes[HashBit / 8] |=
822 1 << (HashBit % 8);
823
824 /* Copy bit to InexactFilter. */
825 for (i = 0; i < 8; i++) {
826 pAC->Addr.Port[PortNumber].InexactFilter.Bytes[i] |=
827 pAC->Addr.Port[PortNumber].InexactRlmtFilter.Bytes[i];
828 }
829#ifdef DEBUG
830 SK_DBG_MSG(pAC, SK_DBGMOD_ADDR, SK_DBGCAT_CTRL,
831 ("GMAC InexactRlmtFilter: %02X %02X %02X %02X %02X %02X %02X %02X\n",
832 pAC->Addr.Port[PortNumber].InexactRlmtFilter.Bytes[0],
833 pAC->Addr.Port[PortNumber].InexactRlmtFilter.Bytes[1],
834 pAC->Addr.Port[PortNumber].InexactRlmtFilter.Bytes[2],
835 pAC->Addr.Port[PortNumber].InexactRlmtFilter.Bytes[3],
836 pAC->Addr.Port[PortNumber].InexactRlmtFilter.Bytes[4],
837 pAC->Addr.Port[PortNumber].InexactRlmtFilter.Bytes[5],
838 pAC->Addr.Port[PortNumber].InexactRlmtFilter.Bytes[6],
839 pAC->Addr.Port[PortNumber].InexactRlmtFilter.Bytes[7]))
840#endif /* DEBUG */
841 }
842 else { /* not permanent => DRV */
843
844 /* Add bit to InexactDrvFilter. */
845 pAC->Addr.Port[PortNumber].InexactDrvFilter.Bytes[HashBit / 8] |=
846 1 << (HashBit % 8);
847
848 /* Copy bit to InexactFilter. */
849 for (i = 0; i < 8; i++) {
850 pAC->Addr.Port[PortNumber].InexactFilter.Bytes[i] |=
851 pAC->Addr.Port[PortNumber].InexactDrvFilter.Bytes[i];
852 }
853#ifdef DEBUG
854 SK_DBG_MSG(pAC, SK_DBGMOD_ADDR, SK_DBGCAT_CTRL,
855 ("GMAC InexactDrvFilter: %02X %02X %02X %02X %02X %02X %02X %02X\n",
856 pAC->Addr.Port[PortNumber].InexactDrvFilter.Bytes[0],
857 pAC->Addr.Port[PortNumber].InexactDrvFilter.Bytes[1],
858 pAC->Addr.Port[PortNumber].InexactDrvFilter.Bytes[2],
859 pAC->Addr.Port[PortNumber].InexactDrvFilter.Bytes[3],
860 pAC->Addr.Port[PortNumber].InexactDrvFilter.Bytes[4],
861 pAC->Addr.Port[PortNumber].InexactDrvFilter.Bytes[5],
862 pAC->Addr.Port[PortNumber].InexactDrvFilter.Bytes[6],
863 pAC->Addr.Port[PortNumber].InexactDrvFilter.Bytes[7]))
864#endif /* DEBUG */
865 }
866
867#else /* SK_ADDR_CHEAT */
868
869 /* Set all bits in InexactFilter. */
870 for (i = 0; i < 8; i++) {
871 pAC->Addr.Port[PortNumber].InexactFilter.Bytes[i] = 0xFF;
872 }
873#endif /* SK_ADDR_CHEAT */
874
875 return (SK_MC_FILTERING_INEXACT);
876
877} /* SkAddrGmacMcAdd */
878
879#endif /* !SK_SLIM */
880
881/******************************************************************************
882 *
883 * SkAddrMcUpdate - update the HW MC address table and set the MAC address
884 *
885 * Description:
886 * This routine enables reception of the addresses contained in a local
887 * table for a given port.
888 * It also programs the port's current physical MAC address.
889 *
890 * It calls either SkAddrXmacMcUpdate or SkAddrGmacMcUpdate, according
891 * to the adapter in use. The real work is done there.
892 *
893 * Notes:
894 * The return code is only valid for SK_PROM_MODE_NONE.
895 *
896 * Context:
897 * runtime, pageable
898 * may be called after SK_INIT_IO
899 *
900 * Returns:
901 * SK_MC_FILTERING_EXACT
902 * SK_MC_FILTERING_INEXACT
903 * SK_ADDR_ILLEGAL_PORT
904 */
905int SkAddrMcUpdate(
906SK_AC *pAC, /* adapter context */
907SK_IOC IoC, /* I/O context */
908SK_U32 PortNumber) /* Port Number */
909{
910 int ReturnCode = 0;
911#if (!defined(SK_SLIM) || defined(DEBUG))
912 if (PortNumber >= (SK_U32) pAC->GIni.GIMacsFound) {
913 return (SK_ADDR_ILLEGAL_PORT);
914 }
915#endif /* !SK_SLIM || DEBUG */
916
917#ifdef GENESIS
918 if (pAC->GIni.GIGenesis) {
919 ReturnCode = SkAddrXmacMcUpdate(pAC, IoC, PortNumber);
920 }
921#endif /* GENESIS */
922#ifdef YUKON
923 if (!pAC->GIni.GIGenesis) {
924 ReturnCode = SkAddrGmacMcUpdate(pAC, IoC, PortNumber);
925 }
926#endif /* YUKON */
927 return (ReturnCode);
928
929} /* SkAddrMcUpdate */
930
931
932#ifdef GENESIS
933
934/******************************************************************************
935 *
936 * SkAddrXmacMcUpdate - update the HW MC address table and set the MAC address
937 *
938 * Description:
939 * This routine enables reception of the addresses contained in a local
940 * table for a given port.
941 * It also programs the port's current physical MAC address.
942 *
943 * Notes:
944 * The return code is only valid for SK_PROM_MODE_NONE.
945 *
946 * Context:
947 * runtime, pageable
948 * may be called after SK_INIT_IO
949 *
950 * Returns:
951 * SK_MC_FILTERING_EXACT
952 * SK_MC_FILTERING_INEXACT
953 * SK_ADDR_ILLEGAL_PORT
954 */
955static int SkAddrXmacMcUpdate(
956SK_AC *pAC, /* adapter context */
957SK_IOC IoC, /* I/O context */
958SK_U32 PortNumber) /* Port Number */
959{
960 SK_U32 i;
961 SK_U8 Inexact;
962 SK_U16 *OutAddr;
963 SK_ADDR_PORT *pAPort;
964
965 SK_DBG_MSG(pAC,SK_DBGMOD_ADDR, SK_DBGCAT_CTRL,
966 ("SkAddrXmacMcUpdate on Port %u.\n", PortNumber))
967
968 pAPort = &pAC->Addr.Port[PortNumber];
969
970#ifdef DEBUG
971 SK_DBG_MSG(pAC,SK_DBGMOD_ADDR, SK_DBGCAT_CTRL,
972 ("Next0 on Port %d: %d\n", PortNumber, Next0[PortNumber]))
973#endif /* DEBUG */
974
975 /* Start with 0 to also program the logical MAC address. */
976 for (i = 0; i < pAPort->NextExactMatchRlmt; i++) {
977 /* Set exact match address i on XMAC */
978 OutAddr = (SK_U16 *) &pAPort->Exact[i].a[0];
979 XM_OUTADDR(IoC, PortNumber, XM_EXM(i), OutAddr);
980 }
981
982 /* Clear other permanent exact match addresses on XMAC */
983 if (pAPort->NextExactMatchRlmt <= SK_ADDR_LAST_MATCH_RLMT) {
984
985 SkXmClrExactAddr(pAC, IoC, PortNumber, pAPort->NextExactMatchRlmt,
986 SK_ADDR_LAST_MATCH_RLMT);
987 }
988
989 for (i = pAPort->FirstExactMatchDrv; i < pAPort->NextExactMatchDrv; i++) {
990 OutAddr = (SK_U16 *) &pAPort->Exact[i].a[0];
991 XM_OUTADDR(IoC, PortNumber, XM_EXM(i), OutAddr);
992 }
993
994 /* Clear other non-permanent exact match addresses on XMAC */
995 if (pAPort->NextExactMatchDrv <= SK_ADDR_LAST_MATCH_DRV) {
996
997 SkXmClrExactAddr(pAC, IoC, PortNumber, pAPort->NextExactMatchDrv,
998 SK_ADDR_LAST_MATCH_DRV);
999 }
1000
1001 for (Inexact = 0, i = 0; i < 8; i++) {
1002 Inexact |= pAPort->InexactFilter.Bytes[i];
1003 }
1004
1005 if (pAPort->PromMode & SK_PROM_MODE_ALL_MC) {
1006
1007 /* Set all bits in 64-bit hash register. */
1008 XM_OUTHASH(IoC, PortNumber, XM_HSM, &OnesHash);
1009
1010 /* Enable Hashing */
1011 SkMacHashing(pAC, IoC, (int) PortNumber, SK_TRUE);
1012 }
1013 else if (Inexact != 0) {
1014
1015 /* Set 64-bit hash register to InexactFilter. */
1016 XM_OUTHASH(IoC, PortNumber, XM_HSM, &pAPort->InexactFilter.Bytes[0]);
1017
1018 /* Enable Hashing */
1019 SkMacHashing(pAC, IoC, (int) PortNumber, SK_TRUE);
1020 }
1021 else {
1022 /* Disable Hashing */
1023 SkMacHashing(pAC, IoC, (int) PortNumber, SK_FALSE);
1024 }
1025
1026 if (pAPort->PromMode != SK_PROM_MODE_NONE) {
1027 (void) SkAddrXmacPromiscuousChange(pAC, IoC, PortNumber, pAPort->PromMode);
1028 }
1029
1030 /* Set port's current physical MAC address. */
1031 OutAddr = (SK_U16 *) &pAPort->CurrentMacAddress.a[0];
1032
1033 XM_OUTADDR(IoC, PortNumber, XM_SA, OutAddr);
1034
1035#ifdef xDEBUG
1036 for (i = 0; i < pAPort->NextExactMatchRlmt; i++) {
1037 SK_U8 InAddr8[6];
1038 SK_U16 *InAddr;
1039
1040 /* Get exact match address i from port PortNumber. */
1041 InAddr = (SK_U16 *) &InAddr8[0];
1042
1043 XM_INADDR(IoC, PortNumber, XM_EXM(i), InAddr);
1044
1045 SK_DBG_MSG(pAC,SK_DBGMOD_ADDR, SK_DBGCAT_CTRL,
 1046			("SkAddrXmacMcUpdate: MC address %d on Port %u: "
1047 "%02x %02x %02x %02x %02x %02x -- %02x %02x %02x %02x %02x %02x\n",
1048 i,
1049 PortNumber,
1050 InAddr8[0],
1051 InAddr8[1],
1052 InAddr8[2],
1053 InAddr8[3],
1054 InAddr8[4],
1055 InAddr8[5],
1056 pAPort->Exact[i].a[0],
1057 pAPort->Exact[i].a[1],
1058 pAPort->Exact[i].a[2],
1059 pAPort->Exact[i].a[3],
1060 pAPort->Exact[i].a[4],
1061 pAPort->Exact[i].a[5]))
1062 }
1063#endif /* DEBUG */
1064
1065 /* Determine return value. */
1066 if (Inexact == 0 && pAPort->PromMode == 0) {
1067 return (SK_MC_FILTERING_EXACT);
1068 }
1069 else {
1070 return (SK_MC_FILTERING_INEXACT);
1071 }
1072
1073} /* SkAddrXmacMcUpdate */
1074
1075#endif /* GENESIS */
1076
1077#ifdef YUKON
1078
1079/******************************************************************************
1080 *
1081 * SkAddrGmacMcUpdate - update the HW MC address table and set the MAC address
1082 *
1083 * Description:
1084 * This routine enables reception of the addresses contained in a local
1085 * table for a given port.
1086 * It also programs the port's current physical MAC address.
1087 *
1088 * Notes:
1089 * The return code is only valid for SK_PROM_MODE_NONE.
1090 *
1091 * Context:
1092 * runtime, pageable
1093 * may be called after SK_INIT_IO
1094 *
1095 * Returns:
1096 * SK_MC_FILTERING_EXACT
1097 * SK_MC_FILTERING_INEXACT
1098 * SK_ADDR_ILLEGAL_PORT
1099 */
1100static int SkAddrGmacMcUpdate(
1101SK_AC *pAC, /* adapter context */
1102SK_IOC IoC, /* I/O context */
1103SK_U32 PortNumber) /* Port Number */
1104{
1105#ifndef SK_SLIM
1106 SK_U32 i;
1107 SK_U8 Inexact;
1108#endif /* not SK_SLIM */
1109 SK_U16 *OutAddr;
1110 SK_ADDR_PORT *pAPort;
1111
1112 SK_DBG_MSG(pAC,SK_DBGMOD_ADDR, SK_DBGCAT_CTRL,
1113 ("SkAddrGmacMcUpdate on Port %u.\n", PortNumber))
1114
1115 pAPort = &pAC->Addr.Port[PortNumber];
1116
1117#ifdef DEBUG
1118 SK_DBG_MSG(pAC,SK_DBGMOD_ADDR, SK_DBGCAT_CTRL,
1119 ("Next0 on Port %d: %d\n", PortNumber, Next0[PortNumber]))
1120#endif /* DEBUG */
1121
1122#ifndef SK_SLIM
1123 for (Inexact = 0, i = 0; i < 8; i++) {
1124 Inexact |= pAPort->InexactFilter.Bytes[i];
1125 }
1126
1127 /* Set 64-bit hash register to InexactFilter. */
1128 GM_OUTHASH(IoC, PortNumber, GM_MC_ADDR_H1,
1129 &pAPort->InexactFilter.Bytes[0]);
1130
1131 if (pAPort->PromMode & SK_PROM_MODE_ALL_MC) {
1132
1133 /* Set all bits in 64-bit hash register. */
1134 GM_OUTHASH(IoC, PortNumber, GM_MC_ADDR_H1, &OnesHash);
1135
1136 /* Enable Hashing */
1137 SkMacHashing(pAC, IoC, (int) PortNumber, SK_TRUE);
1138 }
1139 else {
1140 /* Enable Hashing. */
1141 SkMacHashing(pAC, IoC, (int) PortNumber, SK_TRUE);
1142 }
1143
1144 if (pAPort->PromMode != SK_PROM_MODE_NONE) {
1145 (void) SkAddrGmacPromiscuousChange(pAC, IoC, PortNumber, pAPort->PromMode);
1146 }
1147#else /* SK_SLIM */
1148
1149 /* Set all bits in 64-bit hash register. */
1150 GM_OUTHASH(IoC, PortNumber, GM_MC_ADDR_H1, &OnesHash);
1151
1152 /* Enable Hashing */
1153 SkMacHashing(pAC, IoC, (int) PortNumber, SK_TRUE);
1154
1155 (void) SkAddrGmacPromiscuousChange(pAC, IoC, PortNumber, pAPort->PromMode);
1156
1157#endif /* SK_SLIM */
1158
1159 /* Set port's current physical MAC address. */
1160 OutAddr = (SK_U16 *) &pAPort->CurrentMacAddress.a[0];
1161 GM_OUTADDR(IoC, PortNumber, GM_SRC_ADDR_1L, OutAddr);
1162
1163 /* Set port's current logical MAC address. */
1164 OutAddr = (SK_U16 *) &pAPort->Exact[0].a[0];
1165 GM_OUTADDR(IoC, PortNumber, GM_SRC_ADDR_2L, OutAddr);
1166
1167#ifdef DEBUG
1168 SK_DBG_MSG(pAC, SK_DBGMOD_ADDR, SK_DBGCAT_CTRL,
1169 ("SkAddrGmacMcUpdate: Permanent Physical MAC Address: %02X %02X %02X %02X %02X %02X\n",
1170 pAPort->Exact[0].a[0],
1171 pAPort->Exact[0].a[1],
1172 pAPort->Exact[0].a[2],
1173 pAPort->Exact[0].a[3],
1174 pAPort->Exact[0].a[4],
1175 pAPort->Exact[0].a[5]))
1176
1177 SK_DBG_MSG(pAC, SK_DBGMOD_ADDR, SK_DBGCAT_CTRL,
1178 ("SkAddrGmacMcUpdate: Physical MAC Address: %02X %02X %02X %02X %02X %02X\n",
1179 pAPort->CurrentMacAddress.a[0],
1180 pAPort->CurrentMacAddress.a[1],
1181 pAPort->CurrentMacAddress.a[2],
1182 pAPort->CurrentMacAddress.a[3],
1183 pAPort->CurrentMacAddress.a[4],
1184 pAPort->CurrentMacAddress.a[5]))
1185#endif /* DEBUG */
1186
1187#ifndef SK_SLIM
1188 /* Determine return value. */
1189 if (Inexact == 0 && pAPort->PromMode == 0) {
1190 return (SK_MC_FILTERING_EXACT);
1191 }
1192 else {
1193 return (SK_MC_FILTERING_INEXACT);
1194 }
1195#else /* SK_SLIM */
1196 return (SK_MC_FILTERING_INEXACT);
1197#endif /* SK_SLIM */
1198
1199} /* SkAddrGmacMcUpdate */
1200
1201#endif /* YUKON */
1202
1203#ifndef SK_NO_MAO
1204
1205/******************************************************************************
1206 *
1207 * SkAddrOverride - override a port's MAC address
1208 *
1209 * Description:
1210 * This routine overrides the MAC address of one port.
1211 *
1212 * Context:
1213 * runtime, pageable
1214 * may be called after SK_INIT_IO
1215 *
1216 * Returns:
1217 * SK_ADDR_SUCCESS if successful.
1218 * SK_ADDR_DUPLICATE_ADDRESS if duplicate MAC address.
1219 * SK_ADDR_MULTICAST_ADDRESS if multicast or broadcast address.
1220 * SK_ADDR_TOO_EARLY if SK_INIT_IO was not executed before.
1221 */
1222int SkAddrOverride(
1223SK_AC *pAC, /* adapter context */
1224SK_IOC IoC, /* I/O context */
1225SK_U32 PortNumber, /* Port Number */
1226SK_MAC_ADDR SK_FAR *pNewAddr, /* new MAC address */
1227int Flags) /* logical/physical MAC address */
1228{
1229#ifndef SK_NO_RLMT
1230 SK_EVPARA Para;
1231#endif /* !SK_NO_RLMT */
1232 SK_U32 NetNumber;
1233 SK_U32 i;
1234 SK_U16 SK_FAR *OutAddr;
1235
1236#ifndef SK_NO_RLMT
1237 NetNumber = pAC->Rlmt.Port[PortNumber].Net->NetNumber;
1238#else
1239 NetNumber = 0;
1240#endif /* SK_NO_RLMT */
1241#if (!defined(SK_SLIM) || defined(DEBUG))
1242 if (PortNumber >= (SK_U32) pAC->GIni.GIMacsFound) {
1243 return (SK_ADDR_ILLEGAL_PORT);
1244 }
1245#endif /* !SK_SLIM || DEBUG */
1246 if (pNewAddr != NULL && (pNewAddr->a[0] & SK_MC_BIT) != 0) {
1247 return (SK_ADDR_MULTICAST_ADDRESS);
1248 }
1249
1250 if (!pAC->Addr.Net[NetNumber].CurrentMacAddressSet) {
1251 return (SK_ADDR_TOO_EARLY);
1252 }
1253
1254 if (Flags & SK_ADDR_SET_LOGICAL) { /* Activate logical MAC address. */
1255 /* Parameter *pNewAddr is ignored. */
1256 for (i = 0; i < (SK_U32) pAC->GIni.GIMacsFound; i++) {
1257 if (!pAC->Addr.Port[i].CurrentMacAddressSet) {
1258 return (SK_ADDR_TOO_EARLY);
1259 }
1260 }
1261#ifndef SK_NO_RLMT
1262 /* Set PortNumber to number of net's active port. */
1263 PortNumber = pAC->Rlmt.Net[NetNumber].
1264 Port[pAC->Addr.Net[NetNumber].ActivePort]->PortNumber;
1265#endif /* !SK_NO_RLMT */
1266 pAC->Addr.Port[PortNumber].Exact[0] =
1267 pAC->Addr.Net[NetNumber].CurrentMacAddress;
1268
1269 /* Write address to first exact match entry of active port. */
1270 (void) SkAddrMcUpdate(pAC, IoC, PortNumber);
1271 }
1272 else if (Flags & SK_ADDR_CLEAR_LOGICAL) {
1273 /* Deactivate logical MAC address. */
1274 /* Parameter *pNewAddr is ignored. */
1275 for (i = 0; i < (SK_U32) pAC->GIni.GIMacsFound; i++) {
1276 if (!pAC->Addr.Port[i].CurrentMacAddressSet) {
1277 return (SK_ADDR_TOO_EARLY);
1278 }
1279 }
1280#ifndef SK_NO_RLMT
1281 /* Set PortNumber to number of net's active port. */
1282 PortNumber = pAC->Rlmt.Net[NetNumber].
1283 Port[pAC->Addr.Net[NetNumber].ActivePort]->PortNumber;
1284#endif /* !SK_NO_RLMT */
1285 for (i = 0; i < SK_MAC_ADDR_LEN; i++ ) {
1286 pAC->Addr.Port[PortNumber].Exact[0].a[i] = 0;
1287 }
1288
1289 /* Write address to first exact match entry of active port. */
1290 (void) SkAddrMcUpdate(pAC, IoC, PortNumber);
1291 }
1292 else if (Flags & SK_ADDR_PHYSICAL_ADDRESS) { /* Physical MAC address. */
1293 if (SK_ADDR_EQUAL(pNewAddr->a,
1294 pAC->Addr.Net[NetNumber].CurrentMacAddress.a)) {
1295 return (SK_ADDR_DUPLICATE_ADDRESS);
1296 }
1297
1298 for (i = 0; i < (SK_U32) pAC->GIni.GIMacsFound; i++) {
1299 if (!pAC->Addr.Port[i].CurrentMacAddressSet) {
1300 return (SK_ADDR_TOO_EARLY);
1301 }
1302
1303 if (SK_ADDR_EQUAL(pNewAddr->a,
1304 pAC->Addr.Port[i].CurrentMacAddress.a)) {
1305 if (i == PortNumber) {
1306 return (SK_ADDR_SUCCESS);
1307 }
1308 else {
1309 return (SK_ADDR_DUPLICATE_ADDRESS);
1310 }
1311 }
1312 }
1313
1314 pAC->Addr.Port[PortNumber].PreviousMacAddress =
1315 pAC->Addr.Port[PortNumber].CurrentMacAddress;
1316 pAC->Addr.Port[PortNumber].CurrentMacAddress = *pNewAddr;
1317
1318 /* Change port's physical MAC address. */
1319 OutAddr = (SK_U16 SK_FAR *) pNewAddr;
1320#ifdef GENESIS
1321 if (pAC->GIni.GIGenesis) {
1322 XM_OUTADDR(IoC, PortNumber, XM_SA, OutAddr);
1323 }
1324#endif /* GENESIS */
1325#ifdef YUKON
1326 if (!pAC->GIni.GIGenesis) {
1327 GM_OUTADDR(IoC, PortNumber, GM_SRC_ADDR_1L, OutAddr);
1328 }
1329#endif /* YUKON */
1330
1331#ifndef SK_NO_RLMT
1332 /* Report address change to RLMT. */
1333 Para.Para32[0] = PortNumber;
 1334		Para.Para32[1] = -1;
1335 SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_PORT_ADDR, Para);
1336#endif /* !SK_NO_RLMT */
1337 }
1338 else { /* Logical MAC address. */
1339 if (SK_ADDR_EQUAL(pNewAddr->a,
1340 pAC->Addr.Net[NetNumber].CurrentMacAddress.a)) {
1341 return (SK_ADDR_SUCCESS);
1342 }
1343
1344 for (i = 0; i < (SK_U32) pAC->GIni.GIMacsFound; i++) {
1345 if (!pAC->Addr.Port[i].CurrentMacAddressSet) {
1346 return (SK_ADDR_TOO_EARLY);
1347 }
1348
1349 if (SK_ADDR_EQUAL(pNewAddr->a,
1350 pAC->Addr.Port[i].CurrentMacAddress.a)) {
1351 return (SK_ADDR_DUPLICATE_ADDRESS);
1352 }
1353 }
1354
1355 /*
 1356		 * If the physical and the logical MAC addresses are equal,
 1357		 * the physical MAC address must be changed here as well:
 1358		 * in that case the adapter was initially programmed with
 1359		 * two identical MAC addresses.
1360 */
1361 if (SK_ADDR_EQUAL(pAC->Addr.Port[PortNumber].CurrentMacAddress.a,
1362 pAC->Addr.Port[PortNumber].Exact[0].a)) {
1363
1364 pAC->Addr.Port[PortNumber].PreviousMacAddress =
1365 pAC->Addr.Port[PortNumber].CurrentMacAddress;
1366 pAC->Addr.Port[PortNumber].CurrentMacAddress = *pNewAddr;
1367
1368#ifndef SK_NO_RLMT
1369 /* Report address change to RLMT. */
1370 Para.Para32[0] = PortNumber;
 1371		Para.Para32[1] = -1;
1372 SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_PORT_ADDR, Para);
1373#endif /* !SK_NO_RLMT */
1374 }
1375
1376#ifndef SK_NO_RLMT
1377 /* Set PortNumber to number of net's active port. */
1378 PortNumber = pAC->Rlmt.Net[NetNumber].
1379 Port[pAC->Addr.Net[NetNumber].ActivePort]->PortNumber;
1380#endif /* !SK_NO_RLMT */
1381 pAC->Addr.Net[NetNumber].CurrentMacAddress = *pNewAddr;
1382 pAC->Addr.Port[PortNumber].Exact[0] = *pNewAddr;
1383#ifdef DEBUG
1384 SK_DBG_MSG(pAC,SK_DBGMOD_ADDR, SK_DBGCAT_CTRL,
1385 ("SkAddrOverride: Permanent MAC Address: %02X %02X %02X %02X %02X %02X\n",
1386 pAC->Addr.Net[NetNumber].PermanentMacAddress.a[0],
1387 pAC->Addr.Net[NetNumber].PermanentMacAddress.a[1],
1388 pAC->Addr.Net[NetNumber].PermanentMacAddress.a[2],
1389 pAC->Addr.Net[NetNumber].PermanentMacAddress.a[3],
1390 pAC->Addr.Net[NetNumber].PermanentMacAddress.a[4],
1391 pAC->Addr.Net[NetNumber].PermanentMacAddress.a[5]))
1392
1393 SK_DBG_MSG(pAC,SK_DBGMOD_ADDR, SK_DBGCAT_CTRL,
1394 ("SkAddrOverride: New logical MAC Address: %02X %02X %02X %02X %02X %02X\n",
1395 pAC->Addr.Net[NetNumber].CurrentMacAddress.a[0],
1396 pAC->Addr.Net[NetNumber].CurrentMacAddress.a[1],
1397 pAC->Addr.Net[NetNumber].CurrentMacAddress.a[2],
1398 pAC->Addr.Net[NetNumber].CurrentMacAddress.a[3],
1399 pAC->Addr.Net[NetNumber].CurrentMacAddress.a[4],
1400 pAC->Addr.Net[NetNumber].CurrentMacAddress.a[5]))
1401#endif /* DEBUG */
1402
1403 /* Write address to first exact match entry of active port. */
1404 (void) SkAddrMcUpdate(pAC, IoC, PortNumber);
1405 }
1406
1407 return (SK_ADDR_SUCCESS);
1408
1409} /* SkAddrOverride */
1410
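/*
 * Usage sketch: a typical set-MAC-address path overriding the logical
 * address of the net the port belongs to. NewHwAddr and the SK_MEMCPY()
 * macro are assumptions of this sketch; error handling is abbreviated.
 */
#if 0	/* illustrative only */
{
	SK_MAC_ADDR	NewAddr;
	int		Rc;

	SK_MEMCPY(&NewAddr.a[0], NewHwAddr, SK_MAC_ADDR_LEN);

	/* No flags set: override the logical MAC address of the net. */
	Rc = SkAddrOverride(pAC, IoC, PortNumber, &NewAddr, 0);
	if (Rc != SK_ADDR_SUCCESS) {
		/* e.g. SK_ADDR_DUPLICATE_ADDRESS or SK_ADDR_TOO_EARLY */
	}
}
#endif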
1411
1412#endif /* SK_NO_MAO */
1413
1414/******************************************************************************
1415 *
1416 * SkAddrPromiscuousChange - set promiscuous mode for given port
1417 *
1418 * Description:
1419 * This routine manages promiscuous mode:
1420 * - none
1421 * - all LLC frames
1422 * - all MC frames
1423 *
1424 * It calls either SkAddrXmacPromiscuousChange or
1425 * SkAddrGmacPromiscuousChange, according to the adapter in use.
1426 * The real work is done there.
1427 *
1428 * Context:
1429 * runtime, pageable
1430 * may be called after SK_INIT_IO
1431 *
1432 * Returns:
1433 * SK_ADDR_SUCCESS
1434 * SK_ADDR_ILLEGAL_PORT
1435 */
1436int SkAddrPromiscuousChange(
1437SK_AC *pAC, /* adapter context */
1438SK_IOC IoC, /* I/O context */
1439SK_U32 PortNumber, /* port whose promiscuous mode changes */
1440int NewPromMode) /* new promiscuous mode */
1441{
1442 int ReturnCode = 0;
1443#if (!defined(SK_SLIM) || defined(DEBUG))
1444 if (PortNumber >= (SK_U32) pAC->GIni.GIMacsFound) {
1445 return (SK_ADDR_ILLEGAL_PORT);
1446 }
1447#endif /* !SK_SLIM || DEBUG */
1448
1449#ifdef GENESIS
1450 if (pAC->GIni.GIGenesis) {
1451 ReturnCode =
1452 SkAddrXmacPromiscuousChange(pAC, IoC, PortNumber, NewPromMode);
1453 }
1454#endif /* GENESIS */
1455#ifdef YUKON
1456 if (!pAC->GIni.GIGenesis) {
1457 ReturnCode =
1458 SkAddrGmacPromiscuousChange(pAC, IoC, PortNumber, NewPromMode);
1459 }
1460#endif /* YUKON */
1461
1462 return (ReturnCode);
1463
1464} /* SkAddrPromiscuousChange */
1465
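/*
 * Usage sketch: the sequence an OS-specific set-rx-mode handler runs on
 * top of this module. Clear the multicast table in software, add the
 * current list, push it to the hardware, then adjust promiscuous mode.
 * McList, McCount and WantPromisc are assumptions of this sketch.
 */
#if 0	/* illustrative only */
{
	int	i;

	(void) SkAddrMcClear(pAC, IoC, PortNumber, SK_MC_SW_ONLY);

	for (i = 0; i < McCount; i++) {
		(void) SkAddrMcAdd(pAC, IoC, PortNumber, &McList[i], 0);
	}

	(void) SkAddrMcUpdate(pAC, IoC, PortNumber);

	(void) SkAddrPromiscuousChange(pAC, IoC, PortNumber,
		WantPromisc ? SK_PROM_MODE_LLC : SK_PROM_MODE_NONE);
}
#endif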
1466#ifdef GENESIS
1467
1468/******************************************************************************
1469 *
1470 * SkAddrXmacPromiscuousChange - set promiscuous mode for given port
1471 *
1472 * Description:
1473 * This routine manages promiscuous mode:
1474 * - none
1475 * - all LLC frames
1476 * - all MC frames
1477 *
1478 * Context:
1479 * runtime, pageable
1480 * may be called after SK_INIT_IO
1481 *
1482 * Returns:
1483 * SK_ADDR_SUCCESS
1484 * SK_ADDR_ILLEGAL_PORT
1485 */
1486static int SkAddrXmacPromiscuousChange(
1487SK_AC *pAC, /* adapter context */
1488SK_IOC IoC, /* I/O context */
1489SK_U32 PortNumber, /* port whose promiscuous mode changes */
1490int NewPromMode) /* new promiscuous mode */
1491{
1492 int i;
1493 SK_BOOL InexactModeBit;
1494 SK_U8 Inexact;
1495 SK_U8 HwInexact;
1496 SK_FILTER64 HwInexactFilter;
1497 SK_U16 LoMode; /* Lower 16 bits of XMAC Mode Register. */
1498 int CurPromMode = SK_PROM_MODE_NONE;
1499
1500 /* Read CurPromMode from Hardware. */
1501 XM_IN16(IoC, PortNumber, XM_MODE, &LoMode);
1502
1503 if ((LoMode & XM_MD_ENA_PROM) != 0) {
1504 /* Promiscuous mode! */
1505 CurPromMode |= SK_PROM_MODE_LLC;
1506 }
1507
1508 for (Inexact = 0xFF, i = 0; i < 8; i++) {
1509 Inexact &= pAC->Addr.Port[PortNumber].InexactFilter.Bytes[i];
1510 }
1511 if (Inexact == 0xFF) {
1512 CurPromMode |= (pAC->Addr.Port[PortNumber].PromMode & SK_PROM_MODE_ALL_MC);
1513 }
1514 else {
1515 /* Get InexactModeBit (bit XM_MD_ENA_HASH in mode register) */
1516 XM_IN16(IoC, PortNumber, XM_MODE, &LoMode);
1517
1518 InexactModeBit = (LoMode & XM_MD_ENA_HASH) != 0;
1519
1520 /* Read 64-bit hash register from XMAC */
1521 XM_INHASH(IoC, PortNumber, XM_HSM, &HwInexactFilter.Bytes[0]);
1522
1523 for (HwInexact = 0xFF, i = 0; i < 8; i++) {
1524 HwInexact &= HwInexactFilter.Bytes[i];
1525 }
1526
1527 if (InexactModeBit && (HwInexact == 0xFF)) {
1528 CurPromMode |= SK_PROM_MODE_ALL_MC;
1529 }
1530 }
1531
1532 pAC->Addr.Port[PortNumber].PromMode = NewPromMode;
1533
1534 if (NewPromMode == CurPromMode) {
1535 return (SK_ADDR_SUCCESS);
1536 }
1537
1538 if ((NewPromMode & SK_PROM_MODE_ALL_MC) &&
1539 !(CurPromMode & SK_PROM_MODE_ALL_MC)) { /* All MC. */
1540
1541 /* Set all bits in 64-bit hash register. */
1542 XM_OUTHASH(IoC, PortNumber, XM_HSM, &OnesHash);
1543
1544 /* Enable Hashing */
1545 SkMacHashing(pAC, IoC, (int) PortNumber, SK_TRUE);
1546 }
1547 else if ((CurPromMode & SK_PROM_MODE_ALL_MC) &&
1548 !(NewPromMode & SK_PROM_MODE_ALL_MC)) { /* Norm MC. */
1549 for (Inexact = 0, i = 0; i < 8; i++) {
1550 Inexact |= pAC->Addr.Port[PortNumber].InexactFilter.Bytes[i];
1551 }
1552 if (Inexact == 0) {
1553 /* Disable Hashing */
1554 SkMacHashing(pAC, IoC, (int) PortNumber, SK_FALSE);
1555 }
1556 else {
1557 /* Set 64-bit hash register to InexactFilter. */
1558 XM_OUTHASH(IoC, PortNumber, XM_HSM,
1559 &pAC->Addr.Port[PortNumber].InexactFilter.Bytes[0]);
1560
1561 /* Enable Hashing */
1562 SkMacHashing(pAC, IoC, (int) PortNumber, SK_TRUE);
1563 }
1564 }
1565
1566 if ((NewPromMode & SK_PROM_MODE_LLC) &&
1567 !(CurPromMode & SK_PROM_MODE_LLC)) { /* Prom. LLC */
1568 /* Set the MAC in Promiscuous Mode */
1569 SkMacPromiscMode(pAC, IoC, (int) PortNumber, SK_TRUE);
1570 }
1571 else if ((CurPromMode & SK_PROM_MODE_LLC) &&
1572 !(NewPromMode & SK_PROM_MODE_LLC)) { /* Norm. LLC. */
1573 /* Clear Promiscuous Mode */
1574 SkMacPromiscMode(pAC, IoC, (int) PortNumber, SK_FALSE);
1575 }
1576
1577 return (SK_ADDR_SUCCESS);
1578
1579} /* SkAddrXmacPromiscuousChange */
1580
1581#endif /* GENESIS */
1582
1583#ifdef YUKON
1584
1585/******************************************************************************
1586 *
1587 * SkAddrGmacPromiscuousChange - set promiscuous mode for given port
1588 *
1589 * Description:
1590 * This routine manages promiscuous mode:
1591 * - none
1592 * - all LLC frames
1593 * - all MC frames
1594 *
1595 * Context:
1596 * runtime, pageable
1597 * may be called after SK_INIT_IO
1598 *
1599 * Returns:
1600 * SK_ADDR_SUCCESS
1601 * SK_ADDR_ILLEGAL_PORT
1602 */
1603static int SkAddrGmacPromiscuousChange(
1604SK_AC *pAC, /* adapter context */
1605SK_IOC IoC, /* I/O context */
1606SK_U32 PortNumber, /* port whose promiscuous mode changes */
1607int NewPromMode) /* new promiscuous mode */
1608{
1609 SK_U16 ReceiveControl; /* GMAC Receive Control Register */
1610 int CurPromMode = SK_PROM_MODE_NONE;
1611
1612 /* Read CurPromMode from Hardware. */
1613 GM_IN16(IoC, PortNumber, GM_RX_CTRL, &ReceiveControl);
1614
1615 if ((ReceiveControl & (GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA)) == 0) {
1616 /* Promiscuous mode! */
1617 CurPromMode |= SK_PROM_MODE_LLC;
1618 }
1619
1620 if ((ReceiveControl & GM_RXCR_MCF_ENA) == 0) {
1621 /* All Multicast mode! */
1622 CurPromMode |= (pAC->Addr.Port[PortNumber].PromMode & SK_PROM_MODE_ALL_MC);
1623 }
1624
1625 pAC->Addr.Port[PortNumber].PromMode = NewPromMode;
1626
1627 if (NewPromMode == CurPromMode) {
1628 return (SK_ADDR_SUCCESS);
1629 }
1630
1631 if ((NewPromMode & SK_PROM_MODE_ALL_MC) &&
1632 !(CurPromMode & SK_PROM_MODE_ALL_MC)) { /* All MC */
1633
1634 /* Set all bits in 64-bit hash register. */
1635 GM_OUTHASH(IoC, PortNumber, GM_MC_ADDR_H1, &OnesHash);
1636
1637 /* Enable Hashing */
1638 SkMacHashing(pAC, IoC, (int) PortNumber, SK_TRUE);
1639 }
1640
1641 if ((CurPromMode & SK_PROM_MODE_ALL_MC) &&
1642 !(NewPromMode & SK_PROM_MODE_ALL_MC)) { /* Norm. MC */
1643
1644 /* Set 64-bit hash register to InexactFilter. */
1645 GM_OUTHASH(IoC, PortNumber, GM_MC_ADDR_H1,
1646 &pAC->Addr.Port[PortNumber].InexactFilter.Bytes[0]);
1647
1648 /* Enable Hashing. */
1649 SkMacHashing(pAC, IoC, (int) PortNumber, SK_TRUE);
1650 }
1651
1652 if ((NewPromMode & SK_PROM_MODE_LLC) &&
1653 !(CurPromMode & SK_PROM_MODE_LLC)) { /* Prom. LLC */
1654
1655 /* Set the MAC to Promiscuous Mode. */
1656 SkMacPromiscMode(pAC, IoC, (int) PortNumber, SK_TRUE);
1657 }
1658 else if ((CurPromMode & SK_PROM_MODE_LLC) &&
1659 !(NewPromMode & SK_PROM_MODE_LLC)) { /* Norm. LLC */
1660
1661 /* Clear Promiscuous Mode. */
1662 SkMacPromiscMode(pAC, IoC, (int) PortNumber, SK_FALSE);
1663 }
1664
1665 return (SK_ADDR_SUCCESS);
1666
1667} /* SkAddrGmacPromiscuousChange */
1668
1669#endif /* YUKON */
1670
1671#ifndef SK_SLIM
1672
1673/******************************************************************************
1674 *
1675 * SkAddrSwap - swap address info
1676 *
1677 * Description:
1678 * This routine swaps address info of two ports.
1679 *
1680 * Context:
1681 * runtime, pageable
1682 * may be called after SK_INIT_IO
1683 *
1684 * Returns:
1685 * SK_ADDR_SUCCESS
1686 * SK_ADDR_ILLEGAL_PORT
1687 */
1688int SkAddrSwap(
1689SK_AC *pAC, /* adapter context */
1690SK_IOC IoC, /* I/O context */
1691SK_U32 FromPortNumber, /* Port1 Index */
1692SK_U32 ToPortNumber) /* Port2 Index */
1693{
1694 int i;
1695 SK_U8 Byte;
1696 SK_MAC_ADDR MacAddr;
1697 SK_U32 DWord;
1698
1699 if (FromPortNumber >= (SK_U32) pAC->GIni.GIMacsFound) {
1700 return (SK_ADDR_ILLEGAL_PORT);
1701 }
1702
1703 if (ToPortNumber >= (SK_U32) pAC->GIni.GIMacsFound) {
1704 return (SK_ADDR_ILLEGAL_PORT);
1705 }
1706
1707 if (pAC->Rlmt.Port[FromPortNumber].Net != pAC->Rlmt.Port[ToPortNumber].Net) {
1708 return (SK_ADDR_ILLEGAL_PORT);
1709 }
1710
1711 /*
1712 * Swap:
1713 * - Exact Match Entries (GEnesis and Yukon)
1714 * Yukon uses first entry for the logical MAC
1715 * address (stored in the second GMAC register).
1716 * - FirstExactMatchRlmt (GEnesis only)
1717 * - NextExactMatchRlmt (GEnesis only)
1718 * - FirstExactMatchDrv (GEnesis only)
1719 * - NextExactMatchDrv (GEnesis only)
1720 * - 64-bit filter (InexactFilter)
1721 * - Promiscuous Mode
1722 * of ports.
1723 */
1724
1725 for (i = 0; i < SK_ADDR_EXACT_MATCHES; i++) {
1726 MacAddr = pAC->Addr.Port[FromPortNumber].Exact[i];
1727 pAC->Addr.Port[FromPortNumber].Exact[i] =
1728 pAC->Addr.Port[ToPortNumber].Exact[i];
1729 pAC->Addr.Port[ToPortNumber].Exact[i] = MacAddr;
1730 }
1731
1732 for (i = 0; i < 8; i++) {
1733 Byte = pAC->Addr.Port[FromPortNumber].InexactFilter.Bytes[i];
1734 pAC->Addr.Port[FromPortNumber].InexactFilter.Bytes[i] =
1735 pAC->Addr.Port[ToPortNumber].InexactFilter.Bytes[i];
1736 pAC->Addr.Port[ToPortNumber].InexactFilter.Bytes[i] = Byte;
1737 }
1738
1739 i = pAC->Addr.Port[FromPortNumber].PromMode;
1740 pAC->Addr.Port[FromPortNumber].PromMode = pAC->Addr.Port[ToPortNumber].PromMode;
1741 pAC->Addr.Port[ToPortNumber].PromMode = i;
1742
1743 if (pAC->GIni.GIGenesis) {
1744 DWord = pAC->Addr.Port[FromPortNumber].FirstExactMatchRlmt;
1745 pAC->Addr.Port[FromPortNumber].FirstExactMatchRlmt =
1746 pAC->Addr.Port[ToPortNumber].FirstExactMatchRlmt;
1747 pAC->Addr.Port[ToPortNumber].FirstExactMatchRlmt = DWord;
1748
1749 DWord = pAC->Addr.Port[FromPortNumber].NextExactMatchRlmt;
1750 pAC->Addr.Port[FromPortNumber].NextExactMatchRlmt =
1751 pAC->Addr.Port[ToPortNumber].NextExactMatchRlmt;
1752 pAC->Addr.Port[ToPortNumber].NextExactMatchRlmt = DWord;
1753
1754 DWord = pAC->Addr.Port[FromPortNumber].FirstExactMatchDrv;
1755 pAC->Addr.Port[FromPortNumber].FirstExactMatchDrv =
1756 pAC->Addr.Port[ToPortNumber].FirstExactMatchDrv;
1757 pAC->Addr.Port[ToPortNumber].FirstExactMatchDrv = DWord;
1758
1759 DWord = pAC->Addr.Port[FromPortNumber].NextExactMatchDrv;
1760 pAC->Addr.Port[FromPortNumber].NextExactMatchDrv =
1761 pAC->Addr.Port[ToPortNumber].NextExactMatchDrv;
1762 pAC->Addr.Port[ToPortNumber].NextExactMatchDrv = DWord;
1763 }
1764
1765 /* CAUTION: Solution works if only ports of one adapter are in use. */
1766 for (i = 0; (SK_U32) i < pAC->Rlmt.Net[pAC->Rlmt.Port[ToPortNumber].
1767 Net->NetNumber].NumPorts; i++) {
1768 if (pAC->Rlmt.Net[pAC->Rlmt.Port[ToPortNumber].Net->NetNumber].
1769 Port[i]->PortNumber == ToPortNumber) {
1770 pAC->Addr.Net[pAC->Rlmt.Port[ToPortNumber].Net->NetNumber].
1771 ActivePort = i;
1772 /* 20001207 RA: Was "ToPortNumber;". */
1773 }
1774 }
1775
1776 (void) SkAddrMcUpdate(pAC, IoC, FromPortNumber);
1777 (void) SkAddrMcUpdate(pAC, IoC, ToPortNumber);
1778
1779 return (SK_ADDR_SUCCESS);
1780
1781} /* SkAddrSwap */
1782
1783#endif /* !SK_SLIM */
1784
1785#ifdef __cplusplus
1786}
1787#endif /* __cplusplus */
1788
diff --git a/drivers/net/sk98lin/skdim.c b/drivers/net/sk98lin/skdim.c
deleted file mode 100644
index 37ce03fb8de3..000000000000
--- a/drivers/net/sk98lin/skdim.c
+++ /dev/null
@@ -1,742 +0,0 @@
1/******************************************************************************
2 *
3 * Name: skdim.c
4 * Project: GEnesis, PCI Gigabit Ethernet Adapter
5 * Version: $Revision: 1.5 $
6 * Date: $Date: 2003/11/28 12:55:40 $
7 * Purpose: All functions to maintain interrupt moderation
8 *
9 ******************************************************************************/
10
11/******************************************************************************
12 *
13 * (C)Copyright 1998-2002 SysKonnect GmbH.
14 * (C)Copyright 2002-2003 Marvell.
15 *
16 * This program is free software; you can redistribute it and/or modify
17 * it under the terms of the GNU General Public License as published by
18 * the Free Software Foundation; either version 2 of the License, or
19 * (at your option) any later version.
20 *
21 * The information in this file is provided "AS IS" without warranty.
22 *
23 ******************************************************************************/
24
25/******************************************************************************
26 *
27 * Description:
28 *
29 * This module is intended to manage the dynamic interrupt moderation on both
30 * GEnesis and Yukon adapters.
31 *
32 * Include File Hierarchy:
33 *
34 * "skdrv1st.h"
35 * "skdrv2nd.h"
36 *
37 ******************************************************************************/
38
39#ifndef lint
40static const char SysKonnectFileId[] =
41 "@(#) $Id: skdim.c,v 1.5 2003/11/28 12:55:40 rroesler Exp $ (C) SysKonnect.";
42#endif
43
44#define __SKADDR_C
45
46#ifdef __cplusplus
47#error C++ is not yet supported.
48extern "C" {
49#endif
50
51/*******************************************************************************
52**
53** Includes
54**
55*******************************************************************************/
56
57#ifndef __INC_SKDRV1ST_H
58#include "h/skdrv1st.h"
59#endif
60
61#ifndef __INC_SKDRV2ND_H
62#include "h/skdrv2nd.h"
63#endif
64
65#include <linux/kernel_stat.h>
66
67/*******************************************************************************
68**
69** Defines
70**
71*******************************************************************************/
72
73/*******************************************************************************
74**
75** Typedefs
76**
77*******************************************************************************/
78
79/*******************************************************************************
80**
81** Local function prototypes
82**
83*******************************************************************************/
84
85static unsigned int GetCurrentSystemLoad(SK_AC *pAC);
86static SK_U64 GetIsrCalls(SK_AC *pAC);
87static SK_BOOL IsIntModEnabled(SK_AC *pAC);
88static void SetCurrIntCtr(SK_AC *pAC);
89static void EnableIntMod(SK_AC *pAC);
90static void DisableIntMod(SK_AC *pAC);
91static void ResizeDimTimerDuration(SK_AC *pAC);
92static void DisplaySelectedModerationType(SK_AC *pAC);
93static void DisplaySelectedModerationMask(SK_AC *pAC);
94static void DisplayDescrRatio(SK_AC *pAC);
95
96/*******************************************************************************
97**
98** Global variables
99**
100*******************************************************************************/
101
102/*******************************************************************************
103**
104** Local variables
105**
106*******************************************************************************/
107
108/*******************************************************************************
109**
110** Global functions
111**
112*******************************************************************************/
113
114/*******************************************************************************
115** Function : SkDimModerate
116** Description : Called in every ISR to check if moderation is to be applied
117** or not for the current number of interrupts
118** Programmer : Ralph Roesler
119** Last Modified: 22-mar-03
120** Returns : void (!)
121** Notes : -
122*******************************************************************************/
123
124void
125SkDimModerate(SK_AC *pAC) {
126 unsigned int CurrSysLoad = 0; /* expressed in percent */
127 unsigned int LoadIncrease = 0; /* expressed in percent */
128 SK_U64 ThresholdInts = 0;
129 SK_U64 IsrCallsPerSec = 0;
130
131#define M_DIMINFO pAC->DynIrqModInfo
132
133 if (!IsIntModEnabled(pAC)) {
134 if (M_DIMINFO.IntModTypeSelect == C_INT_MOD_DYNAMIC) {
135 CurrSysLoad = GetCurrentSystemLoad(pAC);
136 if (CurrSysLoad > 75) {
137 /*
138 ** More than 75% total system load! Enable the moderation
139 ** to shield the system against too many interrupts.
140 */
141 EnableIntMod(pAC);
142 } else if (CurrSysLoad > M_DIMINFO.PrevSysLoad) {
143 LoadIncrease = (CurrSysLoad - M_DIMINFO.PrevSysLoad);
144 if (LoadIncrease > ((M_DIMINFO.PrevSysLoad *
145 C_INT_MOD_ENABLE_PERCENTAGE) / 100)) {
146 if (CurrSysLoad > 10) {
147 /*
148					** The load increased by more than C_INT_MOD_ENABLE_PERCENTAGE
149					** relative to the previous load of the system. Most likely this
150 ** is due to our ISR-proc...
151 */
152 EnableIntMod(pAC);
153 }
154 }
155 } else {
156 /*
157 ** Neither too much system load at all nor too much increase
158 ** with respect to the previous system load. Hence, we can leave
159 ** the ISR-handling like it is without enabling moderation.
160 */
161 }
162 M_DIMINFO.PrevSysLoad = CurrSysLoad;
163 }
164 } else {
165 if (M_DIMINFO.IntModTypeSelect == C_INT_MOD_DYNAMIC) {
166 ThresholdInts = ((M_DIMINFO.MaxModIntsPerSec *
167 C_INT_MOD_DISABLE_PERCENTAGE) / 100);
168 IsrCallsPerSec = GetIsrCalls(pAC);
169 if (IsrCallsPerSec <= ThresholdInts) {
170 /*
171 ** The number of interrupts within the last second is
172				** lower than the disable_percentage of the desired
173 ** maxrate. Therefore we can disable the moderation.
174 */
175 DisableIntMod(pAC);
176 M_DIMINFO.MaxModIntsPerSec =
177 (M_DIMINFO.MaxModIntsPerSecUpperLimit +
178 M_DIMINFO.MaxModIntsPerSecLowerLimit) / 2;
179 } else {
180 /*
181 ** The number of interrupts per sec is the same as expected.
182				** Evaluate the descriptor-ratio. If it has changed, resizing
183				** the moderation timer might be useful
184 */
185 if (M_DIMINFO.AutoSizing) {
186 ResizeDimTimerDuration(pAC);
187 }
188 }
189 }
190 }
191
192 /*
193 ** Some information to the log...
194 */
195 if (M_DIMINFO.DisplayStats) {
196 DisplaySelectedModerationType(pAC);
197 DisplaySelectedModerationMask(pAC);
198 DisplayDescrRatio(pAC);
199 }
200
201 M_DIMINFO.NbrProcessedDescr = 0;
202 SetCurrIntCtr(pAC);
203}
204
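The decision logic above boils down to two threshold tests: enable moderation when the system load is high or has jumped sharply, disable it when the interrupt rate drops well below the target. A standalone sketch with assumed percentage constants (placeholders for C_INT_MOD_ENABLE_PERCENTAGE / C_INT_MOD_DISABLE_PERCENTAGE):

/* Minimal userspace sketch of the dynamic-moderation decision. */
#include <stdio.h>

#define ENABLE_PERCENTAGE  50   /* assumed relative load increase that triggers enabling */
#define DISABLE_PERCENTAGE 50   /* assumed fraction of the max rate that allows disabling */

/* Decide whether to enable moderation while it is currently off. */
static int should_enable(unsigned int curr_load, unsigned int prev_load)
{
	if (curr_load > 75)                      /* system already heavily loaded */
		return 1;
	if (curr_load > prev_load &&
	    (curr_load - prev_load) > (prev_load * ENABLE_PERCENTAGE) / 100 &&
	    curr_load > 10)                      /* large relative jump, probably our ISR */
		return 1;
	return 0;
}

/* Decide whether to disable moderation while it is currently on. */
static int should_disable(unsigned long long isr_per_sec,
			  unsigned long long max_mod_ints_per_sec)
{
	unsigned long long threshold =
		(max_mod_ints_per_sec * DISABLE_PERCENTAGE) / 100;

	return isr_per_sec <= threshold;
}

int main(void)
{
	printf("enable?  %d\n", should_enable(60, 30));      /* 100% jump -> 1 */
	printf("disable? %d\n", should_disable(400, 2000));  /* 400 <= 1000 -> 1 */
	return 0;
}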
205/*******************************************************************************
206** Function : SkDimStartModerationTimer
207** Description : Starts the audit-timer for the dynamic interrupt moderation
208** Programmer : Ralph Roesler
209** Last Modified: 22-mar-03
210** Returns : void (!)
211** Notes : -
212*******************************************************************************/
213
214void
215SkDimStartModerationTimer(SK_AC *pAC) {
216 SK_EVPARA EventParam; /* Event struct for timer event */
217
218 SK_MEMSET((char *) &EventParam, 0, sizeof(EventParam));
219 EventParam.Para32[0] = SK_DRV_MODERATION_TIMER;
220 SkTimerStart(pAC, pAC->IoBase, &pAC->DynIrqModInfo.ModTimer,
221 SK_DRV_MODERATION_TIMER_LENGTH,
222 SKGE_DRV, SK_DRV_TIMER, EventParam);
223}
224
225/*******************************************************************************
226** Function : SkDimEnableModerationIfNeeded
227** Description : Either enables or disables moderation
228** Programmer : Ralph Roesler
229** Last Modified: 22-mar-03
230** Returns : void (!)
231** Notes         : This function is called when a particular adapter is opened.
232**                 There is no Disable function, because once all interrupts
233**                 are disabled, the moderation timer has no meaning at all
234******************************************************************************/
235
236void
237SkDimEnableModerationIfNeeded(SK_AC *pAC) {
238
239 if (M_DIMINFO.IntModTypeSelect == C_INT_MOD_STATIC) {
240 EnableIntMod(pAC); /* notification print in this function */
241 } else if (M_DIMINFO.IntModTypeSelect == C_INT_MOD_DYNAMIC) {
242 SkDimStartModerationTimer(pAC);
243 if (M_DIMINFO.DisplayStats) {
244 printk("Dynamic moderation has been enabled\n");
245 }
246 } else {
247 if (M_DIMINFO.DisplayStats) {
248 printk("No moderation has been enabled\n");
249 }
250 }
251}
252
253/*******************************************************************************
254** Function : SkDimDisplayModerationSettings
255** Description : Displays the current settings regarding interrupt moderation
256** Programmer : Ralph Roesler
257** Last Modified: 22-mar-03
258** Returns : void (!)
259** Notes : -
260*******************************************************************************/
261
262void
263SkDimDisplayModerationSettings(SK_AC *pAC) {
264 DisplaySelectedModerationType(pAC);
265 DisplaySelectedModerationMask(pAC);
266}
267
268/*******************************************************************************
269**
270** Local functions
271**
272*******************************************************************************/
273
274/*******************************************************************************
275** Function : GetCurrentSystemLoad
276** Description  : Retrieves the current load of the system. This load
277** is evaluated for all processors within the system.
278** Programmer : Ralph Roesler
279** Last Modified: 22-mar-03
280** Returns : unsigned int: load expressed in percentage
281** Notes        : The possible range returned is from 0 up to 100, where
282**                0 means 'no load at all' and 100 'system fully loaded'.
283**                It is impossible to determine what actually drives the system
284**                to 100%, but it may well be due to too many interrupts.
285*******************************************************************************/
286
287static unsigned int
288GetCurrentSystemLoad(SK_AC *pAC) {
289 unsigned long jif = jiffies;
290 unsigned int UserTime = 0;
291 unsigned int SystemTime = 0;
292 unsigned int NiceTime = 0;
293 unsigned int IdleTime = 0;
294 unsigned int TotalTime = 0;
295 unsigned int UsedTime = 0;
296 unsigned int SystemLoad = 0;
297
298 /* unsigned int NbrCpu = 0; */
299
300 /*
301 ** The following lines have been commented out, because
302 ** from kernel 2.5.44 onwards, the kernel-owned structure
303 **
304 ** struct kernel_stat kstat
305 **
306 ** is not marked as an exported symbol in the file
307 **
308 ** kernel/ksyms.c
309 **
310 ** As a consequence, using this driver as KLM is not possible
311 ** and any access of the structure kernel_stat via the
312 ** dedicated macros kstat_cpu(i).cpustat.xxx is to be avoided.
313 **
314 ** The kstat-information might be added again in future
315 ** versions of the 2.5.xx kernel, but for the time being,
316	** the number of interrupts will serve as an indication of how much
317 ** load we currently have...
318 **
319 ** for (NbrCpu = 0; NbrCpu < num_online_cpus(); NbrCpu++) {
320 ** UserTime = UserTime + kstat_cpu(NbrCpu).cpustat.user;
321 ** NiceTime = NiceTime + kstat_cpu(NbrCpu).cpustat.nice;
322 ** SystemTime = SystemTime + kstat_cpu(NbrCpu).cpustat.system;
323 ** }
324 */
325 SK_U64 ThresholdInts = 0;
326 SK_U64 IsrCallsPerSec = 0;
327
328 ThresholdInts = ((M_DIMINFO.MaxModIntsPerSec *
329						 C_INT_MOD_ENABLE_PERCENTAGE) / 100);
330 IsrCallsPerSec = GetIsrCalls(pAC);
331 if (IsrCallsPerSec >= ThresholdInts) {
332 /*
333 ** We do not know how much the real CPU-load is!
334 ** Return 80% as a default in order to activate DIM
335 */
336 SystemLoad = 80;
337 return (SystemLoad);
338 }
339
340 UsedTime = UserTime + NiceTime + SystemTime;
341
342 IdleTime = jif * num_online_cpus() - UsedTime;
343 TotalTime = UsedTime + IdleTime;
344
345 SystemLoad = ( 100 * (UsedTime - M_DIMINFO.PrevUsedTime) ) /
346 (TotalTime - M_DIMINFO.PrevTotalTime);
347
348 if (M_DIMINFO.DisplayStats) {
349 printk("Current system load is: %u\n", SystemLoad);
350 }
351
352 M_DIMINFO.PrevTotalTime = TotalTime;
353 M_DIMINFO.PrevUsedTime = UsedTime;
354
355 return (SystemLoad);
356}
357
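The load figure is the share of busy ticks in the interval since the previous sample, i.e. 100 * (Used - PrevUsed) / (Total - PrevTotal). A small userspace sketch of just that calculation (struct and field names are illustrative):

#include <stdio.h>

struct dim_sample {
	unsigned int prev_used;   /* user + nice + system ticks at last call */
	unsigned int prev_total;  /* used + idle ticks at last call */
};

static unsigned int system_load_percent(struct dim_sample *s,
					unsigned int used, unsigned int total)
{
	unsigned int load;

	if (total <= s->prev_total)               /* guard against a zero divisor */
		return 0;
	load = (100 * (used - s->prev_used)) / (total - s->prev_total);
	s->prev_used  = used;
	s->prev_total = total;
	return load;
}

int main(void)
{
	struct dim_sample s = { .prev_used = 1000, .prev_total = 4000 };

	/* 500 busy ticks out of 2000 elapsed ticks -> 25% */
	printf("load = %u%%\n", system_load_percent(&s, 1500, 6000));
	return 0;
}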
358/*******************************************************************************
359** Function : GetIsrCalls
360** Description : Depending on the selected moderation mask, this function will
361** return the number of interrupts handled in the previous time-
362** frame. This evaluated number is based on the current number
363**                of interrupts stored in PNMI-context and the previously stored
364** interrupts.
365** Programmer : Ralph Roesler
366** Last Modified: 23-mar-03
367** Returns      : SK_U64: the number of interrupts handled in the last
368** timeframe
369** Notes        : It only makes sense to call this function when dynamic
370**                interrupt moderation is applied
371*******************************************************************************/
372
373static SK_U64
374GetIsrCalls(SK_AC *pAC) {
375 SK_U64 RxPort0IntDiff = 0;
376 SK_U64 RxPort1IntDiff = 0;
377 SK_U64 TxPort0IntDiff = 0;
378 SK_U64 TxPort1IntDiff = 0;
379
380 if (pAC->DynIrqModInfo.MaskIrqModeration == IRQ_MASK_TX_ONLY) {
381 if (pAC->GIni.GIMacsFound == 2) {
382 TxPort1IntDiff = pAC->Pnmi.Port[1].TxIntrCts -
383 pAC->DynIrqModInfo.PrevPort1TxIntrCts;
384 }
385 TxPort0IntDiff = pAC->Pnmi.Port[0].TxIntrCts -
386 pAC->DynIrqModInfo.PrevPort0TxIntrCts;
387 } else if (pAC->DynIrqModInfo.MaskIrqModeration == IRQ_MASK_RX_ONLY) {
388 if (pAC->GIni.GIMacsFound == 2) {
389 RxPort1IntDiff = pAC->Pnmi.Port[1].RxIntrCts -
390 pAC->DynIrqModInfo.PrevPort1RxIntrCts;
391 }
392 RxPort0IntDiff = pAC->Pnmi.Port[0].RxIntrCts -
393 pAC->DynIrqModInfo.PrevPort0RxIntrCts;
394 } else {
395 if (pAC->GIni.GIMacsFound == 2) {
396 RxPort1IntDiff = pAC->Pnmi.Port[1].RxIntrCts -
397 pAC->DynIrqModInfo.PrevPort1RxIntrCts;
398 TxPort1IntDiff = pAC->Pnmi.Port[1].TxIntrCts -
399 pAC->DynIrqModInfo.PrevPort1TxIntrCts;
400 }
401 RxPort0IntDiff = pAC->Pnmi.Port[0].RxIntrCts -
402 pAC->DynIrqModInfo.PrevPort0RxIntrCts;
403 TxPort0IntDiff = pAC->Pnmi.Port[0].TxIntrCts -
404 pAC->DynIrqModInfo.PrevPort0TxIntrCts;
405 }
406
407 return (RxPort0IntDiff + RxPort1IntDiff + TxPort0IntDiff + TxPort1IntDiff);
408}
409
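GetIsrCalls() and SetCurrIntCtr() together implement a snapshot/delta pattern: the PNMI counters only grow, so the interrupts of the last interval are the current values minus the stored ones. A reduced standalone sketch (simplified types and field names):

#include <stdio.h>
#include <stdint.h>

struct port_cnt {
	uint64_t rx_intr;
	uint64_t tx_intr;
};

static uint64_t intr_delta(const struct port_cnt *now, struct port_cnt *prev)
{
	uint64_t d = (now->rx_intr - prev->rx_intr) + (now->tx_intr - prev->tx_intr);

	*prev = *now;          /* corresponds to SetCurrIntCtr() */
	return d;
}

int main(void)
{
	struct port_cnt prev = { 100, 200 };
	struct port_cnt now  = { 150, 260 };

	printf("interrupts in interval: %llu\n",
	       (unsigned long long)intr_delta(&now, &prev));
	return 0;
}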
410/*******************************************************************************
411** Function : GetRxCalls
412** Description : This function will return the number of times a receive inter-
413** rupt was processed. This is needed to evaluate any resizing
414** factor.
415** Programmer : Ralph Roesler
416** Last Modified: 23-mar-03
417** Returns : SK_U64: the number of RX-ints being processed
418** Notes        : It only makes sense to call this function when dynamic
419**                interrupt moderation is applied
420*******************************************************************************/
421
422static SK_U64
423GetRxCalls(SK_AC *pAC) {
424 SK_U64 RxPort0IntDiff = 0;
425 SK_U64 RxPort1IntDiff = 0;
426
427 if (pAC->GIni.GIMacsFound == 2) {
428 RxPort1IntDiff = pAC->Pnmi.Port[1].RxIntrCts -
429 pAC->DynIrqModInfo.PrevPort1RxIntrCts;
430 }
431 RxPort0IntDiff = pAC->Pnmi.Port[0].RxIntrCts -
432 pAC->DynIrqModInfo.PrevPort0RxIntrCts;
433
434 return (RxPort0IntDiff + RxPort1IntDiff);
435}
436
437/*******************************************************************************
438** Function : SetCurrIntCtr
439** Description  : Stores the current number of occurred interrupts in the
440**                adapter context. This is needed to evaluate the number of
441**                interrupts within the current timeframe.
442** Programmer : Ralph Roesler
443** Last Modified: 23-mar-03
444** Returns : void (!)
445** Notes : -
446*******************************************************************************/
447
448static void
449SetCurrIntCtr(SK_AC *pAC) {
450 if (pAC->GIni.GIMacsFound == 2) {
451 pAC->DynIrqModInfo.PrevPort1RxIntrCts = pAC->Pnmi.Port[1].RxIntrCts;
452 pAC->DynIrqModInfo.PrevPort1TxIntrCts = pAC->Pnmi.Port[1].TxIntrCts;
453 }
454 pAC->DynIrqModInfo.PrevPort0RxIntrCts = pAC->Pnmi.Port[0].RxIntrCts;
455 pAC->DynIrqModInfo.PrevPort0TxIntrCts = pAC->Pnmi.Port[0].TxIntrCts;
456}
457
458/*******************************************************************************
459** Function : IsIntModEnabled()
460** Description  : Retrieves the current value of the interrupt moderation
461** command register. Its content determines whether any
462** moderation is running or not.
463** Programmer : Ralph Roesler
464** Last Modified: 23-mar-03
465** Returns : SK_TRUE : if mod timer running
466** SK_FALSE : if no moderation is being performed
467** Notes : -
468*******************************************************************************/
469
470static SK_BOOL
471IsIntModEnabled(SK_AC *pAC) {
472 unsigned long CtrCmd;
473
474 SK_IN32(pAC->IoBase, B2_IRQM_CTRL, &CtrCmd);
475 if ((CtrCmd & TIM_START) == TIM_START) {
476 return SK_TRUE;
477 } else {
478 return SK_FALSE;
479 }
480}
481
482/*******************************************************************************
483** Function : EnableIntMod()
484** Description : Enables the interrupt moderation using the values stored in
485**                the pAC->DynIrqModInfo data structure
486** Programmer : Ralph Roesler
487** Last Modified: 22-mar-03
488** Returns : -
489** Notes : -
490*******************************************************************************/
491
492static void
493EnableIntMod(SK_AC *pAC) {
494 unsigned long ModBase;
495
496 if (pAC->GIni.GIChipId == CHIP_ID_GENESIS) {
497 ModBase = C_CLK_FREQ_GENESIS / pAC->DynIrqModInfo.MaxModIntsPerSec;
498 } else {
499 ModBase = C_CLK_FREQ_YUKON / pAC->DynIrqModInfo.MaxModIntsPerSec;
500 }
501
502 SK_OUT32(pAC->IoBase, B2_IRQM_INI, ModBase);
503 SK_OUT32(pAC->IoBase, B2_IRQM_MSK, pAC->DynIrqModInfo.MaskIrqModeration);
504 SK_OUT32(pAC->IoBase, B2_IRQM_CTRL, TIM_START);
505 if (M_DIMINFO.DisplayStats) {
506 printk("Enabled interrupt moderation (%i ints/sec)\n",
507 M_DIMINFO.MaxModIntsPerSec);
508 }
509}
510
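The value written to B2_IRQM_INI is just the adapter clock divided by the desired interrupt rate, so the moderation timer fires MaxModIntsPerSec times per second. A sketch of that arithmetic with a hypothetical clock frequency (not the driver's C_CLK_FREQ_* constants):

#include <stdio.h>

#define CLK_FREQ_ASSUMED 62500000UL   /* hypothetical 62.5 MHz core clock */

static unsigned long irqm_ini_value(unsigned long max_ints_per_sec)
{
	return CLK_FREQ_ASSUMED / max_ints_per_sec;
}

int main(void)
{
	/* 2000 ints/sec with a 62.5 MHz clock -> timer initial value 31250 */
	printf("B2_IRQM_INI = %lu\n", irqm_ini_value(2000));
	return 0;
}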
511/*******************************************************************************
512** Function : DisableIntMod()
513** Description  : Disables the interrupt moderation regardless of which
514**                interrupts are currently pending
515** Programmer : Ralph Roesler
516** Last Modified: 23-mar-03
517** Returns : -
518** Notes : -
519*******************************************************************************/
520
521static void
522DisableIntMod(SK_AC *pAC) {
523
524 SK_OUT32(pAC->IoBase, B2_IRQM_CTRL, TIM_STOP);
525 if (M_DIMINFO.DisplayStats) {
526 printk("Disabled interrupt moderation\n");
527 }
528}
529
530/*******************************************************************************
531** Function : ResizeDimTimerDuration();
532** Description  : Checks the currently used descriptor ratio and resizes the
533**                timer duration (longer/shorter) if possible.
534** Programmer : Ralph Roesler
535** Last Modified: 23-mar-03
536** Returns : -
537** Notes        : There are both maximum and minimum timer duration values.
538** This function assumes that interrupt moderation is already
539** enabled!
540*******************************************************************************/
541
542static void
543ResizeDimTimerDuration(SK_AC *pAC) {
544 SK_BOOL IncreaseTimerDuration;
545 int TotalMaxNbrDescr;
546 int UsedDescrRatio;
547 int RatioDiffAbs;
548 int RatioDiffRel;
549 int NewMaxModIntsPerSec;
550 int ModAdjValue;
551 long ModBase;
552
553 /*
554 ** Check first if we are allowed to perform any modification
555 */
556 if (IsIntModEnabled(pAC)) {
557 if (M_DIMINFO.IntModTypeSelect != C_INT_MOD_DYNAMIC) {
558 return;
559 } else {
560 if (M_DIMINFO.ModJustEnabled) {
561 M_DIMINFO.ModJustEnabled = SK_FALSE;
562 return;
563 }
564 }
565 }
566
567 /*
568 ** If we got until here, we have to evaluate the amount of the
569 ** descriptor ratio change...
570 */
571 TotalMaxNbrDescr = pAC->RxDescrPerRing * GetRxCalls(pAC);
572 UsedDescrRatio = (M_DIMINFO.NbrProcessedDescr * 100) / TotalMaxNbrDescr;
573
574 if (UsedDescrRatio > M_DIMINFO.PrevUsedDescrRatio) {
575 RatioDiffAbs = (UsedDescrRatio - M_DIMINFO.PrevUsedDescrRatio);
576 RatioDiffRel = (RatioDiffAbs * 100) / UsedDescrRatio;
577 M_DIMINFO.PrevUsedDescrRatio = UsedDescrRatio;
578 IncreaseTimerDuration = SK_FALSE; /* in other words: DECREASE */
579 } else if (UsedDescrRatio < M_DIMINFO.PrevUsedDescrRatio) {
580 RatioDiffAbs = (M_DIMINFO.PrevUsedDescrRatio - UsedDescrRatio);
581 RatioDiffRel = (RatioDiffAbs * 100) / M_DIMINFO.PrevUsedDescrRatio;
582 M_DIMINFO.PrevUsedDescrRatio = UsedDescrRatio;
583 IncreaseTimerDuration = SK_TRUE; /* in other words: INCREASE */
584 } else {
585 RatioDiffAbs = (M_DIMINFO.PrevUsedDescrRatio - UsedDescrRatio);
586 RatioDiffRel = (RatioDiffAbs * 100) / M_DIMINFO.PrevUsedDescrRatio;
587 M_DIMINFO.PrevUsedDescrRatio = UsedDescrRatio;
588 IncreaseTimerDuration = SK_TRUE; /* in other words: INCREASE */
589 }
590
591 /*
592 ** Now we can determine the change in percent
593 */
594 if ((RatioDiffRel >= 0) && (RatioDiffRel <= 5) ) {
595 ModAdjValue = 1; /* 1% change - maybe some other value in future */
596 } else if ((RatioDiffRel > 5) && (RatioDiffRel <= 10) ) {
597 ModAdjValue = 1; /* 1% change - maybe some other value in future */
598 } else if ((RatioDiffRel > 10) && (RatioDiffRel <= 15) ) {
599 ModAdjValue = 1; /* 1% change - maybe some other value in future */
600 } else {
601 ModAdjValue = 1; /* 1% change - maybe some other value in future */
602 }
603
604 if (IncreaseTimerDuration) {
605 NewMaxModIntsPerSec = M_DIMINFO.MaxModIntsPerSec +
606 (M_DIMINFO.MaxModIntsPerSec * ModAdjValue) / 100;
607 } else {
608 NewMaxModIntsPerSec = M_DIMINFO.MaxModIntsPerSec -
609 (M_DIMINFO.MaxModIntsPerSec * ModAdjValue) / 100;
610 }
611
612 /*
613 ** Check if we exceed boundaries...
614 */
615 if ( (NewMaxModIntsPerSec > M_DIMINFO.MaxModIntsPerSecUpperLimit) ||
616 (NewMaxModIntsPerSec < M_DIMINFO.MaxModIntsPerSecLowerLimit)) {
617 if (M_DIMINFO.DisplayStats) {
618 printk("Cannot change ModTim from %i to %i ints/sec\n",
619 M_DIMINFO.MaxModIntsPerSec, NewMaxModIntsPerSec);
620 }
621 return;
622 } else {
623 if (M_DIMINFO.DisplayStats) {
624 printk("Resized ModTim from %i to %i ints/sec\n",
625 M_DIMINFO.MaxModIntsPerSec, NewMaxModIntsPerSec);
626 }
627 }
628
629 M_DIMINFO.MaxModIntsPerSec = NewMaxModIntsPerSec;
630
631 if (pAC->GIni.GIChipId == CHIP_ID_GENESIS) {
632 ModBase = C_CLK_FREQ_GENESIS / pAC->DynIrqModInfo.MaxModIntsPerSec;
633 } else {
634 ModBase = C_CLK_FREQ_YUKON / pAC->DynIrqModInfo.MaxModIntsPerSec;
635 }
636
637 /*
638 ** We do not need to touch any other registers
639 */
640 SK_OUT32(pAC->IoBase, B2_IRQM_INI, ModBase);
641}
642
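The resize step computes the used-descriptor ratio, nudges the target interrupt rate by ModAdjValue percent, and keeps the old rate if the new one would leave the configured limits. A standalone sketch with illustrative numbers:

#include <stdio.h>

static int resize_rate(int max_ints, int adj_percent, int increase,
		       int lower_limit, int upper_limit)
{
	int new_rate = increase
		? max_ints + (max_ints * adj_percent) / 100
		: max_ints - (max_ints * adj_percent) / 100;

	if (new_rate > upper_limit || new_rate < lower_limit)
		return max_ints;        /* keep the old rate, as the driver does */
	return new_rate;
}

int main(void)
{
	int ratio      = (300 * 100) / 1200;   /* 300 of 1200 descriptors -> 25% */
	int prev_ratio = 40;
	int increase   = ratio < prev_ratio;   /* mirrors IncreaseTimerDuration above */

	printf("ratio=%d%%, new rate=%d ints/sec\n", ratio,
	       resize_rate(2000, 1, increase, 500, 5000));
	return 0;
}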
643/*******************************************************************************
644** Function : DisplaySelectedModerationType()
645** Description : Displays what type of moderation we have
646** Programmer : Ralph Roesler
647** Last Modified: 23-mar-03
648** Returns : void!
649** Notes : -
650*******************************************************************************/
651
652static void
653DisplaySelectedModerationType(SK_AC *pAC) {
654
655 if (pAC->DynIrqModInfo.DisplayStats) {
656 if (pAC->DynIrqModInfo.IntModTypeSelect == C_INT_MOD_STATIC) {
657 printk("Static int moderation runs with %i INTS/sec\n",
658 pAC->DynIrqModInfo.MaxModIntsPerSec);
659 } else if (pAC->DynIrqModInfo.IntModTypeSelect == C_INT_MOD_DYNAMIC) {
660 if (IsIntModEnabled(pAC)) {
661 printk("Dynamic int moderation runs with %i INTS/sec\n",
662 pAC->DynIrqModInfo.MaxModIntsPerSec);
663 } else {
664 printk("Dynamic int moderation currently not applied\n");
665 }
666 } else {
667 printk("No interrupt moderation selected!\n");
668 }
669 }
670}
671
672/*******************************************************************************
673** Function : DisplaySelectedModerationMask()
674** Description : Displays what interrupts are moderated
675** Programmer : Ralph Roesler
676** Last Modified: 23-mar-03
677** Returns : void!
678** Notes : -
679*******************************************************************************/
680
681static void
682DisplaySelectedModerationMask(SK_AC *pAC) {
683
684 if (pAC->DynIrqModInfo.DisplayStats) {
685 if (pAC->DynIrqModInfo.IntModTypeSelect != C_INT_MOD_NONE) {
686 switch (pAC->DynIrqModInfo.MaskIrqModeration) {
687 case IRQ_MASK_TX_ONLY:
688 printk("Only Tx-interrupts are moderated\n");
689 break;
690 case IRQ_MASK_RX_ONLY:
691 printk("Only Rx-interrupts are moderated\n");
692 break;
693 case IRQ_MASK_SP_ONLY:
694 printk("Only special-interrupts are moderated\n");
695 break;
696 case IRQ_MASK_TX_RX:
697 printk("Tx- and Rx-interrupts are moderated\n");
698 break;
699 case IRQ_MASK_SP_RX:
700 printk("Special- and Rx-interrupts are moderated\n");
701 break;
702 case IRQ_MASK_SP_TX:
703 printk("Special- and Tx-interrupts are moderated\n");
704 break;
705 case IRQ_MASK_RX_TX_SP:
706 printk("All Rx-, Tx and special-interrupts are moderated\n");
707 break;
708 default:
709 printk("Don't know what is moderated\n");
710 break;
711 }
712 } else {
713 printk("No specific interrupts masked for moderation\n");
714 }
715 }
716}
717
718/*******************************************************************************
719** Function : DisplayDescrRatio
720** Description  : Displays the ratio of processed to maximum available receive descriptors
721** Programmer : Ralph Roesler
722** Last Modified: 23-mar-03
723** Returns : void!
724** Notes : -
725*******************************************************************************/
726
727static void
728DisplayDescrRatio(SK_AC *pAC) {
729 int TotalMaxNbrDescr = 0;
730
731 if (pAC->DynIrqModInfo.DisplayStats) {
732 TotalMaxNbrDescr = pAC->RxDescrPerRing * GetRxCalls(pAC);
733 printk("Ratio descriptors: %i/%i\n",
734 M_DIMINFO.NbrProcessedDescr, TotalMaxNbrDescr);
735 }
736}
737
738/*******************************************************************************
739**
740** End of file
741**
742*******************************************************************************/
diff --git a/drivers/net/sk98lin/skethtool.c b/drivers/net/sk98lin/skethtool.c
deleted file mode 100644
index 5a6da8950faa..000000000000
--- a/drivers/net/sk98lin/skethtool.c
+++ /dev/null
@@ -1,627 +0,0 @@
1/******************************************************************************
2 *
3 * Name: skethtool.c
4 * Project: GEnesis, PCI Gigabit Ethernet Adapter
5 * Version: $Revision: 1.7 $
6 * Date: $Date: 2004/09/29 13:32:07 $
7 * Purpose: All functions regarding ethtool handling
8 *
9 ******************************************************************************/
10
11/******************************************************************************
12 *
13 * (C)Copyright 1998-2002 SysKonnect GmbH.
14 * (C)Copyright 2002-2004 Marvell.
15 *
16 * Driver for Marvell Yukon/2 chipset and SysKonnect Gigabit Ethernet
17 * Server Adapters.
18 *
19 * Author: Ralph Roesler (rroesler@syskonnect.de)
20 * Mirko Lindner (mlindner@syskonnect.de)
21 *
22 * Address all question to: linux@syskonnect.de
23 *
24 * The technical manual for the adapters is available from SysKonnect's
25 * web pages: www.syskonnect.com
26 *
27 * This program is free software; you can redistribute it and/or modify
28 * it under the terms of the GNU General Public License as published by
29 * the Free Software Foundation; either version 2 of the License, or
30 * (at your option) any later version.
31 *
32 * The information in this file is provided "AS IS" without warranty.
33 *
34 *****************************************************************************/
35
36#include "h/skdrv1st.h"
37#include "h/skdrv2nd.h"
38#include "h/skversion.h"
39
40#include <linux/ethtool.h>
41#include <linux/timer.h>
42#include <linux/delay.h>
43
44/******************************************************************************
45 *
46 * Defines
47 *
48 *****************************************************************************/
49
50#define SUPP_COPPER_ALL (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | \
51 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | \
52 SUPPORTED_1000baseT_Half| SUPPORTED_1000baseT_Full| \
53 SUPPORTED_TP)
54
55#define ADV_COPPER_ALL (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
56 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
57 ADVERTISED_1000baseT_Half| ADVERTISED_1000baseT_Full| \
58 ADVERTISED_TP)
59
60#define SUPP_FIBRE_ALL (SUPPORTED_1000baseT_Full | \
61 SUPPORTED_FIBRE | \
62 SUPPORTED_Autoneg)
63
64#define ADV_FIBRE_ALL (ADVERTISED_1000baseT_Full | \
65 ADVERTISED_FIBRE | \
66 ADVERTISED_Autoneg)
67
68
69/******************************************************************************
70 *
71 * Local Functions
72 *
73 *****************************************************************************/
74
75/*****************************************************************************
76 *
77 * getSettings - retrieves the current settings of the selected adapter
78 *
79 * Description:
80 * The current configuration of the selected adapter is returned.
81 * This configuration involves a)speed, b)duplex and c)autoneg plus
82 * a number of other variables.
83 *
84 * Returns: always 0
85 *
86 */
87static int getSettings(struct net_device *dev, struct ethtool_cmd *ecmd)
88{
89 const DEV_NET *pNet = netdev_priv(dev);
90 int port = pNet->PortNr;
91 const SK_AC *pAC = pNet->pAC;
92 const SK_GEPORT *pPort = &pAC->GIni.GP[port];
93
94 static int DuplexAutoNegConfMap[9][3]= {
95 { -1 , -1 , -1 },
96 { 0 , -1 , -1 },
97 { SK_LMODE_HALF , DUPLEX_HALF, AUTONEG_DISABLE },
98 { SK_LMODE_FULL , DUPLEX_FULL, AUTONEG_DISABLE },
99 { SK_LMODE_AUTOHALF , DUPLEX_HALF, AUTONEG_ENABLE },
100 { SK_LMODE_AUTOFULL , DUPLEX_FULL, AUTONEG_ENABLE },
101 { SK_LMODE_AUTOBOTH , DUPLEX_FULL, AUTONEG_ENABLE },
102 { SK_LMODE_AUTOSENSE , -1 , -1 },
103 { SK_LMODE_INDETERMINATED, -1 , -1 }
104 };
105 static int SpeedConfMap[6][2] = {
106 { 0 , -1 },
107 { SK_LSPEED_AUTO , -1 },
108 { SK_LSPEED_10MBPS , SPEED_10 },
109 { SK_LSPEED_100MBPS , SPEED_100 },
110 { SK_LSPEED_1000MBPS , SPEED_1000 },
111 { SK_LSPEED_INDETERMINATED, -1 }
112 };
113 static int AdvSpeedMap[6][2] = {
114 { 0 , -1 },
115 { SK_LSPEED_AUTO , -1 },
116 { SK_LSPEED_10MBPS , ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full },
117 { SK_LSPEED_100MBPS , ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full },
118 { SK_LSPEED_1000MBPS , ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full},
119 { SK_LSPEED_INDETERMINATED, -1 }
120 };
121
122 ecmd->phy_address = port;
123 ecmd->speed = SpeedConfMap[pPort->PLinkSpeedUsed][1];
124 ecmd->duplex = DuplexAutoNegConfMap[pPort->PLinkModeStatus][1];
125 ecmd->autoneg = DuplexAutoNegConfMap[pPort->PLinkModeStatus][2];
126 ecmd->transceiver = XCVR_INTERNAL;
127
128 if (pAC->GIni.GICopperType) {
129 ecmd->port = PORT_TP;
130 ecmd->supported = (SUPP_COPPER_ALL|SUPPORTED_Autoneg);
131 if (pAC->GIni.GIGenesis) {
132 ecmd->supported &= ~(SUPPORTED_10baseT_Half);
133 ecmd->supported &= ~(SUPPORTED_10baseT_Full);
134 ecmd->supported &= ~(SUPPORTED_100baseT_Half);
135 ecmd->supported &= ~(SUPPORTED_100baseT_Full);
136 } else {
137 if (pAC->GIni.GIChipId == CHIP_ID_YUKON) {
138 ecmd->supported &= ~(SUPPORTED_1000baseT_Half);
139 }
140#ifdef CHIP_ID_YUKON_FE
141 if (pAC->GIni.GIChipId == CHIP_ID_YUKON_FE) {
142 ecmd->supported &= ~(SUPPORTED_1000baseT_Half);
143 ecmd->supported &= ~(SUPPORTED_1000baseT_Full);
144 }
145#endif
146 }
147 if (pAC->GIni.GP[0].PLinkSpeed != SK_LSPEED_AUTO) {
148 ecmd->advertising = AdvSpeedMap[pPort->PLinkSpeed][1];
149 if (pAC->GIni.GIChipId == CHIP_ID_YUKON) {
150 ecmd->advertising &= ~(SUPPORTED_1000baseT_Half);
151 }
152 } else {
153 ecmd->advertising = ecmd->supported;
154 }
155
156 if (ecmd->autoneg == AUTONEG_ENABLE)
157 ecmd->advertising |= ADVERTISED_Autoneg;
158 } else {
159 ecmd->port = PORT_FIBRE;
160 ecmd->supported = SUPP_FIBRE_ALL;
161 ecmd->advertising = ADV_FIBRE_ALL;
162 }
163 return 0;
164}
165
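The tables above are plain index maps from the driver's link-mode codes to ethtool duplex/autoneg values. A reduced standalone version of the same lookup idea (enum values and table contents are illustrative):

#include <stdio.h>

enum { LMODE_HALF = 2, LMODE_FULL, LMODE_AUTOHALF, LMODE_AUTOFULL };

static const struct { int duplex_full; int autoneg; } duplex_map[] = {
	[LMODE_HALF]     = { 0, 0 },
	[LMODE_FULL]     = { 1, 0 },
	[LMODE_AUTOHALF] = { 0, 1 },
	[LMODE_AUTOFULL] = { 1, 1 },
};

int main(void)
{
	int mode = LMODE_AUTOFULL;

	printf("duplex=%s autoneg=%s\n",
	       duplex_map[mode].duplex_full ? "full" : "half",
	       duplex_map[mode].autoneg ? "on" : "off");
	return 0;
}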
166/*
167 * MIB infrastructure uses instance value starting at 1
168 * based on board and port.
169 */
170static inline u32 pnmiInstance(const DEV_NET *pNet)
171{
172 return 1 + (pNet->pAC->RlmtNets == 2) + pNet->PortNr;
173}
174
175/*****************************************************************************
176 *
177 * setSettings - configures the settings of a selected adapter
178 *
179 * Description:
180 * Possible settings that may be altered are a)speed, b)duplex or
181 * c)autonegotiation.
182 *
183 * Returns:
184 * 0: everything fine, no error
185 * <0: the return value is the error code of the failure
186 */
187static int setSettings(struct net_device *dev, struct ethtool_cmd *ecmd)
188{
189 DEV_NET *pNet = netdev_priv(dev);
190 SK_AC *pAC = pNet->pAC;
191 u32 instance;
192 char buf[4];
193 int len = 1;
194
195 if (ecmd->speed != SPEED_10 && ecmd->speed != SPEED_100
196 && ecmd->speed != SPEED_1000)
197 return -EINVAL;
198
199 if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
200 return -EINVAL;
201
202 if (ecmd->autoneg != AUTONEG_DISABLE && ecmd->autoneg != AUTONEG_ENABLE)
203 return -EINVAL;
204
205 if (ecmd->autoneg == AUTONEG_DISABLE)
206 *buf = (ecmd->duplex == DUPLEX_FULL)
207 ? SK_LMODE_FULL : SK_LMODE_HALF;
208 else
209 *buf = (ecmd->duplex == DUPLEX_FULL)
210 ? SK_LMODE_AUTOFULL : SK_LMODE_AUTOHALF;
211
212 instance = pnmiInstance(pNet);
213 if (SkPnmiSetVar(pAC, pAC->IoBase, OID_SKGE_LINK_MODE,
214 &buf, &len, instance, pNet->NetNr) != SK_PNMI_ERR_OK)
215 return -EINVAL;
216
217 switch(ecmd->speed) {
218 case SPEED_1000:
219 *buf = SK_LSPEED_1000MBPS;
220 break;
221 case SPEED_100:
222 *buf = SK_LSPEED_100MBPS;
223 break;
224 case SPEED_10:
225 *buf = SK_LSPEED_10MBPS;
226 }
227
228 if (SkPnmiSetVar(pAC, pAC->IoBase, OID_SKGE_SPEED_MODE,
229 &buf, &len, instance, pNet->NetNr) != SK_PNMI_ERR_OK)
230 return -EINVAL;
231
232 return 0;
233}
234
235/*****************************************************************************
236 *
237 * getDriverInfo - returns generic driver and adapter information
238 *
239 * Description:
240 * Generic driver information is returned via this function, such as
241 *	the name of the driver, its version and the firmware version.
242 * In addition to this, the location of the selected adapter is
243 * returned as a bus info string (e.g. '01:05.0').
244 *
245 * Returns: N/A
246 *
247 */
248static void getDriverInfo(struct net_device *dev, struct ethtool_drvinfo *info)
249{
250 const DEV_NET *pNet = netdev_priv(dev);
251 const SK_AC *pAC = pNet->pAC;
252 char vers[32];
253
254 snprintf(vers, sizeof(vers)-1, VER_STRING "(v%d.%d)",
255 (pAC->GIni.GIPciHwRev >> 4) & 0xf, pAC->GIni.GIPciHwRev & 0xf);
256
257 strlcpy(info->driver, DRIVER_FILE_NAME, sizeof(info->driver));
258 strcpy(info->version, vers);
259 strcpy(info->fw_version, "N/A");
260 strlcpy(info->bus_info, pci_name(pAC->PciDev), ETHTOOL_BUSINFO_LEN);
261}
262
263/*
264 * Ethtool statistics support.
265 */
266static const char StringsStats[][ETH_GSTRING_LEN] = {
267 "rx_packets", "tx_packets",
268 "rx_bytes", "tx_bytes",
269 "rx_errors", "tx_errors",
270 "rx_dropped", "tx_dropped",
271 "multicasts", "collisions",
272 "rx_length_errors", "rx_buffer_overflow_errors",
273 "rx_crc_errors", "rx_frame_errors",
274 "rx_too_short_errors", "rx_too_long_errors",
275 "rx_carrier_extension_errors", "rx_symbol_errors",
276 "rx_llc_mac_size_errors", "rx_carrier_errors",
277 "rx_jabber_errors", "rx_missed_errors",
278 "tx_abort_collision_errors", "tx_carrier_errors",
279 "tx_buffer_underrun_errors", "tx_heartbeat_errors",
280 "tx_window_errors",
281};
282
283static int getStatsCount(struct net_device *dev)
284{
285 return ARRAY_SIZE(StringsStats);
286}
287
288static void getStrings(struct net_device *dev, u32 stringset, u8 *data)
289{
290 switch(stringset) {
291 case ETH_SS_STATS:
292 memcpy(data, *StringsStats, sizeof(StringsStats));
293 break;
294 }
295}
296
297static void getEthtoolStats(struct net_device *dev,
298 struct ethtool_stats *stats, u64 *data)
299{
300 const DEV_NET *pNet = netdev_priv(dev);
301 const SK_AC *pAC = pNet->pAC;
302 const SK_PNMI_STRUCT_DATA *pPnmiStruct = &pAC->PnmiStruct;
303
304 *data++ = pPnmiStruct->Stat[0].StatRxOkCts;
305 *data++ = pPnmiStruct->Stat[0].StatTxOkCts;
306 *data++ = pPnmiStruct->Stat[0].StatRxOctetsOkCts;
307 *data++ = pPnmiStruct->Stat[0].StatTxOctetsOkCts;
308 *data++ = pPnmiStruct->InErrorsCts;
309 *data++ = pPnmiStruct->Stat[0].StatTxSingleCollisionCts;
310 *data++ = pPnmiStruct->RxNoBufCts;
311 *data++ = pPnmiStruct->TxNoBufCts;
312 *data++ = pPnmiStruct->Stat[0].StatRxMulticastOkCts;
313 *data++ = pPnmiStruct->Stat[0].StatTxSingleCollisionCts;
314 *data++ = pPnmiStruct->Stat[0].StatRxRuntCts;
315 *data++ = pPnmiStruct->Stat[0].StatRxFifoOverflowCts;
316 *data++ = pPnmiStruct->Stat[0].StatRxFcsCts;
317 *data++ = pPnmiStruct->Stat[0].StatRxFramingCts;
318 *data++ = pPnmiStruct->Stat[0].StatRxShortsCts;
319 *data++ = pPnmiStruct->Stat[0].StatRxTooLongCts;
320 *data++ = pPnmiStruct->Stat[0].StatRxCextCts;
321 *data++ = pPnmiStruct->Stat[0].StatRxSymbolCts;
322 *data++ = pPnmiStruct->Stat[0].StatRxIRLengthCts;
323 *data++ = pPnmiStruct->Stat[0].StatRxCarrierCts;
324 *data++ = pPnmiStruct->Stat[0].StatRxJabberCts;
325 *data++ = pPnmiStruct->Stat[0].StatRxMissedCts;
326 *data++ = pAC->stats.tx_aborted_errors;
327 *data++ = pPnmiStruct->Stat[0].StatTxCarrierCts;
328 *data++ = pPnmiStruct->Stat[0].StatTxFifoUnderrunCts;
329 *data++ = pPnmiStruct->Stat[0].StatTxCarrierCts;
330 *data++ = pAC->stats.tx_window_errors;
331}
332
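The string table and the value sequence written by getEthtoolStats() must stay in the same order and have the same length. A standalone sketch of one way to guard that invariant with parallel arrays and a compile-time size check (this is not how the driver does it, just an illustration):

#include <stdio.h>

static const char *stat_names[] = { "rx_packets", "tx_packets", "rx_bytes" };
static unsigned long long stat_vals[] = { 10, 20, 3000 };

/* compile-time guard: array size is negative (and fails) if the lengths differ */
typedef char stats_tables_match[
	(sizeof(stat_names) / sizeof(stat_names[0]) ==
	 sizeof(stat_vals) / sizeof(stat_vals[0])) ? 1 : -1];

int main(void)
{
	size_t i;

	for (i = 0; i < sizeof(stat_names) / sizeof(stat_names[0]); i++)
		printf("%-12s %llu\n", stat_names[i], stat_vals[i]);
	return 0;
}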
333
334/*****************************************************************************
335 *
336 * toggleLeds - Changes the LED state of an adapter
337 *
338 * Description:
339 * This function changes the current state of all LEDs of an adapter so
340 * that it can be located by a user.
341 *
342 * Returns: N/A
343 *
344 */
345static void toggleLeds(DEV_NET *pNet, int on)
346{
347 SK_AC *pAC = pNet->pAC;
348 int port = pNet->PortNr;
349 void __iomem *io = pAC->IoBase;
350
351 if (pAC->GIni.GIGenesis) {
352 SK_OUT8(io, MR_ADDR(port,LNK_LED_REG),
353 on ? SK_LNK_ON : SK_LNK_OFF);
354 SkGeYellowLED(pAC, io,
355 on ? (LED_ON >> 1) : (LED_OFF >> 1));
356 SkGeXmitLED(pAC, io, MR_ADDR(port,RX_LED_INI),
357 on ? SK_LED_TST : SK_LED_DIS);
358
359 if (pAC->GIni.GP[port].PhyType == SK_PHY_BCOM)
360 SkXmPhyWrite(pAC, io, port, PHY_BCOM_P_EXT_CTRL,
361 on ? PHY_B_PEC_LED_ON : PHY_B_PEC_LED_OFF);
362 else if (pAC->GIni.GP[port].PhyType == SK_PHY_LONE)
363 SkXmPhyWrite(pAC, io, port, PHY_LONE_LED_CFG,
364 on ? 0x0800 : PHY_L_LC_LEDT);
365 else
366 SkGeXmitLED(pAC, io, MR_ADDR(port,TX_LED_INI),
367 on ? SK_LED_TST : SK_LED_DIS);
368 } else {
369 const u16 YukLedOn = (PHY_M_LED_MO_DUP(MO_LED_ON) |
370 PHY_M_LED_MO_10(MO_LED_ON) |
371 PHY_M_LED_MO_100(MO_LED_ON) |
372 PHY_M_LED_MO_1000(MO_LED_ON) |
373 PHY_M_LED_MO_RX(MO_LED_ON));
374 const u16 YukLedOff = (PHY_M_LED_MO_DUP(MO_LED_OFF) |
375 PHY_M_LED_MO_10(MO_LED_OFF) |
376 PHY_M_LED_MO_100(MO_LED_OFF) |
377 PHY_M_LED_MO_1000(MO_LED_OFF) |
378 PHY_M_LED_MO_RX(MO_LED_OFF));
379
380
381 SkGmPhyWrite(pAC,io,port,PHY_MARV_LED_CTRL,0);
382 SkGmPhyWrite(pAC,io,port,PHY_MARV_LED_OVER,
383 on ? YukLedOn : YukLedOff);
384 }
385}
386
387/*****************************************************************************
388 *
389 * SkGeBlinkTimer - Changes the LED state of an adapter
390 *
391 * Description:
392 * This function changes the current state of all LEDs of an adapter so
393 * that it can be located by a user. If the requested time interval for
394 * this test has elapsed, this function cleans up everything that was
395 *	temporarily set up during the locate NIC test. This involves of course
396 * also closing or opening any adapter so that the initial board state
397 * is recovered.
398 *
399 * Returns: N/A
400 *
401 */
402void SkGeBlinkTimer(unsigned long data)
403{
404 struct net_device *dev = (struct net_device *) data;
405 DEV_NET *pNet = netdev_priv(dev);
406 SK_AC *pAC = pNet->pAC;
407
408 toggleLeds(pNet, pAC->LedsOn);
409
410 pAC->LedsOn = !pAC->LedsOn;
411 mod_timer(&pAC->BlinkTimer, jiffies + HZ/4);
412}
413
414/*****************************************************************************
415 *
416 * locateDevice - start the locate NIC feature of the selected adapter
417 *
418 * Description:
419 *	This function is used if the user wants to locate a particular NIC.
420 * All LEDs are regularly switched on and off, so the NIC can easily
421 * be identified.
422 *
423 * Returns:
424 * ==0: everything fine, no error, locateNIC test was started
425 * !=0: one locateNIC test runs already
426 *
427 */
428static int locateDevice(struct net_device *dev, u32 data)
429{
430 DEV_NET *pNet = netdev_priv(dev);
431 SK_AC *pAC = pNet->pAC;
432
433 if(!data || data > (u32)(MAX_SCHEDULE_TIMEOUT / HZ))
434 data = (u32)(MAX_SCHEDULE_TIMEOUT / HZ);
435
436 /* start blinking */
437 pAC->LedsOn = 0;
438 mod_timer(&pAC->BlinkTimer, jiffies);
439 msleep_interruptible(data * 1000);
440 del_timer_sync(&pAC->BlinkTimer);
441 toggleLeds(pNet, 0);
442
443 return 0;
444}
445
446/*****************************************************************************
447 *
448 * getPauseParams - retrieves the pause parameters
449 *
450 * Description:
451 * All current pause parameters of a selected adapter are placed
452 * in the passed ethtool_pauseparam structure and are returned.
453 *
454 * Returns: N/A
455 *
456 */
457static void getPauseParams(struct net_device *dev, struct ethtool_pauseparam *epause)
458{
459 DEV_NET *pNet = netdev_priv(dev);
460 SK_AC *pAC = pNet->pAC;
461 SK_GEPORT *pPort = &pAC->GIni.GP[pNet->PortNr];
462
463 epause->rx_pause = (pPort->PFlowCtrlMode == SK_FLOW_MODE_SYMMETRIC) ||
464 (pPort->PFlowCtrlMode == SK_FLOW_MODE_SYM_OR_REM);
465
466 epause->tx_pause = epause->rx_pause || (pPort->PFlowCtrlMode == SK_FLOW_MODE_LOC_SEND);
467 epause->autoneg = epause->rx_pause || epause->tx_pause;
468}
469
470/*****************************************************************************
471 *
472 * setPauseParams - configures the pause parameters of an adapter
473 *
474 * Description:
475 * This function sets the Rx or Tx pause parameters
476 *
477 * Returns:
478 * ==0: everything fine, no error
479 * !=0: the return value is the error code of the failure
480 */
481static int setPauseParams(struct net_device *dev , struct ethtool_pauseparam *epause)
482{
483 DEV_NET *pNet = netdev_priv(dev);
484 SK_AC *pAC = pNet->pAC;
485 SK_GEPORT *pPort = &pAC->GIni.GP[pNet->PortNr];
486 u32 instance = pnmiInstance(pNet);
487 struct ethtool_pauseparam old;
488 u8 oldspeed = pPort->PLinkSpeedUsed;
489 char buf[4];
490 int len = 1;
491 int ret;
492
493 /*
494 ** we have to determine the current settings to see if
495 ** the operator requested any modification of the flow
496 ** control parameters...
497 */
498 getPauseParams(dev, &old);
499
500 /*
501 ** perform modifications regarding the changes
502 ** requested by the operator
503 */
504 if (epause->autoneg != old.autoneg)
505 *buf = epause->autoneg ? SK_FLOW_MODE_NONE : SK_FLOW_MODE_SYMMETRIC;
506 else {
507 if (epause->rx_pause && epause->tx_pause)
508 *buf = SK_FLOW_MODE_SYMMETRIC;
509 else if (epause->rx_pause && !epause->tx_pause)
510 *buf = SK_FLOW_MODE_SYM_OR_REM;
511 else if (!epause->rx_pause && epause->tx_pause)
512 *buf = SK_FLOW_MODE_LOC_SEND;
513 else
514 *buf = SK_FLOW_MODE_NONE;
515 }
516
517 ret = SkPnmiSetVar(pAC, pAC->IoBase, OID_SKGE_FLOWCTRL_MODE,
518 &buf, &len, instance, pNet->NetNr);
519
520 if (ret != SK_PNMI_ERR_OK) {
521 SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_CTRL,
522 ("ethtool (sk98lin): error changing rx/tx pause (%i)\n", ret));
523 goto err;
524 }
525
526 /*
527 ** It may be that autoneg has been disabled! Therefore
528 ** set the speed to the previously used value...
529 */
530 if (!epause->autoneg) {
531 len = 1;
532 ret = SkPnmiSetVar(pAC, pAC->IoBase, OID_SKGE_SPEED_MODE,
533 &oldspeed, &len, instance, pNet->NetNr);
534 if (ret != SK_PNMI_ERR_OK)
535 SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_CTRL,
536 ("ethtool (sk98lin): error setting speed (%i)\n", ret));
537 }
538 err:
539 return ret ? -EIO : 0;
540}
541
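The rx/tx pause flags map onto four flow-control modes. A small standalone version of that mapping (the enum values stand in for the SK_FLOW_MODE_* constants):

#include <stdio.h>

enum flow_mode { FLOW_NONE, FLOW_LOC_SEND, FLOW_SYM_OR_REM, FLOW_SYMMETRIC };

static enum flow_mode pause_to_flow(int rx_pause, int tx_pause)
{
	if (rx_pause && tx_pause)
		return FLOW_SYMMETRIC;    /* pause in both directions */
	if (rx_pause)
		return FLOW_SYM_OR_REM;   /* mirrors the driver's rx-only case */
	if (tx_pause)
		return FLOW_LOC_SEND;     /* mirrors the driver's tx-only case */
	return FLOW_NONE;
}

int main(void)
{
	static const char *names[] = { "none", "loc_send", "sym_or_rem", "symmetric" };

	printf("rx+tx -> %s\n", names[pause_to_flow(1, 1)]);
	printf("rx    -> %s\n", names[pause_to_flow(1, 0)]);
	return 0;
}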
542/* Only Yukon supports checksum offload. */
543static int setScatterGather(struct net_device *dev, u32 data)
544{
545 DEV_NET *pNet = netdev_priv(dev);
546 SK_AC *pAC = pNet->pAC;
547
548 if (pAC->GIni.GIChipId == CHIP_ID_GENESIS)
549 return -EOPNOTSUPP;
550 return ethtool_op_set_sg(dev, data);
551}
552
553static int setTxCsum(struct net_device *dev, u32 data)
554{
555 DEV_NET *pNet = netdev_priv(dev);
556 SK_AC *pAC = pNet->pAC;
557
558 if (pAC->GIni.GIChipId == CHIP_ID_GENESIS)
559 return -EOPNOTSUPP;
560
561 return ethtool_op_set_tx_csum(dev, data);
562}
563
564static u32 getRxCsum(struct net_device *dev)
565{
566 DEV_NET *pNet = netdev_priv(dev);
567 SK_AC *pAC = pNet->pAC;
568
569 return pAC->RxPort[pNet->PortNr].RxCsum;
570}
571
572static int setRxCsum(struct net_device *dev, u32 data)
573{
574 DEV_NET *pNet = netdev_priv(dev);
575 SK_AC *pAC = pNet->pAC;
576
577 if (pAC->GIni.GIChipId == CHIP_ID_GENESIS)
578 return -EOPNOTSUPP;
579
580 pAC->RxPort[pNet->PortNr].RxCsum = data != 0;
581 return 0;
582}
583
584static int getRegsLen(struct net_device *dev)
585{
586 return 0x4000;
587}
588
589/*
590 * Returns copy of whole control register region
591 * Note: skip RAM address register because accessing it will
592 * cause bus hangs!
593 */
594static void getRegs(struct net_device *dev, struct ethtool_regs *regs,
595 void *p)
596{
597 DEV_NET *pNet = netdev_priv(dev);
598 const void __iomem *io = pNet->pAC->IoBase;
599
600 regs->version = 1;
601 memset(p, 0, regs->len);
602 memcpy_fromio(p, io, B3_RAM_ADDR);
603
604 memcpy_fromio(p + B3_RI_WTO_R1, io + B3_RI_WTO_R1,
605 regs->len - B3_RI_WTO_R1);
606}
607
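getRegs() copies everything below B3_RAM_ADDR and resumes at B3_RI_WTO_R1, leaving the skipped window zeroed so the dangerous register is never touched. A userspace sketch of the same copy-around-a-hole pattern with made-up offsets:

#include <stdio.h>
#include <string.h>

#define REGS_LEN   64
#define HOLE_START 16   /* stands in for B3_RAM_ADDR */
#define HOLE_END   24   /* stands in for B3_RI_WTO_R1 */

static void dump_regs(const unsigned char *io, unsigned char *out)
{
	memset(out, 0, REGS_LEN);
	memcpy(out, io, HOLE_START);                              /* before the hole */
	memcpy(out + HOLE_END, io + HOLE_END, REGS_LEN - HOLE_END); /* after the hole */
}

int main(void)
{
	unsigned char io[REGS_LEN], out[REGS_LEN];
	int i;

	for (i = 0; i < REGS_LEN; i++)
		io[i] = (unsigned char)i;
	dump_regs(io, out);
	printf("out[8]=%u (copied), out[20]=%u (hole)\n",
	       (unsigned)out[8], (unsigned)out[20]);
	return 0;
}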
608const struct ethtool_ops SkGeEthtoolOps = {
609 .get_settings = getSettings,
610 .set_settings = setSettings,
611 .get_drvinfo = getDriverInfo,
612 .get_strings = getStrings,
613 .get_stats_count = getStatsCount,
614 .get_ethtool_stats = getEthtoolStats,
615 .phys_id = locateDevice,
616 .get_pauseparam = getPauseParams,
617 .set_pauseparam = setPauseParams,
618 .get_link = ethtool_op_get_link,
619 .get_sg = ethtool_op_get_sg,
620 .set_sg = setScatterGather,
621 .get_tx_csum = ethtool_op_get_tx_csum,
622 .set_tx_csum = setTxCsum,
623 .get_rx_csum = getRxCsum,
624 .set_rx_csum = setRxCsum,
625 .get_regs = getRegs,
626 .get_regs_len = getRegsLen,
627};
diff --git a/drivers/net/sk98lin/skge.c b/drivers/net/sk98lin/skge.c
deleted file mode 100644
index 20890e44f99a..000000000000
--- a/drivers/net/sk98lin/skge.c
+++ /dev/null
@@ -1,5218 +0,0 @@
1/******************************************************************************
2 *
3 * Name: skge.c
4 * Project: GEnesis, PCI Gigabit Ethernet Adapter
5 * Version: $Revision: 1.45 $
6 * Date: $Date: 2004/02/12 14:41:02 $
7 * Purpose: The main driver source module
8 *
9 ******************************************************************************/
10
11/******************************************************************************
12 *
13 * (C)Copyright 1998-2002 SysKonnect GmbH.
14 * (C)Copyright 2002-2003 Marvell.
15 *
16 * Driver for Marvell Yukon chipset and SysKonnect Gigabit Ethernet
17 * Server Adapters.
18 *
19 * Created 10-Feb-1999, based on Linux' acenic.c, 3c59x.c and
20 * SysKonnects GEnesis Solaris driver
21 * Author: Christoph Goos (cgoos@syskonnect.de)
22 * Mirko Lindner (mlindner@syskonnect.de)
23 *
24 * Address all question to: linux@syskonnect.de
25 *
26 * The technical manual for the adapters is available from SysKonnect's
27 * web pages: www.syskonnect.com
28 *	Go to "Support" and search the Knowledge Base for "manual".
29 *
30 * This program is free software; you can redistribute it and/or modify
31 * it under the terms of the GNU General Public License as published by
32 * the Free Software Foundation; either version 2 of the License, or
33 * (at your option) any later version.
34 *
35 * The information in this file is provided "AS IS" without warranty.
36 *
37 ******************************************************************************/
38
39/******************************************************************************
40 *
41 * Possible compiler options (#define xxx / -Dxxx):
42 *
43 *	Debugging can be enabled by changing SK_DEBUG_CHKMOD and
44 *	SK_DEBUG_CHKCAT in the makefile (described there).
45 *
46 ******************************************************************************/
47
48/******************************************************************************
49 *
50 * Description:
51 *
52 * This is the main module of the Linux GE driver.
53 *
54 * All source files except skge.c, skdrv1st.h, skdrv2nd.h and sktypes.h
55 * are part of SysKonnect's COMMON MODULES for the SK-98xx adapters.
56 *	Those are used for drivers on multiple OSes, so some things may seem
57 *	unnecessarily complicated on Linux. Please do not try to 'clean them
58 *	up' without VERY good reasons, because this will make it more
59 * difficult to keep the Linux driver in synchronisation with the
60 * other versions.
61 *
62 * Include file hierarchy:
63 *
64 * <linux/module.h>
65 *
66 * "h/skdrv1st.h"
67 * <linux/types.h>
68 * <linux/kernel.h>
69 * <linux/string.h>
70 * <linux/errno.h>
71 * <linux/ioport.h>
72 * <linux/slab.h>
73 * <linux/interrupt.h>
74 * <linux/pci.h>
75 * <linux/bitops.h>
76 * <asm/byteorder.h>
77 * <asm/io.h>
78 * <linux/netdevice.h>
79 * <linux/etherdevice.h>
80 * <linux/skbuff.h>
81 * those three depending on kernel version used:
82 * <linux/bios32.h>
83 * <linux/init.h>
84 * <asm/uaccess.h>
85 * <net/checksum.h>
86 *
87 * "h/skerror.h"
88 * "h/skdebug.h"
89 * "h/sktypes.h"
90 * "h/lm80.h"
91 * "h/xmac_ii.h"
92 *
93 * "h/skdrv2nd.h"
94 * "h/skqueue.h"
95 * "h/skgehwt.h"
96 * "h/sktimer.h"
97 * "h/ski2c.h"
98 * "h/skgepnmi.h"
99 * "h/skvpd.h"
100 * "h/skgehw.h"
101 * "h/skgeinit.h"
102 * "h/skaddr.h"
103 * "h/skgesirq.h"
104 * "h/skrlmt.h"
105 *
106 ******************************************************************************/
107
108#include "h/skversion.h"
109
110#include <linux/in.h>
111#include <linux/module.h>
112#include <linux/moduleparam.h>
113#include <linux/init.h>
114#include <linux/dma-mapping.h>
115#include <linux/ip.h>
116#include <linux/mii.h>
117#include <linux/mm.h>
118
119#include "h/skdrv1st.h"
120#include "h/skdrv2nd.h"
121
122/*******************************************************************************
123 *
124 * Defines
125 *
126 ******************************************************************************/
127
128/* for debugging on x86 only */
129/* #define BREAKPOINT() asm(" int $3"); */
130
131/* use the transmit hw checksum driver functionality */
132#define USE_SK_TX_CHECKSUM
133
134/* use the receive hw checksum driver functionality */
135#define USE_SK_RX_CHECKSUM
136
137/* use the scatter-gather functionality with sendfile() */
138#define SK_ZEROCOPY
139
140/* use of a transmit complete interrupt */
141#define USE_TX_COMPLETE
142
143/*
144 * threshold for copying small receive frames
145 * set to 0 to avoid copying, set to 9001 to copy all frames
146 */
147#define SK_COPY_THRESHOLD 50
148
149/* number of adapters that can be configured via command line params */
150#define SK_MAX_CARD_PARAM 16
151
152
153
154/*
155 * use those defines for a compile-in version of the driver instead
156 * of command line parameters
157 */
158// #define LINK_SPEED_A {"Auto", }
159// #define LINK_SPEED_B {"Auto", }
160// #define AUTO_NEG_A {"Sense", }
161// #define AUTO_NEG_B {"Sense", }
162// #define DUP_CAP_A {"Both", }
163// #define DUP_CAP_B {"Both", }
164// #define FLOW_CTRL_A {"SymOrRem", }
165// #define FLOW_CTRL_B {"SymOrRem", }
166// #define ROLE_A {"Auto", }
167// #define ROLE_B {"Auto", }
168// #define PREF_PORT {"A", }
169// #define CON_TYPE {"Auto", }
170// #define RLMT_MODE {"CheckLinkState", }
171
172#define DEV_KFREE_SKB(skb) dev_kfree_skb(skb)
173#define DEV_KFREE_SKB_IRQ(skb) dev_kfree_skb_irq(skb)
174#define DEV_KFREE_SKB_ANY(skb) dev_kfree_skb_any(skb)
175
176
177/* Set blink mode*/
178#define OEM_CONFIG_VALUE ( SK_ACT_LED_BLINK | \
179 SK_DUP_LED_NORMAL | \
180 SK_LED_LINK100_ON)
181
182
183/* Isr return value */
184#define SkIsrRetVar irqreturn_t
185#define SkIsrRetNone IRQ_NONE
186#define SkIsrRetHandled IRQ_HANDLED
187
188
189/*******************************************************************************
190 *
191 * Local Function Prototypes
192 *
193 ******************************************************************************/
194
195static void FreeResources(struct SK_NET_DEVICE *dev);
196static int SkGeBoardInit(struct SK_NET_DEVICE *dev, SK_AC *pAC);
197static SK_BOOL BoardAllocMem(SK_AC *pAC);
198static void BoardFreeMem(SK_AC *pAC);
199static void BoardInitMem(SK_AC *pAC);
200static void SetupRing(SK_AC*, void*, uintptr_t, RXD**, RXD**, RXD**, int*, SK_BOOL);
201static SkIsrRetVar SkGeIsr(int irq, void *dev_id);
202static SkIsrRetVar SkGeIsrOnePort(int irq, void *dev_id);
203static int SkGeOpen(struct SK_NET_DEVICE *dev);
204static int SkGeClose(struct SK_NET_DEVICE *dev);
205static int SkGeXmit(struct sk_buff *skb, struct SK_NET_DEVICE *dev);
206static int SkGeSetMacAddr(struct SK_NET_DEVICE *dev, void *p);
207static void SkGeSetRxMode(struct SK_NET_DEVICE *dev);
208static struct net_device_stats *SkGeStats(struct SK_NET_DEVICE *dev);
209static int SkGeIoctl(struct SK_NET_DEVICE *dev, struct ifreq *rq, int cmd);
210static void GetConfiguration(SK_AC*);
211static int XmitFrame(SK_AC*, TX_PORT*, struct sk_buff*);
212static void FreeTxDescriptors(SK_AC*pAC, TX_PORT*);
213static void FillRxRing(SK_AC*, RX_PORT*);
214static SK_BOOL FillRxDescriptor(SK_AC*, RX_PORT*);
215static void ReceiveIrq(SK_AC*, RX_PORT*, SK_BOOL);
216static void ClearAndStartRx(SK_AC*, int);
217static void ClearTxIrq(SK_AC*, int, int);
218static void ClearRxRing(SK_AC*, RX_PORT*);
219static void ClearTxRing(SK_AC*, TX_PORT*);
220static int SkGeChangeMtu(struct SK_NET_DEVICE *dev, int new_mtu);
221static void PortReInitBmu(SK_AC*, int);
222static int SkGeIocMib(DEV_NET*, unsigned int, int);
223static int SkGeInitPCI(SK_AC *pAC);
224static void StartDrvCleanupTimer(SK_AC *pAC);
225static void StopDrvCleanupTimer(SK_AC *pAC);
226static int XmitFrameSG(SK_AC*, TX_PORT*, struct sk_buff*);
227
228#ifdef SK_DIAG_SUPPORT
229static SK_U32 ParseDeviceNbrFromSlotName(const char *SlotName);
230static int SkDrvInitAdapter(SK_AC *pAC, int devNbr);
231static int SkDrvDeInitAdapter(SK_AC *pAC, int devNbr);
232#endif
233
234/*******************************************************************************
235 *
236 * Extern Function Prototypes
237 *
238 ******************************************************************************/
239extern void SkDimEnableModerationIfNeeded(SK_AC *pAC);
240extern void SkDimDisplayModerationSettings(SK_AC *pAC);
241extern void SkDimStartModerationTimer(SK_AC *pAC);
242extern void SkDimModerate(SK_AC *pAC);
243extern void SkGeBlinkTimer(unsigned long data);
244
245#ifdef DEBUG
246static void DumpMsg(struct sk_buff*, char*);
247static void DumpData(char*, int);
248static void DumpLong(char*, int);
249#endif
250
251/* global variables *********************************************************/
252static SK_BOOL DoPrintInterfaceChange = SK_TRUE;
253extern const struct ethtool_ops SkGeEthtoolOps;
254
255/* local variables **********************************************************/
256static uintptr_t TxQueueAddr[SK_MAX_MACS][2] = {{0x680, 0x600},{0x780, 0x700}};
257static uintptr_t RxQueueAddr[SK_MAX_MACS] = {0x400, 0x480};
258
259/*****************************************************************************
260 *
261 * SkPciWriteCfgDWord - write a 32 bit value to pci config space
262 *
263 * Description:
264 * This routine writes a 32 bit value to the pci configuration
265 * space.
266 *
267 * Returns:
268 * 0 - indicate everything worked ok.
269 * != 0 - error indication
270 */
271static inline int SkPciWriteCfgDWord(
272SK_AC *pAC, /* Adapter Control structure pointer */
273int PciAddr, /* PCI register address */
274SK_U32 Val)		/* value to write */
275{
276 pci_write_config_dword(pAC->PciDev, PciAddr, Val);
277 return(0);
278} /* SkPciWriteCfgDWord */
279
280/*****************************************************************************
281 *
282 * SkGeInitPCI - Init the PCI resources
283 *
284 * Description:
285 *	This function initializes the PCI resources and I/O mapping
286 *
287 * Returns:
288 * 0 - indicate everything worked ok.
289 * != 0 - error indication
290 */
291static __devinit int SkGeInitPCI(SK_AC *pAC)
292{
293 struct SK_NET_DEVICE *dev = pAC->dev[0];
294 struct pci_dev *pdev = pAC->PciDev;
295 int retval;
296
297 dev->mem_start = pci_resource_start (pdev, 0);
298 pci_set_master(pdev);
299
300 retval = pci_request_regions(pdev, "sk98lin");
301 if (retval)
302 goto out;
303
304#ifdef SK_BIG_ENDIAN
305 /*
306	 * On big endian machines, we use the adapter's ability to
307	 * read the descriptors as big endian.
308 */
309 {
310 SK_U32 our2;
311 SkPciReadCfgDWord(pAC, PCI_OUR_REG_2, &our2);
312 our2 |= PCI_REV_DESC;
313 SkPciWriteCfgDWord(pAC, PCI_OUR_REG_2, our2);
314 }
315#endif
316
317 /*
318 * Remap the regs into kernel space.
319 */
320 pAC->IoBase = ioremap_nocache(dev->mem_start, 0x4000);
321 if (!pAC->IoBase) {
322 retval = -EIO;
323 goto out_release;
324 }
325
326 return 0;
327
328 out_release:
329 pci_release_regions(pdev);
330 out:
331 return retval;
332}
333
334
335/*****************************************************************************
336 *
337 * FreeResources - release resources allocated for adapter
338 *
339 * Description:
340 * This function releases the IRQ, unmaps the IO and
341 *	frees the descriptor ring.
342 *
343 * Returns: N/A
344 *
345 */
346static void FreeResources(struct SK_NET_DEVICE *dev)
347{
348SK_U32 AllocFlag;
349DEV_NET *pNet;
350SK_AC *pAC;
351
352 pNet = netdev_priv(dev);
353 pAC = pNet->pAC;
354 AllocFlag = pAC->AllocFlag;
355 if (pAC->PciDev) {
356 pci_release_regions(pAC->PciDev);
357 }
358 if (AllocFlag & SK_ALLOC_IRQ) {
359 free_irq(dev->irq, dev);
360 }
361 if (pAC->IoBase) {
362 iounmap(pAC->IoBase);
363 }
364 if (pAC->pDescrMem) {
365 BoardFreeMem(pAC);
366 }
367
368} /* FreeResources */
369
370MODULE_AUTHOR("Mirko Lindner <mlindner@syskonnect.de>");
371MODULE_DESCRIPTION("SysKonnect SK-NET Gigabit Ethernet SK-98xx driver");
372MODULE_LICENSE("GPL");
373
374#ifdef LINK_SPEED_A
375static char *Speed_A[SK_MAX_CARD_PARAM] = LINK_SPEED;
376#else
377static char *Speed_A[SK_MAX_CARD_PARAM] = {"", };
378#endif
379
380#ifdef LINK_SPEED_B
381static char *Speed_B[SK_MAX_CARD_PARAM] = LINK_SPEED;
382#else
383static char *Speed_B[SK_MAX_CARD_PARAM] = {"", };
384#endif
385
386#ifdef AUTO_NEG_A
387static char *AutoNeg_A[SK_MAX_CARD_PARAM] = AUTO_NEG_A;
388#else
389static char *AutoNeg_A[SK_MAX_CARD_PARAM] = {"", };
390#endif
391
392#ifdef DUP_CAP_A
393static char *DupCap_A[SK_MAX_CARD_PARAM] = DUP_CAP_A;
394#else
395static char *DupCap_A[SK_MAX_CARD_PARAM] = {"", };
396#endif
397
398#ifdef FLOW_CTRL_A
399static char *FlowCtrl_A[SK_MAX_CARD_PARAM] = FLOW_CTRL_A;
400#else
401static char *FlowCtrl_A[SK_MAX_CARD_PARAM] = {"", };
402#endif
403
404#ifdef ROLE_A
405static char *Role_A[SK_MAX_CARD_PARAM] = ROLE_A;
406#else
407static char *Role_A[SK_MAX_CARD_PARAM] = {"", };
408#endif
409
410#ifdef AUTO_NEG_B
411static char *AutoNeg_B[SK_MAX_CARD_PARAM] = AUTO_NEG_B;
412#else
413static char *AutoNeg_B[SK_MAX_CARD_PARAM] = {"", };
414#endif
415
416#ifdef DUP_CAP_B
417static char *DupCap_B[SK_MAX_CARD_PARAM] = DUP_CAP_B;
418#else
419static char *DupCap_B[SK_MAX_CARD_PARAM] = {"", };
420#endif
421
422#ifdef FLOW_CTRL_B
423static char *FlowCtrl_B[SK_MAX_CARD_PARAM] = FLOW_CTRL_B;
424#else
425static char *FlowCtrl_B[SK_MAX_CARD_PARAM] = {"", };
426#endif
427
428#ifdef ROLE_B
429static char *Role_B[SK_MAX_CARD_PARAM] = ROLE_B;
430#else
431static char *Role_B[SK_MAX_CARD_PARAM] = {"", };
432#endif
433
434#ifdef CON_TYPE
435static char *ConType[SK_MAX_CARD_PARAM] = CON_TYPE;
436#else
437static char *ConType[SK_MAX_CARD_PARAM] = {"", };
438#endif
439
440#ifdef PREF_PORT
441static char *PrefPort[SK_MAX_CARD_PARAM] = PREF_PORT;
442#else
443static char *PrefPort[SK_MAX_CARD_PARAM] = {"", };
444#endif
445
446#ifdef RLMT_MODE
447static char *RlmtMode[SK_MAX_CARD_PARAM] = RLMT_MODE;
448#else
449static char *RlmtMode[SK_MAX_CARD_PARAM] = {"", };
450#endif
451
452static int IntsPerSec[SK_MAX_CARD_PARAM];
453static char *Moderation[SK_MAX_CARD_PARAM];
454static char *ModerationMask[SK_MAX_CARD_PARAM];
455static char *AutoSizing[SK_MAX_CARD_PARAM];
456static char *Stats[SK_MAX_CARD_PARAM];
457
458module_param_array(Speed_A, charp, NULL, 0);
459module_param_array(Speed_B, charp, NULL, 0);
460module_param_array(AutoNeg_A, charp, NULL, 0);
461module_param_array(AutoNeg_B, charp, NULL, 0);
462module_param_array(DupCap_A, charp, NULL, 0);
463module_param_array(DupCap_B, charp, NULL, 0);
464module_param_array(FlowCtrl_A, charp, NULL, 0);
465module_param_array(FlowCtrl_B, charp, NULL, 0);
466module_param_array(Role_A, charp, NULL, 0);
467module_param_array(Role_B, charp, NULL, 0);
468module_param_array(ConType, charp, NULL, 0);
469module_param_array(PrefPort, charp, NULL, 0);
470module_param_array(RlmtMode, charp, NULL, 0);
471/* used for interrupt moderation */
472module_param_array(IntsPerSec, int, NULL, 0);
473module_param_array(Moderation, charp, NULL, 0);
474module_param_array(Stats, charp, NULL, 0);
475module_param_array(ModerationMask, charp, NULL, 0);
476module_param_array(AutoSizing, charp, NULL, 0);
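
/*
 * Editorial usage note: the arrays above are per-adapter module parameters
 * and can be given at load time. The exact keywords are documented in
 * Documentation/networking/sk98lin.txt; the values below are an assumed
 * example only:
 *
 *	modprobe sk98lin AutoNeg_A=On DupCap_A=Full Moderation=Dynamic
 */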
477
478/*****************************************************************************
479 *
480 * SkGeBoardInit - do level 0 and 1 initialization
481 *
482 * Description:
483 *	This function prepares the board hardware for running. The descriptor
484 * ring is set up, the IRQ is allocated and the configuration settings
485 * are examined.
486 *
487 * Returns:
488 * 0, if everything is ok
489 * !=0, on error
490 */
491static int __devinit SkGeBoardInit(struct SK_NET_DEVICE *dev, SK_AC *pAC)
492{
493short i;
494unsigned long Flags;
495char *DescrString = "sk98lin: Driver for Linux"; /* this is given to PNMI */
496char *VerStr = VER_STRING;
497int Ret; /* return code of request_irq */
498SK_BOOL DualNet;
499
500 SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_ENTRY,
501 ("IoBase: %08lX\n", (unsigned long)pAC->IoBase));
502 for (i=0; i<SK_MAX_MACS; i++) {
503 pAC->TxPort[i][0].HwAddr = pAC->IoBase + TxQueueAddr[i][0];
504 pAC->TxPort[i][0].PortIndex = i;
505 pAC->RxPort[i].HwAddr = pAC->IoBase + RxQueueAddr[i];
506 pAC->RxPort[i].PortIndex = i;
507 }
508
509 /* Initialize the mutexes */
510 for (i=0; i<SK_MAX_MACS; i++) {
511 spin_lock_init(&pAC->TxPort[i][0].TxDesRingLock);
512 spin_lock_init(&pAC->RxPort[i].RxDesRingLock);
513 }
514 spin_lock_init(&pAC->SlowPathLock);
515
516 /* setup phy_id blink timer */
517 pAC->BlinkTimer.function = SkGeBlinkTimer;
518 pAC->BlinkTimer.data = (unsigned long) dev;
519 init_timer(&pAC->BlinkTimer);
520
521 /* level 0 init common modules here */
522
523 spin_lock_irqsave(&pAC->SlowPathLock, Flags);
524 /* Does a RESET on board ...*/
525 if (SkGeInit(pAC, pAC->IoBase, SK_INIT_DATA) != 0) {
526		printk("sk98lin: HWInit (0) failed.\n");
527 spin_unlock_irqrestore(&pAC->SlowPathLock, Flags);
528 return -EIO;
529 }
530 SkI2cInit( pAC, pAC->IoBase, SK_INIT_DATA);
531 SkEventInit(pAC, pAC->IoBase, SK_INIT_DATA);
532 SkPnmiInit( pAC, pAC->IoBase, SK_INIT_DATA);
533 SkAddrInit( pAC, pAC->IoBase, SK_INIT_DATA);
534 SkRlmtInit( pAC, pAC->IoBase, SK_INIT_DATA);
535 SkTimerInit(pAC, pAC->IoBase, SK_INIT_DATA);
536
537 pAC->BoardLevel = SK_INIT_DATA;
538 pAC->RxBufSize = ETH_BUF_SIZE;
539
540 SK_PNMI_SET_DRIVER_DESCR(pAC, DescrString);
541 SK_PNMI_SET_DRIVER_VER(pAC, VerStr);
542
543 spin_unlock_irqrestore(&pAC->SlowPathLock, Flags);
544
545 /* level 1 init common modules here (HW init) */
546 spin_lock_irqsave(&pAC->SlowPathLock, Flags);
547 if (SkGeInit(pAC, pAC->IoBase, SK_INIT_IO) != 0) {
548 printk("sk98lin: HWInit (1) failed.\n");
549 spin_unlock_irqrestore(&pAC->SlowPathLock, Flags);
550 return -EIO;
551 }
552 SkI2cInit( pAC, pAC->IoBase, SK_INIT_IO);
553 SkEventInit(pAC, pAC->IoBase, SK_INIT_IO);
554 SkPnmiInit( pAC, pAC->IoBase, SK_INIT_IO);
555 SkAddrInit( pAC, pAC->IoBase, SK_INIT_IO);
556 SkRlmtInit( pAC, pAC->IoBase, SK_INIT_IO);
557 SkTimerInit(pAC, pAC->IoBase, SK_INIT_IO);
558
559 /* Set chipset type support */
560 pAC->ChipsetType = 0;
561 if ((pAC->GIni.GIChipId == CHIP_ID_YUKON) ||
562 (pAC->GIni.GIChipId == CHIP_ID_YUKON_LITE)) {
563 pAC->ChipsetType = 1;
564 }
565
566 GetConfiguration(pAC);
567 if (pAC->RlmtNets == 2) {
568 pAC->GIni.GIPortUsage = SK_MUL_LINK;
569 }
570
571 pAC->BoardLevel = SK_INIT_IO;
572 spin_unlock_irqrestore(&pAC->SlowPathLock, Flags);
573
574 if (pAC->GIni.GIMacsFound == 2) {
575 Ret = request_irq(dev->irq, SkGeIsr, IRQF_SHARED, "sk98lin", dev);
576 } else if (pAC->GIni.GIMacsFound == 1) {
577 Ret = request_irq(dev->irq, SkGeIsrOnePort, IRQF_SHARED,
578 "sk98lin", dev);
579 } else {
580 printk(KERN_WARNING "sk98lin: Illegal number of ports: %d\n",
581 pAC->GIni.GIMacsFound);
582 return -EIO;
583 }
584
585 if (Ret) {
586 printk(KERN_WARNING "sk98lin: Requested IRQ %d is busy.\n",
587 dev->irq);
588 return Ret;
589 }
590 pAC->AllocFlag |= SK_ALLOC_IRQ;
591
592 /* Alloc memory for this board (Mem for RxD/TxD) : */
593 if(!BoardAllocMem(pAC)) {
594		printk("sk98lin: No memory for descriptor rings.\n");
595 return -ENOMEM;
596 }
597
598 BoardInitMem(pAC);
599 /* tschilling: New common function with minimum size check. */
600 DualNet = SK_FALSE;
601 if (pAC->RlmtNets == 2) {
602 DualNet = SK_TRUE;
603 }
604
605 if (SkGeInitAssignRamToQueues(
606 pAC,
607 pAC->ActivePort,
608 DualNet)) {
609 BoardFreeMem(pAC);
610 printk("sk98lin: SkGeInitAssignRamToQueues failed.\n");
611 return -EIO;
612 }
613
614 return (0);
615} /* SkGeBoardInit */
616
617
618/*****************************************************************************
619 *
620 * BoardAllocMem - allocate the memory for the descriptor rings
621 *
622 * Description:
623 * This function allocates the memory for all descriptor rings.
624 *	Each ring is aligned to the descriptor alignment and no ring
625 *	crosses a 4 GByte boundary (because the upper 32 bit must
626 *	be constant for all descriptors in one ring).
627 *
628 * Returns:
629 * SK_TRUE, if all memory could be allocated
630 * SK_FALSE, if not
631 */
632static __devinit SK_BOOL BoardAllocMem(SK_AC *pAC)
633{
634caddr_t pDescrMem; /* pointer to descriptor memory area */
635size_t AllocLength; /* length of complete descriptor area */
636int i; /* loop counter */
637unsigned long BusAddr;
638
639
640 /* rings plus one for alignment (do not cross 4 GB boundary) */
641 /* RX_RING_SIZE is assumed bigger than TX_RING_SIZE */
642#if (BITS_PER_LONG == 32)
643 AllocLength = (RX_RING_SIZE + TX_RING_SIZE) * pAC->GIni.GIMacsFound + 8;
644#else
645 AllocLength = (RX_RING_SIZE + TX_RING_SIZE) * pAC->GIni.GIMacsFound
646 + RX_RING_SIZE + 8;
647#endif
648
649 pDescrMem = pci_alloc_consistent(pAC->PciDev, AllocLength,
650 &pAC->pDescrMemDMA);
651
652 if (pDescrMem == NULL) {
653 return (SK_FALSE);
654 }
655 pAC->pDescrMem = pDescrMem;
656 BusAddr = (unsigned long) pAC->pDescrMemDMA;
657
658 /* Descriptors need 8 byte alignment, and this is ensured
659 * by pci_alloc_consistent.
660 */
661 for (i=0; i<pAC->GIni.GIMacsFound; i++) {
662 SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_TX_PROGRESS,
663 ("TX%d/A: pDescrMem: %lX, PhysDescrMem: %lX\n",
664 i, (unsigned long) pDescrMem,
665 BusAddr));
666 pAC->TxPort[i][0].pTxDescrRing = pDescrMem;
667 pAC->TxPort[i][0].VTxDescrRing = BusAddr;
668 pDescrMem += TX_RING_SIZE;
669 BusAddr += TX_RING_SIZE;
670
671 SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_TX_PROGRESS,
672 ("RX%d: pDescrMem: %lX, PhysDescrMem: %lX\n",
673 i, (unsigned long) pDescrMem,
674 (unsigned long)BusAddr));
675 pAC->RxPort[i].pRxDescrRing = pDescrMem;
676 pAC->RxPort[i].VRxDescrRing = BusAddr;
677 pDescrMem += RX_RING_SIZE;
678 BusAddr += RX_RING_SIZE;
679 } /* for */
680
681 return (SK_TRUE);
682} /* BoardAllocMem */
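
/*
 * Editorial sketch: the comment on BoardAllocMem above requires that no
 * ring crosses a 4 GByte boundary, because only the low 32 bit of a
 * descriptor address may vary within one ring. A hypothetical check for
 * one ring (the names 'Bus' and 'Size' are assumptions, not driver fields):
 */
#if 0	/* illustrative sketch only */
static inline SK_BOOL RingCrosses4GBoundary(SK_U64 Bus, size_t Size)
{
	/* the upper 32 bit of the first and the last byte must match */
	return ((Bus >> 32) != ((Bus + Size - 1) >> 32));
}
#endif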
683
684
685/****************************************************************************
686 *
687 * BoardFreeMem - reverse of BoardAllocMem
688 *
689 * Description:
690 * Free all memory allocated in BoardAllocMem: adapter context,
691 * descriptor rings, locks.
692 *
693 * Returns: N/A
694 */
695static void BoardFreeMem(
696SK_AC *pAC)
697{
698size_t AllocLength; /* length of complete descriptor area */
699
700 SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_ENTRY,
701 ("BoardFreeMem\n"));
702#if (BITS_PER_LONG == 32)
703 AllocLength = (RX_RING_SIZE + TX_RING_SIZE) * pAC->GIni.GIMacsFound + 8;
704#else
705 AllocLength = (RX_RING_SIZE + TX_RING_SIZE) * pAC->GIni.GIMacsFound
706 + RX_RING_SIZE + 8;
707#endif
708
709 pci_free_consistent(pAC->PciDev, AllocLength,
710 pAC->pDescrMem, pAC->pDescrMemDMA);
711 pAC->pDescrMem = NULL;
712} /* BoardFreeMem */
713
714
715/*****************************************************************************
716 *
717 * BoardInitMem - initiate the descriptor rings
718 *
719 * Description:
720 * This function sets the descriptor rings up in memory.
721 * The adapter is initialized with the descriptor start addresses.
722 *
723 * Returns: N/A
724 */
725static __devinit void BoardInitMem(SK_AC *pAC)
726{
727int i; /* loop counter */
728int RxDescrSize; /* the size of a rx descriptor rounded up to alignment*/
729int TxDescrSize; /* the size of a tx descriptor rounded up to alignment*/
730
731 SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_ENTRY,
732 ("BoardInitMem\n"));
733
734 RxDescrSize = (((sizeof(RXD) - 1) / DESCR_ALIGN) + 1) * DESCR_ALIGN;
735 pAC->RxDescrPerRing = RX_RING_SIZE / RxDescrSize;
736 TxDescrSize = (((sizeof(TXD) - 1) / DESCR_ALIGN) + 1) * DESCR_ALIGN;
737	pAC->TxDescrPerRing = TX_RING_SIZE / TxDescrSize;
738
739 for (i=0; i<pAC->GIni.GIMacsFound; i++) {
740 SetupRing(
741 pAC,
742 pAC->TxPort[i][0].pTxDescrRing,
743 pAC->TxPort[i][0].VTxDescrRing,
744 (RXD**)&pAC->TxPort[i][0].pTxdRingHead,
745 (RXD**)&pAC->TxPort[i][0].pTxdRingTail,
746 (RXD**)&pAC->TxPort[i][0].pTxdRingPrev,
747 &pAC->TxPort[i][0].TxdRingFree,
748 SK_TRUE);
749 SetupRing(
750 pAC,
751 pAC->RxPort[i].pRxDescrRing,
752 pAC->RxPort[i].VRxDescrRing,
753 &pAC->RxPort[i].pRxdRingHead,
754 &pAC->RxPort[i].pRxdRingTail,
755 &pAC->RxPort[i].pRxdRingPrev,
756 &pAC->RxPort[i].RxdRingFree,
757 SK_FALSE);
758 }
759} /* BoardInitMem */
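
/*
 * Editorial note: the expression used above and in SetupRing,
 * (((sizeof(X) - 1) / DESCR_ALIGN) + 1) * DESCR_ALIGN, simply rounds the
 * descriptor size up to the next multiple of DESCR_ALIGN. A hypothetical
 * helper macro (not part of the driver) makes that intent explicit:
 */
#if 0	/* illustrative sketch only */
#define SK_DESCR_ALIGNED_SIZE(type) \
	((((sizeof(type) - 1) / DESCR_ALIGN) + 1) * DESCR_ALIGN)
/* e.g.: RxDescrSize = SK_DESCR_ALIGNED_SIZE(RXD); */
#endif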
760
761
762/*****************************************************************************
763 *
764 * SetupRing - create one descriptor ring
765 *
766 * Description:
767 * This function creates one descriptor ring in the given memory area.
768 * The head, tail and number of free descriptors in the ring are set.
769 *
770 * Returns:
771 * none
772 */
773static void SetupRing(
774SK_AC *pAC,
775void *pMemArea, /* a pointer to the memory area for the ring */
776uintptr_t VMemArea, /* the virtual bus address of the memory area */
777RXD **ppRingHead, /* address where the head should be written */
778RXD **ppRingTail, /* address where the tail should be written */
779RXD **ppRingPrev, /* address where the prev pointer should be written */
780int *pRingFree, /* address where the # of free descr. goes */
781SK_BOOL IsTx) /* flag: is this a tx ring */
782{
783int i; /* loop counter */
784int DescrSize; /* the size of a descriptor rounded up to alignment*/
785int DescrNum; /* number of descriptors per ring */
786RXD *pDescr; /* pointer to a descriptor (receive or transmit) */
787RXD *pNextDescr; /* pointer to the next descriptor */
788RXD *pPrevDescr; /* pointer to the previous descriptor */
789uintptr_t VNextDescr; /* the virtual bus address of the next descriptor */
790
791 if (IsTx == SK_TRUE) {
792 DescrSize = (((sizeof(TXD) - 1) / DESCR_ALIGN) + 1) *
793 DESCR_ALIGN;
794 DescrNum = TX_RING_SIZE / DescrSize;
795 } else {
796 DescrSize = (((sizeof(RXD) - 1) / DESCR_ALIGN) + 1) *
797 DESCR_ALIGN;
798 DescrNum = RX_RING_SIZE / DescrSize;
799 }
800
801 SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_TX_PROGRESS,
802 ("Descriptor size: %d Descriptor Number: %d\n",
803 DescrSize,DescrNum));
804
805 pDescr = (RXD*) pMemArea;
806 pPrevDescr = NULL;
807 pNextDescr = (RXD*) (((char*)pDescr) + DescrSize);
808 VNextDescr = VMemArea + DescrSize;
809 for(i=0; i<DescrNum; i++) {
810 /* set the pointers right */
811 pDescr->VNextRxd = VNextDescr & 0xffffffffULL;
812 pDescr->pNextRxd = pNextDescr;
813 if (!IsTx) pDescr->TcpSumStarts = ETH_HLEN << 16 | ETH_HLEN;
814
815 /* advance one step */
816 pPrevDescr = pDescr;
817 pDescr = pNextDescr;
818 pNextDescr = (RXD*) (((char*)pDescr) + DescrSize);
819 VNextDescr += DescrSize;
820 }
821 pPrevDescr->pNextRxd = (RXD*) pMemArea;
822 pPrevDescr->VNextRxd = VMemArea;
823 pDescr = (RXD*) pMemArea;
824 *ppRingHead = (RXD*) pMemArea;
825 *ppRingTail = *ppRingHead;
826 *ppRingPrev = pPrevDescr;
827 *pRingFree = DescrNum;
828} /* SetupRing */
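
/*
 * Editorial sketch: after SetupRing the ring is circular and empty
 * (head == tail, prev points at the last descriptor, free == DescrNum).
 * A hypothetical sanity check of those invariants, for illustration only:
 */
#if 0	/* illustrative sketch only */
static void CheckRingSetup(RXD *pHead, RXD *pPrev, int Free, int DescrNum)
{
	/* the last descriptor must close the circle back to the head */
	if (pPrev->pNextRxd != pHead || Free != DescrNum)
		printk(KERN_WARNING "sk98lin: inconsistent ring setup\n");
}
#endif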
829
830
831/*****************************************************************************
832 *
833 * PortReInitBmu - re-initiate the descriptor rings for one port
834 *
835 * Description:
836 * This function reinitializes the descriptor rings of one port
837 * in memory. The port must be stopped before.
838 * The HW is initialized with the descriptor start addresses.
839 *
840 * Returns:
841 * none
842 */
843static void PortReInitBmu(
844SK_AC *pAC, /* pointer to adapter context */
845int PortIndex) /* index of the port for which to re-init */
846{
847 SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_ENTRY,
848 ("PortReInitBmu "));
849
850 /* set address of first descriptor of ring in BMU */
851 SK_OUT32(pAC->IoBase, TxQueueAddr[PortIndex][TX_PRIO_LOW]+ Q_DA_L,
852 (uint32_t)(((caddr_t)
853 (pAC->TxPort[PortIndex][TX_PRIO_LOW].pTxdRingHead) -
854 pAC->TxPort[PortIndex][TX_PRIO_LOW].pTxDescrRing +
855 pAC->TxPort[PortIndex][TX_PRIO_LOW].VTxDescrRing) &
856 0xFFFFFFFF));
857 SK_OUT32(pAC->IoBase, TxQueueAddr[PortIndex][TX_PRIO_LOW]+ Q_DA_H,
858 (uint32_t)(((caddr_t)
859 (pAC->TxPort[PortIndex][TX_PRIO_LOW].pTxdRingHead) -
860 pAC->TxPort[PortIndex][TX_PRIO_LOW].pTxDescrRing +
861 pAC->TxPort[PortIndex][TX_PRIO_LOW].VTxDescrRing) >> 32));
862 SK_OUT32(pAC->IoBase, RxQueueAddr[PortIndex]+Q_DA_L,
863 (uint32_t)(((caddr_t)(pAC->RxPort[PortIndex].pRxdRingHead) -
864 pAC->RxPort[PortIndex].pRxDescrRing +
865 pAC->RxPort[PortIndex].VRxDescrRing) & 0xFFFFFFFF));
866 SK_OUT32(pAC->IoBase, RxQueueAddr[PortIndex]+Q_DA_H,
867 (uint32_t)(((caddr_t)(pAC->RxPort[PortIndex].pRxdRingHead) -
868 pAC->RxPort[PortIndex].pRxDescrRing +
869 pAC->RxPort[PortIndex].VRxDescrRing) >> 32));
870} /* PortReInitBmu */
871
872
873/****************************************************************************
874 *
875 * SkGeIsr - handle adapter interrupts
876 *
877 * Description:
878 * The interrupt routine is called when the network adapter
879 * generates an interrupt. It may also be called if another device
880 * shares this interrupt vector with the driver.
881 *
882 * Returns: N/A
883 *
884 */
885static SkIsrRetVar SkGeIsr(int irq, void *dev_id)
886{
887struct SK_NET_DEVICE *dev = (struct SK_NET_DEVICE *)dev_id;
888DEV_NET *pNet;
889SK_AC *pAC;
890SK_U32 IntSrc; /* interrupts source register contents */
891
892 pNet = netdev_priv(dev);
893 pAC = pNet->pAC;
894
895 /*
896	 * Check if it's our interrupt and process it
897 */
898 SK_IN32(pAC->IoBase, B0_SP_ISRC, &IntSrc);
899 if (IntSrc == 0) {
900 return SkIsrRetNone;
901 }
902
903 while (((IntSrc & IRQ_MASK) & ~SPECIAL_IRQS) != 0) {
904#if 0 /* software irq currently not used */
905 if (IntSrc & IS_IRQ_SW) {
906 SK_DBG_MSG(NULL, SK_DBGMOD_DRV,
907 SK_DBGCAT_DRV_INT_SRC,
908 ("Software IRQ\n"));
909 }
910#endif
911 if (IntSrc & IS_R1_F) {
912 SK_DBG_MSG(NULL, SK_DBGMOD_DRV,
913 SK_DBGCAT_DRV_INT_SRC,
914 ("EOF RX1 IRQ\n"));
915 ReceiveIrq(pAC, &pAC->RxPort[0], SK_TRUE);
916 SK_PNMI_CNT_RX_INTR(pAC, 0);
917 }
918 if (IntSrc & IS_R2_F) {
919 SK_DBG_MSG(NULL, SK_DBGMOD_DRV,
920 SK_DBGCAT_DRV_INT_SRC,
921 ("EOF RX2 IRQ\n"));
922 ReceiveIrq(pAC, &pAC->RxPort[1], SK_TRUE);
923 SK_PNMI_CNT_RX_INTR(pAC, 1);
924 }
925#ifdef USE_TX_COMPLETE /* only if tx complete interrupt used */
926 if (IntSrc & IS_XA1_F) {
927 SK_DBG_MSG(NULL, SK_DBGMOD_DRV,
928 SK_DBGCAT_DRV_INT_SRC,
929 ("EOF AS TX1 IRQ\n"));
930 SK_PNMI_CNT_TX_INTR(pAC, 0);
931 spin_lock(&pAC->TxPort[0][TX_PRIO_LOW].TxDesRingLock);
932 FreeTxDescriptors(pAC, &pAC->TxPort[0][TX_PRIO_LOW]);
933 spin_unlock(&pAC->TxPort[0][TX_PRIO_LOW].TxDesRingLock);
934 }
935 if (IntSrc & IS_XA2_F) {
936 SK_DBG_MSG(NULL, SK_DBGMOD_DRV,
937 SK_DBGCAT_DRV_INT_SRC,
938 ("EOF AS TX2 IRQ\n"));
939 SK_PNMI_CNT_TX_INTR(pAC, 1);
940 spin_lock(&pAC->TxPort[1][TX_PRIO_LOW].TxDesRingLock);
941 FreeTxDescriptors(pAC, &pAC->TxPort[1][TX_PRIO_LOW]);
942 spin_unlock(&pAC->TxPort[1][TX_PRIO_LOW].TxDesRingLock);
943 }
944#if 0 /* only if sync. queues used */
945 if (IntSrc & IS_XS1_F) {
946 SK_DBG_MSG(NULL, SK_DBGMOD_DRV,
947 SK_DBGCAT_DRV_INT_SRC,
948 ("EOF SY TX1 IRQ\n"));
949 SK_PNMI_CNT_TX_INTR(pAC, 1);
950 spin_lock(&pAC->TxPort[0][TX_PRIO_HIGH].TxDesRingLock);
951 FreeTxDescriptors(pAC, 0, TX_PRIO_HIGH);
952 spin_unlock(&pAC->TxPort[0][TX_PRIO_HIGH].TxDesRingLock);
953 ClearTxIrq(pAC, 0, TX_PRIO_HIGH);
954 }
955 if (IntSrc & IS_XS2_F) {
956 SK_DBG_MSG(NULL, SK_DBGMOD_DRV,
957 SK_DBGCAT_DRV_INT_SRC,
958 ("EOF SY TX2 IRQ\n"));
959 SK_PNMI_CNT_TX_INTR(pAC, 1);
960 spin_lock(&pAC->TxPort[1][TX_PRIO_HIGH].TxDesRingLock);
961 FreeTxDescriptors(pAC, 1, TX_PRIO_HIGH);
962 spin_unlock(&pAC->TxPort[1][TX_PRIO_HIGH].TxDesRingLock);
963 ClearTxIrq(pAC, 1, TX_PRIO_HIGH);
964 }
965#endif
966#endif
967
968 /* do all IO at once */
969 if (IntSrc & IS_R1_F)
970 ClearAndStartRx(pAC, 0);
971 if (IntSrc & IS_R2_F)
972 ClearAndStartRx(pAC, 1);
973#ifdef USE_TX_COMPLETE /* only if tx complete interrupt used */
974 if (IntSrc & IS_XA1_F)
975 ClearTxIrq(pAC, 0, TX_PRIO_LOW);
976 if (IntSrc & IS_XA2_F)
977 ClearTxIrq(pAC, 1, TX_PRIO_LOW);
978#endif
979 SK_IN32(pAC->IoBase, B0_ISRC, &IntSrc);
980 } /* while (IntSrc & IRQ_MASK != 0) */
981
982 IntSrc &= pAC->GIni.GIValIrqMask;
983 if ((IntSrc & SPECIAL_IRQS) || pAC->CheckQueue) {
984 SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_INT_SRC,
985 ("SPECIAL IRQ DP-Cards => %x\n", IntSrc));
986 pAC->CheckQueue = SK_FALSE;
987 spin_lock(&pAC->SlowPathLock);
988 if (IntSrc & SPECIAL_IRQS)
989 SkGeSirqIsr(pAC, pAC->IoBase, IntSrc);
990
991 SkEventDispatcher(pAC, pAC->IoBase);
992 spin_unlock(&pAC->SlowPathLock);
993 }
994 /*
995	 * do it all again in case we cleared an interrupt that
996 * came in after handling the ring (OUTs may be delayed
997 * in hardware buffers, but are through after IN)
998 *
999 * rroesler: has been commented out and shifted to
1000 * SkGeDrvEvent(), because it is timer
1001 * guarded now
1002 *
1003 ReceiveIrq(pAC, &pAC->RxPort[0], SK_TRUE);
1004 ReceiveIrq(pAC, &pAC->RxPort[1], SK_TRUE);
1005 */
1006
1007 if (pAC->CheckQueue) {
1008 pAC->CheckQueue = SK_FALSE;
1009 spin_lock(&pAC->SlowPathLock);
1010 SkEventDispatcher(pAC, pAC->IoBase);
1011 spin_unlock(&pAC->SlowPathLock);
1012 }
1013
1014 /* IRQ is processed - Enable IRQs again*/
1015 SK_OUT32(pAC->IoBase, B0_IMSK, pAC->GIni.GIValIrqMask);
1016
1017 return SkIsrRetHandled;
1018} /* SkGeIsr */
1019
1020
1021/****************************************************************************
1022 *
1023 * SkGeIsrOnePort - handle adapter interrupts for single port adapter
1024 *
1025 * Description:
1026 * The interrupt routine is called when the network adapter
1027 * generates an interrupt. It may also be called if another device
1028 * shares this interrupt vector with the driver.
1029 * This is the same as above, but handles only one port.
1030 *
1031 * Returns: N/A
1032 *
1033 */
1034static SkIsrRetVar SkGeIsrOnePort(int irq, void *dev_id)
1035{
1036struct SK_NET_DEVICE *dev = (struct SK_NET_DEVICE *)dev_id;
1037DEV_NET *pNet;
1038SK_AC *pAC;
1039SK_U32 IntSrc; /* interrupts source register contents */
1040
1041 pNet = netdev_priv(dev);
1042 pAC = pNet->pAC;
1043
1044 /*
1045	 * Check if it's our interrupt and process it
1046 */
1047 SK_IN32(pAC->IoBase, B0_SP_ISRC, &IntSrc);
1048 if (IntSrc == 0) {
1049 return SkIsrRetNone;
1050 }
1051
1052 while (((IntSrc & IRQ_MASK) & ~SPECIAL_IRQS) != 0) {
1053#if 0 /* software irq currently not used */
1054 if (IntSrc & IS_IRQ_SW) {
1055 SK_DBG_MSG(NULL, SK_DBGMOD_DRV,
1056 SK_DBGCAT_DRV_INT_SRC,
1057 ("Software IRQ\n"));
1058 }
1059#endif
1060 if (IntSrc & IS_R1_F) {
1061 SK_DBG_MSG(NULL, SK_DBGMOD_DRV,
1062 SK_DBGCAT_DRV_INT_SRC,
1063 ("EOF RX1 IRQ\n"));
1064 ReceiveIrq(pAC, &pAC->RxPort[0], SK_TRUE);
1065 SK_PNMI_CNT_RX_INTR(pAC, 0);
1066 }
1067#ifdef USE_TX_COMPLETE /* only if tx complete interrupt used */
1068 if (IntSrc & IS_XA1_F) {
1069 SK_DBG_MSG(NULL, SK_DBGMOD_DRV,
1070 SK_DBGCAT_DRV_INT_SRC,
1071 ("EOF AS TX1 IRQ\n"));
1072 SK_PNMI_CNT_TX_INTR(pAC, 0);
1073 spin_lock(&pAC->TxPort[0][TX_PRIO_LOW].TxDesRingLock);
1074 FreeTxDescriptors(pAC, &pAC->TxPort[0][TX_PRIO_LOW]);
1075 spin_unlock(&pAC->TxPort[0][TX_PRIO_LOW].TxDesRingLock);
1076 }
1077#if 0 /* only if sync. queues used */
1078 if (IntSrc & IS_XS1_F) {
1079 SK_DBG_MSG(NULL, SK_DBGMOD_DRV,
1080 SK_DBGCAT_DRV_INT_SRC,
1081 ("EOF SY TX1 IRQ\n"));
1082 SK_PNMI_CNT_TX_INTR(pAC, 0);
1083 spin_lock(&pAC->TxPort[0][TX_PRIO_HIGH].TxDesRingLock);
1084 FreeTxDescriptors(pAC, 0, TX_PRIO_HIGH);
1085 spin_unlock(&pAC->TxPort[0][TX_PRIO_HIGH].TxDesRingLock);
1086 ClearTxIrq(pAC, 0, TX_PRIO_HIGH);
1087 }
1088#endif
1089#endif
1090
1091 /* do all IO at once */
1092 if (IntSrc & IS_R1_F)
1093 ClearAndStartRx(pAC, 0);
1094#ifdef USE_TX_COMPLETE /* only if tx complete interrupt used */
1095 if (IntSrc & IS_XA1_F)
1096 ClearTxIrq(pAC, 0, TX_PRIO_LOW);
1097#endif
1098 SK_IN32(pAC->IoBase, B0_ISRC, &IntSrc);
1099 } /* while (IntSrc & IRQ_MASK != 0) */
1100
1101 IntSrc &= pAC->GIni.GIValIrqMask;
1102 if ((IntSrc & SPECIAL_IRQS) || pAC->CheckQueue) {
1103 SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_INT_SRC,
1104 ("SPECIAL IRQ SP-Cards => %x\n", IntSrc));
1105 pAC->CheckQueue = SK_FALSE;
1106 spin_lock(&pAC->SlowPathLock);
1107 if (IntSrc & SPECIAL_IRQS)
1108 SkGeSirqIsr(pAC, pAC->IoBase, IntSrc);
1109
1110 SkEventDispatcher(pAC, pAC->IoBase);
1111 spin_unlock(&pAC->SlowPathLock);
1112 }
1113 /*
1114	 * do it all again in case we cleared an interrupt that
1115 * came in after handling the ring (OUTs may be delayed
1116 * in hardware buffers, but are through after IN)
1117 *
1118 * rroesler: has been commented out and shifted to
1119 * SkGeDrvEvent(), because it is timer
1120 * guarded now
1121 *
1122 ReceiveIrq(pAC, &pAC->RxPort[0], SK_TRUE);
1123 */
1124
1125 /* IRQ is processed - Enable IRQs again*/
1126 SK_OUT32(pAC->IoBase, B0_IMSK, pAC->GIni.GIValIrqMask);
1127
1128 return SkIsrRetHandled;
1129} /* SkGeIsrOnePort */
1130
1131#ifdef CONFIG_NET_POLL_CONTROLLER
1132/****************************************************************************
1133 *
1134 * SkGePollController - polling receive, for netconsole
1135 *
1136 * Description:
1137 * Polling receive - used by netconsole and other diagnostic tools
1138 * to allow network i/o with interrupts disabled.
1139 *
1140 * Returns: N/A
1141 */
1142static void SkGePollController(struct net_device *dev)
1143{
1144 disable_irq(dev->irq);
1145 SkGeIsr(dev->irq, dev);
1146 enable_irq(dev->irq);
1147}
1148#endif
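
/*
 * Editorial note: SkGePollController above is only useful if it is wired
 * into the net_device during probe. A hedged sketch of that hook-up (the
 * probe code is not part of this excerpt; dev->poll_controller is the
 * field used by netpoll on kernels of this vintage):
 */
#if 0	/* illustrative sketch only */
#ifdef CONFIG_NET_POLL_CONTROLLER
	dev->poll_controller = SkGePollController;
#endif
#endif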
1149
1150/****************************************************************************
1151 *
1152 * SkGeOpen - handle start of initialized adapter
1153 *
1154 * Description:
1155 * This function starts the initialized adapter.
1156 * The board level variable is set and the adapter is
1157 * brought to full functionality.
1158 * The device flags are set for operation.
1159 * Do all necessary level 2 initialization, enable interrupts and
1160 * give start command to RLMT.
1161 *
1162 * Returns:
1163 * 0 on success
1164 * != 0 on error
1165 */
1166static int SkGeOpen(
1167struct SK_NET_DEVICE *dev)
1168{
1169 DEV_NET *pNet;
1170 SK_AC *pAC;
1171 unsigned long Flags; /* for spin lock */
1172 int i;
1173 SK_EVPARA EvPara; /* an event parameter union */
1174
1175 pNet = netdev_priv(dev);
1176 pAC = pNet->pAC;
1177
1178 SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_ENTRY,
1179 ("SkGeOpen: pAC=0x%lX:\n", (unsigned long)pAC));
1180
1181#ifdef SK_DIAG_SUPPORT
1182 if (pAC->DiagModeActive == DIAG_ACTIVE) {
1183 if (pAC->Pnmi.DiagAttached == SK_DIAG_RUNNING) {
1184 return (-1); /* still in use by diag; deny actions */
1185 }
1186 }
1187#endif
1188
1189 /* Set blink mode */
1190	if ((pAC->PciDev->vendor == 0x1186) || (pAC->PciDev->vendor == 0x11ab))
1191 pAC->GIni.GILedBlinkCtrl = OEM_CONFIG_VALUE;
1192
1193 if (pAC->BoardLevel == SK_INIT_DATA) {
1194 /* level 1 init common modules here */
1195 if (SkGeInit(pAC, pAC->IoBase, SK_INIT_IO) != 0) {
1196 printk("%s: HWInit (1) failed.\n", pAC->dev[pNet->PortNr]->name);
1197 return (-1);
1198 }
1199 SkI2cInit (pAC, pAC->IoBase, SK_INIT_IO);
1200 SkEventInit (pAC, pAC->IoBase, SK_INIT_IO);
1201 SkPnmiInit (pAC, pAC->IoBase, SK_INIT_IO);
1202 SkAddrInit (pAC, pAC->IoBase, SK_INIT_IO);
1203 SkRlmtInit (pAC, pAC->IoBase, SK_INIT_IO);
1204 SkTimerInit (pAC, pAC->IoBase, SK_INIT_IO);
1205 pAC->BoardLevel = SK_INIT_IO;
1206 }
1207
1208 if (pAC->BoardLevel != SK_INIT_RUN) {
1209 /* tschilling: Level 2 init modules here, check return value. */
1210 if (SkGeInit(pAC, pAC->IoBase, SK_INIT_RUN) != 0) {
1211 printk("%s: HWInit (2) failed.\n", pAC->dev[pNet->PortNr]->name);
1212 return (-1);
1213 }
1214 SkI2cInit (pAC, pAC->IoBase, SK_INIT_RUN);
1215 SkEventInit (pAC, pAC->IoBase, SK_INIT_RUN);
1216 SkPnmiInit (pAC, pAC->IoBase, SK_INIT_RUN);
1217 SkAddrInit (pAC, pAC->IoBase, SK_INIT_RUN);
1218 SkRlmtInit (pAC, pAC->IoBase, SK_INIT_RUN);
1219 SkTimerInit (pAC, pAC->IoBase, SK_INIT_RUN);
1220 pAC->BoardLevel = SK_INIT_RUN;
1221 }
1222
1223 for (i=0; i<pAC->GIni.GIMacsFound; i++) {
1224 /* Enable transmit descriptor polling. */
1225 SkGePollTxD(pAC, pAC->IoBase, i, SK_TRUE);
1226 FillRxRing(pAC, &pAC->RxPort[i]);
1227 }
1228 SkGeYellowLED(pAC, pAC->IoBase, 1);
1229
1230 StartDrvCleanupTimer(pAC);
1231 SkDimEnableModerationIfNeeded(pAC);
1232 SkDimDisplayModerationSettings(pAC);
1233
1234 pAC->GIni.GIValIrqMask &= IRQ_MASK;
1235
1236 /* enable Interrupts */
1237 SK_OUT32(pAC->IoBase, B0_IMSK, pAC->GIni.GIValIrqMask);
1238 SK_OUT32(pAC->IoBase, B0_HWE_IMSK, IRQ_HWE_MASK);
1239
1240 spin_lock_irqsave(&pAC->SlowPathLock, Flags);
1241
1242 if ((pAC->RlmtMode != 0) && (pAC->MaxPorts == 0)) {
1243 EvPara.Para32[0] = pAC->RlmtNets;
1244 EvPara.Para32[1] = -1;
1245 SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_SET_NETS,
1246 EvPara);
1247 EvPara.Para32[0] = pAC->RlmtMode;
1248 EvPara.Para32[1] = 0;
1249 SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_MODE_CHANGE,
1250 EvPara);
1251 }
1252
1253 EvPara.Para32[0] = pNet->NetNr;
1254 EvPara.Para32[1] = -1;
1255 SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_START, EvPara);
1256 SkEventDispatcher(pAC, pAC->IoBase);
1257 spin_unlock_irqrestore(&pAC->SlowPathLock, Flags);
1258
1259 pAC->MaxPorts++;
1260
1261
1262 SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_ENTRY,
1263		("SkGeOpen succeeded\n"));
1264
1265 return (0);
1266} /* SkGeOpen */
1267
1268
1269/****************************************************************************
1270 *
1271 * SkGeClose - Stop initialized adapter
1272 *
1273 * Description:
1274 * Close initialized adapter.
1275 *
1276 * Returns:
1277 * 0 - on success
1278 * error code - on error
1279 */
1280static int SkGeClose(
1281struct SK_NET_DEVICE *dev)
1282{
1283 DEV_NET *pNet;
1284 DEV_NET *newPtrNet;
1285 SK_AC *pAC;
1286
1287 unsigned long Flags; /* for spin lock */
1288 int i;
1289 int PortIdx;
1290 SK_EVPARA EvPara;
1291
1292	pNet = netdev_priv(dev);
1293	pAC = pNet->pAC;
1294
1295	SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_ENTRY,
1296		("SkGeClose: pAC=0x%lX ", (unsigned long)pAC));
1297
1298#ifdef SK_DIAG_SUPPORT
1299 if (pAC->DiagModeActive == DIAG_ACTIVE) {
1300 if (pAC->DiagFlowCtrl == SK_FALSE) {
1301 /*
1302 ** notify that the interface which has been closed
1303 ** by operator interaction must not be started up
1304 ** again when the DIAG has finished.
1305 */
1306 newPtrNet = netdev_priv(pAC->dev[0]);
1307 if (newPtrNet == pNet) {
1308 pAC->WasIfUp[0] = SK_FALSE;
1309 } else {
1310 pAC->WasIfUp[1] = SK_FALSE;
1311 }
1312 return 0; /* return to system everything is fine... */
1313 } else {
1314 pAC->DiagFlowCtrl = SK_FALSE;
1315 }
1316 }
1317#endif
1318
1319 netif_stop_queue(dev);
1320
1321 if (pAC->RlmtNets == 1)
1322 PortIdx = pAC->ActivePort;
1323 else
1324 PortIdx = pNet->NetNr;
1325
1326 StopDrvCleanupTimer(pAC);
1327
1328 /*
1329 * Clear multicast table, promiscuous mode ....
1330 */
1331 SkAddrMcClear(pAC, pAC->IoBase, PortIdx, 0);
1332 SkAddrPromiscuousChange(pAC, pAC->IoBase, PortIdx,
1333 SK_PROM_MODE_NONE);
1334
1335 if (pAC->MaxPorts == 1) {
1336 spin_lock_irqsave(&pAC->SlowPathLock, Flags);
1337 /* disable interrupts */
1338 SK_OUT32(pAC->IoBase, B0_IMSK, 0);
1339 EvPara.Para32[0] = pNet->NetNr;
1340 EvPara.Para32[1] = -1;
1341 SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_STOP, EvPara);
1342 SkEventDispatcher(pAC, pAC->IoBase);
1343 SK_OUT32(pAC->IoBase, B0_IMSK, 0);
1344 /* stop the hardware */
1345 SkGeDeInit(pAC, pAC->IoBase);
1346 pAC->BoardLevel = SK_INIT_DATA;
1347 spin_unlock_irqrestore(&pAC->SlowPathLock, Flags);
1348 } else {
1349
1350 spin_lock_irqsave(&pAC->SlowPathLock, Flags);
1351 EvPara.Para32[0] = pNet->NetNr;
1352 EvPara.Para32[1] = -1;
1353 SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_STOP, EvPara);
1354 SkPnmiEvent(pAC, pAC->IoBase, SK_PNMI_EVT_XMAC_RESET, EvPara);
1355 SkEventDispatcher(pAC, pAC->IoBase);
1356 spin_unlock_irqrestore(&pAC->SlowPathLock, Flags);
1357
1358 /* Stop port */
1359 spin_lock_irqsave(&pAC->TxPort[pNet->PortNr]
1360 [TX_PRIO_LOW].TxDesRingLock, Flags);
1361 SkGeStopPort(pAC, pAC->IoBase, pNet->PortNr,
1362 SK_STOP_ALL, SK_HARD_RST);
1363 spin_unlock_irqrestore(&pAC->TxPort[pNet->PortNr]
1364 [TX_PRIO_LOW].TxDesRingLock, Flags);
1365 }
1366
1367 if (pAC->RlmtNets == 1) {
1368 /* clear all descriptor rings */
1369 for (i=0; i<pAC->GIni.GIMacsFound; i++) {
1370 ReceiveIrq(pAC, &pAC->RxPort[i], SK_TRUE);
1371 ClearRxRing(pAC, &pAC->RxPort[i]);
1372 ClearTxRing(pAC, &pAC->TxPort[i][TX_PRIO_LOW]);
1373 }
1374 } else {
1375 /* clear port descriptor rings */
1376 ReceiveIrq(pAC, &pAC->RxPort[pNet->PortNr], SK_TRUE);
1377 ClearRxRing(pAC, &pAC->RxPort[pNet->PortNr]);
1378 ClearTxRing(pAC, &pAC->TxPort[pNet->PortNr][TX_PRIO_LOW]);
1379 }
1380
1381 SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_ENTRY,
1382 ("SkGeClose: done "));
1383
1384 SK_MEMSET(&(pAC->PnmiBackup), 0, sizeof(SK_PNMI_STRUCT_DATA));
1385 SK_MEMCPY(&(pAC->PnmiBackup), &(pAC->PnmiStruct),
1386 sizeof(SK_PNMI_STRUCT_DATA));
1387
1388 pAC->MaxPorts--;
1389
1390 return (0);
1391} /* SkGeClose */
1392
1393
1394/*****************************************************************************
1395 *
1396 * SkGeXmit - Linux frame transmit function
1397 *
1398 * Description:
1399 * The system calls this function to send frames onto the wire.
1400 * It puts the frame in the tx descriptor ring. If the ring is
1401 * full then, the 'tbusy' flag is set.
1402 *	full, then the 'tbusy' flag is set.
1403 * Returns:
1404 * 0, if everything is ok
1405 * !=0, on error
1406 * WARNING: returning 1 in 'tbusy' case caused system crashes (double
1407 * allocated skb's) !!!
1408 */
1409static int SkGeXmit(struct sk_buff *skb, struct SK_NET_DEVICE *dev)
1410{
1411DEV_NET *pNet;
1412SK_AC *pAC;
1413int Rc; /* return code of XmitFrame */
1414
1415 pNet = netdev_priv(dev);
1416 pAC = pNet->pAC;
1417
1418 if ((!skb_shinfo(skb)->nr_frags) ||
1419 (pAC->GIni.GIChipId == CHIP_ID_GENESIS)) {
1420 /* Don't activate scatter-gather and hardware checksum */
1421
1422 if (pAC->RlmtNets == 2)
1423 Rc = XmitFrame(
1424 pAC,
1425 &pAC->TxPort[pNet->PortNr][TX_PRIO_LOW],
1426 skb);
1427 else
1428 Rc = XmitFrame(
1429 pAC,
1430 &pAC->TxPort[pAC->ActivePort][TX_PRIO_LOW],
1431 skb);
1432 } else {
1433		/* scatter-gather and hardware TCP checksumming enabled */
1434 if (pAC->RlmtNets == 2)
1435 Rc = XmitFrameSG(
1436 pAC,
1437 &pAC->TxPort[pNet->PortNr][TX_PRIO_LOW],
1438 skb);
1439 else
1440 Rc = XmitFrameSG(
1441 pAC,
1442 &pAC->TxPort[pAC->ActivePort][TX_PRIO_LOW],
1443 skb);
1444 }
1445
1446 /* Transmitter out of resources? */
1447 if (Rc <= 0) {
1448 netif_stop_queue(dev);
1449 }
1450
1451 /* If not taken, give buffer ownership back to the
1452 * queueing layer.
1453 */
1454 if (Rc < 0)
1455 return (1);
1456
1457 dev->trans_start = jiffies;
1458 return (0);
1459} /* SkGeXmit */
1460
1461
1462/*****************************************************************************
1463 *
1464 * XmitFrame - fill one socket buffer into the transmit ring
1465 *
1466 * Description:
1467 * This function puts a message into the transmit descriptor ring
1468 *	if there is a descriptor left.
1469 * Linux skb's consist of only one continuous buffer.
1470 * The first step locks the ring. It is held locked
1471 * all time to avoid problems with SWITCH_../PORT_RESET.
1472 *	Then the descriptor is allocated.
1473 * The second part is linking the buffer to the descriptor.
1474 * At the very last, the Control field of the descriptor
1475 * is made valid for the BMU and a start TX command is given
1476 * if necessary.
1477 *
1478 * Returns:
1479 *	> 0 - on success: the number of bytes in the message
1480 * = 0 - on resource shortage: this frame sent or dropped, now
1481 * the ring is full ( -> set tbusy)
1482 * < 0 - on failure: other problems ( -> return failure to upper layers)
1483 */
1484static int XmitFrame(
1485SK_AC *pAC, /* pointer to adapter context */
1486TX_PORT *pTxPort, /* pointer to struct of port to send to */
1487struct sk_buff *pMessage) /* pointer to send-message */
1488{
1489 TXD *pTxd; /* the rxd to fill */
1490 TXD *pOldTxd;
1491 unsigned long Flags;
1492 SK_U64 PhysAddr;
1493 int BytesSend = pMessage->len;
1494
1495 SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_TX_PROGRESS, ("X"));
1496
1497 spin_lock_irqsave(&pTxPort->TxDesRingLock, Flags);
1498#ifndef USE_TX_COMPLETE
1499 FreeTxDescriptors(pAC, pTxPort);
1500#endif
1501 if (pTxPort->TxdRingFree == 0) {
1502 /*
1503		** not enough free descriptors in the ring at the moment.
1504		** Maybe freeing some old ones will help.
1505 */
1506 FreeTxDescriptors(pAC, pTxPort);
1507 if (pTxPort->TxdRingFree == 0) {
1508 spin_unlock_irqrestore(&pTxPort->TxDesRingLock, Flags);
1509 SK_PNMI_CNT_NO_TX_BUF(pAC, pTxPort->PortIndex);
1510 SK_DBG_MSG(NULL, SK_DBGMOD_DRV,
1511 SK_DBGCAT_DRV_TX_PROGRESS,
1512 ("XmitFrame failed\n"));
1513 /*
1514 ** the desired message can not be sent
1515 ** Because tbusy seems to be set, the message
1516 ** should not be freed here. It will be used
1517 ** by the scheduler of the ethernet handler
1518 */
1519 return (-1);
1520 }
1521 }
1522
1523 /*
1524	** If the passed socket buffer is shorter than the minimum frame
1525	** size of 60 bytes, pad it to 60 bytes and fill all bytes between
1526	** the original packet end and the new packet end with 0x00.
1527	** This works around faulty padding by the HW with 0xaa bytes.
1528 */
1529 if (BytesSend < C_LEN_ETHERNET_MINSIZE) {
1530 if (skb_padto(pMessage, C_LEN_ETHERNET_MINSIZE)) {
1531 spin_unlock_irqrestore(&pTxPort->TxDesRingLock, Flags);
1532 return 0;
1533 }
1534 pMessage->len = C_LEN_ETHERNET_MINSIZE;
1535 }
1536
1537 /*
1538 ** advance head counter behind descriptor needed for this frame,
1539 ** so that needed descriptor is reserved from that on. The next
1540 ** action will be to add the passed buffer to the TX-descriptor
1541 */
1542 pTxd = pTxPort->pTxdRingHead;
1543 pTxPort->pTxdRingHead = pTxd->pNextTxd;
1544 pTxPort->TxdRingFree--;
1545
1546#ifdef SK_DUMP_TX
1547 DumpMsg(pMessage, "XmitFrame");
1548#endif
1549
1550 /*
1551 ** First step is to map the data to be sent via the adapter onto
1552 ** the DMA memory. Kernel 2.2 uses virt_to_bus(), but kernels 2.4
1553 ** and 2.6 need to use pci_map_page() for that mapping.
1554 */
1555 PhysAddr = (SK_U64) pci_map_page(pAC->PciDev,
1556 virt_to_page(pMessage->data),
1557 ((unsigned long) pMessage->data & ~PAGE_MASK),
1558 pMessage->len,
1559 PCI_DMA_TODEVICE);
1560 pTxd->VDataLow = (SK_U32) (PhysAddr & 0xffffffff);
1561 pTxd->VDataHigh = (SK_U32) (PhysAddr >> 32);
1562 pTxd->pMBuf = pMessage;
1563
1564 if (pMessage->ip_summed == CHECKSUM_PARTIAL) {
1565 u16 hdrlen = skb_transport_offset(pMessage);
1566 u16 offset = hdrlen + pMessage->csum_offset;
1567
1568 if ((ipip_hdr(pMessage)->protocol == IPPROTO_UDP) &&
1569 (pAC->GIni.GIChipRev == 0) &&
1570 (pAC->GIni.GIChipId == CHIP_ID_YUKON)) {
1571 pTxd->TBControl = BMU_TCP_CHECK;
1572 } else {
1573 pTxd->TBControl = BMU_UDP_CHECK;
1574 }
1575
1576 pTxd->TcpSumOfs = 0;
1577 pTxd->TcpSumSt = hdrlen;
1578 pTxd->TcpSumWr = offset;
1579
1580 pTxd->TBControl |= BMU_OWN | BMU_STF |
1581 BMU_SW | BMU_EOF |
1582#ifdef USE_TX_COMPLETE
1583 BMU_IRQ_EOF |
1584#endif
1585 pMessage->len;
1586 } else {
1587 pTxd->TBControl = BMU_OWN | BMU_STF | BMU_CHECK |
1588 BMU_SW | BMU_EOF |
1589#ifdef USE_TX_COMPLETE
1590 BMU_IRQ_EOF |
1591#endif
1592 pMessage->len;
1593 }
1594
1595 /*
1596 ** If previous descriptor already done, give TX start cmd
1597 */
1598 pOldTxd = xchg(&pTxPort->pTxdRingPrev, pTxd);
1599 if ((pOldTxd->TBControl & BMU_OWN) == 0) {
1600 SK_OUT8(pTxPort->HwAddr, Q_CSR, CSR_START);
1601 }
1602
1603 /*
1604 ** after releasing the lock, the skb may immediately be free'd
1605 */
1606 spin_unlock_irqrestore(&pTxPort->TxDesRingLock, Flags);
1607 if (pTxPort->TxdRingFree != 0) {
1608 return (BytesSend);
1609 } else {
1610 return (0);
1611 }
1612
1613} /* XmitFrame */
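
/*
 * Editorial worked example for the checksum offload fields set above:
 * for a plain TCP/IPv4 frame the transport header starts after 14 bytes
 * of Ethernet header plus 20 bytes of IP header, and the TCP checksum
 * field sits 16 bytes into the TCP header. The values are assumptions
 * for illustration, not taken from a real skb:
 */
#if 0	/* illustrative sketch only */
	u16 hdrlen = 14 + 20;		/* skb_transport_offset(pMessage)	*/
	u16 offset = hdrlen + 16;	/* hdrlen + pMessage->csum_offset	*/
	pTxd->TcpSumOfs = 0;		/* no initial checksum value		*/
	pTxd->TcpSumSt  = hdrlen;	/* hardware starts summing here		*/
	pTxd->TcpSumWr  = offset;	/* hardware writes the result here	*/
#endif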
1614
1615/*****************************************************************************
1616 *
1617 * XmitFrameSG - fill one socket buffer into the transmit ring
1618 * (use SG and TCP/UDP hardware checksumming)
1619 *
1620 * Description:
1621 * This function puts a message into the transmit descriptor ring
1622 *	if there is a descriptor left.
1623 *
1624 * Returns:
1625 *	> 0 - on success: the number of bytes in the message
1626 * = 0 - on resource shortage: this frame sent or dropped, now
1627 * the ring is full ( -> set tbusy)
1628 * < 0 - on failure: other problems ( -> return failure to upper layers)
1629 */
1630static int XmitFrameSG(
1631SK_AC *pAC, /* pointer to adapter context */
1632TX_PORT *pTxPort, /* pointer to struct of port to send to */
1633struct sk_buff *pMessage) /* pointer to send-message */
1634{
1635
1636 TXD *pTxd;
1637 TXD *pTxdFst;
1638 TXD *pTxdLst;
1639 int CurrFrag;
1640 int BytesSend;
1641 skb_frag_t *sk_frag;
1642 SK_U64 PhysAddr;
1643 unsigned long Flags;
1644 SK_U32 Control;
1645
1646 spin_lock_irqsave(&pTxPort->TxDesRingLock, Flags);
1647#ifndef USE_TX_COMPLETE
1648 FreeTxDescriptors(pAC, pTxPort);
1649#endif
1650	if ((skb_shinfo(pMessage)->nr_frags + 1) > pTxPort->TxdRingFree) {
1651 FreeTxDescriptors(pAC, pTxPort);
1652 if ((skb_shinfo(pMessage)->nr_frags + 1) > pTxPort->TxdRingFree) {
1653 spin_unlock_irqrestore(&pTxPort->TxDesRingLock, Flags);
1654 SK_PNMI_CNT_NO_TX_BUF(pAC, pTxPort->PortIndex);
1655 SK_DBG_MSG(NULL, SK_DBGMOD_DRV,
1656 SK_DBGCAT_DRV_TX_PROGRESS,
1657 ("XmitFrameSG failed - Ring full\n"));
1658 /* this message can not be sent now */
1659 return(-1);
1660 }
1661 }
1662
1663 pTxd = pTxPort->pTxdRingHead;
1664 pTxdFst = pTxd;
1665 pTxdLst = pTxd;
1666 BytesSend = 0;
1667
1668 /*
1669 ** Map the first fragment (header) into the DMA-space
1670 */
1671 PhysAddr = (SK_U64) pci_map_page(pAC->PciDev,
1672 virt_to_page(pMessage->data),
1673 ((unsigned long) pMessage->data & ~PAGE_MASK),
1674 skb_headlen(pMessage),
1675 PCI_DMA_TODEVICE);
1676
1677 pTxd->VDataLow = (SK_U32) (PhysAddr & 0xffffffff);
1678 pTxd->VDataHigh = (SK_U32) (PhysAddr >> 32);
1679
1680 /*
1681 ** Does the HW need to evaluate checksum for TCP or UDP packets?
1682 */
1683 if (pMessage->ip_summed == CHECKSUM_PARTIAL) {
1684 u16 hdrlen = skb_transport_offset(pMessage);
1685 u16 offset = hdrlen + pMessage->csum_offset;
1686
1687 Control = BMU_STFWD;
1688
1689 /*
1690 ** We have to use the opcode for tcp here, because the
1691 ** opcode for udp is not working in the hardware yet
1692 ** (Revision 2.0)
1693 */
1694 if ((ipip_hdr(pMessage)->protocol == IPPROTO_UDP) &&
1695 (pAC->GIni.GIChipRev == 0) &&
1696 (pAC->GIni.GIChipId == CHIP_ID_YUKON)) {
1697 Control |= BMU_TCP_CHECK;
1698 } else {
1699 Control |= BMU_UDP_CHECK;
1700 }
1701
1702 pTxd->TcpSumOfs = 0;
1703 pTxd->TcpSumSt = hdrlen;
1704 pTxd->TcpSumWr = offset;
1705 } else
1706 Control = BMU_CHECK | BMU_SW;
1707
1708 pTxd->TBControl = BMU_STF | Control | skb_headlen(pMessage);
1709
1710 pTxd = pTxd->pNextTxd;
1711 pTxPort->TxdRingFree--;
1712 BytesSend += skb_headlen(pMessage);
1713
1714 /*
1715 ** Browse over all SG fragments and map each of them into the DMA space
1716 */
1717 for (CurrFrag = 0; CurrFrag < skb_shinfo(pMessage)->nr_frags; CurrFrag++) {
1718 sk_frag = &skb_shinfo(pMessage)->frags[CurrFrag];
1719 /*
1720 ** we already have the proper value in entry
1721 */
1722 PhysAddr = (SK_U64) pci_map_page(pAC->PciDev,
1723 sk_frag->page,
1724 sk_frag->page_offset,
1725 sk_frag->size,
1726 PCI_DMA_TODEVICE);
1727
1728 pTxd->VDataLow = (SK_U32) (PhysAddr & 0xffffffff);
1729 pTxd->VDataHigh = (SK_U32) (PhysAddr >> 32);
1730 pTxd->pMBuf = pMessage;
1731
1732 pTxd->TBControl = Control | BMU_OWN | sk_frag->size;
1733
1734 /*
1735 ** Do we have the last fragment?
1736 */
1737 if( (CurrFrag+1) == skb_shinfo(pMessage)->nr_frags ) {
1738#ifdef USE_TX_COMPLETE
1739 pTxd->TBControl |= BMU_EOF | BMU_IRQ_EOF;
1740#else
1741 pTxd->TBControl |= BMU_EOF;
1742#endif
1743 pTxdFst->TBControl |= BMU_OWN | BMU_SW;
1744 }
1745 pTxdLst = pTxd;
1746 pTxd = pTxd->pNextTxd;
1747 pTxPort->TxdRingFree--;
1748 BytesSend += sk_frag->size;
1749 }
1750
1751 /*
1752 ** If previous descriptor already done, give TX start cmd
1753 */
1754 if ((pTxPort->pTxdRingPrev->TBControl & BMU_OWN) == 0) {
1755 SK_OUT8(pTxPort->HwAddr, Q_CSR, CSR_START);
1756 }
1757
1758 pTxPort->pTxdRingPrev = pTxdLst;
1759 pTxPort->pTxdRingHead = pTxd;
1760
1761 spin_unlock_irqrestore(&pTxPort->TxDesRingLock, Flags);
1762
1763 if (pTxPort->TxdRingFree > 0) {
1764 return (BytesSend);
1765 } else {
1766 return (0);
1767 }
1768}
1769
1770/*****************************************************************************
1771 *
1772 * FreeTxDescriptors - release descriptors from the descriptor ring
1773 *
1774 * Description:
1775 * This function releases descriptors from a transmit ring if they
1776 * have been sent by the BMU.
1777 * If a descriptors is sent, it can be freed and the message can
1778 * be freed, too.
1779 * The SOFTWARE controllable bit is used to prevent running around a
1780 * completely free ring for ever. If this bit is no set in the
1781 * frame (by XmitFrame), this frame has never been sent or is
1782 * already freed.
1783 * The Tx descriptor ring lock must be held while calling this function !!!
1784 *
1785 * Returns:
1786 * none
1787 */
1788static void FreeTxDescriptors(
1789SK_AC *pAC, /* pointer to the adapter context */
1790TX_PORT *pTxPort) /* pointer to destination port structure */
1791{
1792TXD *pTxd; /* pointer to the checked descriptor */
1793TXD *pNewTail; /* pointer to 'end' of the ring */
1794SK_U32 Control; /* TBControl field of descriptor */
1795SK_U64 PhysAddr; /* address of DMA mapping */
1796
1797 pNewTail = pTxPort->pTxdRingTail;
1798 pTxd = pNewTail;
1799 /*
1800 ** loop forever; exits if BMU_SW bit not set in start frame
1801 ** or BMU_OWN bit set in any frame
1802 */
1803 while (1) {
1804 Control = pTxd->TBControl;
1805 if ((Control & BMU_SW) == 0) {
1806 /*
1807 ** software controllable bit is set in first
1808 ** fragment when given to BMU. Not set means that
1809 ** this fragment was never sent or is already
1810 ** freed ( -> ring completely free now).
1811 */
1812 pTxPort->pTxdRingTail = pTxd;
1813 netif_wake_queue(pAC->dev[pTxPort->PortIndex]);
1814 return;
1815 }
1816 if (Control & BMU_OWN) {
1817 pTxPort->pTxdRingTail = pTxd;
1818 if (pTxPort->TxdRingFree > 0) {
1819 netif_wake_queue(pAC->dev[pTxPort->PortIndex]);
1820 }
1821 return;
1822 }
1823
1824 /*
1825 ** release the DMA mapping, because until not unmapped
1826 ** this buffer is considered being under control of the
1827 ** adapter card!
1828 */
1829 PhysAddr = ((SK_U64) pTxd->VDataHigh) << (SK_U64) 32;
1830 PhysAddr |= (SK_U64) pTxd->VDataLow;
1831 pci_unmap_page(pAC->PciDev, PhysAddr,
1832 pTxd->pMBuf->len,
1833 PCI_DMA_TODEVICE);
1834
1835 if (Control & BMU_EOF)
1836 DEV_KFREE_SKB_ANY(pTxd->pMBuf); /* free message */
1837
1838 pTxPort->TxdRingFree++;
1839 pTxd->TBControl &= ~BMU_SW;
1840 pTxd = pTxd->pNextTxd; /* point behind fragment with EOF */
1841 } /* while(forever) */
1842} /* FreeTxDescriptors */
1843
1844/*****************************************************************************
1845 *
1846 * FillRxRing - fill the receive ring with valid descriptors
1847 *
1848 * Description:
1849 * This function fills the receive ring descriptors with data
1850 * segments and makes them valid for the BMU.
1851 * The active ring is filled completely, if possible.
1852 *	The non-active ring is filled only partially to save memory.
1853 *
1854 * Description of rx ring structure:
1855 * head - points to the descriptor which will be used next by the BMU
1856 * tail - points to the next descriptor to give to the BMU
1857 *
1858 * Returns: N/A
1859 */
1860static void FillRxRing(
1861SK_AC *pAC, /* pointer to the adapter context */
1862RX_PORT *pRxPort) /* ptr to port struct for which the ring
1863 should be filled */
1864{
1865unsigned long Flags;
1866
1867 spin_lock_irqsave(&pRxPort->RxDesRingLock, Flags);
1868 while (pRxPort->RxdRingFree > pRxPort->RxFillLimit) {
1869 if(!FillRxDescriptor(pAC, pRxPort))
1870 break;
1871 }
1872 spin_unlock_irqrestore(&pRxPort->RxDesRingLock, Flags);
1873} /* FillRxRing */
1874
1875
1876/*****************************************************************************
1877 *
1878 * FillRxDescriptor - fill one buffer into the receive ring
1879 *
1880 * Description:
1881 * The function allocates a new receive buffer and
1882 * puts it into the next descriptor.
1883 *
1884 * Returns:
1885 * SK_TRUE - a buffer was added to the ring
1886 * SK_FALSE - a buffer could not be added
1887 */
1888static SK_BOOL FillRxDescriptor(
1889SK_AC *pAC, /* pointer to the adapter context struct */
1890RX_PORT *pRxPort) /* ptr to port struct of ring to fill */
1891{
1892struct sk_buff *pMsgBlock; /* pointer to a new message block */
1893RXD *pRxd; /* the rxd to fill */
1894SK_U16 Length; /* data fragment length */
1895SK_U64 PhysAddr; /* physical address of a rx buffer */
1896
1897 pMsgBlock = alloc_skb(pAC->RxBufSize, GFP_ATOMIC);
1898 if (pMsgBlock == NULL) {
1899 SK_DBG_MSG(NULL, SK_DBGMOD_DRV,
1900 SK_DBGCAT_DRV_ENTRY,
1901 ("%s: Allocation of rx buffer failed !\n",
1902 pAC->dev[pRxPort->PortIndex]->name));
1903 SK_PNMI_CNT_NO_RX_BUF(pAC, pRxPort->PortIndex);
1904 return(SK_FALSE);
1905 }
1906 skb_reserve(pMsgBlock, 2); /* to align IP frames */
1907 /* skb allocated ok, so add buffer */
1908 pRxd = pRxPort->pRxdRingTail;
1909 pRxPort->pRxdRingTail = pRxd->pNextRxd;
1910 pRxPort->RxdRingFree--;
1911 Length = pAC->RxBufSize;
1912 PhysAddr = (SK_U64) pci_map_page(pAC->PciDev,
1913 virt_to_page(pMsgBlock->data),
1914 ((unsigned long) pMsgBlock->data &
1915 ~PAGE_MASK),
1916 pAC->RxBufSize - 2,
1917 PCI_DMA_FROMDEVICE);
1918
1919 pRxd->VDataLow = (SK_U32) (PhysAddr & 0xffffffff);
1920 pRxd->VDataHigh = (SK_U32) (PhysAddr >> 32);
1921 pRxd->pMBuf = pMsgBlock;
1922 pRxd->RBControl = BMU_OWN |
1923 BMU_STF |
1924 BMU_IRQ_EOF |
1925 BMU_TCP_CHECK |
1926 Length;
1927 return (SK_TRUE);
1928
1929} /* FillRxDescriptor */
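
/*
 * Editorial note: the skb_reserve(pMsgBlock, 2) in FillRxDescriptor shifts
 * the frame start by 2 bytes, so that after the 14 byte Ethernet header the
 * IP header begins on a 4 byte boundary (2 + 14 = 16). This is also why the
 * DMA mapping above uses pAC->RxBufSize - 2 as its length.
 */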
1930
1931
1932/*****************************************************************************
1933 *
1934 * ReQueueRxBuffer - fill one buffer back into the receive ring
1935 *
1936 * Description:
1937 * Fill a given buffer back into the rx ring. The buffer
1938 *	has been previously allocated and aligned, and its physical
1939 *	address calculated, so none of this needs to be done again.
1940 *
1941 * Returns: N/A
1942 */
1943static void ReQueueRxBuffer(
1944SK_AC *pAC, /* pointer to the adapter context struct */
1945RX_PORT *pRxPort, /* ptr to port struct of ring to fill */
1946struct sk_buff *pMsg, /* pointer to the buffer */
1947SK_U32 PhysHigh, /* phys address high dword */
1948SK_U32 PhysLow) /* phys address low dword */
1949{
1950RXD *pRxd; /* the rxd to fill */
1951SK_U16 Length; /* data fragment length */
1952
1953 pRxd = pRxPort->pRxdRingTail;
1954 pRxPort->pRxdRingTail = pRxd->pNextRxd;
1955 pRxPort->RxdRingFree--;
1956 Length = pAC->RxBufSize;
1957
1958 pRxd->VDataLow = PhysLow;
1959 pRxd->VDataHigh = PhysHigh;
1960 pRxd->pMBuf = pMsg;
1961 pRxd->RBControl = BMU_OWN |
1962 BMU_STF |
1963 BMU_IRQ_EOF |
1964 BMU_TCP_CHECK |
1965 Length;
1966 return;
1967} /* ReQueueRxBuffer */
1968
1969/*****************************************************************************
1970 *
1971 * ReceiveIrq - handle a receive IRQ
1972 *
1973 * Description:
1974 * This function is called when a receive IRQ is set.
1975 * It walks the receive descriptor ring and sends up all
1976 * frames that are complete.
1977 *
1978 * Returns: N/A
1979 */
1980static void ReceiveIrq(
1981 SK_AC *pAC, /* pointer to adapter context */
1982 RX_PORT *pRxPort, /* pointer to receive port struct */
1983 SK_BOOL SlowPathLock) /* indicates if SlowPathLock is needed */
1984{
1985RXD *pRxd; /* pointer to receive descriptors */
1986SK_U32 Control; /* control field of descriptor */
1987struct sk_buff *pMsg; /* pointer to message holding frame */
1988struct sk_buff *pNewMsg; /* pointer to a new message for copying frame */
1989int FrameLength; /* total length of received frame */
1990SK_MBUF *pRlmtMbuf; /* ptr to a buffer for giving a frame to rlmt */
1991SK_EVPARA EvPara; /* an event parameter union */
1992unsigned long Flags; /* for spin lock */
1993int PortIndex = pRxPort->PortIndex;
1994unsigned int Offset;
1995unsigned int NumBytes;
1996unsigned int ForRlmt;
1997SK_BOOL IsBc;
1998SK_BOOL IsMc;
1999SK_BOOL IsBadFrame; /* Bad frame */
2000
2001SK_U32 FrameStat;
2002SK_U64 PhysAddr;
2003
2004rx_start:
2005 /* do forever; exit if BMU_OWN found */
2006 for ( pRxd = pRxPort->pRxdRingHead ;
2007 pRxPort->RxdRingFree < pAC->RxDescrPerRing ;
2008 pRxd = pRxd->pNextRxd,
2009 pRxPort->pRxdRingHead = pRxd,
2010 pRxPort->RxdRingFree ++) {
2011
2012 /*
2013 * For a better understanding of this loop
2014 * Go through every descriptor beginning at the head
2015 * Please note: the ring might be completely received so the OWN bit
2016	 * set is not a good criterion for leaving that loop.
2017 * Therefore the RingFree counter is used.
2018 * On entry of this loop pRxd is a pointer to the Rxd that needs
2019 * to be checked next.
2020 */
2021
2022 Control = pRxd->RBControl;
2023
2024 /* check if this descriptor is ready */
2025 if ((Control & BMU_OWN) != 0) {
2026 /* this descriptor is not yet ready */
2027 /* This is the usual end of the loop */
2028 /* We don't need to start the ring again */
2029 FillRxRing(pAC, pRxPort);
2030 return;
2031 }
2032 pAC->DynIrqModInfo.NbrProcessedDescr++;
2033
2034 /* get length of frame and check it */
2035 FrameLength = Control & BMU_BBC;
2036 if (FrameLength > pAC->RxBufSize) {
2037 goto rx_failed;
2038 }
2039
2040 /* check for STF and EOF */
2041 if ((Control & (BMU_STF | BMU_EOF)) != (BMU_STF | BMU_EOF)) {
2042 goto rx_failed;
2043 }
2044
2045 /* here we have a complete frame in the ring */
2046 pMsg = pRxd->pMBuf;
2047
2048 FrameStat = pRxd->FrameStat;
2049
2050 /* check for frame length mismatch */
2051#define XMR_FS_LEN_SHIFT 18
2052#define GMR_FS_LEN_SHIFT 16
2053 if (pAC->GIni.GIChipId == CHIP_ID_GENESIS) {
2054 if (FrameLength != (SK_U32) (FrameStat >> XMR_FS_LEN_SHIFT)) {
2055 SK_DBG_MSG(NULL, SK_DBGMOD_DRV,
2056 SK_DBGCAT_DRV_RX_PROGRESS,
2057 ("skge: Frame length mismatch (%u/%u).\n",
2058 FrameLength,
2059 (SK_U32) (FrameStat >> XMR_FS_LEN_SHIFT)));
2060 goto rx_failed;
2061 }
2062 }
2063 else {
2064 if (FrameLength != (SK_U32) (FrameStat >> GMR_FS_LEN_SHIFT)) {
2065 SK_DBG_MSG(NULL, SK_DBGMOD_DRV,
2066 SK_DBGCAT_DRV_RX_PROGRESS,
2067 ("skge: Frame length mismatch (%u/%u).\n",
2068 FrameLength,
2069					(SK_U32) (FrameStat >> GMR_FS_LEN_SHIFT)));
2070 goto rx_failed;
2071 }
2072 }
2073
2074 /* Set Rx Status */
2075 if (pAC->GIni.GIChipId == CHIP_ID_GENESIS) {
2076 IsBc = (FrameStat & XMR_FS_BC) != 0;
2077 IsMc = (FrameStat & XMR_FS_MC) != 0;
2078 IsBadFrame = (FrameStat &
2079 (XMR_FS_ANY_ERR | XMR_FS_2L_VLAN)) != 0;
2080 } else {
2081 IsBc = (FrameStat & GMR_FS_BC) != 0;
2082 IsMc = (FrameStat & GMR_FS_MC) != 0;
2083 IsBadFrame = (((FrameStat & GMR_FS_ANY_ERR) != 0) ||
2084 ((FrameStat & GMR_FS_RX_OK) == 0));
2085 }
2086
2087 SK_DBG_MSG(NULL, SK_DBGMOD_DRV, 0,
2088 ("Received frame of length %d on port %d\n",
2089 FrameLength, PortIndex));
2090 SK_DBG_MSG(NULL, SK_DBGMOD_DRV, 0,
2091 ("Number of free rx descriptors: %d\n",
2092 pRxPort->RxdRingFree));
2093/* DumpMsg(pMsg, "Rx"); */
2094
2095 if ((Control & BMU_STAT_VAL) != BMU_STAT_VAL || (IsBadFrame)) {
2096#if 0
2097 (FrameStat & (XMR_FS_ANY_ERR | XMR_FS_2L_VLAN)) != 0) {
2098#endif
2099 /* there is a receive error in this frame */
2100 SK_DBG_MSG(NULL, SK_DBGMOD_DRV,
2101 SK_DBGCAT_DRV_RX_PROGRESS,
2102 ("skge: Error in received frame, dropped!\n"
2103 "Control: %x\nRxStat: %x\n",
2104 Control, FrameStat));
2105
2106 ReQueueRxBuffer(pAC, pRxPort, pMsg,
2107 pRxd->VDataHigh, pRxd->VDataLow);
2108
2109 continue;
2110 }
2111
2112 /*
2113 * if short frame then copy data to reduce memory waste
2114 */
2115 if ((FrameLength < SK_COPY_THRESHOLD) &&
2116 ((pNewMsg = alloc_skb(FrameLength+2, GFP_ATOMIC)) != NULL)) {
2117 /*
2118			 * Short frame detected and allocation successful
2119 */
2120 /* use new skb and copy data */
2121 skb_reserve(pNewMsg, 2);
2122 skb_put(pNewMsg, FrameLength);
2123 PhysAddr = ((SK_U64) pRxd->VDataHigh) << (SK_U64)32;
2124 PhysAddr |= (SK_U64) pRxd->VDataLow;
2125
2126 pci_dma_sync_single_for_cpu(pAC->PciDev,
2127 (dma_addr_t) PhysAddr,
2128 FrameLength,
2129 PCI_DMA_FROMDEVICE);
2130 skb_copy_to_linear_data(pNewMsg, pMsg, FrameLength);
2131
2132 pci_dma_sync_single_for_device(pAC->PciDev,
2133 (dma_addr_t) PhysAddr,
2134 FrameLength,
2135 PCI_DMA_FROMDEVICE);
2136 ReQueueRxBuffer(pAC, pRxPort, pMsg,
2137 pRxd->VDataHigh, pRxd->VDataLow);
2138
2139 pMsg = pNewMsg;
2140
2141 }
2142 else {
2143 /*
2144 * if large frame, or SKB allocation failed, pass
2145 * the SKB directly to the networking
2146 */
2147
2148 PhysAddr = ((SK_U64) pRxd->VDataHigh) << (SK_U64)32;
2149 PhysAddr |= (SK_U64) pRxd->VDataLow;
2150
2151 /* release the DMA mapping */
2152 pci_unmap_single(pAC->PciDev,
2153 PhysAddr,
2154 pAC->RxBufSize - 2,
2155 PCI_DMA_FROMDEVICE);
2156
2157 /* set length in message */
2158 skb_put(pMsg, FrameLength);
2159		} /* frame > SK_COPY_THRESHOLD */
2160
2161#ifdef USE_SK_RX_CHECKSUM
2162 pMsg->csum = pRxd->TcpSums & 0xffff;
2163 pMsg->ip_summed = CHECKSUM_COMPLETE;
2164#else
2165 pMsg->ip_summed = CHECKSUM_NONE;
2166#endif
2167
2168 SK_DBG_MSG(NULL, SK_DBGMOD_DRV, 1,("V"));
2169 ForRlmt = SK_RLMT_RX_PROTOCOL;
2170#if 0
2171 IsBc = (FrameStat & XMR_FS_BC)==XMR_FS_BC;
2172#endif
2173 SK_RLMT_PRE_LOOKAHEAD(pAC, PortIndex, FrameLength,
2174 IsBc, &Offset, &NumBytes);
2175 if (NumBytes != 0) {
2176#if 0
2177 IsMc = (FrameStat & XMR_FS_MC)==XMR_FS_MC;
2178#endif
2179 SK_RLMT_LOOKAHEAD(pAC, PortIndex,
2180 &pMsg->data[Offset],
2181 IsBc, IsMc, &ForRlmt);
2182 }
2183 if (ForRlmt == SK_RLMT_RX_PROTOCOL) {
2184 SK_DBG_MSG(NULL, SK_DBGMOD_DRV, 1,("W"));
2185 /* send up only frames from active port */
2186 if ((PortIndex == pAC->ActivePort) ||
2187 (pAC->RlmtNets == 2)) {
2188 /* frame for upper layer */
2189 SK_DBG_MSG(NULL, SK_DBGMOD_DRV, 1,("U"));
2190#ifdef xDEBUG
2191 DumpMsg(pMsg, "Rx");
2192#endif
2193 SK_PNMI_CNT_RX_OCTETS_DELIVERED(pAC,
2194 FrameLength, pRxPort->PortIndex);
2195
2196 pMsg->protocol = eth_type_trans(pMsg,
2197 pAC->dev[pRxPort->PortIndex]);
2198 netif_rx(pMsg);
2199 pAC->dev[pRxPort->PortIndex]->last_rx = jiffies;
2200 }
2201 else {
2202 /* drop frame */
2203 SK_DBG_MSG(NULL, SK_DBGMOD_DRV,
2204 SK_DBGCAT_DRV_RX_PROGRESS,
2205 ("D"));
2206 DEV_KFREE_SKB(pMsg);
2207 }
2208
2209 } /* if not for rlmt */
2210 else {
2211 /* packet for rlmt */
2212 SK_DBG_MSG(NULL, SK_DBGMOD_DRV,
2213 SK_DBGCAT_DRV_RX_PROGRESS, ("R"));
2214 pRlmtMbuf = SkDrvAllocRlmtMbuf(pAC,
2215 pAC->IoBase, FrameLength);
2216 if (pRlmtMbuf != NULL) {
2217 pRlmtMbuf->pNext = NULL;
2218 pRlmtMbuf->Length = FrameLength;
2219 pRlmtMbuf->PortIdx = PortIndex;
2220 EvPara.pParaPtr = pRlmtMbuf;
2221 memcpy((char*)(pRlmtMbuf->pData),
2222 (char*)(pMsg->data),
2223 FrameLength);
2224
2225 /* SlowPathLock needed? */
2226 if (SlowPathLock == SK_TRUE) {
2227 spin_lock_irqsave(&pAC->SlowPathLock, Flags);
2228 SkEventQueue(pAC, SKGE_RLMT,
2229 SK_RLMT_PACKET_RECEIVED,
2230 EvPara);
2231 pAC->CheckQueue = SK_TRUE;
2232 spin_unlock_irqrestore(&pAC->SlowPathLock, Flags);
2233 } else {
2234 SkEventQueue(pAC, SKGE_RLMT,
2235 SK_RLMT_PACKET_RECEIVED,
2236 EvPara);
2237 pAC->CheckQueue = SK_TRUE;
2238 }
2239
2240 SK_DBG_MSG(NULL, SK_DBGMOD_DRV,
2241 SK_DBGCAT_DRV_RX_PROGRESS,
2242 ("Q"));
2243 }
2244 if ((pAC->dev[pRxPort->PortIndex]->flags &
2245 (IFF_PROMISC | IFF_ALLMULTI)) != 0 ||
2246 (ForRlmt & SK_RLMT_RX_PROTOCOL) ==
2247 SK_RLMT_RX_PROTOCOL) {
2248 pMsg->protocol = eth_type_trans(pMsg,
2249 pAC->dev[pRxPort->PortIndex]);
2250 netif_rx(pMsg);
2251 pAC->dev[pRxPort->PortIndex]->last_rx = jiffies;
2252 }
2253 else {
2254 DEV_KFREE_SKB(pMsg);
2255 }
2256
2257 } /* if packet for rlmt */
2258 } /* for ... scanning the RXD ring */
2259
2260 /* RXD ring is empty -> fill and restart */
2261 FillRxRing(pAC, pRxPort);
2262 /* do not start if called from Close */
2263 if (pAC->BoardLevel > SK_INIT_DATA) {
2264 ClearAndStartRx(pAC, PortIndex);
2265 }
2266 return;
2267
2268rx_failed:
2269 /* remove error frame */
2270 SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_ERROR,
2271		("Bad descriptor, length: 0x%x\n", FrameLength));
2272
2273 /* release the DMA mapping */
2274
2275 PhysAddr = ((SK_U64) pRxd->VDataHigh) << (SK_U64)32;
2276 PhysAddr |= (SK_U64) pRxd->VDataLow;
2277 pci_unmap_page(pAC->PciDev,
2278 PhysAddr,
2279 pAC->RxBufSize - 2,
2280 PCI_DMA_FROMDEVICE);
2281 DEV_KFREE_SKB_IRQ(pRxd->pMBuf);
2282 pRxd->pMBuf = NULL;
2283 pRxPort->RxdRingFree++;
2284 pRxPort->pRxdRingHead = pRxd->pNextRxd;
2285 goto rx_start;
2286
2287} /* ReceiveIrq */
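
/*
 * A minimal, standalone sketch of the descriptor scan pattern used by
 * ReceiveIrq() above. Illustration only: the variables and helpers are the
 * ones from this file, but the reduced loop body is hypothetical.
 */
#if 0
	pRxd = pRxPort->pRxdRingHead;
	while (pRxPort->RxdRingFree < pAC->RxDescrPerRing) {
		if ((pRxd->RBControl & BMU_OWN) != 0)
			break;				/* hardware still owns this Rxd */
		/* ... process the completed frame behind pRxd ... */
		pRxd = pRxd->pNextRxd;
		pRxPort->pRxdRingHead = pRxd;
		pRxPort->RxdRingFree++;			/* count processed Rxds instead of */
							/* relying on the OWN bit alone    */
	}
	FillRxRing(pAC, pRxPort);			/* hand processed Rxds back to the BMU */
#endif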
2288
2289
2290/*****************************************************************************
2291 *
2292 * ClearAndStartRx - give a start receive command to BMU, clear IRQ
2293 *
2294 * Description:
2295 * This function sends a start command and a clear interrupt
2296 * command for one receive queue to the BMU.
2297 *
2298 * Returns: N/A
2299 *
2300 */
2301static void ClearAndStartRx(
2302SK_AC *pAC, /* pointer to the adapter context */
2303int PortIndex) /* index of the receive port (XMAC) */
2304{
2305 SK_OUT8(pAC->IoBase,
2306 RxQueueAddr[PortIndex]+Q_CSR,
2307 CSR_START | CSR_IRQ_CL_F);
2308} /* ClearAndStartRx */
2309
2310
2311/*****************************************************************************
2312 *
2313 * ClearTxIrq - give a clear transmit IRQ command to BMU
2314 *
2315 * Description:
2316 * This function sends a clear tx IRQ command for one
2317 * transmit queue to the BMU.
2318 *
2319 * Returns: N/A
2320 */
2321static void ClearTxIrq(
2322SK_AC *pAC, /* pointer to the adapter context */
2323int PortIndex, /* index of the transmit port (XMAC) */
2324int Prio) /* priority or normal queue */
2325{
2326 SK_OUT8(pAC->IoBase,
2327 TxQueueAddr[PortIndex][Prio]+Q_CSR,
2328 CSR_IRQ_CL_F);
2329} /* ClearTxIrq */
2330
2331
2332/*****************************************************************************
2333 *
2334 * ClearRxRing - remove all buffers from the receive ring
2335 *
2336 * Description:
2337 * This function removes all receive buffers from the ring.
2338 * The receive BMU must be stopped before calling this function.
2339 *
2340 * Returns: N/A
2341 */
2342static void ClearRxRing(
2343SK_AC *pAC, /* pointer to adapter context */
2344RX_PORT *pRxPort) /* pointer to rx port struct */
2345{
2346RXD *pRxd; /* pointer to the current descriptor */
2347unsigned long Flags;
2348SK_U64 PhysAddr;
2349
2350 if (pRxPort->RxdRingFree == pAC->RxDescrPerRing) {
2351 return;
2352 }
2353 spin_lock_irqsave(&pRxPort->RxDesRingLock, Flags);
2354 pRxd = pRxPort->pRxdRingHead;
2355 do {
2356 if (pRxd->pMBuf != NULL) {
2357
2358 PhysAddr = ((SK_U64) pRxd->VDataHigh) << (SK_U64)32;
2359 PhysAddr |= (SK_U64) pRxd->VDataLow;
2360 pci_unmap_page(pAC->PciDev,
2361 PhysAddr,
2362 pAC->RxBufSize - 2,
2363 PCI_DMA_FROMDEVICE);
2364 DEV_KFREE_SKB(pRxd->pMBuf);
2365 pRxd->pMBuf = NULL;
2366 }
2367 pRxd->RBControl &= BMU_OWN;
2368 pRxd = pRxd->pNextRxd;
2369 pRxPort->RxdRingFree++;
2370 } while (pRxd != pRxPort->pRxdRingTail);
2371 pRxPort->pRxdRingTail = pRxPort->pRxdRingHead;
2372 spin_unlock_irqrestore(&pRxPort->RxDesRingLock, Flags);
2373} /* ClearRxRing */
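
/*
 * Usage sketch (hedged): with the receive BMU stopped, as the header above
 * requires, a ring is cleared and then refilled; SkGeChangeMtu() further
 * below uses the same ClearRxRing()/FillRxRing() pairing per port i.
 */
#if 0
	ClearRxRing(pAC, &pAC->RxPort[i]);
	FillRxRing(pAC, &pAC->RxPort[i]);
#endif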
2374
2375/*****************************************************************************
2376 *
2377 * ClearTxRing - remove all buffers from the transmit ring
2378 *
2379 * Description:
2380 * This function removes all transmit buffers from the ring.
2381 * The transmit BMU must be stopped before calling this function
2382 * and transmitting at the upper level must be disabled.
2383 * The BMU own bit of all descriptors is cleared, the rest is
2384 * done by calling FreeTxDescriptors.
2385 *
2386 * Returns: N/A
2387 */
2388static void ClearTxRing(
2389SK_AC *pAC, /* pointer to adapter context */
2390 TX_PORT	*pTxPort)	/* pointer to tx port struct */
2391{
2392TXD *pTxd; /* pointer to the current descriptor */
2393int i;
2394unsigned long Flags;
2395
2396 spin_lock_irqsave(&pTxPort->TxDesRingLock, Flags);
2397 pTxd = pTxPort->pTxdRingHead;
2398 for (i=0; i<pAC->TxDescrPerRing; i++) {
2399 pTxd->TBControl &= ~BMU_OWN;
2400 pTxd = pTxd->pNextTxd;
2401 }
2402 FreeTxDescriptors(pAC, pTxPort);
2403 spin_unlock_irqrestore(&pTxPort->TxDesRingLock, Flags);
2404} /* ClearTxRing */
2405
2406/*****************************************************************************
2407 *
2408 * SkGeSetMacAddr - Set the hardware MAC address
2409 *
2410 * Description:
2411 * This function sets the MAC address used by the adapter.
2412 *
2413 * Returns:
2414 * 0, if everything is ok
2415 * !=0, on error
2416 */
2417static int SkGeSetMacAddr(struct SK_NET_DEVICE *dev, void *p)
2418{
2419
2420DEV_NET *pNet = netdev_priv(dev);
2421SK_AC *pAC = pNet->pAC;
2422
2423struct sockaddr *addr = p;
2424unsigned long Flags;
2425
2426 SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_ENTRY,
2427 ("SkGeSetMacAddr starts now...\n"));
2428 if(netif_running(dev))
2429 return -EBUSY;
2430
2431 memcpy(dev->dev_addr, addr->sa_data,dev->addr_len);
2432
2433 spin_lock_irqsave(&pAC->SlowPathLock, Flags);
2434
2435 if (pAC->RlmtNets == 2)
2436 SkAddrOverride(pAC, pAC->IoBase, pNet->NetNr,
2437 (SK_MAC_ADDR*)dev->dev_addr, SK_ADDR_VIRTUAL_ADDRESS);
2438 else
2439 SkAddrOverride(pAC, pAC->IoBase, pAC->ActivePort,
2440 (SK_MAC_ADDR*)dev->dev_addr, SK_ADDR_VIRTUAL_ADDRESS);
2441
2442
2443
2444 spin_unlock_irqrestore(&pAC->SlowPathLock, Flags);
2445 return 0;
2446} /* SkGeSetMacAddr */
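
/*
 * Hookup sketch (assumption, not shown in this excerpt): for kernels of this
 * vintage the handler would typically be registered in the probe path roughly
 * like this.
 */
#if 0
	dev->set_mac_address = &SkGeSetMacAddr;
#endif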
2447
2448
2449/*****************************************************************************
2450 *
2451 * SkGeSetRxMode - set receive mode
2452 *
2453 * Description:
2454 * This function sets the receive mode of an adapter. The adapter
2455 *	supports promiscuous mode, allmulticast mode and a number of
2456 *	multicast addresses. If more multicast addresses than available
2457 *	are selected, a hash function in the hardware is used.
2458 *
2459 * Returns:
2460 *	N/A (this function does not return a value)
2461 *
2462 */
2463static void SkGeSetRxMode(struct SK_NET_DEVICE *dev)
2464{
2465
2466DEV_NET *pNet;
2467SK_AC *pAC;
2468
2469struct dev_mc_list *pMcList;
2470int i;
2471int PortIdx;
2472unsigned long Flags;
2473
2474 SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_ENTRY,
2475 ("SkGeSetRxMode starts now... "));
2476
2477 pNet = netdev_priv(dev);
2478 pAC = pNet->pAC;
2479 if (pAC->RlmtNets == 1)
2480 PortIdx = pAC->ActivePort;
2481 else
2482 PortIdx = pNet->NetNr;
2483
2484 spin_lock_irqsave(&pAC->SlowPathLock, Flags);
2485 if (dev->flags & IFF_PROMISC) {
2486 SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_ENTRY,
2487 ("PROMISCUOUS mode\n"));
2488 SkAddrPromiscuousChange(pAC, pAC->IoBase, PortIdx,
2489 SK_PROM_MODE_LLC);
2490 } else if (dev->flags & IFF_ALLMULTI) {
2491 SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_ENTRY,
2492 ("ALLMULTI mode\n"));
2493 SkAddrPromiscuousChange(pAC, pAC->IoBase, PortIdx,
2494 SK_PROM_MODE_ALL_MC);
2495 } else {
2496 SkAddrPromiscuousChange(pAC, pAC->IoBase, PortIdx,
2497 SK_PROM_MODE_NONE);
2498 SkAddrMcClear(pAC, pAC->IoBase, PortIdx, 0);
2499
2500 SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_ENTRY,
2501 ("Number of MC entries: %d ", dev->mc_count));
2502
2503 pMcList = dev->mc_list;
2504 for (i=0; i<dev->mc_count; i++, pMcList = pMcList->next) {
2505 SkAddrMcAdd(pAC, pAC->IoBase, PortIdx,
2506 (SK_MAC_ADDR*)pMcList->dmi_addr, 0);
2507 SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_MCA,
2508 ("%02x:%02x:%02x:%02x:%02x:%02x\n",
2509 pMcList->dmi_addr[0],
2510 pMcList->dmi_addr[1],
2511 pMcList->dmi_addr[2],
2512 pMcList->dmi_addr[3],
2513 pMcList->dmi_addr[4],
2514 pMcList->dmi_addr[5]));
2515 }
2516 SkAddrMcUpdate(pAC, pAC->IoBase, PortIdx);
2517 }
2518 spin_unlock_irqrestore(&pAC->SlowPathLock, Flags);
2519
2520 return;
2521} /* SkGeSetRxMode */
2522
2523
2524/*****************************************************************************
2525 *
2526 * SkGeChangeMtu - set the MTU to another value
2527 *
2528 * Description:
2529 *	This function is called whenever the MTU size is changed
2530 * (ifconfig mtu xxx dev ethX). If the MTU is bigger than standard
2531 * ethernet MTU size, long frame support is activated.
2532 *
2533 * Returns:
2534 * 0, if everything is ok
2535 * !=0, on error
2536 */
2537static int SkGeChangeMtu(struct SK_NET_DEVICE *dev, int NewMtu)
2538{
2539DEV_NET *pNet;
2540struct net_device *pOtherDev;
2541SK_AC *pAC;
2542unsigned long Flags;
2543int i;
2544SK_EVPARA EvPara;
2545
2546 SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_ENTRY,
2547 ("SkGeChangeMtu starts now...\n"));
2548
2549 pNet = netdev_priv(dev);
2550 pAC = pNet->pAC;
2551
2552 if ((NewMtu < 68) || (NewMtu > SK_JUMBO_MTU)) {
2553 return -EINVAL;
2554 }
2555
2556 if(pAC->BoardLevel != SK_INIT_RUN) {
2557 return -EINVAL;
2558 }
2559
2560#ifdef SK_DIAG_SUPPORT
2561 if (pAC->DiagModeActive == DIAG_ACTIVE) {
2562 if (pAC->DiagFlowCtrl == SK_FALSE) {
2563 return -1; /* still in use, deny any actions of MTU */
2564 } else {
2565 pAC->DiagFlowCtrl = SK_FALSE;
2566 }
2567 }
2568#endif
2569
2570 pOtherDev = pAC->dev[1 - pNet->NetNr];
2571
2572 if ( netif_running(pOtherDev) && (pOtherDev->mtu > 1500)
2573 && (NewMtu <= 1500))
2574 return 0;
2575
2576 pAC->RxBufSize = NewMtu + 32;
2577 dev->mtu = NewMtu;
2578
2579 SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_ENTRY,
2580 ("New MTU: %d\n", NewMtu));
2581
2582 /*
2583 ** Prevent any reconfiguration while changing the MTU
2584 ** by disabling any interrupts
2585 */
2586 SK_OUT32(pAC->IoBase, B0_IMSK, 0);
2587 spin_lock_irqsave(&pAC->SlowPathLock, Flags);
2588
2589 /*
2590 ** Notify RLMT that any ports are to be stopped
2591 */
2592 EvPara.Para32[0] = 0;
2593 EvPara.Para32[1] = -1;
2594 if ((pAC->GIni.GIMacsFound == 2 ) && (pAC->RlmtNets == 2)) {
2595 SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_STOP, EvPara);
2596 EvPara.Para32[0] = 1;
2597 SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_STOP, EvPara);
2598 } else {
2599 SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_STOP, EvPara);
2600 }
2601
2602 /*
2603	** After calling SkEventDispatcher(), RLMT is aware of
2604	** the stopped ports -> configuration can take place!
2605 */
2606 SkEventDispatcher(pAC, pAC->IoBase);
2607
2608 for (i=0; i<pAC->GIni.GIMacsFound; i++) {
2609 spin_lock(&pAC->TxPort[i][TX_PRIO_LOW].TxDesRingLock);
2610 netif_stop_queue(pAC->dev[i]);
2611
2612 }
2613
2614 /*
2615 ** Depending on the desired MTU size change, a different number of
2616 ** RX buffers need to be allocated
2617 */
2618 if (NewMtu > 1500) {
2619 /*
2620 ** Use less rx buffers
2621 */
2622 for (i=0; i<pAC->GIni.GIMacsFound; i++) {
2623 if ((pAC->GIni.GIMacsFound == 2 ) && (pAC->RlmtNets == 2)) {
2624 pAC->RxPort[i].RxFillLimit = pAC->RxDescrPerRing -
2625 (pAC->RxDescrPerRing / 4);
2626 } else {
2627 if (i == pAC->ActivePort) {
2628 pAC->RxPort[i].RxFillLimit = pAC->RxDescrPerRing -
2629 (pAC->RxDescrPerRing / 4);
2630 } else {
2631 pAC->RxPort[i].RxFillLimit = pAC->RxDescrPerRing -
2632 (pAC->RxDescrPerRing / 10);
2633 }
2634 }
2635 }
2636 } else {
2637 /*
2638 ** Use the normal amount of rx buffers
2639 */
2640 for (i=0; i<pAC->GIni.GIMacsFound; i++) {
2641 if ((pAC->GIni.GIMacsFound == 2 ) && (pAC->RlmtNets == 2)) {
2642 pAC->RxPort[i].RxFillLimit = 1;
2643 } else {
2644 if (i == pAC->ActivePort) {
2645 pAC->RxPort[i].RxFillLimit = 1;
2646 } else {
2647 pAC->RxPort[i].RxFillLimit = pAC->RxDescrPerRing -
2648 (pAC->RxDescrPerRing / 4);
2649 }
2650 }
2651 }
2652 }
2653
2654 SkGeDeInit(pAC, pAC->IoBase);
2655
2656 /*
2657 ** enable/disable hardware support for long frames
2658 */
2659 if (NewMtu > 1500) {
2660// pAC->JumboActivated = SK_TRUE; /* is never set back !!! */
2661 pAC->GIni.GIPortUsage = SK_JUMBO_LINK;
2662 } else {
2663 if ((pAC->GIni.GIMacsFound == 2 ) && (pAC->RlmtNets == 2)) {
2664 pAC->GIni.GIPortUsage = SK_MUL_LINK;
2665 } else {
2666 pAC->GIni.GIPortUsage = SK_RED_LINK;
2667 }
2668 }
2669
2670 SkGeInit( pAC, pAC->IoBase, SK_INIT_IO);
2671 SkI2cInit( pAC, pAC->IoBase, SK_INIT_IO);
2672 SkEventInit(pAC, pAC->IoBase, SK_INIT_IO);
2673 SkPnmiInit( pAC, pAC->IoBase, SK_INIT_IO);
2674 SkAddrInit( pAC, pAC->IoBase, SK_INIT_IO);
2675 SkRlmtInit( pAC, pAC->IoBase, SK_INIT_IO);
2676 SkTimerInit(pAC, pAC->IoBase, SK_INIT_IO);
2677
2678 /*
2679 ** tschilling:
2680 ** Speed and others are set back to default in level 1 init!
2681 */
2682 GetConfiguration(pAC);
2683
2684 SkGeInit( pAC, pAC->IoBase, SK_INIT_RUN);
2685 SkI2cInit( pAC, pAC->IoBase, SK_INIT_RUN);
2686 SkEventInit(pAC, pAC->IoBase, SK_INIT_RUN);
2687 SkPnmiInit( pAC, pAC->IoBase, SK_INIT_RUN);
2688 SkAddrInit( pAC, pAC->IoBase, SK_INIT_RUN);
2689 SkRlmtInit( pAC, pAC->IoBase, SK_INIT_RUN);
2690 SkTimerInit(pAC, pAC->IoBase, SK_INIT_RUN);
2691
2692 /*
2693 ** clear and reinit the rx rings here
2694 */
2695 for (i=0; i<pAC->GIni.GIMacsFound; i++) {
2696 ReceiveIrq(pAC, &pAC->RxPort[i], SK_TRUE);
2697 ClearRxRing(pAC, &pAC->RxPort[i]);
2698 FillRxRing(pAC, &pAC->RxPort[i]);
2699
2700 /*
2701 ** Enable transmit descriptor polling
2702 */
2703 SkGePollTxD(pAC, pAC->IoBase, i, SK_TRUE);
2704 FillRxRing(pAC, &pAC->RxPort[i]);
2705	}
2706
2707 SkGeYellowLED(pAC, pAC->IoBase, 1);
2708 SkDimEnableModerationIfNeeded(pAC);
2709 SkDimDisplayModerationSettings(pAC);
2710
2711 netif_start_queue(pAC->dev[pNet->PortNr]);
2712 for (i=pAC->GIni.GIMacsFound-1; i>=0; i--) {
2713 spin_unlock(&pAC->TxPort[i][TX_PRIO_LOW].TxDesRingLock);
2714 }
2715
2716 /*
2717 ** Enable Interrupts again
2718 */
2719 SK_OUT32(pAC->IoBase, B0_IMSK, pAC->GIni.GIValIrqMask);
2720 SK_OUT32(pAC->IoBase, B0_HWE_IMSK, IRQ_HWE_MASK);
2721
2722 SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_START, EvPara);
2723 SkEventDispatcher(pAC, pAC->IoBase);
2724
2725 /*
2726	** Notify RLMT about changing and restarting one (or more) ports
2727 */
2728 if ((pAC->GIni.GIMacsFound == 2 ) && (pAC->RlmtNets == 2)) {
2729 EvPara.Para32[0] = pAC->RlmtNets;
2730 EvPara.Para32[1] = -1;
2731 SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_SET_NETS, EvPara);
2732 EvPara.Para32[0] = pNet->PortNr;
2733 EvPara.Para32[1] = -1;
2734 SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_START, EvPara);
2735
2736 if (netif_running(pOtherDev)) {
2737 DEV_NET *pOtherNet = netdev_priv(pOtherDev);
2738 EvPara.Para32[0] = pOtherNet->PortNr;
2739 SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_START, EvPara);
2740 }
2741 } else {
2742 SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_START, EvPara);
2743 }
2744
2745 SkEventDispatcher(pAC, pAC->IoBase);
2746 spin_unlock_irqrestore(&pAC->SlowPathLock, Flags);
2747
2748 /*
2749	** While testing this driver with kernel 2.5 (2.5.70), it
2750	** seemed as if the upper layers have a problem handling a
2751	** successful return value of '0'. If such a zero is returned, the
2752	** complete system hangs for several minutes (!), which is unacceptable.
2753	**
2754	** It is currently not clear what the exact reason for this problem
2755	** is. The implemented workaround for 2.5 is to return the desired
2756	** new MTU size if all needed changes for the new MTU size were
2757	** performed. In kernels 2.2 and 2.4, a zero value is returned,
2758	** which indicates the successful change of the MTU size.
2759 */
2760 return NewMtu;
2761
2762} /* SkGeChangeMtu */
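
/*
 * A condensed sketch of the MTU-dependent decisions made in SkGeChangeMtu()
 * above (the values are taken from that function, the helper itself is
 * hypothetical):
 */
#if 0
static void SkGeMtuSketch(SK_AC *pAC, struct SK_NET_DEVICE *dev, int NewMtu)
{
	pAC->RxBufSize = NewMtu + 32;			/* rx buffers must hold the new MTU  */
	dev->mtu = NewMtu;
	if (NewMtu > 1500)				/* above standard ethernet size ...  */
		pAC->GIni.GIPortUsage = SK_JUMBO_LINK;	/* ... activate long frame support   */
}
#endif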
2763
2764
2765/*****************************************************************************
2766 *
2767 * SkGeStats - return ethernet device statistics
2768 *
2769 * Description:
2770 * This function return statistic data about the ethernet device
2771 * to the operating system.
2772 *
2773 * Returns:
2774 * pointer to the statistic structure.
2775 */
2776static struct net_device_stats *SkGeStats(struct SK_NET_DEVICE *dev)
2777{
2778DEV_NET *pNet = netdev_priv(dev);
2779SK_AC *pAC = pNet->pAC;
2780SK_PNMI_STRUCT_DATA *pPnmiStruct; /* structure for all Pnmi-Data */
2781SK_PNMI_STAT *pPnmiStat; /* pointer to virtual XMAC stat. data */
2782SK_PNMI_CONF *pPnmiConf; /* pointer to virtual link config. */
2783unsigned int Size; /* size of pnmi struct */
2784unsigned long Flags; /* for spin lock */
2785
2786 SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_ENTRY,
2787 ("SkGeStats starts now...\n"));
2788 pPnmiStruct = &pAC->PnmiStruct;
2789
2790#ifdef SK_DIAG_SUPPORT
2791 if ((pAC->DiagModeActive == DIAG_NOTACTIVE) &&
2792 (pAC->BoardLevel == SK_INIT_RUN)) {
2793#endif
2794 SK_MEMSET(pPnmiStruct, 0, sizeof(SK_PNMI_STRUCT_DATA));
2795 spin_lock_irqsave(&pAC->SlowPathLock, Flags);
2796 Size = SK_PNMI_STRUCT_SIZE;
2797 SkPnmiGetStruct(pAC, pAC->IoBase, pPnmiStruct, &Size, pNet->NetNr);
2798 spin_unlock_irqrestore(&pAC->SlowPathLock, Flags);
2799#ifdef SK_DIAG_SUPPORT
2800 }
2801#endif
2802
2803 pPnmiStat = &pPnmiStruct->Stat[0];
2804 pPnmiConf = &pPnmiStruct->Conf[0];
2805
2806 pAC->stats.rx_packets = (SK_U32) pPnmiStruct->RxDeliveredCts & 0xFFFFFFFF;
2807 pAC->stats.tx_packets = (SK_U32) pPnmiStat->StatTxOkCts & 0xFFFFFFFF;
2808 pAC->stats.rx_bytes = (SK_U32) pPnmiStruct->RxOctetsDeliveredCts;
2809 pAC->stats.tx_bytes = (SK_U32) pPnmiStat->StatTxOctetsOkCts;
2810
2811 if (dev->mtu <= 1500) {
2812 pAC->stats.rx_errors = (SK_U32) pPnmiStruct->InErrorsCts & 0xFFFFFFFF;
2813 } else {
2814 pAC->stats.rx_errors = (SK_U32) ((pPnmiStruct->InErrorsCts -
2815 pPnmiStat->StatRxTooLongCts) & 0xFFFFFFFF);
2816 }
2817
2818
2819 if (pAC->GIni.GP[0].PhyType == SK_PHY_XMAC && pAC->HWRevision < 12)
2820 pAC->stats.rx_errors = pAC->stats.rx_errors - pPnmiStat->StatRxShortsCts;
2821
2822 pAC->stats.tx_errors = (SK_U32) pPnmiStat->StatTxSingleCollisionCts & 0xFFFFFFFF;
2823 pAC->stats.rx_dropped = (SK_U32) pPnmiStruct->RxNoBufCts & 0xFFFFFFFF;
2824 pAC->stats.tx_dropped = (SK_U32) pPnmiStruct->TxNoBufCts & 0xFFFFFFFF;
2825 pAC->stats.multicast = (SK_U32) pPnmiStat->StatRxMulticastOkCts & 0xFFFFFFFF;
2826 pAC->stats.collisions = (SK_U32) pPnmiStat->StatTxSingleCollisionCts & 0xFFFFFFFF;
2827
2828 /* detailed rx_errors: */
2829 pAC->stats.rx_length_errors = (SK_U32) pPnmiStat->StatRxRuntCts & 0xFFFFFFFF;
2830 pAC->stats.rx_over_errors = (SK_U32) pPnmiStat->StatRxFifoOverflowCts & 0xFFFFFFFF;
2831 pAC->stats.rx_crc_errors = (SK_U32) pPnmiStat->StatRxFcsCts & 0xFFFFFFFF;
2832 pAC->stats.rx_frame_errors = (SK_U32) pPnmiStat->StatRxFramingCts & 0xFFFFFFFF;
2833 pAC->stats.rx_fifo_errors = (SK_U32) pPnmiStat->StatRxFifoOverflowCts & 0xFFFFFFFF;
2834 pAC->stats.rx_missed_errors = (SK_U32) pPnmiStat->StatRxMissedCts & 0xFFFFFFFF;
2835
2836 /* detailed tx_errors */
2837 pAC->stats.tx_aborted_errors = (SK_U32) 0;
2838 pAC->stats.tx_carrier_errors = (SK_U32) pPnmiStat->StatTxCarrierCts & 0xFFFFFFFF;
2839 pAC->stats.tx_fifo_errors = (SK_U32) pPnmiStat->StatTxFifoUnderrunCts & 0xFFFFFFFF;
2840 pAC->stats.tx_heartbeat_errors = (SK_U32) pPnmiStat->StatTxCarrierCts & 0xFFFFFFFF;
2841 pAC->stats.tx_window_errors = (SK_U32) 0;
2842
2843 return(&pAC->stats);
2844} /* SkGeStats */
2845
2846/*
2847 * Basic MII register access
2848 */
2849static int SkGeMiiIoctl(struct net_device *dev,
2850 struct mii_ioctl_data *data, int cmd)
2851{
2852 DEV_NET *pNet = netdev_priv(dev);
2853 SK_AC *pAC = pNet->pAC;
2854 SK_IOC IoC = pAC->IoBase;
2855 int Port = pNet->PortNr;
2856 SK_GEPORT *pPrt = &pAC->GIni.GP[Port];
2857 unsigned long Flags;
2858 int err = 0;
2859 int reg = data->reg_num & 0x1f;
2860 SK_U16 val = data->val_in;
2861
2862 if (!netif_running(dev))
2863 return -ENODEV; /* Phy still in reset */
2864
2865 spin_lock_irqsave(&pAC->SlowPathLock, Flags);
2866 switch(cmd) {
2867 case SIOCGMIIPHY:
2868 data->phy_id = pPrt->PhyAddr;
2869
2870 /* fallthru */
2871 case SIOCGMIIREG:
2872 if (pAC->GIni.GIGenesis)
2873 SkXmPhyRead(pAC, IoC, Port, reg, &val);
2874 else
2875 SkGmPhyRead(pAC, IoC, Port, reg, &val);
2876
2877 data->val_out = val;
2878 break;
2879
2880 case SIOCSMIIREG:
2881 if (!capable(CAP_NET_ADMIN))
2882 err = -EPERM;
2883
2884 else if (pAC->GIni.GIGenesis)
2885 SkXmPhyWrite(pAC, IoC, Port, reg, val);
2886 else
2887 SkGmPhyWrite(pAC, IoC, Port, reg, val);
2888 break;
2889 default:
2890 err = -EOPNOTSUPP;
2891 }
2892 spin_unlock_irqrestore(&pAC->SlowPathLock, Flags);
2893 return err;
2894}
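
/*
 * Hedged user-space sketch of how the MII ioctls handled above are typically
 * driven (error handling omitted; this is not kernel code, hence #if 0, and
 * the tool function name is made up for the illustration):
 */
#if 0
#include <string.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/mii.h>
#include <linux/sockios.h>

static int ReadBmsr(const char *IfName)
{
	struct ifreq ifr;
	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, IfName, IFNAMSIZ - 1);
	ioctl(fd, SIOCGMIIPHY, &ifr);		/* driver fills mii->phy_id        */
	mii->reg_num = MII_BMSR;		/* basic mode status register      */
	ioctl(fd, SIOCGMIIREG, &ifr);		/* ends up in SkGeMiiIoctl() above */
	return mii->val_out;
}
#endif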
2895
2896
2897/*****************************************************************************
2898 *
2899 * SkGeIoctl - IO-control function
2900 *
2901 * Description:
2902 * This function is called if an ioctl is issued on the device.
2903 *	There are three subfunctions for reading, writing and test-writing
2904 * the private MIB data structure (useful for SysKonnect-internal tools).
2905 *
2906 * Returns:
2907 * 0, if everything is ok
2908 * !=0, on error
2909 */
2910static int SkGeIoctl(struct SK_NET_DEVICE *dev, struct ifreq *rq, int cmd)
2911{
2912DEV_NET *pNet;
2913SK_AC *pAC;
2914void *pMemBuf;
2915struct pci_dev *pdev = NULL;
2916SK_GE_IOCTL Ioctl;
2917unsigned int Err = 0;
2918int Size = 0;
2919int Ret = 0;
2920unsigned int Length = 0;
2921int HeaderLength = sizeof(SK_U32) + sizeof(SK_U32);
2922
2923 SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_ENTRY,
2924 ("SkGeIoctl starts now...\n"));
2925
2926 pNet = netdev_priv(dev);
2927 pAC = pNet->pAC;
2928
2929 if (cmd == SIOCGMIIPHY || cmd == SIOCSMIIREG || cmd == SIOCGMIIREG)
2930 return SkGeMiiIoctl(dev, if_mii(rq), cmd);
2931
2932 if(copy_from_user(&Ioctl, rq->ifr_data, sizeof(SK_GE_IOCTL))) {
2933 return -EFAULT;
2934 }
2935
2936 switch(cmd) {
2937 case SK_IOCTL_SETMIB:
2938 case SK_IOCTL_PRESETMIB:
2939 if (!capable(CAP_NET_ADMIN)) return -EPERM;
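		/* fallthru: SETMIB/PRESETMIB use the same copy path as GETMIB */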
2940 case SK_IOCTL_GETMIB:
2941 if(copy_from_user(&pAC->PnmiStruct, Ioctl.pData,
2942 Ioctl.Len<sizeof(pAC->PnmiStruct)?
2943 Ioctl.Len : sizeof(pAC->PnmiStruct))) {
2944 return -EFAULT;
2945 }
2946 Size = SkGeIocMib(pNet, Ioctl.Len, cmd);
2947 if(copy_to_user(Ioctl.pData, &pAC->PnmiStruct,
2948 Ioctl.Len<Size? Ioctl.Len : Size)) {
2949 return -EFAULT;
2950 }
2951 Ioctl.Len = Size;
2952 if(copy_to_user(rq->ifr_data, &Ioctl, sizeof(SK_GE_IOCTL))) {
2953 return -EFAULT;
2954 }
2955 break;
2956 case SK_IOCTL_GEN:
2957 if (Ioctl.Len < (sizeof(pAC->PnmiStruct) + HeaderLength)) {
2958 Length = Ioctl.Len;
2959 } else {
2960 Length = sizeof(pAC->PnmiStruct) + HeaderLength;
2961 }
2962 if (NULL == (pMemBuf = kmalloc(Length, GFP_KERNEL))) {
2963 return -ENOMEM;
2964 }
2965 if(copy_from_user(pMemBuf, Ioctl.pData, Length)) {
2966 Err = -EFAULT;
2967 goto fault_gen;
2968 }
2969 if ((Ret = SkPnmiGenIoctl(pAC, pAC->IoBase, pMemBuf, &Length, 0)) < 0) {
2970 Err = -EFAULT;
2971 goto fault_gen;
2972 }
2973 if(copy_to_user(Ioctl.pData, pMemBuf, Length) ) {
2974 Err = -EFAULT;
2975 goto fault_gen;
2976 }
2977 Ioctl.Len = Length;
2978 if(copy_to_user(rq->ifr_data, &Ioctl, sizeof(SK_GE_IOCTL))) {
2979 Err = -EFAULT;
2980 goto fault_gen;
2981 }
2982fault_gen:
2983 kfree(pMemBuf); /* cleanup everything */
2984 break;
2985#ifdef SK_DIAG_SUPPORT
2986 case SK_IOCTL_DIAG:
2987 if (!capable(CAP_NET_ADMIN)) return -EPERM;
2988 if (Ioctl.Len < (sizeof(pAC->PnmiStruct) + HeaderLength)) {
2989 Length = Ioctl.Len;
2990 } else {
2991 Length = sizeof(pAC->PnmiStruct) + HeaderLength;
2992 }
2993 if (NULL == (pMemBuf = kmalloc(Length, GFP_KERNEL))) {
2994 return -ENOMEM;
2995 }
2996 if(copy_from_user(pMemBuf, Ioctl.pData, Length)) {
2997 Err = -EFAULT;
2998 goto fault_diag;
2999 }
3000 pdev = pAC->PciDev;
3001 Length = 3 * sizeof(SK_U32); /* Error, Bus and Device */
3002 /*
3003 ** While coding this new IOCTL interface, only a few lines of code
3004		** are to be added. Therefore no dedicated function has been
3005 ** added. If more functionality is added, a separate function
3006 ** should be used...
3007 */
3008 * ((SK_U32 *)pMemBuf) = 0;
3009 * ((SK_U32 *)pMemBuf + 1) = pdev->bus->number;
3010 * ((SK_U32 *)pMemBuf + 2) = ParseDeviceNbrFromSlotName(pci_name(pdev));
3011 if(copy_to_user(Ioctl.pData, pMemBuf, Length) ) {
3012 Err = -EFAULT;
3013 goto fault_diag;
3014 }
3015 Ioctl.Len = Length;
3016 if(copy_to_user(rq->ifr_data, &Ioctl, sizeof(SK_GE_IOCTL))) {
3017 Err = -EFAULT;
3018 goto fault_diag;
3019 }
3020fault_diag:
3021 kfree(pMemBuf); /* cleanup everything */
3022 break;
3023#endif
3024 default:
3025 Err = -EOPNOTSUPP;
3026 }
3027
3028 return(Err);
3029
3030} /* SkGeIoctl */
3031
3032
3033/*****************************************************************************
3034 *
3035 * SkGeIocMib - handle a GetMib, SetMib- or PresetMib-ioctl message
3036 *
3037 * Description:
3038 * This function reads/writes the MIB data using PNMI (Private Network
3039 * Management Interface).
3040 * The destination for the data must be provided with the
3041 * ioctl call and is given to the driver in the form of
3042 * a user space address.
3043 * Copying from the user-provided data area into kernel messages
3044 * and back is done by copy_from_user and copy_to_user calls in
3045 * SkGeIoctl.
3046 *
3047 * Returns:
3048 * returned size from PNMI call
3049 */
3050static int SkGeIocMib(
3051DEV_NET *pNet, /* pointer to the adapter context */
3052unsigned int Size, /* length of ioctl data */
3053int mode) /* flag for set/preset */
3054{
3055unsigned long Flags; /* for spin lock */
3056SK_AC *pAC;
3057
3058 SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_ENTRY,
3059 ("SkGeIocMib starts now...\n"));
3060 pAC = pNet->pAC;
3061 /* access MIB */
3062 spin_lock_irqsave(&pAC->SlowPathLock, Flags);
3063 switch(mode) {
3064 case SK_IOCTL_GETMIB:
3065 SkPnmiGetStruct(pAC, pAC->IoBase, &pAC->PnmiStruct, &Size,
3066 pNet->NetNr);
3067 break;
3068 case SK_IOCTL_PRESETMIB:
3069 SkPnmiPreSetStruct(pAC, pAC->IoBase, &pAC->PnmiStruct, &Size,
3070 pNet->NetNr);
3071 break;
3072 case SK_IOCTL_SETMIB:
3073 SkPnmiSetStruct(pAC, pAC->IoBase, &pAC->PnmiStruct, &Size,
3074 pNet->NetNr);
3075 break;
3076 default:
3077 break;
3078 }
3079 spin_unlock_irqrestore(&pAC->SlowPathLock, Flags);
3080 SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_ENTRY,
3081 ("MIB data access succeeded\n"));
3082 return (Size);
3083} /* SkGeIocMib */
3084
3085
3086/*****************************************************************************
3087 *
3088 * GetConfiguration - read configuration information
3089 *
3090 * Description:
3091 * This function reads per-adapter configuration information from
3092 * the options provided on the command line.
3093 *
3094 * Returns:
3095 * none
3096 */
3097static void GetConfiguration(
3098SK_AC *pAC) /* pointer to the adapter context structure */
3099{
3100SK_I32 Port; /* preferred port */
3101SK_BOOL AutoSet;
3102SK_BOOL DupSet;
3103int LinkSpeed = SK_LSPEED_AUTO; /* Link speed */
3104int AutoNeg = 1; /* autoneg off (0) or on (1) */
3105int DuplexCap = 0; /* 0=both,1=full,2=half */
3106int FlowCtrl = SK_FLOW_MODE_SYM_OR_REM; /* FlowControl */
3107int MSMode = SK_MS_MODE_AUTO; /* master/slave mode */
3108
3109SK_BOOL IsConTypeDefined = SK_TRUE;
3110SK_BOOL IsLinkSpeedDefined = SK_TRUE;
3111SK_BOOL IsFlowCtrlDefined = SK_TRUE;
3112SK_BOOL IsRoleDefined = SK_TRUE;
3113SK_BOOL IsModeDefined = SK_TRUE;
3114/*
3115 * The two parameters AutoNeg. and DuplexCap. map to one configuration
3116 * parameter. The mapping is described by this table:
3117 * DuplexCap -> | both | full | half |
3118 * AutoNeg | | | |
3119 * -----------------------------------------------------------------
3120 * Off | illegal | Full | Half |
3121 * -----------------------------------------------------------------
3122 * On | AutoBoth | AutoFull | AutoHalf |
3123 * -----------------------------------------------------------------
3124 * Sense | AutoSense | AutoSense | AutoSense |
3125 */
3126int Capabilities[3][3] =
3127 { { -1, SK_LMODE_FULL , SK_LMODE_HALF },
3128 {SK_LMODE_AUTOBOTH , SK_LMODE_AUTOFULL , SK_LMODE_AUTOHALF },
3129 {SK_LMODE_AUTOSENSE, SK_LMODE_AUTOSENSE, SK_LMODE_AUTOSENSE} };
3130
3131#define DC_BOTH 0
3132#define DC_FULL 1
3133#define DC_HALF 2
3134#define AN_OFF 0
3135#define AN_ON 1
3136#define AN_SENS 2
3137#define M_CurrPort pAC->GIni.GP[Port]
3138
3139
3140 /*
3141 ** Set the default values first for both ports!
3142 */
3143 for (Port = 0; Port < SK_MAX_MACS; Port++) {
3144 M_CurrPort.PLinkModeConf = Capabilities[AN_ON][DC_BOTH];
3145 M_CurrPort.PFlowCtrlMode = SK_FLOW_MODE_SYM_OR_REM;
3146 M_CurrPort.PMSMode = SK_MS_MODE_AUTO;
3147 M_CurrPort.PLinkSpeed = SK_LSPEED_AUTO;
3148 }
3149
3150 /*
3151 ** Check merged parameter ConType. If it has not been used,
3152 ** verify any other parameter (e.g. AutoNeg) and use default values.
3153 **
3154 ** Stating both ConType and other lowlevel link parameters is also
3155 ** possible. If this is the case, the passed ConType-parameter is
3156 ** overwritten by the lowlevel link parameter.
3157 **
3158 ** The following settings are used for a merged ConType-parameter:
3159 **
3160 ** ConType DupCap AutoNeg FlowCtrl Role Speed
3161 ** ------- ------ ------- -------- ---------- -----
3162 ** Auto Both On SymOrRem Auto Auto
3163 ** 100FD Full Off None <ignored> 100
3164 ** 100HD Half Off None <ignored> 100
3165 ** 10FD Full Off None <ignored> 10
3166 ** 10HD Half Off None <ignored> 10
3167 **
3168 ** This ConType parameter is used for all ports of the adapter!
3169 */
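
	/*
	** Hedged usage note: assuming the option arrays evaluated below
	** (ConType, Speed_A, DupCap_A, RlmtMode, ...) are exposed as module
	** parameters of the same names, forcing 100 MBit/s full duplex on the
	** first adapter would look like
	**
	**	modprobe sk98lin ConType=100FD
	**
	** (parameter spelling is an assumption made for this illustration).
	*/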
3170 if ( (ConType != NULL) &&
3171 (pAC->Index < SK_MAX_CARD_PARAM) &&
3172 (ConType[pAC->Index] != NULL) ) {
3173
3174 /* Check chipset family */
3175 if ((!pAC->ChipsetType) &&
3176 (strcmp(ConType[pAC->Index],"Auto")!=0) &&
3177 (strcmp(ConType[pAC->Index],"")!=0)) {
3178 /* Set the speed parameter back */
3179 printk("sk98lin: Illegal value \"%s\" "
3180 "for ConType."
3181 " Using Auto.\n",
3182 ConType[pAC->Index]);
3183
3184 sprintf(ConType[pAC->Index], "Auto");
3185 }
3186
3187 if (strcmp(ConType[pAC->Index],"")==0) {
3188 IsConTypeDefined = SK_FALSE; /* No ConType defined */
3189 } else if (strcmp(ConType[pAC->Index],"Auto")==0) {
3190 for (Port = 0; Port < SK_MAX_MACS; Port++) {
3191 M_CurrPort.PLinkModeConf = Capabilities[AN_ON][DC_BOTH];
3192 M_CurrPort.PFlowCtrlMode = SK_FLOW_MODE_SYM_OR_REM;
3193 M_CurrPort.PMSMode = SK_MS_MODE_AUTO;
3194 M_CurrPort.PLinkSpeed = SK_LSPEED_AUTO;
3195 }
3196 } else if (strcmp(ConType[pAC->Index],"100FD")==0) {
3197 for (Port = 0; Port < SK_MAX_MACS; Port++) {
3198 M_CurrPort.PLinkModeConf = Capabilities[AN_OFF][DC_FULL];
3199 M_CurrPort.PFlowCtrlMode = SK_FLOW_MODE_NONE;
3200 M_CurrPort.PMSMode = SK_MS_MODE_AUTO;
3201 M_CurrPort.PLinkSpeed = SK_LSPEED_100MBPS;
3202 }
3203 } else if (strcmp(ConType[pAC->Index],"100HD")==0) {
3204 for (Port = 0; Port < SK_MAX_MACS; Port++) {
3205 M_CurrPort.PLinkModeConf = Capabilities[AN_OFF][DC_HALF];
3206 M_CurrPort.PFlowCtrlMode = SK_FLOW_MODE_NONE;
3207 M_CurrPort.PMSMode = SK_MS_MODE_AUTO;
3208 M_CurrPort.PLinkSpeed = SK_LSPEED_100MBPS;
3209 }
3210 } else if (strcmp(ConType[pAC->Index],"10FD")==0) {
3211 for (Port = 0; Port < SK_MAX_MACS; Port++) {
3212 M_CurrPort.PLinkModeConf = Capabilities[AN_OFF][DC_FULL];
3213 M_CurrPort.PFlowCtrlMode = SK_FLOW_MODE_NONE;
3214 M_CurrPort.PMSMode = SK_MS_MODE_AUTO;
3215 M_CurrPort.PLinkSpeed = SK_LSPEED_10MBPS;
3216 }
3217 } else if (strcmp(ConType[pAC->Index],"10HD")==0) {
3218 for (Port = 0; Port < SK_MAX_MACS; Port++) {
3219 M_CurrPort.PLinkModeConf = Capabilities[AN_OFF][DC_HALF];
3220 M_CurrPort.PFlowCtrlMode = SK_FLOW_MODE_NONE;
3221 M_CurrPort.PMSMode = SK_MS_MODE_AUTO;
3222 M_CurrPort.PLinkSpeed = SK_LSPEED_10MBPS;
3223 }
3224 } else {
3225 printk("sk98lin: Illegal value \"%s\" for ConType\n",
3226 ConType[pAC->Index]);
3227 IsConTypeDefined = SK_FALSE; /* Wrong ConType defined */
3228 }
3229 } else {
3230 IsConTypeDefined = SK_FALSE; /* No ConType defined */
3231 }
3232
3233 /*
3234 ** Parse any parameter settings for port A:
3235 ** a) any LinkSpeed stated?
3236 */
3237 if (Speed_A != NULL && pAC->Index<SK_MAX_CARD_PARAM &&
3238 Speed_A[pAC->Index] != NULL) {
3239 if (strcmp(Speed_A[pAC->Index],"")==0) {
3240 IsLinkSpeedDefined = SK_FALSE;
3241 } else if (strcmp(Speed_A[pAC->Index],"Auto")==0) {
3242 LinkSpeed = SK_LSPEED_AUTO;
3243 } else if (strcmp(Speed_A[pAC->Index],"10")==0) {
3244 LinkSpeed = SK_LSPEED_10MBPS;
3245 } else if (strcmp(Speed_A[pAC->Index],"100")==0) {
3246 LinkSpeed = SK_LSPEED_100MBPS;
3247 } else if (strcmp(Speed_A[pAC->Index],"1000")==0) {
3248 LinkSpeed = SK_LSPEED_1000MBPS;
3249 } else {
3250 printk("sk98lin: Illegal value \"%s\" for Speed_A\n",
3251 Speed_A[pAC->Index]);
3252 IsLinkSpeedDefined = SK_FALSE;
3253 }
3254 } else {
3255 IsLinkSpeedDefined = SK_FALSE;
3256 }
3257
3258 /*
3259 ** Check speed parameter:
3260 ** Only copper type adapter and GE V2 cards
3261 */
3262 if (((!pAC->ChipsetType) || (pAC->GIni.GICopperType != SK_TRUE)) &&
3263 ((LinkSpeed != SK_LSPEED_AUTO) &&
3264 (LinkSpeed != SK_LSPEED_1000MBPS))) {
3265 printk("sk98lin: Illegal value for Speed_A. "
3266 "Not a copper card or GE V2 card\n Using "
3267 "speed 1000\n");
3268 LinkSpeed = SK_LSPEED_1000MBPS;
3269 }
3270
3271 /*
3272	** Decide whether to set the new config value if something valid has
3273 ** been received.
3274 */
3275 if (IsLinkSpeedDefined) {
3276 pAC->GIni.GP[0].PLinkSpeed = LinkSpeed;
3277 }
3278
3279 /*
3280 ** b) Any Autonegotiation and DuplexCapabilities set?
3281 ** Please note that both belong together...
3282 */
3283 AutoNeg = AN_ON; /* tschilling: Default: Autonegotiation on! */
3284 AutoSet = SK_FALSE;
3285 if (AutoNeg_A != NULL && pAC->Index<SK_MAX_CARD_PARAM &&
3286 AutoNeg_A[pAC->Index] != NULL) {
3287 AutoSet = SK_TRUE;
3288 if (strcmp(AutoNeg_A[pAC->Index],"")==0) {
3289 AutoSet = SK_FALSE;
3290 } else if (strcmp(AutoNeg_A[pAC->Index],"On")==0) {
3291 AutoNeg = AN_ON;
3292 } else if (strcmp(AutoNeg_A[pAC->Index],"Off")==0) {
3293 AutoNeg = AN_OFF;
3294 } else if (strcmp(AutoNeg_A[pAC->Index],"Sense")==0) {
3295 AutoNeg = AN_SENS;
3296 } else {
3297 printk("sk98lin: Illegal value \"%s\" for AutoNeg_A\n",
3298 AutoNeg_A[pAC->Index]);
3299 }
3300 }
3301
3302 DuplexCap = DC_BOTH;
3303 DupSet = SK_FALSE;
3304 if (DupCap_A != NULL && pAC->Index<SK_MAX_CARD_PARAM &&
3305 DupCap_A[pAC->Index] != NULL) {
3306 DupSet = SK_TRUE;
3307 if (strcmp(DupCap_A[pAC->Index],"")==0) {
3308 DupSet = SK_FALSE;
3309 } else if (strcmp(DupCap_A[pAC->Index],"Both")==0) {
3310 DuplexCap = DC_BOTH;
3311 } else if (strcmp(DupCap_A[pAC->Index],"Full")==0) {
3312 DuplexCap = DC_FULL;
3313 } else if (strcmp(DupCap_A[pAC->Index],"Half")==0) {
3314 DuplexCap = DC_HALF;
3315 } else {
3316 printk("sk98lin: Illegal value \"%s\" for DupCap_A\n",
3317 DupCap_A[pAC->Index]);
3318 }
3319 }
3320
3321 /*
3322 ** Check for illegal combinations
3323 */
3324 if ((LinkSpeed == SK_LSPEED_1000MBPS) &&
3325 ((DuplexCap == SK_LMODE_STAT_AUTOHALF) ||
3326 (DuplexCap == SK_LMODE_STAT_HALF)) &&
3327 (pAC->ChipsetType)) {
3328 printk("sk98lin: Half Duplex not possible with Gigabit speed!\n"
3329 " Using Full Duplex.\n");
3330 DuplexCap = DC_FULL;
3331 }
3332
3333 if ( AutoSet && AutoNeg==AN_SENS && DupSet) {
3334		printk("sk98lin: Port A: DuplexCapabilities"
3335 " ignored using Sense mode\n");
3336 }
3337
3338 if (AutoSet && AutoNeg==AN_OFF && DupSet && DuplexCap==DC_BOTH){
3339 printk("sk98lin: Port A: Illegal combination"
3340 " of values AutoNeg. and DuplexCap.\n Using "
3341 "Full Duplex\n");
3342 DuplexCap = DC_FULL;
3343 }
3344
3345 if (AutoSet && AutoNeg==AN_OFF && !DupSet) {
3346 DuplexCap = DC_FULL;
3347 }
3348
3349 if (!AutoSet && DupSet) {
3350 printk("sk98lin: Port A: Duplex setting not"
3351 " possible in\n default AutoNegotiation mode"
3352 " (Sense).\n Using AutoNegotiation On\n");
3353 AutoNeg = AN_ON;
3354 }
3355
3356 /*
3357 ** set the desired mode
3358 */
3359 if (AutoSet || DupSet) {
3360 pAC->GIni.GP[0].PLinkModeConf = Capabilities[AutoNeg][DuplexCap];
3361 }
3362
3363 /*
3364 ** c) Any Flowcontrol-parameter set?
3365 */
3366 if (FlowCtrl_A != NULL && pAC->Index<SK_MAX_CARD_PARAM &&
3367 FlowCtrl_A[pAC->Index] != NULL) {
3368 if (strcmp(FlowCtrl_A[pAC->Index],"") == 0) {
3369 IsFlowCtrlDefined = SK_FALSE;
3370 } else if (strcmp(FlowCtrl_A[pAC->Index],"SymOrRem") == 0) {
3371 FlowCtrl = SK_FLOW_MODE_SYM_OR_REM;
3372 } else if (strcmp(FlowCtrl_A[pAC->Index],"Sym")==0) {
3373 FlowCtrl = SK_FLOW_MODE_SYMMETRIC;
3374 } else if (strcmp(FlowCtrl_A[pAC->Index],"LocSend")==0) {
3375 FlowCtrl = SK_FLOW_MODE_LOC_SEND;
3376 } else if (strcmp(FlowCtrl_A[pAC->Index],"None")==0) {
3377 FlowCtrl = SK_FLOW_MODE_NONE;
3378 } else {
3379 printk("sk98lin: Illegal value \"%s\" for FlowCtrl_A\n",
3380 FlowCtrl_A[pAC->Index]);
3381 IsFlowCtrlDefined = SK_FALSE;
3382 }
3383 } else {
3384 IsFlowCtrlDefined = SK_FALSE;
3385 }
3386
3387 if (IsFlowCtrlDefined) {
3388 if ((AutoNeg == AN_OFF) && (FlowCtrl != SK_FLOW_MODE_NONE)) {
3389 printk("sk98lin: Port A: FlowControl"
3390 " impossible without AutoNegotiation,"
3391 " disabled\n");
3392 FlowCtrl = SK_FLOW_MODE_NONE;
3393 }
3394 pAC->GIni.GP[0].PFlowCtrlMode = FlowCtrl;
3395 }
3396
3397 /*
3398 ** d) What is with the RoleParameter?
3399 */
3400 if (Role_A != NULL && pAC->Index<SK_MAX_CARD_PARAM &&
3401 Role_A[pAC->Index] != NULL) {
3402 if (strcmp(Role_A[pAC->Index],"")==0) {
3403 IsRoleDefined = SK_FALSE;
3404 } else if (strcmp(Role_A[pAC->Index],"Auto")==0) {
3405 MSMode = SK_MS_MODE_AUTO;
3406 } else if (strcmp(Role_A[pAC->Index],"Master")==0) {
3407 MSMode = SK_MS_MODE_MASTER;
3408 } else if (strcmp(Role_A[pAC->Index],"Slave")==0) {
3409 MSMode = SK_MS_MODE_SLAVE;
3410 } else {
3411 printk("sk98lin: Illegal value \"%s\" for Role_A\n",
3412 Role_A[pAC->Index]);
3413 IsRoleDefined = SK_FALSE;
3414 }
3415 } else {
3416 IsRoleDefined = SK_FALSE;
3417 }
3418
3419 if (IsRoleDefined == SK_TRUE) {
3420 pAC->GIni.GP[0].PMSMode = MSMode;
3421 }
3422
3423
3424
3425 /*
3426 ** Parse any parameter settings for port B:
3427 ** a) any LinkSpeed stated?
3428 */
3429 IsConTypeDefined = SK_TRUE;
3430 IsLinkSpeedDefined = SK_TRUE;
3431 IsFlowCtrlDefined = SK_TRUE;
3432 IsModeDefined = SK_TRUE;
3433
3434 if (Speed_B != NULL && pAC->Index<SK_MAX_CARD_PARAM &&
3435 Speed_B[pAC->Index] != NULL) {
3436 if (strcmp(Speed_B[pAC->Index],"")==0) {
3437 IsLinkSpeedDefined = SK_FALSE;
3438 } else if (strcmp(Speed_B[pAC->Index],"Auto")==0) {
3439 LinkSpeed = SK_LSPEED_AUTO;
3440 } else if (strcmp(Speed_B[pAC->Index],"10")==0) {
3441 LinkSpeed = SK_LSPEED_10MBPS;
3442 } else if (strcmp(Speed_B[pAC->Index],"100")==0) {
3443 LinkSpeed = SK_LSPEED_100MBPS;
3444 } else if (strcmp(Speed_B[pAC->Index],"1000")==0) {
3445 LinkSpeed = SK_LSPEED_1000MBPS;
3446 } else {
3447 printk("sk98lin: Illegal value \"%s\" for Speed_B\n",
3448 Speed_B[pAC->Index]);
3449 IsLinkSpeedDefined = SK_FALSE;
3450 }
3451 } else {
3452 IsLinkSpeedDefined = SK_FALSE;
3453 }
3454
3455 /*
3456 ** Check speed parameter:
3457 ** Only copper type adapter and GE V2 cards
3458 */
3459 if (((!pAC->ChipsetType) || (pAC->GIni.GICopperType != SK_TRUE)) &&
3460 ((LinkSpeed != SK_LSPEED_AUTO) &&
3461 (LinkSpeed != SK_LSPEED_1000MBPS))) {
3462 printk("sk98lin: Illegal value for Speed_B. "
3463 "Not a copper card or GE V2 card\n Using "
3464 "speed 1000\n");
3465 LinkSpeed = SK_LSPEED_1000MBPS;
3466 }
3467
3468 /*
3469	** Decide whether to set the new config value if something valid has
3470 ** been received.
3471 */
3472 if (IsLinkSpeedDefined) {
3473 pAC->GIni.GP[1].PLinkSpeed = LinkSpeed;
3474 }
3475
3476 /*
3477 ** b) Any Autonegotiation and DuplexCapabilities set?
3478 ** Please note that both belong together...
3479 */
3480 AutoNeg = AN_SENS; /* default: do auto Sense */
3481 AutoSet = SK_FALSE;
3482 if (AutoNeg_B != NULL && pAC->Index<SK_MAX_CARD_PARAM &&
3483 AutoNeg_B[pAC->Index] != NULL) {
3484 AutoSet = SK_TRUE;
3485 if (strcmp(AutoNeg_B[pAC->Index],"")==0) {
3486 AutoSet = SK_FALSE;
3487 } else if (strcmp(AutoNeg_B[pAC->Index],"On")==0) {
3488 AutoNeg = AN_ON;
3489 } else if (strcmp(AutoNeg_B[pAC->Index],"Off")==0) {
3490 AutoNeg = AN_OFF;
3491 } else if (strcmp(AutoNeg_B[pAC->Index],"Sense")==0) {
3492 AutoNeg = AN_SENS;
3493 } else {
3494 printk("sk98lin: Illegal value \"%s\" for AutoNeg_B\n",
3495 AutoNeg_B[pAC->Index]);
3496 }
3497 }
3498
3499 DuplexCap = DC_BOTH;
3500 DupSet = SK_FALSE;
3501 if (DupCap_B != NULL && pAC->Index<SK_MAX_CARD_PARAM &&
3502 DupCap_B[pAC->Index] != NULL) {
3503 DupSet = SK_TRUE;
3504 if (strcmp(DupCap_B[pAC->Index],"")==0) {
3505 DupSet = SK_FALSE;
3506 } else if (strcmp(DupCap_B[pAC->Index],"Both")==0) {
3507 DuplexCap = DC_BOTH;
3508 } else if (strcmp(DupCap_B[pAC->Index],"Full")==0) {
3509 DuplexCap = DC_FULL;
3510 } else if (strcmp(DupCap_B[pAC->Index],"Half")==0) {
3511 DuplexCap = DC_HALF;
3512 } else {
3513 printk("sk98lin: Illegal value \"%s\" for DupCap_B\n",
3514 DupCap_B[pAC->Index]);
3515 }
3516 }
3517
3518
3519 /*
3520 ** Check for illegal combinations
3521 */
3522 if ((LinkSpeed == SK_LSPEED_1000MBPS) &&
3523 ((DuplexCap == SK_LMODE_STAT_AUTOHALF) ||
3524 (DuplexCap == SK_LMODE_STAT_HALF)) &&
3525 (pAC->ChipsetType)) {
3526 printk("sk98lin: Half Duplex not possible with Gigabit speed!\n"
3527 " Using Full Duplex.\n");
3528 DuplexCap = DC_FULL;
3529 }
3530
3531 if (AutoSet && AutoNeg==AN_SENS && DupSet) {
3532		printk("sk98lin: Port B: DuplexCapabilities"
3533 " ignored using Sense mode\n");
3534 }
3535
3536 if (AutoSet && AutoNeg==AN_OFF && DupSet && DuplexCap==DC_BOTH){
3537 printk("sk98lin: Port B: Illegal combination"
3538 " of values AutoNeg. and DuplexCap.\n Using "
3539 "Full Duplex\n");
3540 DuplexCap = DC_FULL;
3541 }
3542
3543 if (AutoSet && AutoNeg==AN_OFF && !DupSet) {
3544 DuplexCap = DC_FULL;
3545 }
3546
3547 if (!AutoSet && DupSet) {
3548 printk("sk98lin: Port B: Duplex setting not"
3549 " possible in\n default AutoNegotiation mode"
3550 " (Sense).\n Using AutoNegotiation On\n");
3551 AutoNeg = AN_ON;
3552 }
3553
3554 /*
3555 ** set the desired mode
3556 */
3557 if (AutoSet || DupSet) {
3558 pAC->GIni.GP[1].PLinkModeConf = Capabilities[AutoNeg][DuplexCap];
3559 }
3560
3561 /*
3562 ** c) Any FlowCtrl parameter set?
3563 */
3564 if (FlowCtrl_B != NULL && pAC->Index<SK_MAX_CARD_PARAM &&
3565 FlowCtrl_B[pAC->Index] != NULL) {
3566 if (strcmp(FlowCtrl_B[pAC->Index],"") == 0) {
3567 IsFlowCtrlDefined = SK_FALSE;
3568 } else if (strcmp(FlowCtrl_B[pAC->Index],"SymOrRem") == 0) {
3569 FlowCtrl = SK_FLOW_MODE_SYM_OR_REM;
3570 } else if (strcmp(FlowCtrl_B[pAC->Index],"Sym")==0) {
3571 FlowCtrl = SK_FLOW_MODE_SYMMETRIC;
3572 } else if (strcmp(FlowCtrl_B[pAC->Index],"LocSend")==0) {
3573 FlowCtrl = SK_FLOW_MODE_LOC_SEND;
3574 } else if (strcmp(FlowCtrl_B[pAC->Index],"None")==0) {
3575 FlowCtrl = SK_FLOW_MODE_NONE;
3576 } else {
3577 printk("sk98lin: Illegal value \"%s\" for FlowCtrl_B\n",
3578 FlowCtrl_B[pAC->Index]);
3579 IsFlowCtrlDefined = SK_FALSE;
3580 }
3581 } else {
3582 IsFlowCtrlDefined = SK_FALSE;
3583 }
3584
3585 if (IsFlowCtrlDefined) {
3586 if ((AutoNeg == AN_OFF) && (FlowCtrl != SK_FLOW_MODE_NONE)) {
3587 printk("sk98lin: Port B: FlowControl"
3588 " impossible without AutoNegotiation,"
3589 " disabled\n");
3590 FlowCtrl = SK_FLOW_MODE_NONE;
3591 }
3592 pAC->GIni.GP[1].PFlowCtrlMode = FlowCtrl;
3593 }
3594
3595 /*
3596 ** d) What is the RoleParameter?
3597 */
3598 if (Role_B != NULL && pAC->Index<SK_MAX_CARD_PARAM &&
3599 Role_B[pAC->Index] != NULL) {
3600 if (strcmp(Role_B[pAC->Index],"")==0) {
3601 IsRoleDefined = SK_FALSE;
3602 } else if (strcmp(Role_B[pAC->Index],"Auto")==0) {
3603 MSMode = SK_MS_MODE_AUTO;
3604 } else if (strcmp(Role_B[pAC->Index],"Master")==0) {
3605 MSMode = SK_MS_MODE_MASTER;
3606 } else if (strcmp(Role_B[pAC->Index],"Slave")==0) {
3607 MSMode = SK_MS_MODE_SLAVE;
3608 } else {
3609 printk("sk98lin: Illegal value \"%s\" for Role_B\n",
3610 Role_B[pAC->Index]);
3611 IsRoleDefined = SK_FALSE;
3612 }
3613 } else {
3614 IsRoleDefined = SK_FALSE;
3615 }
3616
3617 if (IsRoleDefined) {
3618 pAC->GIni.GP[1].PMSMode = MSMode;
3619 }
3620
3621 /*
3622 ** Evaluate settings for both ports
3623 */
3624 pAC->ActivePort = 0;
3625 if (PrefPort != NULL && pAC->Index<SK_MAX_CARD_PARAM &&
3626 PrefPort[pAC->Index] != NULL) {
3627 if (strcmp(PrefPort[pAC->Index],"") == 0) { /* Auto */
3628 pAC->ActivePort = 0;
3629 pAC->Rlmt.Net[0].Preference = -1; /* auto */
3630 pAC->Rlmt.Net[0].PrefPort = 0;
3631 } else if (strcmp(PrefPort[pAC->Index],"A") == 0) {
3632 /*
3633 ** do not set ActivePort here, thus a port
3634 ** switch is issued after net up.
3635 */
3636 Port = 0;
3637 pAC->Rlmt.Net[0].Preference = Port;
3638 pAC->Rlmt.Net[0].PrefPort = Port;
3639 } else if (strcmp(PrefPort[pAC->Index],"B") == 0) {
3640 /*
3641 ** do not set ActivePort here, thus a port
3642 ** switch is issued after net up.
3643 */
3644 if (pAC->GIni.GIMacsFound == 1) {
3645 printk("sk98lin: Illegal value \"B\" for PrefPort.\n"
3646 " Port B not available on single port adapters.\n");
3647
3648 pAC->ActivePort = 0;
3649 pAC->Rlmt.Net[0].Preference = -1; /* auto */
3650 pAC->Rlmt.Net[0].PrefPort = 0;
3651 } else {
3652 Port = 1;
3653 pAC->Rlmt.Net[0].Preference = Port;
3654 pAC->Rlmt.Net[0].PrefPort = Port;
3655 }
3656 } else {
3657 printk("sk98lin: Illegal value \"%s\" for PrefPort\n",
3658 PrefPort[pAC->Index]);
3659 }
3660 }
3661
3662 pAC->RlmtNets = 1;
3663
3664 if (RlmtMode != NULL && pAC->Index<SK_MAX_CARD_PARAM &&
3665 RlmtMode[pAC->Index] != NULL) {
3666 if (strcmp(RlmtMode[pAC->Index], "") == 0) {
3667 pAC->RlmtMode = 0;
3668 } else if (strcmp(RlmtMode[pAC->Index], "CheckLinkState") == 0) {
3669 pAC->RlmtMode = SK_RLMT_CHECK_LINK;
3670 } else if (strcmp(RlmtMode[pAC->Index], "CheckLocalPort") == 0) {
3671 pAC->RlmtMode = SK_RLMT_CHECK_LINK |
3672 SK_RLMT_CHECK_LOC_LINK;
3673 } else if (strcmp(RlmtMode[pAC->Index], "CheckSeg") == 0) {
3674 pAC->RlmtMode = SK_RLMT_CHECK_LINK |
3675 SK_RLMT_CHECK_LOC_LINK |
3676 SK_RLMT_CHECK_SEG;
3677 } else if ((strcmp(RlmtMode[pAC->Index], "DualNet") == 0) &&
3678 (pAC->GIni.GIMacsFound == 2)) {
3679 pAC->RlmtMode = SK_RLMT_CHECK_LINK;
3680 pAC->RlmtNets = 2;
3681 } else {
3682 printk("sk98lin: Illegal value \"%s\" for"
3683 " RlmtMode, using default\n",
3684 RlmtMode[pAC->Index]);
3685 pAC->RlmtMode = 0;
3686 }
3687 } else {
3688 pAC->RlmtMode = 0;
3689 }
3690
3691 /*
3692 ** Check the interrupt moderation parameters
3693 */
3694 if (Moderation[pAC->Index] != NULL) {
3695 if (strcmp(Moderation[pAC->Index], "") == 0) {
3696 pAC->DynIrqModInfo.IntModTypeSelect = C_INT_MOD_NONE;
3697 } else if (strcmp(Moderation[pAC->Index], "Static") == 0) {
3698 pAC->DynIrqModInfo.IntModTypeSelect = C_INT_MOD_STATIC;
3699 } else if (strcmp(Moderation[pAC->Index], "Dynamic") == 0) {
3700 pAC->DynIrqModInfo.IntModTypeSelect = C_INT_MOD_DYNAMIC;
3701 } else if (strcmp(Moderation[pAC->Index], "None") == 0) {
3702 pAC->DynIrqModInfo.IntModTypeSelect = C_INT_MOD_NONE;
3703 } else {
3704 printk("sk98lin: Illegal value \"%s\" for Moderation.\n"
3705 " Disable interrupt moderation.\n",
3706 Moderation[pAC->Index]);
3707 pAC->DynIrqModInfo.IntModTypeSelect = C_INT_MOD_NONE;
3708 }
3709 } else {
3710 pAC->DynIrqModInfo.IntModTypeSelect = C_INT_MOD_NONE;
3711 }
3712
3713 if (Stats[pAC->Index] != NULL) {
3714 if (strcmp(Stats[pAC->Index], "Yes") == 0) {
3715 pAC->DynIrqModInfo.DisplayStats = SK_TRUE;
3716 } else {
3717 pAC->DynIrqModInfo.DisplayStats = SK_FALSE;
3718 }
3719 } else {
3720 pAC->DynIrqModInfo.DisplayStats = SK_FALSE;
3721 }
3722
3723 if (ModerationMask[pAC->Index] != NULL) {
3724 if (strcmp(ModerationMask[pAC->Index], "Rx") == 0) {
3725 pAC->DynIrqModInfo.MaskIrqModeration = IRQ_MASK_RX_ONLY;
3726 } else if (strcmp(ModerationMask[pAC->Index], "Tx") == 0) {
3727 pAC->DynIrqModInfo.MaskIrqModeration = IRQ_MASK_TX_ONLY;
3728 } else if (strcmp(ModerationMask[pAC->Index], "Sp") == 0) {
3729 pAC->DynIrqModInfo.MaskIrqModeration = IRQ_MASK_SP_ONLY;
3730 } else if (strcmp(ModerationMask[pAC->Index], "RxSp") == 0) {
3731 pAC->DynIrqModInfo.MaskIrqModeration = IRQ_MASK_SP_RX;
3732 } else if (strcmp(ModerationMask[pAC->Index], "SpRx") == 0) {
3733 pAC->DynIrqModInfo.MaskIrqModeration = IRQ_MASK_SP_RX;
3734 } else if (strcmp(ModerationMask[pAC->Index], "RxTx") == 0) {
3735 pAC->DynIrqModInfo.MaskIrqModeration = IRQ_MASK_TX_RX;
3736 } else if (strcmp(ModerationMask[pAC->Index], "TxRx") == 0) {
3737 pAC->DynIrqModInfo.MaskIrqModeration = IRQ_MASK_TX_RX;
3738 } else if (strcmp(ModerationMask[pAC->Index], "TxSp") == 0) {
3739 pAC->DynIrqModInfo.MaskIrqModeration = IRQ_MASK_SP_TX;
3740 } else if (strcmp(ModerationMask[pAC->Index], "SpTx") == 0) {
3741 pAC->DynIrqModInfo.MaskIrqModeration = IRQ_MASK_SP_TX;
3742 } else if (strcmp(ModerationMask[pAC->Index], "RxTxSp") == 0) {
3743 pAC->DynIrqModInfo.MaskIrqModeration = IRQ_MASK_RX_TX_SP;
3744 } else if (strcmp(ModerationMask[pAC->Index], "RxSpTx") == 0) {
3745 pAC->DynIrqModInfo.MaskIrqModeration = IRQ_MASK_RX_TX_SP;
3746 } else if (strcmp(ModerationMask[pAC->Index], "TxRxSp") == 0) {
3747 pAC->DynIrqModInfo.MaskIrqModeration = IRQ_MASK_RX_TX_SP;
3748 } else if (strcmp(ModerationMask[pAC->Index], "TxSpRx") == 0) {
3749 pAC->DynIrqModInfo.MaskIrqModeration = IRQ_MASK_RX_TX_SP;
3750 } else if (strcmp(ModerationMask[pAC->Index], "SpTxRx") == 0) {
3751 pAC->DynIrqModInfo.MaskIrqModeration = IRQ_MASK_RX_TX_SP;
3752 } else if (strcmp(ModerationMask[pAC->Index], "SpRxTx") == 0) {
3753 pAC->DynIrqModInfo.MaskIrqModeration = IRQ_MASK_RX_TX_SP;
3754 } else { /* some rubbish */
3755 pAC->DynIrqModInfo.MaskIrqModeration = IRQ_MASK_RX_ONLY;
3756 }
3757 } else { /* operator has stated nothing */
3758 pAC->DynIrqModInfo.MaskIrqModeration = IRQ_MASK_TX_RX;
3759 }
3760
3761 if (AutoSizing[pAC->Index] != NULL) {
3762 if (strcmp(AutoSizing[pAC->Index], "On") == 0) {
3763 pAC->DynIrqModInfo.AutoSizing = SK_FALSE;
3764 } else {
3765 pAC->DynIrqModInfo.AutoSizing = SK_FALSE;
3766 }
3767 } else { /* operator has stated nothing */
3768 pAC->DynIrqModInfo.AutoSizing = SK_FALSE;
3769 }
3770
3771 if (IntsPerSec[pAC->Index] != 0) {
3772 if ((IntsPerSec[pAC->Index]< C_INT_MOD_IPS_LOWER_RANGE) ||
3773 (IntsPerSec[pAC->Index] > C_INT_MOD_IPS_UPPER_RANGE)) {
3774 printk("sk98lin: Illegal value \"%d\" for IntsPerSec. (Range: %d - %d)\n"
3775 " Using default value of %i.\n",
3776 IntsPerSec[pAC->Index],
3777 C_INT_MOD_IPS_LOWER_RANGE,
3778 C_INT_MOD_IPS_UPPER_RANGE,
3779 C_INTS_PER_SEC_DEFAULT);
3780 pAC->DynIrqModInfo.MaxModIntsPerSec = C_INTS_PER_SEC_DEFAULT;
3781 } else {
3782 pAC->DynIrqModInfo.MaxModIntsPerSec = IntsPerSec[pAC->Index];
3783 }
3784 } else {
3785 pAC->DynIrqModInfo.MaxModIntsPerSec = C_INTS_PER_SEC_DEFAULT;
3786 }
3787
3788 /*
3789 ** Evaluate upper and lower moderation threshold
3790 */
3791 pAC->DynIrqModInfo.MaxModIntsPerSecUpperLimit =
3792 pAC->DynIrqModInfo.MaxModIntsPerSec +
3793 (pAC->DynIrqModInfo.MaxModIntsPerSec / 2);
3794
3795 pAC->DynIrqModInfo.MaxModIntsPerSecLowerLimit =
3796 pAC->DynIrqModInfo.MaxModIntsPerSec -
3797 (pAC->DynIrqModInfo.MaxModIntsPerSec / 2);
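	/*
	** Worked example: with MaxModIntsPerSec = 2000, the upper limit
	** above becomes 2000 + 1000 = 3000 and the lower limit becomes
	** 2000 - 1000 = 1000 interrupts per second.
	*/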
3798
3799 pAC->DynIrqModInfo.PrevTimeVal = jiffies; /* initial value */
3800
3801
3802} /* GetConfiguration */
3803
3804
3805/*****************************************************************************
3806 *
3807 *	ProductStr - return an adapter identification string from vpd
3808 *
3809 * Description:
3810 *	This function reads the product name string from the vpd area
3811 *	and copies it into the caller-supplied buffer DeviceStr.
3812 *
3813 * Returns: the return code of VpdRead
3814 */
3815static inline int ProductStr(
3816 SK_AC *pAC, /* pointer to adapter context */
3817 char *DeviceStr, /* result string */
3818 int StrLen /* length of the string */
3819)
3820{
3821char Keyword[] = VPD_NAME; /* vpd productname identifier */
3822int ReturnCode; /* return code from vpd_read */
3823unsigned long Flags;
3824
3825 spin_lock_irqsave(&pAC->SlowPathLock, Flags);
3826 ReturnCode = VpdRead(pAC, pAC->IoBase, Keyword, DeviceStr, &StrLen);
3827 spin_unlock_irqrestore(&pAC->SlowPathLock, Flags);
3828
3829 return ReturnCode;
3830} /* ProductStr */
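
/*
 * Hedged usage sketch (buffer name and size are made up; only the
 * ProductStr() signature above is taken from this file, and a return code
 * of 0 is assumed to indicate success):
 */
#if 0
	char DeviceStr[80];

	if (ProductStr(pAC, DeviceStr, sizeof(DeviceStr)) == 0)
		printk("sk98lin: device name: %s\n", DeviceStr);
#endif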
3831
3832/*****************************************************************************
3833 *
3834 * StartDrvCleanupTimer - Start timer to check for descriptors which
3835 *			  might be placed in the descriptor ring, but
3836 *			  haven't been handled yet
3837 *
3838 * Description:
3839 *	This function requests a HW-timer for the Yukon card. The actions to
3840 *	perform when this timer expires are located in SkDrvEvent().
3841 *
3842 * Returns: N/A
3843 */
3844static void
3845StartDrvCleanupTimer(SK_AC *pAC) {
3846 SK_EVPARA EventParam; /* Event struct for timer event */
3847
3848 SK_MEMSET((char *) &EventParam, 0, sizeof(EventParam));
3849 EventParam.Para32[0] = SK_DRV_RX_CLEANUP_TIMER;
3850 SkTimerStart(pAC, pAC->IoBase, &pAC->DrvCleanupTimer,
3851 SK_DRV_RX_CLEANUP_TIMER_LENGTH,
3852 SKGE_DRV, SK_DRV_TIMER, EventParam);
3853}
3854
3855/*****************************************************************************
3856 *
3857 * StopDrvCleanupTimer - Stop timer to check for descriptors
3858 *
3859 * Description:
3860 *	This function stops the HW-timer of the Yukon card that was
3861 *	started by StartDrvCleanupTimer().
3862 *
3863 * Returns: N/A
3864 */
3865static void
3866StopDrvCleanupTimer(SK_AC *pAC) {
3867 SkTimerStop(pAC, pAC->IoBase, &pAC->DrvCleanupTimer);
3868 SK_MEMSET((char *) &pAC->DrvCleanupTimer, 0, sizeof(SK_TIMER));
3869}
3870
3871/****************************************************************************/
3872/* functions for common modules *********************************************/
3873/****************************************************************************/
3874
3875
3876/*****************************************************************************
3877 *
3878 * SkDrvAllocRlmtMbuf - allocate an RLMT mbuf
3879 *
3880 * Description:
3881 * This routine returns an RLMT mbuf or NULL. The RLMT Mbuf structure
3882 * is embedded into a socket buff data area.
3883 *
3884 * Context:
3885 * runtime
3886 *
3887 * Returns:
3888 * NULL or pointer to Mbuf.
3889 */
3890SK_MBUF *SkDrvAllocRlmtMbuf(
3891SK_AC *pAC, /* pointer to adapter context */
3892SK_IOC IoC, /* the IO-context */
3893unsigned BufferSize) /* size of the requested buffer */
3894{
3895SK_MBUF *pRlmtMbuf; /* pointer to a new rlmt-mbuf structure */
3896struct sk_buff *pMsgBlock; /* pointer to a new message block */
3897
3898 pMsgBlock = alloc_skb(BufferSize + sizeof(SK_MBUF), GFP_ATOMIC);
3899 if (pMsgBlock == NULL) {
3900 return (NULL);
3901 }
3902 pRlmtMbuf = (SK_MBUF*) pMsgBlock->data;
3903 skb_reserve(pMsgBlock, sizeof(SK_MBUF));
3904 pRlmtMbuf->pNext = NULL;
3905 pRlmtMbuf->pOs = pMsgBlock;
3906 pRlmtMbuf->pData = pMsgBlock->data; /* Data buffer. */
3907 pRlmtMbuf->Size = BufferSize; /* Data buffer size. */
3908 pRlmtMbuf->Length = 0; /* Length of packet (<= Size). */
3909 return (pRlmtMbuf);
3910
3911} /* SkDrvAllocRlmtMbuf */
3912
3913
3914/*****************************************************************************
3915 *
3916 * SkDrvFreeRlmtMbuf - free an RLMT mbuf
3917 *
3918 * Description:
3919 * This routine frees one or more RLMT mbuf(s).
3920 *
3921 * Context:
3922 * runtime
3923 *
3924 * Returns:
3925 * Nothing
3926 */
3927void SkDrvFreeRlmtMbuf(
3928SK_AC *pAC, /* pointer to adapter context */
3929SK_IOC IoC, /* the IO-context */
 3930 SK_MBUF	*pMbuf)		/* pointer to the first mbuf to free */
3931{
3932SK_MBUF *pFreeMbuf;
3933SK_MBUF *pNextMbuf;
3934
3935 pFreeMbuf = pMbuf;
3936 do {
3937 pNextMbuf = pFreeMbuf->pNext;
3938 DEV_KFREE_SKB_ANY(pFreeMbuf->pOs);
3939 pFreeMbuf = pNextMbuf;
3940 } while ( pFreeMbuf != NULL );
3941} /* SkDrvFreeRlmtMbuf */
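/*
 * Illustrative sketch only (hypothetical usage, not called by the
 * driver): allocate an RLMT mbuf with SkDrvAllocRlmtMbuf(), copy a
 * frame into its data area and release it again with
 * SkDrvFreeRlmtMbuf(), which also frees the backing socket buffer.
 */
static void SkRlmtMbufExample(SK_AC *pAC, SK_IOC IoC,
			      const char *Frame, unsigned Len)
{
	SK_MBUF *pMbuf = SkDrvAllocRlmtMbuf(pAC, IoC, Len);

	if (pMbuf == NULL)
		return;				/* out of memory */

	memcpy(pMbuf->pData, Frame, Len);	/* fill the data buffer */
	pMbuf->Length = Len;			/* payload length (<= Size) */

	SkDrvFreeRlmtMbuf(pAC, IoC, pMbuf);	/* releases the skb as well */
}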
3942
3943
3944/*****************************************************************************
3945 *
3946 * SkOsGetTime - provide a time value
3947 *
3948 * Description:
3949 * This routine provides a time value. The unit is 1/HZ (defined by Linux).
3950 * It is not used for absolute time, but only for time differences.
3951 *
3952 *
3953 * Returns:
3954 * Time value
3955 */
3956SK_U64 SkOsGetTime(SK_AC *pAC)
3957{
3958 SK_U64 PrivateJiffies;
3959 SkOsGetTimeCurrent(pAC, &PrivateJiffies);
3960 return PrivateJiffies;
3961} /* SkOsGetTime */
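/*
 * Illustrative sketch only (hypothetical helper): since SkOsGetTime()
 * returns jiffies and is only meant for time differences, an interval
 * can be converted to milliseconds with the usual 1000/HZ scaling.
 */
static unsigned long SkElapsedMs(SK_U64 Start, SK_U64 End)
{
	return (unsigned long)(End - Start) * 1000 / HZ;
}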
3962
3963
3964/*****************************************************************************
3965 *
3966 * SkPciReadCfgDWord - read a 32 bit value from pci config space
3967 *
3968 * Description:
3969 * This routine reads a 32 bit value from the pci configuration
3970 * space.
3971 *
3972 * Returns:
3973 * 0 - indicate everything worked ok.
3974 * != 0 - error indication
3975 */
3976int SkPciReadCfgDWord(
3977SK_AC *pAC, /* Adapter Control structure pointer */
3978int PciAddr, /* PCI register address */
3979SK_U32 *pVal) /* pointer to store the read value */
3980{
3981 pci_read_config_dword(pAC->PciDev, PciAddr, pVal);
3982 return(0);
3983} /* SkPciReadCfgDWord */
3984
3985
3986/*****************************************************************************
3987 *
3988 * SkPciReadCfgWord - read a 16 bit value from pci config space
3989 *
3990 * Description:
3991 * This routine reads a 16 bit value from the pci configuration
3992 * space.
3993 *
3994 * Returns:
3995 * 0 - indicate everything worked ok.
3996 * != 0 - error indication
3997 */
3998int SkPciReadCfgWord(
3999SK_AC *pAC, /* Adapter Control structure pointer */
4000int PciAddr, /* PCI register address */
4001SK_U16 *pVal) /* pointer to store the read value */
4002{
4003 pci_read_config_word(pAC->PciDev, PciAddr, pVal);
4004 return(0);
4005} /* SkPciReadCfgWord */
4006
4007
4008/*****************************************************************************
4009 *
 4010 * 	SkPciReadCfgByte - read an 8 bit value from pci config space
4011 *
4012 * Description:
 4013 *	This routine reads an 8 bit value from the pci configuration
4014 * space.
4015 *
4016 * Returns:
4017 * 0 - indicate everything worked ok.
4018 * != 0 - error indication
4019 */
4020int SkPciReadCfgByte(
4021SK_AC *pAC, /* Adapter Control structure pointer */
4022int PciAddr, /* PCI register address */
4023SK_U8 *pVal) /* pointer to store the read value */
4024{
4025 pci_read_config_byte(pAC->PciDev, PciAddr, pVal);
4026 return(0);
4027} /* SkPciReadCfgByte */
4028
4029
4030/*****************************************************************************
4031 *
4032 * SkPciWriteCfgWord - write a 16 bit value to pci config space
4033 *
4034 * Description:
4035 * This routine writes a 16 bit value to the pci configuration
4036 * space. The flag PciConfigUp indicates whether the config space
 4037 *	is accessible or must be set up first.
4038 *
4039 * Returns:
4040 * 0 - indicate everything worked ok.
4041 * != 0 - error indication
4042 */
4043int SkPciWriteCfgWord(
4044SK_AC *pAC, /* Adapter Control structure pointer */
4045int PciAddr, /* PCI register address */
 4046 SK_U16	Val)		/* value to write */
4047{
4048 pci_write_config_word(pAC->PciDev, PciAddr, Val);
4049 return(0);
4050} /* SkPciWriteCfgWord */
4051
4052
4053/*****************************************************************************
4054 *
 4055 * 	SkPciWriteCfgByte - write an 8 bit value to pci config space
 4056 *
 4057 * Description:
 4058 *	This routine writes an 8 bit value to the pci configuration
 4059 *	space. The flag PciConfigUp indicates whether the config space
 4060 *	is accessible or must be set up first.
4061 *
4062 * Returns:
4063 * 0 - indicate everything worked ok.
4064 * != 0 - error indication
4065 */
4066int SkPciWriteCfgByte(
4067SK_AC *pAC, /* Adapter Control structure pointer */
4068int PciAddr, /* PCI register address */
 4069 SK_U8	Val)		/* value to write */
4070{
4071 pci_write_config_byte(pAC->PciDev, PciAddr, Val);
4072 return(0);
4073} /* SkPciWriteCfgByte */
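/*
 * Illustrative sketch only (hypothetical usage): the accessors above are
 * thin wrappers around the kernel's PCI config helpers. Setting the
 * bus-master bit in the PCI command register, for example, could look
 * like this.
 */
static void SkEnableBusMasterExample(SK_AC *pAC)
{
	SK_U16 Cmd;

	SkPciReadCfgWord(pAC, PCI_COMMAND, &Cmd);	/* read command register */
	Cmd |= PCI_COMMAND_MASTER;			/* enable bus mastering */
	SkPciWriteCfgWord(pAC, PCI_COMMAND, Cmd);	/* write it back */
}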
4074
4075
4076/*****************************************************************************
4077 *
4078 * SkDrvEvent - handle driver events
4079 *
4080 * Description:
4081 * This function handles events from all modules directed to the driver
4082 *
4083 * Context:
4084 * Is called under protection of slow path lock.
4085 *
4086 * Returns:
4087 * 0 if everything ok
4088 * < 0 on error
4089 *
4090 */
4091int SkDrvEvent(
4092SK_AC *pAC, /* pointer to adapter context */
4093SK_IOC IoC, /* io-context */
4094SK_U32 Event, /* event-id */
4095SK_EVPARA Param) /* event-parameter */
4096{
4097SK_MBUF *pRlmtMbuf; /* pointer to a rlmt-mbuf structure */
4098struct sk_buff *pMsg; /* pointer to a message block */
4099int FromPort; /* the port from which we switch away */
4100int ToPort; /* the port we switch to */
4101SK_EVPARA NewPara; /* parameter for further events */
4102int Stat;
4103unsigned long Flags;
4104SK_BOOL DualNet;
4105
4106 switch (Event) {
4107 case SK_DRV_ADAP_FAIL:
4108 SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_EVENT,
4109 ("ADAPTER FAIL EVENT\n"));
4110 printk("%s: Adapter failed.\n", pAC->dev[0]->name);
4111 /* disable interrupts */
4112 SK_OUT32(pAC->IoBase, B0_IMSK, 0);
4113 /* cgoos */
4114 break;
4115 case SK_DRV_PORT_FAIL:
4116 FromPort = Param.Para32[0];
4117 SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_EVENT,
4118 ("PORT FAIL EVENT, Port: %d\n", FromPort));
4119 if (FromPort == 0) {
4120 printk("%s: Port A failed.\n", pAC->dev[0]->name);
4121 } else {
4122 printk("%s: Port B failed.\n", pAC->dev[1]->name);
4123 }
4124 /* cgoos */
4125 break;
4126 case SK_DRV_PORT_RESET: /* SK_U32 PortIdx */
4127 /* action list 4 */
4128 FromPort = Param.Para32[0];
4129 SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_EVENT,
4130 ("PORT RESET EVENT, Port: %d ", FromPort));
4131 NewPara.Para64 = FromPort;
4132 SkPnmiEvent(pAC, IoC, SK_PNMI_EVT_XMAC_RESET, NewPara);
4133 spin_lock_irqsave(
4134 &pAC->TxPort[FromPort][TX_PRIO_LOW].TxDesRingLock,
4135 Flags);
4136
4137 SkGeStopPort(pAC, IoC, FromPort, SK_STOP_ALL, SK_HARD_RST);
4138 netif_carrier_off(pAC->dev[Param.Para32[0]]);
4139 spin_unlock_irqrestore(
4140 &pAC->TxPort[FromPort][TX_PRIO_LOW].TxDesRingLock,
4141 Flags);
4142
4143 /* clear rx ring from received frames */
4144 ReceiveIrq(pAC, &pAC->RxPort[FromPort], SK_FALSE);
4145
4146 ClearTxRing(pAC, &pAC->TxPort[FromPort][TX_PRIO_LOW]);
4147 spin_lock_irqsave(
4148 &pAC->TxPort[FromPort][TX_PRIO_LOW].TxDesRingLock,
4149 Flags);
4150
4151 /* tschilling: Handling of return value inserted. */
4152 if (SkGeInitPort(pAC, IoC, FromPort)) {
4153 if (FromPort == 0) {
4154 printk("%s: SkGeInitPort A failed.\n", pAC->dev[0]->name);
4155 } else {
4156 printk("%s: SkGeInitPort B failed.\n", pAC->dev[1]->name);
4157 }
4158 }
4159 SkAddrMcUpdate(pAC,IoC, FromPort);
4160 PortReInitBmu(pAC, FromPort);
4161 SkGePollTxD(pAC, IoC, FromPort, SK_TRUE);
4162 ClearAndStartRx(pAC, FromPort);
4163 spin_unlock_irqrestore(
4164 &pAC->TxPort[FromPort][TX_PRIO_LOW].TxDesRingLock,
4165 Flags);
4166 break;
4167 case SK_DRV_NET_UP: /* SK_U32 PortIdx */
4168 { struct net_device *dev = pAC->dev[Param.Para32[0]];
4169 /* action list 5 */
4170 FromPort = Param.Para32[0];
4171 SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_EVENT,
4172 ("NET UP EVENT, Port: %d ", Param.Para32[0]));
4173 /* Mac update */
4174 SkAddrMcUpdate(pAC,IoC, FromPort);
4175
4176 if (DoPrintInterfaceChange) {
4177 printk("%s: network connection up using"
4178 " port %c\n", pAC->dev[Param.Para32[0]]->name, 'A'+Param.Para32[0]);
4179
4180 /* tschilling: Values changed according to LinkSpeedUsed. */
4181 Stat = pAC->GIni.GP[FromPort].PLinkSpeedUsed;
4182 if (Stat == SK_LSPEED_STAT_10MBPS) {
4183 printk(" speed: 10\n");
4184 } else if (Stat == SK_LSPEED_STAT_100MBPS) {
4185 printk(" speed: 100\n");
4186 } else if (Stat == SK_LSPEED_STAT_1000MBPS) {
4187 printk(" speed: 1000\n");
4188 } else {
4189 printk(" speed: unknown\n");
4190 }
4191
4192
4193 Stat = pAC->GIni.GP[FromPort].PLinkModeStatus;
4194 if (Stat == SK_LMODE_STAT_AUTOHALF ||
4195 Stat == SK_LMODE_STAT_AUTOFULL) {
4196 printk(" autonegotiation: yes\n");
4197 }
4198 else {
4199 printk(" autonegotiation: no\n");
4200 }
4201 if (Stat == SK_LMODE_STAT_AUTOHALF ||
4202 Stat == SK_LMODE_STAT_HALF) {
4203 printk(" duplex mode: half\n");
4204 }
4205 else {
4206 printk(" duplex mode: full\n");
4207 }
4208 Stat = pAC->GIni.GP[FromPort].PFlowCtrlStatus;
4209 if (Stat == SK_FLOW_STAT_REM_SEND ) {
4210 printk(" flowctrl: remote send\n");
4211 }
4212 else if (Stat == SK_FLOW_STAT_LOC_SEND ){
4213 printk(" flowctrl: local send\n");
4214 }
4215 else if (Stat == SK_FLOW_STAT_SYMMETRIC ){
4216 printk(" flowctrl: symmetric\n");
4217 }
4218 else {
4219 printk(" flowctrl: none\n");
4220 }
4221
4222 /* tschilling: Check against CopperType now. */
4223 if ((pAC->GIni.GICopperType == SK_TRUE) &&
4224 (pAC->GIni.GP[FromPort].PLinkSpeedUsed ==
4225 SK_LSPEED_STAT_1000MBPS)) {
4226 Stat = pAC->GIni.GP[FromPort].PMSStatus;
4227 if (Stat == SK_MS_STAT_MASTER ) {
4228 printk(" role: master\n");
4229 }
4230 else if (Stat == SK_MS_STAT_SLAVE ) {
4231 printk(" role: slave\n");
4232 }
4233 else {
4234 printk(" role: ???\n");
4235 }
4236 }
4237
4238 /*
4239 Display dim (dynamic interrupt moderation)
4240 informations
4241 */
4242 if (pAC->DynIrqModInfo.IntModTypeSelect == C_INT_MOD_STATIC)
4243 printk(" irq moderation: static (%d ints/sec)\n",
4244 pAC->DynIrqModInfo.MaxModIntsPerSec);
4245 else if (pAC->DynIrqModInfo.IntModTypeSelect == C_INT_MOD_DYNAMIC)
4246 printk(" irq moderation: dynamic (%d ints/sec)\n",
4247 pAC->DynIrqModInfo.MaxModIntsPerSec);
4248 else
4249 printk(" irq moderation: disabled\n");
4250
4251
4252 printk(" scatter-gather: %s\n",
4253 (dev->features & NETIF_F_SG) ? "enabled" : "disabled");
4254 printk(" tx-checksum: %s\n",
4255 (dev->features & NETIF_F_IP_CSUM) ? "enabled" : "disabled");
4256 printk(" rx-checksum: %s\n",
4257 pAC->RxPort[Param.Para32[0]].RxCsum ? "enabled" : "disabled");
4258
4259 } else {
4260 DoPrintInterfaceChange = SK_TRUE;
4261 }
4262
4263 if ((Param.Para32[0] != pAC->ActivePort) &&
4264 (pAC->RlmtNets == 1)) {
4265 NewPara.Para32[0] = pAC->ActivePort;
4266 NewPara.Para32[1] = Param.Para32[0];
4267 SkEventQueue(pAC, SKGE_DRV, SK_DRV_SWITCH_INTERN,
4268 NewPara);
4269 }
4270
4271 /* Inform the world that link protocol is up. */
4272 netif_carrier_on(dev);
4273 break;
4274 }
4275 case SK_DRV_NET_DOWN: /* SK_U32 Reason */
4276 /* action list 7 */
4277 SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_EVENT,
4278 ("NET DOWN EVENT "));
4279 if (DoPrintInterfaceChange) {
4280 printk("%s: network connection down\n",
4281 pAC->dev[Param.Para32[1]]->name);
4282 } else {
4283 DoPrintInterfaceChange = SK_TRUE;
4284 }
4285 netif_carrier_off(pAC->dev[Param.Para32[1]]);
4286 break;
4287 case SK_DRV_SWITCH_HARD: /* SK_U32 FromPortIdx SK_U32 ToPortIdx */
4288 SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_EVENT,
4289 ("PORT SWITCH HARD "));
4290 case SK_DRV_SWITCH_SOFT: /* SK_U32 FromPortIdx SK_U32 ToPortIdx */
4291 /* action list 6 */
4292 printk("%s: switching to port %c\n", pAC->dev[0]->name,
4293 'A'+Param.Para32[1]);
4294 case SK_DRV_SWITCH_INTERN: /* SK_U32 FromPortIdx SK_U32 ToPortIdx */
4295 FromPort = Param.Para32[0];
4296 ToPort = Param.Para32[1];
4297 SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_EVENT,
4298 ("PORT SWITCH EVENT, From: %d To: %d (Pref %d) ",
4299 FromPort, ToPort, pAC->Rlmt.Net[0].PrefPort));
4300 NewPara.Para64 = FromPort;
4301 SkPnmiEvent(pAC, IoC, SK_PNMI_EVT_XMAC_RESET, NewPara);
4302 NewPara.Para64 = ToPort;
4303 SkPnmiEvent(pAC, IoC, SK_PNMI_EVT_XMAC_RESET, NewPara);
4304 spin_lock_irqsave(
4305 &pAC->TxPort[FromPort][TX_PRIO_LOW].TxDesRingLock,
4306 Flags);
4307 spin_lock(&pAC->TxPort[ToPort][TX_PRIO_LOW].TxDesRingLock);
4308 SkGeStopPort(pAC, IoC, FromPort, SK_STOP_ALL, SK_SOFT_RST);
4309 SkGeStopPort(pAC, IoC, ToPort, SK_STOP_ALL, SK_SOFT_RST);
4310 spin_unlock(&pAC->TxPort[ToPort][TX_PRIO_LOW].TxDesRingLock);
4311 spin_unlock_irqrestore(
4312 &pAC->TxPort[FromPort][TX_PRIO_LOW].TxDesRingLock,
4313 Flags);
4314
4315 ReceiveIrq(pAC, &pAC->RxPort[FromPort], SK_FALSE); /* clears rx ring */
4316 ReceiveIrq(pAC, &pAC->RxPort[ToPort], SK_FALSE); /* clears rx ring */
4317
4318 ClearTxRing(pAC, &pAC->TxPort[FromPort][TX_PRIO_LOW]);
4319 ClearTxRing(pAC, &pAC->TxPort[ToPort][TX_PRIO_LOW]);
4320 spin_lock_irqsave(
4321 &pAC->TxPort[FromPort][TX_PRIO_LOW].TxDesRingLock,
4322 Flags);
4323 spin_lock(&pAC->TxPort[ToPort][TX_PRIO_LOW].TxDesRingLock);
4324 pAC->ActivePort = ToPort;
4325#if 0
4326 SetQueueSizes(pAC);
4327#else
4328 /* tschilling: New common function with minimum size check. */
4329 DualNet = SK_FALSE;
4330 if (pAC->RlmtNets == 2) {
4331 DualNet = SK_TRUE;
4332 }
4333
4334 if (SkGeInitAssignRamToQueues(
4335 pAC,
4336 pAC->ActivePort,
4337 DualNet)) {
4338 spin_unlock(&pAC->TxPort[ToPort][TX_PRIO_LOW].TxDesRingLock);
4339 spin_unlock_irqrestore(
4340 &pAC->TxPort[FromPort][TX_PRIO_LOW].TxDesRingLock,
4341 Flags);
4342 printk("SkGeInitAssignRamToQueues failed.\n");
4343 break;
4344 }
4345#endif
4346 /* tschilling: Handling of return values inserted. */
4347 if (SkGeInitPort(pAC, IoC, FromPort) ||
4348 SkGeInitPort(pAC, IoC, ToPort)) {
4349 printk("%s: SkGeInitPort failed.\n", pAC->dev[0]->name);
4350 }
4351 if (Event == SK_DRV_SWITCH_SOFT) {
4352 SkMacRxTxEnable(pAC, IoC, FromPort);
4353 }
4354 SkMacRxTxEnable(pAC, IoC, ToPort);
4355 SkAddrSwap(pAC, IoC, FromPort, ToPort);
4356 SkAddrMcUpdate(pAC, IoC, FromPort);
4357 SkAddrMcUpdate(pAC, IoC, ToPort);
4358 PortReInitBmu(pAC, FromPort);
4359 PortReInitBmu(pAC, ToPort);
4360 SkGePollTxD(pAC, IoC, FromPort, SK_TRUE);
4361 SkGePollTxD(pAC, IoC, ToPort, SK_TRUE);
4362 ClearAndStartRx(pAC, FromPort);
4363 ClearAndStartRx(pAC, ToPort);
4364 spin_unlock(&pAC->TxPort[ToPort][TX_PRIO_LOW].TxDesRingLock);
4365 spin_unlock_irqrestore(
4366 &pAC->TxPort[FromPort][TX_PRIO_LOW].TxDesRingLock,
4367 Flags);
4368 break;
4369 case SK_DRV_RLMT_SEND: /* SK_MBUF *pMb */
4370 SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_EVENT,
4371 ("RLS "));
4372 pRlmtMbuf = (SK_MBUF*) Param.pParaPtr;
4373 pMsg = (struct sk_buff*) pRlmtMbuf->pOs;
4374 skb_put(pMsg, pRlmtMbuf->Length);
4375 if (XmitFrame(pAC, &pAC->TxPort[pRlmtMbuf->PortIdx][TX_PRIO_LOW],
4376 pMsg) < 0)
4377
4378 DEV_KFREE_SKB_ANY(pMsg);
4379 break;
4380 case SK_DRV_TIMER:
4381 if (Param.Para32[0] == SK_DRV_MODERATION_TIMER) {
4382 /*
4383 ** expiration of the moderation timer implies that
4384 ** dynamic moderation is to be applied
4385 */
4386 SkDimStartModerationTimer(pAC);
4387 SkDimModerate(pAC);
4388 if (pAC->DynIrqModInfo.DisplayStats) {
4389 SkDimDisplayModerationSettings(pAC);
4390 }
4391 } else if (Param.Para32[0] == SK_DRV_RX_CLEANUP_TIMER) {
4392 /*
4393 ** check if we need to check for descriptors which
4394 ** haven't been handled the last millisecs
4395 */
4396 StartDrvCleanupTimer(pAC);
4397 if (pAC->GIni.GIMacsFound == 2) {
4398 ReceiveIrq(pAC, &pAC->RxPort[1], SK_FALSE);
4399 }
4400 ReceiveIrq(pAC, &pAC->RxPort[0], SK_FALSE);
4401 } else {
4402 printk("Expiration of unknown timer\n");
4403 }
4404 break;
4405 default:
4406 break;
4407 }
4408 SK_DBG_MSG(NULL, SK_DBGMOD_DRV, SK_DBGCAT_DRV_EVENT,
4409 ("END EVENT "));
4410
4411 return (0);
4412} /* SkDrvEvent */
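/*
 * Illustrative sketch only (hypothetical caller): other modules reach
 * SkDrvEvent() by queueing an event for the SKGE_DRV class and letting
 * the dispatcher run, e.g. a soft switch from port A to port B. A real
 * caller holds pAC->SlowPathLock, as skge_remove_one() does.
 */
static void SkQueueSoftSwitchExample(SK_AC *pAC, SK_IOC IoC)
{
	SK_EVPARA Para;

	Para.Para32[0] = 0;	/* FromPortIdx: port A */
	Para.Para32[1] = 1;	/* ToPortIdx:   port B */
	SkEventQueue(pAC, SKGE_DRV, SK_DRV_SWITCH_SOFT, Para);
	SkEventDispatcher(pAC, IoC);	/* drains the queue, invokes SkDrvEvent() */
}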
4413
4414
4415/*****************************************************************************
4416 *
4417 * SkErrorLog - log errors
4418 *
4419 * Description:
4420 * This function logs errors to the system buffer and to the console
4421 *
4422 * Returns:
4423 * 0 if everything ok
4424 * < 0 on error
4425 *
4426 */
4427void SkErrorLog(
4428SK_AC *pAC,
4429int ErrClass,
4430int ErrNum,
4431char *pErrorMsg)
4432{
4433char ClassStr[80];
4434
4435 switch (ErrClass) {
4436 case SK_ERRCL_OTHER:
4437 strcpy(ClassStr, "Other error");
4438 break;
4439 case SK_ERRCL_CONFIG:
4440 strcpy(ClassStr, "Configuration error");
4441 break;
4442 case SK_ERRCL_INIT:
4443 strcpy(ClassStr, "Initialization error");
4444 break;
4445 case SK_ERRCL_NORES:
4446 strcpy(ClassStr, "Out of resources error");
4447 break;
4448 case SK_ERRCL_SW:
4449 strcpy(ClassStr, "internal Software error");
4450 break;
4451 case SK_ERRCL_HW:
4452 strcpy(ClassStr, "Hardware failure");
4453 break;
4454 case SK_ERRCL_COMM:
4455 strcpy(ClassStr, "Communication error");
4456 break;
	default:
		strcpy(ClassStr, "Unknown error class");
		break;
 4457	}
4458 printk(KERN_INFO "%s: -- ERROR --\n Class: %s\n"
4459 " Nr: 0x%x\n Msg: %s\n", pAC->dev[0]->name,
4460 ClassStr, ErrNum, pErrorMsg);
4461
4462} /* SkErrorLog */
4463
4464#ifdef SK_DIAG_SUPPORT
4465
4466/*****************************************************************************
4467 *
4468 * SkDrvEnterDiagMode - handles DIAG attach request
4469 *
4470 * Description:
 4471 *	Notify the kernel that it must NOT access the card any longer
 4472 *	because the DIAG tool has attached, and deinitialize the card
4473 *
4474 * Returns:
4475 * int
4476 */
4477int SkDrvEnterDiagMode(
4478SK_AC *pAc) /* pointer to adapter context */
4479{
4480 DEV_NET *pNet = netdev_priv(pAc->dev[0]);
4481 SK_AC *pAC = pNet->pAC;
4482
4483 SK_MEMCPY(&(pAc->PnmiBackup), &(pAc->PnmiStruct),
4484 sizeof(SK_PNMI_STRUCT_DATA));
4485
4486 pAC->DiagModeActive = DIAG_ACTIVE;
4487 if (pAC->BoardLevel > SK_INIT_DATA) {
4488 if (netif_running(pAC->dev[0])) {
4489 pAC->WasIfUp[0] = SK_TRUE;
4490 pAC->DiagFlowCtrl = SK_TRUE; /* for SkGeClose */
4491 DoPrintInterfaceChange = SK_FALSE;
4492 SkDrvDeInitAdapter(pAC, 0); /* performs SkGeClose */
4493 } else {
4494 pAC->WasIfUp[0] = SK_FALSE;
4495 }
4496 if (pNet != netdev_priv(pAC->dev[1])) {
4497 pNet = netdev_priv(pAC->dev[1]);
4498 if (netif_running(pAC->dev[1])) {
4499 pAC->WasIfUp[1] = SK_TRUE;
4500 pAC->DiagFlowCtrl = SK_TRUE; /* for SkGeClose */
4501 DoPrintInterfaceChange = SK_FALSE;
4502 SkDrvDeInitAdapter(pAC, 1); /* do SkGeClose */
4503 } else {
4504 pAC->WasIfUp[1] = SK_FALSE;
4505 }
4506 }
4507 pAC->BoardLevel = SK_INIT_DATA;
4508 }
4509 return(0);
4510}
4511
4512/*****************************************************************************
4513 *
4514 * SkDrvLeaveDiagMode - handles DIAG detach request
4515 *
4516 * Description:
 4517 *	Notify the kernel that it may access the card again after the
 4518 *	DIAG tool has detached, and re-initialize the card
4519 *
4520 * Returns:
4521 * int
4522 */
4523int SkDrvLeaveDiagMode(
4524SK_AC *pAc) /* pointer to adapter control context */
4525{
4526 SK_MEMCPY(&(pAc->PnmiStruct), &(pAc->PnmiBackup),
4527 sizeof(SK_PNMI_STRUCT_DATA));
4528 pAc->DiagModeActive = DIAG_NOTACTIVE;
4529 pAc->Pnmi.DiagAttached = SK_DIAG_IDLE;
4530 if (pAc->WasIfUp[0] == SK_TRUE) {
4531 pAc->DiagFlowCtrl = SK_TRUE; /* for SkGeClose */
4532 DoPrintInterfaceChange = SK_FALSE;
4533 SkDrvInitAdapter(pAc, 0); /* first device */
4534 }
4535 if (pAc->WasIfUp[1] == SK_TRUE) {
4536 pAc->DiagFlowCtrl = SK_TRUE; /* for SkGeClose */
4537 DoPrintInterfaceChange = SK_FALSE;
4538 SkDrvInitAdapter(pAc, 1); /* second device */
4539 }
4540 return(0);
4541}
4542
4543/*****************************************************************************
4544 *
4545 * ParseDeviceNbrFromSlotName - Evaluate PCI device number
4546 *
4547 * Description:
4548 * This function parses the PCI slot name information string and will
 4549 *	retrieve the device number out of it. The slot_name maintained by
 4550 *	linux is in the form of '02:0a.0', where the first two characters
4551 * represent the bus number in hex (in the sample above this is
4552 * pci bus 0x02) and the next two characters the device number (0x0a).
4553 *
4554 * Returns:
4555 * SK_U32: The device number from the PCI slot name
4556 */
4557
4558static SK_U32 ParseDeviceNbrFromSlotName(
4559const char *SlotName) /* pointer to pci slot name eg. '02:0a.0' */
4560{
4561 char *CurrCharPos = (char *) SlotName;
4562 int FirstNibble = -1;
4563 int SecondNibble = -1;
4564 SK_U32 Result = 0;
4565
4566 while (*CurrCharPos != '\0') {
4567 if (*CurrCharPos == ':') {
4568 while (*CurrCharPos != '.') {
4569 CurrCharPos++;
4570 if ( (*CurrCharPos >= '0') &&
4571 (*CurrCharPos <= '9')) {
4572 if (FirstNibble == -1) {
4573 /* dec. value for '0' */
4574 FirstNibble = *CurrCharPos - 48;
4575 } else {
4576 SecondNibble = *CurrCharPos - 48;
4577 }
4578 } else if ( (*CurrCharPos >= 'a') &&
4579 (*CurrCharPos <= 'f') ) {
4580 if (FirstNibble == -1) {
4581 FirstNibble = *CurrCharPos - 87;
4582 } else {
4583 SecondNibble = *CurrCharPos - 87;
4584 }
4585 } else {
4586 Result = 0;
4587 }
4588 }
4589
4590 Result = FirstNibble;
4591 Result = Result << 4; /* first nibble is higher one */
4592 Result = Result | SecondNibble;
4593 }
4594 CurrCharPos++; /* next character */
4595 }
4596 return (Result);
4597}
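/*
 * Illustrative sketch only: under the same assumption about the slot
 * name layout ("bb:dd.f", e.g. "02:0a.0" -> device 0x0a), the manual
 * nibble walking above can also be expressed with the kernel string
 * helpers. Hypothetical alternative, not used by the driver:
 */
static SK_U32 ParseDeviceNbrSimple(
const char *SlotName)	/* pointer to pci slot name, e.g. '02:0a.0' */
{
	const char *p = strchr(SlotName, ':');

	if (p == NULL)
		return 0;
	/* simple_strtoul() stops at the non-hex '.' separator */
	return (SK_U32)simple_strtoul(p + 1, NULL, 16);
}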
4598
4599/****************************************************************************
4600 *
4601 * SkDrvDeInitAdapter - deinitialize adapter (this function is only
4602 * called if Diag attaches to that card)
4603 *
4604 * Description:
4605 * Close initialized adapter.
4606 *
4607 * Returns:
4608 * 0 - on success
4609 * error code - on error
4610 */
4611static int SkDrvDeInitAdapter(
4612SK_AC *pAC, /* pointer to adapter context */
4613int devNbr) /* what device is to be handled */
4614{
4615 struct SK_NET_DEVICE *dev;
4616
4617 dev = pAC->dev[devNbr];
4618
4619 /* On Linux 2.6 the network driver does NOT mess with reference
4620 ** counts. The driver MUST be able to be unloaded at any time
4621 ** due to the possibility of hotplug.
4622 */
4623 if (SkGeClose(dev) != 0) {
4624 return (-1);
4625 }
4626 return (0);
4627
4628} /* SkDrvDeInitAdapter() */
4629
4630/****************************************************************************
4631 *
4632 * SkDrvInitAdapter - Initialize adapter (this function is only
 4633 * 				called if Diag detaches from that card)
 4634 *
 4635 * Description:
 4636 *	Re-open and re-initialize the adapter.
4637 *
4638 * Returns:
4639 * 0 - on success
4640 * error code - on error
4641 */
4642static int SkDrvInitAdapter(
4643SK_AC *pAC, /* pointer to adapter context */
4644int devNbr) /* what device is to be handled */
4645{
4646 struct SK_NET_DEVICE *dev;
4647
4648 dev = pAC->dev[devNbr];
4649
4650 if (SkGeOpen(dev) != 0) {
4651 return (-1);
4652 }
4653
4654 /*
 4655	** Use the correct MTU size and indicate to the kernel that the TX queue can be started
4656 */
4657 if (SkGeChangeMtu(dev, dev->mtu) != 0) {
4658 return (-1);
4659 }
4660 return (0);
4661
4662} /* SkDrvInitAdapter */
4663
4664#endif
4665
4666#ifdef DEBUG
4667/****************************************************************************/
4668/* "debug only" section *****************************************************/
4669/****************************************************************************/
4670
4671
4672/*****************************************************************************
4673 *
4674 * DumpMsg - print a frame
4675 *
4676 * Description:
4677 * This function prints frames to the system logfile/to the console.
4678 *
4679 * Returns: N/A
4680 *
4681 */
4682static void DumpMsg(struct sk_buff *skb, char *str)
4683{
4684 int msglen;
4685
4686 if (skb == NULL) {
4687 printk("DumpMsg(): NULL-Message\n");
4688 return;
4689 }
4690
4691 if (skb->data == NULL) {
4692 printk("DumpMsg(): Message empty\n");
4693 return;
4694 }
4695
4696 msglen = skb->len;
4697 if (msglen > 64)
4698 msglen = 64;
4699
4700 printk("--- Begin of message from %s , len %d (from %d) ----\n", str, msglen, skb->len);
4701
4702 DumpData((char *)skb->data, msglen);
4703
4704 printk("------- End of message ---------\n");
4705} /* DumpMsg */
4706
4707
4708
4709/*****************************************************************************
4710 *
4711 * DumpData - print a data area
4712 *
4713 * Description:
 4714 *	This function prints an area of data to the system logfile/to the
4715 * console.
4716 *
4717 * Returns: N/A
4718 *
4719 */
4720static void DumpData(char *p, int size)
4721{
4722register int i;
4723int haddr, addr;
4724char hex_buffer[180];
4725char asc_buffer[180];
4726char HEXCHAR[] = "0123456789ABCDEF";
4727
4728 addr = 0;
4729 haddr = 0;
4730 hex_buffer[0] = 0;
4731 asc_buffer[0] = 0;
4732 for (i=0; i < size; ) {
4733 if (*p >= '0' && *p <='z')
4734 asc_buffer[addr] = *p;
4735 else
4736 asc_buffer[addr] = '.';
4737 addr++;
4738 asc_buffer[addr] = 0;
4739 hex_buffer[haddr] = HEXCHAR[(*p & 0xf0) >> 4];
4740 haddr++;
4741 hex_buffer[haddr] = HEXCHAR[*p & 0x0f];
4742 haddr++;
4743 hex_buffer[haddr] = ' ';
4744 haddr++;
4745 hex_buffer[haddr] = 0;
4746 p++;
4747 i++;
4748 if (i%16 == 0) {
4749 printk("%s %s\n", hex_buffer, asc_buffer);
4750 addr = 0;
4751 haddr = 0;
4752 }
4753 }
4754} /* DumpData */
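/*
 * Illustrative sketch only: kernels of this vintage also provide
 * print_hex_dump() (assumption: available in this configuration), which
 * produces a comparable hex+ASCII listing in far fewer lines.
 */
static void DumpDataAlt(char *p, int size)
{
	print_hex_dump(KERN_DEBUG, "sk98lin: ", DUMP_PREFIX_OFFSET,
		       16, 1, p, size, true);
}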
4755
4756
4757/*****************************************************************************
4758 *
4759 * DumpLong - print a data area as long values
4760 *
4761 * Description:
 4762 *	This function prints an area of data to the system logfile/to the
4763 * console.
4764 *
4765 * Returns: N/A
4766 *
4767 */
4768static void DumpLong(char *pc, int size)
4769{
4770register int i;
4771int haddr, addr;
4772char hex_buffer[180];
4773char asc_buffer[180];
4774char HEXCHAR[] = "0123456789ABCDEF";
4775long *p;
4776int l;
4777
4778 addr = 0;
4779 haddr = 0;
4780 hex_buffer[0] = 0;
4781 asc_buffer[0] = 0;
4782 p = (long*) pc;
4783 for (i=0; i < size; ) {
4784 l = (long) *p;
4785 hex_buffer[haddr] = HEXCHAR[(l >> 28) & 0xf];
4786 haddr++;
4787 hex_buffer[haddr] = HEXCHAR[(l >> 24) & 0xf];
4788 haddr++;
4789 hex_buffer[haddr] = HEXCHAR[(l >> 20) & 0xf];
4790 haddr++;
4791 hex_buffer[haddr] = HEXCHAR[(l >> 16) & 0xf];
4792 haddr++;
4793 hex_buffer[haddr] = HEXCHAR[(l >> 12) & 0xf];
4794 haddr++;
4795 hex_buffer[haddr] = HEXCHAR[(l >> 8) & 0xf];
4796 haddr++;
4797 hex_buffer[haddr] = HEXCHAR[(l >> 4) & 0xf];
4798 haddr++;
4799 hex_buffer[haddr] = HEXCHAR[l & 0x0f];
4800 haddr++;
4801 hex_buffer[haddr] = ' ';
4802 haddr++;
4803 hex_buffer[haddr] = 0;
4804 p++;
4805 i++;
4806 if (i%8 == 0) {
4807 printk("%4x %s\n", (i-8)*4, hex_buffer);
4808 haddr = 0;
4809 }
4810 }
4811 printk("------------------------\n");
4812} /* DumpLong */
4813
4814#endif
4815
4816static int __devinit skge_probe_one(struct pci_dev *pdev,
4817 const struct pci_device_id *ent)
4818{
4819 SK_AC *pAC;
4820 DEV_NET *pNet = NULL;
4821 struct net_device *dev = NULL;
4822 static int boards_found = 0;
4823 int error = -ENODEV;
4824 int using_dac = 0;
4825 char DeviceStr[80];
4826
4827 if (pci_enable_device(pdev))
4828 goto out;
4829
4830 /* Configure DMA attributes. */
4831 if (sizeof(dma_addr_t) > sizeof(u32) &&
4832 !(error = pci_set_dma_mask(pdev, DMA_64BIT_MASK))) {
4833 using_dac = 1;
4834 error = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
4835 if (error < 0) {
4836 printk(KERN_ERR "sk98lin %s unable to obtain 64 bit DMA "
4837 "for consistent allocations\n", pci_name(pdev));
4838 goto out_disable_device;
4839 }
4840 } else {
4841 error = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
4842 if (error) {
4843 printk(KERN_ERR "sk98lin %s no usable DMA configuration\n",
4844 pci_name(pdev));
4845 goto out_disable_device;
4846 }
4847 }
4848
4849 error = -ENOMEM;
4850 dev = alloc_etherdev(sizeof(DEV_NET));
4851 if (!dev) {
4852 printk(KERN_ERR "sk98lin: unable to allocate etherdev "
4853 "structure!\n");
4854 goto out_disable_device;
4855 }
4856
4857 pNet = netdev_priv(dev);
4858 pNet->pAC = kzalloc(sizeof(SK_AC), GFP_KERNEL);
4859 if (!pNet->pAC) {
4860 printk(KERN_ERR "sk98lin: unable to allocate adapter "
4861 "structure!\n");
4862 goto out_free_netdev;
4863 }
4864
4865 pAC = pNet->pAC;
4866 pAC->PciDev = pdev;
4867
4868 pAC->dev[0] = dev;
4869 pAC->dev[1] = dev;
4870 pAC->CheckQueue = SK_FALSE;
4871
4872 dev->irq = pdev->irq;
4873
4874 error = SkGeInitPCI(pAC);
4875 if (error) {
4876 printk(KERN_ERR "sk98lin: PCI setup failed: %i\n", error);
4877 goto out_free_netdev;
4878 }
4879
4880 dev->open = &SkGeOpen;
4881 dev->stop = &SkGeClose;
4882 dev->hard_start_xmit = &SkGeXmit;
4883 dev->get_stats = &SkGeStats;
4884 dev->set_multicast_list = &SkGeSetRxMode;
4885 dev->set_mac_address = &SkGeSetMacAddr;
4886 dev->do_ioctl = &SkGeIoctl;
4887 dev->change_mtu = &SkGeChangeMtu;
4888#ifdef CONFIG_NET_POLL_CONTROLLER
4889 dev->poll_controller = &SkGePollController;
4890#endif
4891 SET_NETDEV_DEV(dev, &pdev->dev);
4892 SET_ETHTOOL_OPS(dev, &SkGeEthtoolOps);
4893
4894 /* Use only if yukon hardware */
4895 if (pAC->ChipsetType) {
4896#ifdef USE_SK_TX_CHECKSUM
4897 dev->features |= NETIF_F_IP_CSUM;
4898#endif
4899#ifdef SK_ZEROCOPY
4900 dev->features |= NETIF_F_SG;
4901#endif
4902#ifdef USE_SK_RX_CHECKSUM
4903 pAC->RxPort[0].RxCsum = 1;
4904#endif
4905 }
4906
4907 if (using_dac)
4908 dev->features |= NETIF_F_HIGHDMA;
4909
4910 pAC->Index = boards_found++;
4911
4912 error = SkGeBoardInit(dev, pAC);
4913 if (error)
4914 goto out_free_netdev;
4915
4916 /* Read Adapter name from VPD */
4917 if (ProductStr(pAC, DeviceStr, sizeof(DeviceStr)) != 0) {
4918 error = -EIO;
4919 printk(KERN_ERR "sk98lin: Could not read VPD data.\n");
4920 goto out_free_resources;
4921 }
4922
4923 /* Register net device */
4924 error = register_netdev(dev);
4925 if (error) {
4926 printk(KERN_ERR "sk98lin: Could not register device.\n");
4927 goto out_free_resources;
4928 }
4929
4930 /* Print adapter specific string from vpd */
4931 printk("%s: %s\n", dev->name, DeviceStr);
4932
4933 /* Print configuration settings */
4934 printk(" PrefPort:%c RlmtMode:%s\n",
4935 'A' + pAC->Rlmt.Net[0].Port[pAC->Rlmt.Net[0].PrefPort]->PortNumber,
4936 (pAC->RlmtMode==0) ? "Check Link State" :
4937 ((pAC->RlmtMode==1) ? "Check Link State" :
4938 ((pAC->RlmtMode==3) ? "Check Local Port" :
4939 ((pAC->RlmtMode==7) ? "Check Segmentation" :
4940 ((pAC->RlmtMode==17) ? "Dual Check Link State" :"Error")))));
4941
4942 SkGeYellowLED(pAC, pAC->IoBase, 1);
4943
4944 memcpy(&dev->dev_addr, &pAC->Addr.Net[0].CurrentMacAddress, 6);
4945 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
4946
4947 pNet->PortNr = 0;
4948 pNet->NetNr = 0;
4949
4950 boards_found++;
4951
4952 pci_set_drvdata(pdev, dev);
4953
 4954	/* More than one port found */
4955 if ((pAC->GIni.GIMacsFound == 2 ) && (pAC->RlmtNets == 2)) {
4956 dev = alloc_etherdev(sizeof(DEV_NET));
4957 if (!dev) {
4958 printk(KERN_ERR "sk98lin: unable to allocate etherdev "
4959 "structure!\n");
4960 goto single_port;
4961 }
4962
4963 pNet = netdev_priv(dev);
4964 pNet->PortNr = 1;
4965 pNet->NetNr = 1;
4966 pNet->pAC = pAC;
4967
4968 dev->open = &SkGeOpen;
4969 dev->stop = &SkGeClose;
4970 dev->hard_start_xmit = &SkGeXmit;
4971 dev->get_stats = &SkGeStats;
4972 dev->set_multicast_list = &SkGeSetRxMode;
4973 dev->set_mac_address = &SkGeSetMacAddr;
4974 dev->do_ioctl = &SkGeIoctl;
4975 dev->change_mtu = &SkGeChangeMtu;
4976 SET_NETDEV_DEV(dev, &pdev->dev);
4977 SET_ETHTOOL_OPS(dev, &SkGeEthtoolOps);
4978
4979 if (pAC->ChipsetType) {
4980#ifdef USE_SK_TX_CHECKSUM
4981 dev->features |= NETIF_F_IP_CSUM;
4982#endif
4983#ifdef SK_ZEROCOPY
4984 dev->features |= NETIF_F_SG;
4985#endif
4986#ifdef USE_SK_RX_CHECKSUM
4987 pAC->RxPort[1].RxCsum = 1;
4988#endif
4989 }
4990
4991 if (using_dac)
4992 dev->features |= NETIF_F_HIGHDMA;
4993
4994 error = register_netdev(dev);
4995 if (error) {
4996 printk(KERN_ERR "sk98lin: Could not register device"
4997 " for second port. (%d)\n", error);
4998 free_netdev(dev);
4999 goto single_port;
5000 }
5001
5002 pAC->dev[1] = dev;
5003 memcpy(&dev->dev_addr,
5004 &pAC->Addr.Net[1].CurrentMacAddress, 6);
5005 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
5006
5007 printk("%s: %s\n", dev->name, DeviceStr);
5008 printk(" PrefPort:B RlmtMode:Dual Check Link State\n");
5009 }
5010
5011single_port:
5012
5013 /* Save the hardware revision */
5014 pAC->HWRevision = (((pAC->GIni.GIPciHwRev >> 4) & 0x0F)*10) +
5015 (pAC->GIni.GIPciHwRev & 0x0F);
5016
5017 /* Set driver globals */
5018 pAC->Pnmi.pDriverFileName = DRIVER_FILE_NAME;
5019 pAC->Pnmi.pDriverReleaseDate = DRIVER_REL_DATE;
5020
5021 memset(&pAC->PnmiBackup, 0, sizeof(SK_PNMI_STRUCT_DATA));
5022 memcpy(&pAC->PnmiBackup, &pAC->PnmiStruct, sizeof(SK_PNMI_STRUCT_DATA));
5023
5024 return 0;
5025
5026 out_free_resources:
5027 FreeResources(dev);
5028 out_free_netdev:
5029 free_netdev(dev);
5030 out_disable_device:
5031 pci_disable_device(pdev);
5032 out:
5033 return error;
5034}
5035
5036static void __devexit skge_remove_one(struct pci_dev *pdev)
5037{
5038 struct net_device *dev = pci_get_drvdata(pdev);
5039 DEV_NET *pNet = netdev_priv(dev);
5040 SK_AC *pAC = pNet->pAC;
5041 struct net_device *otherdev = pAC->dev[1];
5042
5043 unregister_netdev(dev);
5044
5045 SkGeYellowLED(pAC, pAC->IoBase, 0);
5046
5047 if (pAC->BoardLevel == SK_INIT_RUN) {
5048 SK_EVPARA EvPara;
5049 unsigned long Flags;
5050
5051 /* board is still alive */
5052 spin_lock_irqsave(&pAC->SlowPathLock, Flags);
5053 EvPara.Para32[0] = 0;
5054 EvPara.Para32[1] = -1;
5055 SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_STOP, EvPara);
5056 EvPara.Para32[0] = 1;
5057 EvPara.Para32[1] = -1;
5058 SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_STOP, EvPara);
5059 SkEventDispatcher(pAC, pAC->IoBase);
5060 /* disable interrupts */
5061 SK_OUT32(pAC->IoBase, B0_IMSK, 0);
5062 SkGeDeInit(pAC, pAC->IoBase);
5063 spin_unlock_irqrestore(&pAC->SlowPathLock, Flags);
5064 pAC->BoardLevel = SK_INIT_DATA;
5065 /* We do NOT check here, if IRQ was pending, of course*/
5066 }
5067
5068 if (pAC->BoardLevel == SK_INIT_IO) {
5069 /* board is still alive */
5070 SkGeDeInit(pAC, pAC->IoBase);
5071 pAC->BoardLevel = SK_INIT_DATA;
5072 }
5073
5074 FreeResources(dev);
5075 free_netdev(dev);
5076 if (otherdev != dev)
5077 free_netdev(otherdev);
5078 kfree(pAC);
5079}
5080
5081#ifdef CONFIG_PM
5082static int skge_suspend(struct pci_dev *pdev, pm_message_t state)
5083{
5084 struct net_device *dev = pci_get_drvdata(pdev);
5085 DEV_NET *pNet = netdev_priv(dev);
5086 SK_AC *pAC = pNet->pAC;
5087 struct net_device *otherdev = pAC->dev[1];
5088
5089 if (netif_running(dev)) {
5090 netif_carrier_off(dev);
5091 DoPrintInterfaceChange = SK_FALSE;
5092 SkDrvDeInitAdapter(pAC, 0); /* performs SkGeClose */
5093 netif_device_detach(dev);
5094 }
5095 if (otherdev != dev) {
5096 if (netif_running(otherdev)) {
5097 netif_carrier_off(otherdev);
5098 DoPrintInterfaceChange = SK_FALSE;
5099 SkDrvDeInitAdapter(pAC, 1); /* performs SkGeClose */
5100 netif_device_detach(otherdev);
5101 }
5102 }
5103
5104 pci_save_state(pdev);
5105 pci_enable_wake(pdev, pci_choose_state(pdev, state), 0);
5106 if (pAC->AllocFlag & SK_ALLOC_IRQ) {
5107 free_irq(dev->irq, dev);
5108 }
5109 pci_disable_device(pdev);
5110 pci_set_power_state(pdev, pci_choose_state(pdev, state));
5111
5112 return 0;
5113}
5114
5115static int skge_resume(struct pci_dev *pdev)
5116{
5117 struct net_device *dev = pci_get_drvdata(pdev);
5118 DEV_NET *pNet = netdev_priv(dev);
5119 SK_AC *pAC = pNet->pAC;
5120 struct net_device *otherdev = pAC->dev[1];
5121 int ret;
5122
5123 pci_set_power_state(pdev, PCI_D0);
5124 pci_restore_state(pdev);
5125 ret = pci_enable_device(pdev);
5126 if (ret) {
5127 printk(KERN_WARNING "sk98lin: unable to enable device %s "
5128 "in resume\n", dev->name);
5129 goto err_out;
5130 }
5131 pci_set_master(pdev);
5132 if (pAC->GIni.GIMacsFound == 2)
5133 ret = request_irq(dev->irq, SkGeIsr, IRQF_SHARED, "sk98lin", dev);
5134 else
5135 ret = request_irq(dev->irq, SkGeIsrOnePort, IRQF_SHARED, "sk98lin", dev);
5136 if (ret) {
5137 printk(KERN_WARNING "sk98lin: unable to acquire IRQ %d\n", dev->irq);
5138 ret = -EBUSY;
5139 goto err_out_disable_pdev;
5140 }
5141
5142 netif_device_attach(dev);
5143 if (netif_running(dev)) {
5144 DoPrintInterfaceChange = SK_FALSE;
5145 SkDrvInitAdapter(pAC, 0); /* first device */
5146 }
5147 if (otherdev != dev) {
5148 netif_device_attach(otherdev);
5149 if (netif_running(otherdev)) {
5150 DoPrintInterfaceChange = SK_FALSE;
5151 SkDrvInitAdapter(pAC, 1); /* second device */
5152 }
5153 }
5154
5155 return 0;
5156
5157err_out_disable_pdev:
5158 pci_disable_device(pdev);
5159err_out:
5160 pAC->AllocFlag &= ~SK_ALLOC_IRQ;
5161 dev->irq = 0;
5162 return ret;
5163}
5164#else
5165#define skge_suspend NULL
5166#define skge_resume NULL
5167#endif
5168
5169static struct pci_device_id skge_pci_tbl[] = {
5170#ifdef SK98LIN_ALL_DEVICES
5171 { PCI_VENDOR_ID_3COM, 0x1700, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
5172 { PCI_VENDOR_ID_3COM, 0x80eb, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
5173#endif
5174#ifdef GENESIS
5175 /* Generic SysKonnect SK-98xx Gigabit Ethernet Server Adapter */
5176 { PCI_VENDOR_ID_SYSKONNECT, 0x4300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
5177#endif
5178 /* Generic SysKonnect SK-98xx V2.0 Gigabit Ethernet Adapter */
5179 { PCI_VENDOR_ID_SYSKONNECT, 0x4320, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
5180#ifdef SK98LIN_ALL_DEVICES
5181/* DLink card does not have valid VPD so this driver gags
5182 * { PCI_VENDOR_ID_DLINK, 0x4c00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
5183 */
5184 { PCI_VENDOR_ID_MARVELL, 0x4320, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
5185 { PCI_VENDOR_ID_MARVELL, 0x5005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
5186 { PCI_VENDOR_ID_CNET, 0x434e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
5187 { PCI_VENDOR_ID_LINKSYS, 0x1032, PCI_ANY_ID, 0x0015, },
5188 { PCI_VENDOR_ID_LINKSYS, 0x1064, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
5189#endif
5190 { 0 }
5191};
5192
5193MODULE_DEVICE_TABLE(pci, skge_pci_tbl);
5194
5195static struct pci_driver skge_driver = {
5196 .name = "sk98lin",
5197 .id_table = skge_pci_tbl,
5198 .probe = skge_probe_one,
5199 .remove = __devexit_p(skge_remove_one),
5200 .suspend = skge_suspend,
5201 .resume = skge_resume,
5202};
5203
5204static int __init skge_init(void)
5205{
5206 printk(KERN_NOTICE "sk98lin: driver has been replaced by the skge driver"
5207 " and is scheduled for removal\n");
5208
5209 return pci_register_driver(&skge_driver);
5210}
5211
5212static void __exit skge_exit(void)
5213{
5214 pci_unregister_driver(&skge_driver);
5215}
5216
5217module_init(skge_init);
5218module_exit(skge_exit);
diff --git a/drivers/net/sk98lin/skgehwt.c b/drivers/net/sk98lin/skgehwt.c
deleted file mode 100644
index db670993c2df..000000000000
--- a/drivers/net/sk98lin/skgehwt.c
+++ /dev/null
@@ -1,171 +0,0 @@
1/******************************************************************************
2 *
3 * Name: skgehwt.c
4 * Project: Gigabit Ethernet Adapters, Event Scheduler Module
5 * Version: $Revision: 1.15 $
6 * Date: $Date: 2003/09/16 13:41:23 $
7 * Purpose: Hardware Timer
8 *
9 ******************************************************************************/
10
11/******************************************************************************
12 *
13 * (C)Copyright 1998-2002 SysKonnect GmbH.
14 * (C)Copyright 2002-2003 Marvell.
15 *
16 * This program is free software; you can redistribute it and/or modify
17 * it under the terms of the GNU General Public License as published by
18 * the Free Software Foundation; either version 2 of the License, or
19 * (at your option) any later version.
20 *
21 * The information in this file is provided "AS IS" without warranty.
22 *
23 ******************************************************************************/
24
25/*
26 * Event queue and dispatcher
27 */
28#if (defined(DEBUG) || ((!defined(LINT)) && (!defined(SK_SLIM))))
29static const char SysKonnectFileId[] =
30 "@(#) $Id: skgehwt.c,v 1.15 2003/09/16 13:41:23 rschmidt Exp $ (C) Marvell.";
31#endif
32
33#include "h/skdrv1st.h" /* Driver Specific Definitions */
34#include "h/skdrv2nd.h" /* Adapter Control- and Driver specific Def. */
35
36#ifdef __C2MAN__
37/*
38 * Hardware Timer function queue management.
39 */
40intro()
41{}
42#endif
43
44/*
45 * Prototypes of local functions.
46 */
47#define SK_HWT_MAX (65000)
48
49/* correction factor */
50#define SK_HWT_FAC (1000 * (SK_U32)pAC->GIni.GIHstClkFact / 100)
51
52/*
53 * Initialize hardware timer.
54 *
55 * Must be called during init level 1.
56 */
57void SkHwtInit(
58SK_AC *pAC, /* Adapters context */
59SK_IOC Ioc) /* IoContext */
60{
61 pAC->Hwt.TStart = 0 ;
62 pAC->Hwt.TStop = 0 ;
63 pAC->Hwt.TActive = SK_FALSE;
64
65 SkHwtStop(pAC, Ioc);
66}
67
68/*
69 *
70 * Start hardware timer (clock ticks are 16us).
71 *
72 */
73void SkHwtStart(
74SK_AC *pAC, /* Adapters context */
75SK_IOC Ioc, /* IoContext */
76SK_U32 Time) /* Time in units of 16us to load the timer with. */
77{
78 SK_U32 Cnt;
79
80 if (Time > SK_HWT_MAX)
81 Time = SK_HWT_MAX;
82
83 pAC->Hwt.TStart = Time;
84 pAC->Hwt.TStop = 0L;
85
86 Cnt = Time;
87
88 /*
89 * if time < 16 us
90 * time = 16 us
91 */
92 if (!Cnt) {
93 Cnt++;
94 }
95
96 SK_OUT32(Ioc, B2_TI_INI, Cnt * SK_HWT_FAC);
97
98 SK_OUT16(Ioc, B2_TI_CTRL, TIM_START); /* Start timer. */
99
100 pAC->Hwt.TActive = SK_TRUE;
101}
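/*
 * Illustrative sketch only (hypothetical helper): SkHwtStart() expects
 * the time in units of 16us, so a caller working in microseconds would
 * convert roughly like this, never programming a zero count.
 */
static SK_U32 SkUsecToHwtUnits(SK_U32 Usec)
{
	SK_U32 Units = Usec / 16;

	return Units ? Units : 1;
}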
102
103/*
104 * Stop hardware timer.
105 * and clear the timer IRQ
106 */
107void SkHwtStop(
108SK_AC *pAC, /* Adapters context */
109SK_IOC Ioc) /* IoContext */
110{
111 SK_OUT16(Ioc, B2_TI_CTRL, TIM_STOP);
112
113 SK_OUT16(Ioc, B2_TI_CTRL, TIM_CLR_IRQ);
114
115 pAC->Hwt.TActive = SK_FALSE;
116}
117
118
119/*
120 * Stop hardware timer and read time elapsed since last start.
121 *
122 * returns
123 * The elapsed time since last start in units of 16us.
124 *
125 */
126SK_U32 SkHwtRead(
127SK_AC *pAC, /* Adapters context */
128SK_IOC Ioc) /* IoContext */
129{
130 SK_U32 TRead;
131 SK_U32 IStatus;
132
133 if (pAC->Hwt.TActive) {
134
135 SkHwtStop(pAC, Ioc);
136
137 SK_IN32(Ioc, B2_TI_VAL, &TRead);
138 TRead /= SK_HWT_FAC;
139
140 SK_IN32(Ioc, B0_ISRC, &IStatus);
141
 142		/* Check if timer expired (or wrapped around) */
143 if ((TRead > pAC->Hwt.TStart) || (IStatus & IS_TIMINT)) {
144
145 SkHwtStop(pAC, Ioc);
146
147 pAC->Hwt.TStop = pAC->Hwt.TStart;
148 }
149 else {
150
151 pAC->Hwt.TStop = pAC->Hwt.TStart - TRead;
152 }
153 }
154 return(pAC->Hwt.TStop);
155}
156
157/*
158 * interrupt source= timer
159 */
160void SkHwtIsr(
161SK_AC *pAC, /* Adapters context */
162SK_IOC Ioc) /* IoContext */
163{
164 SkHwtStop(pAC, Ioc);
165
166 pAC->Hwt.TStop = pAC->Hwt.TStart;
167
168 SkTimerDone(pAC, Ioc);
169}
170
171/* End of file */
diff --git a/drivers/net/sk98lin/skgeinit.c b/drivers/net/sk98lin/skgeinit.c
deleted file mode 100644
index 67f1d6a5c15d..000000000000
--- a/drivers/net/sk98lin/skgeinit.c
+++ /dev/null
@@ -1,2005 +0,0 @@
1/******************************************************************************
2 *
3 * Name: skgeinit.c
4 * Project: Gigabit Ethernet Adapters, Common Modules
5 * Version: $Revision: 1.97 $
6 * Date: $Date: 2003/10/02 16:45:31 $
7 * Purpose: Contains functions to initialize the adapter
8 *
9 ******************************************************************************/
10
11/******************************************************************************
12 *
13 * (C)Copyright 1998-2002 SysKonnect.
14 * (C)Copyright 2002-2003 Marvell.
15 *
16 * This program is free software; you can redistribute it and/or modify
17 * it under the terms of the GNU General Public License as published by
18 * the Free Software Foundation; either version 2 of the License, or
19 * (at your option) any later version.
20 *
21 * The information in this file is provided "AS IS" without warranty.
22 *
23 ******************************************************************************/
24
25#include "h/skdrv1st.h"
26#include "h/skdrv2nd.h"
27
28/* global variables ***********************************************************/
29
30/* local variables ************************************************************/
31
32#if (defined(DEBUG) || ((!defined(LINT)) && (!defined(SK_SLIM))))
33static const char SysKonnectFileId[] =
34 "@(#) $Id: skgeinit.c,v 1.97 2003/10/02 16:45:31 rschmidt Exp $ (C) Marvell.";
35#endif
36
37struct s_QOffTab {
38 int RxQOff; /* Receive Queue Address Offset */
39 int XsQOff; /* Sync Tx Queue Address Offset */
40 int XaQOff; /* Async Tx Queue Address Offset */
41};
42static struct s_QOffTab QOffTab[] = {
43 {Q_R1, Q_XS1, Q_XA1}, {Q_R2, Q_XS2, Q_XA2}
44};
45
46struct s_Config {
47 char ScanString[8];
48 SK_U32 Value;
49};
50
51static struct s_Config OemConfig = {
52 {'O','E','M','_','C','o','n','f'},
53#ifdef SK_OEM_CONFIG
54 OEM_CONFIG_VALUE,
55#else
56 0,
57#endif
58};
59
60/******************************************************************************
61 *
62 * SkGePollTxD() - Enable / Disable Descriptor Polling of TxD Rings
63 *
64 * Description:
65 * Enable or disable the descriptor polling of the transmit descriptor
66 * ring(s) (TxD) for port 'Port'.
67 * The new configuration is *not* saved over any SkGeStopPort() and
68 * SkGeInitPort() calls.
69 *
70 * Returns:
71 * nothing
72 */
73void SkGePollTxD(
74SK_AC *pAC, /* adapter context */
75SK_IOC IoC, /* IO context */
76int Port, /* Port Index (MAC_1 + n) */
77SK_BOOL PollTxD) /* SK_TRUE (enable pol.), SK_FALSE (disable pol.) */
78{
79 SK_GEPORT *pPrt;
80 SK_U32 DWord;
81
82 pPrt = &pAC->GIni.GP[Port];
83
84 DWord = (SK_U32)(PollTxD ? CSR_ENA_POL : CSR_DIS_POL);
85
86 if (pPrt->PXSQSize != 0) {
87 SK_OUT32(IoC, Q_ADDR(pPrt->PXsQOff, Q_CSR), DWord);
88 }
89
90 if (pPrt->PXAQSize != 0) {
91 SK_OUT32(IoC, Q_ADDR(pPrt->PXaQOff, Q_CSR), DWord);
92 }
93} /* SkGePollTxD */
94
95
96/******************************************************************************
97 *
98 * SkGeYellowLED() - Switch the yellow LED on or off.
99 *
100 * Description:
101 * Switch the yellow LED on or off.
102 *
103 * Note:
104 * This function may be called any time after SkGeInit(Level 1).
105 *
106 * Returns:
107 * nothing
108 */
109void SkGeYellowLED(
110SK_AC *pAC, /* adapter context */
111SK_IOC IoC, /* IO context */
112int State) /* yellow LED state, 0 = OFF, 0 != ON */
113{
114 if (State == 0) {
115 /* Switch yellow LED OFF */
116 SK_OUT8(IoC, B0_LED, LED_STAT_OFF);
117 }
118 else {
119 /* Switch yellow LED ON */
120 SK_OUT8(IoC, B0_LED, LED_STAT_ON);
121 }
122} /* SkGeYellowLED */
123
124
125#if (!defined(SK_SLIM) || defined(GENESIS))
126/******************************************************************************
127 *
128 * SkGeXmitLED() - Modify the Operational Mode of a transmission LED.
129 *
130 * Description:
131 * The Rx or Tx LED which is specified by 'Led' will be
132 * enabled, disabled or switched on in test mode.
133 *
134 * Note:
135 * 'Led' must contain the address offset of the LEDs INI register.
136 *
137 * Usage:
138 * SkGeXmitLED(pAC, IoC, MR_ADDR(Port, TX_LED_INI), SK_LED_ENA);
139 *
140 * Returns:
141 * nothing
142 */
143void SkGeXmitLED(
144SK_AC *pAC, /* adapter context */
145SK_IOC IoC, /* IO context */
146int Led, /* offset to the LED Init Value register */
147int Mode) /* Mode may be SK_LED_DIS, SK_LED_ENA, SK_LED_TST */
148{
149 SK_U32 LedIni;
150
151 switch (Mode) {
152 case SK_LED_ENA:
153 LedIni = SK_XMIT_DUR * (SK_U32)pAC->GIni.GIHstClkFact / 100;
154 SK_OUT32(IoC, Led + XMIT_LED_INI, LedIni);
155 SK_OUT8(IoC, Led + XMIT_LED_CTRL, LED_START);
156 break;
157 case SK_LED_TST:
158 SK_OUT8(IoC, Led + XMIT_LED_TST, LED_T_ON);
159 SK_OUT32(IoC, Led + XMIT_LED_CNT, 100);
160 SK_OUT8(IoC, Led + XMIT_LED_CTRL, LED_START);
161 break;
162 case SK_LED_DIS:
163 default:
164 /*
165 * Do NOT stop the LED Timer here. The LED might be
166 * in on state. But it needs to go off.
167 */
168 SK_OUT32(IoC, Led + XMIT_LED_CNT, 0);
169 SK_OUT8(IoC, Led + XMIT_LED_TST, LED_T_OFF);
170 break;
171 }
172
173 /*
174 * 1000BT: The Transmit LED is driven by the PHY.
175 * But the default LED configuration is used for
176 * Level One and Broadcom PHYs.
177 * (Broadcom: It may be that PHY_B_PEC_EN_LTR has to be set.)
178 * (In this case it has to be added here. But we will see. XXX)
179 */
180} /* SkGeXmitLED */
181#endif /* !SK_SLIM || GENESIS */
182
183
184/******************************************************************************
185 *
186 * DoCalcAddr() - Calculates the start and the end address of a queue.
187 *
188 * Description:
189 * This function calculates the start and the end address of a queue.
190 * Afterwards the 'StartVal' is incremented to the next start position.
191 * If the port is already initialized the calculated values
192 * will be checked against the configured values and an
193 * error will be returned, if they are not equal.
194 * If the port is not initialized the values will be written to
195 * *StartAdr and *EndAddr.
196 *
197 * Returns:
198 * 0: success
199 * 1: configuration error
200 */
201static int DoCalcAddr(
202SK_AC *pAC, /* adapter context */
 203 SK_GEPORT SK_FAR *pPrt,	/* pointer to the port struct */
204int QuSize, /* size of the queue to configure in kB */
205SK_U32 SK_FAR *StartVal, /* start value for address calculation */
206SK_U32 SK_FAR *QuStartAddr,/* start addr to calculate */
207SK_U32 SK_FAR *QuEndAddr) /* end address to calculate */
208{
209 SK_U32 EndVal;
210 SK_U32 NextStart;
211 int Rtv;
212
213 Rtv = 0;
214 if (QuSize == 0) {
215 EndVal = *StartVal;
216 NextStart = EndVal;
217 }
218 else {
219 EndVal = *StartVal + ((SK_U32)QuSize * 1024) - 1;
220 NextStart = EndVal + 1;
221 }
222
223 if (pPrt->PState >= SK_PRT_INIT) {
224 if (*StartVal != *QuStartAddr || EndVal != *QuEndAddr) {
225 Rtv = 1;
226 }
227 }
228 else {
229 *QuStartAddr = *StartVal;
230 *QuEndAddr = EndVal;
231 }
232
233 *StartVal = NextStart;
234 return(Rtv);
235} /* DoCalcAddr */
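/*
 * Illustrative example only (hypothetical call sequence): laying out a
 * 16 kB queue followed by an 8 kB queue from RAM offset 0 with
 * DoCalcAddr(). The expected results assume the port is not yet
 * initialized (PState < SK_PRT_INIT), so the values are written out.
 */
static void DoCalcAddrExample(SK_AC *pAC, SK_GEPORT SK_FAR *pPrt)
{
	SK_U32 Start = 0;
	SK_U32 QStart, QEnd;

	DoCalcAddr(pAC, pPrt, 16, &Start, &QStart, &QEnd);
	/* QStart == 0x0000, QEnd == 0x3fff, Start == 0x4000 */

	DoCalcAddr(pAC, pPrt, 8, &Start, &QStart, &QEnd);
	/* QStart == 0x4000, QEnd == 0x5fff, Start == 0x6000 */
}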
236
237/******************************************************************************
238 *
239 * SkGeInitAssignRamToQueues() - allocate default queue sizes
240 *
241 * Description:
242 * This function assigns the memory to the different queues and ports.
243 * When DualNet is set to SK_TRUE all ports get the same amount of memory.
244 * Otherwise the first port gets most of the memory and all the
245 * other ports just the required minimum.
246 * This function can only be called when pAC->GIni.GIRamSize and
247 * pAC->GIni.GIMacsFound have been initialized, usually this happens
248 * at init level 1
249 *
250 * Returns:
251 * 0 - ok
252 * 1 - invalid input values
253 * 2 - not enough memory
254 */
255
256int SkGeInitAssignRamToQueues(
257SK_AC *pAC, /* Adapter context */
258int ActivePort, /* Active Port in RLMT mode */
 259 SK_BOOL	DualNet)	/* dual net mode flag */
260{
261 int i;
262 int UsedKilobytes; /* memory already assigned */
263 int ActivePortKilobytes; /* memory available for active port */
264 SK_GEPORT *pGePort;
265
266 UsedKilobytes = 0;
267
268 if (ActivePort >= pAC->GIni.GIMacsFound) {
269 SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_INIT,
270 ("SkGeInitAssignRamToQueues: ActivePort (%d) invalid\n",
271 ActivePort));
272 return(1);
273 }
274 if (((pAC->GIni.GIMacsFound * (SK_MIN_RXQ_SIZE + SK_MIN_TXQ_SIZE)) +
275 ((RAM_QUOTA_SYNC == 0) ? 0 : SK_MIN_TXQ_SIZE)) > pAC->GIni.GIRamSize) {
276 SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_INIT,
277 ("SkGeInitAssignRamToQueues: Not enough memory (%d)\n",
278 pAC->GIni.GIRamSize));
279 return(2);
280 }
281
282 if (DualNet) {
283 /* every port gets the same amount of memory */
284 ActivePortKilobytes = pAC->GIni.GIRamSize / pAC->GIni.GIMacsFound;
285 for (i = 0; i < pAC->GIni.GIMacsFound; i++) {
286
287 pGePort = &pAC->GIni.GP[i];
288
289 /* take away the minimum memory for active queues */
290 ActivePortKilobytes -= (SK_MIN_RXQ_SIZE + SK_MIN_TXQ_SIZE);
291
292 /* receive queue gets the minimum + 80% of the rest */
293 pGePort->PRxQSize = (int) (ROUND_QUEUE_SIZE_KB((
294 ActivePortKilobytes * (unsigned long) RAM_QUOTA_RX) / 100))
295 + SK_MIN_RXQ_SIZE;
296
297 ActivePortKilobytes -= (pGePort->PRxQSize - SK_MIN_RXQ_SIZE);
298
299 /* synchronous transmit queue */
300 pGePort->PXSQSize = 0;
301
302 /* asynchronous transmit queue */
303 pGePort->PXAQSize = (int) ROUND_QUEUE_SIZE_KB(ActivePortKilobytes +
304 SK_MIN_TXQ_SIZE);
305 }
306 }
307 else {
308 /* Rlmt Mode or single link adapter */
309
310 /* Set standby queue size defaults for all standby ports */
311 for (i = 0; i < pAC->GIni.GIMacsFound; i++) {
312
313 if (i != ActivePort) {
314 pGePort = &pAC->GIni.GP[i];
315
316 pGePort->PRxQSize = SK_MIN_RXQ_SIZE;
317 pGePort->PXAQSize = SK_MIN_TXQ_SIZE;
318 pGePort->PXSQSize = 0;
319
320 /* Count used RAM */
321 UsedKilobytes += pGePort->PRxQSize + pGePort->PXAQSize;
322 }
323 }
324 /* what's left? */
325 ActivePortKilobytes = pAC->GIni.GIRamSize - UsedKilobytes;
326
327 /* assign it to the active port */
328 /* first take away the minimum memory */
329 ActivePortKilobytes -= (SK_MIN_RXQ_SIZE + SK_MIN_TXQ_SIZE);
330 pGePort = &pAC->GIni.GP[ActivePort];
331
 332 		/* receive queue gets the minimum + 80% of the rest */
333 pGePort->PRxQSize = (int) (ROUND_QUEUE_SIZE_KB((ActivePortKilobytes *
334 (unsigned long) RAM_QUOTA_RX) / 100)) + SK_MIN_RXQ_SIZE;
335
336 ActivePortKilobytes -= (pGePort->PRxQSize - SK_MIN_RXQ_SIZE);
337
338 /* synchronous transmit queue */
339 pGePort->PXSQSize = 0;
340
341 /* asynchronous transmit queue */
342 pGePort->PXAQSize = (int) ROUND_QUEUE_SIZE_KB(ActivePortKilobytes) +
343 SK_MIN_TXQ_SIZE;
344 }
345#ifdef VCPU
346 VCPUprintf(0, "PRxQSize=%u, PXSQSize=%u, PXAQSize=%u\n",
347 pGePort->PRxQSize, pGePort->PXSQSize, pGePort->PXAQSize);
348#endif /* VCPU */
349
350 return(0);
351} /* SkGeInitAssignRamToQueues */
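/*
 * Illustrative sketch only (hypothetical helper, ignores the 8 kB
 * rounding that ROUND_QUEUE_SIZE_KB applies): for one active port the
 * receive queue gets its minimum plus RAM_QUOTA_RX percent of the
 * remaining RAM, the async Tx queue takes everything left over, and the
 * sync Tx queue stays empty.
 */
static void SkSplitPortRamExample(int AvailKb, int *RxKb, int *TxaKb)
{
	int Rest = AvailKb - (SK_MIN_RXQ_SIZE + SK_MIN_TXQ_SIZE);

	*RxKb  = SK_MIN_RXQ_SIZE + (Rest * RAM_QUOTA_RX) / 100;
	*TxaKb = AvailKb - *RxKb;	/* rest goes to the async Tx queue */
}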
352
353/******************************************************************************
354 *
355 * SkGeCheckQSize() - Checks the Adapters Queue Size Configuration
356 *
357 * Description:
358 * This function verifies the Queue Size Configuration specified
359 * in the variables PRxQSize, PXSQSize, and PXAQSize of all
360 * used ports.
 361 *	These requirements must be fulfilled to have a valid configuration:
362 * - The size of all queues must not exceed GIRamSize.
363 * - The queue sizes must be specified in units of 8 kB.
364 * - The size of Rx queues of available ports must not be
365 * smaller than 16 kB.
366 * - The size of at least one Tx queue (synch. or asynch.)
367 * of available ports must not be smaller than 16 kB
368 * when Jumbo Frames are used.
369 * - The RAM start and end addresses must not be changed
370 * for ports which are already initialized.
371 * Furthermore SkGeCheckQSize() defines the Start and End Addresses
372 * of all ports and stores them into the HWAC port structure.
373 *
374 * Returns:
375 * 0: Queue Size Configuration valid
376 * 1: Queue Size Configuration invalid
377 */
378static int SkGeCheckQSize(
379SK_AC *pAC, /* adapter context */
380int Port) /* port index */
381{
382 SK_GEPORT *pPrt;
383 int i;
384 int Rtv;
385 int Rtv2;
386 SK_U32 StartAddr;
387#ifndef SK_SLIM
388 int UsedMem; /* total memory used (max. found ports) */
389#endif
390
391 Rtv = 0;
392
393#ifndef SK_SLIM
394
395 UsedMem = 0;
396 for (i = 0; i < pAC->GIni.GIMacsFound; i++) {
397 pPrt = &pAC->GIni.GP[i];
398
399 if ((pPrt->PRxQSize & QZ_UNITS) != 0 ||
400 (pPrt->PXSQSize & QZ_UNITS) != 0 ||
401 (pPrt->PXAQSize & QZ_UNITS) != 0) {
402
403 SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_HWI_E012, SKERR_HWI_E012MSG);
404 return(1);
405 }
406
407 if (i == Port && pPrt->PRxQSize < SK_MIN_RXQ_SIZE) {
408 SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_HWI_E011, SKERR_HWI_E011MSG);
409 return(1);
410 }
411
412 /*
413 * the size of at least one Tx queue (synch. or asynch.) has to be > 0.
414 * if Jumbo Frames are used, this size has to be >= 16 kB.
415 */
416 if ((i == Port && pPrt->PXSQSize == 0 && pPrt->PXAQSize == 0) ||
417 (pAC->GIni.GIPortUsage == SK_JUMBO_LINK &&
418 ((pPrt->PXSQSize > 0 && pPrt->PXSQSize < SK_MIN_TXQ_SIZE) ||
419 (pPrt->PXAQSize > 0 && pPrt->PXAQSize < SK_MIN_TXQ_SIZE)))) {
420 SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_HWI_E023, SKERR_HWI_E023MSG);
421 return(1);
422 }
423
424 UsedMem += pPrt->PRxQSize + pPrt->PXSQSize + pPrt->PXAQSize;
425 }
426
427 if (UsedMem > pAC->GIni.GIRamSize) {
428 SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_HWI_E012, SKERR_HWI_E012MSG);
429 return(1);
430 }
431#endif /* !SK_SLIM */
432
433 /* Now start address calculation */
434 StartAddr = pAC->GIni.GIRamOffs;
435 for (i = 0; i < pAC->GIni.GIMacsFound; i++) {
436 pPrt = &pAC->GIni.GP[i];
437
438 /* Calculate/Check values for the receive queue */
439 Rtv2 = DoCalcAddr(pAC, pPrt, pPrt->PRxQSize, &StartAddr,
440 &pPrt->PRxQRamStart, &pPrt->PRxQRamEnd);
441 Rtv |= Rtv2;
442
443 /* Calculate/Check values for the synchronous Tx queue */
444 Rtv2 = DoCalcAddr(pAC, pPrt, pPrt->PXSQSize, &StartAddr,
445 &pPrt->PXsQRamStart, &pPrt->PXsQRamEnd);
446 Rtv |= Rtv2;
447
448 /* Calculate/Check values for the asynchronous Tx queue */
449 Rtv2 = DoCalcAddr(pAC, pPrt, pPrt->PXAQSize, &StartAddr,
450 &pPrt->PXaQRamStart, &pPrt->PXaQRamEnd);
451 Rtv |= Rtv2;
452
453 if (Rtv) {
454 SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_HWI_E013, SKERR_HWI_E013MSG);
455 return(1);
456 }
457 }
458
459 return(0);
460} /* SkGeCheckQSize */
461
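The rules listed in the header comment can also be checked in isolation. A hedged sketch of the same validation, spelling out the 8 kB-unit mask, the 16 kB minima and the jumbo rule as assumptions rather than the driver's actual QZ_UNITS/SK_MIN_* macros:

#include <stdbool.h>
#include <stdio.h>

#define UNIT_MASK_KB	7	/* sizes (in kB) must be a multiple of 8 */
#define MIN_RXQ_KB	16	/* assumed minimum Rx queue size */
#define MIN_TXQ_KB	16	/* assumed minimum Tx queue size with jumbo frames */

struct queue_cfg { unsigned rx, xs, xa; };	/* queue sizes in kB */

static bool queue_cfg_valid(const struct queue_cfg *q, unsigned ram_kb,
			    bool jumbo)
{
	if ((q->rx | q->xs | q->xa) & UNIT_MASK_KB)
		return false;			/* not in units of 8 kB */
	if (q->rx < MIN_RXQ_KB)
		return false;			/* Rx queue too small */
	if (q->xs == 0 && q->xa == 0)
		return false;			/* need at least one Tx queue */
	if (jumbo &&
	    ((q->xs > 0 && q->xs < MIN_TXQ_KB) ||
	     (q->xa > 0 && q->xa < MIN_TXQ_KB)))
		return false;			/* jumbo needs a 16 kB Tx queue */
	return q->rx + q->xs + q->xa <= ram_kb;	/* must fit into the RAM */
}

int main(void)
{
	struct queue_cfg q = { 400, 0, 112 };

	printf("valid: %d\n", queue_cfg_valid(&q, 512, true));
	return 0;
}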
462
463#ifdef GENESIS
464/******************************************************************************
465 *
466 * SkGeInitMacArb() - Initialize the MAC Arbiter
467 *
468 * Description:
469 * This function initializes the MAC Arbiter.
470 * It must not be called if there is still an
471 * initialized or active port.
472 *
473 * Returns:
474 * nothing
475 */
476static void SkGeInitMacArb(
477SK_AC *pAC, /* adapter context */
478SK_IOC IoC) /* IO context */
479{
480 /* release local reset */
481 SK_OUT16(IoC, B3_MA_TO_CTRL, MA_RST_CLR);
482
483 /* configure timeout values */
484 SK_OUT8(IoC, B3_MA_TOINI_RX1, SK_MAC_TO_53);
485 SK_OUT8(IoC, B3_MA_TOINI_RX2, SK_MAC_TO_53);
486 SK_OUT8(IoC, B3_MA_TOINI_TX1, SK_MAC_TO_53);
487 SK_OUT8(IoC, B3_MA_TOINI_TX2, SK_MAC_TO_53);
488
489 SK_OUT8(IoC, B3_MA_RCINI_RX1, 0);
490 SK_OUT8(IoC, B3_MA_RCINI_RX2, 0);
491 SK_OUT8(IoC, B3_MA_RCINI_TX1, 0);
492 SK_OUT8(IoC, B3_MA_RCINI_TX2, 0);
493
494 /* recovery values are needed for XMAC II Rev. B2 only */
495	/* Fast Output Enable Mode was intended to be used with Rev. B2, but now? */
496
497 /*
498 * There is no start or enable button to push, therefore
499 * the MAC arbiter is configured and enabled now.
500 */
501} /* SkGeInitMacArb */
502
503
504/******************************************************************************
505 *
506 * SkGeInitPktArb() - Initialize the Packet Arbiter
507 *
508 * Description:
509 * This function initializes the Packet Arbiter.
510 * It must not be called if there is still an
511 * initialized or active port.
512 *
513 * Returns:
514 * nothing
515 */
516static void SkGeInitPktArb(
517SK_AC *pAC, /* adapter context */
518SK_IOC IoC) /* IO context */
519{
520 /* release local reset */
521 SK_OUT16(IoC, B3_PA_CTRL, PA_RST_CLR);
522
523 /* configure timeout values */
524 SK_OUT16(IoC, B3_PA_TOINI_RX1, SK_PKT_TO_MAX);
525 SK_OUT16(IoC, B3_PA_TOINI_RX2, SK_PKT_TO_MAX);
526 SK_OUT16(IoC, B3_PA_TOINI_TX1, SK_PKT_TO_MAX);
527 SK_OUT16(IoC, B3_PA_TOINI_TX2, SK_PKT_TO_MAX);
528
529 /*
530 * enable timeout timers if jumbo frames not used
531 * NOTE: the packet arbiter timeout interrupt is needed for
532 * half duplex hangup workaround
533 */
534 if (pAC->GIni.GIPortUsage != SK_JUMBO_LINK) {
535 if (pAC->GIni.GIMacsFound == 1) {
536 SK_OUT16(IoC, B3_PA_CTRL, PA_ENA_TO_TX1);
537 }
538 else {
539 SK_OUT16(IoC, B3_PA_CTRL, PA_ENA_TO_TX1 | PA_ENA_TO_TX2);
540 }
541 }
542} /* SkGeInitPktArb */
543#endif /* GENESIS */
544
545
546/******************************************************************************
547 *
548 * SkGeInitMacFifo() - Initialize the MAC FIFOs
549 *
550 * Description:
551 * Initialize all MAC FIFOs of the specified port
552 *
553 * Returns:
554 * nothing
555 */
556static void SkGeInitMacFifo(
557SK_AC *pAC, /* adapter context */
558SK_IOC IoC, /* IO context */
559int Port) /* Port Index (MAC_1 + n) */
560{
561 SK_U16 Word;
562#ifdef VCPU
563 SK_U32 DWord;
564#endif /* VCPU */
565 /*
566 * For each FIFO:
567 * - release local reset
568 * - use default value for MAC FIFO size
569 * - setup defaults for the control register
570 * - enable the FIFO
571 */
572
573#ifdef GENESIS
574 if (pAC->GIni.GIGenesis) {
575 /* Configure Rx MAC FIFO */
576 SK_OUT8(IoC, MR_ADDR(Port, RX_MFF_CTRL2), MFF_RST_CLR);
577 SK_OUT16(IoC, MR_ADDR(Port, RX_MFF_CTRL1), MFF_RX_CTRL_DEF);
578 SK_OUT8(IoC, MR_ADDR(Port, RX_MFF_CTRL2), MFF_ENA_OP_MD);
579
580 /* Configure Tx MAC FIFO */
581 SK_OUT8(IoC, MR_ADDR(Port, TX_MFF_CTRL2), MFF_RST_CLR);
582 SK_OUT16(IoC, MR_ADDR(Port, TX_MFF_CTRL1), MFF_TX_CTRL_DEF);
583 SK_OUT8(IoC, MR_ADDR(Port, TX_MFF_CTRL2), MFF_ENA_OP_MD);
584
585 /* Enable frame flushing if jumbo frames used */
586 if (pAC->GIni.GIPortUsage == SK_JUMBO_LINK) {
587 SK_OUT16(IoC, MR_ADDR(Port, RX_MFF_CTRL1), MFF_ENA_FLUSH);
588 }
589 }
590#endif /* GENESIS */
591
592#ifdef YUKON
593 if (pAC->GIni.GIYukon) {
594 /* set Rx GMAC FIFO Flush Mask */
595 SK_OUT16(IoC, MR_ADDR(Port, RX_GMF_FL_MSK), (SK_U16)RX_FF_FL_DEF_MSK);
596
597 Word = (SK_U16)GMF_RX_CTRL_DEF;
598
599 /* disable Rx GMAC FIFO Flush for YUKON-Lite Rev. A0 only */
600 if (pAC->GIni.GIYukonLite && pAC->GIni.GIChipId == CHIP_ID_YUKON) {
601
602 Word &= ~GMF_RX_F_FL_ON;
603 }
604
605 /* Configure Rx MAC FIFO */
606 SK_OUT8(IoC, MR_ADDR(Port, RX_GMF_CTRL_T), (SK_U8)GMF_RST_CLR);
607 SK_OUT16(IoC, MR_ADDR(Port, RX_GMF_CTRL_T), Word);
608
609 /* set Rx GMAC FIFO Flush Threshold (default: 0x0a -> 56 bytes) */
610 SK_OUT16(IoC, MR_ADDR(Port, RX_GMF_FL_THR), RX_GMF_FL_THR_DEF);
611
612 /* Configure Tx MAC FIFO */
613 SK_OUT8(IoC, MR_ADDR(Port, TX_GMF_CTRL_T), (SK_U8)GMF_RST_CLR);
614 SK_OUT16(IoC, MR_ADDR(Port, TX_GMF_CTRL_T), (SK_U16)GMF_TX_CTRL_DEF);
615
616#ifdef VCPU
617 SK_IN32(IoC, MR_ADDR(Port, RX_GMF_AF_THR), &DWord);
618 SK_IN32(IoC, MR_ADDR(Port, TX_GMF_AE_THR), &DWord);
619#endif /* VCPU */
620
621 /* set Tx GMAC FIFO Almost Empty Threshold */
622/* SK_OUT32(IoC, MR_ADDR(Port, TX_GMF_AE_THR), 0); */
623 }
624#endif /* YUKON */
625
626} /* SkGeInitMacFifo */
627
628#ifdef SK_LNK_SYNC_CNT
629/******************************************************************************
630 *
631 *	SkGeLoadLnkSyncCnt() - Loads the Link Sync Counter and starts counting
632 *
633 * Description:
634 * This function starts the Link Sync Counter of the specified
635 *	port and enables the generation of a Link Sync IRQ.
636 * The Link Sync Counter may be used to detect an active link,
637 * if autonegotiation is not used.
638 *
639 * Note:
640 * o To ensure receiving the Link Sync Event the LinkSyncCounter
641 * should be initialized BEFORE clearing the XMAC's reset!
642 * o Enable IS_LNK_SYNC_M1 and IS_LNK_SYNC_M2 after calling this
643 * function.
644 *
645 * Returns:
646 * nothing
647 */
648void SkGeLoadLnkSyncCnt(
649SK_AC *pAC, /* adapter context */
650SK_IOC IoC, /* IO context */
651int Port, /* Port Index (MAC_1 + n) */
652SK_U32 CntVal) /* Counter value */
653{
654 SK_U32 OrgIMsk;
655 SK_U32 NewIMsk;
656 SK_U32 ISrc;
657 SK_BOOL IrqPend;
658
659 /* stop counter */
660 SK_OUT8(IoC, MR_ADDR(Port, LNK_SYNC_CTRL), LED_STOP);
661
662 /*
663 * ASIC problem:
664	 * Each time the Link Sync Counter is started, an IRQ is generated
665	 * by the adapter. See the problem report entry from 21.07.98.
666 *
667	 * Workaround: Disable Link Sync IRQ and clear the unexpected IRQ
668 * if no IRQ is already pending.
669 */
670 IrqPend = SK_FALSE;
671 SK_IN32(IoC, B0_ISRC, &ISrc);
672 SK_IN32(IoC, B0_IMSK, &OrgIMsk);
673 if (Port == MAC_1) {
674 NewIMsk = OrgIMsk & ~IS_LNK_SYNC_M1;
675 if ((ISrc & IS_LNK_SYNC_M1) != 0) {
676 IrqPend = SK_TRUE;
677 }
678 }
679 else {
680 NewIMsk = OrgIMsk & ~IS_LNK_SYNC_M2;
681 if ((ISrc & IS_LNK_SYNC_M2) != 0) {
682 IrqPend = SK_TRUE;
683 }
684 }
685 if (!IrqPend) {
686 SK_OUT32(IoC, B0_IMSK, NewIMsk);
687 }
688
689 /* load counter */
690 SK_OUT32(IoC, MR_ADDR(Port, LNK_SYNC_INI), CntVal);
691
692 /* start counter */
693 SK_OUT8(IoC, MR_ADDR(Port, LNK_SYNC_CTRL), LED_START);
694
695 if (!IrqPend) {
696 /* clear the unexpected IRQ, and restore the interrupt mask */
697 SK_OUT8(IoC, MR_ADDR(Port, LNK_SYNC_CTRL), LED_CLR_IRQ);
698 SK_OUT32(IoC, B0_IMSK, OrgIMsk);
699 }
700} /* SkGeLoadLnkSyncCnt*/
701#endif /* SK_LNK_SYNC_CNT */
702
703#if defined(SK_DIAG) || defined(SK_CFG_SYNC)
704/******************************************************************************
705 *
706 * SkGeCfgSync() - Configure synchronous bandwidth for this port.
707 *
708 * Description:
709 * This function may be used to configure synchronous bandwidth
710 *	for the specified port. This may be done any time after
711 * initializing the port. The configuration values are NOT saved
712 *	in the HWAC port structure and will be overwritten
713 *	whenever the port is stopped and started.
714 * Any values for the synchronous configuration will be ignored
715 * if the size of the synchronous queue is zero!
716 *
717 * The default configuration for the synchronous service is
718 * TXA_ENA_FSYNC. This means if the size of
719 *	the synchronous queue is non-zero but no specific
720 * synchronous bandwidth is configured, the synchronous queue
721 * will always have the 'unlimited' transmit priority!
722 *
723 * This mode will be restored if the synchronous bandwidth is
724 * deallocated ('IntTime' = 0 and 'LimCount' = 0).
725 *
726 * Returns:
727 * 0: success
728 * 1: parameter configuration error
729 * 2: try to configure quality of service although no
730 * synchronous queue is configured
731 */
732int SkGeCfgSync(
733SK_AC *pAC, /* adapter context */
734SK_IOC IoC, /* IO context */
735int Port, /* Port Index (MAC_1 + n) */
736SK_U32 IntTime, /* Interval Timer Value in units of 8ns */
737SK_U32 LimCount, /* Number of bytes to transfer during IntTime */
738int SyncMode) /* Sync Mode: TXA_ENA_ALLOC | TXA_DIS_ALLOC | 0 */
739{
740 int Rtv;
741
742 Rtv = 0;
743
744 /* check the parameters */
745 if (LimCount > IntTime ||
746 (LimCount == 0 && IntTime != 0) ||
747 (LimCount != 0 && IntTime == 0)) {
748
749 SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_HWI_E010, SKERR_HWI_E010MSG);
750 return(1);
751 }
752
753 if (pAC->GIni.GP[Port].PXSQSize == 0) {
754 SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_HWI_E009, SKERR_HWI_E009MSG);
755 return(2);
756 }
757
758 /* calculate register values */
759 IntTime = (IntTime / 2) * pAC->GIni.GIHstClkFact / 100;
760 LimCount = LimCount / 8;
761
762 if (IntTime > TXA_MAX_VAL || LimCount > TXA_MAX_VAL) {
763 SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_HWI_E010, SKERR_HWI_E010MSG);
764 return(1);
765 }
766
767 /*
768 * - Enable 'Force Sync' to ensure the synchronous queue
769 * has the priority while configuring the new values.
770	 *	- Also 'disable alloc' to ensure the settings comply
771	 *	  with the SyncMode parameter.
772 * - Disable 'Rate Control' to configure the new values.
773 * - write IntTime and LimCount
774 * - start 'Rate Control' and disable 'Force Sync'
775 * if Interval Timer or Limit Counter not zero.
776 */
777 SK_OUT8(IoC, MR_ADDR(Port, TXA_CTRL),
778 TXA_ENA_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC);
779
780 SK_OUT32(IoC, MR_ADDR(Port, TXA_ITI_INI), IntTime);
781 SK_OUT32(IoC, MR_ADDR(Port, TXA_LIM_INI), LimCount);
782
783 SK_OUT8(IoC, MR_ADDR(Port, TXA_CTRL),
784 (SK_U8)(SyncMode & (TXA_ENA_ALLOC | TXA_DIS_ALLOC)));
785
786 if (IntTime != 0 || LimCount != 0) {
787 SK_OUT8(IoC, MR_ADDR(Port, TXA_CTRL), TXA_DIS_FSYNC | TXA_START_RC);
788 }
789
790 return(0);
791} /* SkGeCfgSync */
792#endif /* SK_DIAG || SK_CFG_SYNC*/
793
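The two conversion lines in SkGeCfgSync() follow directly from the parameter units: the interval timer is passed in 8 ns steps and scaled by the host-clock factor, the byte limit is stored in 8-byte units, and both results must fit the arbiter registers. A small sketch of that conversion and range check, using an assumed host-clock factor and an assumed register limit:

#include <stdint.h>
#include <stdio.h>

#define HST_CLK_FACT	53		/* assumed factor for a ~53 MHz host clock */
#define TXA_MAX_REG	0xffffffUL	/* assumed maximum register value */

/* returns 0 on success, 1 if the values do not fit into the registers */
static int calc_sync_regs(uint32_t int_time, uint32_t lim_count,
			  uint32_t *iti_reg, uint32_t *lim_reg)
{
	*iti_reg = (int_time / 2) * HST_CLK_FACT / 100;	/* 8 ns units -> clock ticks */
	*lim_reg = lim_count / 8;			/* bytes -> 8-byte units */

	return (*iti_reg > TXA_MAX_REG || *lim_reg > TXA_MAX_REG) ? 1 : 0;
}

int main(void)
{
	uint32_t iti, lim;

	if (calc_sync_regs(125000, 1514, &iti, &lim) == 0)
		printf("TXA_ITI_INI=%u, TXA_LIM_INI=%u\n",
		       (unsigned)iti, (unsigned)lim);
	return 0;
}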
794
795/******************************************************************************
796 *
797 * DoInitRamQueue() - Initialize the RAM Buffer Address of a single Queue
798 *
799 * Description:
800 * If the queue is used, enable and initialize it.
801 * Make sure the queue is still reset, if it is not used.
802 *
803 * Returns:
804 * nothing
805 */
806static void DoInitRamQueue(
807SK_AC *pAC, /* adapter context */
808SK_IOC IoC, /* IO context */
809int QuIoOffs, /* Queue IO Address Offset */
810SK_U32 QuStartAddr, /* Queue Start Address */
811SK_U32 QuEndAddr, /* Queue End Address */
812int QuType) /* Queue Type (SK_RX_SRAM_Q|SK_RX_BRAM_Q|SK_TX_RAM_Q) */
813{
814 SK_U32 RxUpThresVal;
815 SK_U32 RxLoThresVal;
816
817 if (QuStartAddr != QuEndAddr) {
818 /* calculate thresholds, assume we have a big Rx queue */
819 RxUpThresVal = (QuEndAddr + 1 - QuStartAddr - SK_RB_ULPP) / 8;
820 RxLoThresVal = (QuEndAddr + 1 - QuStartAddr - SK_RB_LLPP_B)/8;
821
822 /* build HW address format */
823 QuStartAddr = QuStartAddr / 8;
824 QuEndAddr = QuEndAddr / 8;
825
826 /* release local reset */
827 SK_OUT8(IoC, RB_ADDR(QuIoOffs, RB_CTRL), RB_RST_CLR);
828
829 /* configure addresses */
830 SK_OUT32(IoC, RB_ADDR(QuIoOffs, RB_START), QuStartAddr);
831 SK_OUT32(IoC, RB_ADDR(QuIoOffs, RB_END), QuEndAddr);
832 SK_OUT32(IoC, RB_ADDR(QuIoOffs, RB_WP), QuStartAddr);
833 SK_OUT32(IoC, RB_ADDR(QuIoOffs, RB_RP), QuStartAddr);
834
835 switch (QuType) {
836 case SK_RX_SRAM_Q:
837 /* configure threshold for small Rx Queue */
838 RxLoThresVal += (SK_RB_LLPP_B - SK_RB_LLPP_S) / 8;
839
840 /* continue with SK_RX_BRAM_Q */
841 case SK_RX_BRAM_Q:
842 /* write threshold for Rx Queue */
843
844 SK_OUT32(IoC, RB_ADDR(QuIoOffs, RB_RX_UTPP), RxUpThresVal);
845 SK_OUT32(IoC, RB_ADDR(QuIoOffs, RB_RX_LTPP), RxLoThresVal);
846
847		/* the high priority threshold is not used */
848 break;
849 case SK_TX_RAM_Q:
850 /*
851 * Do NOT use Store & Forward under normal operation due to
852 * performance optimization (GENESIS only).
853 * But if Jumbo Frames are configured (XMAC Tx FIFO is only 4 kB)
854		 * or YUKON is used (GMAC Tx FIFO is only 1 kB),
855 * we NEED Store & Forward of the RAM buffer.
856 */
857 if (pAC->GIni.GIPortUsage == SK_JUMBO_LINK ||
858 pAC->GIni.GIYukon) {
859 /* enable Store & Forward Mode for the Tx Side */
860 SK_OUT8(IoC, RB_ADDR(QuIoOffs, RB_CTRL), RB_ENA_STFWD);
861 }
862 break;
863 }
864
865 /* set queue operational */
866 SK_OUT8(IoC, RB_ADDR(QuIoOffs, RB_CTRL), RB_ENA_OP_MD);
867 }
868 else {
869 /* ensure the queue is still disabled */
870 SK_OUT8(IoC, RB_ADDR(QuIoOffs, RB_CTRL), RB_RST_SET);
871 }
872} /* DoInitRamQueue */
873
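All the address arithmetic in DoInitRamQueue() is done in 8-byte hardware units: the start and end addresses are divided by 8 before being written, and the Rx pause thresholds are the queue length minus an upper/lower pause offset, again in 8-byte units. A stand-alone sketch with placeholder values for the pause offsets (SK_RB_ULPP and SK_RB_LLPP_* in the real headers):

#include <stdint.h>
#include <stdio.h>

#define ULPP_BYTES	512	/* assumed upper-level pause offset in bytes */
#define LLPP_B_BYTES	1024	/* assumed lower-level pause offset in bytes */

int main(void)
{
	uint32_t start = 0;		/* queue start address in bytes */
	uint32_t end = 409599;		/* queue end address in bytes */
	uint32_t len = end + 1 - start;

	/* thresholds and addresses are written in 8-byte units */
	uint32_t up_thres = (len - ULPP_BYTES) / 8;
	uint32_t lo_thres = (len - LLPP_B_BYTES) / 8;
	uint32_t hw_start = start / 8;
	uint32_t hw_end = end / 8;

	printf("RB_START=%u RB_END=%u UTPP=%u LTPP=%u\n",
	       (unsigned)hw_start, (unsigned)hw_end,
	       (unsigned)up_thres, (unsigned)lo_thres);
	return 0;
}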
874
875/******************************************************************************
876 *
877 * SkGeInitRamBufs() - Initialize the RAM Buffer Queues
878 *
879 * Description:
880 * Initialize all RAM Buffer Queues of the specified port
881 *
882 * Returns:
883 * nothing
884 */
885static void SkGeInitRamBufs(
886SK_AC *pAC, /* adapter context */
887SK_IOC IoC, /* IO context */
888int Port) /* Port Index (MAC_1 + n) */
889{
890 SK_GEPORT *pPrt;
891 int RxQType;
892
893 pPrt = &pAC->GIni.GP[Port];
894
895 if (pPrt->PRxQSize == SK_MIN_RXQ_SIZE) {
896 RxQType = SK_RX_SRAM_Q; /* small Rx Queue */
897 }
898 else {
899 RxQType = SK_RX_BRAM_Q; /* big Rx Queue */
900 }
901
902 DoInitRamQueue(pAC, IoC, pPrt->PRxQOff, pPrt->PRxQRamStart,
903 pPrt->PRxQRamEnd, RxQType);
904
905 DoInitRamQueue(pAC, IoC, pPrt->PXsQOff, pPrt->PXsQRamStart,
906 pPrt->PXsQRamEnd, SK_TX_RAM_Q);
907
908 DoInitRamQueue(pAC, IoC, pPrt->PXaQOff, pPrt->PXaQRamStart,
909 pPrt->PXaQRamEnd, SK_TX_RAM_Q);
910
911} /* SkGeInitRamBufs */
912
913
914/******************************************************************************
915 *
916 * SkGeInitRamIface() - Initialize the RAM Interface
917 *
918 * Description:
919 *	This function initializes the Adapter's RAM Interface.
920 *
921 * Note:
922 * This function is used in the diagnostics.
923 *
924 * Returns:
925 * nothing
926 */
927static void SkGeInitRamIface(
928SK_AC *pAC, /* adapter context */
929SK_IOC IoC) /* IO context */
930{
931 /* release local reset */
932 SK_OUT16(IoC, B3_RI_CTRL, RI_RST_CLR);
933
934 /* configure timeout values */
935 SK_OUT8(IoC, B3_RI_WTO_R1, SK_RI_TO_53);
936 SK_OUT8(IoC, B3_RI_WTO_XA1, SK_RI_TO_53);
937 SK_OUT8(IoC, B3_RI_WTO_XS1, SK_RI_TO_53);
938 SK_OUT8(IoC, B3_RI_RTO_R1, SK_RI_TO_53);
939 SK_OUT8(IoC, B3_RI_RTO_XA1, SK_RI_TO_53);
940 SK_OUT8(IoC, B3_RI_RTO_XS1, SK_RI_TO_53);
941 SK_OUT8(IoC, B3_RI_WTO_R2, SK_RI_TO_53);
942 SK_OUT8(IoC, B3_RI_WTO_XA2, SK_RI_TO_53);
943 SK_OUT8(IoC, B3_RI_WTO_XS2, SK_RI_TO_53);
944 SK_OUT8(IoC, B3_RI_RTO_R2, SK_RI_TO_53);
945 SK_OUT8(IoC, B3_RI_RTO_XA2, SK_RI_TO_53);
946 SK_OUT8(IoC, B3_RI_RTO_XS2, SK_RI_TO_53);
947
948} /* SkGeInitRamIface */
949
950
951/******************************************************************************
952 *
953 * SkGeInitBmu() - Initialize the BMU state machines
954 *
955 * Description:
956 * Initialize all BMU state machines of the specified port
957 *
958 * Returns:
959 * nothing
960 */
961static void SkGeInitBmu(
962SK_AC *pAC, /* adapter context */
963SK_IOC IoC, /* IO context */
964int Port) /* Port Index (MAC_1 + n) */
965{
966 SK_GEPORT *pPrt;
967 SK_U32 RxWm;
968 SK_U32 TxWm;
969
970 pPrt = &pAC->GIni.GP[Port];
971
972 RxWm = SK_BMU_RX_WM;
973 TxWm = SK_BMU_TX_WM;
974
975 if (!pAC->GIni.GIPciSlot64 && !pAC->GIni.GIPciClock66) {
976 /* for better performance */
977 RxWm /= 2;
978 TxWm /= 2;
979 }
980
981 /* Rx Queue: Release all local resets and set the watermark */
982 SK_OUT32(IoC, Q_ADDR(pPrt->PRxQOff, Q_CSR), CSR_CLR_RESET);
983 SK_OUT32(IoC, Q_ADDR(pPrt->PRxQOff, Q_F), RxWm);
984
985 /*
986	 * Tx Queue: Release all local resets if the queue is used
987	 * and set the watermark.
988 */
989 if (pPrt->PXSQSize != 0) {
990 SK_OUT32(IoC, Q_ADDR(pPrt->PXsQOff, Q_CSR), CSR_CLR_RESET);
991 SK_OUT32(IoC, Q_ADDR(pPrt->PXsQOff, Q_F), TxWm);
992 }
993
994 if (pPrt->PXAQSize != 0) {
995 SK_OUT32(IoC, Q_ADDR(pPrt->PXaQOff, Q_CSR), CSR_CLR_RESET);
996 SK_OUT32(IoC, Q_ADDR(pPrt->PXaQOff, Q_F), TxWm);
997 }
998 /*
999 * Do NOT enable the descriptor poll timers here, because
1000 * the descriptor addresses are not specified yet.
1001 */
1002} /* SkGeInitBmu */
1003
1004
1005/******************************************************************************
1006 *
1007 * TestStopBit() - Test the stop bit of the queue
1008 *
1009 * Description:
1010 * Stopping a queue is not as simple as it seems to be.
1011 * If descriptor polling is enabled, it may happen
1012 * that RX/TX stop is done and SV idle is NOT set.
1013 * In this case we have to issue another stop command.
1014 *
1015 * Returns:
1016 *	The queue's control status register
1017 */
1018static SK_U32 TestStopBit(
1019SK_AC *pAC, /* Adapter Context */
1020SK_IOC IoC, /* IO Context */
1021int QuIoOffs) /* Queue IO Address Offset */
1022{
1023 SK_U32 QuCsr; /* CSR contents */
1024
1025 SK_IN32(IoC, Q_ADDR(QuIoOffs, Q_CSR), &QuCsr);
1026
1027 if ((QuCsr & (CSR_STOP | CSR_SV_IDLE)) == 0) {
1028 /* Stop Descriptor overridden by start command */
1029 SK_OUT32(IoC, Q_ADDR(QuIoOffs, Q_CSR), CSR_STOP);
1030
1031 SK_IN32(IoC, Q_ADDR(QuIoOffs, Q_CSR), &QuCsr);
1032 }
1033
1034 return(QuCsr);
1035} /* TestStopBit */
1036
1037
1038/******************************************************************************
1039 *
1040 * SkGeStopPort() - Stop the Rx/Tx activity of the port 'Port'.
1041 *
1042 * Description:
1043 * After calling this function the descriptor rings and Rx and Tx
1044 * queues of this port may be reconfigured.
1045 *
1046 *	It is possible to stop the receive and transmit paths separately or
1047 *	both together.
1048 *
1049 * Dir = SK_STOP_TX Stops the transmit path only and resets the MAC.
1050 * The receive queue is still active and
1051 *			the pending Rx frames may still be transferred
1052 * into the RxD.
1053 *	   SK_STOP_RX	Stops the receive path. The transmit path
1054 *			has to be stopped beforehand.
1055 * SK_STOP_ALL SK_STOP_TX + SK_STOP_RX
1056 *
1057 * RstMode = SK_SOFT_RST Resets the MAC. The PHY is still alive.
1058 * SK_HARD_RST Resets the MAC and the PHY.
1059 *
1060 * Example:
1061 * 1) A Link Down event was signaled for a port. Therefore the activity
1062 * of this port should be stopped and a hardware reset should be issued
1063 * to enable the workaround of XMAC Errata #2. But the received frames
1064 * should not be discarded.
1065 * ...
1066 * SkGeStopPort(pAC, IoC, Port, SK_STOP_TX, SK_HARD_RST);
1067 * (transfer all pending Rx frames)
1068 * SkGeStopPort(pAC, IoC, Port, SK_STOP_RX, SK_HARD_RST);
1069 * ...
1070 *
1071 *	2) An event was issued which requests the driver to switch
1072 *	the 'virtual active' link to another already active port
1073 * as soon as possible. The frames in the receive queue of this
1074 * port may be lost. But the PHY must not be reset during this
1075 * event.
1076 * ...
1077 * SkGeStopPort(pAC, IoC, Port, SK_STOP_ALL, SK_SOFT_RST);
1078 * ...
1079 *
1080 * Extended Description:
1081 * If SK_STOP_TX is set,
1082 *	o disable the MAC's receiver and transmitter to prevent
1083 *	  sending of incomplete frames
1084 *	o stop the port's transmit queues before terminating the
1085 *	  BMUs to prevent incomplete PCI cycles
1086 *	  on the PCI bus
1087 *	  - The network Rx and Tx activity and the PCI Tx transfer are
1088 *	    disabled now.
1089 * o reset the MAC depending on the RstMode
1090 * o Stop Interval Timer and Limit Counter of Tx Arbiter,
1091 * also disable Force Sync bit and Enable Alloc bit.
1092 * o perform a local reset of the port's Tx path
1093 * - reset the PCI FIFO of the async Tx queue
1094 * - reset the PCI FIFO of the sync Tx queue
1095 * - reset the RAM Buffer async Tx queue
1096 * - reset the RAM Buffer sync Tx queue
1097 * - reset the MAC Tx FIFO
1098 * o switch Link and Tx LED off, stop the LED counters
1099 *
1100 * If SK_STOP_RX is set,
1101 * o stop the port's receive queue
1102 * - The path data transfer activity is fully stopped now.
1103 * o perform a local reset of the port's Rx path
1104 * - reset the PCI FIFO of the Rx queue
1105 * - reset the RAM Buffer receive queue
1106 * - reset the MAC Rx FIFO
1107 * o switch Rx LED off, stop the LED counter
1108 *
1109 * If all ports are stopped,
1110 * o reset the RAM Interface.
1111 *
1112 * Notes:
1113 * o This function may be called during the driver states RESET_PORT and
1114 * SWITCH_PORT.
1115 */
1116void SkGeStopPort(
1117SK_AC *pAC, /* adapter context */
1118SK_IOC IoC, /* I/O context */
1119int Port, /* port to stop (MAC_1 + n) */
1120int Dir, /* Direction to Stop (SK_STOP_RX, SK_STOP_TX, SK_STOP_ALL) */
1121int RstMode)/* Reset Mode (SK_SOFT_RST, SK_HARD_RST) */
1122{
1123#ifndef SK_DIAG
1124 SK_EVPARA Para;
1125#endif /* !SK_DIAG */
1126 SK_GEPORT *pPrt;
1127 SK_U32 DWord;
1128 SK_U32 XsCsr;
1129 SK_U32 XaCsr;
1130 SK_U64 ToutStart;
1131 int i;
1132 int ToutCnt;
1133
1134 pPrt = &pAC->GIni.GP[Port];
1135
1136 if ((Dir & SK_STOP_TX) != 0) {
1137 /* disable receiver and transmitter */
1138 SkMacRxTxDisable(pAC, IoC, Port);
1139
1140 /* stop both transmit queues */
1141 /*
1142 * If the BMU is in the reset state CSR_STOP will terminate
1143 * immediately.
1144 */
1145 SK_OUT32(IoC, Q_ADDR(pPrt->PXsQOff, Q_CSR), CSR_STOP);
1146 SK_OUT32(IoC, Q_ADDR(pPrt->PXaQOff, Q_CSR), CSR_STOP);
1147
1148 ToutStart = SkOsGetTime(pAC);
1149 ToutCnt = 0;
1150 do {
1151 /*
1152 * Clear packet arbiter timeout to make sure
1153 * this loop will terminate.
1154 */
1155 SK_OUT16(IoC, B3_PA_CTRL, (SK_U16)((Port == MAC_1) ?
1156 PA_CLR_TO_TX1 : PA_CLR_TO_TX2));
1157
1158 /*
1159			 * If the transfer gets stuck at the MAC, the STOP command will not
1160			 * terminate unless we flush the XMAC's transmit FIFO!
1161 */
1162 SkMacFlushTxFifo(pAC, IoC, Port);
1163
1164 XsCsr = TestStopBit(pAC, IoC, pPrt->PXsQOff);
1165 XaCsr = TestStopBit(pAC, IoC, pPrt->PXaQOff);
1166
1167 if (SkOsGetTime(pAC) - ToutStart > (SK_TICKS_PER_SEC / 18)) {
1168 /*
1169 * Timeout of 1/18 second reached.
1170 * This needs to be checked at 1/18 sec only.
1171 */
1172 ToutCnt++;
1173 if (ToutCnt > 1) {
1174 /* Might be a problem when the driver event handler
1175 * calls StopPort again. XXX.
1176 */
1177
1178 /* Fatal Error, Loop aborted */
1179 SK_ERR_LOG(pAC, SK_ERRCL_HW, SKERR_HWI_E018,
1180 SKERR_HWI_E018MSG);
1181#ifndef SK_DIAG
1182 Para.Para64 = Port;
1183 SkEventQueue(pAC, SKGE_DRV, SK_DRV_PORT_FAIL, Para);
1184#endif /* !SK_DIAG */
1185 return;
1186 }
1187 /*
1188 * Cache incoherency workaround: Assume a start command
1189 * has been lost while sending the frame.
1190 */
1191 ToutStart = SkOsGetTime(pAC);
1192
1193 if ((XsCsr & CSR_STOP) != 0) {
1194 SK_OUT32(IoC, Q_ADDR(pPrt->PXsQOff, Q_CSR), CSR_START);
1195 }
1196 if ((XaCsr & CSR_STOP) != 0) {
1197 SK_OUT32(IoC, Q_ADDR(pPrt->PXaQOff, Q_CSR), CSR_START);
1198 }
1199 }
1200
1201 /*
1202 * Because of the ASIC problem report entry from 21.08.1998 it is
1203 * required to wait until CSR_STOP is reset and CSR_SV_IDLE is set.
1204 */
1205 } while ((XsCsr & (CSR_STOP | CSR_SV_IDLE)) != CSR_SV_IDLE ||
1206 (XaCsr & (CSR_STOP | CSR_SV_IDLE)) != CSR_SV_IDLE);
1207
1208 /* Reset the MAC depending on the RstMode */
1209 if (RstMode == SK_SOFT_RST) {
1210 SkMacSoftRst(pAC, IoC, Port);
1211 }
1212 else {
1213 SkMacHardRst(pAC, IoC, Port);
1214 }
1215
1216 /* Disable Force Sync bit and Enable Alloc bit */
1217 SK_OUT8(IoC, MR_ADDR(Port, TXA_CTRL),
1218 TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC);
1219
1220 /* Stop Interval Timer and Limit Counter of Tx Arbiter */
1221 SK_OUT32(IoC, MR_ADDR(Port, TXA_ITI_INI), 0L);
1222 SK_OUT32(IoC, MR_ADDR(Port, TXA_LIM_INI), 0L);
1223
1224 /* Perform a local reset of the port's Tx path */
1225
1226 /* Reset the PCI FIFO of the async Tx queue */
1227 SK_OUT32(IoC, Q_ADDR(pPrt->PXaQOff, Q_CSR), CSR_SET_RESET);
1228 /* Reset the PCI FIFO of the sync Tx queue */
1229 SK_OUT32(IoC, Q_ADDR(pPrt->PXsQOff, Q_CSR), CSR_SET_RESET);
1230 /* Reset the RAM Buffer async Tx queue */
1231 SK_OUT8(IoC, RB_ADDR(pPrt->PXaQOff, RB_CTRL), RB_RST_SET);
1232 /* Reset the RAM Buffer sync Tx queue */
1233 SK_OUT8(IoC, RB_ADDR(pPrt->PXsQOff, RB_CTRL), RB_RST_SET);
1234
1235 /* Reset Tx MAC FIFO */
1236#ifdef GENESIS
1237 if (pAC->GIni.GIGenesis) {
1238 /* Note: MFF_RST_SET does NOT reset the XMAC ! */
1239 SK_OUT8(IoC, MR_ADDR(Port, TX_MFF_CTRL2), MFF_RST_SET);
1240
1241 /* switch Link and Tx LED off, stop the LED counters */
1242 /* Link LED is switched off by the RLMT and the Diag itself */
1243 SkGeXmitLED(pAC, IoC, MR_ADDR(Port, TX_LED_INI), SK_LED_DIS);
1244 }
1245#endif /* GENESIS */
1246
1247#ifdef YUKON
1248 if (pAC->GIni.GIYukon) {
1249 /* Reset TX MAC FIFO */
1250 SK_OUT8(IoC, MR_ADDR(Port, TX_GMF_CTRL_T), (SK_U8)GMF_RST_SET);
1251 }
1252#endif /* YUKON */
1253 }
1254
1255 if ((Dir & SK_STOP_RX) != 0) {
1256 /*
1257 * The RX Stop Command will not terminate if no buffers
1258 * are queued in the RxD ring. But it will always reach
1259 * the Idle state. Therefore we can use this feature to
1260 * stop the transfer of received packets.
1261 */
1262 /* stop the port's receive queue */
1263 SK_OUT32(IoC, Q_ADDR(pPrt->PRxQOff, Q_CSR), CSR_STOP);
1264
1265 i = 100;
1266 do {
1267 /*
1268 * Clear packet arbiter timeout to make sure
1269 * this loop will terminate
1270 */
1271 SK_OUT16(IoC, B3_PA_CTRL, (SK_U16)((Port == MAC_1) ?
1272 PA_CLR_TO_RX1 : PA_CLR_TO_RX2));
1273
1274 DWord = TestStopBit(pAC, IoC, pPrt->PRxQOff);
1275
1276 /* timeout if i==0 (bug fix for #10748) */
1277 if (--i == 0) {
1278 SK_ERR_LOG(pAC, SK_ERRCL_HW, SKERR_HWI_E024,
1279 SKERR_HWI_E024MSG);
1280 break;
1281 }
1282 /*
1283 * because of the ASIC problem report entry from 21.08.98
1284 * it is required to wait until CSR_STOP is reset and
1285 * CSR_SV_IDLE is set.
1286 */
1287 } while ((DWord & (CSR_STOP | CSR_SV_IDLE)) != CSR_SV_IDLE);
1288
1289 /* The path data transfer activity is fully stopped now */
1290
1291 /* Perform a local reset of the port's Rx path */
1292
1293 /* Reset the PCI FIFO of the Rx queue */
1294 SK_OUT32(IoC, Q_ADDR(pPrt->PRxQOff, Q_CSR), CSR_SET_RESET);
1295 /* Reset the RAM Buffer receive queue */
1296 SK_OUT8(IoC, RB_ADDR(pPrt->PRxQOff, RB_CTRL), RB_RST_SET);
1297
1298 /* Reset Rx MAC FIFO */
1299#ifdef GENESIS
1300 if (pAC->GIni.GIGenesis) {
1301
1302 SK_OUT8(IoC, MR_ADDR(Port, RX_MFF_CTRL2), MFF_RST_SET);
1303
1304 /* switch Rx LED off, stop the LED counter */
1305 SkGeXmitLED(pAC, IoC, MR_ADDR(Port, RX_LED_INI), SK_LED_DIS);
1306 }
1307#endif /* GENESIS */
1308
1309#ifdef YUKON
1310 if (pAC->GIni.GIYukon) {
1311 /* Reset Rx MAC FIFO */
1312 SK_OUT8(IoC, MR_ADDR(Port, RX_GMF_CTRL_T), (SK_U8)GMF_RST_SET);
1313 }
1314#endif /* YUKON */
1315 }
1316} /* SkGeStopPort */
1317
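The first example from the header comment maps directly onto a call sequence. A hedged fragment of a caller, assuming it lives in a module that includes the driver's own headers; the receive-drain step is only indicated by a comment because it belongs to the OS-specific part:

/* Sketch only: stop a port on link down without losing queued Rx frames. */
static void example_link_down(SK_AC *pAC, SK_IOC IoC, int Port)
{
	/* stop the transmit path, hard reset of MAC and PHY (XMAC Errata #2 WA) */
	SkGeStopPort(pAC, IoC, Port, SK_STOP_TX, SK_HARD_RST);

	/* ... transfer the frames still pending in the receive queue ... */

	/* now the receive path can be stopped as well */
	SkGeStopPort(pAC, IoC, Port, SK_STOP_RX, SK_HARD_RST);
}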
1318
1319/******************************************************************************
1320 *
1321 * SkGeInit0() - Level 0 Initialization
1322 *
1323 * Description:
1324 * - Initialize the BMU address offsets
1325 *
1326 * Returns:
1327 * nothing
1328 */
1329static void SkGeInit0(
1330SK_AC *pAC, /* adapter context */
1331SK_IOC IoC) /* IO context */
1332{
1333 int i;
1334 SK_GEPORT *pPrt;
1335
1336 for (i = 0; i < SK_MAX_MACS; i++) {
1337 pPrt = &pAC->GIni.GP[i];
1338
1339 pPrt->PState = SK_PRT_RESET;
1340 pPrt->PRxQOff = QOffTab[i].RxQOff;
1341 pPrt->PXsQOff = QOffTab[i].XsQOff;
1342 pPrt->PXaQOff = QOffTab[i].XaQOff;
1343 pPrt->PCheckPar = SK_FALSE;
1344 pPrt->PIsave = 0;
1345 pPrt->PPrevShorts = 0;
1346 pPrt->PLinkResCt = 0;
1347 pPrt->PAutoNegTOCt = 0;
1348 pPrt->PPrevRx = 0;
1349 pPrt->PPrevFcs = 0;
1350 pPrt->PRxLim = SK_DEF_RX_WA_LIM;
1351 pPrt->PLinkMode = (SK_U8)SK_LMODE_AUTOFULL;
1352 pPrt->PLinkSpeedCap = (SK_U8)SK_LSPEED_CAP_1000MBPS;
1353 pPrt->PLinkSpeed = (SK_U8)SK_LSPEED_1000MBPS;
1354 pPrt->PLinkSpeedUsed = (SK_U8)SK_LSPEED_STAT_UNKNOWN;
1355 pPrt->PLinkModeConf = (SK_U8)SK_LMODE_AUTOSENSE;
1356 pPrt->PFlowCtrlMode = (SK_U8)SK_FLOW_MODE_SYM_OR_REM;
1357 pPrt->PLinkCap = (SK_U8)(SK_LMODE_CAP_HALF | SK_LMODE_CAP_FULL |
1358 SK_LMODE_CAP_AUTOHALF | SK_LMODE_CAP_AUTOFULL);
1359 pPrt->PLinkModeStatus = (SK_U8)SK_LMODE_STAT_UNKNOWN;
1360 pPrt->PFlowCtrlCap = (SK_U8)SK_FLOW_MODE_SYM_OR_REM;
1361 pPrt->PFlowCtrlStatus = (SK_U8)SK_FLOW_STAT_NONE;
1362 pPrt->PMSCap = 0;
1363 pPrt->PMSMode = (SK_U8)SK_MS_MODE_AUTO;
1364 pPrt->PMSStatus = (SK_U8)SK_MS_STAT_UNSET;
1365 pPrt->PLipaAutoNeg = (SK_U8)SK_LIPA_UNKNOWN;
1366 pPrt->PAutoNegFail = SK_FALSE;
1367 pPrt->PHWLinkUp = SK_FALSE;
1368 pPrt->PLinkBroken = SK_TRUE; /* See WA code */
1369 pPrt->PPhyPowerState = PHY_PM_OPERATIONAL_MODE;
1370 pPrt->PMacColThres = TX_COL_DEF;
1371 pPrt->PMacJamLen = TX_JAM_LEN_DEF;
1372 pPrt->PMacJamIpgVal = TX_JAM_IPG_DEF;
1373 pPrt->PMacJamIpgData = TX_IPG_JAM_DEF;
1374 pPrt->PMacIpgData = IPG_DATA_DEF;
1375 pPrt->PMacLimit4 = SK_FALSE;
1376 }
1377
1378 pAC->GIni.GIPortUsage = SK_RED_LINK;
1379 pAC->GIni.GILedBlinkCtrl = (SK_U16)OemConfig.Value;
1380 pAC->GIni.GIValIrqMask = IS_ALL_MSK;
1381
1382} /* SkGeInit0*/
1383
1384
1385/******************************************************************************
1386 *
1387 * SkGeInit1() - Level 1 Initialization
1388 *
1389 * Description:
1390 * o Do a software reset.
1391 * o Clear all reset bits.
1392 * o Verify that the detected hardware is present.
1393 * Return an error if not.
1394 * o Get the hardware configuration
1395 * + Read the number of MACs/Ports.
1396 * + Read the RAM size.
1397 * + Read the PCI Revision Id.
1398 *	  + Find out the adapter's host clock speed
1399 * + Read and check the PHY type
1400 *
1401 * Returns:
1402 * 0: success
1403 * 5: Unexpected PHY type detected
1404 * 6: HW self test failed
1405 */
1406static int SkGeInit1(
1407SK_AC *pAC, /* adapter context */
1408SK_IOC IoC) /* IO context */
1409{
1410 SK_U8 Byte;
1411 SK_U16 Word;
1412 SK_U16 CtrlStat;
1413 SK_U32 DWord;
1414 int RetVal;
1415 int i;
1416
1417 RetVal = 0;
1418
1419 /* save CLK_RUN bits (YUKON-Lite) */
1420 SK_IN16(IoC, B0_CTST, &CtrlStat);
1421
1422 /* do the SW-reset */
1423 SK_OUT8(IoC, B0_CTST, CS_RST_SET);
1424
1425 /* release the SW-reset */
1426 SK_OUT8(IoC, B0_CTST, CS_RST_CLR);
1427
1428 /* reset all error bits in the PCI STATUS register */
1429 /*
1430 * Note: PCI Cfg cycles cannot be used, because they are not
1431 * available on some platforms after 'boot time'.
1432 */
1433 SK_IN16(IoC, PCI_C(PCI_STATUS), &Word);
1434
1435 SK_OUT8(IoC, B2_TST_CTRL1, TST_CFG_WRITE_ON);
1436 SK_OUT16(IoC, PCI_C(PCI_STATUS), (SK_U16)(Word | PCI_ERRBITS));
1437 SK_OUT8(IoC, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
1438
1439 /* release Master Reset */
1440 SK_OUT8(IoC, B0_CTST, CS_MRST_CLR);
1441
1442#ifdef CLK_RUN
1443 CtrlStat |= CS_CLK_RUN_ENA;
1444#endif /* CLK_RUN */
1445
1446 /* restore CLK_RUN bits */
1447 SK_OUT16(IoC, B0_CTST, (SK_U16)(CtrlStat &
1448 (CS_CLK_RUN_HOT | CS_CLK_RUN_RST | CS_CLK_RUN_ENA)));
1449
1450 /* read Chip Identification Number */
1451 SK_IN8(IoC, B2_CHIP_ID, &Byte);
1452 pAC->GIni.GIChipId = Byte;
1453
1454 /* read number of MACs */
1455 SK_IN8(IoC, B2_MAC_CFG, &Byte);
1456 pAC->GIni.GIMacsFound = (Byte & CFG_SNG_MAC) ? 1 : 2;
1457
1458 /* get Chip Revision Number */
1459 pAC->GIni.GIChipRev = (SK_U8)((Byte & CFG_CHIP_R_MSK) >> 4);
1460
1461 /* get diff. PCI parameters */
1462 SK_IN16(IoC, B0_CTST, &CtrlStat);
1463
1464	/* read the adapter's RAM size */
1465 SK_IN8(IoC, B2_E_0, &Byte);
1466
1467 pAC->GIni.GIGenesis = SK_FALSE;
1468 pAC->GIni.GIYukon = SK_FALSE;
1469 pAC->GIni.GIYukonLite = SK_FALSE;
1470
1471#ifdef GENESIS
1472 if (pAC->GIni.GIChipId == CHIP_ID_GENESIS) {
1473
1474 pAC->GIni.GIGenesis = SK_TRUE;
1475
1476 if (Byte == (SK_U8)3) {
1477 /* special case: 4 x 64k x 36, offset = 0x80000 */
1478 pAC->GIni.GIRamSize = 1024;
1479 pAC->GIni.GIRamOffs = (SK_U32)512 * 1024;
1480 }
1481 else {
1482 pAC->GIni.GIRamSize = (int)Byte * 512;
1483 pAC->GIni.GIRamOffs = 0;
1484 }
1485 /* all GE adapters work with 53.125 MHz host clock */
1486 pAC->GIni.GIHstClkFact = SK_FACT_53;
1487
1488 /* set Descr. Poll Timer Init Value to 250 ms */
1489 pAC->GIni.GIPollTimerVal =
1490 SK_DPOLL_DEF * (SK_U32)pAC->GIni.GIHstClkFact / 100;
1491 }
1492#endif /* GENESIS */
1493
1494#ifdef YUKON
1495 if (pAC->GIni.GIChipId != CHIP_ID_GENESIS) {
1496
1497 pAC->GIni.GIYukon = SK_TRUE;
1498
1499 pAC->GIni.GIRamSize = (Byte == (SK_U8)0) ? 128 : (int)Byte * 4;
1500
1501 pAC->GIni.GIRamOffs = 0;
1502
1503 /* WA for chip Rev. A */
1504 pAC->GIni.GIWolOffs = (pAC->GIni.GIChipId == CHIP_ID_YUKON &&
1505 pAC->GIni.GIChipRev == 0) ? WOL_REG_OFFS : 0;
1506
1507 /* get PM Capabilities of PCI config space */
1508 SK_IN16(IoC, PCI_C(PCI_PM_CAP_REG), &Word);
1509
1510 /* check if VAUX is available */
1511 if (((CtrlStat & CS_VAUX_AVAIL) != 0) &&
1512 /* check also if PME from D3cold is set */
1513 ((Word & PCI_PME_D3C_SUP) != 0)) {
1514 /* set entry in GE init struct */
1515 pAC->GIni.GIVauxAvail = SK_TRUE;
1516 }
1517
1518 if (pAC->GIni.GIChipId == CHIP_ID_YUKON_LITE) {
1519 /* this is Rev. A1 */
1520 pAC->GIni.GIYukonLite = SK_TRUE;
1521 }
1522 else {
1523 /* save Flash-Address Register */
1524 SK_IN32(IoC, B2_FAR, &DWord);
1525
1526 /* test Flash-Address Register */
1527 SK_OUT8(IoC, B2_FAR + 3, 0xff);
1528 SK_IN8(IoC, B2_FAR + 3, &Byte);
1529
1530 if (Byte != 0) {
1531 /* this is Rev. A0 */
1532 pAC->GIni.GIYukonLite = SK_TRUE;
1533
1534 /* restore Flash-Address Register */
1535 SK_OUT32(IoC, B2_FAR, DWord);
1536 }
1537 }
1538
1539 /* switch power to VCC (WA for VAUX problem) */
1540 SK_OUT8(IoC, B0_POWER_CTRL, (SK_U8)(PC_VAUX_ENA | PC_VCC_ENA |
1541 PC_VAUX_OFF | PC_VCC_ON));
1542
1543 /* read the Interrupt source */
1544 SK_IN32(IoC, B0_ISRC, &DWord);
1545
1546 if ((DWord & IS_HW_ERR) != 0) {
1547 /* read the HW Error Interrupt source */
1548 SK_IN32(IoC, B0_HWE_ISRC, &DWord);
1549
1550 if ((DWord & IS_IRQ_SENSOR) != 0) {
1551 /* disable HW Error IRQ */
1552 pAC->GIni.GIValIrqMask &= ~IS_HW_ERR;
1553 }
1554 }
1555
1556 for (i = 0; i < pAC->GIni.GIMacsFound; i++) {
1557 /* set GMAC Link Control reset */
1558 SK_OUT16(IoC, MR_ADDR(i, GMAC_LINK_CTRL), GMLC_RST_SET);
1559
1560 /* clear GMAC Link Control reset */
1561 SK_OUT16(IoC, MR_ADDR(i, GMAC_LINK_CTRL), GMLC_RST_CLR);
1562 }
1563 /* all YU chips work with 78.125 MHz host clock */
1564 pAC->GIni.GIHstClkFact = SK_FACT_78;
1565
1566 pAC->GIni.GIPollTimerVal = SK_DPOLL_MAX; /* 215 ms */
1567 }
1568#endif /* YUKON */
1569
1570 /* check if 64-bit PCI Slot is present */
1571 pAC->GIni.GIPciSlot64 = (SK_BOOL)((CtrlStat & CS_BUS_SLOT_SZ) != 0);
1572
1573 /* check if 66 MHz PCI Clock is active */
1574 pAC->GIni.GIPciClock66 = (SK_BOOL)((CtrlStat & CS_BUS_CLOCK) != 0);
1575
1576 /* read PCI HW Revision Id. */
1577 SK_IN8(IoC, PCI_C(PCI_REV_ID), &Byte);
1578 pAC->GIni.GIPciHwRev = Byte;
1579
1580 /* read the PMD type */
1581 SK_IN8(IoC, B2_PMD_TYP, &Byte);
1582 pAC->GIni.GICopperType = (SK_U8)(Byte == 'T');
1583
1584 /* read the PHY type */
1585 SK_IN8(IoC, B2_E_1, &Byte);
1586
1587 Byte &= 0x0f; /* the PHY type is stored in the lower nibble */
1588 for (i = 0; i < pAC->GIni.GIMacsFound; i++) {
1589
1590#ifdef GENESIS
1591 if (pAC->GIni.GIGenesis) {
1592 switch (Byte) {
1593 case SK_PHY_XMAC:
1594 pAC->GIni.GP[i].PhyAddr = PHY_ADDR_XMAC;
1595 break;
1596 case SK_PHY_BCOM:
1597 pAC->GIni.GP[i].PhyAddr = PHY_ADDR_BCOM;
1598 pAC->GIni.GP[i].PMSCap = (SK_U8)(SK_MS_CAP_AUTO |
1599 SK_MS_CAP_MASTER | SK_MS_CAP_SLAVE);
1600 break;
1601#ifdef OTHER_PHY
1602 case SK_PHY_LONE:
1603 pAC->GIni.GP[i].PhyAddr = PHY_ADDR_LONE;
1604 break;
1605 case SK_PHY_NAT:
1606 pAC->GIni.GP[i].PhyAddr = PHY_ADDR_NAT;
1607 break;
1608#endif /* OTHER_PHY */
1609 default:
1610 /* ERROR: unexpected PHY type detected */
1611 RetVal = 5;
1612 break;
1613 }
1614 }
1615#endif /* GENESIS */
1616
1617#ifdef YUKON
1618 if (pAC->GIni.GIYukon) {
1619
1620 if (Byte < (SK_U8)SK_PHY_MARV_COPPER) {
1621 /* if this field is not initialized */
1622 Byte = (SK_U8)SK_PHY_MARV_COPPER;
1623
1624 pAC->GIni.GICopperType = SK_TRUE;
1625 }
1626
1627 pAC->GIni.GP[i].PhyAddr = PHY_ADDR_MARV;
1628
1629 if (pAC->GIni.GICopperType) {
1630
1631 pAC->GIni.GP[i].PLinkSpeedCap = (SK_U8)(SK_LSPEED_CAP_AUTO |
1632 SK_LSPEED_CAP_10MBPS | SK_LSPEED_CAP_100MBPS |
1633 SK_LSPEED_CAP_1000MBPS);
1634
1635 pAC->GIni.GP[i].PLinkSpeed = (SK_U8)SK_LSPEED_AUTO;
1636
1637 pAC->GIni.GP[i].PMSCap = (SK_U8)(SK_MS_CAP_AUTO |
1638 SK_MS_CAP_MASTER | SK_MS_CAP_SLAVE);
1639 }
1640 else {
1641 Byte = (SK_U8)SK_PHY_MARV_FIBER;
1642 }
1643 }
1644#endif /* YUKON */
1645
1646 pAC->GIni.GP[i].PhyType = (int)Byte;
1647
1648 SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_INIT,
1649 ("PHY type: %d PHY addr: %04x\n", Byte,
1650 pAC->GIni.GP[i].PhyAddr));
1651 }
1652
1653	/* get MAC Type & set function pointers depending on it */
1654#ifdef GENESIS
1655 if (pAC->GIni.GIGenesis) {
1656
1657 pAC->GIni.GIMacType = SK_MAC_XMAC;
1658
1659 pAC->GIni.GIFunc.pFnMacUpdateStats = SkXmUpdateStats;
1660 pAC->GIni.GIFunc.pFnMacStatistic = SkXmMacStatistic;
1661 pAC->GIni.GIFunc.pFnMacResetCounter = SkXmResetCounter;
1662 pAC->GIni.GIFunc.pFnMacOverflow = SkXmOverflowStatus;
1663 }
1664#endif /* GENESIS */
1665
1666#ifdef YUKON
1667 if (pAC->GIni.GIYukon) {
1668
1669 pAC->GIni.GIMacType = SK_MAC_GMAC;
1670
1671 pAC->GIni.GIFunc.pFnMacUpdateStats = SkGmUpdateStats;
1672 pAC->GIni.GIFunc.pFnMacStatistic = SkGmMacStatistic;
1673 pAC->GIni.GIFunc.pFnMacResetCounter = SkGmResetCounter;
1674 pAC->GIni.GIFunc.pFnMacOverflow = SkGmOverflowStatus;
1675
1676#ifdef SPECIAL_HANDLING
1677 if (pAC->GIni.GIChipId == CHIP_ID_YUKON) {
1678 /* check HW self test result */
1679 SK_IN8(IoC, B2_E_3, &Byte);
1680 if (Byte & B2_E3_RES_MASK) {
1681 RetVal = 6;
1682 }
1683 }
1684#endif
1685 }
1686#endif /* YUKON */
1687
1688 return(RetVal);
1689} /* SkGeInit1 */
1690
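SkGeInit1() derives two fields from the single byte read at B2_MAC_CFG: the number of MACs from the single-MAC flag and the chip revision from the upper nibble. A tiny stand-alone sketch of that decoding; the exact bit position of the single-MAC flag is an assumption here:

#include <stdint.h>
#include <stdio.h>

#define CFG_SNG_MAC_BIT	0x01	/* assumed position of the single-MAC flag */
#define CFG_CHIP_R_MASK	0xf0	/* chip revision lives in the upper nibble */

int main(void)
{
	uint8_t mac_cfg = 0x21;	/* example value as read from B2_MAC_CFG */

	int macs_found = (mac_cfg & CFG_SNG_MAC_BIT) ? 1 : 2;
	int chip_rev = (mac_cfg & CFG_CHIP_R_MASK) >> 4;

	printf("MACs found: %d, chip revision: %d\n", macs_found, chip_rev);
	return 0;
}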
1691
1692/******************************************************************************
1693 *
1694 * SkGeInit2() - Level 2 Initialization
1695 *
1696 * Description:
1697 * - start the Blink Source Counter
1698 * - start the Descriptor Poll Timer
1699 * - configure the MAC-Arbiter
1700 * - configure the Packet-Arbiter
1701 * - enable the Tx Arbiters
1702 * - enable the RAM Interface Arbiter
1703 *
1704 * Returns:
1705 * nothing
1706 */
1707static void SkGeInit2(
1708SK_AC *pAC, /* adapter context */
1709SK_IOC IoC) /* IO context */
1710{
1711#ifdef GENESIS
1712 SK_U32 DWord;
1713#endif /* GENESIS */
1714 int i;
1715
1716 /* start the Descriptor Poll Timer */
1717 if (pAC->GIni.GIPollTimerVal != 0) {
1718 if (pAC->GIni.GIPollTimerVal > SK_DPOLL_MAX) {
1719 pAC->GIni.GIPollTimerVal = SK_DPOLL_MAX;
1720
1721 SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_HWI_E017, SKERR_HWI_E017MSG);
1722 }
1723 SK_OUT32(IoC, B28_DPT_INI, pAC->GIni.GIPollTimerVal);
1724 SK_OUT8(IoC, B28_DPT_CTRL, DPT_START);
1725 }
1726
1727#ifdef GENESIS
1728 if (pAC->GIni.GIGenesis) {
1729 /* start the Blink Source Counter */
1730 DWord = SK_BLK_DUR * (SK_U32)pAC->GIni.GIHstClkFact / 100;
1731
1732 SK_OUT32(IoC, B2_BSC_INI, DWord);
1733 SK_OUT8(IoC, B2_BSC_CTRL, BSC_START);
1734
1735 /*
1736 * Configure the MAC Arbiter and the Packet Arbiter.
1737 * They will be started once and never be stopped.
1738 */
1739 SkGeInitMacArb(pAC, IoC);
1740
1741 SkGeInitPktArb(pAC, IoC);
1742 }
1743#endif /* GENESIS */
1744
1745#ifdef YUKON
1746 if (pAC->GIni.GIYukon) {
1747 /* start Time Stamp Timer */
1748 SK_OUT8(IoC, GMAC_TI_ST_CTRL, (SK_U8)GMT_ST_START);
1749 }
1750#endif /* YUKON */
1751
1752 /* enable the Tx Arbiters */
1753 for (i = 0; i < pAC->GIni.GIMacsFound; i++) {
1754 SK_OUT8(IoC, MR_ADDR(i, TXA_CTRL), TXA_ENA_ARB);
1755 }
1756
1757 /* enable the RAM Interface Arbiter */
1758 SkGeInitRamIface(pAC, IoC);
1759
1760} /* SkGeInit2 */
1761
1762/******************************************************************************
1763 *
1764 * SkGeInit() - Initialize the GE Adapter with the specified level.
1765 *
1766 * Description:
1767 * Level 0: Initialize the Module structures.
1768 * Level 1: Generic Hardware Initialization. The IOP/MemBase pointer has
1769 * to be set before calling this level.
1770 *
1771 * o Do a software reset.
1772 * o Clear all reset bits.
1773 * o Verify that the detected hardware is present.
1774 * Return an error if not.
1775 * o Get the hardware configuration
1776 * + Set GIMacsFound with the number of MACs.
1777 * + Store the RAM size in GIRamSize.
1778 * + Save the PCI Revision ID in GIPciHwRev.
1779 * o return an error
1780 * if Number of MACs > SK_MAX_MACS
1781 *
1782 * After returning from Level 0 the adapter
1783 * may be accessed with IO operations.
1784 *
1785 * Level 2: start the Blink Source Counter
1786 *
1787 * Returns:
1788 * 0: success
1789 * 1: Number of MACs exceeds SK_MAX_MACS (after level 1)
1790 * 2: Adapter not present or not accessible
1791 * 3: Illegal initialization level
1792 * 4: Initialization Level 1 Call missing
1793 * 5: Unexpected PHY type detected
1794 * 6: HW self test failed
1795 */
1796int SkGeInit(
1797SK_AC *pAC, /* adapter context */
1798SK_IOC IoC, /* IO context */
1799int Level) /* initialization level */
1800{
1801 int RetVal; /* return value */
1802 SK_U32 DWord;
1803
1804 RetVal = 0;
1805 SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_INIT,
1806 ("SkGeInit(Level %d)\n", Level));
1807
1808 switch (Level) {
1809 case SK_INIT_DATA:
1810 /* Initialization Level 0 */
1811 SkGeInit0(pAC, IoC);
1812 pAC->GIni.GILevel = SK_INIT_DATA;
1813 break;
1814
1815 case SK_INIT_IO:
1816 /* Initialization Level 1 */
1817 RetVal = SkGeInit1(pAC, IoC);
1818 if (RetVal != 0) {
1819 break;
1820 }
1821
1822 /* check if the adapter seems to be accessible */
1823 SK_OUT32(IoC, B2_IRQM_INI, SK_TEST_VAL);
1824 SK_IN32(IoC, B2_IRQM_INI, &DWord);
1825 SK_OUT32(IoC, B2_IRQM_INI, 0L);
1826
1827 if (DWord != SK_TEST_VAL) {
1828 RetVal = 2;
1829 break;
1830 }
1831
1832 /* check if the number of GIMacsFound matches SK_MAX_MACS */
1833 if (pAC->GIni.GIMacsFound > SK_MAX_MACS) {
1834 RetVal = 1;
1835 break;
1836 }
1837
1838 /* Level 1 successfully passed */
1839 pAC->GIni.GILevel = SK_INIT_IO;
1840 break;
1841
1842 case SK_INIT_RUN:
1843 /* Initialization Level 2 */
1844 if (pAC->GIni.GILevel != SK_INIT_IO) {
1845#ifndef SK_DIAG
1846 SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_HWI_E002, SKERR_HWI_E002MSG);
1847#endif /* !SK_DIAG */
1848 RetVal = 4;
1849 break;
1850 }
1851 SkGeInit2(pAC, IoC);
1852
1853 /* Level 2 successfully passed */
1854 pAC->GIni.GILevel = SK_INIT_RUN;
1855 break;
1856
1857 default:
1858 SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_HWI_E003, SKERR_HWI_E003MSG);
1859 RetVal = 3;
1860 break;
1861 }
1862
1863 return(RetVal);
1864} /* SkGeInit */
1865
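Putting the three levels together, a caller steps through them in order and maps the documented return codes onto its own error handling. A hedged fragment, assuming it sits in a source file that includes the driver's headers like the other sk98lin modules do; the error handling shown is illustrative, not the driver's own:

static int example_bring_up(SK_AC *pAC, SK_IOC IoC)
{
	int Rtv;

	/* Level 0: initialize the module structures, no IO access yet */
	SkGeInit(pAC, IoC, SK_INIT_DATA);

	/* Level 1: software reset, read the hardware configuration */
	Rtv = SkGeInit(pAC, IoC, SK_INIT_IO);
	if (Rtv != 0) {
		/* 1: too many MACs, 2: adapter not accessible, 5/6: PHY or self test */
		return Rtv;
	}

	/* Level 2: start timers and arbiters */
	return SkGeInit(pAC, IoC, SK_INIT_RUN);
}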
1866
1867/******************************************************************************
1868 *
1869 * SkGeDeInit() - Deinitialize the adapter
1870 *
1871 * Description:
1872 * All ports of the adapter will be stopped if not already done.
1873 * Do a software reset and switch off all LEDs.
1874 *
1875 * Returns:
1876 * nothing
1877 */
1878void SkGeDeInit(
1879SK_AC *pAC, /* adapter context */
1880SK_IOC IoC) /* IO context */
1881{
1882 int i;
1883 SK_U16 Word;
1884
1885#if (!defined(SK_SLIM) && !defined(VCPU))
1886 /* ensure I2C is ready */
1887 SkI2cWaitIrq(pAC, IoC);
1888#endif
1889
1890 /* stop all current transfer activity */
1891 for (i = 0; i < pAC->GIni.GIMacsFound; i++) {
1892 if (pAC->GIni.GP[i].PState != SK_PRT_STOP &&
1893 pAC->GIni.GP[i].PState != SK_PRT_RESET) {
1894
1895 SkGeStopPort(pAC, IoC, i, SK_STOP_ALL, SK_HARD_RST);
1896 }
1897 }
1898
1899 /* Reset all bits in the PCI STATUS register */
1900 /*
1901 * Note: PCI Cfg cycles cannot be used, because they are not
1902 * available on some platforms after 'boot time'.
1903 */
1904 SK_IN16(IoC, PCI_C(PCI_STATUS), &Word);
1905
1906 SK_OUT8(IoC, B2_TST_CTRL1, TST_CFG_WRITE_ON);
1907 SK_OUT16(IoC, PCI_C(PCI_STATUS), (SK_U16)(Word | PCI_ERRBITS));
1908 SK_OUT8(IoC, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
1909
1910 /* do the reset, all LEDs are switched off now */
1911 SK_OUT8(IoC, B0_CTST, CS_RST_SET);
1912
1913 pAC->GIni.GILevel = SK_INIT_DATA;
1914} /* SkGeDeInit */
1915
1916
1917/******************************************************************************
1918 *
1919 *	SkGeInitPort() - Initialize the specified port.
1920 *
1921 * Description:
1922 *	PRxQSize, PXSQSize, and PXAQSize have to be
1923 *	configured for the specified port before calling this function.
1924 *	The descriptor rings have to be initialized, too.
1925 *
1926 * o (Re)configure queues of the specified port.
1927 * o configure the MAC of the specified port.
1928 * o put ASIC and MAC(s) in operational mode.
1929 * o initialize Rx/Tx and Sync LED
1930 * o initialize RAM Buffers and MAC FIFOs
1931 *
1932 * The port is ready to connect when returning.
1933 *
1934 * Note:
1935 * The MAC's Rx and Tx state machine is still disabled when returning.
1936 *
1937 * Returns:
1938 * 0: success
1939 * 1: Queue size initialization error. The configured values
1940 * for PRxQSize, PXSQSize, or PXAQSize are invalid for one
1941 * or more queues. The specified port was NOT initialized.
1942 * An error log entry was generated.
1943 * 2: The port has to be stopped before it can be initialized again.
1944 */
1945int SkGeInitPort(
1946SK_AC *pAC, /* adapter context */
1947SK_IOC IoC, /* IO context */
1948int Port) /* Port to configure */
1949{
1950 SK_GEPORT *pPrt;
1951
1952 pPrt = &pAC->GIni.GP[Port];
1953
1954 if (SkGeCheckQSize(pAC, Port) != 0) {
1955 SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_HWI_E004, SKERR_HWI_E004MSG);
1956 return(1);
1957 }
1958
1959 if (pPrt->PState == SK_PRT_INIT || pPrt->PState == SK_PRT_RUN) {
1960 SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_HWI_E005, SKERR_HWI_E005MSG);
1961 return(2);
1962 }
1963
1964 /* configuration ok, initialize the Port now */
1965
1966#ifdef GENESIS
1967 if (pAC->GIni.GIGenesis) {
1968 /* initialize Rx, Tx and Link LED */
1969 /*
1970		 * If the 1000BT PHY needs LED initialization, then swap the
1971		 * LED and XMAC initialization order.
1972 */
1973 SkGeXmitLED(pAC, IoC, MR_ADDR(Port, TX_LED_INI), SK_LED_ENA);
1974 SkGeXmitLED(pAC, IoC, MR_ADDR(Port, RX_LED_INI), SK_LED_ENA);
1975 /* The Link LED is initialized by RLMT or Diagnostics itself */
1976
1977 SkXmInitMac(pAC, IoC, Port);
1978 }
1979#endif /* GENESIS */
1980
1981#ifdef YUKON
1982 if (pAC->GIni.GIYukon) {
1983
1984 SkGmInitMac(pAC, IoC, Port);
1985 }
1986#endif /* YUKON */
1987
1988 /* do NOT initialize the Link Sync Counter */
1989
1990 SkGeInitMacFifo(pAC, IoC, Port);
1991
1992 SkGeInitRamBufs(pAC, IoC, Port);
1993
1994 if (pPrt->PXSQSize != 0) {
1995 /* enable Force Sync bit if synchronous queue available */
1996 SK_OUT8(IoC, MR_ADDR(Port, TXA_CTRL), TXA_ENA_FSYNC);
1997 }
1998
1999 SkGeInitBmu(pAC, IoC, Port);
2000
2001 /* mark port as initialized */
2002 pPrt->PState = SK_PRT_INIT;
2003
2004 return(0);
2005} /* SkGeInitPort */
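The contract spelled out in the header comment amounts to: configure the three queue sizes, make sure the port is stopped, then call SkGeInitPort() and check for a queue-size error. A hedged fragment under the same assumption as the earlier sketches (driver headers included); the queue sizes are illustrative and must of course satisfy SkGeCheckQSize():

static int example_init_port(SK_AC *pAC, SK_IOC IoC, int Port)
{
	SK_GEPORT *pPrt = &pAC->GIni.GP[Port];

	/* queue sizes in kB: multiples of 8, Rx not below its minimum */
	pPrt->PRxQSize = 400;
	pPrt->PXSQSize = 0;	/* no synchronous Tx queue */
	pPrt->PXAQSize = 112;

	/* returns 1 on an invalid queue configuration, 2 if the port is not stopped */
	return SkGeInitPort(pAC, IoC, Port);
}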
diff --git a/drivers/net/sk98lin/skgemib.c b/drivers/net/sk98lin/skgemib.c
deleted file mode 100644
index fde45083eb7b..000000000000
--- a/drivers/net/sk98lin/skgemib.c
+++ /dev/null
@@ -1,1075 +0,0 @@
1/*****************************************************************************
2 *
3 * Name: skgemib.c
4 * Project: GEnesis, PCI Gigabit Ethernet Adapter
5 * Version: $Revision: 1.11 $
6 * Date: $Date: 2003/09/15 13:38:12 $
7 * Purpose: Private Network Management Interface Management Database
8 *
9 ****************************************************************************/
10
11/******************************************************************************
12 *
13 * (C)Copyright 1998-2002 SysKonnect GmbH.
14 * (C)Copyright 2002-2003 Marvell.
15 *
16 * This program is free software; you can redistribute it and/or modify
17 * it under the terms of the GNU General Public License as published by
18 * the Free Software Foundation; either version 2 of the License, or
19 * (at your option) any later version.
20 *
21 * The information in this file is provided "AS IS" without warranty.
22 *
23 ******************************************************************************/
24
25/*
26 * PRIVATE OID handler function prototypes
27 */
28PNMI_STATIC int Addr(SK_AC *pAC, SK_IOC IoC, int action,
29 SK_U32 Id, char *pBuf, unsigned int *pLen, SK_U32 Instance,
30 unsigned int TableIndex, SK_U32 NetIndex);
31PNMI_STATIC int CsumStat(SK_AC *pAC, SK_IOC IoC, int action, SK_U32 Id,
32 char *pBuf, unsigned int *pLen, SK_U32 Instance,
33 unsigned int TableIndex, SK_U32 NetIndex);
34PNMI_STATIC int General(SK_AC *pAC, SK_IOC IoC, int action, SK_U32 Id,
35 char *pBuf, unsigned int *pLen, SK_U32 Instance,
36 unsigned int TableIndex, SK_U32 NetIndex);
37PNMI_STATIC int Mac8023Stat(SK_AC *pAC, SK_IOC IoC, int action, SK_U32 Id,
38 char *pBuf, unsigned int *pLen, SK_U32 Instance,
39 unsigned int TableIndex, SK_U32 NetIndex);
40PNMI_STATIC int MacPrivateConf(SK_AC *pAC, SK_IOC IoC, int action, SK_U32 Id,
41 char *pBuf, unsigned int *pLen, SK_U32 Instance,
42 unsigned int TableIndex, SK_U32 NetIndex);
43PNMI_STATIC int MacPrivateStat(SK_AC *pAC, SK_IOC IoC, int action, SK_U32 Id,
44 char *pBuf, unsigned int *pLen, SK_U32 Instance,
45 unsigned int TableIndex, SK_U32 NetIndex);
46PNMI_STATIC int Monitor(SK_AC *pAC, SK_IOC IoC, int action,
47 SK_U32 Id, char *pBuf, unsigned int *pLen, SK_U32 Instance,
48 unsigned int TableIndex, SK_U32 NetIndex);
49PNMI_STATIC int OidStruct(SK_AC *pAC, SK_IOC IoC, int action, SK_U32 Id,
50 char *pBuf, unsigned int *pLen, SK_U32 Instance,
51 unsigned int TableIndex, SK_U32 NetIndex);
52PNMI_STATIC int Perform(SK_AC *pAC, SK_IOC IoC, int action, SK_U32 Id,
53 char *pBuf, unsigned int* pLen, SK_U32 Instance,
54 unsigned int TableIndex, SK_U32 NetIndex);
55PNMI_STATIC int Rlmt(SK_AC *pAC, SK_IOC IoC, int action, SK_U32 Id,
56 char *pBuf, unsigned int *pLen, SK_U32 Instance,
57 unsigned int TableIndex, SK_U32 NetIndex);
58PNMI_STATIC int RlmtStat(SK_AC *pAC, SK_IOC IoC, int action, SK_U32 Id,
59 char *pBuf, unsigned int *pLen, SK_U32 Instance,
60 unsigned int TableIndex, SK_U32 NetIndex);
61PNMI_STATIC int SensorStat(SK_AC *pAC, SK_IOC IoC, int action, SK_U32 Id,
62 char *pBuf, unsigned int *pLen, SK_U32 Instance,
63 unsigned int TableIndex, SK_U32 NetIndex);
64PNMI_STATIC int Vpd(SK_AC *pAC, SK_IOC IoC, int action, SK_U32 Id,
65 char *pBuf, unsigned int *pLen, SK_U32 Instance,
66 unsigned int TableIndex, SK_U32 NetIndex);
67PNMI_STATIC int Vct(SK_AC *pAC, SK_IOC IoC, int action, SK_U32 Id,
68 char *pBuf, unsigned int *pLen, SK_U32 Instance,
69 unsigned int TableIndex, SK_U32 NetIndex);
70
71#ifdef SK_POWER_MGMT
72PNMI_STATIC int PowerManagement(SK_AC *pAC, SK_IOC IoC, int action, SK_U32 Id,
73 char *pBuf, unsigned int *pLen, SK_U32 Instance,
74 unsigned int TableIndex, SK_U32 NetIndex);
75#endif /* SK_POWER_MGMT */
76
77#ifdef SK_DIAG_SUPPORT
78PNMI_STATIC int DiagActions(SK_AC *pAC, SK_IOC IoC, int action, SK_U32 Id,
79 char *pBuf, unsigned int *pLen, SK_U32 Instance,
80 unsigned int TableIndex, SK_U32 NetIndex);
81#endif /* SK_DIAG_SUPPORT */
82
83
84/* defines *******************************************************************/
85#define ID_TABLE_SIZE ARRAY_SIZE(IdTable)
86
87
88/* global variables **********************************************************/
89
90/*
91 * Table to correlate OID with handler function and index to
92 * hardware register stored in StatAddress if applicable.
93 */
94PNMI_STATIC const SK_PNMI_TAB_ENTRY IdTable[] = {
95 {OID_GEN_XMIT_OK,
96 0,
97 0,
98 0,
99 SK_PNMI_RO, Mac8023Stat, SK_PNMI_HTX},
100 {OID_GEN_RCV_OK,
101 0,
102 0,
103 0,
104 SK_PNMI_RO, Mac8023Stat, SK_PNMI_HRX},
105 {OID_GEN_XMIT_ERROR,
106 0,
107 0,
108 0,
109 SK_PNMI_RO, General, 0},
110 {OID_GEN_RCV_ERROR,
111 0,
112 0,
113 0,
114 SK_PNMI_RO, General, 0},
115 {OID_GEN_RCV_NO_BUFFER,
116 0,
117 0,
118 0,
119 SK_PNMI_RO, General, 0},
120 {OID_GEN_DIRECTED_FRAMES_XMIT,
121 0,
122 0,
123 0,
124 SK_PNMI_RO, Mac8023Stat, SK_PNMI_HTX_UNICAST},
125 {OID_GEN_MULTICAST_FRAMES_XMIT,
126 0,
127 0,
128 0,
129 SK_PNMI_RO, Mac8023Stat, SK_PNMI_HTX_MULTICAST},
130 {OID_GEN_BROADCAST_FRAMES_XMIT,
131 0,
132 0,
133 0,
134 SK_PNMI_RO, Mac8023Stat, SK_PNMI_HTX_BROADCAST},
135 {OID_GEN_DIRECTED_FRAMES_RCV,
136 0,
137 0,
138 0,
139 SK_PNMI_RO, Mac8023Stat, SK_PNMI_HRX_UNICAST},
140 {OID_GEN_MULTICAST_FRAMES_RCV,
141 0,
142 0,
143 0,
144 SK_PNMI_RO, Mac8023Stat, SK_PNMI_HRX_MULTICAST},
145 {OID_GEN_BROADCAST_FRAMES_RCV,
146 0,
147 0,
148 0,
149 SK_PNMI_RO, Mac8023Stat, SK_PNMI_HRX_BROADCAST},
150 {OID_GEN_RCV_CRC_ERROR,
151 0,
152 0,
153 0,
154 SK_PNMI_RO, Mac8023Stat, SK_PNMI_HRX_FCS},
155 {OID_GEN_TRANSMIT_QUEUE_LENGTH,
156 0,
157 0,
158 0,
159 SK_PNMI_RO, General, 0},
160 {OID_802_3_PERMANENT_ADDRESS,
161 0,
162 0,
163 0,
164 SK_PNMI_RO, Mac8023Stat, 0},
165 {OID_802_3_CURRENT_ADDRESS,
166 0,
167 0,
168 0,
169 SK_PNMI_RO, Mac8023Stat, 0},
170 {OID_802_3_RCV_ERROR_ALIGNMENT,
171 0,
172 0,
173 0,
174 SK_PNMI_RO, Mac8023Stat, SK_PNMI_HRX_FRAMING},
175 {OID_802_3_XMIT_ONE_COLLISION,
176 0,
177 0,
178 0,
179 SK_PNMI_RO, Mac8023Stat, SK_PNMI_HTX_SINGLE_COL},
180 {OID_802_3_XMIT_MORE_COLLISIONS,
181 0,
182 0,
183 0,
184 SK_PNMI_RO, Mac8023Stat, SK_PNMI_HTX_MULTI_COL},
185 {OID_802_3_XMIT_DEFERRED,
186 0,
187 0,
188 0,
189 SK_PNMI_RO, Mac8023Stat, SK_PNMI_HTX_DEFFERAL},
190 {OID_802_3_XMIT_MAX_COLLISIONS,
191 0,
192 0,
193 0,
194 SK_PNMI_RO, Mac8023Stat, SK_PNMI_HTX_EXCESS_COL},
195 {OID_802_3_RCV_OVERRUN,
196 0,
197 0,
198 0,
199 SK_PNMI_RO, Mac8023Stat, SK_PNMI_HRX_OVERFLOW},
200 {OID_802_3_XMIT_UNDERRUN,
201 0,
202 0,
203 0,
204 SK_PNMI_RO, Mac8023Stat, SK_PNMI_HTX_UNDERRUN},
205 {OID_802_3_XMIT_TIMES_CRS_LOST,
206 0,
207 0,
208 0,
209 SK_PNMI_RO, Mac8023Stat, SK_PNMI_HTX_CARRIER},
210 {OID_802_3_XMIT_LATE_COLLISIONS,
211 0,
212 0,
213 0,
214 SK_PNMI_RO, Mac8023Stat, SK_PNMI_HTX_LATE_COL},
215#ifdef SK_POWER_MGMT
216 {OID_PNP_CAPABILITIES,
217 0,
218 0,
219 0,
220 SK_PNMI_RO, PowerManagement, 0},
221 {OID_PNP_SET_POWER,
222 0,
223 0,
224 0,
225 SK_PNMI_WO, PowerManagement, 0},
226 {OID_PNP_QUERY_POWER,
227 0,
228 0,
229 0,
230 SK_PNMI_RO, PowerManagement, 0},
231 {OID_PNP_ADD_WAKE_UP_PATTERN,
232 0,
233 0,
234 0,
235 SK_PNMI_WO, PowerManagement, 0},
236 {OID_PNP_REMOVE_WAKE_UP_PATTERN,
237 0,
238 0,
239 0,
240 SK_PNMI_WO, PowerManagement, 0},
241 {OID_PNP_ENABLE_WAKE_UP,
242 0,
243 0,
244 0,
245 SK_PNMI_RW, PowerManagement, 0},
246#endif /* SK_POWER_MGMT */
247#ifdef SK_DIAG_SUPPORT
248 {OID_SKGE_DIAG_MODE,
249 0,
250 0,
251 0,
252 SK_PNMI_RW, DiagActions, 0},
253#endif /* SK_DIAG_SUPPORT */
254 {OID_SKGE_MDB_VERSION,
255 1,
256 0,
257 SK_PNMI_MAI_OFF(MgmtDBVersion),
258 SK_PNMI_RO, General, 0},
259 {OID_SKGE_SUPPORTED_LIST,
260 0,
261 0,
262 0,
263 SK_PNMI_RO, General, 0},
264 {OID_SKGE_ALL_DATA,
265 0,
266 0,
267 0,
268 SK_PNMI_RW, OidStruct, 0},
269 {OID_SKGE_VPD_FREE_BYTES,
270 1,
271 0,
272 SK_PNMI_MAI_OFF(VpdFreeBytes),
273 SK_PNMI_RO, Vpd, 0},
274 {OID_SKGE_VPD_ENTRIES_LIST,
275 1,
276 0,
277 SK_PNMI_MAI_OFF(VpdEntriesList),
278 SK_PNMI_RO, Vpd, 0},
279 {OID_SKGE_VPD_ENTRIES_NUMBER,
280 1,
281 0,
282 SK_PNMI_MAI_OFF(VpdEntriesNumber),
283 SK_PNMI_RO, Vpd, 0},
284 {OID_SKGE_VPD_KEY,
285 SK_PNMI_VPD_ENTRIES,
286 sizeof(SK_PNMI_VPD),
287 SK_PNMI_OFF(Vpd) + SK_PNMI_VPD_OFF(VpdKey),
288 SK_PNMI_RO, Vpd, 0},
289 {OID_SKGE_VPD_VALUE,
290 SK_PNMI_VPD_ENTRIES,
291 sizeof(SK_PNMI_VPD),
292 SK_PNMI_OFF(Vpd) + SK_PNMI_VPD_OFF(VpdValue),
293 SK_PNMI_RO, Vpd, 0},
294 {OID_SKGE_VPD_ACCESS,
295 SK_PNMI_VPD_ENTRIES,
296 sizeof(SK_PNMI_VPD),
297 SK_PNMI_OFF(Vpd) + SK_PNMI_VPD_OFF(VpdAccess),
298 SK_PNMI_RO, Vpd, 0},
299 {OID_SKGE_VPD_ACTION,
300 SK_PNMI_VPD_ENTRIES,
301 sizeof(SK_PNMI_VPD),
302 SK_PNMI_OFF(Vpd) + SK_PNMI_VPD_OFF(VpdAction),
303 SK_PNMI_RW, Vpd, 0},
304 {OID_SKGE_PORT_NUMBER,
305 1,
306 0,
307 SK_PNMI_MAI_OFF(PortNumber),
308 SK_PNMI_RO, General, 0},
309 {OID_SKGE_DEVICE_TYPE,
310 1,
311 0,
312 SK_PNMI_MAI_OFF(DeviceType),
313 SK_PNMI_RO, General, 0},
314 {OID_SKGE_DRIVER_DESCR,
315 1,
316 0,
317 SK_PNMI_MAI_OFF(DriverDescr),
318 SK_PNMI_RO, General, 0},
319 {OID_SKGE_DRIVER_VERSION,
320 1,
321 0,
322 SK_PNMI_MAI_OFF(DriverVersion),
323 SK_PNMI_RO, General, 0},
324 {OID_SKGE_DRIVER_RELDATE,
325 1,
326 0,
327 SK_PNMI_MAI_OFF(DriverReleaseDate),
328 SK_PNMI_RO, General, 0},
329 {OID_SKGE_DRIVER_FILENAME,
330 1,
331 0,
332 SK_PNMI_MAI_OFF(DriverFileName),
333 SK_PNMI_RO, General, 0},
334 {OID_SKGE_HW_DESCR,
335 1,
336 0,
337 SK_PNMI_MAI_OFF(HwDescr),
338 SK_PNMI_RO, General, 0},
339 {OID_SKGE_HW_VERSION,
340 1,
341 0,
342 SK_PNMI_MAI_OFF(HwVersion),
343 SK_PNMI_RO, General, 0},
344 {OID_SKGE_CHIPSET,
345 1,
346 0,
347 SK_PNMI_MAI_OFF(Chipset),
348 SK_PNMI_RO, General, 0},
349 {OID_SKGE_CHIPID,
350 1,
351 0,
352 SK_PNMI_MAI_OFF(ChipId),
353 SK_PNMI_RO, General, 0},
354 {OID_SKGE_RAMSIZE,
355 1,
356 0,
357 SK_PNMI_MAI_OFF(RamSize),
358 SK_PNMI_RO, General, 0},
359 {OID_SKGE_VAUXAVAIL,
360 1,
361 0,
362 SK_PNMI_MAI_OFF(VauxAvail),
363 SK_PNMI_RO, General, 0},
364 {OID_SKGE_ACTION,
365 1,
366 0,
367 SK_PNMI_MAI_OFF(Action),
368 SK_PNMI_RW, Perform, 0},
369 {OID_SKGE_RESULT,
370 1,
371 0,
372 SK_PNMI_MAI_OFF(TestResult),
373 SK_PNMI_RO, General, 0},
374 {OID_SKGE_BUS_TYPE,
375 1,
376 0,
377 SK_PNMI_MAI_OFF(BusType),
378 SK_PNMI_RO, General, 0},
379 {OID_SKGE_BUS_SPEED,
380 1,
381 0,
382 SK_PNMI_MAI_OFF(BusSpeed),
383 SK_PNMI_RO, General, 0},
384 {OID_SKGE_BUS_WIDTH,
385 1,
386 0,
387 SK_PNMI_MAI_OFF(BusWidth),
388 SK_PNMI_RO, General, 0},
389 {OID_SKGE_TX_SW_QUEUE_LEN,
390 1,
391 0,
392 SK_PNMI_MAI_OFF(TxSwQueueLen),
393 SK_PNMI_RO, General, 0},
394 {OID_SKGE_TX_SW_QUEUE_MAX,
395 1,
396 0,
397 SK_PNMI_MAI_OFF(TxSwQueueMax),
398 SK_PNMI_RO, General, 0},
399 {OID_SKGE_TX_RETRY,
400 1,
401 0,
402 SK_PNMI_MAI_OFF(TxRetryCts),
403 SK_PNMI_RO, General, 0},
404 {OID_SKGE_RX_INTR_CTS,
405 1,
406 0,
407 SK_PNMI_MAI_OFF(RxIntrCts),
408 SK_PNMI_RO, General, 0},
409 {OID_SKGE_TX_INTR_CTS,
410 1,
411 0,
412 SK_PNMI_MAI_OFF(TxIntrCts),
413 SK_PNMI_RO, General, 0},
414 {OID_SKGE_RX_NO_BUF_CTS,
415 1,
416 0,
417 SK_PNMI_MAI_OFF(RxNoBufCts),
418 SK_PNMI_RO, General, 0},
419 {OID_SKGE_TX_NO_BUF_CTS,
420 1,
421 0,
422 SK_PNMI_MAI_OFF(TxNoBufCts),
423 SK_PNMI_RO, General, 0},
424 {OID_SKGE_TX_USED_DESCR_NO,
425 1,
426 0,
427 SK_PNMI_MAI_OFF(TxUsedDescrNo),
428 SK_PNMI_RO, General, 0},
429 {OID_SKGE_RX_DELIVERED_CTS,
430 1,
431 0,
432 SK_PNMI_MAI_OFF(RxDeliveredCts),
433 SK_PNMI_RO, General, 0},
434 {OID_SKGE_RX_OCTETS_DELIV_CTS,
435 1,
436 0,
437 SK_PNMI_MAI_OFF(RxOctetsDeliveredCts),
438 SK_PNMI_RO, General, 0},
439 {OID_SKGE_RX_HW_ERROR_CTS,
440 1,
441 0,
442 SK_PNMI_MAI_OFF(RxHwErrorsCts),
443 SK_PNMI_RO, General, 0},
444 {OID_SKGE_TX_HW_ERROR_CTS,
445 1,
446 0,
447 SK_PNMI_MAI_OFF(TxHwErrorsCts),
448 SK_PNMI_RO, General, 0},
449 {OID_SKGE_IN_ERRORS_CTS,
450 1,
451 0,
452 SK_PNMI_MAI_OFF(InErrorsCts),
453 SK_PNMI_RO, General, 0},
454 {OID_SKGE_OUT_ERROR_CTS,
455 1,
456 0,
457 SK_PNMI_MAI_OFF(OutErrorsCts),
458 SK_PNMI_RO, General, 0},
459 {OID_SKGE_ERR_RECOVERY_CTS,
460 1,
461 0,
462 SK_PNMI_MAI_OFF(ErrRecoveryCts),
463 SK_PNMI_RO, General, 0},
464 {OID_SKGE_SYSUPTIME,
465 1,
466 0,
467 SK_PNMI_MAI_OFF(SysUpTime),
468 SK_PNMI_RO, General, 0},
469 {OID_SKGE_SENSOR_NUMBER,
470 1,
471 0,
472 SK_PNMI_MAI_OFF(SensorNumber),
473 SK_PNMI_RO, General, 0},
474 {OID_SKGE_SENSOR_INDEX,
475 SK_PNMI_SENSOR_ENTRIES,
476 sizeof(SK_PNMI_SENSOR),
477 SK_PNMI_OFF(Sensor) + SK_PNMI_SEN_OFF(SensorIndex),
478 SK_PNMI_RO, SensorStat, 0},
479 {OID_SKGE_SENSOR_DESCR,
480 SK_PNMI_SENSOR_ENTRIES,
481 sizeof(SK_PNMI_SENSOR),
482 SK_PNMI_OFF(Sensor) + SK_PNMI_SEN_OFF(SensorDescr),
483 SK_PNMI_RO, SensorStat, 0},
484 {OID_SKGE_SENSOR_TYPE,
485 SK_PNMI_SENSOR_ENTRIES,
486 sizeof(SK_PNMI_SENSOR),
487 SK_PNMI_OFF(Sensor) + SK_PNMI_SEN_OFF(SensorType),
488 SK_PNMI_RO, SensorStat, 0},
489 {OID_SKGE_SENSOR_VALUE,
490 SK_PNMI_SENSOR_ENTRIES,
491 sizeof(SK_PNMI_SENSOR),
492 SK_PNMI_OFF(Sensor) + SK_PNMI_SEN_OFF(SensorValue),
493 SK_PNMI_RO, SensorStat, 0},
494 {OID_SKGE_SENSOR_WAR_THRES_LOW,
495 SK_PNMI_SENSOR_ENTRIES,
496 sizeof(SK_PNMI_SENSOR),
497 SK_PNMI_OFF(Sensor) + SK_PNMI_SEN_OFF(SensorWarningThresholdLow),
498 SK_PNMI_RO, SensorStat, 0},
499 {OID_SKGE_SENSOR_WAR_THRES_UPP,
500 SK_PNMI_SENSOR_ENTRIES,
501 sizeof(SK_PNMI_SENSOR),
502 SK_PNMI_OFF(Sensor) + SK_PNMI_SEN_OFF(SensorWarningThresholdHigh),
503 SK_PNMI_RO, SensorStat, 0},
504 {OID_SKGE_SENSOR_ERR_THRES_LOW,
505 SK_PNMI_SENSOR_ENTRIES,
506 sizeof(SK_PNMI_SENSOR),
507 SK_PNMI_OFF(Sensor) + SK_PNMI_SEN_OFF(SensorErrorThresholdLow),
508 SK_PNMI_RO, SensorStat, 0},
509 {OID_SKGE_SENSOR_ERR_THRES_UPP,
510 SK_PNMI_SENSOR_ENTRIES,
511 sizeof(SK_PNMI_SENSOR),
512 SK_PNMI_OFF(Sensor) + SK_PNMI_SEN_OFF(SensorErrorThresholdHigh),
513 SK_PNMI_RO, SensorStat, 0},
514 {OID_SKGE_SENSOR_STATUS,
515 SK_PNMI_SENSOR_ENTRIES,
516 sizeof(SK_PNMI_SENSOR),
517 SK_PNMI_OFF(Sensor) + SK_PNMI_SEN_OFF(SensorStatus),
518 SK_PNMI_RO, SensorStat, 0},
519 {OID_SKGE_SENSOR_WAR_CTS,
520 SK_PNMI_SENSOR_ENTRIES,
521 sizeof(SK_PNMI_SENSOR),
522 SK_PNMI_OFF(Sensor) + SK_PNMI_SEN_OFF(SensorWarningCts),
523 SK_PNMI_RO, SensorStat, 0},
524 {OID_SKGE_SENSOR_ERR_CTS,
525 SK_PNMI_SENSOR_ENTRIES,
526 sizeof(SK_PNMI_SENSOR),
527 SK_PNMI_OFF(Sensor) + SK_PNMI_SEN_OFF(SensorErrorCts),
528 SK_PNMI_RO, SensorStat, 0},
529 {OID_SKGE_SENSOR_WAR_TIME,
530 SK_PNMI_SENSOR_ENTRIES,
531 sizeof(SK_PNMI_SENSOR),
532 SK_PNMI_OFF(Sensor) + SK_PNMI_SEN_OFF(SensorWarningTimestamp),
533 SK_PNMI_RO, SensorStat, 0},
534 {OID_SKGE_SENSOR_ERR_TIME,
535 SK_PNMI_SENSOR_ENTRIES,
536 sizeof(SK_PNMI_SENSOR),
537 SK_PNMI_OFF(Sensor) + SK_PNMI_SEN_OFF(SensorErrorTimestamp),
538 SK_PNMI_RO, SensorStat, 0},
539 {OID_SKGE_CHKSM_NUMBER,
540 1,
541 0,
542 SK_PNMI_MAI_OFF(ChecksumNumber),
543 SK_PNMI_RO, General, 0},
544 {OID_SKGE_CHKSM_RX_OK_CTS,
545 SKCS_NUM_PROTOCOLS,
546 sizeof(SK_PNMI_CHECKSUM),
547 SK_PNMI_OFF(Checksum) + SK_PNMI_CHK_OFF(ChecksumRxOkCts),
548 SK_PNMI_RO, CsumStat, 0},
549 {OID_SKGE_CHKSM_RX_UNABLE_CTS,
550 SKCS_NUM_PROTOCOLS,
551 sizeof(SK_PNMI_CHECKSUM),
552 SK_PNMI_OFF(Checksum) + SK_PNMI_CHK_OFF(ChecksumRxUnableCts),
553 SK_PNMI_RO, CsumStat, 0},
554 {OID_SKGE_CHKSM_RX_ERR_CTS,
555 SKCS_NUM_PROTOCOLS,
556 sizeof(SK_PNMI_CHECKSUM),
557 SK_PNMI_OFF(Checksum) + SK_PNMI_CHK_OFF(ChecksumRxErrCts),
558 SK_PNMI_RO, CsumStat, 0},
559 {OID_SKGE_CHKSM_TX_OK_CTS,
560 SKCS_NUM_PROTOCOLS,
561 sizeof(SK_PNMI_CHECKSUM),
562 SK_PNMI_OFF(Checksum) + SK_PNMI_CHK_OFF(ChecksumTxOkCts),
563 SK_PNMI_RO, CsumStat, 0},
564 {OID_SKGE_CHKSM_TX_UNABLE_CTS,
565 SKCS_NUM_PROTOCOLS,
566 sizeof(SK_PNMI_CHECKSUM),
567 SK_PNMI_OFF(Checksum) + SK_PNMI_CHK_OFF(ChecksumTxUnableCts),
568 SK_PNMI_RO, CsumStat, 0},
569 {OID_SKGE_STAT_TX,
570 SK_PNMI_MAC_ENTRIES,
571 sizeof(SK_PNMI_STAT),
572 SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTxOkCts),
573 SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX},
574 {OID_SKGE_STAT_TX_OCTETS,
575 SK_PNMI_MAC_ENTRIES,
576 sizeof(SK_PNMI_STAT),
577 SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTxOctetsOkCts),
578 SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_OCTET},
579 {OID_SKGE_STAT_TX_BROADCAST,
580 SK_PNMI_MAC_ENTRIES,
581 sizeof(SK_PNMI_STAT),
582 SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTxBroadcastOkCts),
583 SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_BROADCAST},
584 {OID_SKGE_STAT_TX_MULTICAST,
585 SK_PNMI_MAC_ENTRIES,
586 sizeof(SK_PNMI_STAT),
587 SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTxMulticastOkCts),
588 SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_MULTICAST},
589 {OID_SKGE_STAT_TX_UNICAST,
590 SK_PNMI_MAC_ENTRIES,
591 sizeof(SK_PNMI_STAT),
592 SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTxUnicastOkCts),
593 SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_UNICAST},
594 {OID_SKGE_STAT_TX_LONGFRAMES,
595 SK_PNMI_MAC_ENTRIES,
596 sizeof(SK_PNMI_STAT),
597 SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTxLongFramesCts),
598 SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_LONGFRAMES},
599 {OID_SKGE_STAT_TX_BURST,
600 SK_PNMI_MAC_ENTRIES,
601 sizeof(SK_PNMI_STAT),
602 SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTxBurstCts),
603 SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_BURST},
604 {OID_SKGE_STAT_TX_PFLOWC,
605 SK_PNMI_MAC_ENTRIES,
606 sizeof(SK_PNMI_STAT),
607 SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTxPauseMacCtrlCts),
608 SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_PMACC},
609 {OID_SKGE_STAT_TX_FLOWC,
610 SK_PNMI_MAC_ENTRIES,
611 sizeof(SK_PNMI_STAT),
612 SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTxMacCtrlCts),
613 SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_MACC},
614 {OID_SKGE_STAT_TX_SINGLE_COL,
615 SK_PNMI_MAC_ENTRIES,
616 sizeof(SK_PNMI_STAT),
617 SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTxSingleCollisionCts),
618 SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_SINGLE_COL},
619 {OID_SKGE_STAT_TX_MULTI_COL,
620 SK_PNMI_MAC_ENTRIES,
621 sizeof(SK_PNMI_STAT),
622 SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTxMultipleCollisionCts),
623 SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_MULTI_COL},
624 {OID_SKGE_STAT_TX_EXCESS_COL,
625 SK_PNMI_MAC_ENTRIES,
626 sizeof(SK_PNMI_STAT),
627 SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTxExcessiveCollisionCts),
628 SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_EXCESS_COL},
629 {OID_SKGE_STAT_TX_LATE_COL,
630 SK_PNMI_MAC_ENTRIES,
631 sizeof(SK_PNMI_STAT),
632 SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTxLateCollisionCts),
633 SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_LATE_COL},
634 {OID_SKGE_STAT_TX_DEFFERAL,
635 SK_PNMI_MAC_ENTRIES,
636 sizeof(SK_PNMI_STAT),
637 SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTxDeferralCts),
638 SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_DEFFERAL},
639 {OID_SKGE_STAT_TX_EXCESS_DEF,
640 SK_PNMI_MAC_ENTRIES,
641 sizeof(SK_PNMI_STAT),
642 SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTxExcessiveDeferralCts),
643 SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_EXCESS_DEF},
644 {OID_SKGE_STAT_TX_UNDERRUN,
645 SK_PNMI_MAC_ENTRIES,
646 sizeof(SK_PNMI_STAT),
647 SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTxFifoUnderrunCts),
648 SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_UNDERRUN},
649 {OID_SKGE_STAT_TX_CARRIER,
650 SK_PNMI_MAC_ENTRIES,
651 sizeof(SK_PNMI_STAT),
652 SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTxCarrierCts),
653 SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_CARRIER},
654/* {OID_SKGE_STAT_TX_UTIL,
655 SK_PNMI_MAC_ENTRIES,
656 sizeof(SK_PNMI_STAT),
657 SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTxUtilization),
658 SK_PNMI_RO, MacPrivateStat, (SK_U16)(-1)}, */
659 {OID_SKGE_STAT_TX_64,
660 SK_PNMI_MAC_ENTRIES,
661 sizeof(SK_PNMI_STAT),
662 SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTx64Cts),
663 SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_64},
664 {OID_SKGE_STAT_TX_127,
665 SK_PNMI_MAC_ENTRIES,
666 sizeof(SK_PNMI_STAT),
667 SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTx127Cts),
668 SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_127},
669 {OID_SKGE_STAT_TX_255,
670 SK_PNMI_MAC_ENTRIES,
671 sizeof(SK_PNMI_STAT),
672 SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTx255Cts),
673 SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_255},
674 {OID_SKGE_STAT_TX_511,
675 SK_PNMI_MAC_ENTRIES,
676 sizeof(SK_PNMI_STAT),
677 SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTx511Cts),
678 SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_511},
679 {OID_SKGE_STAT_TX_1023,
680 SK_PNMI_MAC_ENTRIES,
681 sizeof(SK_PNMI_STAT),
682 SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTx1023Cts),
683 SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_1023},
684 {OID_SKGE_STAT_TX_MAX,
685 SK_PNMI_MAC_ENTRIES,
686 sizeof(SK_PNMI_STAT),
687 SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTxMaxCts),
688 SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_MAX},
689 {OID_SKGE_STAT_TX_SYNC,
690 SK_PNMI_MAC_ENTRIES,
691 sizeof(SK_PNMI_STAT),
692 SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTxSyncCts),
693 SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_SYNC},
694 {OID_SKGE_STAT_TX_SYNC_OCTETS,
695 SK_PNMI_MAC_ENTRIES,
696 sizeof(SK_PNMI_STAT),
697 SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatTxSyncOctetsCts),
698 SK_PNMI_RO, MacPrivateStat, SK_PNMI_HTX_SYNC_OCTET},
699 {OID_SKGE_STAT_RX,
700 SK_PNMI_MAC_ENTRIES,
701 sizeof(SK_PNMI_STAT),
702 SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxOkCts),
703 SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX},
704 {OID_SKGE_STAT_RX_OCTETS,
705 SK_PNMI_MAC_ENTRIES,
706 sizeof(SK_PNMI_STAT),
707 SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxOctetsOkCts),
708 SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_OCTET},
709 {OID_SKGE_STAT_RX_BROADCAST,
710 SK_PNMI_MAC_ENTRIES,
711 sizeof(SK_PNMI_STAT),
712 SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxBroadcastOkCts),
713 SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_BROADCAST},
714 {OID_SKGE_STAT_RX_MULTICAST,
715 SK_PNMI_MAC_ENTRIES,
716 sizeof(SK_PNMI_STAT),
717 SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxMulticastOkCts),
718 SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_MULTICAST},
719 {OID_SKGE_STAT_RX_UNICAST,
720 SK_PNMI_MAC_ENTRIES,
721 sizeof(SK_PNMI_STAT),
722 SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxUnicastOkCts),
723 SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_UNICAST},
724 {OID_SKGE_STAT_RX_LONGFRAMES,
725 SK_PNMI_MAC_ENTRIES,
726 sizeof(SK_PNMI_STAT),
727 SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxLongFramesCts),
728 SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_LONGFRAMES},
729 {OID_SKGE_STAT_RX_PFLOWC,
730 SK_PNMI_MAC_ENTRIES,
731 sizeof(SK_PNMI_STAT),
732 SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxPauseMacCtrlCts),
733 SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_PMACC},
734 {OID_SKGE_STAT_RX_FLOWC,
735 SK_PNMI_MAC_ENTRIES,
736 sizeof(SK_PNMI_STAT),
737 SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxMacCtrlCts),
738 SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_MACC},
739 {OID_SKGE_STAT_RX_PFLOWC_ERR,
740 SK_PNMI_MAC_ENTRIES,
741 sizeof(SK_PNMI_STAT),
742 SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxPauseMacCtrlErrorCts),
743 SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_PMACC_ERR},
744 {OID_SKGE_STAT_RX_FLOWC_UNKWN,
745 SK_PNMI_MAC_ENTRIES,
746 sizeof(SK_PNMI_STAT),
747 SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxMacCtrlUnknownCts),
748 SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_MACC_UNKWN},
749 {OID_SKGE_STAT_RX_BURST,
750 SK_PNMI_MAC_ENTRIES,
751 sizeof(SK_PNMI_STAT),
752 SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxBurstCts),
753 SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_BURST},
754 {OID_SKGE_STAT_RX_MISSED,
755 SK_PNMI_MAC_ENTRIES,
756 sizeof(SK_PNMI_STAT),
757 SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxMissedCts),
758 SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_MISSED},
759 {OID_SKGE_STAT_RX_FRAMING,
760 SK_PNMI_MAC_ENTRIES,
761 sizeof(SK_PNMI_STAT),
762 SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxFramingCts),
763 SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_FRAMING},
764 {OID_SKGE_STAT_RX_OVERFLOW,
765 SK_PNMI_MAC_ENTRIES,
766 sizeof(SK_PNMI_STAT),
767 SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxFifoOverflowCts),
768 SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_OVERFLOW},
769 {OID_SKGE_STAT_RX_JABBER,
770 SK_PNMI_MAC_ENTRIES,
771 sizeof(SK_PNMI_STAT),
772 SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxJabberCts),
773 SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_JABBER},
774 {OID_SKGE_STAT_RX_CARRIER,
775 SK_PNMI_MAC_ENTRIES,
776 sizeof(SK_PNMI_STAT),
777 SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxCarrierCts),
778 SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_CARRIER},
779 {OID_SKGE_STAT_RX_IR_LENGTH,
780 SK_PNMI_MAC_ENTRIES,
781 sizeof(SK_PNMI_STAT),
782 SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxIRLengthCts),
783 SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_IRLENGTH},
784 {OID_SKGE_STAT_RX_SYMBOL,
785 SK_PNMI_MAC_ENTRIES,
786 sizeof(SK_PNMI_STAT),
787 SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxSymbolCts),
788 SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_SYMBOL},
789 {OID_SKGE_STAT_RX_SHORTS,
790 SK_PNMI_MAC_ENTRIES,
791 sizeof(SK_PNMI_STAT),
792 SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxShortsCts),
793 SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_SHORTS},
794 {OID_SKGE_STAT_RX_RUNT,
795 SK_PNMI_MAC_ENTRIES,
796 sizeof(SK_PNMI_STAT),
797 SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxRuntCts),
798 SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_RUNT},
799 {OID_SKGE_STAT_RX_CEXT,
800 SK_PNMI_MAC_ENTRIES,
801 sizeof(SK_PNMI_STAT),
802 SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxCextCts),
803 SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_CEXT},
804 {OID_SKGE_STAT_RX_TOO_LONG,
805 SK_PNMI_MAC_ENTRIES,
806 sizeof(SK_PNMI_STAT),
807 SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxTooLongCts),
808 SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_TOO_LONG},
809 {OID_SKGE_STAT_RX_FCS,
810 SK_PNMI_MAC_ENTRIES,
811 sizeof(SK_PNMI_STAT),
812 SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxFcsCts),
813 SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_FCS},
814/* {OID_SKGE_STAT_RX_UTIL,
815 SK_PNMI_MAC_ENTRIES,
816 sizeof(SK_PNMI_STAT),
817 SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxUtilization),
818 SK_PNMI_RO, MacPrivateStat, (SK_U16)(-1)}, */
819 {OID_SKGE_STAT_RX_64,
820 SK_PNMI_MAC_ENTRIES,
821 sizeof(SK_PNMI_STAT),
822 SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRx64Cts),
823 SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_64},
824 {OID_SKGE_STAT_RX_127,
825 SK_PNMI_MAC_ENTRIES,
826 sizeof(SK_PNMI_STAT),
827 SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRx127Cts),
828 SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_127},
829 {OID_SKGE_STAT_RX_255,
830 SK_PNMI_MAC_ENTRIES,
831 sizeof(SK_PNMI_STAT),
832 SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRx255Cts),
833 SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_255},
834 {OID_SKGE_STAT_RX_511,
835 SK_PNMI_MAC_ENTRIES,
836 sizeof(SK_PNMI_STAT),
837 SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRx511Cts),
838 SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_511},
839 {OID_SKGE_STAT_RX_1023,
840 SK_PNMI_MAC_ENTRIES,
841 sizeof(SK_PNMI_STAT),
842 SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRx1023Cts),
843 SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_1023},
844 {OID_SKGE_STAT_RX_MAX,
845 SK_PNMI_MAC_ENTRIES,
846 sizeof(SK_PNMI_STAT),
847 SK_PNMI_OFF(Stat) + SK_PNMI_STA_OFF(StatRxMaxCts),
848 SK_PNMI_RO, MacPrivateStat, SK_PNMI_HRX_MAX},
849 {OID_SKGE_PHYS_CUR_ADDR,
850 SK_PNMI_MAC_ENTRIES,
851 sizeof(SK_PNMI_CONF),
852 SK_PNMI_OFF(Conf) + SK_PNMI_CNF_OFF(ConfMacCurrentAddr),
853 SK_PNMI_RW, Addr, 0},
854 {OID_SKGE_PHYS_FAC_ADDR,
855 SK_PNMI_MAC_ENTRIES,
856 sizeof(SK_PNMI_CONF),
857 SK_PNMI_OFF(Conf) + SK_PNMI_CNF_OFF(ConfMacFactoryAddr),
858 SK_PNMI_RO, Addr, 0},
859 {OID_SKGE_PMD,
860 SK_PNMI_MAC_ENTRIES,
861 sizeof(SK_PNMI_CONF),
862 SK_PNMI_OFF(Conf) + SK_PNMI_CNF_OFF(ConfPMD),
863 SK_PNMI_RO, MacPrivateConf, 0},
864 {OID_SKGE_CONNECTOR,
865 SK_PNMI_MAC_ENTRIES,
866 sizeof(SK_PNMI_CONF),
867 SK_PNMI_OFF(Conf) + SK_PNMI_CNF_OFF(ConfConnector),
868 SK_PNMI_RO, MacPrivateConf, 0},
869 {OID_SKGE_PHY_TYPE,
870 SK_PNMI_MAC_ENTRIES,
871 sizeof(SK_PNMI_CONF),
872 SK_PNMI_OFF(Conf) + SK_PNMI_CNF_OFF(ConfPhyType),
873 SK_PNMI_RO, MacPrivateConf, 0},
874 {OID_SKGE_LINK_CAP,
875 SK_PNMI_MAC_ENTRIES,
876 sizeof(SK_PNMI_CONF),
877 SK_PNMI_OFF(Conf) + SK_PNMI_CNF_OFF(ConfLinkCapability),
878 SK_PNMI_RO, MacPrivateConf, 0},
879 {OID_SKGE_LINK_MODE,
880 SK_PNMI_MAC_ENTRIES,
881 sizeof(SK_PNMI_CONF),
882 SK_PNMI_OFF(Conf) + SK_PNMI_CNF_OFF(ConfLinkMode),
883 SK_PNMI_RW, MacPrivateConf, 0},
884 {OID_SKGE_LINK_MODE_STATUS,
885 SK_PNMI_MAC_ENTRIES,
886 sizeof(SK_PNMI_CONF),
887 SK_PNMI_OFF(Conf) + SK_PNMI_CNF_OFF(ConfLinkModeStatus),
888 SK_PNMI_RO, MacPrivateConf, 0},
889 {OID_SKGE_LINK_STATUS,
890 SK_PNMI_MAC_ENTRIES,
891 sizeof(SK_PNMI_CONF),
892 SK_PNMI_OFF(Conf) + SK_PNMI_CNF_OFF(ConfLinkStatus),
893 SK_PNMI_RO, MacPrivateConf, 0},
894 {OID_SKGE_FLOWCTRL_CAP,
895 SK_PNMI_MAC_ENTRIES,
896 sizeof(SK_PNMI_CONF),
897 SK_PNMI_OFF(Conf) + SK_PNMI_CNF_OFF(ConfFlowCtrlCapability),
898 SK_PNMI_RO, MacPrivateConf, 0},
899 {OID_SKGE_FLOWCTRL_MODE,
900 SK_PNMI_MAC_ENTRIES,
901 sizeof(SK_PNMI_CONF),
902 SK_PNMI_OFF(Conf) + SK_PNMI_CNF_OFF(ConfFlowCtrlMode),
903 SK_PNMI_RW, MacPrivateConf, 0},
904 {OID_SKGE_FLOWCTRL_STATUS,
905 SK_PNMI_MAC_ENTRIES,
906 sizeof(SK_PNMI_CONF),
907 SK_PNMI_OFF(Conf) + SK_PNMI_CNF_OFF(ConfFlowCtrlStatus),
908 SK_PNMI_RO, MacPrivateConf, 0},
909 {OID_SKGE_PHY_OPERATION_CAP,
910 SK_PNMI_MAC_ENTRIES,
911 sizeof(SK_PNMI_CONF),
912 SK_PNMI_OFF(Conf) + SK_PNMI_CNF_OFF(ConfPhyOperationCapability),
913 SK_PNMI_RO, MacPrivateConf, 0},
914 {OID_SKGE_PHY_OPERATION_MODE,
915 SK_PNMI_MAC_ENTRIES,
916 sizeof(SK_PNMI_CONF),
917 SK_PNMI_OFF(Conf) + SK_PNMI_CNF_OFF(ConfPhyOperationMode),
918 SK_PNMI_RW, MacPrivateConf, 0},
919 {OID_SKGE_PHY_OPERATION_STATUS,
920 SK_PNMI_MAC_ENTRIES,
921 sizeof(SK_PNMI_CONF),
922 SK_PNMI_OFF(Conf) + SK_PNMI_CNF_OFF(ConfPhyOperationStatus),
923 SK_PNMI_RO, MacPrivateConf, 0},
924 {OID_SKGE_SPEED_CAP,
925 SK_PNMI_MAC_ENTRIES,
926 sizeof(SK_PNMI_CONF),
927 SK_PNMI_OFF(Conf) + SK_PNMI_CNF_OFF(ConfSpeedCapability),
928 SK_PNMI_RO, MacPrivateConf, 0},
929 {OID_SKGE_SPEED_MODE,
930 SK_PNMI_MAC_ENTRIES,
931 sizeof(SK_PNMI_CONF),
932 SK_PNMI_OFF(Conf) + SK_PNMI_CNF_OFF(ConfSpeedMode),
933 SK_PNMI_RW, MacPrivateConf, 0},
934 {OID_SKGE_SPEED_STATUS,
935 SK_PNMI_MAC_ENTRIES,
936 sizeof(SK_PNMI_CONF),
937 SK_PNMI_OFF(Conf) + SK_PNMI_CNF_OFF(ConfSpeedStatus),
938 SK_PNMI_RO, MacPrivateConf, 0},
939 {OID_SKGE_TRAP,
940 1,
941 0,
942 SK_PNMI_MAI_OFF(Trap),
943 SK_PNMI_RO, General, 0},
944 {OID_SKGE_TRAP_NUMBER,
945 1,
946 0,
947 SK_PNMI_MAI_OFF(TrapNumber),
948 SK_PNMI_RO, General, 0},
949 {OID_SKGE_RLMT_MODE,
950 1,
951 0,
952 SK_PNMI_MAI_OFF(RlmtMode),
953 SK_PNMI_RW, Rlmt, 0},
954 {OID_SKGE_RLMT_PORT_NUMBER,
955 1,
956 0,
957 SK_PNMI_MAI_OFF(RlmtPortNumber),
958 SK_PNMI_RO, Rlmt, 0},
959 {OID_SKGE_RLMT_PORT_ACTIVE,
960 1,
961 0,
962 SK_PNMI_MAI_OFF(RlmtPortActive),
963 SK_PNMI_RO, Rlmt, 0},
964 {OID_SKGE_RLMT_PORT_PREFERRED,
965 1,
966 0,
967 SK_PNMI_MAI_OFF(RlmtPortPreferred),
968 SK_PNMI_RW, Rlmt, 0},
969 {OID_SKGE_RLMT_CHANGE_CTS,
970 1,
971 0,
972 SK_PNMI_MAI_OFF(RlmtChangeCts),
973 SK_PNMI_RO, Rlmt, 0},
974 {OID_SKGE_RLMT_CHANGE_TIME,
975 1,
976 0,
977 SK_PNMI_MAI_OFF(RlmtChangeTime),
978 SK_PNMI_RO, Rlmt, 0},
979 {OID_SKGE_RLMT_CHANGE_ESTIM,
980 1,
981 0,
982 SK_PNMI_MAI_OFF(RlmtChangeEstimate),
983 SK_PNMI_RO, Rlmt, 0},
984 {OID_SKGE_RLMT_CHANGE_THRES,
985 1,
986 0,
987 SK_PNMI_MAI_OFF(RlmtChangeThreshold),
988 SK_PNMI_RW, Rlmt, 0},
989 {OID_SKGE_RLMT_PORT_INDEX,
990 SK_PNMI_MAC_ENTRIES,
991 sizeof(SK_PNMI_RLMT),
992 SK_PNMI_OFF(Rlmt) + SK_PNMI_RLM_OFF(RlmtIndex),
993 SK_PNMI_RO, RlmtStat, 0},
994 {OID_SKGE_RLMT_STATUS,
995 SK_PNMI_MAC_ENTRIES,
996 sizeof(SK_PNMI_RLMT),
997 SK_PNMI_OFF(Rlmt) + SK_PNMI_RLM_OFF(RlmtStatus),
998 SK_PNMI_RO, RlmtStat, 0},
999 {OID_SKGE_RLMT_TX_HELLO_CTS,
1000 SK_PNMI_MAC_ENTRIES,
1001 sizeof(SK_PNMI_RLMT),
1002 SK_PNMI_OFF(Rlmt) + SK_PNMI_RLM_OFF(RlmtTxHelloCts),
1003 SK_PNMI_RO, RlmtStat, 0},
1004 {OID_SKGE_RLMT_RX_HELLO_CTS,
1005 SK_PNMI_MAC_ENTRIES,
1006 sizeof(SK_PNMI_RLMT),
1007 SK_PNMI_OFF(Rlmt) + SK_PNMI_RLM_OFF(RlmtRxHelloCts),
1008 SK_PNMI_RO, RlmtStat, 0},
1009 {OID_SKGE_RLMT_TX_SP_REQ_CTS,
1010 SK_PNMI_MAC_ENTRIES,
1011 sizeof(SK_PNMI_RLMT),
1012 SK_PNMI_OFF(Rlmt) + SK_PNMI_RLM_OFF(RlmtTxSpHelloReqCts),
1013 SK_PNMI_RO, RlmtStat, 0},
1014 {OID_SKGE_RLMT_RX_SP_CTS,
1015 SK_PNMI_MAC_ENTRIES,
1016 sizeof(SK_PNMI_RLMT),
1017 SK_PNMI_OFF(Rlmt) + SK_PNMI_RLM_OFF(RlmtRxSpHelloCts),
1018 SK_PNMI_RO, RlmtStat, 0},
1019 {OID_SKGE_RLMT_MONITOR_NUMBER,
1020 1,
1021 0,
1022 SK_PNMI_MAI_OFF(RlmtMonitorNumber),
1023 SK_PNMI_RO, General, 0},
1024 {OID_SKGE_RLMT_MONITOR_INDEX,
1025 SK_PNMI_MONITOR_ENTRIES,
1026 sizeof(SK_PNMI_RLMT_MONITOR),
1027 SK_PNMI_OFF(RlmtMonitor) + SK_PNMI_MON_OFF(RlmtMonitorIndex),
1028 SK_PNMI_RO, Monitor, 0},
1029 {OID_SKGE_RLMT_MONITOR_ADDR,
1030 SK_PNMI_MONITOR_ENTRIES,
1031 sizeof(SK_PNMI_RLMT_MONITOR),
1032 SK_PNMI_OFF(RlmtMonitor) + SK_PNMI_MON_OFF(RlmtMonitorAddr),
1033 SK_PNMI_RO, Monitor, 0},
1034 {OID_SKGE_RLMT_MONITOR_ERRS,
1035 SK_PNMI_MONITOR_ENTRIES,
1036 sizeof(SK_PNMI_RLMT_MONITOR),
1037 SK_PNMI_OFF(RlmtMonitor) + SK_PNMI_MON_OFF(RlmtMonitorErrorCts),
1038 SK_PNMI_RO, Monitor, 0},
1039 {OID_SKGE_RLMT_MONITOR_TIMESTAMP,
1040 SK_PNMI_MONITOR_ENTRIES,
1041 sizeof(SK_PNMI_RLMT_MONITOR),
1042 SK_PNMI_OFF(RlmtMonitor) + SK_PNMI_MON_OFF(RlmtMonitorTimestamp),
1043 SK_PNMI_RO, Monitor, 0},
1044 {OID_SKGE_RLMT_MONITOR_ADMIN,
1045 SK_PNMI_MONITOR_ENTRIES,
1046 sizeof(SK_PNMI_RLMT_MONITOR),
1047 SK_PNMI_OFF(RlmtMonitor) + SK_PNMI_MON_OFF(RlmtMonitorAdmin),
1048 SK_PNMI_RW, Monitor, 0},
1049 {OID_SKGE_MTU,
1050 1,
1051 0,
1052 SK_PNMI_MAI_OFF(MtuSize),
1053 SK_PNMI_RW, MacPrivateConf, 0},
1054 {OID_SKGE_VCT_GET,
1055 0,
1056 0,
1057 0,
1058 SK_PNMI_RO, Vct, 0},
1059 {OID_SKGE_VCT_SET,
1060 0,
1061 0,
1062 0,
1063 SK_PNMI_WO, Vct, 0},
1064 {OID_SKGE_VCT_STATUS,
1065 0,
1066 0,
1067 0,
1068 SK_PNMI_RO, Vct, 0},
1069 {OID_SKGE_BOARDLEVEL,
1070 0,
1071 0,
1072 0,
1073 SK_PNMI_RO, General, 0},
1074};
1075
diff --git a/drivers/net/sk98lin/skgepnmi.c b/drivers/net/sk98lin/skgepnmi.c
deleted file mode 100644
index 876bb2158fa6..000000000000
--- a/drivers/net/sk98lin/skgepnmi.c
+++ /dev/null
@@ -1,8198 +0,0 @@
1/*****************************************************************************
2 *
3 * Name: skgepnmi.c
4 * Project: GEnesis, PCI Gigabit Ethernet Adapter
5 * Version: $Revision: 1.111 $
6 * Date: $Date: 2003/09/15 13:35:35 $
7 * Purpose: Private Network Management Interface
8 *
9 ****************************************************************************/
10
11/******************************************************************************
12 *
13 * (C)Copyright 1998-2002 SysKonnect GmbH.
14 * (C)Copyright 2002-2003 Marvell.
15 *
16 * This program is free software; you can redistribute it and/or modify
17 * it under the terms of the GNU General Public License as published by
18 * the Free Software Foundation; either version 2 of the License, or
19 * (at your option) any later version.
20 *
21 * The information in this file is provided "AS IS" without warranty.
22 *
23 ******************************************************************************/
24
25
26#ifndef _lint
27static const char SysKonnectFileId[] =
28 "@(#) $Id: skgepnmi.c,v 1.111 2003/09/15 13:35:35 tschilli Exp $ (C) Marvell.";
29#endif /* !_lint */
30
31#include "h/skdrv1st.h"
32#include "h/sktypes.h"
33#include "h/xmac_ii.h"
34#include "h/skdebug.h"
35#include "h/skqueue.h"
36#include "h/skgepnmi.h"
37#include "h/skgesirq.h"
38#include "h/skcsum.h"
39#include "h/skvpd.h"
40#include "h/skgehw.h"
41#include "h/skgeinit.h"
42#include "h/skdrv2nd.h"
43#include "h/skgepnm2.h"
44#ifdef SK_POWER_MGMT
45#include "h/skgepmgt.h"
46#endif
47/* defines *******************************************************************/
48
49#ifndef DEBUG
50#define PNMI_STATIC static
51#else /* DEBUG */
52#define PNMI_STATIC
53#endif /* DEBUG */
54
55/*
56 * Public Function prototypes
57 */
58int SkPnmiInit(SK_AC *pAC, SK_IOC IoC, int level);
59int SkPnmiSetVar(SK_AC *pAC, SK_IOC IoC, SK_U32 Id, void *pBuf,
60 unsigned int *pLen, SK_U32 Instance, SK_U32 NetIndex);
61int SkPnmiGetStruct(SK_AC *pAC, SK_IOC IoC, void *pBuf,
62 unsigned int *pLen, SK_U32 NetIndex);
63int SkPnmiPreSetStruct(SK_AC *pAC, SK_IOC IoC, void *pBuf,
64 unsigned int *pLen, SK_U32 NetIndex);
65int SkPnmiSetStruct(SK_AC *pAC, SK_IOC IoC, void *pBuf,
66 unsigned int *pLen, SK_U32 NetIndex);
67int SkPnmiEvent(SK_AC *pAC, SK_IOC IoC, SK_U32 Event, SK_EVPARA Param);
68int SkPnmiGenIoctl(SK_AC *pAC, SK_IOC IoC, void * pBuf,
69 unsigned int * pLen, SK_U32 NetIndex);
70
71
72/*
73 * Private Function prototypes
74 */
75
76PNMI_STATIC SK_U8 CalculateLinkModeStatus(SK_AC *pAC, SK_IOC IoC, unsigned int
77 PhysPortIndex);
78PNMI_STATIC SK_U8 CalculateLinkStatus(SK_AC *pAC, SK_IOC IoC, unsigned int
79 PhysPortIndex);
80PNMI_STATIC void CopyMac(char *pDst, SK_MAC_ADDR *pMac);
81PNMI_STATIC void CopyTrapQueue(SK_AC *pAC, char *pDstBuf);
82PNMI_STATIC SK_U64 GetPhysStatVal(SK_AC *pAC, SK_IOC IoC,
83 unsigned int PhysPortIndex, unsigned int StatIndex);
84PNMI_STATIC SK_U64 GetStatVal(SK_AC *pAC, SK_IOC IoC, unsigned int LogPortIndex,
85 unsigned int StatIndex, SK_U32 NetIndex);
86PNMI_STATIC char* GetTrapEntry(SK_AC *pAC, SK_U32 TrapId, unsigned int Size);
87PNMI_STATIC void GetTrapQueueLen(SK_AC *pAC, unsigned int *pLen,
88 unsigned int *pEntries);
89PNMI_STATIC int GetVpdKeyArr(SK_AC *pAC, SK_IOC IoC, char *pKeyArr,
90 unsigned int KeyArrLen, unsigned int *pKeyNo);
91PNMI_STATIC int LookupId(SK_U32 Id);
92PNMI_STATIC int MacUpdate(SK_AC *pAC, SK_IOC IoC, unsigned int FirstMac,
93 unsigned int LastMac);
94PNMI_STATIC int PnmiStruct(SK_AC *pAC, SK_IOC IoC, int Action, char *pBuf,
95 unsigned int *pLen, SK_U32 NetIndex);
96PNMI_STATIC int PnmiVar(SK_AC *pAC, SK_IOC IoC, int Action, SK_U32 Id,
97 char *pBuf, unsigned int *pLen, SK_U32 Instance, SK_U32 NetIndex);
98PNMI_STATIC void QueueRlmtNewMacTrap(SK_AC *pAC, unsigned int ActiveMac);
99PNMI_STATIC void QueueRlmtPortTrap(SK_AC *pAC, SK_U32 TrapId,
100 unsigned int PortIndex);
101PNMI_STATIC void QueueSensorTrap(SK_AC *pAC, SK_U32 TrapId,
102 unsigned int SensorIndex);
103PNMI_STATIC void QueueSimpleTrap(SK_AC *pAC, SK_U32 TrapId);
104PNMI_STATIC void ResetCounter(SK_AC *pAC, SK_IOC IoC, SK_U32 NetIndex);
105PNMI_STATIC int RlmtUpdate(SK_AC *pAC, SK_IOC IoC, SK_U32 NetIndex);
106PNMI_STATIC int SirqUpdate(SK_AC *pAC, SK_IOC IoC);
107PNMI_STATIC void VirtualConf(SK_AC *pAC, SK_IOC IoC, SK_U32 Id, char *pBuf);
108PNMI_STATIC int Vct(SK_AC *pAC, SK_IOC IoC, int Action, SK_U32 Id, char *pBuf,
109 unsigned int *pLen, SK_U32 Instance, unsigned int TableIndex, SK_U32 NetIndex);
110PNMI_STATIC void CheckVctStatus(SK_AC *, SK_IOC, char *, SK_U32, SK_U32);
111
112/*
113 * Table to correlate OID with handler function and index to
114 * hardware register stored in StatAddress if applicable.
115 */
116#include "skgemib.c"
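/*
 * Editor's note: illustrative sketch, not part of the original sk98lin
 * sources. It shows the kind of linear scan an OID-to-table-index
 * lookup over IdTable amounts to; the driver's real LookupId()
 * (prototyped above) may be implemented differently. ID_TABLE_SIZE and
 * the IdTable .Id member are taken from their use elsewhere in this
 * file; the function name and the guard are hypothetical.
 */
#ifdef SK_PNMI_EXAMPLES /* hypothetical guard, never defined by the driver */
PNMI_STATIC int ExampleLookupOid(
SK_U32 Id)	/* OID to look up, e.g. OID_SKGE_MDB_VERSION */
{
	unsigned int TableIndex;

	for (TableIndex = 0; TableIndex < ID_TABLE_SIZE; TableIndex ++) {

		if (IdTable[TableIndex].Id == Id) {

			return ((int)TableIndex);
		}
	}

	/* OID not contained in the table */
	return (-1);
}
#endif /* SK_PNMI_EXAMPLES */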
117
118/* global variables **********************************************************/
119
120/*
121 * Overflow status register bit table and corresponding counter
122 * dependent on MAC type - the number relates to the size of overflow
123 * mask returned by the pFnMacOverflow function
124 */
125PNMI_STATIC const SK_U16 StatOvrflwBit[][SK_PNMI_MAC_TYPES] = {
126/* Bit0 */ { SK_PNMI_HTX, SK_PNMI_HTX_UNICAST},
127/* Bit1 */ { SK_PNMI_HTX_OCTETHIGH, SK_PNMI_HTX_BROADCAST},
128/* Bit2 */ { SK_PNMI_HTX_OCTETLOW, SK_PNMI_HTX_PMACC},
129/* Bit3 */ { SK_PNMI_HTX_BROADCAST, SK_PNMI_HTX_MULTICAST},
130/* Bit4 */ { SK_PNMI_HTX_MULTICAST, SK_PNMI_HTX_OCTETLOW},
131/* Bit5 */ { SK_PNMI_HTX_UNICAST, SK_PNMI_HTX_OCTETHIGH},
132/* Bit6 */ { SK_PNMI_HTX_LONGFRAMES, SK_PNMI_HTX_64},
133/* Bit7 */ { SK_PNMI_HTX_BURST, SK_PNMI_HTX_127},
134/* Bit8 */ { SK_PNMI_HTX_PMACC, SK_PNMI_HTX_255},
135/* Bit9 */ { SK_PNMI_HTX_MACC, SK_PNMI_HTX_511},
136/* Bit10 */ { SK_PNMI_HTX_SINGLE_COL, SK_PNMI_HTX_1023},
137/* Bit11 */ { SK_PNMI_HTX_MULTI_COL, SK_PNMI_HTX_MAX},
138/* Bit12 */ { SK_PNMI_HTX_EXCESS_COL, SK_PNMI_HTX_LONGFRAMES},
139/* Bit13 */ { SK_PNMI_HTX_LATE_COL, SK_PNMI_HTX_RESERVED},
140/* Bit14 */ { SK_PNMI_HTX_DEFFERAL, SK_PNMI_HTX_COL},
141/* Bit15 */ { SK_PNMI_HTX_EXCESS_DEF, SK_PNMI_HTX_LATE_COL},
142/* Bit16 */ { SK_PNMI_HTX_UNDERRUN, SK_PNMI_HTX_EXCESS_COL},
143/* Bit17 */ { SK_PNMI_HTX_CARRIER, SK_PNMI_HTX_MULTI_COL},
144/* Bit18 */ { SK_PNMI_HTX_UTILUNDER, SK_PNMI_HTX_SINGLE_COL},
145/* Bit19 */ { SK_PNMI_HTX_UTILOVER, SK_PNMI_HTX_UNDERRUN},
146/* Bit20 */ { SK_PNMI_HTX_64, SK_PNMI_HTX_RESERVED},
147/* Bit21 */ { SK_PNMI_HTX_127, SK_PNMI_HTX_RESERVED},
148/* Bit22 */ { SK_PNMI_HTX_255, SK_PNMI_HTX_RESERVED},
149/* Bit23 */ { SK_PNMI_HTX_511, SK_PNMI_HTX_RESERVED},
150/* Bit24 */ { SK_PNMI_HTX_1023, SK_PNMI_HTX_RESERVED},
151/* Bit25 */ { SK_PNMI_HTX_MAX, SK_PNMI_HTX_RESERVED},
152/* Bit26 */ { SK_PNMI_HTX_RESERVED, SK_PNMI_HTX_RESERVED},
153/* Bit27 */ { SK_PNMI_HTX_RESERVED, SK_PNMI_HTX_RESERVED},
154/* Bit28 */ { SK_PNMI_HTX_RESERVED, SK_PNMI_HTX_RESERVED},
155/* Bit29 */ { SK_PNMI_HTX_RESERVED, SK_PNMI_HTX_RESERVED},
156/* Bit30 */ { SK_PNMI_HTX_RESERVED, SK_PNMI_HTX_RESERVED},
157/* Bit31 */ { SK_PNMI_HTX_RESERVED, SK_PNMI_HTX_RESERVED},
158/* Bit32 */ { SK_PNMI_HRX, SK_PNMI_HRX_UNICAST},
159/* Bit33 */ { SK_PNMI_HRX_OCTETHIGH, SK_PNMI_HRX_BROADCAST},
160/* Bit34 */ { SK_PNMI_HRX_OCTETLOW, SK_PNMI_HRX_PMACC},
161/* Bit35 */ { SK_PNMI_HRX_BROADCAST, SK_PNMI_HRX_MULTICAST},
162/* Bit36 */ { SK_PNMI_HRX_MULTICAST, SK_PNMI_HRX_FCS},
163/* Bit37 */ { SK_PNMI_HRX_UNICAST, SK_PNMI_HRX_RESERVED},
164/* Bit38 */ { SK_PNMI_HRX_PMACC, SK_PNMI_HRX_OCTETLOW},
165/* Bit39 */ { SK_PNMI_HRX_MACC, SK_PNMI_HRX_OCTETHIGH},
166/* Bit40 */ { SK_PNMI_HRX_PMACC_ERR, SK_PNMI_HRX_BADOCTETLOW},
167/* Bit41 */ { SK_PNMI_HRX_MACC_UNKWN, SK_PNMI_HRX_BADOCTETHIGH},
168/* Bit42 */ { SK_PNMI_HRX_BURST, SK_PNMI_HRX_UNDERSIZE},
169/* Bit43 */ { SK_PNMI_HRX_MISSED, SK_PNMI_HRX_RUNT},
170/* Bit44 */ { SK_PNMI_HRX_FRAMING, SK_PNMI_HRX_64},
171/* Bit45 */ { SK_PNMI_HRX_OVERFLOW, SK_PNMI_HRX_127},
172/* Bit46 */ { SK_PNMI_HRX_JABBER, SK_PNMI_HRX_255},
173/* Bit47 */ { SK_PNMI_HRX_CARRIER, SK_PNMI_HRX_511},
174/* Bit48 */ { SK_PNMI_HRX_IRLENGTH, SK_PNMI_HRX_1023},
175/* Bit49 */ { SK_PNMI_HRX_SYMBOL, SK_PNMI_HRX_MAX},
176/* Bit50 */ { SK_PNMI_HRX_SHORTS, SK_PNMI_HRX_LONGFRAMES},
177/* Bit51 */ { SK_PNMI_HRX_RUNT, SK_PNMI_HRX_TOO_LONG},
178/* Bit52 */ { SK_PNMI_HRX_TOO_LONG, SK_PNMI_HRX_JABBER},
179/* Bit53 */ { SK_PNMI_HRX_FCS, SK_PNMI_HRX_RESERVED},
180/* Bit54 */ { SK_PNMI_HRX_RESERVED, SK_PNMI_HRX_OVERFLOW},
181/* Bit55 */ { SK_PNMI_HRX_CEXT, SK_PNMI_HRX_RESERVED},
182/* Bit56 */ { SK_PNMI_HRX_UTILUNDER, SK_PNMI_HRX_RESERVED},
183/* Bit57 */ { SK_PNMI_HRX_UTILOVER, SK_PNMI_HRX_RESERVED},
184/* Bit58 */ { SK_PNMI_HRX_64, SK_PNMI_HRX_RESERVED},
185/* Bit59 */ { SK_PNMI_HRX_127, SK_PNMI_HRX_RESERVED},
186/* Bit60 */ { SK_PNMI_HRX_255, SK_PNMI_HRX_RESERVED},
187/* Bit61 */ { SK_PNMI_HRX_511, SK_PNMI_HRX_RESERVED},
188/* Bit62 */ { SK_PNMI_HRX_1023, SK_PNMI_HRX_RESERVED},
189/* Bit63 */ { SK_PNMI_HRX_MAX, SK_PNMI_HRX_RESERVED}
190};
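/*
 * Editor's note: illustrative sketch, not part of the original sk98lin
 * sources. It shows how a 64 bit overflow mask (as returned by the
 * pFnMacOverflow function mentioned above) maps onto counter indices
 * via StatOvrflwBit[]; the SK_PNMI_EVT_SIRQ_OVERFLOW description later
 * in this file states that the upper 32 bit of the 64 bit software
 * counter gets incremented. The Counter[] parameter, the column
 * meaning (0 appears to be the XMAC, 1 the GMAC variant) and the
 * function itself are assumptions for illustration only.
 */
#ifdef SK_PNMI_EXAMPLES /* hypothetical guard, never defined by the driver */
PNMI_STATIC void ExampleApplyOverflowMask(
SK_U64 Mask,		/* Overflow mask of the hardware counters */
unsigned int MacTypeCol,	/* Column in StatOvrflwBit[], 0 or 1 */
SK_U64 Counter[])	/* Hypothetical software counters, SK_PNMI_MAX_IDX entries */
{
	unsigned int Bit;
	SK_U16 CounterIndex;

	for (Bit = 0; Bit < 64; Bit ++) {

		if ((Mask & (((SK_U64)1) << Bit)) == 0) {
			continue;
		}

		CounterIndex = StatOvrflwBit[Bit][MacTypeCol];
		if (CounterIndex == SK_PNMI_HTX_RESERVED ||
			CounterIndex == SK_PNMI_HRX_RESERVED) {
			continue;
		}

		/* Hardware counters are 32 bit wide: bump the high dword */
		Counter[CounterIndex] += (((SK_U64)1) << 32);
	}
}
#endif /* SK_PNMI_EXAMPLES */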
191
192/*
193 * Table for hardware register saving on resets and port switches
194 */
195PNMI_STATIC const SK_PNMI_STATADDR StatAddr[SK_PNMI_MAX_IDX][SK_PNMI_MAC_TYPES] = {
196 /* SK_PNMI_HTX */
197 {{XM_TXF_OK, SK_TRUE}, {0, SK_FALSE}},
198 /* SK_PNMI_HTX_OCTETHIGH */
199 {{XM_TXO_OK_HI, SK_TRUE}, {GM_TXO_OK_HI, SK_TRUE}},
200 /* SK_PNMI_HTX_OCTETLOW */
201 {{XM_TXO_OK_LO, SK_FALSE}, {GM_TXO_OK_LO, SK_FALSE}},
202 /* SK_PNMI_HTX_BROADCAST */
203 {{XM_TXF_BC_OK, SK_TRUE}, {GM_TXF_BC_OK, SK_TRUE}},
204 /* SK_PNMI_HTX_MULTICAST */
205 {{XM_TXF_MC_OK, SK_TRUE}, {GM_TXF_MC_OK, SK_TRUE}},
206 /* SK_PNMI_HTX_UNICAST */
207 {{XM_TXF_UC_OK, SK_TRUE}, {GM_TXF_UC_OK, SK_TRUE}},
208 /* SK_PNMI_HTX_BURST */
209 {{XM_TXE_BURST, SK_TRUE}, {0, SK_FALSE}},
210 /* SK_PNMI_HTX_PMACC */
211 {{XM_TXF_MPAUSE, SK_TRUE}, {GM_TXF_MPAUSE, SK_TRUE}},
212 /* SK_PNMI_HTX_MACC */
213 {{XM_TXF_MCTRL, SK_TRUE}, {0, SK_FALSE}},
214 /* SK_PNMI_HTX_COL */
215 {{0, SK_FALSE}, {GM_TXF_COL, SK_TRUE}},
216 /* SK_PNMI_HTX_SINGLE_COL */
217 {{XM_TXF_SNG_COL, SK_TRUE}, {GM_TXF_SNG_COL, SK_TRUE}},
218 /* SK_PNMI_HTX_MULTI_COL */
219 {{XM_TXF_MUL_COL, SK_TRUE}, {GM_TXF_MUL_COL, SK_TRUE}},
220 /* SK_PNMI_HTX_EXCESS_COL */
221 {{XM_TXF_ABO_COL, SK_TRUE}, {GM_TXF_ABO_COL, SK_TRUE}},
222 /* SK_PNMI_HTX_LATE_COL */
223 {{XM_TXF_LAT_COL, SK_TRUE}, {GM_TXF_LAT_COL, SK_TRUE}},
224 /* SK_PNMI_HTX_DEFFERAL */
225 {{XM_TXF_DEF, SK_TRUE}, {0, SK_FALSE}},
226 /* SK_PNMI_HTX_EXCESS_DEF */
227 {{XM_TXF_EX_DEF, SK_TRUE}, {0, SK_FALSE}},
228 /* SK_PNMI_HTX_UNDERRUN */
229 {{XM_TXE_FIFO_UR, SK_TRUE}, {GM_TXE_FIFO_UR, SK_TRUE}},
230 /* SK_PNMI_HTX_CARRIER */
231 {{XM_TXE_CS_ERR, SK_TRUE}, {0, SK_FALSE}},
232 /* SK_PNMI_HTX_UTILUNDER */
233 {{0, SK_FALSE}, {0, SK_FALSE}},
234 /* SK_PNMI_HTX_UTILOVER */
235 {{0, SK_FALSE}, {0, SK_FALSE}},
236 /* SK_PNMI_HTX_64 */
237 {{XM_TXF_64B, SK_TRUE}, {GM_TXF_64B, SK_TRUE}},
238 /* SK_PNMI_HTX_127 */
239 {{XM_TXF_127B, SK_TRUE}, {GM_TXF_127B, SK_TRUE}},
240 /* SK_PNMI_HTX_255 */
241 {{XM_TXF_255B, SK_TRUE}, {GM_TXF_255B, SK_TRUE}},
242 /* SK_PNMI_HTX_511 */
243 {{XM_TXF_511B, SK_TRUE}, {GM_TXF_511B, SK_TRUE}},
244 /* SK_PNMI_HTX_1023 */
245 {{XM_TXF_1023B, SK_TRUE}, {GM_TXF_1023B, SK_TRUE}},
246 /* SK_PNMI_HTX_MAX */
247 {{XM_TXF_MAX_SZ, SK_TRUE}, {GM_TXF_1518B, SK_TRUE}},
248 /* SK_PNMI_HTX_LONGFRAMES */
249 {{XM_TXF_LONG, SK_TRUE}, {GM_TXF_MAX_SZ, SK_TRUE}},
250 /* SK_PNMI_HTX_SYNC */
251 {{0, SK_FALSE}, {0, SK_FALSE}},
252 /* SK_PNMI_HTX_SYNC_OCTET */
253 {{0, SK_FALSE}, {0, SK_FALSE}},
254 /* SK_PNMI_HTX_RESERVED */
255 {{0, SK_FALSE}, {0, SK_FALSE}},
256 /* SK_PNMI_HRX */
257 {{XM_RXF_OK, SK_TRUE}, {0, SK_FALSE}},
258 /* SK_PNMI_HRX_OCTETHIGH */
259 {{XM_RXO_OK_HI, SK_TRUE}, {GM_RXO_OK_HI, SK_TRUE}},
260 /* SK_PNMI_HRX_OCTETLOW */
261 {{XM_RXO_OK_LO, SK_FALSE}, {GM_RXO_OK_LO, SK_FALSE}},
262 /* SK_PNMI_HRX_BADOCTETHIGH */
263 {{0, SK_FALSE}, {GM_RXO_ERR_HI, SK_TRUE}},
264 /* SK_PNMI_HRX_BADOCTETLOW */
265 {{0, SK_FALSE}, {GM_RXO_ERR_LO, SK_TRUE}},
266 /* SK_PNMI_HRX_BROADCAST */
267 {{XM_RXF_BC_OK, SK_TRUE}, {GM_RXF_BC_OK, SK_TRUE}},
268 /* SK_PNMI_HRX_MULTICAST */
269 {{XM_RXF_MC_OK, SK_TRUE}, {GM_RXF_MC_OK, SK_TRUE}},
270 /* SK_PNMI_HRX_UNICAST */
271 {{XM_RXF_UC_OK, SK_TRUE}, {GM_RXF_UC_OK, SK_TRUE}},
272 /* SK_PNMI_HRX_PMACC */
273 {{XM_RXF_MPAUSE, SK_TRUE}, {GM_RXF_MPAUSE, SK_TRUE}},
274 /* SK_PNMI_HRX_MACC */
275 {{XM_RXF_MCTRL, SK_TRUE}, {0, SK_FALSE}},
276 /* SK_PNMI_HRX_PMACC_ERR */
277 {{XM_RXF_INV_MP, SK_TRUE}, {0, SK_FALSE}},
278 /* SK_PNMI_HRX_MACC_UNKWN */
279 {{XM_RXF_INV_MOC, SK_TRUE}, {0, SK_FALSE}},
280 /* SK_PNMI_HRX_BURST */
281 {{XM_RXE_BURST, SK_TRUE}, {0, SK_FALSE}},
282 /* SK_PNMI_HRX_MISSED */
283 {{XM_RXE_FMISS, SK_TRUE}, {0, SK_FALSE}},
284 /* SK_PNMI_HRX_FRAMING */
285 {{XM_RXF_FRA_ERR, SK_TRUE}, {0, SK_FALSE}},
286 /* SK_PNMI_HRX_UNDERSIZE */
287 {{0, SK_FALSE}, {GM_RXF_SHT, SK_TRUE}},
288 /* SK_PNMI_HRX_OVERFLOW */
289 {{XM_RXE_FIFO_OV, SK_TRUE}, {GM_RXE_FIFO_OV, SK_TRUE}},
290 /* SK_PNMI_HRX_JABBER */
291 {{XM_RXF_JAB_PKT, SK_TRUE}, {GM_RXF_JAB_PKT, SK_TRUE}},
292 /* SK_PNMI_HRX_CARRIER */
293 {{XM_RXE_CAR_ERR, SK_TRUE}, {0, SK_FALSE}},
294 /* SK_PNMI_HRX_IRLENGTH */
295 {{XM_RXF_LEN_ERR, SK_TRUE}, {0, SK_FALSE}},
296 /* SK_PNMI_HRX_SYMBOL */
297 {{XM_RXE_SYM_ERR, SK_TRUE}, {0, SK_FALSE}},
298 /* SK_PNMI_HRX_SHORTS */
299 {{XM_RXE_SHT_ERR, SK_TRUE}, {0, SK_FALSE}},
300 /* SK_PNMI_HRX_RUNT */
301 {{XM_RXE_RUNT, SK_TRUE}, {GM_RXE_FRAG, SK_TRUE}},
302 /* SK_PNMI_HRX_TOO_LONG */
303 {{XM_RXF_LNG_ERR, SK_TRUE}, {GM_RXF_LNG_ERR, SK_TRUE}},
304 /* SK_PNMI_HRX_FCS */
305 {{XM_RXF_FCS_ERR, SK_TRUE}, {GM_RXF_FCS_ERR, SK_TRUE}},
306 /* SK_PNMI_HRX_CEXT */
307 {{XM_RXF_CEX_ERR, SK_TRUE}, {0, SK_FALSE}},
308 /* SK_PNMI_HRX_UTILUNDER */
309 {{0, SK_FALSE}, {0, SK_FALSE}},
310 /* SK_PNMI_HRX_UTILOVER */
311 {{0, SK_FALSE}, {0, SK_FALSE}},
312 /* SK_PNMI_HRX_64 */
313 {{XM_RXF_64B, SK_TRUE}, {GM_RXF_64B, SK_TRUE}},
314 /* SK_PNMI_HRX_127 */
315 {{XM_RXF_127B, SK_TRUE}, {GM_RXF_127B, SK_TRUE}},
316 /* SK_PNMI_HRX_255 */
317 {{XM_RXF_255B, SK_TRUE}, {GM_RXF_255B, SK_TRUE}},
318 /* SK_PNMI_HRX_511 */
319 {{XM_RXF_511B, SK_TRUE}, {GM_RXF_511B, SK_TRUE}},
320 /* SK_PNMI_HRX_1023 */
321 {{XM_RXF_1023B, SK_TRUE}, {GM_RXF_1023B, SK_TRUE}},
322 /* SK_PNMI_HRX_MAX */
323 {{XM_RXF_MAX_SZ, SK_TRUE}, {GM_RXF_1518B, SK_TRUE}},
324 /* SK_PNMI_HRX_LONGFRAMES */
325 {{0, SK_FALSE}, {GM_RXF_MAX_SZ, SK_TRUE}},
326 /* SK_PNMI_HRX_RESERVED */
327 {{0, SK_FALSE}, {0, SK_FALSE}}
328};
329
330
331/*****************************************************************************
332 *
333 * Public functions
334 *
335 */
336
337/*****************************************************************************
338 *
339 * SkPnmiInit - Init function of PNMI
340 *
341 * Description:
342 * SK_INIT_DATA: Initialises the data structures
343 * SK_INIT_IO: Resets the XMAC statistics, determines the device and
344 * connector type.
345 * SK_INIT_RUN: Starts a timer event for port switch per hour
346 * calculation.
347 *
348 * Returns:
349 * Always 0
350 */
351int SkPnmiInit(
352SK_AC *pAC, /* Pointer to adapter context */
353SK_IOC IoC, /* IO context handle */
354int Level) /* Initialization level */
355{
356 unsigned int PortMax; /* Number of ports */
357 unsigned int PortIndex; /* Current port index in loop */
358 SK_U16 Val16; /* Multiple purpose 16 bit variable */
359	SK_U8		Val8;		/* Multiple purpose 8 bit variable */
360 SK_EVPARA EventParam; /* Event struct for timer event */
361 SK_PNMI_VCT *pVctBackupData;
362
363
364 SK_DBG_MSG(pAC, SK_DBGMOD_PNMI, SK_DBGCAT_CTRL,
365 ("PNMI: SkPnmiInit: Called, level=%d\n", Level));
366
367 switch (Level) {
368
369 case SK_INIT_DATA:
370 SK_MEMSET((char *)&pAC->Pnmi, 0, sizeof(pAC->Pnmi));
371 pAC->Pnmi.TrapBufFree = SK_PNMI_TRAP_QUEUE_LEN;
372 pAC->Pnmi.StartUpTime = SK_PNMI_HUNDREDS_SEC(SkOsGetTime(pAC));
373 pAC->Pnmi.RlmtChangeThreshold = SK_PNMI_DEF_RLMT_CHG_THRES;
374 for (PortIndex = 0; PortIndex < SK_MAX_MACS; PortIndex ++) {
375
376 pAC->Pnmi.Port[PortIndex].ActiveFlag = SK_FALSE;
377 pAC->Pnmi.DualNetActiveFlag = SK_FALSE;
378 }
379
380#ifdef SK_PNMI_CHECK
381 if (SK_PNMI_MAX_IDX != SK_PNMI_CNT_NO) {
382
383 SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR049, SK_PNMI_ERR049MSG);
384
385 SK_DBG_MSG(pAC, SK_DBGMOD_PNMI, SK_DBGCAT_INIT | SK_DBGCAT_FATAL,
386 ("CounterOffset struct size (%d) differs from "
387 "SK_PNMI_MAX_IDX (%d)\n",
388 SK_PNMI_CNT_NO, SK_PNMI_MAX_IDX));
389 }
390
391#endif /* SK_PNMI_CHECK */
392 break;
393
394 case SK_INIT_IO:
395 /*
396 * Reset MAC counters
397 */
398 PortMax = pAC->GIni.GIMacsFound;
399
400 for (PortIndex = 0; PortIndex < PortMax; PortIndex ++) {
401
402 pAC->GIni.GIFunc.pFnMacResetCounter(pAC, IoC, PortIndex);
403 }
404
405 /* Initialize DSP variables for Vct() to 0xff => Never written! */
406 for (PortIndex = 0; PortIndex < PortMax; PortIndex ++) {
407 pAC->GIni.GP[PortIndex].PCableLen = 0xff;
408 pVctBackupData = &pAC->Pnmi.VctBackup[PortIndex];
409 pVctBackupData->PCableLen = 0xff;
410 }
411
412 /*
413 * Get pci bus speed
414 */
415 SK_IN16(IoC, B0_CTST, &Val16);
416 if ((Val16 & CS_BUS_CLOCK) == 0) {
417
418 pAC->Pnmi.PciBusSpeed = 33;
419 }
420 else {
421 pAC->Pnmi.PciBusSpeed = 66;
422 }
423
424 /*
425 * Get pci bus width
426 */
427 SK_IN16(IoC, B0_CTST, &Val16);
428 if ((Val16 & CS_BUS_SLOT_SZ) == 0) {
429
430 pAC->Pnmi.PciBusWidth = 32;
431 }
432 else {
433 pAC->Pnmi.PciBusWidth = 64;
434 }
435
436 /*
437 * Get chipset
438 */
439 switch (pAC->GIni.GIChipId) {
440 case CHIP_ID_GENESIS:
441 pAC->Pnmi.Chipset = SK_PNMI_CHIPSET_XMAC;
442 break;
443
444 case CHIP_ID_YUKON:
445 pAC->Pnmi.Chipset = SK_PNMI_CHIPSET_YUKON;
446 break;
447
448 default:
449 break;
450 }
451
452 /*
453 * Get PMD and DeviceType
454 */
455 SK_IN8(IoC, B2_PMD_TYP, &Val8);
456 switch (Val8) {
457 case 'S':
458 pAC->Pnmi.PMD = 3;
459 if (pAC->GIni.GIMacsFound > 1) {
460
461 pAC->Pnmi.DeviceType = 0x00020002;
462 }
463 else {
464 pAC->Pnmi.DeviceType = 0x00020001;
465 }
466 break;
467
468 case 'L':
469 pAC->Pnmi.PMD = 2;
470 if (pAC->GIni.GIMacsFound > 1) {
471
472 pAC->Pnmi.DeviceType = 0x00020004;
473 }
474 else {
475 pAC->Pnmi.DeviceType = 0x00020003;
476 }
477 break;
478
479 case 'C':
480 pAC->Pnmi.PMD = 4;
481 if (pAC->GIni.GIMacsFound > 1) {
482
483 pAC->Pnmi.DeviceType = 0x00020006;
484 }
485 else {
486 pAC->Pnmi.DeviceType = 0x00020005;
487 }
488 break;
489
490 case 'T':
491 pAC->Pnmi.PMD = 5;
492 if (pAC->GIni.GIMacsFound > 1) {
493
494 pAC->Pnmi.DeviceType = 0x00020008;
495 }
496 else {
497 pAC->Pnmi.DeviceType = 0x00020007;
498 }
499 break;
500
501 default :
502 pAC->Pnmi.PMD = 1;
503 pAC->Pnmi.DeviceType = 0;
504 break;
505 }
506
507 /*
508 * Get connector
509 */
510 SK_IN8(IoC, B2_CONN_TYP, &Val8);
511 switch (Val8) {
512 case 'C':
513 pAC->Pnmi.Connector = 2;
514 break;
515
516 case 'D':
517 pAC->Pnmi.Connector = 3;
518 break;
519
520 case 'F':
521 pAC->Pnmi.Connector = 4;
522 break;
523
524 case 'J':
525 pAC->Pnmi.Connector = 5;
526 break;
527
528 case 'V':
529 pAC->Pnmi.Connector = 6;
530 break;
531
532 default:
533 pAC->Pnmi.Connector = 1;
534 break;
535 }
536 break;
537
538 case SK_INIT_RUN:
539 /*
540 * Start timer for RLMT change counter
541 */
542 SK_MEMSET((char *)&EventParam, 0, sizeof(EventParam));
543 SkTimerStart(pAC, IoC, &pAC->Pnmi.RlmtChangeEstimate.EstTimer,
544 28125000, SKGE_PNMI, SK_PNMI_EVT_CHG_EST_TIMER,
545 EventParam);
546 break;
547
548 default:
549		break; /* Nothing to do */
550 }
551
552 return (0);
553}
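/*
 * Editor's note: illustrative sketch, not part of the original sk98lin
 * sources. It shows the three init levels described in the comment of
 * SkPnmiInit() above being driven in order by a caller; the wrapper
 * function and the guard are hypothetical, the levels and the
 * SkPnmiInit() signature are taken from this file.
 */
#ifdef SK_PNMI_EXAMPLES /* hypothetical guard, never defined by the driver */
PNMI_STATIC void ExamplePnmiBringUp(
SK_AC *pAC,	/* Pointer to adapter context */
SK_IOC IoC)	/* IO context handle */
{
	/* Level 1: clear and preset the PNMI data structures */
	(void)SkPnmiInit(pAC, IoC, SK_INIT_DATA);

	/* Level 2: reset the MAC counters, read bus/PMD/connector info */
	(void)SkPnmiInit(pAC, IoC, SK_INIT_IO);

	/* Level 3: start the RLMT change estimate timer */
	(void)SkPnmiInit(pAC, IoC, SK_INIT_RUN);
}
#endif /* SK_PNMI_EXAMPLES */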
554
555/*****************************************************************************
556 *
557 * SkPnmiGetVar - Retrieves the value of a single OID
558 *
559 * Description:
560 * Calls a general sub-function for all this stuff. If the instance
561 * -1 is passed, the values of all instances are returned in an
562 * array of values.
563 *
564 * Returns:
565 * SK_PNMI_ERR_OK The request was successfully performed
566 *	SK_PNMI_ERR_GENERAL      A general severe internal error occurred
567 * SK_PNMI_ERR_TOO_SHORT The passed buffer is too short to take
568 * the data.
569 * SK_PNMI_ERR_UNKNOWN_OID The requested OID is unknown
570 * SK_PNMI_ERR_UNKNOWN_INST The requested instance of the OID doesn't
571 * exist (e.g. port instance 3 on a two port
572 *	                         adapter).
573 */
574static int SkPnmiGetVar(
575SK_AC *pAC, /* Pointer to adapter context */
576SK_IOC IoC, /* IO context handle */
577SK_U32 Id, /* Object ID that is to be processed */
578void *pBuf, /* Buffer to which the management data will be copied */
579unsigned int *pLen, /* On call: buffer length. On return: used buffer */
580SK_U32 Instance, /* Instance (1..n) that is to be queried or -1 */
581SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */
582{
583 SK_DBG_MSG(pAC, SK_DBGMOD_PNMI, SK_DBGCAT_CTRL,
584 ("PNMI: SkPnmiGetVar: Called, Id=0x%x, BufLen=%d, Instance=%d, NetIndex=%d\n",
585 Id, *pLen, Instance, NetIndex));
586
587 return (PnmiVar(pAC, IoC, SK_PNMI_GET, Id, (char *)pBuf, pLen,
588 Instance, NetIndex));
589}
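/*
 * Editor's note: illustrative sketch, not part of the original sk98lin
 * sources. It shows a single-instance query through SkPnmiGetVar() as
 * described above; OID_SKGE_MDB_VERSION is taken from the IdTable (one
 * instance, handled by General). The wrapper function, the assumption
 * that the value fits into an SK_U32, and the guard are hypothetical.
 */
#ifdef SK_PNMI_EXAMPLES /* hypothetical guard, never defined by the driver */
PNMI_STATIC int ExampleGetMdbVersion(
SK_AC *pAC,	/* Pointer to adapter context */
SK_IOC IoC,	/* IO context handle */
SK_U32 *pVersion)	/* Receives the MDB version on success */
{
	unsigned int Len = sizeof(*pVersion);

	/* Instance 1, NetIndex 0 (single net mode) */
	return (SkPnmiGetVar(pAC, IoC, OID_SKGE_MDB_VERSION,
		(void *)pVersion, &Len, 1, 0));
}
#endif /* SK_PNMI_EXAMPLES */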
590
591/*****************************************************************************
592 *
593 * SkPnmiPreSetVar - Presets the value of a single OID
594 *
595 * Description:
596 * Calls a general sub-function for all this stuff. The preset does
597 * the same as a set, but returns just before finally setting the
598 *	new value. This is useful to check if a set might be successful.
599 *	If the instance -1 is passed, an array of values is expected and
600 * all instances of the OID will be set.
601 *
602 * Returns:
603 * SK_PNMI_ERR_OK The request was successfully performed.
604 *	SK_PNMI_ERR_GENERAL      A general severe internal error occurred.
605 * SK_PNMI_ERR_TOO_SHORT The passed buffer is too short to contain
606 * the correct data (e.g. a 32bit value is
607 * needed, but a 16 bit value was passed).
608 * SK_PNMI_ERR_BAD_VALUE The passed value is not in the valid
609 * value range.
610 * SK_PNMI_ERR_READ_ONLY The OID is read-only and cannot be set.
611 * SK_PNMI_ERR_UNKNOWN_OID The requested OID is unknown.
612 * SK_PNMI_ERR_UNKNOWN_INST The requested instance of the OID doesn't
613 * exist (e.g. port instance 3 on a two port
614 *	                         adapter).
615 */
616static int SkPnmiPreSetVar(
617SK_AC *pAC, /* Pointer to adapter context */
618SK_IOC IoC, /* IO context handle */
619SK_U32 Id, /* Object ID that is to be processed */
620void *pBuf, /* Buffer to which the management data will be copied */
621unsigned int *pLen, /* Total length of management data */
622SK_U32 Instance, /* Instance (1..n) that is to be set or -1 */
623SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */
624{
625 SK_DBG_MSG(pAC, SK_DBGMOD_PNMI, SK_DBGCAT_CTRL,
626 ("PNMI: SkPnmiPreSetVar: Called, Id=0x%x, BufLen=%d, Instance=%d, NetIndex=%d\n",
627 Id, *pLen, Instance, NetIndex));
628
629
630 return (PnmiVar(pAC, IoC, SK_PNMI_PRESET, Id, (char *)pBuf, pLen,
631 Instance, NetIndex));
632}
633
634/*****************************************************************************
635 *
636 * SkPnmiSetVar - Sets the value of a single OID
637 *
638 * Description:
639 *	Calls a general sub-function for all this stuff. In contrast to
640 *	the preset, the set does not return early but finally writes
641 *	the new value to the OID. If the instance -1 is passed, an
642 *	array of values is expected and all instances of the OID will
643 *	be set.
644 *
645 * Returns:
646 * SK_PNMI_ERR_OK The request was successfully performed.
647 *	SK_PNMI_ERR_GENERAL      A general severe internal error occurred.
648 * SK_PNMI_ERR_TOO_SHORT The passed buffer is too short to contain
649 * the correct data (e.g. a 32bit value is
650 * needed, but a 16 bit value was passed).
651 * SK_PNMI_ERR_BAD_VALUE The passed value is not in the valid
652 * value range.
653 * SK_PNMI_ERR_READ_ONLY The OID is read-only and cannot be set.
654 * SK_PNMI_ERR_UNKNOWN_OID The requested OID is unknown.
655 * SK_PNMI_ERR_UNKNOWN_INST The requested instance of the OID doesn't
656 * exist (e.g. port instance 3 on a two port
657 *	                         adapter).
658 */
659int SkPnmiSetVar(
660SK_AC *pAC, /* Pointer to adapter context */
661SK_IOC IoC, /* IO context handle */
662SK_U32 Id, /* Object ID that is to be processed */
663void *pBuf, /* Buffer to which the management data will be copied */
664unsigned int *pLen, /* Total length of management data */
665SK_U32 Instance, /* Instance (1..n) that is to be set or -1 */
666SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */
667{
668 SK_DBG_MSG(pAC, SK_DBGMOD_PNMI, SK_DBGCAT_CTRL,
669 ("PNMI: SkPnmiSetVar: Called, Id=0x%x, BufLen=%d, Instance=%d, NetIndex=%d\n",
670 Id, *pLen, Instance, NetIndex));
671
672 return (PnmiVar(pAC, IoC, SK_PNMI_SET, Id, (char *)pBuf, pLen,
673 Instance, NetIndex));
674}
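/*
 * Editor's note: illustrative sketch, not part of the original sk98lin
 * sources. It shows the preset-then-set pattern described above: the
 * preset validates the request and returns just before writing, the
 * set then commits the value. OID_SKGE_MTU is taken from the IdTable
 * (read-write, one instance); the wrapper, the assumption that the MTU
 * is passed as an SK_U32, and the guard are hypothetical.
 */
#ifdef SK_PNMI_EXAMPLES /* hypothetical guard, never defined by the driver */
PNMI_STATIC int ExampleSetMtu(
SK_AC *pAC,	/* Pointer to adapter context */
SK_IOC IoC,	/* IO context handle */
SK_U32 NewMtu)	/* New MTU value to be set */
{
	unsigned int Len = sizeof(NewMtu);
	int Ret;

	/* Dry run: check value range and access rights only */
	Ret = SkPnmiPreSetVar(pAC, IoC, OID_SKGE_MTU, (void *)&NewMtu,
		&Len, 1, 0);
	if (Ret != SK_PNMI_ERR_OK) {

		return (Ret);
	}

	/* Commit the new value */
	Len = sizeof(NewMtu);
	return (SkPnmiSetVar(pAC, IoC, OID_SKGE_MTU, (void *)&NewMtu,
		&Len, 1, 0));
}
#endif /* SK_PNMI_EXAMPLES */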
675
676/*****************************************************************************
677 *
678 * SkPnmiGetStruct - Retrieves the management database in SK_PNMI_STRUCT_DATA
679 *
680 * Description:
681 * Runs through the IdTable, queries the single OIDs and stores the
682 * returned data into the management database structure
683 * SK_PNMI_STRUCT_DATA. The offset of the OID in the structure
684 * is stored in the IdTable. The return value of the function will also
685 * be stored in SK_PNMI_STRUCT_DATA if the passed buffer has the
686 * minimum size of SK_PNMI_MIN_STRUCT_SIZE.
687 *
688 * Returns:
689 * SK_PNMI_ERR_OK The request was successfully performed
690 *	SK_PNMI_ERR_GENERAL      A general severe internal error occurred
691 * SK_PNMI_ERR_TOO_SHORT The passed buffer is too short to take
692 * the data.
693 * SK_PNMI_ERR_UNKNOWN_NET The requested NetIndex doesn't exist
694 */
695int SkPnmiGetStruct(
696SK_AC *pAC, /* Pointer to adapter context */
697SK_IOC IoC, /* IO context handle */
698void *pBuf, /* Buffer to which the management data will be copied. */
699unsigned int *pLen, /* Length of buffer */
700SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */
701{
702 int Ret;
703 unsigned int TableIndex;
704 unsigned int DstOffset;
705 unsigned int InstanceNo;
706 unsigned int InstanceCnt;
707 SK_U32 Instance;
708 unsigned int TmpLen;
709 char KeyArr[SK_PNMI_VPD_ENTRIES][SK_PNMI_VPD_KEY_SIZE];
710
711
712 SK_DBG_MSG(pAC, SK_DBGMOD_PNMI, SK_DBGCAT_CTRL,
713 ("PNMI: SkPnmiGetStruct: Called, BufLen=%d, NetIndex=%d\n",
714 *pLen, NetIndex));
715
716 if (*pLen < SK_PNMI_STRUCT_SIZE) {
717
718 if (*pLen >= SK_PNMI_MIN_STRUCT_SIZE) {
719
720 SK_PNMI_SET_STAT(pBuf, SK_PNMI_ERR_TOO_SHORT,
721 (SK_U32)(-1));
722 }
723
724 *pLen = SK_PNMI_STRUCT_SIZE;
725 return (SK_PNMI_ERR_TOO_SHORT);
726 }
727
728 /*
729 * Check NetIndex
730 */
731 if (NetIndex >= pAC->Rlmt.NumNets) {
732 return (SK_PNMI_ERR_UNKNOWN_NET);
733 }
734
735 /* Update statistic */
736 SK_PNMI_CHECKFLAGS("SkPnmiGetStruct: On call");
737
738 if ((Ret = MacUpdate(pAC, IoC, 0, pAC->GIni.GIMacsFound - 1)) !=
739 SK_PNMI_ERR_OK) {
740
741 SK_PNMI_SET_STAT(pBuf, Ret, (SK_U32)(-1));
742 *pLen = SK_PNMI_MIN_STRUCT_SIZE;
743 return (Ret);
744 }
745
746 if ((Ret = RlmtUpdate(pAC, IoC, NetIndex)) != SK_PNMI_ERR_OK) {
747
748 SK_PNMI_SET_STAT(pBuf, Ret, (SK_U32)(-1));
749 *pLen = SK_PNMI_MIN_STRUCT_SIZE;
750 return (Ret);
751 }
752
753 if ((Ret = SirqUpdate(pAC, IoC)) != SK_PNMI_ERR_OK) {
754
755 SK_PNMI_SET_STAT(pBuf, Ret, (SK_U32)(-1));
756 *pLen = SK_PNMI_MIN_STRUCT_SIZE;
757 return (Ret);
758 }
759
760 /*
761 * Increment semaphores to indicate that an update was
762 * already done
763 */
764 pAC->Pnmi.MacUpdatedFlag ++;
765 pAC->Pnmi.RlmtUpdatedFlag ++;
766 pAC->Pnmi.SirqUpdatedFlag ++;
767
768 /* Get vpd keys for instance calculation */
769 Ret = GetVpdKeyArr(pAC, IoC, &KeyArr[0][0], sizeof(KeyArr), &TmpLen);
770 if (Ret != SK_PNMI_ERR_OK) {
771
772 pAC->Pnmi.MacUpdatedFlag --;
773 pAC->Pnmi.RlmtUpdatedFlag --;
774 pAC->Pnmi.SirqUpdatedFlag --;
775
776 SK_PNMI_CHECKFLAGS("SkPnmiGetStruct: On return");
777 SK_PNMI_SET_STAT(pBuf, Ret, (SK_U32)(-1));
778 *pLen = SK_PNMI_MIN_STRUCT_SIZE;
779 return (SK_PNMI_ERR_GENERAL);
780 }
781
782 /* Retrieve values */
783 SK_MEMSET((char *)pBuf, 0, SK_PNMI_STRUCT_SIZE);
784 for (TableIndex = 0; TableIndex < ID_TABLE_SIZE; TableIndex ++) {
785
786 InstanceNo = IdTable[TableIndex].InstanceNo;
787 for (InstanceCnt = 1; InstanceCnt <= InstanceNo;
788 InstanceCnt ++) {
789
790 DstOffset = IdTable[TableIndex].Offset +
791 (InstanceCnt - 1) *
792 IdTable[TableIndex].StructSize;
793
794 /*
795 * For the VPD the instance is not an index number
796			 * but the key itself. Determine the VPD key to be
797			 * used from the instance counter.
798 */
799 if (IdTable[TableIndex].Id == OID_SKGE_VPD_KEY ||
800 IdTable[TableIndex].Id == OID_SKGE_VPD_VALUE ||
801 IdTable[TableIndex].Id == OID_SKGE_VPD_ACCESS ||
802 IdTable[TableIndex].Id == OID_SKGE_VPD_ACTION) {
803
804 SK_STRNCPY((char *)&Instance, KeyArr[InstanceCnt - 1], 4);
805 }
806 else {
807 Instance = (SK_U32)InstanceCnt;
808 }
809
810 TmpLen = *pLen - DstOffset;
811 Ret = IdTable[TableIndex].Func(pAC, IoC, SK_PNMI_GET,
812 IdTable[TableIndex].Id, (char *)pBuf +
813 DstOffset, &TmpLen, Instance, TableIndex, NetIndex);
814
815 /*
816 * An unknown instance error means that we reached
817 * the last instance of that variable. Proceed with
818 * the next OID in the table and ignore the return
819 * code.
820 */
821 if (Ret == SK_PNMI_ERR_UNKNOWN_INST) {
822
823 break;
824 }
825
826 if (Ret != SK_PNMI_ERR_OK) {
827
828 pAC->Pnmi.MacUpdatedFlag --;
829 pAC->Pnmi.RlmtUpdatedFlag --;
830 pAC->Pnmi.SirqUpdatedFlag --;
831
832 SK_PNMI_CHECKFLAGS("SkPnmiGetStruct: On return");
833 SK_PNMI_SET_STAT(pBuf, Ret, DstOffset);
834 *pLen = SK_PNMI_MIN_STRUCT_SIZE;
835 return (Ret);
836 }
837 }
838 }
839
840 pAC->Pnmi.MacUpdatedFlag --;
841 pAC->Pnmi.RlmtUpdatedFlag --;
842 pAC->Pnmi.SirqUpdatedFlag --;
843
844 *pLen = SK_PNMI_STRUCT_SIZE;
845 SK_PNMI_CHECKFLAGS("SkPnmiGetStruct: On return");
846 SK_PNMI_SET_STAT(pBuf, SK_PNMI_ERR_OK, (SK_U32)(-1));
847 return (SK_PNMI_ERR_OK);
848}
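/*
 * Editor's note: illustrative sketch, not part of the original sk98lin
 * sources. It shows the length contract of SkPnmiGetStruct() described
 * above: the buffer must hold SK_PNMI_STRUCT_SIZE bytes, and on
 * SK_PNMI_ERR_TOO_SHORT the required size is returned in *pLen. The
 * wrapper function and the guard are hypothetical.
 */
#ifdef SK_PNMI_EXAMPLES /* hypothetical guard, never defined by the driver */
PNMI_STATIC int ExampleGetWholeMib(
SK_AC *pAC,	/* Pointer to adapter context */
SK_IOC IoC,	/* IO context handle */
void *pBuf,	/* Buffer for the SK_PNMI_STRUCT_DATA contents */
unsigned int BufLen)	/* Size of the buffer in bytes */
{
	unsigned int Len = BufLen;
	int Ret;

	Ret = SkPnmiGetStruct(pAC, IoC, pBuf, &Len, 0);
	if (Ret == SK_PNMI_ERR_TOO_SHORT) {

		/* Len now holds the required size (SK_PNMI_STRUCT_SIZE) */
		return (Ret);
	}

	/* On success the whole management database is in pBuf */
	return (Ret);
}
#endif /* SK_PNMI_EXAMPLES */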
849
850/*****************************************************************************
851 *
852 * SkPnmiPreSetStruct - Presets the management database in SK_PNMI_STRUCT_DATA
853 *
854 * Description:
855 * Calls a general sub-function for all this set stuff. The preset does
856 * the same as a set, but returns just before finally setting the
857 *	new value. This is useful to check if a set might be successful.
858 *	The sub-function runs through the IdTable, checks which OIDs can
859 *	be set, and calls the handler function of the OID to perform the
860 * preset. The return value of the function will also be stored in
861 * SK_PNMI_STRUCT_DATA if the passed buffer has the minimum size of
862 * SK_PNMI_MIN_STRUCT_SIZE.
863 *
864 * Returns:
865 * SK_PNMI_ERR_OK The request was successfully performed.
866 *	SK_PNMI_ERR_GENERAL      A general severe internal error occurred.
867 * SK_PNMI_ERR_TOO_SHORT The passed buffer is too short to contain
868 * the correct data (e.g. a 32bit value is
869 * needed, but a 16 bit value was passed).
870 * SK_PNMI_ERR_BAD_VALUE The passed value is not in the valid
871 * value range.
872 */
873int SkPnmiPreSetStruct(
874SK_AC *pAC, /* Pointer to adapter context */
875SK_IOC IoC, /* IO context handle */
876void *pBuf, /* Buffer which contains the data to be set */
877unsigned int *pLen, /* Length of buffer */
878SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */
879{
880 SK_DBG_MSG(pAC, SK_DBGMOD_PNMI, SK_DBGCAT_CTRL,
881 ("PNMI: SkPnmiPreSetStruct: Called, BufLen=%d, NetIndex=%d\n",
882 *pLen, NetIndex));
883
884 return (PnmiStruct(pAC, IoC, SK_PNMI_PRESET, (char *)pBuf,
885 pLen, NetIndex));
886}
887
888/*****************************************************************************
889 *
890 * SkPnmiSetStruct - Sets the management database in SK_PNMI_STRUCT_DATA
891 *
892 * Description:
 893 *	Calls a general sub-function that handles all set operations.
 894 *	The sub-function runs through the IdTable, checks which OIDs
 895 *	can be set, and calls the handler function of each OID to
 896 *	perform the set. The return value of the function will also
 897 *	be stored in SK_PNMI_STRUCT_DATA if the passed buffer has
 898 *	the minimum size of SK_PNMI_MIN_STRUCT_SIZE. This mirrors
 899 *	SkPnmiPreSetStruct, except that the new values are actually
 900 *	written.
901 *
902 * Returns:
903 * SK_PNMI_ERR_OK The request was successfully performed.
 904 *	SK_PNMI_ERR_GENERAL	   A general severe internal error occurred.
905 * SK_PNMI_ERR_TOO_SHORT The passed buffer is too short to contain
906 * the correct data (e.g. a 32bit value is
907 * needed, but a 16 bit value was passed).
908 * SK_PNMI_ERR_BAD_VALUE The passed value is not in the valid
909 * value range.
910 */
911int SkPnmiSetStruct(
912SK_AC *pAC, /* Pointer to adapter context */
913SK_IOC IoC, /* IO context handle */
914void *pBuf, /* Buffer which contains the data to be set */
915unsigned int *pLen, /* Length of buffer */
916SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */
917{
918 SK_DBG_MSG(pAC, SK_DBGMOD_PNMI, SK_DBGCAT_CTRL,
919 ("PNMI: SkPnmiSetStruct: Called, BufLen=%d, NetIndex=%d\n",
920 *pLen, NetIndex));
921
922 return (PnmiStruct(pAC, IoC, SK_PNMI_SET, (char *)pBuf,
923 pLen, NetIndex));
924}
925
926/*****************************************************************************
927 *
928 * SkPnmiEvent - Event handler
929 *
930 * Description:
931 * Handles the following events:
932 * SK_PNMI_EVT_SIRQ_OVERFLOW When a hardware counter overflows an
933 * interrupt will be generated which is
 934 *					first handled by SIRQ, which generates
 935 *					this event. The event increments the
936 * upper 32 bit of the 64 bit counter.
937 * SK_PNMI_EVT_SEN_XXX The event is generated by the I2C module
938 * when a sensor reports a warning or
939 * error. The event will store a trap
940 * message in the trap buffer.
941 * SK_PNMI_EVT_CHG_EST_TIMER The timer event was initiated by this
942 * module and is used to calculate the
943 * port switches per hour.
944 * SK_PNMI_EVT_CLEAR_COUNTER The event clears all counters and
945 * timestamps.
946 * SK_PNMI_EVT_XMAC_RESET The event is generated by the driver
947 * before a hard reset of the XMAC is
948 * performed. All counters will be saved
949 * and added to the hardware counter
950 * values after reset to grant continuous
951 * counter values.
952 * SK_PNMI_EVT_RLMT_PORT_UP Generated by RLMT to notify that a port
953 * went logically up. A trap message will
954 * be stored to the trap buffer.
955 * SK_PNMI_EVT_RLMT_PORT_DOWN Generated by RLMT to notify that a port
956 * went logically down. A trap message will
957 * be stored to the trap buffer.
958 * SK_PNMI_EVT_RLMT_SEGMENTATION Generated by RLMT to notify that two
959 * spanning tree root bridges were
960 * detected. A trap message will be stored
961 * to the trap buffer.
962 * SK_PNMI_EVT_RLMT_ACTIVE_DOWN Notifies PNMI that an active port went
 963 *					down. PNMI will no longer add the
964 * statistic values to the virtual port.
965 * SK_PNMI_EVT_RLMT_ACTIVE_UP Notifies PNMI that a port went up and
966 * is now an active port. PNMI will now
967 * add the statistic data of this port to
968 * the virtual port.
969 * SK_PNMI_EVT_RLMT_SET_NETS Notifies PNMI about the net mode. The first parameter
970 * contains the number of nets. 1 means single net, 2 means
 971 *					dual net. The second parameter is reserved (-1).
972 *
973 * Returns:
974 * Always 0
975 */
976int SkPnmiEvent(
977SK_AC *pAC, /* Pointer to adapter context */
978SK_IOC IoC, /* IO context handle */
979SK_U32 Event, /* Event-Id */
980SK_EVPARA Param) /* Event dependent parameter */
981{
982 unsigned int PhysPortIndex;
983 unsigned int MaxNetNumber;
984 int CounterIndex;
985 int Ret;
986 SK_U16 MacStatus;
987 SK_U64 OverflowStatus;
988 SK_U64 Mask;
989 int MacType;
990 SK_U64 Value;
991 SK_U32 Val32;
992 SK_U16 Register;
993 SK_EVPARA EventParam;
994 SK_U64 NewestValue;
995 SK_U64 OldestValue;
996 SK_U64 Delta;
997 SK_PNMI_ESTIMATE *pEst;
998 SK_U32 NetIndex;
999 SK_GEPORT *pPrt;
1000 SK_PNMI_VCT *pVctBackupData;
1001 SK_U32 RetCode;
1002 int i;
1003 SK_U32 CableLength;
1004
1005
1006#ifdef DEBUG
1007 if (Event != SK_PNMI_EVT_XMAC_RESET) {
1008
1009 SK_DBG_MSG(pAC, SK_DBGMOD_PNMI, SK_DBGCAT_CTRL,
1010 ("PNMI: SkPnmiEvent: Called, Event=0x%x, Param=0x%x\n",
1011 (unsigned int)Event, (unsigned int)Param.Para64));
1012 }
1013#endif /* DEBUG */
1014 SK_PNMI_CHECKFLAGS("SkPnmiEvent: On call");
1015
1016 MacType = pAC->GIni.GIMacType;
1017
1018 switch (Event) {
1019
1020 case SK_PNMI_EVT_SIRQ_OVERFLOW:
1021 PhysPortIndex = (int)Param.Para32[0];
1022 MacStatus = (SK_U16)Param.Para32[1];
1023#ifdef DEBUG
1024 if (PhysPortIndex >= SK_MAX_MACS) {
1025
1026 SK_DBG_MSG(pAC, SK_DBGMOD_PNMI, SK_DBGCAT_CTRL,
1027 ("PNMI: ERR: SkPnmiEvent: SK_PNMI_EVT_SIRQ_OVERFLOW parameter"
1028 " wrong, PhysPortIndex=0x%x\n",
1029 PhysPortIndex));
1030 return (0);
1031 }
1032#endif /* DEBUG */
1033 OverflowStatus = 0;
1034
1035 /*
1036 * Check which source caused an overflow interrupt.
1037 */
1038 if ((pAC->GIni.GIFunc.pFnMacOverflow(pAC, IoC, PhysPortIndex,
1039 MacStatus, &OverflowStatus) != 0) ||
1040 (OverflowStatus == 0)) {
1041
1042 SK_PNMI_CHECKFLAGS("SkPnmiEvent: On return");
1043 return (0);
1044 }
1045
1046 /*
1047 * Check the overflow status register and increment
1048 * the upper dword of corresponding counter.
1049 */
1050 for (CounterIndex = 0; CounterIndex < sizeof(Mask) * 8;
1051 CounterIndex ++) {
1052
1053 Mask = (SK_U64)1 << CounterIndex;
1054 if ((OverflowStatus & Mask) == 0) {
1055
1056 continue;
1057 }
1058
1059 switch (StatOvrflwBit[CounterIndex][MacType]) {
1060
1061 case SK_PNMI_HTX_UTILUNDER:
1062 case SK_PNMI_HTX_UTILOVER:
1063 if (MacType == SK_MAC_XMAC) {
1064 XM_IN16(IoC, PhysPortIndex, XM_TX_CMD, &Register);
1065 Register |= XM_TX_SAM_LINE;
1066 XM_OUT16(IoC, PhysPortIndex, XM_TX_CMD, Register);
1067 }
1068 break;
1069
1070 case SK_PNMI_HRX_UTILUNDER:
1071 case SK_PNMI_HRX_UTILOVER:
1072 if (MacType == SK_MAC_XMAC) {
1073 XM_IN16(IoC, PhysPortIndex, XM_RX_CMD, &Register);
1074 Register |= XM_RX_SAM_LINE;
1075 XM_OUT16(IoC, PhysPortIndex, XM_RX_CMD, Register);
1076 }
1077 break;
1078
1079 case SK_PNMI_HTX_OCTETHIGH:
1080 case SK_PNMI_HTX_OCTETLOW:
1081 case SK_PNMI_HTX_RESERVED:
1082 case SK_PNMI_HRX_OCTETHIGH:
1083 case SK_PNMI_HRX_OCTETLOW:
1084 case SK_PNMI_HRX_IRLENGTH:
1085 case SK_PNMI_HRX_RESERVED:
1086
1087 /*
 1088			 * the following counters aren't handled (id > 63)
1089 */
1090 case SK_PNMI_HTX_SYNC:
1091 case SK_PNMI_HTX_SYNC_OCTET:
1092 break;
1093
1094 case SK_PNMI_HRX_LONGFRAMES:
1095 if (MacType == SK_MAC_GMAC) {
1096 pAC->Pnmi.Port[PhysPortIndex].
1097 CounterHigh[CounterIndex] ++;
1098 }
1099 break;
1100
1101 default:
1102 pAC->Pnmi.Port[PhysPortIndex].
1103 CounterHigh[CounterIndex] ++;
1104 }
1105 }
1106 break;
1107
1108 case SK_PNMI_EVT_SEN_WAR_LOW:
1109#ifdef DEBUG
1110 if ((unsigned int)Param.Para64 >= (unsigned int)pAC->I2c.MaxSens) {
1111
1112 SK_DBG_MSG(pAC, SK_DBGMOD_PNMI, SK_DBGCAT_CTRL,
1113 ("PNMI: ERR: SkPnmiEvent: SK_PNMI_EVT_SEN_WAR_LOW parameter wrong, SensorIndex=%d\n",
1114 (unsigned int)Param.Para64));
1115 return (0);
1116 }
1117#endif /* DEBUG */
1118
1119 /*
1120 * Store a trap message in the trap buffer and generate
1121 * an event for user space applications with the
1122 * SK_DRIVER_SENDEVENT macro.
1123 */
1124 QueueSensorTrap(pAC, OID_SKGE_TRAP_SEN_WAR_LOW,
1125 (unsigned int)Param.Para64);
1126 (void)SK_DRIVER_SENDEVENT(pAC, IoC);
1127 break;
1128
1129 case SK_PNMI_EVT_SEN_WAR_UPP:
1130#ifdef DEBUG
1131 if ((unsigned int)Param.Para64 >= (unsigned int)pAC->I2c.MaxSens) {
1132
1133 SK_DBG_MSG(pAC, SK_DBGMOD_PNMI, SK_DBGCAT_CTRL,
1134 ("PNMI: ERR: SkPnmiEvent: SK_PNMI_EVT_SEN_WAR_UPP parameter wrong, SensorIndex=%d\n",
1135 (unsigned int)Param.Para64));
1136 return (0);
1137 }
1138#endif /* DEBUG */
1139
1140 /*
1141 * Store a trap message in the trap buffer and generate
1142 * an event for user space applications with the
1143 * SK_DRIVER_SENDEVENT macro.
1144 */
1145 QueueSensorTrap(pAC, OID_SKGE_TRAP_SEN_WAR_UPP,
1146 (unsigned int)Param.Para64);
1147 (void)SK_DRIVER_SENDEVENT(pAC, IoC);
1148 break;
1149
1150 case SK_PNMI_EVT_SEN_ERR_LOW:
1151#ifdef DEBUG
1152 if ((unsigned int)Param.Para64 >= (unsigned int)pAC->I2c.MaxSens) {
1153
1154 SK_DBG_MSG(pAC, SK_DBGMOD_PNMI, SK_DBGCAT_CTRL,
1155 ("PNMI: ERR: SkPnmiEvent: SK_PNMI_EVT_SEN_ERR_LOW parameter wrong, SensorIndex=%d\n",
1156 (unsigned int)Param.Para64));
1157 return (0);
1158 }
1159#endif /* DEBUG */
1160
1161 /*
1162 * Store a trap message in the trap buffer and generate
1163 * an event for user space applications with the
1164 * SK_DRIVER_SENDEVENT macro.
1165 */
1166 QueueSensorTrap(pAC, OID_SKGE_TRAP_SEN_ERR_LOW,
1167 (unsigned int)Param.Para64);
1168 (void)SK_DRIVER_SENDEVENT(pAC, IoC);
1169 break;
1170
1171 case SK_PNMI_EVT_SEN_ERR_UPP:
1172#ifdef DEBUG
1173 if ((unsigned int)Param.Para64 >= (unsigned int)pAC->I2c.MaxSens) {
1174
1175 SK_DBG_MSG(pAC, SK_DBGMOD_PNMI, SK_DBGCAT_CTRL,
1176 ("PNMI: ERR: SkPnmiEvent: SK_PNMI_EVT_SEN_ERR_UPP parameter wrong, SensorIndex=%d\n",
1177 (unsigned int)Param.Para64));
1178 return (0);
1179 }
1180#endif /* DEBUG */
1181
1182 /*
1183 * Store a trap message in the trap buffer and generate
1184 * an event for user space applications with the
1185 * SK_DRIVER_SENDEVENT macro.
1186 */
1187 QueueSensorTrap(pAC, OID_SKGE_TRAP_SEN_ERR_UPP,
1188 (unsigned int)Param.Para64);
1189 (void)SK_DRIVER_SENDEVENT(pAC, IoC);
1190 break;
1191
1192 case SK_PNMI_EVT_CHG_EST_TIMER:
1193 /*
1194 * Calculate port switch average on a per hour basis
1195 * Time interval for check : 28125 ms
1196 * Number of values for average : 8
1197 *
 1198		 * Be careful when changing these values; on change check
1199 * - typedef of SK_PNMI_ESTIMATE (Size of EstValue
1200 * array one less than value number)
1201 * - Timer initialization SkTimerStart() in SkPnmiInit
 1202		 *  - Delta value below must be multiplied by a
 1203		 *    power of 2
1204 *
1205 */
1206 pEst = &pAC->Pnmi.RlmtChangeEstimate;
1207 CounterIndex = pEst->EstValueIndex + 1;
1208 if (CounterIndex == 7) {
1209
1210 CounterIndex = 0;
1211 }
1212 pEst->EstValueIndex = CounterIndex;
1213
1214 NewestValue = pAC->Pnmi.RlmtChangeCts;
1215 OldestValue = pEst->EstValue[CounterIndex];
1216 pEst->EstValue[CounterIndex] = NewestValue;
1217
1218 /*
1219 * Calculate average. Delta stores the number of
1220 * port switches per 28125 * 8 = 225000 ms
1221 */
1222 if (NewestValue >= OldestValue) {
1223
1224 Delta = NewestValue - OldestValue;
1225 }
1226 else {
1227 /* Overflow situation */
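			/*
			 * (0 - OldestValue) wraps around in unsigned 64-bit
			 * arithmetic, so the sum below yields the distance
			 * from OldestValue to the wrapped-around NewestValue.
			 */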
1228 Delta = (SK_U64)(0 - OldestValue) + NewestValue;
1229 }
1230
1231 /*
1232 * Extrapolate delta to port switches per hour.
1233 * Estimate = Delta * (3600000 / 225000)
1234 * = Delta * 16
1235 * = Delta << 4
1236 */
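		/*
		 * Example: 5 port switches counted in the 225 s window give
		 * an estimate of 5 * 16 = 80 switches per hour.
		 */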
1237 pAC->Pnmi.RlmtChangeEstimate.Estimate = Delta << 4;
1238
1239 /*
1240 * Check if threshold is exceeded. If the threshold is
 1241		 * permanently exceeded, an event will be generated every
 1242		 * 28125 ms to remind the user of this condition.
1243 */
1244 if ((pAC->Pnmi.RlmtChangeThreshold != 0) &&
1245 (pAC->Pnmi.RlmtChangeEstimate.Estimate >=
1246 pAC->Pnmi.RlmtChangeThreshold)) {
1247
1248 QueueSimpleTrap(pAC, OID_SKGE_TRAP_RLMT_CHANGE_THRES);
1249 (void)SK_DRIVER_SENDEVENT(pAC, IoC);
1250 }
1251
1252 SK_MEMSET((char *)&EventParam, 0, sizeof(EventParam));
1253 SkTimerStart(pAC, IoC, &pAC->Pnmi.RlmtChangeEstimate.EstTimer,
1254 28125000, SKGE_PNMI, SK_PNMI_EVT_CHG_EST_TIMER,
1255 EventParam);
1256 break;
1257
1258 case SK_PNMI_EVT_CLEAR_COUNTER:
1259 /*
1260 * Param.Para32[0] contains the NetIndex (0 ..1).
1261 * Param.Para32[1] is reserved, contains -1.
1262 */
1263 NetIndex = (SK_U32)Param.Para32[0];
1264
1265#ifdef DEBUG
1266 if (NetIndex >= pAC->Rlmt.NumNets) {
1267
1268 SK_DBG_MSG(pAC, SK_DBGMOD_PNMI, SK_DBGCAT_CTRL,
1269 ("PNMI: ERR: SkPnmiEvent: SK_PNMI_EVT_CLEAR_COUNTER parameter wrong, NetIndex=%d\n",
1270 NetIndex));
1271
1272 return (0);
1273 }
1274#endif /* DEBUG */
1275
1276 /*
1277 * Set all counters and timestamps to zero.
1278 * The according NetIndex is required as a
1279 * parameter of the event.
1280 */
1281 ResetCounter(pAC, IoC, NetIndex);
1282 break;
1283
1284 case SK_PNMI_EVT_XMAC_RESET:
1285 /*
1286 * To grant continuous counter values store the current
1287 * XMAC statistic values to the entries 1..n of the
1288 * CounterOffset array. XMAC Errata #2
1289 */
1290#ifdef DEBUG
1291 if ((unsigned int)Param.Para64 >= SK_MAX_MACS) {
1292
1293 SK_DBG_MSG(pAC, SK_DBGMOD_PNMI, SK_DBGCAT_CTRL,
1294 ("PNMI: ERR: SkPnmiEvent: SK_PNMI_EVT_XMAC_RESET parameter wrong, PhysPortIndex=%d\n",
1295 (unsigned int)Param.Para64));
1296 return (0);
1297 }
1298#endif
1299 PhysPortIndex = (unsigned int)Param.Para64;
1300
1301 /*
1302 * Update XMAC statistic to get fresh values
1303 */
1304 Ret = MacUpdate(pAC, IoC, 0, pAC->GIni.GIMacsFound - 1);
1305 if (Ret != SK_PNMI_ERR_OK) {
1306
1307 SK_PNMI_CHECKFLAGS("SkPnmiEvent: On return");
1308 return (0);
1309 }
1310 /*
1311 * Increment semaphore to indicate that an update was
1312 * already done
1313 */
1314 pAC->Pnmi.MacUpdatedFlag ++;
1315
1316 for (CounterIndex = 0; CounterIndex < SK_PNMI_MAX_IDX;
1317 CounterIndex ++) {
1318
1319 if (!StatAddr[CounterIndex][MacType].GetOffset) {
1320
1321 continue;
1322 }
1323
1324 pAC->Pnmi.Port[PhysPortIndex].CounterOffset[CounterIndex] =
1325 GetPhysStatVal(pAC, IoC, PhysPortIndex, CounterIndex);
1326
1327 pAC->Pnmi.Port[PhysPortIndex].CounterHigh[CounterIndex] = 0;
1328 }
1329
1330 pAC->Pnmi.MacUpdatedFlag --;
1331 break;
1332
1333 case SK_PNMI_EVT_RLMT_PORT_UP:
1334 PhysPortIndex = (unsigned int)Param.Para32[0];
1335#ifdef DEBUG
1336 if (PhysPortIndex >= SK_MAX_MACS) {
1337
1338 SK_DBG_MSG(pAC, SK_DBGMOD_PNMI, SK_DBGCAT_CTRL,
1339 ("PNMI: ERR: SkPnmiEvent: SK_PNMI_EVT_RLMT_PORT_UP parameter"
1340 " wrong, PhysPortIndex=%d\n", PhysPortIndex));
1341
1342 return (0);
1343 }
1344#endif /* DEBUG */
1345
1346 /*
1347 * Store a trap message in the trap buffer and generate an event for
1348 * user space applications with the SK_DRIVER_SENDEVENT macro.
1349 */
1350 QueueRlmtPortTrap(pAC, OID_SKGE_TRAP_RLMT_PORT_UP, PhysPortIndex);
1351 (void)SK_DRIVER_SENDEVENT(pAC, IoC);
1352
1353 /* Bugfix for XMAC errata (#10620)*/
1354 if (MacType == SK_MAC_XMAC) {
1355 /* Add incremental difference to offset (#10620)*/
1356 (void)pAC->GIni.GIFunc.pFnMacStatistic(pAC, IoC, PhysPortIndex,
1357 XM_RXE_SHT_ERR, &Val32);
1358
1359 Value = (((SK_U64)pAC->Pnmi.Port[PhysPortIndex].
1360 CounterHigh[SK_PNMI_HRX_SHORTS] << 32) | (SK_U64)Val32);
1361 pAC->Pnmi.Port[PhysPortIndex].CounterOffset[SK_PNMI_HRX_SHORTS] +=
1362 Value - pAC->Pnmi.Port[PhysPortIndex].RxShortZeroMark;
1363 }
1364
1365 /* Tell VctStatus() that a link was up meanwhile. */
1366 pAC->Pnmi.VctStatus[PhysPortIndex] |= SK_PNMI_VCT_LINK;
1367 break;
1368
1369 case SK_PNMI_EVT_RLMT_PORT_DOWN:
1370 PhysPortIndex = (unsigned int)Param.Para32[0];
1371
1372#ifdef DEBUG
1373 if (PhysPortIndex >= SK_MAX_MACS) {
1374
1375 SK_DBG_MSG(pAC, SK_DBGMOD_PNMI, SK_DBGCAT_CTRL,
1376 ("PNMI: ERR: SkPnmiEvent: SK_PNMI_EVT_RLMT_PORT_DOWN parameter"
1377 " wrong, PhysPortIndex=%d\n", PhysPortIndex));
1378
1379 return (0);
1380 }
1381#endif /* DEBUG */
1382
1383 /*
1384 * Store a trap message in the trap buffer and generate an event for
1385 * user space applications with the SK_DRIVER_SENDEVENT macro.
1386 */
1387 QueueRlmtPortTrap(pAC, OID_SKGE_TRAP_RLMT_PORT_DOWN, PhysPortIndex);
1388 (void)SK_DRIVER_SENDEVENT(pAC, IoC);
1389
1390 /* Bugfix #10620 - get zero level for incremental difference */
1391 if (MacType == SK_MAC_XMAC) {
1392
1393 (void)pAC->GIni.GIFunc.pFnMacStatistic(pAC, IoC, PhysPortIndex,
1394 XM_RXE_SHT_ERR, &Val32);
1395
1396 pAC->Pnmi.Port[PhysPortIndex].RxShortZeroMark =
1397 (((SK_U64)pAC->Pnmi.Port[PhysPortIndex].
1398 CounterHigh[SK_PNMI_HRX_SHORTS] << 32) | (SK_U64)Val32);
1399 }
1400 break;
1401
1402 case SK_PNMI_EVT_RLMT_ACTIVE_DOWN:
1403 PhysPortIndex = (unsigned int)Param.Para32[0];
1404 NetIndex = (SK_U32)Param.Para32[1];
1405
1406#ifdef DEBUG
1407 if (PhysPortIndex >= SK_MAX_MACS) {
1408
1409 SK_DBG_MSG(pAC, SK_DBGMOD_PNMI, SK_DBGCAT_CTRL,
1410 ("PNMI: ERR: SkPnmiEvent: SK_PNMI_EVT_RLMT_ACTIVE_DOWN parameter too high, PhysPort=%d\n",
1411 PhysPortIndex));
1412 }
1413
1414 if (NetIndex >= pAC->Rlmt.NumNets) {
1415
1416 SK_DBG_MSG(pAC, SK_DBGMOD_PNMI, SK_DBGCAT_CTRL,
1417 ("PNMI: ERR: SkPnmiEvent: SK_PNMI_EVT_RLMT_ACTIVE_DOWN parameter too high, NetIndex=%d\n",
1418 NetIndex));
1419 }
1420#endif /* DEBUG */
1421
1422 /*
1423 * For now, ignore event if NetIndex != 0.
1424 */
1425 if (Param.Para32[1] != 0) {
1426
1427 return (0);
1428 }
1429
1430 /*
1431 * Nothing to do if port is already inactive
1432 */
1433 if (!pAC->Pnmi.Port[PhysPortIndex].ActiveFlag) {
1434
1435 return (0);
1436 }
1437
1438 /*
1439 * Update statistic counters to calculate new offset for the virtual
1440 * port and increment semaphore to indicate that an update was already
1441 * done.
1442 */
1443 if (MacUpdate(pAC, IoC, 0, pAC->GIni.GIMacsFound - 1) !=
1444 SK_PNMI_ERR_OK) {
1445
1446 SK_PNMI_CHECKFLAGS("SkPnmiEvent: On return");
1447 return (0);
1448 }
1449 pAC->Pnmi.MacUpdatedFlag ++;
1450
1451 /*
 1452		 * Calculate new counter offset for virtual port to grant continuous
1453 * counting on port switches. The virtual port consists of all currently
1454 * active ports. The port down event indicates that a port is removed
1455 * from the virtual port. Therefore add the counter value of the removed
1456 * port to the CounterOffset for the virtual port to grant the same
1457 * counter value.
1458 */
1459 for (CounterIndex = 0; CounterIndex < SK_PNMI_MAX_IDX;
1460 CounterIndex ++) {
1461
1462 if (!StatAddr[CounterIndex][MacType].GetOffset) {
1463
1464 continue;
1465 }
1466
1467 Value = GetPhysStatVal(pAC, IoC, PhysPortIndex, CounterIndex);
1468
1469 pAC->Pnmi.VirtualCounterOffset[CounterIndex] += Value;
1470 }
1471
1472 /*
1473 * Set port to inactive
1474 */
1475 pAC->Pnmi.Port[PhysPortIndex].ActiveFlag = SK_FALSE;
1476
1477 pAC->Pnmi.MacUpdatedFlag --;
1478 break;
1479
1480 case SK_PNMI_EVT_RLMT_ACTIVE_UP:
1481 PhysPortIndex = (unsigned int)Param.Para32[0];
1482 NetIndex = (SK_U32)Param.Para32[1];
1483
1484#ifdef DEBUG
1485 if (PhysPortIndex >= SK_MAX_MACS) {
1486
1487 SK_DBG_MSG(pAC, SK_DBGMOD_PNMI, SK_DBGCAT_CTRL,
1488 ("PNMI: ERR: SkPnmiEvent: SK_PNMI_EVT_RLMT_ACTIVE_UP parameter too high, PhysPort=%d\n",
1489 PhysPortIndex));
1490 }
1491
1492 if (NetIndex >= pAC->Rlmt.NumNets) {
1493
1494 SK_DBG_MSG(pAC, SK_DBGMOD_PNMI, SK_DBGCAT_CTRL,
1495 ("PNMI: ERR: SkPnmiEvent: SK_PNMI_EVT_RLMT_ACTIVE_UP parameter too high, NetIndex=%d\n",
1496 NetIndex));
1497 }
1498#endif /* DEBUG */
1499
1500 /*
1501 * For now, ignore event if NetIndex != 0.
1502 */
1503 if (Param.Para32[1] != 0) {
1504
1505 return (0);
1506 }
1507
1508 /*
1509 * Nothing to do if port is already active
1510 */
1511 if (pAC->Pnmi.Port[PhysPortIndex].ActiveFlag) {
1512
1513 return (0);
1514 }
1515
1516 /*
1517 * Statistic maintenance
1518 */
1519 pAC->Pnmi.RlmtChangeCts ++;
1520 pAC->Pnmi.RlmtChangeTime = SK_PNMI_HUNDREDS_SEC(SkOsGetTime(pAC));
1521
1522 /*
1523 * Store a trap message in the trap buffer and generate an event for
1524 * user space applications with the SK_DRIVER_SENDEVENT macro.
1525 */
1526 QueueRlmtNewMacTrap(pAC, PhysPortIndex);
1527 (void)SK_DRIVER_SENDEVENT(pAC, IoC);
1528
1529 /*
1530 * Update statistic counters to calculate new offset for the virtual
1531 * port and increment semaphore to indicate that an update was
1532 * already done.
1533 */
1534 if (MacUpdate(pAC, IoC, 0, pAC->GIni.GIMacsFound - 1) !=
1535 SK_PNMI_ERR_OK) {
1536
1537 SK_PNMI_CHECKFLAGS("SkPnmiEvent: On return");
1538 return (0);
1539 }
1540 pAC->Pnmi.MacUpdatedFlag ++;
1541
1542 /*
 1543		 * Calculate new counter offset for virtual port to grant continuous
 1544		 * counting on port switches. A new port is added to the virtual port.
 1545		 * Therefore subtract the counter value of the new port from the
1546 * CounterOffset for the virtual port to grant the same value.
1547 */
1548 for (CounterIndex = 0; CounterIndex < SK_PNMI_MAX_IDX;
1549 CounterIndex ++) {
1550
1551 if (!StatAddr[CounterIndex][MacType].GetOffset) {
1552
1553 continue;
1554 }
1555
1556 Value = GetPhysStatVal(pAC, IoC, PhysPortIndex, CounterIndex);
1557
1558 pAC->Pnmi.VirtualCounterOffset[CounterIndex] -= Value;
1559 }
1560
1561 /* Set port to active */
1562 pAC->Pnmi.Port[PhysPortIndex].ActiveFlag = SK_TRUE;
1563
1564 pAC->Pnmi.MacUpdatedFlag --;
1565 break;
1566
1567 case SK_PNMI_EVT_RLMT_SEGMENTATION:
1568 /*
 1569		 * Param.Para32[0] contains the NetIndex.
1570 */
1571
1572 /*
1573 * Store a trap message in the trap buffer and generate an event for
1574 * user space applications with the SK_DRIVER_SENDEVENT macro.
1575 */
1576 QueueSimpleTrap(pAC, OID_SKGE_TRAP_RLMT_SEGMENTATION);
1577 (void)SK_DRIVER_SENDEVENT(pAC, IoC);
1578 break;
1579
1580 case SK_PNMI_EVT_RLMT_SET_NETS:
1581 /*
1582 * Param.Para32[0] contains the number of Nets.
1583 * Param.Para32[1] is reserved, contains -1.
1584 */
1585 /*
1586 * Check number of nets
1587 */
1588 MaxNetNumber = pAC->GIni.GIMacsFound;
1589 if (((unsigned int)Param.Para32[0] < 1)
1590 || ((unsigned int)Param.Para32[0] > MaxNetNumber)) {
1591 return (SK_PNMI_ERR_UNKNOWN_NET);
1592 }
1593
1594 if ((unsigned int)Param.Para32[0] == 1) { /* single net mode */
1595 pAC->Pnmi.DualNetActiveFlag = SK_FALSE;
1596 }
1597 else { /* dual net mode */
1598 pAC->Pnmi.DualNetActiveFlag = SK_TRUE;
1599 }
1600 break;
1601
1602 case SK_PNMI_EVT_VCT_RESET:
1603 PhysPortIndex = Param.Para32[0];
1604 pPrt = &pAC->GIni.GP[PhysPortIndex];
1605 pVctBackupData = &pAC->Pnmi.VctBackup[PhysPortIndex];
1606
1607 if (pAC->Pnmi.VctStatus[PhysPortIndex] & SK_PNMI_VCT_PENDING) {
1608 RetCode = SkGmCableDiagStatus(pAC, IoC, PhysPortIndex, SK_FALSE);
1609 if (RetCode == 2) {
1610 /*
1611 * VCT test is still running.
1612 * Start VCT timer counter again.
1613 */
1614 SK_MEMSET((char *) &Param, 0, sizeof(Param));
1615 Param.Para32[0] = PhysPortIndex;
1616 Param.Para32[1] = -1;
1617 SkTimerStart(pAC, IoC,
1618 &pAC->Pnmi.VctTimeout[PhysPortIndex].VctTimer,
1619 4000000, SKGE_PNMI, SK_PNMI_EVT_VCT_RESET, Param);
1620 break;
1621 }
1622 pAC->Pnmi.VctStatus[PhysPortIndex] &= ~SK_PNMI_VCT_PENDING;
1623 pAC->Pnmi.VctStatus[PhysPortIndex] |=
1624 (SK_PNMI_VCT_NEW_VCT_DATA | SK_PNMI_VCT_TEST_DONE);
1625
1626 /* Copy results for later use to PNMI struct. */
1627 for (i = 0; i < 4; i++) {
1628 if (pPrt->PMdiPairSts[i] == SK_PNMI_VCT_NORMAL_CABLE) {
1629 if ((pPrt->PMdiPairLen[i] > 35) &&
1630 (pPrt->PMdiPairLen[i] < 0xff)) {
1631 pPrt->PMdiPairSts[i] = SK_PNMI_VCT_IMPEDANCE_MISMATCH;
1632 }
1633 }
1634 if ((pPrt->PMdiPairLen[i] > 35) &&
1635 (pPrt->PMdiPairLen[i] != 0xff)) {
1636 CableLength = 1000 *
1637 (((175 * pPrt->PMdiPairLen[i]) / 210) - 28);
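					/*
					 * Example of the conversion above with
					 * integer arithmetic: a raw pair length
					 * of 100 gives
					 * 1000 * ((175 * 100 / 210) - 28)
					 * = 1000 * (83 - 28) = 55000.
					 */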
1638 }
1639 else {
1640 CableLength = 0;
1641 }
1642 pVctBackupData->PMdiPairLen[i] = CableLength;
1643 pVctBackupData->PMdiPairSts[i] = pPrt->PMdiPairSts[i];
1644 }
1645
1646 Param.Para32[0] = PhysPortIndex;
1647 Param.Para32[1] = -1;
1648 SkEventQueue(pAC, SKGE_DRV, SK_DRV_PORT_RESET, Param);
1649 SkEventDispatcher(pAC, IoC);
1650 }
1651
1652 break;
1653
1654 default:
1655 break;
1656 }
1657
1658 SK_PNMI_CHECKFLAGS("SkPnmiEvent: On return");
1659 return (0);
1660}
1661
1662
1663/******************************************************************************
1664 *
1665 * Private functions
1666 *
1667 */
1668
1669/*****************************************************************************
1670 *
1671 * PnmiVar - Gets, presets, and sets single OIDs
1672 *
1673 * Description:
1674 * Looks up the requested OID, calls the corresponding handler
1675 * function, and passes the parameters with the get, preset, or
1676 * set command. The function is called by SkGePnmiGetVar,
1677 * SkGePnmiPreSetVar, or SkGePnmiSetVar.
1678 *
1679 * Returns:
1680 * SK_PNMI_ERR_XXX. For details have a look at the description of the
1681 * calling functions.
1682 * SK_PNMI_ERR_UNKNOWN_NET The requested NetIndex doesn't exist
1683 */
1684PNMI_STATIC int PnmiVar(
1685SK_AC *pAC, /* Pointer to adapter context */
1686SK_IOC IoC, /* IO context handle */
1687int Action, /* GET/PRESET/SET action */
1688SK_U32 Id, /* Object ID that is to be processed */
1689char *pBuf, /* Buffer used for the management data transfer */
1690unsigned int *pLen, /* Total length of pBuf management data */
1691SK_U32 Instance, /* Instance (1..n) that is to be set or -1 */
1692SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */
1693{
1694 unsigned int TableIndex;
1695 int Ret;
1696
1697
1698 if ((TableIndex = LookupId(Id)) == (unsigned int)(-1)) {
1699
1700 *pLen = 0;
1701 return (SK_PNMI_ERR_UNKNOWN_OID);
1702 }
1703
1704 /* Check NetIndex */
1705 if (NetIndex >= pAC->Rlmt.NumNets) {
1706 return (SK_PNMI_ERR_UNKNOWN_NET);
1707 }
1708
1709 SK_PNMI_CHECKFLAGS("PnmiVar: On call");
1710
1711 Ret = IdTable[TableIndex].Func(pAC, IoC, Action, Id, pBuf, pLen,
1712 Instance, TableIndex, NetIndex);
1713
1714 SK_PNMI_CHECKFLAGS("PnmiVar: On return");
1715
1716 return (Ret);
1717}
1718
1719/*****************************************************************************
1720 *
1721 * PnmiStruct - Presets and Sets data in structure SK_PNMI_STRUCT_DATA
1722 *
1723 * Description:
 1724 *	The sub-function runs through the IdTable, checks which OIDs
 1725 *	can be set, and calls the handler function of each OID to
 1726 *	perform the preset or set. The return value of the function
 1727 *	will also be stored in SK_PNMI_STRUCT_DATA if the passed
 1728 *	buffer has the minimum size of SK_PNMI_MIN_STRUCT_SIZE.
 1729 *	The needed buffer length of each OID is evaluated with a GET
 1730 *	call before the preset/set is performed. The function is
 1731 *	called by SkGePnmiPreSetStruct and SkGePnmiSetStruct.
1732 *
1733 * Returns:
1734 * SK_PNMI_ERR_XXX. The codes are described in the calling functions.
1735 * SK_PNMI_ERR_UNKNOWN_NET The requested NetIndex doesn't exist
1736 */
1737PNMI_STATIC int PnmiStruct(
1738SK_AC *pAC, /* Pointer to adapter context */
1739SK_IOC IoC, /* IO context handle */
1740int Action, /* PRESET/SET action to be performed */
1741char *pBuf, /* Buffer used for the management data transfer */
1742unsigned int *pLen, /* Length of pBuf management data buffer */
1743SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */
1744{
1745 int Ret;
1746 unsigned int TableIndex;
1747 unsigned int DstOffset;
1748 unsigned int Len;
1749 unsigned int InstanceNo;
1750 unsigned int InstanceCnt;
1751 SK_U32 Instance;
1752 SK_U32 Id;
1753
1754
1755 /* Check if the passed buffer has the right size */
1756 if (*pLen < SK_PNMI_STRUCT_SIZE) {
1757
1758 /* Check if we can return the error within the buffer */
1759 if (*pLen >= SK_PNMI_MIN_STRUCT_SIZE) {
1760
1761 SK_PNMI_SET_STAT(pBuf, SK_PNMI_ERR_TOO_SHORT,
1762 (SK_U32)(-1));
1763 }
1764
1765 *pLen = SK_PNMI_STRUCT_SIZE;
1766 return (SK_PNMI_ERR_TOO_SHORT);
1767 }
1768
1769 /* Check NetIndex */
1770 if (NetIndex >= pAC->Rlmt.NumNets) {
1771 return (SK_PNMI_ERR_UNKNOWN_NET);
1772 }
1773
1774 SK_PNMI_CHECKFLAGS("PnmiStruct: On call");
1775
1776 /*
1777 * Update the values of RLMT and SIRQ and increment semaphores to
1778 * indicate that an update was already done.
1779 */
1780 if ((Ret = RlmtUpdate(pAC, IoC, NetIndex)) != SK_PNMI_ERR_OK) {
1781
1782 SK_PNMI_SET_STAT(pBuf, Ret, (SK_U32)(-1));
1783 *pLen = SK_PNMI_MIN_STRUCT_SIZE;
1784 return (Ret);
1785 }
1786
1787 if ((Ret = SirqUpdate(pAC, IoC)) != SK_PNMI_ERR_OK) {
1788
1789 SK_PNMI_SET_STAT(pBuf, Ret, (SK_U32)(-1));
1790 *pLen = SK_PNMI_MIN_STRUCT_SIZE;
1791 return (Ret);
1792 }
1793
1794 pAC->Pnmi.RlmtUpdatedFlag ++;
1795 pAC->Pnmi.SirqUpdatedFlag ++;
1796
1797 /* Preset/Set values */
1798 for (TableIndex = 0; TableIndex < ID_TABLE_SIZE; TableIndex ++) {
1799
1800 if ((IdTable[TableIndex].Access != SK_PNMI_RW) &&
1801 (IdTable[TableIndex].Access != SK_PNMI_WO)) {
1802
1803 continue;
1804 }
1805
1806 InstanceNo = IdTable[TableIndex].InstanceNo;
1807 Id = IdTable[TableIndex].Id;
1808
1809 for (InstanceCnt = 1; InstanceCnt <= InstanceNo;
1810 InstanceCnt ++) {
1811
1812 DstOffset = IdTable[TableIndex].Offset +
1813 (InstanceCnt - 1) *
1814 IdTable[TableIndex].StructSize;
1815
1816 /*
1817 * Because VPD multiple instance variables are
 1818			 * not settable, we do not need to evaluate VPD
 1819			 * instances. Have a look at the VPD instance
1820 * calculation in SkPnmiGetStruct().
1821 */
1822 Instance = (SK_U32)InstanceCnt;
1823
1824 /*
1825 * Evaluate needed buffer length
1826 */
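			/*
			 * A GET with Len = 0 is expected to fail with
			 * SK_PNMI_ERR_TOO_SHORT and to report the required
			 * length in Len; that length is then used for the
			 * preset/set call below.
			 */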
1827 Len = 0;
1828 Ret = IdTable[TableIndex].Func(pAC, IoC,
1829 SK_PNMI_GET, IdTable[TableIndex].Id,
1830 NULL, &Len, Instance, TableIndex, NetIndex);
1831
1832 if (Ret == SK_PNMI_ERR_UNKNOWN_INST) {
1833
1834 break;
1835 }
1836 if (Ret != SK_PNMI_ERR_TOO_SHORT) {
1837
1838 pAC->Pnmi.RlmtUpdatedFlag --;
1839 pAC->Pnmi.SirqUpdatedFlag --;
1840
1841 SK_PNMI_CHECKFLAGS("PnmiStruct: On return");
1842 SK_PNMI_SET_STAT(pBuf,
1843 SK_PNMI_ERR_GENERAL, DstOffset);
1844 *pLen = SK_PNMI_MIN_STRUCT_SIZE;
1845 return (SK_PNMI_ERR_GENERAL);
1846 }
1847 if (Id == OID_SKGE_VPD_ACTION) {
1848
1849 switch (*(pBuf + DstOffset)) {
1850
1851 case SK_PNMI_VPD_CREATE:
1852 Len = 3 + *(pBuf + DstOffset + 3);
1853 break;
1854
1855 case SK_PNMI_VPD_DELETE:
1856 Len = 3;
1857 break;
1858
1859 default:
1860 Len = 1;
1861 break;
1862 }
1863 }
1864
1865 /* Call the OID handler function */
1866 Ret = IdTable[TableIndex].Func(pAC, IoC, Action,
1867 IdTable[TableIndex].Id, pBuf + DstOffset,
1868 &Len, Instance, TableIndex, NetIndex);
1869
1870 if (Ret != SK_PNMI_ERR_OK) {
1871
1872 pAC->Pnmi.RlmtUpdatedFlag --;
1873 pAC->Pnmi.SirqUpdatedFlag --;
1874
1875 SK_PNMI_CHECKFLAGS("PnmiStruct: On return");
1876 SK_PNMI_SET_STAT(pBuf, SK_PNMI_ERR_BAD_VALUE,
1877 DstOffset);
1878 *pLen = SK_PNMI_MIN_STRUCT_SIZE;
1879 return (SK_PNMI_ERR_BAD_VALUE);
1880 }
1881 }
1882 }
1883
1884 pAC->Pnmi.RlmtUpdatedFlag --;
1885 pAC->Pnmi.SirqUpdatedFlag --;
1886
1887 SK_PNMI_CHECKFLAGS("PnmiStruct: On return");
1888 SK_PNMI_SET_STAT(pBuf, SK_PNMI_ERR_OK, (SK_U32)(-1));
1889 return (SK_PNMI_ERR_OK);
1890}
1891
1892/*****************************************************************************
1893 *
1894 * LookupId - Lookup an OID in the IdTable
1895 *
1896 * Description:
1897 * Scans the IdTable to find the table entry of an OID.
1898 *
1899 * Returns:
1900 * The table index or -1 if not found.
1901 */
1902PNMI_STATIC int LookupId(
1903SK_U32 Id) /* Object identifier to be searched */
1904{
1905 int i;
1906
1907 for (i = 0; i < ID_TABLE_SIZE; i++) {
1908
1909 if (IdTable[i].Id == Id) {
1910
1911 return i;
1912 }
1913 }
1914
1915 return (-1);
1916}
1917
1918/*****************************************************************************
1919 *
1920 * OidStruct - Handler of OID_SKGE_ALL_DATA
1921 *
1922 * Description:
1923 * This OID performs a Get/Preset/SetStruct call and returns all data
1924 * in a SK_PNMI_STRUCT_DATA structure.
1925 *
1926 * Returns:
1927 * SK_PNMI_ERR_OK The request was successfully performed.
 1928 *	SK_PNMI_ERR_GENERAL	   A general severe internal error occurred.
1929 * SK_PNMI_ERR_TOO_SHORT The passed buffer is too short to contain
1930 * the correct data (e.g. a 32bit value is
1931 * needed, but a 16 bit value was passed).
1932 * SK_PNMI_ERR_BAD_VALUE The passed value is not in the valid
1933 * value range.
1934 * SK_PNMI_ERR_READ_ONLY The OID is read-only and cannot be set.
1935 * SK_PNMI_ERR_UNKNOWN_INST The requested instance of the OID doesn't
1936 * exist (e.g. port instance 3 on a two port
 1937 *				   adapter).
1938 */
1939PNMI_STATIC int OidStruct(
1940SK_AC *pAC, /* Pointer to adapter context */
1941SK_IOC IoC, /* IO context handle */
1942int Action, /* GET/PRESET/SET action */
1943SK_U32 Id, /* Object ID that is to be processed */
1944char *pBuf, /* Buffer used for the management data transfer */
1945unsigned int *pLen, /* On call: pBuf buffer length. On return: used buffer */
1946SK_U32 Instance, /* Instance (1..n) that is to be queried or -1 */
1947unsigned int TableIndex, /* Index to the Id table */
1948SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */
1949{
1950 if (Id != OID_SKGE_ALL_DATA) {
1951
1952 SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR003,
1953 SK_PNMI_ERR003MSG);
1954
1955 *pLen = 0;
1956 return (SK_PNMI_ERR_GENERAL);
1957 }
1958
1959 /*
1960 * Check instance. We only handle single instance variables
1961 */
1962 if (Instance != (SK_U32)(-1) && Instance != 1) {
1963
1964 *pLen = 0;
1965 return (SK_PNMI_ERR_UNKNOWN_INST);
1966 }
1967
1968 switch (Action) {
1969
1970 case SK_PNMI_GET:
1971 return (SkPnmiGetStruct(pAC, IoC, pBuf, pLen, NetIndex));
1972
1973 case SK_PNMI_PRESET:
1974 return (SkPnmiPreSetStruct(pAC, IoC, pBuf, pLen, NetIndex));
1975
1976 case SK_PNMI_SET:
1977 return (SkPnmiSetStruct(pAC, IoC, pBuf, pLen, NetIndex));
1978 }
1979
1980 SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR004, SK_PNMI_ERR004MSG);
1981
1982 *pLen = 0;
1983 return (SK_PNMI_ERR_GENERAL);
1984}
1985
1986/*****************************************************************************
1987 *
1988 * Perform - OID handler of OID_SKGE_ACTION
1989 *
1990 * Description:
1991 * None.
1992 *
1993 * Returns:
1994 * SK_PNMI_ERR_OK The request was successfully performed.
 1995 *	SK_PNMI_ERR_GENERAL	   A general severe internal error occurred.
1996 * SK_PNMI_ERR_TOO_SHORT The passed buffer is too short to contain
1997 * the correct data (e.g. a 32bit value is
1998 * needed, but a 16 bit value was passed).
1999 * SK_PNMI_ERR_BAD_VALUE The passed value is not in the valid
2000 * value range.
2001 * SK_PNMI_ERR_READ_ONLY The OID is read-only and cannot be set.
2002 * SK_PNMI_ERR_UNKNOWN_INST The requested instance of the OID doesn't
2003 * exist (e.g. port instance 3 on a two port
 2004 *				   adapter).
2005 */
2006PNMI_STATIC int Perform(
2007SK_AC *pAC, /* Pointer to adapter context */
2008SK_IOC IoC, /* IO context handle */
2009int Action, /* GET/PRESET/SET action */
2010SK_U32 Id, /* Object ID that is to be processed */
2011char *pBuf, /* Buffer used for the management data transfer */
2012unsigned int *pLen, /* On call: pBuf buffer length. On return: used buffer */
2013SK_U32 Instance, /* Instance (1..n) that is to be queried or -1 */
2014unsigned int TableIndex, /* Index to the Id table */
2015SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */
2016{
2017 int Ret;
2018 SK_U32 ActionOp;
2019
2020
2021 /*
2022 * Check instance. We only handle single instance variables
2023 */
2024 if (Instance != (SK_U32)(-1) && Instance != 1) {
2025
2026 *pLen = 0;
2027 return (SK_PNMI_ERR_UNKNOWN_INST);
2028 }
2029
2030 if (*pLen < sizeof(SK_U32)) {
2031
2032 *pLen = sizeof(SK_U32);
2033 return (SK_PNMI_ERR_TOO_SHORT);
2034 }
2035
2036 /* Check if a get should be performed */
2037 if (Action == SK_PNMI_GET) {
2038
2039 /* A get is easy. We always return the same value */
2040 ActionOp = (SK_U32)SK_PNMI_ACT_IDLE;
2041 SK_PNMI_STORE_U32(pBuf, ActionOp);
2042 *pLen = sizeof(SK_U32);
2043
2044 return (SK_PNMI_ERR_OK);
2045 }
2046
2047 /* Continue with PRESET/SET action */
2048 if (*pLen > sizeof(SK_U32)) {
2049
2050 return (SK_PNMI_ERR_BAD_VALUE);
2051 }
2052
2053 /* Check if the command is a known one */
2054 SK_PNMI_READ_U32(pBuf, ActionOp);
2055 if (*pLen > sizeof(SK_U32) ||
2056 (ActionOp != SK_PNMI_ACT_IDLE &&
2057 ActionOp != SK_PNMI_ACT_RESET &&
2058 ActionOp != SK_PNMI_ACT_SELFTEST &&
2059 ActionOp != SK_PNMI_ACT_RESETCNT)) {
2060
2061 *pLen = 0;
2062 return (SK_PNMI_ERR_BAD_VALUE);
2063 }
2064
2065 /* A preset ends here */
2066 if (Action == SK_PNMI_PRESET) {
2067
2068 return (SK_PNMI_ERR_OK);
2069 }
2070
2071 switch (ActionOp) {
2072
2073 case SK_PNMI_ACT_IDLE:
2074 /* Nothing to do */
2075 break;
2076
2077 case SK_PNMI_ACT_RESET:
2078 /*
2079 * Perform a driver reset or something that comes near
2080 * to this.
2081 */
2082 Ret = SK_DRIVER_RESET(pAC, IoC);
2083 if (Ret != 0) {
2084
2085 SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR005,
2086 SK_PNMI_ERR005MSG);
2087
2088 return (SK_PNMI_ERR_GENERAL);
2089 }
2090 break;
2091
2092 case SK_PNMI_ACT_SELFTEST:
2093 /*
2094 * Perform a driver selftest or something similar to this.
2095 * Currently this feature is not used and will probably
 2096	 * be implemented in another way.
2097 */
2098 Ret = SK_DRIVER_SELFTEST(pAC, IoC);
2099 pAC->Pnmi.TestResult = Ret;
2100 break;
2101
2102 case SK_PNMI_ACT_RESETCNT:
2103 /* Set all counters and timestamps to zero */
2104 ResetCounter(pAC, IoC, NetIndex);
2105 break;
2106
2107 default:
2108 SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR006,
2109 SK_PNMI_ERR006MSG);
2110
2111 return (SK_PNMI_ERR_GENERAL);
2112 }
2113
2114 return (SK_PNMI_ERR_OK);
2115}
2116
2117/*****************************************************************************
2118 *
2119 * Mac8023Stat - OID handler of OID_GEN_XXX and OID_802_3_XXX
2120 *
2121 * Description:
2122 * Retrieves the statistic values of the virtual port (logical
 2123 *	index 0). Only special NDIS OIDs are handled, which consist
 2124 *	of a 32-bit instead of a 64-bit value. The OIDs are public
2125 * because perhaps some other platform can use them too.
2126 *
2127 * Returns:
2128 * SK_PNMI_ERR_OK The request was successfully performed.
 2129 *	SK_PNMI_ERR_GENERAL	   A general severe internal error occurred.
2130 * SK_PNMI_ERR_TOO_SHORT The passed buffer is too short to contain
2131 * the correct data (e.g. a 32bit value is
2132 * needed, but a 16 bit value was passed).
2133 * SK_PNMI_ERR_UNKNOWN_INST The requested instance of the OID doesn't
2134 * exist (e.g. port instance 3 on a two port
 2135 *				   adapter).
2136 */
2137PNMI_STATIC int Mac8023Stat(
2138SK_AC *pAC, /* Pointer to adapter context */
2139SK_IOC IoC, /* IO context handle */
2140int Action, /* GET/PRESET/SET action */
2141SK_U32 Id, /* Object ID that is to be processed */
2142char *pBuf, /* Buffer used for the management data transfer */
2143unsigned int *pLen, /* On call: pBuf buffer length. On return: used buffer */
2144SK_U32 Instance, /* Instance (1..n) that is to be queried or -1 */
2145unsigned int TableIndex, /* Index to the Id table */
2146SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */
2147{
2148 int Ret;
2149 SK_U64 StatVal;
2150 SK_U32 StatVal32;
2151 SK_BOOL Is64BitReq = SK_FALSE;
2152
2153 /*
2154 * Only the active Mac is returned
2155 */
2156 if (Instance != (SK_U32)(-1) && Instance != 1) {
2157
2158 *pLen = 0;
2159 return (SK_PNMI_ERR_UNKNOWN_INST);
2160 }
2161
2162 /*
2163 * Check action type
2164 */
2165 if (Action != SK_PNMI_GET) {
2166
2167 *pLen = 0;
2168 return (SK_PNMI_ERR_READ_ONLY);
2169 }
2170
2171 /* Check length */
2172 switch (Id) {
2173
2174 case OID_802_3_PERMANENT_ADDRESS:
2175 case OID_802_3_CURRENT_ADDRESS:
2176 if (*pLen < sizeof(SK_MAC_ADDR)) {
2177
2178 *pLen = sizeof(SK_MAC_ADDR);
2179 return (SK_PNMI_ERR_TOO_SHORT);
2180 }
2181 break;
2182
2183 default:
2184#ifndef SK_NDIS_64BIT_CTR
2185 if (*pLen < sizeof(SK_U32)) {
2186 *pLen = sizeof(SK_U32);
2187 return (SK_PNMI_ERR_TOO_SHORT);
2188 }
2189
2190#else /* SK_NDIS_64BIT_CTR */
2191
2192 /* for compatibility, at least 32bit are required for OID */
2193 if (*pLen < sizeof(SK_U32)) {
2194 /*
2195 * but indicate handling for 64bit values,
2196 * if insufficient space is provided
2197 */
2198 *pLen = sizeof(SK_U64);
2199 return (SK_PNMI_ERR_TOO_SHORT);
2200 }
2201
2202 Is64BitReq = (*pLen < sizeof(SK_U64)) ? SK_FALSE : SK_TRUE;
2203#endif /* SK_NDIS_64BIT_CTR */
2204 break;
2205 }
2206
2207 /*
 2208	 * Update all statistics, because we retrieve the virtual MAC, which
 2209	 * consists of multiple physical statistics, and increment the semaphore
2210 * to indicate that an update was already done.
2211 */
2212 Ret = MacUpdate(pAC, IoC, 0, pAC->GIni.GIMacsFound - 1);
 2213	if (Ret != SK_PNMI_ERR_OK) {
2214
2215 *pLen = 0;
2216 return (Ret);
2217 }
2218 pAC->Pnmi.MacUpdatedFlag ++;
2219
2220 /*
2221 * Get value (MAC Index 0 identifies the virtual MAC)
2222 */
2223 switch (Id) {
2224
2225 case OID_802_3_PERMANENT_ADDRESS:
2226 CopyMac(pBuf, &pAC->Addr.Net[NetIndex].PermanentMacAddress);
2227 *pLen = sizeof(SK_MAC_ADDR);
2228 break;
2229
2230 case OID_802_3_CURRENT_ADDRESS:
2231 CopyMac(pBuf, &pAC->Addr.Net[NetIndex].CurrentMacAddress);
2232 *pLen = sizeof(SK_MAC_ADDR);
2233 break;
2234
2235 default:
2236 StatVal = GetStatVal(pAC, IoC, 0, IdTable[TableIndex].Param, NetIndex);
2237
2238 /* by default 32bit values are evaluated */
2239 if (!Is64BitReq) {
2240 StatVal32 = (SK_U32)StatVal;
2241 SK_PNMI_STORE_U32(pBuf, StatVal32);
2242 *pLen = sizeof(SK_U32);
2243 }
2244 else {
2245 SK_PNMI_STORE_U64(pBuf, StatVal);
2246 *pLen = sizeof(SK_U64);
2247 }
2248 break;
2249 }
2250
2251 pAC->Pnmi.MacUpdatedFlag --;
2252
2253 return (SK_PNMI_ERR_OK);
2254}
2255
2256/*****************************************************************************
2257 *
2258 * MacPrivateStat - OID handler function of OID_SKGE_STAT_XXX
2259 *
2260 * Description:
2261 * Retrieves the MAC statistic data.
2262 *
2263 * Returns:
2264 * SK_PNMI_ERR_OK The request was successfully performed.
 2265 *	SK_PNMI_ERR_GENERAL	   A general severe internal error occurred.
2266 * SK_PNMI_ERR_TOO_SHORT The passed buffer is too short to contain
2267 * the correct data (e.g. a 32bit value is
2268 * needed, but a 16 bit value was passed).
2269 * SK_PNMI_ERR_UNKNOWN_INST The requested instance of the OID doesn't
2270 * exist (e.g. port instance 3 on a two port
 2271 *				   adapter).
2272 */
2273PNMI_STATIC int MacPrivateStat(
2274SK_AC *pAC, /* Pointer to adapter context */
2275SK_IOC IoC, /* IO context handle */
2276int Action, /* GET/PRESET/SET action */
2277SK_U32 Id, /* Object ID that is to be processed */
2278char *pBuf, /* Buffer used for the management data transfer */
2279unsigned int *pLen, /* On call: pBuf buffer length. On return: used buffer */
2280SK_U32 Instance, /* Instance (1..n) that is to be queried or -1 */
2281unsigned int TableIndex, /* Index to the Id table */
2282SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */
2283{
2284 unsigned int LogPortMax;
2285 unsigned int LogPortIndex;
2286 unsigned int PhysPortMax;
2287 unsigned int Limit;
2288 unsigned int Offset;
2289 int MacType;
2290 int Ret;
2291 SK_U64 StatVal;
2292
2293
2294
2295 /* Calculate instance if wished. MAC index 0 is the virtual MAC */
2296 PhysPortMax = pAC->GIni.GIMacsFound;
2297 LogPortMax = SK_PNMI_PORT_PHYS2LOG(PhysPortMax);
2298
2299 MacType = pAC->GIni.GIMacType;
2300
2301 if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) { /* Dual net mode */
2302 LogPortMax--;
2303 }
2304
2305 if ((Instance != (SK_U32)(-1))) { /* Only one specific instance is queried */
2306 /* Check instance range */
2307 if ((Instance < 1) || (Instance > LogPortMax)) {
2308
2309 *pLen = 0;
2310 return (SK_PNMI_ERR_UNKNOWN_INST);
2311 }
2312 LogPortIndex = SK_PNMI_PORT_INST2LOG(Instance);
2313 Limit = LogPortIndex + 1;
2314 }
2315
2316 else { /* Instance == (SK_U32)(-1), get all Instances of that OID */
2317
2318 LogPortIndex = 0;
2319 Limit = LogPortMax;
2320 }
2321
2322 /* Check action */
2323 if (Action != SK_PNMI_GET) {
2324
2325 *pLen = 0;
2326 return (SK_PNMI_ERR_READ_ONLY);
2327 }
2328
2329 /* Check length */
2330 if (*pLen < (Limit - LogPortIndex) * sizeof(SK_U64)) {
2331
2332 *pLen = (Limit - LogPortIndex) * sizeof(SK_U64);
2333 return (SK_PNMI_ERR_TOO_SHORT);
2334 }
2335
2336 /*
2337 * Update MAC statistic and increment semaphore to indicate that
2338 * an update was already done.
2339 */
2340 Ret = MacUpdate(pAC, IoC, 0, pAC->GIni.GIMacsFound - 1);
2341 if (Ret != SK_PNMI_ERR_OK) {
2342
2343 *pLen = 0;
2344 return (Ret);
2345 }
2346 pAC->Pnmi.MacUpdatedFlag ++;
2347
2348 /* Get value */
2349 Offset = 0;
2350 for (; LogPortIndex < Limit; LogPortIndex ++) {
2351
2352 switch (Id) {
2353
2354/* XXX not yet implemented due to XMAC problems
2355 case OID_SKGE_STAT_TX_UTIL:
2356 return (SK_PNMI_ERR_GENERAL);
2357*/
2358/* XXX not yet implemented due to XMAC problems
2359 case OID_SKGE_STAT_RX_UTIL:
2360 return (SK_PNMI_ERR_GENERAL);
2361*/
2362 case OID_SKGE_STAT_RX:
2363 if (MacType == SK_MAC_GMAC) {
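			/*
			 * Presumably the GMAC provides no single all-frames
			 * RX counter, so the total is built from the
			 * broadcast, multicast, unicast and undersize
			 * counters.
			 */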
2364 StatVal =
2365 GetStatVal(pAC, IoC, LogPortIndex,
2366 SK_PNMI_HRX_BROADCAST, NetIndex) +
2367 GetStatVal(pAC, IoC, LogPortIndex,
2368 SK_PNMI_HRX_MULTICAST, NetIndex) +
2369 GetStatVal(pAC, IoC, LogPortIndex,
2370 SK_PNMI_HRX_UNICAST, NetIndex) +
2371 GetStatVal(pAC, IoC, LogPortIndex,
2372 SK_PNMI_HRX_UNDERSIZE, NetIndex);
2373 }
2374 else {
2375 StatVal = GetStatVal(pAC, IoC, LogPortIndex,
2376 IdTable[TableIndex].Param, NetIndex);
2377 }
2378 break;
2379
2380 case OID_SKGE_STAT_TX:
2381 if (MacType == SK_MAC_GMAC) {
2382 StatVal =
2383 GetStatVal(pAC, IoC, LogPortIndex,
2384 SK_PNMI_HTX_BROADCAST, NetIndex) +
2385 GetStatVal(pAC, IoC, LogPortIndex,
2386 SK_PNMI_HTX_MULTICAST, NetIndex) +
2387 GetStatVal(pAC, IoC, LogPortIndex,
2388 SK_PNMI_HTX_UNICAST, NetIndex);
2389 }
2390 else {
2391 StatVal = GetStatVal(pAC, IoC, LogPortIndex,
2392 IdTable[TableIndex].Param, NetIndex);
2393 }
2394 break;
2395
2396 default:
2397 StatVal = GetStatVal(pAC, IoC, LogPortIndex,
2398 IdTable[TableIndex].Param, NetIndex);
2399 }
2400 SK_PNMI_STORE_U64(pBuf + Offset, StatVal);
2401
2402 Offset += sizeof(SK_U64);
2403 }
2404 *pLen = Offset;
2405
2406 pAC->Pnmi.MacUpdatedFlag --;
2407
2408 return (SK_PNMI_ERR_OK);
2409}
2410
2411/*****************************************************************************
2412 *
2413 * Addr - OID handler function of OID_SKGE_PHYS_CUR_ADDR and _FAC_ADDR
2414 *
2415 * Description:
 2416 *	Gets/Presets/Sets the current and factory MAC address. The MAC
 2417 *	address of the virtual port, which is reported to the OS, may
 2418 *	not be changed; only the physical ones can. A set to the virtual port
2419 * will be ignored. No error should be reported because otherwise
2420 * a multiple instance set (-1) would always fail.
2421 *
2422 * Returns:
2423 * SK_PNMI_ERR_OK The request was successfully performed.
 2424 *	SK_PNMI_ERR_GENERAL	   A general severe internal error occurred.
2425 * SK_PNMI_ERR_TOO_SHORT The passed buffer is too short to contain
2426 * the correct data (e.g. a 32bit value is
2427 * needed, but a 16 bit value was passed).
2428 * SK_PNMI_ERR_BAD_VALUE The passed value is not in the valid
2429 * value range.
2430 * SK_PNMI_ERR_READ_ONLY The OID is read-only and cannot be set.
2431 * SK_PNMI_ERR_UNKNOWN_INST The requested instance of the OID doesn't
2432 * exist (e.g. port instance 3 on a two port
 2433 *				   adapter).
2434 */
2435PNMI_STATIC int Addr(
2436SK_AC *pAC, /* Pointer to adapter context */
2437SK_IOC IoC, /* IO context handle */
2438int Action, /* GET/PRESET/SET action */
2439SK_U32 Id, /* Object ID that is to be processed */
2440char *pBuf, /* Buffer used for the management data transfer */
2441unsigned int *pLen, /* On call: pBuf buffer length. On return: used buffer */
2442SK_U32 Instance, /* Instance (1..n) that is to be queried or -1 */
2443unsigned int TableIndex, /* Index to the Id table */
2444SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */
2445{
2446 int Ret;
2447 unsigned int LogPortMax;
2448 unsigned int PhysPortMax;
2449 unsigned int LogPortIndex;
2450 unsigned int PhysPortIndex;
2451 unsigned int Limit;
2452 unsigned int Offset = 0;
2453
2454 /*
2455 * Calculate instance if wished. MAC index 0 is the virtual
2456 * MAC.
2457 */
2458 PhysPortMax = pAC->GIni.GIMacsFound;
2459 LogPortMax = SK_PNMI_PORT_PHYS2LOG(PhysPortMax);
2460
2461 if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) { /* Dual net mode */
2462 LogPortMax--;
2463 }
2464
2465 if ((Instance != (SK_U32)(-1))) { /* Only one specific instance is queried */
2466 /* Check instance range */
2467 if ((Instance < 1) || (Instance > LogPortMax)) {
2468
2469 *pLen = 0;
2470 return (SK_PNMI_ERR_UNKNOWN_INST);
2471 }
2472 LogPortIndex = SK_PNMI_PORT_INST2LOG(Instance);
2473 Limit = LogPortIndex + 1;
2474 }
2475 else { /* Instance == (SK_U32)(-1), get all Instances of that OID */
2476
2477 LogPortIndex = 0;
2478 Limit = LogPortMax;
2479 }
2480
2481 /*
2482 * Perform Action
2483 */
2484 if (Action == SK_PNMI_GET) {
2485
2486 /* Check length */
2487 if (*pLen < (Limit - LogPortIndex) * 6) {
2488
2489 *pLen = (Limit - LogPortIndex) * 6;
2490 return (SK_PNMI_ERR_TOO_SHORT);
2491 }
2492
2493 /*
2494 * Get value
2495 */
2496 for (; LogPortIndex < Limit; LogPortIndex ++) {
2497
2498 switch (Id) {
2499
2500 case OID_SKGE_PHYS_CUR_ADDR:
2501 if (LogPortIndex == 0) {
2502 CopyMac(pBuf + Offset, &pAC->Addr.Net[NetIndex].CurrentMacAddress);
2503 }
2504 else {
2505 PhysPortIndex = SK_PNMI_PORT_LOG2PHYS(pAC, LogPortIndex);
2506
2507 CopyMac(pBuf + Offset,
2508 &pAC->Addr.Port[PhysPortIndex].CurrentMacAddress);
2509 }
2510 Offset += 6;
2511 break;
2512
2513 case OID_SKGE_PHYS_FAC_ADDR:
2514 if (LogPortIndex == 0) {
2515 CopyMac(pBuf + Offset,
2516 &pAC->Addr.Net[NetIndex].PermanentMacAddress);
2517 }
2518 else {
2519 PhysPortIndex = SK_PNMI_PORT_LOG2PHYS(
2520 pAC, LogPortIndex);
2521
2522 CopyMac(pBuf + Offset,
2523 &pAC->Addr.Port[PhysPortIndex].PermanentMacAddress);
2524 }
2525 Offset += 6;
2526 break;
2527
2528 default:
2529 SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR008,
2530 SK_PNMI_ERR008MSG);
2531
2532 *pLen = 0;
2533 return (SK_PNMI_ERR_GENERAL);
2534 }
2535 }
2536
2537 *pLen = Offset;
2538 }
2539 else {
2540 /*
 2541		 * The logical MAC address may not be changed, only
 2542		 * the physical ones.
2543 */
2544 if (Id == OID_SKGE_PHYS_FAC_ADDR) {
2545
2546 *pLen = 0;
2547 return (SK_PNMI_ERR_READ_ONLY);
2548 }
2549
2550 /*
2551 * Only the current address may be changed
2552 */
2553 if (Id != OID_SKGE_PHYS_CUR_ADDR) {
2554
2555 SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR009,
2556 SK_PNMI_ERR009MSG);
2557
2558 *pLen = 0;
2559 return (SK_PNMI_ERR_GENERAL);
2560 }
2561
2562 /* Check length */
2563 if (*pLen < (Limit - LogPortIndex) * 6) {
2564
2565 *pLen = (Limit - LogPortIndex) * 6;
2566 return (SK_PNMI_ERR_TOO_SHORT);
2567 }
2568 if (*pLen > (Limit - LogPortIndex) * 6) {
2569
2570 *pLen = 0;
2571 return (SK_PNMI_ERR_BAD_VALUE);
2572 }
2573
2574 /*
2575 * Check Action
2576 */
2577 if (Action == SK_PNMI_PRESET) {
2578
2579 *pLen = 0;
2580 return (SK_PNMI_ERR_OK);
2581 }
2582
2583 /*
 2584		 * Set OID_SKGE_PHYS_CUR_ADDR
2585 */
2586 for (; LogPortIndex < Limit; LogPortIndex ++, Offset += 6) {
2587
2588 /*
2589 * A set to virtual port and set of broadcast
2590 * address will be ignored
2591 */
2592 if (LogPortIndex == 0 || SK_MEMCMP(pBuf + Offset,
2593 "\xff\xff\xff\xff\xff\xff", 6) == 0) {
2594
2595 continue;
2596 }
2597
2598 PhysPortIndex = SK_PNMI_PORT_LOG2PHYS(pAC,
2599 LogPortIndex);
2600
2601 Ret = SkAddrOverride(pAC, IoC, PhysPortIndex,
2602 (SK_MAC_ADDR *)(pBuf + Offset),
2603 (LogPortIndex == 0 ? SK_ADDR_VIRTUAL_ADDRESS :
2604 SK_ADDR_PHYSICAL_ADDRESS));
2605 if (Ret != SK_ADDR_OVERRIDE_SUCCESS) {
2606
2607 return (SK_PNMI_ERR_GENERAL);
2608 }
2609 }
2610 *pLen = Offset;
2611 }
2612
2613 return (SK_PNMI_ERR_OK);
2614}
2615
2616/*****************************************************************************
2617 *
2618 * CsumStat - OID handler function of OID_SKGE_CHKSM_XXX
2619 *
2620 * Description:
2621 * Retrieves the statistic values of the CSUM module. The CSUM data
2622 * structure must be available in the SK_AC even if the CSUM module
2623 * is not included, because PNMI reads the statistic data from the
2624 * CSUM part of SK_AC directly.
2625 *
2626 * Returns:
2627 * SK_PNMI_ERR_OK The request was successfully performed.
 2628 *	SK_PNMI_ERR_GENERAL	   A general severe internal error occurred.
2629 * SK_PNMI_ERR_TOO_SHORT The passed buffer is too short to contain
2630 * the correct data (e.g. a 32bit value is
2631 * needed, but a 16 bit value was passed).
2632 * SK_PNMI_ERR_UNKNOWN_INST The requested instance of the OID doesn't
2633 * exist (e.g. port instance 3 on a two port
 2634 *				   adapter).
2635 */
2636PNMI_STATIC int CsumStat(
2637SK_AC *pAC, /* Pointer to adapter context */
2638SK_IOC IoC, /* IO context handle */
2639int Action, /* GET/PRESET/SET action */
2640SK_U32 Id, /* Object ID that is to be processed */
2641char *pBuf, /* Buffer used for the management data transfer */
2642unsigned int *pLen, /* On call: pBuf buffer length. On return: used buffer */
2643SK_U32 Instance, /* Instance (1..n) that is to be queried or -1 */
2644unsigned int TableIndex, /* Index to the Id table */
2645SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */
2646{
2647 unsigned int Index;
2648 unsigned int Limit;
2649 unsigned int Offset = 0;
2650 SK_U64 StatVal;
2651
2652
2653 /*
2654 * Calculate instance if wished
2655 */
2656 if (Instance != (SK_U32)(-1)) {
2657
2658 if ((Instance < 1) || (Instance > SKCS_NUM_PROTOCOLS)) {
2659
2660 *pLen = 0;
2661 return (SK_PNMI_ERR_UNKNOWN_INST);
2662 }
2663 Index = (unsigned int)Instance - 1;
2664 Limit = Index + 1;
2665 }
2666 else {
2667 Index = 0;
2668 Limit = SKCS_NUM_PROTOCOLS;
2669 }
2670
2671 /*
2672 * Check action
2673 */
2674 if (Action != SK_PNMI_GET) {
2675
2676 *pLen = 0;
2677 return (SK_PNMI_ERR_READ_ONLY);
2678 }
2679
2680 /* Check length */
2681 if (*pLen < (Limit - Index) * sizeof(SK_U64)) {
2682
2683 *pLen = (Limit - Index) * sizeof(SK_U64);
2684 return (SK_PNMI_ERR_TOO_SHORT);
2685 }
2686
2687 /*
2688 * Get value
2689 */
2690 for (; Index < Limit; Index ++) {
2691
2692 switch (Id) {
2693
2694 case OID_SKGE_CHKSM_RX_OK_CTS:
2695 StatVal = pAC->Csum.ProtoStats[NetIndex][Index].RxOkCts;
2696 break;
2697
2698 case OID_SKGE_CHKSM_RX_UNABLE_CTS:
2699 StatVal = pAC->Csum.ProtoStats[NetIndex][Index].RxUnableCts;
2700 break;
2701
2702 case OID_SKGE_CHKSM_RX_ERR_CTS:
2703 StatVal = pAC->Csum.ProtoStats[NetIndex][Index].RxErrCts;
2704 break;
2705
2706 case OID_SKGE_CHKSM_TX_OK_CTS:
2707 StatVal = pAC->Csum.ProtoStats[NetIndex][Index].TxOkCts;
2708 break;
2709
2710 case OID_SKGE_CHKSM_TX_UNABLE_CTS:
2711 StatVal = pAC->Csum.ProtoStats[NetIndex][Index].TxUnableCts;
2712 break;
2713
2714 default:
2715 SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR010,
2716 SK_PNMI_ERR010MSG);
2717
2718 *pLen = 0;
2719 return (SK_PNMI_ERR_GENERAL);
2720 }
2721
2722 SK_PNMI_STORE_U64(pBuf + Offset, StatVal);
2723 Offset += sizeof(SK_U64);
2724 }
2725
2726 /*
2727 * Store used buffer space
2728 */
2729 *pLen = Offset;
2730
2731 return (SK_PNMI_ERR_OK);
2732}
2733
2734/*****************************************************************************
2735 *
2736 * SensorStat - OID handler function of OID_SKGE_SENSOR_XXX
2737 *
2738 * Description:
2739 * Retrieves the statistic values of the I2C module, which handles
2740 * the temperature and voltage sensors.
2741 *
2742 * Returns:
2743 * SK_PNMI_ERR_OK The request was successfully performed.
 2744 *	SK_PNMI_ERR_GENERAL	   A general severe internal error occurred.
2745 * SK_PNMI_ERR_TOO_SHORT The passed buffer is too short to contain
2746 * the correct data (e.g. a 32bit value is
2747 * needed, but a 16 bit value was passed).
2748 * SK_PNMI_ERR_UNKNOWN_INST The requested instance of the OID doesn't
2749 * exist (e.g. port instance 3 on a two port
 2750 *				   adapter).
2751 */
2752PNMI_STATIC int SensorStat(
2753SK_AC *pAC, /* Pointer to adapter context */
2754SK_IOC IoC, /* IO context handle */
2755int Action, /* GET/PRESET/SET action */
2756SK_U32 Id, /* Object ID that is to be processed */
2757char *pBuf, /* Buffer used for the management data transfer */
2758unsigned int *pLen, /* On call: pBuf buffer length. On return: used buffer */
2759SK_U32 Instance, /* Instance (1..n) that is to be queried or -1 */
2760unsigned int TableIndex, /* Index to the Id table */
2761SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */
2762{
2763 unsigned int i;
2764 unsigned int Index;
2765 unsigned int Limit;
2766 unsigned int Offset;
2767 unsigned int Len;
2768 SK_U32 Val32;
2769 SK_U64 Val64;
2770
2771
2772 /*
2773 * Calculate instance if wished
2774 */
2775 if ((Instance != (SK_U32)(-1))) {
2776
2777 if ((Instance < 1) || (Instance > (SK_U32)pAC->I2c.MaxSens)) {
2778
2779 *pLen = 0;
2780 return (SK_PNMI_ERR_UNKNOWN_INST);
2781 }
2782
 2783		Index = (unsigned int)Instance - 1;
2784 Limit = (unsigned int)Instance;
2785 }
2786 else {
2787 Index = 0;
2788 Limit = (unsigned int) pAC->I2c.MaxSens;
2789 }
2790
2791 /*
2792 * Check action
2793 */
2794 if (Action != SK_PNMI_GET) {
2795
2796 *pLen = 0;
2797 return (SK_PNMI_ERR_READ_ONLY);
2798 }
2799
2800 /* Check length */
2801 switch (Id) {
2802
2803 case OID_SKGE_SENSOR_VALUE:
2804 case OID_SKGE_SENSOR_WAR_THRES_LOW:
2805 case OID_SKGE_SENSOR_WAR_THRES_UPP:
2806 case OID_SKGE_SENSOR_ERR_THRES_LOW:
2807 case OID_SKGE_SENSOR_ERR_THRES_UPP:
2808 if (*pLen < (Limit - Index) * sizeof(SK_U32)) {
2809
2810 *pLen = (Limit - Index) * sizeof(SK_U32);
2811 return (SK_PNMI_ERR_TOO_SHORT);
2812 }
2813 break;
2814
2815 case OID_SKGE_SENSOR_DESCR:
2816 for (Offset = 0, i = Index; i < Limit; i ++) {
2817
2818 Len = (unsigned int)
2819 SK_STRLEN(pAC->I2c.SenTable[i].SenDesc) + 1;
2820 if (Len >= SK_PNMI_STRINGLEN2) {
2821
2822 SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR011,
2823 SK_PNMI_ERR011MSG);
2824
2825 *pLen = 0;
2826 return (SK_PNMI_ERR_GENERAL);
2827 }
2828 Offset += Len;
2829 }
2830 if (*pLen < Offset) {
2831
2832 *pLen = Offset;
2833 return (SK_PNMI_ERR_TOO_SHORT);
2834 }
2835 break;
2836
2837 case OID_SKGE_SENSOR_INDEX:
2838 case OID_SKGE_SENSOR_TYPE:
2839 case OID_SKGE_SENSOR_STATUS:
2840 if (*pLen < Limit - Index) {
2841
2842 *pLen = Limit - Index;
2843 return (SK_PNMI_ERR_TOO_SHORT);
2844 }
2845 break;
2846
2847 case OID_SKGE_SENSOR_WAR_CTS:
2848 case OID_SKGE_SENSOR_WAR_TIME:
2849 case OID_SKGE_SENSOR_ERR_CTS:
2850 case OID_SKGE_SENSOR_ERR_TIME:
2851 if (*pLen < (Limit - Index) * sizeof(SK_U64)) {
2852
2853 *pLen = (Limit - Index) * sizeof(SK_U64);
2854 return (SK_PNMI_ERR_TOO_SHORT);
2855 }
2856 break;
2857
2858 default:
2859 SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR012,
2860 SK_PNMI_ERR012MSG);
2861
2862 *pLen = 0;
2863 return (SK_PNMI_ERR_GENERAL);
2864
2865 }
2866
2867 /*
2868 * Get value
2869 */
2870 for (Offset = 0; Index < Limit; Index ++) {
2871
2872 switch (Id) {
2873
2874 case OID_SKGE_SENSOR_INDEX:
2875 *(pBuf + Offset) = (char)Index;
2876 Offset += sizeof(char);
2877 break;
2878
2879 case OID_SKGE_SENSOR_DESCR:
2880 Len = SK_STRLEN(pAC->I2c.SenTable[Index].SenDesc);
2881 SK_MEMCPY(pBuf + Offset + 1,
2882 pAC->I2c.SenTable[Index].SenDesc, Len);
2883 *(pBuf + Offset) = (char)Len;
2884 Offset += Len + 1;
2885 break;
2886
2887 case OID_SKGE_SENSOR_TYPE:
2888 *(pBuf + Offset) =
2889 (char)pAC->I2c.SenTable[Index].SenType;
2890 Offset += sizeof(char);
2891 break;
2892
2893 case OID_SKGE_SENSOR_VALUE:
2894 Val32 = (SK_U32)pAC->I2c.SenTable[Index].SenValue;
2895 SK_PNMI_STORE_U32(pBuf + Offset, Val32);
2896 Offset += sizeof(SK_U32);
2897 break;
2898
2899 case OID_SKGE_SENSOR_WAR_THRES_LOW:
2900 Val32 = (SK_U32)pAC->I2c.SenTable[Index].
2901 SenThreWarnLow;
2902 SK_PNMI_STORE_U32(pBuf + Offset, Val32);
2903 Offset += sizeof(SK_U32);
2904 break;
2905
2906 case OID_SKGE_SENSOR_WAR_THRES_UPP:
2907 Val32 = (SK_U32)pAC->I2c.SenTable[Index].
2908 SenThreWarnHigh;
2909 SK_PNMI_STORE_U32(pBuf + Offset, Val32);
2910 Offset += sizeof(SK_U32);
2911 break;
2912
2913 case OID_SKGE_SENSOR_ERR_THRES_LOW:
2914 Val32 = (SK_U32)pAC->I2c.SenTable[Index].
2915 SenThreErrLow;
2916 SK_PNMI_STORE_U32(pBuf + Offset, Val32);
2917 Offset += sizeof(SK_U32);
2918 break;
2919
2920 case OID_SKGE_SENSOR_ERR_THRES_UPP:
2921 Val32 = pAC->I2c.SenTable[Index].SenThreErrHigh;
2922 SK_PNMI_STORE_U32(pBuf + Offset, Val32);
2923 Offset += sizeof(SK_U32);
2924 break;
2925
2926 case OID_SKGE_SENSOR_STATUS:
2927 *(pBuf + Offset) =
2928 (char)pAC->I2c.SenTable[Index].SenErrFlag;
2929 Offset += sizeof(char);
2930 break;
2931
2932 case OID_SKGE_SENSOR_WAR_CTS:
2933 Val64 = pAC->I2c.SenTable[Index].SenWarnCts;
2934 SK_PNMI_STORE_U64(pBuf + Offset, Val64);
2935 Offset += sizeof(SK_U64);
2936 break;
2937
2938 case OID_SKGE_SENSOR_ERR_CTS:
2939 Val64 = pAC->I2c.SenTable[Index].SenErrCts;
2940 SK_PNMI_STORE_U64(pBuf + Offset, Val64);
2941 Offset += sizeof(SK_U64);
2942 break;
2943
2944 case OID_SKGE_SENSOR_WAR_TIME:
2945 Val64 = SK_PNMI_HUNDREDS_SEC(pAC->I2c.SenTable[Index].
2946 SenBegWarnTS);
2947 SK_PNMI_STORE_U64(pBuf + Offset, Val64);
2948 Offset += sizeof(SK_U64);
2949 break;
2950
2951 case OID_SKGE_SENSOR_ERR_TIME:
2952 Val64 = SK_PNMI_HUNDREDS_SEC(pAC->I2c.SenTable[Index].
2953 SenBegErrTS);
2954 SK_PNMI_STORE_U64(pBuf + Offset, Val64);
2955 Offset += sizeof(SK_U64);
2956 break;
2957
2958 default:
2959 SK_DBG_MSG(pAC, SK_DBGMOD_PNMI, SK_DBGCAT_ERR,
2960 ("SensorStat: Unknown OID should be handled before"));
2961
2962 return (SK_PNMI_ERR_GENERAL);
2963 }
2964 }
2965
2966 /*
2967 * Store used buffer space
2968 */
2969 *pLen = Offset;
2970
2971 return (SK_PNMI_ERR_OK);
2972}
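
/*
 * Editor's note (illustrative sketch, not part of the original driver):
 * all handlers in this module share the same *pLen contract: the caller
 * passes the buffer size in *pLen, and on SK_PNMI_ERR_TOO_SHORT the
 * handler writes the required size back into *pLen so that the caller
 * can retry with a larger buffer. A hypothetical direct caller of
 * SensorStat() (it is normally reached only through the OID dispatch
 * table, which also supplies TableIndex) could look like this:
 */
#if 0	/* hypothetical example only */
static int ExampleReadSensorValues(
SK_AC *pAC,	/* Pointer to adapter context */
SK_IOC IoC,	/* IO context handle */
SK_U32 NetIndex)
{
	char Buf[64];
	unsigned int Len = sizeof(Buf);
	int Ret;

	/* Instance -1 requests the values of all sensors at once. */
	Ret = SensorStat(pAC, IoC, SK_PNMI_GET, OID_SKGE_SENSOR_VALUE,
		Buf, &Len, (SK_U32)(-1), 0, NetIndex);

	if (Ret == SK_PNMI_ERR_TOO_SHORT) {
		/* Len now holds the number of bytes actually required. */
		return (Ret);
	}

	/* On SK_PNMI_ERR_OK, Len is the number of bytes written to Buf,
	 * one SK_U32 per sensor. */
	return (Ret);
}
#endif
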
2973
2974/*****************************************************************************
2975 *
2976 * Vpd - OID handler function of OID_SKGE_VPD_XXX
2977 *
2978 * Description:
2979 * Gets/presets/sets VPD data. The name of a VPD key may be passed
2980 * as the instance: the Instance parameter is an SK_U32 and can be
2981 * used as a string buffer for the VPD key, because keys are at most
2982 * 4 bytes long.
2983 *
2984 * Returns:
2985 * SK_PNMI_ERR_OK The request was successfully performed.
2986 * SK_PNMI_ERR_GENERAL A general severe internal error occurred.
2987 * SK_PNMI_ERR_TOO_SHORT The passed buffer is too short to contain
2988 * the correct data (e.g. a 32bit value is
2989 * needed, but a 16 bit value was passed).
2990 * SK_PNMI_ERR_BAD_VALUE The passed value is not in the valid
2991 * value range.
2992 * SK_PNMI_ERR_READ_ONLY The OID is read-only and cannot be set.
2993 * SK_PNMI_ERR_UNKNOWN_INST The requested instance of the OID doesn't
2994 * exist (e.g. port instance 3 on a two port
2995 * adapter).
2996 */
2997PNMI_STATIC int Vpd(
2998SK_AC *pAC, /* Pointer to adapter context */
2999SK_IOC IoC, /* IO context handle */
3000int Action, /* GET/PRESET/SET action */
3001SK_U32 Id, /* Object ID that is to be processed */
3002char *pBuf, /* Buffer used for the management data transfer */
3003unsigned int *pLen, /* On call: pBuf buffer length. On return: used buffer */
3004SK_U32 Instance, /* Instance (1..n) that is to be queried or -1 */
3005unsigned int TableIndex, /* Index to the Id table */
3006SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */
3007{
3008 SK_VPD_STATUS *pVpdStatus;
3009 unsigned int BufLen;
3010 char Buf[256];
3011 char KeyArr[SK_PNMI_VPD_ENTRIES][SK_PNMI_VPD_KEY_SIZE];
3012 char KeyStr[SK_PNMI_VPD_KEY_SIZE];
3013 unsigned int KeyNo;
3014 unsigned int Offset;
3015 unsigned int Index;
3016 unsigned int FirstIndex;
3017 unsigned int LastIndex;
3018 unsigned int Len;
3019 int Ret;
3020 SK_U32 Val32;
3021
3022 /*
3023 * Get array of all currently stored VPD keys
3024 */
3025 Ret = GetVpdKeyArr(pAC, IoC, &KeyArr[0][0], sizeof(KeyArr), &KeyNo);
3026 if (Ret != SK_PNMI_ERR_OK) {
3027 *pLen = 0;
3028 return (Ret);
3029 }
3030
3031 /*
3032 * If instance is not -1, try to find the requested VPD key for
3033 * the multiple instance variables. Other OIDs, for example
3034 * OID_SKGE_VPD_ACTION, are single instance variables and must be
3035 * handled separately.
3036 */
3037 FirstIndex = 0;
3038 LastIndex = KeyNo;
3039
3040 if ((Instance != (SK_U32)(-1))) {
3041
3042 if (Id == OID_SKGE_VPD_KEY || Id == OID_SKGE_VPD_VALUE ||
3043 Id == OID_SKGE_VPD_ACCESS) {
3044
3045 SK_STRNCPY(KeyStr, (char *)&Instance, 4);
3046 KeyStr[4] = 0;
3047
3048 for (Index = 0; Index < KeyNo; Index ++) {
3049
3050 if (SK_STRCMP(KeyStr, KeyArr[Index]) == 0) {
3051 FirstIndex = Index;
3052 LastIndex = Index+1;
3053 break;
3054 }
3055 }
3056 if (Index == KeyNo) {
3057
3058 *pLen = 0;
3059 return (SK_PNMI_ERR_UNKNOWN_INST);
3060 }
3061 }
3062 else if (Instance != 1) {
3063
3064 *pLen = 0;
3065 return (SK_PNMI_ERR_UNKNOWN_INST);
3066 }
3067 }
3068
3069 /*
3070 * Get value, if a query should be performed
3071 */
3072 if (Action == SK_PNMI_GET) {
3073
3074 switch (Id) {
3075
3076 case OID_SKGE_VPD_FREE_BYTES:
3077 /* Check length of buffer */
3078 if (*pLen < sizeof(SK_U32)) {
3079
3080 *pLen = sizeof(SK_U32);
3081 return (SK_PNMI_ERR_TOO_SHORT);
3082 }
3083 /* Get number of free bytes */
3084 pVpdStatus = VpdStat(pAC, IoC);
3085 if (pVpdStatus == NULL) {
3086
3087 SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR017,
3088 SK_PNMI_ERR017MSG);
3089
3090 *pLen = 0;
3091 return (SK_PNMI_ERR_GENERAL);
3092 }
3093 if ((pVpdStatus->vpd_status & VPD_VALID) == 0) {
3094
3095 SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR018,
3096 SK_PNMI_ERR018MSG);
3097
3098 *pLen = 0;
3099 return (SK_PNMI_ERR_GENERAL);
3100 }
3101
3102 Val32 = (SK_U32)pVpdStatus->vpd_free_rw;
3103 SK_PNMI_STORE_U32(pBuf, Val32);
3104 *pLen = sizeof(SK_U32);
3105 break;
3106
3107 case OID_SKGE_VPD_ENTRIES_LIST:
3108 /* Check length */
3109 for (Len = 0, Index = 0; Index < KeyNo; Index ++) {
3110
3111 Len += SK_STRLEN(KeyArr[Index]) + 1;
3112 }
3113 if (*pLen < Len) {
3114
3115 *pLen = Len;
3116 return (SK_PNMI_ERR_TOO_SHORT);
3117 }
3118
3119 /* Get value */
3120 *(pBuf) = (char)Len - 1;
3121 for (Offset = 1, Index = 0; Index < KeyNo; Index ++) {
3122
3123 Len = SK_STRLEN(KeyArr[Index]);
3124 SK_MEMCPY(pBuf + Offset, KeyArr[Index], Len);
3125
3126 Offset += Len;
3127
3128 if (Index < KeyNo - 1) {
3129
3130 *(pBuf + Offset) = ' ';
3131 Offset ++;
3132 }
3133 }
3134 *pLen = Offset;
3135 break;
3136
3137 case OID_SKGE_VPD_ENTRIES_NUMBER:
3138 /* Check length */
3139 if (*pLen < sizeof(SK_U32)) {
3140
3141 *pLen = sizeof(SK_U32);
3142 return (SK_PNMI_ERR_TOO_SHORT);
3143 }
3144
3145 Val32 = (SK_U32)KeyNo;
3146 SK_PNMI_STORE_U32(pBuf, Val32);
3147 *pLen = sizeof(SK_U32);
3148 break;
3149
3150 case OID_SKGE_VPD_KEY:
3151 /* Check whether the buffer is large enough */
3152 for (Len = 0, Index = FirstIndex;
3153 Index < LastIndex; Index ++) {
3154
3155 Len += SK_STRLEN(KeyArr[Index]) + 1;
3156 }
3157 if (*pLen < Len) {
3158
3159 *pLen = Len;
3160 return (SK_PNMI_ERR_TOO_SHORT);
3161 }
3162
3163 /*
3164 * Copy each key directly into the buffer, prepending
3165 * a length byte.
3166 */
3167 for (Offset = 0, Index = FirstIndex;
3168 Index < LastIndex; Index ++) {
3169
3170 Len = SK_STRLEN(KeyArr[Index]);
3171
3172 *(pBuf + Offset) = (char)Len;
3173 SK_MEMCPY(pBuf + Offset + 1, KeyArr[Index],
3174 Len);
3175 Offset += Len + 1;
3176 }
3177 *pLen = Offset;
3178 break;
3179
3180 case OID_SKGE_VPD_VALUE:
3181 /* Check whether the buffer is large enough */
3182 for (Offset = 0, Index = FirstIndex;
3183 Index < LastIndex; Index ++) {
3184
3185 BufLen = 256;
3186 if (VpdRead(pAC, IoC, KeyArr[Index], Buf,
3187 (int *)&BufLen) > 0 ||
3188 BufLen >= SK_PNMI_VPD_DATALEN) {
3189
3190 SK_ERR_LOG(pAC, SK_ERRCL_SW,
3191 SK_PNMI_ERR021,
3192 SK_PNMI_ERR021MSG);
3193
3194 return (SK_PNMI_ERR_GENERAL);
3195 }
3196 Offset += BufLen + 1;
3197 }
3198 if (*pLen < Offset) {
3199
3200 *pLen = Offset;
3201 return (SK_PNMI_ERR_TOO_SHORT);
3202 }
3203
3204 /*
3205 * Read each value into an intermediate buffer first, because
3206 * we have to prepend a length byte.
3207 */
3208 for (Offset = 0, Index = FirstIndex;
3209 Index < LastIndex; Index ++) {
3210
3211 BufLen = 256;
3212 if (VpdRead(pAC, IoC, KeyArr[Index], Buf,
3213 (int *)&BufLen) > 0 ||
3214 BufLen >= SK_PNMI_VPD_DATALEN) {
3215
3216 SK_ERR_LOG(pAC, SK_ERRCL_SW,
3217 SK_PNMI_ERR022,
3218 SK_PNMI_ERR022MSG);
3219
3220 *pLen = 0;
3221 return (SK_PNMI_ERR_GENERAL);
3222 }
3223
3224 *(pBuf + Offset) = (char)BufLen;
3225 SK_MEMCPY(pBuf + Offset + 1, Buf, BufLen);
3226 Offset += BufLen + 1;
3227 }
3228 *pLen = Offset;
3229 break;
3230
3231 case OID_SKGE_VPD_ACCESS:
3232 if (*pLen < LastIndex - FirstIndex) {
3233
3234 *pLen = LastIndex - FirstIndex;
3235 return (SK_PNMI_ERR_TOO_SHORT);
3236 }
3237
3238 for (Offset = 0, Index = FirstIndex;
3239 Index < LastIndex; Index ++) {
3240
3241 if (VpdMayWrite(KeyArr[Index])) {
3242
3243 *(pBuf + Offset) = SK_PNMI_VPD_RW;
3244 }
3245 else {
3246 *(pBuf + Offset) = SK_PNMI_VPD_RO;
3247 }
3248 Offset ++;
3249 }
3250 *pLen = Offset;
3251 break;
3252
3253 case OID_SKGE_VPD_ACTION:
3254 Offset = LastIndex - FirstIndex;
3255 if (*pLen < Offset) {
3256
3257 *pLen = Offset;
3258 return (SK_PNMI_ERR_TOO_SHORT);
3259 }
3260 SK_MEMSET(pBuf, 0, Offset);
3261 *pLen = Offset;
3262 break;
3263
3264 default:
3265 SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR023,
3266 SK_PNMI_ERR023MSG);
3267
3268 *pLen = 0;
3269 return (SK_PNMI_ERR_GENERAL);
3270 }
3271 }
3272 else {
3273 /* The only OID which can be set is VPD_ACTION */
3274 if (Id != OID_SKGE_VPD_ACTION) {
3275
3276 if (Id == OID_SKGE_VPD_FREE_BYTES ||
3277 Id == OID_SKGE_VPD_ENTRIES_LIST ||
3278 Id == OID_SKGE_VPD_ENTRIES_NUMBER ||
3279 Id == OID_SKGE_VPD_KEY ||
3280 Id == OID_SKGE_VPD_VALUE ||
3281 Id == OID_SKGE_VPD_ACCESS) {
3282
3283 *pLen = 0;
3284 return (SK_PNMI_ERR_READ_ONLY);
3285 }
3286
3287 SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR024,
3288 SK_PNMI_ERR024MSG);
3289
3290 *pLen = 0;
3291 return (SK_PNMI_ERR_GENERAL);
3292 }
3293
3294 /*
3295 * From this point we handle VPD_ACTION. Check the buffer
3296 * length. It should at least have the size of one byte.
3297 */
3298 if (*pLen < 1) {
3299
3300 *pLen = 1;
3301 return (SK_PNMI_ERR_TOO_SHORT);
3302 }
3303
3304 /*
3305 * The first byte contains the VPD action type we should
3306 * perform.
3307 */
3308 switch (*pBuf) {
3309
3310 case SK_PNMI_VPD_IGNORE:
3311 /* Nothing to do */
3312 break;
3313
3314 case SK_PNMI_VPD_CREATE:
3315 /*
3316 * We have to create a new VPD entry or modify
3317 * an existing one. First check the buffer length.
3318 */
3319 if (*pLen < 4) {
3320
3321 *pLen = 4;
3322 return (SK_PNMI_ERR_TOO_SHORT);
3323 }
3324 KeyStr[0] = pBuf[1];
3325 KeyStr[1] = pBuf[2];
3326 KeyStr[2] = 0;
3327
3328 /*
3329 * Is the entry writable or does it belong to the
3330 * read-only area?
3331 */
3332 if (!VpdMayWrite(KeyStr)) {
3333
3334 *pLen = 0;
3335 return (SK_PNMI_ERR_BAD_VALUE);
3336 }
3337
3338 Offset = (int)pBuf[3] & 0xFF;
3339
3340 SK_MEMCPY(Buf, pBuf + 4, Offset);
3341 Buf[Offset] = 0;
3342
3343 /* A preset ends here */
3344 if (Action == SK_PNMI_PRESET) {
3345
3346 return (SK_PNMI_ERR_OK);
3347 }
3348
3349 /* Write the new entry or modify an existing one */
3350 Ret = VpdWrite(pAC, IoC, KeyStr, Buf);
3351 if (Ret == SK_PNMI_VPD_NOWRITE ) {
3352
3353 *pLen = 0;
3354 return (SK_PNMI_ERR_BAD_VALUE);
3355 }
3356 else if (Ret != SK_PNMI_VPD_OK) {
3357
3358 SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR025,
3359 SK_PNMI_ERR025MSG);
3360
3361 *pLen = 0;
3362 return (SK_PNMI_ERR_GENERAL);
3363 }
3364
3365 /*
3366 * Perform an update of the VPD data. This is
3367 * not mandatory, but just to be sure.
3368 */
3369 Ret = VpdUpdate(pAC, IoC);
3370 if (Ret != SK_PNMI_VPD_OK) {
3371
3372 SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR026,
3373 SK_PNMI_ERR026MSG);
3374
3375 *pLen = 0;
3376 return (SK_PNMI_ERR_GENERAL);
3377 }
3378 break;
3379
3380 case SK_PNMI_VPD_DELETE:
3381 /* Check if the buffer size is plausible */
3382 if (*pLen < 3) {
3383
3384 *pLen = 3;
3385 return (SK_PNMI_ERR_TOO_SHORT);
3386 }
3387 if (*pLen > 3) {
3388
3389 *pLen = 0;
3390 return (SK_PNMI_ERR_BAD_VALUE);
3391 }
3392 KeyStr[0] = pBuf[1];
3393 KeyStr[1] = pBuf[2];
3394 KeyStr[2] = 0;
3395
3396 /* Find the passed key in the array */
3397 for (Index = 0; Index < KeyNo; Index ++) {
3398
3399 if (SK_STRCMP(KeyStr, KeyArr[Index]) == 0) {
3400
3401 break;
3402 }
3403 }
3404 /*
3405 * If we cannot find the key it is wrong, so we
3406 * return an appropriate error value.
3407 */
3408 if (Index == KeyNo) {
3409
3410 *pLen = 0;
3411 return (SK_PNMI_ERR_BAD_VALUE);
3412 }
3413
3414 if (Action == SK_PNMI_PRESET) {
3415
3416 return (SK_PNMI_ERR_OK);
3417 }
3418
3419 /* Ok, you wanted it and you will get it */
3420 Ret = VpdDelete(pAC, IoC, KeyStr);
3421 if (Ret != SK_PNMI_VPD_OK) {
3422
3423 SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR027,
3424 SK_PNMI_ERR027MSG);
3425
3426 *pLen = 0;
3427 return (SK_PNMI_ERR_GENERAL);
3428 }
3429
3430 /*
3431 * Perform an update of the VPD data. This is
3432 * not mandatory, but just to be sure.
3433 */
3434 Ret = VpdUpdate(pAC, IoC);
3435 if (Ret != SK_PNMI_VPD_OK) {
3436
3437 SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR028,
3438 SK_PNMI_ERR028MSG);
3439
3440 *pLen = 0;
3441 return (SK_PNMI_ERR_GENERAL);
3442 }
3443 break;
3444
3445 default:
3446 *pLen = 0;
3447 return (SK_PNMI_ERR_BAD_VALUE);
3448 }
3449 }
3450
3451 return (SK_PNMI_ERR_OK);
3452}
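
/*
 * Editor's note (illustrative sketch, not part of the original driver):
 * for OID_SKGE_VPD_KEY/VALUE/ACCESS the Instance parameter doubles as a
 * character buffer holding the VPD key name (at most 4 bytes), exactly
 * as the SK_STRNCPY(KeyStr, (char *)&Instance, 4) above reads it back.
 * A hypothetical caller asking for the value of a two character key
 * such as "SN" could therefore pack the key like this:
 */
#if 0	/* hypothetical example only */
static int ExampleReadVpdValue(
SK_AC *pAC,	/* Pointer to adapter context */
SK_IOC IoC)	/* IO context handle */
{
	SK_U32 Instance = 0;
	char Buf[SK_PNMI_VPD_DATALEN];
	unsigned int Len = sizeof(Buf);

	/* Copy the two character key into the 32 bit instance value;
	 * the remaining bytes stay zero and terminate the string. */
	SK_MEMCPY((char *)&Instance, "SN", 2);

	/* On success the first byte of Buf is the value length,
	 * followed by the value itself (see the handler above). */
	return (Vpd(pAC, IoC, SK_PNMI_GET, OID_SKGE_VPD_VALUE,
		Buf, &Len, Instance, 0, 0));
}
#endif
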
3453
3454/*****************************************************************************
3455 *
3456 * General - OID handler function of various single instance OIDs
3457 *
3458 * Description:
3459 * The code is simple. No description necessary.
3460 *
3461 * Returns:
3462 * SK_PNMI_ERR_OK The request was successfully performed.
3463 * SK_PNMI_ERR_GENERAL A general severe internal error occurred.
3464 * SK_PNMI_ERR_TOO_SHORT The passed buffer is too short to contain
3465 * the correct data (e.g. a 32bit value is
3466 * needed, but a 16 bit value was passed).
3467 * SK_PNMI_ERR_UNKNOWN_INST The requested instance of the OID doesn't
3468 * exist (e.g. port instance 3 on a two port
3469 * adapter).
3470 */
3471PNMI_STATIC int General(
3472SK_AC *pAC, /* Pointer to adapter context */
3473SK_IOC IoC, /* IO context handle */
3474int Action, /* GET/PRESET/SET action */
3475SK_U32 Id, /* Object ID that is to be processed */
3476char *pBuf, /* Buffer used for the management data transfer */
3477unsigned int *pLen, /* On call: buffer length. On return: used buffer */
3478SK_U32 Instance, /* Instance (1..n) that is to be queried or -1 */
3479unsigned int TableIndex, /* Index to the Id table */
3480SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */
3481{
3482 int Ret;
3483 unsigned int Index;
3484 unsigned int Len;
3485 unsigned int Offset;
3486 unsigned int Val;
3487 SK_U8 Val8;
3488 SK_U16 Val16;
3489 SK_U32 Val32;
3490 SK_U64 Val64;
3491 SK_U64 Val64RxHwErrs = 0;
3492 SK_U64 Val64TxHwErrs = 0;
3493 SK_BOOL Is64BitReq = SK_FALSE;
3494 char Buf[256];
3495 int MacType;
3496
3497 /*
3498 * Check instance. We only handle single instance variables.
3499 */
3500 if (Instance != (SK_U32)(-1) && Instance != 1) {
3501
3502 *pLen = 0;
3503 return (SK_PNMI_ERR_UNKNOWN_INST);
3504 }
3505
3506 /*
3507 * Check action. We only allow get requests.
3508 */
3509 if (Action != SK_PNMI_GET) {
3510
3511 *pLen = 0;
3512 return (SK_PNMI_ERR_READ_ONLY);
3513 }
3514
3515 MacType = pAC->GIni.GIMacType;
3516
3517 /*
3518 * Check length for the various supported OIDs
3519 */
3520 switch (Id) {
3521
3522 case OID_GEN_XMIT_ERROR:
3523 case OID_GEN_RCV_ERROR:
3524 case OID_GEN_RCV_NO_BUFFER:
3525#ifndef SK_NDIS_64BIT_CTR
3526 if (*pLen < sizeof(SK_U32)) {
3527 *pLen = sizeof(SK_U32);
3528 return (SK_PNMI_ERR_TOO_SHORT);
3529 }
3530
3531#else /* SK_NDIS_64BIT_CTR */
3532
3533 /*
3534 * for compatibility, at least 32 bits are required for this OID
3535 */
3536 if (*pLen < sizeof(SK_U32)) {
3537 /*
3538 * but report the 64-bit size if insufficient
3539 * space is provided
3540 */
3541 *pLen = sizeof(SK_U64);
3542 return (SK_PNMI_ERR_TOO_SHORT);
3543 }
3544
3545 Is64BitReq = (*pLen < sizeof(SK_U64)) ? SK_FALSE : SK_TRUE;
3546#endif /* SK_NDIS_64BIT_CTR */
3547 break;
3548
3549 case OID_SKGE_PORT_NUMBER:
3550 case OID_SKGE_DEVICE_TYPE:
3551 case OID_SKGE_RESULT:
3552 case OID_SKGE_RLMT_MONITOR_NUMBER:
3553 case OID_GEN_TRANSMIT_QUEUE_LENGTH:
3554 case OID_SKGE_TRAP_NUMBER:
3555 case OID_SKGE_MDB_VERSION:
3556 case OID_SKGE_BOARDLEVEL:
3557 case OID_SKGE_CHIPID:
3558 case OID_SKGE_RAMSIZE:
3559 if (*pLen < sizeof(SK_U32)) {
3560
3561 *pLen = sizeof(SK_U32);
3562 return (SK_PNMI_ERR_TOO_SHORT);
3563 }
3564 break;
3565
3566 case OID_SKGE_CHIPSET:
3567 if (*pLen < sizeof(SK_U16)) {
3568
3569 *pLen = sizeof(SK_U16);
3570 return (SK_PNMI_ERR_TOO_SHORT);
3571 }
3572 break;
3573
3574 case OID_SKGE_BUS_TYPE:
3575 case OID_SKGE_BUS_SPEED:
3576 case OID_SKGE_BUS_WIDTH:
3577 case OID_SKGE_SENSOR_NUMBER:
3578 case OID_SKGE_CHKSM_NUMBER:
3579 case OID_SKGE_VAUXAVAIL:
3580 if (*pLen < sizeof(SK_U8)) {
3581
3582 *pLen = sizeof(SK_U8);
3583 return (SK_PNMI_ERR_TOO_SHORT);
3584 }
3585 break;
3586
3587 case OID_SKGE_TX_SW_QUEUE_LEN:
3588 case OID_SKGE_TX_SW_QUEUE_MAX:
3589 case OID_SKGE_TX_RETRY:
3590 case OID_SKGE_RX_INTR_CTS:
3591 case OID_SKGE_TX_INTR_CTS:
3592 case OID_SKGE_RX_NO_BUF_CTS:
3593 case OID_SKGE_TX_NO_BUF_CTS:
3594 case OID_SKGE_TX_USED_DESCR_NO:
3595 case OID_SKGE_RX_DELIVERED_CTS:
3596 case OID_SKGE_RX_OCTETS_DELIV_CTS:
3597 case OID_SKGE_RX_HW_ERROR_CTS:
3598 case OID_SKGE_TX_HW_ERROR_CTS:
3599 case OID_SKGE_IN_ERRORS_CTS:
3600 case OID_SKGE_OUT_ERROR_CTS:
3601 case OID_SKGE_ERR_RECOVERY_CTS:
3602 case OID_SKGE_SYSUPTIME:
3603 if (*pLen < sizeof(SK_U64)) {
3604
3605 *pLen = sizeof(SK_U64);
3606 return (SK_PNMI_ERR_TOO_SHORT);
3607 }
3608 break;
3609
3610 default:
3611 /* Checked later */
3612 break;
3613 }
3614
3615 /* Update statistic */
3616 if (Id == OID_SKGE_RX_HW_ERROR_CTS ||
3617 Id == OID_SKGE_TX_HW_ERROR_CTS ||
3618 Id == OID_SKGE_IN_ERRORS_CTS ||
3619 Id == OID_SKGE_OUT_ERROR_CTS ||
3620 Id == OID_GEN_XMIT_ERROR ||
3621 Id == OID_GEN_RCV_ERROR) {
3622
3623 /* Force the XMAC to update its statistic counters and
3624 * increment the semaphore to indicate that an update
3625 * has already been done.
3626 */
3627 Ret = MacUpdate(pAC, IoC, 0, pAC->GIni.GIMacsFound - 1);
3628 if (Ret != SK_PNMI_ERR_OK) {
3629
3630 *pLen = 0;
3631 return (Ret);
3632 }
3633 pAC->Pnmi.MacUpdatedFlag ++;
3634
3635 /*
3636 * Some OIDs are sums of multiple hardware counters.
3637 * The counters contributing to those OIDs are added
3638 * up now.
3639 */
3640 switch (Id) {
3641
3642 case OID_SKGE_RX_HW_ERROR_CTS:
3643 case OID_SKGE_IN_ERRORS_CTS:
3644 case OID_GEN_RCV_ERROR:
3645 Val64RxHwErrs =
3646 GetStatVal(pAC, IoC, 0, SK_PNMI_HRX_MISSED, NetIndex) +
3647 GetStatVal(pAC, IoC, 0, SK_PNMI_HRX_FRAMING, NetIndex) +
3648 GetStatVal(pAC, IoC, 0, SK_PNMI_HRX_OVERFLOW, NetIndex) +
3649 GetStatVal(pAC, IoC, 0, SK_PNMI_HRX_JABBER, NetIndex) +
3650 GetStatVal(pAC, IoC, 0, SK_PNMI_HRX_CARRIER, NetIndex) +
3651 GetStatVal(pAC, IoC, 0, SK_PNMI_HRX_IRLENGTH, NetIndex) +
3652 GetStatVal(pAC, IoC, 0, SK_PNMI_HRX_SYMBOL, NetIndex) +
3653 GetStatVal(pAC, IoC, 0, SK_PNMI_HRX_SHORTS, NetIndex) +
3654 GetStatVal(pAC, IoC, 0, SK_PNMI_HRX_RUNT, NetIndex) +
3655 GetStatVal(pAC, IoC, 0, SK_PNMI_HRX_TOO_LONG, NetIndex) +
3656 GetStatVal(pAC, IoC, 0, SK_PNMI_HRX_FCS, NetIndex) +
3657 GetStatVal(pAC, IoC, 0, SK_PNMI_HRX_CEXT, NetIndex);
3658 break;
3659
3660 case OID_SKGE_TX_HW_ERROR_CTS:
3661 case OID_SKGE_OUT_ERROR_CTS:
3662 case OID_GEN_XMIT_ERROR:
3663 Val64TxHwErrs =
3664 GetStatVal(pAC, IoC, 0, SK_PNMI_HTX_EXCESS_COL, NetIndex) +
3665 GetStatVal(pAC, IoC, 0, SK_PNMI_HTX_LATE_COL, NetIndex) +
3666 GetStatVal(pAC, IoC, 0, SK_PNMI_HTX_UNDERRUN, NetIndex) +
3667 GetStatVal(pAC, IoC, 0, SK_PNMI_HTX_CARRIER, NetIndex);
3668 break;
3669 }
3670 }
3671
3672 /*
3673 * Retrieve value
3674 */
3675 switch (Id) {
3676
3677 case OID_SKGE_SUPPORTED_LIST:
3678 Len = ID_TABLE_SIZE * sizeof(SK_U32);
3679 if (*pLen < Len) {
3680
3681 *pLen = Len;
3682 return (SK_PNMI_ERR_TOO_SHORT);
3683 }
3684 for (Offset = 0, Index = 0; Offset < Len;
3685 Offset += sizeof(SK_U32), Index ++) {
3686
3687 Val32 = (SK_U32)IdTable[Index].Id;
3688 SK_PNMI_STORE_U32(pBuf + Offset, Val32);
3689 }
3690 *pLen = Len;
3691 break;
3692
3693 case OID_SKGE_BOARDLEVEL:
3694 Val32 = (SK_U32)pAC->GIni.GILevel;
3695 SK_PNMI_STORE_U32(pBuf, Val32);
3696 *pLen = sizeof(SK_U32);
3697 break;
3698
3699 case OID_SKGE_PORT_NUMBER:
3700 Val32 = (SK_U32)pAC->GIni.GIMacsFound;
3701 SK_PNMI_STORE_U32(pBuf, Val32);
3702 *pLen = sizeof(SK_U32);
3703 break;
3704
3705 case OID_SKGE_DEVICE_TYPE:
3706 Val32 = (SK_U32)pAC->Pnmi.DeviceType;
3707 SK_PNMI_STORE_U32(pBuf, Val32);
3708 *pLen = sizeof(SK_U32);
3709 break;
3710
3711 case OID_SKGE_DRIVER_DESCR:
3712 if (pAC->Pnmi.pDriverDescription == NULL) {
3713
3714 SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR007,
3715 SK_PNMI_ERR007MSG);
3716
3717 *pLen = 0;
3718 return (SK_PNMI_ERR_GENERAL);
3719 }
3720
3721 Len = SK_STRLEN(pAC->Pnmi.pDriverDescription) + 1;
3722 if (Len > SK_PNMI_STRINGLEN1) {
3723
3724 SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR029,
3725 SK_PNMI_ERR029MSG);
3726
3727 *pLen = 0;
3728 return (SK_PNMI_ERR_GENERAL);
3729 }
3730
3731 if (*pLen < Len) {
3732
3733 *pLen = Len;
3734 return (SK_PNMI_ERR_TOO_SHORT);
3735 }
3736 *pBuf = (char)(Len - 1);
3737 SK_MEMCPY(pBuf + 1, pAC->Pnmi.pDriverDescription, Len - 1);
3738 *pLen = Len;
3739 break;
3740
3741 case OID_SKGE_DRIVER_VERSION:
3742 if (pAC->Pnmi.pDriverVersion == NULL) {
3743
3744 SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR030,
3745 SK_PNMI_ERR030MSG);
3746
3747 *pLen = 0;
3748 return (SK_PNMI_ERR_GENERAL);
3749 }
3750
3751 Len = SK_STRLEN(pAC->Pnmi.pDriverVersion) + 1;
3752 if (Len > SK_PNMI_STRINGLEN1) {
3753
3754 SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR031,
3755 SK_PNMI_ERR031MSG);
3756
3757 *pLen = 0;
3758 return (SK_PNMI_ERR_GENERAL);
3759 }
3760
3761 if (*pLen < Len) {
3762
3763 *pLen = Len;
3764 return (SK_PNMI_ERR_TOO_SHORT);
3765 }
3766 *pBuf = (char)(Len - 1);
3767 SK_MEMCPY(pBuf + 1, pAC->Pnmi.pDriverVersion, Len - 1);
3768 *pLen = Len;
3769 break;
3770
3771 case OID_SKGE_DRIVER_RELDATE:
3772 if (pAC->Pnmi.pDriverReleaseDate == NULL) {
3773
3774 SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR030,
3775 SK_PNMI_ERR053MSG);
3776
3777 *pLen = 0;
3778 return (SK_PNMI_ERR_GENERAL);
3779 }
3780
3781 Len = SK_STRLEN(pAC->Pnmi.pDriverReleaseDate) + 1;
3782 if (Len > SK_PNMI_STRINGLEN1) {
3783
3784 SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR031,
3785 SK_PNMI_ERR054MSG);
3786
3787 *pLen = 0;
3788 return (SK_PNMI_ERR_GENERAL);
3789 }
3790
3791 if (*pLen < Len) {
3792
3793 *pLen = Len;
3794 return (SK_PNMI_ERR_TOO_SHORT);
3795 }
3796 *pBuf = (char)(Len - 1);
3797 SK_MEMCPY(pBuf + 1, pAC->Pnmi.pDriverReleaseDate, Len - 1);
3798 *pLen = Len;
3799 break;
3800
3801 case OID_SKGE_DRIVER_FILENAME:
3802 if (pAC->Pnmi.pDriverFileName == NULL) {
3803
3804 SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR030,
3805 SK_PNMI_ERR055MSG);
3806
3807 *pLen = 0;
3808 return (SK_PNMI_ERR_GENERAL);
3809 }
3810
3811 Len = SK_STRLEN(pAC->Pnmi.pDriverFileName) + 1;
3812 if (Len > SK_PNMI_STRINGLEN1) {
3813
3814 SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR031,
3815 SK_PNMI_ERR056MSG);
3816
3817 *pLen = 0;
3818 return (SK_PNMI_ERR_GENERAL);
3819 }
3820
3821 if (*pLen < Len) {
3822
3823 *pLen = Len;
3824 return (SK_PNMI_ERR_TOO_SHORT);
3825 }
3826 *pBuf = (char)(Len - 1);
3827 SK_MEMCPY(pBuf + 1, pAC->Pnmi.pDriverFileName, Len - 1);
3828 *pLen = Len;
3829 break;
3830
3831 case OID_SKGE_HW_DESCR:
3832 /*
3833 * The hardware description is located in the VPD. This
3834 * query could be moved to the initialisation routine, but
3835 * the VPD data is cached, so a call here makes
3836 * little difference.
3837 */
3838 Len = 256;
3839 if (VpdRead(pAC, IoC, VPD_NAME, Buf, (int *)&Len) > 0) {
3840
3841 SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR032,
3842 SK_PNMI_ERR032MSG);
3843
3844 *pLen = 0;
3845 return (SK_PNMI_ERR_GENERAL);
3846 }
3847 Len ++;
3848 if (Len > SK_PNMI_STRINGLEN1) {
3849
3850 SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR033,
3851 SK_PNMI_ERR033MSG);
3852
3853 *pLen = 0;
3854 return (SK_PNMI_ERR_GENERAL);
3855 }
3856 if (*pLen < Len) {
3857
3858 *pLen = Len;
3859 return (SK_PNMI_ERR_TOO_SHORT);
3860 }
3861 *pBuf = (char)(Len - 1);
3862 SK_MEMCPY(pBuf + 1, Buf, Len - 1);
3863 *pLen = Len;
3864 break;
3865
3866 case OID_SKGE_HW_VERSION:
3867 /* Oh, I love to do some string manipulation */
3868 if (*pLen < 5) {
3869
3870 *pLen = 5;
3871 return (SK_PNMI_ERR_TOO_SHORT);
3872 }
3873 Val8 = (SK_U8)pAC->GIni.GIPciHwRev;
3874 pBuf[0] = 4;
3875 pBuf[1] = 'v';
3876 pBuf[2] = (char)(0x30 | ((Val8 >> 4) & 0x0F));
3877 pBuf[3] = '.';
3878 pBuf[4] = (char)(0x30 | (Val8 & 0x0F));
3879 *pLen = 5;
3880 break;
3881
3882 case OID_SKGE_CHIPSET:
3883 Val16 = pAC->Pnmi.Chipset;
3884 SK_PNMI_STORE_U16(pBuf, Val16);
3885 *pLen = sizeof(SK_U16);
3886 break;
3887
3888 case OID_SKGE_CHIPID:
3889 Val32 = pAC->GIni.GIChipId;
3890 SK_PNMI_STORE_U32(pBuf, Val32);
3891 *pLen = sizeof(SK_U32);
3892 break;
3893
3894 case OID_SKGE_RAMSIZE:
3895 Val32 = pAC->GIni.GIRamSize;
3896 SK_PNMI_STORE_U32(pBuf, Val32);
3897 *pLen = sizeof(SK_U32);
3898 break;
3899
3900 case OID_SKGE_VAUXAVAIL:
3901 *pBuf = (char) pAC->GIni.GIVauxAvail;
3902 *pLen = sizeof(char);
3903 break;
3904
3905 case OID_SKGE_BUS_TYPE:
3906 *pBuf = (char) SK_PNMI_BUS_PCI;
3907 *pLen = sizeof(char);
3908 break;
3909
3910 case OID_SKGE_BUS_SPEED:
3911 *pBuf = pAC->Pnmi.PciBusSpeed;
3912 *pLen = sizeof(char);
3913 break;
3914
3915 case OID_SKGE_BUS_WIDTH:
3916 *pBuf = pAC->Pnmi.PciBusWidth;
3917 *pLen = sizeof(char);
3918 break;
3919
3920 case OID_SKGE_RESULT:
3921 Val32 = pAC->Pnmi.TestResult;
3922 SK_PNMI_STORE_U32(pBuf, Val32);
3923 *pLen = sizeof(SK_U32);
3924 break;
3925
3926 case OID_SKGE_SENSOR_NUMBER:
3927 *pBuf = (char)pAC->I2c.MaxSens;
3928 *pLen = sizeof(char);
3929 break;
3930
3931 case OID_SKGE_CHKSM_NUMBER:
3932 *pBuf = SKCS_NUM_PROTOCOLS;
3933 *pLen = sizeof(char);
3934 break;
3935
3936 case OID_SKGE_TRAP_NUMBER:
3937 GetTrapQueueLen(pAC, &Len, &Val);
3938 Val32 = (SK_U32)Val;
3939 SK_PNMI_STORE_U32(pBuf, Val32);
3940 *pLen = sizeof(SK_U32);
3941 break;
3942
3943 case OID_SKGE_TRAP:
3944 GetTrapQueueLen(pAC, &Len, &Val);
3945 if (*pLen < Len) {
3946
3947 *pLen = Len;
3948 return (SK_PNMI_ERR_TOO_SHORT);
3949 }
3950 CopyTrapQueue(pAC, pBuf);
3951 *pLen = Len;
3952 break;
3953
3954 case OID_SKGE_RLMT_MONITOR_NUMBER:
3955/* XXX Not yet implemented by RLMT therefore we return zero elements */
3956 Val32 = 0;
3957 SK_PNMI_STORE_U32(pBuf, Val32);
3958 *pLen = sizeof(SK_U32);
3959 break;
3960
3961 case OID_SKGE_TX_SW_QUEUE_LEN:
3962 /* 2002-09-17 pweber: For XMAC, use the frozen SW counters (BufPort) */
3963 if (MacType == SK_MAC_XMAC) {
3964 /* Dual net mode */
3965 if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) {
3966 Val64 = pAC->Pnmi.BufPort[NetIndex].TxSwQueueLen;
3967 }
3968 /* Single net mode */
3969 else {
3970 Val64 = pAC->Pnmi.BufPort[0].TxSwQueueLen +
3971 pAC->Pnmi.BufPort[1].TxSwQueueLen;
3972 }
3973 }
3974 else {
3975 /* Dual net mode */
3976 if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) {
3977 Val64 = pAC->Pnmi.Port[NetIndex].TxSwQueueLen;
3978 }
3979 /* Single net mode */
3980 else {
3981 Val64 = pAC->Pnmi.Port[0].TxSwQueueLen +
3982 pAC->Pnmi.Port[1].TxSwQueueLen;
3983 }
3984 }
3985 SK_PNMI_STORE_U64(pBuf, Val64);
3986 *pLen = sizeof(SK_U64);
3987 break;
3988
3989
3990 case OID_SKGE_TX_SW_QUEUE_MAX:
3991 /* 2002-09-17 pweber: For XMAC, use the frozen SW counters (BufPort) */
3992 if (MacType == SK_MAC_XMAC) {
3993 /* Dual net mode */
3994 if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) {
3995 Val64 = pAC->Pnmi.BufPort[NetIndex].TxSwQueueMax;
3996 }
3997 /* Single net mode */
3998 else {
3999 Val64 = pAC->Pnmi.BufPort[0].TxSwQueueMax +
4000 pAC->Pnmi.BufPort[1].TxSwQueueMax;
4001 }
4002 }
4003 else {
4004 /* Dual net mode */
4005 if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) {
4006 Val64 = pAC->Pnmi.Port[NetIndex].TxSwQueueMax;
4007 }
4008 /* Single net mode */
4009 else {
4010 Val64 = pAC->Pnmi.Port[0].TxSwQueueMax +
4011 pAC->Pnmi.Port[1].TxSwQueueMax;
4012 }
4013 }
4014 SK_PNMI_STORE_U64(pBuf, Val64);
4015 *pLen = sizeof(SK_U64);
4016 break;
4017
4018 case OID_SKGE_TX_RETRY:
4019 /* 2002-09-17 pweber: For XMAC, use the frozen SW counters (BufPort) */
4020 if (MacType == SK_MAC_XMAC) {
4021 /* Dual net mode */
4022 if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) {
4023 Val64 = pAC->Pnmi.BufPort[NetIndex].TxRetryCts;
4024 }
4025 /* Single net mode */
4026 else {
4027 Val64 = pAC->Pnmi.BufPort[0].TxRetryCts +
4028 pAC->Pnmi.BufPort[1].TxRetryCts;
4029 }
4030 }
4031 else {
4032 /* Dual net mode */
4033 if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) {
4034 Val64 = pAC->Pnmi.Port[NetIndex].TxRetryCts;
4035 }
4036 /* Single net mode */
4037 else {
4038 Val64 = pAC->Pnmi.Port[0].TxRetryCts +
4039 pAC->Pnmi.Port[1].TxRetryCts;
4040 }
4041 }
4042 SK_PNMI_STORE_U64(pBuf, Val64);
4043 *pLen = sizeof(SK_U64);
4044 break;
4045
4046 case OID_SKGE_RX_INTR_CTS:
4047 /* 2002-09-17 pweber: For XMAC, use the frozen SW counters (BufPort) */
4048 if (MacType == SK_MAC_XMAC) {
4049 /* Dual net mode */
4050 if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) {
4051 Val64 = pAC->Pnmi.BufPort[NetIndex].RxIntrCts;
4052 }
4053 /* Single net mode */
4054 else {
4055 Val64 = pAC->Pnmi.BufPort[0].RxIntrCts +
4056 pAC->Pnmi.BufPort[1].RxIntrCts;
4057 }
4058 }
4059 else {
4060 /* Dual net mode */
4061 if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) {
4062 Val64 = pAC->Pnmi.Port[NetIndex].RxIntrCts;
4063 }
4064 /* Single net mode */
4065 else {
4066 Val64 = pAC->Pnmi.Port[0].RxIntrCts +
4067 pAC->Pnmi.Port[1].RxIntrCts;
4068 }
4069 }
4070 SK_PNMI_STORE_U64(pBuf, Val64);
4071 *pLen = sizeof(SK_U64);
4072 break;
4073
4074 case OID_SKGE_TX_INTR_CTS:
4075 /* 2002-09-17 pweber: For XMAC, use the frozen SW counters (BufPort) */
4076 if (MacType == SK_MAC_XMAC) {
4077 /* Dual net mode */
4078 if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) {
4079 Val64 = pAC->Pnmi.BufPort[NetIndex].TxIntrCts;
4080 }
4081 /* Single net mode */
4082 else {
4083 Val64 = pAC->Pnmi.BufPort[0].TxIntrCts +
4084 pAC->Pnmi.BufPort[1].TxIntrCts;
4085 }
4086 }
4087 else {
4088 /* Dual net mode */
4089 if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) {
4090 Val64 = pAC->Pnmi.Port[NetIndex].TxIntrCts;
4091 }
4092 /* Single net mode */
4093 else {
4094 Val64 = pAC->Pnmi.Port[0].TxIntrCts +
4095 pAC->Pnmi.Port[1].TxIntrCts;
4096 }
4097 }
4098 SK_PNMI_STORE_U64(pBuf, Val64);
4099 *pLen = sizeof(SK_U64);
4100 break;
4101
4102 case OID_SKGE_RX_NO_BUF_CTS:
4103 /* 2002-09-17 pweber: For XMAC, use the frozen SW counters (BufPort) */
4104 if (MacType == SK_MAC_XMAC) {
4105 /* Dual net mode */
4106 if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) {
4107 Val64 = pAC->Pnmi.BufPort[NetIndex].RxNoBufCts;
4108 }
4109 /* Single net mode */
4110 else {
4111 Val64 = pAC->Pnmi.BufPort[0].RxNoBufCts +
4112 pAC->Pnmi.BufPort[1].RxNoBufCts;
4113 }
4114 }
4115 else {
4116 /* Dual net mode */
4117 if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) {
4118 Val64 = pAC->Pnmi.Port[NetIndex].RxNoBufCts;
4119 }
4120 /* Single net mode */
4121 else {
4122 Val64 = pAC->Pnmi.Port[0].RxNoBufCts +
4123 pAC->Pnmi.Port[1].RxNoBufCts;
4124 }
4125 }
4126 SK_PNMI_STORE_U64(pBuf, Val64);
4127 *pLen = sizeof(SK_U64);
4128 break;
4129
4130 case OID_SKGE_TX_NO_BUF_CTS:
4131 /* 2002-09-17 pweber: For XMAC, use the frozen SW counters (BufPort) */
4132 if (MacType == SK_MAC_XMAC) {
4133 /* Dual net mode */
4134 if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) {
4135 Val64 = pAC->Pnmi.BufPort[NetIndex].TxNoBufCts;
4136 }
4137 /* Single net mode */
4138 else {
4139 Val64 = pAC->Pnmi.BufPort[0].TxNoBufCts +
4140 pAC->Pnmi.BufPort[1].TxNoBufCts;
4141 }
4142 }
4143 else {
4144 /* Dual net mode */
4145 if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) {
4146 Val64 = pAC->Pnmi.Port[NetIndex].TxNoBufCts;
4147 }
4148 /* Single net mode */
4149 else {
4150 Val64 = pAC->Pnmi.Port[0].TxNoBufCts +
4151 pAC->Pnmi.Port[1].TxNoBufCts;
4152 }
4153 }
4154 SK_PNMI_STORE_U64(pBuf, Val64);
4155 *pLen = sizeof(SK_U64);
4156 break;
4157
4158 case OID_SKGE_TX_USED_DESCR_NO:
4159 /* 2002-09-17 pweber: For XMAC, use the frozen SW counters (BufPort) */
4160 if (MacType == SK_MAC_XMAC) {
4161 /* Dual net mode */
4162 if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) {
4163 Val64 = pAC->Pnmi.BufPort[NetIndex].TxUsedDescrNo;
4164 }
4165 /* Single net mode */
4166 else {
4167 Val64 = pAC->Pnmi.BufPort[0].TxUsedDescrNo +
4168 pAC->Pnmi.BufPort[1].TxUsedDescrNo;
4169 }
4170 }
4171 else {
4172 /* Dual net mode */
4173 if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) {
4174 Val64 = pAC->Pnmi.Port[NetIndex].TxUsedDescrNo;
4175 }
4176 /* Single net mode */
4177 else {
4178 Val64 = pAC->Pnmi.Port[0].TxUsedDescrNo +
4179 pAC->Pnmi.Port[1].TxUsedDescrNo;
4180 }
4181 }
4182 SK_PNMI_STORE_U64(pBuf, Val64);
4183 *pLen = sizeof(SK_U64);
4184 break;
4185
4186 case OID_SKGE_RX_DELIVERED_CTS:
4187 /* 2002-09-17 pweber: For XMAC, use the frozen SW counters (BufPort) */
4188 if (MacType == SK_MAC_XMAC) {
4189 /* Dual net mode */
4190 if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) {
4191 Val64 = pAC->Pnmi.BufPort[NetIndex].RxDeliveredCts;
4192 }
4193 /* Single net mode */
4194 else {
4195 Val64 = pAC->Pnmi.BufPort[0].RxDeliveredCts +
4196 pAC->Pnmi.BufPort[1].RxDeliveredCts;
4197 }
4198 }
4199 else {
4200 /* Dual net mode */
4201 if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) {
4202 Val64 = pAC->Pnmi.Port[NetIndex].RxDeliveredCts;
4203 }
4204 /* Single net mode */
4205 else {
4206 Val64 = pAC->Pnmi.Port[0].RxDeliveredCts +
4207 pAC->Pnmi.Port[1].RxDeliveredCts;
4208 }
4209 }
4210 SK_PNMI_STORE_U64(pBuf, Val64);
4211 *pLen = sizeof(SK_U64);
4212 break;
4213
4214 case OID_SKGE_RX_OCTETS_DELIV_CTS:
4215 /* 2002-09-17 pweber: For XMAC, use the frozen SW counters (BufPort) */
4216 if (MacType == SK_MAC_XMAC) {
4217 /* Dual net mode */
4218 if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) {
4219 Val64 = pAC->Pnmi.BufPort[NetIndex].RxOctetsDeliveredCts;
4220 }
4221 /* Single net mode */
4222 else {
4223 Val64 = pAC->Pnmi.BufPort[0].RxOctetsDeliveredCts +
4224 pAC->Pnmi.BufPort[1].RxOctetsDeliveredCts;
4225 }
4226 }
4227 else {
4228 /* Dual net mode */
4229 if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) {
4230 Val64 = pAC->Pnmi.Port[NetIndex].RxOctetsDeliveredCts;
4231 }
4232 /* Single net mode */
4233 else {
4234 Val64 = pAC->Pnmi.Port[0].RxOctetsDeliveredCts +
4235 pAC->Pnmi.Port[1].RxOctetsDeliveredCts;
4236 }
4237 }
4238 SK_PNMI_STORE_U64(pBuf, Val64);
4239 *pLen = sizeof(SK_U64);
4240 break;
4241
4242 case OID_SKGE_RX_HW_ERROR_CTS:
4243 SK_PNMI_STORE_U64(pBuf, Val64RxHwErrs);
4244 *pLen = sizeof(SK_U64);
4245 break;
4246
4247 case OID_SKGE_TX_HW_ERROR_CTS:
4248 SK_PNMI_STORE_U64(pBuf, Val64TxHwErrs);
4249 *pLen = sizeof(SK_U64);
4250 break;
4251
4252 case OID_SKGE_IN_ERRORS_CTS:
4253 /* 2002-09-17 pweber: For XMAC, use the frozen SW counters (BufPort) */
4254 if (MacType == SK_MAC_XMAC) {
4255 /* Dual net mode */
4256 if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) {
4257 Val64 = Val64RxHwErrs + pAC->Pnmi.BufPort[NetIndex].RxNoBufCts;
4258 }
4259 /* Single net mode */
4260 else {
4261 Val64 = Val64RxHwErrs +
4262 pAC->Pnmi.BufPort[0].RxNoBufCts +
4263 pAC->Pnmi.BufPort[1].RxNoBufCts;
4264 }
4265 }
4266 else {
4267 /* Dual net mode */
4268 if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) {
4269 Val64 = Val64RxHwErrs + pAC->Pnmi.Port[NetIndex].RxNoBufCts;
4270 }
4271 /* Single net mode */
4272 else {
4273 Val64 = Val64RxHwErrs +
4274 pAC->Pnmi.Port[0].RxNoBufCts +
4275 pAC->Pnmi.Port[1].RxNoBufCts;
4276 }
4277 }
4278 SK_PNMI_STORE_U64(pBuf, Val64);
4279 *pLen = sizeof(SK_U64);
4280 break;
4281
4282 case OID_SKGE_OUT_ERROR_CTS:
4283 /* 2002-09-17 pweber: For XMAC, use the frozen SW counters (BufPort) */
4284 if (MacType == SK_MAC_XMAC) {
4285 /* Dual net mode */
4286 if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) {
4287 Val64 = Val64TxHwErrs + pAC->Pnmi.BufPort[NetIndex].TxNoBufCts;
4288 }
4289 /* Single net mode */
4290 else {
4291 Val64 = Val64TxHwErrs +
4292 pAC->Pnmi.BufPort[0].TxNoBufCts +
4293 pAC->Pnmi.BufPort[1].TxNoBufCts;
4294 }
4295 }
4296 else {
4297 /* Dual net mode */
4298 if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) {
4299 Val64 = Val64TxHwErrs + pAC->Pnmi.Port[NetIndex].TxNoBufCts;
4300 }
4301 /* Single net mode */
4302 else {
4303 Val64 = Val64TxHwErrs +
4304 pAC->Pnmi.Port[0].TxNoBufCts +
4305 pAC->Pnmi.Port[1].TxNoBufCts;
4306 }
4307 }
4308 SK_PNMI_STORE_U64(pBuf, Val64);
4309 *pLen = sizeof(SK_U64);
4310 break;
4311
4312 case OID_SKGE_ERR_RECOVERY_CTS:
4313 /* 2002-09-17 pweber: For XMAC, use the frozen SW counters (BufPort) */
4314 if (MacType == SK_MAC_XMAC) {
4315 /* Dual net mode */
4316 if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) {
4317 Val64 = pAC->Pnmi.BufPort[NetIndex].ErrRecoveryCts;
4318 }
4319 /* Single net mode */
4320 else {
4321 Val64 = pAC->Pnmi.BufPort[0].ErrRecoveryCts +
4322 pAC->Pnmi.BufPort[1].ErrRecoveryCts;
4323 }
4324 }
4325 else {
4326 /* Dual net mode */
4327 if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) {
4328 Val64 = pAC->Pnmi.Port[NetIndex].ErrRecoveryCts;
4329 }
4330 /* Single net mode */
4331 else {
4332 Val64 = pAC->Pnmi.Port[0].ErrRecoveryCts +
4333 pAC->Pnmi.Port[1].ErrRecoveryCts;
4334 }
4335 }
4336 SK_PNMI_STORE_U64(pBuf, Val64);
4337 *pLen = sizeof(SK_U64);
4338 break;
4339
4340 case OID_SKGE_SYSUPTIME:
4341 Val64 = SK_PNMI_HUNDREDS_SEC(SkOsGetTime(pAC));
4342 Val64 -= pAC->Pnmi.StartUpTime;
4343 SK_PNMI_STORE_U64(pBuf, Val64);
4344 *pLen = sizeof(SK_U64);
4345 break;
4346
4347 case OID_SKGE_MDB_VERSION:
4348 Val32 = SK_PNMI_MDB_VERSION;
4349 SK_PNMI_STORE_U32(pBuf, Val32);
4350 *pLen = sizeof(SK_U32);
4351 break;
4352
4353 case OID_GEN_RCV_ERROR:
4354 /* 2002-09-17 pweber: For XMAC, use the frozen SW counters (BufPort) */
4355 if (MacType == SK_MAC_XMAC) {
4356 Val64 = Val64RxHwErrs + pAC->Pnmi.BufPort[NetIndex].RxNoBufCts;
4357 }
4358 else {
4359 Val64 = Val64RxHwErrs + pAC->Pnmi.Port[NetIndex].RxNoBufCts;
4360 }
4361
4362 /*
4363 * by default, 32-bit values are returned
4364 */
4365 if (!Is64BitReq) {
4366 Val32 = (SK_U32)Val64;
4367 SK_PNMI_STORE_U32(pBuf, Val32);
4368 *pLen = sizeof(SK_U32);
4369 }
4370 else {
4371 SK_PNMI_STORE_U64(pBuf, Val64);
4372 *pLen = sizeof(SK_U64);
4373 }
4374 break;
4375
4376 case OID_GEN_XMIT_ERROR:
4377 /* 2002-09-17 pweber: For XMAC, use the frozen SW counters (BufPort) */
4378 if (MacType == SK_MAC_XMAC) {
4379 Val64 = Val64TxHwErrs + pAC->Pnmi.BufPort[NetIndex].TxNoBufCts;
4380 }
4381 else {
4382 Val64 = Val64TxHwErrs + pAC->Pnmi.Port[NetIndex].TxNoBufCts;
4383 }
4384
4385 /*
4386 * by default, 32-bit values are returned
4387 */
4388 if (!Is64BitReq) {
4389 Val32 = (SK_U32)Val64;
4390 SK_PNMI_STORE_U32(pBuf, Val32);
4391 *pLen = sizeof(SK_U32);
4392 }
4393 else {
4394 SK_PNMI_STORE_U64(pBuf, Val64);
4395 *pLen = sizeof(SK_U64);
4396 }
4397 break;
4398
4399 case OID_GEN_RCV_NO_BUFFER:
4400 /* 2002-09-17 pweber: For XMAC, use the frozen SW counters (BufPort) */
4401 if (MacType == SK_MAC_XMAC) {
4402 Val64 = pAC->Pnmi.BufPort[NetIndex].RxNoBufCts;
4403 }
4404 else {
4405 Val64 = pAC->Pnmi.Port[NetIndex].RxNoBufCts;
4406 }
4407
4408 /*
4409 * by default, 32-bit values are returned
4410 */
4411 if (!Is64BitReq) {
4412 Val32 = (SK_U32)Val64;
4413 SK_PNMI_STORE_U32(pBuf, Val32);
4414 *pLen = sizeof(SK_U32);
4415 }
4416 else {
4417 SK_PNMI_STORE_U64(pBuf, Val64);
4418 *pLen = sizeof(SK_U64);
4419 }
4420 break;
4421
4422 case OID_GEN_TRANSMIT_QUEUE_LENGTH:
4423 Val32 = (SK_U32)pAC->Pnmi.Port[NetIndex].TxSwQueueLen;
4424 SK_PNMI_STORE_U32(pBuf, Val32);
4425 *pLen = sizeof(SK_U32);
4426 break;
4427
4428 default:
4429 SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR034,
4430 SK_PNMI_ERR034MSG);
4431
4432 *pLen = 0;
4433 return (SK_PNMI_ERR_GENERAL);
4434 }
4435
4436 if (Id == OID_SKGE_RX_HW_ERROR_CTS ||
4437 Id == OID_SKGE_TX_HW_ERROR_CTS ||
4438 Id == OID_SKGE_IN_ERRORS_CTS ||
4439 Id == OID_SKGE_OUT_ERROR_CTS ||
4440 Id == OID_GEN_XMIT_ERROR ||
4441 Id == OID_GEN_RCV_ERROR) {
4442
4443 pAC->Pnmi.MacUpdatedFlag --;
4444 }
4445
4446 return (SK_PNMI_ERR_OK);
4447}
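
/*
 * Editor's note (illustrative sketch, not part of the original driver):
 * the OID_SKGE_TX_SW_QUEUE_LEN through OID_SKGE_ERR_RECOVERY_CTS cases
 * above all repeat the same selection: frozen BufPort counters for the
 * XMAC, live Port counters otherwise, and either the per-net value
 * (dual net mode) or the sum of both ports (single net mode). A
 * hypothetical helper macro could factor that pattern out:
 */
#if 0	/* hypothetical example only */
#define EXAMPLE_SW_COUNTER(pAC, MacType, NetIndex, Member)		\
	((MacType) == SK_MAC_XMAC ?					\
		((pAC)->Pnmi.DualNetActiveFlag == SK_TRUE ?		\
			(pAC)->Pnmi.BufPort[NetIndex].Member :		\
			(pAC)->Pnmi.BufPort[0].Member +			\
			(pAC)->Pnmi.BufPort[1].Member) :		\
		((pAC)->Pnmi.DualNetActiveFlag == SK_TRUE ?		\
			(pAC)->Pnmi.Port[NetIndex].Member :		\
			(pAC)->Pnmi.Port[0].Member +			\
			(pAC)->Pnmi.Port[1].Member))

/* Equivalent to the OID_SKGE_TX_SW_QUEUE_LEN case above:
 * Val64 = EXAMPLE_SW_COUNTER(pAC, MacType, NetIndex, TxSwQueueLen);
 */
#endif
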
4448
4449/*****************************************************************************
4450 *
4451 * Rlmt - OID handler function of OID_SKGE_RLMT_XXX single instance.
4452 *
4453 * Description:
4454 * Gets/presets/sets the RLMT OIDs.
4455 *
4456 * Returns:
4457 * SK_PNMI_ERR_OK The request was successfully performed.
4458 * SK_PNMI_ERR_GENERAL A general severe internal error occurred.
4459 * SK_PNMI_ERR_TOO_SHORT The passed buffer is too short to contain
4460 * the correct data (e.g. a 32bit value is
4461 * needed, but a 16 bit value was passed).
4462 * SK_PNMI_ERR_BAD_VALUE The passed value is not in the valid
4463 * value range.
4464 * SK_PNMI_ERR_READ_ONLY The OID is read-only and cannot be set.
4465 * SK_PNMI_ERR_UNKNOWN_INST The requested instance of the OID doesn't
4466 * exist (e.g. port instance 3 on a two port
4467 * adapter).
4468 */
4469PNMI_STATIC int Rlmt(
4470SK_AC *pAC, /* Pointer to adapter context */
4471SK_IOC IoC, /* IO context handle */
4472int Action, /* GET/PRESET/SET action */
4473SK_U32 Id, /* Object ID that is to be processed */
4474char *pBuf, /* Buffer used for the management data transfer */
4475unsigned int *pLen, /* On call: pBuf buffer length. On return: used buffer */
4476SK_U32 Instance, /* Instance (1..n) that is to be queried or -1 */
4477unsigned int TableIndex, /* Index to the Id table */
4478SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */
4479{
4480 int Ret;
4481 unsigned int PhysPortIndex;
4482 unsigned int PhysPortMax;
4483 SK_EVPARA EventParam;
4484 SK_U32 Val32;
4485 SK_U64 Val64;
4486
4487
4488 /*
4489 * Check instance. Only single instance OIDs are allowed here.
4490 */
4491 if (Instance != (SK_U32)(-1) && Instance != 1) {
4492
4493 *pLen = 0;
4494 return (SK_PNMI_ERR_UNKNOWN_INST);
4495 }
4496
4497 /*
4498 * Perform the requested action.
4499 */
4500 if (Action == SK_PNMI_GET) {
4501
4502 /*
4503 * Check if the buffer length is large enough.
4504 */
4505
4506 switch (Id) {
4507
4508 case OID_SKGE_RLMT_MODE:
4509 case OID_SKGE_RLMT_PORT_ACTIVE:
4510 case OID_SKGE_RLMT_PORT_PREFERRED:
4511 if (*pLen < sizeof(SK_U8)) {
4512
4513 *pLen = sizeof(SK_U8);
4514 return (SK_PNMI_ERR_TOO_SHORT);
4515 }
4516 break;
4517
4518 case OID_SKGE_RLMT_PORT_NUMBER:
4519 if (*pLen < sizeof(SK_U32)) {
4520
4521 *pLen = sizeof(SK_U32);
4522 return (SK_PNMI_ERR_TOO_SHORT);
4523 }
4524 break;
4525
4526 case OID_SKGE_RLMT_CHANGE_CTS:
4527 case OID_SKGE_RLMT_CHANGE_TIME:
4528 case OID_SKGE_RLMT_CHANGE_ESTIM:
4529 case OID_SKGE_RLMT_CHANGE_THRES:
4530 if (*pLen < sizeof(SK_U64)) {
4531
4532 *pLen = sizeof(SK_U64);
4533 return (SK_PNMI_ERR_TOO_SHORT);
4534 }
4535 break;
4536
4537 default:
4538 SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR035,
4539 SK_PNMI_ERR035MSG);
4540
4541 *pLen = 0;
4542 return (SK_PNMI_ERR_GENERAL);
4543 }
4544
4545 /*
4546 * Update the RLMT statistics and increment the semaphore to
4547 * indicate that an update has already been done. Perhaps RLMT
4548 * will keep its statistics up to date at some point; then this
4549 * kind of call can be removed.
4550 */
4551 if ((Ret = RlmtUpdate(pAC, IoC, NetIndex)) != SK_PNMI_ERR_OK) {
4552
4553 *pLen = 0;
4554 return (Ret);
4555 }
4556 pAC->Pnmi.RlmtUpdatedFlag ++;
4557
4558 /*
4559 * Retrieve Value
4560 */
4561 switch (Id) {
4562
4563 case OID_SKGE_RLMT_MODE:
4564 *pBuf = (char)pAC->Rlmt.Net[0].RlmtMode;
4565 *pLen = sizeof(char);
4566 break;
4567
4568 case OID_SKGE_RLMT_PORT_NUMBER:
4569 Val32 = (SK_U32)pAC->GIni.GIMacsFound;
4570 SK_PNMI_STORE_U32(pBuf, Val32);
4571 *pLen = sizeof(SK_U32);
4572 break;
4573
4574 case OID_SKGE_RLMT_PORT_ACTIVE:
4575 *pBuf = 0;
4576 /*
4577 * If multiple ports can become active, this OID
4578 * doesn't make sense any more; a new variable
4579 * should be added to the port structure. For now,
4580 * the first active port is returned for this
4581 * variable.
4582 */
4583 PhysPortMax = pAC->GIni.GIMacsFound;
4584
4585 for (PhysPortIndex = 0; PhysPortIndex < PhysPortMax;
4586 PhysPortIndex ++) {
4587
4588 if (pAC->Pnmi.Port[PhysPortIndex].ActiveFlag) {
4589
4590 *pBuf = (char)SK_PNMI_PORT_PHYS2LOG(PhysPortIndex);
4591 break;
4592 }
4593 }
4594 *pLen = sizeof(char);
4595 break;
4596
4597 case OID_SKGE_RLMT_PORT_PREFERRED:
4598 *pBuf = (char)SK_PNMI_PORT_PHYS2LOG(pAC->Rlmt.Net[NetIndex].Preference);
4599 *pLen = sizeof(char);
4600 break;
4601
4602 case OID_SKGE_RLMT_CHANGE_CTS:
4603 Val64 = pAC->Pnmi.RlmtChangeCts;
4604 SK_PNMI_STORE_U64(pBuf, Val64);
4605 *pLen = sizeof(SK_U64);
4606 break;
4607
4608 case OID_SKGE_RLMT_CHANGE_TIME:
4609 Val64 = pAC->Pnmi.RlmtChangeTime;
4610 SK_PNMI_STORE_U64(pBuf, Val64);
4611 *pLen = sizeof(SK_U64);
4612 break;
4613
4614 case OID_SKGE_RLMT_CHANGE_ESTIM:
4615 Val64 = pAC->Pnmi.RlmtChangeEstimate.Estimate;
4616 SK_PNMI_STORE_U64(pBuf, Val64);
4617 *pLen = sizeof(SK_U64);
4618 break;
4619
4620 case OID_SKGE_RLMT_CHANGE_THRES:
4621 Val64 = pAC->Pnmi.RlmtChangeThreshold;
4622 SK_PNMI_STORE_U64(pBuf, Val64);
4623 *pLen = sizeof(SK_U64);
4624 break;
4625
4626 default:
4627 SK_DBG_MSG(pAC, SK_DBGMOD_PNMI, SK_DBGCAT_ERR,
4628 ("Rlmt: Unknown OID should be handled before"));
4629
4630 pAC->Pnmi.RlmtUpdatedFlag --;
4631 *pLen = 0;
4632 return (SK_PNMI_ERR_GENERAL);
4633 }
4634
4635 pAC->Pnmi.RlmtUpdatedFlag --;
4636 }
4637 else {
4638 /* Perform a preset or set */
4639 switch (Id) {
4640
4641 case OID_SKGE_RLMT_MODE:
4642 /* Check if the buffer length is plausible */
4643 if (*pLen < sizeof(char)) {
4644
4645 *pLen = sizeof(char);
4646 return (SK_PNMI_ERR_TOO_SHORT);
4647 }
4648 /* Check if the value range is correct */
4649 if (*pLen != sizeof(char) ||
4650 (*pBuf & SK_PNMI_RLMT_MODE_CHK_LINK) == 0 ||
4651 *(SK_U8 *)pBuf > 15) {
4652
4653 *pLen = 0;
4654 return (SK_PNMI_ERR_BAD_VALUE);
4655 }
4656 /* The preset ends here */
4657 if (Action == SK_PNMI_PRESET) {
4658
4659 *pLen = 0;
4660 return (SK_PNMI_ERR_OK);
4661 }
4662 /* Send an event to RLMT to change the mode */
4663 SK_MEMSET((char *)&EventParam, 0, sizeof(EventParam));
4664 EventParam.Para32[0] |= (SK_U32)(*pBuf);
4665 EventParam.Para32[1] = 0;
4666 if (SkRlmtEvent(pAC, IoC, SK_RLMT_MODE_CHANGE,
4667 EventParam) > 0) {
4668
4669 SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR037,
4670 SK_PNMI_ERR037MSG);
4671
4672 *pLen = 0;
4673 return (SK_PNMI_ERR_GENERAL);
4674 }
4675 break;
4676
4677 case OID_SKGE_RLMT_PORT_PREFERRED:
4678 /* Check if the buffer length is plausible */
4679 if (*pLen < sizeof(char)) {
4680
4681 *pLen = sizeof(char);
4682 return (SK_PNMI_ERR_TOO_SHORT);
4683 }
4684 /* Check if the value range is correct */
4685 if (*pLen != sizeof(char) || *(SK_U8 *)pBuf >
4686 (SK_U8)pAC->GIni.GIMacsFound) {
4687
4688 *pLen = 0;
4689 return (SK_PNMI_ERR_BAD_VALUE);
4690 }
4691 /* The preset ends here */
4692 if (Action == SK_PNMI_PRESET) {
4693
4694 *pLen = 0;
4695 return (SK_PNMI_ERR_OK);
4696 }
4697
4698 /*
4699 * Send an event to RLMT to change the preferred port.
4700 * A parameter of -1 means automatic mode: RLMT decides
4701 * which port is preferred.
4702 */
4703 SK_MEMSET((char *)&EventParam, 0, sizeof(EventParam));
4704 EventParam.Para32[0] = (SK_U32)(*pBuf) - 1;
4705 EventParam.Para32[1] = NetIndex;
4706 if (SkRlmtEvent(pAC, IoC, SK_RLMT_PREFPORT_CHANGE,
4707 EventParam) > 0) {
4708
4709 SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR038,
4710 SK_PNMI_ERR038MSG);
4711
4712 *pLen = 0;
4713 return (SK_PNMI_ERR_GENERAL);
4714 }
4715 break;
4716
4717 case OID_SKGE_RLMT_CHANGE_THRES:
4718 /* Check if the buffer length is plausible */
4719 if (*pLen < sizeof(SK_U64)) {
4720
4721 *pLen = sizeof(SK_U64);
4722 return (SK_PNMI_ERR_TOO_SHORT);
4723 }
4724 /*
4725 * There are not many restrictions on the
4726 * value range.
4727 */
4728 if (*pLen != sizeof(SK_U64)) {
4729
4730 *pLen = 0;
4731 return (SK_PNMI_ERR_BAD_VALUE);
4732 }
4733 /* A preset ends here */
4734 if (Action == SK_PNMI_PRESET) {
4735
4736 *pLen = 0;
4737 return (SK_PNMI_ERR_OK);
4738 }
4739 /*
4740 * Store the new threshold, which will take effect
4741 * on the next timer event.
4742 */
4743 SK_PNMI_READ_U64(pBuf, Val64);
4744 pAC->Pnmi.RlmtChangeThreshold = Val64;
4745 break;
4746
4747 default:
4748 /* The other OIDs cannot be set */
4749 *pLen = 0;
4750 return (SK_PNMI_ERR_READ_ONLY);
4751 }
4752 }
4753
4754 return (SK_PNMI_ERR_OK);
4755}
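
/*
 * Editor's note (illustrative sketch, not part of the original driver):
 * as the OID_SKGE_RLMT_PORT_PREFERRED set branch above shows, the value
 * is a single byte holding a logical port number and RLMT receives
 * (value - 1), so a value of 0 maps to -1, i.e. automatic preferred
 * port selection. SK_PNMI_SET is assumed here to be the set-action
 * constant named in the "GET/PRESET/SET action" parameter comments.
 * A hypothetical caller switching back to automatic mode:
 */
#if 0	/* hypothetical example only */
static int ExampleResetPreferredPort(
SK_AC *pAC,	/* Pointer to adapter context */
SK_IOC IoC,	/* IO context handle */
SK_U32 NetIndex)
{
	char PrefPort = 0;		/* 0 = let RLMT choose automatically */
	unsigned int Len = sizeof(char);

	/* TableIndex (0 here) is not referenced by the Rlmt() handler. */
	return (Rlmt(pAC, IoC, SK_PNMI_SET, OID_SKGE_RLMT_PORT_PREFERRED,
		&PrefPort, &Len, 1, 0, NetIndex));
}
#endif
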
4756
4757/*****************************************************************************
4758 *
4759 * RlmtStat - OID handler function of OID_SKGE_RLMT_XXX multiple instance.
4760 *
4761 * Description:
4762 * Performs get requests on multiple instance variables.
4763 *
4764 * Returns:
4765 * SK_PNMI_ERR_OK The request was successfully performed.
4766 * SK_PNMI_ERR_GENERAL A general severe internal error occurred.
4767 * SK_PNMI_ERR_TOO_SHORT The passed buffer is too short to contain
4768 * the correct data (e.g. a 32bit value is
4769 * needed, but a 16 bit value was passed).
4770 * SK_PNMI_ERR_UNKNOWN_INST The requested instance of the OID doesn't
4771 * exist (e.g. port instance 3 on a two port
4772 * adapter).
4773 */
4774PNMI_STATIC int RlmtStat(
4775SK_AC *pAC, /* Pointer to adapter context */
4776SK_IOC IoC, /* IO context handle */
4777int Action, /* GET/PRESET/SET action */
4778SK_U32 Id, /* Object ID that is to be processed */
4779char *pBuf, /* Buffer used for the management data transfer */
4780unsigned int *pLen, /* On call: pBuf buffer length. On return: used buffer */
4781SK_U32 Instance, /* Instance (1..n) that is to be queried or -1 */
4782unsigned int TableIndex, /* Index to the Id table */
4783SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */
4784{
4785 unsigned int PhysPortMax;
4786 unsigned int PhysPortIndex;
4787 unsigned int Limit;
4788 unsigned int Offset;
4789 int Ret;
4790 SK_U32 Val32;
4791 SK_U64 Val64;
4792
4793 /*
4794 * Calculate the port indexes from the instance.
4795 */
4796 PhysPortMax = pAC->GIni.GIMacsFound;
4797
4798 if ((Instance != (SK_U32)(-1))) {
4799 /* Check instance range */
4800 if ((Instance < 1) || (Instance > PhysPortMax)) {
4801
4802 *pLen = 0;
4803 return (SK_PNMI_ERR_UNKNOWN_INST);
4804 }
4805
4806 /* Single net mode */
4807 PhysPortIndex = Instance - 1;
4808
4809 /* Dual net mode */
4810 if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) {
4811 PhysPortIndex = NetIndex;
4812 }
4813
4814 /* Both net modes */
4815 Limit = PhysPortIndex + 1;
4816 }
4817 else {
4818 /* Single net mode */
4819 PhysPortIndex = 0;
4820 Limit = PhysPortMax;
4821
4822 /* Dual net mode */
4823 if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) {
4824 PhysPortIndex = NetIndex;
4825 Limit = PhysPortIndex + 1;
4826 }
4827 }
4828
4829 /*
4830 * Currently only get requests are allowed.
4831 */
4832 if (Action != SK_PNMI_GET) {
4833
4834 *pLen = 0;
4835 return (SK_PNMI_ERR_READ_ONLY);
4836 }
4837
4838 /*
4839 * Check if the buffer length is large enough.
4840 */
4841 switch (Id) {
4842
4843 case OID_SKGE_RLMT_PORT_INDEX:
4844 case OID_SKGE_RLMT_STATUS:
4845 if (*pLen < (Limit - PhysPortIndex) * sizeof(SK_U32)) {
4846
4847 *pLen = (Limit - PhysPortIndex) * sizeof(SK_U32);
4848 return (SK_PNMI_ERR_TOO_SHORT);
4849 }
4850 break;
4851
4852 case OID_SKGE_RLMT_TX_HELLO_CTS:
4853 case OID_SKGE_RLMT_RX_HELLO_CTS:
4854 case OID_SKGE_RLMT_TX_SP_REQ_CTS:
4855 case OID_SKGE_RLMT_RX_SP_CTS:
4856 if (*pLen < (Limit - PhysPortIndex) * sizeof(SK_U64)) {
4857
4858 *pLen = (Limit - PhysPortIndex) * sizeof(SK_U64);
4859 return (SK_PNMI_ERR_TOO_SHORT);
4860 }
4861 break;
4862
4863 default:
4864 SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR039,
4865 SK_PNMI_ERR039MSG);
4866
4867 *pLen = 0;
4868 return (SK_PNMI_ERR_GENERAL);
4869
4870 }
4871
4872 /*
4873 * Update the statistics and increment the semaphore to indicate
4874 * that an update has already been done.
4875 */
4876 if ((Ret = RlmtUpdate(pAC, IoC, NetIndex)) != SK_PNMI_ERR_OK) {
4877
4878 *pLen = 0;
4879 return (Ret);
4880 }
4881 pAC->Pnmi.RlmtUpdatedFlag ++;
4882
4883 /*
4884 * Get value
4885 */
4886 Offset = 0;
4887 for (; PhysPortIndex < Limit; PhysPortIndex ++) {
4888
4889 switch (Id) {
4890
4891 case OID_SKGE_RLMT_PORT_INDEX:
4892 Val32 = PhysPortIndex;
4893 SK_PNMI_STORE_U32(pBuf + Offset, Val32);
4894 Offset += sizeof(SK_U32);
4895 break;
4896
4897 case OID_SKGE_RLMT_STATUS:
4898 if (pAC->Rlmt.Port[PhysPortIndex].PortState ==
4899 SK_RLMT_PS_INIT ||
4900 pAC->Rlmt.Port[PhysPortIndex].PortState ==
4901 SK_RLMT_PS_DOWN) {
4902
4903 Val32 = SK_PNMI_RLMT_STATUS_ERROR;
4904 }
4905 else if (pAC->Pnmi.Port[PhysPortIndex].ActiveFlag) {
4906
4907 Val32 = SK_PNMI_RLMT_STATUS_ACTIVE;
4908 }
4909 else {
4910 Val32 = SK_PNMI_RLMT_STATUS_STANDBY;
4911 }
4912 SK_PNMI_STORE_U32(pBuf + Offset, Val32);
4913 Offset += sizeof(SK_U32);
4914 break;
4915
4916 case OID_SKGE_RLMT_TX_HELLO_CTS:
4917 Val64 = pAC->Rlmt.Port[PhysPortIndex].TxHelloCts;
4918 SK_PNMI_STORE_U64(pBuf + Offset, Val64);
4919 Offset += sizeof(SK_U64);
4920 break;
4921
4922 case OID_SKGE_RLMT_RX_HELLO_CTS:
4923 Val64 = pAC->Rlmt.Port[PhysPortIndex].RxHelloCts;
4924 SK_PNMI_STORE_U64(pBuf + Offset, Val64);
4925 Offset += sizeof(SK_U64);
4926 break;
4927
4928 case OID_SKGE_RLMT_TX_SP_REQ_CTS:
4929 Val64 = pAC->Rlmt.Port[PhysPortIndex].TxSpHelloReqCts;
4930 SK_PNMI_STORE_U64(pBuf + Offset, Val64);
4931 Offset += sizeof(SK_U64);
4932 break;
4933
4934 case OID_SKGE_RLMT_RX_SP_CTS:
4935 Val64 = pAC->Rlmt.Port[PhysPortIndex].RxSpHelloCts;
4936 SK_PNMI_STORE_U64(pBuf + Offset, Val64);
4937 Offset += sizeof(SK_U64);
4938 break;
4939
4940 default:
4941 SK_DBG_MSG(pAC, SK_DBGMOD_PNMI, SK_DBGCAT_ERR,
4942 ("RlmtStat: Unknown OID should be errored before"));
4943
4944 pAC->Pnmi.RlmtUpdatedFlag --;
4945 *pLen = 0;
4946 return (SK_PNMI_ERR_GENERAL);
4947 }
4948 }
4949 *pLen = Offset;
4950
4951 pAC->Pnmi.RlmtUpdatedFlag --;
4952
4953 return (SK_PNMI_ERR_OK);
4954}
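
/*
 * Editor's note (illustrative sketch, not part of the original driver):
 * with Instance -1 in single net mode RlmtStat() returns one record per
 * physical port, packed back to back: sizeof(SK_U32) bytes per port for
 * OID_SKGE_RLMT_PORT_INDEX / OID_SKGE_RLMT_STATUS and sizeof(SK_U64)
 * bytes per port for the hello/sample counters. SK_PNMI_READ_U32 is
 * assumed to be the read counterpart of SK_PNMI_STORE_U32 (the handler
 * above uses SK_PNMI_READ_U64 the same way); the real macro is defined
 * in the PNMI headers. A hypothetical caller unpacking the status of
 * every port:
 */
#if 0	/* hypothetical example only */
static void ExampleDumpRlmtStatus(
SK_AC *pAC,	/* Pointer to adapter context */
SK_IOC IoC)	/* IO context handle */
{
	char Buf[2 * sizeof(SK_U32)];	/* these adapters have at most two MACs */
	unsigned int Len = sizeof(Buf);
	unsigned int Offset;
	SK_U32 Status;

	if (RlmtStat(pAC, IoC, SK_PNMI_GET, OID_SKGE_RLMT_STATUS,
		Buf, &Len, (SK_U32)(-1), 0, 0) != SK_PNMI_ERR_OK) {
		return;
	}

	for (Offset = 0; Offset < Len; Offset += sizeof(SK_U32)) {
		SK_PNMI_READ_U32(Buf + Offset, Status);
		/* Status is SK_PNMI_RLMT_STATUS_ACTIVE, _STANDBY or _ERROR. */
	}
}
#endif
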
4955
4956/*****************************************************************************
4957 *
4958 * MacPrivateConf - OID handler function of OIDs concerning the configuration
4959 *
4960 * Description:
4961 * Gets/presets/sets the OIDs concerning the configuration.
4962 *
4963 * Returns:
4964 * SK_PNMI_ERR_OK The request was successfully performed.
4965 * SK_PNMI_ERR_GENERAL A general severe internal error occurred.
4966 * SK_PNMI_ERR_TOO_SHORT The passed buffer is too short to contain
4967 *	                      the correct data (e.g. a 32-bit value is
4968 *	                      needed, but a 16-bit value was passed).
4969 * SK_PNMI_ERR_BAD_VALUE The passed value is not in the valid
4970 * value range.
4971 * SK_PNMI_ERR_READ_ONLY The OID is read-only and cannot be set.
4972 * SK_PNMI_ERR_UNKNOWN_INST The requested instance of the OID doesn't
4973 * exist (e.g. port instance 3 on a two port
4974 *	                      adapter).
4975 */
4976PNMI_STATIC int MacPrivateConf(
4977SK_AC *pAC, /* Pointer to adapter context */
4978SK_IOC IoC, /* IO context handle */
4979int Action, /* GET/PRESET/SET action */
4980SK_U32 Id, /* Object ID that is to be processed */
4981char *pBuf, /* Buffer used for the management data transfer */
4982unsigned int *pLen, /* On call: pBuf buffer length. On return: used buffer */
4983SK_U32 Instance, /* Instance (1..n) that is to be queried or -1 */
4984unsigned int TableIndex, /* Index to the Id table */
4985SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */
4986{
4987 unsigned int PhysPortMax;
4988 unsigned int PhysPortIndex;
4989 unsigned int LogPortMax;
4990 unsigned int LogPortIndex;
4991 unsigned int Limit;
4992 unsigned int Offset;
4993 char Val8;
4994 char *pBufPtr;
4995 int Ret;
4996 SK_EVPARA EventParam;
4997 SK_U32 Val32;
4998
4999 /*
5000 * Calculate instance if wished. MAC index 0 is the virtual MAC.
5001 	 * Calculate the instance range, if requested. MAC index 0 is the virtual MAC.
5002 PhysPortMax = pAC->GIni.GIMacsFound;
5003 LogPortMax = SK_PNMI_PORT_PHYS2LOG(PhysPortMax);
5004
5005 if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) { /* Dual net mode */
5006 LogPortMax--;
5007 }
5008
5009 if ((Instance != (SK_U32)(-1))) { /* Only one specific instance is queried */
5010 /* Check instance range */
5011 if ((Instance < 1) || (Instance > LogPortMax)) {
5012
5013 *pLen = 0;
5014 return (SK_PNMI_ERR_UNKNOWN_INST);
5015 }
5016 LogPortIndex = SK_PNMI_PORT_INST2LOG(Instance);
5017 Limit = LogPortIndex + 1;
5018 }
5019
5020 else { /* Instance == (SK_U32)(-1), get all Instances of that OID */
5021
5022 LogPortIndex = 0;
5023 Limit = LogPortMax;
5024 }
5025
5026 /*
5027 * Perform action
5028 */
5029 if (Action == SK_PNMI_GET) {
5030
5031 /* Check length */
5032 switch (Id) {
5033
5034 case OID_SKGE_PMD:
5035 case OID_SKGE_CONNECTOR:
5036 case OID_SKGE_LINK_CAP:
5037 case OID_SKGE_LINK_MODE:
5038 case OID_SKGE_LINK_MODE_STATUS:
5039 case OID_SKGE_LINK_STATUS:
5040 case OID_SKGE_FLOWCTRL_CAP:
5041 case OID_SKGE_FLOWCTRL_MODE:
5042 case OID_SKGE_FLOWCTRL_STATUS:
5043 case OID_SKGE_PHY_OPERATION_CAP:
5044 case OID_SKGE_PHY_OPERATION_MODE:
5045 case OID_SKGE_PHY_OPERATION_STATUS:
5046 case OID_SKGE_SPEED_CAP:
5047 case OID_SKGE_SPEED_MODE:
5048 case OID_SKGE_SPEED_STATUS:
5049 if (*pLen < (Limit - LogPortIndex) * sizeof(SK_U8)) {
5050
5051 *pLen = (Limit - LogPortIndex) * sizeof(SK_U8);
5052 return (SK_PNMI_ERR_TOO_SHORT);
5053 }
5054 break;
5055
5056 case OID_SKGE_MTU:
5057 case OID_SKGE_PHY_TYPE:
5058 if (*pLen < (Limit - LogPortIndex) * sizeof(SK_U32)) {
5059
5060 *pLen = (Limit - LogPortIndex) * sizeof(SK_U32);
5061 return (SK_PNMI_ERR_TOO_SHORT);
5062 }
5063 break;
5064
5065 default:
5066 SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR041,
5067 SK_PNMI_ERR041MSG);
5068 *pLen = 0;
5069 return (SK_PNMI_ERR_GENERAL);
5070 }
5071
5072 /*
5073 		 * Update the statistics and increment the semaphore to indicate
5074 * that an update was already done.
5075 */
5076 if ((Ret = SirqUpdate(pAC, IoC)) != SK_PNMI_ERR_OK) {
5077
5078 *pLen = 0;
5079 return (Ret);
5080 }
5081 pAC->Pnmi.SirqUpdatedFlag ++;
5082
5083 /*
5084 * Get value
5085 */
5086 Offset = 0;
5087 for (; LogPortIndex < Limit; LogPortIndex ++) {
5088
5089 pBufPtr = pBuf + Offset;
5090
5091 switch (Id) {
5092
5093 case OID_SKGE_PMD:
5094 *pBufPtr = pAC->Pnmi.PMD;
5095 Offset += sizeof(char);
5096 break;
5097
5098 case OID_SKGE_CONNECTOR:
5099 *pBufPtr = pAC->Pnmi.Connector;
5100 Offset += sizeof(char);
5101 break;
5102
5103 case OID_SKGE_PHY_TYPE:
5104 if (!pAC->Pnmi.DualNetActiveFlag) { /* SingleNetMode */
5105 if (LogPortIndex == 0) {
5106 continue;
5107 }
5108 else {
5109 /* Get value for physical ports */
5110 PhysPortIndex = SK_PNMI_PORT_LOG2PHYS(
5111 pAC, LogPortIndex);
5112 Val32 = pAC->GIni.GP[PhysPortIndex].PhyType;
5113 SK_PNMI_STORE_U32(pBufPtr, Val32);
5114 }
5115 }
5116 else { /* DualNetMode */
5117
5118 Val32 = pAC->GIni.GP[NetIndex].PhyType;
5119 SK_PNMI_STORE_U32(pBufPtr, Val32);
5120 }
5121 Offset += sizeof(SK_U32);
5122 break;
5123
5124 case OID_SKGE_LINK_CAP:
5125 if (!pAC->Pnmi.DualNetActiveFlag) { /* SingleNetMode */
5126 if (LogPortIndex == 0) {
5127 /* Get value for virtual port */
5128 VirtualConf(pAC, IoC, Id, pBufPtr);
5129 }
5130 else {
5131 /* Get value for physical ports */
5132 PhysPortIndex = SK_PNMI_PORT_LOG2PHYS(
5133 pAC, LogPortIndex);
5134
5135 *pBufPtr = pAC->GIni.GP[PhysPortIndex].PLinkCap;
5136 }
5137 }
5138 else { /* DualNetMode */
5139
5140 *pBufPtr = pAC->GIni.GP[NetIndex].PLinkCap;
5141 }
5142 Offset += sizeof(char);
5143 break;
5144
5145 case OID_SKGE_LINK_MODE:
5146 if (!pAC->Pnmi.DualNetActiveFlag) { /* SingleNetMode */
5147 if (LogPortIndex == 0) {
5148 /* Get value for virtual port */
5149 VirtualConf(pAC, IoC, Id, pBufPtr);
5150 }
5151 else {
5152 /* Get value for physical ports */
5153 PhysPortIndex = SK_PNMI_PORT_LOG2PHYS(
5154 pAC, LogPortIndex);
5155
5156 *pBufPtr = pAC->GIni.GP[PhysPortIndex].PLinkModeConf;
5157 }
5158 }
5159 else { /* DualNetMode */
5160
5161 *pBufPtr = pAC->GIni.GP[NetIndex].PLinkModeConf;
5162 }
5163 Offset += sizeof(char);
5164 break;
5165
5166 case OID_SKGE_LINK_MODE_STATUS:
5167 if (!pAC->Pnmi.DualNetActiveFlag) { /* SingleNetMode */
5168 if (LogPortIndex == 0) {
5169 /* Get value for virtual port */
5170 VirtualConf(pAC, IoC, Id, pBufPtr);
5171 }
5172 else {
5173 /* Get value for physical port */
5174 PhysPortIndex = SK_PNMI_PORT_LOG2PHYS(
5175 pAC, LogPortIndex);
5176
5177 *pBufPtr =
5178 CalculateLinkModeStatus(pAC, IoC, PhysPortIndex);
5179 }
5180 }
5181 else { /* DualNetMode */
5182
5183 *pBufPtr = CalculateLinkModeStatus(pAC, IoC, NetIndex);
5184 }
5185 Offset += sizeof(char);
5186 break;
5187
5188 case OID_SKGE_LINK_STATUS:
5189 if (!pAC->Pnmi.DualNetActiveFlag) { /* SingleNetMode */
5190 if (LogPortIndex == 0) {
5191 /* Get value for virtual port */
5192 VirtualConf(pAC, IoC, Id, pBufPtr);
5193 }
5194 else {
5195 /* Get value for physical ports */
5196 PhysPortIndex = SK_PNMI_PORT_LOG2PHYS(
5197 pAC, LogPortIndex);
5198
5199 *pBufPtr = CalculateLinkStatus(pAC, IoC, PhysPortIndex);
5200 }
5201 }
5202 else { /* DualNetMode */
5203
5204 *pBufPtr = CalculateLinkStatus(pAC, IoC, NetIndex);
5205 }
5206 Offset += sizeof(char);
5207 break;
5208
5209 case OID_SKGE_FLOWCTRL_CAP:
5210 if (!pAC->Pnmi.DualNetActiveFlag) { /* SingleNetMode */
5211 if (LogPortIndex == 0) {
5212 /* Get value for virtual port */
5213 VirtualConf(pAC, IoC, Id, pBufPtr);
5214 }
5215 else {
5216 /* Get value for physical ports */
5217 PhysPortIndex = SK_PNMI_PORT_LOG2PHYS(
5218 pAC, LogPortIndex);
5219
5220 *pBufPtr = pAC->GIni.GP[PhysPortIndex].PFlowCtrlCap;
5221 }
5222 }
5223 else { /* DualNetMode */
5224
5225 *pBufPtr = pAC->GIni.GP[NetIndex].PFlowCtrlCap;
5226 }
5227 Offset += sizeof(char);
5228 break;
5229
5230 case OID_SKGE_FLOWCTRL_MODE:
5231 if (!pAC->Pnmi.DualNetActiveFlag) { /* SingleNetMode */
5232 if (LogPortIndex == 0) {
5233 /* Get value for virtual port */
5234 VirtualConf(pAC, IoC, Id, pBufPtr);
5235 }
5236 else {
5237 /* Get value for physical port */
5238 PhysPortIndex = SK_PNMI_PORT_LOG2PHYS(
5239 pAC, LogPortIndex);
5240
5241 *pBufPtr = pAC->GIni.GP[PhysPortIndex].PFlowCtrlMode;
5242 }
5243 }
5244 else { /* DualNetMode */
5245
5246 *pBufPtr = pAC->GIni.GP[NetIndex].PFlowCtrlMode;
5247 }
5248 Offset += sizeof(char);
5249 break;
5250
5251 case OID_SKGE_FLOWCTRL_STATUS:
5252 if (!pAC->Pnmi.DualNetActiveFlag) { /* SingleNetMode */
5253 if (LogPortIndex == 0) {
5254 /* Get value for virtual port */
5255 VirtualConf(pAC, IoC, Id, pBufPtr);
5256 }
5257 else {
5258 /* Get value for physical port */
5259 PhysPortIndex = SK_PNMI_PORT_LOG2PHYS(
5260 pAC, LogPortIndex);
5261
5262 *pBufPtr = pAC->GIni.GP[PhysPortIndex].PFlowCtrlStatus;
5263 }
5264 }
5265 else { /* DualNetMode */
5266
5267 *pBufPtr = pAC->GIni.GP[NetIndex].PFlowCtrlStatus;
5268 }
5269 Offset += sizeof(char);
5270 break;
5271
5272 case OID_SKGE_PHY_OPERATION_CAP:
5273 if (!pAC->Pnmi.DualNetActiveFlag) { /* SingleNetMode */
5274 if (LogPortIndex == 0) {
5275 /* Get value for virtual port */
5276 VirtualConf(pAC, IoC, Id, pBufPtr);
5277 }
5278 else {
5279 /* Get value for physical ports */
5280 PhysPortIndex = SK_PNMI_PORT_LOG2PHYS(
5281 pAC, LogPortIndex);
5282
5283 *pBufPtr = pAC->GIni.GP[PhysPortIndex].PMSCap;
5284 }
5285 }
5286 else { /* DualNetMode */
5287
5288 *pBufPtr = pAC->GIni.GP[NetIndex].PMSCap;
5289 }
5290 Offset += sizeof(char);
5291 break;
5292
5293 case OID_SKGE_PHY_OPERATION_MODE:
5294 if (!pAC->Pnmi.DualNetActiveFlag) { /* SingleNetMode */
5295 if (LogPortIndex == 0) {
5296 /* Get value for virtual port */
5297 VirtualConf(pAC, IoC, Id, pBufPtr);
5298 }
5299 else {
5300 /* Get value for physical port */
5301 PhysPortIndex = SK_PNMI_PORT_LOG2PHYS(
5302 pAC, LogPortIndex);
5303
5304 *pBufPtr = pAC->GIni.GP[PhysPortIndex].PMSMode;
5305 }
5306 }
5307 else { /* DualNetMode */
5308
5309 *pBufPtr = pAC->GIni.GP[NetIndex].PMSMode;
5310 }
5311 Offset += sizeof(char);
5312 break;
5313
5314 case OID_SKGE_PHY_OPERATION_STATUS:
5315 if (!pAC->Pnmi.DualNetActiveFlag) { /* SingleNetMode */
5316 if (LogPortIndex == 0) {
5317 /* Get value for virtual port */
5318 VirtualConf(pAC, IoC, Id, pBufPtr);
5319 }
5320 else {
5321 /* Get value for physical port */
5322 PhysPortIndex = SK_PNMI_PORT_LOG2PHYS(
5323 pAC, LogPortIndex);
5324
5325 *pBufPtr = pAC->GIni.GP[PhysPortIndex].PMSStatus;
5326 }
5327 }
5328 			else { /* DualNetMode */
5329
5330 *pBufPtr = pAC->GIni.GP[NetIndex].PMSStatus;
5331 }
5332 Offset += sizeof(char);
5333 break;
5334
5335 case OID_SKGE_SPEED_CAP:
5336 if (!pAC->Pnmi.DualNetActiveFlag) { /* SingleNetMode */
5337 if (LogPortIndex == 0) {
5338 /* Get value for virtual port */
5339 VirtualConf(pAC, IoC, Id, pBufPtr);
5340 }
5341 else {
5342 /* Get value for physical ports */
5343 PhysPortIndex = SK_PNMI_PORT_LOG2PHYS(
5344 pAC, LogPortIndex);
5345
5346 *pBufPtr = pAC->GIni.GP[PhysPortIndex].PLinkSpeedCap;
5347 }
5348 }
5349 else { /* DualNetMode */
5350
5351 *pBufPtr = pAC->GIni.GP[NetIndex].PLinkSpeedCap;
5352 }
5353 Offset += sizeof(char);
5354 break;
5355
5356 case OID_SKGE_SPEED_MODE:
5357 if (!pAC->Pnmi.DualNetActiveFlag) { /* SingleNetMode */
5358 if (LogPortIndex == 0) {
5359 /* Get value for virtual port */
5360 VirtualConf(pAC, IoC, Id, pBufPtr);
5361 }
5362 else {
5363 /* Get value for physical port */
5364 PhysPortIndex = SK_PNMI_PORT_LOG2PHYS(
5365 pAC, LogPortIndex);
5366
5367 *pBufPtr = pAC->GIni.GP[PhysPortIndex].PLinkSpeed;
5368 }
5369 }
5370 else { /* DualNetMode */
5371
5372 *pBufPtr = pAC->GIni.GP[NetIndex].PLinkSpeed;
5373 }
5374 Offset += sizeof(char);
5375 break;
5376
5377 case OID_SKGE_SPEED_STATUS:
5378 if (!pAC->Pnmi.DualNetActiveFlag) { /* SingleNetMode */
5379 if (LogPortIndex == 0) {
5380 /* Get value for virtual port */
5381 VirtualConf(pAC, IoC, Id, pBufPtr);
5382 }
5383 else {
5384 /* Get value for physical port */
5385 PhysPortIndex = SK_PNMI_PORT_LOG2PHYS(
5386 pAC, LogPortIndex);
5387
5388 *pBufPtr = pAC->GIni.GP[PhysPortIndex].PLinkSpeedUsed;
5389 }
5390 }
5391 else { /* DualNetMode */
5392
5393 *pBufPtr = pAC->GIni.GP[NetIndex].PLinkSpeedUsed;
5394 }
5395 Offset += sizeof(char);
5396 break;
5397
5398 case OID_SKGE_MTU:
5399 Val32 = SK_DRIVER_GET_MTU(pAC, IoC, NetIndex);
5400 SK_PNMI_STORE_U32(pBufPtr, Val32);
5401 Offset += sizeof(SK_U32);
5402 break;
5403
5404 default:
5405 SK_DBG_MSG(pAC, SK_DBGMOD_PNMI, SK_DBGCAT_ERR,
5406 ("MacPrivateConf: Unknown OID should be handled before"));
5407
5408 pAC->Pnmi.SirqUpdatedFlag --;
5409 return (SK_PNMI_ERR_GENERAL);
5410 }
5411 }
5412 *pLen = Offset;
5413 pAC->Pnmi.SirqUpdatedFlag --;
5414
5415 return (SK_PNMI_ERR_OK);
5416 }
5417
5418 /*
5419 	 * From here on, the action is SET or PRESET. Check whether the
5420 	 * passed buffer length is plausible.
5421 */
5422 switch (Id) {
5423
5424 case OID_SKGE_LINK_MODE:
5425 case OID_SKGE_FLOWCTRL_MODE:
5426 case OID_SKGE_PHY_OPERATION_MODE:
5427 case OID_SKGE_SPEED_MODE:
5428 if (*pLen < Limit - LogPortIndex) {
5429
5430 *pLen = Limit - LogPortIndex;
5431 return (SK_PNMI_ERR_TOO_SHORT);
5432 }
5433 if (*pLen != Limit - LogPortIndex) {
5434
5435 *pLen = 0;
5436 return (SK_PNMI_ERR_BAD_VALUE);
5437 }
5438 break;
5439
5440 case OID_SKGE_MTU:
5441 if (*pLen < sizeof(SK_U32)) {
5442
5443 *pLen = sizeof(SK_U32);
5444 return (SK_PNMI_ERR_TOO_SHORT);
5445 }
5446 if (*pLen != sizeof(SK_U32)) {
5447
5448 *pLen = 0;
5449 return (SK_PNMI_ERR_BAD_VALUE);
5450 }
5451 break;
5452
5453 default:
5454 *pLen = 0;
5455 return (SK_PNMI_ERR_READ_ONLY);
5456 }
5457
5458 /*
5459 * Perform preset or set
5460 */
5461 Offset = 0;
5462 for (; LogPortIndex < Limit; LogPortIndex ++) {
5463
5464 switch (Id) {
5465
5466 case OID_SKGE_LINK_MODE:
5467 /* Check the value range */
5468 Val8 = *(pBuf + Offset);
5469 if (Val8 == 0) {
5470
5471 Offset += sizeof(char);
5472 break;
5473 }
5474 if (Val8 < SK_LMODE_HALF ||
5475 (LogPortIndex != 0 && Val8 > SK_LMODE_AUTOSENSE) ||
5476 (LogPortIndex == 0 && Val8 > SK_LMODE_INDETERMINATED)) {
5477
5478 *pLen = 0;
5479 return (SK_PNMI_ERR_BAD_VALUE);
5480 }
5481
5482 /* The preset ends here */
5483 if (Action == SK_PNMI_PRESET) {
5484
5485 return (SK_PNMI_ERR_OK);
5486 }
5487
5488 if (LogPortIndex == 0) {
5489
5490 /*
5491 * The virtual port consists of all currently
5492 * active ports. Find them and send an event
5493 * with the new link mode to SIRQ.
5494 */
5495 for (PhysPortIndex = 0;
5496 PhysPortIndex < PhysPortMax;
5497 PhysPortIndex ++) {
5498
5499 if (!pAC->Pnmi.Port[PhysPortIndex].
5500 ActiveFlag) {
5501
5502 continue;
5503 }
5504
5505 EventParam.Para32[0] = PhysPortIndex;
5506 EventParam.Para32[1] = (SK_U32)Val8;
5507 if (SkGeSirqEvent(pAC, IoC,
5508 SK_HWEV_SET_LMODE,
5509 EventParam) > 0) {
5510
5511 SK_ERR_LOG(pAC, SK_ERRCL_SW,
5512 SK_PNMI_ERR043,
5513 SK_PNMI_ERR043MSG);
5514
5515 *pLen = 0;
5516 return (SK_PNMI_ERR_GENERAL);
5517 }
5518 }
5519 }
5520 else {
5521 /*
5522 * Send an event with the new link mode to
5523 * the SIRQ module.
5524 */
5525 EventParam.Para32[0] = SK_PNMI_PORT_LOG2PHYS(
5526 pAC, LogPortIndex);
5527 EventParam.Para32[1] = (SK_U32)Val8;
5528 if (SkGeSirqEvent(pAC, IoC, SK_HWEV_SET_LMODE,
5529 EventParam) > 0) {
5530
5531 SK_ERR_LOG(pAC, SK_ERRCL_SW,
5532 SK_PNMI_ERR043,
5533 SK_PNMI_ERR043MSG);
5534
5535 *pLen = 0;
5536 return (SK_PNMI_ERR_GENERAL);
5537 }
5538 }
5539 Offset += sizeof(char);
5540 break;
5541
5542 case OID_SKGE_FLOWCTRL_MODE:
5543 /* Check the value range */
5544 Val8 = *(pBuf + Offset);
5545 if (Val8 == 0) {
5546
5547 Offset += sizeof(char);
5548 break;
5549 }
5550 if (Val8 < SK_FLOW_MODE_NONE ||
5551 (LogPortIndex != 0 && Val8 > SK_FLOW_MODE_SYM_OR_REM) ||
5552 (LogPortIndex == 0 && Val8 > SK_FLOW_MODE_INDETERMINATED)) {
5553
5554 *pLen = 0;
5555 return (SK_PNMI_ERR_BAD_VALUE);
5556 }
5557
5558 /* The preset ends here */
5559 if (Action == SK_PNMI_PRESET) {
5560
5561 return (SK_PNMI_ERR_OK);
5562 }
5563
5564 if (LogPortIndex == 0) {
5565
5566 /*
5567 * The virtual port consists of all currently
5568 * active ports. Find them and send an event
5569 * with the new flow control mode to SIRQ.
5570 */
5571 for (PhysPortIndex = 0;
5572 PhysPortIndex < PhysPortMax;
5573 PhysPortIndex ++) {
5574
5575 if (!pAC->Pnmi.Port[PhysPortIndex].
5576 ActiveFlag) {
5577
5578 continue;
5579 }
5580
5581 EventParam.Para32[0] = PhysPortIndex;
5582 EventParam.Para32[1] = (SK_U32)Val8;
5583 if (SkGeSirqEvent(pAC, IoC,
5584 SK_HWEV_SET_FLOWMODE,
5585 EventParam) > 0) {
5586
5587 SK_ERR_LOG(pAC, SK_ERRCL_SW,
5588 SK_PNMI_ERR044,
5589 SK_PNMI_ERR044MSG);
5590
5591 *pLen = 0;
5592 return (SK_PNMI_ERR_GENERAL);
5593 }
5594 }
5595 }
5596 else {
5597 /*
5598 * Send an event with the new flow control
5599 * mode to the SIRQ module.
5600 */
5601 EventParam.Para32[0] = SK_PNMI_PORT_LOG2PHYS(
5602 pAC, LogPortIndex);
5603 EventParam.Para32[1] = (SK_U32)Val8;
5604 if (SkGeSirqEvent(pAC, IoC,
5605 SK_HWEV_SET_FLOWMODE, EventParam)
5606 > 0) {
5607
5608 SK_ERR_LOG(pAC, SK_ERRCL_SW,
5609 SK_PNMI_ERR044,
5610 SK_PNMI_ERR044MSG);
5611
5612 *pLen = 0;
5613 return (SK_PNMI_ERR_GENERAL);
5614 }
5615 }
5616 Offset += sizeof(char);
5617 break;
5618
5619 case OID_SKGE_PHY_OPERATION_MODE :
5620 /* Check the value range */
5621 Val8 = *(pBuf + Offset);
5622 if (Val8 == 0) {
5623 /* mode of this port remains unchanged */
5624 Offset += sizeof(char);
5625 break;
5626 }
5627 if (Val8 < SK_MS_MODE_AUTO ||
5628 (LogPortIndex != 0 && Val8 > SK_MS_MODE_SLAVE) ||
5629 (LogPortIndex == 0 && Val8 > SK_MS_MODE_INDETERMINATED)) {
5630
5631 *pLen = 0;
5632 return (SK_PNMI_ERR_BAD_VALUE);
5633 }
5634
5635 /* The preset ends here */
5636 if (Action == SK_PNMI_PRESET) {
5637
5638 return (SK_PNMI_ERR_OK);
5639 }
5640
5641 if (LogPortIndex == 0) {
5642
5643 /*
5644 * The virtual port consists of all currently
5645 * active ports. Find them and send an event
5646 * with new master/slave (role) mode to SIRQ.
5647 */
5648 for (PhysPortIndex = 0;
5649 PhysPortIndex < PhysPortMax;
5650 PhysPortIndex ++) {
5651
5652 if (!pAC->Pnmi.Port[PhysPortIndex].
5653 ActiveFlag) {
5654
5655 continue;
5656 }
5657
5658 EventParam.Para32[0] = PhysPortIndex;
5659 EventParam.Para32[1] = (SK_U32)Val8;
5660 if (SkGeSirqEvent(pAC, IoC,
5661 SK_HWEV_SET_ROLE,
5662 EventParam) > 0) {
5663
5664 SK_ERR_LOG(pAC, SK_ERRCL_SW,
5665 SK_PNMI_ERR042,
5666 SK_PNMI_ERR042MSG);
5667
5668 *pLen = 0;
5669 return (SK_PNMI_ERR_GENERAL);
5670 }
5671 }
5672 }
5673 else {
5674 /*
5675 * Send an event with the new master/slave
5676 * (role) mode to the SIRQ module.
5677 */
5678 EventParam.Para32[0] = SK_PNMI_PORT_LOG2PHYS(
5679 pAC, LogPortIndex);
5680 EventParam.Para32[1] = (SK_U32)Val8;
5681 if (SkGeSirqEvent(pAC, IoC,
5682 SK_HWEV_SET_ROLE, EventParam) > 0) {
5683
5684 SK_ERR_LOG(pAC, SK_ERRCL_SW,
5685 SK_PNMI_ERR042,
5686 SK_PNMI_ERR042MSG);
5687
5688 *pLen = 0;
5689 return (SK_PNMI_ERR_GENERAL);
5690 }
5691 }
5692
5693 Offset += sizeof(char);
5694 break;
5695
5696 case OID_SKGE_SPEED_MODE:
5697 /* Check the value range */
5698 Val8 = *(pBuf + Offset);
5699 if (Val8 == 0) {
5700
5701 Offset += sizeof(char);
5702 break;
5703 }
5704 if (Val8 < (SK_LSPEED_AUTO) ||
5705 (LogPortIndex != 0 && Val8 > (SK_LSPEED_1000MBPS)) ||
5706 (LogPortIndex == 0 && Val8 > (SK_LSPEED_INDETERMINATED))) {
5707
5708 *pLen = 0;
5709 return (SK_PNMI_ERR_BAD_VALUE);
5710 }
5711
5712 /* The preset ends here */
5713 if (Action == SK_PNMI_PRESET) {
5714
5715 return (SK_PNMI_ERR_OK);
5716 }
5717
5718 if (LogPortIndex == 0) {
5719
5720 /*
5721 * The virtual port consists of all currently
5722 * active ports. Find them and send an event
5723 				 * with the new link speed to SIRQ.
5724 */
5725 for (PhysPortIndex = 0;
5726 PhysPortIndex < PhysPortMax;
5727 PhysPortIndex ++) {
5728
5729 if (!pAC->Pnmi.Port[PhysPortIndex].ActiveFlag) {
5730
5731 continue;
5732 }
5733
5734 EventParam.Para32[0] = PhysPortIndex;
5735 EventParam.Para32[1] = (SK_U32)Val8;
5736 if (SkGeSirqEvent(pAC, IoC,
5737 SK_HWEV_SET_SPEED,
5738 EventParam) > 0) {
5739
5740 SK_ERR_LOG(pAC, SK_ERRCL_SW,
5741 SK_PNMI_ERR045,
5742 SK_PNMI_ERR045MSG);
5743
5744 *pLen = 0;
5745 return (SK_PNMI_ERR_GENERAL);
5746 }
5747 }
5748 }
5749 else {
5750 /*
5751 * Send an event with the new flow control
5752 				 * Send an event with the new link speed
5753 				 * to the SIRQ module.
5754 EventParam.Para32[0] = SK_PNMI_PORT_LOG2PHYS(
5755 pAC, LogPortIndex);
5756 EventParam.Para32[1] = (SK_U32)Val8;
5757 if (SkGeSirqEvent(pAC, IoC,
5758 SK_HWEV_SET_SPEED,
5759 EventParam) > 0) {
5760
5761 SK_ERR_LOG(pAC, SK_ERRCL_SW,
5762 SK_PNMI_ERR045,
5763 SK_PNMI_ERR045MSG);
5764
5765 *pLen = 0;
5766 return (SK_PNMI_ERR_GENERAL);
5767 }
5768 }
5769 Offset += sizeof(char);
5770 break;
5771
5772 case OID_SKGE_MTU :
5773 /* Check the value range */
5774 Val32 = *(SK_U32*)(pBuf + Offset);
5775 if (Val32 == 0) {
5776 /* mtu of this port remains unchanged */
5777 Offset += sizeof(SK_U32);
5778 break;
5779 }
5780 if (SK_DRIVER_PRESET_MTU(pAC, IoC, NetIndex, Val32) != 0) {
5781 *pLen = 0;
5782 return (SK_PNMI_ERR_BAD_VALUE);
5783 }
5784
5785 /* The preset ends here */
5786 if (Action == SK_PNMI_PRESET) {
5787 return (SK_PNMI_ERR_OK);
5788 }
5789
5790 if (SK_DRIVER_SET_MTU(pAC, IoC, NetIndex, Val32) != 0) {
5791 return (SK_PNMI_ERR_GENERAL);
5792 }
5793
5794 Offset += sizeof(SK_U32);
5795 break;
5796
5797 default:
5798 SK_DBG_MSG(pAC, SK_DBGMOD_PNMI, SK_DBGCAT_ERR,
5799 ("MacPrivateConf: Unknown OID should be handled before set"));
5800
5801 *pLen = 0;
5802 return (SK_PNMI_ERR_GENERAL);
5803 }
5804 }
5805
5806 return (SK_PNMI_ERR_OK);
5807}
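
/*
 * Hypothetical usage sketch, not part of the original driver: reading the MTU
 * of net 0 through MacPrivateConf() above. The handler is normally reached
 * through the PNMI dispatching code; it is called directly here only for
 * illustration. "GetMtuExample" is an invented name; instance 1 selects the
 * first logical port and TableIndex is passed as 0 because the handler does
 * not reference it.
 */
PNMI_STATIC int GetMtuExample(
SK_AC *pAC,	/* Pointer to adapter context */
SK_IOC IoC,	/* IO context handle */
char *pBuf)	/* Caller buffer of at least sizeof(SK_U32) bytes */
{
	unsigned int Len = sizeof(SK_U32);

	/* On success pBuf holds the MTU as stored by SK_PNMI_STORE_U32 */
	return (MacPrivateConf(pAC, IoC, SK_PNMI_GET, OID_SKGE_MTU, pBuf,
		&Len, 1, 0, 0));
}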
5808
5809/*****************************************************************************
5810 *
5811 * Monitor - OID handler function for RLMT_MONITOR_XXX
5812 *
5813 * Description:
5814 * Because RLMT currently does not support the monitoring of
5815 *	remote adapter cards, we always return an empty table.
5816 *
5817 * Returns:
5818 * SK_PNMI_ERR_OK The request was successfully performed.
5819 *	SK_PNMI_ERR_GENERAL A general severe internal error occurred.
5820 * SK_PNMI_ERR_TOO_SHORT The passed buffer is too short to contain
5821 *	                      the correct data (e.g. a 32-bit value is
5822 *	                      needed, but a 16-bit value was passed).
5823 * SK_PNMI_ERR_BAD_VALUE The passed value is not in the valid
5824 * value range.
5825 * SK_PNMI_ERR_READ_ONLY The OID is read-only and cannot be set.
5826 * SK_PNMI_ERR_UNKNOWN_INST The requested instance of the OID doesn't
5827 * exist (e.g. port instance 3 on a two port
5828 *	                      adapter).
5829 */
5830PNMI_STATIC int Monitor(
5831SK_AC *pAC, /* Pointer to adapter context */
5832SK_IOC IoC, /* IO context handle */
5833int Action, /* GET/PRESET/SET action */
5834SK_U32 Id, /* Object ID that is to be processed */
5835char *pBuf, /* Buffer used for the management data transfer */
5836unsigned int *pLen, /* On call: pBuf buffer length. On return: used buffer */
5837SK_U32 Instance, /* Instance (1..n) that is to be queried or -1 */
5838unsigned int TableIndex, /* Index to the Id table */
5839SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */
5840{
5841 unsigned int Index;
5842 unsigned int Limit;
5843 unsigned int Offset;
5844 unsigned int Entries;
5845
5846
5847 /*
5848 	 * Calculate the instance range, if requested.
5849 	 */
5850 	/* XXX Not yet implemented. Always return an empty table. */
5851 Entries = 0;
5852
5853 if ((Instance != (SK_U32)(-1))) {
5854
5855 if ((Instance < 1) || (Instance > Entries)) {
5856
5857 *pLen = 0;
5858 return (SK_PNMI_ERR_UNKNOWN_INST);
5859 }
5860
5861 Index = (unsigned int)Instance - 1;
5862 Limit = (unsigned int)Instance;
5863 }
5864 else {
5865 Index = 0;
5866 Limit = Entries;
5867 }
5868
5869 /*
5870 * Get/Set value
5871 */
5872 if (Action == SK_PNMI_GET) {
5873
5874 for (Offset=0; Index < Limit; Index ++) {
5875
5876 switch (Id) {
5877
5878 case OID_SKGE_RLMT_MONITOR_INDEX:
5879 case OID_SKGE_RLMT_MONITOR_ADDR:
5880 case OID_SKGE_RLMT_MONITOR_ERRS:
5881 case OID_SKGE_RLMT_MONITOR_TIMESTAMP:
5882 case OID_SKGE_RLMT_MONITOR_ADMIN:
5883 break;
5884
5885 default:
5886 SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR046,
5887 SK_PNMI_ERR046MSG);
5888
5889 *pLen = 0;
5890 return (SK_PNMI_ERR_GENERAL);
5891 }
5892 }
5893 *pLen = Offset;
5894 }
5895 else {
5896 /* Only MONITOR_ADMIN can be set */
5897 if (Id != OID_SKGE_RLMT_MONITOR_ADMIN) {
5898
5899 *pLen = 0;
5900 return (SK_PNMI_ERR_READ_ONLY);
5901 }
5902
5903 /* Check if the length is plausible */
5904 if (*pLen < (Limit - Index)) {
5905
5906 return (SK_PNMI_ERR_TOO_SHORT);
5907 }
5908 /* Okay, we have a wide value range */
5909 if (*pLen != (Limit - Index)) {
5910
5911 *pLen = 0;
5912 return (SK_PNMI_ERR_BAD_VALUE);
5913 }
5914/*
5915 for (Offset=0; Index < Limit; Index ++) {
5916 }
5917*/
5918/*
5919 * XXX Not yet implemented. Return always BAD_VALUE, because the table
5920 * is empty.
5921 */
5922 *pLen = 0;
5923 return (SK_PNMI_ERR_BAD_VALUE);
5924 }
5925
5926 return (SK_PNMI_ERR_OK);
5927}
5928
5929/*****************************************************************************
5930 *
5931 * VirtualConf - Calculates the values of configuration OIDs for virtual port
5932 *
5933 * Description:
5934 *	Here we handle the GET of the configuration group OIDs, which is
5935 *	a little complicated. The virtual port consists of all currently
5936 *	active physical ports. If multiple ports are active but configured
5937 *	differently, it is hard to return a single value. So we
5938 *	take the value of the first active port and compare it with that of
5939 *	the other active ports. If they are not all the same, we return a value
5940 *	that indicates that the state is indeterminate.
5941 *
5942 * Returns:
5943 * Nothing
5944 */
5945PNMI_STATIC void VirtualConf(
5946SK_AC *pAC, /* Pointer to adapter context */
5947SK_IOC IoC, /* IO context handle */
5948SK_U32 Id, /* Object ID that is to be processed */
5949char *pBuf) /* Buffer used for the management data transfer */
5950{
5951 unsigned int PhysPortMax;
5952 unsigned int PhysPortIndex;
5953 SK_U8 Val8;
5954 SK_U32 Val32;
5955 SK_BOOL PortActiveFlag;
5956 SK_GEPORT *pPrt;
5957
5958 *pBuf = 0;
5959 PortActiveFlag = SK_FALSE;
5960 PhysPortMax = pAC->GIni.GIMacsFound;
5961
5962 for (PhysPortIndex = 0; PhysPortIndex < PhysPortMax;
5963 PhysPortIndex ++) {
5964
5965 pPrt = &pAC->GIni.GP[PhysPortIndex];
5966
5967 /* Check if the physical port is active */
5968 if (!pAC->Pnmi.Port[PhysPortIndex].ActiveFlag) {
5969
5970 continue;
5971 }
5972
5973 PortActiveFlag = SK_TRUE;
5974
5975 switch (Id) {
5976
5977 case OID_SKGE_PHY_TYPE:
5978 /* Check if it is the first active port */
5979 if (*pBuf == 0) {
5980 Val32 = pPrt->PhyType;
5981 SK_PNMI_STORE_U32(pBuf, Val32);
5982 continue;
5983 }
5984
5985 case OID_SKGE_LINK_CAP:
5986
5987 /*
5988 			 * Different capabilities should not happen, but
5989 			 * just in case, OR them all together. Seen this
5990 			 * way, the virtual port is capable of every
5991 			 * capability found on its active ports.
5992 */
5993 *pBuf |= pPrt->PLinkCap;
5994 break;
5995
5996 case OID_SKGE_LINK_MODE:
5997 /* Check if it is the first active port */
5998 if (*pBuf == 0) {
5999
6000 *pBuf = pPrt->PLinkModeConf;
6001 continue;
6002 }
6003
6004 /*
6005 * If we find an active port with a different link
6006 * mode than the first one we return a value that
6007 			 * indicates that the link mode is indeterminate.
6008 */
6009 if (*pBuf != pPrt->PLinkModeConf) {
6010
6011 *pBuf = SK_LMODE_INDETERMINATED;
6012 }
6013 break;
6014
6015 case OID_SKGE_LINK_MODE_STATUS:
6016 /* Get the link mode of the physical port */
6017 Val8 = CalculateLinkModeStatus(pAC, IoC, PhysPortIndex);
6018
6019 /* Check if it is the first active port */
6020 if (*pBuf == 0) {
6021
6022 *pBuf = Val8;
6023 continue;
6024 }
6025
6026 /*
6027 * If we find an active port with a different link
6028 * mode status than the first one we return a value
6029 * that indicates that the link mode status is
6030 			 * indeterminate.
6031 */
6032 if (*pBuf != Val8) {
6033
6034 *pBuf = SK_LMODE_STAT_INDETERMINATED;
6035 }
6036 break;
6037
6038 case OID_SKGE_LINK_STATUS:
6039 /* Get the link status of the physical port */
6040 Val8 = CalculateLinkStatus(pAC, IoC, PhysPortIndex);
6041
6042 /* Check if it is the first active port */
6043 if (*pBuf == 0) {
6044
6045 *pBuf = Val8;
6046 continue;
6047 }
6048
6049 /*
6050 * If we find an active port with a different link
6051 * status than the first one, we return a value
6052 * that indicates that the link status is
6053 			 * indeterminate.
6054 */
6055 if (*pBuf != Val8) {
6056
6057 *pBuf = SK_PNMI_RLMT_LSTAT_INDETERMINATED;
6058 }
6059 break;
6060
6061 case OID_SKGE_FLOWCTRL_CAP:
6062 /* Check if it is the first active port */
6063 if (*pBuf == 0) {
6064
6065 *pBuf = pPrt->PFlowCtrlCap;
6066 continue;
6067 }
6068
6069 /*
6070 			 * Seen this way, the virtual port is capable of
6071 			 * every capability found on its active ports.
6072 */
6073 *pBuf |= pPrt->PFlowCtrlCap;
6074 break;
6075
6076 case OID_SKGE_FLOWCTRL_MODE:
6077 /* Check if it is the first active port */
6078 if (*pBuf == 0) {
6079
6080 *pBuf = pPrt->PFlowCtrlMode;
6081 continue;
6082 }
6083
6084 /*
6085 * If we find an active port with a different flow
6086 * control mode than the first one, we return a value
6087 			 * that indicates that the mode is indeterminate.
6088 */
6089 if (*pBuf != pPrt->PFlowCtrlMode) {
6090
6091 *pBuf = SK_FLOW_MODE_INDETERMINATED;
6092 }
6093 break;
6094
6095 case OID_SKGE_FLOWCTRL_STATUS:
6096 /* Check if it is the first active port */
6097 if (*pBuf == 0) {
6098
6099 *pBuf = pPrt->PFlowCtrlStatus;
6100 continue;
6101 }
6102
6103 /*
6104 * If we find an active port with a different flow
6105 * control status than the first one, we return a
6106 * value that indicates that the status is
6107 			 * indeterminate.
6108 */
6109 if (*pBuf != pPrt->PFlowCtrlStatus) {
6110
6111 *pBuf = SK_FLOW_STAT_INDETERMINATED;
6112 }
6113 break;
6114
6115 case OID_SKGE_PHY_OPERATION_CAP:
6116 /* Check if it is the first active port */
6117 if (*pBuf == 0) {
6118
6119 *pBuf = pPrt->PMSCap;
6120 continue;
6121 }
6122
6123 /*
6124 			 * Seen this way, the virtual port is capable of
6125 			 * every capability found on its active ports.
6126 */
6127 *pBuf |= pPrt->PMSCap;
6128 break;
6129
6130 case OID_SKGE_PHY_OPERATION_MODE:
6131 /* Check if it is the first active port */
6132 if (*pBuf == 0) {
6133
6134 *pBuf = pPrt->PMSMode;
6135 continue;
6136 }
6137
6138 /*
6139 * If we find an active port with a different master/
6140 * slave mode than the first one, we return a value
6141 			 * that indicates that the mode is indeterminate.
6142 */
6143 if (*pBuf != pPrt->PMSMode) {
6144
6145 *pBuf = SK_MS_MODE_INDETERMINATED;
6146 }
6147 break;
6148
6149 case OID_SKGE_PHY_OPERATION_STATUS:
6150 /* Check if it is the first active port */
6151 if (*pBuf == 0) {
6152
6153 *pBuf = pPrt->PMSStatus;
6154 continue;
6155 }
6156
6157 /*
6158 * If we find an active port with a different master/
6159 * slave status than the first one, we return a
6160 * value that indicates that the status is
6161 			 * indeterminate.
6162 */
6163 if (*pBuf != pPrt->PMSStatus) {
6164
6165 *pBuf = SK_MS_STAT_INDETERMINATED;
6166 }
6167 break;
6168
6169 case OID_SKGE_SPEED_MODE:
6170 /* Check if it is the first active port */
6171 if (*pBuf == 0) {
6172
6173 *pBuf = pPrt->PLinkSpeed;
6174 continue;
6175 }
6176
6177 /*
6178 			 * If we find an active port with a different link
6179 			 * speed mode than the first one, we return a value
6180 			 * that indicates that the mode is indeterminate.
6181 */
6182 if (*pBuf != pPrt->PLinkSpeed) {
6183
6184 *pBuf = SK_LSPEED_INDETERMINATED;
6185 }
6186 break;
6187
6188 case OID_SKGE_SPEED_STATUS:
6189 /* Check if it is the first active port */
6190 if (*pBuf == 0) {
6191
6192 *pBuf = pPrt->PLinkSpeedUsed;
6193 continue;
6194 }
6195
6196 /*
6197 			 * If we find an active port with a different link
6198 			 * speed status than the first one, we return a
6199 			 * value that indicates that the status is
6200 			 * indeterminate.
6201 */
6202 if (*pBuf != pPrt->PLinkSpeedUsed) {
6203
6204 *pBuf = SK_LSPEED_STAT_INDETERMINATED;
6205 }
6206 break;
6207 }
6208 }
6209
6210 /*
6211 	 * If no port is active, return an indeterminate answer
6212 */
6213 if (!PortActiveFlag) {
6214
6215 switch (Id) {
6216
6217 case OID_SKGE_LINK_CAP:
6218 *pBuf = SK_LMODE_CAP_INDETERMINATED;
6219 break;
6220
6221 case OID_SKGE_LINK_MODE:
6222 *pBuf = SK_LMODE_INDETERMINATED;
6223 break;
6224
6225 case OID_SKGE_LINK_MODE_STATUS:
6226 *pBuf = SK_LMODE_STAT_INDETERMINATED;
6227 break;
6228
6229 case OID_SKGE_LINK_STATUS:
6230 *pBuf = SK_PNMI_RLMT_LSTAT_INDETERMINATED;
6231 break;
6232
6233 case OID_SKGE_FLOWCTRL_CAP:
6234 case OID_SKGE_FLOWCTRL_MODE:
6235 *pBuf = SK_FLOW_MODE_INDETERMINATED;
6236 break;
6237
6238 case OID_SKGE_FLOWCTRL_STATUS:
6239 *pBuf = SK_FLOW_STAT_INDETERMINATED;
6240 break;
6241
6242 case OID_SKGE_PHY_OPERATION_CAP:
6243 *pBuf = SK_MS_CAP_INDETERMINATED;
6244 break;
6245
6246 case OID_SKGE_PHY_OPERATION_MODE:
6247 *pBuf = SK_MS_MODE_INDETERMINATED;
6248 break;
6249
6250 case OID_SKGE_PHY_OPERATION_STATUS:
6251 *pBuf = SK_MS_STAT_INDETERMINATED;
6252 break;
6253 case OID_SKGE_SPEED_CAP:
6254 *pBuf = SK_LSPEED_CAP_INDETERMINATED;
6255 break;
6256
6257 case OID_SKGE_SPEED_MODE:
6258 *pBuf = SK_LSPEED_INDETERMINATED;
6259 break;
6260
6261 case OID_SKGE_SPEED_STATUS:
6262 *pBuf = SK_LSPEED_STAT_INDETERMINATED;
6263 break;
6264 }
6265 }
6266}
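
/*
 * Illustrative sketch only (the helper below is not part of the original
 * driver): the per-OID reduction performed by VirtualConf() for the mode and
 * status OIDs boils down to taking the first active port's value and degrading
 * to the matching "indeterminate" code as soon as another active port
 * disagrees. "ReduceToCommonValue" is an invented name.
 */
PNMI_STATIC SK_U8 ReduceToCommonValue(
SK_U8 Current,	/* Value accumulated so far, 0 if no active port seen yet */
SK_U8 PortVal,	/* Value reported by the next active port */
SK_U8 IndetVal)	/* Code to return when the active ports disagree */
{
	if (Current == 0) {
		/* First active port: simply adopt its value */
		return (PortVal);
	}
	/* Keep the common value, otherwise report indeterminate */
	return ((SK_U8)((Current == PortVal) ? Current : IndetVal));
}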
6267
6268/*****************************************************************************
6269 *
6270 * CalculateLinkStatus - Determines the link status of a physical port
6271 *
6272 * Description:
6273 *	Determines the link status as follows:
6274 * LSTAT_PHY_DOWN: Link is down
6275 * LSTAT_AUTONEG: Auto-negotiation failed
6276 * LSTAT_LOG_DOWN: Link is up but RLMT did not yet put the port
6277 * logically up.
6278 * LSTAT_LOG_UP: RLMT marked the port as up
6279 *
6280 * Returns:
6281 * Link status of physical port
6282 */
6283PNMI_STATIC SK_U8 CalculateLinkStatus(
6284SK_AC *pAC, /* Pointer to adapter context */
6285SK_IOC IoC, /* IO context handle */
6286unsigned int PhysPortIndex) /* Physical port index */
6287{
6288 SK_U8 Result;
6289
6290 if (!pAC->GIni.GP[PhysPortIndex].PHWLinkUp) {
6291
6292 Result = SK_PNMI_RLMT_LSTAT_PHY_DOWN;
6293 }
6294 else if (pAC->GIni.GP[PhysPortIndex].PAutoNegFail > 0) {
6295
6296 Result = SK_PNMI_RLMT_LSTAT_AUTONEG;
6297 }
6298 else if (!pAC->Rlmt.Port[PhysPortIndex].PortDown) {
6299
6300 Result = SK_PNMI_RLMT_LSTAT_LOG_UP;
6301 }
6302 else {
6303 Result = SK_PNMI_RLMT_LSTAT_LOG_DOWN;
6304 }
6305
6306 return (Result);
6307}
6308
6309/*****************************************************************************
6310 *
6311 * CalculateLinkModeStatus - Determines the link mode status of a phys. port
6312 *
6313 * Description:
6314 *	The COMMON module only tells us whether the mode is half or full duplex.
6315 *	But in the age of auto-sensing it is useful for the user to
6316 *	know whether the mode was negotiated or forced. Therefore we look
6317 *	at the mode that was last used by the negotiation process.
6318 *
6319 * Returns:
6320 * The link mode status
6321 */
6322PNMI_STATIC SK_U8 CalculateLinkModeStatus(
6323SK_AC *pAC, /* Pointer to adapter context */
6324SK_IOC IoC, /* IO context handle */
6325unsigned int PhysPortIndex) /* Physical port index */
6326{
6327 SK_U8 Result;
6328
6329 /* Get the current mode, which can be full or half duplex */
6330 Result = pAC->GIni.GP[PhysPortIndex].PLinkModeStatus;
6331
6332 /* Check if no valid mode could be found (link is down) */
6333 if (Result < SK_LMODE_STAT_HALF) {
6334
6335 Result = SK_LMODE_STAT_UNKNOWN;
6336 }
6337 else if (pAC->GIni.GP[PhysPortIndex].PLinkMode >= SK_LMODE_AUTOHALF) {
6338
6339 /*
6340 * Auto-negotiation was used to bring up the link. Change
6341 		 * the already found duplex status so that it indicates
6342 * auto-negotiation was involved.
6343 */
6344 if (Result == SK_LMODE_STAT_HALF) {
6345
6346 Result = SK_LMODE_STAT_AUTOHALF;
6347 }
6348 else if (Result == SK_LMODE_STAT_FULL) {
6349
6350 Result = SK_LMODE_STAT_AUTOFULL;
6351 }
6352 }
6353
6354 return (Result);
6355}
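
/*
 * Example derived from the logic above (not additional vendor documentation):
 * a link forced to full duplex reports SK_LMODE_STAT_FULL, the same link
 * brought up by auto-negotiation (PLinkMode >= SK_LMODE_AUTOHALF) reports
 * SK_LMODE_STAT_AUTOFULL, and a link that is down reports
 * SK_LMODE_STAT_UNKNOWN.
 */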
6356
6357/*****************************************************************************
6358 *
6359 * GetVpdKeyArr - Obtain an array of VPD keys
6360 *
6361 * Description:
6362 * Read the VPD keys and build an array of VPD keys, which are
6363 * easy to access.
6364 *
6365 * Returns:
6366 * SK_PNMI_ERR_OK Task successfully performed.
6367 * SK_PNMI_ERR_GENERAL Something went wrong.
6368 */
6369PNMI_STATIC int GetVpdKeyArr(
6370SK_AC *pAC, /* Pointer to adapter context */
6371SK_IOC IoC, /* IO context handle */
6372char *pKeyArr, /* Ptr KeyArray */
6373unsigned int KeyArrLen, /* Length of array in bytes */
6374unsigned int *pKeyNo) /* Number of keys */
6375{
6376 unsigned int BufKeysLen = SK_PNMI_VPD_BUFSIZE;
6377 char BufKeys[SK_PNMI_VPD_BUFSIZE];
6378 unsigned int StartOffset;
6379 unsigned int Offset;
6380 int Index;
6381 int Ret;
6382
6383
6384 SK_MEMSET(pKeyArr, 0, KeyArrLen);
6385
6386 /*
6387 * Get VPD key list
6388 */
6389 Ret = VpdKeys(pAC, IoC, (char *)&BufKeys, (int *)&BufKeysLen,
6390 (int *)pKeyNo);
6391 if (Ret > 0) {
6392
6393 SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR014,
6394 SK_PNMI_ERR014MSG);
6395
6396 return (SK_PNMI_ERR_GENERAL);
6397 }
6398 /* If no keys are available return now */
6399 if (*pKeyNo == 0 || BufKeysLen == 0) {
6400
6401 return (SK_PNMI_ERR_OK);
6402 }
6403 /*
6404 	 * If the key list is too long for us, truncate it and give an
6405 	 * error log notification. This case should not happen because
6406 	 * the maximum number of keys is limited due to RAM limitations.
6407 */
6408 if (*pKeyNo > SK_PNMI_VPD_ENTRIES) {
6409
6410 SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR015,
6411 SK_PNMI_ERR015MSG);
6412
6413 *pKeyNo = SK_PNMI_VPD_ENTRIES;
6414 }
6415
6416 /*
6417 * Now build an array of fixed string length size and copy
6418 * the keys together.
6419 */
6420 for (Index = 0, StartOffset = 0, Offset = 0; Offset < BufKeysLen;
6421 Offset ++) {
6422
6423 if (BufKeys[Offset] != 0) {
6424
6425 continue;
6426 }
6427
6428 if (Offset - StartOffset > SK_PNMI_VPD_KEY_SIZE) {
6429
6430 SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR016,
6431 SK_PNMI_ERR016MSG);
6432 return (SK_PNMI_ERR_GENERAL);
6433 }
6434
6435 SK_STRNCPY(pKeyArr + Index * SK_PNMI_VPD_KEY_SIZE,
6436 &BufKeys[StartOffset], SK_PNMI_VPD_KEY_SIZE);
6437
6438 Index ++;
6439 StartOffset = Offset + 1;
6440 }
6441
6442 /* Last key not zero terminated? Get it anyway */
6443 if (StartOffset < Offset) {
6444
6445 SK_STRNCPY(pKeyArr + Index * SK_PNMI_VPD_KEY_SIZE,
6446 &BufKeys[StartOffset], SK_PNMI_VPD_KEY_SIZE);
6447 }
6448
6449 return (SK_PNMI_ERR_OK);
6450}
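
/*
 * Hypothetical accessor, not part of the original driver: the array filled by
 * GetVpdKeyArr() holds one zero-padded key per slot of SK_PNMI_VPD_KEY_SIZE
 * bytes, so entry i starts at pKeyArr + i * SK_PNMI_VPD_KEY_SIZE.
 * "VpdKeyAt" is an invented name.
 */
PNMI_STATIC const char* VpdKeyAt(
const char *pKeyArr,	/* Array filled by GetVpdKeyArr() */
unsigned int Index)	/* Key index, 0 .. KeyNo - 1 */
{
	return (pKeyArr + Index * SK_PNMI_VPD_KEY_SIZE);
}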
6451
6452/*****************************************************************************
6453 *
6454 * SirqUpdate - Let the SIRQ update its internal values
6455 *
6456 * Description:
6457 * Just to be sure that the SIRQ module holds its internal data
6458 * structures up to date, we send an update event before we make
6459 * any access.
6460 *
6461 * Returns:
6462 * SK_PNMI_ERR_OK Task successfully performed.
6463 * SK_PNMI_ERR_GENERAL Something went wrong.
6464 */
6465PNMI_STATIC int SirqUpdate(
6466SK_AC *pAC, /* Pointer to adapter context */
6467SK_IOC IoC) /* IO context handle */
6468{
6469 SK_EVPARA EventParam;
6470
6471
6472 /* Was the module already updated during the current PNMI call? */
6473 if (pAC->Pnmi.SirqUpdatedFlag > 0) {
6474
6475 return (SK_PNMI_ERR_OK);
6476 }
6477
6478 	/* Send a synchronous update event to the module */
6479 SK_MEMSET((char *)&EventParam, 0, sizeof(EventParam));
6480 if (SkGeSirqEvent(pAC, IoC, SK_HWEV_UPDATE_STAT, EventParam) > 0) {
6481
6482 SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR047,
6483 SK_PNMI_ERR047MSG);
6484
6485 return (SK_PNMI_ERR_GENERAL);
6486 }
6487
6488 return (SK_PNMI_ERR_OK);
6489}
6490
6491/*****************************************************************************
6492 *
6493 * RlmtUpdate - Let the RLMT update its internal values
6494 *
6495 * Description:
6496 * Just to be sure that the RLMT module holds its internal data
6497 * structures up to date, we send an update event before we make
6498 * any access.
6499 *
6500 * Returns:
6501 * SK_PNMI_ERR_OK Task successfully performed.
6502 * SK_PNMI_ERR_GENERAL Something went wrong.
6503 */
6504PNMI_STATIC int RlmtUpdate(
6505SK_AC *pAC, /* Pointer to adapter context */
6506SK_IOC IoC, /* IO context handle */
6507 SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */
6508{
6509 SK_EVPARA EventParam;
6510
6511
6512 /* Was the module already updated during the current PNMI call? */
6513 if (pAC->Pnmi.RlmtUpdatedFlag > 0) {
6514
6515 return (SK_PNMI_ERR_OK);
6516 }
6517
6518 	/* Send a synchronous update event to the module */
6519 SK_MEMSET((char *)&EventParam, 0, sizeof(EventParam));
6520 EventParam.Para32[0] = NetIndex;
6521 EventParam.Para32[1] = (SK_U32)-1;
6522 if (SkRlmtEvent(pAC, IoC, SK_RLMT_STATS_UPDATE, EventParam) > 0) {
6523
6524 SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR048,
6525 SK_PNMI_ERR048MSG);
6526
6527 return (SK_PNMI_ERR_GENERAL);
6528 }
6529
6530 return (SK_PNMI_ERR_OK);
6531}
6532
6533/*****************************************************************************
6534 *
6535 * MacUpdate - Force the XMAC to output the current statistics
6536 *
6537 * Description:
6538 *	The XMAC holds its statistics internally. To obtain the current
6539 * values we must send a command so that the statistic data will
6540 * be written to a predefined memory area on the adapter.
6541 *
6542 * Returns:
6543 * SK_PNMI_ERR_OK Task successfully performed.
6544 * SK_PNMI_ERR_GENERAL Something went wrong.
6545 */
6546PNMI_STATIC int MacUpdate(
6547SK_AC *pAC, /* Pointer to adapter context */
6548SK_IOC IoC, /* IO context handle */
6549unsigned int FirstMac, /* Index of the first Mac to be updated */
6550unsigned int LastMac) /* Index of the last Mac to be updated */
6551{
6552 unsigned int MacIndex;
6553
6554 /*
6555 * Were the statistics already updated during the
6556 * current PNMI call?
6557 */
6558 if (pAC->Pnmi.MacUpdatedFlag > 0) {
6559
6560 return (SK_PNMI_ERR_OK);
6561 }
6562
6563 /* Send an update command to all MACs specified */
6564 for (MacIndex = FirstMac; MacIndex <= LastMac; MacIndex ++) {
6565
6566 /*
6567 * 2002-09-13 pweber: Freeze the current SW counters.
6568 * (That should be done as close as
6569 * possible to the update of the
6570 * HW counters)
6571 */
6572 if (pAC->GIni.GIMacType == SK_MAC_XMAC) {
6573 pAC->Pnmi.BufPort[MacIndex] = pAC->Pnmi.Port[MacIndex];
6574 }
6575
6576 /* 2002-09-13 pweber: Update the HW counter */
6577 if (pAC->GIni.GIFunc.pFnMacUpdateStats(pAC, IoC, MacIndex) != 0) {
6578
6579 return (SK_PNMI_ERR_GENERAL);
6580 }
6581 }
6582
6583 return (SK_PNMI_ERR_OK);
6584}
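
/*
 * Sketch of the calling pattern (hypothetical helper, not driver code): it
 * mirrors the RlmtUpdatedFlag/SirqUpdatedFlag brackets used by the handlers
 * above - refresh the hardware counters once, raise MacUpdatedFlag so nested
 * handlers skip the update, read the value, then lower the flag again.
 * "ReadOneCounter" is an invented name.
 */
PNMI_STATIC int ReadOneCounter(
SK_AC *pAC,	/* Pointer to adapter context */
SK_IOC IoC,	/* IO context handle */
unsigned int StatIndex,	/* Counter of interest */
SK_U64 *pVal)	/* Where to store the result */
{
	if (MacUpdate(pAC, IoC, 0, pAC->GIni.GIMacsFound - 1) !=
		SK_PNMI_ERR_OK) {

		return (SK_PNMI_ERR_GENERAL);
	}
	pAC->Pnmi.MacUpdatedFlag ++;

	*pVal = GetPhysStatVal(pAC, IoC, 0, StatIndex);

	pAC->Pnmi.MacUpdatedFlag --;
	return (SK_PNMI_ERR_OK);
}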
6585
6586/*****************************************************************************
6587 *
6588 * GetStatVal - Retrieve an XMAC statistic counter
6589 *
6590 * Description:
6591 * Retrieves the statistic counter of a virtual or physical port. The
6592 * virtual port is identified by the index 0. It consists of all
6593 * currently active ports. To obtain the counter value for this port
6594 *	we must add the statistic counters of all active ports. To guarantee
6595 * continuous counter values for the virtual port even when port
6596 * switches occur we must additionally add a delta value, which was
6597 * calculated during a SK_PNMI_EVT_RLMT_ACTIVE_UP event.
6598 *
6599 * Returns:
6600 * Requested statistic value
6601 */
6602PNMI_STATIC SK_U64 GetStatVal(
6603SK_AC *pAC, /* Pointer to adapter context */
6604SK_IOC IoC, /* IO context handle */
6605unsigned int LogPortIndex, /* Index of the logical Port to be processed */
6606unsigned int StatIndex, /* Index to statistic value */
6607 SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */
6608{
6609 unsigned int PhysPortIndex;
6610 unsigned int PhysPortMax;
6611 SK_U64 Val = 0;
6612
6613
6614 if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) { /* Dual net mode */
6615
6616 PhysPortIndex = NetIndex;
6617
6618 Val = GetPhysStatVal(pAC, IoC, PhysPortIndex, StatIndex);
6619 }
6620 else { /* Single Net mode */
6621
6622 if (LogPortIndex == 0) {
6623
6624 PhysPortMax = pAC->GIni.GIMacsFound;
6625
6626 /* Add counter of all active ports */
6627 for (PhysPortIndex = 0; PhysPortIndex < PhysPortMax;
6628 PhysPortIndex ++) {
6629
6630 if (pAC->Pnmi.Port[PhysPortIndex].ActiveFlag) {
6631
6632 Val += GetPhysStatVal(pAC, IoC, PhysPortIndex, StatIndex);
6633 }
6634 }
6635
6636 /* Correct value because of port switches */
6637 Val += pAC->Pnmi.VirtualCounterOffset[StatIndex];
6638 }
6639 else {
6640 /* Get counter value of physical port */
6641 PhysPortIndex = SK_PNMI_PORT_LOG2PHYS(pAC, LogPortIndex);
6642
6643 Val = GetPhysStatVal(pAC, IoC, PhysPortIndex, StatIndex);
6644 }
6645 }
6646 return (Val);
6647}
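
/*
 * Worked example with made-up numbers (for illustration only): in single net
 * mode, if two active physical ports report 1000 and 250 for a counter and
 * the stored switch delta VirtualCounterOffset[StatIndex] is 40, the virtual
 * port (logical index 0) reports 1000 + 250 + 40 = 1290, while logical port
 * indices > 0 return the plain per-port value.
 */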
6648
6649/*****************************************************************************
6650 *
6651 * GetPhysStatVal - Get counter value for physical port
6652 *
6653 * Description:
6654 *	Builds a 64-bit counter value. Except for the octet counters,
6655 *	the lower 32 bits are counted in hardware and the upper 32 bits
6656 *	in software by monitoring counter overflow interrupts in the
6657 *	event handler. To guarantee continuous counter values during XMAC
6658 * resets (caused by a workaround) we must add a delta value.
6659 * The delta was calculated in the event handler when a
6660 * SK_PNMI_EVT_XMAC_RESET was received.
6661 *
6662 * Returns:
6663 * Counter value
6664 */
6665PNMI_STATIC SK_U64 GetPhysStatVal(
6666SK_AC *pAC, /* Pointer to adapter context */
6667SK_IOC IoC, /* IO context handle */
6668 unsigned int PhysPortIndex, /* Index of the physical port to be processed */
6669unsigned int StatIndex) /* Index to statistic value */
6670{
6671 SK_U64 Val = 0;
6672 SK_U32 LowVal = 0;
6673 SK_U32 HighVal = 0;
6674 SK_U16 Word;
6675 int MacType;
6676 unsigned int HelpIndex;
6677 SK_GEPORT *pPrt;
6678
6679 SK_PNMI_PORT *pPnmiPrt;
6680 SK_GEMACFUNC *pFnMac;
6681
6682 pPrt = &pAC->GIni.GP[PhysPortIndex];
6683
6684 MacType = pAC->GIni.GIMacType;
6685
6686 /* 2002-09-17 pweber: For XMAC, use the frozen SW counters (BufPort) */
6687 if (MacType == SK_MAC_XMAC) {
6688 pPnmiPrt = &pAC->Pnmi.BufPort[PhysPortIndex];
6689 }
6690 else {
6691 pPnmiPrt = &pAC->Pnmi.Port[PhysPortIndex];
6692 }
6693
6694 pFnMac = &pAC->GIni.GIFunc;
6695
6696 switch (StatIndex) {
6697 case SK_PNMI_HTX:
6698 if (MacType == SK_MAC_GMAC) {
6699 (void)pFnMac->pFnMacStatistic(pAC, IoC, PhysPortIndex,
6700 StatAddr[SK_PNMI_HTX_BROADCAST][MacType].Reg,
6701 &LowVal);
6702 (void)pFnMac->pFnMacStatistic(pAC, IoC, PhysPortIndex,
6703 StatAddr[SK_PNMI_HTX_MULTICAST][MacType].Reg,
6704 &HighVal);
6705 LowVal += HighVal;
6706 (void)pFnMac->pFnMacStatistic(pAC, IoC, PhysPortIndex,
6707 StatAddr[SK_PNMI_HTX_UNICAST][MacType].Reg,
6708 &HighVal);
6709 LowVal += HighVal;
6710 }
6711 else {
6712 (void)pFnMac->pFnMacStatistic(pAC, IoC, PhysPortIndex,
6713 StatAddr[StatIndex][MacType].Reg,
6714 &LowVal);
6715 }
6716 HighVal = pPnmiPrt->CounterHigh[StatIndex];
6717 break;
6718
6719 case SK_PNMI_HRX:
6720 if (MacType == SK_MAC_GMAC) {
6721 (void)pFnMac->pFnMacStatistic(pAC, IoC, PhysPortIndex,
6722 StatAddr[SK_PNMI_HRX_BROADCAST][MacType].Reg,
6723 &LowVal);
6724 (void)pFnMac->pFnMacStatistic(pAC, IoC, PhysPortIndex,
6725 StatAddr[SK_PNMI_HRX_MULTICAST][MacType].Reg,
6726 &HighVal);
6727 LowVal += HighVal;
6728 (void)pFnMac->pFnMacStatistic(pAC, IoC, PhysPortIndex,
6729 StatAddr[SK_PNMI_HRX_UNICAST][MacType].Reg,
6730 &HighVal);
6731 LowVal += HighVal;
6732 }
6733 else {
6734 (void)pFnMac->pFnMacStatistic(pAC, IoC, PhysPortIndex,
6735 StatAddr[StatIndex][MacType].Reg,
6736 &LowVal);
6737 }
6738 HighVal = pPnmiPrt->CounterHigh[StatIndex];
6739 break;
6740
6741 case SK_PNMI_HTX_OCTET:
6742 case SK_PNMI_HRX_OCTET:
6743 (void)pFnMac->pFnMacStatistic(pAC, IoC, PhysPortIndex,
6744 StatAddr[StatIndex][MacType].Reg,
6745 &HighVal);
6746 (void)pFnMac->pFnMacStatistic(pAC, IoC, PhysPortIndex,
6747 StatAddr[StatIndex + 1][MacType].Reg,
6748 &LowVal);
6749 break;
6750
6751 case SK_PNMI_HTX_BURST:
6752 case SK_PNMI_HTX_EXCESS_DEF:
6753 case SK_PNMI_HTX_CARRIER:
6754 /* Not supported by GMAC */
6755 if (MacType == SK_MAC_GMAC) {
6756 return (Val);
6757 }
6758
6759 (void)pFnMac->pFnMacStatistic(pAC, IoC, PhysPortIndex,
6760 StatAddr[StatIndex][MacType].Reg,
6761 &LowVal);
6762 HighVal = pPnmiPrt->CounterHigh[StatIndex];
6763 break;
6764
6765 case SK_PNMI_HTX_MACC:
6766 /* GMAC only supports PAUSE MAC control frames */
6767 if (MacType == SK_MAC_GMAC) {
6768 HelpIndex = SK_PNMI_HTX_PMACC;
6769 }
6770 else {
6771 HelpIndex = StatIndex;
6772 }
6773
6774 (void)pFnMac->pFnMacStatistic(pAC, IoC, PhysPortIndex,
6775 StatAddr[HelpIndex][MacType].Reg,
6776 &LowVal);
6777
6778 HighVal = pPnmiPrt->CounterHigh[StatIndex];
6779 break;
6780
6781 case SK_PNMI_HTX_COL:
6782 case SK_PNMI_HRX_UNDERSIZE:
6783 /* Not supported by XMAC */
6784 if (MacType == SK_MAC_XMAC) {
6785 return (Val);
6786 }
6787
6788 (void)pFnMac->pFnMacStatistic(pAC, IoC, PhysPortIndex,
6789 StatAddr[StatIndex][MacType].Reg,
6790 &LowVal);
6791 HighVal = pPnmiPrt->CounterHigh[StatIndex];
6792 break;
6793
6794 case SK_PNMI_HTX_DEFFERAL:
6795 /* Not supported by GMAC */
6796 if (MacType == SK_MAC_GMAC) {
6797 return (Val);
6798 }
6799
6800 /*
6801 * XMAC counts frames with deferred transmission
6802 * even in full-duplex mode.
6803 *
6804 * In full-duplex mode the counter remains constant!
6805 */
6806 if ((pPrt->PLinkModeStatus == SK_LMODE_STAT_AUTOFULL) ||
6807 (pPrt->PLinkModeStatus == SK_LMODE_STAT_FULL)) {
6808
6809 LowVal = 0;
6810 HighVal = 0;
6811 }
6812 else {
6813 /* Otherwise get contents of hardware register */
6814 (void)pFnMac->pFnMacStatistic(pAC, IoC, PhysPortIndex,
6815 StatAddr[StatIndex][MacType].Reg,
6816 &LowVal);
6817 HighVal = pPnmiPrt->CounterHigh[StatIndex];
6818 }
6819 break;
6820
6821 case SK_PNMI_HRX_BADOCTET:
6822 /* Not supported by XMAC */
6823 if (MacType == SK_MAC_XMAC) {
6824 return (Val);
6825 }
6826
6827 (void)pFnMac->pFnMacStatistic(pAC, IoC, PhysPortIndex,
6828 StatAddr[StatIndex][MacType].Reg,
6829 &HighVal);
6830 (void)pFnMac->pFnMacStatistic(pAC, IoC, PhysPortIndex,
6831 StatAddr[StatIndex + 1][MacType].Reg,
6832 &LowVal);
6833 break;
6834
6835 case SK_PNMI_HTX_OCTETLOW:
6836 case SK_PNMI_HRX_OCTETLOW:
6837 case SK_PNMI_HRX_BADOCTETLOW:
6838 return (Val);
6839
6840 case SK_PNMI_HRX_LONGFRAMES:
6841 /* For XMAC the SW counter is managed by PNMI */
6842 if (MacType == SK_MAC_XMAC) {
6843 return (pPnmiPrt->StatRxLongFrameCts);
6844 }
6845
6846 (void)pFnMac->pFnMacStatistic(pAC, IoC, PhysPortIndex,
6847 StatAddr[StatIndex][MacType].Reg,
6848 &LowVal);
6849 HighVal = pPnmiPrt->CounterHigh[StatIndex];
6850 break;
6851
6852 case SK_PNMI_HRX_TOO_LONG:
6853 (void)pFnMac->pFnMacStatistic(pAC, IoC, PhysPortIndex,
6854 StatAddr[StatIndex][MacType].Reg,
6855 &LowVal);
6856 HighVal = pPnmiPrt->CounterHigh[StatIndex];
6857
6858 Val = (((SK_U64)HighVal << 32) | (SK_U64)LowVal);
6859
6860 if (MacType == SK_MAC_GMAC) {
6861 /* For GMAC the SW counter is additionally managed by PNMI */
6862 Val += pPnmiPrt->StatRxFrameTooLongCts;
6863 }
6864 else {
6865 /*
6866 			 * Frames longer than the IEEE 802.3 maximum frame size are
6867 			 * counted by the XMAC in the frame_too_long counter even if
6868 			 * reception of long frames was enabled and the frame was correct.
6869 			 * So correct the value by subtracting the RxLongFrame counter.
6870 */
6871 Val -= pPnmiPrt->StatRxLongFrameCts;
6872 }
6873
6874 LowVal = (SK_U32)Val;
6875 HighVal = (SK_U32)(Val >> 32);
6876 break;
6877
6878 case SK_PNMI_HRX_SHORTS:
6879 /* Not supported by GMAC */
6880 if (MacType == SK_MAC_GMAC) {
6881 /* GM_RXE_FRAG?? */
6882 return (Val);
6883 }
6884
6885 /*
6886 		 * XMAC counts short frame errors even if the link is down (#10620).
6887 		 *
6888 		 * If the link is down, the counter remains constant.
6889 */
6890 if (pPrt->PLinkModeStatus != SK_LMODE_STAT_UNKNOWN) {
6891
6892 /* Otherwise get incremental difference */
6893 (void)pFnMac->pFnMacStatistic(pAC, IoC, PhysPortIndex,
6894 StatAddr[StatIndex][MacType].Reg,
6895 &LowVal);
6896 HighVal = pPnmiPrt->CounterHigh[StatIndex];
6897
6898 Val = (((SK_U64)HighVal << 32) | (SK_U64)LowVal);
6899 Val -= pPnmiPrt->RxShortZeroMark;
6900
6901 LowVal = (SK_U32)Val;
6902 HighVal = (SK_U32)(Val >> 32);
6903 }
6904 break;
6905
6906 case SK_PNMI_HRX_MACC:
6907 case SK_PNMI_HRX_MACC_UNKWN:
6908 case SK_PNMI_HRX_BURST:
6909 case SK_PNMI_HRX_MISSED:
6910 case SK_PNMI_HRX_FRAMING:
6911 case SK_PNMI_HRX_CARRIER:
6912 case SK_PNMI_HRX_IRLENGTH:
6913 case SK_PNMI_HRX_SYMBOL:
6914 case SK_PNMI_HRX_CEXT:
6915 /* Not supported by GMAC */
6916 if (MacType == SK_MAC_GMAC) {
6917 return (Val);
6918 }
6919
6920 (void)pFnMac->pFnMacStatistic(pAC, IoC, PhysPortIndex,
6921 StatAddr[StatIndex][MacType].Reg,
6922 &LowVal);
6923 HighVal = pPnmiPrt->CounterHigh[StatIndex];
6924 break;
6925
6926 case SK_PNMI_HRX_PMACC_ERR:
6927 /* For GMAC the SW counter is managed by PNMI */
6928 if (MacType == SK_MAC_GMAC) {
6929 return (pPnmiPrt->StatRxPMaccErr);
6930 }
6931
6932 (void)pFnMac->pFnMacStatistic(pAC, IoC, PhysPortIndex,
6933 StatAddr[StatIndex][MacType].Reg,
6934 &LowVal);
6935 HighVal = pPnmiPrt->CounterHigh[StatIndex];
6936 break;
6937
6938 /* SW counter managed by PNMI */
6939 case SK_PNMI_HTX_SYNC:
6940 LowVal = (SK_U32)pPnmiPrt->StatSyncCts;
6941 HighVal = (SK_U32)(pPnmiPrt->StatSyncCts >> 32);
6942 break;
6943
6944 /* SW counter managed by PNMI */
6945 case SK_PNMI_HTX_SYNC_OCTET:
6946 LowVal = (SK_U32)pPnmiPrt->StatSyncOctetsCts;
6947 HighVal = (SK_U32)(pPnmiPrt->StatSyncOctetsCts >> 32);
6948 break;
6949
6950 case SK_PNMI_HRX_FCS:
6951 /*
6952 * Broadcom filters FCS errors and counts it in
6953 * Receive Error Counter register
6954 */
6955 if (pPrt->PhyType == SK_PHY_BCOM) {
6956 /* do not read while not initialized (PHY_READ hangs!)*/
6957 if (pPrt->PState != SK_PRT_RESET) {
6958 SkXmPhyRead(pAC, IoC, PhysPortIndex, PHY_BCOM_RE_CTR, &Word);
6959
6960 LowVal = Word;
6961 }
6962 HighVal = pPnmiPrt->CounterHigh[StatIndex];
6963 }
6964 else {
6965 (void)pFnMac->pFnMacStatistic(pAC, IoC, PhysPortIndex,
6966 StatAddr[StatIndex][MacType].Reg,
6967 &LowVal);
6968 HighVal = pPnmiPrt->CounterHigh[StatIndex];
6969 }
6970 break;
6971
6972 default:
6973 (void)pFnMac->pFnMacStatistic(pAC, IoC, PhysPortIndex,
6974 StatAddr[StatIndex][MacType].Reg,
6975 &LowVal);
6976 HighVal = pPnmiPrt->CounterHigh[StatIndex];
6977 break;
6978 }
6979
6980 Val = (((SK_U64)HighVal << 32) | (SK_U64)LowVal);
6981
6982 /* Correct value because of possible XMAC reset. XMAC Errata #2 */
6983 Val += pPnmiPrt->CounterOffset[StatIndex];
6984
6985 return (Val);
6986}
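
/*
 * Sketch of the 64-bit assembly used throughout GetPhysStatVal() above
 * (hypothetical helper, not part of the driver): the MAC supplies the low
 * 32 bits, PNMI keeps the high 32 bits in CounterHigh[] and adds the
 * CounterOffset[] delta kept for the XMAC reset workaround.
 * "AssembleCounter" is an invented name.
 */
PNMI_STATIC SK_U64 AssembleCounter(
SK_U32 LowVal,	/* Low 32 bits read from the MAC */
SK_U32 HighVal,	/* High 32 bits maintained in software */
SK_U64 Offset)	/* Reset-workaround delta */
{
	return ((((SK_U64)HighVal << 32) | (SK_U64)LowVal) + Offset);
}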
6987
6988/*****************************************************************************
6989 *
6990 * ResetCounter - Set all counters and timestamps to zero
6991 *
6992 * Description:
6993 *	Notifies other common modules which store statistic data to
6994 *	reset their counters, and finally resets our own counters.
6995 *
6996 * Returns:
6997 * Nothing
6998 */
6999PNMI_STATIC void ResetCounter(
7000SK_AC *pAC, /* Pointer to adapter context */
7001SK_IOC IoC, /* IO context handle */
7002SK_U32 NetIndex)
7003{
7004 unsigned int PhysPortIndex;
7005 SK_EVPARA EventParam;
7006
7007
7008 SK_MEMSET((char *)&EventParam, 0, sizeof(EventParam));
7009
7010 /* Notify sensor module */
7011 SkEventQueue(pAC, SKGE_I2C, SK_I2CEV_CLEAR, EventParam);
7012
7013 /* Notify RLMT module */
7014 EventParam.Para32[0] = NetIndex;
7015 EventParam.Para32[1] = (SK_U32)-1;
7016 SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_STATS_CLEAR, EventParam);
7017 EventParam.Para32[1] = 0;
7018
7019 /* Notify SIRQ module */
7020 SkEventQueue(pAC, SKGE_HWAC, SK_HWEV_CLEAR_STAT, EventParam);
7021
7022 /* Notify CSUM module */
7023#ifdef SK_USE_CSUM
7024 EventParam.Para32[0] = NetIndex;
7025 EventParam.Para32[1] = (SK_U32)-1;
7026 SkEventQueue(pAC, SKGE_CSUM, SK_CSUM_EVENT_CLEAR_PROTO_STATS,
7027 EventParam);
7028#endif /* SK_USE_CSUM */
7029
7030 /* Clear XMAC statistic */
7031 for (PhysPortIndex = 0; PhysPortIndex <
7032 (unsigned int)pAC->GIni.GIMacsFound; PhysPortIndex ++) {
7033
7034 (void)pAC->GIni.GIFunc.pFnMacResetCounter(pAC, IoC, PhysPortIndex);
7035
7036 SK_MEMSET((char *)&pAC->Pnmi.Port[PhysPortIndex].CounterHigh,
7037 0, sizeof(pAC->Pnmi.Port[PhysPortIndex].CounterHigh));
7038 SK_MEMSET((char *)&pAC->Pnmi.Port[PhysPortIndex].
7039 CounterOffset, 0, sizeof(pAC->Pnmi.Port[
7040 PhysPortIndex].CounterOffset));
7041 SK_MEMSET((char *)&pAC->Pnmi.Port[PhysPortIndex].StatSyncCts,
7042 0, sizeof(pAC->Pnmi.Port[PhysPortIndex].StatSyncCts));
7043 SK_MEMSET((char *)&pAC->Pnmi.Port[PhysPortIndex].
7044 StatSyncOctetsCts, 0, sizeof(pAC->Pnmi.Port[
7045 PhysPortIndex].StatSyncOctetsCts));
7046 SK_MEMSET((char *)&pAC->Pnmi.Port[PhysPortIndex].
7047 StatRxLongFrameCts, 0, sizeof(pAC->Pnmi.Port[
7048 PhysPortIndex].StatRxLongFrameCts));
7049 SK_MEMSET((char *)&pAC->Pnmi.Port[PhysPortIndex].
7050 StatRxFrameTooLongCts, 0, sizeof(pAC->Pnmi.Port[
7051 PhysPortIndex].StatRxFrameTooLongCts));
7052 SK_MEMSET((char *)&pAC->Pnmi.Port[PhysPortIndex].
7053 StatRxPMaccErr, 0, sizeof(pAC->Pnmi.Port[
7054 PhysPortIndex].StatRxPMaccErr));
7055 }
7056
7057 /*
7058 * Clear local statistics
7059 */
7060 SK_MEMSET((char *)&pAC->Pnmi.VirtualCounterOffset, 0,
7061 sizeof(pAC->Pnmi.VirtualCounterOffset));
7062 pAC->Pnmi.RlmtChangeCts = 0;
7063 pAC->Pnmi.RlmtChangeTime = 0;
7064 SK_MEMSET((char *)&pAC->Pnmi.RlmtChangeEstimate.EstValue[0], 0,
7065 sizeof(pAC->Pnmi.RlmtChangeEstimate.EstValue));
7066 pAC->Pnmi.RlmtChangeEstimate.EstValueIndex = 0;
7067 pAC->Pnmi.RlmtChangeEstimate.Estimate = 0;
7068 pAC->Pnmi.Port[NetIndex].TxSwQueueMax = 0;
7069 pAC->Pnmi.Port[NetIndex].TxRetryCts = 0;
7070 pAC->Pnmi.Port[NetIndex].RxIntrCts = 0;
7071 pAC->Pnmi.Port[NetIndex].TxIntrCts = 0;
7072 pAC->Pnmi.Port[NetIndex].RxNoBufCts = 0;
7073 pAC->Pnmi.Port[NetIndex].TxNoBufCts = 0;
7074 pAC->Pnmi.Port[NetIndex].TxUsedDescrNo = 0;
7075 pAC->Pnmi.Port[NetIndex].RxDeliveredCts = 0;
7076 pAC->Pnmi.Port[NetIndex].RxOctetsDeliveredCts = 0;
7077 pAC->Pnmi.Port[NetIndex].ErrRecoveryCts = 0;
7078}
7079
7080/*****************************************************************************
7081 *
7082 * GetTrapEntry - Get an entry in the trap buffer
7083 *
7084 * Description:
7085 * The trap buffer stores various events. A user application somehow
7086 * gets notified that an event occurred and retrieves the trap buffer
7087 * contents (or simply polls the buffer). The buffer is organized as
7088 * a ring which stores the newest traps at the beginning. The oldest
7089 * traps are overwritten by the newest ones. Each trap entry has a
7090 * unique number, so that applications may detect new trap entries.
7091 *
7092 * Returns:
7093 * A pointer to the trap entry
7094 */
7095PNMI_STATIC char* GetTrapEntry(
7096SK_AC *pAC, /* Pointer to adapter context */
7097SK_U32 TrapId, /* SNMP ID of the trap */
7098unsigned int Size) /* Space needed for trap entry */
7099{
7100 unsigned int BufPad = pAC->Pnmi.TrapBufPad;
7101 unsigned int BufFree = pAC->Pnmi.TrapBufFree;
7102 unsigned int Beg = pAC->Pnmi.TrapQueueBeg;
7103 unsigned int End = pAC->Pnmi.TrapQueueEnd;
7104 char *pBuf = &pAC->Pnmi.TrapBuf[0];
7105 int Wrap;
7106 unsigned int NeededSpace;
7107 unsigned int EntrySize;
7108 SK_U32 Val32;
7109 SK_U64 Val64;
7110
7111
7112 /* Last byte of entry will get a copy of the entry length */
7113 Size ++;
7114
7115 /* Calculate needed buffer space */
7116
7117 if (Beg >= Size) {
7118
7119 NeededSpace = Size;
7120 Wrap = SK_FALSE;
7121 }
7122 else {
7123 NeededSpace = Beg + Size;
7124 Wrap = SK_TRUE;
7125 }
7126
7127 /*
7128 * Check if enough buffer space is provided. Otherwise
7129 * free some entries. Leave one byte space between begin
7130 * and end of buffer to make it possible to detect whether
7131 * the buffer is full or empty
7132 */
7133 while (BufFree < NeededSpace + 1) {
7134
7135 if (End == 0) {
7136
7137 End = SK_PNMI_TRAP_QUEUE_LEN;
7138 }
7139
7140 EntrySize = (unsigned int)*((unsigned char *)pBuf + End - 1);
7141 BufFree += EntrySize;
7142 End -= EntrySize;
7143#ifdef DEBUG
7144 SK_MEMSET(pBuf + End, (char)(-1), EntrySize);
7145#endif /* DEBUG */
7146 if (End == BufPad) {
7147#ifdef DEBUG
7148 SK_MEMSET(pBuf, (char)(-1), End);
7149#endif /* DEBUG */
7150 BufFree += End;
7151 End = 0;
7152 BufPad = 0;
7153 }
7154 }
7155
7156 /*
7157 * Insert new entry as first entry. Newest entries are
7158 * stored at the beginning of the queue.
7159 */
7160 if (Wrap) {
7161
7162 BufPad = Beg;
7163 Beg = SK_PNMI_TRAP_QUEUE_LEN - Size;
7164 }
7165 else {
7166 Beg = Beg - Size;
7167 }
7168 BufFree -= NeededSpace;
7169
7170 /* Save the current offsets */
7171 pAC->Pnmi.TrapQueueBeg = Beg;
7172 pAC->Pnmi.TrapQueueEnd = End;
7173 pAC->Pnmi.TrapBufPad = BufPad;
7174 pAC->Pnmi.TrapBufFree = BufFree;
7175
7176 /* Initialize the trap entry */
7177 *(pBuf + Beg + Size - 1) = (char)Size;
7178 *(pBuf + Beg) = (char)Size;
7179 Val32 = (pAC->Pnmi.TrapUnique) ++;
7180 SK_PNMI_STORE_U32(pBuf + Beg + 1, Val32);
7181 SK_PNMI_STORE_U32(pBuf + Beg + 1 + sizeof(SK_U32), TrapId);
7182 Val64 = SK_PNMI_HUNDREDS_SEC(SkOsGetTime(pAC));
7183 SK_PNMI_STORE_U64(pBuf + Beg + 1 + 2 * sizeof(SK_U32), Val64);
7184
7185 return (pBuf + Beg);
7186}
7187
7188/*****************************************************************************
7189 *
7190 * CopyTrapQueue - Copies the trap buffer for the TRAP OID
7191 *
7192 * Description:
7193 * On a query of the TRAP OID the trap buffer contents will be
7194 * copied continuously to the request buffer, which must be large
7195 * copied contiguously to the request buffer, which must be large
7196 *
7197 * Returns:
7198 * Nothing
7199 */
7200PNMI_STATIC void CopyTrapQueue(
7201SK_AC *pAC, /* Pointer to adapter context */
7202char *pDstBuf) /* Buffer to which the queued traps will be copied */
7203{
7204 unsigned int BufPad = pAC->Pnmi.TrapBufPad;
7205 unsigned int Trap = pAC->Pnmi.TrapQueueBeg;
7206 unsigned int End = pAC->Pnmi.TrapQueueEnd;
7207 char *pBuf = &pAC->Pnmi.TrapBuf[0];
7208 unsigned int Len;
7209 unsigned int DstOff = 0;
7210
7211
7212 while (Trap != End) {
7213
7214 Len = (unsigned int)*(pBuf + Trap);
7215
7216 /*
7217 * Last byte containing a copy of the length will
7218 * not be copied.
7219 */
7220 *(pDstBuf + DstOff) = (char)(Len - 1);
7221 SK_MEMCPY(pDstBuf + DstOff + 1, pBuf + Trap + 1, Len - 2);
7222 DstOff += Len - 1;
7223
7224 Trap += Len;
7225 if (Trap == SK_PNMI_TRAP_QUEUE_LEN) {
7226
7227 Trap = BufPad;
7228 }
7229 }
7230}
7231
7232/*****************************************************************************
7233 *
7234 * GetTrapQueueLen - Get the length of the trap buffer
7235 *
7236 * Description:
7237 * Evaluates the number of currently stored traps and the needed
7238 * buffer size to retrieve them.
7239 *
7240 * Returns:
7241 * Nothing
7242 */
7243PNMI_STATIC void GetTrapQueueLen(
7244SK_AC *pAC, /* Pointer to adapter context */
7245unsigned int *pLen, /* Length in Bytes of all queued traps */
7246unsigned int *pEntries) /* Returns number of traps stored in queue */
7247{
7248 unsigned int BufPad = pAC->Pnmi.TrapBufPad;
7249 unsigned int Trap = pAC->Pnmi.TrapQueueBeg;
7250 unsigned int End = pAC->Pnmi.TrapQueueEnd;
7251 char *pBuf = &pAC->Pnmi.TrapBuf[0];
7252 unsigned int Len;
7253 unsigned int Entries = 0;
7254 unsigned int TotalLen = 0;
7255
7256
7257 while (Trap != End) {
7258
7259 Len = (unsigned int)*(pBuf + Trap);
7260 TotalLen += Len - 1;
7261 Entries ++;
7262
7263 Trap += Len;
7264 if (Trap == SK_PNMI_TRAP_QUEUE_LEN) {
7265
7266 Trap = BufPad;
7267 }
7268 }
7269
7270 *pEntries = Entries;
7271 *pLen = TotalLen;
7272}
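For illustration, here is how an application-side consumer might walk a buffer produced by the two routines above: GetTrapQueueLen() reports how many bytes CopyTrapQueue() will emit, and each emitted record starts with one length byte covering the whole record, followed by the fields laid down by GetTrapEntry() (32-bit unique number, 32-bit trap ID, 64-bit timestamp, then any OID-specific data). This sketch is not part of the driver; WalkTrapQueue is a hypothetical name, and the byte order of the multi-byte fields is simply whatever SK_PNMI_STORE_U32/U64 produced.

PNMI_STATIC void WalkTrapQueue(
SK_AC *pAC,	/* Pointer to adapter context */
char *pDstBuf)	/* Buffer previously filled by CopyTrapQueue() */
{
	unsigned int	TotalLen;	/* Length in bytes of all queued traps */
	unsigned int	Entries;	/* Number of queued traps */
	unsigned int	Off = 0;	/* Offset of the current record */
	unsigned int	RecLen;		/* Length of the current record */
	SK_U32		Unique;		/* Unique number of the trap entry */
	SK_U32		TrapId;		/* SNMP ID of the trap */

	GetTrapQueueLen(pAC, &TotalLen, &Entries);

	while (Off < TotalLen) {

		/* The first byte counts the whole record including itself */
		RecLen = (unsigned int)*((unsigned char *)pDstBuf + Off);

		SK_MEMCPY((char *)&Unique, pDstBuf + Off + 1, sizeof(SK_U32));
		SK_MEMCPY((char *)&TrapId,
			pDstBuf + Off + 1 + sizeof(SK_U32), sizeof(SK_U32));

		/* The 64-bit timestamp and any OID-specific data follow;
		 * hand them to the application here. */

		Off += RecLen;
	}
}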
7273
7274/*****************************************************************************
7275 *
7276 * QueueSimpleTrap - Store a simple trap to the trap buffer
7277 *
7278 * Description:
7279 * A simple trap is a trap with no additional data. It consists
7280 * simply of a trap code.
7281 *
7282 * Returns:
7283 * Nothing
7284 */
7285PNMI_STATIC void QueueSimpleTrap(
7286SK_AC *pAC, /* Pointer to adapter context */
7287SK_U32 TrapId) /* Type of sensor trap */
7288{
7289 GetTrapEntry(pAC, TrapId, SK_PNMI_TRAP_SIMPLE_LEN);
7290}
7291
7292/*****************************************************************************
7293 *
7294 * QueueSensorTrap - Stores a sensor trap in the trap buffer
7295 *
7296 * Description:
7297 * Gets an entry in the trap buffer and fills it with sensor related
7298 * data.
7299 *
7300 * Returns:
7301 * Nothing
7302 */
7303PNMI_STATIC void QueueSensorTrap(
7304SK_AC *pAC, /* Pointer to adapter context */
7305SK_U32 TrapId, /* Type of sensor trap */
7306unsigned int SensorIndex) /* Index of sensor which caused the trap */
7307{
7308 char *pBuf;
7309 unsigned int Offset;
7310 unsigned int DescrLen;
7311 SK_U32 Val32;
7312
7313
7314 /* Get trap buffer entry */
7315 DescrLen = SK_STRLEN(pAC->I2c.SenTable[SensorIndex].SenDesc);
7316 pBuf = GetTrapEntry(pAC, TrapId,
7317 SK_PNMI_TRAP_SENSOR_LEN_BASE + DescrLen);
7318 Offset = SK_PNMI_TRAP_SIMPLE_LEN;
7319
7320 /* Store additional sensor trap related data */
7321 Val32 = OID_SKGE_SENSOR_INDEX;
7322 SK_PNMI_STORE_U32(pBuf + Offset, Val32);
7323 *(pBuf + Offset + 4) = 4;
7324 Val32 = (SK_U32)SensorIndex;
7325 SK_PNMI_STORE_U32(pBuf + Offset + 5, Val32);
7326 Offset += 9;
7327
7328 Val32 = (SK_U32)OID_SKGE_SENSOR_DESCR;
7329 SK_PNMI_STORE_U32(pBuf + Offset, Val32);
7330 *(pBuf + Offset + 4) = (char)DescrLen;
7331 SK_MEMCPY(pBuf + Offset + 5, pAC->I2c.SenTable[SensorIndex].SenDesc,
7332 DescrLen);
7333 Offset += DescrLen + 5;
7334
7335 Val32 = OID_SKGE_SENSOR_TYPE;
7336 SK_PNMI_STORE_U32(pBuf + Offset, Val32);
7337 *(pBuf + Offset + 4) = 1;
7338 *(pBuf + Offset + 5) = (char)pAC->I2c.SenTable[SensorIndex].SenType;
7339 Offset += 6;
7340
7341 Val32 = OID_SKGE_SENSOR_VALUE;
7342 SK_PNMI_STORE_U32(pBuf + Offset, Val32);
7343 *(pBuf + Offset + 4) = 4;
7344 Val32 = (SK_U32)pAC->I2c.SenTable[SensorIndex].SenValue;
7345 SK_PNMI_STORE_U32(pBuf + Offset + 5, Val32);
7346}
7347
7348/*****************************************************************************
7349 *
7350 * QueueRlmtNewMacTrap - Store a port switch trap in the trap buffer
7351 *
7352 * Description:
7353 * Nothing further to explain.
7354 *
7355 * Returns:
7356 * Nothing
7357 */
7358PNMI_STATIC void QueueRlmtNewMacTrap(
7359SK_AC *pAC, /* Pointer to adapter context */
7360unsigned int ActiveMac) /* Index (0..n) of the currently active port */
7361{
7362 char *pBuf;
7363 SK_U32 Val32;
7364
7365
7366 pBuf = GetTrapEntry(pAC, OID_SKGE_TRAP_RLMT_CHANGE_PORT,
7367 SK_PNMI_TRAP_RLMT_CHANGE_LEN);
7368
7369 Val32 = OID_SKGE_RLMT_PORT_ACTIVE;
7370 SK_PNMI_STORE_U32(pBuf + SK_PNMI_TRAP_SIMPLE_LEN, Val32);
7371 *(pBuf + SK_PNMI_TRAP_SIMPLE_LEN + 4) = 1;
7372 *(pBuf + SK_PNMI_TRAP_SIMPLE_LEN + 5) = (char)ActiveMac;
7373}
7374
7375/*****************************************************************************
7376 *
7377 * QueueRlmtPortTrap - Store port related RLMT trap to trap buffer
7378 *
7379 * Description:
7380 * Nothing further to explain.
7381 *
7382 * Returns:
7383 * Nothing
7384 */
7385PNMI_STATIC void QueueRlmtPortTrap(
7386SK_AC *pAC, /* Pointer to adapter context */
7387SK_U32 TrapId, /* Type of RLMT port trap */
7388unsigned int PortIndex) /* Index of the port, which changed its state */
7389{
7390 char *pBuf;
7391 SK_U32 Val32;
7392
7393
7394 pBuf = GetTrapEntry(pAC, TrapId, SK_PNMI_TRAP_RLMT_PORT_LEN);
7395
7396 Val32 = OID_SKGE_RLMT_PORT_INDEX;
7397 SK_PNMI_STORE_U32(pBuf + SK_PNMI_TRAP_SIMPLE_LEN, Val32);
7398 *(pBuf + SK_PNMI_TRAP_SIMPLE_LEN + 4) = 1;
7399 *(pBuf + SK_PNMI_TRAP_SIMPLE_LEN + 5) = (char)PortIndex;
7400}
7401
7402/*****************************************************************************
7403 *
7404 * CopyMac - Copies a MAC address
7405 *
7406 * Description:
7407 * Nothing further to explain.
7408 *
7409 * Returns:
7410 * Nothing
7411 */
7412PNMI_STATIC void CopyMac(
7413char *pDst, /* Pointer to destination buffer */
7414SK_MAC_ADDR *pMac) /* Pointer to source MAC address */
7415{
7416 int i;
7417
7418
7419 for (i = 0; i < sizeof(SK_MAC_ADDR); i ++) {
7420
7421 *(pDst + i) = pMac->a[i];
7422 }
7423}
7424
7425#ifdef SK_POWER_MGMT
7426/*****************************************************************************
7427 *
7428 * PowerManagement - OID handler function of PowerManagement OIDs
7429 *
7430 * Description:
7431 * The code is simple. No description necessary.
7432 *
7433 * Returns:
7434 * SK_PNMI_ERR_OK The request was successfully performed.
7435 * SK_PNMI_ERR_GENERAL A general severe internal error occurred.
7436 * SK_PNMI_ERR_TOO_SHORT The passed buffer is too short to contain
7437 * the correct data (e.g. a 32bit value is
7438 * needed, but a 16 bit value was passed).
7439 * SK_PNMI_ERR_UNKNOWN_INST The requested instance of the OID doesn't
7440 * exist (e.g. port instance 3 on a two port
7441 * adapter).
7442 */
7443
7444PNMI_STATIC int PowerManagement(
7445SK_AC *pAC, /* Pointer to adapter context */
7446SK_IOC IoC, /* IO context handle */
7447int Action, /* Get/PreSet/Set action */
7448SK_U32 Id, /* Object ID that is to be processed */
7449char *pBuf, /* Buffer to which the mgmt data will be retrieved */
7450unsigned int *pLen, /* On call: buffer length. On return: used buffer */
7451SK_U32 Instance, /* Instance (1..n) that is to be queried or -1 */
7452unsigned int TableIndex, /* Index to the Id table */
7453SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */
7454{
7455
7456 SK_U32 RetCode = SK_PNMI_ERR_GENERAL;
7457
7458 /*
7459 * Check instance. We only handle single instance variables
7460 */
7461 if (Instance != (SK_U32)(-1) && Instance != 1) {
7462
7463 *pLen = 0;
7464 return (SK_PNMI_ERR_UNKNOWN_INST);
7465 }
7466
7467
7468 /* Check length */
7469 switch (Id) {
7470
7471 case OID_PNP_CAPABILITIES:
7472 if (*pLen < sizeof(SK_PNP_CAPABILITIES)) {
7473
7474 *pLen = sizeof(SK_PNP_CAPABILITIES);
7475 return (SK_PNMI_ERR_TOO_SHORT);
7476 }
7477 break;
7478
7479 case OID_PNP_SET_POWER:
7480 case OID_PNP_QUERY_POWER:
7481 if (*pLen < sizeof(SK_DEVICE_POWER_STATE))
7482 {
7483 *pLen = sizeof(SK_DEVICE_POWER_STATE);
7484 return (SK_PNMI_ERR_TOO_SHORT);
7485 }
7486 break;
7487
7488 case OID_PNP_ADD_WAKE_UP_PATTERN:
7489 case OID_PNP_REMOVE_WAKE_UP_PATTERN:
7490 if (*pLen < sizeof(SK_PM_PACKET_PATTERN)) {
7491
7492 *pLen = sizeof(SK_PM_PACKET_PATTERN);
7493 return (SK_PNMI_ERR_TOO_SHORT);
7494 }
7495 break;
7496
7497 case OID_PNP_ENABLE_WAKE_UP:
7498 if (*pLen < sizeof(SK_U32)) {
7499
7500 *pLen = sizeof(SK_U32);
7501 return (SK_PNMI_ERR_TOO_SHORT);
7502 }
7503 break;
7504 }
7505
7506 /*
7507 * Perform action
7508 */
7509 if (Action == SK_PNMI_GET) {
7510
7511 /*
7512 * Get value
7513 */
7514 switch (Id) {
7515
7516 case OID_PNP_CAPABILITIES:
7517 RetCode = SkPowerQueryPnPCapabilities(pAC, IoC, pBuf, pLen);
7518 break;
7519
7520 case OID_PNP_QUERY_POWER:
7521 /* The Windows DDK describes: An OID_PNP_QUERY_POWER requests
7522 the miniport to indicate whether it can transition its NIC
7523 to the low-power state.
7524 A miniport driver must always return NDIS_STATUS_SUCCESS
7525 to a query of OID_PNP_QUERY_POWER. */
7526 *pLen = sizeof(SK_DEVICE_POWER_STATE);
7527 RetCode = SK_PNMI_ERR_OK;
7528 break;
7529
7530 /* NDIS handles these OIDs as write-only.
7531 * So in case of a GET action the buffer is returned with a
7532 * written length of 0
7533 */
7534 case OID_PNP_SET_POWER:
7535 case OID_PNP_ADD_WAKE_UP_PATTERN:
7536 case OID_PNP_REMOVE_WAKE_UP_PATTERN:
7537 *pLen = 0;
7538 RetCode = SK_PNMI_ERR_NOT_SUPPORTED;
7539 break;
7540
7541 case OID_PNP_ENABLE_WAKE_UP:
7542 RetCode = SkPowerGetEnableWakeUp(pAC, IoC, pBuf, pLen);
7543 break;
7544
7545 default:
7546 RetCode = SK_PNMI_ERR_GENERAL;
7547 break;
7548 }
7549
7550 return (RetCode);
7551 }
7552
7553
7554 /*
7555 * Perform preset or set
7556 */
7557
7558 /* POWER module does not support PRESET action */
7559 if (Action == SK_PNMI_PRESET) {
7560 return (SK_PNMI_ERR_OK);
7561 }
7562
7563 switch (Id) {
7564 case OID_PNP_SET_POWER:
7565 RetCode = SkPowerSetPower(pAC, IoC, pBuf, pLen);
7566 break;
7567
7568 case OID_PNP_ADD_WAKE_UP_PATTERN:
7569 RetCode = SkPowerAddWakeUpPattern(pAC, IoC, pBuf, pLen);
7570 break;
7571
7572 case OID_PNP_REMOVE_WAKE_UP_PATTERN:
7573 RetCode = SkPowerRemoveWakeUpPattern(pAC, IoC, pBuf, pLen);
7574 break;
7575
7576 case OID_PNP_ENABLE_WAKE_UP:
7577 RetCode = SkPowerSetEnableWakeUp(pAC, IoC, pBuf, pLen);
7578 break;
7579
7580 default:
7581 RetCode = SK_PNMI_ERR_READ_ONLY;
7582 }
7583
7584 return (RetCode);
7585}
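A minimal calling sketch, purely illustrative and not from the driver sources: the handler follows the common PNMI convention in which *pLen carries the buffer size on entry and the number of bytes written (or required) on return. TableIndex is passed as 0 only as a placeholder here, and PowerManagementGetExample is a hypothetical name; normally this handler is reached through the PNMI ID table rather than called directly.

PNMI_STATIC int PowerManagementGetExample(
SK_AC *pAC,	/* Pointer to adapter context */
SK_IOC IoC)	/* IO context handle */
{
	SK_U32		WakeUpState;	/* Filled in for OID_PNP_ENABLE_WAKE_UP */
	unsigned int	Length = sizeof(SK_U32);

	/* On SK_PNMI_ERR_TOO_SHORT, Length holds the size the OID needs;
	 * on SK_PNMI_ERR_OK, it holds the number of bytes written. */
	return (PowerManagement(pAC, IoC, SK_PNMI_GET, OID_PNP_ENABLE_WAKE_UP,
		(char *)&WakeUpState, &Length, (SK_U32)(-1),
		0 /* TableIndex placeholder */, 0 /* NetIndex */));
}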
7586#endif /* SK_POWER_MGMT */
7587
7588#ifdef SK_DIAG_SUPPORT
7589/*****************************************************************************
7590 *
7591 * DiagActions - OID handler function of Diagnostic driver
7592 *
7593 * Description:
7594 * The code is simple. No description necessary.
7595 *
7596 * Returns:
7597 * SK_PNMI_ERR_OK The request was successfully performed.
7598 * SK_PNMI_ERR_GENERAL A general severe internal error occurred.
7599 * SK_PNMI_ERR_TOO_SHORT The passed buffer is too short to contain
7600 * the correct data (e.g. a 32bit value is
7601 * needed, but a 16 bit value was passed).
7602 * SK_PNMI_ERR_UNKNOWN_INST The requested instance of the OID doesn't
7603 * exist (e.g. port instance 3 on a two port
7604 * adapter).
7605 */
7606
7607PNMI_STATIC int DiagActions(
7608SK_AC *pAC, /* Pointer to adapter context */
7609SK_IOC IoC, /* IO context handle */
7610int Action, /* GET/PRESET/SET action */
7611SK_U32 Id, /* Object ID that is to be processed */
7612char *pBuf, /* Buffer used for the management data transfer */
7613unsigned int *pLen, /* On call: pBuf buffer length. On return: used buffer */
7614SK_U32 Instance, /* Instance (1..n) that is to be queried or -1 */
7615unsigned int TableIndex, /* Index to the Id table */
7616SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */
7617{
7618
7619 SK_U32 DiagStatus;
7620 SK_U32 RetCode = SK_PNMI_ERR_GENERAL;
7621
7622 /*
7623 * Check instance. We only handle single instance variables.
7624 */
7625 if (Instance != (SK_U32)(-1) && Instance != 1) {
7626
7627 *pLen = 0;
7628 return (SK_PNMI_ERR_UNKNOWN_INST);
7629 }
7630
7631 /*
7632 * Check length.
7633 */
7634 switch (Id) {
7635
7636 case OID_SKGE_DIAG_MODE:
7637 if (*pLen < sizeof(SK_U32)) {
7638
7639 *pLen = sizeof(SK_U32);
7640 return (SK_PNMI_ERR_TOO_SHORT);
7641 }
7642 break;
7643
7644 default:
7645 SK_ERR_LOG(pAC, SK_ERRCL_SW, SK_PNMI_ERR040, SK_PNMI_ERR040MSG);
7646 *pLen = 0;
7647 return (SK_PNMI_ERR_GENERAL);
7648 }
7649
7650 /* Perform action. */
7651
7652 /* GET value. */
7653 if (Action == SK_PNMI_GET) {
7654
7655 switch (Id) {
7656
7657 case OID_SKGE_DIAG_MODE:
7658 DiagStatus = pAC->Pnmi.DiagAttached;
7659 SK_PNMI_STORE_U32(pBuf, DiagStatus);
7660 *pLen = sizeof(SK_U32);
7661 RetCode = SK_PNMI_ERR_OK;
7662 break;
7663
7664 default:
7665 *pLen = 0;
7666 RetCode = SK_PNMI_ERR_GENERAL;
7667 break;
7668 }
7669 return (RetCode);
7670 }
7671
7672 /* From here SET or PRESET value. */
7673
7674 /* PRESET value is not supported. */
7675 if (Action == SK_PNMI_PRESET) {
7676 return (SK_PNMI_ERR_OK);
7677 }
7678
7679 /* SET value. */
7680 switch (Id) {
7681 case OID_SKGE_DIAG_MODE:
7682
7683 /* Handle the SET. */
7684 switch (*pBuf) {
7685
7686 /* Attach the DIAG to this adapter. */
7687 case SK_DIAG_ATTACHED:
7688 /* Check if we come from running */
7689 if (pAC->Pnmi.DiagAttached == SK_DIAG_RUNNING) {
7690
7691 RetCode = SkDrvLeaveDiagMode(pAC);
7692
7693 }
7694 else if (pAC->Pnmi.DiagAttached == SK_DIAG_IDLE) {
7695
7696 RetCode = SK_PNMI_ERR_OK;
7697 }
7698
7699 else {
7700
7701 RetCode = SK_PNMI_ERR_GENERAL;
7702
7703 }
7704
7705 if (RetCode == SK_PNMI_ERR_OK) {
7706
7707 pAC->Pnmi.DiagAttached = SK_DIAG_ATTACHED;
7708 }
7709 break;
7710
7711 /* Enter the DIAG mode in the driver. */
7712 case SK_DIAG_RUNNING:
7713 RetCode = SK_PNMI_ERR_OK;
7714
7715 /*
7716 * If DiagAttached is set, we can tell the driver
7717 * to enter the DIAG mode.
7718 */
7719 if (pAC->Pnmi.DiagAttached == SK_DIAG_ATTACHED) {
7720 /* If DiagMode is not active, we can enter it. */
7721 if (!pAC->DiagModeActive) {
7722
7723 RetCode = SkDrvEnterDiagMode(pAC);
7724 }
7725 else {
7726
7727 RetCode = SK_PNMI_ERR_GENERAL;
7728 }
7729 }
7730 else {
7731
7732 RetCode = SK_PNMI_ERR_GENERAL;
7733 }
7734
7735 if (RetCode == SK_PNMI_ERR_OK) {
7736
7737 pAC->Pnmi.DiagAttached = SK_DIAG_RUNNING;
7738 }
7739 break;
7740
7741 case SK_DIAG_IDLE:
7742 /* Check if we come from running */
7743 if (pAC->Pnmi.DiagAttached == SK_DIAG_RUNNING) {
7744
7745 RetCode = SkDrvLeaveDiagMode(pAC);
7746
7747 }
7748 else if (pAC->Pnmi.DiagAttached == SK_DIAG_ATTACHED) {
7749
7750 RetCode = SK_PNMI_ERR_OK;
7751 }
7752
7753 else {
7754
7755 RetCode = SK_PNMI_ERR_GENERAL;
7756
7757 }
7758
7759 if (RetCode == SK_PNMI_ERR_OK) {
7760
7761 pAC->Pnmi.DiagAttached = SK_DIAG_IDLE;
7762 }
7763 break;
7764
7765 default:
7766 RetCode = SK_PNMI_ERR_BAD_VALUE;
7767 break;
7768 }
7769 break;
7770
7771 default:
7772 RetCode = SK_PNMI_ERR_GENERAL;
7773 }
7774
7775 if (RetCode == SK_PNMI_ERR_OK) {
7776 *pLen = sizeof(SK_U32);
7777 }
7778 else {
7779
7780 *pLen = 0;
7781 }
7782 return (RetCode);
7783}
7784#endif /* SK_DIAG_SUPPORT */
7785
7786/*****************************************************************************
7787 *
7788 * Vct - OID handler function of OIDs
7789 *
7790 * Description:
7791 * The code is simple. No description necessary.
7792 *
7793 * Returns:
7794 * SK_PNMI_ERR_OK The request was performed successfully.
7795 * SK_PNMI_ERR_GENERAL A general severe internal error occurred.
7796 * SK_PNMI_ERR_TOO_SHORT The passed buffer is too short to contain
7797 * the correct data (e.g. a 32bit value is
7798 * needed, but a 16 bit value was passed).
7799 * SK_PNMI_ERR_UNKNOWN_INST The requested instance of the OID doesn't
7800 * exist (e.g. port instance 3 on a two port
7801 * adapter).
7802 * SK_PNMI_ERR_READ_ONLY Only the Get action is allowed.
7803 *
7804 */
7805
7806PNMI_STATIC int Vct(
7807SK_AC *pAC, /* Pointer to adapter context */
7808SK_IOC IoC, /* IO context handle */
7809int Action, /* GET/PRESET/SET action */
7810SK_U32 Id, /* Object ID that is to be processed */
7811char *pBuf, /* Buffer used for the management data transfer */
7812unsigned int *pLen, /* On call: pBuf buffer length. On return: used buffer */
7813SK_U32 Instance, /* Instance (-1,2..n) that is to be queried */
7814unsigned int TableIndex, /* Index to the Id table */
7815SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */
7816{
7817 SK_GEPORT *pPrt;
7818 SK_PNMI_VCT *pVctBackupData;
7819 SK_U32 LogPortMax;
7820 SK_U32 PhysPortMax;
7821 SK_U32 PhysPortIndex;
7822 SK_U32 Limit;
7823 SK_U32 Offset;
7824 SK_BOOL Link;
7825 SK_U32 RetCode = SK_PNMI_ERR_GENERAL;
7826 int i;
7827 SK_EVPARA Para;
7828 SK_U32 CableLength;
7829
7830 /*
7831 * Calculate the port indexes from the instance.
7832 */
7833 PhysPortMax = pAC->GIni.GIMacsFound;
7834 LogPortMax = SK_PNMI_PORT_PHYS2LOG(PhysPortMax);
7835
7836 /* Dual net mode? */
7837 if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) {
7838 LogPortMax--;
7839 }
7840
7841 if ((Instance != (SK_U32) (-1))) {
7842 /* Check instance range. */
7843 if ((Instance < 2) || (Instance > LogPortMax)) {
7844 *pLen = 0;
7845 return (SK_PNMI_ERR_UNKNOWN_INST);
7846 }
7847
7848 if (pAC->Pnmi.DualNetActiveFlag == SK_TRUE) {
7849 PhysPortIndex = NetIndex;
7850 }
7851 else {
7852 PhysPortIndex = Instance - 2;
7853 }
7854 Limit = PhysPortIndex + 1;
7855 }
7856 else {
7857 /*
7858 * Instance == (SK_U32) (-1), get all Instances of that OID.
7859 *
7860 * Not implemented yet. May be used in future releases.
7861 */
7862 PhysPortIndex = 0;
7863 Limit = PhysPortMax;
7864 }
7865
7866 pPrt = &pAC->GIni.GP[PhysPortIndex];
7867 if (pPrt->PHWLinkUp) {
7868 Link = SK_TRUE;
7869 }
7870 else {
7871 Link = SK_FALSE;
7872 }
7873
7874 /* Check PHY type */
7875 if (pPrt->PhyType != SK_PHY_MARV_COPPER) {
7876 *pLen = 0;
7877 return (SK_PNMI_ERR_GENERAL);
7878 }
7879
7880 /* Initialize backup data pointer. */
7881 pVctBackupData = &pAC->Pnmi.VctBackup[PhysPortIndex];
7882
7883 /* Check action type */
7884 if (Action == SK_PNMI_GET) {
7885 /* Check length */
7886 switch (Id) {
7887
7888 case OID_SKGE_VCT_GET:
7889 if (*pLen < (Limit - PhysPortIndex) * sizeof(SK_PNMI_VCT)) {
7890 *pLen = (Limit - PhysPortIndex) * sizeof(SK_PNMI_VCT);
7891 return (SK_PNMI_ERR_TOO_SHORT);
7892 }
7893 break;
7894
7895 case OID_SKGE_VCT_STATUS:
7896 if (*pLen < (Limit - PhysPortIndex) * sizeof(SK_U8)) {
7897 *pLen = (Limit - PhysPortIndex) * sizeof(SK_U8);
7898 return (SK_PNMI_ERR_TOO_SHORT);
7899 }
7900 break;
7901
7902 default:
7903 *pLen = 0;
7904 return (SK_PNMI_ERR_GENERAL);
7905 }
7906
7907 /* Get value */
7908 Offset = 0;
7909 for (; PhysPortIndex < Limit; PhysPortIndex++) {
7910 switch (Id) {
7911
7912 case OID_SKGE_VCT_GET:
7913 if ((Link == SK_FALSE) &&
7914 (pAC->Pnmi.VctStatus[PhysPortIndex] & SK_PNMI_VCT_PENDING)) {
7915 RetCode = SkGmCableDiagStatus(pAC, IoC, PhysPortIndex, SK_FALSE);
7916 if (RetCode == 0) {
7917 pAC->Pnmi.VctStatus[PhysPortIndex] &= ~SK_PNMI_VCT_PENDING;
7918 pAC->Pnmi.VctStatus[PhysPortIndex] |=
7919 (SK_PNMI_VCT_NEW_VCT_DATA | SK_PNMI_VCT_TEST_DONE);
7920
7921 /* Copy results for later use to PNMI struct. */
7922 for (i = 0; i < 4; i++) {
7923 if (pPrt->PMdiPairSts[i] == SK_PNMI_VCT_NORMAL_CABLE) {
7924 if ((pPrt->PMdiPairLen[i] > 35) && (pPrt->PMdiPairLen[i] < 0xff)) {
7925 pPrt->PMdiPairSts[i] = SK_PNMI_VCT_IMPEDANCE_MISMATCH;
7926 }
7927 }
7928 if ((pPrt->PMdiPairLen[i] > 35) && (pPrt->PMdiPairLen[i] != 0xff)) {
7929 CableLength = 1000 * (((175 * pPrt->PMdiPairLen[i]) / 210) - 28);
7930 }
7931 else {
7932 CableLength = 0;
7933 }
7934 pVctBackupData->PMdiPairLen[i] = CableLength;
7935 pVctBackupData->PMdiPairSts[i] = pPrt->PMdiPairSts[i];
7936 }
7937
7938 Para.Para32[0] = PhysPortIndex;
7939 Para.Para32[1] = -1;
7940 SkEventQueue(pAC, SKGE_DRV, SK_DRV_PORT_RESET, Para);
7941 SkEventDispatcher(pAC, IoC);
7942 }
7943 else {
7944 ; /* VCT test is running. */
7945 }
7946 }
7947
7948 /* Get all results. */
7949 CheckVctStatus(pAC, IoC, pBuf, Offset, PhysPortIndex);
7950 Offset += sizeof(SK_U8);
7951 *(pBuf + Offset) = pPrt->PCableLen;
7952 Offset += sizeof(SK_U8);
7953 for (i = 0; i < 4; i++) {
7954 SK_PNMI_STORE_U32((pBuf + Offset), pVctBackupData->PMdiPairLen[i]);
7955 Offset += sizeof(SK_U32);
7956 }
7957 for (i = 0; i < 4; i++) {
7958 *(pBuf + Offset) = pVctBackupData->PMdiPairSts[i];
7959 Offset += sizeof(SK_U8);
7960 }
7961
7962 RetCode = SK_PNMI_ERR_OK;
7963 break;
7964
7965 case OID_SKGE_VCT_STATUS:
7966 CheckVctStatus(pAC, IoC, pBuf, Offset, PhysPortIndex);
7967 Offset += sizeof(SK_U8);
7968 RetCode = SK_PNMI_ERR_OK;
7969 break;
7970
7971 default:
7972 *pLen = 0;
7973 return (SK_PNMI_ERR_GENERAL);
7974 }
7975 } /* for */
7976 *pLen = Offset;
7977 return (RetCode);
7978
7979 } /* if SK_PNMI_GET */
7980
7981 /*
7982 * From here SET or PRESET action. Check if the passed
7983 * buffer length is plausible.
7984 */
7985
7986 /* Check length */
7987 switch (Id) {
7988 case OID_SKGE_VCT_SET:
7989 if (*pLen < (Limit - PhysPortIndex) * sizeof(SK_U32)) {
7990 *pLen = (Limit - PhysPortIndex) * sizeof(SK_U32);
7991 return (SK_PNMI_ERR_TOO_SHORT);
7992 }
7993 break;
7994
7995 default:
7996 *pLen = 0;
7997 return (SK_PNMI_ERR_GENERAL);
7998 }
7999
8000 /*
8001 * Perform preset or set.
8002 */
8003
8004 /* VCT does not support PRESET action. */
8005 if (Action == SK_PNMI_PRESET) {
8006 return (SK_PNMI_ERR_OK);
8007 }
8008
8009 Offset = 0;
8010 for (; PhysPortIndex < Limit; PhysPortIndex++) {
8011 switch (Id) {
8012 case OID_SKGE_VCT_SET: /* Start VCT test. */
8013 if (Link == SK_FALSE) {
8014 SkGeStopPort(pAC, IoC, PhysPortIndex, SK_STOP_ALL, SK_SOFT_RST);
8015
8016 RetCode = SkGmCableDiagStatus(pAC, IoC, PhysPortIndex, SK_TRUE);
8017 if (RetCode == 0) { /* RetCode: 0 => Start! */
8018 pAC->Pnmi.VctStatus[PhysPortIndex] |= SK_PNMI_VCT_PENDING;
8019 pAC->Pnmi.VctStatus[PhysPortIndex] &= ~SK_PNMI_VCT_NEW_VCT_DATA;
8020 pAC->Pnmi.VctStatus[PhysPortIndex] &= ~SK_PNMI_VCT_LINK;
8021
8022 /*
8023 * Start VCT timer counter.
8024 */
8025 SK_MEMSET((char *) &Para, 0, sizeof(Para));
8026 Para.Para32[0] = PhysPortIndex;
8027 Para.Para32[1] = -1;
8028 SkTimerStart(pAC, IoC, &pAC->Pnmi.VctTimeout[PhysPortIndex].VctTimer,
8029 4000000, SKGE_PNMI, SK_PNMI_EVT_VCT_RESET, Para);
8030 SK_PNMI_STORE_U32((pBuf + Offset), RetCode);
8031 RetCode = SK_PNMI_ERR_OK;
8032 }
8033 else { /* RetCode: 2 => Running! */
8034 SK_PNMI_STORE_U32((pBuf + Offset), RetCode);
8035 RetCode = SK_PNMI_ERR_OK;
8036 }
8037 }
8038 else { /* RetCode: 4 => Link! */
8039 RetCode = 4;
8040 SK_PNMI_STORE_U32((pBuf + Offset), RetCode);
8041 RetCode = SK_PNMI_ERR_OK;
8042 }
8043 Offset += sizeof(SK_U32);
8044 break;
8045
8046 default:
8047 *pLen = 0;
8048 return (SK_PNMI_ERR_GENERAL);
8049 }
8050 } /* for */
8051 *pLen = Offset;
8052 return (RetCode);
8053
8054} /* Vct */
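The cable length conversion used in the GET branch above is easier to follow with concrete numbers. The helper below is an illustrative sketch only (VctRawToLength is a hypothetical name, and the unit of the result is not stated by the code above, so none is claimed here); it simply restates the same checks and integer arithmetic.

PNMI_STATIC SK_U32 VctRawToLength(
SK_U8 RawLen)	/* Raw VCT distance value reported for one MDI pair */
{
	/* Mirrors the checks in Vct(): values of 35 or below and the
	 * invalid marker 0xff map to a length of 0. */
	if (RawLen <= 35 || RawLen == 0xff) {
		return (0);
	}

	/* Same arithmetic as in Vct(), e.g. RawLen = 84:
	 * 175 * 84 = 14700, 14700 / 210 = 70, 70 - 28 = 42,
	 * 42 * 1000 = 42000. */
	return (1000 * (((175 * (SK_U32)RawLen) / 210) - 28));
}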
8055
8056
8057PNMI_STATIC void CheckVctStatus(
8058SK_AC *pAC,
8059SK_IOC IoC,
8060char *pBuf,
8061SK_U32 Offset,
8062SK_U32 PhysPortIndex)
8063{
8064 SK_GEPORT *pPrt;
8065 SK_PNMI_VCT *pVctData;
8066 SK_U32 RetCode;
8067
8068 pPrt = &pAC->GIni.GP[PhysPortIndex];
8069
8070 pVctData = (SK_PNMI_VCT *) (pBuf + Offset);
8071 pVctData->VctStatus = SK_PNMI_VCT_NONE;
8072
8073 if (!pPrt->PHWLinkUp) {
8074
8075 /* Was a VCT test ever made before? */
8076 if (pAC->Pnmi.VctStatus[PhysPortIndex] & SK_PNMI_VCT_TEST_DONE) {
8077 if ((pAC->Pnmi.VctStatus[PhysPortIndex] & SK_PNMI_VCT_LINK)) {
8078 pVctData->VctStatus |= SK_PNMI_VCT_OLD_VCT_DATA;
8079 }
8080 else {
8081 pVctData->VctStatus |= SK_PNMI_VCT_NEW_VCT_DATA;
8082 }
8083 }
8084
8085 /* Check VCT test status. */
8086 RetCode = SkGmCableDiagStatus(pAC, IoC, PhysPortIndex, SK_FALSE);
8087 if (RetCode == 2) { /* VCT test is running. */
8088 pVctData->VctStatus |= SK_PNMI_VCT_RUNNING;
8089 }
8090 else { /* VCT data was copied to pAC here. Check PENDING state. */
8091 if (pAC->Pnmi.VctStatus[PhysPortIndex] & SK_PNMI_VCT_PENDING) {
8092 pVctData->VctStatus |= SK_PNMI_VCT_NEW_VCT_DATA;
8093 }
8094 }
8095
8096 if (pPrt->PCableLen != 0xff) { /* Old DSP value. */
8097 pVctData->VctStatus |= SK_PNMI_VCT_OLD_DSP_DATA;
8098 }
8099 }
8100 else {
8101
8102 /* Was a VCT test ever made before? */
8103 if (pAC->Pnmi.VctStatus[PhysPortIndex] & SK_PNMI_VCT_TEST_DONE) {
8104 pVctData->VctStatus &= ~SK_PNMI_VCT_NEW_VCT_DATA;
8105 pVctData->VctStatus |= SK_PNMI_VCT_OLD_VCT_DATA;
8106 }
8107
8108 /* DSP only valid in 100/1000 modes. */
8109 if (pAC->GIni.GP[PhysPortIndex].PLinkSpeedUsed !=
8110 SK_LSPEED_STAT_10MBPS) {
8111 pVctData->VctStatus |= SK_PNMI_VCT_NEW_DSP_DATA;
8112 }
8113 }
8114} /* CheckVctStatus */
8115
8116
8117/*****************************************************************************
8118 *
8119 * SkPnmiGenIoctl - Handles new generic PNMI IOCTL, calls the needed
8120 * PNMI function depending on the subcommand and
8121 * returns all data belonging to the complete database
8122 * or OID request.
8123 *
8124 * Description:
8125 * Looks up the requested subcommand, calls the corresponding handler
8126 * function and passes all required parameters to it.
8127 * The function is called by the driver. It is needed to handle the new
8128 * generic PNMI IOCTL. This IOCTL is given to the driver and contains both
8129 * the OID and a subcommand to decide what kind of request has to be done.
8130 *
8131 * Returns:
8132 * SK_PNMI_ERR_OK The request was successfully performed
8133 * SK_PNMI_ERR_GENERAL A general severe internal error occurred
8134 * SK_PNMI_ERR_TOO_SHORT The passed buffer is too short to take
8135 * the data.
8136 * SK_PNMI_ERR_UNKNOWN_OID The requested OID is unknown
8137 * SK_PNMI_ERR_UNKNOWN_INST The requested instance of the OID doesn't
8138 * exist (e.g. port instance 3 on a two port
8139 * adapter).
8140 */
8141int SkPnmiGenIoctl(
8142SK_AC *pAC, /* Pointer to adapter context struct */
8143SK_IOC IoC, /* I/O context */
8144void *pBuf, /* Buffer used for the management data transfer */
8145unsigned int *pLen, /* Length of buffer */
8146SK_U32 NetIndex) /* NetIndex (0..n), in single net mode always zero */
8147{
8148SK_I32 Mode; /* Store value of subcommand. */
8149SK_U32 Oid; /* Store value of OID. */
8150int ReturnCode; /* Store return value to show status of PNMI action. */
8151int HeaderLength; /* Length of desired action plus OID. */
8152
8153 ReturnCode = SK_PNMI_ERR_GENERAL;
8154
8155 SK_MEMCPY(&Mode, pBuf, sizeof(SK_I32));
8156 SK_MEMCPY(&Oid, (char *) pBuf + sizeof(SK_I32), sizeof(SK_U32));
8157 HeaderLength = sizeof(SK_I32) + sizeof(SK_U32);
8158 *pLen = *pLen - HeaderLength;
8159 SK_MEMCPY((char *) pBuf + sizeof(SK_I32), (char *) pBuf + HeaderLength, *pLen);
8160
8161 switch(Mode) {
8162 case SK_GET_SINGLE_VAR:
8163 ReturnCode = SkPnmiGetVar(pAC, IoC, Oid,
8164 (char *) pBuf + sizeof(SK_I32), pLen,
8165 ((SK_U32) (-1)), NetIndex);
8166 SK_PNMI_STORE_U32(pBuf, ReturnCode);
8167 *pLen = *pLen + sizeof(SK_I32);
8168 break;
8169 case SK_PRESET_SINGLE_VAR:
8170 ReturnCode = SkPnmiPreSetVar(pAC, IoC, Oid,
8171 (char *) pBuf + sizeof(SK_I32), pLen,
8172 ((SK_U32) (-1)), NetIndex);
8173 SK_PNMI_STORE_U32(pBuf, ReturnCode);
8174 *pLen = *pLen + sizeof(SK_I32);
8175 break;
8176 case SK_SET_SINGLE_VAR:
8177 ReturnCode = SkPnmiSetVar(pAC, IoC, Oid,
8178 (char *) pBuf + sizeof(SK_I32), pLen,
8179 ((SK_U32) (-1)), NetIndex);
8180 SK_PNMI_STORE_U32(pBuf, ReturnCode);
8181 *pLen = *pLen + sizeof(SK_I32);
8182 break;
8183 case SK_GET_FULL_MIB:
8184 ReturnCode = SkPnmiGetStruct(pAC, IoC, pBuf, pLen, NetIndex);
8185 break;
8186 case SK_PRESET_FULL_MIB:
8187 ReturnCode = SkPnmiPreSetStruct(pAC, IoC, pBuf, pLen, NetIndex);
8188 break;
8189 case SK_SET_FULL_MIB:
8190 ReturnCode = SkPnmiSetStruct(pAC, IoC, pBuf, pLen, NetIndex);
8191 break;
8192 default:
8193 break;
8194 }
8195
8196 return (ReturnCode);
8197
8198} /* SkPnmiGenIoctl */
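For illustration only (not from the driver sources), the sketch below shows the buffer layout SkPnmiGenIoctl() expects and what comes back for a single-variable GET: the caller places a signed 32-bit subcommand and the 32-bit OID in front of the data area; on return, the first 32 bits hold the PNMI return code and the OID data follows. GenIoctlGetExample, the 256-byte buffer and the choice of OID_SKGE_VCT_STATUS are arbitrary example choices.

PNMI_STATIC int GenIoctlGetExample(
SK_AC *pAC,	/* Pointer to adapter context struct */
SK_IOC IoC)	/* I/O context */
{
	char Buf[256];	/* Header plus room for the returned OID data */
	unsigned int Len = sizeof(Buf);
	SK_I32 Mode = SK_GET_SINGLE_VAR;
	SK_U32 Oid = OID_SKGE_VCT_STATUS;
	int Ret;

	/* Build the generic IOCTL header: subcommand first, then the OID */
	SK_MEMCPY(Buf, (char *)&Mode, sizeof(SK_I32));
	SK_MEMCPY(Buf + sizeof(SK_I32), (char *)&Oid, sizeof(SK_U32));

	Ret = SkPnmiGenIoctl(pAC, IoC, (void *)Buf, &Len, 0);

	if (Ret == SK_PNMI_ERR_OK) {
		/* Buf[0..3]: PNMI return code (stored via SK_PNMI_STORE_U32),
		 * Buf[4.. ]: OID data, Len - sizeof(SK_I32) bytes in total. */
	}

	return (Ret);
}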
diff --git a/drivers/net/sk98lin/skgesirq.c b/drivers/net/sk98lin/skgesirq.c
deleted file mode 100644
index e5ee6d63ba4e..000000000000
--- a/drivers/net/sk98lin/skgesirq.c
+++ /dev/null
@@ -1,2229 +0,0 @@
1/******************************************************************************
2 *
3 * Name: skgesirq.c
4 * Project: Gigabit Ethernet Adapters, Common Modules
5 * Version: $Revision: 1.92 $
6 * Date: $Date: 2003/09/16 14:37:07 $
7 * Purpose: Special IRQ module
8 *
9 ******************************************************************************/
10
11/******************************************************************************
12 *
13 * (C)Copyright 1998-2002 SysKonnect.
14 * (C)Copyright 2002-2003 Marvell.
15 *
16 * This program is free software; you can redistribute it and/or modify
17 * it under the terms of the GNU General Public License as published by
18 * the Free Software Foundation; either version 2 of the License, or
19 * (at your option) any later version.
20 *
21 * The information in this file is provided "AS IS" without warranty.
22 *
23 ******************************************************************************/
24
25/*
26 * Special Interrupt handler
27 *
28 * The following abstract should show how this module is included
29 * in the driver path:
30 *
31 * In the ISR of the driver the bits for frame transmission complete and
32 * for receive complete are checked and handled by the driver itself.
33 * The bits of the slow path mask are checked after that and then the
34 * entry into the so-called "slow path" is prepared. It is an implementor's
35 * decision whether this is executed directly or just scheduled by
36 * disabling the mask. In the interrupt service routine some events may be
37 * generated, so it would be a good idea to call the EventDispatcher
38 * right after this ISR.
39 *
40 * The Interrupt source register of the adapter is NOT read by this module.
41 * So if the driver's implementor needs a while loop around the
42 * slow path interrupt bits, SkGeSirqIsr() needs to be called for
43 * each loop iteration.
44 *
45 * However, the MAC Interrupt status registers are read in a while loop.
46 *
47 */
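As a rough illustration of the calling contract described above (not part of this file): the driver's ISR reads the interrupt source register itself, handles the fast path bits on its own, calls SkGeSirqIsr() once per pass over the slow path bits, and runs the event dispatcher afterwards. B0_ISRC and SLOW_PATH_IRQ_MASK are placeholders for the driver's actual register name and slow path mask.

static void DrvIsrSketch(
SK_AC *pAC,	/* adapter context */
SK_IOC IoC)	/* IO context */
{
	SK_U32	IntSrc;		/* interrupt source bits read by the driver */

	SK_IN32(IoC, B0_ISRC, &IntSrc);

	/* ... frame transmission and receive complete bits are
	 * handled by the driver itself here (fast path) ... */

	while ((IntSrc & SLOW_PATH_IRQ_MASK) != 0) {
		/* one call per loop iteration, as required above */
		SkGeSirqIsr(pAC, IoC, IntSrc);

		SK_IN32(IoC, B0_ISRC, &IntSrc);
	}

	/* SkGeSirqIsr() may have queued events */
	SkEventDispatcher(pAC, IoC);
}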
48
49#if (defined(DEBUG) || ((!defined(LINT)) && (!defined(SK_SLIM))))
50static const char SysKonnectFileId[] =
51 "@(#) $Id: skgesirq.c,v 1.92 2003/09/16 14:37:07 rschmidt Exp $ (C) Marvell.";
52#endif
53
54#include "h/skdrv1st.h" /* Driver Specific Definitions */
55#ifndef SK_SLIM
56#include "h/skgepnmi.h" /* PNMI Definitions */
57#include "h/skrlmt.h" /* RLMT Definitions */
58#endif
59#include "h/skdrv2nd.h" /* Adapter Control and Driver specific Def. */
60
61/* local function prototypes */
62#ifdef GENESIS
63static int SkGePortCheckUpXmac(SK_AC*, SK_IOC, int, SK_BOOL);
64static int SkGePortCheckUpBcom(SK_AC*, SK_IOC, int, SK_BOOL);
65static void SkPhyIsrBcom(SK_AC*, SK_IOC, int, SK_U16);
66#endif /* GENESIS */
67#ifdef YUKON
68static int SkGePortCheckUpGmac(SK_AC*, SK_IOC, int, SK_BOOL);
69static void SkPhyIsrGmac(SK_AC*, SK_IOC, int, SK_U16);
70#endif /* YUKON */
71#ifdef OTHER_PHY
72static int SkGePortCheckUpLone(SK_AC*, SK_IOC, int, SK_BOOL);
73static int SkGePortCheckUpNat(SK_AC*, SK_IOC, int, SK_BOOL);
74static void SkPhyIsrLone(SK_AC*, SK_IOC, int, SK_U16);
75#endif /* OTHER_PHY */
76
77#ifdef GENESIS
78/*
79 * array of Rx counter from XMAC which are checked
80 * in AutoSense mode to check whether a link is not able to auto-negotiate.
81 */
82static const SK_U16 SkGeRxRegs[]= {
83 XM_RXF_64B,
84 XM_RXF_127B,
85 XM_RXF_255B,
86 XM_RXF_511B,
87 XM_RXF_1023B,
88 XM_RXF_MAX_SZ
89} ;
90#endif /* GENESIS */
91
92#ifdef __C2MAN__
93/*
94 * Special IRQ function
95 *
96 * General Description:
97 *
98 */
99intro()
100{}
101#endif
102
103/******************************************************************************
104 *
105 * SkHWInitDefSense() - Default Autosensing mode initialization
106 *
107 * Description: sets the PLinkMode for HWInit
108 *
109 * Returns: N/A
110 */
111static void SkHWInitDefSense(
112SK_AC *pAC, /* adapter context */
113SK_IOC IoC, /* IO context */
114int Port) /* Port Index (MAC_1 + n) */
115{
116 SK_GEPORT *pPrt; /* GIni Port struct pointer */
117
118 pPrt = &pAC->GIni.GP[Port];
119
120 pPrt->PAutoNegTimeOut = 0;
121
122 if (pPrt->PLinkModeConf != SK_LMODE_AUTOSENSE) {
123 pPrt->PLinkMode = pPrt->PLinkModeConf;
124 return;
125 }
126
127 SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_IRQ,
128 ("AutoSensing: First mode %d on Port %d\n",
129 (int)SK_LMODE_AUTOFULL, Port));
130
131 pPrt->PLinkMode = (SK_U8)SK_LMODE_AUTOFULL;
132
133 return;
134} /* SkHWInitDefSense */
135
136
137#ifdef GENESIS
138/******************************************************************************
139 *
140 * SkHWSenseGetNext() - Get Next Autosensing Mode
141 *
142 * Description: gets the appropriate next mode
143 *
144 * Note:
145 *
146 */
147static SK_U8 SkHWSenseGetNext(
148SK_AC *pAC, /* adapter context */
149SK_IOC IoC, /* IO context */
150int Port) /* Port Index (MAC_1 + n) */
151{
152 SK_GEPORT *pPrt; /* GIni Port struct pointer */
153
154 pPrt = &pAC->GIni.GP[Port];
155
156 pPrt->PAutoNegTimeOut = 0;
157
158 if (pPrt->PLinkModeConf != (SK_U8)SK_LMODE_AUTOSENSE) {
159 /* Leave all as configured */
160 return(pPrt->PLinkModeConf);
161 }
162
163 if (pPrt->PLinkMode == (SK_U8)SK_LMODE_AUTOFULL) {
164 /* Return next mode AUTOBOTH */
165 return ((SK_U8)SK_LMODE_AUTOBOTH);
166 }
167
168 /* Return default autofull */
169 return ((SK_U8)SK_LMODE_AUTOFULL);
170} /* SkHWSenseGetNext */
171
172
173/******************************************************************************
174 *
175 * SkHWSenseSetNext() - Autosensing Set next mode
176 *
177 * Description: sets the appropriate next mode
178 *
179 * Returns: N/A
180 */
181static void SkHWSenseSetNext(
182SK_AC *pAC, /* adapter context */
183SK_IOC IoC, /* IO context */
184int Port, /* Port Index (MAC_1 + n) */
185SK_U8 NewMode) /* New Mode to be written in sense mode */
186{
187 SK_GEPORT *pPrt; /* GIni Port struct pointer */
188
189 pPrt = &pAC->GIni.GP[Port];
190
191 pPrt->PAutoNegTimeOut = 0;
192
193 if (pPrt->PLinkModeConf != (SK_U8)SK_LMODE_AUTOSENSE) {
194 return;
195 }
196
197 SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_IRQ,
198 ("AutoSensing: next mode %d on Port %d\n",
199 (int)NewMode, Port));
200
201 pPrt->PLinkMode = NewMode;
202
203 return;
204} /* SkHWSenseSetNext */
205#endif /* GENESIS */
206
207
208/******************************************************************************
209 *
210 * SkHWLinkDown() - Link Down handling
211 *
212 * Description: handles the hardware link down signal
213 *
214 * Returns: N/A
215 */
216void SkHWLinkDown(
217SK_AC *pAC, /* adapter context */
218SK_IOC IoC, /* IO context */
219int Port) /* Port Index (MAC_1 + n) */
220{
221 SK_GEPORT *pPrt; /* GIni Port struct pointer */
222
223 pPrt = &pAC->GIni.GP[Port];
224
225 /* Disable all MAC interrupts */
226 SkMacIrqDisable(pAC, IoC, Port);
227
228 /* Disable Receiver and Transmitter */
229 SkMacRxTxDisable(pAC, IoC, Port);
230
231 /* Init default sense mode */
232 SkHWInitDefSense(pAC, IoC, Port);
233
234 if (pPrt->PHWLinkUp == SK_FALSE) {
235 return;
236 }
237
238 SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_IRQ,
239 ("Link down Port %d\n", Port));
240
241 /* Set Link to DOWN */
242 pPrt->PHWLinkUp = SK_FALSE;
243
244 /* Reset Port status fields */
245 pPrt->PLinkModeStatus = (SK_U8)SK_LMODE_STAT_UNKNOWN;
246 pPrt->PFlowCtrlStatus = (SK_U8)SK_FLOW_STAT_NONE;
247 pPrt->PLinkSpeedUsed = (SK_U8)SK_LSPEED_STAT_INDETERMINATED;
248
249 /* Re-init Phy especially when the AutoSense default is set now */
250 SkMacInitPhy(pAC, IoC, Port, SK_FALSE);
251
252 /* GP0: used for workaround of Rev. C Errata 2 */
253
254 /* Do NOT signal to RLMT */
255
256 /* Do NOT start the timer here */
257} /* SkHWLinkDown */
258
259
260/******************************************************************************
261 *
262 * SkHWLinkUp() - Link Up handling
263 *
264 * Description: handles the hardware link up signal
265 *
266 * Returns: N/A
267 */
268static void SkHWLinkUp(
269SK_AC *pAC, /* adapter context */
270SK_IOC IoC, /* IO context */
271int Port) /* Port Index (MAC_1 + n) */
272{
273 SK_GEPORT *pPrt; /* GIni Port struct pointer */
274
275 pPrt = &pAC->GIni.GP[Port];
276
277 if (pPrt->PHWLinkUp) {
278 /* We do NOT need to proceed on active link */
279 return;
280 }
281
282 pPrt->PHWLinkUp = SK_TRUE;
283 pPrt->PAutoNegFail = SK_FALSE;
284 pPrt->PLinkModeStatus = (SK_U8)SK_LMODE_STAT_UNKNOWN;
285
286 if (pPrt->PLinkMode != (SK_U8)SK_LMODE_AUTOHALF &&
287 pPrt->PLinkMode != (SK_U8)SK_LMODE_AUTOFULL &&
288 pPrt->PLinkMode != (SK_U8)SK_LMODE_AUTOBOTH) {
289 /* Link is up and no Auto-negotiation should be done */
290
291 /* Link speed should be the configured one */
292 switch (pPrt->PLinkSpeed) {
293 case SK_LSPEED_AUTO:
294 /* default is 1000 Mbps */
295 case SK_LSPEED_1000MBPS:
296 pPrt->PLinkSpeedUsed = (SK_U8)SK_LSPEED_STAT_1000MBPS;
297 break;
298 case SK_LSPEED_100MBPS:
299 pPrt->PLinkSpeedUsed = (SK_U8)SK_LSPEED_STAT_100MBPS;
300 break;
301 case SK_LSPEED_10MBPS:
302 pPrt->PLinkSpeedUsed = (SK_U8)SK_LSPEED_STAT_10MBPS;
303 break;
304 }
305
306 /* Set Link Mode Status */
307 if (pPrt->PLinkMode == SK_LMODE_FULL) {
308 pPrt->PLinkModeStatus = (SK_U8)SK_LMODE_STAT_FULL;
309 }
310 else {
311 pPrt->PLinkModeStatus = (SK_U8)SK_LMODE_STAT_HALF;
312 }
313
314 /* No flow control without auto-negotiation */
315 pPrt->PFlowCtrlStatus = (SK_U8)SK_FLOW_STAT_NONE;
316
317 /* enable Rx/Tx */
318 (void)SkMacRxTxEnable(pAC, IoC, Port);
319 }
320} /* SkHWLinkUp */
321
322
323/******************************************************************************
324 *
325 * SkMacParity() - MAC parity workaround
326 *
327 * Description: handles MAC parity errors correctly
328 *
329 * Returns: N/A
330 */
331static void SkMacParity(
332SK_AC *pAC, /* adapter context */
333SK_IOC IoC, /* IO context */
334int Port) /* Port Index of the port failed */
335{
336 SK_EVPARA Para;
337 SK_GEPORT *pPrt; /* GIni Port struct pointer */
338 SK_U32 TxMax; /* Tx Max Size Counter */
339
340 pPrt = &pAC->GIni.GP[Port];
341
342 /* Clear IRQ Tx Parity Error */
343#ifdef GENESIS
344 if (pAC->GIni.GIGenesis) {
345
346 SK_OUT16(IoC, MR_ADDR(Port, TX_MFF_CTRL1), MFF_CLR_PERR);
347 }
348#endif /* GENESIS */
349
350#ifdef YUKON
351 if (pAC->GIni.GIYukon) {
352 /* HW-Bug #8: cleared by GMF_CLI_TX_FC instead of GMF_CLI_TX_PE */
353 SK_OUT8(IoC, MR_ADDR(Port, TX_GMF_CTRL_T),
354 (SK_U8)((pAC->GIni.GIChipId == CHIP_ID_YUKON &&
355 pAC->GIni.GIChipRev == 0) ? GMF_CLI_TX_FC : GMF_CLI_TX_PE));
356 }
357#endif /* YUKON */
358
359 if (pPrt->PCheckPar) {
360
361 if (Port == MAC_1) {
362 SK_ERR_LOG(pAC, SK_ERRCL_HW, SKERR_SIRQ_E016, SKERR_SIRQ_E016MSG);
363 }
364 else {
365 SK_ERR_LOG(pAC, SK_ERRCL_HW, SKERR_SIRQ_E017, SKERR_SIRQ_E017MSG);
366 }
367 Para.Para64 = Port;
368 SkEventQueue(pAC, SKGE_DRV, SK_DRV_PORT_FAIL, Para);
369
370 Para.Para32[0] = Port;
371 SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_LINK_DOWN, Para);
372
373 return;
374 }
375
376 /* Check whether frames with a size of 1k were sent */
377#ifdef GENESIS
378 if (pAC->GIni.GIGenesis) {
379 /* Snap statistic counters */
380 (void)SkXmUpdateStats(pAC, IoC, Port);
381
382 (void)SkXmMacStatistic(pAC, IoC, Port, XM_TXF_MAX_SZ, &TxMax);
383 }
384#endif /* GENESIS */
385
386#ifdef YUKON
387 if (pAC->GIni.GIYukon) {
388
389 (void)SkGmMacStatistic(pAC, IoC, Port, GM_TXF_1518B, &TxMax);
390 }
391#endif /* YUKON */
392
393 if (TxMax > 0) {
394 /* From now on check the parity */
395 pPrt->PCheckPar = SK_TRUE;
396 }
397} /* SkMacParity */
398
399
400/******************************************************************************
401 *
402 * SkGeHwErr() - Hardware Error service routine
403 *
404 * Description: handles all HW Error interrupts
405 *
406 * Returns: N/A
407 */
408static void SkGeHwErr(
409SK_AC *pAC, /* adapter context */
410SK_IOC IoC, /* IO context */
411SK_U32 HwStatus) /* Interrupt status word */
412{
413 SK_EVPARA Para;
414 SK_U16 Word;
415
416 if ((HwStatus & (IS_IRQ_MST_ERR | IS_IRQ_STAT)) != 0) {
417 /* PCI Errors occurred */
418 if ((HwStatus & IS_IRQ_STAT) != 0) {
419 SK_ERR_LOG(pAC, SK_ERRCL_HW, SKERR_SIRQ_E013, SKERR_SIRQ_E013MSG);
420 }
421 else {
422 SK_ERR_LOG(pAC, SK_ERRCL_HW, SKERR_SIRQ_E012, SKERR_SIRQ_E012MSG);
423 }
424
425 /* Reset all bits in the PCI STATUS register */
426 SK_IN16(IoC, PCI_C(PCI_STATUS), &Word);
427
428 SK_OUT8(IoC, B2_TST_CTRL1, TST_CFG_WRITE_ON);
429 SK_OUT16(IoC, PCI_C(PCI_STATUS), (SK_U16)(Word | PCI_ERRBITS));
430 SK_OUT8(IoC, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
431
432 Para.Para64 = 0;
433 SkEventQueue(pAC, SKGE_DRV, SK_DRV_ADAP_FAIL, Para);
434 }
435
436#ifdef GENESIS
437 if (pAC->GIni.GIGenesis) {
438
439 if ((HwStatus & IS_NO_STAT_M1) != 0) {
440 /* Ignore it */
441 /* This situation is also indicated in the descriptor */
442 SK_OUT16(IoC, MR_ADDR(MAC_1, RX_MFF_CTRL1), MFF_CLR_INSTAT);
443 }
444
445 if ((HwStatus & IS_NO_STAT_M2) != 0) {
446 /* Ignore it */
447 /* This situation is also indicated in the descriptor */
448 SK_OUT16(IoC, MR_ADDR(MAC_2, RX_MFF_CTRL1), MFF_CLR_INSTAT);
449 }
450
451 if ((HwStatus & IS_NO_TIST_M1) != 0) {
452 /* Ignore it */
453 /* This situation is also indicated in the descriptor */
454 SK_OUT16(IoC, MR_ADDR(MAC_1, RX_MFF_CTRL1), MFF_CLR_INTIST);
455 }
456
457 if ((HwStatus & IS_NO_TIST_M2) != 0) {
458 /* Ignore it */
459 /* This situation is also indicated in the descriptor */
460 SK_OUT16(IoC, MR_ADDR(MAC_2, RX_MFF_CTRL1), MFF_CLR_INTIST);
461 }
462 }
463#endif /* GENESIS */
464
465#ifdef YUKON
466 if (pAC->GIni.GIYukon) {
467 /* This is necessary only for Rx timing measurements */
468 if ((HwStatus & IS_IRQ_TIST_OV) != 0) {
469 /* increment Time Stamp Timer counter (high) */
470 pAC->GIni.GITimeStampCnt++;
471
472 /* Clear Time Stamp Timer IRQ */
473 SK_OUT8(IoC, GMAC_TI_ST_CTRL, (SK_U8)GMT_ST_CLR_IRQ);
474 }
475
476 if ((HwStatus & IS_IRQ_SENSOR) != 0) {
477 /* no sensors on 32-bit Yukon */
478 if (pAC->GIni.GIYukon32Bit) {
479 /* disable HW Error IRQ */
480 pAC->GIni.GIValIrqMask &= ~IS_HW_ERR;
481 }
482 }
483 }
484#endif /* YUKON */
485
486 if ((HwStatus & IS_RAM_RD_PAR) != 0) {
487 SK_OUT16(IoC, B3_RI_CTRL, RI_CLR_RD_PERR);
488 SK_ERR_LOG(pAC, SK_ERRCL_HW, SKERR_SIRQ_E014, SKERR_SIRQ_E014MSG);
489 Para.Para64 = 0;
490 SkEventQueue(pAC, SKGE_DRV, SK_DRV_ADAP_FAIL, Para);
491 }
492
493 if ((HwStatus & IS_RAM_WR_PAR) != 0) {
494 SK_OUT16(IoC, B3_RI_CTRL, RI_CLR_WR_PERR);
495 SK_ERR_LOG(pAC, SK_ERRCL_HW, SKERR_SIRQ_E015, SKERR_SIRQ_E015MSG);
496 Para.Para64 = 0;
497 SkEventQueue(pAC, SKGE_DRV, SK_DRV_ADAP_FAIL, Para);
498 }
499
500 if ((HwStatus & IS_M1_PAR_ERR) != 0) {
501 SkMacParity(pAC, IoC, MAC_1);
502 }
503
504 if ((HwStatus & IS_M2_PAR_ERR) != 0) {
505 SkMacParity(pAC, IoC, MAC_2);
506 }
507
508 if ((HwStatus & IS_R1_PAR_ERR) != 0) {
509 /* Clear IRQ */
510 SK_OUT32(IoC, B0_R1_CSR, CSR_IRQ_CL_P);
511
512 SK_ERR_LOG(pAC, SK_ERRCL_HW, SKERR_SIRQ_E018, SKERR_SIRQ_E018MSG);
513 Para.Para64 = MAC_1;
514 SkEventQueue(pAC, SKGE_DRV, SK_DRV_PORT_FAIL, Para);
515
516 Para.Para32[0] = MAC_1;
517 SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_LINK_DOWN, Para);
518 }
519
520 if ((HwStatus & IS_R2_PAR_ERR) != 0) {
521 /* Clear IRQ */
522 SK_OUT32(IoC, B0_R2_CSR, CSR_IRQ_CL_P);
523
524 SK_ERR_LOG(pAC, SK_ERRCL_HW, SKERR_SIRQ_E019, SKERR_SIRQ_E019MSG);
525 Para.Para64 = MAC_2;
526 SkEventQueue(pAC, SKGE_DRV, SK_DRV_PORT_FAIL, Para);
527
528 Para.Para32[0] = MAC_2;
529 SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_LINK_DOWN, Para);
530 }
531} /* SkGeHwErr */
532
533
534/******************************************************************************
535 *
536 * SkGeSirqIsr() - Special Interrupt Service Routine
537 *
538 * Description: handles all non data transfer specific interrupts (slow path)
539 *
540 * Returns: N/A
541 */
542void SkGeSirqIsr(
543SK_AC *pAC, /* adapter context */
544SK_IOC IoC, /* IO context */
545SK_U32 Istatus) /* Interrupt status word */
546{
547 SK_EVPARA Para;
548 SK_U32 RegVal32; /* Read register value */
549 SK_GEPORT *pPrt; /* GIni Port struct pointer */
550 SK_U16 PhyInt;
551 int i;
552
553 if (((Istatus & IS_HW_ERR) & pAC->GIni.GIValIrqMask) != 0) {
554 /* read the HW Error Interrupt source */
555 SK_IN32(IoC, B0_HWE_ISRC, &RegVal32);
556
557 SkGeHwErr(pAC, IoC, RegVal32);
558 }
559
560 /*
561 * Packet Timeout interrupts
562 */
563 /* Check whether MACs are correctly initialized */
564 if (((Istatus & (IS_PA_TO_RX1 | IS_PA_TO_TX1)) != 0) &&
565 pAC->GIni.GP[MAC_1].PState == SK_PRT_RESET) {
566 /* MAC 1 was not initialized but Packet timeout occurred */
567 SK_ERR_LOG(pAC, SK_ERRCL_SW | SK_ERRCL_INIT, SKERR_SIRQ_E004,
568 SKERR_SIRQ_E004MSG);
569 }
570
571 if (((Istatus & (IS_PA_TO_RX2 | IS_PA_TO_TX2)) != 0) &&
572 pAC->GIni.GP[MAC_2].PState == SK_PRT_RESET) {
573 /* MAC 2 was not initialized but Packet timeout occurred */
574 SK_ERR_LOG(pAC, SK_ERRCL_SW | SK_ERRCL_INIT, SKERR_SIRQ_E005,
575 SKERR_SIRQ_E005MSG);
576 }
577
578 if ((Istatus & IS_PA_TO_RX1) != 0) {
579 /* Means network is filling us up */
580 SK_ERR_LOG(pAC, SK_ERRCL_HW | SK_ERRCL_INIT, SKERR_SIRQ_E002,
581 SKERR_SIRQ_E002MSG);
582 SK_OUT16(IoC, B3_PA_CTRL, PA_CLR_TO_RX1);
583 }
584
585 if ((Istatus & IS_PA_TO_RX2) != 0) {
586 /* Means network is filling us up */
587 SK_ERR_LOG(pAC, SK_ERRCL_HW | SK_ERRCL_INIT, SKERR_SIRQ_E003,
588 SKERR_SIRQ_E003MSG);
589 SK_OUT16(IoC, B3_PA_CTRL, PA_CLR_TO_RX2);
590 }
591
592 if ((Istatus & IS_PA_TO_TX1) != 0) {
593
594 pPrt = &pAC->GIni.GP[0];
595
596 /* May be a normal situation in a server with a slow network */
597 SK_OUT16(IoC, B3_PA_CTRL, PA_CLR_TO_TX1);
598
599#ifdef GENESIS
600 if (pAC->GIni.GIGenesis) {
601 /*
602 * workaround: if in half duplex mode, check for Tx hangup.
603 * Read number of TX'ed bytes, wait for 10 ms, then compare
604 * the number with current value. If nothing changed, we assume
605 * that Tx is hanging and do a FIFO flush (see event routine).
606 */
607 if ((pPrt->PLinkModeStatus == SK_LMODE_STAT_HALF ||
608 pPrt->PLinkModeStatus == SK_LMODE_STAT_AUTOHALF) &&
609 !pPrt->HalfDupTimerActive) {
610 /*
611 * many more packet arbiter timeouts may come in between,
612 * we ignore those
613 */
614 pPrt->HalfDupTimerActive = SK_TRUE;
615 /* Snap statistic counters */
616 (void)SkXmUpdateStats(pAC, IoC, 0);
617
618 (void)SkXmMacStatistic(pAC, IoC, 0, XM_TXO_OK_HI, &RegVal32);
619
620 pPrt->LastOctets = (SK_U64)RegVal32 << 32;
621
622 (void)SkXmMacStatistic(pAC, IoC, 0, XM_TXO_OK_LO, &RegVal32);
623
624 pPrt->LastOctets += RegVal32;
625
626 Para.Para32[0] = 0;
627 SkTimerStart(pAC, IoC, &pPrt->HalfDupChkTimer, SK_HALFDUP_CHK_TIME,
628 SKGE_HWAC, SK_HWEV_HALFDUP_CHK, Para);
629 }
630 }
631#endif /* GENESIS */
632 }
633
634 if ((Istatus & IS_PA_TO_TX2) != 0) {
635
636 pPrt = &pAC->GIni.GP[1];
637
638 /* May be a normal situation in a server with a slow network */
639 SK_OUT16(IoC, B3_PA_CTRL, PA_CLR_TO_TX2);
640
641#ifdef GENESIS
642 if (pAC->GIni.GIGenesis) {
643 /* workaround: see above */
644 if ((pPrt->PLinkModeStatus == SK_LMODE_STAT_HALF ||
645 pPrt->PLinkModeStatus == SK_LMODE_STAT_AUTOHALF) &&
646 !pPrt->HalfDupTimerActive) {
647 pPrt->HalfDupTimerActive = SK_TRUE;
648 /* Snap statistic counters */
649 (void)SkXmUpdateStats(pAC, IoC, 1);
650
651 (void)SkXmMacStatistic(pAC, IoC, 1, XM_TXO_OK_HI, &RegVal32);
652
653 pPrt->LastOctets = (SK_U64)RegVal32 << 32;
654
655 (void)SkXmMacStatistic(pAC, IoC, 1, XM_TXO_OK_LO, &RegVal32);
656
657 pPrt->LastOctets += RegVal32;
658
659 Para.Para32[0] = 1;
660 SkTimerStart(pAC, IoC, &pPrt->HalfDupChkTimer, SK_HALFDUP_CHK_TIME,
661 SKGE_HWAC, SK_HWEV_HALFDUP_CHK, Para);
662 }
663 }
664#endif /* GENESIS */
665 }
666
667 /* Check interrupts of the particular queues */
668 if ((Istatus & IS_R1_C) != 0) {
669 /* Clear IRQ */
670 SK_OUT32(IoC, B0_R1_CSR, CSR_IRQ_CL_C);
671 SK_ERR_LOG(pAC, SK_ERRCL_SW | SK_ERRCL_INIT, SKERR_SIRQ_E006,
672 SKERR_SIRQ_E006MSG);
673 Para.Para64 = MAC_1;
674 SkEventQueue(pAC, SKGE_DRV, SK_DRV_PORT_FAIL, Para);
675 Para.Para32[0] = MAC_1;
676 SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_LINK_DOWN, Para);
677 }
678
679 if ((Istatus & IS_R2_C) != 0) {
680 /* Clear IRQ */
681 SK_OUT32(IoC, B0_R2_CSR, CSR_IRQ_CL_C);
682 SK_ERR_LOG(pAC, SK_ERRCL_SW | SK_ERRCL_INIT, SKERR_SIRQ_E007,
683 SKERR_SIRQ_E007MSG);
684 Para.Para64 = MAC_2;
685 SkEventQueue(pAC, SKGE_DRV, SK_DRV_PORT_FAIL, Para);
686 Para.Para32[0] = MAC_2;
687 SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_LINK_DOWN, Para);
688 }
689
690 if ((Istatus & IS_XS1_C) != 0) {
691 /* Clear IRQ */
692 SK_OUT32(IoC, B0_XS1_CSR, CSR_IRQ_CL_C);
693 SK_ERR_LOG(pAC, SK_ERRCL_SW | SK_ERRCL_INIT, SKERR_SIRQ_E008,
694 SKERR_SIRQ_E008MSG);
695 Para.Para64 = MAC_1;
696 SkEventQueue(pAC, SKGE_DRV, SK_DRV_PORT_FAIL, Para);
697 Para.Para32[0] = MAC_1;
698 SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_LINK_DOWN, Para);
699 }
700
701 if ((Istatus & IS_XA1_C) != 0) {
702 /* Clear IRQ */
703 SK_OUT32(IoC, B0_XA1_CSR, CSR_IRQ_CL_C);
704 SK_ERR_LOG(pAC, SK_ERRCL_SW | SK_ERRCL_INIT, SKERR_SIRQ_E009,
705 SKERR_SIRQ_E009MSG);
706 Para.Para64 = MAC_1;
707 SkEventQueue(pAC, SKGE_DRV, SK_DRV_PORT_FAIL, Para);
708 Para.Para32[0] = MAC_1;
709 SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_LINK_DOWN, Para);
710 }
711
712 if ((Istatus & IS_XS2_C) != 0) {
713 /* Clear IRQ */
714 SK_OUT32(IoC, B0_XS2_CSR, CSR_IRQ_CL_C);
715 SK_ERR_LOG(pAC, SK_ERRCL_SW | SK_ERRCL_INIT, SKERR_SIRQ_E010,
716 SKERR_SIRQ_E010MSG);
717 Para.Para64 = MAC_2;
718 SkEventQueue(pAC, SKGE_DRV, SK_DRV_PORT_FAIL, Para);
719 Para.Para32[0] = MAC_2;
720 SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_LINK_DOWN, Para);
721 }
722
723 if ((Istatus & IS_XA2_C) != 0) {
724 /* Clear IRQ */
725 SK_OUT32(IoC, B0_XA2_CSR, CSR_IRQ_CL_C);
726 SK_ERR_LOG(pAC, SK_ERRCL_SW | SK_ERRCL_INIT, SKERR_SIRQ_E011,
727 SKERR_SIRQ_E011MSG);
728 Para.Para64 = MAC_2;
729 SkEventQueue(pAC, SKGE_DRV, SK_DRV_PORT_FAIL, Para);
730 Para.Para32[0] = MAC_2;
731 SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_LINK_DOWN, Para);
732 }
733
734 /* External reg interrupt */
735 if ((Istatus & IS_EXT_REG) != 0) {
736 /* Test IRQs from PHY */
737 for (i = 0; i < pAC->GIni.GIMacsFound; i++) {
738
739 pPrt = &pAC->GIni.GP[i];
740
741 if (pPrt->PState == SK_PRT_RESET) {
742 continue;
743 }
744
745#ifdef GENESIS
746 if (pAC->GIni.GIGenesis) {
747
748 switch (pPrt->PhyType) {
749
750 case SK_PHY_XMAC:
751 break;
752
753 case SK_PHY_BCOM:
754 SkXmPhyRead(pAC, IoC, i, PHY_BCOM_INT_STAT, &PhyInt);
755
756 if ((PhyInt & ~PHY_B_DEF_MSK) != 0) {
757 SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_IRQ,
758 ("Port %d Bcom Int: 0x%04X\n",
759 i, PhyInt));
760 SkPhyIsrBcom(pAC, IoC, i, PhyInt);
761 }
762 break;
763#ifdef OTHER_PHY
764 case SK_PHY_LONE:
765 SkXmPhyRead(pAC, IoC, i, PHY_LONE_INT_STAT, &PhyInt);
766
767 if ((PhyInt & PHY_L_DEF_MSK) != 0) {
768 SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_IRQ,
769 ("Port %d Lone Int: %x\n",
770 i, PhyInt));
771 SkPhyIsrLone(pAC, IoC, i, PhyInt);
772 }
773 break;
774#endif /* OTHER_PHY */
775 }
776 }
777#endif /* GENESIS */
778
779#ifdef YUKON
780 if (pAC->GIni.GIYukon) {
781 /* Read PHY Interrupt Status */
782 SkGmPhyRead(pAC, IoC, i, PHY_MARV_INT_STAT, &PhyInt);
783
784 if ((PhyInt & PHY_M_DEF_MSK) != 0) {
785 SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_IRQ,
786 ("Port %d Marv Int: 0x%04X\n",
787 i, PhyInt));
788 SkPhyIsrGmac(pAC, IoC, i, PhyInt);
789 }
790 }
791#endif /* YUKON */
792 }
793 }
794
795 /* I2C Ready interrupt */
796 if ((Istatus & IS_I2C_READY) != 0) {
797#ifdef SK_SLIM
798 SK_OUT32(IoC, B2_I2C_IRQ, I2C_CLR_IRQ);
799#else
800 SkI2cIsr(pAC, IoC);
801#endif
802 }
803
804 /* SW forced interrupt */
805 if ((Istatus & IS_IRQ_SW) != 0) {
806 /* clear the software IRQ */
807 SK_OUT8(IoC, B0_CTST, CS_CL_SW_IRQ);
808 }
809
810 if ((Istatus & IS_LNK_SYNC_M1) != 0) {
811 /*
812 * We do NOT need the Link Sync interrupt, because it shows
813 * us only a link going down.
814 */
815 /* clear interrupt */
816 SK_OUT8(IoC, MR_ADDR(MAC_1, LNK_SYNC_CTRL), LED_CLR_IRQ);
817 }
818
819 /* Check MAC after link sync counter */
820 if ((Istatus & IS_MAC1) != 0) {
821 /* IRQ from MAC 1 */
822 SkMacIrq(pAC, IoC, MAC_1);
823 }
824
825 if ((Istatus & IS_LNK_SYNC_M2) != 0) {
826 /*
827 * We do NOT need the Link Sync interrupt, because it shows
828 * us only a link going down.
829 */
830 /* clear interrupt */
831 SK_OUT8(IoC, MR_ADDR(MAC_2, LNK_SYNC_CTRL), LED_CLR_IRQ);
832 }
833
834 /* Check MAC after link sync counter */
835 if ((Istatus & IS_MAC2) != 0) {
836 /* IRQ from MAC 2 */
837 SkMacIrq(pAC, IoC, MAC_2);
838 }
839
840 /* Timer interrupt (served last) */
841 if ((Istatus & IS_TIMINT) != 0) {
842 /* check for HW Errors */
843 if (((Istatus & IS_HW_ERR) & ~pAC->GIni.GIValIrqMask) != 0) {
844 /* read the HW Error Interrupt source */
845 SK_IN32(IoC, B0_HWE_ISRC, &RegVal32);
846
847 SkGeHwErr(pAC, IoC, RegVal32);
848 }
849
850 SkHwtIsr(pAC, IoC);
851 }
852
853} /* SkGeSirqIsr */
854
855
856#ifdef GENESIS
857/******************************************************************************
858 *
859 * SkGePortCheckShorts() - Implements the XMAC Workaround for Errata #2
860 *
861 * return:
862 * 0 o.k. nothing needed
863 * 1 Restart needed on this port
864 */
865static int SkGePortCheckShorts(
866SK_AC *pAC, /* Adapter Context */
867SK_IOC IoC, /* IO Context */
868int Port) /* Which port should be checked */
869{
870 SK_U32 Shorts; /* Short Event Counter */
871 SK_U32 CheckShorts; /* Check value for Short Event Counter */
872 SK_U64 RxCts; /* Rx Counter (packets on network) */
873 SK_U32 RxTmp; /* Rx temp. Counter */
874 SK_U32 FcsErrCts; /* FCS Error Counter */
875 SK_GEPORT *pPrt; /* GIni Port struct pointer */
876 int Rtv; /* Return value */
877 int i;
878
879 pPrt = &pAC->GIni.GP[Port];
880
881 /* Default: no action */
882 Rtv = SK_HW_PS_NONE;
883
884 (void)SkXmUpdateStats(pAC, IoC, Port);
885
886 /* Extra precaution: check for short Event counter */
887 (void)SkXmMacStatistic(pAC, IoC, Port, XM_RXE_SHT_ERR, &Shorts);
888
889 /*
890 * Read Rx counters (packets seen on the network, not necessarily
891 * really received).
892 */
893 RxCts = 0;
894
895 for (i = 0; i < ARRAY_SIZE(SkGeRxRegs); i++) {
896
897 (void)SkXmMacStatistic(pAC, IoC, Port, SkGeRxRegs[i], &RxTmp);
898
899 RxCts += (SK_U64)RxTmp;
900 }
901
902 /* By default: check shorts against zero */
903 CheckShorts = 0;
904
905 /* Extra precaution on active links */
906 if (pPrt->PHWLinkUp) {
907 /* Reset Link Restart counter */
908 pPrt->PLinkResCt = 0;
909 pPrt->PAutoNegTOCt = 0;
910
911 /* If link is up check for 2 */
912 CheckShorts = 2;
913
914 (void)SkXmMacStatistic(pAC, IoC, Port, XM_RXF_FCS_ERR, &FcsErrCts);
915
916 if (pPrt->PLinkModeConf == SK_LMODE_AUTOSENSE &&
917 pPrt->PLipaAutoNeg == SK_LIPA_UNKNOWN &&
918 (pPrt->PLinkMode == SK_LMODE_HALF ||
919 pPrt->PLinkMode == SK_LMODE_FULL)) {
920 /*
921 * This is autosensing and we are in the fallback
922 * manual full/half duplex mode.
923 */
924 if (RxCts == pPrt->PPrevRx) {
925 /* Nothing received, restart link */
926 pPrt->PPrevFcs = FcsErrCts;
927 pPrt->PPrevShorts = Shorts;
928
929 return(SK_HW_PS_RESTART);
930 }
931 else {
932 pPrt->PLipaAutoNeg = SK_LIPA_MANUAL;
933 }
934 }
935
936 if (((RxCts - pPrt->PPrevRx) > pPrt->PRxLim) ||
937 (!(FcsErrCts - pPrt->PPrevFcs))) {
938 /*
939 * Note: The compare with zero above has to be done the way shown,
940 * otherwise the Linux driver will have a problem.
941 */
942 /*
943 * We received a bunch of frames or no CRC error occurred on the
944 * network -> ok.
945 */
946 pPrt->PPrevRx = RxCts;
947 pPrt->PPrevFcs = FcsErrCts;
948 pPrt->PPrevShorts = Shorts;
949
950 return(SK_HW_PS_NONE);
951 }
952
953 pPrt->PPrevFcs = FcsErrCts;
954 }
955
956
957 if ((Shorts - pPrt->PPrevShorts) > CheckShorts) {
958 SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_IRQ,
959 ("Short Event Count Restart Port %d \n", Port));
960 Rtv = SK_HW_PS_RESTART;
961 }
962
963 pPrt->PPrevShorts = Shorts;
964 pPrt->PPrevRx = RxCts;
965
966 return(Rtv);
967} /* SkGePortCheckShorts */
968#endif /* GENESIS */
969
970
971/******************************************************************************
972 *
973 * SkGePortCheckUp() - Check if the link is up
974 *
975 * return:
976 * 0 o.k. nothing needed
977 * 1 Restart needed on this port
978 * 2 Link came up
979 */
980static int SkGePortCheckUp(
981SK_AC *pAC, /* Adapter Context */
982SK_IOC IoC, /* IO Context */
983int Port) /* Which port should be checked */
984{
985 SK_GEPORT *pPrt; /* GIni Port struct pointer */
986 SK_BOOL AutoNeg; /* Is Auto-negotiation used ? */
987 int Rtv; /* Return value */
988
989 Rtv = SK_HW_PS_NONE;
990
991 pPrt = &pAC->GIni.GP[Port];
992
993 if (pPrt->PLinkMode == SK_LMODE_HALF || pPrt->PLinkMode == SK_LMODE_FULL) {
994 AutoNeg = SK_FALSE;
995 }
996 else {
997 AutoNeg = SK_TRUE;
998 }
999
1000#ifdef GENESIS
1001 if (pAC->GIni.GIGenesis) {
1002
1003 switch (pPrt->PhyType) {
1004
1005 case SK_PHY_XMAC:
1006 Rtv = SkGePortCheckUpXmac(pAC, IoC, Port, AutoNeg);
1007 break;
1008 case SK_PHY_BCOM:
1009 Rtv = SkGePortCheckUpBcom(pAC, IoC, Port, AutoNeg);
1010 break;
1011#ifdef OTHER_PHY
1012 case SK_PHY_LONE:
1013 Rtv = SkGePortCheckUpLone(pAC, IoC, Port, AutoNeg);
1014 break;
1015 case SK_PHY_NAT:
1016 Rtv = SkGePortCheckUpNat(pAC, IoC, Port, AutoNeg);
1017 break;
1018#endif /* OTHER_PHY */
1019 }
1020 }
1021#endif /* GENESIS */
1022
1023#ifdef YUKON
1024 if (pAC->GIni.GIYukon) {
1025
1026 Rtv = SkGePortCheckUpGmac(pAC, IoC, Port, AutoNeg);
1027 }
1028#endif /* YUKON */
1029
1030 return(Rtv);
1031} /* SkGePortCheckUp */
1032
1033
1034#ifdef GENESIS
1035/******************************************************************************
1036 *
1037 * SkGePortCheckUpXmac() - Implementation of the Workaround for Errata #2
1038 *
1039 * return:
1040 * 0 o.k. nothing needed
1041 * 1 Restart needed on this port
1042 * 2 Link came up
1043 */
1044static int SkGePortCheckUpXmac(
1045SK_AC *pAC, /* Adapter Context */
1046SK_IOC IoC, /* IO Context */
1047int Port, /* Which port should be checked */
1048SK_BOOL AutoNeg) /* Is Auto-negotiation used ? */
1049{
1050 SK_U32 Shorts; /* Short Event Counter */
1051 SK_GEPORT *pPrt; /* GIni Port struct pointer */
1052 int Done;
1053 SK_U32 GpReg; /* General Purpose register value */
1054 SK_U16 Isrc; /* Interrupt source register */
1055 SK_U16 IsrcSum; /* Interrupt source register sum */
1056 SK_U16 LpAb; /* Link Partner Ability */
1057 SK_U16 ResAb; /* Resolved Ability */
1058 SK_U16 ExtStat; /* Extended Status Register */
1059 SK_U8 NextMode; /* Next AutoSensing Mode */
1060
1061 pPrt = &pAC->GIni.GP[Port];
1062
1063 if (pPrt->PHWLinkUp) {
1064 if (pPrt->PhyType != SK_PHY_XMAC) {
1065 return(SK_HW_PS_NONE);
1066 }
1067 else {
1068 return(SkGePortCheckShorts(pAC, IoC, Port));
1069 }
1070 }
1071
1072 IsrcSum = pPrt->PIsave;
1073 pPrt->PIsave = 0;
1074
1075 /* Now wait for each port's link */
1076 if (pPrt->PLinkBroken) {
1077 /* Link was broken */
1078 XM_IN32(IoC, Port, XM_GP_PORT, &GpReg);
1079
1080 if ((GpReg & XM_GP_INP_ASS) == 0) {
1081 /* The Link is in sync */
1082 XM_IN16(IoC, Port, XM_ISRC, &Isrc);
1083 IsrcSum |= Isrc;
1084 SkXmAutoNegLipaXmac(pAC, IoC, Port, IsrcSum);
1085
1086 if ((Isrc & XM_IS_INP_ASS) == 0) {
1087 /* It has been in sync since last time */
1088 /* Restart the PORT */
1089 SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_IRQ,
1090 ("Link in sync Restart Port %d\n", Port));
1091
1092 (void)SkXmUpdateStats(pAC, IoC, Port);
1093
1094 /* We now need to reinitialize the PrevShorts counter */
1095 (void)SkXmMacStatistic(pAC, IoC, Port, XM_RXE_SHT_ERR, &Shorts);
1096 pPrt->PPrevShorts = Shorts;
1097
1098 pPrt->PLinkBroken = SK_FALSE;
1099
1100 /*
1101 * Link Restart Workaround:
1102 * the other link side may restart its link as
1103 * well and we would detect another LinkBroken.
1104 * To prevent this from happening we check for
1105 * a maximum number of consecutive restarts.
1106 * If that limit is reached, we do NOT restart
1107 * the active link and check whether the link
1108 * is now o.k.
1109 */
1110 pPrt->PLinkResCt++;
1111
1112 pPrt->PAutoNegTimeOut = 0;
1113
1114 if (pPrt->PLinkResCt < SK_MAX_LRESTART) {
1115 return(SK_HW_PS_RESTART);
1116 }
1117
1118 pPrt->PLinkResCt = 0;
1119
1120 SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
1121 ("Do NOT restart on Port %d %x %x\n", Port, Isrc, IsrcSum));
1122 }
1123 else {
1124 pPrt->PIsave = (SK_U16)(IsrcSum & XM_IS_AND);
1125
1126 SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
1127 ("Save Sync/nosync Port %d %x %x\n", Port, Isrc, IsrcSum));
1128
1129 /* Do nothing more if link is broken */
1130 return(SK_HW_PS_NONE);
1131 }
1132 }
1133 else {
1134 /* Do nothing more if link is broken */
1135 return(SK_HW_PS_NONE);
1136 }
1137
1138 }
1139 else {
1140 /* Link was not broken, check if it is */
1141 XM_IN16(IoC, Port, XM_ISRC, &Isrc);
1142 IsrcSum |= Isrc;
1143 if ((Isrc & XM_IS_INP_ASS) != 0) {
1144 XM_IN16(IoC, Port, XM_ISRC, &Isrc);
1145 IsrcSum |= Isrc;
1146 if ((Isrc & XM_IS_INP_ASS) != 0) {
1147 XM_IN16(IoC, Port, XM_ISRC, &Isrc);
1148 IsrcSum |= Isrc;
1149 if ((Isrc & XM_IS_INP_ASS) != 0) {
1150 pPrt->PLinkBroken = SK_TRUE;
1151 /* Re-Init Link partner Autoneg flag */
1152 pPrt->PLipaAutoNeg = SK_LIPA_UNKNOWN;
1153 SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_IRQ,
1154 ("Link broken Port %d\n", Port));
1155
1156 /* Cable removed-> reinit sense mode */
1157 SkHWInitDefSense(pAC, IoC, Port);
1158
1159 return(SK_HW_PS_RESTART);
1160 }
1161 }
1162 }
1163 else {
1164 SkXmAutoNegLipaXmac(pAC, IoC, Port, Isrc);
1165
1166 if (SkGePortCheckShorts(pAC, IoC, Port) == SK_HW_PS_RESTART) {
1167 return(SK_HW_PS_RESTART);
1168 }
1169 }
1170 }
1171
1172 /*
1173 * here we usually can check whether the link is in sync and
1174 * auto-negotiation is done.
1175 */
1176 XM_IN32(IoC, Port, XM_GP_PORT, &GpReg);
1177 XM_IN16(IoC, Port, XM_ISRC, &Isrc);
1178 IsrcSum |= Isrc;
1179
1180 SkXmAutoNegLipaXmac(pAC, IoC, Port, IsrcSum);
1181
1182 if ((GpReg & XM_GP_INP_ASS) != 0 || (IsrcSum & XM_IS_INP_ASS) != 0) {
1183 if ((GpReg & XM_GP_INP_ASS) == 0) {
1184 /* Save Auto-negotiation Done interrupt only if link is in sync */
1185 pPrt->PIsave = (SK_U16)(IsrcSum & XM_IS_AND);
1186 }
1187#ifdef DEBUG
1188 if ((pPrt->PIsave & XM_IS_AND) != 0) {
1189 SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
1190 ("AutoNeg done rescheduled Port %d\n", Port));
1191 }
1192#endif /* DEBUG */
1193 return(SK_HW_PS_NONE);
1194 }
1195
1196 if (AutoNeg) {
1197 if ((IsrcSum & XM_IS_AND) != 0) {
1198 SkHWLinkUp(pAC, IoC, Port);
1199 Done = SkMacAutoNegDone(pAC, IoC, Port);
1200 if (Done != SK_AND_OK) {
1201 /* Get PHY parameters, for debugging only */
1202 SkXmPhyRead(pAC, IoC, Port, PHY_XMAC_AUNE_LP, &LpAb);
1203 SkXmPhyRead(pAC, IoC, Port, PHY_XMAC_RES_ABI, &ResAb);
1204 SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
1205 ("AutoNeg FAIL Port %d (LpAb %x, ResAb %x)\n",
1206 Port, LpAb, ResAb));
1207
1208 /* Try next possible mode */
1209 NextMode = SkHWSenseGetNext(pAC, IoC, Port);
1210 SkHWLinkDown(pAC, IoC, Port);
1211 if (Done == SK_AND_DUP_CAP) {
1212 /* GoTo next mode */
1213 SkHWSenseSetNext(pAC, IoC, Port, NextMode);
1214 }
1215
1216 return(SK_HW_PS_RESTART);
1217 }
1218 /*
1219 * Dummy Read extended status to prevent extra link down/ups
1220 * (clear Page Received bit if set)
1221 */
1222 SkXmPhyRead(pAC, IoC, Port, PHY_XMAC_AUNE_EXP, &ExtStat);
1223
1224 return(SK_HW_PS_LINK);
1225 }
1226
1227 /* AutoNeg not done, but HW link is up. Check for timeouts */
1228 pPrt->PAutoNegTimeOut++;
1229 if (pPrt->PAutoNegTimeOut >= SK_AND_MAX_TO) {
1230 /* Increase the Timeout counter */
1231 pPrt->PAutoNegTOCt++;
1232
1233 /* Timeout occurred */
1234 SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_IRQ,
1235 ("AutoNeg timeout Port %d\n", Port));
1236 if (pPrt->PLinkModeConf == SK_LMODE_AUTOSENSE &&
1237 pPrt->PLipaAutoNeg != SK_LIPA_AUTO) {
1238 /* Set Link manually up */
1239 SkHWSenseSetNext(pAC, IoC, Port, SK_LMODE_FULL);
1240 SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_IRQ,
1241 ("Set manual full duplex Port %d\n", Port));
1242 }
1243
1244 if (pPrt->PLinkModeConf == SK_LMODE_AUTOSENSE &&
1245 pPrt->PLipaAutoNeg == SK_LIPA_AUTO &&
1246 pPrt->PAutoNegTOCt >= SK_MAX_ANEG_TO) {
1247 /*
1248 * This is rather complicated.
1249 * We need to check here whether the LIPA_AUTO
1250 * we saw before is a false alert. We saw on one
1251 * switch (SR8800) that at boot time it sends
1252 * just one auto-neg packet and does no further
1253 * auto-negotiation.
1254 * Solution: we restart the autosensing after
1255 * a few timeouts.
1256 */
1257 pPrt->PAutoNegTOCt = 0;
1258 pPrt->PLipaAutoNeg = SK_LIPA_UNKNOWN;
1259 SkHWInitDefSense(pAC, IoC, Port);
1260 }
1261
1262 /* Do the restart */
1263 return(SK_HW_PS_RESTART);
1264 }
1265 }
1266 else {
1267 /* Link is up and we don't need more */
1268#ifdef DEBUG
1269 if (pPrt->PLipaAutoNeg == SK_LIPA_AUTO) {
1270 SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
1271 ("ERROR: Lipa auto detected on port %d\n", Port));
1272 }
1273#endif /* DEBUG */
1274 SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_IRQ,
1275 ("Link sync(GP), Port %d\n", Port));
1276 SkHWLinkUp(pAC, IoC, Port);
1277
1278 /*
1279 * Link sync (GP), so assume a good connection. But if no frames are
1280 * received within a time slot (maybe a broken Tx cable),
1281 * the port is restarted.
1282 */
1283 return(SK_HW_PS_LINK);
1284 }
1285
1286 return(SK_HW_PS_NONE);
1287} /* SkGePortCheckUpXmac */
1288
1289
1290/******************************************************************************
1291 *
1292 * SkGePortCheckUpBcom() - Check if the link is up on Bcom PHY
1293 *
1294 * return:
1295 * 0 o.k. nothing needed
1296 * 1 Restart needed on this port
1297 * 2 Link came up
1298 */
1299static int SkGePortCheckUpBcom(
1300SK_AC *pAC, /* Adapter Context */
1301SK_IOC IoC, /* IO Context */
1302int Port, /* Which port should be checked */
1303SK_BOOL AutoNeg) /* Is Auto-negotiation used ? */
1304{
1305 SK_GEPORT *pPrt; /* GIni Port struct pointer */
1306 int Done;
1307 SK_U16 Isrc; /* Interrupt source register */
1308 SK_U16 PhyStat; /* Phy Status Register */
1309 SK_U16 ResAb; /* Master/Slave resolution */
1310 SK_U16 Ctrl; /* Broadcom control flags */
1311#ifdef DEBUG
1312 SK_U16 LpAb;
1313 SK_U16 ExtStat;
1314#endif /* DEBUG */
1315
1316 pPrt = &pAC->GIni.GP[Port];
1317
1318 /* Check for No HCD Link events (#10523) */
1319 SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_INT_STAT, &Isrc);
1320
1321#ifdef xDEBUG
1322 if ((Isrc & ~(PHY_B_IS_HCT | PHY_B_IS_LCT)) ==
1323 (PHY_B_IS_SCR_S_ER | PHY_B_IS_RRS_CHANGE | PHY_B_IS_LRS_CHANGE)) {
1324
1325 SK_U32 Stat1, Stat2, Stat3;
1326
1327 Stat1 = 0;
1328 SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_INT_MASK, &Stat1);
1329 CMSMPrintString(
1330 pAC->pConfigTable,
1331 MSG_TYPE_RUNTIME_INFO,
1332 "CheckUp1 - Stat: %x, Mask: %x",
1333 (void *)Isrc,
1334 (void *)Stat1);
1335
1336 Stat1 = 0;
1337 SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_CTRL, &Stat1);
1338 Stat2 = 0;
1339 SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_STAT, &Stat2);
1340 Stat1 = Stat1 << 16 | Stat2;
1341 Stat2 = 0;
1342 SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_AUNE_ADV, &Stat2);
1343 Stat3 = 0;
1344 SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_AUNE_LP, &Stat3);
1345 Stat2 = Stat2 << 16 | Stat3;
1346 CMSMPrintString(
1347 pAC->pConfigTable,
1348 MSG_TYPE_RUNTIME_INFO,
1349 "Ctrl/Stat: %x, AN Adv/LP: %x",
1350 (void *)Stat1,
1351 (void *)Stat2);
1352
1353 Stat1 = 0;
1354 SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_AUNE_EXP, &Stat1);
1355 Stat2 = 0;
1356 SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_EXT_STAT, &Stat2);
1357 Stat1 = Stat1 << 16 | Stat2;
1358 Stat2 = 0;
1359 SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_1000T_CTRL, &Stat2);
1360 Stat3 = 0;
1361 SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_1000T_STAT, &Stat3);
1362 Stat2 = Stat2 << 16 | Stat3;
1363 CMSMPrintString(
1364 pAC->pConfigTable,
1365 MSG_TYPE_RUNTIME_INFO,
1366 "AN Exp/IEEE Ext: %x, 1000T Ctrl/Stat: %x",
1367 (void *)Stat1,
1368 (void *)Stat2);
1369
1370 Stat1 = 0;
1371 SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_P_EXT_CTRL, &Stat1);
1372 Stat2 = 0;
1373 SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_P_EXT_STAT, &Stat2);
1374 Stat1 = Stat1 << 16 | Stat2;
1375 Stat2 = 0;
1376 SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_AUX_CTRL, &Stat2);
1377 Stat3 = 0;
1378 SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_AUX_STAT, &Stat3);
1379 Stat2 = Stat2 << 16 | Stat3;
1380 CMSMPrintString(
1381 pAC->pConfigTable,
1382 MSG_TYPE_RUNTIME_INFO,
1383 "PHY Ext Ctrl/Stat: %x, Aux Ctrl/Stat: %x",
1384 (void *)Stat1,
1385 (void *)Stat2);
1386 }
1387#endif /* DEBUG */
1388
1389 if ((Isrc & (PHY_B_IS_NO_HDCL /* | PHY_B_IS_NO_HDC */)) != 0) {
1390 /*
1391 * Workaround BCom Errata:
1392 * enable and disable loopback mode if "NO HCD" occurs.
1393 */
1394 SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_CTRL, &Ctrl);
1395 SkXmPhyWrite(pAC, IoC, Port, PHY_BCOM_CTRL,
1396 (SK_U16)(Ctrl | PHY_CT_LOOP));
1397 SkXmPhyWrite(pAC, IoC, Port, PHY_BCOM_CTRL,
1398 (SK_U16)(Ctrl & ~PHY_CT_LOOP));
1399 SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
1400 ("No HCD Link event, Port %d\n", Port));
1401#ifdef xDEBUG
1402 CMSMPrintString(
1403 pAC->pConfigTable,
1404 MSG_TYPE_RUNTIME_INFO,
1405 "No HCD link event, port %d.",
1406 (void *)Port,
1407 (void *)NULL);
1408#endif /* DEBUG */
1409 }
1410
1411 /* Not obsolete: link status bit is latched to 0 and autoclearing! */
1412 SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_STAT, &PhyStat);
1413
1414 if (pPrt->PHWLinkUp) {
1415 return(SK_HW_PS_NONE);
1416 }
1417
1418#ifdef xDEBUG
1419 {
1420 SK_U32 Stat1, Stat2, Stat3;
1421
1422 Stat1 = 0;
1423 SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_INT_MASK, &Stat1);
1424 CMSMPrintString(
1425 pAC->pConfigTable,
1426 MSG_TYPE_RUNTIME_INFO,
1427 "CheckUp1a - Stat: %x, Mask: %x",
1428 (void *)Isrc,
1429 (void *)Stat1);
1430
1431 Stat1 = 0;
1432 SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_CTRL, &Stat1);
1433 Stat2 = 0;
1434 SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_STAT, &PhyStat);
1435 Stat1 = Stat1 << 16 | PhyStat;
1436 Stat2 = 0;
1437 SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_AUNE_ADV, &Stat2);
1438 Stat3 = 0;
1439 SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_AUNE_LP, &Stat3);
1440 Stat2 = Stat2 << 16 | Stat3;
1441 CMSMPrintString(
1442 pAC->pConfigTable,
1443 MSG_TYPE_RUNTIME_INFO,
1444 "Ctrl/Stat: %x, AN Adv/LP: %x",
1445 (void *)Stat1,
1446 (void *)Stat2);
1447
1448 Stat1 = 0;
1449 SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_AUNE_EXP, &Stat1);
1450 Stat2 = 0;
1451 SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_EXT_STAT, &Stat2);
1452 Stat1 = Stat1 << 16 | Stat2;
1453 Stat2 = 0;
1454 SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_1000T_CTRL, &Stat2);
1455 Stat3 = 0;
1456 SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_1000T_STAT, &ResAb);
1457 Stat2 = Stat2 << 16 | ResAb;
1458 CMSMPrintString(
1459 pAC->pConfigTable,
1460 MSG_TYPE_RUNTIME_INFO,
1461 "AN Exp/IEEE Ext: %x, 1000T Ctrl/Stat: %x",
1462 (void *)Stat1,
1463 (void *)Stat2);
1464
1465 Stat1 = 0;
1466 SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_P_EXT_CTRL, &Stat1);
1467 Stat2 = 0;
1468 SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_P_EXT_STAT, &Stat2);
1469 Stat1 = Stat1 << 16 | Stat2;
1470 Stat2 = 0;
1471 SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_AUX_CTRL, &Stat2);
1472 Stat3 = 0;
1473 SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_AUX_STAT, &Stat3);
1474 Stat2 = Stat2 << 16 | Stat3;
1475 CMSMPrintString(
1476 pAC->pConfigTable,
1477 MSG_TYPE_RUNTIME_INFO,
1478 "PHY Ext Ctrl/Stat: %x, Aux Ctrl/Stat: %x",
1479 (void *)Stat1,
1480 (void *)Stat2);
1481 }
1482#endif /* DEBUG */
1483
1484 /*
1485 * Here we usually can check whether the link is in sync and
1486 * auto-negotiation is done.
1487 */
1488
1489 SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_STAT, &PhyStat);
1490
1491 SkMacAutoNegLipaPhy(pAC, IoC, Port, PhyStat);
1492
1493 SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
1494 ("CheckUp Port %d, PhyStat: 0x%04X\n", Port, PhyStat));
1495
1496 SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_1000T_STAT, &ResAb);
1497
1498 if ((ResAb & PHY_B_1000S_MSF) != 0) {
1499 /* Error */
1500 SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
1501 ("Master/Slave Fault port %d\n", Port));
1502
1503 pPrt->PAutoNegFail = SK_TRUE;
1504 pPrt->PMSStatus = SK_MS_STAT_FAULT;
1505
1506 return(SK_HW_PS_RESTART);
1507 }
1508
1509 if ((PhyStat & PHY_ST_LSYNC) == 0) {
1510 return(SK_HW_PS_NONE);
1511 }
1512
1513 pPrt->PMSStatus = ((ResAb & PHY_B_1000S_MSR) != 0) ?
1514 SK_MS_STAT_MASTER : SK_MS_STAT_SLAVE;
1515
1516 SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
1517 ("Port %d, ResAb: 0x%04X\n", Port, ResAb));
1518
1519 if (AutoNeg) {
1520 if ((PhyStat & PHY_ST_AN_OVER) != 0) {
1521
1522 SkHWLinkUp(pAC, IoC, Port);
1523
1524 Done = SkMacAutoNegDone(pAC, IoC, Port);
1525
1526 if (Done != SK_AND_OK) {
1527#ifdef DEBUG
1528 /* Get PHY parameters, for debugging only */
1529 SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_AUNE_LP, &LpAb);
1530 SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_1000T_STAT, &ExtStat);
1531 SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
1532 ("AutoNeg FAIL Port %d (LpAb %x, 1000TStat %x)\n",
1533 Port, LpAb, ExtStat));
1534#endif /* DEBUG */
1535 return(SK_HW_PS_RESTART);
1536 }
1537 else {
1538#ifdef xDEBUG
1539 /* Dummy read ISR to prevent extra link downs/ups */
1540 SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_INT_STAT, &ExtStat);
1541
1542 if ((ExtStat & ~(PHY_B_IS_HCT | PHY_B_IS_LCT)) != 0) {
1543 CMSMPrintString(
1544 pAC->pConfigTable,
1545 MSG_TYPE_RUNTIME_INFO,
1546 "CheckUp2 - Stat: %x",
1547 (void *)ExtStat,
1548 (void *)NULL);
1549 }
1550#endif /* DEBUG */
1551 return(SK_HW_PS_LINK);
1552 }
1553 }
1554 }
1555 else { /* !AutoNeg */
1556 /* Link is up and we don't need more. */
1557#ifdef DEBUG
1558 if (pPrt->PLipaAutoNeg == SK_LIPA_AUTO) {
1559 SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
1560 ("ERROR: Lipa auto detected on port %d\n", Port));
1561 }
1562#endif /* DEBUG */
1563
1564#ifdef xDEBUG
1565 /* Dummy read ISR to prevent extra link downs/ups */
1566 SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_INT_STAT, &ExtStat);
1567
1568 if ((ExtStat & ~(PHY_B_IS_HCT | PHY_B_IS_LCT)) != 0) {
1569 CMSMPrintString(
1570 pAC->pConfigTable,
1571 MSG_TYPE_RUNTIME_INFO,
1572 "CheckUp3 - Stat: %x",
1573 (void *)ExtStat,
1574 (void *)NULL);
1575 }
1576#endif /* DEBUG */
1577
1578 SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_IRQ,
1579 ("Link sync(GP), Port %d\n", Port));
1580 SkHWLinkUp(pAC, IoC, Port);
1581
1582 return(SK_HW_PS_LINK);
1583 }
1584
1585 return(SK_HW_PS_NONE);
1586} /* SkGePortCheckUpBcom */
1587#endif /* GENESIS */
1588
1589
1590#ifdef YUKON
1591/******************************************************************************
1592 *
1593 * SkGePortCheckUpGmac() - Check if the link is up on Marvell PHY
1594 *
1595 * return:
1596 * 0 o.k. nothing needed
1597 * 1 Restart needed on this port
1598 * 2 Link came up
1599 */
1600static int SkGePortCheckUpGmac(
1601SK_AC *pAC, /* Adapter Context */
1602SK_IOC IoC, /* IO Context */
1603int Port, /* Which port should be checked */
1604SK_BOOL AutoNeg) /* Is Auto-negotiation used ? */
1605{
1606 SK_GEPORT *pPrt; /* GIni Port struct pointer */
1607 int Done;
1608 SK_U16 PhyIsrc; /* PHY Interrupt source */
1609 SK_U16 PhyStat; /* PHY Status */
1610 SK_U16 PhySpecStat;/* PHY Specific Status */
1611 SK_U16 ResAb; /* Master/Slave resolution */
1612 SK_EVPARA Para;
1613#ifdef DEBUG
1614 SK_U16 Word; /* I/O helper */
1615#endif /* DEBUG */
1616
1617 pPrt = &pAC->GIni.GP[Port];
1618
1619 if (pPrt->PHWLinkUp) {
1620 return(SK_HW_PS_NONE);
1621 }
1622
1623 /* Read PHY Status */
1624 SkGmPhyRead(pAC, IoC, Port, PHY_MARV_STAT, &PhyStat);
1625
1626 SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
1627 ("CheckUp Port %d, PhyStat: 0x%04X\n", Port, PhyStat));
1628
1629 /* Read PHY Interrupt Status */
1630 SkGmPhyRead(pAC, IoC, Port, PHY_MARV_INT_STAT, &PhyIsrc);
1631
1632 if ((PhyIsrc & PHY_M_IS_AN_COMPL) != 0) {
1633 SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
1634 ("Auto-Negotiation Completed, PhyIsrc: 0x%04X\n", PhyIsrc));
1635 }
1636
1637 if ((PhyIsrc & PHY_M_IS_LSP_CHANGE) != 0) {
1638 SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
1639 ("Link Speed Changed, PhyIsrc: 0x%04X\n", PhyIsrc));
1640 }
1641
1642 SkMacAutoNegLipaPhy(pAC, IoC, Port, PhyStat);
1643
1644 SkGmPhyRead(pAC, IoC, Port, PHY_MARV_1000T_STAT, &ResAb);
1645
1646 if ((ResAb & PHY_B_1000S_MSF) != 0) {
1647 /* Error */
1648 SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
1649 ("Master/Slave Fault port %d\n", Port));
1650
1651 pPrt->PAutoNegFail = SK_TRUE;
1652 pPrt->PMSStatus = SK_MS_STAT_FAULT;
1653
1654 return(SK_HW_PS_RESTART);
1655 }
1656
1657 /* Read PHY Specific Status */
1658 SkGmPhyRead(pAC, IoC, Port, PHY_MARV_PHY_STAT, &PhySpecStat);
1659
1660 SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
1661 ("Phy1000BT: 0x%04X, PhySpecStat: 0x%04X\n", ResAb, PhySpecStat));
1662
1663#ifdef DEBUG
1664 SkGmPhyRead(pAC, IoC, Port, PHY_MARV_AUNE_EXP, &Word);
1665
1666 if ((PhyIsrc & PHY_M_IS_AN_PR) != 0 || (Word & PHY_ANE_RX_PG) != 0 ||
1667 (PhySpecStat & PHY_M_PS_PAGE_REC) != 0) {
1668 /* Read PHY Next Page Link Partner */
1669 SkGmPhyRead(pAC, IoC, Port, PHY_MARV_NEPG_LP, &Word);
1670
1671 SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
1672 ("Page Received, NextPage: 0x%04X\n", Word));
1673 }
1674#endif /* DEBUG */
1675
1676 if ((PhySpecStat & PHY_M_PS_LINK_UP) == 0) {
1677 return(SK_HW_PS_NONE);
1678 }
1679
1680 if ((PhySpecStat & PHY_M_PS_DOWNS_STAT) != 0 ||
1681 (PhyIsrc & PHY_M_IS_DOWNSH_DET) != 0) {
1682 /* Downshift detected */
1683 SK_ERR_LOG(pAC, SK_ERRCL_HW, SKERR_SIRQ_E025, SKERR_SIRQ_E025MSG);
1684
1685 Para.Para64 = Port;
1686 SkEventQueue(pAC, SKGE_DRV, SK_DRV_DOWNSHIFT_DET, Para);
1687
1688 SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
1689 ("Downshift detected, PhyIsrc: 0x%04X\n", PhyIsrc));
1690 }
1691
1692 pPrt->PMSStatus = ((ResAb & PHY_B_1000S_MSR) != 0) ?
1693 SK_MS_STAT_MASTER : SK_MS_STAT_SLAVE;
1694
1695 pPrt->PCableLen = (SK_U8)((PhySpecStat & PHY_M_PS_CABLE_MSK) >> 7);
1696
1697 if (AutoNeg) {
1698 /* Auto-Negotiation Over ? */
1699 if ((PhyStat & PHY_ST_AN_OVER) != 0) {
1700
1701 SkHWLinkUp(pAC, IoC, Port);
1702
1703 Done = SkMacAutoNegDone(pAC, IoC, Port);
1704
1705 if (Done != SK_AND_OK) {
1706 return(SK_HW_PS_RESTART);
1707 }
1708
1709 return(SK_HW_PS_LINK);
1710 }
1711 }
1712 else { /* !AutoNeg */
1713 /* Link is up and we don't need more */
1714#ifdef DEBUG
1715 if (pPrt->PLipaAutoNeg == SK_LIPA_AUTO) {
1716 SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
1717 ("ERROR: Lipa auto detected on port %d\n", Port));
1718 }
1719#endif /* DEBUG */
1720
1721 SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_IRQ,
1722 ("Link sync, Port %d\n", Port));
1723 SkHWLinkUp(pAC, IoC, Port);
1724
1725 return(SK_HW_PS_LINK);
1726 }
1727
1728 return(SK_HW_PS_NONE);
1729} /* SkGePortCheckUpGmac */
1730#endif /* YUKON */
1731
1732
1733#ifdef OTHER_PHY
1734/******************************************************************************
1735 *
1736 * SkGePortCheckUpLone() - Check if the link is up on Level One PHY
1737 *
1738 * return:
1739 * 0 o.k. nothing needed
1740 * 1 Restart needed on this port
1741 * 2 Link came up
1742 */
1743static int SkGePortCheckUpLone(
1744SK_AC *pAC, /* Adapter Context */
1745SK_IOC IoC, /* IO Context */
1746int Port, /* Which port should be checked */
1747SK_BOOL AutoNeg) /* Is Auto-negotiation used ? */
1748{
1749 SK_GEPORT *pPrt; /* GIni Port struct pointer */
1750 int Done;
1751 SK_U16 Isrc; /* Interrupt source register */
1752 SK_U16 LpAb; /* Link Partner Ability */
1753 SK_U16 ExtStat; /* Extended Status Register */
1754 SK_U16 PhyStat; /* Phy Status Register */
1755 SK_U16 StatSum;
1756 SK_U8 NextMode; /* Next AutoSensing Mode */
1757
1758 pPrt = &pAC->GIni.GP[Port];
1759
1760 if (pPrt->PHWLinkUp) {
1761 return(SK_HW_PS_NONE);
1762 }
1763
1764 StatSum = pPrt->PIsave;
1765 pPrt->PIsave = 0;
1766
1767 /*
1768 * here we usually can check whether the link is in sync and
1769 * auto-negotiation is done.
1770 */
1771 SkXmPhyRead(pAC, IoC, Port, PHY_LONE_STAT, &PhyStat);
1772 StatSum |= PhyStat;
1773
1774 SkMacAutoNegLipaPhy(pAC, IoC, Port, PhyStat);
1775
1776 if ((PhyStat & PHY_ST_LSYNC) == 0) {
1777 /* Save Auto-negotiation Done bit */
1778 pPrt->PIsave = (SK_U16)(StatSum & PHY_ST_AN_OVER);
1779#ifdef DEBUG
1780 if ((pPrt->PIsave & PHY_ST_AN_OVER) != 0) {
1781 SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
1782 ("AutoNeg done rescheduled Port %d\n", Port));
1783 }
1784#endif /* DEBUG */
1785 return(SK_HW_PS_NONE);
1786 }
1787
1788 if (AutoNeg) {
1789 if ((StatSum & PHY_ST_AN_OVER) != 0) {
1790 SkHWLinkUp(pAC, IoC, Port);
1791 Done = SkMacAutoNegDone(pAC, IoC, Port);
1792 if (Done != SK_AND_OK) {
1793 /* Get PHY parameters, for debugging only */
1794 SkXmPhyRead(pAC, IoC, Port, PHY_LONE_AUNE_LP, &LpAb);
1795 SkXmPhyRead(pAC, IoC, Port, PHY_LONE_1000T_STAT, &ExtStat);
1796 SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
1797 ("AutoNeg FAIL Port %d (LpAb %x, 1000TStat %x)\n",
1798 Port, LpAb, ExtStat));
1799
1800 /* Try next possible mode */
1801 NextMode = SkHWSenseGetNext(pAC, IoC, Port);
1802 SkHWLinkDown(pAC, IoC, Port);
1803 if (Done == SK_AND_DUP_CAP) {
1804 /* GoTo next mode */
1805 SkHWSenseSetNext(pAC, IoC, Port, NextMode);
1806 }
1807
1808 return(SK_HW_PS_RESTART);
1809
1810 }
1811 else {
1812 /*
1813 * Dummy Read interrupt status to prevent
1814 * extra link down/ups
1815 */
1816 SkXmPhyRead(pAC, IoC, Port, PHY_LONE_INT_STAT, &ExtStat);
1817 return(SK_HW_PS_LINK);
1818 }
1819 }
1820
1821 /* AutoNeg not done, but HW link is up. Check for timeouts */
1822 pPrt->PAutoNegTimeOut++;
1823 if (pPrt->PAutoNegTimeOut >= SK_AND_MAX_TO) {
1824 /* Timeout occurred */
1825 SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_IRQ,
1826 ("AutoNeg timeout Port %d\n", Port));
1827 if (pPrt->PLinkModeConf == SK_LMODE_AUTOSENSE &&
1828 pPrt->PLipaAutoNeg != SK_LIPA_AUTO) {
1829 /* Set Link manually up */
1830 SkHWSenseSetNext(pAC, IoC, Port, SK_LMODE_FULL);
1831 SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_IRQ,
1832 ("Set manual full duplex Port %d\n", Port));
1833 }
1834
1835 /* Do the restart */
1836 return(SK_HW_PS_RESTART);
1837 }
1838 }
1839 else {
1840 /* Link is up and we don't need more */
1841#ifdef DEBUG
1842 if (pPrt->PLipaAutoNeg == SK_LIPA_AUTO) {
1843 SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
1844 ("ERROR: Lipa auto detected on port %d\n", Port));
1845 }
1846#endif /* DEBUG */
1847
1848 /*
1849 * Dummy Read interrupt status to prevent
1850 * extra link down/ups
1851 */
1852 SkXmPhyRead(pAC, IoC, Port, PHY_LONE_INT_STAT, &ExtStat);
1853
1854 SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_IRQ,
1855 ("Link sync(GP), Port %d\n", Port));
1856 SkHWLinkUp(pAC, IoC, Port);
1857
1858 return(SK_HW_PS_LINK);
1859 }
1860
1861 return(SK_HW_PS_NONE);
1862} /* SkGePortCheckUpLone */
1863
1864
1865/******************************************************************************
1866 *
1867 * SkGePortCheckUpNat() - Check if the link is up on National PHY
1868 *
1869 * return:
1870 * 0 o.k. nothing needed
1871 * 1 Restart needed on this port
1872 * 2 Link came up
1873 */
1874static int SkGePortCheckUpNat(
1875SK_AC *pAC, /* Adapter Context */
1876SK_IOC IoC, /* IO Context */
1877int Port, /* Which port should be checked */
1878SK_BOOL AutoNeg) /* Is Auto-negotiation used ? */
1879{
1880 /* todo: National */
1881 return(SK_HW_PS_NONE);
1882} /* SkGePortCheckUpNat */
1883#endif /* OTHER_PHY */
1884
1885
1886/******************************************************************************
1887 *
1888 * SkGeSirqEvent() - Event Service Routine
1889 *
1890 * Description:
1891 *	handles the HW events queued for this module (see switch below)
1892 * Notes:
1893 */
1894int SkGeSirqEvent(
1895SK_AC *pAC, /* Adapter Context */
1896SK_IOC IoC, /* Io Context */
1897SK_U32 Event, /* Module specific Event */
1898SK_EVPARA Para) /* Event specific Parameter */
1899{
1900 SK_GEPORT *pPrt; /* GIni Port struct pointer */
1901 SK_U32 Port;
1902 SK_U32 Val32;
1903 int PortStat;
1904 SK_U8 Val8;
1905#ifdef GENESIS
1906 SK_U64 Octets;
1907#endif /* GENESIS */
1908
1909 Port = Para.Para32[0];
1910 pPrt = &pAC->GIni.GP[Port];
1911
1912 switch (Event) {
1913 case SK_HWEV_WATIM:
1914 if (pPrt->PState == SK_PRT_RESET) {
1915
1916 PortStat = SK_HW_PS_NONE;
1917 }
1918 else {
1919 /* Check whether port came up */
1920 PortStat = SkGePortCheckUp(pAC, IoC, (int)Port);
1921 }
1922
1923 switch (PortStat) {
1924 case SK_HW_PS_RESTART:
1925 if (pPrt->PHWLinkUp) {
1926 /* Set Link to down */
1927 SkHWLinkDown(pAC, IoC, (int)Port);
1928
1929 /*
1930 * Signal directly to RLMT to ensure correct
1931 * sequence of SWITCH and RESET event.
1932 */
1933 SkRlmtEvent(pAC, IoC, SK_RLMT_LINK_DOWN, Para);
1934 }
1935
1936 /* Restart needed */
1937 SkEventQueue(pAC, SKGE_DRV, SK_DRV_PORT_RESET, Para);
1938 break;
1939
1940 case SK_HW_PS_LINK:
1941 /* Signal to RLMT */
1942 SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_LINK_UP, Para);
1943 break;
1944 }
1945
1946 /* Start again the check Timer */
1947 if (pPrt->PHWLinkUp) {
1948 Val32 = SK_WA_ACT_TIME;
1949 }
1950 else {
1951 Val32 = SK_WA_INA_TIME;
1952 }
1953
1954 /* Todo: still needed for non-XMAC PHYs??? */
1955 /* Start workaround Errata #2 timer */
1956 SkTimerStart(pAC, IoC, &pPrt->PWaTimer, Val32,
1957 SKGE_HWAC, SK_HWEV_WATIM, Para);
1958 break;
1959
1960 case SK_HWEV_PORT_START:
1961 if (pPrt->PHWLinkUp) {
1962 /*
1963 * Signal directly to RLMT to ensure correct
1964 * sequence of SWITCH and RESET event.
1965 */
1966 SkRlmtEvent(pAC, IoC, SK_RLMT_LINK_DOWN, Para);
1967 }
1968
1969 SkHWLinkDown(pAC, IoC, (int)Port);
1970
1971 /* Schedule Port RESET */
1972 SkEventQueue(pAC, SKGE_DRV, SK_DRV_PORT_RESET, Para);
1973
1974 /* Start workaround Errata #2 timer */
1975 SkTimerStart(pAC, IoC, &pPrt->PWaTimer, SK_WA_INA_TIME,
1976 SKGE_HWAC, SK_HWEV_WATIM, Para);
1977 break;
1978
1979 case SK_HWEV_PORT_STOP:
1980 if (pPrt->PHWLinkUp) {
1981 /*
1982 * Signal directly to RLMT to ensure correct
1983 * sequence of SWITCH and RESET event.
1984 */
1985 SkRlmtEvent(pAC, IoC, SK_RLMT_LINK_DOWN, Para);
1986 }
1987
1988 /* Stop Workaround Timer */
1989 SkTimerStop(pAC, IoC, &pPrt->PWaTimer);
1990
1991 SkHWLinkDown(pAC, IoC, (int)Port);
1992 break;
1993
1994 case SK_HWEV_UPDATE_STAT:
1995 /* We do NOT need to update any statistics */
1996 break;
1997
1998 case SK_HWEV_CLEAR_STAT:
1999 /* No HW statistics to clear, just reset the SW shadow counters */
2000 for (Port = 0; Port < (SK_U32)pAC->GIni.GIMacsFound; Port++) {
2001 pAC->GIni.GP[Port].PPrevRx = 0;
2002 pAC->GIni.GP[Port].PPrevFcs = 0;
2003 pAC->GIni.GP[Port].PPrevShorts = 0;
2004 }
2005 break;
2006
2007 case SK_HWEV_SET_LMODE:
2008 Val8 = (SK_U8)Para.Para32[1];
2009 if (pPrt->PLinkModeConf != Val8) {
2010 /* Set New link mode */
2011 pPrt->PLinkModeConf = Val8;
2012
2013 /* Restart Port */
2014 SkEventQueue(pAC, SKGE_HWAC, SK_HWEV_PORT_STOP, Para);
2015 SkEventQueue(pAC, SKGE_HWAC, SK_HWEV_PORT_START, Para);
2016 }
2017 break;
2018
2019 case SK_HWEV_SET_FLOWMODE:
2020 Val8 = (SK_U8)Para.Para32[1];
2021 if (pPrt->PFlowCtrlMode != Val8) {
2022 /* Set New Flow Control mode */
2023 pPrt->PFlowCtrlMode = Val8;
2024
2025 /* Restart Port */
2026 SkEventQueue(pAC, SKGE_HWAC, SK_HWEV_PORT_STOP, Para);
2027 SkEventQueue(pAC, SKGE_HWAC, SK_HWEV_PORT_START, Para);
2028 }
2029 break;
2030
2031 case SK_HWEV_SET_ROLE:
2032 /* not possible for fiber */
2033 if (!pAC->GIni.GICopperType) {
2034 break;
2035 }
2036 Val8 = (SK_U8)Para.Para32[1];
2037 if (pPrt->PMSMode != Val8) {
2038 /* Set New Role (Master/Slave) mode */
2039 pPrt->PMSMode = Val8;
2040
2041 /* Restart Port */
2042 SkEventQueue(pAC, SKGE_HWAC, SK_HWEV_PORT_STOP, Para);
2043 SkEventQueue(pAC, SKGE_HWAC, SK_HWEV_PORT_START, Para);
2044 }
2045 break;
2046
2047 case SK_HWEV_SET_SPEED:
2048 if (pPrt->PhyType != SK_PHY_MARV_COPPER) {
2049 break;
2050 }
2051 Val8 = (SK_U8)Para.Para32[1];
2052 if (pPrt->PLinkSpeed != Val8) {
2053 /* Set New Speed parameter */
2054 pPrt->PLinkSpeed = Val8;
2055
2056 /* Restart Port */
2057 SkEventQueue(pAC, SKGE_HWAC, SK_HWEV_PORT_STOP, Para);
2058 SkEventQueue(pAC, SKGE_HWAC, SK_HWEV_PORT_START, Para);
2059 }
2060 break;
2061
2062#ifdef GENESIS
2063 case SK_HWEV_HALFDUP_CHK:
2064 if (pAC->GIni.GIGenesis) {
2065 /*
2066 * half duplex hangup workaround.
2067 * See packet arbiter timeout interrupt for description
2068 */
2069 pPrt->HalfDupTimerActive = SK_FALSE;
2070 if (pPrt->PLinkModeStatus == SK_LMODE_STAT_HALF ||
2071 pPrt->PLinkModeStatus == SK_LMODE_STAT_AUTOHALF) {
2072 /* Snap statistic counters */
2073 (void)SkXmUpdateStats(pAC, IoC, Port);
2074
2075 (void)SkXmMacStatistic(pAC, IoC, Port, XM_TXO_OK_HI, &Val32);
2076
2077 Octets = (SK_U64)Val32 << 32;
2078
2079 (void)SkXmMacStatistic(pAC, IoC, Port, XM_TXO_OK_LO, &Val32);
2080
2081 Octets += Val32;
2082
2083 if (pPrt->LastOctets == Octets) {
2084 /* Tx hanging, a FIFO flush restarts it */
2085 SkMacFlushTxFifo(pAC, IoC, Port);
2086 }
2087 }
2088 }
2089 break;
2090#endif /* GENESIS */
2091
2092 default:
2093 SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_SIRQ_E001, SKERR_SIRQ_E001MSG);
2094 break;
2095 }
2096
2097 return(0);
2098} /* SkGeSirqEvent */
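
Editorial note, not part of the original file: as a minimal sketch of how callers encode the parameters decoded above, the snippet below queues a link-mode change for port 0 the way the SK_HWEV_SET_LMODE case expects it (Para32[0] = port, Para32[1] = new mode). SetLinkModeSketch() is a hypothetical helper; SkEventQueue(), SKGE_HWAC, SK_HWEV_SET_LMODE and SK_LMODE_FULL are taken from this source.

/* Hedged sketch only: request full duplex on port 0 via the event queue. */
static void SetLinkModeSketch(
SK_AC   *pAC)   /* Adapter Context */
{
        SK_EVPARA       Para;

        Para.Para32[0] = 0;                     /* port index */
        Para.Para32[1] = SK_LMODE_FULL;         /* new link mode */

        /* SkGeSirqEvent() above will stop and restart the port */
        SkEventQueue(pAC, SKGE_HWAC, SK_HWEV_SET_LMODE, Para);
}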
2099
2100
2101#ifdef GENESIS
2102/******************************************************************************
2103 *
2104 * SkPhyIsrBcom() - PHY interrupt service routine
2105 *
2106 * Description: handles all interrupts from BCom PHY
2107 *
2108 * Returns: N/A
2109 */
2110static void SkPhyIsrBcom(
2111SK_AC *pAC, /* Adapter Context */
2112SK_IOC IoC, /* Io Context */
2113int Port, /* Port Num = PHY Num */
2114SK_U16 IStatus) /* Interrupt Status */
2115{
2116 SK_GEPORT *pPrt; /* GIni Port struct pointer */
2117 SK_EVPARA Para;
2118
2119 pPrt = &pAC->GIni.GP[Port];
2120
2121 if ((IStatus & PHY_B_IS_PSE) != 0) {
2122 /* Uncorrectable pair swap error */
2123 SK_ERR_LOG(pAC, SK_ERRCL_HW | SK_ERRCL_INIT, SKERR_SIRQ_E022,
2124 SKERR_SIRQ_E022MSG);
2125 }
2126
2127 if ((IStatus & (PHY_B_IS_AN_PR | PHY_B_IS_LST_CHANGE)) != 0) {
2128
2129 SkHWLinkDown(pAC, IoC, Port);
2130
2131 Para.Para32[0] = (SK_U32)Port;
2132 /* Signal to RLMT */
2133 SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_LINK_DOWN, Para);
2134
2135 /* Start workaround Errata #2 timer */
2136 SkTimerStart(pAC, IoC, &pPrt->PWaTimer, SK_WA_INA_TIME,
2137 SKGE_HWAC, SK_HWEV_WATIM, Para);
2138 }
2139
2140} /* SkPhyIsrBcom */
2141#endif /* GENESIS */
2142
2143
2144#ifdef YUKON
2145/******************************************************************************
2146 *
2147 * SkPhyIsrGmac() - PHY interrupt service routine
2148 *
2149 * Description: handles all interrupts from Marvell PHY
2150 *
2151 * Returns: N/A
2152 */
2153static void SkPhyIsrGmac(
2154SK_AC *pAC, /* Adapter Context */
2155SK_IOC IoC, /* Io Context */
2156int Port, /* Port Num = PHY Num */
2157SK_U16 IStatus) /* Interrupt Status */
2158{
2159 SK_GEPORT *pPrt; /* GIni Port struct pointer */
2160 SK_EVPARA Para;
2161 SK_U16 Word;
2162
2163 pPrt = &pAC->GIni.GP[Port];
2164
2165 if ((IStatus & (PHY_M_IS_AN_PR | PHY_M_IS_LST_CHANGE)) != 0) {
2166
2167 SkHWLinkDown(pAC, IoC, Port);
2168
2169 SkGmPhyRead(pAC, IoC, Port, PHY_MARV_AUNE_ADV, &Word);
2170
2171 SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
2172 ("AutoNeg.Adv: 0x%04X\n", Word));
2173
2174 /* Set Auto-negotiation advertisement */
2175 if (pPrt->PFlowCtrlMode == SK_FLOW_MODE_SYM_OR_REM) {
2176 /* restore Asymmetric Pause bit */
2177 SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_AUNE_ADV,
2178 (SK_U16)(Word | PHY_M_AN_ASP));
2179 }
2180
2181 Para.Para32[0] = (SK_U32)Port;
2182 /* Signal to RLMT */
2183 SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_LINK_DOWN, Para);
2184 }
2185
2186 if ((IStatus & PHY_M_IS_AN_ERROR) != 0) {
2187 /* Auto-Negotiation Error */
2188 SK_ERR_LOG(pAC, SK_ERRCL_HW, SKERR_SIRQ_E023, SKERR_SIRQ_E023MSG);
2189 }
2190
2191 if ((IStatus & PHY_M_IS_FIFO_ERROR) != 0) {
2192 /* FIFO Overflow/Underrun Error */
2193 SK_ERR_LOG(pAC, SK_ERRCL_HW, SKERR_SIRQ_E024, SKERR_SIRQ_E024MSG);
2194 }
2195
2196} /* SkPhyIsrGmac */
2197#endif /* YUKON */
2198
2199
2200#ifdef OTHER_PHY
2201/******************************************************************************
2202 *
2203 * SkPhyIsrLone() - PHY interrupt service routine
2204 *
2205 * Description: handles all interrupts from LONE PHY
2206 *
2207 * Returns: N/A
2208 */
2209static void SkPhyIsrLone(
2210SK_AC *pAC, /* Adapter Context */
2211SK_IOC IoC, /* Io Context */
2212int Port, /* Port Num = PHY Num */
2213SK_U16 IStatus) /* Interrupt Status */
2214{
2215 SK_EVPARA Para;
2216
2217 if (IStatus & (PHY_L_IS_DUP | PHY_L_IS_ISOL)) {
2218
2219 SkHWLinkDown(pAC, IoC, Port);
2220
2221 Para.Para32[0] = (SK_U32)Port;
2222 /* Signal to RLMT */
2223 SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_LINK_DOWN, Para);
2224 }
2225
2226} /* SkPhyIsrLone */
2227#endif /* OTHER_PHY */
2228
2229/* End of File */
diff --git a/drivers/net/sk98lin/ski2c.c b/drivers/net/sk98lin/ski2c.c
deleted file mode 100644
index 79bf57cb5326..000000000000
--- a/drivers/net/sk98lin/ski2c.c
+++ /dev/null
@@ -1,1296 +0,0 @@
1/******************************************************************************
2 *
3 * Name: ski2c.c
4 * Project: Gigabit Ethernet Adapters, TWSI-Module
5 * Version: $Revision: 1.59 $
6 * Date: $Date: 2003/10/20 09:07:25 $
7 * Purpose: Functions to access Voltage and Temperature Sensor
8 *
9 ******************************************************************************/
10
11/******************************************************************************
12 *
13 * (C)Copyright 1998-2002 SysKonnect.
14 * (C)Copyright 2002-2003 Marvell.
15 *
16 * This program is free software; you can redistribute it and/or modify
17 * it under the terms of the GNU General Public License as published by
18 * the Free Software Foundation; either version 2 of the License, or
19 * (at your option) any later version.
20 *
21 * The information in this file is provided "AS IS" without warranty.
22 *
23 ******************************************************************************/
24
25/*
26 * I2C Protocol
27 */
28#if (defined(DEBUG) || ((!defined(LINT)) && (!defined(SK_SLIM))))
29static const char SysKonnectFileId[] =
30 "@(#) $Id: ski2c.c,v 1.59 2003/10/20 09:07:25 rschmidt Exp $ (C) Marvell. ";
31#endif
32
33#include "h/skdrv1st.h" /* Driver Specific Definitions */
34#include "h/lm80.h"
35#include "h/skdrv2nd.h" /* Adapter Control- and Driver specific Def. */
36
37#ifdef __C2MAN__
38/*
39 I2C protocol implementation.
40
41 General Description:
42
43 The I2C protocol is used for the temperature sensors and for
44 the serial EEPROM which holds the configuration.
45
46 This file covers functions that allow reading, writing and doing
47 some bulk requests to a specified I2C address.
48
49 The Genesis has 2 I2C buses. One for the EEPROM which holds
50 the VPD Data and one for the temperature and voltage sensor.
51 The following picture shows the I2C buses, I2C devices and
52 their control registers.
53
54 Note: The VPD functions are in skvpd.c
55.
56. PCI Config I2C Bus for VPD Data:
57.
58. +------------+
59. | VPD EEPROM |
60. +------------+
61. |
62. | <-- I2C
63. |
64. +-----------+-----------+
65. | |
66. +-----------------+ +-----------------+
67. | PCI_VPD_ADR_REG | | PCI_VPD_DAT_REG |
68. +-----------------+ +-----------------+
69.
70.
71. I2C Bus for LM80 sensor:
72.
73. +-----------------+
74. | Temperature and |
75. | Voltage Sensor |
76. | LM80 |
77. +-----------------+
78. |
79. |
80. I2C --> |
81. |
82. +----+
83. +-------------->| OR |<--+
84. | +----+ |
85. +------+------+ |
86. | | |
87. +--------+ +--------+ +----------+
88. | B2_I2C | | B2_I2C | | B2_I2C |
89. | _CTRL | | _DATA | | _SW |
90. +--------+ +--------+ +----------+
91.
92 The I2C bus may be driven by the B2_I2C_SW or by the B2_I2C_CTRL
93 and B2_I2C_DATA registers.
94 For driver software it is recommended to use the I2C control and
95 data registers, because I2C bus timing is done by the ASIC and
96 an interrupt may be received when the I2C request is completed.
97
98 Clock Rate Timing:               MIN      MAX      generated by
99   VPD EEPROM:                    50 kHz   100 kHz  HW
100   LM80 over I2C Ctrl/Data reg.   50 kHz   100 kHz  HW
101   LM80 over B2_I2C_SW register   0        400 kHz  SW
102
103 Note: The clock generated by the hardware is dependent on the
104 PCI clock. If the PCI bus clock is 33 MHz, the I2C/VPD
105 clock is 50 kHz.
106 */
107intro()
108{}
109#endif
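
Editorial note, not part of the original file: to make the recommended control/data register path concrete, here is a minimal hedged sketch of a single LM80 register read. It simply mirrors the SkI2cRead()/SkI2cWrite() helpers further down in this file; ReadLm80RegSketch() and its 0-on-timeout return value are made up for illustration, while SK_OUT32(), SK_I2C_CTL(), SkI2cWait(), SK_IN32(), LM80_ADDR and I2C_025K_DEV are taken from this driver.

/* Hedged sketch only: read one LM80 register via B2_I2C_CTRL/B2_I2C_DATA. */
static SK_U32 ReadLm80RegSketch(
SK_AC   *pAC,   /* Adapter Context */
SK_IOC  IoC,    /* I/O Context */
int     Reg)    /* LM80 register to read */
{
        SK_U32  Data;

        SK_OUT32(IoC, B2_I2C_DATA, 0);          /* clear the data register */

        /* start a hardware-timed read transfer to the LM80 */
        SK_I2C_CTL(IoC, I2C_READ, LM80_ADDR, I2C_025K_DEV, Reg, 0);

        if (SkI2cWait(pAC, IoC, I2C_READ) != 0) {
                return(0);                      /* transfer timed out */
        }

        SK_IN32(IoC, B2_I2C_DATA, &Data);       /* fetch the value read */

        return(Data);
}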
110
111#ifdef SK_DIAG
112/*
113 * I2C Fast Mode timing values used by the LM80.
114 * If new devices are added to the I2C bus the timing values have to be checked.
115 */
116#ifndef I2C_SLOW_TIMING
117#define T_CLK_LOW 1300L /* clock low time in ns */
118#define T_CLK_HIGH 600L /* clock high time in ns */
119#define T_DATA_IN_SETUP 100L /* data in Set-up Time */
120#define T_START_HOLD 600L /* start condition hold time */
121#define T_START_SETUP 600L /* start condition Set-up time */
122#define T_STOP_SETUP 600L /* stop condition Set-up time */
123#define T_BUS_IDLE 1300L /* time the bus must be free after Tx */
124#define T_CLK_2_DATA_OUT 900L /* max. clock low to data output valid */
125#else /* I2C_SLOW_TIMING */
126/* I2C Standard Mode Timing */
127#define T_CLK_LOW 4700L /* clock low time in ns */
128#define T_CLK_HIGH 4000L /* clock high time in ns */
129#define T_DATA_IN_SETUP 250L /* data in Set-up Time */
130#define T_START_HOLD 4000L /* start condition hold time */
131#define T_START_SETUP 4700L /* start condition Set-up time */
132#define T_STOP_SETUP 4000L /* stop condition Set-up time */
133#define T_BUS_IDLE 4700L /* time the bus must be free after Tx */
134#endif /* !I2C_SLOW_TIMING */
135
136#define NS2BCLK(x) (((x)*125)/10000)
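
Editorial note, not part of the original file: the factor 125/10000 in NS2BCLK() reduces to 1/80, i.e. one tick per 80 ns of requested wait time, with integer division truncating the result. Two worked examples using the fast-mode timings defined above:

/*
 *   NS2BCLK(T_CLK_LOW)  = (1300 * 125) / 10000 = 16 ticks
 *   NS2BCLK(T_CLK_HIGH) = ( 600 * 125) / 10000 =  7 ticks
 */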
137
138/*
139 * I2C Wire Operations
140 *
141 * About I2C_CLK_LOW():
142 *
143 * The Data Direction bit (I2C_DATA_DIR) has to be set to input when setting
144 * clock to low, to prevent the ASIC and the I2C data client from driving the
145 * serial data line simultaneously (ASIC: last bit of a byte = '1', I2C client
146 * sends an 'ACK'). See also Concentrator Bugreport No. 10192.
147 */
148#define I2C_DATA_HIGH(IoC) SK_I2C_SET_BIT(IoC, I2C_DATA)
149#define I2C_DATA_LOW(IoC) SK_I2C_CLR_BIT(IoC, I2C_DATA)
150#define I2C_DATA_OUT(IoC) SK_I2C_SET_BIT(IoC, I2C_DATA_DIR)
151#define I2C_DATA_IN(IoC) SK_I2C_CLR_BIT(IoC, I2C_DATA_DIR | I2C_DATA)
152#define I2C_CLK_HIGH(IoC) SK_I2C_SET_BIT(IoC, I2C_CLK)
153#define I2C_CLK_LOW(IoC) SK_I2C_CLR_BIT(IoC, I2C_CLK | I2C_DATA_DIR)
154#define I2C_START_COND(IoC) SK_I2C_CLR_BIT(IoC, I2C_CLK)
155
156#define NS2CLKT(x) ((x*125L)/10000)
157
158/*--------------- I2C Interface Register Functions --------------- */
159
160/*
161 * sending one bit
162 */
163void SkI2cSndBit(
164SK_IOC IoC, /* I/O Context */
165SK_U8 Bit) /* Bit to send */
166{
167 I2C_DATA_OUT(IoC);
168 if (Bit) {
169 I2C_DATA_HIGH(IoC);
170 }
171 else {
172 I2C_DATA_LOW(IoC);
173 }
174 SkDgWaitTime(IoC, NS2BCLK(T_DATA_IN_SETUP));
175 I2C_CLK_HIGH(IoC);
176 SkDgWaitTime(IoC, NS2BCLK(T_CLK_HIGH));
177 I2C_CLK_LOW(IoC);
178} /* SkI2cSndBit*/
179
180
181/*
182 * Signal a start to the I2C Bus.
183 *
184 * A start is signaled when data goes to low in a high clock cycle.
185 *
186 * Ends with Clock Low.
187 *
188 * Status: not tested
189 */
190void SkI2cStart(
191SK_IOC IoC) /* I/O Context */
192{
193 /* Init data and Clock to output lines */
194 /* Set Data high */
195 I2C_DATA_OUT(IoC);
196 I2C_DATA_HIGH(IoC);
197 /* Set Clock high */
198 I2C_CLK_HIGH(IoC);
199
200 SkDgWaitTime(IoC, NS2BCLK(T_START_SETUP));
201
202 /* Set Data Low */
203 I2C_DATA_LOW(IoC);
204
205 SkDgWaitTime(IoC, NS2BCLK(T_START_HOLD));
206
207 /* Clock low without Data to Input */
208 I2C_START_COND(IoC);
209
210 SkDgWaitTime(IoC, NS2BCLK(T_CLK_LOW));
211} /* SkI2cStart */
212
213
214void SkI2cStop(
215SK_IOC IoC) /* I/O Context */
216{
217 /* Init data and Clock to output lines */
218 /* Set Data low */
219 I2C_DATA_OUT(IoC);
220 I2C_DATA_LOW(IoC);
221
222 SkDgWaitTime(IoC, NS2BCLK(T_CLK_2_DATA_OUT));
223
224 /* Set Clock high */
225 I2C_CLK_HIGH(IoC);
226
227 SkDgWaitTime(IoC, NS2BCLK(T_STOP_SETUP));
228
229 /*
230 * Set Data High: Do it by setting the Data Line to Input.
231 * Because of a pull-up resistor the Data Line
232 * floats to high.
233 */
234 I2C_DATA_IN(IoC);
235
236 /*
237 * When I2C activity is stopped
238 * o DATA should be set to input and
239 * o CLOCK should be set to high!
240 */
241 SkDgWaitTime(IoC, NS2BCLK(T_BUS_IDLE));
242} /* SkI2cStop */
243
244
245/*
246 * Receive just one bit via the I2C bus.
247 *
248 * Note: Clock must be set to LOW before calling this function.
249 *
250 * Returns The received bit.
251 */
252int SkI2cRcvBit(
253SK_IOC IoC) /* I/O Context */
254{
255 int Bit;
256 SK_U8 I2cSwCtrl;
257
258 /* Init data as input line */
259 I2C_DATA_IN(IoC);
260
261 SkDgWaitTime(IoC, NS2BCLK(T_CLK_2_DATA_OUT));
262
263 I2C_CLK_HIGH(IoC);
264
265 SkDgWaitTime(IoC, NS2BCLK(T_CLK_HIGH));
266
267 SK_I2C_GET_SW(IoC, &I2cSwCtrl);
268
269 Bit = (I2cSwCtrl & I2C_DATA) ? 1 : 0;
270
271 I2C_CLK_LOW(IoC);
272 SkDgWaitTime(IoC, NS2BCLK(T_CLK_LOW-T_CLK_2_DATA_OUT));
273
274 return(Bit);
275} /* SkI2cRcvBit */
276
277
278/*
279 * Receive an ACK.
280 *
281 * returns 0 If acknowledged
282 * 1 in case of an error
283 */
284int SkI2cRcvAck(
285SK_IOC IoC) /* I/O Context */
286{
287 /*
288 * Received bit must be zero.
289 */
290 return(SkI2cRcvBit(IoC) != 0);
291} /* SkI2cRcvAck */
292
293
294/*
295 * Send an NACK.
296 */
297void SkI2cSndNAck(
298SK_IOC IoC) /* I/O Context */
299{
300 /*
301 * A NACK is signaled by sending a '1' bit.
302 */
303 SkI2cSndBit(IoC, 1);
304} /* SkI2cSndNAck */
305
306
307/*
308 * Send an ACK.
309 */
310void SkI2cSndAck(
311SK_IOC IoC) /* I/O Context */
312{
313 /*
314 * An ACK is signaled by sending a '0' bit.
315 */
316 SkI2cSndBit(IoC, 0);
317} /* SkI2cSndAck */
318
319
320/*
321 * Send one byte to the I2C device and wait for ACK.
322 *
323 * Return acknowledged status.
324 */
325int SkI2cSndByte(
326SK_IOC IoC, /* I/O Context */
327int Byte) /* byte to send */
328{
329 int i;
330
331 for (i = 0; i < 8; i++) {
332 if (Byte & (1<<(7-i))) {
333 SkI2cSndBit(IoC, 1);
334 }
335 else {
336 SkI2cSndBit(IoC, 0);
337 }
338 }
339
340 return(SkI2cRcvAck(IoC));
341} /* SkI2cSndByte */
342
343
344/*
345 * Receive one byte and ack it.
346 *
347 * Return byte.
348 */
349int SkI2cRcvByte(
350SK_IOC IoC, /* I/O Context */
351int Last) /* Last Byte Flag */
352{
353 int i;
354 int Byte = 0;
355
356 for (i = 0; i < 8; i++) {
357 Byte <<= 1;
358 Byte |= SkI2cRcvBit(IoC);
359 }
360
361 if (Last) {
362 SkI2cSndNAck(IoC);
363 }
364 else {
365 SkI2cSndAck(IoC);
366 }
367
368 return(Byte);
369} /* SkI2cRcvByte */
370
371
372/*
373 * Start dialog and send device address
374 *
375 * Return 0 if acknowledged, 1 in case of an error
376 */
377int SkI2cSndDev(
378SK_IOC IoC, /* I/O Context */
379int Addr, /* Device Address */
380int Rw) /* Read / Write Flag */
381{
382 SkI2cStart(IoC);
383 Rw = ~Rw;
384 Rw &= I2C_WRITE;
385 return(SkI2cSndByte(IoC, (Addr<<1) | Rw));
386} /* SkI2cSndDev */
387
388#endif /* SK_DIAG */
389
390/*----------------- I2C CTRL Register Functions ----------*/
391
392/*
393 * waits for a completion of an I2C transfer
394 *
395 * returns 0: success, transfer completes
396 * 1: error, transfer does not complete, I2C transfer
397 * killed, wait loop terminated.
398 */
399static int SkI2cWait(
400SK_AC *pAC, /* Adapter Context */
401SK_IOC IoC, /* I/O Context */
402int Event) /* complete event to wait for (I2C_READ or I2C_WRITE) */
403{
404 SK_U64 StartTime;
405 SK_U64 CurrentTime;
406 SK_U32 I2cCtrl;
407
408 StartTime = SkOsGetTime(pAC);
409
410 do {
411 CurrentTime = SkOsGetTime(pAC);
412
413 if (CurrentTime - StartTime > SK_TICKS_PER_SEC / 8) {
414
415 SK_I2C_STOP(IoC);
416#ifndef SK_DIAG
417 SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_I2C_E002, SKERR_I2C_E002MSG);
418#endif /* !SK_DIAG */
419 return(1);
420 }
421
422 SK_I2C_GET_CTL(IoC, &I2cCtrl);
423
424#ifdef xYUKON_DBG
425 printf("StartTime=%lu, CurrentTime=%lu\n",
426 StartTime, CurrentTime);
427 if (kbhit()) {
428 return(1);
429 }
430#endif /* YUKON_DBG */
431
432 } while ((I2cCtrl & I2C_FLAG) == (SK_U32)Event << 31);
433
434 return(0);
435} /* SkI2cWait */
436
437
438/*
439 * waits for the I2C ready interrupt after an I2C transfer
440 *
441 * Returns
442 * Nothing
443 */
444void SkI2cWaitIrq(
445SK_AC *pAC, /* Adapter Context */
446SK_IOC IoC) /* I/O Context */
447{
448 SK_SENSOR *pSen;
449 SK_U64 StartTime;
450 SK_U32 IrqSrc;
451
452 pSen = &pAC->I2c.SenTable[pAC->I2c.CurrSens];
453
454 if (pSen->SenState == SK_SEN_IDLE) {
455 return;
456 }
457
458 StartTime = SkOsGetTime(pAC);
459
460 do {
461 if (SkOsGetTime(pAC) - StartTime > SK_TICKS_PER_SEC / 8) {
462
463 SK_I2C_STOP(IoC);
464#ifndef SK_DIAG
465 SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_I2C_E016, SKERR_I2C_E016MSG);
466#endif /* !SK_DIAG */
467 return;
468 }
469
470 SK_IN32(IoC, B0_ISRC, &IrqSrc);
471
472 } while ((IrqSrc & IS_I2C_READY) == 0);
473
474 pSen->SenState = SK_SEN_IDLE;
475 return;
476} /* SkI2cWaitIrq */
477
478/*
479 * writes a single byte or 4 bytes into the I2C device
480 *
481 * returns 0: success
482 * 1: error
483 */
484static int SkI2cWrite(
485SK_AC *pAC, /* Adapter Context */
486SK_IOC IoC, /* I/O Context */
487SK_U32 I2cData, /* I2C Data to write */
488int I2cDev, /* I2C Device Address */
489int I2cDevSize, /* I2C Device Size (e.g. I2C_025K_DEV or I2C_2K_DEV) */
490int I2cReg, /* I2C Device Register Address */
491int I2cBurst) /* I2C Burst Flag */
492{
493 SK_OUT32(IoC, B2_I2C_DATA, I2cData);
494
495 SK_I2C_CTL(IoC, I2C_WRITE, I2cDev, I2cDevSize, I2cReg, I2cBurst);
496
497 return(SkI2cWait(pAC, IoC, I2C_WRITE));
498} /* SkI2cWrite*/
499
500
501#ifdef SK_DIAG
502/*
503 * reads a single byte or 4 bytes from the I2C device
504 *
505 * returns the word read
506 */
507SK_U32 SkI2cRead(
508SK_AC *pAC, /* Adapter Context */
509SK_IOC IoC, /* I/O Context */
510int I2cDev, /* I2C Device Address */
511int I2cDevSize, /* I2C Device Size (e.g. I2C_025K_DEV or I2C_2K_DEV) */
512int I2cReg, /* I2C Device Register Address */
513int I2cBurst) /* I2C Burst Flag */
514{
515 SK_U32 Data;
516
517 SK_OUT32(IoC, B2_I2C_DATA, 0);
518 SK_I2C_CTL(IoC, I2C_READ, I2cDev, I2cDevSize, I2cReg, I2cBurst);
519
520 if (SkI2cWait(pAC, IoC, I2C_READ) != 0) {
521 w_print("%s\n", SKERR_I2C_E002MSG);
522 }
523
524 SK_IN32(IoC, B2_I2C_DATA, &Data);
525
526 return(Data);
527} /* SkI2cRead */
528#endif /* SK_DIAG */
529
530
531/*
532 * read a sensor's value
533 *
534 * This function reads a sensor's value from the I2C sensor chip. The sensor
535 * is defined by its index into the sensor database in the structure
536 * that pAC points to.
537 * Returns
538 * 1 if the read is completed
539 * 0 if the read must be continued (I2C Bus still allocated)
540 */
541static int SkI2cReadSensor(
542SK_AC *pAC, /* Adapter Context */
543SK_IOC IoC, /* I/O Context */
544SK_SENSOR *pSen) /* Sensor to be read */
545{
546 if (pSen->SenRead != NULL) {
547 return((*pSen->SenRead)(pAC, IoC, pSen));
548 }
549 else {
550 return(0); /* no success */
551 }
552} /* SkI2cReadSensor */
553
554/*
555 * Do the Init state 0 initialization
556 */
557static int SkI2cInit0(
558SK_AC *pAC) /* Adapter Context */
559{
560 int i;
561
562 /* Begin with first sensor */
563 pAC->I2c.CurrSens = 0;
564
565 /* Begin with timeout control for state machine */
566 pAC->I2c.TimerMode = SK_TIMER_WATCH_SM;
567
568 /* Set sensor number to zero */
569 pAC->I2c.MaxSens = 0;
570
571#ifndef SK_DIAG
572 /* Initialize Number of Dummy Reads */
573 pAC->I2c.DummyReads = SK_MAX_SENSORS;
574#endif
575
576 for (i = 0; i < SK_MAX_SENSORS; i++) {
577 pAC->I2c.SenTable[i].SenDesc = "unknown";
578 pAC->I2c.SenTable[i].SenType = SK_SEN_UNKNOWN;
579 pAC->I2c.SenTable[i].SenThreErrHigh = 0;
580 pAC->I2c.SenTable[i].SenThreErrLow = 0;
581 pAC->I2c.SenTable[i].SenThreWarnHigh = 0;
582 pAC->I2c.SenTable[i].SenThreWarnLow = 0;
583 pAC->I2c.SenTable[i].SenReg = LM80_FAN2_IN;
584 pAC->I2c.SenTable[i].SenInit = SK_SEN_DYN_INIT_NONE;
585 pAC->I2c.SenTable[i].SenValue = 0;
586 pAC->I2c.SenTable[i].SenErrFlag = SK_SEN_ERR_NOT_PRESENT;
587 pAC->I2c.SenTable[i].SenErrCts = 0;
588 pAC->I2c.SenTable[i].SenBegErrTS = 0;
589 pAC->I2c.SenTable[i].SenState = SK_SEN_IDLE;
590 pAC->I2c.SenTable[i].SenRead = NULL;
591 pAC->I2c.SenTable[i].SenDev = 0;
592 }
593
594 /* Now the init level is "INIT data" */
595 pAC->I2c.InitLevel = SK_INIT_DATA;
596 return(0);
597} /* SkI2cInit0*/
598
599
600/*
601 * Do the init state 1 initialization
602 *
603 * initialize the following register of the LM80:
604 * Configuration register:
605 * - START, noINT, activeLOW, noINT#Clear, noRESET, noCI, noGPO#, noINIT
606 *
607 * Interrupt Mask Register 1:
608 * - all interrupts are Disabled (0xff)
609 *
610 * Interrupt Mask Register 2:
611 * - all interrupts are Disabled (0xff); the interrupt mode doesn't matter.
612 *
613 * Fan Divisor/RST_OUT register:
614 * - Divisors set to 1 (bits 00), all others 0s.
615 *
616 * OS# Configuration/Temperature resolution Register:
617 * - all 0s
618 *
619 */
620static int SkI2cInit1(
621SK_AC *pAC, /* Adapter Context */
622SK_IOC IoC) /* I/O Context */
623{
624 int i;
625 SK_U8 I2cSwCtrl;
626 SK_GEPORT *pPrt; /* GIni Port struct pointer */
627
628 if (pAC->I2c.InitLevel != SK_INIT_DATA) {
629 /* ReInit not needed in I2C module */
630 return(0);
631 }
632
633 /* Set the Direction of I2C-Data Pin to IN */
634 SK_I2C_CLR_BIT(IoC, I2C_DATA_DIR | I2C_DATA);
635 /* Check for 32-Bit Yukon with Low at I2C-Data Pin */
636 SK_I2C_GET_SW(IoC, &I2cSwCtrl);
637
638 if ((I2cSwCtrl & I2C_DATA) == 0) {
639 /* this is a 32-Bit board */
640 pAC->GIni.GIYukon32Bit = SK_TRUE;
641 return(0);
642 }
643
644 /* Check for 64 Bit Yukon without sensors */
645 if (SkI2cWrite(pAC, IoC, 0, LM80_ADDR, I2C_025K_DEV, LM80_CFG, 0) != 0) {
646 return(0);
647 }
648
649 (void)SkI2cWrite(pAC, IoC, 0xffUL, LM80_ADDR, I2C_025K_DEV, LM80_IMSK_1, 0);
650
651 (void)SkI2cWrite(pAC, IoC, 0xffUL, LM80_ADDR, I2C_025K_DEV, LM80_IMSK_2, 0);
652
653 (void)SkI2cWrite(pAC, IoC, 0, LM80_ADDR, I2C_025K_DEV, LM80_FAN_CTRL, 0);
654
655 (void)SkI2cWrite(pAC, IoC, 0, LM80_ADDR, I2C_025K_DEV, LM80_TEMP_CTRL, 0);
656
657 (void)SkI2cWrite(pAC, IoC, (SK_U32)LM80_CFG_START, LM80_ADDR, I2C_025K_DEV,
658 LM80_CFG, 0);
659
660 /*
661 * MaxSens has to be updated here, because PhyType is not
662 * set when performing Init Level 0
663 */
664 pAC->I2c.MaxSens = 5;
665
666 pPrt = &pAC->GIni.GP[0];
667
668 if (pAC->GIni.GIGenesis) {
669 if (pPrt->PhyType == SK_PHY_BCOM) {
670 if (pAC->GIni.GIMacsFound == 1) {
671 pAC->I2c.MaxSens += 1;
672 }
673 else {
674 pAC->I2c.MaxSens += 3;
675 }
676 }
677 }
678 else {
679 pAC->I2c.MaxSens += 3;
680 }
681
682 for (i = 0; i < pAC->I2c.MaxSens; i++) {
683 switch (i) {
684 case 0:
685 pAC->I2c.SenTable[i].SenDesc = "Temperature";
686 pAC->I2c.SenTable[i].SenType = SK_SEN_TEMP;
687 pAC->I2c.SenTable[i].SenThreErrHigh = SK_SEN_TEMP_HIGH_ERR;
688 pAC->I2c.SenTable[i].SenThreWarnHigh = SK_SEN_TEMP_HIGH_WARN;
689 pAC->I2c.SenTable[i].SenThreWarnLow = SK_SEN_TEMP_LOW_WARN;
690 pAC->I2c.SenTable[i].SenThreErrLow = SK_SEN_TEMP_LOW_ERR;
691 pAC->I2c.SenTable[i].SenReg = LM80_TEMP_IN;
692 break;
693 case 1:
694 pAC->I2c.SenTable[i].SenDesc = "Voltage PCI";
695 pAC->I2c.SenTable[i].SenType = SK_SEN_VOLT;
696 pAC->I2c.SenTable[i].SenThreErrHigh = SK_SEN_PCI_5V_HIGH_ERR;
697 pAC->I2c.SenTable[i].SenThreWarnHigh = SK_SEN_PCI_5V_HIGH_WARN;
698 pAC->I2c.SenTable[i].SenThreWarnLow = SK_SEN_PCI_5V_LOW_WARN;
699 pAC->I2c.SenTable[i].SenThreErrLow = SK_SEN_PCI_5V_LOW_ERR;
700 pAC->I2c.SenTable[i].SenReg = LM80_VT0_IN;
701 break;
702 case 2:
703 pAC->I2c.SenTable[i].SenDesc = "Voltage PCI-IO";
704 pAC->I2c.SenTable[i].SenType = SK_SEN_VOLT;
705 pAC->I2c.SenTable[i].SenThreErrHigh = SK_SEN_PCI_IO_5V_HIGH_ERR;
706 pAC->I2c.SenTable[i].SenThreWarnHigh = SK_SEN_PCI_IO_5V_HIGH_WARN;
707 pAC->I2c.SenTable[i].SenThreWarnLow = SK_SEN_PCI_IO_3V3_LOW_WARN;
708 pAC->I2c.SenTable[i].SenThreErrLow = SK_SEN_PCI_IO_3V3_LOW_ERR;
709 pAC->I2c.SenTable[i].SenReg = LM80_VT1_IN;
710 pAC->I2c.SenTable[i].SenInit = SK_SEN_DYN_INIT_PCI_IO;
711 break;
712 case 3:
713 pAC->I2c.SenTable[i].SenDesc = "Voltage ASIC";
714 pAC->I2c.SenTable[i].SenType = SK_SEN_VOLT;
715 pAC->I2c.SenTable[i].SenThreErrHigh = SK_SEN_VDD_HIGH_ERR;
716 pAC->I2c.SenTable[i].SenThreWarnHigh = SK_SEN_VDD_HIGH_WARN;
717 pAC->I2c.SenTable[i].SenThreWarnLow = SK_SEN_VDD_LOW_WARN;
718 pAC->I2c.SenTable[i].SenThreErrLow = SK_SEN_VDD_LOW_ERR;
719 pAC->I2c.SenTable[i].SenReg = LM80_VT2_IN;
720 break;
721 case 4:
722 if (pAC->GIni.GIGenesis) {
723 if (pPrt->PhyType == SK_PHY_BCOM) {
724 pAC->I2c.SenTable[i].SenDesc = "Voltage PHY A PLL";
725 pAC->I2c.SenTable[i].SenThreErrHigh = SK_SEN_PLL_3V3_HIGH_ERR;
726 pAC->I2c.SenTable[i].SenThreWarnHigh = SK_SEN_PLL_3V3_HIGH_WARN;
727 pAC->I2c.SenTable[i].SenThreWarnLow = SK_SEN_PLL_3V3_LOW_WARN;
728 pAC->I2c.SenTable[i].SenThreErrLow = SK_SEN_PLL_3V3_LOW_ERR;
729 }
730 else {
731 pAC->I2c.SenTable[i].SenDesc = "Voltage PMA";
732 pAC->I2c.SenTable[i].SenThreErrHigh = SK_SEN_PLL_3V3_HIGH_ERR;
733 pAC->I2c.SenTable[i].SenThreWarnHigh = SK_SEN_PLL_3V3_HIGH_WARN;
734 pAC->I2c.SenTable[i].SenThreWarnLow = SK_SEN_PLL_3V3_LOW_WARN;
735 pAC->I2c.SenTable[i].SenThreErrLow = SK_SEN_PLL_3V3_LOW_ERR;
736 }
737 }
738 else {
739 pAC->I2c.SenTable[i].SenDesc = "Voltage VAUX";
740 pAC->I2c.SenTable[i].SenThreErrHigh = SK_SEN_VAUX_3V3_HIGH_ERR;
741 pAC->I2c.SenTable[i].SenThreWarnHigh = SK_SEN_VAUX_3V3_HIGH_WARN;
742 if (pAC->GIni.GIVauxAvail) {
743 pAC->I2c.SenTable[i].SenThreWarnLow = SK_SEN_VAUX_3V3_LOW_WARN;
744 pAC->I2c.SenTable[i].SenThreErrLow = SK_SEN_VAUX_3V3_LOW_ERR;
745 }
746 else {
747 pAC->I2c.SenTable[i].SenThreErrLow = SK_SEN_VAUX_0V_WARN_ERR;
748 pAC->I2c.SenTable[i].SenThreWarnLow = SK_SEN_VAUX_0V_WARN_ERR;
749 }
750 }
751 pAC->I2c.SenTable[i].SenType = SK_SEN_VOLT;
752 pAC->I2c.SenTable[i].SenReg = LM80_VT3_IN;
753 break;
754 case 5:
755 if (pAC->GIni.GIGenesis) {
756 pAC->I2c.SenTable[i].SenDesc = "Voltage PHY 2V5";
757 pAC->I2c.SenTable[i].SenThreErrHigh = SK_SEN_PHY_2V5_HIGH_ERR;
758 pAC->I2c.SenTable[i].SenThreWarnHigh = SK_SEN_PHY_2V5_HIGH_WARN;
759 pAC->I2c.SenTable[i].SenThreWarnLow = SK_SEN_PHY_2V5_LOW_WARN;
760 pAC->I2c.SenTable[i].SenThreErrLow = SK_SEN_PHY_2V5_LOW_ERR;
761 }
762 else {
763 pAC->I2c.SenTable[i].SenDesc = "Voltage Core 1V5";
764 pAC->I2c.SenTable[i].SenThreErrHigh = SK_SEN_CORE_1V5_HIGH_ERR;
765 pAC->I2c.SenTable[i].SenThreWarnHigh = SK_SEN_CORE_1V5_HIGH_WARN;
766 pAC->I2c.SenTable[i].SenThreWarnLow = SK_SEN_CORE_1V5_LOW_WARN;
767 pAC->I2c.SenTable[i].SenThreErrLow = SK_SEN_CORE_1V5_LOW_ERR;
768 }
769 pAC->I2c.SenTable[i].SenType = SK_SEN_VOLT;
770 pAC->I2c.SenTable[i].SenReg = LM80_VT4_IN;
771 break;
772 case 6:
773 if (pAC->GIni.GIGenesis) {
774 pAC->I2c.SenTable[i].SenDesc = "Voltage PHY B PLL";
775 }
776 else {
777 pAC->I2c.SenTable[i].SenDesc = "Voltage PHY 3V3";
778 }
779 pAC->I2c.SenTable[i].SenType = SK_SEN_VOLT;
780 pAC->I2c.SenTable[i].SenThreErrHigh = SK_SEN_PLL_3V3_HIGH_ERR;
781 pAC->I2c.SenTable[i].SenThreWarnHigh = SK_SEN_PLL_3V3_HIGH_WARN;
782 pAC->I2c.SenTable[i].SenThreWarnLow = SK_SEN_PLL_3V3_LOW_WARN;
783 pAC->I2c.SenTable[i].SenThreErrLow = SK_SEN_PLL_3V3_LOW_ERR;
784 pAC->I2c.SenTable[i].SenReg = LM80_VT5_IN;
785 break;
786 case 7:
787 if (pAC->GIni.GIGenesis) {
788 pAC->I2c.SenTable[i].SenDesc = "Speed Fan";
789 pAC->I2c.SenTable[i].SenType = SK_SEN_FAN;
790 pAC->I2c.SenTable[i].SenThreErrHigh = SK_SEN_FAN_HIGH_ERR;
791 pAC->I2c.SenTable[i].SenThreWarnHigh = SK_SEN_FAN_HIGH_WARN;
792 pAC->I2c.SenTable[i].SenThreWarnLow = SK_SEN_FAN_LOW_WARN;
793 pAC->I2c.SenTable[i].SenThreErrLow = SK_SEN_FAN_LOW_ERR;
794 pAC->I2c.SenTable[i].SenReg = LM80_FAN2_IN;
795 }
796 else {
797 pAC->I2c.SenTable[i].SenDesc = "Voltage PHY 2V5";
798 pAC->I2c.SenTable[i].SenType = SK_SEN_VOLT;
799 pAC->I2c.SenTable[i].SenThreErrHigh = SK_SEN_PHY_2V5_HIGH_ERR;
800 pAC->I2c.SenTable[i].SenThreWarnHigh = SK_SEN_PHY_2V5_HIGH_WARN;
801 pAC->I2c.SenTable[i].SenThreWarnLow = SK_SEN_PHY_2V5_LOW_WARN;
802 pAC->I2c.SenTable[i].SenThreErrLow = SK_SEN_PHY_2V5_LOW_ERR;
803 pAC->I2c.SenTable[i].SenReg = LM80_VT6_IN;
804 }
805 break;
806 default:
807 SK_ERR_LOG(pAC, SK_ERRCL_INIT | SK_ERRCL_SW,
808 SKERR_I2C_E001, SKERR_I2C_E001MSG);
809 break;
810 }
811
812 pAC->I2c.SenTable[i].SenValue = 0;
813 pAC->I2c.SenTable[i].SenErrFlag = SK_SEN_ERR_OK;
814 pAC->I2c.SenTable[i].SenErrCts = 0;
815 pAC->I2c.SenTable[i].SenBegErrTS = 0;
816 pAC->I2c.SenTable[i].SenState = SK_SEN_IDLE;
817 pAC->I2c.SenTable[i].SenRead = SkLm80ReadSensor;
818 pAC->I2c.SenTable[i].SenDev = LM80_ADDR;
819 }
820
821#ifndef SK_DIAG
822 pAC->I2c.DummyReads = pAC->I2c.MaxSens;
823#endif /* !SK_DIAG */
824
825 /* Clear I2C IRQ */
826 SK_OUT32(IoC, B2_I2C_IRQ, I2C_CLR_IRQ);
827
828 /* Now we are I/O initialized */
829 pAC->I2c.InitLevel = SK_INIT_IO;
830 return(0);
831} /* SkI2cInit1 */
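/*
 * Illustrative sketch (not driver code): the init level 1 register writes
 * performed by SkI2cInit1() above, condensed into a table-driven form. The
 * register names mirror the LM80_* symbols used by the driver; write_lm80()
 * is a stand-in for SkI2cWrite(), the presence-probe write of LM80_CFG is
 * omitted, and the LM80_CFG value assumes LM80_CFG_START is bit 0 of the
 * LM80 configuration register.
 */
#include <stdio.h>
#include <stddef.h>

struct lm80_init_entry {
	const char   *reg;   /* register name as used by the driver */
	unsigned char val;   /* value written at init level 1 */
};

static const struct lm80_init_entry lm80_init_seq[] = {
	{ "LM80_IMSK_1",    0xff },  /* mask all interrupts, bank 1 */
	{ "LM80_IMSK_2",    0xff },  /* mask all interrupts, bank 2 */
	{ "LM80_FAN_CTRL",  0x00 },  /* fan divisors = 1, RST_OUT = 0 */
	{ "LM80_TEMP_CTRL", 0x00 },  /* OS# config / temp resolution = 0 */
	{ "LM80_CFG",       0x01 },  /* START: begin monitoring (assumed bit 0) */
};

static void write_lm80(const char *reg, unsigned char val)
{
	/* stand-in for SkI2cWrite(pAC, IoC, val, LM80_ADDR, I2C_025K_DEV, reg, 0) */
	printf("LM80 write %-14s <- 0x%02x\n", reg, val);
}

int main(void)
{
	for (size_t i = 0; i < sizeof(lm80_init_seq) / sizeof(lm80_init_seq[0]); i++)
		write_lm80(lm80_init_seq[i].reg, lm80_init_seq[i].val);
	return 0;
}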
832
833
834/*
835 * Init level 2: Start first sensor read.
836 */
837static int SkI2cInit2(
838SK_AC *pAC, /* Adapter Context */
839SK_IOC IoC) /* I/O Context */
840{
841 int ReadComplete;
842 SK_SENSOR *pSen;
843
844 if (pAC->I2c.InitLevel != SK_INIT_IO) {
845 /* ReInit not needed in I2C module */
846		/* Jumping from init level 0 directly to level 2 is not permitted */
847 return(0);
848 }
849
850 pSen = &pAC->I2c.SenTable[pAC->I2c.CurrSens];
851 ReadComplete = SkI2cReadSensor(pAC, IoC, pSen);
852
853 if (ReadComplete) {
854 SK_ERR_LOG(pAC, SK_ERRCL_INIT, SKERR_I2C_E008, SKERR_I2C_E008MSG);
855 }
856
857 /* Now we are correctly initialized */
858 pAC->I2c.InitLevel = SK_INIT_RUN;
859
860 return(0);
861} /* SkI2cInit2*/
862
863
864/*
865 * Initialize I2C devices
866 *
867 * Get the first voltage value and discard it.
868 * Go into temperature read mode. A default pointer is not set.
869 *
870 * The things to be done depend on the init level in the parameter list:
871 * Level 0:
872 * Initialize only the data structures. Do NOT access hardware.
873 * Level 1:
874 * Initialize hardware through SK_IN / SK_OUT commands. Do NOT use interrupts.
875 * Level 2:
876 * Everything is possible. Interrupts may be used from now on.
877 *
878 * return:
879 * 0 = success
880 * other = error.
881 */
882int SkI2cInit(
883SK_AC *pAC, /* Adapter Context */
884SK_IOC IoC, /* I/O Context needed in levels 1 and 2 */
885int Level) /* Init Level */
886{
887
888 switch (Level) {
889 case SK_INIT_DATA:
890 return(SkI2cInit0(pAC));
891 case SK_INIT_IO:
892 return(SkI2cInit1(pAC, IoC));
893 case SK_INIT_RUN:
894 return(SkI2cInit2(pAC, IoC));
895 default:
896 break;
897 }
898
899 return(0);
900} /* SkI2cInit */
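/*
 * Standalone sketch (illustrative only) of the staged init pattern that
 * SkI2cInit() above dispatches on: level DATA touches only data structures,
 * level IO may program hardware registers but must not rely on interrupts,
 * and level RUN may use interrupts and timers. The enum values and the
 * my_init_* helpers are invented for this example.
 */
#include <stdio.h>

enum init_level { INIT_DATA, INIT_IO, INIT_RUN };

static int my_init_data(void) { puts("init: data structures only");       return 0; }
static int my_init_io(void)   { puts("init: program registers, no IRQs"); return 0; }
static int my_init_run(void)  { puts("init: timers and IRQs allowed");    return 0; }

static int my_init(enum init_level level)
{
	switch (level) {
	case INIT_DATA: return my_init_data();
	case INIT_IO:   return my_init_io();
	case INIT_RUN:  return my_init_run();
	}
	return 0;
}

int main(void)
{
	/* A driver would run the levels in this order; a non-zero result aborts. */
	return my_init(INIT_DATA) || my_init(INIT_IO) || my_init(INIT_RUN);
}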
901
902
903#ifndef SK_DIAG
904
905/*
906 * Interrupt service function for the I2C Interface
907 *
908 * Clears the Interrupt source
909 *
910 * Reads the register and checks whether a trap must be sent.
911 *
912 * Starts the timer if necessary.
913 */
914void SkI2cIsr(
915SK_AC *pAC, /* Adapter Context */
916SK_IOC IoC) /* I/O Context */
917{
918 SK_EVPARA Para;
919
920 /* Clear I2C IRQ */
921 SK_OUT32(IoC, B2_I2C_IRQ, I2C_CLR_IRQ);
922
923 Para.Para64 = 0;
924 SkEventQueue(pAC, SKGE_I2C, SK_I2CEV_IRQ, Para);
925} /* SkI2cIsr */
926
927
928/*
929 * Check this sensor's value against the thresholds and send events.
930 */
931static void SkI2cCheckSensor(
932SK_AC *pAC, /* Adapter Context */
933SK_SENSOR *pSen)
934{
935 SK_EVPARA ParaLocal;
936 SK_BOOL TooHigh; /* Is sensor too high? */
937 SK_BOOL TooLow; /* Is sensor too low? */
938 SK_U64 CurrTime; /* Current Time */
939 SK_BOOL DoTrapSend; /* We need to send a trap */
940 SK_BOOL DoErrLog; /* We need to log the error */
941	SK_BOOL		IsError;	/* Error threshold exceeded */
942
943 /* Check Dummy Reads first */
944 if (pAC->I2c.DummyReads > 0) {
945 pAC->I2c.DummyReads--;
946 return;
947 }
948
949 /* Get the current time */
950 CurrTime = SkOsGetTime(pAC);
951
952 /* Set para to the most useful setting: The current sensor. */
953 ParaLocal.Para64 = (SK_U64)pAC->I2c.CurrSens;
954
955 /* Check the Value against the thresholds. First: Error Thresholds */
956 TooHigh = (pSen->SenValue > pSen->SenThreErrHigh);
957 TooLow = (pSen->SenValue < pSen->SenThreErrLow);
958
959 IsError = SK_FALSE;
960 if (TooHigh || TooLow) {
961 /* Error condition is satisfied */
962 DoTrapSend = SK_TRUE;
963 DoErrLog = SK_TRUE;
964
965 /* Now error condition is satisfied */
966 IsError = SK_TRUE;
967
968 if (pSen->SenErrFlag == SK_SEN_ERR_ERR) {
969 /* This state is the former one */
970
971 /* So check first whether we have to send a trap */
972 if (pSen->SenLastErrTrapTS + SK_SEN_ERR_TR_HOLD >
973 CurrTime) {
974 /*
975 * Do NOT send the Trap. The hold back time
976 * has to run out first.
977 */
978 DoTrapSend = SK_FALSE;
979 }
980
981 /* Check now whether we have to log an Error */
982 if (pSen->SenLastErrLogTS + SK_SEN_ERR_LOG_HOLD >
983 CurrTime) {
984 /*
985 * Do NOT log the error. The hold back time
986 * has to run out first.
987 */
988 DoErrLog = SK_FALSE;
989 }
990 }
991 else {
992 /* We came from a different state -> Set Begin Time Stamp */
993 pSen->SenBegErrTS = CurrTime;
994 pSen->SenErrFlag = SK_SEN_ERR_ERR;
995 }
996
997 if (DoTrapSend) {
998 /* Set current Time */
999 pSen->SenLastErrTrapTS = CurrTime;
1000 pSen->SenErrCts++;
1001
1002 /* Queue PNMI Event */
1003 SkEventQueue(pAC, SKGE_PNMI, (TooHigh ?
1004 SK_PNMI_EVT_SEN_ERR_UPP :
1005 SK_PNMI_EVT_SEN_ERR_LOW),
1006 ParaLocal);
1007 }
1008
1009 if (DoErrLog) {
1010 /* Set current Time */
1011 pSen->SenLastErrLogTS = CurrTime;
1012
1013 if (pSen->SenType == SK_SEN_TEMP) {
1014 SK_ERR_LOG(pAC, SK_ERRCL_HW, SKERR_I2C_E011, SKERR_I2C_E011MSG);
1015 }
1016 else if (pSen->SenType == SK_SEN_VOLT) {
1017 SK_ERR_LOG(pAC, SK_ERRCL_HW, SKERR_I2C_E012, SKERR_I2C_E012MSG);
1018 }
1019 else {
1020 SK_ERR_LOG(pAC, SK_ERRCL_HW, SKERR_I2C_E015, SKERR_I2C_E015MSG);
1021 }
1022 }
1023 }
1024
1025 /* Check the Value against the thresholds */
1026 /* 2nd: Warning thresholds */
1027 TooHigh = (pSen->SenValue > pSen->SenThreWarnHigh);
1028 TooLow = (pSen->SenValue < pSen->SenThreWarnLow);
1029
1030 if (!IsError && (TooHigh || TooLow)) {
1031 /* Error condition is satisfied */
1032 DoTrapSend = SK_TRUE;
1033 DoErrLog = SK_TRUE;
1034
1035 if (pSen->SenErrFlag == SK_SEN_ERR_WARN) {
1036 /* This state is the former one */
1037
1038 /* So check first whether we have to send a trap */
1039 if (pSen->SenLastWarnTrapTS + SK_SEN_WARN_TR_HOLD > CurrTime) {
1040 /*
1041 * Do NOT send the Trap. The hold back time
1042 * has to run out first.
1043 */
1044 DoTrapSend = SK_FALSE;
1045 }
1046
1047 /* Check now whether we have to log an Error */
1048 if (pSen->SenLastWarnLogTS + SK_SEN_WARN_LOG_HOLD > CurrTime) {
1049 /*
1050 * Do NOT log the error. The hold back time
1051 * has to run out first.
1052 */
1053 DoErrLog = SK_FALSE;
1054 }
1055 }
1056 else {
1057 /* We came from a different state -> Set Begin Time Stamp */
1058 pSen->SenBegWarnTS = CurrTime;
1059 pSen->SenErrFlag = SK_SEN_ERR_WARN;
1060 }
1061
1062 if (DoTrapSend) {
1063 /* Set current Time */
1064 pSen->SenLastWarnTrapTS = CurrTime;
1065 pSen->SenWarnCts++;
1066
1067 /* Queue PNMI Event */
1068 SkEventQueue(pAC, SKGE_PNMI, (TooHigh ?
1069 SK_PNMI_EVT_SEN_WAR_UPP :
1070 SK_PNMI_EVT_SEN_WAR_LOW),
1071 ParaLocal);
1072 }
1073
1074 if (DoErrLog) {
1075 /* Set current Time */
1076 pSen->SenLastWarnLogTS = CurrTime;
1077
1078 if (pSen->SenType == SK_SEN_TEMP) {
1079 SK_ERR_LOG(pAC, SK_ERRCL_HW, SKERR_I2C_E009, SKERR_I2C_E009MSG);
1080 }
1081 else if (pSen->SenType == SK_SEN_VOLT) {
1082 SK_ERR_LOG(pAC, SK_ERRCL_HW, SKERR_I2C_E010, SKERR_I2C_E010MSG);
1083 }
1084 else {
1085 SK_ERR_LOG(pAC, SK_ERRCL_HW, SKERR_I2C_E014, SKERR_I2C_E014MSG);
1086 }
1087 }
1088 }
1089
1090 /* Check for NO error at all */
1091 if (!IsError && !TooHigh && !TooLow) {
1092 /* Set o.k. Status if no error and no warning condition */
1093 pSen->SenErrFlag = SK_SEN_ERR_OK;
1094 }
1095
1096 /* End of check against the thresholds */
1097
1098 /* Bug fix AF: 16.Aug.2001: Correct the init base
1099 * of LM80 sensor.
1100 */
1101 if (pSen->SenInit == SK_SEN_DYN_INIT_PCI_IO) {
1102
1103 pSen->SenInit = SK_SEN_DYN_INIT_NONE;
1104
1105 if (pSen->SenValue > SK_SEN_PCI_IO_RANGE_LIMITER) {
1106 /* 5V PCI-IO Voltage */
1107 pSen->SenThreWarnLow = SK_SEN_PCI_IO_5V_LOW_WARN;
1108 pSen->SenThreErrLow = SK_SEN_PCI_IO_5V_LOW_ERR;
1109 }
1110 else {
1111 /* 3.3V PCI-IO Voltage */
1112 pSen->SenThreWarnHigh = SK_SEN_PCI_IO_3V3_HIGH_WARN;
1113 pSen->SenThreErrHigh = SK_SEN_PCI_IO_3V3_HIGH_ERR;
1114 }
1115 }
1116
1117#ifdef TEST_ONLY
1118 /* Dynamic thresholds also for VAUX of LM80 sensor */
1119 if (pSen->SenInit == SK_SEN_DYN_INIT_VAUX) {
1120
1121 pSen->SenInit = SK_SEN_DYN_INIT_NONE;
1122
1123 /* 3.3V VAUX Voltage */
1124 if (pSen->SenValue > SK_SEN_VAUX_RANGE_LIMITER) {
1125 pSen->SenThreWarnLow = SK_SEN_VAUX_3V3_LOW_WARN;
1126 pSen->SenThreErrLow = SK_SEN_VAUX_3V3_LOW_ERR;
1127 }
1128 /* 0V VAUX Voltage */
1129 else {
1130 pSen->SenThreWarnHigh = SK_SEN_VAUX_0V_WARN_ERR;
1131 pSen->SenThreErrHigh = SK_SEN_VAUX_0V_WARN_ERR;
1132 }
1133 }
1134
1135 /*
1136 * Check initialization state:
1137 * The VIO Thresholds need adaption
1138 */
1139 if (!pSen->SenInit && pSen->SenReg == LM80_VT1_IN &&
1140 pSen->SenValue > SK_SEN_WARNLOW2C &&
1141 pSen->SenValue < SK_SEN_WARNHIGH2) {
1142 pSen->SenThreErrLow = SK_SEN_ERRLOW2C;
1143 pSen->SenThreWarnLow = SK_SEN_WARNLOW2C;
1144 pSen->SenInit = SK_TRUE;
1145 }
1146
1147 if (!pSen->SenInit && pSen->SenReg == LM80_VT1_IN &&
1148 pSen->SenValue > SK_SEN_WARNLOW2 &&
1149 pSen->SenValue < SK_SEN_WARNHIGH2C) {
1150 pSen->SenThreErrHigh = SK_SEN_ERRHIGH2C;
1151 pSen->SenThreWarnHigh = SK_SEN_WARNHIGH2C;
1152 pSen->SenInit = SK_TRUE;
1153 }
1154#endif
1155
1156 if (pSen->SenInit != SK_SEN_DYN_INIT_NONE) {
1157 SK_ERR_LOG(pAC, SK_ERRCL_HW, SKERR_I2C_E013, SKERR_I2C_E013MSG);
1158 }
1159} /* SkI2cCheckSensor */
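/*
 * Standalone sketch (illustrative, values assumed) of the rate-limited
 * alerting scheme used by SkI2cCheckSensor() above: an out-of-range value
 * raises a trap, but repeats are suppressed until a hold-back period has
 * elapsed. Only the upper error threshold is shown; the driver applies the
 * same idea to lower thresholds, warning levels, and error-log entries.
 */
#include <stdio.h>

#define THRESHOLD_HIGH  80
#define TRAP_HOLD_TIME  60   /* suppress repeated traps for 60 time units */

static long last_trap_time = -TRAP_HOLD_TIME;   /* allow the very first trap */

static void check_sensor(int value, long now)
{
	if (value <= THRESHOLD_HIGH)
		return;                                 /* in range: nothing to do */

	if (last_trap_time + TRAP_HOLD_TIME > now) {
		printf("t=%ld value=%d: over limit, trap suppressed\n", now, value);
		return;                                 /* hold-back still running */
	}

	last_trap_time = now;
	printf("t=%ld value=%d: over limit, TRAP sent\n", now, value);
}

int main(void)
{
	check_sensor(85, 0);    /* first violation: trap        */
	check_sensor(86, 10);   /* within hold-back: suppressed */
	check_sensor(87, 70);   /* hold-back expired: trap      */
	return 0;
}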
1160
1161
1162/*
1163 * Handle the I2C module events: sensor IRQ, state machine
1164 * timeout, and clearing of the sensor error statistics.
1165 */
1166int SkI2cEvent(
1167SK_AC *pAC, /* Adapter Context */
1168SK_IOC IoC, /* I/O Context */
1169SK_U32 Event, /* Module specific Event */
1170SK_EVPARA Para) /* Event specific Parameter */
1171{
1172 int ReadComplete;
1173 SK_SENSOR *pSen;
1174 SK_U32 Time;
1175 SK_EVPARA ParaLocal;
1176 int i;
1177
1178 /* New case: no sensors */
1179 if (pAC->I2c.MaxSens == 0) {
1180 return(0);
1181 }
1182
1183 switch (Event) {
1184 case SK_I2CEV_IRQ:
1185 pSen = &pAC->I2c.SenTable[pAC->I2c.CurrSens];
1186 ReadComplete = SkI2cReadSensor(pAC, IoC, pSen);
1187
1188 if (ReadComplete) {
1189 /* Check sensor against defined thresholds */
1190 SkI2cCheckSensor(pAC, pSen);
1191
1192 /* Increment Current sensor and set appropriate Timeout */
1193 pAC->I2c.CurrSens++;
1194 if (pAC->I2c.CurrSens >= pAC->I2c.MaxSens) {
1195 pAC->I2c.CurrSens = 0;
1196 Time = SK_I2C_TIM_LONG;
1197 }
1198 else {
1199 Time = SK_I2C_TIM_SHORT;
1200 }
1201
1202 /* Start Timer */
1203 ParaLocal.Para64 = (SK_U64)0;
1204
1205 pAC->I2c.TimerMode = SK_TIMER_NEW_GAUGING;
1206
1207 SkTimerStart(pAC, IoC, &pAC->I2c.SenTimer, Time,
1208 SKGE_I2C, SK_I2CEV_TIM, ParaLocal);
1209 }
1210 else {
1211 /* Start Timer */
1212 ParaLocal.Para64 = (SK_U64)0;
1213
1214 pAC->I2c.TimerMode = SK_TIMER_WATCH_SM;
1215
1216 SkTimerStart(pAC, IoC, &pAC->I2c.SenTimer, SK_I2C_TIM_WATCH,
1217 SKGE_I2C, SK_I2CEV_TIM, ParaLocal);
1218 }
1219 break;
1220 case SK_I2CEV_TIM:
1221 if (pAC->I2c.TimerMode == SK_TIMER_NEW_GAUGING) {
1222
1223 ParaLocal.Para64 = (SK_U64)0;
1224 SkTimerStop(pAC, IoC, &pAC->I2c.SenTimer);
1225
1226 pSen = &pAC->I2c.SenTable[pAC->I2c.CurrSens];
1227 ReadComplete = SkI2cReadSensor(pAC, IoC, pSen);
1228
1229 if (ReadComplete) {
1230 /* Check sensor against defined thresholds */
1231 SkI2cCheckSensor(pAC, pSen);
1232
1233 /* Increment Current sensor and set appropriate Timeout */
1234 pAC->I2c.CurrSens++;
1235 if (pAC->I2c.CurrSens == pAC->I2c.MaxSens) {
1236 pAC->I2c.CurrSens = 0;
1237 Time = SK_I2C_TIM_LONG;
1238 }
1239 else {
1240 Time = SK_I2C_TIM_SHORT;
1241 }
1242
1243 /* Start Timer */
1244 ParaLocal.Para64 = (SK_U64)0;
1245
1246 pAC->I2c.TimerMode = SK_TIMER_NEW_GAUGING;
1247
1248 SkTimerStart(pAC, IoC, &pAC->I2c.SenTimer, Time,
1249 SKGE_I2C, SK_I2CEV_TIM, ParaLocal);
1250 }
1251 }
1252 else {
1253 pSen = &pAC->I2c.SenTable[pAC->I2c.CurrSens];
1254 pSen->SenErrFlag = SK_SEN_ERR_FAULTY;
1255 SK_I2C_STOP(IoC);
1256
1257 /* Increment Current sensor and set appropriate Timeout */
1258 pAC->I2c.CurrSens++;
1259 if (pAC->I2c.CurrSens == pAC->I2c.MaxSens) {
1260 pAC->I2c.CurrSens = 0;
1261 Time = SK_I2C_TIM_LONG;
1262 }
1263 else {
1264 Time = SK_I2C_TIM_SHORT;
1265 }
1266
1267 /* Start Timer */
1268 ParaLocal.Para64 = (SK_U64)0;
1269
1270 pAC->I2c.TimerMode = SK_TIMER_NEW_GAUGING;
1271
1272 SkTimerStart(pAC, IoC, &pAC->I2c.SenTimer, Time,
1273 SKGE_I2C, SK_I2CEV_TIM, ParaLocal);
1274 }
1275 break;
1276 case SK_I2CEV_CLEAR:
1277 for (i = 0; i < SK_MAX_SENSORS; i++) {
1278 pAC->I2c.SenTable[i].SenErrFlag = SK_SEN_ERR_OK;
1279 pAC->I2c.SenTable[i].SenErrCts = 0;
1280 pAC->I2c.SenTable[i].SenWarnCts = 0;
1281 pAC->I2c.SenTable[i].SenBegErrTS = 0;
1282 pAC->I2c.SenTable[i].SenBegWarnTS = 0;
1283 pAC->I2c.SenTable[i].SenLastErrTrapTS = (SK_U64)0;
1284 pAC->I2c.SenTable[i].SenLastErrLogTS = (SK_U64)0;
1285 pAC->I2c.SenTable[i].SenLastWarnTrapTS = (SK_U64)0;
1286 pAC->I2c.SenTable[i].SenLastWarnLogTS = (SK_U64)0;
1287 }
1288 break;
1289 default:
1290 SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_I2C_E006, SKERR_I2C_E006MSG);
1291 }
1292
1293 return(0);
1294} /* SkI2cEvent*/
1295
1296#endif /* !SK_DIAG */
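/*
 * Sketch (not driver code) of the polling schedule implemented by
 * SkI2cEvent() above: sensors are read round-robin, with a short delay
 * between two sensors of one cycle and a long delay after the last sensor
 * before the cycle restarts. The sensor count and interval values are
 * arbitrary example numbers, not the driver's SK_I2C_TIM_* constants.
 */
#include <stdio.h>

#define NUM_SENSORS  5
#define TIM_SHORT    1    /* delay between two sensors of one cycle */
#define TIM_LONG     20   /* delay between two full polling cycles  */

int main(void)
{
	int curr = 0;

	for (int step = 0; step < 12; step++) {
		int read_now = curr;
		int next_delay;

		curr++;                       /* advance to the next sensor ...  */
		if (curr >= NUM_SENSORS) {    /* ... wrapping after the last one */
			curr = 0;
			next_delay = TIM_LONG;
		} else {
			next_delay = TIM_SHORT;
		}
		printf("read sensor %d, next read in %d ticks\n", read_now, next_delay);
	}
	return 0;
}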
diff --git a/drivers/net/sk98lin/sklm80.c b/drivers/net/sk98lin/sklm80.c
deleted file mode 100644
index a204f5bb55d4..000000000000
--- a/drivers/net/sk98lin/sklm80.c
+++ /dev/null
@@ -1,141 +0,0 @@
1/******************************************************************************
2 *
3 * Name: sklm80.c
4 * Project: Gigabit Ethernet Adapters, TWSI-Module
5 * Version: $Revision: 1.22 $
6 * Date: $Date: 2003/10/20 09:08:21 $
7 * Purpose: Functions to access Voltage and Temperature Sensor (LM80)
8 *
9 ******************************************************************************/
10
11/******************************************************************************
12 *
13 * (C)Copyright 1998-2002 SysKonnect.
14 * (C)Copyright 2002-2003 Marvell.
15 *
16 * This program is free software; you can redistribute it and/or modify
17 * it under the terms of the GNU General Public License as published by
18 * the Free Software Foundation; either version 2 of the License, or
19 * (at your option) any later version.
20 *
21 * The information in this file is provided "AS IS" without warranty.
22 *
23 ******************************************************************************/
24
25/*
26 LM80 functions
27*/
28#if (defined(DEBUG) || ((!defined(LINT)) && (!defined(SK_SLIM))))
29static const char SysKonnectFileId[] =
30 "@(#) $Id: sklm80.c,v 1.22 2003/10/20 09:08:21 rschmidt Exp $ (C) Marvell. ";
31#endif
32
33#include "h/skdrv1st.h" /* Driver Specific Definitions */
34#include "h/lm80.h"
35#include "h/skdrv2nd.h" /* Adapter Control- and Driver specific Def. */
36
37#define BREAK_OR_WAIT(pAC,IoC,Event) break
38
39/*
40 * read a sensor's value (LM80 specific)
41 *
42 * This function reads a sensor's value from the I2C sensor chip LM80.
43 * The sensor is defined by its index into the sensor database of the
44 * structure pAC points to.
45 *
46 * Returns 1 if the read is completed
47 * 0 if the read must be continued (I2C Bus still allocated)
48 */
49int SkLm80ReadSensor(
50SK_AC *pAC, /* Adapter Context */
51SK_IOC	IoC,	/* I/O Context needed in levels 1 and 2 */
52SK_SENSOR *pSen) /* Sensor to be read */
53{
54 SK_I32 Value;
55
56 switch (pSen->SenState) {
57 case SK_SEN_IDLE:
58 /* Send address to ADDR register */
59 SK_I2C_CTL(IoC, I2C_READ, pSen->SenDev, I2C_025K_DEV, pSen->SenReg, 0);
60
61 pSen->SenState = SK_SEN_VALUE ;
62 BREAK_OR_WAIT(pAC, IoC, I2C_READ);
63
64 case SK_SEN_VALUE:
65 /* Read value from data register */
66 SK_IN32(IoC, B2_I2C_DATA, ((SK_U32 *)&Value));
67
68 Value &= 0xff; /* only least significant byte is valid */
69
70 /* Do NOT check the Value against the thresholds */
71 /* Checking is done in the calling instance */
72
73 if (pSen->SenType == SK_SEN_VOLT) {
74 /* Voltage sensor */
75 pSen->SenValue = Value * SK_LM80_VT_LSB;
76 pSen->SenState = SK_SEN_IDLE ;
77 return(1);
78 }
79
80 if (pSen->SenType == SK_SEN_FAN) {
81 if (Value != 0 && Value != 0xff) {
82 /* Fan speed counter */
83 pSen->SenValue = SK_LM80_FAN_FAKTOR/Value;
84 }
85 else {
86 /* Indicate Fan error */
87 pSen->SenValue = 0;
88 }
89 pSen->SenState = SK_SEN_IDLE ;
90 return(1);
91 }
92
93 /* First: correct the value: it might be negative */
94 if ((Value & 0x80) != 0) {
95 /* Value is negative */
96 Value = Value - 256;
97 }
98
99 /* We have a temperature sensor and need to get the signed extension.
100 * For now we get the extension from the last reading, so in the normal
101 * case we won't see flickering temperatures.
102 */
103 pSen->SenValue = (Value * SK_LM80_TEMP_LSB) +
104 (pSen->SenValue % SK_LM80_TEMP_LSB);
105
106 /* Send address to ADDR register */
107 SK_I2C_CTL(IoC, I2C_READ, pSen->SenDev, I2C_025K_DEV, LM80_TEMP_CTRL, 0);
108
109 pSen->SenState = SK_SEN_VALEXT ;
110 BREAK_OR_WAIT(pAC, IoC, I2C_READ);
111
112 case SK_SEN_VALEXT:
113 /* Read value from data register */
114 SK_IN32(IoC, B2_I2C_DATA, ((SK_U32 *)&Value));
115 Value &= LM80_TEMP_LSB_9; /* only bit 7 is valid */
116
117 /* cut the LSB bit */
118 pSen->SenValue = ((pSen->SenValue / SK_LM80_TEMP_LSB) *
119 SK_LM80_TEMP_LSB);
120
121 if (pSen->SenValue < 0) {
122 /* Value negative: The bit value must be subtracted */
123 pSen->SenValue -= ((Value >> 7) * SK_LM80_TEMPEXT_LSB);
124 }
125 else {
126 /* Value positive: The bit value must be added */
127 pSen->SenValue += ((Value >> 7) * SK_LM80_TEMPEXT_LSB);
128 }
129
130 pSen->SenState = SK_SEN_IDLE ;
131 return(1);
132
133 default:
134 SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_I2C_E007, SKERR_I2C_E007MSG);
135 return(1);
136 }
137
138 /* Not completed */
139 return(0);
140}
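/*
 * Worked example (standalone, illustrative) of the two-step temperature
 * decode done by SkLm80ReadSensor() above. It assumes SK_LM80_TEMP_LSB == 10
 * and SK_LM80_TEMPEXT_LSB == 5, i.e. SenValue is kept in units of 0.1 degree
 * Celsius and bit 7 of the extension register adds half a degree; check
 * h/lm80.h for the real constants. The driver additionally carries the
 * fractional part of the previous reading between the two I2C transfers,
 * which is omitted here.
 */
#include <stdio.h>

#define TEMP_LSB     10   /* assumed: 0.1 C units per LSB of the base reading */
#define TEMPEXT_LSB   5   /* assumed: 0.5 C for the half-degree extension bit */

static int decode_temp(unsigned char base_reg, unsigned char ext_reg)
{
	int value = base_reg;

	if (value & 0x80)              /* base register is two's complement */
		value -= 256;

	value *= TEMP_LSB;             /* whole degrees, in 0.1 C units */

	if (value < 0)                 /* extension bit moves away from zero */
		value -= (ext_reg >> 7) * TEMPEXT_LSB;
	else
		value += (ext_reg >> 7) * TEMPEXT_LSB;

	return value;
}

int main(void)
{
	printf("%d\n", decode_temp(0x19, 0x80));   /* 255  ->  25.5 C */
	printf("%d\n", decode_temp(0xF6, 0x00));   /* -100 -> -10.0 C */
	return 0;
}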
141
diff --git a/drivers/net/sk98lin/skqueue.c b/drivers/net/sk98lin/skqueue.c
deleted file mode 100644
index 0275b4f71d9b..000000000000
--- a/drivers/net/sk98lin/skqueue.c
+++ /dev/null
@@ -1,179 +0,0 @@
1/******************************************************************************
2 *
3 * Name: skqueue.c
4 * Project: Gigabit Ethernet Adapters, Event Scheduler Module
5 * Version: $Revision: 1.20 $
6 * Date: $Date: 2003/09/16 13:44:00 $
7 * Purpose: Management of an event queue.
8 *
9 ******************************************************************************/
10
11/******************************************************************************
12 *
13 * (C)Copyright 1998-2002 SysKonnect GmbH.
14 * (C)Copyright 2002-2003 Marvell.
15 *
16 * This program is free software; you can redistribute it and/or modify
17 * it under the terms of the GNU General Public License as published by
18 * the Free Software Foundation; either version 2 of the License, or
19 * (at your option) any later version.
20 *
21 * The information in this file is provided "AS IS" without warranty.
22 *
23 ******************************************************************************/
24
25
26/*
27 * Event queue and dispatcher
28 */
29#if (defined(DEBUG) || ((!defined(LINT)) && (!defined(SK_SLIM))))
30static const char SysKonnectFileId[] =
31 "@(#) $Id: skqueue.c,v 1.20 2003/09/16 13:44:00 rschmidt Exp $ (C) Marvell.";
32#endif
33
34#include "h/skdrv1st.h" /* Driver Specific Definitions */
35#include "h/skqueue.h" /* Queue Definitions */
36#include "h/skdrv2nd.h" /* Adapter Control- and Driver specific Def. */
37
38#ifdef __C2MAN__
39/*
40 Event queue management.
41
42 General Description:
43
44 */
45intro()
46{}
47#endif
48
49#define PRINTF(a,b,c)
50
51/*
52 * init event queue management
53 *
54 * Must be called during init level 0.
55 */
56void SkEventInit(
57SK_AC *pAC, /* Adapter context */
58SK_IOC Ioc, /* IO context */
59int Level) /* Init level */
60{
61 switch (Level) {
62 case SK_INIT_DATA:
63 pAC->Event.EvPut = pAC->Event.EvGet = pAC->Event.EvQueue;
64 break;
65 default:
66 break;
67 }
68}
69
70/*
71 * add event to queue
72 */
73void SkEventQueue(
74SK_AC *pAC, /* Adapters context */
75SK_U32 Class, /* Event Class */
76SK_U32 Event, /* Event to be queued */
77SK_EVPARA Para) /* Event parameter */
78{
79 pAC->Event.EvPut->Class = Class;
80 pAC->Event.EvPut->Event = Event;
81 pAC->Event.EvPut->Para = Para;
82
83 if (++pAC->Event.EvPut == &pAC->Event.EvQueue[SK_MAX_EVENT])
84 pAC->Event.EvPut = pAC->Event.EvQueue;
85
86 if (pAC->Event.EvPut == pAC->Event.EvGet) {
87 SK_ERR_LOG(pAC, SK_ERRCL_NORES, SKERR_Q_E001, SKERR_Q_E001MSG);
88 }
89}
90
91/*
92 * event dispatcher
93 * while event queue is not empty
94 * get event from queue
95 * send command to state machine
96 * end
97 * return error reported by individual Event function
98 * 0 if no error occured.
99 *	0 if no error occurred.
100int SkEventDispatcher(
101SK_AC *pAC, /* Adapters Context */
102SK_IOC Ioc) /* Io context */
103{
104 SK_EVENTELEM *pEv; /* pointer into queue */
105 SK_U32 Class;
106 int Rtv;
107
108 pEv = pAC->Event.EvGet;
109
110	PRINTF("dispatch get %x put %x\n", pEv, pAC->Event.EvPut);
111
112 while (pEv != pAC->Event.EvPut) {
113 PRINTF("dispatch Class %d Event %d\n", pEv->Class, pEv->Event);
114
115 switch (Class = pEv->Class) {
116#ifndef SK_USE_LAC_EV
117#ifndef SK_SLIM
118 case SKGE_RLMT: /* RLMT Event */
119 Rtv = SkRlmtEvent(pAC, Ioc, pEv->Event, pEv->Para);
120 break;
121 case SKGE_I2C: /* I2C Event */
122 Rtv = SkI2cEvent(pAC, Ioc, pEv->Event, pEv->Para);
123 break;
124 case SKGE_PNMI: /* PNMI Event */
125 Rtv = SkPnmiEvent(pAC, Ioc, pEv->Event, pEv->Para);
126 break;
127#endif /* not SK_SLIM */
128#endif /* not SK_USE_LAC_EV */
129 case SKGE_DRV: /* Driver Event */
130 Rtv = SkDrvEvent(pAC, Ioc, pEv->Event, pEv->Para);
131 break;
132#ifndef SK_USE_SW_TIMER
133 case SKGE_HWAC:
134 Rtv = SkGeSirqEvent(pAC, Ioc, pEv->Event, pEv->Para);
135 break;
136#else /* !SK_USE_SW_TIMER */
137 case SKGE_SWT :
138 Rtv = SkSwtEvent(pAC, Ioc, pEv->Event, pEv->Para);
139 break;
140#endif /* !SK_USE_SW_TIMER */
141#ifdef SK_USE_LAC_EV
142 case SKGE_LACP :
143 Rtv = SkLacpEvent(pAC, Ioc, pEv->Event, pEv->Para);
144 break;
145 case SKGE_RSF :
146 Rtv = SkRsfEvent(pAC, Ioc, pEv->Event, pEv->Para);
147 break;
148 case SKGE_MARKER :
149 Rtv = SkMarkerEvent(pAC, Ioc, pEv->Event, pEv->Para);
150 break;
151 case SKGE_FD :
152 Rtv = SkFdEvent(pAC, Ioc, pEv->Event, pEv->Para);
153 break;
154#endif /* SK_USE_LAC_EV */
155#ifdef SK_USE_CSUM
156 case SKGE_CSUM :
157 Rtv = SkCsEvent(pAC, Ioc, pEv->Event, pEv->Para);
158 break;
159#endif /* SK_USE_CSUM */
160 default :
161 SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_Q_E002, SKERR_Q_E002MSG);
162 Rtv = 0;
163 }
164
165 if (Rtv != 0) {
166 return(Rtv);
167 }
168
169 if (++pEv == &pAC->Event.EvQueue[SK_MAX_EVENT])
170 pEv = pAC->Event.EvQueue;
171
172 /* Renew get: it is used in queue_events to detect overruns */
173 pAC->Event.EvGet = pEv;
174 }
175
176 return(0);
177}
178
179/* End of file */
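/*
 * Minimal standalone sketch (illustrative, not driver code) of the circular
 * event queue implemented by SkEventQueue() and SkEventDispatcher() above:
 * a fixed-size array with put/get pointers that wrap at the end, where
 * "put catching up with get" signals an overflow and draining stops once
 * get reaches put. The queue size and event fields are example values.
 */
#include <stdio.h>

#define MAX_EVENT 4

struct event { int class_id; int event_id; };

static struct event queue[MAX_EVENT];
static struct event *put = queue, *get = queue;

static void queue_event(int class_id, int event_id)
{
	put->class_id = class_id;
	put->event_id = event_id;

	if (++put == &queue[MAX_EVENT])   /* wrap around at the end of the array */
		put = queue;

	if (put == get)                   /* queue full: next put would overwrite */
		fprintf(stderr, "event queue overflow\n");
}

static void dispatch(void)
{
	while (get != put) {
		printf("dispatch class %d event %d\n", get->class_id, get->event_id);
		if (++get == &queue[MAX_EVENT])
			get = queue;
	}
}

int main(void)
{
	queue_event(1, 100);
	queue_event(2, 200);
	dispatch();
	return 0;
}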
diff --git a/drivers/net/sk98lin/skrlmt.c b/drivers/net/sk98lin/skrlmt.c
deleted file mode 100644
index be8d1ccddf6d..000000000000
--- a/drivers/net/sk98lin/skrlmt.c
+++ /dev/null
@@ -1,3257 +0,0 @@
1/******************************************************************************
2 *
3 * Name: skrlmt.c
4 * Project: GEnesis, PCI Gigabit Ethernet Adapter
5 * Version: $Revision: 1.69 $
6 * Date: $Date: 2003/04/15 09:39:22 $
7 * Purpose: Manage links on SK-NET Adapters, esp. redundant ones.
8 *
9 ******************************************************************************/
10
11/******************************************************************************
12 *
13 * (C)Copyright 1998-2002 SysKonnect GmbH.
14 * (C)Copyright 2002-2003 Marvell.
15 *
16 * This program is free software; you can redistribute it and/or modify
17 * it under the terms of the GNU General Public License as published by
18 * the Free Software Foundation; either version 2 of the License, or
19 * (at your option) any later version.
20 *
21 * The information in this file is provided "AS IS" without warranty.
22 *
23 ******************************************************************************/
24
25/******************************************************************************
26 *
27 * Description:
28 *
29 * This module contains code for Link ManagemenT (LMT) of SK-NET Adapters.
30 * It is mainly intended for adapters with more than one link.
31 * For such adapters, this module realizes Redundant Link ManagemenT (RLMT).
32 *
33 * Include File Hierarchy:
34 *
35 * "skdrv1st.h"
36 * "skdrv2nd.h"
37 *
38 ******************************************************************************/
39
40#ifndef lint
41static const char SysKonnectFileId[] =
42 "@(#) $Id: skrlmt.c,v 1.69 2003/04/15 09:39:22 tschilli Exp $ (C) Marvell.";
43#endif /* !defined(lint) */
44
45#define __SKRLMT_C
46
47#ifdef __cplusplus
48extern "C" {
49#endif /* cplusplus */
50
51#include "h/skdrv1st.h"
52#include "h/skdrv2nd.h"
53
54/* defines ********************************************************************/
55
56#ifndef SK_HWAC_LINK_LED
57#define SK_HWAC_LINK_LED(a,b,c,d)
58#endif /* !defined(SK_HWAC_LINK_LED) */
59
60#ifndef DEBUG
61#define RLMT_STATIC static
62#else /* DEBUG */
63#define RLMT_STATIC
64
65#ifndef SK_LITTLE_ENDIAN
66/* First 32 bits */
67#define OFFS_LO32 1
68
69/* Second 32 bits */
70#define OFFS_HI32 0
71#else /* SK_LITTLE_ENDIAN */
72/* First 32 bits */
73#define OFFS_LO32 0
74
75/* Second 32 bits */
76#define OFFS_HI32 1
77#endif /* SK_LITTLE_ENDIAN */
78
79#endif /* DEBUG */
80
81/* ----- Private timeout values ----- */
82
83#define SK_RLMT_MIN_TO_VAL 125000 /* 1/8 sec. */
84#define SK_RLMT_DEF_TO_VAL 1000000 /* 1 sec. */
85#define SK_RLMT_PORTDOWN_TIM_VAL 900000 /* another 0.9 sec. */
86#define SK_RLMT_PORTSTART_TIM_VAL 100000 /* 0.1 sec. */
87#define SK_RLMT_PORTUP_TIM_VAL 2500000 /* 2.5 sec. */
88#define SK_RLMT_SEG_TO_VAL 900000000 /* 15 min. */
89
90/* Assume tick counter increment is 1 - may be set OS-dependent. */
91#ifndef SK_TICK_INCR
92#define SK_TICK_INCR SK_CONSTU64(1)
93#endif /* !defined(SK_TICK_INCR) */
94
95/*
96 * Amount that a time stamp must be later to be recognized as "substantially
97 * later". This is about 1/128 sec, but above 1 tick counter increment.
98 */
99#define SK_RLMT_BC_DELTA (1 + ((SK_TICKS_PER_SEC >> 7) > SK_TICK_INCR ? \
100 (SK_TICKS_PER_SEC >> 7) : SK_TICK_INCR))
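/*
 * Worked example (values assumed, not from the driver): with
 * SK_TICKS_PER_SEC == 1000000 and SK_TICK_INCR == 1, the shift gives
 * 1000000 >> 7 == 7812 ticks (about 1/128 s), which is larger than
 * SK_TICK_INCR, so SK_RLMT_BC_DELTA == 1 + 7812 == 7813 ticks. With a very
 * coarse clock such as SK_TICKS_PER_SEC == 100, the shift yields 0 and the
 * fallback keeps the delta at 1 + SK_TICK_INCR == 2 ticks.
 */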
101
102/* ----- Private RLMT defaults ----- */
103
104#define SK_RLMT_DEF_PREF_PORT 0 /* "Lower" port. */
105#define SK_RLMT_DEF_MODE SK_RLMT_CHECK_LINK /* Default RLMT Mode. */
106
107/* ----- Private RLMT checking states ----- */
108
109#define SK_RLMT_RCS_SEG 1 /* RLMT Check State: check seg. */
110#define SK_RLMT_RCS_START_SEG 2 /* RLMT Check State: start check seg. */
111#define SK_RLMT_RCS_SEND_SEG 4 /* RLMT Check State: send BPDU packet */
112#define SK_RLMT_RCS_REPORT_SEG 8 /* RLMT Check State: report seg. */
113
114/* ----- Private PORT checking states ----- */
115
116#define SK_RLMT_PCS_TX 1 /* Port Check State: check tx. */
117#define SK_RLMT_PCS_RX 2 /* Port Check State: check rx. */
118
119/* ----- Private PORT events ----- */
120
121/* Note: Update simulation when changing these. */
122#define SK_RLMT_PORTSTART_TIM 1100 /* Port start timeout. */
123#define SK_RLMT_PORTUP_TIM 1101 /* Port can now go up. */
124#define SK_RLMT_PORTDOWN_RX_TIM 1102 /* Port did not receive once ... */
125#define SK_RLMT_PORTDOWN 1103 /* Port went down. */
126#define SK_RLMT_PORTDOWN_TX_TIM 1104 /* Partner did not receive ... */
127
128/* ----- Private RLMT events ----- */
129
130/* Note: Update simulation when changing these. */
131#define SK_RLMT_TIM 2100 /* RLMT timeout. */
132#define SK_RLMT_SEG_TIM 2101 /* RLMT segmentation check timeout. */
133
134#define TO_SHORTEN(tim) ((tim) / 2)
135
136/* Error numbers and messages. */
137#define SKERR_RLMT_E001 (SK_ERRBASE_RLMT + 0)
138#define SKERR_RLMT_E001_MSG "No Packet."
139#define SKERR_RLMT_E002 (SKERR_RLMT_E001 + 1)
140#define SKERR_RLMT_E002_MSG "Short Packet."
141#define SKERR_RLMT_E003 (SKERR_RLMT_E002 + 1)
142#define SKERR_RLMT_E003_MSG "Unknown RLMT event."
143#define SKERR_RLMT_E004 (SKERR_RLMT_E003 + 1)
144#define SKERR_RLMT_E004_MSG "PortsUp incorrect."
145#define SKERR_RLMT_E005 (SKERR_RLMT_E004 + 1)
146#define SKERR_RLMT_E005_MSG \
147 "Net seems to be segmented (different root bridges are reported on the ports)."
148#define SKERR_RLMT_E006 (SKERR_RLMT_E005 + 1)
149#define SKERR_RLMT_E006_MSG "Duplicate MAC Address detected."
150#define SKERR_RLMT_E007 (SKERR_RLMT_E006 + 1)
151#define SKERR_RLMT_E007_MSG "LinksUp incorrect."
152#define SKERR_RLMT_E008 (SKERR_RLMT_E007 + 1)
153#define SKERR_RLMT_E008_MSG "Port not started but link came up."
154#define SKERR_RLMT_E009 (SKERR_RLMT_E008 + 1)
155#define SKERR_RLMT_E009_MSG "Corrected illegal setting of Preferred Port."
156#define SKERR_RLMT_E010 (SKERR_RLMT_E009 + 1)
157#define SKERR_RLMT_E010_MSG "Ignored illegal Preferred Port."
158
159/* LLC field values. */
160#define LLC_COMMAND_RESPONSE_BIT 1
161#define LLC_TEST_COMMAND 0xE3
162#define LLC_UI 0x03
163
164/* RLMT Packet fields. */
165#define SK_RLMT_DSAP 0
166#define SK_RLMT_SSAP 0
167#define SK_RLMT_CTRL (LLC_TEST_COMMAND)
168#define SK_RLMT_INDICATOR0 0x53 /* S */
169#define SK_RLMT_INDICATOR1 0x4B /* K */
170#define SK_RLMT_INDICATOR2 0x2D /* - */
171#define SK_RLMT_INDICATOR3 0x52 /* R */
172#define SK_RLMT_INDICATOR4 0x4C /* L */
173#define SK_RLMT_INDICATOR5 0x4D /* M */
174#define SK_RLMT_INDICATOR6 0x54 /* T */
175#define SK_RLMT_PACKET_VERSION 0
176
177/* RLMT SPT Flag values. */
178#define SK_RLMT_SPT_FLAG_CHANGE 0x01
179#define SK_RLMT_SPT_FLAG_CHANGE_ACK 0x80
180
181/* RLMT SPT Packet fields. */
182#define SK_RLMT_SPT_DSAP 0x42
183#define SK_RLMT_SPT_SSAP 0x42
184#define SK_RLMT_SPT_CTRL (LLC_UI)
185#define SK_RLMT_SPT_PROTOCOL_ID0 0x00
186#define SK_RLMT_SPT_PROTOCOL_ID1 0x00
187#define SK_RLMT_SPT_PROTOCOL_VERSION_ID 0x00
188#define SK_RLMT_SPT_BPDU_TYPE 0x00
189#define SK_RLMT_SPT_FLAGS 0x00 /* ?? */
190#define SK_RLMT_SPT_ROOT_ID0 0xFF /* Lowest possible priority. */
191#define SK_RLMT_SPT_ROOT_ID1 0xFF /* Lowest possible priority. */
192
193/* Remaining 6 bytes will be the current port address. */
194#define SK_RLMT_SPT_ROOT_PATH_COST0 0x00
195#define SK_RLMT_SPT_ROOT_PATH_COST1 0x00
196#define SK_RLMT_SPT_ROOT_PATH_COST2 0x00
197#define SK_RLMT_SPT_ROOT_PATH_COST3 0x00
198#define SK_RLMT_SPT_BRIDGE_ID0 0xFF /* Lowest possible priority. */
199#define SK_RLMT_SPT_BRIDGE_ID1 0xFF /* Lowest possible priority. */
200
201/* Remaining 6 bytes will be the current port address. */
202#define SK_RLMT_SPT_PORT_ID0 0xFF /* Lowest possible priority. */
203#define SK_RLMT_SPT_PORT_ID1 0xFF /* Lowest possible priority. */
204#define SK_RLMT_SPT_MSG_AGE0 0x00
205#define SK_RLMT_SPT_MSG_AGE1 0x00
206#define SK_RLMT_SPT_MAX_AGE0 0x00
207#define SK_RLMT_SPT_MAX_AGE1 0xFF
208#define SK_RLMT_SPT_HELLO_TIME0 0x00
209#define SK_RLMT_SPT_HELLO_TIME1 0xFF
210#define SK_RLMT_SPT_FWD_DELAY0 0x00
211#define SK_RLMT_SPT_FWD_DELAY1 0x40
212
213/* Size defines. */
214#define SK_RLMT_MIN_PACKET_SIZE 34
215#define SK_RLMT_MAX_PACKET_SIZE (SK_RLMT_MAX_TX_BUF_SIZE)
216#define SK_PACKET_DATA_LEN (SK_RLMT_MAX_PACKET_SIZE - \
217 SK_RLMT_MIN_PACKET_SIZE)
218
219/* ----- RLMT packet types ----- */
220#define SK_PACKET_ANNOUNCE 1 /* Port announcement. */
221#define SK_PACKET_ALIVE 2 /* Alive packet to port. */
222#define SK_PACKET_ADDR_CHANGED 3 /* Port address changed. */
223#define SK_PACKET_CHECK_TX 4 /* Check your tx line. */
224
225#ifdef SK_LITTLE_ENDIAN
226#define SK_U16_TO_NETWORK_ORDER(Val,Addr) { \
227 SK_U8 *_Addr = (SK_U8*)(Addr); \
228 SK_U16 _Val = (SK_U16)(Val); \
229 *_Addr++ = (SK_U8)(_Val >> 8); \
230 *_Addr = (SK_U8)(_Val & 0xFF); \
231}
232#endif /* SK_LITTLE_ENDIAN */
233
234#ifdef SK_BIG_ENDIAN
235#define SK_U16_TO_NETWORK_ORDER(Val,Addr) (*(SK_U16*)(Addr) = (SK_U16)(Val))
236#endif /* SK_BIG_ENDIAN */
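/*
 * Standalone sketch (illustrative) of what SK_U16_TO_NETWORK_ORDER above
 * does: store a 16-bit value into a byte buffer in network (big-endian)
 * order. The byte-wise variant works on any host; a big-endian build can
 * simply store the value directly, as the second macro does.
 */
#include <stdio.h>
#include <stdint.h>

static void u16_to_network_order(uint16_t val, uint8_t *addr)
{
	addr[0] = (uint8_t)(val >> 8);     /* most significant byte first */
	addr[1] = (uint8_t)(val & 0xFF);
}

int main(void)
{
	uint8_t buf[2];

	u16_to_network_order(0x0800, buf);        /* e.g. a type/length value */
	printf("%02X %02X\n", buf[0], buf[1]);    /* prints: 08 00 */
	return 0;
}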
237
238#define AUTONEG_FAILED SK_FALSE
239#define AUTONEG_SUCCESS SK_TRUE
240
241
242/* typedefs *******************************************************************/
243
244/* RLMT packet. Length: SK_RLMT_MAX_PACKET_SIZE (60) bytes. */
245typedef struct s_RlmtPacket {
246 SK_U8 DstAddr[SK_MAC_ADDR_LEN];
247 SK_U8 SrcAddr[SK_MAC_ADDR_LEN];
248 SK_U8 TypeLen[2];
249 SK_U8 DSap;
250 SK_U8 SSap;
251 SK_U8 Ctrl;
252 SK_U8 Indicator[7];
253 SK_U8 RlmtPacketType[2];
254 SK_U8 Align1[2];
255 SK_U8 Random[4]; /* Random value of requesting(!) station. */
256 SK_U8 RlmtPacketVersion[2]; /* RLMT Packet version. */
257 SK_U8 Data[SK_PACKET_DATA_LEN];
258} SK_RLMT_PACKET;
259
260typedef struct s_SpTreeRlmtPacket {
261 SK_U8 DstAddr[SK_MAC_ADDR_LEN];
262 SK_U8 SrcAddr[SK_MAC_ADDR_LEN];
263 SK_U8 TypeLen[2];
264 SK_U8 DSap;
265 SK_U8 SSap;
266 SK_U8 Ctrl;
267 SK_U8 ProtocolId[2];
268 SK_U8 ProtocolVersionId;
269 SK_U8 BpduType;
270 SK_U8 Flags;
271 SK_U8 RootId[8];
272 SK_U8 RootPathCost[4];
273 SK_U8 BridgeId[8];
274 SK_U8 PortId[2];
275 SK_U8 MessageAge[2];
276 SK_U8 MaxAge[2];
277 SK_U8 HelloTime[2];
278 SK_U8 ForwardDelay[2];
279} SK_SPTREE_PACKET;
280
281/* global variables ***********************************************************/
282
283SK_MAC_ADDR SkRlmtMcAddr = {{0x01, 0x00, 0x5A, 0x52, 0x4C, 0x4D}};
284SK_MAC_ADDR BridgeMcAddr = {{0x01, 0x80, 0xC2, 0x00, 0x00, 0x00}};
285
286/* local variables ************************************************************/
287
288/* None. */
289
290/* functions ******************************************************************/
291
292RLMT_STATIC void SkRlmtCheckSwitch(
293 SK_AC *pAC,
294 SK_IOC IoC,
295 SK_U32 NetIdx);
296RLMT_STATIC void SkRlmtCheckSeg(
297 SK_AC *pAC,
298 SK_IOC IoC,
299 SK_U32 NetIdx);
300RLMT_STATIC void SkRlmtEvtSetNets(
301 SK_AC *pAC,
302 SK_IOC IoC,
303 SK_EVPARA Para);
304
305/******************************************************************************
306 *
307 * SkRlmtInit - initialize data, set state to init
308 *
309 * Description:
310 *
311 * SK_INIT_DATA
312 * ============
313 *
314 * This routine initializes all RLMT-related variables to a known state.
315 * The initial state is SK_RLMT_RS_INIT.
316 * All ports are initialized to SK_RLMT_PS_INIT.
317 *
318 *
319 * SK_INIT_IO
320 * ==========
321 *
322 * Nothing.
323 *
324 *
325 * SK_INIT_RUN
326 * ===========
327 *
328 * Determine the adapter's random value.
329 * Set the hw registers, the "logical MAC address", the
330 * RLMT multicast address, and eventually the BPDU multicast address.
331 *
332 * Context:
333 * init, pageable
334 *
335 * Returns:
336 * Nothing.
337 */
338void SkRlmtInit(
339SK_AC *pAC, /* Adapter Context */
340SK_IOC IoC, /* I/O Context */
341int Level) /* Initialization Level */
342{
343 SK_U32 i, j;
344 SK_U64 Random;
345 SK_EVPARA Para;
346 SK_MAC_ADDR VirtualMacAddress;
347 SK_MAC_ADDR PhysicalAMacAddress;
348 SK_BOOL VirtualMacAddressSet;
349 SK_BOOL PhysicalAMacAddressSet;
350
351 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_INIT,
352 ("RLMT Init level %d.\n", Level))
353
354 switch (Level) {
355 case SK_INIT_DATA: /* Initialize data structures. */
356 SK_MEMSET((char *)&pAC->Rlmt, 0, sizeof(SK_RLMT));
357
358 for (i = 0; i < SK_MAX_MACS; i++) {
359 pAC->Rlmt.Port[i].PortState = SK_RLMT_PS_INIT;
360 pAC->Rlmt.Port[i].LinkDown = SK_TRUE;
361 pAC->Rlmt.Port[i].PortDown = SK_TRUE;
362 pAC->Rlmt.Port[i].PortStarted = SK_FALSE;
363 pAC->Rlmt.Port[i].PortNoRx = SK_FALSE;
364 pAC->Rlmt.Port[i].RootIdSet = SK_FALSE;
365 pAC->Rlmt.Port[i].PortNumber = i;
366 pAC->Rlmt.Port[i].Net = &pAC->Rlmt.Net[0];
367 pAC->Rlmt.Port[i].AddrPort = &pAC->Addr.Port[i];
368 }
369
370 pAC->Rlmt.NumNets = 1;
371 for (i = 0; i < SK_MAX_NETS; i++) {
372 pAC->Rlmt.Net[i].RlmtState = SK_RLMT_RS_INIT;
373 pAC->Rlmt.Net[i].RootIdSet = SK_FALSE;
374 pAC->Rlmt.Net[i].PrefPort = SK_RLMT_DEF_PREF_PORT;
375 pAC->Rlmt.Net[i].Preference = 0xFFFFFFFF; /* Automatic. */
376 /* Just assuming. */
377 pAC->Rlmt.Net[i].ActivePort = pAC->Rlmt.Net[i].PrefPort;
378 pAC->Rlmt.Net[i].RlmtMode = SK_RLMT_DEF_MODE;
379 pAC->Rlmt.Net[i].TimeoutValue = SK_RLMT_DEF_TO_VAL;
380 pAC->Rlmt.Net[i].NetNumber = i;
381 }
382
383 pAC->Rlmt.Net[0].Port[0] = &pAC->Rlmt.Port[0];
384 pAC->Rlmt.Net[0].Port[1] = &pAC->Rlmt.Port[1];
385#if SK_MAX_NETS > 1
386 pAC->Rlmt.Net[1].Port[0] = &pAC->Rlmt.Port[1];
387#endif /* SK_MAX_NETS > 1 */
388 break;
389
390 case SK_INIT_IO: /* GIMacsFound first available here. */
391 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_INIT,
392 ("RLMT: %d MACs were detected.\n", pAC->GIni.GIMacsFound))
393
394 pAC->Rlmt.Net[0].NumPorts = pAC->GIni.GIMacsFound;
395
396 /* Initialize HW registers? */
397 if (pAC->GIni.GIMacsFound == 1) {
398 Para.Para32[0] = SK_RLMT_MODE_CLS;
399 Para.Para32[1] = 0;
400 (void)SkRlmtEvent(pAC, IoC, SK_RLMT_MODE_CHANGE, Para);
401 }
402 break;
403
404 case SK_INIT_RUN:
405 /* Ensure RLMT is set to one net. */
406 if (pAC->Rlmt.NumNets > 1) {
407 Para.Para32[0] = 1;
408 Para.Para32[1] = -1;
409 SkRlmtEvtSetNets(pAC, IoC, Para);
410 }
411
412 for (i = 0; i < (SK_U32)pAC->GIni.GIMacsFound; i++) {
413 Random = SkOsGetTime(pAC);
414 *(SK_U32*)&pAC->Rlmt.Port[i].Random = *(SK_U32*)&Random;
415
416 for (j = 0; j < 4; j++) {
417 pAC->Rlmt.Port[i].Random[j] ^= pAC->Rlmt.Port[i].AddrPort->
418 CurrentMacAddress.a[SK_MAC_ADDR_LEN - 1 - j];
419 }
420
421 (void)SkAddrMcClear(pAC, IoC, i, SK_ADDR_PERMANENT | SK_MC_SW_ONLY);
422
423 /* Add RLMT MC address. */
424 (void)SkAddrMcAdd(pAC, IoC, i, &SkRlmtMcAddr, SK_ADDR_PERMANENT);
425
426 if (pAC->Rlmt.Net[0].RlmtMode & SK_RLMT_CHECK_SEG) {
427 /* Add BPDU MC address. */
428 (void)SkAddrMcAdd(pAC, IoC, i, &BridgeMcAddr, SK_ADDR_PERMANENT);
429 }
430
431 (void)SkAddrMcUpdate(pAC, IoC, i);
432 }
433
434 VirtualMacAddressSet = SK_FALSE;
435 /* Read virtual MAC address from Control Register File. */
436 for (j = 0; j < SK_MAC_ADDR_LEN; j++) {
437
438 SK_IN8(IoC, B2_MAC_1 + j, &VirtualMacAddress.a[j]);
439 VirtualMacAddressSet |= VirtualMacAddress.a[j];
440 }
441
442 PhysicalAMacAddressSet = SK_FALSE;
443 /* Read physical MAC address for MAC A from Control Register File. */
444 for (j = 0; j < SK_MAC_ADDR_LEN; j++) {
445
446 SK_IN8(IoC, B2_MAC_2 + j, &PhysicalAMacAddress.a[j]);
447 PhysicalAMacAddressSet |= PhysicalAMacAddress.a[j];
448 }
449
450 /* check if the two mac addresses contain reasonable values */
451 if (!VirtualMacAddressSet || !PhysicalAMacAddressSet) {
452
453 pAC->Rlmt.RlmtOff = SK_TRUE;
454 }
455
456 /* if the two mac addresses are equal switch off the RLMT_PRE_LOOKAHEAD
457 and the RLMT_LOOKAHEAD macros */
458 else if (SK_ADDR_EQUAL(PhysicalAMacAddress.a, VirtualMacAddress.a)) {
459
460 pAC->Rlmt.RlmtOff = SK_TRUE;
461 }
462 else {
463 pAC->Rlmt.RlmtOff = SK_FALSE;
464 }
465 break;
466
467 default: /* error */
468 break;
469 }
470 return;
471} /* SkRlmtInit */
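/*
 * Sketch (standalone, illustrative) of the per-port random value seeding
 * done in the SK_INIT_RUN branch above: 32 bits taken from the current time
 * are XORed byte-wise with the trailing four bytes of the port's MAC
 * address. The driver copies the first four bytes of the 64-bit time value
 * in memory order; this example simply uses the low 32 bits. The time value
 * and MAC address below are made up.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t now = 0x12345678ULL;                       /* pretend SkOsGetTime() */
	uint8_t  mac[6] = { 0x00, 0x10, 0x5A, 0xDE, 0xAD, 0x42 };
	uint8_t  random[4];

	for (int j = 0; j < 4; j++) {
		random[j]  = (uint8_t)(now >> (8 * j));         /* low 32 bits of the time */
		random[j] ^= mac[6 - 1 - j];                    /* XOR with last MAC bytes */
	}

	printf("%02X %02X %02X %02X\n", random[0], random[1], random[2], random[3]);
	return 0;
}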
472
473
474/******************************************************************************
475 *
476 * SkRlmtBuildCheckChain - build the check chain
477 *
478 * Description:
479 * This routine builds the local check chain:
480 * - Each port that is up checks the next port.
481 * - The last port that is up checks the first port that is up.
482 *
483 * Notes:
484 * - Currently only local ports are considered when building the chain.
485 * - Currently the SuspectState is just reset;
486 * it would be better to save it ...
487 *
488 * Context:
489 * runtime, pageable?
490 *
491 * Returns:
492 * Nothing
493 */
494RLMT_STATIC void SkRlmtBuildCheckChain(
495SK_AC *pAC, /* Adapter Context */
496SK_U32 NetIdx) /* Net Number */
497{
498 SK_U32 i;
499 SK_U32 NumMacsUp;
500 SK_RLMT_PORT * FirstMacUp;
501 SK_RLMT_PORT * PrevMacUp;
502
503 FirstMacUp = NULL;
504 PrevMacUp = NULL;
505
506 if (!(pAC->Rlmt.Net[NetIdx].RlmtMode & SK_RLMT_CHECK_LOC_LINK)) {
507		for (i = 0; i < pAC->Rlmt.Net[NetIdx].NumPorts; i++) {
508 pAC->Rlmt.Net[NetIdx].Port[i]->PortsChecked = 0;
509 }
510 return; /* Done. */
511 }
512
513 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
514 ("SkRlmtBuildCheckChain.\n"))
515
516 NumMacsUp = 0;
517
518 for (i = 0; i < pAC->Rlmt.Net[NetIdx].NumPorts; i++) {
519 pAC->Rlmt.Net[NetIdx].Port[i]->PortsChecked = 0;
520 pAC->Rlmt.Net[NetIdx].Port[i]->PortsSuspect = 0;
521 pAC->Rlmt.Net[NetIdx].Port[i]->CheckingState &=
522 ~(SK_RLMT_PCS_RX | SK_RLMT_PCS_TX);
523
524 /*
525 * If more than two links are detected we should consider
526 * checking at least two other ports:
527 * 1. the next port that is not LinkDown and
528 * 2. the next port that is not PortDown.
529 */
530 if (!pAC->Rlmt.Net[NetIdx].Port[i]->LinkDown) {
531 if (NumMacsUp == 0) {
532 FirstMacUp = pAC->Rlmt.Net[NetIdx].Port[i];
533 }
534 else {
535 PrevMacUp->PortCheck[
536 pAC->Rlmt.Net[NetIdx].Port[i]->PortsChecked].CheckAddr =
537 pAC->Rlmt.Net[NetIdx].Port[i]->AddrPort->CurrentMacAddress;
538 PrevMacUp->PortCheck[
539 PrevMacUp->PortsChecked].SuspectTx = SK_FALSE;
540 PrevMacUp->PortsChecked++;
541 }
542 PrevMacUp = pAC->Rlmt.Net[NetIdx].Port[i];
543 NumMacsUp++;
544 }
545 }
546
547 if (NumMacsUp > 1) {
548 PrevMacUp->PortCheck[PrevMacUp->PortsChecked].CheckAddr =
549 FirstMacUp->AddrPort->CurrentMacAddress;
550 PrevMacUp->PortCheck[PrevMacUp->PortsChecked].SuspectTx =
551 SK_FALSE;
552 PrevMacUp->PortsChecked++;
553 }
554
555#ifdef DEBUG
556 for (i = 0; i < pAC->Rlmt.Net[NetIdx].NumPorts; i++) {
557 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
558 ("Port %d checks %d other ports: %2X.\n", i,
559 pAC->Rlmt.Net[NetIdx].Port[i]->PortsChecked,
560 pAC->Rlmt.Net[NetIdx].Port[i]->PortCheck[0].CheckAddr.a[5]))
561 }
562#endif /* DEBUG */
563
564 return;
565} /* SkRlmtBuildCheckChain */
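/*
 * Standalone sketch (illustrative) of the check-chain construction done by
 * SkRlmtBuildCheckChain() above: every port whose link is up checks the next
 * port that is up, and the last one wraps around to the first, forming a
 * ring. The port count and link states are invented for the example.
 */
#include <stdio.h>

#define NUM_PORTS 4

int main(void)
{
	int link_up[NUM_PORTS] = { 1, 0, 1, 1 };   /* ports 0, 2 and 3 are up */
	int first_up = -1, prev_up = -1;

	for (int i = 0; i < NUM_PORTS; i++) {
		if (!link_up[i])
			continue;
		if (first_up < 0)
			first_up = i;                      /* remember the first up port */
		else
			printf("port %d checks port %d\n", prev_up, i);
		prev_up = i;
	}

	if (first_up >= 0 && prev_up != first_up)  /* more than one port up: close the ring */
		printf("port %d checks port %d\n", prev_up, first_up);
	return 0;
}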
566
567
568/******************************************************************************
569 *
570 * SkRlmtBuildPacket - build an RLMT packet
571 *
572 * Description:
573 * This routine sets up an RLMT packet.
574 *
575 * Context:
576 * runtime, pageable?
577 *
578 * Returns:
579 * NULL or pointer to RLMT mbuf
580 */
581RLMT_STATIC SK_MBUF *SkRlmtBuildPacket(
582SK_AC *pAC, /* Adapter Context */
583SK_IOC IoC, /* I/O Context */
584SK_U32 PortNumber, /* Sending port */
585SK_U16 PacketType, /* RLMT packet type */
586SK_MAC_ADDR *SrcAddr, /* Source address */
587SK_MAC_ADDR *DestAddr) /* Destination address */
588{
589 int i;
590 SK_U16 Length;
591 SK_MBUF *pMb;
592 SK_RLMT_PACKET *pPacket;
593
594#ifdef DEBUG
595 SK_U8 CheckSrc = 0;
596 SK_U8 CheckDest = 0;
597
598 for (i = 0; i < SK_MAC_ADDR_LEN; ++i) {
599 CheckSrc |= SrcAddr->a[i];
600 CheckDest |= DestAddr->a[i];
601 }
602
603 if ((CheckSrc == 0) || (CheckDest == 0)) {
604 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_ERR,
605 ("SkRlmtBuildPacket: Invalid %s%saddr.\n",
606 (CheckSrc == 0 ? "Src" : ""), (CheckDest == 0 ? "Dest" : "")))
607 }
608#endif
609
610 if ((pMb = SkDrvAllocRlmtMbuf(pAC, IoC, SK_RLMT_MAX_PACKET_SIZE)) != NULL) {
611 pPacket = (SK_RLMT_PACKET*)pMb->pData;
612 for (i = 0; i < SK_MAC_ADDR_LEN; i++) {
613 pPacket->DstAddr[i] = DestAddr->a[i];
614 pPacket->SrcAddr[i] = SrcAddr->a[i];
615 }
616 pPacket->DSap = SK_RLMT_DSAP;
617 pPacket->SSap = SK_RLMT_SSAP;
618 pPacket->Ctrl = SK_RLMT_CTRL;
619 pPacket->Indicator[0] = SK_RLMT_INDICATOR0;
620 pPacket->Indicator[1] = SK_RLMT_INDICATOR1;
621 pPacket->Indicator[2] = SK_RLMT_INDICATOR2;
622 pPacket->Indicator[3] = SK_RLMT_INDICATOR3;
623 pPacket->Indicator[4] = SK_RLMT_INDICATOR4;
624 pPacket->Indicator[5] = SK_RLMT_INDICATOR5;
625 pPacket->Indicator[6] = SK_RLMT_INDICATOR6;
626
627 SK_U16_TO_NETWORK_ORDER(PacketType, &pPacket->RlmtPacketType[0]);
628
629 for (i = 0; i < 4; i++) {
630 pPacket->Random[i] = pAC->Rlmt.Port[PortNumber].Random[i];
631 }
632
633 SK_U16_TO_NETWORK_ORDER(
634 SK_RLMT_PACKET_VERSION, &pPacket->RlmtPacketVersion[0]);
635
636 for (i = 0; i < SK_PACKET_DATA_LEN; i++) {
637 pPacket->Data[i] = 0x00;
638 }
639
640 Length = SK_RLMT_MAX_PACKET_SIZE; /* Or smaller. */
641 pMb->Length = Length;
642 pMb->PortIdx = PortNumber;
643 Length -= 14;
644 SK_U16_TO_NETWORK_ORDER(Length, &pPacket->TypeLen[0]);
645
646 if (PacketType == SK_PACKET_ALIVE) {
647 pAC->Rlmt.Port[PortNumber].TxHelloCts++;
648 }
649 }
650
651 return (pMb);
652} /* SkRlmtBuildPacket */
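/*
 * Worked example for the TypeLen computation in SkRlmtBuildPacket() above:
 * with SK_RLMT_MAX_PACKET_SIZE == 60 (per the packet typedef comment), the
 * value written to the 802.3 type/length field is 60 - 14 == 46, i.e. the
 * frame length minus the 6+6+2 byte Ethernet header. A value below 1536
 * marks the field as a length rather than an EtherType.
 */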
653
654
655/******************************************************************************
656 *
657 * SkRlmtBuildSpanningTreePacket - build spanning tree check packet
658 *
659 * Description:
660 * This routine sets up a BPDU packet for spanning tree check.
661 *
662 * Context:
663 * runtime, pageable?
664 *
665 * Returns:
666 * NULL or pointer to RLMT mbuf
667 */
668RLMT_STATIC SK_MBUF *SkRlmtBuildSpanningTreePacket(
669SK_AC *pAC, /* Adapter Context */
670SK_IOC IoC, /* I/O Context */
671SK_U32 PortNumber) /* Sending port */
672{
673 unsigned i;
674 SK_U16 Length;
675 SK_MBUF *pMb;
676 SK_SPTREE_PACKET *pSPacket;
677
678 if ((pMb = SkDrvAllocRlmtMbuf(pAC, IoC, SK_RLMT_MAX_PACKET_SIZE)) !=
679 NULL) {
680 pSPacket = (SK_SPTREE_PACKET*)pMb->pData;
681 for (i = 0; i < SK_MAC_ADDR_LEN; i++) {
682 pSPacket->DstAddr[i] = BridgeMcAddr.a[i];
683 pSPacket->SrcAddr[i] =
684 pAC->Addr.Port[PortNumber].CurrentMacAddress.a[i];
685 }
686 pSPacket->DSap = SK_RLMT_SPT_DSAP;
687 pSPacket->SSap = SK_RLMT_SPT_SSAP;
688 pSPacket->Ctrl = SK_RLMT_SPT_CTRL;
689
690 pSPacket->ProtocolId[0] = SK_RLMT_SPT_PROTOCOL_ID0;
691 pSPacket->ProtocolId[1] = SK_RLMT_SPT_PROTOCOL_ID1;
692 pSPacket->ProtocolVersionId = SK_RLMT_SPT_PROTOCOL_VERSION_ID;
693 pSPacket->BpduType = SK_RLMT_SPT_BPDU_TYPE;
694 pSPacket->Flags = SK_RLMT_SPT_FLAGS;
695 pSPacket->RootId[0] = SK_RLMT_SPT_ROOT_ID0;
696 pSPacket->RootId[1] = SK_RLMT_SPT_ROOT_ID1;
697 pSPacket->RootPathCost[0] = SK_RLMT_SPT_ROOT_PATH_COST0;
698 pSPacket->RootPathCost[1] = SK_RLMT_SPT_ROOT_PATH_COST1;
699 pSPacket->RootPathCost[2] = SK_RLMT_SPT_ROOT_PATH_COST2;
700 pSPacket->RootPathCost[3] = SK_RLMT_SPT_ROOT_PATH_COST3;
701 pSPacket->BridgeId[0] = SK_RLMT_SPT_BRIDGE_ID0;
702 pSPacket->BridgeId[1] = SK_RLMT_SPT_BRIDGE_ID1;
703
704 /*
705 * Use logical MAC address as bridge ID and filter these packets
706 * on receive.
707 */
708 for (i = 0; i < SK_MAC_ADDR_LEN; i++) {
709 pSPacket->BridgeId[i + 2] = pSPacket->RootId[i + 2] =
710 pAC->Addr.Net[pAC->Rlmt.Port[PortNumber].Net->NetNumber].
711 CurrentMacAddress.a[i];
712 }
713 pSPacket->PortId[0] = SK_RLMT_SPT_PORT_ID0;
714 pSPacket->PortId[1] = SK_RLMT_SPT_PORT_ID1;
715 pSPacket->MessageAge[0] = SK_RLMT_SPT_MSG_AGE0;
716 pSPacket->MessageAge[1] = SK_RLMT_SPT_MSG_AGE1;
717 pSPacket->MaxAge[0] = SK_RLMT_SPT_MAX_AGE0;
718 pSPacket->MaxAge[1] = SK_RLMT_SPT_MAX_AGE1;
719 pSPacket->HelloTime[0] = SK_RLMT_SPT_HELLO_TIME0;
720 pSPacket->HelloTime[1] = SK_RLMT_SPT_HELLO_TIME1;
721 pSPacket->ForwardDelay[0] = SK_RLMT_SPT_FWD_DELAY0;
722 pSPacket->ForwardDelay[1] = SK_RLMT_SPT_FWD_DELAY1;
723
724 Length = SK_RLMT_MAX_PACKET_SIZE; /* Or smaller. */
725 pMb->Length = Length;
726 pMb->PortIdx = PortNumber;
727 Length -= 14;
728 SK_U16_TO_NETWORK_ORDER(Length, &pSPacket->TypeLen[0]);
729
730 pAC->Rlmt.Port[PortNumber].TxSpHelloReqCts++;
731 }
732
733 return (pMb);
734} /* SkRlmtBuildSpanningTreePacket */
735
736
737/******************************************************************************
738 *
739 * SkRlmtSend - build and send check packets
740 *
741 * Description:
742 * Depending on the RLMT state and the checking state, several packets
743 * are sent through the indicated port.
744 *
745 * Context:
746 * runtime, pageable?
747 *
748 * Returns:
749 * Nothing.
750 */
751RLMT_STATIC void SkRlmtSend(
752SK_AC *pAC, /* Adapter Context */
753SK_IOC IoC, /* I/O Context */
754SK_U32 PortNumber) /* Sending port */
755{
756 unsigned j;
757 SK_EVPARA Para;
758 SK_RLMT_PORT *pRPort;
759
760 pRPort = &pAC->Rlmt.Port[PortNumber];
761 if (pAC->Rlmt.Port[PortNumber].Net->RlmtMode & SK_RLMT_CHECK_LOC_LINK) {
762 if (pRPort->CheckingState & (SK_RLMT_PCS_TX | SK_RLMT_PCS_RX)) {
763 /* Port is suspicious. Send the RLMT packet to the RLMT mc addr. */
764 if ((Para.pParaPtr = SkRlmtBuildPacket(pAC, IoC, PortNumber,
765 SK_PACKET_ALIVE, &pAC->Addr.Port[PortNumber].CurrentMacAddress,
766 &SkRlmtMcAddr)) != NULL) {
767 SkEventQueue(pAC, SKGE_DRV, SK_DRV_RLMT_SEND, Para);
768 }
769 }
770 else {
771 /*
772 * Send a directed RLMT packet to all ports that are
773 * checked by the indicated port.
774 */
775 for (j = 0; j < pRPort->PortsChecked; j++) {
776 if ((Para.pParaPtr = SkRlmtBuildPacket(pAC, IoC, PortNumber,
777 SK_PACKET_ALIVE, &pAC->Addr.Port[PortNumber].CurrentMacAddress,
778 &pRPort->PortCheck[j].CheckAddr)) != NULL) {
779 SkEventQueue(pAC, SKGE_DRV, SK_DRV_RLMT_SEND, Para);
780 }
781 }
782 }
783 }
784
785 if ((pAC->Rlmt.Port[PortNumber].Net->RlmtMode & SK_RLMT_CHECK_SEG) &&
786 (pAC->Rlmt.Port[PortNumber].Net->CheckingState & SK_RLMT_RCS_SEND_SEG)) {
787 /*
788 * Send a BPDU packet to make a connected switch tell us
789 * the correct root bridge.
790 */
791 if ((Para.pParaPtr =
792 SkRlmtBuildSpanningTreePacket(pAC, IoC, PortNumber)) != NULL) {
793 pAC->Rlmt.Port[PortNumber].Net->CheckingState &= ~SK_RLMT_RCS_SEND_SEG;
794 pRPort->RootIdSet = SK_FALSE;
795
796 SkEventQueue(pAC, SKGE_DRV, SK_DRV_RLMT_SEND, Para);
797 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_TX,
798 ("SkRlmtSend: BPDU Packet on Port %u.\n", PortNumber))
799 }
800 }
801 return;
802} /* SkRlmtSend */
803
804
805/******************************************************************************
806 *
807 * SkRlmtPortReceives - check if port is (going) down and bring it up
808 *
809 * Description:
810 * This routine checks whether a port that received a non-BPDU packet
811 * needs to be brought up or should stop going down.
812 *
813 * Context:
814 * runtime, pageable?
815 *
816 * Returns:
817 * Nothing.
818 */
819RLMT_STATIC void SkRlmtPortReceives(
820SK_AC *pAC, /* Adapter Context */
821SK_IOC IoC, /* I/O Context */
822SK_U32 PortNumber) /* Port to check */
823{
824 SK_RLMT_PORT *pRPort;
825 SK_EVPARA Para;
826
827 pRPort = &pAC->Rlmt.Port[PortNumber];
828 pRPort->PortNoRx = SK_FALSE;
829
830 if ((pRPort->PortState == SK_RLMT_PS_DOWN) &&
831 !(pRPort->CheckingState & SK_RLMT_PCS_TX)) {
832 /*
833 * Port is marked down (rx), but received a non-BPDU packet.
834 * Bring it up.
835 */
836 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_RX,
837 ("SkRlmtPacketReceive: Received on PortDown.\n"))
838
839 pRPort->PortState = SK_RLMT_PS_GOING_UP;
840 pRPort->GuTimeStamp = SkOsGetTime(pAC);
841 Para.Para32[0] = PortNumber;
842 Para.Para32[1] = (SK_U32)-1;
843 SkTimerStart(pAC, IoC, &pRPort->UpTimer, SK_RLMT_PORTUP_TIM_VAL,
844 SKGE_RLMT, SK_RLMT_PORTUP_TIM, Para);
845 pRPort->CheckingState &= ~SK_RLMT_PCS_RX;
846 /* pAC->Rlmt.CheckSwitch = SK_TRUE; */
847 SkRlmtCheckSwitch(pAC, IoC, pRPort->Net->NetNumber);
848 } /* PortDown && !SuspectTx */
849 else if (pRPort->CheckingState & SK_RLMT_PCS_RX) {
850 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_RX,
851 ("SkRlmtPacketReceive: Stop bringing port down.\n"))
852 SkTimerStop(pAC, IoC, &pRPort->DownRxTimer);
853 pRPort->CheckingState &= ~SK_RLMT_PCS_RX;
854 /* pAC->Rlmt.CheckSwitch = SK_TRUE; */
855 SkRlmtCheckSwitch(pAC, IoC, pRPort->Net->NetNumber);
856 } /* PortGoingDown */
857
858 return;
859} /* SkRlmtPortReceives */
860
861
862/******************************************************************************
863 *
864 * SkRlmtPacketReceive - receive a packet for closer examination
865 *
866 * Description:
867 * This routine examines a packet more closely than SK_RLMT_LOOKAHEAD.
868 *
869 * Context:
870 * runtime, pageable?
871 *
872 * Returns:
873 * Nothing.
874 */
875RLMT_STATIC void SkRlmtPacketReceive(
876SK_AC *pAC, /* Adapter Context */
877SK_IOC IoC, /* I/O Context */
878SK_MBUF *pMb) /* Received packet */
879{
880#ifdef xDEBUG
881 extern void DumpData(char *p, int size);
882#endif	/* xDEBUG */
883 int i;
884 unsigned j;
885 SK_U16 PacketType;
886 SK_U32 PortNumber;
887 SK_ADDR_PORT *pAPort;
888 SK_RLMT_PORT *pRPort;
889 SK_RLMT_PACKET *pRPacket;
890 SK_SPTREE_PACKET *pSPacket;
891 SK_EVPARA Para;
892
893 PortNumber = pMb->PortIdx;
894 pAPort = &pAC->Addr.Port[PortNumber];
895 pRPort = &pAC->Rlmt.Port[PortNumber];
896
897 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_RX,
898 ("SkRlmtPacketReceive: PortNumber == %d.\n", PortNumber))
899
900 pRPacket = (SK_RLMT_PACKET*)pMb->pData;
901 pSPacket = (SK_SPTREE_PACKET*)pRPacket;
902
903#ifdef xDEBUG
904 DumpData((char *)pRPacket, 32);
905#endif	/* xDEBUG */
906
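	/*
	 * At least one non-BPDU frame was counted in the current time slot:
	 * clear the port's no-rx state (and bring the port up if necessary).
	 */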
907 if ((pRPort->PacketsPerTimeSlot - pRPort->BpduPacketsPerTimeSlot) != 0) {
908 SkRlmtPortReceives(pAC, IoC, PortNumber);
909 }
910
911 /* Check destination address. */
912
913 if (!SK_ADDR_EQUAL(pAPort->CurrentMacAddress.a, pRPacket->DstAddr) &&
914 !SK_ADDR_EQUAL(SkRlmtMcAddr.a, pRPacket->DstAddr) &&
915 !SK_ADDR_EQUAL(BridgeMcAddr.a, pRPacket->DstAddr)) {
916
917 /* Not sent to current MAC or registered MC address => Trash it. */
918 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_RX,
919 ("SkRlmtPacketReceive: Not for me.\n"))
920
921 SkDrvFreeRlmtMbuf(pAC, IoC, pMb);
922 return;
923 }
924 else if (SK_ADDR_EQUAL(pAPort->CurrentMacAddress.a, pRPacket->SrcAddr)) {
925
926 /*
927 * Was sent by same port (may happen during port switching
928 * or in case of duplicate MAC addresses).
929 */
930
931 /*
932 * Check for duplicate address here:
933 * If Packet.Random != My.Random => DupAddr.
934 */
935 for (i = 3; i >= 0; i--) {
936 if (pRPort->Random[i] != pRPacket->Random[i]) {
937 break;
938 }
939 }
940
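		/*
		 * i >= 0 here means the random IDs differ, i.e. the frame was
		 * sent by another station using the same MAC address.
		 */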
941 /*
942 * CAUTION: Do not check for duplicate MAC address in RLMT Alive Reply
943 * packets (they have the LLC_COMMAND_RESPONSE_BIT set in
944 * pRPacket->SSap).
945 */
946 if (i >= 0 && pRPacket->DSap == SK_RLMT_DSAP &&
947 pRPacket->Ctrl == SK_RLMT_CTRL &&
948 pRPacket->SSap == SK_RLMT_SSAP &&
949 pRPacket->Indicator[0] == SK_RLMT_INDICATOR0 &&
950 pRPacket->Indicator[1] == SK_RLMT_INDICATOR1 &&
951 pRPacket->Indicator[2] == SK_RLMT_INDICATOR2 &&
952 pRPacket->Indicator[3] == SK_RLMT_INDICATOR3 &&
953 pRPacket->Indicator[4] == SK_RLMT_INDICATOR4 &&
954 pRPacket->Indicator[5] == SK_RLMT_INDICATOR5 &&
955 pRPacket->Indicator[6] == SK_RLMT_INDICATOR6) {
956 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_RX,
957 ("SkRlmtPacketReceive: Duplicate MAC Address.\n"))
958
959 /* Error Log entry. */
960 SK_ERR_LOG(pAC, SK_ERRCL_COMM, SKERR_RLMT_E006, SKERR_RLMT_E006_MSG);
961 }
962 else {
963 /* Simply trash it. */
964 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_RX,
965 ("SkRlmtPacketReceive: Sent by me.\n"))
966 }
967
968 SkDrvFreeRlmtMbuf(pAC, IoC, pMb);
969 return;
970 }
971
972 /* Check SuspectTx entries. */
973 if (pRPort->PortsSuspect > 0) {
974 for (j = 0; j < pRPort->PortsChecked; j++) {
975 if (pRPort->PortCheck[j].SuspectTx &&
976 SK_ADDR_EQUAL(
977 pRPacket->SrcAddr, pRPort->PortCheck[j].CheckAddr.a)) {
978 pRPort->PortCheck[j].SuspectTx = SK_FALSE;
979 pRPort->PortsSuspect--;
980 break;
981 }
982 }
983 }
984
985 /* Determine type of packet. */
986 if (pRPacket->DSap == SK_RLMT_DSAP &&
987 pRPacket->Ctrl == SK_RLMT_CTRL &&
988 (pRPacket->SSap & ~LLC_COMMAND_RESPONSE_BIT) == SK_RLMT_SSAP &&
989 pRPacket->Indicator[0] == SK_RLMT_INDICATOR0 &&
990 pRPacket->Indicator[1] == SK_RLMT_INDICATOR1 &&
991 pRPacket->Indicator[2] == SK_RLMT_INDICATOR2 &&
992 pRPacket->Indicator[3] == SK_RLMT_INDICATOR3 &&
993 pRPacket->Indicator[4] == SK_RLMT_INDICATOR4 &&
994 pRPacket->Indicator[5] == SK_RLMT_INDICATOR5 &&
995 pRPacket->Indicator[6] == SK_RLMT_INDICATOR6) {
996
997 /* It's an RLMT packet. */
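		/* The packet type is carried big-endian (network byte order). */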
998 PacketType = (SK_U16)((pRPacket->RlmtPacketType[0] << 8) |
999 pRPacket->RlmtPacketType[1]);
1000
1001 switch (PacketType) {
1002 case SK_PACKET_ANNOUNCE: /* Not yet used. */
1003#if 0
1004 /* Build the check chain. */
1005 SkRlmtBuildCheckChain(pAC);
1006#endif /* 0 */
1007
1008 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_RX,
1009 ("SkRlmtPacketReceive: Announce.\n"))
1010
1011 SkDrvFreeRlmtMbuf(pAC, IoC, pMb);
1012 break;
1013
1014 case SK_PACKET_ALIVE:
1015 if (pRPacket->SSap & LLC_COMMAND_RESPONSE_BIT) {
1016 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_RX,
1017 ("SkRlmtPacketReceive: Alive Reply.\n"))
1018
1019 if (!(pAC->Addr.Port[PortNumber].PromMode & SK_PROM_MODE_LLC) ||
1020 SK_ADDR_EQUAL(
1021 pRPacket->DstAddr, pAPort->CurrentMacAddress.a)) {
1022 /* Obviously we could send something. */
1023 if (pRPort->CheckingState & SK_RLMT_PCS_TX) {
1024 pRPort->CheckingState &= ~SK_RLMT_PCS_TX;
1025 SkTimerStop(pAC, IoC, &pRPort->DownTxTimer);
1026 }
1027
1028 if ((pRPort->PortState == SK_RLMT_PS_DOWN) &&
1029 !(pRPort->CheckingState & SK_RLMT_PCS_RX)) {
1030 pRPort->PortState = SK_RLMT_PS_GOING_UP;
1031 pRPort->GuTimeStamp = SkOsGetTime(pAC);
1032
1033 SkTimerStop(pAC, IoC, &pRPort->DownTxTimer);
1034
1035 Para.Para32[0] = PortNumber;
1036 Para.Para32[1] = (SK_U32)-1;
1037 SkTimerStart(pAC, IoC, &pRPort->UpTimer,
1038 SK_RLMT_PORTUP_TIM_VAL, SKGE_RLMT,
1039 SK_RLMT_PORTUP_TIM, Para);
1040 }
1041 }
1042
1043 /* Mark sending port as alive? */
1044 SkDrvFreeRlmtMbuf(pAC, IoC, pMb);
1045 }
1046 else { /* Alive Request Packet. */
1047 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_RX,
1048 ("SkRlmtPacketReceive: Alive Request.\n"))
1049
1050 pRPort->RxHelloCts++;
1051
1052 /* Answer. */
1053 for (i = 0; i < SK_MAC_ADDR_LEN; i++) {
1054 pRPacket->DstAddr[i] = pRPacket->SrcAddr[i];
1055 pRPacket->SrcAddr[i] =
1056 pAC->Addr.Port[PortNumber].CurrentMacAddress.a[i];
1057 }
1058 pRPacket->SSap |= LLC_COMMAND_RESPONSE_BIT;
1059
1060 Para.pParaPtr = pMb;
1061 SkEventQueue(pAC, SKGE_DRV, SK_DRV_RLMT_SEND, Para);
1062 }
1063 break;
1064
1065 case SK_PACKET_CHECK_TX:
1066 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_RX,
1067 ("SkRlmtPacketReceive: Check your tx line.\n"))
1068
1069 /* A port checking us requests us to check our tx line. */
1070 pRPort->CheckingState |= SK_RLMT_PCS_TX;
1071
1072 /* Start PortDownTx timer. */
1073 Para.Para32[0] = PortNumber;
1074 Para.Para32[1] = (SK_U32)-1;
1075 SkTimerStart(pAC, IoC, &pRPort->DownTxTimer,
1076 SK_RLMT_PORTDOWN_TIM_VAL, SKGE_RLMT,
1077 SK_RLMT_PORTDOWN_TX_TIM, Para);
1078
1079 SkDrvFreeRlmtMbuf(pAC, IoC, pMb);
1080
1081 if ((Para.pParaPtr = SkRlmtBuildPacket(pAC, IoC, PortNumber,
1082 SK_PACKET_ALIVE, &pAC->Addr.Port[PortNumber].CurrentMacAddress,
1083 &SkRlmtMcAddr)) != NULL) {
1084 SkEventQueue(pAC, SKGE_DRV, SK_DRV_RLMT_SEND, Para);
1085 }
1086 break;
1087
1088 case SK_PACKET_ADDR_CHANGED:
1089 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_RX,
1090 ("SkRlmtPacketReceive: Address Change.\n"))
1091
1092 /* Build the check chain. */
1093 SkRlmtBuildCheckChain(pAC, pRPort->Net->NetNumber);
1094 SkDrvFreeRlmtMbuf(pAC, IoC, pMb);
1095 break;
1096
1097 default:
1098 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_RX,
1099 ("SkRlmtPacketReceive: Unknown RLMT packet.\n"))
1100
1101 /* RA;:;: ??? */
1102 SkDrvFreeRlmtMbuf(pAC, IoC, pMb);
1103 }
1104 }
1105 else if (pSPacket->DSap == SK_RLMT_SPT_DSAP &&
1106 pSPacket->Ctrl == SK_RLMT_SPT_CTRL &&
1107 (pSPacket->SSap & ~LLC_COMMAND_RESPONSE_BIT) == SK_RLMT_SPT_SSAP) {
1108 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_RX,
1109 ("SkRlmtPacketReceive: BPDU Packet.\n"))
1110
1111 /* Spanning Tree packet. */
1112 pRPort->RxSpHelloCts++;
1113
1114 if (!SK_ADDR_EQUAL(&pSPacket->RootId[2], &pAC->Addr.Net[pAC->Rlmt.
1115 Port[PortNumber].Net->NetNumber].CurrentMacAddress.a[0])) {
1116 /*
1117 * Check segmentation if a new root bridge is set and
1118 * the segmentation check is not currently running.
1119 */
1120 if (!SK_ADDR_EQUAL(&pSPacket->RootId[2], &pRPort->Root.Id[2]) &&
1121 (pAC->Rlmt.Port[PortNumber].Net->LinksUp > 1) &&
1122 (pAC->Rlmt.Port[PortNumber].Net->RlmtMode & SK_RLMT_CHECK_SEG)
1123 != 0 && (pAC->Rlmt.Port[PortNumber].Net->CheckingState &
1124 SK_RLMT_RCS_SEG) == 0) {
1125 pAC->Rlmt.Port[PortNumber].Net->CheckingState |=
1126 SK_RLMT_RCS_START_SEG | SK_RLMT_RCS_SEND_SEG;
1127 }
1128
1129 /* Store tree view of this port. */
1130 for (i = 0; i < 8; i++) {
1131 pRPort->Root.Id[i] = pSPacket->RootId[i];
1132 }
1133 pRPort->RootIdSet = SK_TRUE;
1134
1135 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_DUMP,
1136 ("Root ID %d: %02x %02x %02x %02x %02x %02x %02x %02x.\n",
1137 PortNumber,
1138 pRPort->Root.Id[0], pRPort->Root.Id[1],
1139 pRPort->Root.Id[2], pRPort->Root.Id[3],
1140 pRPort->Root.Id[4], pRPort->Root.Id[5],
1141 pRPort->Root.Id[6], pRPort->Root.Id[7]))
1142 }
1143
1144 SkDrvFreeRlmtMbuf(pAC, IoC, pMb);
1145 if ((pAC->Rlmt.Port[PortNumber].Net->CheckingState &
1146 SK_RLMT_RCS_REPORT_SEG) != 0) {
1147 SkRlmtCheckSeg(pAC, IoC, pAC->Rlmt.Port[PortNumber].Net->NetNumber);
1148 }
1149 }
1150 else {
1151 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_RX,
1152 ("SkRlmtPacketReceive: Unknown Packet Type.\n"))
1153
1154 /* Unknown packet. */
1155 SkDrvFreeRlmtMbuf(pAC, IoC, pMb);
1156 }
1157 return;
1158} /* SkRlmtPacketReceive */
1159
1160
1161/******************************************************************************
1162 *
1163 * SkRlmtCheckPort - check if a port works
1164 *
1165 * Description:
1166 *	This routine checks whether a port whose link is up has received
1167 *	anything and whether it seems to transmit successfully.
1168 *
1169 * # PortState: PsInit, PsLinkDown, PsDown, PsGoingUp, PsUp
1170 * # PortCheckingState (Bitfield): ChkTx, ChkRx, ChkSeg
1171 * # RlmtCheckingState (Bitfield): ChkSeg, StartChkSeg, ReportSeg
1172 *
1173 * if (Rx - RxBpdu == 0) { # No rx.
1174 * if (state == PsUp) {
1175 * PortCheckingState |= ChkRx
1176 * }
1177 * if (ModeCheckSeg && (Timeout ==
1178 * TO_SHORTEN(RLMT_DEFAULT_TIMEOUT))) {
1179 *	            RlmtCheckingState |= ChkSeg
1180 * PortCheckingState |= ChkSeg
1181 * }
1182 * NewTimeout = TO_SHORTEN(Timeout)
1183 * if (NewTimeout < RLMT_MIN_TIMEOUT) {
1184 * NewTimeout = RLMT_MIN_TIMEOUT
1185 * PortState = PsDown
1186 * ...
1187 * }
1188 * }
1189 * else { # something was received
1190 * # Set counter to 0 at LinkDown?
1191 * # No - rx may be reported after LinkDown ???
1192 * PortCheckingState &= ~ChkRx
1193 * NewTimeout = RLMT_DEFAULT_TIMEOUT
1194 * if (RxAck == 0) {
1195 * possible reasons:
1196 * is my tx line bad? --
1197 * send RLMT multicast and report
1198 * back internally? (only possible
1199 * between ports on same adapter)
1200 * }
1201 * if (RxChk == 0) {
1202 * possible reasons:
1203 * - tx line of port set to check me
1204 * maybe bad
1205 * - no other port/adapter available or set
1206 * to check me
1207 * - adapter checking me has a longer
1208 * timeout
1209 * ??? anything that can be done here?
1210 * }
1211 * }
1212 *
1213 * Context:
1214 * runtime, pageable?
1215 *
1216 * Returns:
1217 * New timeout value.
1218 */
1219RLMT_STATIC SK_U32 SkRlmtCheckPort(
1220SK_AC *pAC, /* Adapter Context */
1221SK_IOC IoC, /* I/O Context */
1222SK_U32 PortNumber) /* Port to check */
1223{
1224 unsigned i;
1225 SK_U32 NewTimeout;
1226 SK_RLMT_PORT *pRPort;
1227 SK_EVPARA Para;
1228
1229 pRPort = &pAC->Rlmt.Port[PortNumber];
1230
1231 if ((pRPort->PacketsPerTimeSlot - pRPort->BpduPacketsPerTimeSlot) == 0) {
1232 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
1233 ("SkRlmtCheckPort %d: No (%d) receives in last time slot.\n",
1234 PortNumber, pRPort->PacketsPerTimeSlot))
1235
1236 /*
1237 * Check segmentation if there was no receive at least twice
1238 * in a row (PortNoRx is already set) and the segmentation
1239 * check is not currently running.
1240 */
1241
1242 if (pRPort->PortNoRx && (pAC->Rlmt.Port[PortNumber].Net->LinksUp > 1) &&
1243 (pAC->Rlmt.Port[PortNumber].Net->RlmtMode & SK_RLMT_CHECK_SEG) &&
1244 !(pAC->Rlmt.Port[PortNumber].Net->CheckingState & SK_RLMT_RCS_SEG)) {
1245 pAC->Rlmt.Port[PortNumber].Net->CheckingState |=
1246 SK_RLMT_RCS_START_SEG | SK_RLMT_RCS_SEND_SEG;
1247 }
1248
1249 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
1250 ("SkRlmtCheckPort: PortsSuspect %d, PcsRx %d.\n",
1251 pRPort->PortsSuspect, pRPort->CheckingState & SK_RLMT_PCS_RX))
1252
1253 if (pRPort->PortState != SK_RLMT_PS_DOWN) {
1254 NewTimeout = TO_SHORTEN(pAC->Rlmt.Port[PortNumber].Net->TimeoutValue);
1255 if (NewTimeout < SK_RLMT_MIN_TO_VAL) {
1256 NewTimeout = SK_RLMT_MIN_TO_VAL;
1257 }
1258
1259 if (!(pRPort->CheckingState & SK_RLMT_PCS_RX)) {
1260 Para.Para32[0] = PortNumber;
1261 pRPort->CheckingState |= SK_RLMT_PCS_RX;
1262
1263 /*
1264 * What shall we do if the port checked by this one receives
1265 * our request frames? What's bad - our rx line or his tx line?
1266 */
1267 Para.Para32[1] = (SK_U32)-1;
1268 SkTimerStart(pAC, IoC, &pRPort->DownRxTimer,
1269 SK_RLMT_PORTDOWN_TIM_VAL, SKGE_RLMT,
1270 SK_RLMT_PORTDOWN_RX_TIM, Para);
1271
1272 for (i = 0; i < pRPort->PortsChecked; i++) {
1273 if (pRPort->PortCheck[i].SuspectTx) {
1274 continue;
1275 }
1276 pRPort->PortCheck[i].SuspectTx = SK_TRUE;
1277 pRPort->PortsSuspect++;
1278 if ((Para.pParaPtr =
1279 SkRlmtBuildPacket(pAC, IoC, PortNumber, SK_PACKET_CHECK_TX,
1280 &pAC->Addr.Port[PortNumber].CurrentMacAddress,
1281 &pRPort->PortCheck[i].CheckAddr)) != NULL) {
1282 SkEventQueue(pAC, SKGE_DRV, SK_DRV_RLMT_SEND, Para);
1283 }
1284 }
1285 }
1286 }
1287 else { /* PortDown -- or all partners suspect. */
1288 NewTimeout = SK_RLMT_DEF_TO_VAL;
1289 }
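		/* Remember that nothing was received in this time slot. */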
1290 pRPort->PortNoRx = SK_TRUE;
1291 }
1292 else { /* A non-BPDU packet was received. */
1293 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
1294 ("SkRlmtCheckPort %d: %d (%d) receives in last time slot.\n",
1295 PortNumber,
1296 pRPort->PacketsPerTimeSlot - pRPort->BpduPacketsPerTimeSlot,
1297 pRPort->PacketsPerTimeSlot))
1298
1299 SkRlmtPortReceives(pAC, IoC, PortNumber);
1300 if (pAC->Rlmt.CheckSwitch) {
1301 SkRlmtCheckSwitch(pAC, IoC, pRPort->Net->NetNumber);
1302 }
1303
1304 NewTimeout = SK_RLMT_DEF_TO_VAL;
1305 }
1306
1307 return (NewTimeout);
1308} /* SkRlmtCheckPort */
1309
1310
1311/******************************************************************************
1312 *
1313 * SkRlmtSelectBcRx - select new active port, criteria 1 (CLP)
1314 *
1315 * Description:
1316 * This routine selects the port that received a broadcast frame
1317 * substantially later than all other ports.
1318 *
1319 * Context:
1320 * runtime, pageable?
1321 *
1322 * Returns:
1323 * SK_BOOL
1324 */
1325RLMT_STATIC SK_BOOL SkRlmtSelectBcRx(
1326SK_AC *pAC, /* Adapter Context */
1327SK_IOC IoC, /* I/O Context */
1328SK_U32 Active, /* Active port */
1329SK_U32 PrefPort, /* Preferred port */
1330SK_U32 *pSelect) /* New active port */
1331{
1332 SK_U64 BcTimeStamp;
1333 SK_U32 i;
1334 SK_BOOL PortFound;
1335
1336	BcTimeStamp = 0;	/* Not strictly necessary, but it feels safer. */
1337 PortFound = SK_FALSE;
1338
1339 /* Select port with the latest TimeStamp. */
1340 for (i = 0; i < (SK_U32)pAC->GIni.GIMacsFound; i++) {
1341
1342 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
1343 ("TimeStamp Port %d (Down: %d, NoRx: %d): %08x %08x.\n",
1344 i,
1345 pAC->Rlmt.Port[i].PortDown, pAC->Rlmt.Port[i].PortNoRx,
1346 *((SK_U32*)(&pAC->Rlmt.Port[i].BcTimeStamp) + OFFS_HI32),
1347 *((SK_U32*)(&pAC->Rlmt.Port[i].BcTimeStamp) + OFFS_LO32)))
1348
1349 if (!pAC->Rlmt.Port[i].PortDown && !pAC->Rlmt.Port[i].PortNoRx) {
1350 if (!PortFound || pAC->Rlmt.Port[i].BcTimeStamp > BcTimeStamp) {
1351 BcTimeStamp = pAC->Rlmt.Port[i].BcTimeStamp;
1352 *pSelect = i;
1353 PortFound = SK_TRUE;
1354 }
1355 }
1356 }
1357
1358 if (PortFound) {
1359 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
1360 ("Port %d received the last broadcast.\n", *pSelect))
1361
1362		/* Check whether another port's time stamp is similar. */
1363 for (i = 0; i < (SK_U32)pAC->GIni.GIMacsFound; i++) {
1364 if (i == *pSelect) {
1365 continue;
1366 }
1367 if (!pAC->Rlmt.Port[i].PortDown && !pAC->Rlmt.Port[i].PortNoRx &&
1368 (pAC->Rlmt.Port[i].BcTimeStamp >
1369 BcTimeStamp - SK_RLMT_BC_DELTA ||
1370 pAC->Rlmt.Port[i].BcTimeStamp +
1371 SK_RLMT_BC_DELTA > BcTimeStamp)) {
1372 PortFound = SK_FALSE;
1373
1374 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
1375 ("Port %d received a broadcast at a similar time.\n", i))
1376 break;
1377 }
1378 }
1379 }
1380
1381#ifdef DEBUG
1382 if (PortFound) {
1383 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
1384 ("SK_RLMT_SELECT_BCRX found Port %d receiving the substantially "
1385 "latest broadcast (%u).\n",
1386 *pSelect,
1387 BcTimeStamp - pAC->Rlmt.Port[1 - *pSelect].BcTimeStamp))
1388 }
1389#endif /* DEBUG */
1390
1391 return (PortFound);
1392} /* SkRlmtSelectBcRx */
1393
1394
1395/******************************************************************************
1396 *
1397 * SkRlmtSelectNotSuspect - select new active port, criteria 2 (CLP)
1398 *
1399 * Description:
1400 * This routine selects a good port (it is PortUp && !SuspectRx).
1401 *
1402 * Context:
1403 * runtime, pageable?
1404 *
1405 * Returns:
1406 * SK_BOOL
1407 */
1408RLMT_STATIC SK_BOOL SkRlmtSelectNotSuspect(
1409SK_AC *pAC, /* Adapter Context */
1410SK_IOC IoC, /* I/O Context */
1411SK_U32 Active, /* Active port */
1412SK_U32 PrefPort, /* Preferred port */
1413SK_U32 *pSelect) /* New active port */
1414{
1415 SK_U32 i;
1416 SK_BOOL PortFound;
1417
1418 PortFound = SK_FALSE;
1419
1420 /* Select first port that is PortUp && !SuspectRx. */
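	/*
	 * If the currently active port or the preferred port qualifies as
	 * well, it is chosen instead of the first qualifying port found.
	 */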
1421 for (i = 0; i < (SK_U32)pAC->GIni.GIMacsFound; i++) {
1422 if (!pAC->Rlmt.Port[i].PortDown &&
1423 !(pAC->Rlmt.Port[i].CheckingState & SK_RLMT_PCS_RX)) {
1424 *pSelect = i;
1425 if (!pAC->Rlmt.Port[Active].PortDown &&
1426 !(pAC->Rlmt.Port[Active].CheckingState & SK_RLMT_PCS_RX)) {
1427 *pSelect = Active;
1428 }
1429 if (!pAC->Rlmt.Port[PrefPort].PortDown &&
1430 !(pAC->Rlmt.Port[PrefPort].CheckingState & SK_RLMT_PCS_RX)) {
1431 *pSelect = PrefPort;
1432 }
1433 PortFound = SK_TRUE;
1434 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
1435			("SK_RLMT_SELECT_NOTSUSPECT found Port %d up and not checking RX.\n",
1436 *pSelect))
1437 break;
1438 }
1439 }
1440 return (PortFound);
1441} /* SkRlmtSelectNotSuspect */
1442
1443
1444/******************************************************************************
1445 *
1446 * SkRlmtSelectUp - select new active port, criteria 3, 4 (CLP)
1447 *
1448 * Description:
1449 * This routine selects a port that is up.
1450 *
1451 * Context:
1452 * runtime, pageable?
1453 *
1454 * Returns:
1455 * SK_BOOL
1456 */
1457RLMT_STATIC SK_BOOL SkRlmtSelectUp(
1458SK_AC *pAC, /* Adapter Context */
1459SK_IOC IoC, /* I/O Context */
1460SK_U32 Active, /* Active port */
1461SK_U32 PrefPort, /* Preferred port */
1462SK_U32 *pSelect, /* New active port */
1463SK_BOOL AutoNegDone) /* Successfully auto-negotiated? */
1464{
1465 SK_U32 i;
1466 SK_BOOL PortFound;
1467
1468 PortFound = SK_FALSE;
1469
1470 /* Select first port that is PortUp. */
1471 for (i = 0; i < (SK_U32)pAC->GIni.GIMacsFound; i++) {
1472 if (pAC->Rlmt.Port[i].PortState == SK_RLMT_PS_UP &&
1473 pAC->GIni.GP[i].PAutoNegFail != AutoNegDone) {
1474 *pSelect = i;
1475 if (pAC->Rlmt.Port[Active].PortState == SK_RLMT_PS_UP &&
1476 pAC->GIni.GP[Active].PAutoNegFail != AutoNegDone) {
1477 *pSelect = Active;
1478 }
1479 if (pAC->Rlmt.Port[PrefPort].PortState == SK_RLMT_PS_UP &&
1480 pAC->GIni.GP[PrefPort].PAutoNegFail != AutoNegDone) {
1481 *pSelect = PrefPort;
1482 }
1483 PortFound = SK_TRUE;
1484 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
1485 ("SK_RLMT_SELECT_UP found Port %d up.\n", *pSelect))
1486 break;
1487 }
1488 }
1489 return (PortFound);
1490} /* SkRlmtSelectUp */
1491
1492
1493/******************************************************************************
1494 *
1495 * SkRlmtSelectGoingUp - select new active port, criteria 5, 6 (CLP)
1496 *
1497 * Description:
1498 * This routine selects the port that is going up for the longest time.
1499 *
1500 * Context:
1501 * runtime, pageable?
1502 *
1503 * Returns:
1504 * SK_BOOL
1505 */
1506RLMT_STATIC SK_BOOL SkRlmtSelectGoingUp(
1507SK_AC *pAC, /* Adapter Context */
1508SK_IOC IoC, /* I/O Context */
1509SK_U32 Active, /* Active port */
1510SK_U32 PrefPort, /* Preferred port */
1511SK_U32 *pSelect, /* New active port */
1512SK_BOOL AutoNegDone) /* Successfully auto-negotiated? */
1513{
1514 SK_U64 GuTimeStamp;
1515 SK_U32 i;
1516 SK_BOOL PortFound;
1517
1518 GuTimeStamp = 0;
1519 PortFound = SK_FALSE;
1520
1521 /* Select port that is PortGoingUp for the longest time. */
1522 for (i = 0; i < (SK_U32)pAC->GIni.GIMacsFound; i++) {
1523 if (pAC->Rlmt.Port[i].PortState == SK_RLMT_PS_GOING_UP &&
1524 pAC->GIni.GP[i].PAutoNegFail != AutoNegDone) {
1525 GuTimeStamp = pAC->Rlmt.Port[i].GuTimeStamp;
1526 *pSelect = i;
1527 PortFound = SK_TRUE;
1528 break;
1529 }
1530 }
1531
1532 if (!PortFound) {
1533 return (SK_FALSE);
1534 }
1535
1536 for (i = *pSelect + 1; i < (SK_U32)pAC->GIni.GIMacsFound; i++) {
1537 if (pAC->Rlmt.Port[i].PortState == SK_RLMT_PS_GOING_UP &&
1538 pAC->Rlmt.Port[i].GuTimeStamp < GuTimeStamp &&
1539 pAC->GIni.GP[i].PAutoNegFail != AutoNegDone) {
1540 GuTimeStamp = pAC->Rlmt.Port[i].GuTimeStamp;
1541 *pSelect = i;
1542 }
1543 }
1544
1545 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
1546 ("SK_RLMT_SELECT_GOINGUP found Port %d going up.\n", *pSelect))
1547 return (SK_TRUE);
1548} /* SkRlmtSelectGoingUp */
1549
1550
1551/******************************************************************************
1552 *
1553 * SkRlmtSelectDown - select new active port, criteria 7, 8 (CLP)
1554 *
1555 * Description:
1556 * This routine selects a port that is down.
1557 *
1558 * Context:
1559 * runtime, pageable?
1560 *
1561 * Returns:
1562 * SK_BOOL
1563 */
1564RLMT_STATIC SK_BOOL SkRlmtSelectDown(
1565SK_AC *pAC, /* Adapter Context */
1566SK_IOC IoC, /* I/O Context */
1567SK_U32 Active, /* Active port */
1568SK_U32 PrefPort, /* Preferred port */
1569SK_U32 *pSelect, /* New active port */
1570SK_BOOL AutoNegDone) /* Successfully auto-negotiated? */
1571{
1572 SK_U32 i;
1573 SK_BOOL PortFound;
1574
1575 PortFound = SK_FALSE;
1576
1577 /* Select first port that is PortDown. */
1578 for (i = 0; i < (SK_U32)pAC->GIni.GIMacsFound; i++) {
1579 if (pAC->Rlmt.Port[i].PortState == SK_RLMT_PS_DOWN &&
1580 pAC->GIni.GP[i].PAutoNegFail != AutoNegDone) {
1581 *pSelect = i;
1582 if (pAC->Rlmt.Port[Active].PortState == SK_RLMT_PS_DOWN &&
1583 pAC->GIni.GP[Active].PAutoNegFail != AutoNegDone) {
1584 *pSelect = Active;
1585 }
1586 if (pAC->Rlmt.Port[PrefPort].PortState == SK_RLMT_PS_DOWN &&
1587 pAC->GIni.GP[PrefPort].PAutoNegFail != AutoNegDone) {
1588 *pSelect = PrefPort;
1589 }
1590 PortFound = SK_TRUE;
1591 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
1592 ("SK_RLMT_SELECT_DOWN found Port %d down.\n", *pSelect))
1593 break;
1594 }
1595 }
1596 return (PortFound);
1597} /* SkRlmtSelectDown */
1598
1599
1600/******************************************************************************
1601 *
1602 * SkRlmtCheckSwitch - select new active port and switch to it
1603 *
1604 * Description:
1605 * This routine decides which port should be the active one and queues
1606 * port switching if necessary.
1607 *
1608 * Context:
1609 * runtime, pageable?
1610 *
1611 * Returns:
1612 * Nothing.
1613 */
1614RLMT_STATIC void SkRlmtCheckSwitch(
1615SK_AC *pAC, /* Adapter Context */
1616SK_IOC IoC, /* I/O Context */
1617SK_U32 NetIdx) /* Net index */
1618{
1619 SK_EVPARA Para;
1620 SK_U32 Active;
1621 SK_U32 PrefPort;
1622 SK_U32 i;
1623 SK_BOOL PortFound;
1624
1625 Active = pAC->Rlmt.Net[NetIdx].ActivePort; /* Index of active port. */
1626 PrefPort = pAC->Rlmt.Net[NetIdx].PrefPort; /* Index of preferred port. */
1627 PortFound = SK_FALSE;
1628 pAC->Rlmt.CheckSwitch = SK_FALSE;
1629
1630#if 0	/* RW 2001/10/18 - the active port always becomes the preferred one */
1631 if (pAC->Rlmt.Net[NetIdx].Preference == 0xFFFFFFFF) { /* Automatic */
1632 /* disable auto-fail back */
1633 PrefPort = Active;
1634 }
1635#endif
1636
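	/*
	 * Three cases follow: all links down (shut the net down), the first
	 * link came up while the net was down (bring the net up on that
	 * port), or normal operation (re-evaluate the active port selection).
	 */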
1637 if (pAC->Rlmt.Net[NetIdx].LinksUp == 0) {
1638 /* Last link went down - shut down the net. */
1639 pAC->Rlmt.Net[NetIdx].RlmtState = SK_RLMT_RS_NET_DOWN;
1640 Para.Para32[0] = SK_RLMT_NET_DOWN_TEMP;
1641 Para.Para32[1] = NetIdx;
1642 SkEventQueue(pAC, SKGE_DRV, SK_DRV_NET_DOWN, Para);
1643
1644 Para.Para32[0] = pAC->Rlmt.Net[NetIdx].
1645 Port[pAC->Rlmt.Net[NetIdx].ActivePort]->PortNumber;
1646 Para.Para32[1] = NetIdx;
1647 SkEventQueue(pAC, SKGE_PNMI, SK_PNMI_EVT_RLMT_ACTIVE_DOWN, Para);
1648 return;
1649 } /* pAC->Rlmt.LinksUp == 0 */
1650 else if (pAC->Rlmt.Net[NetIdx].LinksUp == 1 &&
1651 pAC->Rlmt.Net[NetIdx].RlmtState == SK_RLMT_RS_NET_DOWN) {
1652 /* First link came up - get the net up. */
1653 pAC->Rlmt.Net[NetIdx].RlmtState = SK_RLMT_RS_NET_UP;
1654
1655 /*
1656 * If pAC->Rlmt.ActivePort != Para.Para32[0],
1657 * the DRV switches to the port that came up.
1658 */
1659 for (i = 0; i < pAC->Rlmt.Net[NetIdx].NumPorts; i++) {
1660 if (!pAC->Rlmt.Net[NetIdx].Port[i]->LinkDown) {
1661 if (!pAC->Rlmt.Net[NetIdx].Port[Active]->LinkDown) {
1662 i = Active;
1663 }
1664 if (!pAC->Rlmt.Net[NetIdx].Port[PrefPort]->LinkDown) {
1665 i = PrefPort;
1666 }
1667 PortFound = SK_TRUE;
1668 break;
1669 }
1670 }
1671
1672 if (PortFound) {
1673 Para.Para32[0] = pAC->Rlmt.Net[NetIdx].Port[i]->PortNumber;
1674 Para.Para32[1] = NetIdx;
1675 SkEventQueue(pAC, SKGE_PNMI, SK_PNMI_EVT_RLMT_ACTIVE_UP, Para);
1676
1677 pAC->Rlmt.Net[NetIdx].ActivePort = i;
1678 Para.Para32[0] = pAC->Rlmt.Net[NetIdx].Port[i]->PortNumber;
1679 Para.Para32[1] = NetIdx;
1680 SkEventQueue(pAC, SKGE_DRV, SK_DRV_NET_UP, Para);
1681
1682 if ((pAC->Rlmt.Net[NetIdx].RlmtMode & SK_RLMT_TRANSPARENT) == 0 &&
1683 (Para.pParaPtr = SkRlmtBuildPacket(pAC, IoC,
1684 pAC->Rlmt.Net[NetIdx].Port[i]->PortNumber,
1685 SK_PACKET_ANNOUNCE, &pAC->Addr.Net[NetIdx].
1686 CurrentMacAddress, &SkRlmtMcAddr)) != NULL) {
1687 /*
1688 * Send announce packet to RLMT multicast address to force
1689 * switches to learn the new location of the logical MAC address.
1690 */
1691 SkEventQueue(pAC, SKGE_DRV, SK_DRV_RLMT_SEND, Para);
1692 }
1693 }
1694 else {
1695 SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_RLMT_E007, SKERR_RLMT_E007_MSG);
1696 }
1697
1698 return;
1699 } /* LinksUp == 1 && RlmtState == SK_RLMT_RS_NET_DOWN */
1700 else { /* Cannot be reached in dual-net mode. */
1701 Para.Para32[0] = Active;
1702
1703 /*
1704 * Preselection:
1705 * If RLMT Mode != CheckLinkState
1706 * select port that received a broadcast frame substantially later
1707 * than all other ports
1708 * else select first port that is not SuspectRx
1709 * else select first port that is PortUp
1710 * else select port that is PortGoingUp for the longest time
1711 * else select first port that is PortDown
1712 * else stop.
1713 *
1714 * For the preselected port:
1715 * If ActivePort is equal in quality, select ActivePort.
1716 *
1717 * If PrefPort is equal in quality, select PrefPort.
1718 *
1719 * If ActivePort != SelectedPort,
1720 * If old ActivePort is LinkDown,
1721 * SwitchHard
1722 * else
1723 * SwitchSoft
1724 */
1725		/* Without changed priority for the last broadcast received. */
1726 if ((pAC->Rlmt.Net[0].RlmtMode != SK_RLMT_MODE_CLS) &&
1727 (!pAC->Rlmt.Net[0].ChgBcPrio)) {
1728
1729 if (!PortFound) {
1730 PortFound = SkRlmtSelectBcRx(
1731 pAC, IoC, Active, PrefPort, &Para.Para32[1]);
1732 }
1733
1734 if (!PortFound) {
1735 PortFound = SkRlmtSelectNotSuspect(
1736 pAC, IoC, Active, PrefPort, &Para.Para32[1]);
1737 }
1738		} /* RlmtMode != SK_RLMT_MODE_CLS && !ChgBcPrio */
1739
1740		/* With changed priority for the last broadcast received. */
1741 if ((pAC->Rlmt.Net[0].RlmtMode != SK_RLMT_MODE_CLS) &&
1742 (pAC->Rlmt.Net[0].ChgBcPrio)) {
1743 if (!PortFound) {
1744 PortFound = SkRlmtSelectNotSuspect(
1745 pAC, IoC, Active, PrefPort, &Para.Para32[1]);
1746 }
1747
1748 if (!PortFound) {
1749 PortFound = SkRlmtSelectBcRx(
1750 pAC, IoC, Active, PrefPort, &Para.Para32[1]);
1751 }
1752		} /* RlmtMode != SK_RLMT_MODE_CLS && ChgBcPrio */
1753
1754 if (!PortFound) {
1755 PortFound = SkRlmtSelectUp(
1756 pAC, IoC, Active, PrefPort, &Para.Para32[1], AUTONEG_SUCCESS);
1757 }
1758
1759 if (!PortFound) {
1760 PortFound = SkRlmtSelectUp(
1761 pAC, IoC, Active, PrefPort, &Para.Para32[1], AUTONEG_FAILED);
1762 }
1763
1764 if (!PortFound) {
1765 PortFound = SkRlmtSelectGoingUp(
1766 pAC, IoC, Active, PrefPort, &Para.Para32[1], AUTONEG_SUCCESS);
1767 }
1768
1769 if (!PortFound) {
1770 PortFound = SkRlmtSelectGoingUp(
1771 pAC, IoC, Active, PrefPort, &Para.Para32[1], AUTONEG_FAILED);
1772 }
1773
1774 if (pAC->Rlmt.Net[0].RlmtMode != SK_RLMT_MODE_CLS) {
1775 if (!PortFound) {
1776 PortFound = SkRlmtSelectDown(pAC, IoC,
1777 Active, PrefPort, &Para.Para32[1], AUTONEG_SUCCESS);
1778 }
1779
1780 if (!PortFound) {
1781 PortFound = SkRlmtSelectDown(pAC, IoC,
1782 Active, PrefPort, &Para.Para32[1], AUTONEG_FAILED);
1783 }
1784 } /* pAC->Rlmt.RlmtMode != SK_RLMT_MODE_CLS */
1785
1786 if (PortFound) {
1787
1788 if (Para.Para32[1] != Active) {
1789 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
1790 ("Active: %d, Para1: %d.\n", Active, Para.Para32[1]))
1791 pAC->Rlmt.Net[NetIdx].ActivePort = Para.Para32[1];
1792 Para.Para32[0] = pAC->Rlmt.Net[NetIdx].
1793 Port[Para.Para32[0]]->PortNumber;
1794 Para.Para32[1] = pAC->Rlmt.Net[NetIdx].
1795 Port[Para.Para32[1]]->PortNumber;
1796 SK_HWAC_LINK_LED(pAC, IoC, Para.Para32[1], SK_LED_ACTIVE);
1797 if (pAC->Rlmt.Port[Active].LinkDown) {
1798 SkEventQueue(pAC, SKGE_DRV, SK_DRV_SWITCH_HARD, Para);
1799 }
1800 else {
1801 SK_HWAC_LINK_LED(pAC, IoC, Para.Para32[0], SK_LED_STANDBY);
1802 SkEventQueue(pAC, SKGE_DRV, SK_DRV_SWITCH_SOFT, Para);
1803 }
1804 Para.Para32[1] = NetIdx;
1805 Para.Para32[0] =
1806 pAC->Rlmt.Net[NetIdx].Port[Para.Para32[0]]->PortNumber;
1807 SkEventQueue(pAC, SKGE_PNMI, SK_PNMI_EVT_RLMT_ACTIVE_DOWN, Para);
1808 Para.Para32[0] = pAC->Rlmt.Net[NetIdx].
1809 Port[pAC->Rlmt.Net[NetIdx].ActivePort]->PortNumber;
1810 SkEventQueue(pAC, SKGE_PNMI, SK_PNMI_EVT_RLMT_ACTIVE_UP, Para);
1811 if ((pAC->Rlmt.Net[NetIdx].RlmtMode & SK_RLMT_TRANSPARENT) == 0 &&
1812 (Para.pParaPtr = SkRlmtBuildPacket(pAC, IoC, Para.Para32[0],
1813 SK_PACKET_ANNOUNCE, &pAC->Addr.Net[NetIdx].CurrentMacAddress,
1814 &SkRlmtMcAddr)) != NULL) {
1815 /*
1816 * Send announce packet to RLMT multicast address to force
1817 * switches to learn the new location of the logical
1818 * MAC address.
1819 */
1820 SkEventQueue(pAC, SKGE_DRV, SK_DRV_RLMT_SEND, Para);
1821 } /* (Para.pParaPtr = SkRlmtBuildPacket(...)) != NULL */
1822 } /* Para.Para32[1] != Active */
1823 } /* PortFound */
1824 else {
1825 SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_RLMT_E004, SKERR_RLMT_E004_MSG);
1826 }
1827 } /* LinksUp > 1 || LinksUp == 1 && RlmtState != SK_RLMT_RS_NET_DOWN */
1828 return;
1829} /* SkRlmtCheckSwitch */
1830
1831
1832/******************************************************************************
1833 *
1834 * SkRlmtCheckSeg - Report if segmentation is detected
1835 *
1836 * Description:
1837 * This routine checks if the ports see different root bridges and reports
1838 * segmentation in such a case.
1839 *
1840 * Context:
1841 * runtime, pageable?
1842 *
1843 * Returns:
1844 * Nothing.
1845 */
1846RLMT_STATIC void SkRlmtCheckSeg(
1847SK_AC *pAC, /* Adapter Context */
1848SK_IOC IoC, /* I/O Context */
1849SK_U32 NetIdx) /* Net number */
1850{
1851 SK_EVPARA Para;
1852 SK_RLMT_NET *pNet;
1853 SK_U32 i, j;
1854 SK_BOOL Equal;
1855
1856 pNet = &pAC->Rlmt.Net[NetIdx];
1857 pNet->RootIdSet = SK_FALSE;
1858 Equal = SK_TRUE;
1859
1860 for (i = 0; i < pNet->NumPorts; i++) {
1861 if (pNet->Port[i]->LinkDown || !pNet->Port[i]->RootIdSet) {
1862 continue;
1863 }
1864
1865 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_DUMP,
1866 ("Root ID %d: %02x %02x %02x %02x %02x %02x %02x %02x.\n", i,
1867 pNet->Port[i]->Root.Id[0], pNet->Port[i]->Root.Id[1],
1868 pNet->Port[i]->Root.Id[2], pNet->Port[i]->Root.Id[3],
1869 pNet->Port[i]->Root.Id[4], pNet->Port[i]->Root.Id[5],
1870 pNet->Port[i]->Root.Id[6], pNet->Port[i]->Root.Id[7]))
1871
1872 if (!pNet->RootIdSet) {
1873 pNet->Root = pNet->Port[i]->Root;
1874 pNet->RootIdSet = SK_TRUE;
1875 continue;
1876 }
1877
1878 for (j = 0; j < 8; j ++) {
1879 Equal &= pNet->Port[i]->Root.Id[j] == pNet->Root.Id[j];
1880 if (!Equal) {
1881 break;
1882 }
1883 }
1884
1885 if (!Equal) {
1886 SK_ERR_LOG(pAC, SK_ERRCL_COMM, SKERR_RLMT_E005, SKERR_RLMT_E005_MSG);
1887 Para.Para32[0] = NetIdx;
1888 Para.Para32[1] = (SK_U32)-1;
1889 SkEventQueue(pAC, SKGE_PNMI, SK_PNMI_EVT_RLMT_SEGMENTATION, Para);
1890
1891 pNet->CheckingState &= ~SK_RLMT_RCS_REPORT_SEG;
1892
1893 /* 2000-03-06 RA: New. */
1894 Para.Para32[0] = NetIdx;
1895 Para.Para32[1] = (SK_U32)-1;
1896 SkTimerStart(pAC, IoC, &pNet->SegTimer, SK_RLMT_SEG_TO_VAL,
1897 SKGE_RLMT, SK_RLMT_SEG_TIM, Para);
1898 break;
1899 }
1900 } /* for (i = 0; i < pNet->NumPorts; i++) */
1901
1902 /* 2000-03-06 RA: Moved here. */
1903 /* Segmentation check not running anymore. */
1904 pNet->CheckingState &= ~SK_RLMT_RCS_SEG;
1905
1906} /* SkRlmtCheckSeg */
1907
1908
1909/******************************************************************************
1910 *
1911 * SkRlmtPortStart - initialize port variables and start port
1912 *
1913 * Description:
1914 * This routine initializes a port's variables and issues a PORT_START
1915 * to the HWAC module. This handles retries if the start fails or the
1916 * link eventually goes down.
1917 *
1918 * Context:
1919 * runtime, pageable?
1920 *
1921 * Returns:
1922 * Nothing
1923 */
1924RLMT_STATIC void SkRlmtPortStart(
1925SK_AC *pAC, /* Adapter Context */
1926SK_IOC IoC, /* I/O Context */
1927SK_U32 PortNumber) /* Port number */
1928{
1929 SK_EVPARA Para;
1930
1931 pAC->Rlmt.Port[PortNumber].PortState = SK_RLMT_PS_LINK_DOWN;
1932 pAC->Rlmt.Port[PortNumber].PortStarted = SK_TRUE;
1933 pAC->Rlmt.Port[PortNumber].LinkDown = SK_TRUE;
1934 pAC->Rlmt.Port[PortNumber].PortDown = SK_TRUE;
1935 pAC->Rlmt.Port[PortNumber].CheckingState = 0;
1936 pAC->Rlmt.Port[PortNumber].RootIdSet = SK_FALSE;
1937 Para.Para32[0] = PortNumber;
1938 Para.Para32[1] = (SK_U32)-1;
1939 SkEventQueue(pAC, SKGE_HWAC, SK_HWEV_PORT_START, Para);
1940} /* SkRlmtPortStart */
1941
1942
1943/******************************************************************************
1944 *
1945 * SkRlmtEvtPortStartTim - PORT_START_TIM
1946 *
1947 * Description:
1948 * This routine handles PORT_START_TIM events.
1949 *
1950 * Context:
1951 * runtime, pageable?
1952 * may be called after SK_INIT_IO
1953 *
1954 * Returns:
1955 * Nothing
1956 */
1957RLMT_STATIC void SkRlmtEvtPortStartTim(
1958SK_AC *pAC, /* Adapter Context */
1959SK_IOC IoC, /* I/O Context */
1960SK_EVPARA Para) /* SK_U32 PortNumber; SK_U32 -1 */
1961{
1962 SK_U32 i;
1963
1964 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
1965 ("SK_RLMT_PORTSTART_TIMEOUT Port %d Event BEGIN.\n", Para.Para32[0]))
1966
1967 if (Para.Para32[1] != (SK_U32)-1) {
1968 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
1969 ("Bad Parameter.\n"))
1970 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
1971 ("SK_RLMT_PORTSTART_TIMEOUT Event EMPTY.\n"))
1972 return;
1973 }
1974
1975 /*
1976 * Used to start non-preferred ports if the preferred one
1977 * does not come up.
1978	 * This timeout only needs to be set when starting the first
1979 * (preferred) port.
1980 */
1981 if (pAC->Rlmt.Port[Para.Para32[0]].LinkDown) {
1982		/* The preferred port did not come up; start the remaining ports. */
1983 for (i = 0; i < pAC->Rlmt.Port[Para.Para32[0]].Net->NumPorts; i++) {
1984 if (!pAC->Rlmt.Port[Para.Para32[0]].Net->Port[i]->PortStarted) {
1985 SkRlmtPortStart(pAC, IoC,
1986 pAC->Rlmt.Port[Para.Para32[0]].Net->Port[i]->PortNumber);
1987 }
1988 }
1989 }
1990
1991 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
1992 ("SK_RLMT_PORTSTART_TIMEOUT Event END.\n"))
1993} /* SkRlmtEvtPortStartTim */
1994
1995
1996/******************************************************************************
1997 *
1998 * SkRlmtEvtLinkUp - LINK_UP
1999 *
2000 * Description:
2001 *	This routine handles LINK_UP events.
2002 *
2003 * Context:
2004 * runtime, pageable?
2005 * may be called after SK_INIT_IO
2006 *
2007 * Returns:
2008 * Nothing
2009 */
2010RLMT_STATIC void SkRlmtEvtLinkUp(
2011SK_AC *pAC, /* Adapter Context */
2012SK_IOC IoC, /* I/O Context */
2013SK_EVPARA Para) /* SK_U32 PortNumber; SK_U32 Undefined */
2014{
2015 SK_U32 i;
2016 SK_RLMT_PORT *pRPort;
2017 SK_EVPARA Para2;
2018
2019 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
2020 ("SK_RLMT_LINK_UP Port %d Event BEGIN.\n", Para.Para32[0]))
2021
2022 pRPort = &pAC->Rlmt.Port[Para.Para32[0]];
2023 if (!pRPort->PortStarted) {
2024 SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_RLMT_E008, SKERR_RLMT_E008_MSG);
2025
2026 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
2027 ("SK_RLMT_LINK_UP Event EMPTY.\n"))
2028 return;
2029 }
2030
2031 if (!pRPort->LinkDown) {
2032 /* RA;:;: Any better solution? */
2033 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
2034 ("SK_RLMT_LINK_UP Event EMPTY.\n"))
2035 return;
2036 }
2037
2038 SkTimerStop(pAC, IoC, &pRPort->UpTimer);
2039 SkTimerStop(pAC, IoC, &pRPort->DownRxTimer);
2040 SkTimerStop(pAC, IoC, &pRPort->DownTxTimer);
2041
2042 /* Do something if timer already fired? */
2043
2044 pRPort->LinkDown = SK_FALSE;
2045 pRPort->PortState = SK_RLMT_PS_GOING_UP;
2046 pRPort->GuTimeStamp = SkOsGetTime(pAC);
2047 pRPort->BcTimeStamp = 0;
2048 pRPort->Net->LinksUp++;
2049 if (pRPort->Net->LinksUp == 1) {
2050 SK_HWAC_LINK_LED(pAC, IoC, Para.Para32[0], SK_LED_ACTIVE);
2051 }
2052 else {
2053 SK_HWAC_LINK_LED(pAC, IoC, Para.Para32[0], SK_LED_STANDBY);
2054 }
2055
2056 for (i = 0; i < pRPort->Net->NumPorts; i++) {
2057 if (!pRPort->Net->Port[i]->PortStarted) {
2058 SkRlmtPortStart(pAC, IoC, pRPort->Net->Port[i]->PortNumber);
2059 }
2060 }
2061
2062 SkRlmtCheckSwitch(pAC, IoC, pRPort->Net->NetNumber);
2063
2064 if (pRPort->Net->LinksUp >= 2) {
2065 if (pRPort->Net->RlmtMode & SK_RLMT_CHECK_LOC_LINK) {
2066 /* Build the check chain. */
2067 SkRlmtBuildCheckChain(pAC, pRPort->Net->NetNumber);
2068 }
2069 }
2070
2071 /* If the first link comes up, start the periodical RLMT timeout. */
2072 if (pRPort->Net->NumPorts > 1 && pRPort->Net->LinksUp == 1 &&
2073 (pRPort->Net->RlmtMode & SK_RLMT_CHECK_OTHERS) != 0) {
2074 Para2.Para32[0] = pRPort->Net->NetNumber;
2075 Para2.Para32[1] = (SK_U32)-1;
2076 SkTimerStart(pAC, IoC, &pRPort->Net->LocTimer,
2077 pRPort->Net->TimeoutValue, SKGE_RLMT, SK_RLMT_TIM, Para2);
2078 }
2079
2080 Para2 = Para;
2081 Para2.Para32[1] = (SK_U32)-1;
2082 SkTimerStart(pAC, IoC, &pRPort->UpTimer, SK_RLMT_PORTUP_TIM_VAL,
2083 SKGE_RLMT, SK_RLMT_PORTUP_TIM, Para2);
2084
2085 /* Later: if (pAC->Rlmt.RlmtMode & SK_RLMT_CHECK_LOC_LINK) && */
2086 if ((pRPort->Net->RlmtMode & SK_RLMT_TRANSPARENT) == 0 &&
2087 (pRPort->Net->RlmtMode & SK_RLMT_CHECK_LINK) != 0 &&
2088 (Para2.pParaPtr =
2089 SkRlmtBuildPacket(pAC, IoC, Para.Para32[0], SK_PACKET_ANNOUNCE,
2090 &pAC->Addr.Port[Para.Para32[0]].CurrentMacAddress, &SkRlmtMcAddr)
2091 ) != NULL) {
2092 /* Send "new" packet to RLMT multicast address. */
2093 SkEventQueue(pAC, SKGE_DRV, SK_DRV_RLMT_SEND, Para2);
2094 }
2095
2096 if (pRPort->Net->RlmtMode & SK_RLMT_CHECK_SEG) {
2097 if ((Para2.pParaPtr =
2098 SkRlmtBuildSpanningTreePacket(pAC, IoC, Para.Para32[0])) != NULL) {
2099 pAC->Rlmt.Port[Para.Para32[0]].RootIdSet = SK_FALSE;
2100 pRPort->Net->CheckingState |=
2101 SK_RLMT_RCS_SEG | SK_RLMT_RCS_REPORT_SEG;
2102
2103 SkEventQueue(pAC, SKGE_DRV, SK_DRV_RLMT_SEND, Para2);
2104
2105 Para.Para32[1] = (SK_U32)-1;
2106 SkTimerStart(pAC, IoC, &pRPort->Net->SegTimer,
2107 SK_RLMT_SEG_TO_VAL, SKGE_RLMT, SK_RLMT_SEG_TIM, Para);
2108 }
2109 }
2110
2111 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
2112 ("SK_RLMT_LINK_UP Event END.\n"))
2113} /* SkRlmtEvtLinkUp */
2114
2115
2116/******************************************************************************
2117 *
2118 * SkRlmtEvtPortUpTim - PORT_UP_TIM
2119 *
2120 * Description:
2121 * This routine handles PORT_UP_TIM events.
2122 *
2123 * Context:
2124 * runtime, pageable?
2125 * may be called after SK_INIT_IO
2126 *
2127 * Returns:
2128 * Nothing
2129 */
2130RLMT_STATIC void SkRlmtEvtPortUpTim(
2131SK_AC *pAC, /* Adapter Context */
2132SK_IOC IoC, /* I/O Context */
2133SK_EVPARA Para) /* SK_U32 PortNumber; SK_U32 -1 */
2134{
2135 SK_RLMT_PORT *pRPort;
2136
2137 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
2138 ("SK_RLMT_PORTUP_TIM Port %d Event BEGIN.\n", Para.Para32[0]))
2139
2140 if (Para.Para32[1] != (SK_U32)-1) {
2141 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
2142 ("Bad Parameter.\n"))
2143 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
2144 ("SK_RLMT_PORTUP_TIM Event EMPTY.\n"))
2145 return;
2146 }
2147
2148 pRPort = &pAC->Rlmt.Port[Para.Para32[0]];
2149 if (pRPort->LinkDown || (pRPort->PortState == SK_RLMT_PS_UP)) {
2150 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
2151 ("SK_RLMT_PORTUP_TIM Port %d Event EMPTY.\n", Para.Para32[0]))
2152 return;
2153 }
2154
2155 pRPort->PortDown = SK_FALSE;
2156 pRPort->PortState = SK_RLMT_PS_UP;
2157 pRPort->Net->PortsUp++;
2158 if (pRPort->Net->RlmtState != SK_RLMT_RS_INIT) {
2159 if (pAC->Rlmt.NumNets <= 1) {
2160 SkRlmtCheckSwitch(pAC, IoC, pRPort->Net->NetNumber);
2161 }
2162 SkEventQueue(pAC, SKGE_PNMI, SK_PNMI_EVT_RLMT_PORT_UP, Para);
2163 }
2164
2165 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
2166 ("SK_RLMT_PORTUP_TIM Event END.\n"))
2167} /* SkRlmtEvtPortUpTim */
2168
2169
2170/******************************************************************************
2171 *
2172 * SkRlmtEvtPortDownX - PORT_DOWN_*
2173 *
2174 * Description:
2175 * This routine handles PORT_DOWN_* events.
2176 *
2177 * Context:
2178 * runtime, pageable?
2179 * may be called after SK_INIT_IO
2180 *
2181 * Returns:
2182 * Nothing
2183 */
2184RLMT_STATIC void SkRlmtEvtPortDownX(
2185SK_AC *pAC, /* Adapter Context */
2186SK_IOC IoC, /* I/O Context */
2187SK_U32 Event, /* Event code */
2188SK_EVPARA Para) /* SK_U32 PortNumber; SK_U32 -1 */
2189{
2190 SK_RLMT_PORT *pRPort;
2191
2192 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
2193 ("SK_RLMT_PORTDOWN* Port %d Event (%d) BEGIN.\n",
2194 Para.Para32[0], Event))
2195
2196 if (Para.Para32[1] != (SK_U32)-1) {
2197 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
2198 ("Bad Parameter.\n"))
2199 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
2200 ("SK_RLMT_PORTDOWN* Event EMPTY.\n"))
2201 return;
2202 }
2203
2204 pRPort = &pAC->Rlmt.Port[Para.Para32[0]];
2205 if (!pRPort->PortStarted || (Event == SK_RLMT_PORTDOWN_TX_TIM &&
2206 !(pRPort->CheckingState & SK_RLMT_PCS_TX))) {
2207 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
2208 ("SK_RLMT_PORTDOWN* Event (%d) EMPTY.\n", Event))
2209 return;
2210 }
2211
2212 /* Stop port's timers. */
2213 SkTimerStop(pAC, IoC, &pRPort->UpTimer);
2214 SkTimerStop(pAC, IoC, &pRPort->DownRxTimer);
2215 SkTimerStop(pAC, IoC, &pRPort->DownTxTimer);
2216
2217 if (pRPort->PortState != SK_RLMT_PS_LINK_DOWN) {
2218 pRPort->PortState = SK_RLMT_PS_DOWN;
2219 }
2220
2221 if (!pRPort->PortDown) {
2222 pRPort->Net->PortsUp--;
2223 pRPort->PortDown = SK_TRUE;
2224 SkEventQueue(pAC, SKGE_PNMI, SK_PNMI_EVT_RLMT_PORT_DOWN, Para);
2225 }
2226
2227 pRPort->PacketsPerTimeSlot = 0;
2228 /* pRPort->DataPacketsPerTimeSlot = 0; */
2229 pRPort->BpduPacketsPerTimeSlot = 0;
2230 pRPort->BcTimeStamp = 0;
2231
2232 /*
2233 * RA;:;: To be checked:
2234 * - actions at RLMT_STOP: We should not switch anymore.
2235 */
2236 if (pRPort->Net->RlmtState != SK_RLMT_RS_INIT) {
2237 if (Para.Para32[0] ==
2238 pRPort->Net->Port[pRPort->Net->ActivePort]->PortNumber) {
2239 /* Active Port went down. */
2240 SkRlmtCheckSwitch(pAC, IoC, pRPort->Net->NetNumber);
2241 }
2242 }
2243
2244 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
2245 ("SK_RLMT_PORTDOWN* Event (%d) END.\n", Event))
2246} /* SkRlmtEvtPortDownX */
2247
2248
2249/******************************************************************************
2250 *
2251 * SkRlmtEvtLinkDown - LINK_DOWN
2252 *
2253 * Description:
2254 * This routine handles LINK_DOWN events.
2255 *
2256 * Context:
2257 * runtime, pageable?
2258 * may be called after SK_INIT_IO
2259 *
2260 * Returns:
2261 * Nothing
2262 */
2263RLMT_STATIC void SkRlmtEvtLinkDown(
2264SK_AC *pAC, /* Adapter Context */
2265SK_IOC IoC, /* I/O Context */
2266SK_EVPARA Para) /* SK_U32 PortNumber; SK_U32 Undefined */
2267{
2268 SK_RLMT_PORT *pRPort;
2269
2270 pRPort = &pAC->Rlmt.Port[Para.Para32[0]];
2271 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
2272 ("SK_RLMT_LINK_DOWN Port %d Event BEGIN.\n", Para.Para32[0]))
2273
2274 if (!pAC->Rlmt.Port[Para.Para32[0]].LinkDown) {
2275 pRPort->Net->LinksUp--;
2276 pRPort->LinkDown = SK_TRUE;
2277 pRPort->PortState = SK_RLMT_PS_LINK_DOWN;
2278 SK_HWAC_LINK_LED(pAC, IoC, Para.Para32[0], SK_LED_OFF);
2279
2280 if ((pRPort->Net->RlmtMode & SK_RLMT_CHECK_LOC_LINK) != 0) {
2281 /* Build the check chain. */
2282 SkRlmtBuildCheckChain(pAC, pRPort->Net->NetNumber);
2283 }
2284
2285 /* Ensure that port is marked down. */
2286 Para.Para32[1] = -1;
2287 (void)SkRlmtEvent(pAC, IoC, SK_RLMT_PORTDOWN, Para);
2288 }
2289
2290 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
2291 ("SK_RLMT_LINK_DOWN Event END.\n"))
2292} /* SkRlmtEvtLinkDown */
2293
2294
2295/******************************************************************************
2296 *
2297 * SkRlmtEvtPortAddr - PORT_ADDR
2298 *
2299 * Description:
2300 * This routine handles PORT_ADDR events.
2301 *
2302 * Context:
2303 * runtime, pageable?
2304 * may be called after SK_INIT_IO
2305 *
2306 * Returns:
2307 * Nothing
2308 */
2309RLMT_STATIC void SkRlmtEvtPortAddr(
2310SK_AC *pAC, /* Adapter Context */
2311SK_IOC IoC, /* I/O Context */
2312SK_EVPARA Para) /* SK_U32 PortNumber; SK_U32 -1 */
2313{
2314 SK_U32 i, j;
2315 SK_RLMT_PORT *pRPort;
2316 SK_MAC_ADDR *pOldMacAddr;
2317 SK_MAC_ADDR *pNewMacAddr;
2318
2319 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
2320 ("SK_RLMT_PORT_ADDR Port %d Event BEGIN.\n", Para.Para32[0]))
2321
2322 if (Para.Para32[1] != (SK_U32)-1) {
2323 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
2324 ("Bad Parameter.\n"))
2325 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
2326 ("SK_RLMT_PORT_ADDR Event EMPTY.\n"))
2327 return;
2328 }
2329
2330 /* Port's physical MAC address changed. */
2331 pOldMacAddr = &pAC->Addr.Port[Para.Para32[0]].PreviousMacAddress;
2332 pNewMacAddr = &pAC->Addr.Port[Para.Para32[0]].CurrentMacAddress;
2333
2334 /*
2335 * NOTE: This is not scalable for solutions where ports are
2336 * checked remotely. There, we need to send an RLMT
2337 * address change packet - and how do we ensure delivery?
2338 */
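	/* Update every check address that still refers to the old MAC address. */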
2339 for (i = 0; i < (SK_U32)pAC->GIni.GIMacsFound; i++) {
2340 pRPort = &pAC->Rlmt.Port[i];
2341 for (j = 0; j < pRPort->PortsChecked; j++) {
2342 if (SK_ADDR_EQUAL(
2343 pRPort->PortCheck[j].CheckAddr.a, pOldMacAddr->a)) {
2344 pRPort->PortCheck[j].CheckAddr = *pNewMacAddr;
2345 }
2346 }
2347 }
2348
2349 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
2350 ("SK_RLMT_PORT_ADDR Event END.\n"))
2351} /* SkRlmtEvtPortAddr */
2352
2353
2354/******************************************************************************
2355 *
2356 * SkRlmtEvtStart - START
2357 *
2358 * Description:
2359 * This routine handles START events.
2360 *
2361 * Context:
2362 * runtime, pageable?
2363 * may be called after SK_INIT_IO
2364 *
2365 * Returns:
2366 * Nothing
2367 */
2368RLMT_STATIC void SkRlmtEvtStart(
2369SK_AC *pAC, /* Adapter Context */
2370SK_IOC IoC, /* I/O Context */
2371SK_EVPARA Para) /* SK_U32 NetNumber; SK_U32 -1 */
2372{
2373 SK_EVPARA Para2;
2374 SK_U32 PortIdx;
2375 SK_U32 PortNumber;
2376
2377 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
2378 ("SK_RLMT_START Net %d Event BEGIN.\n", Para.Para32[0]))
2379
2380 if (Para.Para32[1] != (SK_U32)-1) {
2381 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
2382 ("Bad Parameter.\n"))
2383 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
2384 ("SK_RLMT_START Event EMPTY.\n"))
2385 return;
2386 }
2387
2388 if (Para.Para32[0] >= pAC->Rlmt.NumNets) {
2389 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
2390 ("Bad NetNumber %d.\n", Para.Para32[0]))
2391 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
2392 ("SK_RLMT_START Event EMPTY.\n"))
2393 return;
2394 }
2395
2396 if (pAC->Rlmt.Net[Para.Para32[0]].RlmtState != SK_RLMT_RS_INIT) {
2397 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
2398 ("SK_RLMT_START Event EMPTY.\n"))
2399 return;
2400 }
2401
2402 if (pAC->Rlmt.NetsStarted >= pAC->Rlmt.NumNets) {
2403 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
2404			("All nets have already been started.\n"))
2405 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
2406 ("SK_RLMT_START Event EMPTY.\n"))
2407 return;
2408 }
2409
2410 if (pAC->Rlmt.Net[Para.Para32[0]].PrefPort >=
2411 pAC->Rlmt.Net[Para.Para32[0]].NumPorts) {
2412 SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_RLMT_E009, SKERR_RLMT_E009_MSG);
2413
2414 /* Change PrefPort to internal default. */
2415 Para2.Para32[0] = 0xFFFFFFFF;
2416 Para2.Para32[1] = Para.Para32[0];
2417 (void)SkRlmtEvent(pAC, IoC, SK_RLMT_PREFPORT_CHANGE, Para2);
2418 }
2419
2420 PortIdx = pAC->Rlmt.Net[Para.Para32[0]].PrefPort;
2421 PortNumber = pAC->Rlmt.Net[Para.Para32[0]].Port[PortIdx]->PortNumber;
2422
2423 pAC->Rlmt.Net[Para.Para32[0]].LinksUp = 0;
2424 pAC->Rlmt.Net[Para.Para32[0]].PortsUp = 0;
2425 pAC->Rlmt.Net[Para.Para32[0]].CheckingState = 0;
2426 pAC->Rlmt.Net[Para.Para32[0]].RlmtState = SK_RLMT_RS_NET_DOWN;
2427
2428 /* Start preferred port. */
2429 SkRlmtPortStart(pAC, IoC, PortNumber);
2430
2431 /* Start Timer (for first port only). */
2432 Para2.Para32[0] = PortNumber;
2433 Para2.Para32[1] = (SK_U32)-1;
2434 SkTimerStart(pAC, IoC, &pAC->Rlmt.Port[PortNumber].UpTimer,
2435 SK_RLMT_PORTSTART_TIM_VAL, SKGE_RLMT, SK_RLMT_PORTSTART_TIM, Para2);
2436
2437 pAC->Rlmt.NetsStarted++;
2438
2439 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
2440 ("SK_RLMT_START Event END.\n"))
2441} /* SkRlmtEvtStart */
2442
2443
2444/******************************************************************************
2445 *
2446 * SkRlmtEvtStop - STOP
2447 *
2448 * Description:
2449 * This routine handles STOP events.
2450 *
2451 * Context:
2452 * runtime, pageable?
2453 * may be called after SK_INIT_IO
2454 *
2455 * Returns:
2456 * Nothing
2457 */
2458RLMT_STATIC void SkRlmtEvtStop(
2459SK_AC *pAC, /* Adapter Context */
2460SK_IOC IoC, /* I/O Context */
2461SK_EVPARA Para) /* SK_U32 NetNumber; SK_U32 -1 */
2462{
2463 SK_EVPARA Para2;
2464 SK_U32 PortNumber;
2465 SK_U32 i;
2466
2467 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
2468 ("SK_RLMT_STOP Net %d Event BEGIN.\n", Para.Para32[0]))
2469
2470 if (Para.Para32[1] != (SK_U32)-1) {
2471 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
2472 ("Bad Parameter.\n"))
2473 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
2474 ("SK_RLMT_STOP Event EMPTY.\n"))
2475 return;
2476 }
2477
2478 if (Para.Para32[0] >= pAC->Rlmt.NumNets) {
2479 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
2480 ("Bad NetNumber %d.\n", Para.Para32[0]))
2481 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
2482 ("SK_RLMT_STOP Event EMPTY.\n"))
2483 return;
2484 }
2485
2486 if (pAC->Rlmt.Net[Para.Para32[0]].RlmtState == SK_RLMT_RS_INIT) {
2487 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
2488 ("SK_RLMT_STOP Event EMPTY.\n"))
2489 return;
2490 }
2491
2492 if (pAC->Rlmt.NetsStarted == 0) {
2493 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
2494 ("All nets are stopped.\n"))
2495 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
2496 ("SK_RLMT_STOP Event EMPTY.\n"))
2497 return;
2498 }
2499
2500 /* Stop RLMT timers. */
2501 SkTimerStop(pAC, IoC, &pAC->Rlmt.Net[Para.Para32[0]].LocTimer);
2502 SkTimerStop(pAC, IoC, &pAC->Rlmt.Net[Para.Para32[0]].SegTimer);
2503
2504 /* Stop net. */
2505 pAC->Rlmt.Net[Para.Para32[0]].RlmtState = SK_RLMT_RS_INIT;
2506 pAC->Rlmt.Net[Para.Para32[0]].RootIdSet = SK_FALSE;
2507 Para2.Para32[0] = SK_RLMT_NET_DOWN_FINAL;
2508 Para2.Para32[1] = Para.Para32[0]; /* Net# */
2509 SkEventQueue(pAC, SKGE_DRV, SK_DRV_NET_DOWN, Para2);
2510
2511 /* Stop ports. */
2512 for (i = 0; i < pAC->Rlmt.Net[Para.Para32[0]].NumPorts; i++) {
2513 PortNumber = pAC->Rlmt.Net[Para.Para32[0]].Port[i]->PortNumber;
2514 if (pAC->Rlmt.Port[PortNumber].PortState != SK_RLMT_PS_INIT) {
2515 SkTimerStop(pAC, IoC, &pAC->Rlmt.Port[PortNumber].UpTimer);
2516 SkTimerStop(pAC, IoC, &pAC->Rlmt.Port[PortNumber].DownRxTimer);
2517 SkTimerStop(pAC, IoC, &pAC->Rlmt.Port[PortNumber].DownTxTimer);
2518
2519 pAC->Rlmt.Port[PortNumber].PortState = SK_RLMT_PS_INIT;
2520 pAC->Rlmt.Port[PortNumber].RootIdSet = SK_FALSE;
2521 pAC->Rlmt.Port[PortNumber].PortStarted = SK_FALSE;
2522 Para2.Para32[0] = PortNumber;
2523 Para2.Para32[1] = (SK_U32)-1;
2524 SkEventQueue(pAC, SKGE_HWAC, SK_HWEV_PORT_STOP, Para2);
2525 }
2526 }
2527
2528 pAC->Rlmt.NetsStarted--;
2529
2530 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
2531 ("SK_RLMT_STOP Event END.\n"))
2532} /* SkRlmtEvtStop */
2533
2534
2535/******************************************************************************
2536 *
2537 * SkRlmtEvtTim - TIM
2538 *
2539 * Description:
2540 * This routine handles TIM events.
2541 *
2542 * Context:
2543 * runtime, pageable?
2544 * may be called after SK_INIT_IO
2545 *
2546 * Returns:
2547 * Nothing
2548 */
2549RLMT_STATIC void SkRlmtEvtTim(
2550SK_AC *pAC, /* Adapter Context */
2551SK_IOC IoC, /* I/O Context */
2552SK_EVPARA Para) /* SK_U32 NetNumber; SK_U32 -1 */
2553{
2554 SK_RLMT_PORT *pRPort;
2555 SK_U32 Timeout;
2556 SK_U32 NewTimeout;
2557 SK_U32 PortNumber;
2558 SK_U32 i;
2559
2560 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
2561 ("SK_RLMT_TIM Event BEGIN.\n"))
2562
2563 if (Para.Para32[1] != (SK_U32)-1) {
2564 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
2565 ("Bad Parameter.\n"))
2566 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
2567 ("SK_RLMT_TIM Event EMPTY.\n"))
2568 return;
2569 }
2570
2571 if ((pAC->Rlmt.Net[Para.Para32[0]].RlmtMode & SK_RLMT_CHECK_OTHERS) == 0 ||
2572 pAC->Rlmt.Net[Para.Para32[0]].LinksUp == 0) {
2573 /* Mode changed or all links down: No more link checking. */
2574 return;
2575 }
2576
2577#if 0
2578 pAC->Rlmt.SwitchCheckCounter--;
2579 if (pAC->Rlmt.SwitchCheckCounter == 0) {
2580 pAC->Rlmt.SwitchCheckCounter;
2581 }
2582#endif /* 0 */
2583
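	/*
	 * Check every link-up port of the net; the shortest timeout requested
	 * by any port becomes the new timeout value for the whole net.
	 */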
2584 NewTimeout = SK_RLMT_DEF_TO_VAL;
2585 for (i = 0; i < pAC->Rlmt.Net[Para.Para32[0]].NumPorts; i++) {
2586 PortNumber = pAC->Rlmt.Net[Para.Para32[0]].Port[i]->PortNumber;
2587 pRPort = &pAC->Rlmt.Port[PortNumber];
2588 if (!pRPort->LinkDown) {
2589 Timeout = SkRlmtCheckPort(pAC, IoC, PortNumber);
2590 if (Timeout < NewTimeout) {
2591 NewTimeout = Timeout;
2592 }
2593
2594 /*
2595 * These counters should be set to 0 for all ports before the
2596 * first frame is sent in the next loop.
2597 */
2598 pRPort->PacketsPerTimeSlot = 0;
2599 /* pRPort->DataPacketsPerTimeSlot = 0; */
2600 pRPort->BpduPacketsPerTimeSlot = 0;
2601 }
2602 }
2603 pAC->Rlmt.Net[Para.Para32[0]].TimeoutValue = NewTimeout;
2604
2605 if (pAC->Rlmt.Net[Para.Para32[0]].LinksUp > 1) {
2606 /*
2607 * If checking remote ports, also send packets if
2608 * (LinksUp == 1) &&
2609 * this port checks at least one (remote) port.
2610 */
2611
2612 /*
2613 * Must be new loop, as SkRlmtCheckPort can request to
2614 * check segmentation when e.g. checking the last port.
2615 */
2616 for (i = 0; i < pAC->Rlmt.Net[Para.Para32[0]].NumPorts; i++) {
2617 if (!pAC->Rlmt.Net[Para.Para32[0]].Port[i]->LinkDown) {
2618 SkRlmtSend(pAC, IoC,
2619 pAC->Rlmt.Net[Para.Para32[0]].Port[i]->PortNumber);
2620 }
2621 }
2622 }
2623
2624 SkTimerStart(pAC, IoC, &pAC->Rlmt.Net[Para.Para32[0]].LocTimer,
2625 pAC->Rlmt.Net[Para.Para32[0]].TimeoutValue, SKGE_RLMT, SK_RLMT_TIM,
2626 Para);
2627
2628 if (pAC->Rlmt.Net[Para.Para32[0]].LinksUp > 1 &&
2629 (pAC->Rlmt.Net[Para.Para32[0]].RlmtMode & SK_RLMT_CHECK_SEG) &&
2630 (pAC->Rlmt.Net[Para.Para32[0]].CheckingState & SK_RLMT_RCS_START_SEG)) {
2631 SkTimerStart(pAC, IoC, &pAC->Rlmt.Net[Para.Para32[0]].SegTimer,
2632 SK_RLMT_SEG_TO_VAL, SKGE_RLMT, SK_RLMT_SEG_TIM, Para);
2633 pAC->Rlmt.Net[Para.Para32[0]].CheckingState &= ~SK_RLMT_RCS_START_SEG;
2634 pAC->Rlmt.Net[Para.Para32[0]].CheckingState |=
2635 SK_RLMT_RCS_SEG | SK_RLMT_RCS_REPORT_SEG;
2636 }
2637
2638 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
2639 ("SK_RLMT_TIM Event END.\n"))
2640} /* SkRlmtEvtTim */
2641
2642
2643/******************************************************************************
2644 *
2645 * SkRlmtEvtSegTim - SEG_TIM
2646 *
2647 * Description:
2648 * This routine handles SEG_TIM events.
2649 *
2650 * Context:
2651 * runtime, pageable?
2652 * may be called after SK_INIT_IO
2653 *
2654 * Returns:
2655 * Nothing
2656 */
2657RLMT_STATIC void SkRlmtEvtSegTim(
2658SK_AC *pAC, /* Adapter Context */
2659SK_IOC IoC, /* I/O Context */
2660SK_EVPARA Para) /* SK_U32 NetNumber; SK_U32 -1 */
2661{
2662#ifdef xDEBUG
2663 int j;
2664#endif	/* xDEBUG */
2665
2666 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
2667 ("SK_RLMT_SEG_TIM Event BEGIN.\n"))
2668
2669 if (Para.Para32[1] != (SK_U32)-1) {
2670 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
2671 ("Bad Parameter.\n"))
2672 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
2673 ("SK_RLMT_SEG_TIM Event EMPTY.\n"))
2674 return;
2675 }
2676
2677#ifdef xDEBUG
2678 for (j = 0; j < pAC->Rlmt.Net[Para.Para32[0]].NumPorts; j++) {
2679 SK_ADDR_PORT *pAPort;
2680 SK_U32 k;
2681 SK_U16 *InAddr;
2682 SK_U8 InAddr8[6];
2683
2684 InAddr = (SK_U16 *)&InAddr8[0];
2685 pAPort = pAC->Rlmt.Net[Para.Para32[0]].Port[j]->AddrPort;
2686 for (k = 0; k < pAPort->NextExactMatchRlmt; k++) {
2687 /* Get exact match address k from port j. */
2688 XM_INADDR(IoC, pAC->Rlmt.Net[Para.Para32[0]].Port[j]->PortNumber,
2689 XM_EXM(k), InAddr);
2690 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
2691 ("MC address %d on Port %u: %02x %02x %02x %02x %02x %02x -- %02x %02x %02x %02x %02x %02x.\n",
2692 k, pAC->Rlmt.Net[Para.Para32[0]].Port[j]->PortNumber,
2693 InAddr8[0], InAddr8[1], InAddr8[2],
2694 InAddr8[3], InAddr8[4], InAddr8[5],
2695 pAPort->Exact[k].a[0], pAPort->Exact[k].a[1],
2696 pAPort->Exact[k].a[2], pAPort->Exact[k].a[3],
2697 pAPort->Exact[k].a[4], pAPort->Exact[k].a[5]))
2698 }
2699 }
2700#endif /* xDEBUG */
2701
2702 SkRlmtCheckSeg(pAC, IoC, Para.Para32[0]);
2703
2704 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
2705 ("SK_RLMT_SEG_TIM Event END.\n"))
2706} /* SkRlmtEvtSegTim */
2707
2708
2709/******************************************************************************
2710 *
2711 * SkRlmtEvtPacketRx - PACKET_RECEIVED
2712 *
2713 * Description:
2714 * This routine handles PACKET_RECEIVED events.
2715 *
2716 * Context:
2717 * runtime, pageable?
2718 * may be called after SK_INIT_IO
2719 *
2720 * Returns:
2721 * Nothing
2722 */
2723RLMT_STATIC void SkRlmtEvtPacketRx(
2724SK_AC *pAC, /* Adapter Context */
2725SK_IOC IoC, /* I/O Context */
2726SK_EVPARA Para) /* SK_MBUF *pMb */
2727{
2728 SK_MBUF *pMb;
2729 SK_MBUF *pNextMb;
2730 SK_U32 NetNumber;
2731
2732
2733 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
2734 ("SK_RLMT_PACKET_RECEIVED Event BEGIN.\n"))
2735
2736 /* Should we ignore frames during port switching? */
2737
2738#ifdef DEBUG
2739 pMb = Para.pParaPtr;
2740 if (pMb == NULL) {
2741 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL, ("No mbuf.\n"))
2742 }
2743 else if (pMb->pNext != NULL) {
2744 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
2745			("More than one mbuf or pMb->pNext not cleared.\n"))
2746 }
2747#endif /* DEBUG */
2748
2749 for (pMb = Para.pParaPtr; pMb != NULL; pMb = pNextMb) {
2750 pNextMb = pMb->pNext;
2751 pMb->pNext = NULL;
2752
2753 NetNumber = pAC->Rlmt.Port[pMb->PortIdx].Net->NetNumber;
2754 if (pAC->Rlmt.Net[NetNumber].RlmtState == SK_RLMT_RS_INIT) {
2755 SkDrvFreeRlmtMbuf(pAC, IoC, pMb);
2756 }
2757 else {
2758 SkRlmtPacketReceive(pAC, IoC, pMb);
2759 }
2760 }
2761
2762 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
2763 ("SK_RLMT_PACKET_RECEIVED Event END.\n"))
2764} /* SkRlmtEvtPacketRx */
2765
2766
2767/******************************************************************************
2768 *
2769 * SkRlmtEvtStatsClear - STATS_CLEAR
2770 *
2771 * Description:
2772 * This routine handles STATS_CLEAR events.
2773 *
2774 * Context:
2775 * runtime, pageable?
2776 * may be called after SK_INIT_IO
2777 *
2778 * Returns:
2779 * Nothing
2780 */
2781RLMT_STATIC void SkRlmtEvtStatsClear(
2782SK_AC *pAC, /* Adapter Context */
2783SK_IOC IoC, /* I/O Context */
2784SK_EVPARA Para) /* SK_U32 NetNumber; SK_U32 -1 */
2785{
2786 SK_U32 i;
2787 SK_RLMT_PORT *pRPort;
2788
2789 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
2790 ("SK_RLMT_STATS_CLEAR Event BEGIN.\n"))
2791
2792 if (Para.Para32[1] != (SK_U32)-1) {
2793 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
2794 ("Bad Parameter.\n"))
2795 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
2796 ("SK_RLMT_STATS_CLEAR Event EMPTY.\n"))
2797 return;
2798 }
2799
2800 if (Para.Para32[0] >= pAC->Rlmt.NumNets) {
2801 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
2802 ("Bad NetNumber %d.\n", Para.Para32[0]))
2803 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
2804 ("SK_RLMT_STATS_CLEAR Event EMPTY.\n"))
2805 return;
2806 }
2807
2808 /* Clear statistics for logical and physical ports. */
2809 for (i = 0; i < pAC->Rlmt.Net[Para.Para32[0]].NumPorts; i++) {
2810 pRPort =
2811 &pAC->Rlmt.Port[pAC->Rlmt.Net[Para.Para32[0]].Port[i]->PortNumber];
2812 pRPort->TxHelloCts = 0;
2813 pRPort->RxHelloCts = 0;
2814 pRPort->TxSpHelloReqCts = 0;
2815 pRPort->RxSpHelloCts = 0;
2816 }
2817
2818 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
2819 ("SK_RLMT_STATS_CLEAR Event END.\n"))
2820} /* SkRlmtEvtStatsClear */
2821
2822
2823/******************************************************************************
2824 *
2825 * SkRlmtEvtStatsUpdate - STATS_UPDATE
2826 *
2827 * Description:
2828 * This routine handles STATS_UPDATE events.
2829 *
2830 * Context:
2831 * runtime, pageable?
2832 * may be called after SK_INIT_IO
2833 *
2834 * Returns:
2835 * Nothing
2836 */
2837RLMT_STATIC void SkRlmtEvtStatsUpdate(
2838SK_AC *pAC, /* Adapter Context */
2839SK_IOC IoC, /* I/O Context */
2840SK_EVPARA Para) /* SK_U32 NetNumber; SK_U32 -1 */
2841{
2842 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
2843 ("SK_RLMT_STATS_UPDATE Event BEGIN.\n"))
2844
2845 if (Para.Para32[1] != (SK_U32)-1) {
2846 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
2847 ("Bad Parameter.\n"))
2848 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
2849 ("SK_RLMT_STATS_UPDATE Event EMPTY.\n"))
2850 return;
2851 }
2852
2853 if (Para.Para32[0] >= pAC->Rlmt.NumNets) {
2854 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
2855 ("Bad NetNumber %d.\n", Para.Para32[0]))
2856 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
2857 ("SK_RLMT_STATS_UPDATE Event EMPTY.\n"))
2858 return;
2859 }
2860
2861 /* Update statistics - currently always up-to-date. */
2862
2863 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
2864 ("SK_RLMT_STATS_UPDATE Event END.\n"))
2865} /* SkRlmtEvtStatsUpdate */
2866
2867
2868/******************************************************************************
2869 *
2870 * SkRlmtEvtPrefportChange - PREFPORT_CHANGE
2871 *
2872 * Description:
2873 * This routine handles PREFPORT_CHANGE events.
2874 *
2875 * Context:
2876 * runtime, pageable?
2877 * may be called after SK_INIT_IO
2878 *
2879 * Returns:
2880 * Nothing
2881 */
2882RLMT_STATIC void SkRlmtEvtPrefportChange(
2883SK_AC *pAC, /* Adapter Context */
2884SK_IOC IoC, /* I/O Context */
2885SK_EVPARA Para) /* SK_U32 PortIndex; SK_U32 NetNumber */
2886{
2887 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
2888 ("SK_RLMT_PREFPORT_CHANGE to Port %d Event BEGIN.\n", Para.Para32[0]))
2889
2890 if (Para.Para32[1] >= pAC->Rlmt.NumNets) {
2891 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
2892 ("Bad NetNumber %d.\n", Para.Para32[1]))
2893 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
2894 ("SK_RLMT_PREFPORT_CHANGE Event EMPTY.\n"))
2895 return;
2896 }
2897
2898 /* 0xFFFFFFFF == auto-mode. */
2899 if (Para.Para32[0] == 0xFFFFFFFF) {
2900 pAC->Rlmt.Net[Para.Para32[1]].PrefPort = SK_RLMT_DEF_PREF_PORT;
2901 }
2902 else {
2903 if (Para.Para32[0] >= pAC->Rlmt.Net[Para.Para32[1]].NumPorts) {
2904 SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_RLMT_E010, SKERR_RLMT_E010_MSG);
2905
2906 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
2907 ("SK_RLMT_PREFPORT_CHANGE Event EMPTY.\n"))
2908 return;
2909 }
2910
2911 pAC->Rlmt.Net[Para.Para32[1]].PrefPort = Para.Para32[0];
2912 }
2913
2914 pAC->Rlmt.Net[Para.Para32[1]].Preference = Para.Para32[0];
2915
2916 if (pAC->Rlmt.Net[Para.Para32[1]].RlmtState != SK_RLMT_RS_INIT) {
2917 SkRlmtCheckSwitch(pAC, IoC, Para.Para32[1]);
2918 }
2919
2920 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
2921 ("SK_RLMT_PREFPORT_CHANGE Event END.\n"))
2922} /* SkRlmtEvtPrefportChange */
2923
2924
2925/******************************************************************************
2926 *
2927 * SkRlmtEvtSetNets - SET_NETS
2928 *
2929 * Description:
2930 * This routine handles SET_NETS events.
2931 *
2932 * Context:
2933 * runtime, pageable?
2934 * may be called after SK_INIT_IO
2935 *
2936 * Returns:
2937 * Nothing
2938 */
2939RLMT_STATIC void SkRlmtEvtSetNets(
2940SK_AC *pAC, /* Adapter Context */
2941SK_IOC IoC, /* I/O Context */
2942SK_EVPARA Para) /* SK_U32 NumNets; SK_U32 -1 */
2943{
2944 int i;
2945
2946 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
2947 ("SK_RLMT_SET_NETS Event BEGIN.\n"))
2948
2949 if (Para.Para32[1] != (SK_U32)-1) {
2950 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
2951 ("Bad Parameter.\n"))
2952 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
2953 ("SK_RLMT_SET_NETS Event EMPTY.\n"))
2954 return;
2955 }
2956
2957 if (Para.Para32[0] == 0 || Para.Para32[0] > SK_MAX_NETS ||
2958 Para.Para32[0] > (SK_U32)pAC->GIni.GIMacsFound) {
2959 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
2960 ("Bad number of nets: %d.\n", Para.Para32[0]))
2961 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
2962 ("SK_RLMT_SET_NETS Event EMPTY.\n"))
2963 return;
2964 }
2965
2966 if (Para.Para32[0] == pAC->Rlmt.NumNets) { /* No change. */
2967 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
2968 ("SK_RLMT_SET_NETS Event EMPTY.\n"))
2969 return;
2970 }
2971
2972 /* Entering and leaving dual mode only allowed while nets are stopped. */
2973 if (pAC->Rlmt.NetsStarted > 0) {
2974 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
2975 ("Changing dual mode only allowed while all nets are stopped.\n"))
2976 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
2977 ("SK_RLMT_SET_NETS Event EMPTY.\n"))
2978 return;
2979 }
2980
2981 if (Para.Para32[0] == 1) {
2982 if (pAC->Rlmt.NumNets > 1) {
2983 /* Clear logical MAC addr from second net's active port. */
2984 (void)SkAddrOverride(pAC, IoC, pAC->Rlmt.Net[1].Port[pAC->Addr.
2985 Net[1].ActivePort]->PortNumber, NULL, SK_ADDR_CLEAR_LOGICAL);
2986 pAC->Rlmt.Net[1].NumPorts = 0;
2987 }
2988
2989 pAC->Rlmt.NumNets = Para.Para32[0];
2990 for (i = 0; (SK_U32)i < pAC->Rlmt.NumNets; i++) {
2991 pAC->Rlmt.Net[i].RlmtState = SK_RLMT_RS_INIT;
2992 pAC->Rlmt.Net[i].RootIdSet = SK_FALSE;
2993 pAC->Rlmt.Net[i].Preference = 0xFFFFFFFF; /* "Automatic" */
2994 pAC->Rlmt.Net[i].PrefPort = SK_RLMT_DEF_PREF_PORT;
2995 /* Just assuming. */
2996 pAC->Rlmt.Net[i].ActivePort = pAC->Rlmt.Net[i].PrefPort;
2997 pAC->Rlmt.Net[i].RlmtMode = SK_RLMT_DEF_MODE;
2998 pAC->Rlmt.Net[i].TimeoutValue = SK_RLMT_DEF_TO_VAL;
2999 pAC->Rlmt.Net[i].NetNumber = i;
3000 }
3001
3002		pAC->Rlmt.Port[1].Net = &pAC->Rlmt.Net[0];
3003 pAC->Rlmt.Net[0].NumPorts = pAC->GIni.GIMacsFound;
3004
3005 SkEventQueue(pAC, SKGE_PNMI, SK_PNMI_EVT_RLMT_SET_NETS, Para);
3006
3007 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
3008 ("RLMT: Changed to one net with two ports.\n"))
3009 }
3010 else if (Para.Para32[0] == 2) {
3011		pAC->Rlmt.Port[1].Net = &pAC->Rlmt.Net[1];
3012 pAC->Rlmt.Net[1].NumPorts = pAC->GIni.GIMacsFound - 1;
3013 pAC->Rlmt.Net[0].NumPorts =
3014 pAC->GIni.GIMacsFound - pAC->Rlmt.Net[1].NumPorts;
3015
3016 pAC->Rlmt.NumNets = Para.Para32[0];
3017 for (i = 0; (SK_U32)i < pAC->Rlmt.NumNets; i++) {
3018 pAC->Rlmt.Net[i].RlmtState = SK_RLMT_RS_INIT;
3019 pAC->Rlmt.Net[i].RootIdSet = SK_FALSE;
3020 pAC->Rlmt.Net[i].Preference = 0xFFFFFFFF; /* "Automatic" */
3021 pAC->Rlmt.Net[i].PrefPort = SK_RLMT_DEF_PREF_PORT;
3022 /* Just assuming. */
3023 pAC->Rlmt.Net[i].ActivePort = pAC->Rlmt.Net[i].PrefPort;
3024 pAC->Rlmt.Net[i].RlmtMode = SK_RLMT_DEF_MODE;
3025 pAC->Rlmt.Net[i].TimeoutValue = SK_RLMT_DEF_TO_VAL;
3026
3027 pAC->Rlmt.Net[i].NetNumber = i;
3028 }
3029
3030 /* Set logical MAC addr on second net's active port. */
3031 (void)SkAddrOverride(pAC, IoC, pAC->Rlmt.Net[1].Port[pAC->Addr.
3032 Net[1].ActivePort]->PortNumber, NULL, SK_ADDR_SET_LOGICAL);
3033
3034 SkEventQueue(pAC, SKGE_PNMI, SK_PNMI_EVT_RLMT_SET_NETS, Para);
3035
3036 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
3037 ("RLMT: Changed to two nets with one port each.\n"))
3038 }
3039 else {
3040 /* Not implemented for more than two nets. */
3041 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
3042 ("SetNets not implemented for more than two nets.\n"))
3043 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
3044 ("SK_RLMT_SET_NETS Event EMPTY.\n"))
3045 return;
3046 }
3047
3048 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
3049 ("SK_RLMT_SET_NETS Event END.\n"))
3050}	/* SkRlmtEvtSetNets */
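/*
 * Illustrative sketch, not part of the original sources: how a driver
 * could request the dual-net switch handled by SkRlmtEvtSetNets() above.
 * The parameter convention is { number of nets, (SK_U32)-1 }. The event
 * is queued with class SKGE_RLMT so that the event scheduler dispatches
 * it to SkRlmtEvent(); pAC is assumed to be a valid adapter context and
 * all nets are assumed to be stopped, as required above.
 */
static void ExampleEnterDualNetMode(
SK_AC	*pAC)	/* Adapter Context */
{
	SK_EVPARA	Para;

	Para.Para32[0] = 2;		/* two nets with one port each */
	Para.Para32[1] = (SK_U32)-1;	/* required marker, checked above */
	SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_SET_NETS, Para);
}	/* ExampleEnterDualNetMode */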
3051
3052
3053/******************************************************************************
3054 *
3055 * SkRlmtEvtModeChange - MODE_CHANGE
3056 *
3057 * Description:
3058 * This routine handles MODE_CHANGE events.
3059 *
3060 * Context:
3061 * runtime, pageable?
3062 * may be called after SK_INIT_IO
3063 *
3064 * Returns:
3065 * Nothing
3066 */
3067RLMT_STATIC void SkRlmtEvtModeChange(
3068SK_AC *pAC, /* Adapter Context */
3069SK_IOC IoC, /* I/O Context */
3070SK_EVPARA Para) /* SK_U32 NewMode; SK_U32 NetNumber */
3071{
3072 SK_EVPARA Para2;
3073 SK_U32 i;
3074 SK_U32 PrevRlmtMode;
3075
3076 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
3077 ("SK_RLMT_MODE_CHANGE Event BEGIN.\n"))
3078
3079 if (Para.Para32[1] >= pAC->Rlmt.NumNets) {
3080 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
3081 ("Bad NetNumber %d.\n", Para.Para32[1]))
3082 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
3083 ("SK_RLMT_MODE_CHANGE Event EMPTY.\n"))
3084 return;
3085 }
3086
3087 Para.Para32[0] |= SK_RLMT_CHECK_LINK;
3088
3089 if ((pAC->Rlmt.Net[Para.Para32[1]].NumPorts == 1) &&
3090 Para.Para32[0] != SK_RLMT_MODE_CLS) {
3091 pAC->Rlmt.Net[Para.Para32[1]].RlmtMode = SK_RLMT_MODE_CLS;
3092 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
3093 ("Forced RLMT mode to CLS on single port net.\n"))
3094 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
3095 ("SK_RLMT_MODE_CHANGE Event EMPTY.\n"))
3096 return;
3097 }
3098
3099 /* Update RLMT mode. */
3100 PrevRlmtMode = pAC->Rlmt.Net[Para.Para32[1]].RlmtMode;
3101 pAC->Rlmt.Net[Para.Para32[1]].RlmtMode = Para.Para32[0];
3102
3103 if ((PrevRlmtMode & SK_RLMT_CHECK_LOC_LINK) !=
3104 (pAC->Rlmt.Net[Para.Para32[1]].RlmtMode & SK_RLMT_CHECK_LOC_LINK)) {
3105 /* SK_RLMT_CHECK_LOC_LINK bit changed. */
3106 if ((PrevRlmtMode & SK_RLMT_CHECK_OTHERS) == 0 &&
3107 pAC->Rlmt.Net[Para.Para32[1]].NumPorts > 1 &&
3108 pAC->Rlmt.Net[Para.Para32[1]].PortsUp >= 1) {
3109 /* 20001207 RA: Was "PortsUp == 1". */
3110 Para2.Para32[0] = Para.Para32[1];
3111 Para2.Para32[1] = (SK_U32)-1;
3112 SkTimerStart(pAC, IoC, &pAC->Rlmt.Net[Para.Para32[1]].LocTimer,
3113 pAC->Rlmt.Net[Para.Para32[1]].TimeoutValue,
3114 SKGE_RLMT, SK_RLMT_TIM, Para2);
3115 }
3116 }
3117
3118 if ((PrevRlmtMode & SK_RLMT_CHECK_SEG) !=
3119 (pAC->Rlmt.Net[Para.Para32[1]].RlmtMode & SK_RLMT_CHECK_SEG)) {
3120 /* SK_RLMT_CHECK_SEG bit changed. */
3121 for (i = 0; i < pAC->Rlmt.Net[Para.Para32[1]].NumPorts; i++) {
3122 (void)SkAddrMcClear(pAC, IoC,
3123 pAC->Rlmt.Net[Para.Para32[1]].Port[i]->PortNumber,
3124 SK_ADDR_PERMANENT | SK_MC_SW_ONLY);
3125
3126 /* Add RLMT MC address. */
3127 (void)SkAddrMcAdd(pAC, IoC,
3128 pAC->Rlmt.Net[Para.Para32[1]].Port[i]->PortNumber,
3129 &SkRlmtMcAddr, SK_ADDR_PERMANENT);
3130
3131 if ((pAC->Rlmt.Net[Para.Para32[1]].RlmtMode &
3132 SK_RLMT_CHECK_SEG) != 0) {
3133 /* Add BPDU MC address. */
3134 (void)SkAddrMcAdd(pAC, IoC,
3135 pAC->Rlmt.Net[Para.Para32[1]].Port[i]->PortNumber,
3136 &BridgeMcAddr, SK_ADDR_PERMANENT);
3137
3138 if (pAC->Rlmt.Net[Para.Para32[1]].RlmtState != SK_RLMT_RS_INIT) {
3139 if (!pAC->Rlmt.Net[Para.Para32[1]].Port[i]->LinkDown &&
3140 (Para2.pParaPtr = SkRlmtBuildSpanningTreePacket(
3141 pAC, IoC, i)) != NULL) {
3142 pAC->Rlmt.Net[Para.Para32[1]].Port[i]->RootIdSet =
3143 SK_FALSE;
3144 SkEventQueue(pAC, SKGE_DRV, SK_DRV_RLMT_SEND, Para2);
3145 }
3146 }
3147 }
3148 (void)SkAddrMcUpdate(pAC, IoC,
3149 pAC->Rlmt.Net[Para.Para32[1]].Port[i]->PortNumber);
3150 } /* for ... */
3151
3152 if ((pAC->Rlmt.Net[Para.Para32[1]].RlmtMode & SK_RLMT_CHECK_SEG) != 0) {
3153 Para2.Para32[0] = Para.Para32[1];
3154 Para2.Para32[1] = (SK_U32)-1;
3155 SkTimerStart(pAC, IoC, &pAC->Rlmt.Net[Para.Para32[1]].SegTimer,
3156 SK_RLMT_SEG_TO_VAL, SKGE_RLMT, SK_RLMT_SEG_TIM, Para2);
3157 }
3158 } /* SK_RLMT_CHECK_SEG bit changed. */
3159
3160 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
3161 ("SK_RLMT_MODE_CHANGE Event END.\n"))
3162} /* SkRlmtEvtModeChange */
3163
3164
3165/******************************************************************************
3166 *
3167 * SkRlmtEvent - a PORT- or an RLMT-specific event happened
3168 *
3169 * Description:
3170 * This routine calls subroutines to handle PORT- and RLMT-specific events.
3171 *
3172 * Context:
3173 * runtime, pageable?
3174 * may be called after SK_INIT_IO
3175 *
3176 * Returns:
3177 * 0
3178 */
3179int SkRlmtEvent(
3180SK_AC *pAC, /* Adapter Context */
3181SK_IOC IoC, /* I/O Context */
3182SK_U32 Event, /* Event code */
3183SK_EVPARA Para) /* Event-specific parameter */
3184{
3185 switch (Event) {
3186
3187 /* ----- PORT events ----- */
3188
3189 case SK_RLMT_PORTSTART_TIM: /* From RLMT via TIME. */
3190 SkRlmtEvtPortStartTim(pAC, IoC, Para);
3191 break;
3192 case SK_RLMT_LINK_UP: /* From SIRQ. */
3193 SkRlmtEvtLinkUp(pAC, IoC, Para);
3194 break;
3195 case SK_RLMT_PORTUP_TIM: /* From RLMT via TIME. */
3196 SkRlmtEvtPortUpTim(pAC, IoC, Para);
3197 break;
3198 case SK_RLMT_PORTDOWN: /* From RLMT. */
3199 case SK_RLMT_PORTDOWN_RX_TIM: /* From RLMT via TIME. */
3200 case SK_RLMT_PORTDOWN_TX_TIM: /* From RLMT via TIME. */
3201 SkRlmtEvtPortDownX(pAC, IoC, Event, Para);
3202 break;
3203 case SK_RLMT_LINK_DOWN: /* From SIRQ. */
3204 SkRlmtEvtLinkDown(pAC, IoC, Para);
3205 break;
3206 case SK_RLMT_PORT_ADDR: /* From ADDR. */
3207 SkRlmtEvtPortAddr(pAC, IoC, Para);
3208 break;
3209
3210 /* ----- RLMT events ----- */
3211
3212 case SK_RLMT_START: /* From DRV. */
3213 SkRlmtEvtStart(pAC, IoC, Para);
3214 break;
3215 case SK_RLMT_STOP: /* From DRV. */
3216 SkRlmtEvtStop(pAC, IoC, Para);
3217 break;
3218 case SK_RLMT_TIM: /* From RLMT via TIME. */
3219 SkRlmtEvtTim(pAC, IoC, Para);
3220 break;
3221 case SK_RLMT_SEG_TIM:
3222 SkRlmtEvtSegTim(pAC, IoC, Para);
3223 break;
3224 case SK_RLMT_PACKET_RECEIVED: /* From DRV. */
3225 SkRlmtEvtPacketRx(pAC, IoC, Para);
3226 break;
3227 case SK_RLMT_STATS_CLEAR: /* From PNMI. */
3228 SkRlmtEvtStatsClear(pAC, IoC, Para);
3229 break;
3230 case SK_RLMT_STATS_UPDATE: /* From PNMI. */
3231 SkRlmtEvtStatsUpdate(pAC, IoC, Para);
3232 break;
3233 case SK_RLMT_PREFPORT_CHANGE: /* From PNMI. */
3234 SkRlmtEvtPrefportChange(pAC, IoC, Para);
3235 break;
3236 case SK_RLMT_MODE_CHANGE: /* From PNMI. */
3237 SkRlmtEvtModeChange(pAC, IoC, Para);
3238 break;
3239 case SK_RLMT_SET_NETS: /* From DRV. */
3240 SkRlmtEvtSetNets(pAC, IoC, Para);
3241 break;
3242
3243 /* ----- Unknown events ----- */
3244
3245 default: /* Create error log entry. */
3246 SK_DBG_MSG(pAC, SK_DBGMOD_RLMT, SK_DBGCAT_CTRL,
3247 ("Unknown RLMT Event %d.\n", Event))
3248 SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_RLMT_E003, SKERR_RLMT_E003_MSG);
3249 break;
3250 } /* switch() */
3251
3252 return (0);
3253} /* SkRlmtEvent */
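/*
 * Illustrative sketch, not part of the original sources: a direct call
 * into the dispatcher above, here clearing the RLMT statistics of net 0.
 * Most net-related events use the convention Para32[0] = NetNumber,
 * Para32[1] = (SK_U32)-1; pAC and IoC are assumed to be valid contexts.
 */
static void ExampleClearRlmtStats(
SK_AC	*pAC,	/* Adapter Context */
SK_IOC	IoC)	/* I/O Context */
{
	SK_EVPARA	Para;

	Para.Para32[0] = 0;		/* NetNumber */
	Para.Para32[1] = (SK_U32)-1;
	(void)SkRlmtEvent(pAC, IoC, SK_RLMT_STATS_CLEAR, Para);
}	/* ExampleClearRlmtStats */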
3254
3255#ifdef __cplusplus
3256}
3257#endif /* __cplusplus */
diff --git a/drivers/net/sk98lin/sktimer.c b/drivers/net/sk98lin/sktimer.c
deleted file mode 100644
index 4e462955ecd8..000000000000
--- a/drivers/net/sk98lin/sktimer.c
+++ /dev/null
@@ -1,250 +0,0 @@
1/******************************************************************************
2 *
3 * Name: sktimer.c
4 * Project: Gigabit Ethernet Adapters, Event Scheduler Module
5 * Version: $Revision: 1.14 $
6 * Date: $Date: 2003/09/16 13:46:51 $
7 * Purpose: High level timer functions.
8 *
9 ******************************************************************************/
10
11/******************************************************************************
12 *
13 * (C)Copyright 1998-2002 SysKonnect GmbH.
14 * (C)Copyright 2002-2003 Marvell.
15 *
16 * This program is free software; you can redistribute it and/or modify
17 * it under the terms of the GNU General Public License as published by
18 * the Free Software Foundation; either version 2 of the License, or
19 * (at your option) any later version.
20 *
21 * The information in this file is provided "AS IS" without warranty.
22 *
23 ******************************************************************************/
24
25
26/*
27 * Event queue and dispatcher
28 */
29#if (defined(DEBUG) || ((!defined(LINT)) && (!defined(SK_SLIM))))
30static const char SysKonnectFileId[] =
31 "@(#) $Id: sktimer.c,v 1.14 2003/09/16 13:46:51 rschmidt Exp $ (C) Marvell.";
32#endif
33
34#include "h/skdrv1st.h" /* Driver Specific Definitions */
35#include "h/skdrv2nd.h" /* Adapter Control- and Driver specific Def. */
36
37#ifdef __C2MAN__
38/*
39 Event queue management.
40
41 General Description:
42
43 */
44intro()
45{}
46#endif
47
48
49/* Forward declaration */
50static void timer_done(SK_AC *pAC,SK_IOC Ioc,int Restart);
51
52
53/*
54 * Inits the software timer
55 *
56 * needs to be called during Init level 1.
57 */
58void SkTimerInit(
59SK_AC *pAC, /* Adapters context */
60SK_IOC Ioc, /* IoContext */
61int Level) /* Init Level */
62{
63 switch (Level) {
64 case SK_INIT_DATA:
65 pAC->Tim.StQueue = NULL;
66 break;
67 case SK_INIT_IO:
68 SkHwtInit(pAC, Ioc);
69 SkTimerDone(pAC, Ioc);
70 break;
71 default:
72 break;
73 }
74}
75
76/*
77 * Stops a high level timer
78 * - If a timer is not in the queue the function returns normally, too.
79 */
80void SkTimerStop(
81SK_AC *pAC, /* Adapters context */
82SK_IOC Ioc, /* IoContext */
83SK_TIMER	*pTimer)	/* Timer Pointer to be stopped */
84{
85 SK_TIMER **ppTimPrev;
86 SK_TIMER *pTm;
87
88 /*
89 * remove timer from queue
90 */
91 pTimer->TmActive = SK_FALSE;
92
93 if (pAC->Tim.StQueue == pTimer && !pTimer->TmNext) {
94 SkHwtStop(pAC, Ioc);
95 }
96
97 for (ppTimPrev = &pAC->Tim.StQueue; (pTm = *ppTimPrev);
98 ppTimPrev = &pTm->TmNext ) {
99
100 if (pTm == pTimer) {
101 /*
102 * Timer found in queue
103 * - dequeue it and
104 * - correct delta of the next timer
105 */
106 *ppTimPrev = pTm->TmNext;
107
108 if (pTm->TmNext) {
109 /* correct delta of next timer in queue */
110 pTm->TmNext->TmDelta += pTm->TmDelta;
111 }
112 return;
113 }
114 }
115}
116
117/*
118 * Start a high level software timer
119 */
120void SkTimerStart(
121SK_AC *pAC, /* Adapters context */
122SK_IOC Ioc, /* IoContext */
123SK_TIMER *pTimer, /* Timer Pointer to be started */
124SK_U32 Time, /* Time value */
125SK_U32 Class, /* Event Class for this timer */
126SK_U32 Event, /* Event Value for this timer */
127SK_EVPARA Para) /* Event Parameter for this timer */
128{
129 SK_TIMER **ppTimPrev;
130 SK_TIMER *pTm;
131 SK_U32 Delta;
132
133 Time /= 16; /* input is uS, clock ticks are 16uS */
134
135 if (!Time)
136 Time = 1;
137
138 SkTimerStop(pAC, Ioc, pTimer);
139
140 pTimer->TmClass = Class;
141 pTimer->TmEvent = Event;
142 pTimer->TmPara = Para;
143 pTimer->TmActive = SK_TRUE;
144
145 if (!pAC->Tim.StQueue) {
146 /* First Timer to be started */
147 pAC->Tim.StQueue = pTimer;
148 pTimer->TmNext = NULL;
149 pTimer->TmDelta = Time;
150
151 SkHwtStart(pAC, Ioc, Time);
152
153 return;
154 }
155
156 /*
157 * timer correction
158 */
159 timer_done(pAC, Ioc, 0);
160
161 /*
162 * find position in queue
163 */
164 Delta = 0;
165 for (ppTimPrev = &pAC->Tim.StQueue; (pTm = *ppTimPrev);
166 ppTimPrev = &pTm->TmNext ) {
167
168 if (Delta + pTm->TmDelta > Time) {
169 /* Position found */
170 /* Here the timer needs to be inserted. */
171 break;
172 }
173 Delta += pTm->TmDelta;
174 }
175
176 /* insert in queue */
177 *ppTimPrev = pTimer;
178 pTimer->TmNext = pTm;
179 pTimer->TmDelta = Time - Delta;
180
181 if (pTm) {
182 /* There is a next timer
183 * -> correct its Delta value.
184 */
185 pTm->TmDelta -= pTimer->TmDelta;
186 }
187
188 /* restart with first */
189 SkHwtStart(pAC, Ioc, pAC->Tim.StQueue->TmDelta);
190}
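/*
 * Illustrative sketch, not part of the original sources: arming and
 * cancelling a software timer with the routines above. Time is given in
 * microseconds and converted internally to 16 us hardware ticks. The
 * SK_TIMER object must remain valid until the timer fires or is stopped.
 * pAC/Ioc are assumed to be valid contexts, pTimer to point into a
 * per-adapter structure, and SKGE_RLMT / SK_RLMT_TIM to be visible via
 * the included headers.
 */
static void ExampleArmTimer(
SK_AC	*pAC,	/* Adapters context */
SK_IOC	Ioc,	/* IoContext */
SK_TIMER	*pTimer)	/* Timer to arm */
{
	SK_EVPARA	Para;

	Para.Para32[0] = 0;		/* NetNumber for the queued event */
	Para.Para32[1] = (SK_U32)-1;

	/* queue a SK_RLMT_TIM event on class SKGE_RLMT in about 100 ms */
	SkTimerStart(pAC, Ioc, pTimer, 100000, SKGE_RLMT, SK_RLMT_TIM, Para);

	/* a pending timer may be cancelled at any time */
	SkTimerStop(pAC, Ioc, pTimer);
}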
191
192
193void SkTimerDone(
194SK_AC *pAC, /* Adapters context */
195SK_IOC Ioc) /* IoContext */
196{
197 timer_done(pAC, Ioc, 1);
198}
199
200
201static void timer_done(
202SK_AC *pAC, /* Adapters context */
203SK_IOC Ioc, /* IoContext */
204int Restart) /* Do we need to restart the Hardware timer ? */
205{
206 SK_U32 Delta;
207 SK_TIMER *pTm;
208	SK_TIMER	*pTComp;	/* Timer that completed now */
209	SK_TIMER	**ppLast;	/* Next field of last timer to be dequeued */
210 int Done = 0;
211
212 Delta = SkHwtRead(pAC, Ioc);
213
214 ppLast = &pAC->Tim.StQueue;
215 pTm = pAC->Tim.StQueue;
216 while (pTm && !Done) {
217 if (Delta >= pTm->TmDelta) {
218 /* Timer ran out */
219 pTm->TmActive = SK_FALSE;
220 Delta -= pTm->TmDelta;
221 ppLast = &pTm->TmNext;
222 pTm = pTm->TmNext;
223 }
224 else {
225 /* We found the first timer that did not run out */
226 pTm->TmDelta -= Delta;
227 Delta = 0;
228 Done = 1;
229 }
230 }
231 *ppLast = NULL;
232 /*
233 * pTm points to the first Timer that did not run out.
234	 * StQueue points to the first Timer that ran out.
235 */
236
237 for ( pTComp = pAC->Tim.StQueue; pTComp; pTComp = pTComp->TmNext) {
238 SkEventQueue(pAC,pTComp->TmClass, pTComp->TmEvent, pTComp->TmPara);
239 }
240
241 /* Set head of timer queue to the first timer that did not run out */
242 pAC->Tim.StQueue = pTm;
243
244 if (Restart && pAC->Tim.StQueue) {
245 /* Restart HW timer */
246 SkHwtStart(pAC, Ioc, pAC->Tim.StQueue->TmDelta);
247 }
248}
249
250/* End of file */
diff --git a/drivers/net/sk98lin/skvpd.c b/drivers/net/sk98lin/skvpd.c
deleted file mode 100644
index 1e662aaebf84..000000000000
--- a/drivers/net/sk98lin/skvpd.c
+++ /dev/null
@@ -1,1091 +0,0 @@
1/******************************************************************************
2 *
3 * Name: skvpd.c
4 * Project: GEnesis, PCI Gigabit Ethernet Adapter
5 * Version: $Revision: 1.37 $
6 * Date: $Date: 2003/01/13 10:42:45 $
7 * Purpose: Shared software to read and write VPD data
8 *
9 ******************************************************************************/
10
11/******************************************************************************
12 *
13 * (C)Copyright 1998-2003 SysKonnect GmbH.
14 *
15 * This program is free software; you can redistribute it and/or modify
16 * it under the terms of the GNU General Public License as published by
17 * the Free Software Foundation; either version 2 of the License, or
18 * (at your option) any later version.
19 *
20 * The information in this file is provided "AS IS" without warranty.
21 *
22 ******************************************************************************/
23
24/*
25	Please refer to skvpd.txt for information on how to include this module
26 */
27static const char SysKonnectFileId[] =
28 "@(#)$Id: skvpd.c,v 1.37 2003/01/13 10:42:45 rschmidt Exp $ (C) SK";
29
30#include "h/skdrv1st.h"
31#include "h/sktypes.h"
32#include "h/skdebug.h"
33#include "h/skdrv2nd.h"
34
35/*
36 * Static functions
37 */
38#ifndef SK_KR_PROTO
39static SK_VPD_PARA *vpd_find_para(
40 SK_AC *pAC,
41 const char *key,
42 SK_VPD_PARA *p);
43#else /* SK_KR_PROTO */
44static SK_VPD_PARA *vpd_find_para();
45#endif /* SK_KR_PROTO */
46
47/*
48 * Waits for completion of a VPD transfer.
49 * The VPD transfer must complete within SK_TICKS_PER_SEC ticks (one second).
50 *
51 * returns 0: success, transfer completed
52 *         1: timeout, the transfer did not complete in time
53 */
54static int VpdWait(
55SK_AC *pAC, /* Adapters context */
56SK_IOC IoC, /* IO Context */
57int		event)	/* event to wait for (VPD_READ / VPD_WRITE) completion */
58{
59 SK_U64 start_time;
60 SK_U16 state;
61
62 SK_DBG_MSG(pAC,SK_DBGMOD_VPD, SK_DBGCAT_CTRL,
63 ("VPD wait for %s\n", event?"Write":"Read"));
64 start_time = SkOsGetTime(pAC);
65 do {
66 if (SkOsGetTime(pAC) - start_time > SK_TICKS_PER_SEC) {
67
68 /* Bug fix AF: Thu Mar 28 2002
69 * Do not call: VPD_STOP(pAC, IoC);
70 * A pending VPD read cycle can not be aborted by writing
71 * VPD_WRITE to the PCI_VPD_ADR_REG (VPD address register).
72 * Although the write threshold in the OUR-register protects
73 * VPD read only space from being overwritten this does not
74 * protect a VPD read from being `converted` into a VPD write
75 * operation (on the fly). As a consequence the VPD_STOP would
76 * delete VPD read only data. In case of any problems with the
77 * I2C bus we exit the loop here. The I2C read operation can
78 * not be aborted except by a reset (->LR).
79 */
80 SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_FATAL | SK_DBGCAT_ERR,
81 ("ERROR:VPD wait timeout\n"));
82 return(1);
83 }
84
85 VPD_IN16(pAC, IoC, PCI_VPD_ADR_REG, &state);
86
87 SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_CTRL,
88 ("state = %x, event %x\n",state,event));
89 } while((int)(state & PCI_VPD_FLAG) == event);
90
91 return(0);
92}
93
94#ifdef SKDIAG
95
96/*
97 * Read the dword at address 'addr' from the VPD EEPROM.
98 *
99 * Needed Time: MIN 1.3 ms, MAX 2.6 ms
100 *
101 * Note: The DWord is returned in the endianness of the machine the routine
102 * is running on.
103 *
104 * Returns the data read.
105 */
106SK_U32 VpdReadDWord(
107SK_AC *pAC, /* Adapters context */
108SK_IOC IoC, /* IO Context */
109int addr) /* VPD address */
110{
111 SK_U32 Rtv;
112
113 /* start VPD read */
114 SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_CTRL,
115 ("VPD read dword at 0x%x\n",addr));
116 addr &= ~VPD_WRITE; /* ensure the R/W bit is set to read */
117
118 VPD_OUT16(pAC, IoC, PCI_VPD_ADR_REG, (SK_U16)addr);
119
120 /* ignore return code here */
121 (void)VpdWait(pAC, IoC, VPD_READ);
122
123 /* Don't swap here, it's a data stream of bytes */
124 Rtv = 0;
125
126 VPD_IN32(pAC, IoC, PCI_VPD_DAT_REG, &Rtv);
127
128 SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_CTRL,
129 ("VPD read dword data = 0x%x\n",Rtv));
130 return(Rtv);
131}
132
133#endif /* SKDIAG */
134
135/*
136 * Write one stream of 'len' bytes of VPD data, starting at 'addr',
137 * to the I2C EEPROM and verify the data just written.
138 *
139 * Returns number of bytes written.
140 */
141static int VpdWriteStream(
142SK_AC *pAC, /* Adapters context */
143SK_IOC IoC, /* IO Context */
144char *buf, /* data buffer */
145int Addr, /* VPD start address */
146int Len) /* number of bytes to read / to write */
147{
148 int i;
149 int j;
150 SK_U16 AdrReg;
151 int Rtv;
152 SK_U8 * pComp; /* Compare pointer */
153 SK_U8 Data; /* Input Data for Compare */
154
155 /* Init Compare Pointer */
156 pComp = (SK_U8 *) buf;
157
158 for (i = 0; i < Len; i++, buf++) {
159 if ((i%sizeof(SK_U32)) == 0) {
160 /*
161 * At the begin of each cycle read the Data Reg
162 * So it is initialized even if only a few bytes
163 * are written.
164 */
165 AdrReg = (SK_U16) Addr;
166 AdrReg &= ~VPD_WRITE; /* READ operation */
167
168 VPD_OUT16(pAC, IoC, PCI_VPD_ADR_REG, AdrReg);
169
170 /* Wait for termination */
171 Rtv = VpdWait(pAC, IoC, VPD_READ);
172 if (Rtv != 0) {
173 return(i);
174 }
175 }
176
177 /* Write current Byte */
178 VPD_OUT8(pAC, IoC, PCI_VPD_DAT_REG + (i%sizeof(SK_U32)),
179 *(SK_U8*)buf);
180
181 if (((i%sizeof(SK_U32)) == 3) || (i == (Len - 1))) {
182 /* New Address needs to be written to VPD_ADDR reg */
183 AdrReg = (SK_U16) Addr;
184 Addr += sizeof(SK_U32);
185 AdrReg |= VPD_WRITE; /* WRITE operation */
186
187 VPD_OUT16(pAC, IoC, PCI_VPD_ADR_REG, AdrReg);
188
189 /* Wait for termination */
190 Rtv = VpdWait(pAC, IoC, VPD_WRITE);
191 if (Rtv != 0) {
192 SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR,
193 ("Write Timed Out\n"));
194 return(i - (i%sizeof(SK_U32)));
195 }
196
197 /*
198 * Now re-read to verify
199 */
200 AdrReg &= ~VPD_WRITE; /* READ operation */
201
202 VPD_OUT16(pAC, IoC, PCI_VPD_ADR_REG, AdrReg);
203
204 /* Wait for termination */
205 Rtv = VpdWait(pAC, IoC, VPD_READ);
206 if (Rtv != 0) {
207 SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR,
208 ("Verify Timed Out\n"));
209 return(i - (i%sizeof(SK_U32)));
210 }
211
212 for (j = 0; j <= (int)(i%sizeof(SK_U32)); j++, pComp++) {
213
214 VPD_IN8(pAC, IoC, PCI_VPD_DAT_REG + j, &Data);
215
216 if (Data != *pComp) {
217 /* Verify Error */
218 SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR,
219 ("WriteStream Verify Error\n"));
220 return(i - (i%sizeof(SK_U32)) + j);
221 }
222 }
223 }
224 }
225
226 return(Len);
227}
228
229
230/*
231 * Read one stream of 'len' bytes of VPD data, starting at 'addr',
232 * from the I2C EEPROM.
233 *
234 * Returns number of bytes read.
235 */
236static int VpdReadStream(
237SK_AC *pAC, /* Adapters context */
238SK_IOC IoC, /* IO Context */
239char *buf, /* data buffer */
240int Addr, /* VPD start address */
241int Len) /* number of bytes to read / to write */
242{
243 int i;
244 SK_U16 AdrReg;
245 int Rtv;
246
247 for (i = 0; i < Len; i++, buf++) {
248 if ((i%sizeof(SK_U32)) == 0) {
249 /* New Address needs to be written to VPD_ADDR reg */
250 AdrReg = (SK_U16) Addr;
251 Addr += sizeof(SK_U32);
252 AdrReg &= ~VPD_WRITE; /* READ operation */
253
254 VPD_OUT16(pAC, IoC, PCI_VPD_ADR_REG, AdrReg);
255
256 /* Wait for termination */
257 Rtv = VpdWait(pAC, IoC, VPD_READ);
258 if (Rtv != 0) {
259 return(i);
260 }
261 }
262 VPD_IN8(pAC, IoC, PCI_VPD_DAT_REG + (i%sizeof(SK_U32)),
263 (SK_U8 *)buf);
264 }
265
266 return(Len);
267}
268
269/*
270 * Reads or writes 'len' bytes of VPD data, starting at 'addr', from
271 * or to the I2C EEPROM.
272 *
273 * Returns number of bytes read / written.
274 */
275static int VpdTransferBlock(
276SK_AC *pAC, /* Adapters context */
277SK_IOC IoC, /* IO Context */
278char *buf, /* data buffer */
279int addr, /* VPD start address */
280int len, /* number of bytes to read / to write */
281int dir) /* transfer direction may be VPD_READ or VPD_WRITE */
282{
283 int Rtv; /* Return value */
284 int vpd_rom_size;
285
286 SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_CTRL,
287 ("VPD %s block, addr = 0x%x, len = %d\n",
288 dir ? "write" : "read", addr, len));
289
290 if (len == 0)
291 return(0);
292
293 vpd_rom_size = pAC->vpd.rom_size;
294
295 if (addr > vpd_rom_size - 4) {
296 SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR | SK_DBGCAT_FATAL,
297 ("Address error: 0x%x, exp. < 0x%x\n",
298 addr, vpd_rom_size - 4));
299 return(0);
300 }
301
302 if (addr + len > vpd_rom_size) {
303 len = vpd_rom_size - addr;
304 SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR,
305 ("Warning: len was cut to %d\n", len));
306 }
307
308 if (dir == VPD_READ) {
309 Rtv = VpdReadStream(pAC, IoC, buf, addr, len);
310 }
311 else {
312 Rtv = VpdWriteStream(pAC, IoC, buf, addr, len);
313 }
314
315 return(Rtv);
316}
317
318#ifdef SKDIAG
319
320/*
321 * Read 'len' bytes of VPD data, starting at 'addr'.
322 *
323 * Returns number of bytes read.
324 */
325int VpdReadBlock(
326SK_AC *pAC, /* pAC pointer */
327SK_IOC IoC, /* IO Context */
328char		*buf,	/* buffer where the data should be stored */
329int addr, /* start reading at the VPD address */
330int len) /* number of bytes to read */
331{
332 return(VpdTransferBlock(pAC, IoC, buf, addr, len, VPD_READ));
333}
334
335/*
336 * Write 'len' bytes of *buf to the VPD EEPROM, starting at 'addr'.
337 *
338 * Returns number of bytes written.
339 */
340int VpdWriteBlock(
341SK_AC *pAC, /* pAC pointer */
342SK_IOC IoC, /* IO Context */
343char *buf, /* buffer, holds the data to write */
344int addr, /* start writing at the VPD address */
345int len) /* number of bytes to write */
346{
347 return(VpdTransferBlock(pAC, IoC, buf, addr, len, VPD_WRITE));
348}
349#endif /* SKDIAG */
350
351/*
352 * (re)initialize the VPD buffer
353 *
354 * Reads the VPD data from the EEPROM into the VPD buffer.
355 * Get the remaining read only and read / write space.
356 *
357 * return 0: success
358 * 1: fatal VPD error
359 */
360static int VpdInit(
361SK_AC *pAC, /* Adapters context */
362SK_IOC IoC) /* IO Context */
363{
364 SK_VPD_PARA *r, rp; /* RW or RV */
365 int i;
366 unsigned char x;
367 int vpd_size;
368 SK_U16 dev_id;
369 SK_U32 our_reg2;
370
371 SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_INIT, ("VpdInit .. "));
372
373 VPD_IN16(pAC, IoC, PCI_DEVICE_ID, &dev_id);
374
375 VPD_IN32(pAC, IoC, PCI_OUR_REG_2, &our_reg2);
376
377 pAC->vpd.rom_size = 256 << ((our_reg2 & PCI_VPD_ROM_SZ) >> 14);
378
379 /*
380 * this function might get used before the hardware is initialized
381 * therefore we cannot always trust in GIChipId
382 */
383 if (((pAC->vpd.v.vpd_status & VPD_VALID) == 0 &&
384 dev_id != VPD_DEV_ID_GENESIS) ||
385 ((pAC->vpd.v.vpd_status & VPD_VALID) != 0 &&
386 !pAC->GIni.GIGenesis)) {
387
388 /* for Yukon the VPD size is always 256 */
389 vpd_size = VPD_SIZE_YUKON;
390 }
391 else {
392 /* Genesis uses the maximum ROM size up to 512 for VPD */
393 if (pAC->vpd.rom_size > VPD_SIZE_GENESIS) {
394 vpd_size = VPD_SIZE_GENESIS;
395 }
396 else {
397 vpd_size = pAC->vpd.rom_size;
398 }
399 }
400
401 /* read the VPD data into the VPD buffer */
402 if (VpdTransferBlock(pAC, IoC, pAC->vpd.vpd_buf, 0, vpd_size, VPD_READ)
403 != vpd_size) {
404
405 SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR,
406 ("Block Read Error\n"));
407 return(1);
408 }
409
410 pAC->vpd.vpd_size = vpd_size;
411
412 /* Asus K8V Se Deluxe bugfix. Correct VPD content */
413 /* MBo April 2004 */
414 if (((unsigned char)pAC->vpd.vpd_buf[0x3f] == 0x38) &&
415 ((unsigned char)pAC->vpd.vpd_buf[0x40] == 0x3c) &&
416 ((unsigned char)pAC->vpd.vpd_buf[0x41] == 0x45)) {
417 printk("sk98lin: Asus mainboard with buggy VPD? "
418 "Correcting data.\n");
419 pAC->vpd.vpd_buf[0x40] = 0x38;
420 }
421
422
423 /* find the end tag of the RO area */
424 if (!(r = vpd_find_para(pAC, VPD_RV, &rp))) {
425 SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR | SK_DBGCAT_FATAL,
426 ("Encoding Error: RV Tag not found\n"));
427 return(1);
428 }
429
430 if (r->p_val + r->p_len > pAC->vpd.vpd_buf + vpd_size/2) {
431 SK_DBG_MSG(pAC,SK_DBGMOD_VPD,SK_DBGCAT_ERR | SK_DBGCAT_FATAL,
432 ("Encoding Error: Invalid VPD struct size\n"));
433 return(1);
434 }
435 pAC->vpd.v.vpd_free_ro = r->p_len - 1;
436
437 /* test the checksum */
438 for (i = 0, x = 0; (unsigned)i <= (unsigned)vpd_size/2 - r->p_len; i++) {
439 x += pAC->vpd.vpd_buf[i];
440 }
441
442 if (x != 0) {
443 /* checksum error */
444 SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR | SK_DBGCAT_FATAL,
445 ("VPD Checksum Error\n"));
446 return(1);
447 }
448
449 /* find and check the end tag of the RW area */
450 if (!(r = vpd_find_para(pAC, VPD_RW, &rp))) {
451 SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR | SK_DBGCAT_FATAL,
452			("Encoding Error: RW Tag not found\n"));
453 return(1);
454 }
455
456 if (r->p_val < pAC->vpd.vpd_buf + vpd_size/2) {
457 SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR | SK_DBGCAT_FATAL,
458 ("Encoding Error: Invalid VPD struct size\n"));
459 return(1);
460 }
461 pAC->vpd.v.vpd_free_rw = r->p_len;
462
463 /* everything seems to be ok */
464 if (pAC->GIni.GIChipId != 0) {
465 pAC->vpd.v.vpd_status |= VPD_VALID;
466 }
467
468 SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_INIT,
469 ("done. Free RO = %d, Free RW = %d\n",
470 pAC->vpd.v.vpd_free_ro, pAC->vpd.v.vpd_free_rw));
471
472 return(0);
473}
474
475/*
476 * Find the keyword 'key' in the VPD buffer and fill the
477 * parameter struct 'p' with its values.
478 *
479 * returns p:    success
480 *         NULL: parameter was not found or VPD encoding error
481 */
482static SK_VPD_PARA *vpd_find_para(
483SK_AC *pAC, /* common data base */
484const char *key, /* keyword to find (e.g. "MN") */
485SK_VPD_PARA *p) /* parameter description struct */
486{
487 char *v ; /* points to VPD buffer */
488 int max; /* Maximum Number of Iterations */
489
490 v = pAC->vpd.vpd_buf;
491 max = 128;
492
493 SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_CTRL,
494 ("VPD find para %s .. ",key));
495
496 /* check mandatory resource type ID string (Product Name) */
497 if (*v != (char)RES_ID) {
498 SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR | SK_DBGCAT_FATAL,
499 ("Error: 0x%x missing\n", RES_ID));
500 return NULL;
501 }
502
503 if (strcmp(key, VPD_NAME) == 0) {
504 p->p_len = VPD_GET_RES_LEN(v);
505 p->p_val = VPD_GET_VAL(v);
506 SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_CTRL,
507 ("found, len = %d\n", p->p_len));
508 return(p);
509 }
510
511 v += 3 + VPD_GET_RES_LEN(v) + 3;
512 for (;; ) {
513 if (SK_MEMCMP(key,v,2) == 0) {
514 p->p_len = VPD_GET_VPD_LEN(v);
515 p->p_val = VPD_GET_VAL(v);
516 SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_CTRL,
517 ("found, len = %d\n",p->p_len));
518 return(p);
519 }
520
521		/* exit when reaching the "RW" tag or the maximum number of iterations */
522 max--;
523 if (SK_MEMCMP(VPD_RW,v,2) == 0 || max == 0) {
524 break;
525 }
526
527 if (SK_MEMCMP(VPD_RV,v,2) == 0) {
528 v += 3 + VPD_GET_VPD_LEN(v) + 3; /* skip VPD-W */
529 }
530 else {
531 v += 3 + VPD_GET_VPD_LEN(v);
532 }
533 SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_CTRL,
534 ("scanning '%c%c' len = %d\n",v[0],v[1],v[2]));
535 }
536
537#ifdef DEBUG
538 SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_CTRL, ("not found\n"));
539 if (max == 0) {
540 SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR | SK_DBGCAT_FATAL,
541 ("Key/Len Encoding error\n"));
542 }
543#endif /* DEBUG */
544 return NULL;
545}
546
547/*
548 * Move 'n' bytes. Begin with the last byte if 'n' is > 0,
549 * begin with the first byte if 'n' is < 0.
550 *
551 * returns nothing
552 */
553static void vpd_move_para(
554char *start, /* start of memory block */
555char *end, /* end of memory block to move */
556int n) /* number of bytes the memory block has to be moved */
557{
558 char *p;
559 int i; /* number of byte copied */
560
561 if (n == 0)
562 return;
563
564 i = (int) (end - start + 1);
565 if (n < 0) {
566 p = start + n;
567 while (i != 0) {
568 *p++ = *start++;
569 i--;
570 }
571 }
572 else {
573 p = end + n;
574 while (i != 0) {
575 *p-- = *end--;
576 i--;
577 }
578 }
579}
580
581/*
582 * setup the VPD keyword 'key' at 'ip'.
583 *
584 * returns nothing
585 */
586static void vpd_insert_key(
587const char *key, /* keyword to insert */
588const char *buf, /* buffer with the keyword value */
589int len, /* length of the value string */
590char	*ip)	/* insertion point */
591{
592 SK_VPD_KEY *p;
593
594 p = (SK_VPD_KEY *) ip;
595 p->p_key[0] = key[0];
596 p->p_key[1] = key[1];
597 p->p_len = (unsigned char) len;
598 SK_MEMCPY(&p->p_val,buf,len);
599}
600
601/*
602 * Setup the VPD end tag "RV" / "RW".
603 * Also correct the remaining space variables vpd_free_ro / vpd_free_rw.
604 *
605 * returns 0: success
606 * 1: encoding error
607 */
608static int vpd_mod_endtag(
609SK_AC *pAC, /* common data base */
610char *etp) /* end pointer input position */
611{
612 SK_VPD_KEY *p;
613 unsigned char x;
614 int i;
615 int vpd_size;
616
617 SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_CTRL,
618 ("VPD modify endtag at 0x%x = '%c%c'\n",etp,etp[0],etp[1]));
619
620 vpd_size = pAC->vpd.vpd_size;
621
622 p = (SK_VPD_KEY *) etp;
623
624 if (p->p_key[0] != 'R' || (p->p_key[1] != 'V' && p->p_key[1] != 'W')) {
625 /* something wrong here, encoding error */
626 SK_DBG_MSG(pAC,SK_DBGMOD_VPD,SK_DBGCAT_ERR | SK_DBGCAT_FATAL,
627 ("Encoding Error: invalid end tag\n"));
628 return(1);
629 }
630 if (etp > pAC->vpd.vpd_buf + vpd_size/2) {
631 /* create "RW" tag */
632 p->p_len = (unsigned char)(pAC->vpd.vpd_buf+vpd_size-etp-3-1);
633 pAC->vpd.v.vpd_free_rw = (int) p->p_len;
634 i = pAC->vpd.v.vpd_free_rw;
635 etp += 3;
636 }
637 else {
638 /* create "RV" tag */
639 p->p_len = (unsigned char)(pAC->vpd.vpd_buf+vpd_size/2-etp-3);
640 pAC->vpd.v.vpd_free_ro = (int) p->p_len - 1;
641
642 /* setup checksum */
643 for (i = 0, x = 0; i < vpd_size/2 - p->p_len; i++) {
644 x += pAC->vpd.vpd_buf[i];
645 }
646 p->p_val = (char) 0 - x;
647 i = pAC->vpd.v.vpd_free_ro;
648 etp += 4;
649 }
650 while (i) {
651 *etp++ = 0x00;
652 i--;
653 }
654
655 return(0);
656}
657
658/*
659 * Insert a VPD keyword into the VPD buffer.
660 *
661 * The keyword 'key' is inserted at the position 'ip' in the
662 * VPD buffer.
663 * The keywords behind the input position will
664 * be moved. The VPD end tag "RV" or "RW" is generated again.
665 *
666 * returns 0: success
667 * 2: value string was cut
668 * 4: VPD full, keyword was not written
669 * 6: fatal VPD error
670 *
671 */
672static int VpdSetupPara(
673SK_AC *pAC, /* common data base */
674const char *key, /* keyword to insert */
675const char *buf, /* buffer with the keyword value */
676int len, /* length of the keyword value */
677int type, /* VPD_RO_KEY or VPD_RW_KEY */
678int op) /* operation to do: ADD_KEY or OWR_KEY */
679{
680 SK_VPD_PARA vp;
681 char *etp; /* end tag position */
682 int free; /* remaining space in selected area */
683 char *ip; /* input position inside the VPD buffer */
684 int rtv; /* return code */
685	int head;	/* additional header bytes to move */
686	int found;	/* additional bytes if the keyword was found */
687 int vpd_size;
688
689 SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_CTRL,
690 ("VPD setup para key = %s, val = %s\n",key,buf));
691
692 vpd_size = pAC->vpd.vpd_size;
693
694 rtv = 0;
695 ip = NULL;
696 if (type == VPD_RW_KEY) {
697 /* end tag is "RW" */
698 free = pAC->vpd.v.vpd_free_rw;
699 etp = pAC->vpd.vpd_buf + (vpd_size - free - 1 - 3);
700 }
701 else {
702 /* end tag is "RV" */
703 free = pAC->vpd.v.vpd_free_ro;
704 etp = pAC->vpd.vpd_buf + (vpd_size/2 - free - 4);
705 }
706 SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_CTRL,
707 ("Free RO = %d, Free RW = %d\n",
708 pAC->vpd.v.vpd_free_ro, pAC->vpd.v.vpd_free_rw));
709
710 head = 0;
711 found = 0;
712 if (op == OWR_KEY) {
713 if (vpd_find_para(pAC, key, &vp)) {
714 found = 3;
715 ip = vp.p_val - 3;
716 free += vp.p_len + 3;
717 SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_CTRL,
718 ("Overwrite Key\n"));
719 }
720 else {
721 op = ADD_KEY;
722 SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_CTRL,
723 ("Add Key\n"));
724 }
725 }
726 if (op == ADD_KEY) {
727 ip = etp;
728 vp.p_len = 0;
729 head = 3;
730 }
731
732 if (len + 3 > free) {
733 if (free < 7) {
734 SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR,
735 ("VPD Buffer Overflow, keyword not written\n"));
736 return(4);
737 }
738 /* cut it again */
739 len = free - 3;
740 rtv = 2;
741 SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR,
742 ("VPD Buffer Full, Keyword was cut\n"));
743 }
744
745 vpd_move_para(ip + vp.p_len + found, etp+2, len-vp.p_len+head);
746 vpd_insert_key(key, buf, len, ip);
747 if (vpd_mod_endtag(pAC, etp + len - vp.p_len + head)) {
748 pAC->vpd.v.vpd_status &= ~VPD_VALID;
749 SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR,
750 ("VPD Encoding Error\n"));
751 return(6);
752 }
753
754 return(rtv);
755}
756
757
758/*
759 * Read the contents of the VPD EEPROM and copy it to the
760 * VPD buffer if not already done.
761 *
762 * return: A pointer to the vpd_status structure. The structure contains
763 * the current VPD status and the remaining read-only / read-write space.
764 */
765SK_VPD_STATUS *VpdStat(
766SK_AC *pAC, /* Adapters context */
767SK_IOC IoC) /* IO Context */
768{
769 if ((pAC->vpd.v.vpd_status & VPD_VALID) == 0) {
770 (void)VpdInit(pAC, IoC);
771 }
772 return(&pAC->vpd.v);
773}
774
775
776/*
777 * Read the contents of the VPD EEPROM and copy it to the VPD
778 * buffer if not already done.
779 * Scan the VPD buffer for VPD keywords and create the VPD
780 * keyword list by copying the keywords to 'buf', all after
781 * each other and terminated with a '\0'.
782 *
783 * Exceptions: o The Resource Type ID String (product name) is called "Name"
784 * o The VPD end tags 'RV' and 'RW' are not listed
785 *
786 * The number of copied keywords is counted in 'elements'.
787 *
788 * returns 0: success
789 *         2: buffer overflow, one or more keywords are missing
790 * 6: fatal VPD error
791 *
792 * example values after returning:
793 *
794 * buf = "Name\0PN\0EC\0MN\0SN\0CP\0VF\0VL\0YA\0"
795 * *len = 30
796 * *elements = 9
797 */
798int VpdKeys(
799SK_AC *pAC, /* common data base */
800SK_IOC IoC, /* IO Context */
801char *buf, /* buffer where to copy the keywords */
802int *len, /* buffer length */
803int *elements) /* number of keywords returned */
804{
805 char *v;
806 int n;
807
808 SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_RX, ("list VPD keys .. "));
809 *elements = 0;
810 if ((pAC->vpd.v.vpd_status & VPD_VALID) == 0) {
811 if (VpdInit(pAC, IoC) != 0) {
812 *len = 0;
813 SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR,
814 ("VPD Init Error, terminated\n"));
815 return(6);
816 }
817 }
818
819 if ((signed)strlen(VPD_NAME) + 1 <= *len) {
820 v = pAC->vpd.vpd_buf;
821 strcpy(buf,VPD_NAME);
822 n = strlen(VPD_NAME) + 1;
823 buf += n;
824 *elements = 1;
825 SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_RX,
826 ("'%c%c' ",v[0],v[1]));
827 }
828 else {
829 *len = 0;
830 SK_DBG_MSG(pAC,SK_DBGMOD_VPD,SK_DBGCAT_ERR,
831 ("buffer overflow\n"));
832 return(2);
833 }
834
835 v += 3 + VPD_GET_RES_LEN(v) + 3;
836 for (;; ) {
837 /* exit when reaching the "RW" Tag */
838 if (SK_MEMCMP(VPD_RW,v,2) == 0) {
839 break;
840 }
841
842 if (SK_MEMCMP(VPD_RV,v,2) == 0) {
843 v += 3 + VPD_GET_VPD_LEN(v) + 3; /* skip VPD-W */
844 continue;
845 }
846
847 if (n+3 <= *len) {
848 SK_MEMCPY(buf,v,2);
849 buf += 2;
850 *buf++ = '\0';
851 n += 3;
852 v += 3 + VPD_GET_VPD_LEN(v);
853 *elements += 1;
854 SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_RX,
855 ("'%c%c' ",v[0],v[1]));
856 }
857 else {
858 *len = n;
859 SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR,
860 ("buffer overflow\n"));
861 return(2);
862 }
863 }
864
865 SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_RX, ("\n"));
866 *len = n;
867 return(0);
868}
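/*
 * Illustrative sketch, not part of the original sources: listing all VPD
 * keywords with VpdKeys() above and walking the '\0'-separated result
 * buffer. Buffer size and error handling are simplified; pAC and IoC are
 * assumed to be valid contexts.
 */
static void ExampleListVpdKeys(
SK_AC	*pAC,	/* common data base */
SK_IOC	IoC)	/* IO Context */
{
	char	KeyBuf[128];
	char	*p;
	int	Len = sizeof(KeyBuf);
	int	Elements;
	int	i;

	if (VpdKeys(pAC, IoC, KeyBuf, &Len, &Elements) != 0) {
		return;		/* buffer too small or fatal VPD error */
	}

	/* KeyBuf now looks like "Name\0PN\0EC\0..." with Elements entries */
	for (i = 0, p = KeyBuf; i < Elements; i++, p += strlen(p) + 1) {
		SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_RX,
			("VPD keyword: %s\n", p));
	}
}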
869
870
871/*
872 * Read the contents of the VPD EEPROM and copy it to the
873 * VPD buffer if not already done. Search for the VPD keyword
874 * 'key' and copy its value to 'buf'. Add a terminating '\0'.
875 * If the value does not fit into the buffer cut it after
876 * 'len' - 1 bytes.
877 *
878 * returns 0: success
879 * 1: keyword not found
880 * 2: value string was cut
881 * 3: VPD transfer timeout
882 * 6: fatal VPD error
883 */
884int VpdRead(
885SK_AC *pAC, /* common data base */
886SK_IOC IoC, /* IO Context */
887const char *key, /* keyword to read (e.g. "MN") */
888char *buf, /* buffer where to copy the keyword value */
889int *len) /* buffer length */
890{
891 SK_VPD_PARA *p, vp;
892
893 SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_RX, ("VPD read %s .. ", key));
894 if ((pAC->vpd.v.vpd_status & VPD_VALID) == 0) {
895 if (VpdInit(pAC, IoC) != 0) {
896 *len = 0;
897 SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR,
898 ("VPD init error\n"));
899 return(6);
900 }
901 }
902
903 if ((p = vpd_find_para(pAC, key, &vp)) != NULL) {
904 if (p->p_len > (*(unsigned *)len)-1) {
905 p->p_len = *len - 1;
906 }
907 SK_MEMCPY(buf, p->p_val, p->p_len);
908 buf[p->p_len] = '\0';
909 *len = p->p_len;
910 SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_RX,
911 ("%c%c%c%c.., len = %d\n",
912 buf[0],buf[1],buf[2],buf[3],*len));
913 }
914 else {
915 *len = 0;
916 SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR, ("not found\n"));
917 return(1);
918 }
919 return(0);
920}
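/*
 * Illustrative sketch, not part of the original sources: reading a single
 * VPD keyword, here the serial number "SN", with VpdRead() above. On
 * success the value is returned as a '\0'-terminated string; return code
 * 2 only means the value was cut to fit the buffer. pAC and IoC are
 * assumed to be valid contexts.
 */
static int ExampleReadSerialNumber(
SK_AC	*pAC,	/* common data base */
SK_IOC	IoC,	/* IO Context */
char	*SnBuf,	/* buffer for the serial number string */
int	SnBufLen)	/* size of that buffer */
{
	int	Len = SnBufLen;

	switch (VpdRead(pAC, IoC, "SN", SnBuf, &Len)) {
	case 0:		/* success, Len holds the value length */
	case 2:		/* success, but the value was cut */
		return(0);
	default:	/* not found, timeout, or fatal VPD error */
		return(1);
	}
}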
921
922
923/*
924 * Check whether a given key may be written
925 *
926 * returns
927 *	SK_TRUE		Yes, it may be written
928 *	SK_FALSE	No, it may not be written
929 */
930SK_BOOL VpdMayWrite(
931char *key) /* keyword to write (allowed values "Yx", "Vx") */
932{
933 if ((*key != 'Y' && *key != 'V') ||
934 key[1] < '0' || key[1] > 'Z' ||
935 (key[1] > '9' && key[1] < 'A') || strlen(key) != 2) {
936
937 return(SK_FALSE);
938 }
939 return(SK_TRUE);
940}
941
942/*
943 * Read the contents of the VPD EEPROM and copy it to the VPD
944 * buffer if not already done. Insert/overwrite the keyword 'key'
945 * in the VPD buffer. Cut the keyword value if it does not fit
946 * into the VPD read / write area.
947 *
948 * returns 0: success
949 * 2: value string was cut
950 * 3: VPD transfer timeout
951 * 4: VPD full, keyword was not written
952 * 5: keyword cannot be written
953 * 6: fatal VPD error
954 */
955int VpdWrite(
956SK_AC *pAC, /* common data base */
957SK_IOC IoC, /* IO Context */
958const char *key, /* keyword to write (allowed values "Yx", "Vx") */
959const char *buf) /* buffer where the keyword value can be read from */
960{
961 int len; /* length of the keyword to write */
962 int rtv; /* return code */
963 int rtv2;
964
965 SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_TX,
966 ("VPD write %s = %s\n",key,buf));
967
968 if ((*key != 'Y' && *key != 'V') ||
969 key[1] < '0' || key[1] > 'Z' ||
970 (key[1] > '9' && key[1] < 'A') || strlen(key) != 2) {
971
972 SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR,
973 ("illegal key tag, keyword not written\n"));
974 return(5);
975 }
976
977 if ((pAC->vpd.v.vpd_status & VPD_VALID) == 0) {
978 if (VpdInit(pAC, IoC) != 0) {
979 SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR,
980 ("VPD init error\n"));
981 return(6);
982 }
983 }
984
985 rtv = 0;
986 len = strlen(buf);
987 if (len > VPD_MAX_LEN) {
988 /* cut it */
989 len = VPD_MAX_LEN;
990 rtv = 2;
991 SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR,
992 ("keyword too long, cut after %d bytes\n",VPD_MAX_LEN));
993 }
994 if ((rtv2 = VpdSetupPara(pAC, key, buf, len, VPD_RW_KEY, OWR_KEY)) != 0) {
995 SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR,
996 ("VPD write error\n"));
997 return(rtv2);
998 }
999
1000 return(rtv);
1001}
1002
1003/*
1004 * Read the contents of the VPD EEPROM and copy it to the
1005 * VPD buffer if not already done. Remove the VPD keyword
1006 * 'key' from the VPD buffer.
1007 * Only the keywords in the read/write area can be deleted.
1008 * Keywords in the read only area cannot be deleted.
1009 *
1010 * returns 0: success, keyword was removed
1011 * 1: keyword not found
1012 * 5: keyword cannot be deleted
1013 * 6: fatal VPD error
1014 */
1015int VpdDelete(
1016SK_AC *pAC, /* common data base */
1017SK_IOC IoC, /* IO Context */
1018char	*key)	/* keyword to delete (e.g. "Y1") */
1019{
1020 SK_VPD_PARA *p, vp;
1021 char *etp;
1022 int vpd_size;
1023
1024 vpd_size = pAC->vpd.vpd_size;
1025
1026 SK_DBG_MSG(pAC,SK_DBGMOD_VPD,SK_DBGCAT_TX,("VPD delete key %s\n",key));
1027 if ((pAC->vpd.v.vpd_status & VPD_VALID) == 0) {
1028 if (VpdInit(pAC, IoC) != 0) {
1029 SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR,
1030 ("VPD init error\n"));
1031 return(6);
1032 }
1033 }
1034
1035 if ((p = vpd_find_para(pAC, key, &vp)) != NULL) {
1036 if (p->p_val < pAC->vpd.vpd_buf + vpd_size/2) {
1037 /* try to delete read only keyword */
1038 SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR,
1039 ("cannot delete RO keyword\n"));
1040 return(5);
1041 }
1042
1043 etp = pAC->vpd.vpd_buf + (vpd_size-pAC->vpd.v.vpd_free_rw-1-3);
1044
1045 vpd_move_para(vp.p_val+vp.p_len, etp+2,
1046 - ((int)(vp.p_len + 3)));
1047 if (vpd_mod_endtag(pAC, etp - vp.p_len - 3)) {
1048 pAC->vpd.v.vpd_status &= ~VPD_VALID;
1049 SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR,
1050 ("VPD encoding error\n"));
1051 return(6);
1052 }
1053 }
1054 else {
1055 SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR,
1056 ("keyword not found\n"));
1057 return(1);
1058 }
1059
1060 return(0);
1061}
1062
1063/*
1064 * If the VPD buffer contains valid data write the VPD
1065 * read/write area back to the VPD EEPROM.
1066 *
1067 * returns 0: success
1068 * 3: VPD transfer timeout
1069 */
1070int VpdUpdate(
1071SK_AC *pAC, /* Adapters context */
1072SK_IOC IoC) /* IO Context */
1073{
1074 int vpd_size;
1075
1076 vpd_size = pAC->vpd.vpd_size;
1077
1078 SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_TX, ("VPD update .. "));
1079 if ((pAC->vpd.v.vpd_status & VPD_VALID) != 0) {
1080 if (VpdTransferBlock(pAC, IoC, pAC->vpd.vpd_buf + vpd_size/2,
1081 vpd_size/2, vpd_size/2, VPD_WRITE) != vpd_size/2) {
1082
1083 SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_ERR,
1084 ("transfer timed out\n"));
1085 return(3);
1086 }
1087 }
1088 SK_DBG_MSG(pAC, SK_DBGMOD_VPD, SK_DBGCAT_TX, ("done\n"));
1089 return(0);
1090}
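/*
 * Illustrative sketch, not part of the original sources: the typical
 * write flow using the routines above. Only "Yx" / "Vx" keywords may be
 * written; VpdWrite() changes the VPD buffer only, and VpdUpdate() then
 * writes the read/write area back to the EEPROM. pAC and IoC are assumed
 * to be valid contexts; "Y1" and its value are made-up examples.
 */
static int ExampleWriteVpdKeyword(
SK_AC	*pAC,	/* Adapters context */
SK_IOC	IoC)	/* IO Context */
{
	char	Key[] = "Y1";
	int	Rtv;

	if (!VpdMayWrite(Key)) {
		return(5);	/* keyword cannot be written */
	}

	Rtv = VpdWrite(pAC, IoC, Key, "example value");
	if (Rtv != 0 && Rtv != 2) {
		return(Rtv);	/* VPD full (4) or fatal error (6) */
	}

	return(VpdUpdate(pAC, IoC));	/* 0: success, 3: transfer timeout */
}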
1091
diff --git a/drivers/net/sk98lin/skxmac2.c b/drivers/net/sk98lin/skxmac2.c
deleted file mode 100644
index b4e75022a657..000000000000
--- a/drivers/net/sk98lin/skxmac2.c
+++ /dev/null
@@ -1,4160 +0,0 @@
1/******************************************************************************
2 *
3 * Name: skxmac2.c
4 * Project: Gigabit Ethernet Adapters, Common Modules
5 * Version: $Revision: 1.102 $
6 * Date: $Date: 2003/10/02 16:53:58 $
7 * Purpose: Contains functions to initialize the MACs and PHYs
8 *
9 ******************************************************************************/
10
11/******************************************************************************
12 *
13 * (C)Copyright 1998-2002 SysKonnect.
14 * (C)Copyright 2002-2003 Marvell.
15 *
16 * This program is free software; you can redistribute it and/or modify
17 * it under the terms of the GNU General Public License as published by
18 * the Free Software Foundation; either version 2 of the License, or
19 * (at your option) any later version.
20 *
21 * The information in this file is provided "AS IS" without warranty.
22 *
23 ******************************************************************************/
24
25#include "h/skdrv1st.h"
26#include "h/skdrv2nd.h"
27
28/* typedefs *******************************************************************/
29
30/* BCOM PHY magic pattern list */
31typedef struct s_PhyHack {
32 int PhyReg; /* Phy register */
33 SK_U16 PhyVal; /* Value to write */
34} BCOM_HACK;
35
36/* local variables ************************************************************/
37
38#if (defined(DEBUG) || ((!defined(LINT)) && (!defined(SK_SLIM))))
39static const char SysKonnectFileId[] =
40 "@(#) $Id: skxmac2.c,v 1.102 2003/10/02 16:53:58 rschmidt Exp $ (C) Marvell.";
41#endif
42
43#ifdef GENESIS
44static BCOM_HACK BcomRegA1Hack[] = {
45 { 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1104 }, { 0x17, 0x0013 },
46 { 0x15, 0x0404 }, { 0x17, 0x8006 }, { 0x15, 0x0132 }, { 0x17, 0x8006 },
47 { 0x15, 0x0232 }, { 0x17, 0x800D }, { 0x15, 0x000F }, { 0x18, 0x0420 },
48 { 0, 0 }
49};
50static BCOM_HACK BcomRegC0Hack[] = {
51 { 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1204 }, { 0x17, 0x0013 },
52 { 0x15, 0x0A04 }, { 0x18, 0x0420 },
53 { 0, 0 }
54};
55#endif
56
57/* function prototypes ********************************************************/
58#ifdef GENESIS
59static void SkXmInitPhyXmac(SK_AC*, SK_IOC, int, SK_BOOL);
60static void SkXmInitPhyBcom(SK_AC*, SK_IOC, int, SK_BOOL);
61static int SkXmAutoNegDoneXmac(SK_AC*, SK_IOC, int);
62static int SkXmAutoNegDoneBcom(SK_AC*, SK_IOC, int);
63#endif /* GENESIS */
64#ifdef YUKON
65static void SkGmInitPhyMarv(SK_AC*, SK_IOC, int, SK_BOOL);
66static int SkGmAutoNegDoneMarv(SK_AC*, SK_IOC, int);
67#endif /* YUKON */
68#ifdef OTHER_PHY
69static void SkXmInitPhyLone(SK_AC*, SK_IOC, int, SK_BOOL);
70static void SkXmInitPhyNat (SK_AC*, SK_IOC, int, SK_BOOL);
71static int SkXmAutoNegDoneLone(SK_AC*, SK_IOC, int);
72static int SkXmAutoNegDoneNat (SK_AC*, SK_IOC, int);
73#endif /* OTHER_PHY */
74
75
76#ifdef GENESIS
77/******************************************************************************
78 *
79 * SkXmPhyRead() - Read from XMAC PHY register
80 *
81 * Description: reads a 16-bit word from XMAC PHY or ext. PHY
82 *
83 * Returns:
84 * nothing
85 */
86void SkXmPhyRead(
87SK_AC *pAC, /* Adapter Context */
88SK_IOC IoC, /* I/O Context */
89int Port, /* Port Index (MAC_1 + n) */
90int PhyReg, /* Register Address (Offset) */
91SK_U16 SK_FAR *pVal) /* Pointer to Value */
92{
93 SK_U16 Mmu;
94 SK_GEPORT *pPrt;
95
96 pPrt = &pAC->GIni.GP[Port];
97
98 /* write the PHY register's address */
99 XM_OUT16(IoC, Port, XM_PHY_ADDR, PhyReg | pPrt->PhyAddr);
100
101 /* get the PHY register's value */
102 XM_IN16(IoC, Port, XM_PHY_DATA, pVal);
103
104 if (pPrt->PhyType != SK_PHY_XMAC) {
105 do {
106 XM_IN16(IoC, Port, XM_MMU_CMD, &Mmu);
107 /* wait until 'Ready' is set */
108 } while ((Mmu & XM_MMU_PHY_RDY) == 0);
109
110 /* get the PHY register's value */
111 XM_IN16(IoC, Port, XM_PHY_DATA, pVal);
112 }
113} /* SkXmPhyRead */
114
115
116/******************************************************************************
117 *
118 * SkXmPhyWrite() - Write to XMAC PHY register
119 *
120 * Description: writes a 16-bit word to XMAC PHY or ext. PHY
121 *
122 * Returns:
123 * nothing
124 */
125void SkXmPhyWrite(
126SK_AC *pAC, /* Adapter Context */
127SK_IOC IoC, /* I/O Context */
128int Port, /* Port Index (MAC_1 + n) */
129int PhyReg, /* Register Address (Offset) */
130SK_U16 Val) /* Value */
131{
132 SK_U16 Mmu;
133 SK_GEPORT *pPrt;
134
135 pPrt = &pAC->GIni.GP[Port];
136
137 if (pPrt->PhyType != SK_PHY_XMAC) {
138 do {
139 XM_IN16(IoC, Port, XM_MMU_CMD, &Mmu);
140 /* wait until 'Busy' is cleared */
141 } while ((Mmu & XM_MMU_PHY_BUSY) != 0);
142 }
143
144 /* write the PHY register's address */
145 XM_OUT16(IoC, Port, XM_PHY_ADDR, PhyReg | pPrt->PhyAddr);
146
147 /* write the PHY register's value */
148 XM_OUT16(IoC, Port, XM_PHY_DATA, Val);
149
150 if (pPrt->PhyType != SK_PHY_XMAC) {
151 do {
152 XM_IN16(IoC, Port, XM_MMU_CMD, &Mmu);
153 /* wait until 'Busy' is cleared */
154 } while ((Mmu & XM_MMU_PHY_BUSY) != 0);
155 }
156} /* SkXmPhyWrite */
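/*
 * Usage sketch (illustration only, not part of the original file): the
 * two accessors above are typically combined for a read-modify-write
 * cycle on an external PHY register, e.g. setting the 'disable power
 * management' bit in the Broadcom auxiliary control register, as done
 * later in SkXmInitMac().  The helper name below is hypothetical.
 */
static void SkXmPhySetBits(
SK_AC	*pAC,	/* Adapter Context */
SK_IOC	IoC,	/* I/O Context */
int		Port,	/* Port Index (MAC_1 + n) */
int		PhyReg,	/* Register Address (Offset) */
SK_U16	Bits)	/* bits to set */
{
	SK_U16	Val;

	SkXmPhyRead(pAC, IoC, Port, PhyReg, &Val);

	SkXmPhyWrite(pAC, IoC, Port, PhyReg, (SK_U16)(Val | Bits));
}
/* e.g. SkXmPhySetBits(pAC, IoC, Port, PHY_BCOM_AUX_CTRL, PHY_B_AC_DIS_PM); */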
157#endif /* GENESIS */
158
159
160#ifdef YUKON
161/******************************************************************************
162 *
163 * SkGmPhyRead() - Read from GPHY register
164 *
165 * Description: reads a 16-bit word from GPHY through MDIO
166 *
167 * Returns:
168 * nothing
169 */
170void SkGmPhyRead(
171SK_AC *pAC, /* Adapter Context */
172SK_IOC IoC, /* I/O Context */
173int Port, /* Port Index (MAC_1 + n) */
174int PhyReg, /* Register Address (Offset) */
175SK_U16 SK_FAR *pVal) /* Pointer to Value */
176{
177 SK_U16 Ctrl;
178 SK_GEPORT *pPrt;
179#ifdef VCPU
180 u_long SimCyle;
181 u_long SimLowTime;
182
183 VCPUgetTime(&SimCyle, &SimLowTime);
184 VCPUprintf(0, "SkGmPhyRead(%u), SimCyle=%u, SimLowTime=%u\n",
185 PhyReg, SimCyle, SimLowTime);
186#endif /* VCPU */
187
188 pPrt = &pAC->GIni.GP[Port];
189
190 /* set PHY-Register offset and 'Read' OpCode (= 1) */
191 *pVal = (SK_U16)(GM_SMI_CT_PHY_AD(pPrt->PhyAddr) |
192 GM_SMI_CT_REG_AD(PhyReg) | GM_SMI_CT_OP_RD);
193
194 GM_OUT16(IoC, Port, GM_SMI_CTRL, *pVal);
195
196 GM_IN16(IoC, Port, GM_SMI_CTRL, &Ctrl);
197
198 /* additional check for MDC/MDIO activity */
199 if ((Ctrl & GM_SMI_CT_BUSY) == 0) {
200 *pVal = 0;
201 return;
202 }
203
204 *pVal |= GM_SMI_CT_BUSY;
205
206 do {
207#ifdef VCPU
208 VCPUwaitTime(1000);
209#endif /* VCPU */
210
211 GM_IN16(IoC, Port, GM_SMI_CTRL, &Ctrl);
212
213 /* wait until 'ReadValid' is set */
214 } while (Ctrl == *pVal);
215
216 /* get the PHY register's value */
217 GM_IN16(IoC, Port, GM_SMI_DATA, pVal);
218
219#ifdef VCPU
220 VCPUgetTime(&SimCyle, &SimLowTime);
221 VCPUprintf(0, "VCPUgetTime(), SimCyle=%u, SimLowTime=%u\n",
222 SimCyle, SimLowTime);
223#endif /* VCPU */
224
225} /* SkGmPhyRead */
226
227
228/******************************************************************************
229 *
230 * SkGmPhyWrite() - Write to GPHY register
231 *
232 * Description: writes a 16-bit word to GPHY through MDIO
233 *
234 * Returns:
235 * nothing
236 */
237void SkGmPhyWrite(
238SK_AC *pAC, /* Adapter Context */
239SK_IOC IoC, /* I/O Context */
240int Port, /* Port Index (MAC_1 + n) */
241int PhyReg, /* Register Address (Offset) */
242SK_U16 Val) /* Value */
243{
244 SK_U16 Ctrl;
245 SK_GEPORT *pPrt;
246#ifdef VCPU
247 SK_U32 DWord;
248 u_long SimCyle;
249 u_long SimLowTime;
250
251 VCPUgetTime(&SimCyle, &SimLowTime);
252 VCPUprintf(0, "SkGmPhyWrite(Reg=%u, Val=0x%04x), SimCyle=%u, SimLowTime=%u\n",
253 PhyReg, Val, SimCyle, SimLowTime);
254#endif /* VCPU */
255
256 pPrt = &pAC->GIni.GP[Port];
257
258 /* write the PHY register's value */
259 GM_OUT16(IoC, Port, GM_SMI_DATA, Val);
260
261 /* set PHY-Register offset and 'Write' OpCode (= 0) */
262 Val = GM_SMI_CT_PHY_AD(pPrt->PhyAddr) | GM_SMI_CT_REG_AD(PhyReg);
263
264 GM_OUT16(IoC, Port, GM_SMI_CTRL, Val);
265
266 GM_IN16(IoC, Port, GM_SMI_CTRL, &Ctrl);
267
268 /* additional check for MDC/MDIO activity */
269 if ((Ctrl & GM_SMI_CT_BUSY) == 0) {
270 return;
271 }
272
273 Val |= GM_SMI_CT_BUSY;
274
275 do {
276#ifdef VCPU
277 /* read Timer value */
278 SK_IN32(IoC, B2_TI_VAL, &DWord);
279
280 VCPUwaitTime(1000);
281#endif /* VCPU */
282
283 GM_IN16(IoC, Port, GM_SMI_CTRL, &Ctrl);
284
285 /* wait until 'Busy' is cleared */
286 } while (Ctrl == Val);
287
288#ifdef VCPU
289 VCPUgetTime(&SimCyle, &SimLowTime);
290 VCPUprintf(0, "VCPUgetTime(), SimCyle=%u, SimLowTime=%u\n",
291 SimCyle, SimLowTime);
292#endif /* VCPU */
293
294} /* SkGmPhyWrite */
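/*
 * Note (illustrative sketch, not original code): the busy-wait loops in
 * SkGmPhyRead()/SkGmPhyWrite() above poll without an upper bound.  A
 * bounded poll of the SMI control register could look like the helper
 * below; the retry limit of 1000 iterations is an arbitrary assumption.
 */
static int SkGmSmiIsIdle(
SK_AC	*pAC,	/* Adapter Context */
SK_IOC	IoC,	/* I/O Context */
int		Port)	/* Port Index (MAC_1 + n) */
{
	int		i;
	SK_U16	Ctrl;

	for (i = 0; i < 1000; i++) {
		GM_IN16(IoC, Port, GM_SMI_CTRL, &Ctrl);

		if ((Ctrl & GM_SMI_CT_BUSY) == 0) {
			/* SMI interface is idle */
			return(1);
		}
	}
	/* still busy after 1000 polls, give up */
	return(0);
}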
295#endif /* YUKON */
296
297
298#ifdef SK_DIAG
299/******************************************************************************
300 *
301 * SkGePhyRead() - Read from PHY register
302 *
303 * Description: calls a read PHY routine dep. on board type
304 *
305 * Returns:
306 * nothing
307 */
308void SkGePhyRead(
309SK_AC *pAC, /* Adapter Context */
310SK_IOC IoC, /* I/O Context */
311int Port, /* Port Index (MAC_1 + n) */
312int PhyReg, /* Register Address (Offset) */
313SK_U16 *pVal) /* Pointer to Value */
314{
315 void (*r_func)(SK_AC *pAC, SK_IOC IoC, int Port, int Reg, SK_U16 *pVal);
316
317 if (pAC->GIni.GIGenesis) {
318 r_func = SkXmPhyRead;
319 }
320 else {
321 r_func = SkGmPhyRead;
322 }
323
324 r_func(pAC, IoC, Port, PhyReg, pVal);
325} /* SkGePhyRead */
326
327
328/******************************************************************************
329 *
330 * SkGePhyWrite() - Write to PHY register
331 *
332 * Description: calls a write PHY routine dep. on board type
333 *
334 * Returns:
335 * nothing
336 */
337void SkGePhyWrite(
338SK_AC *pAC, /* Adapter Context */
339SK_IOC IoC, /* I/O Context */
340int Port, /* Port Index (MAC_1 + n) */
341int PhyReg, /* Register Address (Offset) */
342SK_U16 Val) /* Value */
343{
344 void (*w_func)(SK_AC *pAC, SK_IOC IoC, int Port, int Reg, SK_U16 Val);
345
346 if (pAC->GIni.GIGenesis) {
347 w_func = SkXmPhyWrite;
348 }
349 else {
350 w_func = SkGmPhyWrite;
351 }
352
353 w_func(pAC, IoC, Port, PhyReg, Val);
354} /* SkGePhyWrite */
355#endif /* SK_DIAG */
356
357
358/******************************************************************************
359 *
360 * SkMacPromiscMode() - Enable / Disable Promiscuous Mode
361 *
362 * Description:
363 * enables / disables promiscuous mode by setting Mode Register (XMAC) or
364 * Receive Control Register (GMAC) dep. on board type
365 *
366 * Returns:
367 * nothing
368 */
369void SkMacPromiscMode(
370SK_AC *pAC, /* adapter context */
371SK_IOC IoC, /* IO context */
372int Port, /* Port Index (MAC_1 + n) */
373SK_BOOL Enable) /* Enable / Disable */
374{
375#ifdef YUKON
376 SK_U16 RcReg;
377#endif
378#ifdef GENESIS
379 SK_U32 MdReg;
380#endif
381
382#ifdef GENESIS
383 if (pAC->GIni.GIGenesis) {
384
385 XM_IN32(IoC, Port, XM_MODE, &MdReg);
386 /* enable or disable promiscuous mode */
387 if (Enable) {
388 MdReg |= XM_MD_ENA_PROM;
389 }
390 else {
391 MdReg &= ~XM_MD_ENA_PROM;
392 }
393 /* setup Mode Register */
394 XM_OUT32(IoC, Port, XM_MODE, MdReg);
395 }
396#endif /* GENESIS */
397
398#ifdef YUKON
399 if (pAC->GIni.GIYukon) {
400
401 GM_IN16(IoC, Port, GM_RX_CTRL, &RcReg);
402
403 /* enable or disable unicast and multicast filtering */
404 if (Enable) {
405 RcReg &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
406 }
407 else {
408 RcReg |= (GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
409 }
410 /* setup Receive Control Register */
411 GM_OUT16(IoC, Port, GM_RX_CTRL, RcReg);
412 }
413#endif /* YUKON */
414
415} /* SkMacPromiscMode*/
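/*
 * Usage sketch (illustrative, not part of the original file): the OS
 * specific part of the driver typically toggles promiscuous mode from
 * its receive-mode / multicast handling, e.g.:
 *
 *	SkMacPromiscMode(pAC, IoC, Port, SK_TRUE);	enter promiscuous mode
 *	SkMacPromiscMode(pAC, IoC, Port, SK_FALSE);	restore address filtering
 */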
416
417
418/******************************************************************************
419 *
420 * SkMacHashing() - Enable / Disable Hashing
421 *
422 * Description:
423 * enables / disables hashing by setting Mode Register (XMAC) or
424 * Receive Control Register (GMAC) dep. on board type
425 *
426 * Returns:
427 * nothing
428 */
429void SkMacHashing(
430SK_AC *pAC, /* adapter context */
431SK_IOC IoC, /* IO context */
432int Port, /* Port Index (MAC_1 + n) */
433SK_BOOL Enable) /* Enable / Disable */
434{
435#ifdef YUKON
436 SK_U16 RcReg;
437#endif
438#ifdef GENESIS
439 SK_U32 MdReg;
440#endif
441
442#ifdef GENESIS
443 if (pAC->GIni.GIGenesis) {
444
445 XM_IN32(IoC, Port, XM_MODE, &MdReg);
446 /* enable or disable hashing */
447 if (Enable) {
448 MdReg |= XM_MD_ENA_HASH;
449 }
450 else {
451 MdReg &= ~XM_MD_ENA_HASH;
452 }
453 /* setup Mode Register */
454 XM_OUT32(IoC, Port, XM_MODE, MdReg);
455 }
456#endif /* GENESIS */
457
458#ifdef YUKON
459 if (pAC->GIni.GIYukon) {
460
461 GM_IN16(IoC, Port, GM_RX_CTRL, &RcReg);
462
463 /* enable or disable multicast filtering */
464 if (Enable) {
465 RcReg |= GM_RXCR_MCF_ENA;
466 }
467 else {
468 RcReg &= ~GM_RXCR_MCF_ENA;
469 }
470 /* setup Receive Control Register */
471 GM_OUT16(IoC, Port, GM_RX_CTRL, RcReg);
472 }
473#endif /* YUKON */
474
475} /* SkMacHashing*/
476
477
478#ifdef SK_DIAG
479/******************************************************************************
480 *
481 * SkXmSetRxCmd() - Modify the value of the XMAC's Rx Command Register
482 *
483 * Description:
484 * The features
485 * - FCS stripping, SK_STRIP_FCS_ON/OFF
486 * - pad byte stripping, SK_STRIP_PAD_ON/OFF
487 * - don't set XMR_FS_ERR in status SK_LENERR_OK_ON/OFF
488 * for inrange length error frames
489 * - don't set XMR_FS_ERR in status SK_BIG_PK_OK_ON/OFF
490 * for frames > 1514 bytes
491 * - enable Rx of own packets SK_SELF_RX_ON/OFF
492 *
493 * for incoming packets may be enabled/disabled by this function.
494 * Additional modes may be added later.
495 * Multiple modes can be enabled/disabled at the same time.
496 * The new configuration is written to the Rx Command register immediately.
497 *
498 * Returns:
499 * nothing
500 */
501static void SkXmSetRxCmd(
502SK_AC *pAC, /* adapter context */
503SK_IOC IoC, /* IO context */
504int Port, /* Port Index (MAC_1 + n) */
505int Mode) /* Mode is SK_STRIP_FCS_ON/OFF, SK_STRIP_PAD_ON/OFF,
506 SK_LENERR_OK_ON/OFF, or SK_BIG_PK_OK_ON/OFF */
507{
508 SK_U16 OldRxCmd;
509 SK_U16 RxCmd;
510
511 XM_IN16(IoC, Port, XM_RX_CMD, &OldRxCmd);
512
513 RxCmd = OldRxCmd;
514
515 switch (Mode & (SK_STRIP_FCS_ON | SK_STRIP_FCS_OFF)) {
516 case SK_STRIP_FCS_ON:
517 RxCmd |= XM_RX_STRIP_FCS;
518 break;
519 case SK_STRIP_FCS_OFF:
520 RxCmd &= ~XM_RX_STRIP_FCS;
521 break;
522 }
523
524 switch (Mode & (SK_STRIP_PAD_ON | SK_STRIP_PAD_OFF)) {
525 case SK_STRIP_PAD_ON:
526 RxCmd |= XM_RX_STRIP_PAD;
527 break;
528 case SK_STRIP_PAD_OFF:
529 RxCmd &= ~XM_RX_STRIP_PAD;
530 break;
531 }
532
533 switch (Mode & (SK_LENERR_OK_ON | SK_LENERR_OK_OFF)) {
534 case SK_LENERR_OK_ON:
535 RxCmd |= XM_RX_LENERR_OK;
536 break;
537 case SK_LENERR_OK_OFF:
538 RxCmd &= ~XM_RX_LENERR_OK;
539 break;
540 }
541
542 switch (Mode & (SK_BIG_PK_OK_ON | SK_BIG_PK_OK_OFF)) {
543 case SK_BIG_PK_OK_ON:
544 RxCmd |= XM_RX_BIG_PK_OK;
545 break;
546 case SK_BIG_PK_OK_OFF:
547 RxCmd &= ~XM_RX_BIG_PK_OK;
548 break;
549 }
550
551 switch (Mode & (SK_SELF_RX_ON | SK_SELF_RX_OFF)) {
552 case SK_SELF_RX_ON:
553 RxCmd |= XM_RX_SELF_RX;
554 break;
555 case SK_SELF_RX_OFF:
556 RxCmd &= ~XM_RX_SELF_RX;
557 break;
558 }
559
560 /* Write the new mode to the Rx command register if required */
561 if (OldRxCmd != RxCmd) {
562 XM_OUT16(IoC, Port, XM_RX_CMD, RxCmd);
563 }
564} /* SkXmSetRxCmd */
565
566
567/******************************************************************************
568 *
569 * SkGmSetRxCmd() - Modify the value of the GMAC's Rx Control Register
570 *
571 * Description:
572 * The features
573 * - FCS (CRC) stripping, SK_STRIP_FCS_ON/OFF
574 * - don't set GMR_FS_LONG_ERR SK_BIG_PK_OK_ON/OFF
575 * for frames > 1514 bytes
576 * - enable Rx of own packets SK_SELF_RX_ON/OFF
577 *
578 * for incoming packets may be enabled/disabled by this function.
579 * Additional modes may be added later.
580 * Multiple modes can be enabled/disabled at the same time.
581 * The new configuration is written to the Rx Command register immediately.
582 *
583 * Returns:
584 * nothing
585 */
586static void SkGmSetRxCmd(
587SK_AC *pAC, /* adapter context */
588SK_IOC IoC, /* IO context */
589int Port, /* Port Index (MAC_1 + n) */
590int Mode) /* Mode is SK_STRIP_FCS_ON/OFF, SK_STRIP_PAD_ON/OFF,
591 SK_LENERR_OK_ON/OFF, or SK_BIG_PK_OK_ON/OFF */
592{
593 SK_U16 OldRxCmd;
594 SK_U16 RxCmd;
595
596 if ((Mode & (SK_STRIP_FCS_ON | SK_STRIP_FCS_OFF)) != 0) {
597
598 GM_IN16(IoC, Port, GM_RX_CTRL, &OldRxCmd);
599
600 RxCmd = OldRxCmd;
601
602 if ((Mode & SK_STRIP_FCS_ON) != 0) {
603 RxCmd |= GM_RXCR_CRC_DIS;
604 }
605 else {
606 RxCmd &= ~GM_RXCR_CRC_DIS;
607 }
608 /* Write the new mode to the Rx control register if required */
609 if (OldRxCmd != RxCmd) {
610 GM_OUT16(IoC, Port, GM_RX_CTRL, RxCmd);
611 }
612 }
613
614 if ((Mode & (SK_BIG_PK_OK_ON | SK_BIG_PK_OK_OFF)) != 0) {
615
616 GM_IN16(IoC, Port, GM_SERIAL_MODE, &OldRxCmd);
617
618 RxCmd = OldRxCmd;
619
620 if ((Mode & SK_BIG_PK_OK_ON) != 0) {
621 RxCmd |= GM_SMOD_JUMBO_ENA;
622 }
623 else {
624 RxCmd &= ~GM_SMOD_JUMBO_ENA;
625 }
626 /* Write the new mode to the Rx control register if required */
627 if (OldRxCmd != RxCmd) {
628 GM_OUT16(IoC, Port, GM_SERIAL_MODE, RxCmd);
629 }
630 }
631} /* SkGmSetRxCmd */
632
633
634/******************************************************************************
635 *
636 * SkMacSetRxCmd() - Modify the value of the MAC's Rx Control Register
637 *
638 * Description: modifies the MAC's Rx Control reg. dep. on board type
639 *
640 * Returns:
641 * nothing
642 */
643void SkMacSetRxCmd(
644SK_AC *pAC, /* adapter context */
645SK_IOC IoC, /* IO context */
646int Port, /* Port Index (MAC_1 + n) */
647int Mode) /* Rx Mode */
648{
649 if (pAC->GIni.GIGenesis) {
650
651 SkXmSetRxCmd(pAC, IoC, Port, Mode);
652 }
653 else {
654
655 SkGmSetRxCmd(pAC, IoC, Port, Mode);
656 }
657
658} /* SkMacSetRxCmd */
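/*
 * Usage sketch (illustration only): the mode flags documented above may
 * be OR-ed together in a single call, e.g. a diagnostics setup that
 * strips the FCS and accepts frames larger than 1514 bytes on either
 * board type.  The wrapper name below is hypothetical.
 */
static void SkMacDiagRxSetup(
SK_AC	*pAC,	/* adapter context */
SK_IOC	IoC,	/* IO context */
int		Port)	/* Port Index (MAC_1 + n) */
{
	SkMacSetRxCmd(pAC, IoC, Port, SK_STRIP_FCS_ON | SK_BIG_PK_OK_ON);
}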
659
660
661/******************************************************************************
662 *
663 * SkMacCrcGener() - Enable / Disable CRC Generation
664 *
665 * Description: enables / disables CRC generation dep. on board type
666 *
667 * Returns:
668 * nothing
669 */
670void SkMacCrcGener(
671SK_AC *pAC, /* adapter context */
672SK_IOC IoC, /* IO context */
673int Port, /* Port Index (MAC_1 + n) */
674SK_BOOL Enable) /* Enable / Disable */
675{
676 SK_U16 Word;
677
678 if (pAC->GIni.GIGenesis) {
679
680 XM_IN16(IoC, Port, XM_TX_CMD, &Word);
681
682 if (Enable) {
683 Word &= ~XM_TX_NO_CRC;
684 }
685 else {
686 Word |= XM_TX_NO_CRC;
687 }
688 /* setup Tx Command Register */
689 XM_OUT16(IoC, Port, XM_TX_CMD, Word);
690 }
691 else {
692
693 GM_IN16(IoC, Port, GM_TX_CTRL, &Word);
694
695 if (Enable) {
696 Word &= ~GM_TXCR_CRC_DIS;
697 }
698 else {
699 Word |= GM_TXCR_CRC_DIS;
700 }
701 /* setup Tx Control Register */
702 GM_OUT16(IoC, Port, GM_TX_CTRL, Word);
703 }
704
705} /* SkMacCrcGener*/
706
707#endif /* SK_DIAG */
708
709
710#ifdef GENESIS
711/******************************************************************************
712 *
713 * SkXmClrExactAddr() - Clear Exact Match Address Registers
714 *
715 * Description:
716 * All Exact Match Address registers of the XMAC 'Port' will be
717 * cleared starting with 'StartNum' up to (and including) the
718 * Exact Match address number of 'StopNum'.
719 *
720 * Returns:
721 * nothing
722 */
723void SkXmClrExactAddr(
724SK_AC *pAC, /* adapter context */
725SK_IOC IoC, /* IO context */
726int Port, /* Port Index (MAC_1 + n) */
727int StartNum, /* Begin with this Address Register Index (0..15) */
728int StopNum) /* Stop after finished with this Register Idx (0..15) */
729{
730 int i;
731 SK_U16 ZeroAddr[3] = {0x0000, 0x0000, 0x0000};
732
733 if ((unsigned)StartNum > 15 || (unsigned)StopNum > 15 ||
734 StartNum > StopNum) {
735
736 SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_HWI_E001, SKERR_HWI_E001MSG);
737 return;
738 }
739
740 for (i = StartNum; i <= StopNum; i++) {
741 XM_OUTADDR(IoC, Port, XM_EXM(i), &ZeroAddr[0]);
742 }
743} /* SkXmClrExactAddr */
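/*
 * Usage sketch (illustrative): clearing the whole exact-match table of
 * a port covers address registers 0 through 15, as SkXmSoftRst() below
 * does:
 *
 *	SkXmClrExactAddr(pAC, IoC, Port, 0, 15);
 */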
744#endif /* GENESIS */
745
746
747/******************************************************************************
748 *
749 * SkMacFlushTxFifo() - Flush the MAC's transmit FIFO
750 *
751 * Description:
752 * Flush the transmit FIFO of the MAC specified by the index 'Port'
753 *
754 * Returns:
755 * nothing
756 */
757void SkMacFlushTxFifo(
758SK_AC *pAC, /* adapter context */
759SK_IOC IoC, /* IO context */
760int Port) /* Port Index (MAC_1 + n) */
761{
762#ifdef GENESIS
763 SK_U32 MdReg;
764
765 if (pAC->GIni.GIGenesis) {
766
767 XM_IN32(IoC, Port, XM_MODE, &MdReg);
768
769 XM_OUT32(IoC, Port, XM_MODE, MdReg | XM_MD_FTF);
770 }
771#endif /* GENESIS */
772
773#ifdef YUKON
774 if (pAC->GIni.GIYukon) {
775		/* no way to flush the FIFO; we would have to issue a reset */
776 /* TBD */
777 }
778#endif /* YUKON */
779
780} /* SkMacFlushTxFifo */
781
782
783/******************************************************************************
784 *
785 * SkMacFlushRxFifo() - Flush the MAC's receive FIFO
786 *
787 * Description:
788 * Flush the receive FIFO of the MAC specified by the index 'Port'
789 *
790 * Returns:
791 * nothing
792 */
793static void SkMacFlushRxFifo(
794SK_AC *pAC, /* adapter context */
795SK_IOC IoC, /* IO context */
796int Port) /* Port Index (MAC_1 + n) */
797{
798#ifdef GENESIS
799 SK_U32 MdReg;
800
801 if (pAC->GIni.GIGenesis) {
802
803 XM_IN32(IoC, Port, XM_MODE, &MdReg);
804
805 XM_OUT32(IoC, Port, XM_MODE, MdReg | XM_MD_FRF);
806 }
807#endif /* GENESIS */
808
809#ifdef YUKON
810 if (pAC->GIni.GIYukon) {
811		/* no way to flush the FIFO; we would have to issue a reset */
812 /* TBD */
813 }
814#endif /* YUKON */
815
816} /* SkMacFlushRxFifo */
817
818
819#ifdef GENESIS
820/******************************************************************************
821 *
822 * SkXmSoftRst() - Do a XMAC software reset
823 *
824 * Description:
825 * The PHY registers should not be destroyed during this
826 * kind of software reset. Therefore the XMAC Software Reset
827 * (XM_GP_RES_MAC bit in XM_GP_PORT) must not be used!
828 *
829 * The software reset is done by
830 * - disabling the Rx and Tx state machine,
831 * - resetting the statistics module,
832 *	- clearing all other significant XMAC Mode,
833 * Command, and Control Registers
834 * - clearing the Hash Register and the
835 * Exact Match Address registers, and
836 * - flushing the XMAC's Rx and Tx FIFOs.
837 *
838 * Note:
839 * Another requirement when stopping the XMAC is to
840 * avoid sending corrupted frames on the network.
841 * Disabling the Tx state machine will NOT interrupt
842 * the currently transmitted frame. But we must take care
843 * that the Tx FIFO is cleared AFTER the current frame
844 *	has been completely sent to the network.
845 *
846 * It takes about 12ns to send a frame with 1538 bytes.
847 * One PCI clock goes at least 15ns (66MHz). Therefore
848 * after reading XM_GP_PORT back, we are sure that the
849 * transmitter is disabled AND idle. And this means
850 * we may flush the transmit FIFO now.
851 *
852 * Returns:
853 * nothing
854 */
855static void SkXmSoftRst(
856SK_AC *pAC, /* adapter context */
857SK_IOC IoC, /* IO context */
858int Port) /* Port Index (MAC_1 + n) */
859{
860 SK_U16 ZeroAddr[4] = {0x0000, 0x0000, 0x0000, 0x0000};
861
862 /* reset the statistics module */
863 XM_OUT32(IoC, Port, XM_GP_PORT, XM_GP_RES_STAT);
864
865 /* disable all XMAC IRQs */
866 XM_OUT16(IoC, Port, XM_IMSK, 0xffff);
867
868 XM_OUT32(IoC, Port, XM_MODE, 0); /* clear Mode Reg */
869
870 XM_OUT16(IoC, Port, XM_TX_CMD, 0); /* reset TX CMD Reg */
871 XM_OUT16(IoC, Port, XM_RX_CMD, 0); /* reset RX CMD Reg */
872
873 /* disable all PHY IRQs */
874 switch (pAC->GIni.GP[Port].PhyType) {
875 case SK_PHY_BCOM:
876 SkXmPhyWrite(pAC, IoC, Port, PHY_BCOM_INT_MASK, 0xffff);
877 break;
878#ifdef OTHER_PHY
879 case SK_PHY_LONE:
880 SkXmPhyWrite(pAC, IoC, Port, PHY_LONE_INT_ENAB, 0);
881 break;
882 case SK_PHY_NAT:
883 /* todo: National
884 SkXmPhyWrite(pAC, IoC, Port, PHY_NAT_INT_MASK, 0xffff); */
885 break;
886#endif /* OTHER_PHY */
887 }
888
889 /* clear the Hash Register */
890 XM_OUTHASH(IoC, Port, XM_HSM, &ZeroAddr);
891
892 /* clear the Exact Match Address registers */
893 SkXmClrExactAddr(pAC, IoC, Port, 0, 15);
894
895 /* clear the Source Check Address registers */
896 XM_OUTHASH(IoC, Port, XM_SRC_CHK, &ZeroAddr);
897
898} /* SkXmSoftRst */
899
900
901/******************************************************************************
902 *
903 * SkXmHardRst() - Do a XMAC hardware reset
904 *
905 * Description:
906 * The XMAC of the specified 'Port' and all connected devices
907 * (PHY and SERDES) will receive a reset signal on its *Reset pins.
908 * External PHYs must be reset by clearing a bit in the GPIO register
909 * (Timing requirements: Broadcom: 400ns, Level One: none, National: 80ns).
910 *
911 * ATTENTION:
912 * It is absolutely necessary to reset the SW_RST Bit first
913 * before calling this function.
914 *
915 * Returns:
916 * nothing
917 */
918static void SkXmHardRst(
919SK_AC *pAC, /* adapter context */
920SK_IOC IoC, /* IO context */
921int Port) /* Port Index (MAC_1 + n) */
922{
923 SK_U32 Reg;
924 int i;
925 int TOut;
926 SK_U16 Word;
927
928 for (i = 0; i < 4; i++) {
929 /* TX_MFF_CTRL1 has 32 bits, but only the lowest 16 bits are used */
930 SK_OUT16(IoC, MR_ADDR(Port, TX_MFF_CTRL1), MFF_CLR_MAC_RST);
931
932 TOut = 0;
933 do {
934 if (TOut++ > 10000) {
935 /*
936 * Adapter seems to be in RESET state.
937 * Registers cannot be written.
938 */
939 return;
940 }
941
942 SK_OUT16(IoC, MR_ADDR(Port, TX_MFF_CTRL1), MFF_SET_MAC_RST);
943
944 SK_IN16(IoC, MR_ADDR(Port, TX_MFF_CTRL1), &Word);
945
946 } while ((Word & MFF_SET_MAC_RST) == 0);
947 }
948
949 /* For external PHYs there must be special handling */
950 if (pAC->GIni.GP[Port].PhyType != SK_PHY_XMAC) {
951
952 SK_IN32(IoC, B2_GP_IO, &Reg);
953
954 if (Port == 0) {
955 Reg |= GP_DIR_0; /* set to output */
956 Reg &= ~GP_IO_0; /* set PHY reset (active low) */
957 }
958 else {
959 Reg |= GP_DIR_2; /* set to output */
960 Reg &= ~GP_IO_2; /* set PHY reset (active low) */
961 }
962 /* reset external PHY */
963 SK_OUT32(IoC, B2_GP_IO, Reg);
964
965 /* short delay */
966 SK_IN32(IoC, B2_GP_IO, &Reg);
967 }
968} /* SkXmHardRst */
969
970
971/******************************************************************************
972 *
973 * SkXmClearRst() - Release the PHY & XMAC reset
974 *
975 * Description:
976 *
977 * Returns:
978 * nothing
979 */
980static void SkXmClearRst(
981SK_AC *pAC, /* adapter context */
982SK_IOC IoC, /* IO context */
983int Port) /* Port Index (MAC_1 + n) */
984{
985 SK_U32 DWord;
986
987 /* clear HW reset */
988 SK_OUT16(IoC, MR_ADDR(Port, TX_MFF_CTRL1), MFF_CLR_MAC_RST);
989
990 if (pAC->GIni.GP[Port].PhyType != SK_PHY_XMAC) {
991
992 SK_IN32(IoC, B2_GP_IO, &DWord);
993
994 if (Port == 0) {
995 DWord |= (GP_DIR_0 | GP_IO_0); /* set to output */
996 }
997 else {
998 DWord |= (GP_DIR_2 | GP_IO_2); /* set to output */
999 }
1000 /* Clear PHY reset */
1001 SK_OUT32(IoC, B2_GP_IO, DWord);
1002
1003 /* Enable GMII interface */
1004 XM_OUT16(IoC, Port, XM_HW_CFG, XM_HW_GMII_MD);
1005 }
1006} /* SkXmClearRst */
1007#endif /* GENESIS */
1008
1009
1010#ifdef YUKON
1011/******************************************************************************
1012 *
1013 * SkGmSoftRst() - Do a GMAC software reset
1014 *
1015 * Description:
1016 * The GPHY registers should not be destroyed during this
1017 * kind of software reset.
1018 *
1019 * Returns:
1020 * nothing
1021 */
1022static void SkGmSoftRst(
1023SK_AC *pAC, /* adapter context */
1024SK_IOC IoC, /* IO context */
1025int Port) /* Port Index (MAC_1 + n) */
1026{
1027 SK_U16 EmptyHash[4] = {0x0000, 0x0000, 0x0000, 0x0000};
1028 SK_U16 RxCtrl;
1029
1030 /* reset the statistics module */
1031
1032 /* disable all GMAC IRQs */
1033 SK_OUT8(IoC, GMAC_IRQ_MSK, 0);
1034
1035 /* disable all PHY IRQs */
1036 SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_INT_MASK, 0);
1037
1038 /* clear the Hash Register */
1039 GM_OUTHASH(IoC, Port, GM_MC_ADDR_H1, EmptyHash);
1040
1041 /* Enable Unicast and Multicast filtering */
1042 GM_IN16(IoC, Port, GM_RX_CTRL, &RxCtrl);
1043
1044 GM_OUT16(IoC, Port, GM_RX_CTRL,
1045 (SK_U16)(RxCtrl | GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA));
1046
1047} /* SkGmSoftRst */
1048
1049
1050/******************************************************************************
1051 *
1052 * SkGmHardRst() - Do a GMAC hardware reset
1053 *
1054 * Description:
1055 *
1056 * Returns:
1057 * nothing
1058 */
1059static void SkGmHardRst(
1060SK_AC *pAC, /* adapter context */
1061SK_IOC IoC, /* IO context */
1062int Port) /* Port Index (MAC_1 + n) */
1063{
1064 SK_U32 DWord;
1065
1066 /* WA code for COMA mode */
1067 if (pAC->GIni.GIYukonLite &&
1068 pAC->GIni.GIChipRev >= CHIP_REV_YU_LITE_A3) {
1069
1070 SK_IN32(IoC, B2_GP_IO, &DWord);
1071
1072 DWord |= (GP_DIR_9 | GP_IO_9);
1073
1074 /* set PHY reset */
1075 SK_OUT32(IoC, B2_GP_IO, DWord);
1076 }
1077
1078 /* set GPHY Control reset */
1079 SK_OUT32(IoC, MR_ADDR(Port, GPHY_CTRL), GPC_RST_SET);
1080
1081 /* set GMAC Control reset */
1082 SK_OUT32(IoC, MR_ADDR(Port, GMAC_CTRL), GMC_RST_SET);
1083
1084} /* SkGmHardRst */
1085
1086
1087/******************************************************************************
1088 *
1089 * SkGmClearRst() - Release the GPHY & GMAC reset
1090 *
1091 * Description:
1092 *
1093 * Returns:
1094 * nothing
1095 */
1096static void SkGmClearRst(
1097SK_AC *pAC, /* adapter context */
1098SK_IOC IoC, /* IO context */
1099int Port) /* Port Index (MAC_1 + n) */
1100{
1101 SK_U32 DWord;
1102
1103#ifdef XXX
1104 /* clear GMAC Control reset */
1105 SK_OUT32(IoC, MR_ADDR(Port, GMAC_CTRL), GMC_RST_CLR);
1106
1107 /* set GMAC Control reset */
1108 SK_OUT32(IoC, MR_ADDR(Port, GMAC_CTRL), GMC_RST_SET);
1109#endif /* XXX */
1110
1111 /* WA code for COMA mode */
1112 if (pAC->GIni.GIYukonLite &&
1113 pAC->GIni.GIChipRev >= CHIP_REV_YU_LITE_A3) {
1114
1115 SK_IN32(IoC, B2_GP_IO, &DWord);
1116
1117 DWord |= GP_DIR_9; /* set to output */
1118 DWord &= ~GP_IO_9; /* clear PHY reset (active high) */
1119
1120 /* clear PHY reset */
1121 SK_OUT32(IoC, B2_GP_IO, DWord);
1122 }
1123
1124 /* set HWCFG_MODE */
1125 DWord = GPC_INT_POL_HI | GPC_DIS_FC | GPC_DIS_SLEEP |
1126 GPC_ENA_XC | GPC_ANEG_ADV_ALL_M | GPC_ENA_PAUSE |
1127 (pAC->GIni.GICopperType ? GPC_HWCFG_GMII_COP :
1128 GPC_HWCFG_GMII_FIB);
1129
1130 /* set GPHY Control reset */
1131 SK_OUT32(IoC, MR_ADDR(Port, GPHY_CTRL), DWord | GPC_RST_SET);
1132
1133 /* release GPHY Control reset */
1134 SK_OUT32(IoC, MR_ADDR(Port, GPHY_CTRL), DWord | GPC_RST_CLR);
1135
1136#ifdef VCPU
1137 VCpuWait(9000);
1138#endif /* VCPU */
1139
1140 /* clear GMAC Control reset */
1141 SK_OUT32(IoC, MR_ADDR(Port, GMAC_CTRL), GMC_PAUSE_ON | GMC_RST_CLR);
1142
1143#ifdef VCPU
1144 VCpuWait(2000);
1145
1146 SK_IN32(IoC, MR_ADDR(Port, GPHY_CTRL), &DWord);
1147
1148 SK_IN32(IoC, B0_ISRC, &DWord);
1149#endif /* VCPU */
1150
1151} /* SkGmClearRst */
1152#endif /* YUKON */
1153
1154
1155/******************************************************************************
1156 *
1157 * SkMacSoftRst() - Do a MAC software reset
1158 *
1159 * Description: calls a MAC software reset routine dep. on board type
1160 *
1161 * Returns:
1162 * nothing
1163 */
1164void SkMacSoftRst(
1165SK_AC *pAC, /* adapter context */
1166SK_IOC IoC, /* IO context */
1167int Port) /* Port Index (MAC_1 + n) */
1168{
1169 SK_GEPORT *pPrt;
1170
1171 pPrt = &pAC->GIni.GP[Port];
1172
1173 /* disable receiver and transmitter */
1174 SkMacRxTxDisable(pAC, IoC, Port);
1175
1176#ifdef GENESIS
1177 if (pAC->GIni.GIGenesis) {
1178
1179 SkXmSoftRst(pAC, IoC, Port);
1180 }
1181#endif /* GENESIS */
1182
1183#ifdef YUKON
1184 if (pAC->GIni.GIYukon) {
1185
1186 SkGmSoftRst(pAC, IoC, Port);
1187 }
1188#endif /* YUKON */
1189
1190 /* flush the MAC's Rx and Tx FIFOs */
1191 SkMacFlushTxFifo(pAC, IoC, Port);
1192
1193 SkMacFlushRxFifo(pAC, IoC, Port);
1194
1195 pPrt->PState = SK_PRT_STOP;
1196
1197} /* SkMacSoftRst */
1198
1199
1200/******************************************************************************
1201 *
1202 * SkMacHardRst() - Do a MAC hardware reset
1203 *
1204 * Description: calls a MAC hardware reset routine dep. on board type
1205 *
1206 * Returns:
1207 * nothing
1208 */
1209void SkMacHardRst(
1210SK_AC *pAC, /* adapter context */
1211SK_IOC IoC, /* IO context */
1212int Port) /* Port Index (MAC_1 + n) */
1213{
1214
1215#ifdef GENESIS
1216 if (pAC->GIni.GIGenesis) {
1217
1218 SkXmHardRst(pAC, IoC, Port);
1219 }
1220#endif /* GENESIS */
1221
1222#ifdef YUKON
1223 if (pAC->GIni.GIYukon) {
1224
1225 SkGmHardRst(pAC, IoC, Port);
1226 }
1227#endif /* YUKON */
1228
1229 pAC->GIni.GP[Port].PState = SK_PRT_RESET;
1230
1231} /* SkMacHardRst */
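/*
 * Usage sketch (illustrative, not original code; the helper name is
 * hypothetical): a complete port shutdown usually performs the software
 * reset first (which disables Rx/Tx, flushes the FIFOs and leaves
 * PState at SK_PRT_STOP) and then asserts the hardware reset, leaving
 * the port in SK_PRT_RESET.  The exact ordering in the OS specific
 * driver parts may differ.
 */
static void SkMacStopAndReset(
SK_AC	*pAC,	/* adapter context */
SK_IOC	IoC,	/* IO context */
int		Port)	/* Port Index (MAC_1 + n) */
{
	SkMacSoftRst(pAC, IoC, Port);

	SkMacHardRst(pAC, IoC, Port);
}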
1232
1233
1234#ifdef GENESIS
1235/******************************************************************************
1236 *
1237 * SkXmInitMac() - Initialize the XMAC II
1238 *
1239 * Description:
1240 * Initialize the XMAC of the specified port.
1241 * The XMAC must be reset or stopped before calling this function.
1242 *
1243 * Note:
1244 * The XMAC's Rx and Tx state machine is still disabled when returning.
1245 *
1246 * Returns:
1247 * nothing
1248 */
1249void SkXmInitMac(
1250SK_AC *pAC, /* adapter context */
1251SK_IOC IoC, /* IO context */
1252int Port) /* Port Index (MAC_1 + n) */
1253{
1254 SK_GEPORT *pPrt;
1255 int i;
1256 SK_U16 SWord;
1257
1258 pPrt = &pAC->GIni.GP[Port];
1259
1260 if (pPrt->PState == SK_PRT_STOP) {
1261 /* Port State: SK_PRT_STOP */
1262 /* Verify that the reset bit is cleared */
1263 SK_IN16(IoC, MR_ADDR(Port, TX_MFF_CTRL1), &SWord);
1264
1265 if ((SWord & MFF_SET_MAC_RST) != 0) {
1266 /* PState does not match HW state */
1267 SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_HWI_E006, SKERR_HWI_E006MSG);
1268 /* Correct it */
1269 pPrt->PState = SK_PRT_RESET;
1270 }
1271 }
1272
1273 if (pPrt->PState == SK_PRT_RESET) {
1274
1275 SkXmClearRst(pAC, IoC, Port);
1276
1277 if (pPrt->PhyType != SK_PHY_XMAC) {
1278 /* read Id from external PHY (all have the same address) */
1279 SkXmPhyRead(pAC, IoC, Port, PHY_XMAC_ID1, &pPrt->PhyId1);
1280
1281 /*
1282 * Optimize MDIO transfer by suppressing preamble.
1283 * Must be done AFTER first access to BCOM chip.
1284 */
1285 XM_IN16(IoC, Port, XM_MMU_CMD, &SWord);
1286
1287 XM_OUT16(IoC, Port, XM_MMU_CMD, SWord | XM_MMU_NO_PRE);
1288
1289 if (pPrt->PhyId1 == PHY_BCOM_ID1_C0) {
1290 /*
1291 * Workaround BCOM Errata for the C0 type.
1292 * Write magic patterns to reserved registers.
1293 */
1294 i = 0;
1295 while (BcomRegC0Hack[i].PhyReg != 0) {
1296 SkXmPhyWrite(pAC, IoC, Port, BcomRegC0Hack[i].PhyReg,
1297 BcomRegC0Hack[i].PhyVal);
1298 i++;
1299 }
1300 }
1301 else if (pPrt->PhyId1 == PHY_BCOM_ID1_A1) {
1302 /*
1303 * Workaround BCOM Errata for the A1 type.
1304 * Write magic patterns to reserved registers.
1305 */
1306 i = 0;
1307 while (BcomRegA1Hack[i].PhyReg != 0) {
1308 SkXmPhyWrite(pAC, IoC, Port, BcomRegA1Hack[i].PhyReg,
1309 BcomRegA1Hack[i].PhyVal);
1310 i++;
1311 }
1312 }
1313
1314 /*
1315 * Workaround BCOM Errata (#10523) for all BCom PHYs.
1316 * Disable Power Management after reset.
1317 */
1318 SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_AUX_CTRL, &SWord);
1319
1320 SkXmPhyWrite(pAC, IoC, Port, PHY_BCOM_AUX_CTRL,
1321 (SK_U16)(SWord | PHY_B_AC_DIS_PM));
1322
1323 /* PHY LED initialization is done in SkGeXmitLED() */
1324 }
1325
1326 /* Dummy read the Interrupt source register */
1327 XM_IN16(IoC, Port, XM_ISRC, &SWord);
1328
1329 /*
1330 * The auto-negotiation process starts immediately after
1331 * clearing the reset. The auto-negotiation process should be
1332 * started by the SIRQ, therefore stop it here immediately.
1333 */
1334 SkMacInitPhy(pAC, IoC, Port, SK_FALSE);
1335
1336#ifdef TEST_ONLY
1337 /* temp. code: enable signal detect */
1338 /* WARNING: do not override GMII setting above */
1339 XM_OUT16(IoC, Port, XM_HW_CFG, XM_HW_COM4SIG);
1340#endif
1341 }
1342
1343 /*
1344 * configure the XMACs Station Address
1345 * B2_MAC_2 = xx xx xx xx xx x1 is programmed to XMAC A
1346 * B2_MAC_3 = xx xx xx xx xx x2 is programmed to XMAC B
1347 */
1348 for (i = 0; i < 3; i++) {
1349 /*
1350		 * The following 2 statements, taken together, are
1351		 * endianness independent. Remember this when changing.
1352 */
1353 SK_IN16(IoC, (B2_MAC_2 + Port * 8 + i * 2), &SWord);
1354
1355 XM_OUT16(IoC, Port, (XM_SA + i * 2), SWord);
1356 }
1357
1358 /* Tx Inter Packet Gap (XM_TX_IPG): use default */
1359 /* Tx High Water Mark (XM_TX_HI_WM): use default */
1360 /* Tx Low Water Mark (XM_TX_LO_WM): use default */
1361 /* Host Request Threshold (XM_HT_THR): use default */
1362 /* Rx Request Threshold (XM_RX_THR): use default */
1363 /* Rx Low Water Mark (XM_RX_LO_WM): use default */
1364
1365 /* configure Rx High Water Mark (XM_RX_HI_WM) */
1366 XM_OUT16(IoC, Port, XM_RX_HI_WM, SK_XM_RX_HI_WM);
1367
1368 /* Configure Tx Request Threshold */
1369 SWord = SK_XM_THR_SL; /* for single port */
1370
1371 if (pAC->GIni.GIMacsFound > 1) {
1372 switch (pAC->GIni.GIPortUsage) {
1373 case SK_RED_LINK:
1374 SWord = SK_XM_THR_REDL; /* redundant link */
1375 break;
1376 case SK_MUL_LINK:
1377 SWord = SK_XM_THR_MULL; /* load balancing */
1378 break;
1379 case SK_JUMBO_LINK:
1380 SWord = SK_XM_THR_JUMBO; /* jumbo frames */
1381 break;
1382 default:
1383 SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_HWI_E014, SKERR_HWI_E014MSG);
1384 break;
1385 }
1386 }
1387 XM_OUT16(IoC, Port, XM_TX_THR, SWord);
1388
1389 /* setup register defaults for the Tx Command Register */
1390 XM_OUT16(IoC, Port, XM_TX_CMD, XM_TX_AUTO_PAD);
1391
1392 /* setup register defaults for the Rx Command Register */
1393 SWord = XM_RX_STRIP_FCS | XM_RX_LENERR_OK;
1394
1395 if (pAC->GIni.GIPortUsage == SK_JUMBO_LINK) {
1396 SWord |= XM_RX_BIG_PK_OK;
1397 }
1398
1399 if (pPrt->PLinkMode == SK_LMODE_HALF) {
1400 /*
1401		 * In manual half-duplex mode the other side might be running
1402		 * full duplex, so ignore a missing carrier extension on
1403		 * received frames.
1404 */
1405 SWord |= XM_RX_DIS_CEXT;
1406 }
1407
1408 XM_OUT16(IoC, Port, XM_RX_CMD, SWord);
1409
1410 /*
1411 * setup register defaults for the Mode Register
1412 * - Don't strip error frames to avoid Store & Forward
1413 * on the Rx side.
1414 * - Enable 'Check Station Address' bit
1415 * - Enable 'Check Address Array' bit
1416 */
1417 XM_OUT32(IoC, Port, XM_MODE, XM_DEF_MODE);
1418
1419 /*
1420 * Initialize the Receive Counter Event Mask (XM_RX_EV_MSK)
1421 * - Enable all bits excepting 'Octets Rx OK Low CntOv'
1422 * and 'Octets Rx OK Hi Cnt Ov'.
1423 */
1424 XM_OUT32(IoC, Port, XM_RX_EV_MSK, XMR_DEF_MSK);
1425
1426 /*
1427 * Initialize the Transmit Counter Event Mask (XM_TX_EV_MSK)
1428 * - Enable all bits excepting 'Octets Tx OK Low CntOv'
1429 * and 'Octets Tx OK Hi Cnt Ov'.
1430 */
1431 XM_OUT32(IoC, Port, XM_TX_EV_MSK, XMT_DEF_MSK);
1432
1433 /*
1434 * Do NOT init XMAC interrupt mask here.
1435	 * All interrupts remain disabled until the link comes up!
1436 */
1437
1438 /*
1439 * Any additional configuration changes may be done now.
1440 * The last action is to enable the Rx and Tx state machine.
1441 * This should be done after the auto-negotiation process
1442 * has been completed successfully.
1443 */
1444} /* SkXmInitMac */
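/*
 * Typical call order (illustrative summary of the description above,
 * not original code; the Rx/Tx enable step is not shown here):
 *
 *	SkMacHardRst(pAC, IoC, Port);	port enters SK_PRT_RESET
 *	SkXmInitMac(pAC, IoC, Port);	MAC/PHY configured, auto-neg stopped
 *	... run auto-negotiation via the SIRQ module ...
 *	... then enable the Rx and Tx state machines ...
 */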
1445#endif /* GENESIS */
1446
1447
1448#ifdef YUKON
1449/******************************************************************************
1450 *
1451 * SkGmInitMac() - Initialize the GMAC
1452 *
1453 * Description:
1454 * Initialize the GMAC of the specified port.
1455 * The GMAC must be reset or stopped before calling this function.
1456 *
1457 * Note:
1458 * The GMAC's Rx and Tx state machine is still disabled when returning.
1459 *
1460 * Returns:
1461 * nothing
1462 */
1463void SkGmInitMac(
1464SK_AC *pAC, /* adapter context */
1465SK_IOC IoC, /* IO context */
1466int Port) /* Port Index (MAC_1 + n) */
1467{
1468 SK_GEPORT *pPrt;
1469 int i;
1470 SK_U16 SWord;
1471 SK_U32 DWord;
1472
1473 pPrt = &pAC->GIni.GP[Port];
1474
1475 if (pPrt->PState == SK_PRT_STOP) {
1476 /* Port State: SK_PRT_STOP */
1477 /* Verify that the reset bit is cleared */
1478 SK_IN32(IoC, MR_ADDR(Port, GMAC_CTRL), &DWord);
1479
1480 if ((DWord & GMC_RST_SET) != 0) {
1481 /* PState does not match HW state */
1482 SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_HWI_E006, SKERR_HWI_E006MSG);
1483 /* Correct it */
1484 pPrt->PState = SK_PRT_RESET;
1485 }
1486 }
1487
1488 if (pPrt->PState == SK_PRT_RESET) {
1489
1490 SkGmHardRst(pAC, IoC, Port);
1491
1492 SkGmClearRst(pAC, IoC, Port);
1493
1494 /* Auto-negotiation ? */
1495 if (pPrt->PLinkMode == SK_LMODE_HALF || pPrt->PLinkMode == SK_LMODE_FULL) {
1496 /* Auto-negotiation disabled */
1497
1498 /* get General Purpose Control */
1499 GM_IN16(IoC, Port, GM_GP_CTRL, &SWord);
1500
1501 /* disable auto-update for speed, duplex and flow-control */
1502 SWord |= GM_GPCR_AU_ALL_DIS;
1503
1504 /* setup General Purpose Control Register */
1505 GM_OUT16(IoC, Port, GM_GP_CTRL, SWord);
1506
1507 SWord = GM_GPCR_AU_ALL_DIS;
1508 }
1509 else {
1510 SWord = 0;
1511 }
1512
1513 /* speed settings */
1514 switch (pPrt->PLinkSpeed) {
1515 case SK_LSPEED_AUTO:
1516 case SK_LSPEED_1000MBPS:
1517 SWord |= GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100;
1518 break;
1519 case SK_LSPEED_100MBPS:
1520 SWord |= GM_GPCR_SPEED_100;
1521 break;
1522 case SK_LSPEED_10MBPS:
1523 break;
1524 }
1525
1526 /* duplex settings */
1527 if (pPrt->PLinkMode != SK_LMODE_HALF) {
1528 /* set full duplex */
1529 SWord |= GM_GPCR_DUP_FULL;
1530 }
1531
1532 /* flow-control settings */
1533 switch (pPrt->PFlowCtrlMode) {
1534 case SK_FLOW_MODE_NONE:
1535 /* set Pause Off */
1536 SK_OUT32(IoC, MR_ADDR(Port, GMAC_CTRL), GMC_PAUSE_OFF);
1537 /* disable Tx & Rx flow-control */
1538 SWord |= GM_GPCR_FC_TX_DIS | GM_GPCR_FC_RX_DIS | GM_GPCR_AU_FCT_DIS;
1539 break;
1540 case SK_FLOW_MODE_LOC_SEND:
1541 /* disable Rx flow-control */
1542 SWord |= GM_GPCR_FC_RX_DIS | GM_GPCR_AU_FCT_DIS;
1543 break;
1544 case SK_FLOW_MODE_SYMMETRIC:
1545 case SK_FLOW_MODE_SYM_OR_REM:
1546 /* enable Tx & Rx flow-control */
1547 break;
1548 }
1549
1550 /* setup General Purpose Control Register */
1551 GM_OUT16(IoC, Port, GM_GP_CTRL, SWord);
1552
1553 /* dummy read the Interrupt Source Register */
1554 SK_IN16(IoC, GMAC_IRQ_SRC, &SWord);
1555
1556#ifndef VCPU
1557 /* read Id from PHY */
1558 SkGmPhyRead(pAC, IoC, Port, PHY_MARV_ID1, &pPrt->PhyId1);
1559
1560 SkGmInitPhyMarv(pAC, IoC, Port, SK_FALSE);
1561#endif /* VCPU */
1562 }
1563
1564 (void)SkGmResetCounter(pAC, IoC, Port);
1565
1566 /* setup Transmit Control Register */
1567 GM_OUT16(IoC, Port, GM_TX_CTRL, TX_COL_THR(pPrt->PMacColThres));
1568
1569 /* setup Receive Control Register */
1570 GM_OUT16(IoC, Port, GM_RX_CTRL, GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA |
1571 GM_RXCR_CRC_DIS);
1572
1573 /* setup Transmit Flow Control Register */
1574 GM_OUT16(IoC, Port, GM_TX_FLOW_CTRL, 0xffff);
1575
1576 /* setup Transmit Parameter Register */
1577#ifdef VCPU
1578 GM_IN16(IoC, Port, GM_TX_PARAM, &SWord);
1579#endif /* VCPU */
1580
1581 SWord = TX_JAM_LEN_VAL(pPrt->PMacJamLen) |
1582 TX_JAM_IPG_VAL(pPrt->PMacJamIpgVal) |
1583 TX_IPG_JAM_DATA(pPrt->PMacJamIpgData);
1584
1585 GM_OUT16(IoC, Port, GM_TX_PARAM, SWord);
1586
1587 /* configure the Serial Mode Register */
1588#ifdef VCPU
1589 GM_IN16(IoC, Port, GM_SERIAL_MODE, &SWord);
1590#endif /* VCPU */
1591
1592 SWord = GM_SMOD_VLAN_ENA | IPG_DATA_VAL(pPrt->PMacIpgData);
1593
1594 if (pPrt->PMacLimit4) {
1595 /* reset of collision counter after 4 consecutive collisions */
1596 SWord |= GM_SMOD_LIMIT_4;
1597 }
1598
1599 if (pAC->GIni.GIPortUsage == SK_JUMBO_LINK) {
1600 /* enable jumbo mode (Max. Frame Length = 9018) */
1601 SWord |= GM_SMOD_JUMBO_ENA;
1602 }
1603
1604 GM_OUT16(IoC, Port, GM_SERIAL_MODE, SWord);
1605
1606 /*
1607 * configure the GMACs Station Addresses
1608 * in PROM you can find our addresses at:
1609 * B2_MAC_1 = xx xx xx xx xx x0 virtual address
1610 * B2_MAC_2 = xx xx xx xx xx x1 is programmed to GMAC A
1611 * B2_MAC_3 = xx xx xx xx xx x2 is reserved for DualPort
1612 */
1613
1614 for (i = 0; i < 3; i++) {
1615 /*
1616		 * The following 2 statements, taken together, are
1617		 * endianness independent. Remember this when changing.
1618 */
1619 /* physical address: will be used for pause frames */
1620 SK_IN16(IoC, (B2_MAC_2 + Port * 8 + i * 2), &SWord);
1621
1622#ifdef WA_DEV_16
1623 /* WA for deviation #16 */
1624 if (pAC->GIni.GIChipId == CHIP_ID_YUKON && pAC->GIni.GIChipRev == 0) {
1625 /* swap the address bytes */
1626 SWord = ((SWord & 0xff00) >> 8) | ((SWord & 0x00ff) << 8);
1627
1628 /* write to register in reversed order */
1629 GM_OUT16(IoC, Port, (GM_SRC_ADDR_1L + (2 - i) * 4), SWord);
1630 }
1631 else {
1632 GM_OUT16(IoC, Port, (GM_SRC_ADDR_1L + i * 4), SWord);
1633 }
1634#else
1635 GM_OUT16(IoC, Port, (GM_SRC_ADDR_1L + i * 4), SWord);
1636#endif /* WA_DEV_16 */
1637
1638 /* virtual address: will be used for data */
1639 SK_IN16(IoC, (B2_MAC_1 + Port * 8 + i * 2), &SWord);
1640
1641 GM_OUT16(IoC, Port, (GM_SRC_ADDR_2L + i * 4), SWord);
1642
1643 /* reset Multicast filtering Hash registers 1-3 */
1644 GM_OUT16(IoC, Port, GM_MC_ADDR_H1 + 4*i, 0);
1645 }
1646
1647 /* reset Multicast filtering Hash register 4 */
1648 GM_OUT16(IoC, Port, GM_MC_ADDR_H4, 0);
1649
1650	/* set up the interrupt masks for counter overflows */
1651 GM_OUT16(IoC, Port, GM_TX_IRQ_MSK, 0);
1652 GM_OUT16(IoC, Port, GM_RX_IRQ_MSK, 0);
1653 GM_OUT16(IoC, Port, GM_TR_IRQ_MSK, 0);
1654
1655#if defined(SK_DIAG) || defined(DEBUG)
1656 /* read General Purpose Status */
1657 GM_IN16(IoC, Port, GM_GP_STAT, &SWord);
1658
1659 SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
1660 ("MAC Stat Reg.=0x%04X\n", SWord));
1661#endif /* SK_DIAG || DEBUG */
1662
1663#ifdef SK_DIAG
1664 c_print("MAC Stat Reg=0x%04X\n", SWord);
1665#endif /* SK_DIAG */
1666
1667} /* SkGmInitMac */
1668#endif /* YUKON */
1669
1670
1671#ifdef GENESIS
1672/******************************************************************************
1673 *
1674 * SkXmInitDupMd() - Initialize the XMACs Duplex Mode
1675 *
1676 * Description:
1677 * This function initializes the XMACs Duplex Mode.
1678 * It should be called after successfully finishing
1679 * the Auto-negotiation Process
1680 *
1681 * Returns:
1682 * nothing
1683 */
1684static void SkXmInitDupMd(
1685SK_AC *pAC, /* adapter context */
1686SK_IOC IoC, /* IO context */
1687int Port) /* Port Index (MAC_1 + n) */
1688{
1689 switch (pAC->GIni.GP[Port].PLinkModeStatus) {
1690 case SK_LMODE_STAT_AUTOHALF:
1691 case SK_LMODE_STAT_HALF:
1692 /* Configuration Actions for Half Duplex Mode */
1693 /*
1694		 * XM_BURST = default value. We are probably not quick
1695 * enough at the 'XMAC' bus to burst 8kB.
1696 * The XMAC stops bursting if no transmit frames
1697 * are available or the burst limit is exceeded.
1698 */
1699 /* XM_TX_RT_LIM = default value (15) */
1700 /* XM_TX_STIME = default value (0xff = 4096 bit times) */
1701 break;
1702 case SK_LMODE_STAT_AUTOFULL:
1703 case SK_LMODE_STAT_FULL:
1704 /* Configuration Actions for Full Duplex Mode */
1705 /*
1706 * The duplex mode is configured by the PHY,
1707		 * therefore there seems to be nothing
1708		 * to do here.
1709 */
1710 break;
1711 case SK_LMODE_STAT_UNKNOWN:
1712 default:
1713 SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_HWI_E007, SKERR_HWI_E007MSG);
1714 break;
1715 }
1716} /* SkXmInitDupMd */
1717
1718
1719/******************************************************************************
1720 *
1721 * SkXmInitPauseMd() - initialize the Pause Mode to be used for this port
1722 *
1723 * Description:
1724 * This function initializes the Pause Mode which should
1725 * be used for this port.
1726 * It should be called after successfully finishing
1727 * the Auto-negotiation Process
1728 *
1729 * Returns:
1730 * nothing
1731 */
1732static void SkXmInitPauseMd(
1733SK_AC *pAC, /* adapter context */
1734SK_IOC IoC, /* IO context */
1735int Port) /* Port Index (MAC_1 + n) */
1736{
1737 SK_GEPORT *pPrt;
1738 SK_U32 DWord;
1739 SK_U16 Word;
1740
1741 pPrt = &pAC->GIni.GP[Port];
1742
1743 XM_IN16(IoC, Port, XM_MMU_CMD, &Word);
1744
1745 if (pPrt->PFlowCtrlStatus == SK_FLOW_STAT_NONE ||
1746 pPrt->PFlowCtrlStatus == SK_FLOW_STAT_LOC_SEND) {
1747
1748 /* Disable Pause Frame Reception */
1749 Word |= XM_MMU_IGN_PF;
1750 }
1751 else {
1752 /*
1753 * enabling pause frame reception is required for 1000BT
1754		 * because the XMAC is not reset when the link goes down
1755 */
1756 /* Enable Pause Frame Reception */
1757 Word &= ~XM_MMU_IGN_PF;
1758 }
1759
1760 XM_OUT16(IoC, Port, XM_MMU_CMD, Word);
1761
1762 XM_IN32(IoC, Port, XM_MODE, &DWord);
1763
1764 if (pPrt->PFlowCtrlStatus == SK_FLOW_STAT_SYMMETRIC ||
1765 pPrt->PFlowCtrlStatus == SK_FLOW_STAT_LOC_SEND) {
1766
1767 /*
1768 * Configure Pause Frame Generation
1769 * Use internal and external Pause Frame Generation.
1770 * Sending pause frames is edge triggered.
1771 * Send a Pause frame with the maximum pause time if
1772		 * internal or external FIFO full condition occurs.
1773 * Send a zero pause time frame to re-start transmission.
1774 */
1775
1776 /* XM_PAUSE_DA = '010000C28001' (default) */
1777
1778 /* XM_MAC_PTIME = 0xffff (maximum) */
1779 /* remember this value is defined in big endian (!) */
1780 XM_OUT16(IoC, Port, XM_MAC_PTIME, 0xffff);
1781
1782 /* Set Pause Mode in Mode Register */
1783 DWord |= XM_PAUSE_MODE;
1784
1785 /* Set Pause Mode in MAC Rx FIFO */
1786 SK_OUT16(IoC, MR_ADDR(Port, RX_MFF_CTRL1), MFF_ENA_PAUSE);
1787 }
1788 else {
1789 /*
1790		 * disabling pause frame generation is required for 1000BT
1791		 * because the XMAC is not reset when the link goes down
1792 */
1793 /* Disable Pause Mode in Mode Register */
1794 DWord &= ~XM_PAUSE_MODE;
1795
1796 /* Disable Pause Mode in MAC Rx FIFO */
1797 SK_OUT16(IoC, MR_ADDR(Port, RX_MFF_CTRL1), MFF_DIS_PAUSE);
1798 }
1799
1800 XM_OUT32(IoC, Port, XM_MODE, DWord);
1801} /* SkXmInitPauseMd*/
1802
1803
1804/******************************************************************************
1805 *
1806 * SkXmInitPhyXmac() - Initialize the XMAC Phy registers
1807 *
1808 * Description: initializes all the XMACs Phy registers
1809 *
1810 * Note:
1811 *
1812 * Returns:
1813 * nothing
1814 */
1815static void SkXmInitPhyXmac(
1816SK_AC *pAC, /* adapter context */
1817SK_IOC IoC, /* IO context */
1818int Port, /* Port Index (MAC_1 + n) */
1819SK_BOOL DoLoop) /* Should a Phy LoopBack be set-up? */
1820{
1821 SK_GEPORT *pPrt;
1822 SK_U16 Ctrl;
1823
1824 pPrt = &pAC->GIni.GP[Port];
1825 Ctrl = 0;
1826
1827 /* Auto-negotiation ? */
1828 if (pPrt->PLinkMode == SK_LMODE_HALF || pPrt->PLinkMode == SK_LMODE_FULL) {
1829 SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
1830 ("InitPhyXmac: no auto-negotiation Port %d\n", Port));
1831 /* Set DuplexMode in Config register */
1832 if (pPrt->PLinkMode == SK_LMODE_FULL) {
1833 Ctrl |= PHY_CT_DUP_MD;
1834 }
1835
1836 /*
1837 * Do NOT enable Auto-negotiation here. This would hold
1838 * the link down because no IDLEs are transmitted
1839 */
1840 }
1841 else {
1842 SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
1843 ("InitPhyXmac: with auto-negotiation Port %d\n", Port));
1844 /* Set Auto-negotiation advertisement */
1845
1846 /* Set Full/half duplex capabilities */
1847 switch (pPrt->PLinkMode) {
1848 case SK_LMODE_AUTOHALF:
1849 Ctrl |= PHY_X_AN_HD;
1850 break;
1851 case SK_LMODE_AUTOFULL:
1852 Ctrl |= PHY_X_AN_FD;
1853 break;
1854 case SK_LMODE_AUTOBOTH:
1855 Ctrl |= PHY_X_AN_FD | PHY_X_AN_HD;
1856 break;
1857 default:
1858 SK_ERR_LOG(pAC, SK_ERRCL_SW | SK_ERRCL_INIT, SKERR_HWI_E015,
1859 SKERR_HWI_E015MSG);
1860 }
1861
1862 /* Set Flow-control capabilities */
1863 switch (pPrt->PFlowCtrlMode) {
1864 case SK_FLOW_MODE_NONE:
1865 Ctrl |= PHY_X_P_NO_PAUSE;
1866 break;
1867 case SK_FLOW_MODE_LOC_SEND:
1868 Ctrl |= PHY_X_P_ASYM_MD;
1869 break;
1870 case SK_FLOW_MODE_SYMMETRIC:
1871 Ctrl |= PHY_X_P_SYM_MD;
1872 break;
1873 case SK_FLOW_MODE_SYM_OR_REM:
1874 Ctrl |= PHY_X_P_BOTH_MD;
1875 break;
1876 default:
1877 SK_ERR_LOG(pAC, SK_ERRCL_SW | SK_ERRCL_INIT, SKERR_HWI_E016,
1878 SKERR_HWI_E016MSG);
1879 }
1880
1881 /* Write AutoNeg Advertisement Register */
1882 SkXmPhyWrite(pAC, IoC, Port, PHY_XMAC_AUNE_ADV, Ctrl);
1883
1884 /* Restart Auto-negotiation */
1885 Ctrl = PHY_CT_ANE | PHY_CT_RE_CFG;
1886 }
1887
1888 if (DoLoop) {
1889 /* Set the Phy Loopback bit, too */
1890 Ctrl |= PHY_CT_LOOP;
1891 }
1892
1893 /* Write to the Phy control register */
1894 SkXmPhyWrite(pAC, IoC, Port, PHY_XMAC_CTRL, Ctrl);
1895} /* SkXmInitPhyXmac */
1896
1897
1898/******************************************************************************
1899 *
1900 * SkXmInitPhyBcom() - Initialize the Broadcom Phy registers
1901 *
1902 * Description: initializes all the Broadcom Phy registers
1903 *
1904 * Note:
1905 *
1906 * Returns:
1907 * nothing
1908 */
1909static void SkXmInitPhyBcom(
1910SK_AC *pAC, /* adapter context */
1911SK_IOC IoC, /* IO context */
1912int Port, /* Port Index (MAC_1 + n) */
1913SK_BOOL DoLoop) /* Should a Phy LoopBack be set-up? */
1914{
1915 SK_GEPORT *pPrt;
1916 SK_U16 Ctrl1;
1917 SK_U16 Ctrl2;
1918 SK_U16 Ctrl3;
1919 SK_U16 Ctrl4;
1920 SK_U16 Ctrl5;
1921
1922 Ctrl1 = PHY_CT_SP1000;
1923 Ctrl2 = 0;
1924 Ctrl3 = PHY_SEL_TYPE;
1925 Ctrl4 = PHY_B_PEC_EN_LTR;
1926 Ctrl5 = PHY_B_AC_TX_TST;
1927
1928 pPrt = &pAC->GIni.GP[Port];
1929
1930 /* manually Master/Slave ? */
1931 if (pPrt->PMSMode != SK_MS_MODE_AUTO) {
1932 Ctrl2 |= PHY_B_1000C_MSE;
1933
1934 if (pPrt->PMSMode == SK_MS_MODE_MASTER) {
1935 Ctrl2 |= PHY_B_1000C_MSC;
1936 }
1937 }
1938 /* Auto-negotiation ? */
1939 if (pPrt->PLinkMode == SK_LMODE_HALF || pPrt->PLinkMode == SK_LMODE_FULL) {
1940 SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
1941 ("InitPhyBcom: no auto-negotiation Port %d\n", Port));
1942 /* Set DuplexMode in Config register */
1943 if (pPrt->PLinkMode == SK_LMODE_FULL) {
1944 Ctrl1 |= PHY_CT_DUP_MD;
1945 }
1946
1947 /* Determine Master/Slave manually if not already done */
1948 if (pPrt->PMSMode == SK_MS_MODE_AUTO) {
1949 Ctrl2 |= PHY_B_1000C_MSE; /* set it to Slave */
1950 }
1951
1952 /*
1953 * Do NOT enable Auto-negotiation here. This would hold
1954		 * the link down because no IDLEs are transmitted
1955 */
1956 }
1957 else {
1958 SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
1959 ("InitPhyBcom: with auto-negotiation Port %d\n", Port));
1960 /* Set Auto-negotiation advertisement */
1961
1962 /*
1963 * Workaround BCOM Errata #1 for the C5 type.
1964 * 1000Base-T Link Acquisition Failure in Slave Mode
1965 * Set Repeater/DTE bit 10 of the 1000Base-T Control Register
1966 */
1967 Ctrl2 |= PHY_B_1000C_RD;
1968
1969 /* Set Full/half duplex capabilities */
1970 switch (pPrt->PLinkMode) {
1971 case SK_LMODE_AUTOHALF:
1972 Ctrl2 |= PHY_B_1000C_AHD;
1973 break;
1974 case SK_LMODE_AUTOFULL:
1975 Ctrl2 |= PHY_B_1000C_AFD;
1976 break;
1977 case SK_LMODE_AUTOBOTH:
1978 Ctrl2 |= PHY_B_1000C_AFD | PHY_B_1000C_AHD;
1979 break;
1980 default:
1981 SK_ERR_LOG(pAC, SK_ERRCL_SW | SK_ERRCL_INIT, SKERR_HWI_E015,
1982 SKERR_HWI_E015MSG);
1983 }
1984
1985 /* Set Flow-control capabilities */
1986 switch (pPrt->PFlowCtrlMode) {
1987 case SK_FLOW_MODE_NONE:
1988 Ctrl3 |= PHY_B_P_NO_PAUSE;
1989 break;
1990 case SK_FLOW_MODE_LOC_SEND:
1991 Ctrl3 |= PHY_B_P_ASYM_MD;
1992 break;
1993 case SK_FLOW_MODE_SYMMETRIC:
1994 Ctrl3 |= PHY_B_P_SYM_MD;
1995 break;
1996 case SK_FLOW_MODE_SYM_OR_REM:
1997 Ctrl3 |= PHY_B_P_BOTH_MD;
1998 break;
1999 default:
2000 SK_ERR_LOG(pAC, SK_ERRCL_SW | SK_ERRCL_INIT, SKERR_HWI_E016,
2001 SKERR_HWI_E016MSG);
2002 }
2003
2004 /* Restart Auto-negotiation */
2005 Ctrl1 |= PHY_CT_ANE | PHY_CT_RE_CFG;
2006 }
2007
2008 /* Initialize LED register here? */
2009 /* No. Please do it in SkDgXmitLed() (if required) and swap
2010 init order of LEDs and XMAC. (MAl) */
2011
2012 /* Write 1000Base-T Control Register */
2013 SkXmPhyWrite(pAC, IoC, Port, PHY_BCOM_1000T_CTRL, Ctrl2);
2014 SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
2015 ("Set 1000B-T Ctrl Reg=0x%04X\n", Ctrl2));
2016
2017 /* Write AutoNeg Advertisement Register */
2018 SkXmPhyWrite(pAC, IoC, Port, PHY_BCOM_AUNE_ADV, Ctrl3);
2019 SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
2020 ("Set Auto-Neg.Adv.Reg=0x%04X\n", Ctrl3));
2021
2022 if (DoLoop) {
2023 /* Set the Phy Loopback bit, too */
2024 Ctrl1 |= PHY_CT_LOOP;
2025 }
2026
2027 if (pAC->GIni.GIPortUsage == SK_JUMBO_LINK) {
2028 /* configure FIFO to high latency for transmission of ext. packets */
2029 Ctrl4 |= PHY_B_PEC_HIGH_LA;
2030
2031 /* configure reception of extended packets */
2032 Ctrl5 |= PHY_B_AC_LONG_PACK;
2033
2034 SkXmPhyWrite(pAC, IoC, Port, PHY_BCOM_AUX_CTRL, Ctrl5);
2035 }
2036
2037 /* Configure LED Traffic Mode and Jumbo Frame usage if specified */
2038 SkXmPhyWrite(pAC, IoC, Port, PHY_BCOM_P_EXT_CTRL, Ctrl4);
2039
2040 /* Write to the Phy control register */
2041 SkXmPhyWrite(pAC, IoC, Port, PHY_BCOM_CTRL, Ctrl1);
2042 SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
2043 ("PHY Control Reg=0x%04X\n", Ctrl1));
2044} /* SkXmInitPhyBcom */
2045#endif /* GENESIS */
2046
2047#ifdef YUKON
2048/******************************************************************************
2049 *
2050 * SkGmInitPhyMarv() - Initialize the Marvell Phy registers
2051 *
2052 * Description: initializes all the Marvell Phy registers
2053 *
2054 * Note:
2055 *
2056 * Returns:
2057 * nothing
2058 */
2059static void SkGmInitPhyMarv(
2060SK_AC *pAC, /* adapter context */
2061SK_IOC IoC, /* IO context */
2062int Port, /* Port Index (MAC_1 + n) */
2063SK_BOOL DoLoop) /* Should a Phy LoopBack be set-up? */
2064{
2065 SK_GEPORT *pPrt;
2066 SK_U16 PhyCtrl;
2067 SK_U16 C1000BaseT;
2068 SK_U16 AutoNegAdv;
2069 SK_U16 ExtPhyCtrl;
2070 SK_U16 LedCtrl;
2071 SK_BOOL AutoNeg;
2072#if defined(SK_DIAG) || defined(DEBUG)
2073 SK_U16 PhyStat;
2074 SK_U16 PhyStat1;
2075 SK_U16 PhySpecStat;
2076#endif /* SK_DIAG || DEBUG */
2077
2078 pPrt = &pAC->GIni.GP[Port];
2079
2080 /* Auto-negotiation ? */
2081 if (pPrt->PLinkMode == SK_LMODE_HALF || pPrt->PLinkMode == SK_LMODE_FULL) {
2082 AutoNeg = SK_FALSE;
2083 }
2084 else {
2085 AutoNeg = SK_TRUE;
2086 }
2087
2088 SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
2089 ("InitPhyMarv: Port %d, auto-negotiation %s\n",
2090 Port, AutoNeg ? "ON" : "OFF"));
2091
2092#ifdef VCPU
2093 VCPUprintf(0, "SkGmInitPhyMarv(), Port=%u, DoLoop=%u\n",
2094 Port, DoLoop);
2095#else /* VCPU */
2096 if (DoLoop) {
2097 /* Set 'MAC Power up'-bit, set Manual MDI configuration */
2098 SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_PHY_CTRL,
2099 PHY_M_PC_MAC_POW_UP);
2100 }
2101 else if (AutoNeg && pPrt->PLinkSpeed == SK_LSPEED_AUTO) {
2102 /* Read Ext. PHY Specific Control */
2103 SkGmPhyRead(pAC, IoC, Port, PHY_MARV_EXT_CTRL, &ExtPhyCtrl);
2104
2105 ExtPhyCtrl &= ~(PHY_M_EC_M_DSC_MSK | PHY_M_EC_S_DSC_MSK |
2106 PHY_M_EC_MAC_S_MSK);
2107
2108 ExtPhyCtrl |= PHY_M_EC_MAC_S(MAC_TX_CLK_25_MHZ) |
2109 PHY_M_EC_M_DSC(0) | PHY_M_EC_S_DSC(1);
2110
2111 SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_EXT_CTRL, ExtPhyCtrl);
2112 SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
2113 ("Set Ext. PHY Ctrl=0x%04X\n", ExtPhyCtrl));
2114 }
2115
2116 /* Read PHY Control */
2117 SkGmPhyRead(pAC, IoC, Port, PHY_MARV_CTRL, &PhyCtrl);
2118
2119 if (!AutoNeg) {
2120 /* Disable Auto-negotiation */
2121 PhyCtrl &= ~PHY_CT_ANE;
2122 }
2123
2124 PhyCtrl |= PHY_CT_RESET;
2125 /* Assert software reset */
2126 SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_CTRL, PhyCtrl);
2127#endif /* VCPU */
2128
2129 PhyCtrl = 0 /* PHY_CT_COL_TST */;
2130 C1000BaseT = 0;
2131 AutoNegAdv = PHY_SEL_TYPE;
2132
2133	/* Manual Master/Slave mode? */
2134 if (pPrt->PMSMode != SK_MS_MODE_AUTO) {
2135 /* enable Manual Master/Slave */
2136 C1000BaseT |= PHY_M_1000C_MSE;
2137
2138 if (pPrt->PMSMode == SK_MS_MODE_MASTER) {
2139 C1000BaseT |= PHY_M_1000C_MSC; /* set it to Master */
2140 }
2141 }
2142
2143 /* Auto-negotiation ? */
2144 if (!AutoNeg) {
2145
2146 if (pPrt->PLinkMode == SK_LMODE_FULL) {
2147 /* Set Full Duplex Mode */
2148 PhyCtrl |= PHY_CT_DUP_MD;
2149 }
2150
2151 /* Set Master/Slave manually if not already done */
2152 if (pPrt->PMSMode == SK_MS_MODE_AUTO) {
2153 C1000BaseT |= PHY_M_1000C_MSE; /* set it to Slave */
2154 }
2155
2156 /* Set Speed */
2157 switch (pPrt->PLinkSpeed) {
2158 case SK_LSPEED_AUTO:
2159 case SK_LSPEED_1000MBPS:
2160 PhyCtrl |= PHY_CT_SP1000;
2161 break;
2162 case SK_LSPEED_100MBPS:
2163 PhyCtrl |= PHY_CT_SP100;
2164 break;
2165 case SK_LSPEED_10MBPS:
2166 break;
2167 default:
2168 SK_ERR_LOG(pAC, SK_ERRCL_SW | SK_ERRCL_INIT, SKERR_HWI_E019,
2169 SKERR_HWI_E019MSG);
2170 }
2171
2172 if (!DoLoop) {
2173 PhyCtrl |= PHY_CT_RESET;
2174 }
2175 }
2176 else {
2177 /* Set Auto-negotiation advertisement */
2178
2179 if (pAC->GIni.GICopperType) {
2180 /* Set Speed capabilities */
2181 switch (pPrt->PLinkSpeed) {
2182 case SK_LSPEED_AUTO:
2183 C1000BaseT |= PHY_M_1000C_AHD | PHY_M_1000C_AFD;
2184 AutoNegAdv |= PHY_M_AN_100_FD | PHY_M_AN_100_HD |
2185 PHY_M_AN_10_FD | PHY_M_AN_10_HD;
2186 break;
2187 case SK_LSPEED_1000MBPS:
2188 C1000BaseT |= PHY_M_1000C_AHD | PHY_M_1000C_AFD;
2189 break;
2190 case SK_LSPEED_100MBPS:
2191 AutoNegAdv |= PHY_M_AN_100_FD | PHY_M_AN_100_HD |
2192 /* advertise 10Base-T also */
2193 PHY_M_AN_10_FD | PHY_M_AN_10_HD;
2194 break;
2195 case SK_LSPEED_10MBPS:
2196 AutoNegAdv |= PHY_M_AN_10_FD | PHY_M_AN_10_HD;
2197 break;
2198 default:
2199 SK_ERR_LOG(pAC, SK_ERRCL_SW | SK_ERRCL_INIT, SKERR_HWI_E019,
2200 SKERR_HWI_E019MSG);
2201 }
2202
2203 /* Set Full/half duplex capabilities */
2204 switch (pPrt->PLinkMode) {
2205 case SK_LMODE_AUTOHALF:
2206 C1000BaseT &= ~PHY_M_1000C_AFD;
2207 AutoNegAdv &= ~(PHY_M_AN_100_FD | PHY_M_AN_10_FD);
2208 break;
2209 case SK_LMODE_AUTOFULL:
2210 C1000BaseT &= ~PHY_M_1000C_AHD;
2211 AutoNegAdv &= ~(PHY_M_AN_100_HD | PHY_M_AN_10_HD);
2212 break;
2213 case SK_LMODE_AUTOBOTH:
2214 break;
2215 default:
2216 SK_ERR_LOG(pAC, SK_ERRCL_SW | SK_ERRCL_INIT, SKERR_HWI_E015,
2217 SKERR_HWI_E015MSG);
2218 }
2219
2220 /* Set Flow-control capabilities */
2221 switch (pPrt->PFlowCtrlMode) {
2222 case SK_FLOW_MODE_NONE:
2223 AutoNegAdv |= PHY_B_P_NO_PAUSE;
2224 break;
2225 case SK_FLOW_MODE_LOC_SEND:
2226 AutoNegAdv |= PHY_B_P_ASYM_MD;
2227 break;
2228 case SK_FLOW_MODE_SYMMETRIC:
2229 AutoNegAdv |= PHY_B_P_SYM_MD;
2230 break;
2231 case SK_FLOW_MODE_SYM_OR_REM:
2232 AutoNegAdv |= PHY_B_P_BOTH_MD;
2233 break;
2234 default:
2235 SK_ERR_LOG(pAC, SK_ERRCL_SW | SK_ERRCL_INIT, SKERR_HWI_E016,
2236 SKERR_HWI_E016MSG);
2237 }
2238 }
2239 else { /* special defines for FIBER (88E1011S only) */
2240
2241 /* Set Full/half duplex capabilities */
2242 switch (pPrt->PLinkMode) {
2243 case SK_LMODE_AUTOHALF:
2244 AutoNegAdv |= PHY_M_AN_1000X_AHD;
2245 break;
2246 case SK_LMODE_AUTOFULL:
2247 AutoNegAdv |= PHY_M_AN_1000X_AFD;
2248 break;
2249 case SK_LMODE_AUTOBOTH:
2250 AutoNegAdv |= PHY_M_AN_1000X_AHD | PHY_M_AN_1000X_AFD;
2251 break;
2252 default:
2253 SK_ERR_LOG(pAC, SK_ERRCL_SW | SK_ERRCL_INIT, SKERR_HWI_E015,
2254 SKERR_HWI_E015MSG);
2255 }
2256
2257 /* Set Flow-control capabilities */
2258 switch (pPrt->PFlowCtrlMode) {
2259 case SK_FLOW_MODE_NONE:
2260 AutoNegAdv |= PHY_M_P_NO_PAUSE_X;
2261 break;
2262 case SK_FLOW_MODE_LOC_SEND:
2263 AutoNegAdv |= PHY_M_P_ASYM_MD_X;
2264 break;
2265 case SK_FLOW_MODE_SYMMETRIC:
2266 AutoNegAdv |= PHY_M_P_SYM_MD_X;
2267 break;
2268 case SK_FLOW_MODE_SYM_OR_REM:
2269 AutoNegAdv |= PHY_M_P_BOTH_MD_X;
2270 break;
2271 default:
2272 SK_ERR_LOG(pAC, SK_ERRCL_SW | SK_ERRCL_INIT, SKERR_HWI_E016,
2273 SKERR_HWI_E016MSG);
2274 }
2275 }
2276
2277 if (!DoLoop) {
2278 /* Restart Auto-negotiation */
2279 PhyCtrl |= PHY_CT_ANE | PHY_CT_RE_CFG;
2280 }
2281 }
2282
2283#ifdef VCPU
2284 /*
2285 * E-mail from Gu Lin (08-03-2002):
2286 */
2287
2288 /* Program PHY register 30 as 16'h0708 for simulation speed up */
2289 SkGmPhyWrite(pAC, IoC, Port, 30, 0x0700 /* 0x0708 */);
2290
2291 VCpuWait(2000);
2292
2293#else /* VCPU */
2294
2295 /* Write 1000Base-T Control Register */
2296 SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_1000T_CTRL, C1000BaseT);
2297 SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
2298 ("Set 1000B-T Ctrl =0x%04X\n", C1000BaseT));
2299
2300 /* Write AutoNeg Advertisement Register */
2301 SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_AUNE_ADV, AutoNegAdv);
2302 SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
2303 ("Set Auto-Neg.Adv.=0x%04X\n", AutoNegAdv));
2304#endif /* VCPU */
2305
2306 if (DoLoop) {
2307 /* Set the PHY Loopback bit */
2308 PhyCtrl |= PHY_CT_LOOP;
2309
2310#ifdef XXX
2311 /* Program PHY register 16 as 16'h0400 to force link good */
2312 SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_PHY_CTRL, PHY_M_PC_FL_GOOD);
2313#endif /* XXX */
2314
2315#ifndef VCPU
2316 if (pPrt->PLinkSpeed != SK_LSPEED_AUTO) {
2317 /* Write Ext. PHY Specific Control */
2318 SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_EXT_CTRL,
2319 (SK_U16)((pPrt->PLinkSpeed + 2) << 4));
2320 }
2321#endif /* VCPU */
2322 }
2323#ifdef TEST_ONLY
2324 else if (pPrt->PLinkSpeed == SK_LSPEED_10MBPS) {
2325 /* Write PHY Specific Control */
2326 SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_PHY_CTRL,
2327 PHY_M_PC_EN_DET_MSK);
2328 }
2329#endif
2330
2331 /* Write to the PHY Control register */
2332 SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_CTRL, PhyCtrl);
2333 SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
2334 ("Set PHY Ctrl Reg.=0x%04X\n", PhyCtrl));
2335
2336#ifdef VCPU
2337 VCpuWait(2000);
2338#else
2339
2340 LedCtrl = PHY_M_LED_PULS_DUR(PULS_170MS) | PHY_M_LED_BLINK_RT(BLINK_84MS);
2341
2342 if ((pAC->GIni.GILedBlinkCtrl & SK_ACT_LED_BLINK) != 0) {
2343 LedCtrl |= PHY_M_LEDC_RX_CTRL | PHY_M_LEDC_TX_CTRL;
2344 }
2345
2346 if ((pAC->GIni.GILedBlinkCtrl & SK_DUP_LED_NORMAL) != 0) {
2347 LedCtrl |= PHY_M_LEDC_DP_CTRL;
2348 }
2349
2350 SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_LED_CTRL, LedCtrl);
2351
2352 if ((pAC->GIni.GILedBlinkCtrl & SK_LED_LINK100_ON) != 0) {
2353 /* only in forced 100 Mbps mode */
2354 if (!AutoNeg && pPrt->PLinkSpeed == SK_LSPEED_100MBPS) {
2355
2356 SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_LED_OVER,
2357 PHY_M_LED_MO_100(MO_LED_ON));
2358 }
2359 }
2360
2361#ifdef SK_DIAG
2362 c_print("Set PHY Ctrl=0x%04X\n", PhyCtrl);
2363 c_print("Set 1000 B-T=0x%04X\n", C1000BaseT);
2364 c_print("Set Auto-Neg=0x%04X\n", AutoNegAdv);
2365 c_print("Set Ext Ctrl=0x%04X\n", ExtPhyCtrl);
2366#endif /* SK_DIAG */
2367
2368#if defined(SK_DIAG) || defined(DEBUG)
2369 /* Read PHY Control */
2370 SkGmPhyRead(pAC, IoC, Port, PHY_MARV_CTRL, &PhyCtrl);
2371 SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
2372 ("PHY Ctrl Reg.=0x%04X\n", PhyCtrl));
2373
2374 /* Read 1000Base-T Control Register */
2375 SkGmPhyRead(pAC, IoC, Port, PHY_MARV_1000T_CTRL, &C1000BaseT);
2376 SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
2377 ("1000B-T Ctrl =0x%04X\n", C1000BaseT));
2378
2379 /* Read AutoNeg Advertisement Register */
2380 SkGmPhyRead(pAC, IoC, Port, PHY_MARV_AUNE_ADV, &AutoNegAdv);
2381 SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
2382 ("Auto-Neg.Adv.=0x%04X\n", AutoNegAdv));
2383
2384 /* Read Ext. PHY Specific Control */
2385 SkGmPhyRead(pAC, IoC, Port, PHY_MARV_EXT_CTRL, &ExtPhyCtrl);
2386 SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
2387 ("Ext. PHY Ctrl=0x%04X\n", ExtPhyCtrl));
2388
2389 /* Read PHY Status */
2390 SkGmPhyRead(pAC, IoC, Port, PHY_MARV_STAT, &PhyStat);
2391 SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
2392 ("PHY Stat Reg.=0x%04X\n", PhyStat));
2393 SkGmPhyRead(pAC, IoC, Port, PHY_MARV_STAT, &PhyStat1);
2394 SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
2395 ("PHY Stat Reg.=0x%04X\n", PhyStat1));
2396
2397 /* Read PHY Specific Status */
2398 SkGmPhyRead(pAC, IoC, Port, PHY_MARV_PHY_STAT, &PhySpecStat);
2399 SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
2400 ("PHY Spec Stat=0x%04X\n", PhySpecStat));
2401#endif /* SK_DIAG || DEBUG */
2402
2403#ifdef SK_DIAG
2404 c_print("PHY Ctrl Reg=0x%04X\n", PhyCtrl);
2405 c_print("PHY 1000 Reg=0x%04X\n", C1000BaseT);
2406 c_print("PHY AnAd Reg=0x%04X\n", AutoNegAdv);
2407 c_print("Ext Ctrl Reg=0x%04X\n", ExtPhyCtrl);
2408 c_print("PHY Stat Reg=0x%04X\n", PhyStat);
2409 c_print("PHY Stat Reg=0x%04X\n", PhyStat1);
2410 c_print("PHY Spec Reg=0x%04X\n", PhySpecStat);
2411#endif /* SK_DIAG */
2412
2413#endif /* VCPU */
2414
2415} /* SkGmInitPhyMarv */
2416#endif /* YUKON */
2417
2418
2419#ifdef OTHER_PHY
2420/******************************************************************************
2421 *
2422 * SkXmInitPhyLone() - Initialize the Level One Phy registers
2423 *
2424 * Description: initializes all the Level One Phy registers
2425 *
2426 * Note:
2427 *
2428 * Returns:
2429 * nothing
2430 */
2431static void SkXmInitPhyLone(
2432SK_AC *pAC, /* adapter context */
2433SK_IOC IoC, /* IO context */
2434int Port, /* Port Index (MAC_1 + n) */
2435SK_BOOL DoLoop) /* Should a Phy LoopBack be set-up? */
2436{
2437 SK_GEPORT *pPrt;
2438 SK_U16 Ctrl1;
2439 SK_U16 Ctrl2;
2440 SK_U16 Ctrl3;
2441
2442 Ctrl1 = PHY_CT_SP1000;
2443 Ctrl2 = 0;
2444 Ctrl3 = PHY_SEL_TYPE;
2445
2446 pPrt = &pAC->GIni.GP[Port];
2447
2448	/* Manual Master/Slave mode? */
2449 if (pPrt->PMSMode != SK_MS_MODE_AUTO) {
2450 Ctrl2 |= PHY_L_1000C_MSE;
2451
2452 if (pPrt->PMSMode == SK_MS_MODE_MASTER) {
2453 Ctrl2 |= PHY_L_1000C_MSC;
2454 }
2455 }
2456 /* Auto-negotiation ? */
2457 if (pPrt->PLinkMode == SK_LMODE_HALF || pPrt->PLinkMode == SK_LMODE_FULL) {
2458 /*
2459		 * The Level One spec says: "1000 Mbps: manual mode not allowed",
2460		 * but let's see what happens...
2461 */
2462 SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
2463 ("InitPhyLone: no auto-negotiation Port %d\n", Port));
2464 /* Set DuplexMode in Config register */
2465 if (pPrt->PLinkMode == SK_LMODE_FULL) {
2466 Ctrl1 |= PHY_CT_DUP_MD;
2467 }
2468
2469 /* Determine Master/Slave manually if not already done */
2470 if (pPrt->PMSMode == SK_MS_MODE_AUTO) {
2471 Ctrl2 |= PHY_L_1000C_MSE; /* set it to Slave */
2472 }
2473
2474 /*
2475 * Do NOT enable Auto-negotiation here. This would hold
2476 * the link down because no IDLES are transmitted
2477 */
2478 }
2479 else {
2480 SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
2481 ("InitPhyLone: with auto-negotiation Port %d\n", Port));
2482 /* Set Auto-negotiation advertisement */
2483
2484 /* Set Full/half duplex capabilities */
2485 switch (pPrt->PLinkMode) {
2486 case SK_LMODE_AUTOHALF:
2487 Ctrl2 |= PHY_L_1000C_AHD;
2488 break;
2489 case SK_LMODE_AUTOFULL:
2490 Ctrl2 |= PHY_L_1000C_AFD;
2491 break;
2492 case SK_LMODE_AUTOBOTH:
2493 Ctrl2 |= PHY_L_1000C_AFD | PHY_L_1000C_AHD;
2494 break;
2495 default:
2496 SK_ERR_LOG(pAC, SK_ERRCL_SW | SK_ERRCL_INIT, SKERR_HWI_E015,
2497 SKERR_HWI_E015MSG);
2498 }
2499
2500 /* Set Flow-control capabilities */
2501 switch (pPrt->PFlowCtrlMode) {
2502 case SK_FLOW_MODE_NONE:
2503 Ctrl3 |= PHY_L_P_NO_PAUSE;
2504 break;
2505 case SK_FLOW_MODE_LOC_SEND:
2506 Ctrl3 |= PHY_L_P_ASYM_MD;
2507 break;
2508 case SK_FLOW_MODE_SYMMETRIC:
2509 Ctrl3 |= PHY_L_P_SYM_MD;
2510 break;
2511 case SK_FLOW_MODE_SYM_OR_REM:
2512 Ctrl3 |= PHY_L_P_BOTH_MD;
2513 break;
2514 default:
2515 SK_ERR_LOG(pAC, SK_ERRCL_SW | SK_ERRCL_INIT, SKERR_HWI_E016,
2516 SKERR_HWI_E016MSG);
2517 }
2518
2519 /* Restart Auto-negotiation */
2520 Ctrl1 = PHY_CT_ANE | PHY_CT_RE_CFG;
2521 }
2522
2523 /* Write 1000Base-T Control Register */
2524 SkXmPhyWrite(pAC, IoC, Port, PHY_LONE_1000T_CTRL, Ctrl2);
2525 SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
2526 ("1000B-T Ctrl Reg=0x%04X\n", Ctrl2));
2527
2528 /* Write AutoNeg Advertisement Register */
2529 SkXmPhyWrite(pAC, IoC, Port, PHY_LONE_AUNE_ADV, Ctrl3);
2530 SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
2531 ("Auto-Neg.Adv.Reg=0x%04X\n", Ctrl3));
2532
2533 if (DoLoop) {
2534 /* Set the Phy Loopback bit, too */
2535 Ctrl1 |= PHY_CT_LOOP;
2536 }
2537
2538 /* Write to the Phy control register */
2539 SkXmPhyWrite(pAC, IoC, Port, PHY_LONE_CTRL, Ctrl1);
2540 SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
2541 ("PHY Control Reg=0x%04X\n", Ctrl1));
2542} /* SkXmInitPhyLone */
2543
2544
2545/******************************************************************************
2546 *
2547 * SkXmInitPhyNat() - Initialize the National Phy registers
2548 *
2549 * Description: initializes all the National Phy registers
2550 *
2551 * Note:
2552 *
2553 * Returns:
2554 * nothing
2555 */
2556static void SkXmInitPhyNat(
2557SK_AC *pAC, /* adapter context */
2558SK_IOC IoC, /* IO context */
2559int Port, /* Port Index (MAC_1 + n) */
2560SK_BOOL DoLoop) /* Should a Phy LoopBack be set-up? */
2561{
2562/* todo: National */
2563} /* SkXmInitPhyNat */
2564#endif /* OTHER_PHY */
2565
2566
2567/******************************************************************************
2568 *
2569 * SkMacInitPhy() - Initialize the PHY registers
2570 *
2571 * Description: calls the Init PHY routines dep. on board type
2572 *
2573 * Note:
2574 *
2575 * Returns:
2576 * nothing
2577 */
2578void SkMacInitPhy(
2579SK_AC *pAC, /* adapter context */
2580SK_IOC IoC, /* IO context */
2581int Port, /* Port Index (MAC_1 + n) */
2582SK_BOOL DoLoop) /* Should a Phy LoopBack be set-up? */
2583{
2584 SK_GEPORT *pPrt;
2585
2586 pPrt = &pAC->GIni.GP[Port];
2587
2588#ifdef GENESIS
2589 if (pAC->GIni.GIGenesis) {
2590
2591 switch (pPrt->PhyType) {
2592 case SK_PHY_XMAC:
2593 SkXmInitPhyXmac(pAC, IoC, Port, DoLoop);
2594 break;
2595 case SK_PHY_BCOM:
2596 SkXmInitPhyBcom(pAC, IoC, Port, DoLoop);
2597 break;
2598#ifdef OTHER_PHY
2599 case SK_PHY_LONE:
2600 SkXmInitPhyLone(pAC, IoC, Port, DoLoop);
2601 break;
2602 case SK_PHY_NAT:
2603 SkXmInitPhyNat(pAC, IoC, Port, DoLoop);
2604 break;
2605#endif /* OTHER_PHY */
2606 }
2607 }
2608#endif /* GENESIS */
2609
2610#ifdef YUKON
2611 if (pAC->GIni.GIYukon) {
2612
2613 SkGmInitPhyMarv(pAC, IoC, Port, DoLoop);
2614 }
2615#endif /* YUKON */
2616
2617} /* SkMacInitPhy */
2618
2619
2620#ifdef GENESIS
2621/******************************************************************************
2622 *
2623 * SkXmAutoNegDoneXmac() - Auto-negotiation handling
2624 *
2625 * Description:
2626 * This function handles the auto-negotiation if the Done bit is set.
2627 *
2628 * Returns:
2629 * SK_AND_OK o.k.
2630 * SK_AND_DUP_CAP Duplex capability error happened
2631 * SK_AND_OTHER Other error happened
2632 */
2633static int SkXmAutoNegDoneXmac(
2634SK_AC *pAC, /* adapter context */
2635SK_IOC IoC, /* IO context */
2636int Port) /* Port Index (MAC_1 + n) */
2637{
2638 SK_GEPORT *pPrt;
2639 SK_U16 ResAb; /* Resolved Ability */
2640 SK_U16 LPAb; /* Link Partner Ability */
2641
2642 SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
2643 ("AutoNegDoneXmac, Port %d\n", Port));
2644
2645 pPrt = &pAC->GIni.GP[Port];
2646
2647 /* Get PHY parameters */
2648 SkXmPhyRead(pAC, IoC, Port, PHY_XMAC_AUNE_LP, &LPAb);
2649 SkXmPhyRead(pAC, IoC, Port, PHY_XMAC_RES_ABI, &ResAb);
2650
2651 if ((LPAb & PHY_X_AN_RFB) != 0) {
2652		/* At least one of the remote fault bits is set */
2653 /* Error */
2654 SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
2655 ("AutoNegFail: Remote fault bit set Port %d\n", Port));
2656 pPrt->PAutoNegFail = SK_TRUE;
2657 return(SK_AND_OTHER);
2658 }
2659
2660 /* Check Duplex mismatch */
2661 if ((ResAb & (PHY_X_RS_HD | PHY_X_RS_FD)) == PHY_X_RS_FD) {
2662 pPrt->PLinkModeStatus = (SK_U8)SK_LMODE_STAT_AUTOFULL;
2663 }
2664 else if ((ResAb & (PHY_X_RS_HD | PHY_X_RS_FD)) == PHY_X_RS_HD) {
2665 pPrt->PLinkModeStatus = (SK_U8)SK_LMODE_STAT_AUTOHALF;
2666 }
2667 else {
2668 /* Error */
2669 SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
2670 ("AutoNegFail: Duplex mode mismatch Port %d\n", Port));
2671 pPrt->PAutoNegFail = SK_TRUE;
2672 return(SK_AND_DUP_CAP);
2673 }
2674
2675 /* Check PAUSE mismatch */
2676 /* We are NOT using chapter 4.23 of the Xaqti manual */
2677 /* We are using IEEE 802.3z/D5.0 Table 37-4 */
2678 if ((pPrt->PFlowCtrlMode == SK_FLOW_MODE_SYMMETRIC ||
2679 pPrt->PFlowCtrlMode == SK_FLOW_MODE_SYM_OR_REM) &&
2680 (LPAb & PHY_X_P_SYM_MD) != 0) {
2681 /* Symmetric PAUSE */
2682 pPrt->PFlowCtrlStatus = SK_FLOW_STAT_SYMMETRIC;
2683 }
2684 else if (pPrt->PFlowCtrlMode == SK_FLOW_MODE_SYM_OR_REM &&
2685 (LPAb & PHY_X_RS_PAUSE) == PHY_X_P_ASYM_MD) {
2686 /* Enable PAUSE receive, disable PAUSE transmit */
2687 pPrt->PFlowCtrlStatus = SK_FLOW_STAT_REM_SEND;
2688 }
2689 else if (pPrt->PFlowCtrlMode == SK_FLOW_MODE_LOC_SEND &&
2690 (LPAb & PHY_X_RS_PAUSE) == PHY_X_P_BOTH_MD) {
2691 /* Disable PAUSE receive, enable PAUSE transmit */
2692 pPrt->PFlowCtrlStatus = SK_FLOW_STAT_LOC_SEND;
2693 }
2694 else {
2695 /* PAUSE mismatch -> no PAUSE */
2696 pPrt->PFlowCtrlStatus = SK_FLOW_STAT_NONE;
2697 }
2698 pPrt->PLinkSpeedUsed = (SK_U8)SK_LSPEED_STAT_1000MBPS;
2699
2700 return(SK_AND_OK);
2701} /* SkXmAutoNegDoneXmac */
2702
2703
2704/******************************************************************************
2705 *
2706 * SkXmAutoNegDoneBcom() - Auto-negotiation handling
2707 *
2708 * Description:
2709 * This function handles the auto-negotiation if the Done bit is set.
2710 *
2711 * Returns:
2712 * SK_AND_OK o.k.
2713 * SK_AND_DUP_CAP Duplex capability error happened
2714 * SK_AND_OTHER Other error happened
2715 */
2716static int SkXmAutoNegDoneBcom(
2717SK_AC *pAC, /* adapter context */
2718SK_IOC IoC, /* IO context */
2719int Port) /* Port Index (MAC_1 + n) */
2720{
2721 SK_GEPORT *pPrt;
2722 SK_U16 LPAb; /* Link Partner Ability */
2723 SK_U16 AuxStat; /* Auxiliary Status */
2724
2725#ifdef TEST_ONLY
2726	/* 01-Sep-2000 RA */
2727 SK_U16 ResAb; /* Resolved Ability */
2728#endif	/* TEST_ONLY */
2729
2730 SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
2731 ("AutoNegDoneBcom, Port %d\n", Port));
2732 pPrt = &pAC->GIni.GP[Port];
2733
2734 /* Get PHY parameters */
2735 SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_AUNE_LP, &LPAb);
2736#ifdef TEST_ONLY
2737	/* 01-Sep-2000 RA */
2738 SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_1000T_STAT, &ResAb);
2739#endif	/* TEST_ONLY */
2740
2741 SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_AUX_STAT, &AuxStat);
2742
2743 if ((LPAb & PHY_B_AN_RF) != 0) {
2744 /* Remote fault bit is set: Error */
2745 SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
2746 ("AutoNegFail: Remote fault bit set Port %d\n", Port));
2747 pPrt->PAutoNegFail = SK_TRUE;
2748 return(SK_AND_OTHER);
2749 }
2750
2751 /* Check Duplex mismatch */
2752 if ((AuxStat & PHY_B_AS_AN_RES_MSK) == PHY_B_RES_1000FD) {
2753 pPrt->PLinkModeStatus = (SK_U8)SK_LMODE_STAT_AUTOFULL;
2754 }
2755 else if ((AuxStat & PHY_B_AS_AN_RES_MSK) == PHY_B_RES_1000HD) {
2756 pPrt->PLinkModeStatus = (SK_U8)SK_LMODE_STAT_AUTOHALF;
2757 }
2758 else {
2759 /* Error */
2760 SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
2761 ("AutoNegFail: Duplex mode mismatch Port %d\n", Port));
2762 pPrt->PAutoNegFail = SK_TRUE;
2763 return(SK_AND_DUP_CAP);
2764 }
2765
2766#ifdef TEST_ONLY
2767	/* 01-Sep-2000 RA */
2768 /* Check Master/Slave resolution */
2769 if ((ResAb & PHY_B_1000S_MSF) != 0) {
2770 SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
2771 ("Master/Slave Fault Port %d\n", Port));
2772 pPrt->PAutoNegFail = SK_TRUE;
2773 pPrt->PMSStatus = SK_MS_STAT_FAULT;
2774 return(SK_AND_OTHER);
2775 }
2776
2777 pPrt->PMSStatus = ((ResAb & PHY_B_1000S_MSR) != 0) ?
2778 SK_MS_STAT_MASTER : SK_MS_STAT_SLAVE;
2779#endif	/* TEST_ONLY */
2780
2781 /* Check PAUSE mismatch ??? */
2782 /* We are using IEEE 802.3z/D5.0 Table 37-4 */
2783 if ((AuxStat & PHY_B_AS_PAUSE_MSK) == PHY_B_AS_PAUSE_MSK) {
2784 /* Symmetric PAUSE */
2785 pPrt->PFlowCtrlStatus = SK_FLOW_STAT_SYMMETRIC;
2786 }
2787 else if ((AuxStat & PHY_B_AS_PAUSE_MSK) == PHY_B_AS_PRR) {
2788 /* Enable PAUSE receive, disable PAUSE transmit */
2789 pPrt->PFlowCtrlStatus = SK_FLOW_STAT_REM_SEND;
2790 }
2791 else if ((AuxStat & PHY_B_AS_PAUSE_MSK) == PHY_B_AS_PRT) {
2792 /* Disable PAUSE receive, enable PAUSE transmit */
2793 pPrt->PFlowCtrlStatus = SK_FLOW_STAT_LOC_SEND;
2794 }
2795 else {
2796 /* PAUSE mismatch -> no PAUSE */
2797 pPrt->PFlowCtrlStatus = SK_FLOW_STAT_NONE;
2798 }
2799 pPrt->PLinkSpeedUsed = (SK_U8)SK_LSPEED_STAT_1000MBPS;
2800
2801 return(SK_AND_OK);
2802} /* SkXmAutoNegDoneBcom */
2803#endif /* GENESIS */
2804
2805
2806#ifdef YUKON
2807/******************************************************************************
2808 *
2809 * SkGmAutoNegDoneMarv() - Auto-negotiation handling
2810 *
2811 * Description:
2812 * This function handles the auto-negotiation if the Done bit is set.
2813 *
2814 * Returns:
2815 * SK_AND_OK o.k.
2816 * SK_AND_DUP_CAP Duplex capability error happened
2817 * SK_AND_OTHER Other error happened
2818 */
2819static int SkGmAutoNegDoneMarv(
2820SK_AC *pAC, /* adapter context */
2821SK_IOC IoC, /* IO context */
2822int Port) /* Port Index (MAC_1 + n) */
2823{
2824 SK_GEPORT *pPrt;
2825 SK_U16 LPAb; /* Link Partner Ability */
2826 SK_U16 ResAb; /* Resolved Ability */
2827 SK_U16 AuxStat; /* Auxiliary Status */
2828
2829 SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
2830 ("AutoNegDoneMarv, Port %d\n", Port));
2831 pPrt = &pAC->GIni.GP[Port];
2832
2833 /* Get PHY parameters */
2834 SkGmPhyRead(pAC, IoC, Port, PHY_MARV_AUNE_LP, &LPAb);
2835 SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
2836 ("Link P.Abil.=0x%04X\n", LPAb));
2837
2838 if ((LPAb & PHY_M_AN_RF) != 0) {
2839 SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
2840 ("AutoNegFail: Remote fault bit set Port %d\n", Port));
2841 pPrt->PAutoNegFail = SK_TRUE;
2842 return(SK_AND_OTHER);
2843 }
2844
2845 SkGmPhyRead(pAC, IoC, Port, PHY_MARV_1000T_STAT, &ResAb);
2846
2847 /* Check Master/Slave resolution */
2848 if ((ResAb & PHY_B_1000S_MSF) != 0) {
2849 SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
2850 ("Master/Slave Fault Port %d\n", Port));
2851 pPrt->PAutoNegFail = SK_TRUE;
2852 pPrt->PMSStatus = SK_MS_STAT_FAULT;
2853 return(SK_AND_OTHER);
2854 }
2855
2856 pPrt->PMSStatus = ((ResAb & PHY_B_1000S_MSR) != 0) ?
2857 (SK_U8)SK_MS_STAT_MASTER : (SK_U8)SK_MS_STAT_SLAVE;
2858
2859 /* Read PHY Specific Status */
2860 SkGmPhyRead(pAC, IoC, Port, PHY_MARV_PHY_STAT, &AuxStat);
2861
2862 /* Check Speed & Duplex resolved */
2863 if ((AuxStat & PHY_M_PS_SPDUP_RES) == 0) {
2864 SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
2865 ("AutoNegFail: Speed & Duplex not resolved, Port %d\n", Port));
2866 pPrt->PAutoNegFail = SK_TRUE;
2867 pPrt->PLinkModeStatus = (SK_U8)SK_LMODE_STAT_UNKNOWN;
2868 return(SK_AND_DUP_CAP);
2869 }
2870
2871 if ((AuxStat & PHY_M_PS_FULL_DUP) != 0) {
2872 pPrt->PLinkModeStatus = (SK_U8)SK_LMODE_STAT_AUTOFULL;
2873 }
2874 else {
2875 pPrt->PLinkModeStatus = (SK_U8)SK_LMODE_STAT_AUTOHALF;
2876 }
2877
2878 /* Check PAUSE mismatch ??? */
2879 /* We are using IEEE 802.3z/D5.0 Table 37-4 */
2880 if ((AuxStat & PHY_M_PS_PAUSE_MSK) == PHY_M_PS_PAUSE_MSK) {
2881 /* Symmetric PAUSE */
2882 pPrt->PFlowCtrlStatus = SK_FLOW_STAT_SYMMETRIC;
2883 }
2884 else if ((AuxStat & PHY_M_PS_PAUSE_MSK) == PHY_M_PS_RX_P_EN) {
2885 /* Enable PAUSE receive, disable PAUSE transmit */
2886 pPrt->PFlowCtrlStatus = SK_FLOW_STAT_REM_SEND;
2887 }
2888 else if ((AuxStat & PHY_M_PS_PAUSE_MSK) == PHY_M_PS_TX_P_EN) {
2889 /* Disable PAUSE receive, enable PAUSE transmit */
2890 pPrt->PFlowCtrlStatus = SK_FLOW_STAT_LOC_SEND;
2891 }
2892 else {
2893 /* PAUSE mismatch -> no PAUSE */
2894 pPrt->PFlowCtrlStatus = SK_FLOW_STAT_NONE;
2895 }
2896
2897 /* set used link speed */
2898 switch ((unsigned)(AuxStat & PHY_M_PS_SPEED_MSK)) {
2899 case (unsigned)PHY_M_PS_SPEED_1000:
2900 pPrt->PLinkSpeedUsed = (SK_U8)SK_LSPEED_STAT_1000MBPS;
2901 break;
2902 case PHY_M_PS_SPEED_100:
2903 pPrt->PLinkSpeedUsed = (SK_U8)SK_LSPEED_STAT_100MBPS;
2904 break;
2905 default:
2906 pPrt->PLinkSpeedUsed = (SK_U8)SK_LSPEED_STAT_10MBPS;
2907 }
2908
2909 return(SK_AND_OK);
2910} /* SkGmAutoNegDoneMarv */
2911#endif /* YUKON */
2912
2913
2914#ifdef OTHER_PHY
2915/******************************************************************************
2916 *
2917 * SkXmAutoNegDoneLone() - Auto-negotiation handling
2918 *
2919 * Description:
2920 * This function handles the auto-negotiation if the Done bit is set.
2921 *
2922 * Returns:
2923 * SK_AND_OK o.k.
2924 * SK_AND_DUP_CAP Duplex capability error happened
2925 * SK_AND_OTHER Other error happened
2926 */
2927static int SkXmAutoNegDoneLone(
2928SK_AC *pAC, /* adapter context */
2929SK_IOC IoC, /* IO context */
2930int Port) /* Port Index (MAC_1 + n) */
2931{
2932 SK_GEPORT *pPrt;
2933 SK_U16 ResAb; /* Resolved Ability */
2934 SK_U16 LPAb; /* Link Partner Ability */
2935 SK_U16 QuickStat; /* Auxiliary Status */
2936
2937 SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
2938 ("AutoNegDoneLone, Port %d\n", Port));
2939 pPrt = &pAC->GIni.GP[Port];
2940
2941 /* Get PHY parameters */
2942 SkXmPhyRead(pAC, IoC, Port, PHY_LONE_AUNE_LP, &LPAb);
2943 SkXmPhyRead(pAC, IoC, Port, PHY_LONE_1000T_STAT, &ResAb);
2944 SkXmPhyRead(pAC, IoC, Port, PHY_LONE_Q_STAT, &QuickStat);
2945
2946 if ((LPAb & PHY_L_AN_RF) != 0) {
2947 /* Remote fault bit is set */
2948 /* Error */
2949 SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
2950 ("AutoNegFail: Remote fault bit set Port %d\n", Port));
2951 pPrt->PAutoNegFail = SK_TRUE;
2952 return(SK_AND_OTHER);
2953 }
2954
2955 /* Check Duplex mismatch */
2956 if ((QuickStat & PHY_L_QS_DUP_MOD) != 0) {
2957 pPrt->PLinkModeStatus = (SK_U8)SK_LMODE_STAT_AUTOFULL;
2958 }
2959 else {
2960 pPrt->PLinkModeStatus = (SK_U8)SK_LMODE_STAT_AUTOHALF;
2961 }
2962
2963 /* Check Master/Slave resolution */
2964 if ((ResAb & PHY_L_1000S_MSF) != 0) {
2965 /* Error */
2966 SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
2967 ("Master/Slave Fault Port %d\n", Port));
2968 pPrt->PAutoNegFail = SK_TRUE;
2969 pPrt->PMSStatus = SK_MS_STAT_FAULT;
2970 return(SK_AND_OTHER);
2971 }
2972 else if (ResAb & PHY_L_1000S_MSR) {
2973 pPrt->PMSStatus = SK_MS_STAT_MASTER;
2974 }
2975 else {
2976 pPrt->PMSStatus = SK_MS_STAT_SLAVE;
2977 }
2978
2979 /* Check PAUSE mismatch */
2980 /* We are using IEEE 802.3z/D5.0 Table 37-4 */
2981 /* we must manually resolve the abilities here */
2982 pPrt->PFlowCtrlStatus = SK_FLOW_STAT_NONE;
2983
2984 switch (pPrt->PFlowCtrlMode) {
2985 case SK_FLOW_MODE_NONE:
2986 /* default */
2987 break;
2988 case SK_FLOW_MODE_LOC_SEND:
2989 if ((QuickStat & (PHY_L_QS_PAUSE | PHY_L_QS_AS_PAUSE)) ==
2990 (PHY_L_QS_PAUSE | PHY_L_QS_AS_PAUSE)) {
2991 /* Disable PAUSE receive, enable PAUSE transmit */
2992 pPrt->PFlowCtrlStatus = SK_FLOW_STAT_LOC_SEND;
2993 }
2994 break;
2995 case SK_FLOW_MODE_SYMMETRIC:
2996 if ((QuickStat & PHY_L_QS_PAUSE) != 0) {
2997 /* Symmetric PAUSE */
2998 pPrt->PFlowCtrlStatus = SK_FLOW_STAT_SYMMETRIC;
2999 }
3000 break;
3001 case SK_FLOW_MODE_SYM_OR_REM:
3002 if ((QuickStat & (PHY_L_QS_PAUSE | PHY_L_QS_AS_PAUSE)) ==
3003 PHY_L_QS_AS_PAUSE) {
3004 /* Enable PAUSE receive, disable PAUSE transmit */
3005 pPrt->PFlowCtrlStatus = SK_FLOW_STAT_REM_SEND;
3006 }
3007 else if ((QuickStat & PHY_L_QS_PAUSE) != 0) {
3008 /* Symmetric PAUSE */
3009 pPrt->PFlowCtrlStatus = SK_FLOW_STAT_SYMMETRIC;
3010 }
3011 break;
3012 default:
3013 SK_ERR_LOG(pAC, SK_ERRCL_SW | SK_ERRCL_INIT, SKERR_HWI_E016,
3014 SKERR_HWI_E016MSG);
3015 }
3016
3017 return(SK_AND_OK);
3018} /* SkXmAutoNegDoneLone */
3019
3020
3021/******************************************************************************
3022 *
3023 * SkXmAutoNegDoneNat() - Auto-negotiation handling
3024 *
3025 * Description:
3026 * This function handles the auto-negotiation if the Done bit is set.
3027 *
3028 * Returns:
3029 * SK_AND_OK o.k.
3030 * SK_AND_DUP_CAP Duplex capability error happened
3031 * SK_AND_OTHER Other error happened
3032 */
3033static int SkXmAutoNegDoneNat(
3034SK_AC *pAC, /* adapter context */
3035SK_IOC IoC, /* IO context */
3036int Port) /* Port Index (MAC_1 + n) */
3037{
3038/* todo: National */
3039 return(SK_AND_OK);
3040} /* SkXmAutoNegDoneNat */
3041#endif /* OTHER_PHY */
3042
3043
3044/******************************************************************************
3045 *
3046 * SkMacAutoNegDone() - Auto-negotiation handling
3047 *
3048 * Description: calls the auto-negotiation done routines dep. on board type
3049 *
3050 * Returns:
3051 * SK_AND_OK o.k.
3052 * SK_AND_DUP_CAP Duplex capability error happened
3053 * SK_AND_OTHER Other error happened
3054 */
3055int SkMacAutoNegDone(
3056SK_AC *pAC, /* adapter context */
3057SK_IOC IoC, /* IO context */
3058int Port) /* Port Index (MAC_1 + n) */
3059{
3060 SK_GEPORT *pPrt;
3061 int Rtv;
3062
3063 Rtv = SK_AND_OK;
3064
3065 pPrt = &pAC->GIni.GP[Port];
3066
3067#ifdef GENESIS
3068 if (pAC->GIni.GIGenesis) {
3069
3070 switch (pPrt->PhyType) {
3071
3072 case SK_PHY_XMAC:
3073 Rtv = SkXmAutoNegDoneXmac(pAC, IoC, Port);
3074 break;
3075 case SK_PHY_BCOM:
3076 Rtv = SkXmAutoNegDoneBcom(pAC, IoC, Port);
3077 break;
3078#ifdef OTHER_PHY
3079 case SK_PHY_LONE:
3080 Rtv = SkXmAutoNegDoneLone(pAC, IoC, Port);
3081 break;
3082 case SK_PHY_NAT:
3083 Rtv = SkXmAutoNegDoneNat(pAC, IoC, Port);
3084 break;
3085#endif /* OTHER_PHY */
3086 default:
3087 return(SK_AND_OTHER);
3088 }
3089 }
3090#endif /* GENESIS */
3091
3092#ifdef YUKON
3093 if (pAC->GIni.GIYukon) {
3094
3095 Rtv = SkGmAutoNegDoneMarv(pAC, IoC, Port);
3096 }
3097#endif /* YUKON */
3098
3099 if (Rtv != SK_AND_OK) {
3100 return(Rtv);
3101 }
3102
3103 SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
3104 ("AutoNeg done Port %d\n", Port));
3105
3106 /* We checked everything and may now enable the link */
3107 pPrt->PAutoNegFail = SK_FALSE;
3108
3109 SkMacRxTxEnable(pAC, IoC, Port);
3110
3111 return(SK_AND_OK);
3112} /* SkMacAutoNegDone */
3113
3114
3115/******************************************************************************
3116 *
3117 * SkMacRxTxEnable() - Enable Rx/Tx activity if port is up
3118 *
3119 * Description: enables Rx/Tx dep. on board type
3120 *
3121 * Returns:
3122 * 0 o.k.
3123 * != 0 Error happened
3124 */
3125int SkMacRxTxEnable(
3126SK_AC *pAC, /* adapter context */
3127SK_IOC IoC, /* IO context */
3128int Port) /* Port Index (MAC_1 + n) */
3129{
3130 SK_GEPORT *pPrt;
3131 SK_U16 Reg; /* 16-bit register value */
3132 SK_U16 IntMask; /* MAC interrupt mask */
3133#ifdef GENESIS
3134 SK_U16 SWord;
3135#endif
3136
3137 pPrt = &pAC->GIni.GP[Port];
3138
3139 if (!pPrt->PHWLinkUp) {
3140 /* The Hardware link is NOT up */
3141 return(0);
3142 }
3143
3144 if ((pPrt->PLinkMode == SK_LMODE_AUTOHALF ||
3145 pPrt->PLinkMode == SK_LMODE_AUTOFULL ||
3146 pPrt->PLinkMode == SK_LMODE_AUTOBOTH) &&
3147 pPrt->PAutoNegFail) {
3148 /* Auto-negotiation is not done or failed */
3149 return(0);
3150 }
3151
3152#ifdef GENESIS
3153 if (pAC->GIni.GIGenesis) {
3154 /* set Duplex Mode and Pause Mode */
3155 SkXmInitDupMd(pAC, IoC, Port);
3156
3157 SkXmInitPauseMd(pAC, IoC, Port);
3158
3159 /*
3160 * Initialize the Interrupt Mask Register. Default IRQs are...
3161 * - Link Asynchronous Event
3162 * - Link Partner requests config
3163 * - Auto Negotiation Done
3164 * - Rx Counter Event Overflow
3165 * - Tx Counter Event Overflow
3166 * - Transmit FIFO Underrun
3167 */
3168 IntMask = XM_DEF_MSK;
3169
3170#ifdef DEBUG
3171 /* add IRQ for Receive FIFO Overflow */
3172 IntMask &= ~XM_IS_RXF_OV;
3173#endif /* DEBUG */
3174
3175 if (pPrt->PhyType != SK_PHY_XMAC) {
3176 /* disable GP0 interrupt bit */
3177 IntMask |= XM_IS_INP_ASS;
3178 }
3179 XM_OUT16(IoC, Port, XM_IMSK, IntMask);
3180
3181 /* get MMU Command Reg. */
3182 XM_IN16(IoC, Port, XM_MMU_CMD, &Reg);
3183
3184 if (pPrt->PhyType != SK_PHY_XMAC &&
3185 (pPrt->PLinkModeStatus == SK_LMODE_STAT_FULL ||
3186 pPrt->PLinkModeStatus == SK_LMODE_STAT_AUTOFULL)) {
3187 /* set to Full Duplex */
3188 Reg |= XM_MMU_GMII_FD;
3189 }
3190
3191 switch (pPrt->PhyType) {
3192 case SK_PHY_BCOM:
3193 /*
3194 * Workaround BCOM Errata (#10523) for all BCom Phys
3195 * Enable Power Management after link up
3196 */
3197 SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_AUX_CTRL, &SWord);
3198 SkXmPhyWrite(pAC, IoC, Port, PHY_BCOM_AUX_CTRL,
3199 (SK_U16)(SWord & ~PHY_B_AC_DIS_PM));
3200 SkXmPhyWrite(pAC, IoC, Port, PHY_BCOM_INT_MASK,
3201 (SK_U16)PHY_B_DEF_MSK);
3202 break;
3203#ifdef OTHER_PHY
3204 case SK_PHY_LONE:
3205 SkXmPhyWrite(pAC, IoC, Port, PHY_LONE_INT_ENAB, PHY_L_DEF_MSK);
3206 break;
3207 case SK_PHY_NAT:
3208 /* todo National:
3209 SkXmPhyWrite(pAC, IoC, Port, PHY_NAT_INT_MASK, PHY_N_DEF_MSK); */
3210 /* no interrupts possible from National ??? */
3211 break;
3212#endif /* OTHER_PHY */
3213 }
3214
3215 /* enable Rx/Tx */
3216 XM_OUT16(IoC, Port, XM_MMU_CMD, Reg | XM_MMU_ENA_RX | XM_MMU_ENA_TX);
3217 }
3218#endif /* GENESIS */
3219
3220#ifdef YUKON
3221 if (pAC->GIni.GIYukon) {
3222 /*
3223 * Initialize the Interrupt Mask Register. Default IRQs are...
3224 * - Rx Counter Event Overflow
3225 * - Tx Counter Event Overflow
3226 * - Transmit FIFO Underrun
3227 */
3228 IntMask = GMAC_DEF_MSK;
3229
3230#ifdef DEBUG
3231 /* add IRQ for Receive FIFO Overrun */
3232 IntMask |= GM_IS_RX_FF_OR;
3233#endif /* DEBUG */
3234
3235 SK_OUT8(IoC, GMAC_IRQ_MSK, (SK_U8)IntMask);
3236
3237 /* get General Purpose Control */
3238 GM_IN16(IoC, Port, GM_GP_CTRL, &Reg);
3239
3240 if (pPrt->PLinkModeStatus == SK_LMODE_STAT_FULL ||
3241 pPrt->PLinkModeStatus == SK_LMODE_STAT_AUTOFULL) {
3242 /* set to Full Duplex */
3243 Reg |= GM_GPCR_DUP_FULL;
3244 }
3245
3246 /* enable Rx/Tx */
3247 GM_OUT16(IoC, Port, GM_GP_CTRL, (SK_U16)(Reg | GM_GPCR_RX_ENA |
3248 GM_GPCR_TX_ENA));
3249
3250#ifndef VCPU
3251 /* Enable all PHY interrupts */
3252 SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_INT_MASK,
3253 (SK_U16)PHY_M_DEF_MSK);
3254#endif /* VCPU */
3255 }
3256#endif /* YUKON */
3257
3258 return(0);
3259
3260} /* SkMacRxTxEnable */
3261
3262
3263/******************************************************************************
3264 *
3265 * SkMacRxTxDisable() - Disable Receiver and Transmitter
3266 *
3267 * Description: disables Rx/Tx dep. on board type
3268 *
3269 * Returns: N/A
3270 */
3271void SkMacRxTxDisable(
3272SK_AC *pAC, /* Adapter Context */
3273SK_IOC IoC, /* IO context */
3274int Port) /* Port Index (MAC_1 + n) */
3275{
3276 SK_U16 Word;
3277
3278#ifdef GENESIS
3279 if (pAC->GIni.GIGenesis) {
3280
3281 XM_IN16(IoC, Port, XM_MMU_CMD, &Word);
3282
3283 XM_OUT16(IoC, Port, XM_MMU_CMD, Word & ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX));
3284
3285 /* dummy read to ensure writing */
3286 XM_IN16(IoC, Port, XM_MMU_CMD, &Word);
3287 }
3288#endif /* GENESIS */
3289
3290#ifdef YUKON
3291 if (pAC->GIni.GIYukon) {
3292
3293 GM_IN16(IoC, Port, GM_GP_CTRL, &Word);
3294
3295 GM_OUT16(IoC, Port, GM_GP_CTRL, (SK_U16)(Word & ~(GM_GPCR_RX_ENA |
3296 GM_GPCR_TX_ENA)));
3297
3298 /* dummy read to ensure writing */
3299 GM_IN16(IoC, Port, GM_GP_CTRL, &Word);
3300 }
3301#endif /* YUKON */
3302
3303} /* SkMacRxTxDisable */
3304
3305
3306/******************************************************************************
3307 *
3308 * SkMacIrqDisable() - Disable IRQ from MAC
3309 *
3310 * Description: sets the IRQ-mask to disable IRQ dep. on board type
3311 *
3312 * Returns: N/A
3313 */
3314void SkMacIrqDisable(
3315SK_AC *pAC, /* Adapter Context */
3316SK_IOC IoC, /* IO context */
3317int Port) /* Port Index (MAC_1 + n) */
3318{
3319 SK_GEPORT *pPrt;
3320#ifdef GENESIS
3321 SK_U16 Word;
3322#endif
3323
3324 pPrt = &pAC->GIni.GP[Port];
3325
3326#ifdef GENESIS
3327 if (pAC->GIni.GIGenesis) {
3328
3329 /* disable all XMAC IRQs */
3330 XM_OUT16(IoC, Port, XM_IMSK, 0xffff);
3331
3332 /* Disable all PHY interrupts */
3333 switch (pPrt->PhyType) {
3334 case SK_PHY_BCOM:
3335 /* Make sure that PHY is initialized */
3336 if (pPrt->PState != SK_PRT_RESET) {
3337 /* NOT allowed if BCOM is in RESET state */
3338 /* Workaround BCOM Errata (#10523) all BCom */
3339 /* Disable Power Management if link is down */
3340 SkXmPhyRead(pAC, IoC, Port, PHY_BCOM_AUX_CTRL, &Word);
3341 SkXmPhyWrite(pAC, IoC, Port, PHY_BCOM_AUX_CTRL,
3342 (SK_U16)(Word | PHY_B_AC_DIS_PM));
3343 SkXmPhyWrite(pAC, IoC, Port, PHY_BCOM_INT_MASK, 0xffff);
3344 }
3345 break;
3346#ifdef OTHER_PHY
3347 case SK_PHY_LONE:
3348 SkXmPhyWrite(pAC, IoC, Port, PHY_LONE_INT_ENAB, 0);
3349 break;
3350 case SK_PHY_NAT:
3351 /* todo: National
3352 SkXmPhyWrite(pAC, IoC, Port, PHY_NAT_INT_MASK, 0xffff); */
3353 break;
3354#endif /* OTHER_PHY */
3355 }
3356 }
3357#endif /* GENESIS */
3358
3359#ifdef YUKON
3360 if (pAC->GIni.GIYukon) {
3361 /* disable all GMAC IRQs */
3362 SK_OUT8(IoC, GMAC_IRQ_MSK, 0);
3363
3364#ifndef VCPU
3365 /* Disable all PHY interrupts */
3366 SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_INT_MASK, 0);
3367#endif /* VCPU */
3368 }
3369#endif /* YUKON */
3370
3371} /* SkMacIrqDisable */
3372
3373
3374#ifdef SK_DIAG
3375/******************************************************************************
3376 *
3377 * SkXmSendCont() - Enable / Disable Send Continuous Mode
3378 *
3379 * Description: enable / disable Send Continuous Mode on XMAC
3380 *
3381 * Returns:
3382 * nothing
3383 */
3384void SkXmSendCont(
3385SK_AC *pAC, /* adapter context */
3386SK_IOC IoC, /* IO context */
3387int Port, /* Port Index (MAC_1 + n) */
3388SK_BOOL Enable) /* Enable / Disable */
3389{
3390 SK_U32 MdReg;
3391
3392 XM_IN32(IoC, Port, XM_MODE, &MdReg);
3393
3394 if (Enable) {
3395 MdReg |= XM_MD_TX_CONT;
3396 }
3397 else {
3398 MdReg &= ~XM_MD_TX_CONT;
3399 }
3400 /* setup Mode Register */
3401 XM_OUT32(IoC, Port, XM_MODE, MdReg);
3402
3403} /* SkXmSendCont */
3404
3405
3406/******************************************************************************
3407 *
3408 * SkMacTimeStamp() - Enable / Disable Time Stamp
3409 *
3410 * Description: enable / disable Time Stamp generation for Rx packets
3411 *
3412 * Returns:
3413 * nothing
3414 */
3415void SkMacTimeStamp(
3416SK_AC *pAC, /* adapter context */
3417SK_IOC IoC, /* IO context */
3418int Port, /* Port Index (MAC_1 + n) */
3419SK_BOOL Enable) /* Enable / Disable */
3420{
3421 SK_U32 MdReg;
3422 SK_U8 TimeCtrl;
3423
3424 if (pAC->GIni.GIGenesis) {
3425
3426 XM_IN32(IoC, Port, XM_MODE, &MdReg);
3427
3428 if (Enable) {
3429 MdReg |= XM_MD_ATS;
3430 }
3431 else {
3432 MdReg &= ~XM_MD_ATS;
3433 }
3434 /* setup Mode Register */
3435 XM_OUT32(IoC, Port, XM_MODE, MdReg);
3436 }
3437 else {
3438 if (Enable) {
3439 TimeCtrl = GMT_ST_START | GMT_ST_CLR_IRQ;
3440 }
3441 else {
3442 TimeCtrl = GMT_ST_STOP | GMT_ST_CLR_IRQ;
3443 }
3444 /* Start/Stop Time Stamp Timer */
3445 SK_OUT8(IoC, GMAC_TI_ST_CTRL, TimeCtrl);
3446 }
3447
3448} /* SkMacTimeStamp*/
3449
3450#else /* !SK_DIAG */
3451
3452#ifdef GENESIS
3453/******************************************************************************
3454 *
3455 * SkXmAutoNegLipaXmac() - Decides whether Link Partner could do auto-neg
3456 *
3457 * This function analyses the Interrupt status word. If any of the
3458 * Auto-negotiating interrupt bits are set, the PLipaAutoNeg variable
3459 * is set true.
3460 */
3461void SkXmAutoNegLipaXmac(
3462SK_AC *pAC, /* adapter context */
3463SK_IOC IoC, /* IO context */
3464int Port, /* Port Index (MAC_1 + n) */
3465SK_U16 IStatus) /* Interrupt Status word to analyse */
3466{
3467 SK_GEPORT *pPrt;
3468
3469 pPrt = &pAC->GIni.GP[Port];
3470
3471 if (pPrt->PLipaAutoNeg != SK_LIPA_AUTO &&
3472 (IStatus & (XM_IS_LIPA_RC | XM_IS_RX_PAGE | XM_IS_AND)) != 0) {
3473
3474 SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
3475 ("AutoNegLipa: AutoNeg detected on Port %d, IStatus=0x%04X\n",
3476 Port, IStatus));
3477 pPrt->PLipaAutoNeg = SK_LIPA_AUTO;
3478 }
3479} /* SkXmAutoNegLipaXmac */
3480#endif /* GENESIS */
3481
3482
3483/******************************************************************************
3484 *
3485 * SkMacAutoNegLipaPhy() - Decides whether Link Partner could do auto-neg
3486 *
3487 * This function analyses the PHY status word.
3488 * If any of the Auto-negotiating bits are set, the PLipaAutoNeg variable
3489 * is set true.
3490 */
3491void SkMacAutoNegLipaPhy(
3492SK_AC *pAC, /* adapter context */
3493SK_IOC IoC, /* IO context */
3494int Port, /* Port Index (MAC_1 + n) */
3495SK_U16 PhyStat) /* PHY Status word to analyse */
3496{
3497 SK_GEPORT *pPrt;
3498
3499 pPrt = &pAC->GIni.GP[Port];
3500
3501 if (pPrt->PLipaAutoNeg != SK_LIPA_AUTO &&
3502 (PhyStat & PHY_ST_AN_OVER) != 0) {
3503
3504 SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
3505 ("AutoNegLipa: AutoNeg detected on Port %d, PhyStat=0x%04X\n",
3506 Port, PhyStat));
3507 pPrt->PLipaAutoNeg = SK_LIPA_AUTO;
3508 }
3509} /* SkMacAutoNegLipaPhy */
3510
3511
3512#ifdef GENESIS
3513/******************************************************************************
3514 *
3515 * SkXmIrq() - Interrupt Service Routine
3516 *
3517 * Description: services an Interrupt Request of the XMAC
3518 *
3519 * Note:
3520 * With an external PHY, some interrupt bits are not meaningful any more:
3521 * - LinkAsyncEvent (bit #14) XM_IS_LNK_AE
3522 * - LinkPartnerReqConfig (bit #10) XM_IS_LIPA_RC
3523 * - Page Received (bit #9) XM_IS_RX_PAGE
3524 * - NextPageLoadedForXmt (bit #8) XM_IS_TX_PAGE
3525 * - AutoNegDone (bit #7) XM_IS_AND
3526 * Also probably not valid any more is the GP0 input bit:
3527 * - GPRegisterBit0set XM_IS_INP_ASS
3528 *
3529 * Returns:
3530 * nothing
3531 */
3532static void SkXmIrq(
3533SK_AC *pAC, /* adapter context */
3534SK_IOC IoC, /* IO context */
3535int Port) /* Port Index (MAC_1 + n) */
3536{
3537 SK_GEPORT *pPrt;
3538 SK_EVPARA Para;
3539 SK_U16 IStatus; /* Interrupt status read from the XMAC */
3540 SK_U16 IStatus2;
3541#ifdef SK_SLIM
3542 SK_U64 OverflowStatus;
3543#endif
3544
3545 pPrt = &pAC->GIni.GP[Port];
3546
3547 XM_IN16(IoC, Port, XM_ISRC, &IStatus);
3548
3549 /* LinkPartner Auto-negable? */
3550 if (pPrt->PhyType == SK_PHY_XMAC) {
3551 SkXmAutoNegLipaXmac(pAC, IoC, Port, IStatus);
3552 }
3553 else {
3554 /* mask bits that are not used with ext. PHY */
3555 IStatus &= ~(XM_IS_LNK_AE | XM_IS_LIPA_RC |
3556 XM_IS_RX_PAGE | XM_IS_TX_PAGE |
3557 XM_IS_AND | XM_IS_INP_ASS);
3558 }
3559
3560 SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_IRQ,
3561 ("XmacIrq Port %d Isr 0x%04X\n", Port, IStatus));
3562
3563 if (!pPrt->PHWLinkUp) {
3564 /* Spurious XMAC interrupt */
3565 SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_IRQ,
3566 ("SkXmIrq: spurious interrupt on Port %d\n", Port));
3567 return;
3568 }
3569
3570 if ((IStatus & XM_IS_INP_ASS) != 0) {
3571 /* Reread ISR Register if link is not in sync */
3572 XM_IN16(IoC, Port, XM_ISRC, &IStatus2);
3573
3574 SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_IRQ,
3575 ("SkXmIrq: Link async. Double check Port %d 0x%04X 0x%04X\n",
3576 Port, IStatus, IStatus2));
3577 IStatus &= ~XM_IS_INP_ASS;
3578 IStatus |= IStatus2;
3579 }
3580
3581 if ((IStatus & XM_IS_LNK_AE) != 0) {
3582 /* not used, GP0 is used instead */
3583 }
3584
3585 if ((IStatus & XM_IS_TX_ABORT) != 0) {
3586 /* not used */
3587 }
3588
3589 if ((IStatus & XM_IS_FRC_INT) != 0) {
3590 /* not used, use ASIC IRQ instead if needed */
3591 }
3592
3593 if ((IStatus & (XM_IS_INP_ASS | XM_IS_LIPA_RC | XM_IS_RX_PAGE)) != 0) {
3594 SkHWLinkDown(pAC, IoC, Port);
3595
3596 /* Signal to RLMT */
3597 Para.Para32[0] = (SK_U32)Port;
3598 SkEventQueue(pAC, SKGE_RLMT, SK_RLMT_LINK_DOWN, Para);
3599
3600 /* Start workaround Errata #2 timer */
3601 SkTimerStart(pAC, IoC, &pPrt->PWaTimer, SK_WA_INA_TIME,
3602 SKGE_HWAC, SK_HWEV_WATIM, Para);
3603 }
3604
3605 if ((IStatus & XM_IS_RX_PAGE) != 0) {
3606 /* not used */
3607 }
3608
3609 if ((IStatus & XM_IS_TX_PAGE) != 0) {
3610 /* not used */
3611 }
3612
3613 if ((IStatus & XM_IS_AND) != 0) {
3614 SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_IRQ,
3615 ("SkXmIrq: AND on link that is up Port %d\n", Port));
3616 }
3617
3618 if ((IStatus & XM_IS_TSC_OV) != 0) {
3619 /* not used */
3620 }
3621
3622 /* Combined Tx & Rx Counter Overflow SIRQ Event */
3623 if ((IStatus & (XM_IS_RXC_OV | XM_IS_TXC_OV)) != 0) {
3624#ifdef SK_SLIM
3625 SkXmOverflowStatus(pAC, IoC, Port, IStatus, &OverflowStatus);
3626#else
3627 Para.Para32[0] = (SK_U32)Port;
3628 Para.Para32[1] = (SK_U32)IStatus;
3629 SkPnmiEvent(pAC, IoC, SK_PNMI_EVT_SIRQ_OVERFLOW, Para);
3630#endif /* SK_SLIM */
3631 }
3632
3633 if ((IStatus & XM_IS_RXF_OV) != 0) {
3634 /* normal situation -> no effect */
3635#ifdef DEBUG
3636 pPrt->PRxOverCnt++;
3637#endif /* DEBUG */
3638 }
3639
3640 if ((IStatus & XM_IS_TXF_UR) != 0) {
3641 /* may NOT happen -> error log */
3642 SK_ERR_LOG(pAC, SK_ERRCL_HW, SKERR_SIRQ_E020, SKERR_SIRQ_E020MSG);
3643 }
3644
3645 if ((IStatus & XM_IS_TX_COMP) != 0) {
3646 /* not served here */
3647 }
3648
3649 if ((IStatus & XM_IS_RX_COMP) != 0) {
3650 /* not served here */
3651 }
3652} /* SkXmIrq */
3653#endif /* GENESIS */
3654
3655
3656#ifdef YUKON
3657/******************************************************************************
3658 *
3659 * SkGmIrq() - Interrupt Service Routine
3660 *
3661 * Description: services an Interrupt Request of the GMAC
3662 *
3663 * Note:
3664 *
3665 * Returns:
3666 * nothing
3667 */
3668static void SkGmIrq(
3669SK_AC *pAC, /* adapter context */
3670SK_IOC IoC, /* IO context */
3671int Port) /* Port Index (MAC_1 + n) */
3672{
3673 SK_GEPORT *pPrt;
3674 SK_U8 IStatus; /* Interrupt status */
3675#ifdef SK_SLIM
3676 SK_U64 OverflowStatus;
3677#else
3678 SK_EVPARA Para;
3679#endif
3680
3681 pPrt = &pAC->GIni.GP[Port];
3682
3683 SK_IN8(IoC, GMAC_IRQ_SRC, &IStatus);
3684
3685#ifdef XXX
3686 /* LinkPartner Auto-negable? */
3687 SkMacAutoNegLipaPhy(pAC, IoC, Port, IStatus);
3688#endif /* XXX */
3689
3690 SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_IRQ,
3691 ("GmacIrq Port %d Isr 0x%04X\n", Port, IStatus));
3692
3693 /* Combined Tx & Rx Counter Overflow SIRQ Event */
3694 if (IStatus & (GM_IS_RX_CO_OV | GM_IS_TX_CO_OV)) {
3695		/* these IRQs will be cleared by reading the GMAC's registers */
3696#ifdef SK_SLIM
3697 SkGmOverflowStatus(pAC, IoC, Port, IStatus, &OverflowStatus);
3698#else
3699 Para.Para32[0] = (SK_U32)Port;
3700 Para.Para32[1] = (SK_U32)IStatus;
3701 SkPnmiEvent(pAC, IoC, SK_PNMI_EVT_SIRQ_OVERFLOW, Para);
3702#endif
3703 }
3704
3705 if (IStatus & GM_IS_RX_FF_OR) {
3706 /* clear GMAC Rx FIFO Overrun IRQ */
3707 SK_OUT8(IoC, MR_ADDR(Port, RX_GMF_CTRL_T), (SK_U8)GMF_CLI_RX_FO);
3708#ifdef DEBUG
3709 pPrt->PRxOverCnt++;
3710#endif /* DEBUG */
3711 }
3712
3713 if (IStatus & GM_IS_TX_FF_UR) {
3714 /* clear GMAC Tx FIFO Underrun IRQ */
3715 SK_OUT8(IoC, MR_ADDR(Port, TX_GMF_CTRL_T), (SK_U8)GMF_CLI_TX_FU);
3716 /* may NOT happen -> error log */
3717 SK_ERR_LOG(pAC, SK_ERRCL_HW, SKERR_SIRQ_E020, SKERR_SIRQ_E020MSG);
3718 }
3719
3720 if (IStatus & GM_IS_TX_COMPL) {
3721 /* not served here */
3722 }
3723
3724 if (IStatus & GM_IS_RX_COMPL) {
3725 /* not served here */
3726 }
3727} /* SkGmIrq */
3728#endif /* YUKON */
3729
3730
3731/******************************************************************************
3732 *
3733 * SkMacIrq() - Interrupt Service Routine for MAC
3734 *
3735 * Description: calls the Interrupt Service Routine dep. on board type
3736 *
3737 * Returns:
3738 * nothing
3739 */
3740void SkMacIrq(
3741SK_AC *pAC, /* adapter context */
3742SK_IOC IoC, /* IO context */
3743int Port) /* Port Index (MAC_1 + n) */
3744{
3745#ifdef GENESIS
3746 if (pAC->GIni.GIGenesis) {
3747 /* IRQ from XMAC */
3748 SkXmIrq(pAC, IoC, Port);
3749 }
3750#endif /* GENESIS */
3751
3752#ifdef YUKON
3753 if (pAC->GIni.GIYukon) {
3754 /* IRQ from GMAC */
3755 SkGmIrq(pAC, IoC, Port);
3756 }
3757#endif /* YUKON */
3758
3759} /* SkMacIrq */
3760
3761#endif /* !SK_DIAG */
3762
3763#ifdef GENESIS
3764/******************************************************************************
3765 *
3766 * SkXmUpdateStats() - Force the XMAC to output the current statistic
3767 *
3768 * Description:
3769 *	The XMAC holds its statistics internally. To obtain the current
3770 *	values, a command must be sent so that the statistic data will
3771 * be written to a predefined memory area on the adapter.
3772 *
3773 * Returns:
3774 * 0: success
3775 * 1: something went wrong
3776 */
3777int SkXmUpdateStats(
3778SK_AC *pAC, /* adapter context */
3779SK_IOC IoC, /* IO context */
3780unsigned int Port) /* Port Index (MAC_1 + n) */
3781{
3782 SK_GEPORT *pPrt;
3783 SK_U16 StatReg;
3784 int WaitIndex;
3785
3786 pPrt = &pAC->GIni.GP[Port];
3787 WaitIndex = 0;
3788
3789 /* Send an update command to XMAC specified */
3790 XM_OUT16(IoC, Port, XM_STAT_CMD, XM_SC_SNP_TXC | XM_SC_SNP_RXC);
3791
3792 /*
3793	 * XM_STAT_CMD is an auto-clearing register. Once the command bits
3794	 * have gone back to zero, the statistics have been transferred.
3795	 * Normally the command is executed immediately, but we poll in
3796	 * a loop just to be sure.
3797 */
3798 do {
3799
3800 XM_IN16(IoC, Port, XM_STAT_CMD, &StatReg);
3801
3802 if (++WaitIndex > 10) {
3803
3804 SK_ERR_LOG(pAC, SK_ERRCL_HW, SKERR_HWI_E021, SKERR_HWI_E021MSG);
3805
3806 return(1);
3807 }
3808 } while ((StatReg & (XM_SC_SNP_TXC | XM_SC_SNP_RXC)) != 0);
3809
3810 return(0);
3811} /* SkXmUpdateStats */
3812
3813
3814/******************************************************************************
3815 *
3816 * SkXmMacStatistic() - Get XMAC counter value
3817 *
3818 * Description:
3819 *	Gets the 32-bit counter value. Except for the octet counters,
3820 *	only the lower 32 bits are counted in hardware; the upper 32 bits
3821 *	must be counted in software by monitoring counter overflow interrupts.
3822 *
3823 * Returns:
3824 * 0: success
3825 * 1: something went wrong
3826 */
3827int SkXmMacStatistic(
3828SK_AC *pAC, /* adapter context */
3829SK_IOC IoC, /* IO context */
3830unsigned int Port, /* Port Index (MAC_1 + n) */
3831SK_U16 StatAddr, /* MIB counter base address */
3832SK_U32 SK_FAR *pVal) /* ptr to return statistic value */
3833{
3834 if ((StatAddr < XM_TXF_OK) || (StatAddr > XM_RXF_MAX_SZ)) {
3835
3836 SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_HWI_E022, SKERR_HWI_E022MSG);
3837
3838 return(1);
3839 }
3840
3841 XM_IN32(IoC, Port, StatAddr, pVal);
3842
3843 return(0);
3844} /* SkXmMacStatistic */
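SkXmUpdateStats() and SkXmMacStatistic() are meant to be used as a pair: the first snapshots the XMAC counters into adapter memory, the second then reads one 32-bit value. The following is only a minimal caller-side sketch, not part of the driver; the helper name is invented, and XM_TXF_OK is simply the lower-bound MIB address checked above.

static SK_U32 ReadXmacTxOkCounter(	/* hypothetical helper, for illustration only */
SK_AC	*pAC,		/* adapter context */
SK_IOC	IoC,		/* IO context */
unsigned int Port)	/* Port Index (MAC_1 + n) */
{
	SK_U32	Val = 0;

	/* snapshot the XMAC statistics into the adapter memory area */
	if (SkXmUpdateStats(pAC, IoC, Port) != 0) {
		return(0);	/* snapshot command did not complete */
	}

	/* read the lower 32 bits of the "frames transmitted OK" counter */
	(void)SkXmMacStatistic(pAC, IoC, Port, XM_TXF_OK, &Val);

	return(Val);
}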
3845
3846
3847/******************************************************************************
3848 *
3849 * SkXmResetCounter() - Clear MAC statistic counter
3850 *
3851 * Description:
3852 * Force the XMAC to clear its statistic counter.
3853 *
3854 * Returns:
3855 * 0: success
3856 * 1: something went wrong
3857 */
3858int SkXmResetCounter(
3859SK_AC *pAC, /* adapter context */
3860SK_IOC IoC, /* IO context */
3861unsigned int Port) /* Port Index (MAC_1 + n) */
3862{
3863 XM_OUT16(IoC, Port, XM_STAT_CMD, XM_SC_CLR_RXC | XM_SC_CLR_TXC);
3864 /* Clear two times according to Errata #3 */
3865 XM_OUT16(IoC, Port, XM_STAT_CMD, XM_SC_CLR_RXC | XM_SC_CLR_TXC);
3866
3867 return(0);
3868} /* SkXmResetCounter */
3869
3870
3871/******************************************************************************
3872 *
3873 * SkXmOverflowStatus() - Gets the status of counter overflow interrupt
3874 *
3875 * Description:
3876 *	Checks the source causing a counter overflow interrupt. On success the
3877 * resulting counter overflow status is written to <pStatus>, whereas the
3878 * upper dword stores the XMAC ReceiveCounterEvent register and the lower
3879 * dword the XMAC TransmitCounterEvent register.
3880 *
3881 * Note:
3882 * For XMAC the interrupt source is a self-clearing register, so the source
3883 *	must be checked only once. The SIRQ module does another check to be sure
3884 *	that no interrupt gets lost during processing.
3885 *
3886 * Returns:
3887 * 0: success
3888 * 1: something went wrong
3889 */
3890int SkXmOverflowStatus(
3891SK_AC *pAC, /* adapter context */
3892SK_IOC IoC, /* IO context */
3893unsigned int Port, /* Port Index (MAC_1 + n) */
3894SK_U16	IStatus,	/* Interrupt Status from MAC */
3895SK_U64 SK_FAR *pStatus) /* ptr for return overflow status value */
3896{
3897 SK_U64 Status; /* Overflow status */
3898 SK_U32 RegVal;
3899
3900 Status = 0;
3901
3902 if ((IStatus & XM_IS_RXC_OV) != 0) {
3903
3904 XM_IN32(IoC, Port, XM_RX_CNT_EV, &RegVal);
3905 Status |= (SK_U64)RegVal << 32;
3906 }
3907
3908 if ((IStatus & XM_IS_TXC_OV) != 0) {
3909
3910 XM_IN32(IoC, Port, XM_TX_CNT_EV, &RegVal);
3911 Status |= (SK_U64)RegVal;
3912 }
3913
3914 *pStatus = Status;
3915
3916 return(0);
3917} /* SkXmOverflowStatus */
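The packed value written by SkXmOverflowStatus() can be split back into the two XMAC event registers exactly as described in the header comment above. A small sketch only; the function name is illustrative and not part of the driver.

static void SplitXmOverflowStatus(	/* hypothetical helper, for illustration only */
SK_U64	Status,		/* packed overflow status from SkXmOverflowStatus() */
SK_U32	*pRxEv,		/* gets the XMAC Rx Counter Event register value */
SK_U32	*pTxEv)		/* gets the XMAC Tx Counter Event register value */
{
	*pRxEv = (SK_U32)(Status >> 32);	/* upper dword: XM_RX_CNT_EV */
	*pTxEv = (SK_U32)Status;		/* lower dword: XM_TX_CNT_EV */
}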
3918#endif /* GENESIS */
3919
3920
3921#ifdef YUKON
3922/******************************************************************************
3923 *
3924 * SkGmUpdateStats() - Force the GMAC to output the current statistic
3925 *
3926 * Description:
3927 *	Empty function for GMAC. Statistic data is accessible directly.
3928 *
3929 * Returns:
3930 * 0: success
3931 * 1: something went wrong
3932 */
3933int SkGmUpdateStats(
3934SK_AC *pAC, /* adapter context */
3935SK_IOC IoC, /* IO context */
3936unsigned int Port) /* Port Index (MAC_1 + n) */
3937{
3938 return(0);
3939}
3940
3941
3942/******************************************************************************
3943 *
3944 * SkGmMacStatistic() - Get GMAC counter value
3945 *
3946 * Description:
3947 *	Gets the 32-bit counter value. Except for the octet counters,
3948 *	only the lower 32 bits are counted in hardware; the upper 32 bits
3949 *	must be counted in software by monitoring counter overflow interrupts.
3950 *
3951 * Returns:
3952 * 0: success
3953 * 1: something went wrong
3954 */
3955int SkGmMacStatistic(
3956SK_AC *pAC, /* adapter context */
3957SK_IOC IoC, /* IO context */
3958unsigned int Port, /* Port Index (MAC_1 + n) */
3959SK_U16 StatAddr, /* MIB counter base address */
3960SK_U32 SK_FAR *pVal) /* ptr to return statistic value */
3961{
3962
3963 if ((StatAddr < GM_RXF_UC_OK) || (StatAddr > GM_TXE_FIFO_UR)) {
3964
3965 SK_ERR_LOG(pAC, SK_ERRCL_SW, SKERR_HWI_E022, SKERR_HWI_E022MSG);
3966
3967 SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
3968 ("SkGmMacStat: wrong MIB counter 0x%04X\n", StatAddr));
3969 return(1);
3970 }
3971
3972 GM_IN32(IoC, Port, StatAddr, pVal);
3973
3974 return(0);
3975} /* SkGmMacStatistic */
3976
3977
3978/******************************************************************************
3979 *
3980 * SkGmResetCounter() - Clear MAC statistic counter
3981 *
3982 * Description:
3983 * Force GMAC to clear its statistic counter.
3984 *
3985 * Returns:
3986 * 0: success
3987 * 1: something went wrong
3988 */
3989int SkGmResetCounter(
3990SK_AC *pAC, /* adapter context */
3991SK_IOC IoC, /* IO context */
3992unsigned int Port) /* Port Index (MAC_1 + n) */
3993{
3994 SK_U16 Reg; /* Phy Address Register */
3995 SK_U16 Word;
3996 int i;
3997
3998 GM_IN16(IoC, Port, GM_PHY_ADDR, &Reg);
3999
4000 /* set MIB Clear Counter Mode */
4001 GM_OUT16(IoC, Port, GM_PHY_ADDR, Reg | GM_PAR_MIB_CLR);
4002
4003 /* read all MIB Counters with Clear Mode set */
4004 for (i = 0; i < GM_MIB_CNT_SIZE; i++) {
4005 /* the reset is performed only when the lower 16 bits are read */
4006 GM_IN16(IoC, Port, GM_MIB_CNT_BASE + 8*i, &Word);
4007 }
4008
4009 /* clear MIB Clear Counter Mode */
4010 GM_OUT16(IoC, Port, GM_PHY_ADDR, Reg);
4011
4012 return(0);
4013} /* SkGmResetCounter */
4014
4015
4016/******************************************************************************
4017 *
4018 * SkGmOverflowStatus() - Gets the status of counter overflow interrupt
4019 *
4020 * Description:
4021 *	Checks the source causing a counter overflow interrupt. On success the
4022 *	resulting counter overflow status is written to <pStatus>, where the
4023 *	following bit coding is used:
4024 * 63:56 - unused
4025 * 55:48 - TxRx interrupt register bit7:0
4026 *		47:32 - Rx interrupt register
4027 * 31:24 - unused
4028 * 23:16 - TxRx interrupt register bit15:8
4029 * 15:0 - Tx interrupt register
4030 *
4031 * Returns:
4032 * 0: success
4033 * 1: something went wrong
4034 */
4035int SkGmOverflowStatus(
4036SK_AC *pAC, /* adapter context */
4037SK_IOC IoC, /* IO context */
4038unsigned int Port, /* Port Index (MAC_1 + n) */
4039SK_U16	IStatus,	/* Interrupt Status from MAC */
4040SK_U64 SK_FAR *pStatus) /* ptr for return overflow status value */
4041{
4042 SK_U64 Status; /* Overflow status */
4043 SK_U16 RegVal;
4044
4045 Status = 0;
4046
4047 if ((IStatus & GM_IS_RX_CO_OV) != 0) {
4048 /* this register is self-clearing after read */
4049 GM_IN16(IoC, Port, GM_RX_IRQ_SRC, &RegVal);
4050 Status |= (SK_U64)RegVal << 32;
4051 }
4052
4053 if ((IStatus & GM_IS_TX_CO_OV) != 0) {
4054 /* this register is self-clearing after read */
4055 GM_IN16(IoC, Port, GM_TX_IRQ_SRC, &RegVal);
4056 Status |= (SK_U64)RegVal;
4057 }
4058
4059 /* this register is self-clearing after read */
4060 GM_IN16(IoC, Port, GM_TR_IRQ_SRC, &RegVal);
4061 /* Rx overflow interrupt register bits (LoByte)*/
4062 Status |= (SK_U64)((SK_U8)RegVal) << 48;
4063 /* Tx overflow interrupt register bits (HiByte)*/
4064 Status |= (SK_U64)(RegVal >> 8) << 16;
4065
4066 *pStatus = Status;
4067
4068 return(0);
4069} /* SkGmOverflowStatus */
4070
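
Given the bit coding documented in the SkGmOverflowStatus() header, the packed value written to <pStatus> can be taken apart again as in the following sketch (helper and variable names are illustrative only):

static void DecodeOverflowStatus(
SK_U64 Status)		/* packed overflow status from SkGmOverflowStatus() */
{
	SK_U16 TxIrq;	/* Tx overflow IRQ register, bits 15:0 */
	SK_U16 RxIrq;	/* Rx overflow IRQ register, bits 47:32 */
	SK_U8 TrLo;	/* TxRx IRQ register bits 7:0, packed into 55:48 */
	SK_U8 TrHi;	/* TxRx IRQ register bits 15:8, packed into 23:16 */

	TxIrq = (SK_U16)(Status & 0xffff);
	RxIrq = (SK_U16)((Status >> 32) & 0xffff);
	TrLo = (SK_U8)((Status >> 48) & 0xff);
	TrHi = (SK_U8)((Status >> 16) & 0xff);

	/* act on the individual overflow bits here, e.g. bump the
	 * software-counted upper words of the affected MIB counters */
	(void)TxIrq; (void)RxIrq; (void)TrLo; (void)TrHi;
}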
4071
4072#ifndef SK_SLIM
4073/******************************************************************************
4074 *
4075 * SkGmCableDiagStatus() - Starts / Gets status of cable diagnostic test
4076 *
4077 * Description:
4078 * starts the cable diagnostic test if 'StartTest' is true
4079 * gets the results if 'StartTest' is false
4080 *
4081 * NOTE: this test is meaningful only when link is down
4082 *
4083 * Returns:
4084 * 0: success
4085 * 1: no YUKON copper
4086 * 2: test in progress
4087 */
4088int SkGmCableDiagStatus(
4089SK_AC *pAC, /* adapter context */
4090SK_IOC IoC, /* IO context */
4091int Port, /* Port Index (MAC_1 + n) */
4092SK_BOOL StartTest) /* flag for start / get result */
4093{
4094 int i;
4095 SK_U16 RegVal;
4096 SK_GEPORT *pPrt;
4097
4098 pPrt = &pAC->GIni.GP[Port];
4099
4100 if (pPrt->PhyType != SK_PHY_MARV_COPPER) {
4101
4102 return(1);
4103 }
4104
4105 if (StartTest) {
4106 /* only start the cable test */
4107 if ((pPrt->PhyId1 & PHY_I1_REV_MSK) < 4) {
4108 /* apply TDR workaround from Marvell */
4109 SkGmPhyWrite(pAC, IoC, Port, 29, 0x001e);
4110
4111 SkGmPhyWrite(pAC, IoC, Port, 30, 0xcc00);
4112 SkGmPhyWrite(pAC, IoC, Port, 30, 0xc800);
4113 SkGmPhyWrite(pAC, IoC, Port, 30, 0xc400);
4114 SkGmPhyWrite(pAC, IoC, Port, 30, 0xc000);
4115 SkGmPhyWrite(pAC, IoC, Port, 30, 0xc100);
4116 }
4117
4118 /* set address to 0 for MDI[0] */
4119 SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_EXT_ADR, 0);
4120
4121 /* Read Cable Diagnostic Reg */
4122 SkGmPhyRead(pAC, IoC, Port, PHY_MARV_CABLE_DIAG, &RegVal);
4123
4124 /* start Cable Diagnostic Test */
4125 SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_CABLE_DIAG,
4126 (SK_U16)(RegVal | PHY_M_CABD_ENA_TEST));
4127
4128 return(0);
4129 }
4130
4131 /* Read Cable Diagnostic Reg */
4132 SkGmPhyRead(pAC, IoC, Port, PHY_MARV_CABLE_DIAG, &RegVal);
4133
4134 SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
4135 ("PHY Cable Diag.=0x%04X\n", RegVal));
4136
4137 if ((RegVal & PHY_M_CABD_ENA_TEST) != 0) {
4138 /* test is running */
4139 return(2);
4140 }
4141
4142 /* get the test results */
4143 for (i = 0; i < 4; i++) {
4144 /* set address to i for MDI[i] */
4145 SkGmPhyWrite(pAC, IoC, Port, PHY_MARV_EXT_ADR, (SK_U16)i);
4146
4147 /* get Cable Diagnostic values */
4148 SkGmPhyRead(pAC, IoC, Port, PHY_MARV_CABLE_DIAG, &RegVal);
4149
4150 pPrt->PMdiPairLen[i] = (SK_U8)(RegVal & PHY_M_CABD_DIST_MSK);
4151
4152 pPrt->PMdiPairSts[i] = (SK_U8)((RegVal & PHY_M_CABD_STAT_MSK) >> 13);
4153 }
4154
4155 return(0);
4156} /* SkGmCableDiagStatus */
4157#endif /* !SK_SLIM */
4158#endif /* YUKON */
4159
4160/* End of file */
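
SkGmCableDiagStatus() above is a two-step interface: one call with StartTest set starts the test, later calls read back the per-pair results once the ENA_TEST bit has cleared. A usage sketch (the busy-poll loop and the helper name are illustrative; real code would delay between polls and bound the loop):

static int RunCableTest(
SK_AC *pAC,	/* adapter context */
SK_IOC IoC,	/* IO context */
int Port)	/* Port Index (MAC_1 + n) */
{
	SK_GEPORT *pPrt = &pAC->GIni.GP[Port];
	int Ret;

	Ret = SkGmCableDiagStatus(pAC, IoC, Port, SK_TRUE);
	if (Ret != 0) {
		return Ret;	/* 1: no YUKON copper PHY */
	}

	do {
		/* insert a delay / timeout here in real code */
		Ret = SkGmCableDiagStatus(pAC, IoC, Port, SK_FALSE);
	} while (Ret == 2);	/* 2: test still in progress */

	/* results are now valid in pPrt->PMdiPairLen[0..3] (distance)
	 * and pPrt->PMdiPairSts[0..3] (status) for the four MDI pairs */
	SK_DBG_MSG(pAC, SK_DBGMOD_HWM, SK_DBGCAT_CTRL,
		("MDI[0]: status %d, length %d\n",
		pPrt->PMdiPairSts[0], pPrt->PMdiPairLen[0]));

	return Ret;
}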
diff --git a/drivers/net/skfp/fplustm.c b/drivers/net/skfp/fplustm.c
index 76dc8adc9441..6028bbb3b28a 100644
--- a/drivers/net/skfp/fplustm.c
+++ b/drivers/net/skfp/fplustm.c
@@ -401,18 +401,18 @@ static void copy_tx_mac(struct s_smc *smc, u_long td, struct fddi_mac *mac,
401/* int len ; length of the frame including the FC */ 401/* int len ; length of the frame including the FC */
402{ 402{
403 int i ; 403 int i ;
404 u_int *p ; 404 __le32 *p ;
405 405
406 CHECK_NPP() ; 406 CHECK_NPP() ;
407 MARW(off) ; /* set memory address reg for writes */ 407 MARW(off) ; /* set memory address reg for writes */
408 408
409 p = (u_int *) mac ; 409 p = (__le32 *) mac ;
410 for (i = (len + 3)/4 ; i ; i--) { 410 for (i = (len + 3)/4 ; i ; i--) {
411 if (i == 1) { 411 if (i == 1) {
412 /* last word, set the tag bit */ 412 /* last word, set the tag bit */
413 outpw(FM_A(FM_CMDREG2),FM_ISTTB) ; 413 outpw(FM_A(FM_CMDREG2),FM_ISTTB) ;
414 } 414 }
415 write_mdr(smc,MDR_REVERSE(*p)) ; 415 write_mdr(smc,le32_to_cpu(*p)) ;
416 p++ ; 416 p++ ;
417 } 417 }
418 418
@@ -444,7 +444,7 @@ static void copy_tx_mac(struct s_smc *smc, u_long td, struct fddi_mac *mac,
444 */ 444 */
445static void directed_beacon(struct s_smc *smc) 445static void directed_beacon(struct s_smc *smc)
446{ 446{
447 SK_LOC_DECL(u_int,a[2]) ; 447 SK_LOC_DECL(__le32,a[2]) ;
448 448
449 /* 449 /*
450 * set UNA in frame 450 * set UNA in frame
@@ -458,9 +458,9 @@ static void directed_beacon(struct s_smc *smc)
458 CHECK_NPP() ; 458 CHECK_NPP() ;
459 /* set memory address reg for writes */ 459 /* set memory address reg for writes */
460 MARW(smc->hw.fp.fifo.rbc_ram_start+DBEACON_FRAME_OFF+4) ; 460 MARW(smc->hw.fp.fifo.rbc_ram_start+DBEACON_FRAME_OFF+4) ;
461 write_mdr(smc,MDR_REVERSE(a[0])) ; 461 write_mdr(smc,le32_to_cpu(a[0])) ;
462 outpw(FM_A(FM_CMDREG2),FM_ISTTB) ; /* set the tag bit */ 462 outpw(FM_A(FM_CMDREG2),FM_ISTTB) ; /* set the tag bit */
463 write_mdr(smc,MDR_REVERSE(a[1])) ; 463 write_mdr(smc,le32_to_cpu(a[1])) ;
464 464
465 outpw(FM_A(FM_SABC),smc->hw.fp.fifo.rbc_ram_start + DBEACON_FRAME_OFF) ; 465 outpw(FM_A(FM_SABC),smc->hw.fp.fifo.rbc_ram_start + DBEACON_FRAME_OFF) ;
466} 466}
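
The skfp changes in this merge drop the driver-private byte-swap macros (MDR_REVERSE above, AIX_REVERSE below) in favour of the kernel's endianness-annotated types: descriptor and frame words become __le32, values are stored with cpu_to_le32() and read back with le32_to_cpu(), so sparse can check the conversions. A small self-contained sketch of the pattern; the struct and helper names are made up for illustration, not taken from skfp:

#include <linux/types.h>
#include <asm/byteorder.h>

struct example_desc {
	__le32 ctrl;	/* control word, little-endian in memory */
	__le32 addr;	/* buffer address, little-endian in memory */
};

static void example_fill(struct example_desc *d, u32 ctrl, u32 addr)
{
	/* convert CPU-endian values to the device byte order on store */
	d->ctrl = cpu_to_le32(ctrl);
	d->addr = cpu_to_le32(addr);
}

static u32 example_read_ctrl(const struct example_desc *d)
{
	/* convert back to CPU byte order before using the value */
	return le32_to_cpu(d->ctrl);
}

Because the byte swap only permutes bits, a flag can also be cleared directly on the stored little-endian word, which is the form the hwmtm.c hunks below use, e.g. txd_tbctrl &= ~cpu_to_le32(BMU_OWN).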
diff --git a/drivers/net/skfp/h/fplustm.h b/drivers/net/skfp/h/fplustm.h
index 98bbf654d12f..6d738e1e2393 100644
--- a/drivers/net/skfp/h/fplustm.h
+++ b/drivers/net/skfp/h/fplustm.h
@@ -50,12 +50,12 @@ struct err_st {
50 * Transmit Descriptor struct 50 * Transmit Descriptor struct
51 */ 51 */
52struct s_smt_fp_txd { 52struct s_smt_fp_txd {
53 u_int txd_tbctrl ; /* transmit buffer control */ 53 __le32 txd_tbctrl ; /* transmit buffer control */
54 u_int txd_txdscr ; /* transmit frame status word */ 54 __le32 txd_txdscr ; /* transmit frame status word */
55 u_int txd_tbadr ; /* physical tx buffer address */ 55 __le32 txd_tbadr ; /* physical tx buffer address */
56 u_int txd_ntdadr ; /* physical pointer to the next TxD */ 56 __le32 txd_ntdadr ; /* physical pointer to the next TxD */
57#ifdef ENA_64BIT_SUP 57#ifdef ENA_64BIT_SUP
58 u_int txd_tbadr_hi ; /* physical tx buffer addr (high dword)*/ 58 __le32 txd_tbadr_hi ; /* physical tx buffer addr (high dword)*/
59#endif 59#endif
60 char far *txd_virt ; /* virtual pointer to the data frag */ 60 char far *txd_virt ; /* virtual pointer to the data frag */
61 /* virt pointer to the next TxD */ 61 /* virt pointer to the next TxD */
@@ -67,12 +67,12 @@ struct s_smt_fp_txd {
67 * Receive Descriptor struct 67 * Receive Descriptor struct
68 */ 68 */
69struct s_smt_fp_rxd { 69struct s_smt_fp_rxd {
70 u_int rxd_rbctrl ; /* receive buffer control */ 70 __le32 rxd_rbctrl ; /* receive buffer control */
71 u_int rxd_rfsw ; /* receive frame status word */ 71 __le32 rxd_rfsw ; /* receive frame status word */
72 u_int rxd_rbadr ; /* physical rx buffer address */ 72 __le32 rxd_rbadr ; /* physical rx buffer address */
73 u_int rxd_nrdadr ; /* physical pointer to the next RxD */ 73 __le32 rxd_nrdadr ; /* physical pointer to the next RxD */
74#ifdef ENA_64BIT_SUP 74#ifdef ENA_64BIT_SUP
75 u_int rxd_rbadr_hi ; /* physical tx buffer addr (high dword)*/ 75 __le32 rxd_rbadr_hi ; /* physical tx buffer addr (high dword)*/
76#endif 76#endif
77 char far *rxd_virt ; /* virtual pointer to the data frag */ 77 char far *rxd_virt ; /* virtual pointer to the data frag */
78 /* virt pointer to the next RxD */ 78 /* virt pointer to the next RxD */
diff --git a/drivers/net/skfp/hwmtm.c b/drivers/net/skfp/hwmtm.c
index 46e339315656..4218e97033c9 100644
--- a/drivers/net/skfp/hwmtm.c
+++ b/drivers/net/skfp/hwmtm.c
@@ -208,7 +208,7 @@ SMbuf* smt_get_mbuf(struct s_smc *smc);
208#if defined(NDIS_OS2) || defined(ODI2) 208#if defined(NDIS_OS2) || defined(ODI2)
209#define CR_READ(var) ((var) & 0xffff0000 | ((var) & 0xffff)) 209#define CR_READ(var) ((var) & 0xffff0000 | ((var) & 0xffff))
210#else 210#else
211#define CR_READ(var) (u_long)(var) 211#define CR_READ(var) (__le32)(var)
212#endif 212#endif
213 213
214#define IMASK_SLOW (IS_PLINT1 | IS_PLINT2 | IS_TIMINT | IS_TOKEN | \ 214#define IMASK_SLOW (IS_PLINT1 | IS_PLINT2 | IS_TIMINT | IS_TOKEN | \
@@ -343,16 +343,16 @@ static u_long init_descr_ring(struct s_smc *smc,
343 for (i=count-1, d1=start; i ; i--) { 343 for (i=count-1, d1=start; i ; i--) {
344 d2 = d1 ; 344 d2 = d1 ;
345 d1++ ; /* descr is owned by the host */ 345 d1++ ; /* descr is owned by the host */
346 d2->r.rxd_rbctrl = AIX_REVERSE(BMU_CHECK) ; 346 d2->r.rxd_rbctrl = cpu_to_le32(BMU_CHECK) ;
347 d2->r.rxd_next = &d1->r ; 347 d2->r.rxd_next = &d1->r ;
348 phys = mac_drv_virt2phys(smc,(void *)d1) ; 348 phys = mac_drv_virt2phys(smc,(void *)d1) ;
349 d2->r.rxd_nrdadr = AIX_REVERSE(phys) ; 349 d2->r.rxd_nrdadr = cpu_to_le32(phys) ;
350 } 350 }
351 DB_GEN("descr ring ends at = %x ",(void *)d1,0,3) ; 351 DB_GEN("descr ring ends at = %x ",(void *)d1,0,3) ;
352 d1->r.rxd_rbctrl = AIX_REVERSE(BMU_CHECK) ; 352 d1->r.rxd_rbctrl = cpu_to_le32(BMU_CHECK) ;
353 d1->r.rxd_next = &start->r ; 353 d1->r.rxd_next = &start->r ;
354 phys = mac_drv_virt2phys(smc,(void *)start) ; 354 phys = mac_drv_virt2phys(smc,(void *)start) ;
355 d1->r.rxd_nrdadr = AIX_REVERSE(phys) ; 355 d1->r.rxd_nrdadr = cpu_to_le32(phys) ;
356 356
357 for (i=count, d1=start; i ; i--) { 357 for (i=count, d1=start; i ; i--) {
358 DRV_BUF_FLUSH(&d1->r,DDI_DMA_SYNC_FORDEV) ; 358 DRV_BUF_FLUSH(&d1->r,DDI_DMA_SYNC_FORDEV) ;
@@ -376,7 +376,7 @@ static void init_txd_ring(struct s_smc *smc)
376 DB_GEN("Init async TxD ring, %d TxDs ",HWM_ASYNC_TXD_COUNT,0,3) ; 376 DB_GEN("Init async TxD ring, %d TxDs ",HWM_ASYNC_TXD_COUNT,0,3) ;
377 (void)init_descr_ring(smc,(union s_fp_descr volatile *)ds, 377 (void)init_descr_ring(smc,(union s_fp_descr volatile *)ds,
378 HWM_ASYNC_TXD_COUNT) ; 378 HWM_ASYNC_TXD_COUNT) ;
379 phys = AIX_REVERSE(ds->txd_ntdadr) ; 379 phys = le32_to_cpu(ds->txd_ntdadr) ;
380 ds++ ; 380 ds++ ;
381 queue->tx_curr_put = queue->tx_curr_get = ds ; 381 queue->tx_curr_put = queue->tx_curr_get = ds ;
382 ds-- ; 382 ds-- ;
@@ -390,7 +390,7 @@ static void init_txd_ring(struct s_smc *smc)
390 DB_GEN("Init sync TxD ring, %d TxDs ",HWM_SYNC_TXD_COUNT,0,3) ; 390 DB_GEN("Init sync TxD ring, %d TxDs ",HWM_SYNC_TXD_COUNT,0,3) ;
391 (void)init_descr_ring(smc,(union s_fp_descr volatile *)ds, 391 (void)init_descr_ring(smc,(union s_fp_descr volatile *)ds,
392 HWM_SYNC_TXD_COUNT) ; 392 HWM_SYNC_TXD_COUNT) ;
393 phys = AIX_REVERSE(ds->txd_ntdadr) ; 393 phys = le32_to_cpu(ds->txd_ntdadr) ;
394 ds++ ; 394 ds++ ;
395 queue->tx_curr_put = queue->tx_curr_get = ds ; 395 queue->tx_curr_put = queue->tx_curr_get = ds ;
396 queue->tx_free = HWM_SYNC_TXD_COUNT ; 396 queue->tx_free = HWM_SYNC_TXD_COUNT ;
@@ -412,7 +412,7 @@ static void init_rxd_ring(struct s_smc *smc)
412 DB_GEN("Init RxD ring, %d RxDs ",SMT_R1_RXD_COUNT,0,3) ; 412 DB_GEN("Init RxD ring, %d RxDs ",SMT_R1_RXD_COUNT,0,3) ;
413 (void)init_descr_ring(smc,(union s_fp_descr volatile *)ds, 413 (void)init_descr_ring(smc,(union s_fp_descr volatile *)ds,
414 SMT_R1_RXD_COUNT) ; 414 SMT_R1_RXD_COUNT) ;
415 phys = AIX_REVERSE(ds->rxd_nrdadr) ; 415 phys = le32_to_cpu(ds->rxd_nrdadr) ;
416 ds++ ; 416 ds++ ;
417 queue->rx_curr_put = queue->rx_curr_get = ds ; 417 queue->rx_curr_put = queue->rx_curr_get = ds ;
418 queue->rx_free = SMT_R1_RXD_COUNT ; 418 queue->rx_free = SMT_R1_RXD_COUNT ;
@@ -607,12 +607,12 @@ static u_long repair_txd_ring(struct s_smc *smc, struct s_smt_tx_queue *queue)
607 for (i = tx_used+queue->tx_free-1 ; i ; i-- ) { 607 for (i = tx_used+queue->tx_free-1 ; i ; i-- ) {
608 t = t->txd_next ; 608 t = t->txd_next ;
609 } 609 }
610 phys = AIX_REVERSE(t->txd_ntdadr) ; 610 phys = le32_to_cpu(t->txd_ntdadr) ;
611 611
612 t = queue->tx_curr_get ; 612 t = queue->tx_curr_get ;
613 while (tx_used) { 613 while (tx_used) {
614 DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORCPU) ; 614 DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORCPU) ;
615 tbctrl = AIX_REVERSE(t->txd_tbctrl) ; 615 tbctrl = le32_to_cpu(t->txd_tbctrl) ;
616 616
617 if (tbctrl & BMU_OWN) { 617 if (tbctrl & BMU_OWN) {
618 if (tbctrl & BMU_STF) { 618 if (tbctrl & BMU_STF) {
@@ -622,10 +622,10 @@ static u_long repair_txd_ring(struct s_smc *smc, struct s_smt_tx_queue *queue)
622 /* 622 /*
623 * repair the descriptor 623 * repair the descriptor
624 */ 624 */
625 t->txd_tbctrl &= AIX_REVERSE(~BMU_OWN) ; 625 t->txd_tbctrl &= ~cpu_to_le32(BMU_OWN) ;
626 } 626 }
627 } 627 }
628 phys = AIX_REVERSE(t->txd_ntdadr) ; 628 phys = le32_to_cpu(t->txd_ntdadr) ;
629 DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORDEV) ; 629 DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORDEV) ;
630 t = t->txd_next ; 630 t = t->txd_next ;
631 tx_used-- ; 631 tx_used-- ;
@@ -659,12 +659,12 @@ static u_long repair_rxd_ring(struct s_smc *smc, struct s_smt_rx_queue *queue)
659 for (i = SMT_R1_RXD_COUNT-1 ; i ; i-- ) { 659 for (i = SMT_R1_RXD_COUNT-1 ; i ; i-- ) {
660 r = r->rxd_next ; 660 r = r->rxd_next ;
661 } 661 }
662 phys = AIX_REVERSE(r->rxd_nrdadr) ; 662 phys = le32_to_cpu(r->rxd_nrdadr) ;
663 663
664 r = queue->rx_curr_get ; 664 r = queue->rx_curr_get ;
665 while (rx_used) { 665 while (rx_used) {
666 DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ; 666 DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
667 rbctrl = AIX_REVERSE(r->rxd_rbctrl) ; 667 rbctrl = le32_to_cpu(r->rxd_rbctrl) ;
668 668
669 if (rbctrl & BMU_OWN) { 669 if (rbctrl & BMU_OWN) {
670 if (rbctrl & BMU_STF) { 670 if (rbctrl & BMU_STF) {
@@ -674,10 +674,10 @@ static u_long repair_rxd_ring(struct s_smc *smc, struct s_smt_rx_queue *queue)
674 /* 674 /*
675 * repair the descriptor 675 * repair the descriptor
676 */ 676 */
677 r->rxd_rbctrl &= AIX_REVERSE(~BMU_OWN) ; 677 r->rxd_rbctrl &= ~cpu_to_le32(BMU_OWN) ;
678 } 678 }
679 } 679 }
680 phys = AIX_REVERSE(r->rxd_nrdadr) ; 680 phys = le32_to_cpu(r->rxd_nrdadr) ;
681 DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORDEV) ; 681 DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORDEV) ;
682 r = r->rxd_next ; 682 r = r->rxd_next ;
683 rx_used-- ; 683 rx_used-- ;
@@ -1094,8 +1094,7 @@ void process_receive(struct s_smc *smc)
1094 do { 1094 do {
1095 DB_RX("Check RxD %x for OWN and EOF",(void *)r,0,5) ; 1095 DB_RX("Check RxD %x for OWN and EOF",(void *)r,0,5) ;
1096 DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ; 1096 DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
1097 rbctrl = CR_READ(r->rxd_rbctrl) ; 1097 rbctrl = le32_to_cpu(CR_READ(r->rxd_rbctrl));
1098 rbctrl = AIX_REVERSE(rbctrl) ;
1099 1098
1100 if (rbctrl & BMU_OWN) { 1099 if (rbctrl & BMU_OWN) {
1101 NDD_TRACE("RHxE",r,rfsw,rbctrl) ; 1100 NDD_TRACE("RHxE",r,rfsw,rbctrl) ;
@@ -1118,7 +1117,7 @@ void process_receive(struct s_smc *smc)
1118 smc->os.hwm.detec_count = 0 ; 1117 smc->os.hwm.detec_count = 0 ;
1119 goto rx_end ; 1118 goto rx_end ;
1120 } 1119 }
1121 rfsw = AIX_REVERSE(r->rxd_rfsw) ; 1120 rfsw = le32_to_cpu(r->rxd_rfsw) ;
1122 if ((rbctrl & BMU_STF) != ((rbctrl & BMU_ST_BUF) <<5)) { 1121 if ((rbctrl & BMU_STF) != ((rbctrl & BMU_ST_BUF) <<5)) {
1123 /* 1122 /*
1124 * The BMU_STF bit is deleted, 1 frame is 1123 * The BMU_STF bit is deleted, 1 frame is
@@ -1151,7 +1150,7 @@ void process_receive(struct s_smc *smc)
1151 /* may be next 2 DRV_BUF_FLUSH() can be skipped, because */ 1150 /* may be next 2 DRV_BUF_FLUSH() can be skipped, because */
1152 /* BMU_ST_BUF will not be changed by the ASIC */ 1151 /* BMU_ST_BUF will not be changed by the ASIC */
1153 DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ; 1152 DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
1154 while (rx_used && !(r->rxd_rbctrl & AIX_REVERSE(BMU_ST_BUF))) { 1153 while (rx_used && !(r->rxd_rbctrl & cpu_to_le32(BMU_ST_BUF))) {
1155 DB_RX("Check STF bit in %x",(void *)r,0,5) ; 1154 DB_RX("Check STF bit in %x",(void *)r,0,5) ;
1156 r = r->rxd_next ; 1155 r = r->rxd_next ;
1157 DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ; 1156 DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
@@ -1171,7 +1170,7 @@ void process_receive(struct s_smc *smc)
1171 /* 1170 /*
1172 * ASIC Errata no. 7 (STF - Bit Bug) 1171 * ASIC Errata no. 7 (STF - Bit Bug)
1173 */ 1172 */
1174 rxd->rxd_rbctrl &= AIX_REVERSE(~BMU_STF) ; 1173 rxd->rxd_rbctrl &= cpu_to_le32(~BMU_STF) ;
1175 1174
1176 for (r=rxd, i=frag_count ; i ; r=r->rxd_next, i--){ 1175 for (r=rxd, i=frag_count ; i ; r=r->rxd_next, i--){
1177 DB_RX("dma_complete for RxD %x",(void *)r,0,5) ; 1176 DB_RX("dma_complete for RxD %x",(void *)r,0,5) ;
@@ -1287,7 +1286,7 @@ void process_receive(struct s_smc *smc)
1287 hwm_cpy_rxd2mb(rxd,data,len) ; 1286 hwm_cpy_rxd2mb(rxd,data,len) ;
1288#else 1287#else
1289 for (r=rxd, i=used_frags ; i ; r=r->rxd_next, i--){ 1288 for (r=rxd, i=used_frags ; i ; r=r->rxd_next, i--){
1290 n = AIX_REVERSE(r->rxd_rbctrl) & RD_LENGTH ; 1289 n = le32_to_cpu(r->rxd_rbctrl) & RD_LENGTH ;
1291 DB_RX("cp SMT frame to mb: len = %d",n,0,6) ; 1290 DB_RX("cp SMT frame to mb: len = %d",n,0,6) ;
1292 memcpy(data,r->rxd_virt,n) ; 1291 memcpy(data,r->rxd_virt,n) ;
1293 data += n ; 1292 data += n ;
@@ -1426,14 +1425,14 @@ void hwm_rx_frag(struct s_smc *smc, char far *virt, u_long phys, int len,
1426 int frame_status) 1425 int frame_status)
1427{ 1426{
1428 struct s_smt_fp_rxd volatile *r ; 1427 struct s_smt_fp_rxd volatile *r ;
1429 u_int rbctrl ; 1428 __le32 rbctrl;
1430 1429
1431 NDD_TRACE("RHfB",virt,len,frame_status) ; 1430 NDD_TRACE("RHfB",virt,len,frame_status) ;
1432 DB_RX("hwm_rx_frag: len = %d, frame_status = %x\n",len,frame_status,2) ; 1431 DB_RX("hwm_rx_frag: len = %d, frame_status = %x\n",len,frame_status,2) ;
1433 r = smc->hw.fp.rx_q[QUEUE_R1].rx_curr_put ; 1432 r = smc->hw.fp.rx_q[QUEUE_R1].rx_curr_put ;
1434 r->rxd_virt = virt ; 1433 r->rxd_virt = virt ;
1435 r->rxd_rbadr = AIX_REVERSE(phys) ; 1434 r->rxd_rbadr = cpu_to_le32(phys) ;
1436 rbctrl = AIX_REVERSE( (((u_long)frame_status & 1435 rbctrl = cpu_to_le32( (((__u32)frame_status &
1437 (FIRST_FRAG|LAST_FRAG))<<26) | 1436 (FIRST_FRAG|LAST_FRAG))<<26) |
1438 (((u_long) frame_status & FIRST_FRAG) << 21) | 1437 (((u_long) frame_status & FIRST_FRAG) << 21) |
1439 BMU_OWN | BMU_CHECK | BMU_EN_IRQ_EOF | len) ; 1438 BMU_OWN | BMU_CHECK | BMU_EN_IRQ_EOF | len) ;
@@ -1444,7 +1443,7 @@ void hwm_rx_frag(struct s_smc *smc, char far *virt, u_long phys, int len,
1444 smc->hw.fp.rx_q[QUEUE_R1].rx_free-- ; 1443 smc->hw.fp.rx_q[QUEUE_R1].rx_free-- ;
1445 smc->hw.fp.rx_q[QUEUE_R1].rx_used++ ; 1444 smc->hw.fp.rx_q[QUEUE_R1].rx_used++ ;
1446 smc->hw.fp.rx_q[QUEUE_R1].rx_curr_put = r->rxd_next ; 1445 smc->hw.fp.rx_q[QUEUE_R1].rx_curr_put = r->rxd_next ;
1447 NDD_TRACE("RHfE",r,AIX_REVERSE(r->rxd_rbadr),0) ; 1446 NDD_TRACE("RHfE",r,le32_to_cpu(r->rxd_rbadr),0) ;
1448} 1447}
1449 1448
1450/* 1449/*
@@ -1494,15 +1493,15 @@ void mac_drv_clear_rx_queue(struct s_smc *smc)
1494 while (queue->rx_used) { 1493 while (queue->rx_used) {
1495 DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ; 1494 DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
1496 DB_RX("switch OWN bit of RxD 0x%x ",r,0,5) ; 1495 DB_RX("switch OWN bit of RxD 0x%x ",r,0,5) ;
1497 r->rxd_rbctrl &= AIX_REVERSE(~BMU_OWN) ; 1496 r->rxd_rbctrl &= ~cpu_to_le32(BMU_OWN) ;
1498 frag_count = 1 ; 1497 frag_count = 1 ;
1499 DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORDEV) ; 1498 DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORDEV) ;
1500 r = r->rxd_next ; 1499 r = r->rxd_next ;
1501 DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ; 1500 DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
1502 while (r != queue->rx_curr_put && 1501 while (r != queue->rx_curr_put &&
1503 !(r->rxd_rbctrl & AIX_REVERSE(BMU_ST_BUF))) { 1502 !(r->rxd_rbctrl & cpu_to_le32(BMU_ST_BUF))) {
1504 DB_RX("Check STF bit in %x",(void *)r,0,5) ; 1503 DB_RX("Check STF bit in %x",(void *)r,0,5) ;
1505 r->rxd_rbctrl &= AIX_REVERSE(~BMU_OWN) ; 1504 r->rxd_rbctrl &= ~cpu_to_le32(BMU_OWN) ;
1506 DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORDEV) ; 1505 DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORDEV) ;
1507 r = r->rxd_next ; 1506 r = r->rxd_next ;
1508 DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ; 1507 DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ;
@@ -1640,7 +1639,7 @@ void hwm_tx_frag(struct s_smc *smc, char far *virt, u_long phys, int len,
1640{ 1639{
1641 struct s_smt_fp_txd volatile *t ; 1640 struct s_smt_fp_txd volatile *t ;
1642 struct s_smt_tx_queue *queue ; 1641 struct s_smt_tx_queue *queue ;
1643 u_int tbctrl ; 1642 __le32 tbctrl ;
1644 1643
1645 queue = smc->os.hwm.tx_p ; 1644 queue = smc->os.hwm.tx_p ;
1646 1645
@@ -1657,9 +1656,9 @@ void hwm_tx_frag(struct s_smc *smc, char far *virt, u_long phys, int len,
1657 /* '*t' is already defined */ 1656 /* '*t' is already defined */
1658 DB_TX("LAN_TX: TxD = %x, virt = %x ",t,virt,3) ; 1657 DB_TX("LAN_TX: TxD = %x, virt = %x ",t,virt,3) ;
1659 t->txd_virt = virt ; 1658 t->txd_virt = virt ;
1660 t->txd_txdscr = AIX_REVERSE(smc->os.hwm.tx_descr) ; 1659 t->txd_txdscr = cpu_to_le32(smc->os.hwm.tx_descr) ;
1661 t->txd_tbadr = AIX_REVERSE(phys) ; 1660 t->txd_tbadr = cpu_to_le32(phys) ;
1662 tbctrl = AIX_REVERSE((((u_long)frame_status & 1661 tbctrl = cpu_to_le32((((__u32)frame_status &
1663 (FIRST_FRAG|LAST_FRAG|EN_IRQ_EOF))<< 26) | 1662 (FIRST_FRAG|LAST_FRAG|EN_IRQ_EOF))<< 26) |
1664 BMU_OWN|BMU_CHECK |len) ; 1663 BMU_OWN|BMU_CHECK |len) ;
1665 t->txd_tbctrl = tbctrl ; 1664 t->txd_tbctrl = tbctrl ;
@@ -1826,7 +1825,7 @@ void smt_send_mbuf(struct s_smc *smc, SMbuf *mb, int fc)
1826 struct s_smt_tx_queue *queue ; 1825 struct s_smt_tx_queue *queue ;
1827 struct s_smt_fp_txd volatile *t ; 1826 struct s_smt_fp_txd volatile *t ;
1828 u_long phys ; 1827 u_long phys ;
1829 u_int tbctrl ; 1828 __le32 tbctrl;
1830 1829
1831 NDD_TRACE("THSB",mb,fc,0) ; 1830 NDD_TRACE("THSB",mb,fc,0) ;
1832 DB_TX("smt_send_mbuf: mb = 0x%x, fc = 0x%x",mb,fc,4) ; 1831 DB_TX("smt_send_mbuf: mb = 0x%x, fc = 0x%x",mb,fc,4) ;
@@ -1894,14 +1893,14 @@ void smt_send_mbuf(struct s_smc *smc, SMbuf *mb, int fc)
1894 DB_TX("init TxD = 0x%x",(void *)t,0,5) ; 1893 DB_TX("init TxD = 0x%x",(void *)t,0,5) ;
1895 if (i == frag_count-1) { 1894 if (i == frag_count-1) {
1896 frame_status |= LAST_FRAG ; 1895 frame_status |= LAST_FRAG ;
1897 t->txd_txdscr = AIX_REVERSE(TX_DESCRIPTOR | 1896 t->txd_txdscr = cpu_to_le32(TX_DESCRIPTOR |
1898 (((u_long)(mb->sm_len-1)&3) << 27)) ; 1897 (((__u32)(mb->sm_len-1)&3) << 27)) ;
1899 } 1898 }
1900 t->txd_virt = virt[i] ; 1899 t->txd_virt = virt[i] ;
1901 phys = dma_master(smc, (void far *)virt[i], 1900 phys = dma_master(smc, (void far *)virt[i],
1902 frag_len[i], DMA_RD|SMT_BUF) ; 1901 frag_len[i], DMA_RD|SMT_BUF) ;
1903 t->txd_tbadr = AIX_REVERSE(phys) ; 1902 t->txd_tbadr = cpu_to_le32(phys) ;
1904 tbctrl = AIX_REVERSE((((u_long) frame_status & 1903 tbctrl = cpu_to_le32((((__u32)frame_status &
1905 (FIRST_FRAG|LAST_FRAG)) << 26) | 1904 (FIRST_FRAG|LAST_FRAG)) << 26) |
1906 BMU_OWN | BMU_CHECK | BMU_SMT_TX |frag_len[i]) ; 1905 BMU_OWN | BMU_CHECK | BMU_SMT_TX |frag_len[i]) ;
1907 t->txd_tbctrl = tbctrl ; 1906 t->txd_tbctrl = tbctrl ;
@@ -1971,8 +1970,7 @@ static void mac_drv_clear_txd(struct s_smc *smc)
1971 do { 1970 do {
1972 DRV_BUF_FLUSH(t1,DDI_DMA_SYNC_FORCPU) ; 1971 DRV_BUF_FLUSH(t1,DDI_DMA_SYNC_FORCPU) ;
1973 DB_TX("check OWN/EOF bit of TxD 0x%x",t1,0,5) ; 1972 DB_TX("check OWN/EOF bit of TxD 0x%x",t1,0,5) ;
1974 tbctrl = CR_READ(t1->txd_tbctrl) ; 1973 tbctrl = le32_to_cpu(CR_READ(t1->txd_tbctrl));
1975 tbctrl = AIX_REVERSE(tbctrl) ;
1976 1974
1977 if (tbctrl & BMU_OWN || !queue->tx_used){ 1975 if (tbctrl & BMU_OWN || !queue->tx_used){
1978 DB_TX("End of TxDs queue %d",i,0,4) ; 1976 DB_TX("End of TxDs queue %d",i,0,4) ;
@@ -1984,7 +1982,7 @@ static void mac_drv_clear_txd(struct s_smc *smc)
1984 1982
1985 t1 = queue->tx_curr_get ; 1983 t1 = queue->tx_curr_get ;
1986 for (n = frag_count; n; n--) { 1984 for (n = frag_count; n; n--) {
1987 tbctrl = AIX_REVERSE(t1->txd_tbctrl) ; 1985 tbctrl = le32_to_cpu(t1->txd_tbctrl) ;
1988 dma_complete(smc, 1986 dma_complete(smc,
1989 (union s_fp_descr volatile *) t1, 1987 (union s_fp_descr volatile *) t1,
1990 (int) (DMA_RD | 1988 (int) (DMA_RD |
@@ -2064,7 +2062,7 @@ void mac_drv_clear_tx_queue(struct s_smc *smc)
2064 while (tx_used) { 2062 while (tx_used) {
2065 DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORCPU) ; 2063 DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORCPU) ;
2066 DB_TX("switch OWN bit of TxD 0x%x ",t,0,5) ; 2064 DB_TX("switch OWN bit of TxD 0x%x ",t,0,5) ;
2067 t->txd_tbctrl &= AIX_REVERSE(~BMU_OWN) ; 2065 t->txd_tbctrl &= ~cpu_to_le32(BMU_OWN) ;
2068 DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORDEV) ; 2066 DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORDEV) ;
2069 t = t->txd_next ; 2067 t = t->txd_next ;
2070 tx_used-- ; 2068 tx_used-- ;
@@ -2086,10 +2084,10 @@ void mac_drv_clear_tx_queue(struct s_smc *smc)
2086 * tx_curr_get and tx_curr_put to this position 2084 * tx_curr_get and tx_curr_put to this position
2087 */ 2085 */
2088 if (i == QUEUE_S) { 2086 if (i == QUEUE_S) {
2089 outpd(ADDR(B5_XS_DA),AIX_REVERSE(t->txd_ntdadr)) ; 2087 outpd(ADDR(B5_XS_DA),le32_to_cpu(t->txd_ntdadr)) ;
2090 } 2088 }
2091 else { 2089 else {
2092 outpd(ADDR(B5_XA_DA),AIX_REVERSE(t->txd_ntdadr)) ; 2090 outpd(ADDR(B5_XA_DA),le32_to_cpu(t->txd_ntdadr)) ;
2093 } 2091 }
2094 2092
2095 queue->tx_curr_put = queue->tx_curr_get->txd_next ; 2093 queue->tx_curr_put = queue->tx_curr_get->txd_next ;
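
A sequence repeated throughout the hwmtm.c hunks above: before the CPU inspects a descriptor the BMU may still own, the buffer is synced for the CPU and the control word is converted from __le32 before the OWN bit is tested. Written out as a small helper for illustration only; DRV_BUF_FLUSH, BMU_OWN and struct s_smt_fp_txd are the driver's own names as seen in the diff, while the helper itself is not part of the patch:

static int txd_owned_by_hw(struct s_smt_fp_txd volatile *t)
{
	u_int tbctrl;

	DRV_BUF_FLUSH(t, DDI_DMA_SYNC_FORCPU);	/* sync the DMA view for the CPU */
	tbctrl = le32_to_cpu(t->txd_tbctrl);	/* __le32 -> CPU byte order */

	return (tbctrl & BMU_OWN) != 0;		/* still owned by the BMU? */
}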
diff --git a/drivers/net/skfp/skfddi.c b/drivers/net/skfp/skfddi.c
index 7cf9b9f35dee..a2b092bb3626 100644
--- a/drivers/net/skfp/skfddi.c
+++ b/drivers/net/skfp/skfddi.c
@@ -495,7 +495,7 @@ static int skfp_open(struct net_device *dev)
495 495
496 PRINTK(KERN_INFO "entering skfp_open\n"); 496 PRINTK(KERN_INFO "entering skfp_open\n");
497 /* Register IRQ - support shared interrupts by passing device ptr */ 497 /* Register IRQ - support shared interrupts by passing device ptr */
498 err = request_irq(dev->irq, (void *) skfp_interrupt, IRQF_SHARED, 498 err = request_irq(dev->irq, skfp_interrupt, IRQF_SHARED,
499 dev->name, dev); 499 dev->name, dev);
500 if (err) 500 if (err)
501 return err; 501 return err;
@@ -1644,7 +1644,7 @@ void mac_drv_rx_complete(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd,
1644 // Get RIF length from Routing Control (RC) field. 1644 // Get RIF length from Routing Control (RC) field.
1645 cp = virt + FDDI_MAC_HDR_LEN; // Point behind MAC header. 1645 cp = virt + FDDI_MAC_HDR_LEN; // Point behind MAC header.
1646 1646
1647 ri = ntohs(*((unsigned short *) cp)); 1647 ri = ntohs(*((__be16 *) cp));
1648 RifLength = ri & FDDI_RCF_LEN_MASK; 1648 RifLength = ri & FDDI_RCF_LEN_MASK;
1649 if (len < (int) (FDDI_MAC_HDR_LEN + RifLength)) { 1649 if (len < (int) (FDDI_MAC_HDR_LEN + RifLength)) {
1650 printk("fddi: Invalid RIF.\n"); 1650 printk("fddi: Invalid RIF.\n");
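
skfddi.c gets the matching big-endian annotation: the 16-bit Routing Control word at the start of the RIF is in on-wire (network) byte order, so it is read through a __be16 pointer and converted with ntohs(). The idiom in isolation, with hypothetical names:

#include <linux/types.h>
#include <asm/byteorder.h>

/* read a 16-bit big-endian (network-order) field from a packet buffer */
static u16 read_be16(const u8 *p)
{
	return be16_to_cpu(*(const __be16 *)p);	/* equivalent to ntohs() here */
}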
diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c
index 63a54e29d563..600b92af3334 100644
--- a/drivers/net/smc91x.c
+++ b/drivers/net/smc91x.c
@@ -220,22 +220,22 @@ static void PRINT_PKT(u_char *buf, int length)
220 220
221 221
222/* this enables an interrupt in the interrupt mask register */ 222/* this enables an interrupt in the interrupt mask register */
223#define SMC_ENABLE_INT(x) do { \ 223#define SMC_ENABLE_INT(lp, x) do { \
224 unsigned char mask; \ 224 unsigned char mask; \
225 spin_lock_irq(&lp->lock); \ 225 spin_lock_irq(&lp->lock); \
226 mask = SMC_GET_INT_MASK(); \ 226 mask = SMC_GET_INT_MASK(lp); \
227 mask |= (x); \ 227 mask |= (x); \
228 SMC_SET_INT_MASK(mask); \ 228 SMC_SET_INT_MASK(lp, mask); \
229 spin_unlock_irq(&lp->lock); \ 229 spin_unlock_irq(&lp->lock); \
230} while (0) 230} while (0)
231 231
232/* this disables an interrupt from the interrupt mask register */ 232/* this disables an interrupt from the interrupt mask register */
233#define SMC_DISABLE_INT(x) do { \ 233#define SMC_DISABLE_INT(lp, x) do { \
234 unsigned char mask; \ 234 unsigned char mask; \
235 spin_lock_irq(&lp->lock); \ 235 spin_lock_irq(&lp->lock); \
236 mask = SMC_GET_INT_MASK(); \ 236 mask = SMC_GET_INT_MASK(lp); \
237 mask &= ~(x); \ 237 mask &= ~(x); \
238 SMC_SET_INT_MASK(mask); \ 238 SMC_SET_INT_MASK(lp, mask); \
239 spin_unlock_irq(&lp->lock); \ 239 spin_unlock_irq(&lp->lock); \
240} while (0) 240} while (0)
241 241
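
The smc91x register helpers change shape in the same mechanical way throughout this file: macros that used to rely on an ioaddr variable being in the caller's scope now take the per-device struct smc_local pointer, so the register base (lp->base, as the surrounding functions show) travels with the device instead of being implicit. A reduced, hypothetical pair of accessors in that style (the names and bodies below are illustrative, not smc91x's real expansions):

#define EXAMPLE_GET_REG(lp, off)	readw((lp)->base + (off))
#define EXAMPLE_SET_REG(lp, off, val)	writew((val), (lp)->base + (off))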
@@ -244,10 +244,10 @@ static void PRINT_PKT(u_char *buf, int length)
244 * if at all, but let's avoid deadlocking the system if the hardware 244 * if at all, but let's avoid deadlocking the system if the hardware
245 * decides to go south. 245 * decides to go south.
246 */ 246 */
247#define SMC_WAIT_MMU_BUSY() do { \ 247#define SMC_WAIT_MMU_BUSY(lp) do { \
248 if (unlikely(SMC_GET_MMU_CMD() & MC_BUSY)) { \ 248 if (unlikely(SMC_GET_MMU_CMD(lp) & MC_BUSY)) { \
249 unsigned long timeout = jiffies + 2; \ 249 unsigned long timeout = jiffies + 2; \
250 while (SMC_GET_MMU_CMD() & MC_BUSY) { \ 250 while (SMC_GET_MMU_CMD(lp) & MC_BUSY) { \
251 if (time_after(jiffies, timeout)) { \ 251 if (time_after(jiffies, timeout)) { \
252 printk("%s: timeout %s line %d\n", \ 252 printk("%s: timeout %s line %d\n", \
253 dev->name, __FILE__, __LINE__); \ 253 dev->name, __FILE__, __LINE__); \
@@ -273,8 +273,8 @@ static void smc_reset(struct net_device *dev)
273 273
274 /* Disable all interrupts, block TX tasklet */ 274 /* Disable all interrupts, block TX tasklet */
275 spin_lock_irq(&lp->lock); 275 spin_lock_irq(&lp->lock);
276 SMC_SELECT_BANK(2); 276 SMC_SELECT_BANK(lp, 2);
277 SMC_SET_INT_MASK(0); 277 SMC_SET_INT_MASK(lp, 0);
278 pending_skb = lp->pending_tx_skb; 278 pending_skb = lp->pending_tx_skb;
279 lp->pending_tx_skb = NULL; 279 lp->pending_tx_skb = NULL;
280 spin_unlock_irq(&lp->lock); 280 spin_unlock_irq(&lp->lock);
@@ -290,15 +290,15 @@ static void smc_reset(struct net_device *dev)
290 * This resets the registers mostly to defaults, but doesn't 290 * This resets the registers mostly to defaults, but doesn't
291 * affect EEPROM. That seems unnecessary 291 * affect EEPROM. That seems unnecessary
292 */ 292 */
293 SMC_SELECT_BANK(0); 293 SMC_SELECT_BANK(lp, 0);
294 SMC_SET_RCR(RCR_SOFTRST); 294 SMC_SET_RCR(lp, RCR_SOFTRST);
295 295
296 /* 296 /*
297 * Setup the Configuration Register 297 * Setup the Configuration Register
298 * This is necessary because the CONFIG_REG is not affected 298 * This is necessary because the CONFIG_REG is not affected
299 * by a soft reset 299 * by a soft reset
300 */ 300 */
301 SMC_SELECT_BANK(1); 301 SMC_SELECT_BANK(lp, 1);
302 302
303 cfg = CONFIG_DEFAULT; 303 cfg = CONFIG_DEFAULT;
304 304
@@ -316,7 +316,7 @@ static void smc_reset(struct net_device *dev)
316 */ 316 */
317 cfg |= CONFIG_EPH_POWER_EN; 317 cfg |= CONFIG_EPH_POWER_EN;
318 318
319 SMC_SET_CONFIG(cfg); 319 SMC_SET_CONFIG(lp, cfg);
320 320
321 /* this should pause enough for the chip to be happy */ 321 /* this should pause enough for the chip to be happy */
322 /* 322 /*
@@ -329,12 +329,12 @@ static void smc_reset(struct net_device *dev)
329 udelay(1); 329 udelay(1);
330 330
331 /* Disable transmit and receive functionality */ 331 /* Disable transmit and receive functionality */
332 SMC_SELECT_BANK(0); 332 SMC_SELECT_BANK(lp, 0);
333 SMC_SET_RCR(RCR_CLEAR); 333 SMC_SET_RCR(lp, RCR_CLEAR);
334 SMC_SET_TCR(TCR_CLEAR); 334 SMC_SET_TCR(lp, TCR_CLEAR);
335 335
336 SMC_SELECT_BANK(1); 336 SMC_SELECT_BANK(lp, 1);
337 ctl = SMC_GET_CTL() | CTL_LE_ENABLE; 337 ctl = SMC_GET_CTL(lp) | CTL_LE_ENABLE;
338 338
339 /* 339 /*
340 * Set the control register to automatically release successfully 340 * Set the control register to automatically release successfully
@@ -345,12 +345,12 @@ static void smc_reset(struct net_device *dev)
345 ctl |= CTL_AUTO_RELEASE; 345 ctl |= CTL_AUTO_RELEASE;
346 else 346 else
347 ctl &= ~CTL_AUTO_RELEASE; 347 ctl &= ~CTL_AUTO_RELEASE;
348 SMC_SET_CTL(ctl); 348 SMC_SET_CTL(lp, ctl);
349 349
350 /* Reset the MMU */ 350 /* Reset the MMU */
351 SMC_SELECT_BANK(2); 351 SMC_SELECT_BANK(lp, 2);
352 SMC_SET_MMU_CMD(MC_RESET); 352 SMC_SET_MMU_CMD(lp, MC_RESET);
353 SMC_WAIT_MMU_BUSY(); 353 SMC_WAIT_MMU_BUSY(lp);
354} 354}
355 355
356/* 356/*
@@ -365,19 +365,19 @@ static void smc_enable(struct net_device *dev)
365 DBG(2, "%s: %s\n", dev->name, __FUNCTION__); 365 DBG(2, "%s: %s\n", dev->name, __FUNCTION__);
366 366
367 /* see the header file for options in TCR/RCR DEFAULT */ 367 /* see the header file for options in TCR/RCR DEFAULT */
368 SMC_SELECT_BANK(0); 368 SMC_SELECT_BANK(lp, 0);
369 SMC_SET_TCR(lp->tcr_cur_mode); 369 SMC_SET_TCR(lp, lp->tcr_cur_mode);
370 SMC_SET_RCR(lp->rcr_cur_mode); 370 SMC_SET_RCR(lp, lp->rcr_cur_mode);
371 371
372 SMC_SELECT_BANK(1); 372 SMC_SELECT_BANK(lp, 1);
373 SMC_SET_MAC_ADDR(dev->dev_addr); 373 SMC_SET_MAC_ADDR(lp, dev->dev_addr);
374 374
375 /* now, enable interrupts */ 375 /* now, enable interrupts */
376 mask = IM_EPH_INT|IM_RX_OVRN_INT|IM_RCV_INT; 376 mask = IM_EPH_INT|IM_RX_OVRN_INT|IM_RCV_INT;
377 if (lp->version >= (CHIP_91100 << 4)) 377 if (lp->version >= (CHIP_91100 << 4))
378 mask |= IM_MDINT; 378 mask |= IM_MDINT;
379 SMC_SELECT_BANK(2); 379 SMC_SELECT_BANK(lp, 2);
380 SMC_SET_INT_MASK(mask); 380 SMC_SET_INT_MASK(lp, mask);
381 381
382 /* 382 /*
383 * From this point the register bank must _NOT_ be switched away 383 * From this point the register bank must _NOT_ be switched away
@@ -400,8 +400,8 @@ static void smc_shutdown(struct net_device *dev)
400 400
401 /* no more interrupts for me */ 401 /* no more interrupts for me */
402 spin_lock_irq(&lp->lock); 402 spin_lock_irq(&lp->lock);
403 SMC_SELECT_BANK(2); 403 SMC_SELECT_BANK(lp, 2);
404 SMC_SET_INT_MASK(0); 404 SMC_SET_INT_MASK(lp, 0);
405 pending_skb = lp->pending_tx_skb; 405 pending_skb = lp->pending_tx_skb;
406 lp->pending_tx_skb = NULL; 406 lp->pending_tx_skb = NULL;
407 spin_unlock_irq(&lp->lock); 407 spin_unlock_irq(&lp->lock);
@@ -409,14 +409,14 @@ static void smc_shutdown(struct net_device *dev)
409 dev_kfree_skb(pending_skb); 409 dev_kfree_skb(pending_skb);
410 410
411 /* and tell the card to stay away from that nasty outside world */ 411 /* and tell the card to stay away from that nasty outside world */
412 SMC_SELECT_BANK(0); 412 SMC_SELECT_BANK(lp, 0);
413 SMC_SET_RCR(RCR_CLEAR); 413 SMC_SET_RCR(lp, RCR_CLEAR);
414 SMC_SET_TCR(TCR_CLEAR); 414 SMC_SET_TCR(lp, TCR_CLEAR);
415 415
416#ifdef POWER_DOWN 416#ifdef POWER_DOWN
417 /* finally, shut the chip down */ 417 /* finally, shut the chip down */
418 SMC_SELECT_BANK(1); 418 SMC_SELECT_BANK(lp, 1);
419 SMC_SET_CONFIG(SMC_GET_CONFIG() & ~CONFIG_EPH_POWER_EN); 419 SMC_SET_CONFIG(lp, SMC_GET_CONFIG(lp) & ~CONFIG_EPH_POWER_EN);
420#endif 420#endif
421} 421}
422 422
@@ -431,17 +431,17 @@ static inline void smc_rcv(struct net_device *dev)
431 431
432 DBG(3, "%s: %s\n", dev->name, __FUNCTION__); 432 DBG(3, "%s: %s\n", dev->name, __FUNCTION__);
433 433
434 packet_number = SMC_GET_RXFIFO(); 434 packet_number = SMC_GET_RXFIFO(lp);
435 if (unlikely(packet_number & RXFIFO_REMPTY)) { 435 if (unlikely(packet_number & RXFIFO_REMPTY)) {
436 PRINTK("%s: smc_rcv with nothing on FIFO.\n", dev->name); 436 PRINTK("%s: smc_rcv with nothing on FIFO.\n", dev->name);
437 return; 437 return;
438 } 438 }
439 439
440 /* read from start of packet */ 440 /* read from start of packet */
441 SMC_SET_PTR(PTR_READ | PTR_RCV | PTR_AUTOINC); 441 SMC_SET_PTR(lp, PTR_READ | PTR_RCV | PTR_AUTOINC);
442 442
443 /* First two words are status and packet length */ 443 /* First two words are status and packet length */
444 SMC_GET_PKT_HDR(status, packet_len); 444 SMC_GET_PKT_HDR(lp, status, packet_len);
445 packet_len &= 0x07ff; /* mask off top bits */ 445 packet_len &= 0x07ff; /* mask off top bits */
446 DBG(2, "%s: RX PNR 0x%x STATUS 0x%04x LENGTH 0x%04x (%d)\n", 446 DBG(2, "%s: RX PNR 0x%x STATUS 0x%04x LENGTH 0x%04x (%d)\n",
447 dev->name, packet_number, status, 447 dev->name, packet_number, status,
@@ -460,8 +460,8 @@ static inline void smc_rcv(struct net_device *dev)
460 dev->name, packet_len, status); 460 dev->name, packet_len, status);
461 status |= RS_TOOSHORT; 461 status |= RS_TOOSHORT;
462 } 462 }
463 SMC_WAIT_MMU_BUSY(); 463 SMC_WAIT_MMU_BUSY(lp);
464 SMC_SET_MMU_CMD(MC_RELEASE); 464 SMC_SET_MMU_CMD(lp, MC_RELEASE);
465 dev->stats.rx_errors++; 465 dev->stats.rx_errors++;
466 if (status & RS_ALGNERR) 466 if (status & RS_ALGNERR)
467 dev->stats.rx_frame_errors++; 467 dev->stats.rx_frame_errors++;
@@ -490,8 +490,8 @@ static inline void smc_rcv(struct net_device *dev)
490 if (unlikely(skb == NULL)) { 490 if (unlikely(skb == NULL)) {
491 printk(KERN_NOTICE "%s: Low memory, packet dropped.\n", 491 printk(KERN_NOTICE "%s: Low memory, packet dropped.\n",
492 dev->name); 492 dev->name);
493 SMC_WAIT_MMU_BUSY(); 493 SMC_WAIT_MMU_BUSY(lp);
494 SMC_SET_MMU_CMD(MC_RELEASE); 494 SMC_SET_MMU_CMD(lp, MC_RELEASE);
495 dev->stats.rx_dropped++; 495 dev->stats.rx_dropped++;
496 return; 496 return;
497 } 497 }
@@ -510,10 +510,10 @@ static inline void smc_rcv(struct net_device *dev)
510 */ 510 */
511 data_len = packet_len - ((status & RS_ODDFRAME) ? 5 : 6); 511 data_len = packet_len - ((status & RS_ODDFRAME) ? 5 : 6);
512 data = skb_put(skb, data_len); 512 data = skb_put(skb, data_len);
513 SMC_PULL_DATA(data, packet_len - 4); 513 SMC_PULL_DATA(lp, data, packet_len - 4);
514 514
515 SMC_WAIT_MMU_BUSY(); 515 SMC_WAIT_MMU_BUSY(lp);
516 SMC_SET_MMU_CMD(MC_RELEASE); 516 SMC_SET_MMU_CMD(lp, MC_RELEASE);
517 517
518 PRINT_PKT(data, packet_len - 4); 518 PRINT_PKT(data, packet_len - 4);
519 519
@@ -591,7 +591,7 @@ static void smc_hardware_send_pkt(unsigned long data)
591 } 591 }
592 lp->pending_tx_skb = NULL; 592 lp->pending_tx_skb = NULL;
593 593
594 packet_no = SMC_GET_AR(); 594 packet_no = SMC_GET_AR(lp);
595 if (unlikely(packet_no & AR_FAILED)) { 595 if (unlikely(packet_no & AR_FAILED)) {
596 printk("%s: Memory allocation failed.\n", dev->name); 596 printk("%s: Memory allocation failed.\n", dev->name);
597 dev->stats.tx_errors++; 597 dev->stats.tx_errors++;
@@ -601,8 +601,8 @@ static void smc_hardware_send_pkt(unsigned long data)
601 } 601 }
602 602
603 /* point to the beginning of the packet */ 603 /* point to the beginning of the packet */
604 SMC_SET_PN(packet_no); 604 SMC_SET_PN(lp, packet_no);
605 SMC_SET_PTR(PTR_AUTOINC); 605 SMC_SET_PTR(lp, PTR_AUTOINC);
606 606
607 buf = skb->data; 607 buf = skb->data;
608 len = skb->len; 608 len = skb->len;
@@ -614,13 +614,13 @@ static void smc_hardware_send_pkt(unsigned long data)
614 * Send the packet length (+6 for status words, length, and ctl. 614 * Send the packet length (+6 for status words, length, and ctl.
615 * The card will pad to 64 bytes with zeroes if packet is too small. 615 * The card will pad to 64 bytes with zeroes if packet is too small.
616 */ 616 */
617 SMC_PUT_PKT_HDR(0, len + 6); 617 SMC_PUT_PKT_HDR(lp, 0, len + 6);
618 618
619 /* send the actual data */ 619 /* send the actual data */
620 SMC_PUSH_DATA(buf, len & ~1); 620 SMC_PUSH_DATA(lp, buf, len & ~1);
621 621
622 /* Send final ctl word with the last byte if there is one */ 622 /* Send final ctl word with the last byte if there is one */
623 SMC_outw(((len & 1) ? (0x2000 | buf[len-1]) : 0), ioaddr, DATA_REG); 623 SMC_outw(((len & 1) ? (0x2000 | buf[len-1]) : 0), ioaddr, DATA_REG(lp));
624 624
625 /* 625 /*
626 * If THROTTLE_TX_PKTS is set, we stop the queue here. This will 626 * If THROTTLE_TX_PKTS is set, we stop the queue here. This will
@@ -634,14 +634,14 @@ static void smc_hardware_send_pkt(unsigned long data)
634 netif_stop_queue(dev); 634 netif_stop_queue(dev);
635 635
636 /* queue the packet for TX */ 636 /* queue the packet for TX */
637 SMC_SET_MMU_CMD(MC_ENQUEUE); 637 SMC_SET_MMU_CMD(lp, MC_ENQUEUE);
638 smc_special_unlock(&lp->lock); 638 smc_special_unlock(&lp->lock);
639 639
640 dev->trans_start = jiffies; 640 dev->trans_start = jiffies;
641 dev->stats.tx_packets++; 641 dev->stats.tx_packets++;
642 dev->stats.tx_bytes += len; 642 dev->stats.tx_bytes += len;
643 643
644 SMC_ENABLE_INT(IM_TX_INT | IM_TX_EMPTY_INT); 644 SMC_ENABLE_INT(lp, IM_TX_INT | IM_TX_EMPTY_INT);
645 645
646done: if (!THROTTLE_TX_PKTS) 646done: if (!THROTTLE_TX_PKTS)
647 netif_wake_queue(dev); 647 netif_wake_queue(dev);
@@ -688,7 +688,7 @@ static int smc_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
688 smc_special_lock(&lp->lock); 688 smc_special_lock(&lp->lock);
689 689
690 /* now, try to allocate the memory */ 690 /* now, try to allocate the memory */
691 SMC_SET_MMU_CMD(MC_ALLOC | numPages); 691 SMC_SET_MMU_CMD(lp, MC_ALLOC | numPages);
692 692
693 /* 693 /*
694 * Poll the chip for a short amount of time in case the 694 * Poll the chip for a short amount of time in case the
@@ -696,9 +696,9 @@ static int smc_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
696 */ 696 */
697 poll_count = MEMORY_WAIT_TIME; 697 poll_count = MEMORY_WAIT_TIME;
698 do { 698 do {
699 status = SMC_GET_INT(); 699 status = SMC_GET_INT(lp);
700 if (status & IM_ALLOC_INT) { 700 if (status & IM_ALLOC_INT) {
701 SMC_ACK_INT(IM_ALLOC_INT); 701 SMC_ACK_INT(lp, IM_ALLOC_INT);
702 break; 702 break;
703 } 703 }
704 } while (--poll_count); 704 } while (--poll_count);
@@ -710,7 +710,7 @@ static int smc_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
710 /* oh well, wait until the chip finds memory later */ 710 /* oh well, wait until the chip finds memory later */
711 netif_stop_queue(dev); 711 netif_stop_queue(dev);
712 DBG(2, "%s: TX memory allocation deferred.\n", dev->name); 712 DBG(2, "%s: TX memory allocation deferred.\n", dev->name);
713 SMC_ENABLE_INT(IM_ALLOC_INT); 713 SMC_ENABLE_INT(lp, IM_ALLOC_INT);
714 } else { 714 } else {
715 /* 715 /*
716 * Allocation succeeded: push packet to the chip's own memory 716 * Allocation succeeded: push packet to the chip's own memory
@@ -736,19 +736,19 @@ static void smc_tx(struct net_device *dev)
736 DBG(3, "%s: %s\n", dev->name, __FUNCTION__); 736 DBG(3, "%s: %s\n", dev->name, __FUNCTION__);
737 737
738 /* If the TX FIFO is empty then nothing to do */ 738 /* If the TX FIFO is empty then nothing to do */
739 packet_no = SMC_GET_TXFIFO(); 739 packet_no = SMC_GET_TXFIFO(lp);
740 if (unlikely(packet_no & TXFIFO_TEMPTY)) { 740 if (unlikely(packet_no & TXFIFO_TEMPTY)) {
741 PRINTK("%s: smc_tx with nothing on FIFO.\n", dev->name); 741 PRINTK("%s: smc_tx with nothing on FIFO.\n", dev->name);
742 return; 742 return;
743 } 743 }
744 744
745 /* select packet to read from */ 745 /* select packet to read from */
746 saved_packet = SMC_GET_PN(); 746 saved_packet = SMC_GET_PN(lp);
747 SMC_SET_PN(packet_no); 747 SMC_SET_PN(lp, packet_no);
748 748
749 /* read the first word (status word) from this packet */ 749 /* read the first word (status word) from this packet */
750 SMC_SET_PTR(PTR_AUTOINC | PTR_READ); 750 SMC_SET_PTR(lp, PTR_AUTOINC | PTR_READ);
751 SMC_GET_PKT_HDR(tx_status, pkt_len); 751 SMC_GET_PKT_HDR(lp, tx_status, pkt_len);
752 DBG(2, "%s: TX STATUS 0x%04x PNR 0x%02x\n", 752 DBG(2, "%s: TX STATUS 0x%04x PNR 0x%02x\n",
753 dev->name, tx_status, packet_no); 753 dev->name, tx_status, packet_no);
754 754
@@ -771,17 +771,17 @@ static void smc_tx(struct net_device *dev)
771 } 771 }
772 772
773 /* kill the packet */ 773 /* kill the packet */
774 SMC_WAIT_MMU_BUSY(); 774 SMC_WAIT_MMU_BUSY(lp);
775 SMC_SET_MMU_CMD(MC_FREEPKT); 775 SMC_SET_MMU_CMD(lp, MC_FREEPKT);
776 776
777 /* Don't restore Packet Number Reg until busy bit is cleared */ 777 /* Don't restore Packet Number Reg until busy bit is cleared */
778 SMC_WAIT_MMU_BUSY(); 778 SMC_WAIT_MMU_BUSY(lp);
779 SMC_SET_PN(saved_packet); 779 SMC_SET_PN(lp, saved_packet);
780 780
781 /* re-enable transmit */ 781 /* re-enable transmit */
782 SMC_SELECT_BANK(0); 782 SMC_SELECT_BANK(lp, 0);
783 SMC_SET_TCR(lp->tcr_cur_mode); 783 SMC_SET_TCR(lp, lp->tcr_cur_mode);
784 SMC_SELECT_BANK(2); 784 SMC_SELECT_BANK(lp, 2);
785} 785}
786 786
787 787
@@ -793,7 +793,7 @@ static void smc_mii_out(struct net_device *dev, unsigned int val, int bits)
793 void __iomem *ioaddr = lp->base; 793 void __iomem *ioaddr = lp->base;
794 unsigned int mii_reg, mask; 794 unsigned int mii_reg, mask;
795 795
796 mii_reg = SMC_GET_MII() & ~(MII_MCLK | MII_MDOE | MII_MDO); 796 mii_reg = SMC_GET_MII(lp) & ~(MII_MCLK | MII_MDOE | MII_MDO);
797 mii_reg |= MII_MDOE; 797 mii_reg |= MII_MDOE;
798 798
799 for (mask = 1 << (bits - 1); mask; mask >>= 1) { 799 for (mask = 1 << (bits - 1); mask; mask >>= 1) {
@@ -802,9 +802,9 @@ static void smc_mii_out(struct net_device *dev, unsigned int val, int bits)
802 else 802 else
803 mii_reg &= ~MII_MDO; 803 mii_reg &= ~MII_MDO;
804 804
805 SMC_SET_MII(mii_reg); 805 SMC_SET_MII(lp, mii_reg);
806 udelay(MII_DELAY); 806 udelay(MII_DELAY);
807 SMC_SET_MII(mii_reg | MII_MCLK); 807 SMC_SET_MII(lp, mii_reg | MII_MCLK);
808 udelay(MII_DELAY); 808 udelay(MII_DELAY);
809 } 809 }
810} 810}
@@ -815,16 +815,16 @@ static unsigned int smc_mii_in(struct net_device *dev, int bits)
815 void __iomem *ioaddr = lp->base; 815 void __iomem *ioaddr = lp->base;
816 unsigned int mii_reg, mask, val; 816 unsigned int mii_reg, mask, val;
817 817
818 mii_reg = SMC_GET_MII() & ~(MII_MCLK | MII_MDOE | MII_MDO); 818 mii_reg = SMC_GET_MII(lp) & ~(MII_MCLK | MII_MDOE | MII_MDO);
819 SMC_SET_MII(mii_reg); 819 SMC_SET_MII(lp, mii_reg);
820 820
821 for (mask = 1 << (bits - 1), val = 0; mask; mask >>= 1) { 821 for (mask = 1 << (bits - 1), val = 0; mask; mask >>= 1) {
822 if (SMC_GET_MII() & MII_MDI) 822 if (SMC_GET_MII(lp) & MII_MDI)
823 val |= mask; 823 val |= mask;
824 824
825 SMC_SET_MII(mii_reg); 825 SMC_SET_MII(lp, mii_reg);
826 udelay(MII_DELAY); 826 udelay(MII_DELAY);
827 SMC_SET_MII(mii_reg | MII_MCLK); 827 SMC_SET_MII(lp, mii_reg | MII_MCLK);
828 udelay(MII_DELAY); 828 udelay(MII_DELAY);
829 } 829 }
830 830
@@ -840,7 +840,7 @@ static int smc_phy_read(struct net_device *dev, int phyaddr, int phyreg)
840 void __iomem *ioaddr = lp->base; 840 void __iomem *ioaddr = lp->base;
841 unsigned int phydata; 841 unsigned int phydata;
842 842
843 SMC_SELECT_BANK(3); 843 SMC_SELECT_BANK(lp, 3);
844 844
845 /* Idle - 32 ones */ 845 /* Idle - 32 ones */
846 smc_mii_out(dev, 0xffffffff, 32); 846 smc_mii_out(dev, 0xffffffff, 32);
@@ -852,12 +852,12 @@ static int smc_phy_read(struct net_device *dev, int phyaddr, int phyreg)
852 phydata = smc_mii_in(dev, 18); 852 phydata = smc_mii_in(dev, 18);
853 853
854 /* Return to idle state */ 854 /* Return to idle state */
855 SMC_SET_MII(SMC_GET_MII() & ~(MII_MCLK|MII_MDOE|MII_MDO)); 855 SMC_SET_MII(lp, SMC_GET_MII(lp) & ~(MII_MCLK|MII_MDOE|MII_MDO));
856 856
857 DBG(3, "%s: phyaddr=0x%x, phyreg=0x%x, phydata=0x%x\n", 857 DBG(3, "%s: phyaddr=0x%x, phyreg=0x%x, phydata=0x%x\n",
858 __FUNCTION__, phyaddr, phyreg, phydata); 858 __FUNCTION__, phyaddr, phyreg, phydata);
859 859
860 SMC_SELECT_BANK(2); 860 SMC_SELECT_BANK(lp, 2);
861 return phydata; 861 return phydata;
862} 862}
863 863
@@ -870,7 +870,7 @@ static void smc_phy_write(struct net_device *dev, int phyaddr, int phyreg,
870 struct smc_local *lp = netdev_priv(dev); 870 struct smc_local *lp = netdev_priv(dev);
871 void __iomem *ioaddr = lp->base; 871 void __iomem *ioaddr = lp->base;
872 872
873 SMC_SELECT_BANK(3); 873 SMC_SELECT_BANK(lp, 3);
874 874
875 /* Idle - 32 ones */ 875 /* Idle - 32 ones */
876 smc_mii_out(dev, 0xffffffff, 32); 876 smc_mii_out(dev, 0xffffffff, 32);
@@ -879,12 +879,12 @@ static void smc_phy_write(struct net_device *dev, int phyaddr, int phyreg,
879 smc_mii_out(dev, 5 << 28 | phyaddr << 23 | phyreg << 18 | 2 << 16 | phydata, 32); 879 smc_mii_out(dev, 5 << 28 | phyaddr << 23 | phyreg << 18 | 2 << 16 | phydata, 32);
880 880
881 /* Return to idle state */ 881 /* Return to idle state */
882 SMC_SET_MII(SMC_GET_MII() & ~(MII_MCLK|MII_MDOE|MII_MDO)); 882 SMC_SET_MII(lp, SMC_GET_MII(lp) & ~(MII_MCLK|MII_MDOE|MII_MDO));
883 883
884 DBG(3, "%s: phyaddr=0x%x, phyreg=0x%x, phydata=0x%x\n", 884 DBG(3, "%s: phyaddr=0x%x, phyreg=0x%x, phydata=0x%x\n",
885 __FUNCTION__, phyaddr, phyreg, phydata); 885 __FUNCTION__, phyaddr, phyreg, phydata);
886 886
887 SMC_SELECT_BANK(2); 887 SMC_SELECT_BANK(lp, 2);
888} 888}
889 889
890/* 890/*
@@ -957,9 +957,9 @@ static int smc_phy_fixed(struct net_device *dev)
957 smc_phy_write(dev, phyaddr, MII_BMCR, bmcr); 957 smc_phy_write(dev, phyaddr, MII_BMCR, bmcr);
958 958
959 /* Re-Configure the Receive/Phy Control register */ 959 /* Re-Configure the Receive/Phy Control register */
960 SMC_SELECT_BANK(0); 960 SMC_SELECT_BANK(lp, 0);
961 SMC_SET_RPC(lp->rpc_cur_mode); 961 SMC_SET_RPC(lp, lp->rpc_cur_mode);
962 SMC_SELECT_BANK(2); 962 SMC_SELECT_BANK(lp, 2);
963 963
964 return 1; 964 return 1;
965} 965}
@@ -1050,8 +1050,8 @@ static void smc_phy_check_media(struct net_device *dev, int init)
1050 lp->tcr_cur_mode &= ~TCR_SWFDUP; 1050 lp->tcr_cur_mode &= ~TCR_SWFDUP;
1051 } 1051 }
1052 1052
1053 SMC_SELECT_BANK(0); 1053 SMC_SELECT_BANK(lp, 0);
1054 SMC_SET_TCR(lp->tcr_cur_mode); 1054 SMC_SET_TCR(lp, lp->tcr_cur_mode);
1055 } 1055 }
1056} 1056}
1057 1057
@@ -1100,8 +1100,8 @@ static void smc_phy_configure(struct work_struct *work)
1100 PHY_INT_SPDDET | PHY_INT_DPLXDET); 1100 PHY_INT_SPDDET | PHY_INT_DPLXDET);
1101 1101
1102 /* Configure the Receive/Phy Control register */ 1102 /* Configure the Receive/Phy Control register */
1103 SMC_SELECT_BANK(0); 1103 SMC_SELECT_BANK(lp, 0);
1104 SMC_SET_RPC(lp->rpc_cur_mode); 1104 SMC_SET_RPC(lp, lp->rpc_cur_mode);
1105 1105
1106 /* If the user requested no auto neg, then go set his request */ 1106 /* If the user requested no auto neg, then go set his request */
1107 if (lp->mii.force_media) { 1107 if (lp->mii.force_media) {
@@ -1158,7 +1158,7 @@ static void smc_phy_configure(struct work_struct *work)
1158 smc_phy_check_media(dev, 1); 1158 smc_phy_check_media(dev, 1);
1159 1159
1160smc_phy_configure_exit: 1160smc_phy_configure_exit:
1161 SMC_SELECT_BANK(2); 1161 SMC_SELECT_BANK(lp, 2);
1162 spin_unlock_irq(&lp->lock); 1162 spin_unlock_irq(&lp->lock);
1163 lp->work_pending = 0; 1163 lp->work_pending = 0;
1164} 1164}
@@ -1200,9 +1200,9 @@ static void smc_10bt_check_media(struct net_device *dev, int init)
1200 1200
1201 old_carrier = netif_carrier_ok(dev) ? 1 : 0; 1201 old_carrier = netif_carrier_ok(dev) ? 1 : 0;
1202 1202
1203 SMC_SELECT_BANK(0); 1203 SMC_SELECT_BANK(lp, 0);
1204 new_carrier = (SMC_GET_EPH_STATUS() & ES_LINK_OK) ? 1 : 0; 1204 new_carrier = (SMC_GET_EPH_STATUS(lp) & ES_LINK_OK) ? 1 : 0;
1205 SMC_SELECT_BANK(2); 1205 SMC_SELECT_BANK(lp, 2);
1206 1206
1207 if (init || (old_carrier != new_carrier)) { 1207 if (init || (old_carrier != new_carrier)) {
1208 if (!new_carrier) { 1208 if (!new_carrier) {
@@ -1224,11 +1224,11 @@ static void smc_eph_interrupt(struct net_device *dev)
1224 1224
1225 smc_10bt_check_media(dev, 0); 1225 smc_10bt_check_media(dev, 0);
1226 1226
1227 SMC_SELECT_BANK(1); 1227 SMC_SELECT_BANK(lp, 1);
1228 ctl = SMC_GET_CTL(); 1228 ctl = SMC_GET_CTL(lp);
1229 SMC_SET_CTL(ctl & ~CTL_LE_ENABLE); 1229 SMC_SET_CTL(lp, ctl & ~CTL_LE_ENABLE);
1230 SMC_SET_CTL(ctl); 1230 SMC_SET_CTL(lp, ctl);
1231 SMC_SELECT_BANK(2); 1231 SMC_SELECT_BANK(lp, 2);
1232} 1232}
1233 1233
1234/* 1234/*
@@ -1252,22 +1252,22 @@ static irqreturn_t smc_interrupt(int irq, void *dev_id)
1252 * ISR. */ 1252 * ISR. */
1253 SMC_INTERRUPT_PREAMBLE; 1253 SMC_INTERRUPT_PREAMBLE;
1254 1254
1255 saved_pointer = SMC_GET_PTR(); 1255 saved_pointer = SMC_GET_PTR(lp);
1256 mask = SMC_GET_INT_MASK(); 1256 mask = SMC_GET_INT_MASK(lp);
1257 SMC_SET_INT_MASK(0); 1257 SMC_SET_INT_MASK(lp, 0);
1258 1258
1259 /* set a timeout value, so I don't stay here forever */ 1259 /* set a timeout value, so I don't stay here forever */
1260 timeout = MAX_IRQ_LOOPS; 1260 timeout = MAX_IRQ_LOOPS;
1261 1261
1262 do { 1262 do {
1263 status = SMC_GET_INT(); 1263 status = SMC_GET_INT(lp);
1264 1264
1265 DBG(2, "%s: INT 0x%02x MASK 0x%02x MEM 0x%04x FIFO 0x%04x\n", 1265 DBG(2, "%s: INT 0x%02x MASK 0x%02x MEM 0x%04x FIFO 0x%04x\n",
1266 dev->name, status, mask, 1266 dev->name, status, mask,
1267 ({ int meminfo; SMC_SELECT_BANK(0); 1267 ({ int meminfo; SMC_SELECT_BANK(lp, 0);
1268 meminfo = SMC_GET_MIR(); 1268 meminfo = SMC_GET_MIR(lp);
1269 SMC_SELECT_BANK(2); meminfo; }), 1269 SMC_SELECT_BANK(lp, 2); meminfo; }),
1270 SMC_GET_FIFO()); 1270 SMC_GET_FIFO(lp));
1271 1271
1272 status &= mask; 1272 status &= mask;
1273 if (!status) 1273 if (!status)
@@ -1277,7 +1277,7 @@ static irqreturn_t smc_interrupt(int irq, void *dev_id)
1277 /* do this before RX as it will free memory quickly */ 1277 /* do this before RX as it will free memory quickly */
1278 DBG(3, "%s: TX int\n", dev->name); 1278 DBG(3, "%s: TX int\n", dev->name);
1279 smc_tx(dev); 1279 smc_tx(dev);
1280 SMC_ACK_INT(IM_TX_INT); 1280 SMC_ACK_INT(lp, IM_TX_INT);
1281 if (THROTTLE_TX_PKTS) 1281 if (THROTTLE_TX_PKTS)
1282 netif_wake_queue(dev); 1282 netif_wake_queue(dev);
1283 } else if (status & IM_RCV_INT) { 1283 } else if (status & IM_RCV_INT) {
@@ -1292,9 +1292,9 @@ static irqreturn_t smc_interrupt(int irq, void *dev_id)
1292 mask &= ~IM_TX_EMPTY_INT; 1292 mask &= ~IM_TX_EMPTY_INT;
1293 1293
1294 /* update stats */ 1294 /* update stats */
1295 SMC_SELECT_BANK(0); 1295 SMC_SELECT_BANK(lp, 0);
1296 card_stats = SMC_GET_COUNTER(); 1296 card_stats = SMC_GET_COUNTER(lp);
1297 SMC_SELECT_BANK(2); 1297 SMC_SELECT_BANK(lp, 2);
1298 1298
1299 /* single collisions */ 1299 /* single collisions */
1300 dev->stats.collisions += card_stats & 0xF; 1300 dev->stats.collisions += card_stats & 0xF;
@@ -1304,26 +1304,26 @@ static irqreturn_t smc_interrupt(int irq, void *dev_id)
1304 dev->stats.collisions += card_stats & 0xF; 1304 dev->stats.collisions += card_stats & 0xF;
1305 } else if (status & IM_RX_OVRN_INT) { 1305 } else if (status & IM_RX_OVRN_INT) {
1306 DBG(1, "%s: RX overrun (EPH_ST 0x%04x)\n", dev->name, 1306 DBG(1, "%s: RX overrun (EPH_ST 0x%04x)\n", dev->name,
1307 ({ int eph_st; SMC_SELECT_BANK(0); 1307 ({ int eph_st; SMC_SELECT_BANK(lp, 0);
1308 eph_st = SMC_GET_EPH_STATUS(); 1308 eph_st = SMC_GET_EPH_STATUS(lp);
1309 SMC_SELECT_BANK(2); eph_st; }) ); 1309 SMC_SELECT_BANK(lp, 2); eph_st; }));
1310 SMC_ACK_INT(IM_RX_OVRN_INT); 1310 SMC_ACK_INT(lp, IM_RX_OVRN_INT);
1311 dev->stats.rx_errors++; 1311 dev->stats.rx_errors++;
1312 dev->stats.rx_fifo_errors++; 1312 dev->stats.rx_fifo_errors++;
1313 } else if (status & IM_EPH_INT) { 1313 } else if (status & IM_EPH_INT) {
1314 smc_eph_interrupt(dev); 1314 smc_eph_interrupt(dev);
1315 } else if (status & IM_MDINT) { 1315 } else if (status & IM_MDINT) {
1316 SMC_ACK_INT(IM_MDINT); 1316 SMC_ACK_INT(lp, IM_MDINT);
1317 smc_phy_interrupt(dev); 1317 smc_phy_interrupt(dev);
1318 } else if (status & IM_ERCV_INT) { 1318 } else if (status & IM_ERCV_INT) {
1319 SMC_ACK_INT(IM_ERCV_INT); 1319 SMC_ACK_INT(lp, IM_ERCV_INT);
1320 PRINTK("%s: UNSUPPORTED: ERCV INTERRUPT \n", dev->name); 1320 PRINTK("%s: UNSUPPORTED: ERCV INTERRUPT \n", dev->name);
1321 } 1321 }
1322 } while (--timeout); 1322 } while (--timeout);
1323 1323
1324 /* restore register states */ 1324 /* restore register states */
1325 SMC_SET_PTR(saved_pointer); 1325 SMC_SET_PTR(lp, saved_pointer);
1326 SMC_SET_INT_MASK(mask); 1326 SMC_SET_INT_MASK(lp, mask);
1327 spin_unlock(&lp->lock); 1327 spin_unlock(&lp->lock);
1328 1328
1329#ifndef CONFIG_NET_POLL_CONTROLLER 1329#ifndef CONFIG_NET_POLL_CONTROLLER
@@ -1368,13 +1368,13 @@ static void smc_timeout(struct net_device *dev)
1368 DBG(2, "%s: %s\n", dev->name, __FUNCTION__); 1368 DBG(2, "%s: %s\n", dev->name, __FUNCTION__);
1369 1369
1370 spin_lock_irq(&lp->lock); 1370 spin_lock_irq(&lp->lock);
1371 status = SMC_GET_INT(); 1371 status = SMC_GET_INT(lp);
1372 mask = SMC_GET_INT_MASK(); 1372 mask = SMC_GET_INT_MASK(lp);
1373 fifo = SMC_GET_FIFO(); 1373 fifo = SMC_GET_FIFO(lp);
1374 SMC_SELECT_BANK(0); 1374 SMC_SELECT_BANK(lp, 0);
1375 eph_st = SMC_GET_EPH_STATUS(); 1375 eph_st = SMC_GET_EPH_STATUS(lp);
1376 meminfo = SMC_GET_MIR(); 1376 meminfo = SMC_GET_MIR(lp);
1377 SMC_SELECT_BANK(2); 1377 SMC_SELECT_BANK(lp, 2);
1378 spin_unlock_irq(&lp->lock); 1378 spin_unlock_irq(&lp->lock);
1379 PRINTK( "%s: TX timeout (INT 0x%02x INTMASK 0x%02x " 1379 PRINTK( "%s: TX timeout (INT 0x%02x INTMASK 0x%02x "
1380 "MEM 0x%04x FIFO 0x%04x EPH_ST 0x%04x)\n", 1380 "MEM 0x%04x FIFO 0x%04x EPH_ST 0x%04x)\n",
@@ -1494,13 +1494,13 @@ static void smc_set_multicast_list(struct net_device *dev)
1494 } 1494 }
1495 1495
1496 spin_lock_irq(&lp->lock); 1496 spin_lock_irq(&lp->lock);
1497 SMC_SELECT_BANK(0); 1497 SMC_SELECT_BANK(lp, 0);
1498 SMC_SET_RCR(lp->rcr_cur_mode); 1498 SMC_SET_RCR(lp, lp->rcr_cur_mode);
1499 if (update_multicast) { 1499 if (update_multicast) {
1500 SMC_SELECT_BANK(3); 1500 SMC_SELECT_BANK(lp, 3);
1501 SMC_SET_MCAST(multicast_table); 1501 SMC_SET_MCAST(lp, multicast_table);
1502 } 1502 }
1503 SMC_SELECT_BANK(2); 1503 SMC_SELECT_BANK(lp, 2);
1504 spin_unlock_irq(&lp->lock); 1504 spin_unlock_irq(&lp->lock);
1505} 1505}
1506 1506
@@ -1704,8 +1704,9 @@ static const struct ethtool_ops smc_ethtool_ops = {
1704 * I just deleted auto_irq.c, since it was never built... 1704 * I just deleted auto_irq.c, since it was never built...
1705 * --jgarzik 1705 * --jgarzik
1706 */ 1706 */
1707static int __init smc_findirq(void __iomem *ioaddr) 1707static int __init smc_findirq(struct smc_local *lp)
1708{ 1708{
1709 void __iomem *ioaddr = lp->base;
1709 int timeout = 20; 1710 int timeout = 20;
1710 unsigned long cookie; 1711 unsigned long cookie;
1711 1712
@@ -1719,14 +1720,14 @@ static int __init smc_findirq(void __iomem *ioaddr)
1719 * when done. 1720 * when done.
1720 */ 1721 */
1721 /* enable ALLOCation interrupts ONLY */ 1722 /* enable ALLOCation interrupts ONLY */
1722 SMC_SELECT_BANK(2); 1723 SMC_SELECT_BANK(lp, 2);
1723 SMC_SET_INT_MASK(IM_ALLOC_INT); 1724 SMC_SET_INT_MASK(lp, IM_ALLOC_INT);
1724 1725
1725 /* 1726 /*
1726 * Allocate 512 bytes of memory. Note that the chip was just 1727 * Allocate 512 bytes of memory. Note that the chip was just
1727 * reset so all the memory is available 1728 * reset so all the memory is available
1728 */ 1729 */
1729 SMC_SET_MMU_CMD(MC_ALLOC | 1); 1730 SMC_SET_MMU_CMD(lp, MC_ALLOC | 1);
1730 1731
1731 /* 1732 /*
1732 * Wait until positive that the interrupt has been generated 1733 * Wait until positive that the interrupt has been generated
@@ -1734,7 +1735,7 @@ static int __init smc_findirq(void __iomem *ioaddr)
1734 do { 1735 do {
1735 int int_status; 1736 int int_status;
1736 udelay(10); 1737 udelay(10);
1737 int_status = SMC_GET_INT(); 1738 int_status = SMC_GET_INT(lp);
1738 if (int_status & IM_ALLOC_INT) 1739 if (int_status & IM_ALLOC_INT)
1739 break; /* got the interrupt */ 1740 break; /* got the interrupt */
1740 } while (--timeout); 1741 } while (--timeout);
@@ -1747,7 +1748,7 @@ static int __init smc_findirq(void __iomem *ioaddr)
1747 */ 1748 */
1748 1749
1749 /* and disable all interrupts again */ 1750 /* and disable all interrupts again */
1750 SMC_SET_INT_MASK(0); 1751 SMC_SET_INT_MASK(lp, 0);
1751 1752
1752 /* and return what I found */ 1753 /* and return what I found */
1753 return probe_irq_off(cookie); 1754 return probe_irq_off(cookie);
@@ -1790,7 +1791,7 @@ static int __init smc_probe(struct net_device *dev, void __iomem *ioaddr,
1790 DBG(2, "%s: %s\n", CARDNAME, __FUNCTION__); 1791 DBG(2, "%s: %s\n", CARDNAME, __FUNCTION__);
1791 1792
1792 /* First, see if the high byte is 0x33 */ 1793 /* First, see if the high byte is 0x33 */
1793 val = SMC_CURRENT_BANK(); 1794 val = SMC_CURRENT_BANK(lp);
1794 DBG(2, "%s: bank signature probe returned 0x%04x\n", CARDNAME, val); 1795 DBG(2, "%s: bank signature probe returned 0x%04x\n", CARDNAME, val);
1795 if ((val & 0xFF00) != 0x3300) { 1796 if ((val & 0xFF00) != 0x3300) {
1796 if ((val & 0xFF) == 0x33) { 1797 if ((val & 0xFF) == 0x33) {
@@ -1806,8 +1807,8 @@ static int __init smc_probe(struct net_device *dev, void __iomem *ioaddr,
1806 * The above MIGHT indicate a device, but I need to write to 1807 * The above MIGHT indicate a device, but I need to write to
1807 * further test this. 1808 * further test this.
1808 */ 1809 */
1809 SMC_SELECT_BANK(0); 1810 SMC_SELECT_BANK(lp, 0);
1810 val = SMC_CURRENT_BANK(); 1811 val = SMC_CURRENT_BANK(lp);
1811 if ((val & 0xFF00) != 0x3300) { 1812 if ((val & 0xFF00) != 0x3300) {
1812 retval = -ENODEV; 1813 retval = -ENODEV;
1813 goto err_out; 1814 goto err_out;
@@ -1819,8 +1820,8 @@ static int __init smc_probe(struct net_device *dev, void __iomem *ioaddr,
1819 * register to bank 1, so I can access the base address 1820 * register to bank 1, so I can access the base address
1820 * register 1821 * register
1821 */ 1822 */
1822 SMC_SELECT_BANK(1); 1823 SMC_SELECT_BANK(lp, 1);
1823 val = SMC_GET_BASE(); 1824 val = SMC_GET_BASE(lp);
1824 val = ((val & 0x1F00) >> 3) << SMC_IO_SHIFT; 1825 val = ((val & 0x1F00) >> 3) << SMC_IO_SHIFT;
1825 if (((unsigned int)ioaddr & (0x3e0 << SMC_IO_SHIFT)) != val) { 1826 if (((unsigned int)ioaddr & (0x3e0 << SMC_IO_SHIFT)) != val) {
1826 printk("%s: IOADDR %p doesn't match configuration (%x).\n", 1827 printk("%s: IOADDR %p doesn't match configuration (%x).\n",
@@ -1832,8 +1833,8 @@ static int __init smc_probe(struct net_device *dev, void __iomem *ioaddr,
1832 * recognize. These might need to be added to later, 1833 * recognize. These might need to be added to later,
1833 * as future revisions could be added. 1834 * as future revisions could be added.
1834 */ 1835 */
1835 SMC_SELECT_BANK(3); 1836 SMC_SELECT_BANK(lp, 3);
1836 revision_register = SMC_GET_REV(); 1837 revision_register = SMC_GET_REV(lp);
1837 DBG(2, "%s: revision = 0x%04x\n", CARDNAME, revision_register); 1838 DBG(2, "%s: revision = 0x%04x\n", CARDNAME, revision_register);
1838 version_string = chip_ids[ (revision_register >> 4) & 0xF]; 1839 version_string = chip_ids[ (revision_register >> 4) & 0xF];
1839 if (!version_string || (revision_register & 0xff00) != 0x3300) { 1840 if (!version_string || (revision_register & 0xff00) != 0x3300) {
@@ -1857,8 +1858,8 @@ static int __init smc_probe(struct net_device *dev, void __iomem *ioaddr,
1857 spin_lock_init(&lp->lock); 1858 spin_lock_init(&lp->lock);
1858 1859
1859 /* Get the MAC address */ 1860 /* Get the MAC address */
1860 SMC_SELECT_BANK(1); 1861 SMC_SELECT_BANK(lp, 1);
1861 SMC_GET_MAC_ADDR(dev->dev_addr); 1862 SMC_GET_MAC_ADDR(lp, dev->dev_addr);
1862 1863
1863 /* now, reset the chip, and put it into a known state */ 1864 /* now, reset the chip, and put it into a known state */
1864 smc_reset(dev); 1865 smc_reset(dev);
@@ -1883,7 +1884,7 @@ static int __init smc_probe(struct net_device *dev, void __iomem *ioaddr,
1883 1884
1884 trials = 3; 1885 trials = 3;
1885 while (trials--) { 1886 while (trials--) {
1886 dev->irq = smc_findirq(ioaddr); 1887 dev->irq = smc_findirq(lp);
1887 if (dev->irq) 1888 if (dev->irq)
1888 break; 1889 break;
1889 /* kick the card and try again */ 1890 /* kick the card and try again */
@@ -1998,6 +1999,8 @@ err_out:
1998 1999
1999static int smc_enable_device(struct platform_device *pdev) 2000static int smc_enable_device(struct platform_device *pdev)
2000{ 2001{
2002 struct net_device *ndev = platform_get_drvdata(pdev);
2003 struct smc_local *lp = netdev_priv(ndev);
2001 unsigned long flags; 2004 unsigned long flags;
2002 unsigned char ecor, ecsr; 2005 unsigned char ecor, ecsr;
2003 void __iomem *addr; 2006 void __iomem *addr;
@@ -2040,7 +2043,7 @@ static int smc_enable_device(struct platform_device *pdev)
2040 * Set the appropriate byte/word mode. 2043 * Set the appropriate byte/word mode.
2041 */ 2044 */
2042 ecsr = readb(addr + (ECSR << SMC_IO_SHIFT)) & ~ECSR_IOIS8; 2045 ecsr = readb(addr + (ECSR << SMC_IO_SHIFT)) & ~ECSR_IOIS8;
2043 if (!SMC_CAN_USE_16BIT) 2046 if (!SMC_16BIT(lp))
2044 ecsr |= ECSR_IOIS8; 2047 ecsr |= ECSR_IOIS8;
2045 writeb(ecsr, addr + (ECSR << SMC_IO_SHIFT)); 2048 writeb(ecsr, addr + (ECSR << SMC_IO_SHIFT));
2046 local_irq_restore(flags); 2049 local_irq_restore(flags);
@@ -2125,10 +2128,11 @@ static void smc_release_datacs(struct platform_device *pdev, struct net_device *
2125 */ 2128 */
2126static int smc_drv_probe(struct platform_device *pdev) 2129static int smc_drv_probe(struct platform_device *pdev)
2127{ 2130{
2131 struct smc91x_platdata *pd = pdev->dev.platform_data;
2132 struct smc_local *lp;
2128 struct net_device *ndev; 2133 struct net_device *ndev;
2129 struct resource *res, *ires; 2134 struct resource *res, *ires;
2130 unsigned int __iomem *addr; 2135 unsigned int __iomem *addr;
2131 unsigned long irq_flags = SMC_IRQ_FLAGS;
2132 int ret; 2136 int ret;
2133 2137
2134 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-regs"); 2138 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-regs");
@@ -2153,6 +2157,27 @@ static int smc_drv_probe(struct platform_device *pdev)
2153 } 2157 }
2154 SET_NETDEV_DEV(ndev, &pdev->dev); 2158 SET_NETDEV_DEV(ndev, &pdev->dev);
2155 2159
2160 /* get configuration from platform data, only allow use of
2161 * bus width if both SMC_CAN_USE_xxx and SMC91X_USE_xxx are set.
2162 */
2163
2164 lp = netdev_priv(ndev);
2165 lp->cfg.irq_flags = SMC_IRQ_FLAGS;
2166
2167#ifdef SMC_DYNAMIC_BUS_CONFIG
2168 if (pd)
2169 memcpy(&lp->cfg, pd, sizeof(lp->cfg));
2170 else {
2171 lp->cfg.flags = SMC91X_USE_8BIT;
2172 lp->cfg.flags |= SMC91X_USE_16BIT;
2173 lp->cfg.flags |= SMC91X_USE_32BIT;
2174 }
2175
2176 lp->cfg.flags &= ~(SMC_CAN_USE_8BIT ? 0 : SMC91X_USE_8BIT);
2177 lp->cfg.flags &= ~(SMC_CAN_USE_16BIT ? 0 : SMC91X_USE_16BIT);
2178 lp->cfg.flags &= ~(SMC_CAN_USE_32BIT ? 0 : SMC91X_USE_32BIT);
2179#endif
2180
2156 ndev->dma = (unsigned char)-1; 2181 ndev->dma = (unsigned char)-1;
2157 2182
2158 ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 2183 ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
@@ -2163,7 +2188,7 @@ static int smc_drv_probe(struct platform_device *pdev)
2163 2188
2164 ndev->irq = ires->start; 2189 ndev->irq = ires->start;
2165 if (SMC_IRQ_FLAGS == -1) 2190 if (SMC_IRQ_FLAGS == -1)
2166 irq_flags = ires->flags & IRQF_TRIGGER_MASK; 2191 lp->cfg.irq_flags = ires->flags & IRQF_TRIGGER_MASK;
2167 2192
2168 ret = smc_request_attrib(pdev); 2193 ret = smc_request_attrib(pdev);
2169 if (ret) 2194 if (ret)
@@ -2171,6 +2196,7 @@ static int smc_drv_probe(struct platform_device *pdev)
2171#if defined(CONFIG_SA1100_ASSABET) 2196#if defined(CONFIG_SA1100_ASSABET)
2172 NCR_0 |= NCR_ENET_OSC_EN; 2197 NCR_0 |= NCR_ENET_OSC_EN;
2173#endif 2198#endif
2199 platform_set_drvdata(pdev, ndev);
2174 ret = smc_enable_device(pdev); 2200 ret = smc_enable_device(pdev);
2175 if (ret) 2201 if (ret)
2176 goto out_release_attrib; 2202 goto out_release_attrib;
@@ -2189,8 +2215,7 @@ static int smc_drv_probe(struct platform_device *pdev)
2189 } 2215 }
2190#endif 2216#endif
2191 2217
2192 platform_set_drvdata(pdev, ndev); 2218 ret = smc_probe(ndev, addr, lp->cfg.irq_flags);
2193 ret = smc_probe(ndev, addr, irq_flags);
2194 if (ret != 0) 2219 if (ret != 0)
2195 goto out_iounmap; 2220 goto out_iounmap;
2196 2221
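The smc91x.c hunks above thread the per-device struct smc_local *lp through every register accessor and let smc_drv_probe() take the usable bus widths from platform data instead of compile-time constants alone. A minimal board-file sketch of how such configuration could be supplied (not part of this diff; the base address and IRQ number are made up, and struct smc91x_platdata with the SMC91X_USE_* flags is assumed to come from <linux/smc91x.h> as used above):

#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/smc91x.h>

/* Hypothetical board wiring: a 16-bit-only bus at a made-up address/IRQ. */
static struct smc91x_platdata board_smc91x_pdata = {
	.flags	= SMC91X_USE_16BIT,
};

static struct resource board_smc91x_resources[] = {
	{
		.name	= "smc91x-regs",	/* name smc_drv_probe() looks up */
		.start	= 0x20000300,
		.end	= 0x200003ff,
		.flags	= IORESOURCE_MEM,
	}, {
		.start	= 42,
		.end	= 42,
		.flags	= IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE,
	},
};

static struct platform_device board_smc91x_device = {
	.name		= "smc91x",
	.id		= -1,
	.dev		= { .platform_data = &board_smc91x_pdata, },
	.num_resources	= ARRAY_SIZE(board_smc91x_resources),
	.resource	= board_smc91x_resources,
};

With SMC_DYNAMIC_BUS_CONFIG defined, smc_drv_probe() copies this into lp->cfg and then masks the flags against the SMC_CAN_USE_* build options, so a width is only ever used when both the platform and the bus glue allow it.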
diff --git a/drivers/net/smc91x.h b/drivers/net/smc91x.h
index 51d4134b37b1..69e97a1cb1c4 100644
--- a/drivers/net/smc91x.h
+++ b/drivers/net/smc91x.h
@@ -34,6 +34,7 @@
34#ifndef _SMC91X_H_ 34#ifndef _SMC91X_H_
35#define _SMC91X_H_ 35#define _SMC91X_H_
36 36
37#include <linux/smc91x.h>
37 38
38/* 39/*
39 * Define your architecture specific bus configuration parameters here. 40 * Define your architecture specific bus configuration parameters here.
@@ -291,36 +292,6 @@ SMC_outw(u16 val, void __iomem *ioaddr, int reg)
291#define SMC_insw(a, r, p, l) insw((a) + (r), p, l) 292#define SMC_insw(a, r, p, l) insw((a) + (r), p, l)
292#define SMC_outsw(a, r, p, l) outsw((a) + (r), p, l) 293#define SMC_outsw(a, r, p, l) outsw((a) + (r), p, l)
293 294
294#elif defined(CONFIG_SUPERH)
295
296#ifdef CONFIG_SOLUTION_ENGINE
297#define SMC_IRQ_FLAGS (0)
298#define SMC_CAN_USE_8BIT 0
299#define SMC_CAN_USE_16BIT 1
300#define SMC_CAN_USE_32BIT 0
301#define SMC_IO_SHIFT 0
302#define SMC_NOWAIT 1
303
304#define SMC_inw(a, r) inw((a) + (r))
305#define SMC_outw(v, a, r) outw(v, (a) + (r))
306#define SMC_insw(a, r, p, l) insw((a) + (r), p, l)
307#define SMC_outsw(a, r, p, l) outsw((a) + (r), p, l)
308
309#else /* BOARDS */
310
311#define SMC_CAN_USE_8BIT 1
312#define SMC_CAN_USE_16BIT 1
313#define SMC_CAN_USE_32BIT 0
314
315#define SMC_inb(a, r) inb((a) + (r))
316#define SMC_inw(a, r) inw((a) + (r))
317#define SMC_outb(v, a, r) outb(v, (a) + (r))
318#define SMC_outw(v, a, r) outw(v, (a) + (r))
319#define SMC_insw(a, r, p, l) insw((a) + (r), p, l)
320#define SMC_outsw(a, r, p, l) outsw((a) + (r), p, l)
321
322#endif /* BOARDS */
323
324#elif defined(CONFIG_M32R) 295#elif defined(CONFIG_M32R)
325 296
326#define SMC_CAN_USE_8BIT 0 297#define SMC_CAN_USE_8BIT 0
@@ -475,12 +446,15 @@ static inline void LPD7_SMC_outsw (unsigned char* a, int r,
475#define SMC_outb(v, a, r) writeb(v, (a) + (r)) 446#define SMC_outb(v, a, r) writeb(v, (a) + (r))
476#define SMC_outw(v, a, r) writew(v, (a) + (r)) 447#define SMC_outw(v, a, r) writew(v, (a) + (r))
477#define SMC_outl(v, a, r) writel(v, (a) + (r)) 448#define SMC_outl(v, a, r) writel(v, (a) + (r))
449#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l)
450#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l)
478#define SMC_insl(a, r, p, l) readsl((a) + (r), p, l) 451#define SMC_insl(a, r, p, l) readsl((a) + (r), p, l)
479#define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l) 452#define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l)
480 453
481#define RPC_LSA_DEFAULT RPC_LED_100_10 454#define RPC_LSA_DEFAULT RPC_LED_100_10
482#define RPC_LSB_DEFAULT RPC_LED_TX_RX 455#define RPC_LSB_DEFAULT RPC_LED_TX_RX
483 456
457#define SMC_DYNAMIC_BUS_CONFIG
484#endif 458#endif
485 459
486 460
@@ -526,8 +500,19 @@ struct smc_local {
526#endif 500#endif
527 void __iomem *base; 501 void __iomem *base;
528 void __iomem *datacs; 502 void __iomem *datacs;
503
504 struct smc91x_platdata cfg;
529}; 505};
530 506
507#ifdef SMC_DYNAMIC_BUS_CONFIG
508#define SMC_8BIT(p) (((p)->cfg.flags & SMC91X_USE_8BIT) && SMC_CAN_USE_8BIT)
509#define SMC_16BIT(p) (((p)->cfg.flags & SMC91X_USE_16BIT) && SMC_CAN_USE_16BIT)
510#define SMC_32BIT(p) (((p)->cfg.flags & SMC91X_USE_32BIT) && SMC_CAN_USE_32BIT)
511#else
512#define SMC_8BIT(p) SMC_CAN_USE_8BIT
513#define SMC_16BIT(p) SMC_CAN_USE_16BIT
514#define SMC_32BIT(p) SMC_CAN_USE_32BIT
515#endif
531 516
532#ifdef SMC_USE_PXA_DMA 517#ifdef SMC_USE_PXA_DMA
533/* 518/*
@@ -720,7 +705,7 @@ smc_pxa_dma_irq(int dma, void *dummy)
720 705
721// Transmit Control Register 706// Transmit Control Register
722/* BANK 0 */ 707/* BANK 0 */
723#define TCR_REG SMC_REG(0x0000, 0) 708#define TCR_REG(lp) SMC_REG(lp, 0x0000, 0)
724#define TCR_ENABLE 0x0001 // When 1 we can transmit 709#define TCR_ENABLE 0x0001 // When 1 we can transmit
725#define TCR_LOOP 0x0002 // Controls output pin LBK 710#define TCR_LOOP 0x0002 // Controls output pin LBK
726#define TCR_FORCOL 0x0004 // When 1 will force a collision 711#define TCR_FORCOL 0x0004 // When 1 will force a collision
@@ -739,7 +724,7 @@ smc_pxa_dma_irq(int dma, void *dummy)
739 724
740// EPH Status Register 725// EPH Status Register
741/* BANK 0 */ 726/* BANK 0 */
742#define EPH_STATUS_REG SMC_REG(0x0002, 0) 727#define EPH_STATUS_REG(lp) SMC_REG(lp, 0x0002, 0)
743#define ES_TX_SUC 0x0001 // Last TX was successful 728#define ES_TX_SUC 0x0001 // Last TX was successful
744#define ES_SNGL_COL 0x0002 // Single collision detected for last tx 729#define ES_SNGL_COL 0x0002 // Single collision detected for last tx
745#define ES_MUL_COL 0x0004 // Multiple collisions detected for last tx 730#define ES_MUL_COL 0x0004 // Multiple collisions detected for last tx
@@ -758,7 +743,7 @@ smc_pxa_dma_irq(int dma, void *dummy)
758 743
759// Receive Control Register 744// Receive Control Register
760/* BANK 0 */ 745/* BANK 0 */
761#define RCR_REG SMC_REG(0x0004, 0) 746#define RCR_REG(lp) SMC_REG(lp, 0x0004, 0)
762#define RCR_RX_ABORT 0x0001 // Set if a rx frame was aborted 747#define RCR_RX_ABORT 0x0001 // Set if a rx frame was aborted
763#define RCR_PRMS 0x0002 // Enable promiscuous mode 748#define RCR_PRMS 0x0002 // Enable promiscuous mode
764#define RCR_ALMUL 0x0004 // When set accepts all multicast frames 749#define RCR_ALMUL 0x0004 // When set accepts all multicast frames
@@ -775,17 +760,17 @@ smc_pxa_dma_irq(int dma, void *dummy)
775 760
776// Counter Register 761// Counter Register
777/* BANK 0 */ 762/* BANK 0 */
778#define COUNTER_REG SMC_REG(0x0006, 0) 763#define COUNTER_REG(lp) SMC_REG(lp, 0x0006, 0)
779 764
780 765
781// Memory Information Register 766// Memory Information Register
782/* BANK 0 */ 767/* BANK 0 */
783#define MIR_REG SMC_REG(0x0008, 0) 768#define MIR_REG(lp) SMC_REG(lp, 0x0008, 0)
784 769
785 770
786// Receive/Phy Control Register 771// Receive/Phy Control Register
787/* BANK 0 */ 772/* BANK 0 */
788#define RPC_REG SMC_REG(0x000A, 0) 773#define RPC_REG(lp) SMC_REG(lp, 0x000A, 0)
789#define RPC_SPEED 0x2000 // When 1 PHY is in 100Mbps mode. 774#define RPC_SPEED 0x2000 // When 1 PHY is in 100Mbps mode.
790#define RPC_DPLX 0x1000 // When 1 PHY is in Full-Duplex Mode 775#define RPC_DPLX 0x1000 // When 1 PHY is in Full-Duplex Mode
791#define RPC_ANEG 0x0800 // When 1 PHY is in Auto-Negotiate Mode 776#define RPC_ANEG 0x0800 // When 1 PHY is in Auto-Negotiate Mode
@@ -819,7 +804,7 @@ smc_pxa_dma_irq(int dma, void *dummy)
819 804
820// Configuration Reg 805// Configuration Reg
821/* BANK 1 */ 806/* BANK 1 */
822#define CONFIG_REG SMC_REG(0x0000, 1) 807#define CONFIG_REG(lp) SMC_REG(lp, 0x0000, 1)
823#define CONFIG_EXT_PHY 0x0200 // 1=external MII, 0=internal Phy 808#define CONFIG_EXT_PHY 0x0200 // 1=external MII, 0=internal Phy
824#define CONFIG_GPCNTRL 0x0400 // Inverse value drives pin nCNTRL 809#define CONFIG_GPCNTRL 0x0400 // Inverse value drives pin nCNTRL
825#define CONFIG_NO_WAIT 0x1000 // When 1 no extra wait states on ISA bus 810#define CONFIG_NO_WAIT 0x1000 // When 1 no extra wait states on ISA bus
@@ -831,24 +816,24 @@ smc_pxa_dma_irq(int dma, void *dummy)
831 816
832// Base Address Register 817// Base Address Register
833/* BANK 1 */ 818/* BANK 1 */
834#define BASE_REG SMC_REG(0x0002, 1) 819#define BASE_REG(lp) SMC_REG(lp, 0x0002, 1)
835 820
836 821
837// Individual Address Registers 822// Individual Address Registers
838/* BANK 1 */ 823/* BANK 1 */
839#define ADDR0_REG SMC_REG(0x0004, 1) 824#define ADDR0_REG(lp) SMC_REG(lp, 0x0004, 1)
840#define ADDR1_REG SMC_REG(0x0006, 1) 825#define ADDR1_REG(lp) SMC_REG(lp, 0x0006, 1)
841#define ADDR2_REG SMC_REG(0x0008, 1) 826#define ADDR2_REG(lp) SMC_REG(lp, 0x0008, 1)
842 827
843 828
844// General Purpose Register 829// General Purpose Register
845/* BANK 1 */ 830/* BANK 1 */
846#define GP_REG SMC_REG(0x000A, 1) 831#define GP_REG(lp) SMC_REG(lp, 0x000A, 1)
847 832
848 833
849// Control Register 834// Control Register
850/* BANK 1 */ 835/* BANK 1 */
851#define CTL_REG SMC_REG(0x000C, 1) 836#define CTL_REG(lp) SMC_REG(lp, 0x000C, 1)
852#define CTL_RCV_BAD 0x4000 // When 1 bad CRC packets are received 837#define CTL_RCV_BAD 0x4000 // When 1 bad CRC packets are received
853#define CTL_AUTO_RELEASE 0x0800 // When 1 tx pages are released automatically 838#define CTL_AUTO_RELEASE 0x0800 // When 1 tx pages are released automatically
854#define CTL_LE_ENABLE 0x0080 // When 1 enables Link Error interrupt 839#define CTL_LE_ENABLE 0x0080 // When 1 enables Link Error interrupt
@@ -861,7 +846,7 @@ smc_pxa_dma_irq(int dma, void *dummy)
861 846
862// MMU Command Register 847// MMU Command Register
863/* BANK 2 */ 848/* BANK 2 */
864#define MMU_CMD_REG SMC_REG(0x0000, 2) 849#define MMU_CMD_REG(lp) SMC_REG(lp, 0x0000, 2)
865#define MC_BUSY 1 // When 1 the last release has not completed 850#define MC_BUSY 1 // When 1 the last release has not completed
866#define MC_NOP (0<<5) // No Op 851#define MC_NOP (0<<5) // No Op
867#define MC_ALLOC (1<<5) // OR with number of 256 byte packets 852#define MC_ALLOC (1<<5) // OR with number of 256 byte packets
@@ -875,30 +860,30 @@ smc_pxa_dma_irq(int dma, void *dummy)
875 860
876// Packet Number Register 861// Packet Number Register
877/* BANK 2 */ 862/* BANK 2 */
878#define PN_REG SMC_REG(0x0002, 2) 863#define PN_REG(lp) SMC_REG(lp, 0x0002, 2)
879 864
880 865
881// Allocation Result Register 866// Allocation Result Register
882/* BANK 2 */ 867/* BANK 2 */
883#define AR_REG SMC_REG(0x0003, 2) 868#define AR_REG(lp) SMC_REG(lp, 0x0003, 2)
884#define AR_FAILED 0x80 // Allocation Failed 869#define AR_FAILED 0x80 // Allocation Failed
885 870
886 871
887// TX FIFO Ports Register 872// TX FIFO Ports Register
888/* BANK 2 */ 873/* BANK 2 */
889#define TXFIFO_REG SMC_REG(0x0004, 2) 874#define TXFIFO_REG(lp) SMC_REG(lp, 0x0004, 2)
890#define TXFIFO_TEMPTY 0x80 // TX FIFO Empty 875#define TXFIFO_TEMPTY 0x80 // TX FIFO Empty
891 876
892// RX FIFO Ports Register 877// RX FIFO Ports Register
893/* BANK 2 */ 878/* BANK 2 */
894#define RXFIFO_REG SMC_REG(0x0005, 2) 879#define RXFIFO_REG(lp) SMC_REG(lp, 0x0005, 2)
895#define RXFIFO_REMPTY 0x80 // RX FIFO Empty 880#define RXFIFO_REMPTY 0x80 // RX FIFO Empty
896 881
897#define FIFO_REG SMC_REG(0x0004, 2) 882#define FIFO_REG(lp) SMC_REG(lp, 0x0004, 2)
898 883
899// Pointer Register 884// Pointer Register
900/* BANK 2 */ 885/* BANK 2 */
901#define PTR_REG SMC_REG(0x0006, 2) 886#define PTR_REG(lp) SMC_REG(lp, 0x0006, 2)
902#define PTR_RCV 0x8000 // 1=Receive area, 0=Transmit area 887#define PTR_RCV 0x8000 // 1=Receive area, 0=Transmit area
903#define PTR_AUTOINC 0x4000 // Auto increment the pointer on each access 888#define PTR_AUTOINC 0x4000 // Auto increment the pointer on each access
904#define PTR_READ 0x2000 // When 1 the operation is a read 889#define PTR_READ 0x2000 // When 1 the operation is a read
@@ -906,17 +891,17 @@ smc_pxa_dma_irq(int dma, void *dummy)
906 891
907// Data Register 892// Data Register
908/* BANK 2 */ 893/* BANK 2 */
909#define DATA_REG SMC_REG(0x0008, 2) 894#define DATA_REG(lp) SMC_REG(lp, 0x0008, 2)
910 895
911 896
912// Interrupt Status/Acknowledge Register 897// Interrupt Status/Acknowledge Register
913/* BANK 2 */ 898/* BANK 2 */
914#define INT_REG SMC_REG(0x000C, 2) 899#define INT_REG(lp) SMC_REG(lp, 0x000C, 2)
915 900
916 901
917// Interrupt Mask Register 902// Interrupt Mask Register
918/* BANK 2 */ 903/* BANK 2 */
919#define IM_REG SMC_REG(0x000D, 2) 904#define IM_REG(lp) SMC_REG(lp, 0x000D, 2)
920#define IM_MDINT 0x80 // PHY MI Register 18 Interrupt 905#define IM_MDINT 0x80 // PHY MI Register 18 Interrupt
921#define IM_ERCV_INT 0x40 // Early Receive Interrupt 906#define IM_ERCV_INT 0x40 // Early Receive Interrupt
922#define IM_EPH_INT 0x20 // Set by Ethernet Protocol Handler section 907#define IM_EPH_INT 0x20 // Set by Ethernet Protocol Handler section
@@ -929,15 +914,15 @@ smc_pxa_dma_irq(int dma, void *dummy)
929 914
930// Multicast Table Registers 915// Multicast Table Registers
931/* BANK 3 */ 916/* BANK 3 */
932#define MCAST_REG1 SMC_REG(0x0000, 3) 917#define MCAST_REG1(lp) SMC_REG(lp, 0x0000, 3)
933#define MCAST_REG2 SMC_REG(0x0002, 3) 918#define MCAST_REG2(lp) SMC_REG(lp, 0x0002, 3)
934#define MCAST_REG3 SMC_REG(0x0004, 3) 919#define MCAST_REG3(lp) SMC_REG(lp, 0x0004, 3)
935#define MCAST_REG4 SMC_REG(0x0006, 3) 920#define MCAST_REG4(lp) SMC_REG(lp, 0x0006, 3)
936 921
937 922
938// Management Interface Register (MII) 923// Management Interface Register (MII)
939/* BANK 3 */ 924/* BANK 3 */
940#define MII_REG SMC_REG(0x0008, 3) 925#define MII_REG(lp) SMC_REG(lp, 0x0008, 3)
941#define MII_MSK_CRS100 0x4000 // Disables CRS100 detection during tx half dup 926#define MII_MSK_CRS100 0x4000 // Disables CRS100 detection during tx half dup
942#define MII_MDOE 0x0008 // MII Output Enable 927#define MII_MDOE 0x0008 // MII Output Enable
943#define MII_MCLK 0x0004 // MII Clock, pin MDCLK 928#define MII_MCLK 0x0004 // MII Clock, pin MDCLK
@@ -948,20 +933,20 @@ smc_pxa_dma_irq(int dma, void *dummy)
948// Revision Register 933// Revision Register
949/* BANK 3 */ 934/* BANK 3 */
950/* ( hi: chip id low: rev # ) */ 935/* ( hi: chip id low: rev # ) */
951#define REV_REG SMC_REG(0x000A, 3) 936#define REV_REG(lp) SMC_REG(lp, 0x000A, 3)
952 937
953 938
954// Early RCV Register 939// Early RCV Register
955/* BANK 3 */ 940/* BANK 3 */
956/* this is NOT on SMC9192 */ 941/* this is NOT on SMC9192 */
957#define ERCV_REG SMC_REG(0x000C, 3) 942#define ERCV_REG(lp) SMC_REG(lp, 0x000C, 3)
958#define ERCV_RCV_DISCRD 0x0080 // When 1 discards a packet being received 943#define ERCV_RCV_DISCRD 0x0080 // When 1 discards a packet being received
959#define ERCV_THRESHOLD 0x001F // ERCV Threshold Mask 944#define ERCV_THRESHOLD 0x001F // ERCV Threshold Mask
960 945
961 946
962// External Register 947// External Register
963/* BANK 7 */ 948/* BANK 7 */
964#define EXT_REG SMC_REG(0x0000, 7) 949#define EXT_REG(lp) SMC_REG(lp, 0x0000, 7)
965 950
966 951
967#define CHIP_9192 3 952#define CHIP_9192 3
@@ -1085,9 +1070,9 @@ static const char * chip_ids[ 16 ] = {
1085 */ 1070 */
1086 1071
1087#if SMC_DEBUG > 0 1072#if SMC_DEBUG > 0
1088#define SMC_REG(reg, bank) \ 1073#define SMC_REG(lp, reg, bank) \
1089 ({ \ 1074 ({ \
1090 int __b = SMC_CURRENT_BANK(); \ 1075 int __b = SMC_CURRENT_BANK(lp); \
1091 if (unlikely((__b & ~0xf0) != (0x3300 | bank))) { \ 1076 if (unlikely((__b & ~0xf0) != (0x3300 | bank))) { \
1092 printk( "%s: bank reg screwed (0x%04x)\n", \ 1077 printk( "%s: bank reg screwed (0x%04x)\n", \
1093 CARDNAME, __b ); \ 1078 CARDNAME, __b ); \
@@ -1096,7 +1081,7 @@ static const char * chip_ids[ 16 ] = {
1096 reg<<SMC_IO_SHIFT; \ 1081 reg<<SMC_IO_SHIFT; \
1097 }) 1082 })
1098#else 1083#else
1099#define SMC_REG(reg, bank) (reg<<SMC_IO_SHIFT) 1084#define SMC_REG(lp, reg, bank) (reg<<SMC_IO_SHIFT)
1100#endif 1085#endif
1101 1086
1102/* 1087/*
@@ -1108,212 +1093,215 @@ static const char * chip_ids[ 16 ] = {
1108 * 1093 *
1109 * Enforce it on any 32-bit capable setup for now. 1094 * Enforce it on any 32-bit capable setup for now.
1110 */ 1095 */
1111#define SMC_MUST_ALIGN_WRITE SMC_CAN_USE_32BIT 1096#define SMC_MUST_ALIGN_WRITE(lp) SMC_32BIT(lp)
1112 1097
1113#define SMC_GET_PN() \ 1098#define SMC_GET_PN(lp) \
1114 ( SMC_CAN_USE_8BIT ? (SMC_inb(ioaddr, PN_REG)) \ 1099 (SMC_8BIT(lp) ? (SMC_inb(ioaddr, PN_REG(lp))) \
1115 : (SMC_inw(ioaddr, PN_REG) & 0xFF) ) 1100 : (SMC_inw(ioaddr, PN_REG(lp)) & 0xFF))
1116 1101
1117#define SMC_SET_PN(x) \ 1102#define SMC_SET_PN(lp, x) \
1118 do { \ 1103 do { \
1119 if (SMC_MUST_ALIGN_WRITE) \ 1104 if (SMC_MUST_ALIGN_WRITE(lp)) \
1120 SMC_outl((x)<<16, ioaddr, SMC_REG(0, 2)); \ 1105 SMC_outl((x)<<16, ioaddr, SMC_REG(lp, 0, 2)); \
1121 else if (SMC_CAN_USE_8BIT) \ 1106 else if (SMC_8BIT(lp)) \
1122 SMC_outb(x, ioaddr, PN_REG); \ 1107 SMC_outb(x, ioaddr, PN_REG(lp)); \
1123 else \ 1108 else \
1124 SMC_outw(x, ioaddr, PN_REG); \ 1109 SMC_outw(x, ioaddr, PN_REG(lp)); \
1125 } while (0) 1110 } while (0)
1126 1111
1127#define SMC_GET_AR() \ 1112#define SMC_GET_AR(lp) \
1128 ( SMC_CAN_USE_8BIT ? (SMC_inb(ioaddr, AR_REG)) \ 1113 (SMC_8BIT(lp) ? (SMC_inb(ioaddr, AR_REG(lp))) \
1129 : (SMC_inw(ioaddr, PN_REG) >> 8) ) 1114 : (SMC_inw(ioaddr, PN_REG(lp)) >> 8))
1130 1115
1131#define SMC_GET_TXFIFO() \ 1116#define SMC_GET_TXFIFO(lp) \
1132 ( SMC_CAN_USE_8BIT ? (SMC_inb(ioaddr, TXFIFO_REG)) \ 1117 (SMC_8BIT(lp) ? (SMC_inb(ioaddr, TXFIFO_REG(lp))) \
1133 : (SMC_inw(ioaddr, TXFIFO_REG) & 0xFF) ) 1118 : (SMC_inw(ioaddr, TXFIFO_REG(lp)) & 0xFF))
1134 1119
1135#define SMC_GET_RXFIFO() \ 1120#define SMC_GET_RXFIFO(lp) \
1136 ( SMC_CAN_USE_8BIT ? (SMC_inb(ioaddr, RXFIFO_REG)) \ 1121 (SMC_8BIT(lp) ? (SMC_inb(ioaddr, RXFIFO_REG(lp))) \
1137 : (SMC_inw(ioaddr, TXFIFO_REG) >> 8) ) 1122 : (SMC_inw(ioaddr, TXFIFO_REG(lp)) >> 8))
1138 1123
1139#define SMC_GET_INT() \ 1124#define SMC_GET_INT(lp) \
1140 ( SMC_CAN_USE_8BIT ? (SMC_inb(ioaddr, INT_REG)) \ 1125 (SMC_8BIT(lp) ? (SMC_inb(ioaddr, INT_REG(lp))) \
1141 : (SMC_inw(ioaddr, INT_REG) & 0xFF) ) 1126 : (SMC_inw(ioaddr, INT_REG(lp)) & 0xFF))
1142 1127
1143#define SMC_ACK_INT(x) \ 1128#define SMC_ACK_INT(lp, x) \
1144 do { \ 1129 do { \
1145 if (SMC_CAN_USE_8BIT) \ 1130 if (SMC_8BIT(lp)) \
1146 SMC_outb(x, ioaddr, INT_REG); \ 1131 SMC_outb(x, ioaddr, INT_REG(lp)); \
1147 else { \ 1132 else { \
1148 unsigned long __flags; \ 1133 unsigned long __flags; \
1149 int __mask; \ 1134 int __mask; \
1150 local_irq_save(__flags); \ 1135 local_irq_save(__flags); \
1151 __mask = SMC_inw( ioaddr, INT_REG ) & ~0xff; \ 1136 __mask = SMC_inw(ioaddr, INT_REG(lp)) & ~0xff; \
1152 SMC_outw( __mask | (x), ioaddr, INT_REG ); \ 1137 SMC_outw(__mask | (x), ioaddr, INT_REG(lp)); \
1153 local_irq_restore(__flags); \ 1138 local_irq_restore(__flags); \
1154 } \ 1139 } \
1155 } while (0) 1140 } while (0)
1156 1141
1157#define SMC_GET_INT_MASK() \ 1142#define SMC_GET_INT_MASK(lp) \
1158 ( SMC_CAN_USE_8BIT ? (SMC_inb(ioaddr, IM_REG)) \ 1143 (SMC_8BIT(lp) ? (SMC_inb(ioaddr, IM_REG(lp))) \
1159 : (SMC_inw( ioaddr, INT_REG ) >> 8) ) 1144 : (SMC_inw(ioaddr, INT_REG(lp)) >> 8))
1160 1145
1161#define SMC_SET_INT_MASK(x) \ 1146#define SMC_SET_INT_MASK(lp, x) \
1162 do { \ 1147 do { \
1163 if (SMC_CAN_USE_8BIT) \ 1148 if (SMC_8BIT(lp)) \
1164 SMC_outb(x, ioaddr, IM_REG); \ 1149 SMC_outb(x, ioaddr, IM_REG(lp)); \
1165 else \ 1150 else \
1166 SMC_outw((x) << 8, ioaddr, INT_REG); \ 1151 SMC_outw((x) << 8, ioaddr, INT_REG(lp)); \
1167 } while (0) 1152 } while (0)
1168 1153
1169#define SMC_CURRENT_BANK() SMC_inw(ioaddr, BANK_SELECT) 1154#define SMC_CURRENT_BANK(lp) SMC_inw(ioaddr, BANK_SELECT)
1170 1155
1171#define SMC_SELECT_BANK(x) \ 1156#define SMC_SELECT_BANK(lp, x) \
1172 do { \ 1157 do { \
1173 if (SMC_MUST_ALIGN_WRITE) \ 1158 if (SMC_MUST_ALIGN_WRITE(lp)) \
1174 SMC_outl((x)<<16, ioaddr, 12<<SMC_IO_SHIFT); \ 1159 SMC_outl((x)<<16, ioaddr, 12<<SMC_IO_SHIFT); \
1175 else \ 1160 else \
1176 SMC_outw(x, ioaddr, BANK_SELECT); \ 1161 SMC_outw(x, ioaddr, BANK_SELECT); \
1177 } while (0) 1162 } while (0)
1178 1163
1179#define SMC_GET_BASE() SMC_inw(ioaddr, BASE_REG) 1164#define SMC_GET_BASE(lp) SMC_inw(ioaddr, BASE_REG(lp))
1180 1165
1181#define SMC_SET_BASE(x) SMC_outw(x, ioaddr, BASE_REG) 1166#define SMC_SET_BASE(lp, x) SMC_outw(x, ioaddr, BASE_REG(lp))
1182 1167
1183#define SMC_GET_CONFIG() SMC_inw(ioaddr, CONFIG_REG) 1168#define SMC_GET_CONFIG(lp) SMC_inw(ioaddr, CONFIG_REG(lp))
1184 1169
1185#define SMC_SET_CONFIG(x) SMC_outw(x, ioaddr, CONFIG_REG) 1170#define SMC_SET_CONFIG(lp, x) SMC_outw(x, ioaddr, CONFIG_REG(lp))
1186 1171
1187#define SMC_GET_COUNTER() SMC_inw(ioaddr, COUNTER_REG) 1172#define SMC_GET_COUNTER(lp) SMC_inw(ioaddr, COUNTER_REG(lp))
1188 1173
1189#define SMC_GET_CTL() SMC_inw(ioaddr, CTL_REG) 1174#define SMC_GET_CTL(lp) SMC_inw(ioaddr, CTL_REG(lp))
1190 1175
1191#define SMC_SET_CTL(x) SMC_outw(x, ioaddr, CTL_REG) 1176#define SMC_SET_CTL(lp, x) SMC_outw(x, ioaddr, CTL_REG(lp))
1192 1177
1193#define SMC_GET_MII() SMC_inw(ioaddr, MII_REG) 1178#define SMC_GET_MII(lp) SMC_inw(ioaddr, MII_REG(lp))
1194 1179
1195#define SMC_SET_MII(x) SMC_outw(x, ioaddr, MII_REG) 1180#define SMC_SET_MII(lp, x) SMC_outw(x, ioaddr, MII_REG(lp))
1196 1181
1197#define SMC_GET_MIR() SMC_inw(ioaddr, MIR_REG) 1182#define SMC_GET_MIR(lp) SMC_inw(ioaddr, MIR_REG(lp))
1198 1183
1199#define SMC_SET_MIR(x) SMC_outw(x, ioaddr, MIR_REG) 1184#define SMC_SET_MIR(lp, x) SMC_outw(x, ioaddr, MIR_REG(lp))
1200 1185
1201#define SMC_GET_MMU_CMD() SMC_inw(ioaddr, MMU_CMD_REG) 1186#define SMC_GET_MMU_CMD(lp) SMC_inw(ioaddr, MMU_CMD_REG(lp))
1202 1187
1203#define SMC_SET_MMU_CMD(x) SMC_outw(x, ioaddr, MMU_CMD_REG) 1188#define SMC_SET_MMU_CMD(lp, x) SMC_outw(x, ioaddr, MMU_CMD_REG(lp))
1204 1189
1205#define SMC_GET_FIFO() SMC_inw(ioaddr, FIFO_REG) 1190#define SMC_GET_FIFO(lp) SMC_inw(ioaddr, FIFO_REG(lp))
1206 1191
1207#define SMC_GET_PTR() SMC_inw(ioaddr, PTR_REG) 1192#define SMC_GET_PTR(lp) SMC_inw(ioaddr, PTR_REG(lp))
1208 1193
1209#define SMC_SET_PTR(x) \ 1194#define SMC_SET_PTR(lp, x) \
1210 do { \ 1195 do { \
1211 if (SMC_MUST_ALIGN_WRITE) \ 1196 if (SMC_MUST_ALIGN_WRITE(lp)) \
1212 SMC_outl((x)<<16, ioaddr, SMC_REG(4, 2)); \ 1197 SMC_outl((x)<<16, ioaddr, SMC_REG(lp, 4, 2)); \
1213 else \ 1198 else \
1214 SMC_outw(x, ioaddr, PTR_REG); \ 1199 SMC_outw(x, ioaddr, PTR_REG(lp)); \
1215 } while (0) 1200 } while (0)
1216 1201
1217#define SMC_GET_EPH_STATUS() SMC_inw(ioaddr, EPH_STATUS_REG) 1202#define SMC_GET_EPH_STATUS(lp) SMC_inw(ioaddr, EPH_STATUS_REG(lp))
1218 1203
1219#define SMC_GET_RCR() SMC_inw(ioaddr, RCR_REG) 1204#define SMC_GET_RCR(lp) SMC_inw(ioaddr, RCR_REG(lp))
1220 1205
1221#define SMC_SET_RCR(x) SMC_outw(x, ioaddr, RCR_REG) 1206#define SMC_SET_RCR(lp, x) SMC_outw(x, ioaddr, RCR_REG(lp))
1222 1207
1223#define SMC_GET_REV() SMC_inw(ioaddr, REV_REG) 1208#define SMC_GET_REV(lp) SMC_inw(ioaddr, REV_REG(lp))
1224 1209
1225#define SMC_GET_RPC() SMC_inw(ioaddr, RPC_REG) 1210#define SMC_GET_RPC(lp) SMC_inw(ioaddr, RPC_REG(lp))
1226 1211
1227#define SMC_SET_RPC(x) \ 1212#define SMC_SET_RPC(lp, x) \
1228 do { \ 1213 do { \
1229 if (SMC_MUST_ALIGN_WRITE) \ 1214 if (SMC_MUST_ALIGN_WRITE(lp)) \
1230 SMC_outl((x)<<16, ioaddr, SMC_REG(8, 0)); \ 1215 SMC_outl((x)<<16, ioaddr, SMC_REG(lp, 8, 0)); \
1231 else \ 1216 else \
1232 SMC_outw(x, ioaddr, RPC_REG); \ 1217 SMC_outw(x, ioaddr, RPC_REG(lp)); \
1233 } while (0) 1218 } while (0)
1234 1219
1235#define SMC_GET_TCR() SMC_inw(ioaddr, TCR_REG) 1220#define SMC_GET_TCR(lp) SMC_inw(ioaddr, TCR_REG(lp))
1236 1221
1237#define SMC_SET_TCR(x) SMC_outw(x, ioaddr, TCR_REG) 1222#define SMC_SET_TCR(lp, x) SMC_outw(x, ioaddr, TCR_REG(lp))
1238 1223
1239#ifndef SMC_GET_MAC_ADDR 1224#ifndef SMC_GET_MAC_ADDR
1240#define SMC_GET_MAC_ADDR(addr) \ 1225#define SMC_GET_MAC_ADDR(lp, addr) \
1241 do { \ 1226 do { \
1242 unsigned int __v; \ 1227 unsigned int __v; \
1243 __v = SMC_inw( ioaddr, ADDR0_REG ); \ 1228 __v = SMC_inw(ioaddr, ADDR0_REG(lp)); \
1244 addr[0] = __v; addr[1] = __v >> 8; \ 1229 addr[0] = __v; addr[1] = __v >> 8; \
1245 __v = SMC_inw( ioaddr, ADDR1_REG ); \ 1230 __v = SMC_inw(ioaddr, ADDR1_REG(lp)); \
1246 addr[2] = __v; addr[3] = __v >> 8; \ 1231 addr[2] = __v; addr[3] = __v >> 8; \
1247 __v = SMC_inw( ioaddr, ADDR2_REG ); \ 1232 __v = SMC_inw(ioaddr, ADDR2_REG(lp)); \
1248 addr[4] = __v; addr[5] = __v >> 8; \ 1233 addr[4] = __v; addr[5] = __v >> 8; \
1249 } while (0) 1234 } while (0)
1250#endif 1235#endif
1251 1236
1252#define SMC_SET_MAC_ADDR(addr) \ 1237#define SMC_SET_MAC_ADDR(lp, addr) \
1253 do { \ 1238 do { \
1254 SMC_outw( addr[0]|(addr[1] << 8), ioaddr, ADDR0_REG ); \ 1239 SMC_outw(addr[0]|(addr[1] << 8), ioaddr, ADDR0_REG(lp)); \
1255 SMC_outw( addr[2]|(addr[3] << 8), ioaddr, ADDR1_REG ); \ 1240 SMC_outw(addr[2]|(addr[3] << 8), ioaddr, ADDR1_REG(lp)); \
1256 SMC_outw( addr[4]|(addr[5] << 8), ioaddr, ADDR2_REG ); \ 1241 SMC_outw(addr[4]|(addr[5] << 8), ioaddr, ADDR2_REG(lp)); \
1257 } while (0) 1242 } while (0)
1258 1243
1259#define SMC_SET_MCAST(x) \ 1244#define SMC_SET_MCAST(lp, x) \
1260 do { \ 1245 do { \
1261 const unsigned char *mt = (x); \ 1246 const unsigned char *mt = (x); \
1262 SMC_outw( mt[0] | (mt[1] << 8), ioaddr, MCAST_REG1 ); \ 1247 SMC_outw(mt[0] | (mt[1] << 8), ioaddr, MCAST_REG1(lp)); \
1263 SMC_outw( mt[2] | (mt[3] << 8), ioaddr, MCAST_REG2 ); \ 1248 SMC_outw(mt[2] | (mt[3] << 8), ioaddr, MCAST_REG2(lp)); \
1264 SMC_outw( mt[4] | (mt[5] << 8), ioaddr, MCAST_REG3 ); \ 1249 SMC_outw(mt[4] | (mt[5] << 8), ioaddr, MCAST_REG3(lp)); \
1265 SMC_outw( mt[6] | (mt[7] << 8), ioaddr, MCAST_REG4 ); \ 1250 SMC_outw(mt[6] | (mt[7] << 8), ioaddr, MCAST_REG4(lp)); \
1266 } while (0) 1251 } while (0)
1267 1252
1268#define SMC_PUT_PKT_HDR(status, length) \ 1253#define SMC_PUT_PKT_HDR(lp, status, length) \
1269 do { \ 1254 do { \
1270 if (SMC_CAN_USE_32BIT) \ 1255 if (SMC_32BIT(lp)) \
1271 SMC_outl((status) | (length)<<16, ioaddr, DATA_REG); \ 1256 SMC_outl((status) | (length)<<16, ioaddr, \
1257 DATA_REG(lp)); \
1272 else { \ 1258 else { \
1273 SMC_outw(status, ioaddr, DATA_REG); \ 1259 SMC_outw(status, ioaddr, DATA_REG(lp)); \
1274 SMC_outw(length, ioaddr, DATA_REG); \ 1260 SMC_outw(length, ioaddr, DATA_REG(lp)); \
1275 } \ 1261 } \
1276 } while (0) 1262 } while (0)
1277 1263
1278#define SMC_GET_PKT_HDR(status, length) \ 1264#define SMC_GET_PKT_HDR(lp, status, length) \
1279 do { \ 1265 do { \
1280 if (SMC_CAN_USE_32BIT) { \ 1266 if (SMC_32BIT(lp)) { \
1281 unsigned int __val = SMC_inl(ioaddr, DATA_REG); \ 1267 unsigned int __val = SMC_inl(ioaddr, DATA_REG(lp)); \
1282 (status) = __val & 0xffff; \ 1268 (status) = __val & 0xffff; \
1283 (length) = __val >> 16; \ 1269 (length) = __val >> 16; \
1284 } else { \ 1270 } else { \
1285 (status) = SMC_inw(ioaddr, DATA_REG); \ 1271 (status) = SMC_inw(ioaddr, DATA_REG(lp)); \
1286 (length) = SMC_inw(ioaddr, DATA_REG); \ 1272 (length) = SMC_inw(ioaddr, DATA_REG(lp)); \
1287 } \ 1273 } \
1288 } while (0) 1274 } while (0)
1289 1275
1290#define SMC_PUSH_DATA(p, l) \ 1276#define SMC_PUSH_DATA(lp, p, l) \
1291 do { \ 1277 do { \
1292 if (SMC_CAN_USE_32BIT) { \ 1278 if (SMC_32BIT(lp)) { \
1293 void *__ptr = (p); \ 1279 void *__ptr = (p); \
1294 int __len = (l); \ 1280 int __len = (l); \
1295 void __iomem *__ioaddr = ioaddr; \ 1281 void __iomem *__ioaddr = ioaddr; \
1296 if (__len >= 2 && (unsigned long)__ptr & 2) { \ 1282 if (__len >= 2 && (unsigned long)__ptr & 2) { \
1297 __len -= 2; \ 1283 __len -= 2; \
1298 SMC_outw(*(u16 *)__ptr, ioaddr, DATA_REG); \ 1284 SMC_outw(*(u16 *)__ptr, ioaddr, \
1285 DATA_REG(lp)); \
1299 __ptr += 2; \ 1286 __ptr += 2; \
1300 } \ 1287 } \
1301 if (SMC_CAN_USE_DATACS && lp->datacs) \ 1288 if (SMC_CAN_USE_DATACS && lp->datacs) \
1302 __ioaddr = lp->datacs; \ 1289 __ioaddr = lp->datacs; \
1303 SMC_outsl(__ioaddr, DATA_REG, __ptr, __len>>2); \ 1290 SMC_outsl(__ioaddr, DATA_REG(lp), __ptr, __len>>2); \
1304 if (__len & 2) { \ 1291 if (__len & 2) { \
1305 __ptr += (__len & ~3); \ 1292 __ptr += (__len & ~3); \
1306 SMC_outw(*((u16 *)__ptr), ioaddr, DATA_REG); \ 1293 SMC_outw(*((u16 *)__ptr), ioaddr, \
1294 DATA_REG(lp)); \
1307 } \ 1295 } \
1308 } else if (SMC_CAN_USE_16BIT) \ 1296 } else if (SMC_16BIT(lp)) \
1309 SMC_outsw(ioaddr, DATA_REG, p, (l) >> 1); \ 1297 SMC_outsw(ioaddr, DATA_REG(lp), p, (l) >> 1); \
1310 else if (SMC_CAN_USE_8BIT) \ 1298 else if (SMC_8BIT(lp)) \
1311 SMC_outsb(ioaddr, DATA_REG, p, l); \ 1299 SMC_outsb(ioaddr, DATA_REG(lp), p, l); \
1312 } while (0) 1300 } while (0)
1313 1301
1314#define SMC_PULL_DATA(p, l) \ 1302#define SMC_PULL_DATA(lp, p, l) \
1315 do { \ 1303 do { \
1316 if (SMC_CAN_USE_32BIT) { \ 1304 if (SMC_32BIT(lp)) { \
1317 void *__ptr = (p); \ 1305 void *__ptr = (p); \
1318 int __len = (l); \ 1306 int __len = (l); \
1319 void __iomem *__ioaddr = ioaddr; \ 1307 void __iomem *__ioaddr = ioaddr; \
@@ -1333,16 +1321,17 @@ static const char * chip_ids[ 16 ] = {
1333 */ \ 1321 */ \
1334 __ptr -= 2; \ 1322 __ptr -= 2; \
1335 __len += 2; \ 1323 __len += 2; \
1336 SMC_SET_PTR(2|PTR_READ|PTR_RCV|PTR_AUTOINC); \ 1324 SMC_SET_PTR(lp, \
1325 2|PTR_READ|PTR_RCV|PTR_AUTOINC); \
1337 } \ 1326 } \
1338 if (SMC_CAN_USE_DATACS && lp->datacs) \ 1327 if (SMC_CAN_USE_DATACS && lp->datacs) \
1339 __ioaddr = lp->datacs; \ 1328 __ioaddr = lp->datacs; \
1340 __len += 2; \ 1329 __len += 2; \
1341 SMC_insl(__ioaddr, DATA_REG, __ptr, __len>>2); \ 1330 SMC_insl(__ioaddr, DATA_REG(lp), __ptr, __len>>2); \
1342 } else if (SMC_CAN_USE_16BIT) \ 1331 } else if (SMC_16BIT(lp)) \
1343 SMC_insw(ioaddr, DATA_REG, p, (l) >> 1); \ 1332 SMC_insw(ioaddr, DATA_REG(lp), p, (l) >> 1); \
1344 else if (SMC_CAN_USE_8BIT) \ 1333 else if (SMC_8BIT(lp)) \
1345 SMC_insb(ioaddr, DATA_REG, p, l); \ 1334 SMC_insb(ioaddr, DATA_REG(lp), p, l); \
1346 } while (0) 1335 } while (0)
1347 1336
1348#endif /* _SMC91X_H_ */ 1337#endif /* _SMC91X_H_ */
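The header now passes lp to SMC_REG() and gates each access width twice: SMC91X_USE_* comes from platform data at run time, while SMC_CAN_USE_* remains a compile-time property of the bus glue. Purely as an illustration, the new SMC_16BIT() test rewritten as an inline function (the 8-bit and 32-bit variants follow the same shape; the driver itself keeps these as macros):

/* Open-coded equivalent of SMC_16BIT(lp) from the hunk above. */
static inline int smc_can_do_16bit(struct smc_local *lp)
{
#ifdef SMC_DYNAMIC_BUS_CONFIG
	/* run-time platform flag AND compile-time capability */
	return (lp->cfg.flags & SMC91X_USE_16BIT) && SMC_CAN_USE_16BIT;
#else
	/* fixed at build time, exactly as before the patch */
	return SMC_CAN_USE_16BIT;
#endif
}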
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c
index bccae7e5c6ad..477671606273 100644
--- a/drivers/net/spider_net.c
+++ b/drivers/net/spider_net.c
@@ -1399,6 +1399,8 @@ spider_net_link_reset(struct net_device *netdev)
1399 spider_net_write_reg(card, SPIDER_NET_GMACINTEN, 0); 1399 spider_net_write_reg(card, SPIDER_NET_GMACINTEN, 0);
1400 1400
1401 /* reset phy and setup aneg */ 1401 /* reset phy and setup aneg */
1402 card->aneg_count = 0;
1403 card->medium = BCM54XX_COPPER;
1402 spider_net_setup_aneg(card); 1404 spider_net_setup_aneg(card);
1403 mod_timer(&card->aneg_timer, jiffies + SPIDER_NET_ANEG_TIMER); 1405 mod_timer(&card->aneg_timer, jiffies + SPIDER_NET_ANEG_TIMER);
1404 1406
@@ -1413,18 +1415,12 @@ spider_net_link_reset(struct net_device *netdev)
1413 * found when an interrupt is presented 1415 * found when an interrupt is presented
1414 */ 1416 */
1415static void 1417static void
1416spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg) 1418spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg,
1419 u32 error_reg1, u32 error_reg2)
1417{ 1420{
1418 u32 error_reg1, error_reg2;
1419 u32 i; 1421 u32 i;
1420 int show_error = 1; 1422 int show_error = 1;
1421 1423
1422 error_reg1 = spider_net_read_reg(card, SPIDER_NET_GHIINT1STS);
1423 error_reg2 = spider_net_read_reg(card, SPIDER_NET_GHIINT2STS);
1424
1425 error_reg1 &= SPIDER_NET_INT1_MASK_VALUE;
1426 error_reg2 &= SPIDER_NET_INT2_MASK_VALUE;
1427
1428 /* check GHIINT0STS ************************************/ 1424 /* check GHIINT0STS ************************************/
1429 if (status_reg) 1425 if (status_reg)
1430 for (i = 0; i < 32; i++) 1426 for (i = 0; i < 32; i++)
@@ -1654,12 +1650,15 @@ spider_net_interrupt(int irq, void *ptr)
1654{ 1650{
1655 struct net_device *netdev = ptr; 1651 struct net_device *netdev = ptr;
1656 struct spider_net_card *card = netdev_priv(netdev); 1652 struct spider_net_card *card = netdev_priv(netdev);
1657 u32 status_reg; 1653 u32 status_reg, error_reg1, error_reg2;
1658 1654
1659 status_reg = spider_net_read_reg(card, SPIDER_NET_GHIINT0STS); 1655 status_reg = spider_net_read_reg(card, SPIDER_NET_GHIINT0STS);
1660 status_reg &= SPIDER_NET_INT0_MASK_VALUE; 1656 error_reg1 = spider_net_read_reg(card, SPIDER_NET_GHIINT1STS);
1657 error_reg2 = spider_net_read_reg(card, SPIDER_NET_GHIINT2STS);
1661 1658
1662 if (!status_reg) 1659 if (!(status_reg & SPIDER_NET_INT0_MASK_VALUE) &&
1660 !(error_reg1 & SPIDER_NET_INT1_MASK_VALUE) &&
1661 !(error_reg2 & SPIDER_NET_INT2_MASK_VALUE))
1663 return IRQ_NONE; 1662 return IRQ_NONE;
1664 1663
1665 if (status_reg & SPIDER_NET_RXINT ) { 1664 if (status_reg & SPIDER_NET_RXINT ) {
@@ -1674,7 +1673,8 @@ spider_net_interrupt(int irq, void *ptr)
1674 spider_net_link_reset(netdev); 1673 spider_net_link_reset(netdev);
1675 1674
1676 if (status_reg & SPIDER_NET_ERRINT ) 1675 if (status_reg & SPIDER_NET_ERRINT )
1677 spider_net_handle_error_irq(card, status_reg); 1676 spider_net_handle_error_irq(card, status_reg,
1677 error_reg1, error_reg2);
1678 1678
1679 /* clear interrupt sources */ 1679 /* clear interrupt sources */
1680 spider_net_write_reg(card, SPIDER_NET_GHIINT0STS, status_reg); 1680 spider_net_write_reg(card, SPIDER_NET_GHIINT0STS, status_reg);
@@ -1982,6 +1982,8 @@ spider_net_open(struct net_device *netdev)
1982 goto init_firmware_failed; 1982 goto init_firmware_failed;
1983 1983
1984 /* start probing with copper */ 1984 /* start probing with copper */
1985 card->aneg_count = 0;
1986 card->medium = BCM54XX_COPPER;
1985 spider_net_setup_aneg(card); 1987 spider_net_setup_aneg(card);
1986 if (card->phy.def->phy_id) 1988 if (card->phy.def->phy_id)
1987 mod_timer(&card->aneg_timer, jiffies + SPIDER_NET_ANEG_TIMER); 1989 mod_timer(&card->aneg_timer, jiffies + SPIDER_NET_ANEG_TIMER);
@@ -2043,7 +2045,8 @@ static void spider_net_link_phy(unsigned long data)
2043 /* if link didn't come up after SPIDER_NET_ANEG_TIMEOUT tries, setup phy again */ 2045 /* if link didn't come up after SPIDER_NET_ANEG_TIMEOUT tries, setup phy again */
2044 if (card->aneg_count > SPIDER_NET_ANEG_TIMEOUT) { 2046 if (card->aneg_count > SPIDER_NET_ANEG_TIMEOUT) {
2045 2047
2046 pr_info("%s: link is down trying to bring it up\n", card->netdev->name); 2048 pr_debug("%s: link is down trying to bring it up\n",
2049 card->netdev->name);
2047 2050
2048 switch (card->medium) { 2051 switch (card->medium) {
2049 case BCM54XX_COPPER: 2052 case BCM54XX_COPPER:
@@ -2094,9 +2097,10 @@ static void spider_net_link_phy(unsigned long data)
2094 2097
2095 card->aneg_count = 0; 2098 card->aneg_count = 0;
2096 2099
2097 pr_debug("Found %s with %i Mbps, %s-duplex %sautoneg.\n", 2100 pr_info("%s: link up, %i Mbps, %s-duplex %sautoneg.\n",
2098 phy->def->name, phy->speed, phy->duplex==1 ? "Full" : "Half", 2101 card->netdev->name, phy->speed,
2099 phy->autoneg==1 ? "" : "no "); 2102 phy->duplex == 1 ? "Full" : "Half",
2103 phy->autoneg == 1 ? "" : "no ");
2100 2104
2101 return; 2105 return;
2102} 2106}
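The spider_net.c changes read GHIINT1STS/GHIINT2STS once in the interrupt handler, return IRQ_NONE only when all three masked status words are empty, and hand the sampled error words to spider_net_handle_error_irq() instead of letting it re-read them. The general shape of that pattern, as a stand-alone sketch (all example_* names, offsets, masks and bits are invented for illustration; only the control flow mirrors the patch):

#include <linux/types.h>
#include <linux/io.h>
#include <linux/interrupt.h>

struct example_card {
	void __iomem *regs;
};

#define EXAMPLE_STATUS0		0x00		/* made-up register offsets */
#define EXAMPLE_STATUS1		0x04
#define EXAMPLE_STATUS2		0x08
#define EXAMPLE_MASK0		0x0000ffff	/* made-up mask values */
#define EXAMPLE_MASK1		0x000000ff
#define EXAMPLE_MASK2		0x000000ff
#define EXAMPLE_ERRINT		0x00000002	/* made-up error summary bit */

static void example_handle_errors(struct example_card *card,
				  u32 status, u32 err1, u32 err2)
{
	/* decode the already-sampled error words here, no re-read */
}

static irqreturn_t example_isr(int irq, void *dev_id)
{
	struct example_card *card = dev_id;
	u32 status = readl(card->regs + EXAMPLE_STATUS0);
	u32 err1 = readl(card->regs + EXAMPLE_STATUS1);
	u32 err2 = readl(card->regs + EXAMPLE_STATUS2);

	/* claim the (possibly shared) line only if something relevant
	 * is pending in any of the three status words */
	if (!(status & EXAMPLE_MASK0) &&
	    !(err1 & EXAMPLE_MASK1) &&
	    !(err2 & EXAMPLE_MASK2))
		return IRQ_NONE;

	if (status & EXAMPLE_ERRINT)
		example_handle_errors(card, status, err1, err2);

	/* acknowledge exactly the bits that were observed */
	writel(status, card->regs + EXAMPLE_STATUS0);
	return IRQ_HANDLED;
}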
diff --git a/drivers/net/spider_net.h b/drivers/net/spider_net.h
index e1d05c0f47eb..05f74cbdd617 100644
--- a/drivers/net/spider_net.h
+++ b/drivers/net/spider_net.h
@@ -52,7 +52,7 @@ extern char spider_net_driver_name[];
52 52
53#define SPIDER_NET_TX_TIMER (HZ/5) 53#define SPIDER_NET_TX_TIMER (HZ/5)
54#define SPIDER_NET_ANEG_TIMER (HZ) 54#define SPIDER_NET_ANEG_TIMER (HZ)
55#define SPIDER_NET_ANEG_TIMEOUT 2 55#define SPIDER_NET_ANEG_TIMEOUT 5
56 56
57#define SPIDER_NET_RX_CSUM_DEFAULT 1 57#define SPIDER_NET_RX_CSUM_DEFAULT 1
58 58
@@ -159,9 +159,8 @@ extern char spider_net_driver_name[];
159 159
160/** interrupt mask registers */ 160/** interrupt mask registers */
161#define SPIDER_NET_INT0_MASK_VALUE 0x3f7fe2c7 161#define SPIDER_NET_INT0_MASK_VALUE 0x3f7fe2c7
162#define SPIDER_NET_INT1_MASK_VALUE 0xffff7ff7 162#define SPIDER_NET_INT1_MASK_VALUE 0x0000fff2
163/* no MAC aborts -> auto retransmission */ 163#define SPIDER_NET_INT2_MASK_VALUE 0x000003f1
164#define SPIDER_NET_INT2_MASK_VALUE 0xffef7ff1
165 164
166/* we rely on flagged descriptor interrupts */ 165/* we rely on flagged descriptor interrupts */
167#define SPIDER_NET_FRAMENUM_VALUE 0x00000000 166#define SPIDER_NET_FRAMENUM_VALUE 0x00000000
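Together with the aneg_count/medium resets added in spider_net.c above, raising SPIDER_NET_ANEG_TIMEOUT from 2 to 5 gives the PHY five one-second polls (SPIDER_NET_ANEG_TIMER is HZ) before negotiation is restarted and, in the driver above, the medium is switched. A stand-alone sketch of that timer-driven retry loop (hypothetical example_* names; the real callback is spider_net_link_phy()):

#include <linux/timer.h>
#include <linux/jiffies.h>

#define EXAMPLE_ANEG_TIMER	(HZ)	/* poll the PHY once a second */
#define EXAMPLE_ANEG_TIMEOUT	5	/* polls before starting over */

struct example_card {
	struct timer_list aneg_timer;
	int aneg_count;
	int link_up;			/* would come from the PHY */
};

static void example_setup_aneg(struct example_card *card)
{
	/* restart autonegotiation on the PHY here */
}

/* timer callback, armed with mod_timer(&card->aneg_timer,
 * jiffies + EXAMPLE_ANEG_TIMER) whenever negotiation is (re)started */
static void example_link_poll(unsigned long data)
{
	struct example_card *card = (struct example_card *)data;

	if (card->link_up) {
		card->aneg_count = 0;	/* link is up, stop polling */
		return;
	}

	if (++card->aneg_count > EXAMPLE_ANEG_TIMEOUT) {
		/* no link after EXAMPLE_ANEG_TIMEOUT polls: start over
		 * (the driver above also toggles copper/fibre here) */
		card->aneg_count = 0;
		example_setup_aneg(card);
	}
	mod_timer(&card->aneg_timer, jiffies + EXAMPLE_ANEG_TIMER);
}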
diff --git a/drivers/net/tc35815.c b/drivers/net/tc35815.c
index 370d329d15d9..10e4e85da3fc 100644
--- a/drivers/net/tc35815.c
+++ b/drivers/net/tc35815.c
@@ -23,9 +23,9 @@
23 */ 23 */
24 24
25#ifdef TC35815_NAPI 25#ifdef TC35815_NAPI
26#define DRV_VERSION "1.36-NAPI" 26#define DRV_VERSION "1.37-NAPI"
27#else 27#else
28#define DRV_VERSION "1.36" 28#define DRV_VERSION "1.37"
29#endif 29#endif
30static const char *version = "tc35815.c:v" DRV_VERSION "\n"; 30static const char *version = "tc35815.c:v" DRV_VERSION "\n";
31#define MODNAME "tc35815" 31#define MODNAME "tc35815"
@@ -47,8 +47,8 @@ static const char *version = "tc35815.c:v" DRV_VERSION "\n";
47#include <linux/skbuff.h> 47#include <linux/skbuff.h>
48#include <linux/delay.h> 48#include <linux/delay.h>
49#include <linux/pci.h> 49#include <linux/pci.h>
50#include <linux/mii.h> 50#include <linux/phy.h>
51#include <linux/ethtool.h> 51#include <linux/workqueue.h>
52#include <linux/platform_device.h> 52#include <linux/platform_device.h>
53#include <asm/io.h> 53#include <asm/io.h>
54#include <asm/byteorder.h> 54#include <asm/byteorder.h>
@@ -60,16 +60,16 @@ static const char *version = "tc35815.c:v" DRV_VERSION "\n";
60#define WORKAROUND_100HALF_PROMISC 60#define WORKAROUND_100HALF_PROMISC
61/* #define TC35815_USE_PACKEDBUFFER */ 61/* #define TC35815_USE_PACKEDBUFFER */
62 62
63typedef enum { 63enum tc35815_chiptype {
64 TC35815CF = 0, 64 TC35815CF = 0,
65 TC35815_NWU, 65 TC35815_NWU,
66 TC35815_TX4939, 66 TC35815_TX4939,
67} board_t; 67};
68 68
69/* indexed by board_t, above */ 69/* indexed by tc35815_chiptype, above */
70static const struct { 70static const struct {
71 const char *name; 71 const char *name;
72} board_info[] __devinitdata = { 72} chip_info[] __devinitdata = {
73 { "TOSHIBA TC35815CF 10/100BaseTX" }, 73 { "TOSHIBA TC35815CF 10/100BaseTX" },
74 { "TOSHIBA TC35815 with Wake on LAN" }, 74 { "TOSHIBA TC35815 with Wake on LAN" },
75 { "TOSHIBA TC35815/TX4939" }, 75 { "TOSHIBA TC35815/TX4939" },
@@ -81,209 +81,208 @@ static const struct pci_device_id tc35815_pci_tbl[] = {
81 {PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_TC35815_TX4939), .driver_data = TC35815_TX4939 }, 81 {PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_TC35815_TX4939), .driver_data = TC35815_TX4939 },
82 {0,} 82 {0,}
83}; 83};
84MODULE_DEVICE_TABLE (pci, tc35815_pci_tbl); 84MODULE_DEVICE_TABLE(pci, tc35815_pci_tbl);
85 85
86/* see MODULE_PARM_DESC */ 86/* see MODULE_PARM_DESC */
87static struct tc35815_options { 87static struct tc35815_options {
88 int speed; 88 int speed;
89 int duplex; 89 int duplex;
90 int doforce;
91} options; 90} options;
92 91
93/* 92/*
94 * Registers 93 * Registers
95 */ 94 */
96struct tc35815_regs { 95struct tc35815_regs {
97 volatile __u32 DMA_Ctl; /* 0x00 */ 96 __u32 DMA_Ctl; /* 0x00 */
98 volatile __u32 TxFrmPtr; 97 __u32 TxFrmPtr;
99 volatile __u32 TxThrsh; 98 __u32 TxThrsh;
100 volatile __u32 TxPollCtr; 99 __u32 TxPollCtr;
101 volatile __u32 BLFrmPtr; 100 __u32 BLFrmPtr;
102 volatile __u32 RxFragSize; 101 __u32 RxFragSize;
103 volatile __u32 Int_En; 102 __u32 Int_En;
104 volatile __u32 FDA_Bas; 103 __u32 FDA_Bas;
105 volatile __u32 FDA_Lim; /* 0x20 */ 104 __u32 FDA_Lim; /* 0x20 */
106 volatile __u32 Int_Src; 105 __u32 Int_Src;
107 volatile __u32 unused0[2]; 106 __u32 unused0[2];
108 volatile __u32 PauseCnt; 107 __u32 PauseCnt;
109 volatile __u32 RemPauCnt; 108 __u32 RemPauCnt;
110 volatile __u32 TxCtlFrmStat; 109 __u32 TxCtlFrmStat;
111 volatile __u32 unused1; 110 __u32 unused1;
112 volatile __u32 MAC_Ctl; /* 0x40 */ 111 __u32 MAC_Ctl; /* 0x40 */
113 volatile __u32 CAM_Ctl; 112 __u32 CAM_Ctl;
114 volatile __u32 Tx_Ctl; 113 __u32 Tx_Ctl;
115 volatile __u32 Tx_Stat; 114 __u32 Tx_Stat;
116 volatile __u32 Rx_Ctl; 115 __u32 Rx_Ctl;
117 volatile __u32 Rx_Stat; 116 __u32 Rx_Stat;
118 volatile __u32 MD_Data; 117 __u32 MD_Data;
119 volatile __u32 MD_CA; 118 __u32 MD_CA;
120 volatile __u32 CAM_Adr; /* 0x60 */ 119 __u32 CAM_Adr; /* 0x60 */
121 volatile __u32 CAM_Data; 120 __u32 CAM_Data;
122 volatile __u32 CAM_Ena; 121 __u32 CAM_Ena;
123 volatile __u32 PROM_Ctl; 122 __u32 PROM_Ctl;
124 volatile __u32 PROM_Data; 123 __u32 PROM_Data;
125 volatile __u32 Algn_Cnt; 124 __u32 Algn_Cnt;
126 volatile __u32 CRC_Cnt; 125 __u32 CRC_Cnt;
127 volatile __u32 Miss_Cnt; 126 __u32 Miss_Cnt;
128}; 127};
129 128
130/* 129/*
131 * Bit assignments 130 * Bit assignments
132 */ 131 */
133/* DMA_Ctl bit assign ------------------------------------------------------ */ 132/* DMA_Ctl bit assign ------------------------------------------------------ */
134#define DMA_RxAlign 0x00c00000 /* 1:Reception Alignment */ 133#define DMA_RxAlign 0x00c00000 /* 1:Reception Alignment */
135#define DMA_RxAlign_1 0x00400000 134#define DMA_RxAlign_1 0x00400000
136#define DMA_RxAlign_2 0x00800000 135#define DMA_RxAlign_2 0x00800000
137#define DMA_RxAlign_3 0x00c00000 136#define DMA_RxAlign_3 0x00c00000
138#define DMA_M66EnStat 0x00080000 /* 1:66MHz Enable State */ 137#define DMA_M66EnStat 0x00080000 /* 1:66MHz Enable State */
139#define DMA_IntMask 0x00040000 /* 1:Interrupt mask */ 138#define DMA_IntMask 0x00040000 /* 1:Interrupt mask */
140#define DMA_SWIntReq 0x00020000 /* 1:Software Interrupt request */ 139#define DMA_SWIntReq 0x00020000 /* 1:Software Interrupt request */
141#define DMA_TxWakeUp 0x00010000 /* 1:Transmit Wake Up */ 140#define DMA_TxWakeUp 0x00010000 /* 1:Transmit Wake Up */
142#define DMA_RxBigE 0x00008000 /* 1:Receive Big Endian */ 141#define DMA_RxBigE 0x00008000 /* 1:Receive Big Endian */
143#define DMA_TxBigE 0x00004000 /* 1:Transmit Big Endian */ 142#define DMA_TxBigE 0x00004000 /* 1:Transmit Big Endian */
144#define DMA_TestMode 0x00002000 /* 1:Test Mode */ 143#define DMA_TestMode 0x00002000 /* 1:Test Mode */
145#define DMA_PowrMgmnt 0x00001000 /* 1:Power Management */ 144#define DMA_PowrMgmnt 0x00001000 /* 1:Power Management */
146#define DMA_DmBurst_Mask 0x000001fc /* DMA Burst size */ 145#define DMA_DmBurst_Mask 0x000001fc /* DMA Burst size */
147 146
148/* RxFragSize bit assign --------------------------------------------------- */ 147/* RxFragSize bit assign --------------------------------------------------- */
149#define RxFrag_EnPack 0x00008000 /* 1:Enable Packing */ 148#define RxFrag_EnPack 0x00008000 /* 1:Enable Packing */
150#define RxFrag_MinFragMask 0x00000ffc /* Minimum Fragment */ 149#define RxFrag_MinFragMask 0x00000ffc /* Minimum Fragment */
151 150
152/* MAC_Ctl bit assign ------------------------------------------------------ */ 151/* MAC_Ctl bit assign ------------------------------------------------------ */
153#define MAC_Link10 0x00008000 /* 1:Link Status 10Mbits */ 152#define MAC_Link10 0x00008000 /* 1:Link Status 10Mbits */
154#define MAC_EnMissRoll 0x00002000 /* 1:Enable Missed Roll */ 153#define MAC_EnMissRoll 0x00002000 /* 1:Enable Missed Roll */
155#define MAC_MissRoll 0x00000400 /* 1:Missed Roll */ 154#define MAC_MissRoll 0x00000400 /* 1:Missed Roll */
156#define MAC_Loop10 0x00000080 /* 1:Loop 10 Mbps */ 155#define MAC_Loop10 0x00000080 /* 1:Loop 10 Mbps */
157#define MAC_Conn_Auto 0x00000000 /*00:Connection mode (Automatic) */ 156#define MAC_Conn_Auto 0x00000000 /*00:Connection mode (Automatic) */
158#define MAC_Conn_10M 0x00000020 /*01: (10Mbps endec)*/ 157#define MAC_Conn_10M 0x00000020 /*01: (10Mbps endec)*/
159#define MAC_Conn_Mll 0x00000040 /*10: (Mll clock) */ 158#define MAC_Conn_Mll 0x00000040 /*10: (Mll clock) */
160#define MAC_MacLoop 0x00000010 /* 1:MAC Loopback */ 159#define MAC_MacLoop 0x00000010 /* 1:MAC Loopback */
161#define MAC_FullDup 0x00000008 /* 1:Full Duplex 0:Half Duplex */ 160#define MAC_FullDup 0x00000008 /* 1:Full Duplex 0:Half Duplex */
162#define MAC_Reset 0x00000004 /* 1:Software Reset */ 161#define MAC_Reset 0x00000004 /* 1:Software Reset */
163#define MAC_HaltImm 0x00000002 /* 1:Halt Immediate */ 162#define MAC_HaltImm 0x00000002 /* 1:Halt Immediate */
164#define MAC_HaltReq 0x00000001 /* 1:Halt request */ 163#define MAC_HaltReq 0x00000001 /* 1:Halt request */
165 164
166/* PROM_Ctl bit assign ----------------------------------------------------- */ 165/* PROM_Ctl bit assign ----------------------------------------------------- */
167#define PROM_Busy 0x00008000 /* 1:Busy (Start Operation) */ 166#define PROM_Busy 0x00008000 /* 1:Busy (Start Operation) */
168#define PROM_Read 0x00004000 /*10:Read operation */ 167#define PROM_Read 0x00004000 /*10:Read operation */
169#define PROM_Write 0x00002000 /*01:Write operation */ 168#define PROM_Write 0x00002000 /*01:Write operation */
170#define PROM_Erase 0x00006000 /*11:Erase operation */ 169#define PROM_Erase 0x00006000 /*11:Erase operation */
171 /*00:Enable or Disable Writing, */ 170 /*00:Enable or Disable Writing, */
172 /* as specified in PROM_Addr. */ 171 /* as specified in PROM_Addr. */
173#define PROM_Addr_Ena 0x00000030 /*11xxxx:PROM Write enable */ 172#define PROM_Addr_Ena 0x00000030 /*11xxxx:PROM Write enable */
174 /*00xxxx: disable */ 173 /*00xxxx: disable */
175 174
176/* CAM_Ctl bit assign ------------------------------------------------------ */ 175/* CAM_Ctl bit assign ------------------------------------------------------ */
177#define CAM_CompEn 0x00000010 /* 1:CAM Compare Enable */ 176#define CAM_CompEn 0x00000010 /* 1:CAM Compare Enable */
178#define CAM_NegCAM 0x00000008 /* 1:Reject packets CAM recognizes,*/ 177#define CAM_NegCAM 0x00000008 /* 1:Reject packets CAM recognizes,*/
179 /* accept other */ 178 /* accept other */
180#define CAM_BroadAcc 0x00000004 /* 1:Broadcast accept */ 179#define CAM_BroadAcc 0x00000004 /* 1:Broadcast accept */
181#define CAM_GroupAcc 0x00000002 /* 1:Multicast accept */ 180#define CAM_GroupAcc 0x00000002 /* 1:Multicast accept */
182#define CAM_StationAcc 0x00000001 /* 1:unicast accept */ 181#define CAM_StationAcc 0x00000001 /* 1:unicast accept */
183 182
184/* CAM_Ena bit assign ------------------------------------------------------ */ 183/* CAM_Ena bit assign ------------------------------------------------------ */
185#define CAM_ENTRY_MAX 21 /* CAM Data entry max count */ 184#define CAM_ENTRY_MAX 21 /* CAM Data entry max count */
186#define CAM_Ena_Mask ((1<<CAM_ENTRY_MAX)-1) /* CAM Enable bits (Max 21bits) */ 185#define CAM_Ena_Mask ((1<<CAM_ENTRY_MAX)-1) /* CAM Enable bits (Max 21bits) */
187#define CAM_Ena_Bit(index) (1<<(index)) 186#define CAM_Ena_Bit(index) (1 << (index))
188#define CAM_ENTRY_DESTINATION 0 187#define CAM_ENTRY_DESTINATION 0
189#define CAM_ENTRY_SOURCE 1 188#define CAM_ENTRY_SOURCE 1
190#define CAM_ENTRY_MACCTL 20 189#define CAM_ENTRY_MACCTL 20
191 190
192/* Tx_Ctl bit assign ------------------------------------------------------- */ 191/* Tx_Ctl bit assign ------------------------------------------------------- */
193#define Tx_En 0x00000001 /* 1:Transmit enable */ 192#define Tx_En 0x00000001 /* 1:Transmit enable */
194#define Tx_TxHalt 0x00000002 /* 1:Transmit Halt Request */ 193#define Tx_TxHalt 0x00000002 /* 1:Transmit Halt Request */
195#define Tx_NoPad 0x00000004 /* 1:Suppress Padding */ 194#define Tx_NoPad 0x00000004 /* 1:Suppress Padding */
196#define Tx_NoCRC 0x00000008 /* 1:Suppress Padding */ 195#define Tx_NoCRC 0x00000008 /* 1:Suppress Padding */
197#define Tx_FBack 0x00000010 /* 1:Fast Back-off */ 196#define Tx_FBack 0x00000010 /* 1:Fast Back-off */
198#define Tx_EnUnder 0x00000100 /* 1:Enable Underrun */ 197#define Tx_EnUnder 0x00000100 /* 1:Enable Underrun */
199#define Tx_EnExDefer 0x00000200 /* 1:Enable Excessive Deferral */ 198#define Tx_EnExDefer 0x00000200 /* 1:Enable Excessive Deferral */
200#define Tx_EnLCarr 0x00000400 /* 1:Enable Lost Carrier */ 199#define Tx_EnLCarr 0x00000400 /* 1:Enable Lost Carrier */
201#define Tx_EnExColl 0x00000800 /* 1:Enable Excessive Collision */ 200#define Tx_EnExColl 0x00000800 /* 1:Enable Excessive Collision */
202#define Tx_EnLateColl 0x00001000 /* 1:Enable Late Collision */ 201#define Tx_EnLateColl 0x00001000 /* 1:Enable Late Collision */
203#define Tx_EnTxPar 0x00002000 /* 1:Enable Transmit Parity */ 202#define Tx_EnTxPar 0x00002000 /* 1:Enable Transmit Parity */
204#define Tx_EnComp 0x00004000 /* 1:Enable Completion */ 203#define Tx_EnComp 0x00004000 /* 1:Enable Completion */
205 204
206/* Tx_Stat bit assign ------------------------------------------------------ */ 205/* Tx_Stat bit assign ------------------------------------------------------ */
207#define Tx_TxColl_MASK 0x0000000F /* Tx Collision Count */ 206#define Tx_TxColl_MASK 0x0000000F /* Tx Collision Count */
208#define Tx_ExColl 0x00000010 /* Excessive Collision */ 207#define Tx_ExColl 0x00000010 /* Excessive Collision */
209#define Tx_TXDefer 0x00000020 /* Transmit Deferred */ 208#define Tx_TXDefer 0x00000020 /* Transmit Deferred */
210#define Tx_Paused 0x00000040 /* Transmit Paused */ 209#define Tx_Paused 0x00000040 /* Transmit Paused */
211#define Tx_IntTx 0x00000080 /* Interrupt on Tx */ 210#define Tx_IntTx 0x00000080 /* Interrupt on Tx */
212#define Tx_Under 0x00000100 /* Underrun */ 211#define Tx_Under 0x00000100 /* Underrun */
213#define Tx_Defer 0x00000200 /* Deferral */ 212#define Tx_Defer 0x00000200 /* Deferral */
214#define Tx_NCarr 0x00000400 /* No Carrier */ 213#define Tx_NCarr 0x00000400 /* No Carrier */
215#define Tx_10Stat 0x00000800 /* 10Mbps Status */ 214#define Tx_10Stat 0x00000800 /* 10Mbps Status */
216#define Tx_LateColl 0x00001000 /* Late Collision */ 215#define Tx_LateColl 0x00001000 /* Late Collision */
217#define Tx_TxPar 0x00002000 /* Tx Parity Error */ 216#define Tx_TxPar 0x00002000 /* Tx Parity Error */
218#define Tx_Comp 0x00004000 /* Completion */ 217#define Tx_Comp 0x00004000 /* Completion */
219#define Tx_Halted 0x00008000 /* Tx Halted */ 218#define Tx_Halted 0x00008000 /* Tx Halted */
220#define Tx_SQErr 0x00010000 /* Signal Quality Error(SQE) */ 219#define Tx_SQErr 0x00010000 /* Signal Quality Error(SQE) */
221 220
222/* Rx_Ctl bit asign -------------------------------------------------------- */ 221/* Rx_Ctl bit asign -------------------------------------------------------- */
223#define Rx_EnGood 0x00004000 /* 1:Enable Good */ 222#define Rx_EnGood 0x00004000 /* 1:Enable Good */
224#define Rx_EnRxPar 0x00002000 /* 1:Enable Receive Parity */ 223#define Rx_EnRxPar 0x00002000 /* 1:Enable Receive Parity */
225#define Rx_EnLongErr 0x00000800 /* 1:Enable Long Error */ 224#define Rx_EnLongErr 0x00000800 /* 1:Enable Long Error */
226#define Rx_EnOver 0x00000400 /* 1:Enable OverFlow */ 225#define Rx_EnOver 0x00000400 /* 1:Enable OverFlow */
227#define Rx_EnCRCErr 0x00000200 /* 1:Enable CRC Error */ 226#define Rx_EnCRCErr 0x00000200 /* 1:Enable CRC Error */
228#define Rx_EnAlign 0x00000100 /* 1:Enable Alignment */ 227#define Rx_EnAlign 0x00000100 /* 1:Enable Alignment */
229#define Rx_IgnoreCRC 0x00000040 /* 1:Ignore CRC Value */ 228#define Rx_IgnoreCRC 0x00000040 /* 1:Ignore CRC Value */
230#define Rx_StripCRC 0x00000010 /* 1:Strip CRC Value */ 229#define Rx_StripCRC 0x00000010 /* 1:Strip CRC Value */
231#define Rx_ShortEn 0x00000008 /* 1:Short Enable */ 230#define Rx_ShortEn 0x00000008 /* 1:Short Enable */
232#define Rx_LongEn 0x00000004 /* 1:Long Enable */ 231#define Rx_LongEn 0x00000004 /* 1:Long Enable */
233#define Rx_RxHalt 0x00000002 /* 1:Receive Halt Request */ 232#define Rx_RxHalt 0x00000002 /* 1:Receive Halt Request */
234#define Rx_RxEn 0x00000001 /* 1:Receive Intrrupt Enable */ 233#define Rx_RxEn 0x00000001 /* 1:Receive Intrrupt Enable */
235 234
236/* Rx_Stat bit asign ------------------------------------------------------- */ 235/* Rx_Stat bit asign ------------------------------------------------------- */
237#define Rx_Halted 0x00008000 /* Rx Halted */ 236#define Rx_Halted 0x00008000 /* Rx Halted */
238#define Rx_Good 0x00004000 /* Rx Good */ 237#define Rx_Good 0x00004000 /* Rx Good */
239#define Rx_RxPar 0x00002000 /* Rx Parity Error */ 238#define Rx_RxPar 0x00002000 /* Rx Parity Error */
240 /* 0x00001000 not use */ 239 /* 0x00001000 not use */
241#define Rx_LongErr 0x00000800 /* Rx Long Error */ 240#define Rx_LongErr 0x00000800 /* Rx Long Error */
242#define Rx_Over 0x00000400 /* Rx Overflow */ 241#define Rx_Over 0x00000400 /* Rx Overflow */
243#define Rx_CRCErr 0x00000200 /* Rx CRC Error */ 242#define Rx_CRCErr 0x00000200 /* Rx CRC Error */
244#define Rx_Align 0x00000100 /* Rx Alignment Error */ 243#define Rx_Align 0x00000100 /* Rx Alignment Error */
245#define Rx_10Stat 0x00000080 /* Rx 10Mbps Status */ 244#define Rx_10Stat 0x00000080 /* Rx 10Mbps Status */
246#define Rx_IntRx 0x00000040 /* Rx Interrupt */ 245#define Rx_IntRx 0x00000040 /* Rx Interrupt */
247#define Rx_CtlRecd 0x00000020 /* Rx Control Receive */ 246#define Rx_CtlRecd 0x00000020 /* Rx Control Receive */
248 247
249#define Rx_Stat_Mask 0x0000EFC0 /* Rx All Status Mask */ 248#define Rx_Stat_Mask 0x0000EFC0 /* Rx All Status Mask */
250 249
251/* Int_En bit asign -------------------------------------------------------- */ 250/* Int_En bit asign -------------------------------------------------------- */
252#define Int_NRAbtEn 0x00000800 /* 1:Non-recoverable Abort Enable */ 251#define Int_NRAbtEn 0x00000800 /* 1:Non-recoverable Abort Enable */
-#define Int_TxCtlCmpEn	0x00000400	/* 1:Transmit Control Complete Enable */
+#define Int_TxCtlCmpEn	0x00000400	/* 1:Transmit Ctl Complete Enable */
254#define Int_DmParErrEn 0x00000200 /* 1:DMA Parity Error Enable */ 253#define Int_DmParErrEn 0x00000200 /* 1:DMA Parity Error Enable */
255#define Int_DParDEn 0x00000100 /* 1:Data Parity Error Enable */ 254#define Int_DParDEn 0x00000100 /* 1:Data Parity Error Enable */
256#define Int_EarNotEn 0x00000080 /* 1:Early Notify Enable */ 255#define Int_EarNotEn 0x00000080 /* 1:Early Notify Enable */
257#define Int_DParErrEn 0x00000040 /* 1:Detected Parity Error Enable */ 256#define Int_DParErrEn 0x00000040 /* 1:Detected Parity Error Enable */
258#define Int_SSysErrEn 0x00000020 /* 1:Signalled System Error Enable */ 257#define Int_SSysErrEn 0x00000020 /* 1:Signalled System Error Enable */
259#define Int_RMasAbtEn 0x00000010 /* 1:Received Master Abort Enable */ 258#define Int_RMasAbtEn 0x00000010 /* 1:Received Master Abort Enable */
260#define Int_RTargAbtEn 0x00000008 /* 1:Received Target Abort Enable */ 259#define Int_RTargAbtEn 0x00000008 /* 1:Received Target Abort Enable */
261#define Int_STargAbtEn 0x00000004 /* 1:Signalled Target Abort Enable */ 260#define Int_STargAbtEn 0x00000004 /* 1:Signalled Target Abort Enable */
262#define Int_BLExEn 0x00000002 /* 1:Buffer List Exhausted Enable */ 261#define Int_BLExEn 0x00000002 /* 1:Buffer List Exhausted Enable */
263#define Int_FDAExEn 0x00000001 /* 1:Free Descriptor Area */ 262#define Int_FDAExEn 0x00000001 /* 1:Free Descriptor Area */
264 /* Exhausted Enable */ 263 /* Exhausted Enable */
265 264
266/* Int_Src bit asign ------------------------------------------------------- */ 265/* Int_Src bit asign ------------------------------------------------------- */
267#define Int_NRabt 0x00004000 /* 1:Non Recoverable error */ 266#define Int_NRabt 0x00004000 /* 1:Non Recoverable error */
268#define Int_DmParErrStat 0x00002000 /* 1:DMA Parity Error & Clear */ 267#define Int_DmParErrStat 0x00002000 /* 1:DMA Parity Error & Clear */
269#define Int_BLEx 0x00001000 /* 1:Buffer List Empty & Clear */ 268#define Int_BLEx 0x00001000 /* 1:Buffer List Empty & Clear */
270#define Int_FDAEx 0x00000800 /* 1:FDA Empty & Clear */ 269#define Int_FDAEx 0x00000800 /* 1:FDA Empty & Clear */
271#define Int_IntNRAbt 0x00000400 /* 1:Non Recoverable Abort */ 270#define Int_IntNRAbt 0x00000400 /* 1:Non Recoverable Abort */
272#define Int_IntCmp 0x00000200 /* 1:MAC control packet complete */ 271#define Int_IntCmp 0x00000200 /* 1:MAC control packet complete */
273#define Int_IntExBD 0x00000100 /* 1:Interrupt Extra BD & Clear */ 272#define Int_IntExBD 0x00000100 /* 1:Interrupt Extra BD & Clear */
274#define Int_DmParErr 0x00000080 /* 1:DMA Parity Error & Clear */ 273#define Int_DmParErr 0x00000080 /* 1:DMA Parity Error & Clear */
275#define Int_IntEarNot 0x00000040 /* 1:Receive Data write & Clear */ 274#define Int_IntEarNot 0x00000040 /* 1:Receive Data write & Clear */
276#define Int_SWInt 0x00000020 /* 1:Software request & Clear */ 275#define Int_SWInt 0x00000020 /* 1:Software request & Clear */
277#define Int_IntBLEx 0x00000010 /* 1:Buffer List Empty & Clear */ 276#define Int_IntBLEx 0x00000010 /* 1:Buffer List Empty & Clear */
278#define Int_IntFDAEx 0x00000008 /* 1:FDA Empty & Clear */ 277#define Int_IntFDAEx 0x00000008 /* 1:FDA Empty & Clear */
279#define Int_IntPCI 0x00000004 /* 1:PCI controller & Clear */ 278#define Int_IntPCI 0x00000004 /* 1:PCI controller & Clear */
280#define Int_IntMacRx 0x00000002 /* 1:Rx controller & Clear */ 279#define Int_IntMacRx 0x00000002 /* 1:Rx controller & Clear */
281#define Int_IntMacTx 0x00000001 /* 1:Tx controller & Clear */ 280#define Int_IntMacTx 0x00000001 /* 1:Tx controller & Clear */
282 281
283/* MD_CA bit asign --------------------------------------------------------- */ 282/* MD_CA bit asign --------------------------------------------------------- */
284#define MD_CA_PreSup 0x00001000 /* 1:Preamble Supress */ 283#define MD_CA_PreSup 0x00001000 /* 1:Preamble Supress */
285#define MD_CA_Busy 0x00000800 /* 1:Busy (Start Operation) */ 284#define MD_CA_Busy 0x00000800 /* 1:Busy (Start Operation) */
286#define MD_CA_Wr 0x00000400 /* 1:Write 0:Read */ 285#define MD_CA_Wr 0x00000400 /* 1:Write 0:Read */
287 286
288 287
289/* 288/*
@@ -307,24 +306,24 @@ struct BDesc {
307#define FD_ALIGN 16 306#define FD_ALIGN 16
308 307
309/* Frame Descripter bit asign ---------------------------------------------- */ 308/* Frame Descripter bit asign ---------------------------------------------- */
310#define FD_FDLength_MASK 0x0000FFFF /* Length MASK */ 309#define FD_FDLength_MASK 0x0000FFFF /* Length MASK */
311#define FD_BDCnt_MASK 0x001F0000 /* BD count MASK in FD */ 310#define FD_BDCnt_MASK 0x001F0000 /* BD count MASK in FD */
312#define FD_FrmOpt_MASK 0x7C000000 /* Frame option MASK */ 311#define FD_FrmOpt_MASK 0x7C000000 /* Frame option MASK */
313#define FD_FrmOpt_BigEndian 0x40000000 /* Tx/Rx */ 312#define FD_FrmOpt_BigEndian 0x40000000 /* Tx/Rx */
314#define FD_FrmOpt_IntTx 0x20000000 /* Tx only */ 313#define FD_FrmOpt_IntTx 0x20000000 /* Tx only */
315#define FD_FrmOpt_NoCRC 0x10000000 /* Tx only */ 314#define FD_FrmOpt_NoCRC 0x10000000 /* Tx only */
316#define FD_FrmOpt_NoPadding 0x08000000 /* Tx only */ 315#define FD_FrmOpt_NoPadding 0x08000000 /* Tx only */
317#define FD_FrmOpt_Packing 0x04000000 /* Rx only */ 316#define FD_FrmOpt_Packing 0x04000000 /* Rx only */
318#define FD_CownsFD 0x80000000 /* FD Controller owner bit */ 317#define FD_CownsFD 0x80000000 /* FD Controller owner bit */
319#define FD_Next_EOL 0x00000001 /* FD EOL indicator */ 318#define FD_Next_EOL 0x00000001 /* FD EOL indicator */
320#define FD_BDCnt_SHIFT 16 319#define FD_BDCnt_SHIFT 16
321 320
322/* Buffer Descripter bit asign --------------------------------------------- */ 321/* Buffer Descripter bit asign --------------------------------------------- */
323#define BD_BuffLength_MASK 0x0000FFFF /* Recieve Data Size */ 322#define BD_BuffLength_MASK 0x0000FFFF /* Recieve Data Size */
324#define BD_RxBDID_MASK 0x00FF0000 /* BD ID Number MASK */ 323#define BD_RxBDID_MASK 0x00FF0000 /* BD ID Number MASK */
325#define BD_RxBDSeqN_MASK 0x7F000000 /* Rx BD Sequence Number */ 324#define BD_RxBDSeqN_MASK 0x7F000000 /* Rx BD Sequence Number */
326#define BD_CownsBD 0x80000000 /* BD Controller owner bit */ 325#define BD_CownsBD 0x80000000 /* BD Controller owner bit */
327#define BD_RxBDID_SHIFT 16 326#define BD_RxBDID_SHIFT 16
328#define BD_RxBDSeqN_SHIFT 24 327#define BD_RxBDSeqN_SHIFT 24
329 328
330 329
@@ -348,13 +347,15 @@ struct BDesc {
348 Int_STargAbtEn | \ 347 Int_STargAbtEn | \
349 Int_BLExEn | Int_FDAExEn) /* maybe 0xb7f*/ 348 Int_BLExEn | Int_FDAExEn) /* maybe 0xb7f*/
350#define DMA_CTL_CMD DMA_BURST_SIZE 349#define DMA_CTL_CMD DMA_BURST_SIZE
-#define HAVE_DMA_RXALIGN(lp)	likely((lp)->boardtype != TC35815CF)
+#define HAVE_DMA_RXALIGN(lp)	likely((lp)->chiptype != TC35815CF)
352 351
353/* Tuning parameters */ 352/* Tuning parameters */
354#define DMA_BURST_SIZE 32 353#define DMA_BURST_SIZE 32
355#define TX_THRESHOLD 1024 354#define TX_THRESHOLD 1024
-#define TX_THRESHOLD_MAX	1536	/* used threshold with packet max byte for low pci transfer ability.*/
-#define TX_THRESHOLD_KEEP_LIMIT	10	/* setting threshold max value when overrun error occured this count. */
+/* used threshold with packet max byte for low pci transfer ability.*/
+#define TX_THRESHOLD_MAX	1536
+/* setting threshold max value when overrun error occured this count. */
+#define TX_THRESHOLD_KEEP_LIMIT	10
358 359
359/* 16 + RX_BUF_NUM * 8 + RX_FD_NUM * 16 + TX_FD_NUM * 32 <= PAGE_SIZE*FD_PAGE_NUM */ 360/* 16 + RX_BUF_NUM * 8 + RX_FD_NUM * 16 + TX_FD_NUM * 32 <= PAGE_SIZE*FD_PAGE_NUM */
360#ifdef TC35815_USE_PACKEDBUFFER 361#ifdef TC35815_USE_PACKEDBUFFER
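The three tuning constants above drive an adaptive transmit threshold: the driver starts transmitting once TX_THRESHOLD bytes are buffered and, after TX_THRESHOLD_KEEP_LIMIT FIFO underruns, pins the threshold at TX_THRESHOLD_MAX (the tc35815_check_tx_stat hunk further down counts underruns against this limit). A minimal sketch of that policy follows; the struct and helper names are illustrative only, the real driver keeps the counter in lp->lstats.tx_underrun and writes the threshold register itself.

/* Sketch only: adaptive Tx threshold on FIFO underrun (illustrative names). */
struct tx_threshold_state {
	unsigned int threshold;	/* bytes buffered before Tx starts */
	unsigned int underruns;	/* underruns seen so far */
};

static void tx_threshold_on_underrun(struct tx_threshold_state *st)
{
	if (st->underruns < TX_THRESHOLD_KEEP_LIMIT) {
		st->underruns++;
		if (st->underruns >= TX_THRESHOLD_KEEP_LIMIT)
			/* give up on the low threshold for good */
			st->threshold = TX_THRESHOLD_MAX;
	}
}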
@@ -396,21 +397,12 @@ struct FrFD {
396}; 397};
397 398
398 399
-#define tc_readl(addr)	readl(addr)
-#define tc_writel(d, addr)	writel(d, addr)
+#define tc_readl(addr)	ioread32(addr)
+#define tc_writel(d, addr)	iowrite32(d, addr)
401 402
402#define TC35815_TX_TIMEOUT msecs_to_jiffies(400) 403#define TC35815_TX_TIMEOUT msecs_to_jiffies(400)
403 404
-/* Timer state engine. */
-enum tc35815_timer_state {
-	arbwait  = 0,	/* Waiting for auto negotiation to complete. */
-	lupwait  = 1,	/* Auto-neg complete, awaiting link-up status. */
-	ltrywait = 2,	/* Forcing try of all modes, from fastest to slowest. */
-	asleep   = 3,	/* Time inactive. */
-	lcheck   = 4,	/* Check link status. */
-};
-
-/* Information that need to be kept for each board. */
+/* Information that need to be kept for each controller. */
414struct tc35815_local { 406struct tc35815_local {
415 struct pci_dev *pci_dev; 407 struct pci_dev *pci_dev;
416 408
@@ -418,12 +410,11 @@ struct tc35815_local {
418 struct napi_struct napi; 410 struct napi_struct napi;
419 411
420 /* statistics */ 412 /* statistics */
421 struct net_device_stats stats;
422 struct { 413 struct {
423 int max_tx_qlen; 414 int max_tx_qlen;
424 int tx_ints; 415 int tx_ints;
425 int rx_ints; 416 int rx_ints;
426 int tx_underrun; 417 int tx_underrun;
427 } lstats; 418 } lstats;
428 419
429 /* Tx control lock. This protects the transmit buffer ring 420 /* Tx control lock. This protects the transmit buffer ring
@@ -433,12 +424,12 @@ struct tc35815_local {
433 */ 424 */
434 spinlock_t lock; 425 spinlock_t lock;
435 426
-	int phy_addr;
-	int fullduplex;
-	unsigned short saved_lpa;
-	struct timer_list timer;
-	enum tc35815_timer_state timer_state;	/* State of auto-neg timer. */
-	unsigned int timer_ticks;	/* Number of clicks at each state */
+	struct mii_bus mii_bus;
+	struct phy_device *phy_dev;
+	int duplex;
+	int speed;
+	int link;
+	struct work_struct restart_work;
442 433
443 /* 434 /*
444 * Transmitting: Batch Mode. 435 * Transmitting: Batch Mode.
@@ -452,7 +443,7 @@ struct tc35815_local {
452 * RX_BUF_NUM BD in Free Buffer FD. 443 * RX_BUF_NUM BD in Free Buffer FD.
453 * One Free Buffer BD has ETH_FRAME_LEN data buffer. 444 * One Free Buffer BD has ETH_FRAME_LEN data buffer.
454 */ 445 */
-	void * fd_buf;	/* for TxFD, RxFD, FrFD */
+	void *fd_buf;	/* for TxFD, RxFD, FrFD */
456 dma_addr_t fd_buf_dma; 447 dma_addr_t fd_buf_dma;
457 struct TxFD *tfd_base; 448 struct TxFD *tfd_base;
458 unsigned int tfd_start; 449 unsigned int tfd_start;
@@ -463,7 +454,7 @@ struct tc35815_local {
463 struct FrFD *fbl_ptr; 454 struct FrFD *fbl_ptr;
464#ifdef TC35815_USE_PACKEDBUFFER 455#ifdef TC35815_USE_PACKEDBUFFER
465 unsigned char fbl_curid; 456 unsigned char fbl_curid;
-	void * data_buf[RX_BUF_NUM];	/* packing */
+	void *data_buf[RX_BUF_NUM];	/* packing */
467 dma_addr_t data_buf_dma[RX_BUF_NUM]; 458 dma_addr_t data_buf_dma[RX_BUF_NUM];
468 struct { 459 struct {
469 struct sk_buff *skb; 460 struct sk_buff *skb;
@@ -476,10 +467,8 @@ struct tc35815_local {
476 dma_addr_t skb_dma; 467 dma_addr_t skb_dma;
477 } tx_skbs[TX_FD_NUM], rx_skbs[RX_BUF_NUM]; 468 } tx_skbs[TX_FD_NUM], rx_skbs[RX_BUF_NUM];
478#endif 469#endif
-	struct mii_if_info mii;
-	unsigned short mii_id[2];
 	u32 msg_enable;
-	board_t boardtype;
+	enum tc35815_chiptype chiptype;
483}; 472};
484 473
485static inline dma_addr_t fd_virt_to_bus(struct tc35815_local *lp, void *virt) 474static inline dma_addr_t fd_virt_to_bus(struct tc35815_local *lp, void *virt)
@@ -506,13 +495,14 @@ static inline void *rxbuf_bus_to_virt(struct tc35815_local *lp, dma_addr_t bus)
506} 495}
507 496
508#define TC35815_DMA_SYNC_ONDEMAND 497#define TC35815_DMA_SYNC_ONDEMAND
-static void* alloc_rxbuf_page(struct pci_dev *hwdev, dma_addr_t *dma_handle)
+static void *alloc_rxbuf_page(struct pci_dev *hwdev, dma_addr_t *dma_handle)
510{ 499{
511#ifdef TC35815_DMA_SYNC_ONDEMAND 500#ifdef TC35815_DMA_SYNC_ONDEMAND
512 void *buf; 501 void *buf;
513 /* pci_map + pci_dma_sync will be more effective than 502 /* pci_map + pci_dma_sync will be more effective than
514 * pci_alloc_consistent on some archs. */ 503 * pci_alloc_consistent on some archs. */
-	if ((buf = (void *)__get_free_page(GFP_ATOMIC)) == NULL)
+	buf = (void *)__get_free_page(GFP_ATOMIC);
+	if (!buf)
516 return NULL; 506 return NULL;
517 *dma_handle = pci_map_single(hwdev, buf, PAGE_SIZE, 507 *dma_handle = pci_map_single(hwdev, buf, PAGE_SIZE,
518 PCI_DMA_FROMDEVICE); 508 PCI_DMA_FROMDEVICE);
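The comment above argues for a streaming pci_map_single() mapping plus on-demand sync rather than a coherent allocation. For reference, a minimal sketch of the matching sync pair; the helper names are illustrative and not part of the driver, only the pci_dma_sync_single_for_cpu()/_for_device() calls are the kernel API of this era.

/* Sketch only: on-demand sync of a streaming Rx mapping (illustrative names). */
static void rxbuf_sync_for_cpu(struct pci_dev *hwdev,
			       dma_addr_t dma_handle, size_t len)
{
	/* make device-written data visible to the CPU before parsing it */
	pci_dma_sync_single_for_cpu(hwdev, dma_handle, len,
				    PCI_DMA_FROMDEVICE);
}

static void rxbuf_sync_for_device(struct pci_dev *hwdev,
				  dma_addr_t dma_handle, size_t len)
{
	/* hand the buffer back to the device for the next receive */
	pci_dma_sync_single_for_device(hwdev, dma_handle, len,
				       PCI_DMA_FROMDEVICE);
}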
@@ -577,7 +567,7 @@ static void tc35815_txdone(struct net_device *dev);
577static int tc35815_close(struct net_device *dev); 567static int tc35815_close(struct net_device *dev);
578static struct net_device_stats *tc35815_get_stats(struct net_device *dev); 568static struct net_device_stats *tc35815_get_stats(struct net_device *dev);
579static void tc35815_set_multicast_list(struct net_device *dev); 569static void tc35815_set_multicast_list(struct net_device *dev);
580static void tc35815_tx_timeout(struct net_device *dev); 570static void tc35815_tx_timeout(struct net_device *dev);
581static int tc35815_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); 571static int tc35815_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
582#ifdef CONFIG_NET_POLL_CONTROLLER 572#ifdef CONFIG_NET_POLL_CONTROLLER
583static void tc35815_poll_controller(struct net_device *dev); 573static void tc35815_poll_controller(struct net_device *dev);
@@ -585,21 +575,225 @@ static void tc35815_poll_controller(struct net_device *dev);
585static const struct ethtool_ops tc35815_ethtool_ops; 575static const struct ethtool_ops tc35815_ethtool_ops;
586 576
587/* Example routines you must write ;->. */ 577/* Example routines you must write ;->. */
588static void tc35815_chip_reset(struct net_device *dev); 578static void tc35815_chip_reset(struct net_device *dev);
589static void tc35815_chip_init(struct net_device *dev); 579static void tc35815_chip_init(struct net_device *dev);
-static void	tc35815_find_phy(struct net_device *dev);
-static void	tc35815_phy_chip_init(struct net_device *dev);
 
 #ifdef DEBUG
 static void	panic_queues(struct net_device *dev);
 #endif
 
-static void	tc35815_timer(unsigned long data);
-static void	tc35815_start_auto_negotiation(struct net_device *dev,
-					       struct ethtool_cmd *ep);
-static int	tc_mdio_read(struct net_device *dev, int phy_id, int location);
-static void	tc_mdio_write(struct net_device *dev, int phy_id, int location,
-			      int val);
+static void	tc35815_restart_work(struct work_struct *work);
+
+static int tc_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
+{
+	struct net_device *dev = bus->priv;
+	struct tc35815_regs __iomem *tr =
591 (struct tc35815_regs __iomem *)dev->base_addr;
592 unsigned long timeout = jiffies + 10;
593
594 tc_writel(MD_CA_Busy | (mii_id << 5) | (regnum & 0x1f), &tr->MD_CA);
595 while (tc_readl(&tr->MD_CA) & MD_CA_Busy) {
596 if (time_after(jiffies, timeout))
597 return -EIO;
598 cpu_relax();
599 }
600 return tc_readl(&tr->MD_Data) & 0xffff;
601}
602
603static int tc_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 val)
604{
605 struct net_device *dev = bus->priv;
606 struct tc35815_regs __iomem *tr =
607 (struct tc35815_regs __iomem *)dev->base_addr;
608 unsigned long timeout = jiffies + 10;
609
610 tc_writel(val, &tr->MD_Data);
611 tc_writel(MD_CA_Busy | MD_CA_Wr | (mii_id << 5) | (regnum & 0x1f),
612 &tr->MD_CA);
613 while (tc_readl(&tr->MD_CA) & MD_CA_Busy) {
614 if (time_after(jiffies, timeout))
615 return -EIO;
616 cpu_relax();
617 }
618 return 0;
619}
620
621static void tc_handle_link_change(struct net_device *dev)
622{
623 struct tc35815_local *lp = netdev_priv(dev);
624 struct phy_device *phydev = lp->phy_dev;
625 unsigned long flags;
626 int status_change = 0;
627
628 spin_lock_irqsave(&lp->lock, flags);
629 if (phydev->link &&
630 (lp->speed != phydev->speed || lp->duplex != phydev->duplex)) {
631 struct tc35815_regs __iomem *tr =
632 (struct tc35815_regs __iomem *)dev->base_addr;
633 u32 reg;
634
635 reg = tc_readl(&tr->MAC_Ctl);
636 reg |= MAC_HaltReq;
637 tc_writel(reg, &tr->MAC_Ctl);
638 if (phydev->duplex == DUPLEX_FULL)
639 reg |= MAC_FullDup;
640 else
641 reg &= ~MAC_FullDup;
642 tc_writel(reg, &tr->MAC_Ctl);
643 reg &= ~MAC_HaltReq;
644 tc_writel(reg, &tr->MAC_Ctl);
645
646 /*
647 * TX4939 PCFG.SPEEDn bit will be changed on
648 * NETDEV_CHANGE event.
649 */
650
651#if !defined(NO_CHECK_CARRIER) && defined(WORKAROUND_LOSTCAR)
652 /*
653 * WORKAROUND: enable LostCrS only if half duplex
654 * operation.
655 * (TX4939 does not have EnLCarr)
656 */
657 if (phydev->duplex == DUPLEX_HALF &&
658 lp->chiptype != TC35815_TX4939)
659 tc_writel(tc_readl(&tr->Tx_Ctl) | Tx_EnLCarr,
660 &tr->Tx_Ctl);
661#endif
662
663 lp->speed = phydev->speed;
664 lp->duplex = phydev->duplex;
665 status_change = 1;
666 }
667
668 if (phydev->link != lp->link) {
669 if (phydev->link) {
670#ifdef WORKAROUND_100HALF_PROMISC
671 /* delayed promiscuous enabling */
672 if (dev->flags & IFF_PROMISC)
673 tc35815_set_multicast_list(dev);
674#endif
675 netif_schedule(dev);
676 } else {
677 lp->speed = 0;
678 lp->duplex = -1;
679 }
680 lp->link = phydev->link;
681
682 status_change = 1;
683 }
684 spin_unlock_irqrestore(&lp->lock, flags);
685
686 if (status_change && netif_msg_link(lp)) {
687 phy_print_status(phydev);
688#ifdef DEBUG
689 printk(KERN_DEBUG
690 "%s: MII BMCR %04x BMSR %04x LPA %04x\n",
691 dev->name,
692 phy_read(phydev, MII_BMCR),
693 phy_read(phydev, MII_BMSR),
694 phy_read(phydev, MII_LPA));
695#endif
696 }
697}
698
699static int tc_mii_probe(struct net_device *dev)
700{
701 struct tc35815_local *lp = netdev_priv(dev);
702 struct phy_device *phydev = NULL;
703 int phy_addr;
704 u32 dropmask;
705
706 /* find the first phy */
707 for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) {
708 if (lp->mii_bus.phy_map[phy_addr]) {
709 if (phydev) {
710 printk(KERN_ERR "%s: multiple PHYs found\n",
711 dev->name);
712 return -EINVAL;
713 }
714 phydev = lp->mii_bus.phy_map[phy_addr];
715 break;
716 }
717 }
718
719 if (!phydev) {
720 printk(KERN_ERR "%s: no PHY found\n", dev->name);
721 return -ENODEV;
722 }
723
724 /* attach the mac to the phy */
725 phydev = phy_connect(dev, phydev->dev.bus_id,
726 &tc_handle_link_change, 0,
727 lp->chiptype == TC35815_TX4939 ?
728 PHY_INTERFACE_MODE_RMII : PHY_INTERFACE_MODE_MII);
729 if (IS_ERR(phydev)) {
730 printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
731 return PTR_ERR(phydev);
732 }
733 printk(KERN_INFO "%s: attached PHY driver [%s] "
734 "(mii_bus:phy_addr=%s, id=%x)\n",
735 dev->name, phydev->drv->name, phydev->dev.bus_id,
736 phydev->phy_id);
737
738 /* mask with MAC supported features */
739 phydev->supported &= PHY_BASIC_FEATURES;
740 dropmask = 0;
741 if (options.speed == 10)
742 dropmask |= SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full;
743 else if (options.speed == 100)
744 dropmask |= SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full;
745 if (options.duplex == 1)
746 dropmask |= SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Full;
747 else if (options.duplex == 2)
748 dropmask |= SUPPORTED_10baseT_Half | SUPPORTED_100baseT_Half;
749 phydev->supported &= ~dropmask;
750 phydev->advertising = phydev->supported;
751
752 lp->link = 0;
753 lp->speed = 0;
754 lp->duplex = -1;
755 lp->phy_dev = phydev;
756
757 return 0;
758}
759
760static int tc_mii_init(struct net_device *dev)
761{
762 struct tc35815_local *lp = netdev_priv(dev);
763 int err;
764 int i;
765
766 lp->mii_bus.name = "tc35815_mii_bus";
767 lp->mii_bus.read = tc_mdio_read;
768 lp->mii_bus.write = tc_mdio_write;
769 snprintf(lp->mii_bus.id, MII_BUS_ID_SIZE, "%x",
770 (lp->pci_dev->bus->number << 8) | lp->pci_dev->devfn);
771 lp->mii_bus.priv = dev;
772 lp->mii_bus.dev = &lp->pci_dev->dev;
773 lp->mii_bus.irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
774 if (!lp->mii_bus.irq) {
775 err = -ENOMEM;
776 goto err_out;
777 }
778
779 for (i = 0; i < PHY_MAX_ADDR; i++)
780 lp->mii_bus.irq[i] = PHY_POLL;
781
782 err = mdiobus_register(&lp->mii_bus);
783 if (err)
784 goto err_out_free_mdio_irq;
785 err = tc_mii_probe(dev);
786 if (err)
787 goto err_out_unregister_bus;
788 return 0;
789
790err_out_unregister_bus:
791 mdiobus_unregister(&lp->mii_bus);
792err_out_free_mdio_irq:
793 kfree(lp->mii_bus.irq);
794err_out:
795 return err;
796}
603 797
604#ifdef CONFIG_CPU_TX49XX 798#ifdef CONFIG_CPU_TX49XX
605/* 799/*
@@ -617,7 +811,7 @@ static int __devinit tc35815_mac_match(struct device *dev, void *data)
617 811
618static int __devinit tc35815_read_plat_dev_addr(struct net_device *dev) 812static int __devinit tc35815_read_plat_dev_addr(struct net_device *dev)
619{ 813{
-	struct tc35815_local *lp = dev->priv;
+	struct tc35815_local *lp = netdev_priv(dev);
621 struct device *pd = bus_find_device(&platform_bus_type, NULL, 815 struct device *pd = bus_find_device(&platform_bus_type, NULL,
622 lp->pci_dev, tc35815_mac_match); 816 lp->pci_dev, tc35815_mac_match);
623 if (pd) { 817 if (pd) {
@@ -635,7 +829,7 @@ static int __devinit tc35815_read_plat_dev_addr(struct net_device *dev)
635} 829}
636#endif 830#endif
637 831
-static int __devinit tc35815_init_dev_addr (struct net_device *dev)
+static int __devinit tc35815_init_dev_addr(struct net_device *dev)
639{ 833{
640 struct tc35815_regs __iomem *tr = 834 struct tc35815_regs __iomem *tr =
641 (struct tc35815_regs __iomem *)dev->base_addr; 835 (struct tc35815_regs __iomem *)dev->base_addr;
@@ -657,21 +851,21 @@ static int __devinit tc35815_init_dev_addr (struct net_device *dev)
657 return 0; 851 return 0;
658} 852}
659 853
-static int __devinit tc35815_init_one (struct pci_dev *pdev,
-				       const struct pci_device_id *ent)
+static int __devinit tc35815_init_one(struct pci_dev *pdev,
+				      const struct pci_device_id *ent)
662{ 856{
663 void __iomem *ioaddr = NULL; 857 void __iomem *ioaddr = NULL;
664 struct net_device *dev; 858 struct net_device *dev;
665 struct tc35815_local *lp; 859 struct tc35815_local *lp;
666 int rc; 860 int rc;
-	unsigned long mmio_start, mmio_end, mmio_flags, mmio_len;
+	DECLARE_MAC_BUF(mac);
668 862
669 static int printed_version; 863 static int printed_version;
670 if (!printed_version++) { 864 if (!printed_version++) {
671 printk(version); 865 printk(version);
672 dev_printk(KERN_DEBUG, &pdev->dev, 866 dev_printk(KERN_DEBUG, &pdev->dev,
-			   "speed:%d duplex:%d doforce:%d\n",
-			   options.speed, options.duplex, options.doforce);
+			   "speed:%d duplex:%d\n",
+			   options.speed, options.duplex);
675 } 869 }
676 870
677 if (!pdev->irq) { 871 if (!pdev->irq) {
@@ -680,55 +874,24 @@ static int __devinit tc35815_init_one (struct pci_dev *pdev,
680 } 874 }
681 875
682 /* dev zeroed in alloc_etherdev */ 876 /* dev zeroed in alloc_etherdev */
-	dev = alloc_etherdev (sizeof (*lp));
+	dev = alloc_etherdev(sizeof(*lp));
684 if (dev == NULL) { 878 if (dev == NULL) {
685 dev_err(&pdev->dev, "unable to alloc new ethernet\n"); 879 dev_err(&pdev->dev, "unable to alloc new ethernet\n");
686 return -ENOMEM; 880 return -ENOMEM;
687 } 881 }
688 SET_NETDEV_DEV(dev, &pdev->dev); 882 SET_NETDEV_DEV(dev, &pdev->dev);
-	lp = dev->priv;
+	lp = netdev_priv(dev);
690 lp->dev = dev; 884 lp->dev = dev;
691 885
692 /* enable device (incl. PCI PM wakeup), and bus-mastering */ 886 /* enable device (incl. PCI PM wakeup), and bus-mastering */
-	rc = pci_enable_device (pdev);
+	rc = pcim_enable_device(pdev);
 	if (rc)
 		goto err_out;
-
-	mmio_start = pci_resource_start (pdev, 1);
-	mmio_end = pci_resource_end (pdev, 1);
-	mmio_flags = pci_resource_flags (pdev, 1);
-	mmio_len = pci_resource_len (pdev, 1);
-
-	/* set this immediately, we need to know before
-	 * we talk to the chip directly */
-
-	/* make sure PCI base addr 1 is MMIO */
-	if (!(mmio_flags & IORESOURCE_MEM)) {
-		dev_err(&pdev->dev, "region #1 not an MMIO resource, aborting\n");
-		rc = -ENODEV;
-		goto err_out;
-	}
-
-	/* check for weird/broken PCI region reporting */
-	if ((mmio_len < sizeof(struct tc35815_regs))) {
-		dev_err(&pdev->dev, "Invalid PCI region size(s), aborting\n");
-		rc = -ENODEV;
-		goto err_out;
-	}
-
-	rc = pci_request_regions (pdev, MODNAME);
+	rc = pcim_iomap_regions(pdev, 1 << 1, MODNAME);
 	if (rc)
 		goto err_out;
-
-	pci_set_master (pdev);
-
-	/* ioremap MMIO region */
-	ioaddr = ioremap (mmio_start, mmio_len);
-	if (ioaddr == NULL) {
-		dev_err(&pdev->dev, "cannot remap MMIO, aborting\n");
-		rc = -EIO;
-		goto err_out_free_res;
-	}
+	pci_set_master(pdev);
+	ioaddr = pcim_iomap_table(pdev)[1];
732 895
733 /* Initialize the device structure. */ 896 /* Initialize the device structure. */
734 dev->open = tc35815_open; 897 dev->open = tc35815_open;
@@ -748,11 +911,12 @@ static int __devinit tc35815_init_one (struct pci_dev *pdev,
748#endif 911#endif
749 912
 	dev->irq = pdev->irq;
-	dev->base_addr = (unsigned long) ioaddr;
+	dev->base_addr = (unsigned long)ioaddr;
 
+	INIT_WORK(&lp->restart_work, tc35815_restart_work);
 	spin_lock_init(&lp->lock);
 	lp->pci_dev = pdev;
-	lp->boardtype = ent->driver_data;
+	lp->chiptype = ent->driver_data;
756 920
757 lp->msg_enable = NETIF_MSG_TX_ERR | NETIF_MSG_HW | NETIF_MSG_DRV | NETIF_MSG_LINK; 921 lp->msg_enable = NETIF_MSG_TX_ERR | NETIF_MSG_HW | NETIF_MSG_DRV | NETIF_MSG_LINK;
758 pci_set_drvdata(pdev, dev); 922 pci_set_drvdata(pdev, dev);
@@ -766,68 +930,49 @@ static int __devinit tc35815_init_one (struct pci_dev *pdev,
766 random_ether_addr(dev->dev_addr); 930 random_ether_addr(dev->dev_addr);
767 } 931 }
768 932
-	rc = register_netdev (dev);
+	rc = register_netdev(dev);
 	if (rc)
-		goto err_out_unmap;
+		goto err_out;
 
 	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
-	printk(KERN_INFO "%s: %s at 0x%lx, "
-		"%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x, "
-		"IRQ %d\n",
+	printk(KERN_INFO "%s: %s at 0x%lx, %s, IRQ %d\n",
 		dev->name,
-		board_info[ent->driver_data].name,
+		chip_info[ent->driver_data].name,
 		dev->base_addr,
-		dev->dev_addr[0], dev->dev_addr[1],
-		dev->dev_addr[2], dev->dev_addr[3],
-		dev->dev_addr[4], dev->dev_addr[5],
+		print_mac(mac, dev->dev_addr),
 		dev->irq);
 
-	setup_timer(&lp->timer, tc35815_timer, (unsigned long) dev);
-	lp->mii.dev = dev;
-	lp->mii.mdio_read = tc_mdio_read;
-	lp->mii.mdio_write = tc_mdio_write;
-	lp->mii.phy_id_mask = 0x1f;
-	lp->mii.reg_num_mask = 0x1f;
-	tc35815_find_phy(dev);
-	lp->mii.phy_id = lp->phy_addr;
-	lp->mii.full_duplex = 0;
-	lp->mii.force_media = 0;
+	rc = tc_mii_init(dev);
+	if (rc)
+		goto err_out_unregister;
 
 	return 0;
 
-err_out_unmap:
-	iounmap(ioaddr);
-err_out_free_res:
-	pci_release_regions (pdev);
+err_out_unregister:
+	unregister_netdev(dev);
 err_out:
-	free_netdev (dev);
+	free_netdev(dev);
804 return rc; 955 return rc;
805} 956}
806 957
807 958
-static void __devexit tc35815_remove_one (struct pci_dev *pdev)
+static void __devexit tc35815_remove_one(struct pci_dev *pdev)
 {
-	struct net_device *dev = pci_get_drvdata (pdev);
-	unsigned long mmio_addr;
-
-	mmio_addr = dev->base_addr;
-
-	unregister_netdev (dev);
-
-	if (mmio_addr) {
-		iounmap ((void __iomem *)mmio_addr);
-		pci_release_regions (pdev);
-	}
-
-	free_netdev (dev);
+	struct net_device *dev = pci_get_drvdata(pdev);
+	struct tc35815_local *lp = netdev_priv(dev);
 
-	pci_set_drvdata (pdev, NULL);
+	phy_disconnect(lp->phy_dev);
+	mdiobus_unregister(&lp->mii_bus);
+	kfree(lp->mii_bus.irq);
+	unregister_netdev(dev);
+	free_netdev(dev);
+	pci_set_drvdata(pdev, NULL);
825} 970}
826 971
827static int 972static int
828tc35815_init_queues(struct net_device *dev) 973tc35815_init_queues(struct net_device *dev)
829{ 974{
-	struct tc35815_local *lp = dev->priv;
+	struct tc35815_local *lp = netdev_priv(dev);
831 int i; 976 int i;
832 unsigned long fd_addr; 977 unsigned long fd_addr;
833 978
@@ -838,11 +983,17 @@ tc35815_init_queues(struct net_device *dev)
838 sizeof(struct TxFD) * TX_FD_NUM > 983 sizeof(struct TxFD) * TX_FD_NUM >
839 PAGE_SIZE * FD_PAGE_NUM); 984 PAGE_SIZE * FD_PAGE_NUM);
840 985
-	if ((lp->fd_buf = pci_alloc_consistent(lp->pci_dev, PAGE_SIZE * FD_PAGE_NUM, &lp->fd_buf_dma)) == 0)
+	lp->fd_buf = pci_alloc_consistent(lp->pci_dev,
+					  PAGE_SIZE * FD_PAGE_NUM,
+					  &lp->fd_buf_dma);
+	if (!lp->fd_buf)
842 return -ENOMEM; 990 return -ENOMEM;
843 for (i = 0; i < RX_BUF_NUM; i++) { 991 for (i = 0; i < RX_BUF_NUM; i++) {
844#ifdef TC35815_USE_PACKEDBUFFER 992#ifdef TC35815_USE_PACKEDBUFFER
-		if ((lp->data_buf[i] = alloc_rxbuf_page(lp->pci_dev, &lp->data_buf_dma[i])) == NULL) {
+		lp->data_buf[i] =
+			alloc_rxbuf_page(lp->pci_dev,
+					 &lp->data_buf_dma[i]);
+		if (!lp->data_buf[i]) {
846 while (--i >= 0) { 997 while (--i >= 0) {
847 free_rxbuf_page(lp->pci_dev, 998 free_rxbuf_page(lp->pci_dev,
848 lp->data_buf[i], 999 lp->data_buf[i],
@@ -885,18 +1036,17 @@ tc35815_init_queues(struct net_device *dev)
885#endif 1036#endif
886 printk("\n"); 1037 printk("\n");
887 } else { 1038 } else {
-		for (i = 0; i < FD_PAGE_NUM; i++) {
-			clear_page((void *)((unsigned long)lp->fd_buf + i * PAGE_SIZE));
-		}
+		for (i = 0; i < FD_PAGE_NUM; i++)
+			clear_page((void *)((unsigned long)lp->fd_buf +
+					    i * PAGE_SIZE));
891 } 1042 }
892 fd_addr = (unsigned long)lp->fd_buf; 1043 fd_addr = (unsigned long)lp->fd_buf;
893 1044
894 /* Free Descriptors (for Receive) */ 1045 /* Free Descriptors (for Receive) */
895 lp->rfd_base = (struct RxFD *)fd_addr; 1046 lp->rfd_base = (struct RxFD *)fd_addr;
896 fd_addr += sizeof(struct RxFD) * RX_FD_NUM; 1047 fd_addr += sizeof(struct RxFD) * RX_FD_NUM;
-	for (i = 0; i < RX_FD_NUM; i++) {
+	for (i = 0; i < RX_FD_NUM; i++)
 		lp->rfd_base[i].fd.FDCtl = cpu_to_le32(FD_CownsFD);
-	}
900 lp->rfd_cur = lp->rfd_base; 1050 lp->rfd_cur = lp->rfd_base;
901 lp->rfd_limit = (struct RxFD *)fd_addr - (RX_FD_RESERVE + 1); 1051 lp->rfd_limit = (struct RxFD *)fd_addr - (RX_FD_RESERVE + 1);
902 1052
@@ -964,7 +1114,7 @@ tc35815_init_queues(struct net_device *dev)
964static void 1114static void
965tc35815_clear_queues(struct net_device *dev) 1115tc35815_clear_queues(struct net_device *dev)
966{ 1116{
-	struct tc35815_local *lp = dev->priv;
+	struct tc35815_local *lp = netdev_priv(dev);
968 int i; 1118 int i;
969 1119
970 for (i = 0; i < TX_FD_NUM; i++) { 1120 for (i = 0; i < TX_FD_NUM; i++) {
@@ -995,7 +1145,7 @@ tc35815_clear_queues(struct net_device *dev)
995static void 1145static void
996tc35815_free_queues(struct net_device *dev) 1146tc35815_free_queues(struct net_device *dev)
997{ 1147{
-	struct tc35815_local *lp = dev->priv;
+	struct tc35815_local *lp = netdev_priv(dev);
999 int i; 1149 int i;
1000 1150
1001 if (lp->tfd_base) { 1151 if (lp->tfd_base) {
@@ -1076,7 +1226,7 @@ dump_rxfd(struct RxFD *fd)
1076 le32_to_cpu(fd->fd.FDStat), 1226 le32_to_cpu(fd->fd.FDStat),
1077 le32_to_cpu(fd->fd.FDCtl)); 1227 le32_to_cpu(fd->fd.FDCtl));
1078 if (le32_to_cpu(fd->fd.FDCtl) & FD_CownsFD) 1228 if (le32_to_cpu(fd->fd.FDCtl) & FD_CownsFD)
1079 return 0; 1229 return 0;
1080 printk("BD: "); 1230 printk("BD: ");
1081 for (i = 0; i < bd_count; i++) 1231 for (i = 0; i < bd_count; i++)
1082 printk(" %08x %08x", 1232 printk(" %08x %08x",
@@ -1109,7 +1259,7 @@ dump_frfd(struct FrFD *fd)
1109static void 1259static void
1110panic_queues(struct net_device *dev) 1260panic_queues(struct net_device *dev)
1111{ 1261{
-	struct tc35815_local *lp = dev->priv;
+	struct tc35815_local *lp = netdev_priv(dev);
1113 int i; 1263 int i;
1114 1264
1115 printk("TxFD base %p, start %u, end %u\n", 1265 printk("TxFD base %p, start %u, end %u\n",
@@ -1128,42 +1278,33 @@ panic_queues(struct net_device *dev)
1128} 1278}
1129#endif 1279#endif
1130 1280
-static void print_eth(char *add)
+static void print_eth(const u8 *add)
 {
-	int i;
+	DECLARE_MAC_BUF(mac);
 
-	printk("print_eth(%p)\n", add);
-	for (i = 0; i < 6; i++)
-		printk(" %2.2X", (unsigned char) add[i + 6]);
-	printk(" =>");
-	for (i = 0; i < 6; i++)
-		printk(" %2.2X", (unsigned char) add[i]);
-	printk(" : %2.2X%2.2X\n", (unsigned char) add[12], (unsigned char) add[13]);
+	printk(KERN_DEBUG "print_eth(%p)\n", add);
+	printk(KERN_DEBUG " %s =>", print_mac(mac, add + 6));
+	printk(KERN_CONT " %s : %02x%02x\n",
+	       print_mac(mac, add), add[12], add[13]);
1142} 1289}
1143 1290
1144static int tc35815_tx_full(struct net_device *dev) 1291static int tc35815_tx_full(struct net_device *dev)
1145{ 1292{
-	struct tc35815_local *lp = dev->priv;
+	struct tc35815_local *lp = netdev_priv(dev);
1147 return ((lp->tfd_start + 1) % TX_FD_NUM == lp->tfd_end); 1294 return ((lp->tfd_start + 1) % TX_FD_NUM == lp->tfd_end);
1148} 1295}
1149 1296
1150static void tc35815_restart(struct net_device *dev) 1297static void tc35815_restart(struct net_device *dev)
1151{ 1298{
-	struct tc35815_local *lp = dev->priv;
-	int pid = lp->phy_addr;
-	int do_phy_reset = 1;
-	del_timer(&lp->timer);		/* Kill if running	*/
-
-	if (lp->mii_id[0] == 0x0016 && (lp->mii_id[1] & 0xfc00) == 0xf800) {
-		/* Resetting PHY cause problem on some chip... (SEEQ 80221) */
-		do_phy_reset = 0;
-	}
-	if (do_phy_reset) {
+	struct tc35815_local *lp = netdev_priv(dev);
+
+	if (lp->phy_dev) {
 		int timeout;
-		tc_mdio_write(dev, pid, MII_BMCR, BMCR_RESET);
+
+		phy_write(lp->phy_dev, MII_BMCR, BMCR_RESET);
 		timeout = 100;
 		while (--timeout) {
-			if (!(tc_mdio_read(dev, pid, MII_BMCR) & BMCR_RESET))
+			if (!(phy_read(lp->phy_dev, MII_BMCR) & BMCR_RESET))
1167 break; 1308 break;
1168 udelay(1); 1309 udelay(1);
1169 } 1310 }
@@ -1171,16 +1312,40 @@ static void tc35815_restart(struct net_device *dev)
1171 printk(KERN_ERR "%s: BMCR reset failed.\n", dev->name); 1312 printk(KERN_ERR "%s: BMCR reset failed.\n", dev->name);
1172 } 1313 }
1173 1314
1315 spin_lock_irq(&lp->lock);
1174 tc35815_chip_reset(dev); 1316 tc35815_chip_reset(dev);
1175 tc35815_clear_queues(dev); 1317 tc35815_clear_queues(dev);
1176 tc35815_chip_init(dev); 1318 tc35815_chip_init(dev);
1177 /* Reconfigure CAM again since tc35815_chip_init() initialize it. */ 1319 /* Reconfigure CAM again since tc35815_chip_init() initialize it. */
1178 tc35815_set_multicast_list(dev); 1320 tc35815_set_multicast_list(dev);
1321 spin_unlock_irq(&lp->lock);
1322
1323 netif_wake_queue(dev);
1324}
1325
1326static void tc35815_restart_work(struct work_struct *work)
1327{
1328 struct tc35815_local *lp =
1329 container_of(work, struct tc35815_local, restart_work);
1330 struct net_device *dev = lp->dev;
1331
1332 tc35815_restart(dev);
1333}
1334
1335static void tc35815_schedule_restart(struct net_device *dev)
1336{
1337 struct tc35815_local *lp = netdev_priv(dev);
1338 struct tc35815_regs __iomem *tr =
1339 (struct tc35815_regs __iomem *)dev->base_addr;
1340
1341 /* disable interrupts */
1342 tc_writel(0, &tr->Int_En);
1343 tc_writel(tc_readl(&tr->DMA_Ctl) | DMA_IntMask, &tr->DMA_Ctl);
1344 schedule_work(&lp->restart_work);
1179} 1345}
1180 1346
1181static void tc35815_tx_timeout(struct net_device *dev) 1347static void tc35815_tx_timeout(struct net_device *dev)
1182{ 1348{
1183 struct tc35815_local *lp = dev->priv;
1184 struct tc35815_regs __iomem *tr = 1349 struct tc35815_regs __iomem *tr =
1185 (struct tc35815_regs __iomem *)dev->base_addr; 1350 (struct tc35815_regs __iomem *)dev->base_addr;
1186 1351
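The two hunks above replace the old timer-driven recovery with a deferred one: the timeout and fatal-error paths only mask the interrupt sources and call schedule_work(), while the actual reset runs later from the shared workqueue in process context. A reduced sketch of that pattern follows; the names are illustrative, the real code lives in tc35815_restart_work() and tc35815_schedule_restart(), wired up by the INIT_WORK() line added to tc35815_init_one().

/* Sketch only: deferring an adapter reset out of watchdog/IRQ context
 * (illustrative names, not the driver's own). */
#include <linux/netdevice.h>
#include <linux/workqueue.h>

struct my_priv {
	struct net_device *dev;
	struct work_struct restart_work;
};

/* runs later in process context via the shared workqueue */
static void my_recover_work(struct work_struct *work)
{
	struct my_priv *priv =
		container_of(work, struct my_priv, restart_work);

	/* heavy recovery (chip reset, ring re-init, CAM setup) goes here */
	netif_wake_queue(priv->dev);
}

/* called by the stack from watchdog (softirq) context */
static void my_tx_timeout(struct net_device *dev)
{
	struct my_priv *priv = netdev_priv(dev);

	schedule_work(&priv->restart_work);	/* defer the real work */
	dev->stats.tx_errors++;
}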
@@ -1188,28 +1353,12 @@ static void tc35815_tx_timeout(struct net_device *dev)
1188 dev->name, tc_readl(&tr->Tx_Stat)); 1353 dev->name, tc_readl(&tr->Tx_Stat));
1189 1354
1190 /* Try to restart the adaptor. */ 1355 /* Try to restart the adaptor. */
-	spin_lock_irq(&lp->lock);
-	tc35815_restart(dev);
-	spin_unlock_irq(&lp->lock);
-
-	lp->stats.tx_errors++;
-
-	/* If we have space available to accept new transmit
-	 * requests, wake up the queueing layer.  This would
-	 * be the case if the chipset_init() call above just
-	 * flushes out the tx queue and empties it.
-	 *
-	 * If instead, the tx queue is retained then the
-	 * netif_wake_queue() call should be placed in the
-	 * TX completion interrupt handler of the driver instead
-	 * of here.
-	 */
-	if (!tc35815_tx_full(dev))
-		netif_wake_queue(dev);
+	tc35815_schedule_restart(dev);
+	dev->stats.tx_errors++;
1209} 1358}
1210 1359
1211/* 1360/*
- * Open/initialize the board. This is called (in the current kernel)
+ * Open/initialize the controller. This is called (in the current kernel)
1213 * sometime after booting when the 'ifconfig' program is run. 1362 * sometime after booting when the 'ifconfig' program is run.
1214 * 1363 *
1215 * This routine should set everything up anew at each open, even 1364 * This routine should set everything up anew at each open, even
@@ -1219,17 +1368,16 @@ static void tc35815_tx_timeout(struct net_device *dev)
1219static int 1368static int
1220tc35815_open(struct net_device *dev) 1369tc35815_open(struct net_device *dev)
1221{ 1370{
-	struct tc35815_local *lp = dev->priv;
+	struct tc35815_local *lp = netdev_priv(dev);
1223 1372
1224 /* 1373 /*
1225 * This is used if the interrupt line can turned off (shared). 1374 * This is used if the interrupt line can turned off (shared).
1226 * See 3c503.c for an example of selecting the IRQ at config-time. 1375 * See 3c503.c for an example of selecting the IRQ at config-time.
1227 */ 1376 */
-	if (request_irq(dev->irq, &tc35815_interrupt, IRQF_SHARED, dev->name, dev)) {
+	if (request_irq(dev->irq, &tc35815_interrupt, IRQF_SHARED,
+			dev->name, dev))
 		return -EAGAIN;
-	}
 
-	del_timer(&lp->timer);		/* Kill if running	*/
1233 tc35815_chip_reset(dev); 1381 tc35815_chip_reset(dev);
1234 1382
1235 if (tc35815_init_queues(dev) != 0) { 1383 if (tc35815_init_queues(dev) != 0) {
@@ -1246,6 +1394,9 @@ tc35815_open(struct net_device *dev)
1246 tc35815_chip_init(dev); 1394 tc35815_chip_init(dev);
1247 spin_unlock_irq(&lp->lock); 1395 spin_unlock_irq(&lp->lock);
1248 1396
1397 /* schedule a link state check */
1398 phy_start(lp->phy_dev);
1399
1249 /* We are now ready to accept transmit requeusts from 1400 /* We are now ready to accept transmit requeusts from
1250 * the queueing layer of the networking. 1401 * the queueing layer of the networking.
1251 */ 1402 */
@@ -1261,7 +1412,7 @@ tc35815_open(struct net_device *dev)
1261 */ 1412 */
1262static int tc35815_send_packet(struct sk_buff *skb, struct net_device *dev) 1413static int tc35815_send_packet(struct sk_buff *skb, struct net_device *dev)
1263{ 1414{
-	struct tc35815_local *lp = dev->priv;
+	struct tc35815_local *lp = netdev_priv(dev);
1265 struct TxFD *txfd; 1416 struct TxFD *txfd;
1266 unsigned long flags; 1417 unsigned long flags;
1267 1418
@@ -1366,7 +1517,7 @@ static void tc35815_fatal_error_interrupt(struct net_device *dev, u32 status)
1366 panic("%s: Too many fatal errors.", dev->name); 1517 panic("%s: Too many fatal errors.", dev->name);
1367 printk(KERN_WARNING "%s: Resetting ...\n", dev->name); 1518 printk(KERN_WARNING "%s: Resetting ...\n", dev->name);
1368 /* Try to restart the adaptor. */ 1519 /* Try to restart the adaptor. */
1369 tc35815_restart(dev); 1520 tc35815_schedule_restart(dev);
1370} 1521}
1371 1522
1372#ifdef TC35815_NAPI 1523#ifdef TC35815_NAPI
@@ -1375,7 +1526,7 @@ static int tc35815_do_interrupt(struct net_device *dev, u32 status, int limit)
1375static int tc35815_do_interrupt(struct net_device *dev, u32 status) 1526static int tc35815_do_interrupt(struct net_device *dev, u32 status)
1376#endif 1527#endif
1377{ 1528{
-	struct tc35815_local *lp = dev->priv;
+	struct tc35815_local *lp = netdev_priv(dev);
1379 struct tc35815_regs __iomem *tr = 1530 struct tc35815_regs __iomem *tr =
1380 (struct tc35815_regs __iomem *)dev->base_addr; 1531 (struct tc35815_regs __iomem *)dev->base_addr;
1381 int ret = -1; 1532 int ret = -1;
@@ -1392,7 +1543,7 @@ static int tc35815_do_interrupt(struct net_device *dev, u32 status)
1392 printk(KERN_WARNING 1543 printk(KERN_WARNING
1393 "%s: Free Descriptor Area Exhausted (%#x).\n", 1544 "%s: Free Descriptor Area Exhausted (%#x).\n",
1394 dev->name, status); 1545 dev->name, status);
-		lp->stats.rx_dropped++;
+		dev->stats.rx_dropped++;
1396 ret = 0; 1547 ret = 0;
1397 } 1548 }
1398 if (status & Int_IntBLEx) { 1549 if (status & Int_IntBLEx) {
@@ -1401,14 +1552,14 @@ static int tc35815_do_interrupt(struct net_device *dev, u32 status)
1401 printk(KERN_WARNING 1552 printk(KERN_WARNING
1402 "%s: Buffer List Exhausted (%#x).\n", 1553 "%s: Buffer List Exhausted (%#x).\n",
1403 dev->name, status); 1554 dev->name, status);
-		lp->stats.rx_dropped++;
+		dev->stats.rx_dropped++;
1405 ret = 0; 1556 ret = 0;
1406 } 1557 }
1407 if (status & Int_IntExBD) { 1558 if (status & Int_IntExBD) {
1408 printk(KERN_WARNING 1559 printk(KERN_WARNING
1409 "%s: Excessive Buffer Descriptiors (%#x).\n", 1560 "%s: Excessive Buffer Descriptiors (%#x).\n",
1410 dev->name, status); 1561 dev->name, status);
-		lp->stats.rx_length_errors++;
+		dev->stats.rx_length_errors++;
1412 ret = 0; 1563 ret = 0;
1413 } 1564 }
1414 1565
@@ -1492,7 +1643,7 @@ static void
1492tc35815_rx(struct net_device *dev) 1643tc35815_rx(struct net_device *dev)
1493#endif 1644#endif
1494{ 1645{
-	struct tc35815_local *lp = dev->priv;
+	struct tc35815_local *lp = netdev_priv(dev);
1496 unsigned int fdctl; 1647 unsigned int fdctl;
1497 int i; 1648 int i;
1498 int buf_free_count = 0; 1649 int buf_free_count = 0;
@@ -1532,7 +1683,7 @@ tc35815_rx(struct net_device *dev)
1532 if (skb == NULL) { 1683 if (skb == NULL) {
1533 printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", 1684 printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n",
1534 dev->name); 1685 dev->name);
-				lp->stats.rx_dropped++;
+				dev->stats.rx_dropped++;
1536 break; 1687 break;
1537 } 1688 }
1538 skb_reserve(skb, 2); /* 16 bit alignment */ 1689 skb_reserve(skb, 2); /* 16 bit alignment */
@@ -1602,10 +1753,10 @@ tc35815_rx(struct net_device *dev)
1602 netif_rx(skb); 1753 netif_rx(skb);
1603#endif 1754#endif
1604 dev->last_rx = jiffies; 1755 dev->last_rx = jiffies;
-			lp->stats.rx_packets++;
-			lp->stats.rx_bytes += pkt_len;
+			dev->stats.rx_packets++;
+			dev->stats.rx_bytes += pkt_len;
 		} else {
-			lp->stats.rx_errors++;
+			dev->stats.rx_errors++;
1609 printk(KERN_DEBUG "%s: Rx error (status %x)\n", 1760 printk(KERN_DEBUG "%s: Rx error (status %x)\n",
1610 dev->name, status & Rx_Stat_Mask); 1761 dev->name, status & Rx_Stat_Mask);
1611 /* WORKAROUND: LongErr and CRCErr means Overflow. */ 1762 /* WORKAROUND: LongErr and CRCErr means Overflow. */
@@ -1613,10 +1764,14 @@ tc35815_rx(struct net_device *dev)
1613 status &= ~(Rx_LongErr|Rx_CRCErr); 1764 status &= ~(Rx_LongErr|Rx_CRCErr);
1614 status |= Rx_Over; 1765 status |= Rx_Over;
1615 } 1766 }
-			if (status & Rx_LongErr) lp->stats.rx_length_errors++;
-			if (status & Rx_Over) lp->stats.rx_fifo_errors++;
-			if (status & Rx_CRCErr) lp->stats.rx_crc_errors++;
-			if (status & Rx_Align) lp->stats.rx_frame_errors++;
+			if (status & Rx_LongErr)
+				dev->stats.rx_length_errors++;
+			if (status & Rx_Over)
+				dev->stats.rx_fifo_errors++;
+			if (status & Rx_CRCErr)
+				dev->stats.rx_crc_errors++;
+			if (status & Rx_Align)
+				dev->stats.rx_frame_errors++;
1620 } 1775 }
1621 1776
1622 if (bd_count > 0) { 1777 if (bd_count > 0) {
@@ -1772,40 +1927,39 @@ static int tc35815_poll(struct napi_struct *napi, int budget)
1772static void 1927static void
1773tc35815_check_tx_stat(struct net_device *dev, int status) 1928tc35815_check_tx_stat(struct net_device *dev, int status)
1774{ 1929{
-	struct tc35815_local *lp = dev->priv;
+	struct tc35815_local *lp = netdev_priv(dev);
1776 const char *msg = NULL; 1931 const char *msg = NULL;
1777 1932
1778 /* count collisions */ 1933 /* count collisions */
1779 if (status & Tx_ExColl) 1934 if (status & Tx_ExColl)
-		lp->stats.collisions += 16;
+		dev->stats.collisions += 16;
 	if (status & Tx_TxColl_MASK)
-		lp->stats.collisions += status & Tx_TxColl_MASK;
+		dev->stats.collisions += status & Tx_TxColl_MASK;
 
 #ifndef NO_CHECK_CARRIER
 	/* TX4939 does not have NCarr */
-	if (lp->boardtype == TC35815_TX4939)
+	if (lp->chiptype == TC35815_TX4939)
 		status &= ~Tx_NCarr;
 #ifdef WORKAROUND_LOSTCAR
 	/* WORKAROUND: ignore LostCrS in full duplex operation */
-	if ((lp->timer_state != asleep && lp->timer_state != lcheck)
-	    || lp->fullduplex)
+	if (!lp->link || lp->duplex == DUPLEX_FULL)
1792 status &= ~Tx_NCarr; 1946 status &= ~Tx_NCarr;
1793#endif 1947#endif
1794#endif 1948#endif
1795 1949
1796 if (!(status & TX_STA_ERR)) { 1950 if (!(status & TX_STA_ERR)) {
1797 /* no error. */ 1951 /* no error. */
-		lp->stats.tx_packets++;
+		dev->stats.tx_packets++;
 		return;
 	}
 
-	lp->stats.tx_errors++;
+	dev->stats.tx_errors++;
 	if (status & Tx_ExColl) {
-		lp->stats.tx_aborted_errors++;
+		dev->stats.tx_aborted_errors++;
 		msg = "Excessive Collision.";
 	}
 	if (status & Tx_Under) {
-		lp->stats.tx_fifo_errors++;
+		dev->stats.tx_fifo_errors++;
1809 msg = "Tx FIFO Underrun."; 1963 msg = "Tx FIFO Underrun.";
1810 if (lp->lstats.tx_underrun < TX_THRESHOLD_KEEP_LIMIT) { 1964 if (lp->lstats.tx_underrun < TX_THRESHOLD_KEEP_LIMIT) {
1811 lp->lstats.tx_underrun++; 1965 lp->lstats.tx_underrun++;
@@ -1818,25 +1972,25 @@ tc35815_check_tx_stat(struct net_device *dev, int status)
1818 } 1972 }
1819 } 1973 }
1820 if (status & Tx_Defer) { 1974 if (status & Tx_Defer) {
-		lp->stats.tx_fifo_errors++;
+		dev->stats.tx_fifo_errors++;
 		msg = "Excessive Deferral.";
 	}
 #ifndef NO_CHECK_CARRIER
 	if (status & Tx_NCarr) {
-		lp->stats.tx_carrier_errors++;
+		dev->stats.tx_carrier_errors++;
 		msg = "Lost Carrier Sense.";
 	}
 #endif
 	if (status & Tx_LateColl) {
-		lp->stats.tx_aborted_errors++;
+		dev->stats.tx_aborted_errors++;
 		msg = "Late Collision.";
 	}
 	if (status & Tx_TxPar) {
-		lp->stats.tx_fifo_errors++;
+		dev->stats.tx_fifo_errors++;
 		msg = "Transmit Parity Error.";
 	}
 	if (status & Tx_SQErr) {
-		lp->stats.tx_heartbeat_errors++;
+		dev->stats.tx_heartbeat_errors++;
1840 msg = "Signal Quality Error."; 1994 msg = "Signal Quality Error.";
1841 } 1995 }
1842 if (msg && netif_msg_tx_err(lp)) 1996 if (msg && netif_msg_tx_err(lp))
@@ -1849,7 +2003,7 @@ tc35815_check_tx_stat(struct net_device *dev, int status)
1849static void 2003static void
1850tc35815_txdone(struct net_device *dev) 2004tc35815_txdone(struct net_device *dev)
1851{ 2005{
-	struct tc35815_local *lp = dev->priv;
+	struct tc35815_local *lp = netdev_priv(dev);
1853 struct TxFD *txfd; 2007 struct TxFD *txfd;
1854 unsigned int fdctl; 2008 unsigned int fdctl;
1855 2009
@@ -1878,7 +2032,7 @@ tc35815_txdone(struct net_device *dev)
1878 BUG_ON(lp->tx_skbs[lp->tfd_end].skb != skb); 2032 BUG_ON(lp->tx_skbs[lp->tfd_end].skb != skb);
1879#endif 2033#endif
1880 if (skb) { 2034 if (skb) {
-			lp->stats.tx_bytes += skb->len;
+			dev->stats.tx_bytes += skb->len;
1882 pci_unmap_single(lp->pci_dev, lp->tx_skbs[lp->tfd_end].skb_dma, skb->len, PCI_DMA_TODEVICE); 2036 pci_unmap_single(lp->pci_dev, lp->tx_skbs[lp->tfd_end].skb_dma, skb->len, PCI_DMA_TODEVICE);
1883 lp->tx_skbs[lp->tfd_end].skb = NULL; 2037 lp->tx_skbs[lp->tfd_end].skb = NULL;
1884 lp->tx_skbs[lp->tfd_end].skb_dma = 0; 2038 lp->tx_skbs[lp->tfd_end].skb_dma = 0;
@@ -1904,7 +2058,7 @@ tc35815_txdone(struct net_device *dev)
1904 struct tc35815_regs __iomem *tr = 2058 struct tc35815_regs __iomem *tr =
1905 (struct tc35815_regs __iomem *)dev->base_addr; 2059 (struct tc35815_regs __iomem *)dev->base_addr;
1906 int head = (lp->tfd_start + TX_FD_NUM - 1) % TX_FD_NUM; 2060 int head = (lp->tfd_start + TX_FD_NUM - 1) % TX_FD_NUM;
-		struct TxFD* txhead = &lp->tfd_base[head];
+		struct TxFD *txhead = &lp->tfd_base[head];
1908 int qlen = (lp->tfd_start + TX_FD_NUM 2062 int qlen = (lp->tfd_start + TX_FD_NUM
1909 - lp->tfd_end) % TX_FD_NUM; 2063 - lp->tfd_end) % TX_FD_NUM;
1910 2064
@@ -1939,7 +2093,7 @@ tc35815_txdone(struct net_device *dev)
1939 * condition, and space has now been made available, 2093 * condition, and space has now been made available,
1940 * wake up the queue. 2094 * wake up the queue.
1941 */ 2095 */
-	if (netif_queue_stopped(dev) && ! tc35815_tx_full(dev))
+	if (netif_queue_stopped(dev) && !tc35815_tx_full(dev))
1943 netif_wake_queue(dev); 2097 netif_wake_queue(dev);
1944} 2098}
1945 2099
@@ -1947,16 +2101,17 @@ tc35815_txdone(struct net_device *dev)
1947static int 2101static int
1948tc35815_close(struct net_device *dev) 2102tc35815_close(struct net_device *dev)
1949{ 2103{
-	struct tc35815_local *lp = dev->priv;
+	struct tc35815_local *lp = netdev_priv(dev);
1951 2105
1952 netif_stop_queue(dev); 2106 netif_stop_queue(dev);
1953#ifdef TC35815_NAPI 2107#ifdef TC35815_NAPI
1954 napi_disable(&lp->napi); 2108 napi_disable(&lp->napi);
1955#endif 2109#endif
2110 if (lp->phy_dev)
2111 phy_stop(lp->phy_dev);
2112 cancel_work_sync(&lp->restart_work);
1956 2113
1957 /* Flush the Tx and disable Rx here. */ 2114 /* Flush the Tx and disable Rx here. */
1958
1959 del_timer(&lp->timer); /* Kill if running */
1960 tc35815_chip_reset(dev); 2115 tc35815_chip_reset(dev);
1961 free_irq(dev->irq, dev); 2116 free_irq(dev->irq, dev);
1962 2117
@@ -1972,34 +2127,30 @@ tc35815_close(struct net_device *dev)
1972 */ 2127 */
1973static struct net_device_stats *tc35815_get_stats(struct net_device *dev) 2128static struct net_device_stats *tc35815_get_stats(struct net_device *dev)
1974{ 2129{
1975 struct tc35815_local *lp = dev->priv;
1976 struct tc35815_regs __iomem *tr = 2130 struct tc35815_regs __iomem *tr =
1977 (struct tc35815_regs __iomem *)dev->base_addr; 2131 (struct tc35815_regs __iomem *)dev->base_addr;
-	if (netif_running(dev)) {
+	if (netif_running(dev))
 		/* Update the statistics from the device registers. */
-		lp->stats.rx_missed_errors = tc_readl(&tr->Miss_Cnt);
-	}
+		dev->stats.rx_missed_errors = tc_readl(&tr->Miss_Cnt);
 
-	return &lp->stats;
+	return &dev->stats;
1984} 2137}
1985 2138
1986static void tc35815_set_cam_entry(struct net_device *dev, int index, unsigned char *addr) 2139static void tc35815_set_cam_entry(struct net_device *dev, int index, unsigned char *addr)
1987{ 2140{
-	struct tc35815_local *lp = dev->priv;
+	struct tc35815_local *lp = netdev_priv(dev);
1989 struct tc35815_regs __iomem *tr = 2142 struct tc35815_regs __iomem *tr =
1990 (struct tc35815_regs __iomem *)dev->base_addr; 2143 (struct tc35815_regs __iomem *)dev->base_addr;
1991 int cam_index = index * 6; 2144 int cam_index = index * 6;
1992 u32 cam_data; 2145 u32 cam_data;
1993 u32 saved_addr; 2146 u32 saved_addr;
2147 DECLARE_MAC_BUF(mac);
2148
1994 saved_addr = tc_readl(&tr->CAM_Adr); 2149 saved_addr = tc_readl(&tr->CAM_Adr);
1995 2150
-	if (netif_msg_hw(lp)) {
-		int i;
-		printk(KERN_DEBUG "%s: CAM %d:", dev->name, index);
-		for (i = 0; i < 6; i++)
-			printk(" %02x", addr[i]);
-		printk("\n");
-	}
+	if (netif_msg_hw(lp))
+		printk(KERN_DEBUG "%s: CAM %d: %s\n",
+			dev->name, index, print_mac(mac, addr));
2003 if (index & 1) { 2154 if (index & 1) {
2004 /* read modify write */ 2155 /* read modify write */
2005 tc_writel(cam_index - 2, &tr->CAM_Adr); 2156 tc_writel(cam_index - 2, &tr->CAM_Adr);
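The open-coded loop that printed the six address bytes is replaced with the DECLARE_MAC_BUF()/print_mac() helpers of this kernel generation; later kernels express the same thing with the %pM printk format, shown below only for comparison (it is not available to this patch):

	DECLARE_MAC_BUF(mac);		/* stack buffer for the formatted address */
	pr_debug("CAM %d: %s\n", index, print_mac(mac, addr));

	/* later-kernel spelling, for comparison only:
	 * pr_debug("CAM %d: %pM\n", index, addr);
	 */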
@@ -2039,28 +2190,24 @@ tc35815_set_multicast_list(struct net_device *dev)
2039 struct tc35815_regs __iomem *tr = 2190 struct tc35815_regs __iomem *tr =
2040 (struct tc35815_regs __iomem *)dev->base_addr; 2191 (struct tc35815_regs __iomem *)dev->base_addr;
2041 2192
2042 if (dev->flags&IFF_PROMISC) 2193 if (dev->flags & IFF_PROMISC) {
2043 {
2044#ifdef WORKAROUND_100HALF_PROMISC 2194#ifdef WORKAROUND_100HALF_PROMISC
2045 /* With some (all?) 100MHalf HUB, controller will hang 2195 /* With some (all?) 100MHalf HUB, controller will hang
2046 * if we enabled promiscuous mode before linkup... */ 2196 * if we enabled promiscuous mode before linkup... */
2047 struct tc35815_local *lp = dev->priv; 2197 struct tc35815_local *lp = netdev_priv(dev);
2048 int pid = lp->phy_addr; 2198
2049 if (!(tc_mdio_read(dev, pid, MII_BMSR) & BMSR_LSTATUS)) 2199 if (!lp->link)
2050 return; 2200 return;
2051#endif 2201#endif
2052 /* Enable promiscuous mode */ 2202 /* Enable promiscuous mode */
2053 tc_writel(CAM_CompEn | CAM_BroadAcc | CAM_GroupAcc | CAM_StationAcc, &tr->CAM_Ctl); 2203 tc_writel(CAM_CompEn | CAM_BroadAcc | CAM_GroupAcc | CAM_StationAcc, &tr->CAM_Ctl);
2054 } 2204 } else if ((dev->flags & IFF_ALLMULTI) ||
2055 else if((dev->flags&IFF_ALLMULTI) || dev->mc_count > CAM_ENTRY_MAX - 3) 2205 dev->mc_count > CAM_ENTRY_MAX - 3) {
2056 {
2057 /* CAM 0, 1, 20 are reserved. */ 2206 /* CAM 0, 1, 20 are reserved. */
2058 /* Disable promiscuous mode, use normal mode. */ 2207 /* Disable promiscuous mode, use normal mode. */
2059 tc_writel(CAM_CompEn | CAM_BroadAcc | CAM_GroupAcc, &tr->CAM_Ctl); 2208 tc_writel(CAM_CompEn | CAM_BroadAcc | CAM_GroupAcc, &tr->CAM_Ctl);
2060 } 2209 } else if (dev->mc_count) {
2061 else if(dev->mc_count) 2210 struct dev_mc_list *cur_addr = dev->mc_list;
2062 {
2063 struct dev_mc_list* cur_addr = dev->mc_list;
2064 int i; 2211 int i;
2065 int ena_bits = CAM_Ena_Bit(CAM_ENTRY_SOURCE); 2212 int ena_bits = CAM_Ena_Bit(CAM_ENTRY_SOURCE);
2066 2213
@@ -2075,8 +2222,7 @@ tc35815_set_multicast_list(struct net_device *dev)
2075 } 2222 }
2076 tc_writel(ena_bits, &tr->CAM_Ena); 2223 tc_writel(ena_bits, &tr->CAM_Ena);
2077 tc_writel(CAM_CompEn | CAM_BroadAcc, &tr->CAM_Ctl); 2224 tc_writel(CAM_CompEn | CAM_BroadAcc, &tr->CAM_Ctl);
2078 } 2225 } else {
2079 else {
2080 tc_writel(CAM_Ena_Bit(CAM_ENTRY_SOURCE), &tr->CAM_Ena); 2226 tc_writel(CAM_Ena_Bit(CAM_ENTRY_SOURCE), &tr->CAM_Ena);
2081 tc_writel(CAM_CompEn | CAM_BroadAcc, &tr->CAM_Ctl); 2227 tc_writel(CAM_CompEn | CAM_BroadAcc, &tr->CAM_Ctl);
2082 } 2228 }
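The restyled set_multicast_list keeps the usual receive-filter ladder for a small CAM-based filter; condensed, and with the reason for the "- 3" made explicit (CAM entries 0, 1 and 20 are reserved, per the comment above):

	/* Filter choice, in order of precedence:
	 *   IFF_PROMISC                        -> accept everything
	 *   IFF_ALLMULTI or mc_count too large -> accept all multicast
	 *   1 .. CAM_ENTRY_MAX - 3 addresses   -> one CAM entry per address
	 *   no multicast addresses             -> station address (+ broadcast) only
	 * The "- 3" leaves room for the reserved CAM entries 0, 1 and 20.
	 */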
@@ -2084,7 +2230,7 @@ tc35815_set_multicast_list(struct net_device *dev)
2084 2230
2085static void tc35815_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) 2231static void tc35815_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
2086{ 2232{
2087 struct tc35815_local *lp = dev->priv; 2233 struct tc35815_local *lp = netdev_priv(dev);
2088 strcpy(info->driver, MODNAME); 2234 strcpy(info->driver, MODNAME);
2089 strcpy(info->version, DRV_VERSION); 2235 strcpy(info->version, DRV_VERSION);
2090 strcpy(info->bus_info, pci_name(lp->pci_dev)); 2236 strcpy(info->bus_info, pci_name(lp->pci_dev));
@@ -2092,78 +2238,37 @@ static void tc35815_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *
2092 2238
2093static int tc35815_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 2239static int tc35815_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2094{ 2240{
2095 struct tc35815_local *lp = dev->priv; 2241 struct tc35815_local *lp = netdev_priv(dev);
2096 spin_lock_irq(&lp->lock);
2097 mii_ethtool_gset(&lp->mii, cmd);
2098 spin_unlock_irq(&lp->lock);
2099 return 0;
2100}
2101
2102static int tc35815_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2103{
2104 struct tc35815_local *lp = dev->priv;
2105 int rc;
2106#if 1 /* use our negotiation method... */
2107 /* Verify the settings we care about. */
2108 if (cmd->autoneg != AUTONEG_ENABLE &&
2109 cmd->autoneg != AUTONEG_DISABLE)
2110 return -EINVAL;
2111 if (cmd->autoneg == AUTONEG_DISABLE &&
2112 ((cmd->speed != SPEED_100 &&
2113 cmd->speed != SPEED_10) ||
2114 (cmd->duplex != DUPLEX_HALF &&
2115 cmd->duplex != DUPLEX_FULL)))
2116 return -EINVAL;
2117 2242
2118 /* Ok, do it to it. */ 2243 if (!lp->phy_dev)
2119 spin_lock_irq(&lp->lock); 2244 return -ENODEV;
2120 del_timer(&lp->timer); 2245 return phy_ethtool_gset(lp->phy_dev, cmd);
2121 tc35815_start_auto_negotiation(dev, cmd);
2122 spin_unlock_irq(&lp->lock);
2123 rc = 0;
2124#else
2125 spin_lock_irq(&lp->lock);
2126 rc = mii_ethtool_sset(&lp->mii, cmd);
2127 spin_unlock_irq(&lp->lock);
2128#endif
2129 return rc;
2130} 2246}
2131 2247
2132static int tc35815_nway_reset(struct net_device *dev) 2248static int tc35815_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2133{ 2249{
2134 struct tc35815_local *lp = dev->priv; 2250 struct tc35815_local *lp = netdev_priv(dev);
2135 int rc;
2136 spin_lock_irq(&lp->lock);
2137 rc = mii_nway_restart(&lp->mii);
2138 spin_unlock_irq(&lp->lock);
2139 return rc;
2140}
2141 2251
2142static u32 tc35815_get_link(struct net_device *dev) 2252 if (!lp->phy_dev)
2143{ 2253 return -ENODEV;
2144 struct tc35815_local *lp = dev->priv; 2254 return phy_ethtool_sset(lp->phy_dev, cmd);
2145 int rc;
2146 spin_lock_irq(&lp->lock);
2147 rc = mii_link_ok(&lp->mii);
2148 spin_unlock_irq(&lp->lock);
2149 return rc;
2150} 2255}
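Delegating get/set_settings to phy_ethtool_gset()/phy_ethtool_sset() also makes the removed hand-rolled parameter checks unnecessary: phylib of this series validates the requested autoneg/speed/duplex against what the attached PHY supports before applying anything. The patch drops the driver's nway_reset op altogether; if one were kept, the phylib version would be a sketch along these lines (illustrative, not part of this patch):

static int example_nway_reset(struct net_device *dev)
{
	struct tc35815_local *lp = netdev_priv(dev);

	if (!lp->phy_dev)
		return -ENODEV;			/* no PHY bound */
	return phy_start_aneg(lp->phy_dev);	/* restart autonegotiation via phylib */
}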
2151 2256
2152static u32 tc35815_get_msglevel(struct net_device *dev) 2257static u32 tc35815_get_msglevel(struct net_device *dev)
2153{ 2258{
2154 struct tc35815_local *lp = dev->priv; 2259 struct tc35815_local *lp = netdev_priv(dev);
2155 return lp->msg_enable; 2260 return lp->msg_enable;
2156} 2261}
2157 2262
2158static void tc35815_set_msglevel(struct net_device *dev, u32 datum) 2263static void tc35815_set_msglevel(struct net_device *dev, u32 datum)
2159{ 2264{
2160 struct tc35815_local *lp = dev->priv; 2265 struct tc35815_local *lp = netdev_priv(dev);
2161 lp->msg_enable = datum; 2266 lp->msg_enable = datum;
2162} 2267}
2163 2268
2164static int tc35815_get_sset_count(struct net_device *dev, int sset) 2269static int tc35815_get_sset_count(struct net_device *dev, int sset)
2165{ 2270{
2166 struct tc35815_local *lp = dev->priv; 2271 struct tc35815_local *lp = netdev_priv(dev);
2167 2272
2168 switch (sset) { 2273 switch (sset) {
2169 case ETH_SS_STATS: 2274 case ETH_SS_STATS:
@@ -2175,7 +2280,7 @@ static int tc35815_get_sset_count(struct net_device *dev, int sset)
2175 2280
2176static void tc35815_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data) 2281static void tc35815_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data)
2177{ 2282{
2178 struct tc35815_local *lp = dev->priv; 2283 struct tc35815_local *lp = netdev_priv(dev);
2179 data[0] = lp->lstats.max_tx_qlen; 2284 data[0] = lp->lstats.max_tx_qlen;
2180 data[1] = lp->lstats.tx_ints; 2285 data[1] = lp->lstats.tx_ints;
2181 data[2] = lp->lstats.rx_ints; 2286 data[2] = lp->lstats.rx_ints;
@@ -2200,8 +2305,7 @@ static const struct ethtool_ops tc35815_ethtool_ops = {
2200 .get_drvinfo = tc35815_get_drvinfo, 2305 .get_drvinfo = tc35815_get_drvinfo,
2201 .get_settings = tc35815_get_settings, 2306 .get_settings = tc35815_get_settings,
2202 .set_settings = tc35815_set_settings, 2307 .set_settings = tc35815_set_settings,
2203 .nway_reset = tc35815_nway_reset, 2308 .get_link = ethtool_op_get_link,
2204 .get_link = tc35815_get_link,
2205 .get_msglevel = tc35815_get_msglevel, 2309 .get_msglevel = tc35815_get_msglevel,
2206 .set_msglevel = tc35815_set_msglevel, 2310 .set_msglevel = tc35815_set_msglevel,
2207 .get_strings = tc35815_get_strings, 2311 .get_strings = tc35815_get_strings,
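ethtool_op_get_link() simply reports netif_carrier_ok(dev); with phylib attached, the PHY state machine keeps the carrier up to date on link changes and calls back into the driver to reconfigure the MAC. That callback is outside this hunk; a hypothetical shape, using the lp->link and lp->duplex fields referenced later in this patch:

/* illustrative sketch; the driver's real callback is not shown in this hunk */
static void example_adjust_link(struct net_device *dev)
{
	struct tc35815_local *lp = netdev_priv(dev);
	struct phy_device *phydev = lp->phy_dev;

	if (phydev->link != lp->link || phydev->duplex != lp->duplex) {
		lp->link = phydev->link;
		lp->duplex = phydev->duplex;
		/* reprogram MAC_Ctl full/half duplex here as needed */
	}
}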
@@ -2211,611 +2315,13 @@ static const struct ethtool_ops tc35815_ethtool_ops = {
2211 2315
2212static int tc35815_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 2316static int tc35815_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2213{ 2317{
2214 struct tc35815_local *lp = dev->priv; 2318 struct tc35815_local *lp = netdev_priv(dev);
2215 int rc;
2216 2319
2217 if (!netif_running(dev)) 2320 if (!netif_running(dev))
2218 return -EINVAL; 2321 return -EINVAL;
2219 2322 if (!lp->phy_dev)
2220 spin_lock_irq(&lp->lock); 2323 return -ENODEV;
2221 rc = generic_mii_ioctl(&lp->mii, if_mii(rq), cmd, NULL); 2324 return phy_mii_ioctl(lp->phy_dev, if_mii(rq), cmd);
2222 spin_unlock_irq(&lp->lock);
2223
2224 return rc;
2225}
2226
2227static int tc_mdio_read(struct net_device *dev, int phy_id, int location)
2228{
2229 struct tc35815_regs __iomem *tr =
2230 (struct tc35815_regs __iomem *)dev->base_addr;
2231 u32 data;
2232 tc_writel(MD_CA_Busy | (phy_id << 5) | location, &tr->MD_CA);
2233 while (tc_readl(&tr->MD_CA) & MD_CA_Busy)
2234 ;
2235 data = tc_readl(&tr->MD_Data);
2236 return data & 0xffff;
2237}
2238
2239static void tc_mdio_write(struct net_device *dev, int phy_id, int location,
2240 int val)
2241{
2242 struct tc35815_regs __iomem *tr =
2243 (struct tc35815_regs __iomem *)dev->base_addr;
2244 tc_writel(val, &tr->MD_Data);
2245 tc_writel(MD_CA_Busy | MD_CA_Wr | (phy_id << 5) | location, &tr->MD_CA);
2246 while (tc_readl(&tr->MD_CA) & MD_CA_Busy)
2247 ;
2248}
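The removed MDIO accessors spin on MD_CA_Busy with no bound, which is fine while the hardware always answers but unpleasant to debug when it does not. A defensive variant of the same poll, with a hypothetical timeout added (a common hardening, not something this driver does):

static int example_mdio_wait(struct tc35815_regs __iomem *tr)
{
	unsigned long deadline = jiffies + msecs_to_jiffies(10);	/* arbitrary bound */

	while (tc_readl(&tr->MD_CA) & MD_CA_Busy) {
		if (time_after(jiffies, deadline))
			return -ETIMEDOUT;
		cpu_relax();
	}
	return 0;
}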
2249
2250/* Auto negotiation. The scheme is very simple. We have a timer routine
2251 * that keeps watching the auto negotiation process as it progresses.
2252 * The DP83840 is first told to start doing its thing, we set up the time
2253 * and place the timer state machine in its initial state.
2254 *
2255 * Here the timer peeks at the DP83840 status registers at each click to see
2256 * if the auto negotiation has completed, we assume here that the DP83840 PHY
2257 * will time out at some point and just tell us what (didn't) happen. For
2258 * complete coverage we only allow so many of the ticks at this level to run,
2259 * when this has expired we print a warning message and try another strategy.
2260 * This "other" strategy is to force the interface into various speed/duplex
2261 * configurations and we stop when we see a link-up condition before the
2262 * maximum number of "peek" ticks have occurred.
2263 *
2264 * Once a valid link status has been detected we configure the BigMAC and
2265 * the rest of the Happy Meal to speak the most efficient protocol we could
2266 * get a clean link for. The priority for link configurations, highest first
2267 * is:
2268 * 100 Base-T Full Duplex
2269 * 100 Base-T Half Duplex
2270 * 10 Base-T Full Duplex
2271 * 10 Base-T Half Duplex
2272 *
2273 * We start a new timer now, after a successful auto negotiation status has
2274 * been detected. This timer just waits for the link-up bit to get set in
2275 * the BMCR of the DP83840. When this occurs we print a kernel log message
2276 * describing the link type in use and the fact that it is up.
2277 *
2278 * If a fatal error of some sort is signalled and detected in the interrupt
2279 * service routine, and the chip is reset, or the link is ifconfig'd down
2280 * and then back up, this entire process repeats itself all over again.
2281 */
2282/* Note: the comments above come from the sunhme driver. */
2283
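For orientation, the comment above describes a five-state link timer; the states used by the removed code below can be summarized as follows (paraphrased from that code, not new behaviour):

	/* Condensed view of the timer states implemented below:
	 *   arbwait  - poll BMSR until autonegotiation completes (-> lupwait),
	 *              or after ~10 ticks give up and either restart the chip
	 *              or force a mode (-> ltrywait)
	 *   lupwait  - autoneg done, wait for BMSR link-up, then latch the
	 *              negotiated mode (-> lcheck)
	 *   ltrywait - forced mode: wait for link-up (-> lcheck), otherwise
	 *              step down through the speed/duplex permutations and
	 *              eventually restart the chip
	 *   lcheck   - link is up: watch BMCR/LPA and restart the adapter if
	 *              the PHY or the link partner changes
	 *   asleep   - timer idle (should not fire)
	 */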
2284static int tc35815_try_next_permutation(struct net_device *dev)
2285{
2286 struct tc35815_local *lp = dev->priv;
2287 int pid = lp->phy_addr;
2288 unsigned short bmcr;
2289
2290 bmcr = tc_mdio_read(dev, pid, MII_BMCR);
2291
2292 /* Downgrade from full to half duplex. Only possible via ethtool. */
2293 if (bmcr & BMCR_FULLDPLX) {
2294 bmcr &= ~BMCR_FULLDPLX;
2295 printk(KERN_DEBUG "%s: try next permutation (BMCR %x)\n", dev->name, bmcr);
2296 tc_mdio_write(dev, pid, MII_BMCR, bmcr);
2297 return 0;
2298 }
2299
2300 /* Downgrade from 100 to 10. */
2301 if (bmcr & BMCR_SPEED100) {
2302 bmcr &= ~BMCR_SPEED100;
2303 printk(KERN_DEBUG "%s: try next permutation (BMCR %x)\n", dev->name, bmcr);
2304 tc_mdio_write(dev, pid, MII_BMCR, bmcr);
2305 return 0;
2306 }
2307
2308 /* We've tried everything. */
2309 return -1;
2310}
2311
2312static void
2313tc35815_display_link_mode(struct net_device *dev)
2314{
2315 struct tc35815_local *lp = dev->priv;
2316 int pid = lp->phy_addr;
2317 unsigned short lpa, bmcr;
2318 char *speed = "", *duplex = "";
2319
2320 lpa = tc_mdio_read(dev, pid, MII_LPA);
2321 bmcr = tc_mdio_read(dev, pid, MII_BMCR);
2322 if (options.speed ? (bmcr & BMCR_SPEED100) : (lpa & (LPA_100HALF | LPA_100FULL)))
2323 speed = "100Mb/s";
2324 else
2325 speed = "10Mb/s";
2326 if (options.duplex ? (bmcr & BMCR_FULLDPLX) : (lpa & (LPA_100FULL | LPA_10FULL)))
2327 duplex = "Full Duplex";
2328 else
2329 duplex = "Half Duplex";
2330
2331 if (netif_msg_link(lp))
2332 printk(KERN_INFO "%s: Link is up at %s, %s.\n",
2333 dev->name, speed, duplex);
2334 printk(KERN_DEBUG "%s: MII BMCR %04x BMSR %04x LPA %04x\n",
2335 dev->name,
2336 bmcr, tc_mdio_read(dev, pid, MII_BMSR), lpa);
2337}
2338
2339static void tc35815_display_forced_link_mode(struct net_device *dev)
2340{
2341 struct tc35815_local *lp = dev->priv;
2342 int pid = lp->phy_addr;
2343 unsigned short bmcr;
2344 char *speed = "", *duplex = "";
2345
2346 bmcr = tc_mdio_read(dev, pid, MII_BMCR);
2347 if (bmcr & BMCR_SPEED100)
2348 speed = "100Mb/s";
2349 else
2350 speed = "10Mb/s";
2351 if (bmcr & BMCR_FULLDPLX)
2352 duplex = "Full Duplex.\n";
2353 else
2354 duplex = "Half Duplex.\n";
2355
2356 if (netif_msg_link(lp))
2357 printk(KERN_INFO "%s: Link has been forced up at %s, %s",
2358 dev->name, speed, duplex);
2359}
2360
2361static void tc35815_set_link_modes(struct net_device *dev)
2362{
2363 struct tc35815_local *lp = dev->priv;
2364 struct tc35815_regs __iomem *tr =
2365 (struct tc35815_regs __iomem *)dev->base_addr;
2366 int pid = lp->phy_addr;
2367 unsigned short bmcr, lpa;
2368 int speed;
2369
2370 if (lp->timer_state == arbwait) {
2371 lpa = tc_mdio_read(dev, pid, MII_LPA);
2372 bmcr = tc_mdio_read(dev, pid, MII_BMCR);
2373 printk(KERN_DEBUG "%s: MII BMCR %04x BMSR %04x LPA %04x\n",
2374 dev->name,
2375 bmcr, tc_mdio_read(dev, pid, MII_BMSR), lpa);
2376 if (!(lpa & (LPA_10HALF | LPA_10FULL |
2377 LPA_100HALF | LPA_100FULL))) {
2378 /* fall back to 10HALF */
2379 printk(KERN_INFO "%s: bad ability %04x - falling back to 10HD.\n",
2380 dev->name, lpa);
2381 lpa = LPA_10HALF;
2382 }
2383 if (options.duplex ? (bmcr & BMCR_FULLDPLX) : (lpa & (LPA_100FULL | LPA_10FULL)))
2384 lp->fullduplex = 1;
2385 else
2386 lp->fullduplex = 0;
2387 if (options.speed ? (bmcr & BMCR_SPEED100) : (lpa & (LPA_100HALF | LPA_100FULL)))
2388 speed = 100;
2389 else
2390 speed = 10;
2391 } else {
2392 /* Forcing a link mode. */
2393 bmcr = tc_mdio_read(dev, pid, MII_BMCR);
2394 if (bmcr & BMCR_FULLDPLX)
2395 lp->fullduplex = 1;
2396 else
2397 lp->fullduplex = 0;
2398 if (bmcr & BMCR_SPEED100)
2399 speed = 100;
2400 else
2401 speed = 10;
2402 }
2403
2404 tc_writel(tc_readl(&tr->MAC_Ctl) | MAC_HaltReq, &tr->MAC_Ctl);
2405 if (lp->fullduplex) {
2406 tc_writel(tc_readl(&tr->MAC_Ctl) | MAC_FullDup, &tr->MAC_Ctl);
2407 } else {
2408 tc_writel(tc_readl(&tr->MAC_Ctl) & ~MAC_FullDup, &tr->MAC_Ctl);
2409 }
2410 tc_writel(tc_readl(&tr->MAC_Ctl) & ~MAC_HaltReq, &tr->MAC_Ctl);
2411
2412 /* TX4939 PCFG.SPEEDn bit will be changed on NETDEV_CHANGE event. */
2413
2414#ifndef NO_CHECK_CARRIER
2415 /* TX4939 does not have EnLCarr */
2416 if (lp->boardtype != TC35815_TX4939) {
2417#ifdef WORKAROUND_LOSTCAR
2418 /* WORKAROUND: enable LostCrS only if half duplex operation */
2419 if (!lp->fullduplex && lp->boardtype != TC35815_TX4939)
2420 tc_writel(tc_readl(&tr->Tx_Ctl) | Tx_EnLCarr, &tr->Tx_Ctl);
2421#endif
2422 }
2423#endif
2424 lp->mii.full_duplex = lp->fullduplex;
2425}
2426
2427static void tc35815_timer(unsigned long data)
2428{
2429 struct net_device *dev = (struct net_device *)data;
2430 struct tc35815_local *lp = dev->priv;
2431 int pid = lp->phy_addr;
2432 unsigned short bmsr, bmcr, lpa;
2433 int restart_timer = 0;
2434
2435 spin_lock_irq(&lp->lock);
2436
2437 lp->timer_ticks++;
2438 switch (lp->timer_state) {
2439 case arbwait:
2440 /*
2441 * Only allow for 5 ticks, that's 10 seconds and much too
2442 * long to wait for arbitration to complete.
2443 */
2444 /* TC35815 needs more time... */
2445 if (lp->timer_ticks >= 10) {
2446 /* Enter force mode. */
2447 if (!options.doforce) {
2448 printk(KERN_NOTICE "%s: Auto-Negotiation unsuccessful,"
2449 " cable problem?\n", dev->name);
2450 /* Try to restart the adaptor. */
2451 tc35815_restart(dev);
2452 goto out;
2453 }
2454 printk(KERN_NOTICE "%s: Auto-Negotiation unsuccessful,"
2455 " trying force link mode\n", dev->name);
2456 printk(KERN_DEBUG "%s: BMCR %x BMSR %x\n", dev->name,
2457 tc_mdio_read(dev, pid, MII_BMCR),
2458 tc_mdio_read(dev, pid, MII_BMSR));
2459 bmcr = BMCR_SPEED100;
2460 tc_mdio_write(dev, pid, MII_BMCR, bmcr);
2461
2462 /*
2463 * OK, seems we need to disable the transceiver
2464 * for the first tick to make sure we get an
2465 * accurate link state at the second tick.
2466 */
2467
2468 lp->timer_state = ltrywait;
2469 lp->timer_ticks = 0;
2470 restart_timer = 1;
2471 } else {
2472 /* Anything interesting happen? */
2473 bmsr = tc_mdio_read(dev, pid, MII_BMSR);
2474 if (bmsr & BMSR_ANEGCOMPLETE) {
2475 /* Just what we've been waiting for... */
2476 tc35815_set_link_modes(dev);
2477
2478 /*
2479 * Success, at least so far, advance our state
2480 * engine.
2481 */
2482 lp->timer_state = lupwait;
2483 restart_timer = 1;
2484 } else {
2485 restart_timer = 1;
2486 }
2487 }
2488 break;
2489
2490 case lupwait:
2491 /*
2492 * Auto negotiation was successful and we are awaiting a
2493 * link up status. I have decided to let this timer run
2494 * forever until some sort of error is signalled, reporting
2495 * a message to the user at 10 second intervals.
2496 */
2497 bmsr = tc_mdio_read(dev, pid, MII_BMSR);
2498 if (bmsr & BMSR_LSTATUS) {
2499 /*
2500 * Wheee, it's up, display the link mode in use and put
2501 * the timer to sleep.
2502 */
2503 tc35815_display_link_mode(dev);
2504 netif_carrier_on(dev);
2505#ifdef WORKAROUND_100HALF_PROMISC
2506 /* delayed promiscuous enabling */
2507 if (dev->flags & IFF_PROMISC)
2508 tc35815_set_multicast_list(dev);
2509#endif
2510#if 1
2511 lp->saved_lpa = tc_mdio_read(dev, pid, MII_LPA);
2512 lp->timer_state = lcheck;
2513 restart_timer = 1;
2514#else
2515 lp->timer_state = asleep;
2516 restart_timer = 0;
2517#endif
2518 } else {
2519 if (lp->timer_ticks >= 10) {
2520 printk(KERN_NOTICE "%s: Auto negotiation successful, link still "
2521 "not completely up.\n", dev->name);
2522 lp->timer_ticks = 0;
2523 restart_timer = 1;
2524 } else {
2525 restart_timer = 1;
2526 }
2527 }
2528 break;
2529
2530 case ltrywait:
2531 /*
2532 * Making the timeout here too long can make it take
2533 * annoyingly long to attempt all of the link mode
2534 * permutations, but then again this is essentially
2535 * error recovery code for the most part.
2536 */
2537 bmsr = tc_mdio_read(dev, pid, MII_BMSR);
2538 bmcr = tc_mdio_read(dev, pid, MII_BMCR);
2539 if (lp->timer_ticks == 1) {
2540 /*
2541 * Re-enable transceiver, we'll re-enable the
2542 * transceiver next tick, then check link state
2543 * on the following tick.
2544 */
2545 restart_timer = 1;
2546 break;
2547 }
2548 if (lp->timer_ticks == 2) {
2549 restart_timer = 1;
2550 break;
2551 }
2552 if (bmsr & BMSR_LSTATUS) {
2553 /* Force mode selection success. */
2554 tc35815_display_forced_link_mode(dev);
2555 netif_carrier_on(dev);
2556 tc35815_set_link_modes(dev);
2557#ifdef WORKAROUND_100HALF_PROMISC
2558 /* delayed promiscuous enabling */
2559 if (dev->flags & IFF_PROMISC)
2560 tc35815_set_multicast_list(dev);
2561#endif
2562#if 1
2563 lp->saved_lpa = tc_mdio_read(dev, pid, MII_LPA);
2564 lp->timer_state = lcheck;
2565 restart_timer = 1;
2566#else
2567 lp->timer_state = asleep;
2568 restart_timer = 0;
2569#endif
2570 } else {
2571 if (lp->timer_ticks >= 4) { /* 6 seconds or so... */
2572 int ret;
2573
2574 ret = tc35815_try_next_permutation(dev);
2575 if (ret == -1) {
2576 /*
2577 * Aieee, tried them all, reset the
2578 * chip and try all over again.
2579 */
2580 printk(KERN_NOTICE "%s: Link down, "
2581 "cable problem?\n",
2582 dev->name);
2583
2584 /* Try to restart the adaptor. */
2585 tc35815_restart(dev);
2586 goto out;
2587 }
2588 lp->timer_ticks = 0;
2589 restart_timer = 1;
2590 } else {
2591 restart_timer = 1;
2592 }
2593 }
2594 break;
2595
2596 case lcheck:
2597 bmcr = tc_mdio_read(dev, pid, MII_BMCR);
2598 lpa = tc_mdio_read(dev, pid, MII_LPA);
2599 if (bmcr & (BMCR_PDOWN | BMCR_ISOLATE | BMCR_RESET)) {
2600 printk(KERN_ERR "%s: PHY down? (BMCR %x)\n", dev->name,
2601 bmcr);
2602 } else if ((lp->saved_lpa ^ lpa) &
2603 (LPA_100FULL|LPA_100HALF|LPA_10FULL|LPA_10HALF)) {
2604 printk(KERN_NOTICE "%s: link status changed"
2605 " (BMCR %x LPA %x->%x)\n", dev->name,
2606 bmcr, lp->saved_lpa, lpa);
2607 } else {
2608 /* go on */
2609 restart_timer = 1;
2610 break;
2611 }
2612 /* Try to restart the adaptor. */
2613 tc35815_restart(dev);
2614 goto out;
2615
2616 case asleep:
2617 default:
2618 /* Can't happen.... */
2619 printk(KERN_ERR "%s: Aieee, link timer is asleep but we got "
2620 "one anyways!\n", dev->name);
2621 restart_timer = 0;
2622 lp->timer_ticks = 0;
2623 lp->timer_state = asleep; /* foo on you */
2624 break;
2625 }
2626
2627 if (restart_timer) {
2628 lp->timer.expires = jiffies + msecs_to_jiffies(1200);
2629 add_timer(&lp->timer);
2630 }
2631out:
2632 spin_unlock_irq(&lp->lock);
2633}
2634
2635static void tc35815_start_auto_negotiation(struct net_device *dev,
2636 struct ethtool_cmd *ep)
2637{
2638 struct tc35815_local *lp = dev->priv;
2639 int pid = lp->phy_addr;
2640 unsigned short bmsr, bmcr, advertize;
2641 int timeout;
2642
2643 netif_carrier_off(dev);
2644 bmsr = tc_mdio_read(dev, pid, MII_BMSR);
2645 bmcr = tc_mdio_read(dev, pid, MII_BMCR);
2646 advertize = tc_mdio_read(dev, pid, MII_ADVERTISE);
2647
2648 if (ep == NULL || ep->autoneg == AUTONEG_ENABLE) {
2649 if (options.speed || options.duplex) {
2650 /* Advertise only specified configuration. */
2651 advertize &= ~(ADVERTISE_10HALF |
2652 ADVERTISE_10FULL |
2653 ADVERTISE_100HALF |
2654 ADVERTISE_100FULL);
2655 if (options.speed != 10) {
2656 if (options.duplex != 1)
2657 advertize |= ADVERTISE_100FULL;
2658 if (options.duplex != 2)
2659 advertize |= ADVERTISE_100HALF;
2660 }
2661 if (options.speed != 100) {
2662 if (options.duplex != 1)
2663 advertize |= ADVERTISE_10FULL;
2664 if (options.duplex != 2)
2665 advertize |= ADVERTISE_10HALF;
2666 }
2667 if (options.speed == 100)
2668 bmcr |= BMCR_SPEED100;
2669 else if (options.speed == 10)
2670 bmcr &= ~BMCR_SPEED100;
2671 if (options.duplex == 2)
2672 bmcr |= BMCR_FULLDPLX;
2673 else if (options.duplex == 1)
2674 bmcr &= ~BMCR_FULLDPLX;
2675 } else {
2676 /* Advertise everything we can support. */
2677 if (bmsr & BMSR_10HALF)
2678 advertize |= ADVERTISE_10HALF;
2679 else
2680 advertize &= ~ADVERTISE_10HALF;
2681 if (bmsr & BMSR_10FULL)
2682 advertize |= ADVERTISE_10FULL;
2683 else
2684 advertize &= ~ADVERTISE_10FULL;
2685 if (bmsr & BMSR_100HALF)
2686 advertize |= ADVERTISE_100HALF;
2687 else
2688 advertize &= ~ADVERTISE_100HALF;
2689 if (bmsr & BMSR_100FULL)
2690 advertize |= ADVERTISE_100FULL;
2691 else
2692 advertize &= ~ADVERTISE_100FULL;
2693 }
2694
2695 tc_mdio_write(dev, pid, MII_ADVERTISE, advertize);
2696
2697 /* Enable Auto-Negotiation, this is usually on already... */
2698 bmcr |= BMCR_ANENABLE;
2699 tc_mdio_write(dev, pid, MII_BMCR, bmcr);
2700
2701 /* Restart it to make sure it is going. */
2702 bmcr |= BMCR_ANRESTART;
2703 tc_mdio_write(dev, pid, MII_BMCR, bmcr);
2704 printk(KERN_DEBUG "%s: ADVERTISE %x BMCR %x\n", dev->name, advertize, bmcr);
2705
2706 /* BMCR_ANRESTART self clears when the process has begun. */
2707 timeout = 64; /* More than enough. */
2708 while (--timeout) {
2709 bmcr = tc_mdio_read(dev, pid, MII_BMCR);
2710 if (!(bmcr & BMCR_ANRESTART))
2711 break; /* got it. */
2712 udelay(10);
2713 }
2714 if (!timeout) {
2715 printk(KERN_ERR "%s: TC35815 would not start auto "
2716 "negotiation BMCR=0x%04x\n",
2717 dev->name, bmcr);
2718 printk(KERN_NOTICE "%s: Performing force link "
2719 "detection.\n", dev->name);
2720 goto force_link;
2721 } else {
2722 printk(KERN_DEBUG "%s: auto negotiation started.\n", dev->name);
2723 lp->timer_state = arbwait;
2724 }
2725 } else {
2726force_link:
2727 /* Force the link up, trying first a particular mode.
2728 * Either we are here at the request of ethtool or
2729 * because the Happy Meal would not start to autoneg.
2730 */
2731
2732 /* Disable auto-negotiation in BMCR, enable the duplex and
2733 * speed setting, init the timer state machine, and fire it off.
2734 */
2735 if (ep == NULL || ep->autoneg == AUTONEG_ENABLE) {
2736 bmcr = BMCR_SPEED100;
2737 } else {
2738 if (ep->speed == SPEED_100)
2739 bmcr = BMCR_SPEED100;
2740 else
2741 bmcr = 0;
2742 if (ep->duplex == DUPLEX_FULL)
2743 bmcr |= BMCR_FULLDPLX;
2744 }
2745 tc_mdio_write(dev, pid, MII_BMCR, bmcr);
2746
2747 /* OK, seems we need to disable the transceiver for the first
2748 * tick to make sure we get an accurate link state at the
2749 * second tick.
2750 */
2751 lp->timer_state = ltrywait;
2752 }
2753
2754 del_timer(&lp->timer);
2755 lp->timer_ticks = 0;
2756 lp->timer.expires = jiffies + msecs_to_jiffies(1200);
2757 add_timer(&lp->timer);
2758}
2759
2760static void tc35815_find_phy(struct net_device *dev)
2761{
2762 struct tc35815_local *lp = dev->priv;
2763 int pid = lp->phy_addr;
2764 unsigned short id0;
2765
2766 /* find MII phy */
2767 for (pid = 31; pid >= 0; pid--) {
2768 id0 = tc_mdio_read(dev, pid, MII_BMSR);
2769 if (id0 != 0xffff && id0 != 0x0000 &&
2770 (id0 & BMSR_RESV) != (0xffff & BMSR_RESV) /* paranoia? */
2771 ) {
2772 lp->phy_addr = pid;
2773 break;
2774 }
2775 }
2776 if (pid < 0) {
2777 printk(KERN_ERR "%s: No MII Phy found.\n",
2778 dev->name);
2779 lp->phy_addr = pid = 0;
2780 }
2781
2782 lp->mii_id[0] = tc_mdio_read(dev, pid, MII_PHYSID1);
2783 lp->mii_id[1] = tc_mdio_read(dev, pid, MII_PHYSID2);
2784 if (netif_msg_hw(lp))
2785 printk(KERN_INFO "%s: PHY(%02x) ID %04x %04x\n", dev->name,
2786 pid, lp->mii_id[0], lp->mii_id[1]);
2787}
2788
2789static void tc35815_phy_chip_init(struct net_device *dev)
2790{
2791 struct tc35815_local *lp = dev->priv;
2792 int pid = lp->phy_addr;
2793 unsigned short bmcr;
2794 struct ethtool_cmd ecmd, *ep;
2795
2796 /* dis-isolate if needed. */
2797 bmcr = tc_mdio_read(dev, pid, MII_BMCR);
2798 if (bmcr & BMCR_ISOLATE) {
2799 int count = 32;
2800 printk(KERN_DEBUG "%s: unisolating...", dev->name);
2801 tc_mdio_write(dev, pid, MII_BMCR, bmcr & ~BMCR_ISOLATE);
2802 while (--count) {
2803 if (!(tc_mdio_read(dev, pid, MII_BMCR) & BMCR_ISOLATE))
2804 break;
2805 udelay(20);
2806 }
2807 printk(" %s.\n", count ? "done" : "failed");
2808 }
2809
2810 if (options.speed && options.duplex) {
2811 ecmd.autoneg = AUTONEG_DISABLE;
2812 ecmd.speed = options.speed == 10 ? SPEED_10 : SPEED_100;
2813 ecmd.duplex = options.duplex == 1 ? DUPLEX_HALF : DUPLEX_FULL;
2814 ep = &ecmd;
2815 } else {
2816 ep = NULL;
2817 }
2818 tc35815_start_auto_negotiation(dev, ep);
2819} 2325}
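Everything deleted in this hunk (the hand-rolled MDIO ioctl, the forced-mode permutation walker, the link timer and the advertisement setup) is functionality that phylib now provides. The attach side that replaces it is not visible here; on a kernel of this series it typically looks like the sketch below, where the bus id string, the example_adjust_link handler and the error handling are illustrative placeholders rather than the driver's actual probe code:

/* illustrative phylib attach for this kernel series; not the driver's real code */
static int example_attach_phy(struct net_device *dev)
{
	struct tc35815_local *lp = netdev_priv(dev);
	static const char *bus_id = "0:01";	/* placeholder MII bus id */
	struct phy_device *phydev;

	phydev = phy_connect(dev, bus_id, example_adjust_link,
			     0 /* flags */, PHY_INTERFACE_MODE_MII);
	if (IS_ERR(phydev))
		return PTR_ERR(phydev);

	lp->phy_dev = phydev;
	phy_start(phydev);	/* starts autonegotiation and link polling */
	return 0;
}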
2820 2326
2821static void tc35815_chip_reset(struct net_device *dev) 2327static void tc35815_chip_reset(struct net_device *dev)
@@ -2862,13 +2368,11 @@ static void tc35815_chip_reset(struct net_device *dev)
2862 2368
2863static void tc35815_chip_init(struct net_device *dev) 2369static void tc35815_chip_init(struct net_device *dev)
2864{ 2370{
2865 struct tc35815_local *lp = dev->priv; 2371 struct tc35815_local *lp = netdev_priv(dev);
2866 struct tc35815_regs __iomem *tr = 2372 struct tc35815_regs __iomem *tr =
2867 (struct tc35815_regs __iomem *)dev->base_addr; 2373 (struct tc35815_regs __iomem *)dev->base_addr;
2868 unsigned long txctl = TX_CTL_CMD; 2374 unsigned long txctl = TX_CTL_CMD;
2869 2375
2870 tc35815_phy_chip_init(dev);
2871
2872 /* load station address to CAM */ 2376 /* load station address to CAM */
2873 tc35815_set_cam_entry(dev, CAM_ENTRY_SOURCE, dev->dev_addr); 2377 tc35815_set_cam_entry(dev, CAM_ENTRY_SOURCE, dev->dev_addr);
2874 2378
@@ -2905,12 +2409,11 @@ static void tc35815_chip_init(struct net_device *dev)
2905 /* start MAC transmitter */ 2409 /* start MAC transmitter */
2906#ifndef NO_CHECK_CARRIER 2410#ifndef NO_CHECK_CARRIER
2907 /* TX4939 does not have EnLCarr */ 2411 /* TX4939 does not have EnLCarr */
2908 if (lp->boardtype == TC35815_TX4939) 2412 if (lp->chiptype == TC35815_TX4939)
2909 txctl &= ~Tx_EnLCarr; 2413 txctl &= ~Tx_EnLCarr;
2910#ifdef WORKAROUND_LOSTCAR 2414#ifdef WORKAROUND_LOSTCAR
2911 /* WORKAROUND: ignore LostCrS in full duplex operation */ 2415 /* WORKAROUND: ignore LostCrS in full duplex operation */
2912 if ((lp->timer_state != asleep && lp->timer_state != lcheck) || 2416 if (!lp->phy_dev || !lp->link || lp->duplex == DUPLEX_FULL)
2913 lp->fullduplex)
2914 txctl &= ~Tx_EnLCarr; 2417 txctl &= ~Tx_EnLCarr;
2915#endif 2418#endif
2916#endif /* !NO_CHECK_CARRIER */ 2419#endif /* !NO_CHECK_CARRIER */
@@ -2924,15 +2427,16 @@ static void tc35815_chip_init(struct net_device *dev)
2924static int tc35815_suspend(struct pci_dev *pdev, pm_message_t state) 2427static int tc35815_suspend(struct pci_dev *pdev, pm_message_t state)
2925{ 2428{
2926 struct net_device *dev = pci_get_drvdata(pdev); 2429 struct net_device *dev = pci_get_drvdata(pdev);
2927 struct tc35815_local *lp = dev->priv; 2430 struct tc35815_local *lp = netdev_priv(dev);
2928 unsigned long flags; 2431 unsigned long flags;
2929 2432
2930 pci_save_state(pdev); 2433 pci_save_state(pdev);
2931 if (!netif_running(dev)) 2434 if (!netif_running(dev))
2932 return 0; 2435 return 0;
2933 netif_device_detach(dev); 2436 netif_device_detach(dev);
2437 if (lp->phy_dev)
2438 phy_stop(lp->phy_dev);
2934 spin_lock_irqsave(&lp->lock, flags); 2439 spin_lock_irqsave(&lp->lock, flags);
2935 del_timer(&lp->timer); /* Kill if running */
2936 tc35815_chip_reset(dev); 2440 tc35815_chip_reset(dev);
2937 spin_unlock_irqrestore(&lp->lock, flags); 2441 spin_unlock_irqrestore(&lp->lock, flags);
2938 pci_set_power_state(pdev, PCI_D3hot); 2442 pci_set_power_state(pdev, PCI_D3hot);
@@ -2942,16 +2446,15 @@ static int tc35815_suspend(struct pci_dev *pdev, pm_message_t state)
2942static int tc35815_resume(struct pci_dev *pdev) 2446static int tc35815_resume(struct pci_dev *pdev)
2943{ 2447{
2944 struct net_device *dev = pci_get_drvdata(pdev); 2448 struct net_device *dev = pci_get_drvdata(pdev);
2945 struct tc35815_local *lp = dev->priv; 2449 struct tc35815_local *lp = netdev_priv(dev);
2946 unsigned long flags;
2947 2450
2948 pci_restore_state(pdev); 2451 pci_restore_state(pdev);
2949 if (!netif_running(dev)) 2452 if (!netif_running(dev))
2950 return 0; 2453 return 0;
2951 pci_set_power_state(pdev, PCI_D0); 2454 pci_set_power_state(pdev, PCI_D0);
2952 spin_lock_irqsave(&lp->lock, flags);
2953 tc35815_restart(dev); 2455 tc35815_restart(dev);
2954 spin_unlock_irqrestore(&lp->lock, flags); 2456 if (lp->phy_dev)
2457 phy_start(lp->phy_dev);
2955 netif_device_attach(dev); 2458 netif_device_attach(dev);
2956 return 0; 2459 return 0;
2957} 2460}
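The power-management hooks keep the same division of labour: the PHY is stopped before the chip is reset for suspend, and restarted only after the chip is running again on resume. In outline:

	/* suspend: pci_save_state -> netif_device_detach -> phy_stop()
	 *          -> chip reset under lp->lock -> PCI_D3hot
	 * resume:  pci_restore_state -> PCI_D0 -> tc35815_restart()
	 *          -> phy_start() -> netif_device_attach
	 */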
@@ -2972,8 +2475,6 @@ module_param_named(speed, options.speed, int, 0);
2972MODULE_PARM_DESC(speed, "0:auto, 10:10Mbps, 100:100Mbps"); 2475MODULE_PARM_DESC(speed, "0:auto, 10:10Mbps, 100:100Mbps");
2973module_param_named(duplex, options.duplex, int, 0); 2476module_param_named(duplex, options.duplex, int, 0);
2974MODULE_PARM_DESC(duplex, "0:auto, 1:half, 2:full"); 2477MODULE_PARM_DESC(duplex, "0:auto, 1:half, 2:full");
2975module_param_named(doforce, options.doforce, int, 0);
2976MODULE_PARM_DESC(doforce, "try force link mode if auto-negotiation failed");
2977 2478
2978static int __init tc35815_init_module(void) 2479static int __init tc35815_init_module(void)
2979{ 2480{
diff --git a/drivers/net/tokenring/3c359.c b/drivers/net/tokenring/3c359.c
index 44a06f8b588f..45208a0e69a0 100644
--- a/drivers/net/tokenring/3c359.c
+++ b/drivers/net/tokenring/3c359.c
@@ -42,6 +42,7 @@
42 42
43#define XL_DEBUG 0 43#define XL_DEBUG 0
44 44
45#include <linux/jiffies.h>
45#include <linux/module.h> 46#include <linux/module.h>
46#include <linux/kernel.h> 47#include <linux/kernel.h>
47#include <linux/errno.h> 48#include <linux/errno.h>
@@ -408,7 +409,7 @@ static int xl_hw_reset(struct net_device *dev)
408 t=jiffies; 409 t=jiffies;
409 while (readw(xl_mmio + MMIO_INTSTATUS) & INTSTAT_CMD_IN_PROGRESS) { 410 while (readw(xl_mmio + MMIO_INTSTATUS) & INTSTAT_CMD_IN_PROGRESS) {
410 schedule(); 411 schedule();
411 if(jiffies-t > 40*HZ) { 412 if (time_after(jiffies, t + 40 * HZ)) {
412 printk(KERN_ERR "%s: 3COM 3C359 Velocity XL card not responding to global reset.\n", dev->name); 413 printk(KERN_ERR "%s: 3COM 3C359 Velocity XL card not responding to global reset.\n", dev->name);
413 return -ENODEV; 414 return -ENODEV;
414 } 415 }
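All of the 3c359 busy-wait loops are converted from open-coded jiffies arithmetic to time_after() from <linux/jiffies.h> (newly included above). The macro compares two jiffies values in a wraparound-safe way and states the intent directly; the usual shape is the sketch below, where busy_condition() stands in for whatever status bit is being polled:

	unsigned long deadline = jiffies + 40 * HZ;	/* same 40 s bound as above */

	while (busy_condition()) {
		schedule();
		if (time_after(jiffies, deadline))
			break;		/* give up: hardware not responding */
	}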
@@ -519,7 +520,7 @@ static int xl_hw_reset(struct net_device *dev)
519 t=jiffies; 520 t=jiffies;
520 while ( !(readw(xl_mmio + MMIO_INTSTATUS_AUTO) & INTSTAT_SRB) ) { 521 while ( !(readw(xl_mmio + MMIO_INTSTATUS_AUTO) & INTSTAT_SRB) ) {
521 schedule(); 522 schedule();
522 if(jiffies-t > 15*HZ) { 523 if (time_after(jiffies, t + 15 * HZ)) {
523 printk(KERN_ERR "3COM 3C359 Velocity XL card not responding.\n"); 524 printk(KERN_ERR "3COM 3C359 Velocity XL card not responding.\n");
524 return -ENODEV; 525 return -ENODEV;
525 } 526 }
@@ -790,7 +791,7 @@ static int xl_open_hw(struct net_device *dev)
790 t=jiffies; 791 t=jiffies;
791 while (! (readw(xl_mmio + MMIO_INTSTATUS) & INTSTAT_SRB)) { 792 while (! (readw(xl_mmio + MMIO_INTSTATUS) & INTSTAT_SRB)) {
792 schedule(); 793 schedule();
793 if(jiffies-t > 40*HZ) { 794 if (time_after(jiffies, t + 40 * HZ)) {
794 printk(KERN_ERR "3COM 3C359 Velocity XL card not responding.\n"); 795 printk(KERN_ERR "3COM 3C359 Velocity XL card not responding.\n");
795 break ; 796 break ;
796 } 797 }
@@ -1003,7 +1004,7 @@ static void xl_reset(struct net_device *dev)
1003 1004
1004 t=jiffies; 1005 t=jiffies;
1005 while (readw(xl_mmio + MMIO_INTSTATUS) & INTSTAT_CMD_IN_PROGRESS) { 1006 while (readw(xl_mmio + MMIO_INTSTATUS) & INTSTAT_CMD_IN_PROGRESS) {
1006 if(jiffies-t > 40*HZ) { 1007 if (time_after(jiffies, t + 40 * HZ)) {
1007 printk(KERN_ERR "3COM 3C359 Velocity XL card not responding.\n"); 1008 printk(KERN_ERR "3COM 3C359 Velocity XL card not responding.\n");
1008 break ; 1009 break ;
1009 } 1010 }
@@ -1270,7 +1271,7 @@ static int xl_close(struct net_device *dev)
1270 t=jiffies; 1271 t=jiffies;
1271 while (readw(xl_mmio + MMIO_INTSTATUS) & INTSTAT_CMD_IN_PROGRESS) { 1272 while (readw(xl_mmio + MMIO_INTSTATUS) & INTSTAT_CMD_IN_PROGRESS) {
1272 schedule(); 1273 schedule();
1273 if(jiffies-t > 10*HZ) { 1274 if (time_after(jiffies, t + 10 * HZ)) {
1274 printk(KERN_ERR "%s: 3COM 3C359 Velocity XL-DNSTALL not responding.\n", dev->name); 1275 printk(KERN_ERR "%s: 3COM 3C359 Velocity XL-DNSTALL not responding.\n", dev->name);
1275 break ; 1276 break ;
1276 } 1277 }
@@ -1279,7 +1280,7 @@ static int xl_close(struct net_device *dev)
1279 t=jiffies; 1280 t=jiffies;
1280 while (readw(xl_mmio + MMIO_INTSTATUS) & INTSTAT_CMD_IN_PROGRESS) { 1281 while (readw(xl_mmio + MMIO_INTSTATUS) & INTSTAT_CMD_IN_PROGRESS) {
1281 schedule(); 1282 schedule();
1282 if(jiffies-t > 10*HZ) { 1283 if (time_after(jiffies, t + 10 * HZ)) {
1283 printk(KERN_ERR "%s: 3COM 3C359 Velocity XL-DNDISABLE not responding.\n", dev->name); 1284 printk(KERN_ERR "%s: 3COM 3C359 Velocity XL-DNDISABLE not responding.\n", dev->name);
1284 break ; 1285 break ;
1285 } 1286 }
@@ -1288,7 +1289,7 @@ static int xl_close(struct net_device *dev)
1288 t=jiffies; 1289 t=jiffies;
1289 while (readw(xl_mmio + MMIO_INTSTATUS) & INTSTAT_CMD_IN_PROGRESS) { 1290 while (readw(xl_mmio + MMIO_INTSTATUS) & INTSTAT_CMD_IN_PROGRESS) {
1290 schedule(); 1291 schedule();
1291 if(jiffies-t > 10*HZ) { 1292 if (time_after(jiffies, t + 10 * HZ)) {
1292 printk(KERN_ERR "%s: 3COM 3C359 Velocity XL-UPSTALL not responding.\n", dev->name); 1293 printk(KERN_ERR "%s: 3COM 3C359 Velocity XL-UPSTALL not responding.\n", dev->name);
1293 break ; 1294 break ;
1294 } 1295 }
@@ -1305,7 +1306,7 @@ static int xl_close(struct net_device *dev)
1305 t=jiffies; 1306 t=jiffies;
1306 while (!(readw(xl_mmio + MMIO_INTSTATUS) & INTSTAT_SRB)) { 1307 while (!(readw(xl_mmio + MMIO_INTSTATUS) & INTSTAT_SRB)) {
1307 schedule(); 1308 schedule();
1308 if(jiffies-t > 10*HZ) { 1309 if (time_after(jiffies, t + 10 * HZ)) {
1309 printk(KERN_ERR "%s: 3COM 3C359 Velocity XL-CLOSENIC not responding.\n", dev->name); 1310 printk(KERN_ERR "%s: 3COM 3C359 Velocity XL-CLOSENIC not responding.\n", dev->name);
1310 break ; 1311 break ;
1311 } 1312 }
@@ -1334,7 +1335,7 @@ static int xl_close(struct net_device *dev)
1334 t=jiffies; 1335 t=jiffies;
1335 while (readw(xl_mmio + MMIO_INTSTATUS) & INTSTAT_CMD_IN_PROGRESS) { 1336 while (readw(xl_mmio + MMIO_INTSTATUS) & INTSTAT_CMD_IN_PROGRESS) {
1336 schedule(); 1337 schedule();
1337 if(jiffies-t > 10*HZ) { 1338 if (time_after(jiffies, t + 10 * HZ)) {
1338 printk(KERN_ERR "%s: 3COM 3C359 Velocity XL-UPRESET not responding.\n", dev->name); 1339 printk(KERN_ERR "%s: 3COM 3C359 Velocity XL-UPRESET not responding.\n", dev->name);
1339 break ; 1340 break ;
1340 } 1341 }
@@ -1343,7 +1344,7 @@ static int xl_close(struct net_device *dev)
1343 t=jiffies; 1344 t=jiffies;
1344 while (readw(xl_mmio + MMIO_INTSTATUS) & INTSTAT_CMD_IN_PROGRESS) { 1345 while (readw(xl_mmio + MMIO_INTSTATUS) & INTSTAT_CMD_IN_PROGRESS) {
1345 schedule(); 1346 schedule();
1346 if(jiffies-t > 10*HZ) { 1347 if (time_after(jiffies, t + 10 * HZ)) {
1347 printk(KERN_ERR "%s: 3COM 3C359 Velocity XL-DNRESET not responding.\n", dev->name); 1348 printk(KERN_ERR "%s: 3COM 3C359 Velocity XL-DNRESET not responding.\n", dev->name);
1348 break ; 1349 break ;
1349 } 1350 }
diff --git a/drivers/net/tulip/Kconfig b/drivers/net/tulip/Kconfig
index 20ac1503021e..d913405bc393 100644
--- a/drivers/net/tulip/Kconfig
+++ b/drivers/net/tulip/Kconfig
@@ -141,7 +141,7 @@ config ULI526X
141 be called uli526x. 141 be called uli526x.
142 142
143config PCMCIA_XIRCOM 143config PCMCIA_XIRCOM
144 tristate "Xircom CardBus support (new driver)" 144 tristate "Xircom CardBus support"
145 depends on CARDBUS 145 depends on CARDBUS
146 ---help--- 146 ---help---
147 This driver is for the Digital "Tulip" Ethernet CardBus adapters. 147 This driver is for the Digital "Tulip" Ethernet CardBus adapters.
@@ -152,17 +152,4 @@ config PCMCIA_XIRCOM
152 To compile this driver as a module, choose M here. The module will 152 To compile this driver as a module, choose M here. The module will
153 be called xircom_cb. If unsure, say N. 153 be called xircom_cb. If unsure, say N.
154 154
155config PCMCIA_XIRTULIP
156 tristate "Xircom Tulip-like CardBus support (old driver)"
157 depends on CARDBUS && BROKEN_ON_SMP
158 select CRC32
159 ---help---
160 This driver is for the Digital "Tulip" Ethernet CardBus adapters.
161 It should work with most DEC 21*4*-based chips/ethercards, as well
162 as with work-alike chips from Lite-On (PNIC) and Macronix (MXIC) and
163 ASIX.
164
165 To compile this driver as a module, choose M here. The module will
166 be called xircom_tulip_cb. If unsure, say N.
167
168endif # NET_TULIP 155endif # NET_TULIP
diff --git a/drivers/net/tulip/Makefile b/drivers/net/tulip/Makefile
index 451090d6fcca..200cbf7c815c 100644
--- a/drivers/net/tulip/Makefile
+++ b/drivers/net/tulip/Makefile
@@ -2,7 +2,6 @@
2# Makefile for the Linux "Tulip" family network device drivers. 2# Makefile for the Linux "Tulip" family network device drivers.
3# 3#
4 4
5obj-$(CONFIG_PCMCIA_XIRTULIP) += xircom_tulip_cb.o
6obj-$(CONFIG_PCMCIA_XIRCOM) += xircom_cb.o 5obj-$(CONFIG_PCMCIA_XIRCOM) += xircom_cb.o
7obj-$(CONFIG_DM9102) += dmfe.o 6obj-$(CONFIG_DM9102) += dmfe.o
8obj-$(CONFIG_WINBOND_840) += winbond-840.o 7obj-$(CONFIG_WINBOND_840) += winbond-840.o
diff --git a/drivers/net/tulip/tulip.h b/drivers/net/tulip/tulip.h
index 3f69f53d7768..908422f2f320 100644
--- a/drivers/net/tulip/tulip.h
+++ b/drivers/net/tulip/tulip.h
@@ -268,7 +268,12 @@ enum t21143_csr6_bits {
268#define RX_RING_SIZE 128 268#define RX_RING_SIZE 128
269#define MEDIA_MASK 31 269#define MEDIA_MASK 31
270 270
271#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer. */ 271/* The receiver on the DC21143 rev 65 can fail to close the last
272 * receive descriptor in certain circumstances (see errata) when
273 * using MWI. This can only occur if the receive buffer ends on
274 * a cache line boundary, so the "+ 4" below ensures it doesn't.
275 */
276#define PKT_BUF_SZ (1536 + 4) /* Size of each temporary Rx buffer. */
272 277
273#define TULIP_MIN_CACHE_LINE 8 /* in units of 32-bit words */ 278#define TULIP_MIN_CACHE_LINE 8 /* in units of 32-bit words */
274 279
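A quick check of the arithmetic behind the new comment: 1536 is a multiple of every common cache-line size (32, 64 or 128 bytes), so an Rx buffer that starts on a cache-line boundary ends exactly on one, which is the condition the DC21143 rev 65 MWI errata requires. With 1536 + 4 the buffer end always falls 4 bytes past a line boundary for any of those line sizes, so the condition cannot occur. This assumes the buffer start is line-aligned, which is the case the workaround is aimed at.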
diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c
index 82f404b76d81..fa1c1c329a2d 100644
--- a/drivers/net/tulip/tulip_core.c
+++ b/drivers/net/tulip/tulip_core.c
@@ -1154,18 +1154,13 @@ static void __devinit tulip_mwi_config (struct pci_dev *pdev,
1154 1154
1155 tp->csr0 = csr0 = 0; 1155 tp->csr0 = csr0 = 0;
1156 1156
1157 /* if we have any cache line size at all, we can do MRM */ 1157 /* if we have any cache line size at all, we can do MRM and MWI */
1158 csr0 |= MRM; 1158 csr0 |= MRM | MWI;
1159 1159
1160 /* ...and barring hardware bugs, MWI */ 1160 /* Enable MWI in the standard PCI command bit.
1161 if (!(tp->chip_id == DC21143 && tp->revision == 65)) 1161 * Check for the case where MWI is desired but not available
1162 csr0 |= MWI;
1163
1164 /* set or disable MWI in the standard PCI command bit.
1165 * Check for the case where mwi is desired but not available
1166 */ 1162 */
1167 if (csr0 & MWI) pci_try_set_mwi(pdev); 1163 pci_try_set_mwi(pdev);
1168 else pci_clear_mwi(pdev);
1169 1164
1170 /* read result from hardware (in case bit refused to enable) */ 1165 /* read result from hardware (in case bit refused to enable) */
1171 pci_read_config_word(pdev, PCI_COMMAND, &pci_command); 1166 pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
@@ -1401,10 +1396,6 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
1401#ifdef CONFIG_TULIP_MWI 1396#ifdef CONFIG_TULIP_MWI
1402 if (!force_csr0 && (tp->flags & HAS_PCI_MWI)) 1397 if (!force_csr0 && (tp->flags & HAS_PCI_MWI))
1403 tulip_mwi_config (pdev, dev); 1398 tulip_mwi_config (pdev, dev);
1404#else
1405 /* MWI is broken for DC21143 rev 65... */
1406 if (chip_idx == DC21143 && pdev->revision == 65)
1407 tp->csr0 &= ~MWI;
1408#endif 1399#endif
1409 1400
1410 /* Stop the chip's Tx and Rx processes. */ 1401 /* Stop the chip's Tx and Rx processes. */
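With the buffer-size workaround in tulip.h, the rev-65 special cases collapse: MRM and MWI can be requested unconditionally in CSR0, and pci_try_set_mwi() is the variant whose return value callers are allowed to ignore (unlike pci_set_mwi(), which is __must_check). The driver still reads PCI_COMMAND back because the PCI core or the device's cache-line-size setting may have refused the bit; a plausible continuation, not necessarily the exact code that follows in the file:

	csr0 = MRM | MWI;			/* request both bus features */
	pci_try_set_mwi(pdev);			/* failure here is not fatal */
	pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
	if (!(pci_command & PCI_COMMAND_INVALIDATE))
		csr0 &= ~MWI;			/* core or device refused MWI */
	tp->csr0 = csr0;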
diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c
index 35d0cfcf8c47..50068194c163 100644
--- a/drivers/net/tulip/winbond-840.c
+++ b/drivers/net/tulip/winbond-840.c
@@ -107,8 +107,6 @@ static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
107/* Time in jiffies before concluding the transmitter is hung. */ 107/* Time in jiffies before concluding the transmitter is hung. */
108#define TX_TIMEOUT (2*HZ) 108#define TX_TIMEOUT (2*HZ)
109 109
110#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
111
112/* Include files, designed to support most kernel versions 2.0.0 and later. */ 110/* Include files, designed to support most kernel versions 2.0.0 and later. */
113#include <linux/module.h> 111#include <linux/module.h>
114#include <linux/kernel.h> 112#include <linux/kernel.h>
@@ -137,6 +135,9 @@ static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
137 135
138#include "tulip.h" 136#include "tulip.h"
139 137
138#undef PKT_BUF_SZ /* tulip.h also defines this */
139#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
140
140/* These identify the driver base version and may not be removed. */ 141/* These identify the driver base version and may not be removed. */
141static char version[] = 142static char version[] =
142KERN_INFO DRV_NAME ".c:v" DRV_VERSION " (2.4 port) " DRV_RELDATE " Donald Becker <becker@scyld.com>\n" 143KERN_INFO DRV_NAME ".c:v" DRV_VERSION " (2.4 port) " DRV_RELDATE " Donald Becker <becker@scyld.com>\n"
diff --git a/drivers/net/tulip/xircom_tulip_cb.c b/drivers/net/tulip/xircom_tulip_cb.c
deleted file mode 100644
index c3f8e303c6c7..000000000000
--- a/drivers/net/tulip/xircom_tulip_cb.c
+++ /dev/null
@@ -1,1726 +0,0 @@
1/* xircom_tulip_cb.c: A Xircom CBE-100 ethernet driver for Linux. */
2/*
3 Written/copyright 1994-1999 by Donald Becker.
4
5 This software may be used and distributed according to the terms
6 of the GNU General Public License, incorporated herein by reference.
7
8 The author may be reached as becker@scyld.com, or C/O
9 Scyld Computing Corporation
10 410 Severn Ave., Suite 210
11 Annapolis MD 21403
12
13*/
14
15#define DRV_NAME "xircom_tulip_cb"
16#define DRV_VERSION "0.92"
17#define DRV_RELDATE "June 27, 2006"
18
19/* A few user-configurable values. */
20
21#define xircom_debug debug
22#ifdef XIRCOM_DEBUG
23static int xircom_debug = XIRCOM_DEBUG;
24#else
25static int xircom_debug = 1;
26#endif
27
28/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
29static int max_interrupt_work = 25;
30
31#define MAX_UNITS 4
32/* Used to pass the full-duplex flag, etc. */
33static int full_duplex[MAX_UNITS];
34static int options[MAX_UNITS];
35static int mtu[MAX_UNITS]; /* Jumbo MTU for interfaces. */
36
37/* Keep the ring sizes a power of two for efficiency.
38 Making the Tx ring too large decreases the effectiveness of channel
39 bonding and packet priority.
40 There are no ill effects from too-large receive rings. */
41#define TX_RING_SIZE 16
42#define RX_RING_SIZE 32
43
44/* Set the copy breakpoint for the copy-only-tiny-buffer Rx structure. */
45#ifdef __alpha__
46static int rx_copybreak = 1518;
47#else
48static int rx_copybreak = 100;
49#endif
50
51/*
52 Set the bus performance register.
53 Typical: Set 16 longword cache alignment, no burst limit.
54 Cache alignment bits 15:14 Burst length 13:8
55 0000 No alignment 0x00000000 unlimited 0800 8 longwords
56 4000 8 longwords 0100 1 longword 1000 16 longwords
57 8000 16 longwords 0200 2 longwords 2000 32 longwords
58 C000 32 longwords 0400 4 longwords
59 Warning: many older 486 systems are broken and require setting 0x00A04800
60 8 longword cache alignment, 8 longword burst.
61 ToDo: Non-Intel setting could be better.
62*/
63
64#if defined(__alpha__) || defined(__ia64__) || defined(__x86_64__)
65static int csr0 = 0x01A00000 | 0xE000;
66#elif defined(__powerpc__)
67static int csr0 = 0x01B00000 | 0x8000;
68#elif defined(CONFIG_SPARC)
69static int csr0 = 0x01B00080 | 0x8000;
70#elif defined(__i386__)
71static int csr0 = 0x01A00000 | 0x8000;
72#else
73#warning Processor architecture undefined!
74static int csr0 = 0x00A00000 | 0x4800;
75#endif
76
77/* Operational parameters that usually are not changed. */
78/* Time in jiffies before concluding the transmitter is hung. */
79#define TX_TIMEOUT (4 * HZ)
80#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
81#define PKT_SETUP_SZ 192 /* Size of the setup frame */
82
83/* PCI registers */
84#define PCI_POWERMGMT 0x40
85
86#include <linux/module.h>
87#include <linux/moduleparam.h>
88#include <linux/kernel.h>
89#include <linux/pci.h>
90#include <linux/netdevice.h>
91#include <linux/etherdevice.h>
92#include <linux/delay.h>
93#include <linux/init.h>
94#include <linux/mii.h>
95#include <linux/ethtool.h>
96#include <linux/crc32.h>
97
98#include <asm/io.h>
99#include <asm/processor.h> /* Processor type for cache alignment. */
100#include <asm/uaccess.h>
101
102
103/* These identify the driver base version and may not be removed. */
104static char version[] __devinitdata =
105KERN_INFO DRV_NAME ".c derived from tulip.c:v0.91 4/14/99 becker@scyld.com\n"
106KERN_INFO " unofficial 2.4.x kernel port, version " DRV_VERSION ", " DRV_RELDATE "\n";
107
108MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
109MODULE_DESCRIPTION("Xircom CBE-100 ethernet driver");
110MODULE_LICENSE("GPL v2");
111MODULE_VERSION(DRV_VERSION);
112
113module_param(debug, int, 0);
114module_param(max_interrupt_work, int, 0);
115module_param(rx_copybreak, int, 0);
116module_param(csr0, int, 0);
117
118module_param_array(options, int, NULL, 0);
119module_param_array(full_duplex, int, NULL, 0);
120
121#define RUN_AT(x) (jiffies + (x))
122
123/*
124 Theory of Operation
125
126I. Board Compatibility
127
128This device driver was forked from the driver for the DECchip "Tulip",
129Digital's single-chip ethernet controllers for PCI. It supports Xircom's
130almost-Tulip-compatible CBE-100 CardBus adapters.
131
132II. Board-specific settings
133
134PCI bus devices are configured by the system at boot time, so no jumpers
135need to be set on the board. The system BIOS preferably should assign the
136PCI INTA signal to an otherwise unused system IRQ line.
137
138III. Driver operation
139
140IIIa. Ring buffers
141
142The Xircom can use either ring buffers or lists of Tx and Rx descriptors.
143This driver uses statically allocated rings of Rx and Tx descriptors, set at
144compile time by RX/TX_RING_SIZE. This version of the driver allocates skbuffs
145for the Rx ring buffers at open() time and passes the skb->data field to the
146Xircom as receive data buffers. When an incoming frame is less than
147RX_COPYBREAK bytes long, a fresh skbuff is allocated and the frame is
148copied to the new skbuff. When the incoming frame is larger, the skbuff is
149passed directly up the protocol stack and replaced by a newly allocated
150skbuff.
151
152The RX_COPYBREAK value is chosen to trade-off the memory wasted by
153using a full-sized skbuff for small frames vs. the copying costs of larger
154frames. For small frames the copying cost is negligible (esp. considering
155that we are pre-loading the cache with immediately useful header
156information). For large frames the copying cost is non-trivial, and the
157larger copy might flush the cache of useful data. A subtle aspect of this
158choice is that the Xircom only receives into longword aligned buffers, thus
159the IP header at offset 14 isn't longword aligned for further processing.
160Copied frames are put into the new skbuff at an offset of "+2", thus copying
161has the beneficial effect of aligning the IP header and preloading the
162cache.
163
164IIIC. Synchronization
165The driver runs as two independent, single-threaded flows of control. One
166is the send-packet routine, which enforces single-threaded use by the
167dev->tbusy flag. The other thread is the interrupt handler, which is single
168threaded by the hardware and other software.
169
170The send packet thread has partial control over the Tx ring and 'dev->tbusy'
171flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next
172queue slot is empty, it clears the tbusy flag when finished otherwise it sets
173the 'tp->tx_full' flag.
174
175The interrupt handler has exclusive control over the Rx ring and records stats
176from the Tx ring. (The Tx-done interrupt can't be selectively turned off, so
177we can't avoid the interrupt overhead by having the Tx routine reap the Tx
178stats.) After reaping the stats, it marks the queue entry as empty by setting
179the 'base' to zero. Iff the 'tp->tx_full' flag is set, it clears both the
180tx_full and tbusy flags.
181
182IV. Notes
183
184IVb. References
185
186http://cesdis.gsfc.nasa.gov/linux/misc/NWay.html
187http://www.digital.com (search for current 21*4* datasheets and "21X4 SROM")
188http://www.national.com/pf/DP/DP83840A.html
189
190IVc. Errata
191
192*/
193
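The RX_COPYBREAK scheme described above is the classic copy-break receive path: frames shorter than the threshold are copied into a freshly allocated skb, reserved two bytes so the IP header behind the 14-byte Ethernet header lands 4-byte aligned, while larger frames hand the ring skb up the stack and get a replacement. A generic sketch of the small-frame branch, using the 2.6-era skb API; rx_ring_data is a hypothetical pointer to the DMA buffer, not this driver's variable:

	if (pkt_len < rx_copybreak) {
		struct sk_buff *skb = dev_alloc_skb(pkt_len + 2);

		if (skb) {
			skb_reserve(skb, 2);	/* align the IP header */
			skb_copy_to_linear_data(skb, rx_ring_data, pkt_len);
			skb_put(skb, pkt_len);
			skb->protocol = eth_type_trans(skb, dev);
			netif_rx(skb);
		}
		/* the original ring buffer stays in place for reuse */
	}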
194/* A full-duplex map for media types. */
195enum MediaIs {
196 MediaIsFD = 1, MediaAlwaysFD=2, MediaIsMII=4, MediaIsFx=8,
197 MediaIs100=16};
198static const char media_cap[] =
199{0,0,0,16, 3,19,16,24, 27,4,7,5, 0,20,23,20 };
200
201/* Offsets to the Command and Status Registers, "CSRs". All accesses
202 must be longword instructions and quadword aligned. */
203enum xircom_offsets {
204 CSR0=0, CSR1=0x08, CSR2=0x10, CSR3=0x18, CSR4=0x20, CSR5=0x28,
205 CSR6=0x30, CSR7=0x38, CSR8=0x40, CSR9=0x48, CSR10=0x50, CSR11=0x58,
206 CSR12=0x60, CSR13=0x68, CSR14=0x70, CSR15=0x78, CSR16=0x04, };
207
208/* The bits in the CSR5 status registers, mostly interrupt sources. */
209enum status_bits {
210 LinkChange=0x08000000,
211 NormalIntr=0x10000, NormalIntrMask=0x00014045,
212 AbnormalIntr=0x8000, AbnormalIntrMask=0x0a00a5a2,
213 ReservedIntrMask=0xe0001a18,
214 EarlyRxIntr=0x4000, BusErrorIntr=0x2000,
215 EarlyTxIntr=0x400, RxDied=0x100, RxNoBuf=0x80, RxIntr=0x40,
216 TxFIFOUnderflow=0x20, TxNoBuf=0x04, TxDied=0x02, TxIntr=0x01,
217};
218
219enum csr0_control_bits {
220 EnableMWI=0x01000000, EnableMRL=0x00800000,
221 EnableMRM=0x00200000, EqualBusPrio=0x02,
222 SoftwareReset=0x01,
223};
224
225enum csr6_control_bits {
226 ReceiveAllBit=0x40000000, AllMultiBit=0x80, PromiscBit=0x40,
227 HashFilterBit=0x01, FullDuplexBit=0x0200,
228 TxThresh10=0x400000, TxStoreForw=0x200000,
229 TxThreshMask=0xc000, TxThreshShift=14,
230 EnableTx=0x2000, EnableRx=0x02,
231 ReservedZeroMask=0x8d930134, ReservedOneMask=0x320c0000,
232 EnableTxRx=(EnableTx | EnableRx),
233};
234
235
236enum tbl_flag {
237 HAS_MII=1, HAS_ACPI=2,
238};
239static struct xircom_chip_table {
240 char *chip_name;
241 int valid_intrs; /* CSR7 interrupt enable settings */
242 int flags;
243} xircom_tbl[] = {
244 { "Xircom Cardbus Adapter",
245 LinkChange | NormalIntr | AbnormalIntr | BusErrorIntr |
246 RxDied | RxNoBuf | RxIntr | TxFIFOUnderflow | TxNoBuf | TxDied | TxIntr,
247 HAS_MII | HAS_ACPI, },
248 { NULL, },
249};
250/* This matches the table above. */
251enum chips {
252 X3201_3,
253};
254
255
256/* The Xircom Rx and Tx buffer descriptors. */
257struct xircom_rx_desc {
258 s32 status;
259 s32 length;
260 u32 buffer1, buffer2;
261};
262
263struct xircom_tx_desc {
264 s32 status;
265 s32 length;
266 u32 buffer1, buffer2; /* We use only buffer 1. */
267};
268
269enum tx_desc0_status_bits {
270 Tx0DescOwned=0x80000000, Tx0DescError=0x8000, Tx0NoCarrier=0x0800,
271 Tx0LateColl=0x0200, Tx0ManyColl=0x0100, Tx0Underflow=0x02,
272};
273enum tx_desc1_status_bits {
274 Tx1ComplIntr=0x80000000, Tx1LastSeg=0x40000000, Tx1FirstSeg=0x20000000,
275 Tx1SetupPkt=0x08000000, Tx1DisableCRC=0x04000000, Tx1RingWrap=0x02000000,
276 Tx1ChainDesc=0x01000000, Tx1NoPad=0x800000, Tx1HashSetup=0x400000,
277 Tx1WholePkt=(Tx1FirstSeg | Tx1LastSeg),
278};
279enum rx_desc0_status_bits {
280 Rx0DescOwned=0x80000000, Rx0DescError=0x8000, Rx0NoSpace=0x4000,
281 Rx0Runt=0x0800, Rx0McastPkt=0x0400, Rx0FirstSeg=0x0200, Rx0LastSeg=0x0100,
282 Rx0HugeFrame=0x80, Rx0CRCError=0x02,
283 Rx0WholePkt=(Rx0FirstSeg | Rx0LastSeg),
284};
285enum rx_desc1_status_bits {
286 Rx1RingWrap=0x02000000, Rx1ChainDesc=0x01000000,
287};
288
289struct xircom_private {
290 struct xircom_rx_desc rx_ring[RX_RING_SIZE];
291 struct xircom_tx_desc tx_ring[TX_RING_SIZE];
292 /* The saved address of a sent-in-place packet/buffer, for skfree(). */
293 struct sk_buff* tx_skbuff[TX_RING_SIZE];
294
295 /* The X3201-3 requires 4-byte aligned tx bufs */
296 struct sk_buff* tx_aligned_skbuff[TX_RING_SIZE];
297
298 /* The addresses of receive-in-place skbuffs. */
299 struct sk_buff* rx_skbuff[RX_RING_SIZE];
300 u16 setup_frame[PKT_SETUP_SZ / sizeof(u16)]; /* Pseudo-Tx frame to init address table. */
301 int chip_id;
302 struct net_device_stats stats;
303 unsigned int cur_rx, cur_tx; /* The next free ring entry */
304 unsigned int dirty_rx, dirty_tx; /* The ring entries to be free()ed. */
305 unsigned int tx_full:1; /* The Tx queue is full. */
306 unsigned int speed100:1;
307 unsigned int full_duplex:1; /* Full-duplex operation requested. */
308 unsigned int autoneg:1;
309 unsigned int default_port:4; /* Last dev->if_port value. */
310 unsigned int open:1;
311 unsigned int csr0; /* CSR0 setting. */
312 unsigned int csr6; /* Current CSR6 control settings. */
313 u16 to_advertise; /* NWay capabilities advertised. */
314 u16 advertising[4];
315 signed char phys[4], mii_cnt; /* MII device addresses. */
316 int saved_if_port;
317 struct pci_dev *pdev;
318 spinlock_t lock;
319};
320
321static int mdio_read(struct net_device *dev, int phy_id, int location);
322static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
323static void xircom_up(struct net_device *dev);
324static void xircom_down(struct net_device *dev);
325static int xircom_open(struct net_device *dev);
326static void xircom_tx_timeout(struct net_device *dev);
327static void xircom_init_ring(struct net_device *dev);
328static int xircom_start_xmit(struct sk_buff *skb, struct net_device *dev);
329static int xircom_rx(struct net_device *dev);
330static void xircom_media_change(struct net_device *dev);
331static irqreturn_t xircom_interrupt(int irq, void *dev_instance);
332static int xircom_close(struct net_device *dev);
333static struct net_device_stats *xircom_get_stats(struct net_device *dev);
334static int xircom_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
335static void set_rx_mode(struct net_device *dev);
336static void check_duplex(struct net_device *dev);
337static const struct ethtool_ops ops;
338
339
340/* The Xircom cards are picky about when certain bits in CSR6 can be
341 manipulated. Keith Owens <kaos@ocs.com.au>. */
342static void outl_CSR6(u32 newcsr6, long ioaddr)
343{
344 const int strict_bits =
345 TxThresh10 | TxStoreForw | TxThreshMask | EnableTxRx | FullDuplexBit;
346 int csr5, csr5_22_20, csr5_19_17, currcsr6, attempts = 200;
347 unsigned long flags;
348 save_flags(flags);
349 cli();
350 /* mask out the reserved bits that always read 0 on the Xircom cards */
351 newcsr6 &= ~ReservedZeroMask;
352 /* or in the reserved bits that always read 1 */
353 newcsr6 |= ReservedOneMask;
354 currcsr6 = inl(ioaddr + CSR6);
355 if (((newcsr6 & strict_bits) == (currcsr6 & strict_bits)) ||
356 ((currcsr6 & ~EnableTxRx) == 0)) {
357 outl(newcsr6, ioaddr + CSR6); /* safe */
358 restore_flags(flags);
359 return;
360 }
361 /* make sure the transmitter and receiver are stopped first */
362 currcsr6 &= ~EnableTxRx;
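	/* Wait for both DMA engines to report stopped/suspended.  On this
	 * 21143-style core, CSR5 bits 22:20 and 19:17 appear to be the Tx and
	 * Rx process state fields, hence the values checked below. */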
363 while (1) {
364 csr5 = inl(ioaddr + CSR5);
365 if (csr5 == 0xffffffff)
366 break; /* cannot read csr5, card removed? */
367 csr5_22_20 = csr5 & 0x700000;
368 csr5_19_17 = csr5 & 0x0e0000;
369 if ((csr5_22_20 == 0 || csr5_22_20 == 0x600000) &&
370 (csr5_19_17 == 0 || csr5_19_17 == 0x80000 || csr5_19_17 == 0xc0000))
371 break; /* both are stopped or suspended */
372 if (!--attempts) {
 373 printk(KERN_INFO DRV_NAME ": outl_CSR6 too many attempts, "
 374 "csr5=0x%08x\n", csr5);
375 outl(newcsr6, ioaddr + CSR6); /* unsafe but do it anyway */
376 restore_flags(flags);
377 return;
378 }
379 outl(currcsr6, ioaddr + CSR6);
380 udelay(1);
381 }
382 /* now it is safe to change csr6 */
383 outl(newcsr6, ioaddr + CSR6);
384 restore_flags(flags);
385}
386
387
388static void __devinit read_mac_address(struct net_device *dev)
389{
390 long ioaddr = dev->base_addr;
391 int i, j;
392 unsigned char tuple, link, data_id, data_count;
393
394 /* Xircom has its address stored in the CIS;
395 * we access it through the boot rom interface for now
396 * this might not work, as the CIS is not parsed but I
397 * (danilo) use the offset I found on my card's CIS !!!
398 *
399 * Doug Ledford: I changed this routine around so that it
400 * walks the CIS memory space, parsing the config items, and
401 * finds the proper lan_node_id tuple and uses the data
402 * stored there.
403 */
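	/* Tuple 0x22 is CISTPL_FUNCE; a data_id of 0x04 with a 6-byte payload
	 * is the LAN node-ID extension that carries the station address. */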
404 outl(1 << 12, ioaddr + CSR9); /* enable boot rom access */
405 for (i = 0x100; i < 0x1f7; i += link+2) {
406 outl(i, ioaddr + CSR10);
407 tuple = inl(ioaddr + CSR9) & 0xff;
408 outl(i + 1, ioaddr + CSR10);
409 link = inl(ioaddr + CSR9) & 0xff;
410 outl(i + 2, ioaddr + CSR10);
411 data_id = inl(ioaddr + CSR9) & 0xff;
412 outl(i + 3, ioaddr + CSR10);
413 data_count = inl(ioaddr + CSR9) & 0xff;
414 if ( (tuple == 0x22) &&
415 (data_id == 0x04) && (data_count == 0x06) ) {
416 /*
417 * This is it. We have the data we want.
418 */
419 for (j = 0; j < 6; j++) {
420 outl(i + j + 4, ioaddr + CSR10);
421 dev->dev_addr[j] = inl(ioaddr + CSR9) & 0xff;
422 }
423 break;
424 } else if (link == 0) {
425 break;
426 }
427 }
428}
429
430
431/*
432 * locate the MII interfaces and initialize them.
433 * we disable full-duplex modes here,
434 * because we don't know how to handle them.
435 */
436static void find_mii_transceivers(struct net_device *dev)
437{
438 struct xircom_private *tp = netdev_priv(dev);
439 int phy, phy_idx;
440
441 if (media_cap[tp->default_port] & MediaIsMII) {
 442 static const u16 media2advert[] = { 0x20, 0x40, 0x03e0, 0x60, 0x80, 0x100, 0x200 };
443 tp->to_advertise = media2advert[tp->default_port - 9];
444 } else
445 tp->to_advertise =
446 /*ADVERTISE_100BASE4 | ADVERTISE_100FULL |*/ ADVERTISE_100HALF |
447 /*ADVERTISE_10FULL |*/ ADVERTISE_10HALF | ADVERTISE_CSMA;
448
449 /* Find the connected MII xcvrs.
450 Doing this in open() would allow detecting external xcvrs later,
451 but takes much time. */
452 for (phy = 0, phy_idx = 0; phy < 32 && phy_idx < sizeof(tp->phys); phy++) {
453 int mii_status = mdio_read(dev, phy, MII_BMSR);
454 if ((mii_status & (BMSR_100BASE4 | BMSR_100HALF | BMSR_10HALF)) == BMSR_100BASE4 ||
455 ((mii_status & BMSR_100BASE4) == 0 &&
456 (mii_status & (BMSR_100FULL | BMSR_100HALF | BMSR_10FULL | BMSR_10HALF)) != 0)) {
457 int mii_reg0 = mdio_read(dev, phy, MII_BMCR);
458 int mii_advert = mdio_read(dev, phy, MII_ADVERTISE);
459 int reg4 = ((mii_status >> 6) & tp->to_advertise) | ADVERTISE_CSMA;
460 tp->phys[phy_idx] = phy;
461 tp->advertising[phy_idx++] = reg4;
462 printk(KERN_INFO "%s: MII transceiver #%d "
463 "config %4.4x status %4.4x advertising %4.4x.\n",
464 dev->name, phy, mii_reg0, mii_status, mii_advert);
465 }
466 }
467 tp->mii_cnt = phy_idx;
468 if (phy_idx == 0) {
469 printk(KERN_INFO "%s: ***WARNING***: No MII transceiver found!\n",
470 dev->name);
471 tp->phys[0] = 0;
472 }
473}
474
475
476/*
477 * To quote Arjan van de Ven:
478 * transceiver_voodoo() enables the external UTP plug thingy.
479 * it's called voodoo as I stole this code and cannot cross-reference
480 * it with the specification.
481 * Actually it seems to go like this:
482 * - GPIO2 enables the MII itself so we can talk to it. The MII gets reset
483 * so any prior MII settings are lost.
484 * - GPIO0 enables the TP port so the MII can talk to the network.
485 * - a software reset will reset both GPIO pins.
486 * I also moved the software reset here, because doing it in xircom_up()
487 * required enabling the GPIO pins each time, which reset the MII each time.
488 * Thus we couldn't control the MII -- which sucks because we don't know
489 * how to handle full-duplex modes so we *must* disable them.
490 */
491static void transceiver_voodoo(struct net_device *dev)
492{
493 struct xircom_private *tp = netdev_priv(dev);
494 long ioaddr = dev->base_addr;
495
496 /* Reset the chip, holding bit 0 set at least 50 PCI cycles. */
497 outl(SoftwareReset, ioaddr + CSR0);
498 udelay(2);
499
500 /* Deassert reset. */
501 outl(tp->csr0, ioaddr + CSR0);
502
503 /* Reset the xcvr interface and turn on heartbeat. */
504 outl(0x0008, ioaddr + CSR15);
505 udelay(5); /* The delays are Xircom-recommended to give the
506 * chipset time to reset the actual hardware
507 * on the PCMCIA card
508 */
509 outl(0xa8050000, ioaddr + CSR15);
510 udelay(5);
511 outl(0xa00f0000, ioaddr + CSR15);
512 udelay(5);
513
514 outl_CSR6(0, ioaddr);
515 //outl_CSR6(FullDuplexBit, ioaddr);
516}
517
518
519static int __devinit xircom_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
520{
521 struct net_device *dev;
522 struct xircom_private *tp;
523 static int board_idx = -1;
524 int chip_idx = id->driver_data;
525 long ioaddr;
526 int i;
527
528/* when built into the kernel, we only print version if device is found */
529#ifndef MODULE
530 static int printed_version;
531 if (!printed_version++)
532 printk(version);
533#endif
534
535 //printk(KERN_INFO "xircom_init_one(%s)\n", pci_name(pdev));
536
537 board_idx++;
538
539 if (pci_enable_device(pdev))
540 return -ENODEV;
541
542 pci_set_master(pdev);
543
544 ioaddr = pci_resource_start(pdev, 0);
545 dev = alloc_etherdev(sizeof(*tp));
546 if (!dev) {
 547 printk(KERN_ERR DRV_NAME " %d: cannot alloc etherdev, aborting\n", board_idx);
548 return -ENOMEM;
549 }
550 SET_NETDEV_DEV(dev, &pdev->dev);
551
552 dev->base_addr = ioaddr;
553 dev->irq = pdev->irq;
554
555 if (pci_request_regions(pdev, dev->name)) {
 556 printk(KERN_ERR DRV_NAME " %d: cannot reserve PCI resources, aborting\n", board_idx);
557 goto err_out_free_netdev;
558 }
559
560 /* Bring the chip out of sleep mode.
561 Caution: Snooze mode does not work with some boards! */
562 if (xircom_tbl[chip_idx].flags & HAS_ACPI)
563 pci_write_config_dword(pdev, PCI_POWERMGMT, 0);
564
565 /* Stop the chip's Tx and Rx processes. */
566 outl_CSR6(inl(ioaddr + CSR6) & ~EnableTxRx, ioaddr);
567 /* Clear the missed-packet counter. */
 568 (void) inl(ioaddr + CSR8);
569
570 tp = netdev_priv(dev);
571
572 spin_lock_init(&tp->lock);
573 tp->pdev = pdev;
574 tp->chip_id = chip_idx;
575 /* BugFixes: The 21143-TD hangs with PCI Write-and-Invalidate cycles. */
576 /* XXX: is this necessary for Xircom? */
577 tp->csr0 = csr0 & ~EnableMWI;
578
579 pci_set_drvdata(pdev, dev);
580
581 /* The lower four bits are the media type. */
582 if (board_idx >= 0 && board_idx < MAX_UNITS) {
583 tp->default_port = options[board_idx] & 15;
584 if ((options[board_idx] & 0x90) || full_duplex[board_idx] > 0)
585 tp->full_duplex = 1;
586 if (mtu[board_idx] > 0)
587 dev->mtu = mtu[board_idx];
588 }
589 if (dev->mem_start)
590 tp->default_port = dev->mem_start;
591 if (tp->default_port) {
592 if (media_cap[tp->default_port] & MediaAlwaysFD)
593 tp->full_duplex = 1;
594 }
595 if (tp->full_duplex)
596 tp->autoneg = 0;
597 else
598 tp->autoneg = 1;
599 tp->speed100 = 1;
600
601 /* The Xircom-specific entries in the device structure. */
602 dev->open = &xircom_open;
603 dev->hard_start_xmit = &xircom_start_xmit;
604 dev->stop = &xircom_close;
605 dev->get_stats = &xircom_get_stats;
606 dev->do_ioctl = &xircom_ioctl;
607#ifdef HAVE_MULTICAST
608 dev->set_multicast_list = &set_rx_mode;
609#endif
610 dev->tx_timeout = xircom_tx_timeout;
611 dev->watchdog_timeo = TX_TIMEOUT;
612 SET_ETHTOOL_OPS(dev, &ops);
613
614 transceiver_voodoo(dev);
615
616 read_mac_address(dev);
617
618 if (register_netdev(dev))
619 goto err_out_cleardev;
620
621 printk(KERN_INFO "%s: %s rev %d at %#3lx,",
622 dev->name, xircom_tbl[chip_idx].chip_name, pdev->revision, ioaddr);
623 for (i = 0; i < 6; i++)
624 printk("%c%2.2X", i ? ':' : ' ', dev->dev_addr[i]);
625 printk(", IRQ %d.\n", dev->irq);
626
627 if (xircom_tbl[chip_idx].flags & HAS_MII) {
628 find_mii_transceivers(dev);
629 check_duplex(dev);
630 }
631
632 return 0;
633
634err_out_cleardev:
635 pci_set_drvdata(pdev, NULL);
636 pci_release_regions(pdev);
637err_out_free_netdev:
638 free_netdev(dev);
639 return -ENODEV;
640}
641
642
643/* MII transceiver control section.
644 Read and write the MII registers using software-generated serial
645 MDIO protocol. See the MII specifications or DP83840A data sheet
646 for details. */
647
648/* The maximum data clock rate is 2.5 MHz.  The minimum timing is usually
649   met by back-to-back PCI I/O cycles, but we insert a delay to avoid
650   "overclocking" issues on future 66 MHz PCI. */
651#define mdio_delay() inl(mdio_addr)
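/* The dummy CSR9 read flushes the preceding I/O write, which is enough of a
 * delay to keep the bit-banged MDIO clock within spec. */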
652
653/* Read and write the MII registers using software-generated serial
654   MDIO protocol.  It is just different enough from the EEPROM protocol
655   to not share code.  The maximum data clock rate is 2.5 MHz. */
656#define MDIO_SHIFT_CLK 0x10000
657#define MDIO_DATA_WRITE0 0x00000
658#define MDIO_DATA_WRITE1 0x20000
659#define MDIO_ENB 0x00000 /* Ignore the 0x02000 databook setting. */
660#define MDIO_ENB_IN 0x40000
661#define MDIO_DATA_READ 0x80000
662
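/* mdio_read() bit-bangs an IEEE 802.3 clause 22 read frame through CSR9:
 * a preamble of ones, then a 16-bit command word (two more ones, the 01
 * start and 10 read-opcode fields, the 5-bit PHY and register addresses),
 * and finally the turnaround, 16 data bits and an idle bit clocked back in. */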
663static int mdio_read(struct net_device *dev, int phy_id, int location)
664{
665 int i;
666 int read_cmd = (0xf6 << 10) | (phy_id << 5) | location;
667 int retval = 0;
668 long ioaddr = dev->base_addr;
669 long mdio_addr = ioaddr + CSR9;
670
671 /* Establish sync by sending at least 32 logic ones. */
672 for (i = 32; i >= 0; i--) {
673 outl(MDIO_ENB | MDIO_DATA_WRITE1, mdio_addr);
674 mdio_delay();
675 outl(MDIO_ENB | MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK, mdio_addr);
676 mdio_delay();
677 }
678 /* Shift the read command bits out. */
679 for (i = 15; i >= 0; i--) {
680 int dataval = (read_cmd & (1 << i)) ? MDIO_DATA_WRITE1 : 0;
681
682 outl(MDIO_ENB | dataval, mdio_addr);
683 mdio_delay();
684 outl(MDIO_ENB | dataval | MDIO_SHIFT_CLK, mdio_addr);
685 mdio_delay();
686 }
687 /* Read the two transition, 16 data, and wire-idle bits. */
688 for (i = 19; i > 0; i--) {
689 outl(MDIO_ENB_IN, mdio_addr);
690 mdio_delay();
691 retval = (retval << 1) | ((inl(mdio_addr) & MDIO_DATA_READ) ? 1 : 0);
692 outl(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr);
693 mdio_delay();
694 }
695 return (retval>>1) & 0xffff;
696}
697
698
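/* mdio_write() shifts out the full 32-bit clause 22 write frame packed into
 * 'cmd' below: 0101 (start + write opcode), the PHY and register addresses,
 * a 10 turnaround and the 16-bit value. */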
699static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
700{
701 int i;
702 int cmd = (0x5002 << 16) | (phy_id << 23) | (location << 18) | value;
703 long ioaddr = dev->base_addr;
704 long mdio_addr = ioaddr + CSR9;
705
706 /* Establish sync by sending 32 logic ones. */
707 for (i = 32; i >= 0; i--) {
708 outl(MDIO_ENB | MDIO_DATA_WRITE1, mdio_addr);
709 mdio_delay();
710 outl(MDIO_ENB | MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK, mdio_addr);
711 mdio_delay();
712 }
713 /* Shift the command bits out. */
714 for (i = 31; i >= 0; i--) {
715 int dataval = (cmd & (1 << i)) ? MDIO_DATA_WRITE1 : 0;
716 outl(MDIO_ENB | dataval, mdio_addr);
717 mdio_delay();
718 outl(MDIO_ENB | dataval | MDIO_SHIFT_CLK, mdio_addr);
719 mdio_delay();
720 }
721 /* Clear out extra bits. */
722 for (i = 2; i > 0; i--) {
723 outl(MDIO_ENB_IN, mdio_addr);
724 mdio_delay();
725 outl(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr);
726 mdio_delay();
727 }
728 return;
729}
730
731
732static void
733xircom_up(struct net_device *dev)
734{
735 struct xircom_private *tp = netdev_priv(dev);
736 long ioaddr = dev->base_addr;
737 int i;
738
739 xircom_init_ring(dev);
740 /* Clear the tx ring */
741 for (i = 0; i < TX_RING_SIZE; i++) {
742 tp->tx_skbuff[i] = NULL;
743 tp->tx_ring[i].status = 0;
744 }
745
746 if (xircom_debug > 1)
747 printk(KERN_DEBUG "%s: xircom_up() irq %d.\n", dev->name, dev->irq);
748
749 outl(virt_to_bus(tp->rx_ring), ioaddr + CSR3);
750 outl(virt_to_bus(tp->tx_ring), ioaddr + CSR4);
751
752 tp->saved_if_port = dev->if_port;
753 if (dev->if_port == 0)
754 dev->if_port = tp->default_port;
755
756 tp->csr6 = TxThresh10 /*| FullDuplexBit*/; /* XXX: why 10 and not 100? */
757
758 set_rx_mode(dev);
759
760 /* Start the chip's Tx to process setup frame. */
761 outl_CSR6(tp->csr6, ioaddr);
762 outl_CSR6(tp->csr6 | EnableTx, ioaddr);
763
 764 /* Acknowledge all outstanding interrupt sources */
765 outl(xircom_tbl[tp->chip_id].valid_intrs, ioaddr + CSR5);
766 /* Enable interrupts by setting the interrupt mask. */
767 outl(xircom_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
768 /* Enable Rx */
769 outl_CSR6(tp->csr6 | EnableTxRx, ioaddr);
770 /* Rx poll demand */
771 outl(0, ioaddr + CSR2);
772
773 /* Tell the net layer we're ready */
774 netif_start_queue (dev);
775
776 /* Check current media state */
777 xircom_media_change(dev);
778
779 if (xircom_debug > 2) {
780 printk(KERN_DEBUG "%s: Done xircom_up(), CSR0 %8.8x, CSR5 %8.8x CSR6 %8.8x.\n",
781 dev->name, inl(ioaddr + CSR0), inl(ioaddr + CSR5),
782 inl(ioaddr + CSR6));
783 }
784}
785
786
787static int
788xircom_open(struct net_device *dev)
789{
790 struct xircom_private *tp = netdev_priv(dev);
791
792 if (request_irq(dev->irq, &xircom_interrupt, IRQF_SHARED, dev->name, dev))
793 return -EAGAIN;
794
795 xircom_up(dev);
796 tp->open = 1;
797
798 return 0;
799}
800
801
802static void xircom_tx_timeout(struct net_device *dev)
803{
804 struct xircom_private *tp = netdev_priv(dev);
805 long ioaddr = dev->base_addr;
806
807 if (media_cap[dev->if_port] & MediaIsMII) {
808 /* Do nothing -- the media monitor should handle this. */
809 if (xircom_debug > 1)
810 printk(KERN_WARNING "%s: Transmit timeout using MII device.\n",
811 dev->name);
812 }
813
814#if defined(way_too_many_messages)
815 if (xircom_debug > 3) {
816 int i;
817 for (i = 0; i < RX_RING_SIZE; i++) {
818 u8 *buf = (u8 *)(tp->rx_ring[i].buffer1);
819 int j;
820 printk(KERN_DEBUG "%2d: %8.8x %8.8x %8.8x %8.8x "
821 "%2.2x %2.2x %2.2x.\n",
822 i, (unsigned int)tp->rx_ring[i].status,
823 (unsigned int)tp->rx_ring[i].length,
824 (unsigned int)tp->rx_ring[i].buffer1,
825 (unsigned int)tp->rx_ring[i].buffer2,
826 buf[0], buf[1], buf[2]);
827 for (j = 0; buf[j] != 0xee && j < 1600; j++)
828 if (j < 100) printk(" %2.2x", buf[j]);
829 printk(" j=%d.\n", j);
830 }
831 printk(KERN_DEBUG " Rx ring %8.8x: ", (int)tp->rx_ring);
832 for (i = 0; i < RX_RING_SIZE; i++)
833 printk(" %8.8x", (unsigned int)tp->rx_ring[i].status);
834 printk("\n" KERN_DEBUG " Tx ring %8.8x: ", (int)tp->tx_ring);
835 for (i = 0; i < TX_RING_SIZE; i++)
836 printk(" %8.8x", (unsigned int)tp->tx_ring[i].status);
837 printk("\n");
838 }
839#endif
840
 841 /* Stop and restart the chip's Tx/Rx processes. */
842 outl_CSR6(tp->csr6 | EnableRx, ioaddr);
843 outl_CSR6(tp->csr6 | EnableTxRx, ioaddr);
844 /* Trigger an immediate transmit demand. */
845 outl(0, ioaddr + CSR1);
846
847 dev->trans_start = jiffies;
848 netif_wake_queue (dev);
849 tp->stats.tx_errors++;
850}
851
852
853/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
854static void xircom_init_ring(struct net_device *dev)
855{
856 struct xircom_private *tp = netdev_priv(dev);
857 int i;
858
859 tp->tx_full = 0;
860 tp->cur_rx = tp->cur_tx = 0;
861 tp->dirty_rx = tp->dirty_tx = 0;
862
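	/* The descriptors form a contiguous ring (the last entry carries the
	 * RingWrap bit); buffer2 is also pointed at the next descriptor, so the
	 * layout would presumably work in chained mode too, although the chain
	 * bit itself is never set. */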
863 for (i = 0; i < RX_RING_SIZE; i++) {
864 tp->rx_ring[i].status = 0;
865 tp->rx_ring[i].length = PKT_BUF_SZ;
866 tp->rx_ring[i].buffer2 = virt_to_bus(&tp->rx_ring[i+1]);
867 tp->rx_skbuff[i] = NULL;
868 }
869 /* Mark the last entry as wrapping the ring. */
870 tp->rx_ring[i-1].length = PKT_BUF_SZ | Rx1RingWrap;
871 tp->rx_ring[i-1].buffer2 = virt_to_bus(&tp->rx_ring[0]);
872
873 for (i = 0; i < RX_RING_SIZE; i++) {
874 /* Note the receive buffer must be longword aligned.
875 dev_alloc_skb() provides 16 byte alignment. But do *not*
876 use skb_reserve() to align the IP header! */
877 struct sk_buff *skb = dev_alloc_skb(PKT_BUF_SZ);
878 tp->rx_skbuff[i] = skb;
879 if (skb == NULL)
880 break;
881 skb->dev = dev; /* Mark as being used by this device. */
882 tp->rx_ring[i].status = Rx0DescOwned; /* Owned by Xircom chip */
883 tp->rx_ring[i].buffer1 = virt_to_bus(skb->data);
884 }
885 tp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
886
887 /* The Tx buffer descriptor is filled in as needed, but we
888 do need to clear the ownership bit. */
889 for (i = 0; i < TX_RING_SIZE; i++) {
890 tp->tx_skbuff[i] = NULL;
891 tp->tx_ring[i].status = 0;
892 tp->tx_ring[i].buffer2 = virt_to_bus(&tp->tx_ring[i+1]);
893 if (tp->chip_id == X3201_3)
894 tp->tx_aligned_skbuff[i] = dev_alloc_skb(PKT_BUF_SZ);
895 }
896 tp->tx_ring[i-1].buffer2 = virt_to_bus(&tp->tx_ring[0]);
897}
898
899
900static int
901xircom_start_xmit(struct sk_buff *skb, struct net_device *dev)
902{
903 struct xircom_private *tp = netdev_priv(dev);
904 int entry;
905 u32 flag;
906
907 /* Caution: the write order is important here, set the base address
908 with the "ownership" bits last. */
909
910 /* Calculate the next Tx descriptor entry. */
911 entry = tp->cur_tx % TX_RING_SIZE;
912
913 tp->tx_skbuff[entry] = skb;
914 if (tp->chip_id == X3201_3) {
915 skb_copy_from_linear_data(skb,
916 tp->tx_aligned_skbuff[entry]->data,
917 skb->len);
918 tp->tx_ring[entry].buffer1 = virt_to_bus(tp->tx_aligned_skbuff[entry]->data);
919 } else
920 tp->tx_ring[entry].buffer1 = virt_to_bus(skb->data);
921
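	/* Interrupt mitigation: ask for a Tx-done interrupt only once the ring
	 * is half full (and again when it is nearly full), so light traffic is
	 * reclaimed lazily instead of taking an interrupt per packet. */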
922 if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE/2) {/* Typical path */
923 flag = Tx1WholePkt; /* No interrupt */
924 } else if (tp->cur_tx - tp->dirty_tx == TX_RING_SIZE/2) {
925 flag = Tx1WholePkt | Tx1ComplIntr; /* Tx-done intr. */
926 } else if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE - 2) {
927 flag = Tx1WholePkt; /* No Tx-done intr. */
928 } else {
929 /* Leave room for set_rx_mode() to fill entries. */
930 flag = Tx1WholePkt | Tx1ComplIntr; /* Tx-done intr. */
931 tp->tx_full = 1;
932 }
933 if (entry == TX_RING_SIZE - 1)
934 flag |= Tx1WholePkt | Tx1ComplIntr | Tx1RingWrap;
935
936 tp->tx_ring[entry].length = skb->len | flag;
937 tp->tx_ring[entry].status = Tx0DescOwned; /* Pass ownership to the chip. */
938 tp->cur_tx++;
939 if (tp->tx_full)
940 netif_stop_queue (dev);
941 else
942 netif_wake_queue (dev);
943
944 /* Trigger an immediate transmit demand. */
945 outl(0, dev->base_addr + CSR1);
946
947 dev->trans_start = jiffies;
948
949 return 0;
950}
951
952
953static void xircom_media_change(struct net_device *dev)
954{
955 struct xircom_private *tp = netdev_priv(dev);
956 long ioaddr = dev->base_addr;
957 u16 reg0, reg1, reg4, reg5;
958 u32 csr6 = inl(ioaddr + CSR6), newcsr6;
959
960 /* reset status first */
961 mdio_read(dev, tp->phys[0], MII_BMCR);
962 mdio_read(dev, tp->phys[0], MII_BMSR);
963
964 reg0 = mdio_read(dev, tp->phys[0], MII_BMCR);
965 reg1 = mdio_read(dev, tp->phys[0], MII_BMSR);
966
967 if (reg1 & BMSR_LSTATUS) {
968 /* link is up */
969 if (reg0 & BMCR_ANENABLE) {
970 /* autonegotiation is enabled */
971 reg4 = mdio_read(dev, tp->phys[0], MII_ADVERTISE);
972 reg5 = mdio_read(dev, tp->phys[0], MII_LPA);
973 if (reg4 & ADVERTISE_100FULL && reg5 & LPA_100FULL) {
974 tp->speed100 = 1;
975 tp->full_duplex = 1;
976 } else if (reg4 & ADVERTISE_100HALF && reg5 & LPA_100HALF) {
977 tp->speed100 = 1;
978 tp->full_duplex = 0;
979 } else if (reg4 & ADVERTISE_10FULL && reg5 & LPA_10FULL) {
980 tp->speed100 = 0;
981 tp->full_duplex = 1;
982 } else {
983 tp->speed100 = 0;
984 tp->full_duplex = 0;
985 }
986 } else {
987 /* autonegotiation is disabled */
988 if (reg0 & BMCR_SPEED100)
989 tp->speed100 = 1;
990 else
991 tp->speed100 = 0;
992 if (reg0 & BMCR_FULLDPLX)
993 tp->full_duplex = 1;
994 else
995 tp->full_duplex = 0;
996 }
997 printk(KERN_DEBUG "%s: Link is up, running at %sMbit %s-duplex\n",
998 dev->name,
999 tp->speed100 ? "100" : "10",
1000 tp->full_duplex ? "full" : "half");
1001 netif_carrier_on(dev);
1002 newcsr6 = csr6 & ~FullDuplexBit;
1003 if (tp->full_duplex)
1004 newcsr6 |= FullDuplexBit;
1005 if (newcsr6 != csr6)
1006 outl_CSR6(newcsr6, ioaddr); /* outl_CSR6() adds the CSR6 offset itself */
1007 } else {
1008 printk(KERN_DEBUG "%s: Link is down\n", dev->name);
1009 netif_carrier_off(dev);
1010 }
1011}
1012
1013
1014static void check_duplex(struct net_device *dev)
1015{
1016 struct xircom_private *tp = netdev_priv(dev);
1017 u16 reg0;
1018
1019 mdio_write(dev, tp->phys[0], MII_BMCR, BMCR_RESET);
1020 udelay(500);
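	/* Busy-wait (no timeout) until the PHY clears BMCR_RESET. */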
1021 while (mdio_read(dev, tp->phys[0], MII_BMCR) & BMCR_RESET);
1022
1023 reg0 = mdio_read(dev, tp->phys[0], MII_BMCR);
1024 mdio_write(dev, tp->phys[0], MII_ADVERTISE, tp->advertising[0]);
1025
1026 if (tp->autoneg) {
1027 reg0 &= ~(BMCR_SPEED100 | BMCR_FULLDPLX);
1028 reg0 |= BMCR_ANENABLE | BMCR_ANRESTART;
1029 } else {
1030 reg0 &= ~(BMCR_ANENABLE | BMCR_ANRESTART);
1031 if (tp->speed100)
1032 reg0 |= BMCR_SPEED100;
1033 if (tp->full_duplex)
1034 reg0 |= BMCR_FULLDPLX;
1035 printk(KERN_DEBUG "%s: Link forced to %sMbit %s-duplex\n",
1036 dev->name,
1037 tp->speed100 ? "100" : "10",
1038 tp->full_duplex ? "full" : "half");
1039 }
1040 mdio_write(dev, tp->phys[0], MII_BMCR, reg0);
1041}
1042
1043
1044/* The interrupt handler does all of the Rx thread work and cleans up
1045 after the Tx thread. */
1046static irqreturn_t xircom_interrupt(int irq, void *dev_instance)
1047{
1048 struct net_device *dev = dev_instance;
1049 struct xircom_private *tp = netdev_priv(dev);
1050 long ioaddr = dev->base_addr;
1051 int csr5, work_budget = max_interrupt_work;
1052 int handled = 0;
1053
1054 spin_lock (&tp->lock);
1055
1056 do {
1057 csr5 = inl(ioaddr + CSR5);
1058 /* Acknowledge all of the current interrupt sources ASAP. */
1059 outl(csr5 & 0x0001ffff, ioaddr + CSR5);
1060
1061 if (xircom_debug > 4)
1062 printk(KERN_DEBUG "%s: interrupt csr5=%#8.8x new csr5=%#8.8x.\n",
1063 dev->name, csr5, inl(dev->base_addr + CSR5));
1064
1065 if (csr5 == 0xffffffff)
1066 break; /* all bits set, assume PCMCIA card removed */
1067
1068 if ((csr5 & (NormalIntr|AbnormalIntr)) == 0)
1069 break;
1070
1071 handled = 1;
1072
1073 if (csr5 & (RxIntr | RxNoBuf))
1074 work_budget -= xircom_rx(dev);
1075
1076 if (csr5 & (TxNoBuf | TxDied | TxIntr)) {
1077 unsigned int dirty_tx;
1078
1079 for (dirty_tx = tp->dirty_tx; tp->cur_tx - dirty_tx > 0;
1080 dirty_tx++) {
1081 int entry = dirty_tx % TX_RING_SIZE;
1082 int status = tp->tx_ring[entry].status;
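				/* status is signed: negative means bit 31 (Tx0DescOwned)
				 * is still set, i.e. the chip has not finished with it. */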
1083
1084 if (status < 0)
1085 break; /* It still hasn't been Txed */
1086 /* Check for Rx filter setup frames. */
1087 if (tp->tx_skbuff[entry] == NULL)
1088 continue;
1089
1090 if (status & Tx0DescError) {
1091 /* There was a major error; log it. */
1092#ifndef final_version
1093 if (xircom_debug > 1)
1094 printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
1095 dev->name, status);
1096#endif
1097 tp->stats.tx_errors++;
1098 if (status & Tx0ManyColl) {
1099 tp->stats.tx_aborted_errors++;
1100 }
1101 if (status & Tx0NoCarrier) tp->stats.tx_carrier_errors++;
1102 if (status & Tx0LateColl) tp->stats.tx_window_errors++;
1103 if (status & Tx0Underflow) tp->stats.tx_fifo_errors++;
1104 } else {
1105 tp->stats.tx_bytes += tp->tx_ring[entry].length & 0x7ff;
1106 tp->stats.collisions += (status >> 3) & 15;
1107 tp->stats.tx_packets++;
1108 }
1109
1110 /* Free the original skb. */
1111 dev_kfree_skb_irq(tp->tx_skbuff[entry]);
1112 tp->tx_skbuff[entry] = NULL;
1113 }
1114
1115#ifndef final_version
1116 if (tp->cur_tx - dirty_tx > TX_RING_SIZE) {
1117 printk(KERN_ERR "%s: Out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
1118 dev->name, dirty_tx, tp->cur_tx, tp->tx_full);
1119 dirty_tx += TX_RING_SIZE;
1120 }
1121#endif
1122
1123 if (tp->tx_full &&
1124 tp->cur_tx - dirty_tx < TX_RING_SIZE - 2)
1125 /* The ring is no longer full */
1126 tp->tx_full = 0;
1127
1128 if (tp->tx_full)
1129 netif_stop_queue (dev);
1130 else
1131 netif_wake_queue (dev);
1132
1133 tp->dirty_tx = dirty_tx;
1134 if (csr5 & TxDied) {
1135 if (xircom_debug > 2)
1136 printk(KERN_WARNING "%s: The transmitter stopped."
1137 " CSR5 is %x, CSR6 %x, new CSR6 %x.\n",
1138 dev->name, csr5, inl(ioaddr + CSR6), tp->csr6);
1139 outl_CSR6(tp->csr6 | EnableRx, ioaddr);
1140 outl_CSR6(tp->csr6 | EnableTxRx, ioaddr);
1141 }
1142 }
1143
1144 /* Log errors. */
1145 if (csr5 & AbnormalIntr) { /* Abnormal error summary bit. */
1146 if (csr5 & LinkChange)
1147 xircom_media_change(dev);
1148 if (csr5 & TxFIFOUnderflow) {
1149 if ((tp->csr6 & TxThreshMask) != TxThreshMask)
1150 tp->csr6 += (1 << TxThreshShift); /* Bump up the Tx threshold */
1151 else
1152 tp->csr6 |= TxStoreForw; /* Store-n-forward. */
1153 /* Restart the transmit process. */
1154 outl_CSR6(tp->csr6 | EnableRx, ioaddr);
1155 outl_CSR6(tp->csr6 | EnableTxRx, ioaddr);
1156 }
1157 if (csr5 & RxDied) { /* Missed a Rx frame. */
1158 tp->stats.rx_errors++;
1159 tp->stats.rx_missed_errors += inl(ioaddr + CSR8) & 0xffff;
1160 outl_CSR6(tp->csr6 | EnableTxRx, ioaddr);
1161 }
1162 /* Clear all error sources, including undocumented ones! */
1163 outl(0x0800f7ba, ioaddr + CSR5);
1164 }
1165 if (--work_budget < 0) {
1166 if (xircom_debug > 1)
1167 printk(KERN_WARNING "%s: Too much work during an interrupt, "
1168 "csr5=0x%8.8x.\n", dev->name, csr5);
1169 /* Acknowledge all interrupt sources. */
1170 outl(0x8001ffff, ioaddr + CSR5);
1171 break;
1172 }
1173 } while (1);
1174
1175 if (xircom_debug > 3)
1176 printk(KERN_DEBUG "%s: exiting interrupt, csr5=%#4.4x.\n",
1177 dev->name, inl(ioaddr + CSR5));
1178
1179 spin_unlock (&tp->lock);
1180 return IRQ_RETVAL(handled);
1181}
1182
1183
1184static int
1185xircom_rx(struct net_device *dev)
1186{
1187 struct xircom_private *tp = netdev_priv(dev);
1188 int entry = tp->cur_rx % RX_RING_SIZE;
1189 int rx_work_limit = tp->dirty_rx + RX_RING_SIZE - tp->cur_rx;
1190 int work_done = 0;
1191
1192 if (xircom_debug > 4)
1193 printk(KERN_DEBUG " In xircom_rx(), entry %d %8.8x.\n", entry,
1194 tp->rx_ring[entry].status);
1195 /* If we own the next entry, it's a new packet. Send it up. */
1196 while (tp->rx_ring[entry].status >= 0) {
1197 s32 status = tp->rx_ring[entry].status;
1198
1199 if (xircom_debug > 5)
1200 printk(KERN_DEBUG " In xircom_rx(), entry %d %8.8x.\n", entry,
1201 tp->rx_ring[entry].status);
1202 if (--rx_work_limit < 0)
1203 break;
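		/* A clean frame shows only Rx0WholePkt (0x0300: first and last
		 * segment in one descriptor) in this mask; 0x8000 is Rx0DescError
		 * and the 0x38000000 bits are presumably frame-type/filter flags
		 * that must be clear. */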
1204 if ((status & 0x38008300) != 0x0300) {
1205 if ((status & 0x38000300) != 0x0300) {
1206 /* Ignore earlier buffers. */
1207 if ((status & 0xffff) != 0x7fff) {
1208 if (xircom_debug > 1)
1209 printk(KERN_WARNING "%s: Oversized Ethernet frame "
1210 "spanned multiple buffers, status %8.8x!\n",
1211 dev->name, status);
1212 tp->stats.rx_length_errors++;
1213 }
1214 } else if (status & Rx0DescError) {
1215 /* There was a fatal error. */
1216 if (xircom_debug > 2)
1217 printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
1218 dev->name, status);
1219 tp->stats.rx_errors++; /* end of a packet.*/
1220 if (status & (Rx0Runt | Rx0HugeFrame)) tp->stats.rx_length_errors++;
1221 if (status & Rx0CRCError) tp->stats.rx_crc_errors++;
1222 }
1223 } else {
1224 /* Omit the four octet CRC from the length. */
1225 short pkt_len = ((status >> 16) & 0x7ff) - 4;
1226 struct sk_buff *skb;
1227
1228#ifndef final_version
1229 if (pkt_len > 1518) {
1230 printk(KERN_WARNING "%s: Bogus packet size of %d (%#x).\n",
1231 dev->name, pkt_len, pkt_len);
1232 pkt_len = 1518;
1233 tp->stats.rx_length_errors++;
1234 }
1235#endif
1236 /* Check if the packet is long enough to accept without copying
1237 to a minimally-sized skbuff. */
1238 if (pkt_len < rx_copybreak
1239 && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
1240 skb_reserve(skb, 2); /* 16 byte align the IP header */
1241#if ! defined(__alpha__)
1242 skb_copy_to_linear_data(skb, bus_to_virt(tp->rx_ring[entry].buffer1),
1243 pkt_len);
1244 skb_put(skb, pkt_len);
1245#else
1246 memcpy(skb_put(skb, pkt_len),
1247 bus_to_virt(tp->rx_ring[entry].buffer1), pkt_len);
1248#endif
1249 work_done++;
1250 } else { /* Pass up the skb already on the Rx ring. */
1251 skb_put(skb = tp->rx_skbuff[entry], pkt_len);
1252 tp->rx_skbuff[entry] = NULL;
1253 }
1254 skb->protocol = eth_type_trans(skb, dev);
1255 netif_rx(skb);
1256 dev->last_rx = jiffies;
1257 tp->stats.rx_packets++;
1258 tp->stats.rx_bytes += pkt_len;
1259 }
1260 entry = (++tp->cur_rx) % RX_RING_SIZE;
1261 }
1262
1263 /* Refill the Rx ring buffers. */
1264 for (; tp->cur_rx - tp->dirty_rx > 0; tp->dirty_rx++) {
1265 entry = tp->dirty_rx % RX_RING_SIZE;
1266 if (tp->rx_skbuff[entry] == NULL) {
1267 struct sk_buff *skb;
1268 skb = tp->rx_skbuff[entry] = dev_alloc_skb(PKT_BUF_SZ);
1269 if (skb == NULL)
1270 break;
1271 skb->dev = dev; /* Mark as being used by this device. */
1272 tp->rx_ring[entry].buffer1 = virt_to_bus(skb->data);
1273 work_done++;
1274 }
1275 tp->rx_ring[entry].status = Rx0DescOwned;
1276 }
1277
1278 return work_done;
1279}
1280
1281
1282static void
1283xircom_down(struct net_device *dev)
1284{
1285 long ioaddr = dev->base_addr;
1286 struct xircom_private *tp = netdev_priv(dev);
1287
1288 /* Disable interrupts by clearing the interrupt mask. */
1289 outl(0, ioaddr + CSR7);
1290 /* Stop the chip's Tx and Rx processes. */
1291 outl_CSR6(inl(ioaddr + CSR6) & ~EnableTxRx, ioaddr);
1292
1293 if (inl(ioaddr + CSR6) != 0xffffffff)
1294 tp->stats.rx_missed_errors += inl(ioaddr + CSR8) & 0xffff;
1295
1296 dev->if_port = tp->saved_if_port;
1297}
1298
1299
1300static int
1301xircom_close(struct net_device *dev)
1302{
1303 long ioaddr = dev->base_addr;
1304 struct xircom_private *tp = netdev_priv(dev);
1305 int i;
1306
1307 if (xircom_debug > 1)
1308 printk(KERN_DEBUG "%s: Shutting down ethercard, status was %2.2x.\n",
1309 dev->name, inl(ioaddr + CSR5));
1310
1311 netif_stop_queue(dev);
1312
1313 if (netif_device_present(dev))
1314 xircom_down(dev);
1315
1316 free_irq(dev->irq, dev);
1317
1318 /* Free all the skbuffs in the Rx queue. */
1319 for (i = 0; i < RX_RING_SIZE; i++) {
1320 struct sk_buff *skb = tp->rx_skbuff[i];
1321 tp->rx_skbuff[i] = NULL;
1322 tp->rx_ring[i].status = 0; /* Not owned by Xircom chip. */
1323 tp->rx_ring[i].length = 0;
1324 tp->rx_ring[i].buffer1 = 0xBADF00D0; /* An invalid address. */
1325 if (skb) {
1326 dev_kfree_skb(skb);
1327 }
1328 }
1329 for (i = 0; i < TX_RING_SIZE; i++) {
1330 if (tp->tx_skbuff[i])
1331 dev_kfree_skb(tp->tx_skbuff[i]);
1332 tp->tx_skbuff[i] = NULL;
1333 }
1334
1335 tp->open = 0;
1336 return 0;
1337}
1338
1339
1340static struct net_device_stats *xircom_get_stats(struct net_device *dev)
1341{
1342 struct xircom_private *tp = netdev_priv(dev);
1343 long ioaddr = dev->base_addr;
1344
1345 if (netif_device_present(dev))
1346 tp->stats.rx_missed_errors += inl(ioaddr + CSR8) & 0xffff;
1347
1348 return &tp->stats;
1349}
1350
1351static int xircom_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1352{
1353 struct xircom_private *tp = netdev_priv(dev);
1354 ecmd->supported =
1355 SUPPORTED_10baseT_Half |
1356 SUPPORTED_10baseT_Full |
1357 SUPPORTED_100baseT_Half |
1358 SUPPORTED_100baseT_Full |
1359 SUPPORTED_Autoneg |
1360 SUPPORTED_MII;
1361
1362 ecmd->advertising = ADVERTISED_MII;
1363 if (tp->advertising[0] & ADVERTISE_10HALF)
1364 ecmd->advertising |= ADVERTISED_10baseT_Half;
1365 if (tp->advertising[0] & ADVERTISE_10FULL)
1366 ecmd->advertising |= ADVERTISED_10baseT_Full;
1367 if (tp->advertising[0] & ADVERTISE_100HALF)
1368 ecmd->advertising |= ADVERTISED_100baseT_Half;
1369 if (tp->advertising[0] & ADVERTISE_100FULL)
1370 ecmd->advertising |= ADVERTISED_100baseT_Full;
1371 if (tp->autoneg) {
1372 ecmd->advertising |= ADVERTISED_Autoneg;
1373 ecmd->autoneg = AUTONEG_ENABLE;
1374 } else
1375 ecmd->autoneg = AUTONEG_DISABLE;
1376
1377 ecmd->port = PORT_MII;
1378 ecmd->transceiver = XCVR_INTERNAL;
1379 ecmd->phy_address = tp->phys[0];
1380 ecmd->speed = tp->speed100 ? SPEED_100 : SPEED_10;
1381 ecmd->duplex = tp->full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
1382 ecmd->maxtxpkt = TX_RING_SIZE / 2;
1383 ecmd->maxrxpkt = 0;
1384 return 0;
1385}
1386
1387static int xircom_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1388{
1389 struct xircom_private *tp = netdev_priv(dev);
1390 u16 autoneg, speed100, full_duplex;
1391
1392 autoneg = (ecmd->autoneg == AUTONEG_ENABLE);
1393 speed100 = (ecmd->speed == SPEED_100);
1394 full_duplex = (ecmd->duplex == DUPLEX_FULL);
1395
1396 tp->autoneg = autoneg;
1397 if (speed100 != tp->speed100 ||
1398 full_duplex != tp->full_duplex) {
1399 tp->speed100 = speed100;
1400 tp->full_duplex = full_duplex;
1401 /* change advertising bits */
1402 tp->advertising[0] &= ~(ADVERTISE_10HALF |
1403 ADVERTISE_10FULL |
1404 ADVERTISE_100HALF |
1405 ADVERTISE_100FULL |
1406 ADVERTISE_100BASE4);
1407 if (speed100) {
1408 if (full_duplex)
1409 tp->advertising[0] |= ADVERTISE_100FULL;
1410 else
1411 tp->advertising[0] |= ADVERTISE_100HALF;
1412 } else {
1413 if (full_duplex)
1414 tp->advertising[0] |= ADVERTISE_10FULL;
1415 else
1416 tp->advertising[0] |= ADVERTISE_10HALF;
1417 }
1418 }
1419 check_duplex(dev);
1420 return 0;
1421}
1422
1423static void xircom_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1424{
1425 struct xircom_private *tp = netdev_priv(dev);
1426 strcpy(info->driver, DRV_NAME);
1427 strcpy(info->version, DRV_VERSION);
1428 strcpy(info->bus_info, pci_name(tp->pdev));
1429}
1430
1431static const struct ethtool_ops ops = {
1432 .get_settings = xircom_get_settings,
1433 .set_settings = xircom_set_settings,
1434 .get_drvinfo = xircom_get_drvinfo,
1435};
1436
1437/* Provide ioctl() calls to examine the MII xcvr state. */
1438static int xircom_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1439{
1440 struct xircom_private *tp = netdev_priv(dev);
1441 u16 *data = (u16 *)&rq->ifr_ifru;
1442 int phy = tp->phys[0] & 0x1f;
1443 unsigned long flags;
1444
1445 switch(cmd) {
1446 /* Legacy mii-diag interface */
1447 case SIOCGMIIPHY: /* Get address of MII PHY in use. */
1448 if (tp->mii_cnt)
1449 data[0] = phy;
1450 else
1451 return -ENODEV;
1452 return 0;
1453 case SIOCGMIIREG: /* Read MII PHY register. */
1454 save_flags(flags);
1455 cli();
1456 data[3] = mdio_read(dev, data[0] & 0x1f, data[1] & 0x1f);
1457 restore_flags(flags);
1458 return 0;
1459 case SIOCSMIIREG: /* Write MII PHY register. */
1460 if (!capable(CAP_NET_ADMIN))
1461 return -EPERM;
1462 save_flags(flags);
1463 cli();
1464 if (data[0] == tp->phys[0]) {
1465 u16 value = data[2];
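			/* Register 0 is MII_BMCR and register 4 is MII_ADVERTISE;
			 * mirror writes to them into the driver's own link state. */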
1466 switch (data[1]) {
1467 case 0:
1468 if (value & (BMCR_RESET | BMCR_ANENABLE))
1469 /* Autonegotiation. */
1470 tp->autoneg = 1;
1471 else {
1472 tp->full_duplex = (value & BMCR_FULLDPLX) ? 1 : 0;
1473 tp->autoneg = 0;
1474 }
1475 break;
1476 case 4:
1477 tp->advertising[0] = value;
1478 break;
1479 }
1480 check_duplex(dev);
1481 }
1482 mdio_write(dev, data[0] & 0x1f, data[1] & 0x1f, data[2]);
1483 restore_flags(flags);
1484 return 0;
1485 default:
1486 return -EOPNOTSUPP;
1487 }
1488
1489 return -EOPNOTSUPP;
1490}
1491
1492/* Set or clear the multicast filter for this adaptor.
1493 Note that we only use exclusion around actually queueing the
1494 new frame, not around filling tp->setup_frame. This is non-deterministic
1495 when re-entered but still correct. */
1496static void set_rx_mode(struct net_device *dev)
1497{
1498 struct xircom_private *tp = netdev_priv(dev);
1499 struct dev_mc_list *mclist;
1500 long ioaddr = dev->base_addr;
1501 int csr6 = inl(ioaddr + CSR6);
1502 u16 *eaddrs, *setup_frm;
1503 u32 tx_flags;
1504 int i;
1505
1506 tp->csr6 &= ~(AllMultiBit | PromiscBit | HashFilterBit);
1507 csr6 &= ~(AllMultiBit | PromiscBit | HashFilterBit);
1508 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1509 tp->csr6 |= PromiscBit;
1510 csr6 |= PromiscBit;
1511 goto out;
1512 }
1513
1514 if ((dev->mc_count > 1000) || (dev->flags & IFF_ALLMULTI)) {
1515 /* Too many to filter well -- accept all multicasts. */
1516 tp->csr6 |= AllMultiBit;
1517 csr6 |= AllMultiBit;
1518 goto out;
1519 }
1520
1521 tx_flags = Tx1WholePkt | Tx1SetupPkt | PKT_SETUP_SZ;
1522
1523 /* Note that only the low-address shortword of setup_frame is valid! */
1524 setup_frm = tp->setup_frame;
1525 mclist = dev->mc_list;
1526
1527 /* Fill the first entry with our physical address. */
1528 eaddrs = (u16 *)dev->dev_addr;
1529 *setup_frm = cpu_to_le16(eaddrs[0]); setup_frm += 2;
1530 *setup_frm = cpu_to_le16(eaddrs[1]); setup_frm += 2;
1531 *setup_frm = cpu_to_le16(eaddrs[2]); setup_frm += 2;
1532
1533 if (dev->mc_count > 14) { /* Must use a multicast hash table. */
1534 u32 *hash_table = (u32 *)(tp->setup_frame + 4 * 12);
1535 u32 hash, hash2;
1536
1537 tx_flags |= Tx1HashSetup;
1538 tp->csr6 |= HashFilterBit;
1539 csr6 |= HashFilterBit;
1540
1541 /* Fill the unused 3 entries with the broadcast address.
1542 At least one entry *must* contain the broadcast address!!!*/
1543 for (i = 0; i < 3; i++) {
1544 *setup_frm = 0xffff; setup_frm += 2;
1545 *setup_frm = 0xffff; setup_frm += 2;
1546 *setup_frm = 0xffff; setup_frm += 2;
1547 }
1548
1549 /* Truly brain-damaged hash filter layout */
1550 /* XXX: not sure if I should take the last or the first 9 bits */
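		/* The 512-bit hash appears to be interleaved with the 16
		 * perfect-filter slots of the setup frame, hence the odd
		 * hash -> hash2 remapping below (not cross-checked against a
		 * databook). */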
1551 for (i = 0; i < dev->mc_count; i++, mclist = mclist->next) {
1552 u32 *hptr;
1553 hash = ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x1ff;
1554 if (hash < 384) {
1555 hash2 = hash + ((hash >> 4) << 4) +
1556 ((hash >> 5) << 5);
1557 } else {
1558 hash -= 384;
1559 hash2 = 64 + hash + (hash >> 4) * 80;
1560 }
1561 hptr = &hash_table[hash2 & ~0x1f];
1562 *hptr |= cpu_to_le32(1 << (hash2 & 0x1f));
1563 }
1564 } else {
1565 /* We have <= 14 mcast addresses so we can use Xircom's
1566 wonderful 16-address perfect filter. */
1567 for (i = 0; i < dev->mc_count; i++, mclist = mclist->next) {
1568 eaddrs = (u16 *)mclist->dmi_addr;
1569 *setup_frm = cpu_to_le16(eaddrs[0]); setup_frm += 2;
1570 *setup_frm = cpu_to_le16(eaddrs[1]); setup_frm += 2;
1571 *setup_frm = cpu_to_le16(eaddrs[2]); setup_frm += 2;
1572 }
1573 /* Fill the unused entries with the broadcast address.
1574 At least one entry *must* contain the broadcast address!!!*/
1575 for (; i < 15; i++) {
1576 *setup_frm = 0xffff; setup_frm += 2;
1577 *setup_frm = 0xffff; setup_frm += 2;
1578 *setup_frm = 0xffff; setup_frm += 2;
1579 }
1580 }
1581
1582 /* Now add this frame to the Tx list. */
1583 if (tp->cur_tx - tp->dirty_tx > TX_RING_SIZE - 2) {
1584 /* Same setup recently queued, we need not add it. */
1585 /* XXX: Huh? All it means is that the Tx list is full...*/
1586 } else {
1587 unsigned long flags;
1588 unsigned int entry;
1589 int dummy = -1;
1590
1591 save_flags(flags); cli();
1592 entry = tp->cur_tx++ % TX_RING_SIZE;
1593
1594 if (entry != 0) {
1595 /* Avoid a chip erratum by prefixing a dummy entry. */
1596 tp->tx_skbuff[entry] = NULL;
1597 tp->tx_ring[entry].length =
1598 (entry == TX_RING_SIZE - 1) ? Tx1RingWrap : 0;
1599 tp->tx_ring[entry].buffer1 = 0;
1600 /* race with chip, set Tx0DescOwned later */
1601 dummy = entry;
1602 entry = tp->cur_tx++ % TX_RING_SIZE;
1603 }
1604
1605 tp->tx_skbuff[entry] = NULL;
1606 /* Put the setup frame on the Tx list. */
1607 if (entry == TX_RING_SIZE - 1)
1608 tx_flags |= Tx1RingWrap; /* Wrap ring. */
1609 tp->tx_ring[entry].length = tx_flags;
1610 tp->tx_ring[entry].buffer1 = virt_to_bus(tp->setup_frame);
1611 tp->tx_ring[entry].status = Tx0DescOwned;
1612 if (tp->cur_tx - tp->dirty_tx >= TX_RING_SIZE - 2) {
1613 tp->tx_full = 1;
1614 netif_stop_queue (dev);
1615 }
1616 if (dummy >= 0)
1617 tp->tx_ring[dummy].status = Tx0DescOwned;
1618 restore_flags(flags);
1619 /* Trigger an immediate transmit demand. */
1620 outl(0, ioaddr + CSR1);
1621 }
1622
1623out:
1624 outl_CSR6(csr6, ioaddr);
1625}
1626
1627
1628static struct pci_device_id xircom_pci_table[] = {
1629 { 0x115D, 0x0003, PCI_ANY_ID, PCI_ANY_ID, 0, 0, X3201_3 },
1630 {0},
1631};
1632MODULE_DEVICE_TABLE(pci, xircom_pci_table);
1633
1634
1635#ifdef CONFIG_PM
1636static int xircom_suspend(struct pci_dev *pdev, pm_message_t state)
1637{
1638 struct net_device *dev = pci_get_drvdata(pdev);
1639 struct xircom_private *tp = netdev_priv(dev);
1640 printk(KERN_INFO "xircom_suspend(%s)\n", dev->name);
1641 if (tp->open)
1642 xircom_down(dev);
1643
1644 pci_save_state(pdev);
1645 pci_disable_device(pdev);
1646 pci_set_power_state(pdev, PCI_D3hot);
1647
1648 return 0;
1649}
1650
1651
1652static int xircom_resume(struct pci_dev *pdev)
1653{
1654 struct net_device *dev = pci_get_drvdata(pdev);
1655 struct xircom_private *tp = netdev_priv(dev);
1656 printk(KERN_INFO "xircom_resume(%s)\n", dev->name);
1657
1658 pci_set_power_state(pdev, PCI_D0);
1659 pci_enable_device(pdev);
1660 pci_restore_state(pdev);
1661
1662 /* Bring the chip out of sleep mode.
1663 Caution: Snooze mode does not work with some boards! */
1664 if (xircom_tbl[tp->chip_id].flags & HAS_ACPI)
1665 pci_write_config_dword(tp->pdev, PCI_POWERMGMT, 0);
1666
1667 transceiver_voodoo(dev);
1668 if (xircom_tbl[tp->chip_id].flags & HAS_MII)
1669 check_duplex(dev);
1670
1671 if (tp->open)
1672 xircom_up(dev);
1673 return 0;
1674}
1675#endif /* CONFIG_PM */
1676
1677
1678static void __devexit xircom_remove_one(struct pci_dev *pdev)
1679{
1680 struct net_device *dev = pci_get_drvdata(pdev);
1681
1682 printk(KERN_INFO "xircom_remove_one(%s)\n", dev->name);
1683 unregister_netdev(dev);
1684 pci_release_regions(pdev);
1685 free_netdev(dev);
1686 pci_set_drvdata(pdev, NULL);
1687}
1688
1689
1690static struct pci_driver xircom_driver = {
1691 .name = DRV_NAME,
1692 .id_table = xircom_pci_table,
1693 .probe = xircom_init_one,
1694 .remove = __devexit_p(xircom_remove_one),
1695#ifdef CONFIG_PM
1696 .suspend = xircom_suspend,
1697 .resume = xircom_resume
1698#endif /* CONFIG_PM */
1699};
1700
1701
1702static int __init xircom_init(void)
1703{
1704/* when a module, this is printed whether or not devices are found in probe */
1705#ifdef MODULE
1706 printk(version);
1707#endif
1708 return pci_register_driver(&xircom_driver);
1709}
1710
1711
1712static void __exit xircom_exit(void)
1713{
1714 pci_unregister_driver(&xircom_driver);
1715}
1716
1717module_init(xircom_init);
1718module_exit(xircom_exit);
1719
1720/*
1721 * Local variables:
1722 * c-indent-level: 4
1723 * c-basic-offset: 4
1724 * tab-width: 4
1725 * End:
1726 */
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 5b5d87585d91..d91856b19f6f 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -62,7 +62,9 @@
62#include <linux/if_ether.h> 62#include <linux/if_ether.h>
63#include <linux/if_tun.h> 63#include <linux/if_tun.h>
64#include <linux/crc32.h> 64#include <linux/crc32.h>
65#include <linux/nsproxy.h>
65#include <net/net_namespace.h> 66#include <net/net_namespace.h>
67#include <net/netns/generic.h>
66 68
67#include <asm/system.h> 69#include <asm/system.h>
68#include <asm/uaccess.h> 70#include <asm/uaccess.h>
@@ -106,7 +108,11 @@ struct tun_struct {
106 108
107/* Network device part of the driver */ 109/* Network device part of the driver */
108 110
109static LIST_HEAD(tun_dev_list); 111static unsigned int tun_net_id;
112struct tun_net {
113 struct list_head dev_list;
114};
115
110static const struct ethtool_ops tun_ethtool_ops; 116static const struct ethtool_ops tun_ethtool_ops;
111 117
112/* Net device open. */ 118/* Net device open. */
@@ -471,14 +477,15 @@ static void tun_setup(struct net_device *dev)
471 dev->stop = tun_net_close; 477 dev->stop = tun_net_close;
472 dev->ethtool_ops = &tun_ethtool_ops; 478 dev->ethtool_ops = &tun_ethtool_ops;
473 dev->destructor = free_netdev; 479 dev->destructor = free_netdev;
480 dev->features |= NETIF_F_NETNS_LOCAL;
474} 481}
475 482
476static struct tun_struct *tun_get_by_name(const char *name) 483static struct tun_struct *tun_get_by_name(struct tun_net *tn, const char *name)
477{ 484{
478 struct tun_struct *tun; 485 struct tun_struct *tun;
479 486
480 ASSERT_RTNL(); 487 ASSERT_RTNL();
481 list_for_each_entry(tun, &tun_dev_list, list) { 488 list_for_each_entry(tun, &tn->dev_list, list) {
482 if (!strncmp(tun->dev->name, name, IFNAMSIZ)) 489 if (!strncmp(tun->dev->name, name, IFNAMSIZ))
483 return tun; 490 return tun;
484 } 491 }
@@ -486,13 +493,15 @@ static struct tun_struct *tun_get_by_name(const char *name)
486 return NULL; 493 return NULL;
487} 494}
488 495
489static int tun_set_iff(struct file *file, struct ifreq *ifr) 496static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
490{ 497{
498 struct tun_net *tn;
491 struct tun_struct *tun; 499 struct tun_struct *tun;
492 struct net_device *dev; 500 struct net_device *dev;
493 int err; 501 int err;
494 502
495 tun = tun_get_by_name(ifr->ifr_name); 503 tn = net_generic(net, tun_net_id);
504 tun = tun_get_by_name(tn, ifr->ifr_name);
496 if (tun) { 505 if (tun) {
497 if (tun->attached) 506 if (tun->attached)
498 return -EBUSY; 507 return -EBUSY;
@@ -505,7 +514,7 @@ static int tun_set_iff(struct file *file, struct ifreq *ifr)
505 !capable(CAP_NET_ADMIN)) 514 !capable(CAP_NET_ADMIN))
506 return -EPERM; 515 return -EPERM;
507 } 516 }
508 else if (__dev_get_by_name(&init_net, ifr->ifr_name)) 517 else if (__dev_get_by_name(net, ifr->ifr_name))
509 return -EINVAL; 518 return -EINVAL;
510 else { 519 else {
511 char *name; 520 char *name;
@@ -536,6 +545,7 @@ static int tun_set_iff(struct file *file, struct ifreq *ifr)
536 if (!dev) 545 if (!dev)
537 return -ENOMEM; 546 return -ENOMEM;
538 547
548 dev_net_set(dev, net);
539 tun = netdev_priv(dev); 549 tun = netdev_priv(dev);
540 tun->dev = dev; 550 tun->dev = dev;
541 tun->flags = flags; 551 tun->flags = flags;
@@ -558,7 +568,7 @@ static int tun_set_iff(struct file *file, struct ifreq *ifr)
558 if (err < 0) 568 if (err < 0)
559 goto err_free_dev; 569 goto err_free_dev;
560 570
561 list_add(&tun->list, &tun_dev_list); 571 list_add(&tun->list, &tn->dev_list);
562 } 572 }
563 573
564 DBG(KERN_INFO "%s: tun_set_iff\n", tun->dev->name); 574 DBG(KERN_INFO "%s: tun_set_iff\n", tun->dev->name);
@@ -575,6 +585,7 @@ static int tun_set_iff(struct file *file, struct ifreq *ifr)
575 585
576 file->private_data = tun; 586 file->private_data = tun;
577 tun->attached = 1; 587 tun->attached = 1;
588 get_net(dev_net(tun->dev));
578 589
579 strcpy(ifr->ifr_name, tun->dev->name); 590 strcpy(ifr->ifr_name, tun->dev->name);
580 return 0; 591 return 0;
@@ -603,7 +614,7 @@ static int tun_chr_ioctl(struct inode *inode, struct file *file,
603 ifr.ifr_name[IFNAMSIZ-1] = '\0'; 614 ifr.ifr_name[IFNAMSIZ-1] = '\0';
604 615
605 rtnl_lock(); 616 rtnl_lock();
606 err = tun_set_iff(file, &ifr); 617 err = tun_set_iff(current->nsproxy->net_ns, file, &ifr);
607 rtnl_unlock(); 618 rtnl_unlock();
608 619
609 if (err) 620 if (err)
@@ -790,6 +801,7 @@ static int tun_chr_close(struct inode *inode, struct file *file)
790 /* Detach from net device */ 801 /* Detach from net device */
791 file->private_data = NULL; 802 file->private_data = NULL;
792 tun->attached = 0; 803 tun->attached = 0;
804 put_net(dev_net(tun->dev));
793 805
794 /* Drop read queue */ 806 /* Drop read queue */
795 skb_queue_purge(&tun->readq); 807 skb_queue_purge(&tun->readq);
@@ -909,32 +921,76 @@ static const struct ethtool_ops tun_ethtool_ops = {
909 .set_rx_csum = tun_set_rx_csum 921 .set_rx_csum = tun_set_rx_csum
910}; 922};
911 923
912static int __init tun_init(void) 924static int tun_init_net(struct net *net)
913{ 925{
914 int ret = 0; 926 struct tun_net *tn;
915 927
916 printk(KERN_INFO "tun: %s, %s\n", DRV_DESCRIPTION, DRV_VERSION); 928 tn = kmalloc(sizeof(*tn), GFP_KERNEL);
917 printk(KERN_INFO "tun: %s\n", DRV_COPYRIGHT); 929 if (tn == NULL)
930 return -ENOMEM;
918 931
919 ret = misc_register(&tun_miscdev); 932 INIT_LIST_HEAD(&tn->dev_list);
920 if (ret) 933
921 printk(KERN_ERR "tun: Can't register misc device %d\n", TUN_MINOR); 934 if (net_assign_generic(net, tun_net_id, tn)) {
922 return ret; 935 kfree(tn);
936 return -ENOMEM;
937 }
938
939 return 0;
923} 940}
924 941
925static void tun_cleanup(void) 942static void tun_exit_net(struct net *net)
926{ 943{
944 struct tun_net *tn;
927 struct tun_struct *tun, *nxt; 945 struct tun_struct *tun, *nxt;
928 946
929 misc_deregister(&tun_miscdev); 947 tn = net_generic(net, tun_net_id);
930 948
931 rtnl_lock(); 949 rtnl_lock();
932 list_for_each_entry_safe(tun, nxt, &tun_dev_list, list) { 950 list_for_each_entry_safe(tun, nxt, &tn->dev_list, list) {
933 DBG(KERN_INFO "%s cleaned up\n", tun->dev->name); 951 DBG(KERN_INFO "%s cleaned up\n", tun->dev->name);
934 unregister_netdevice(tun->dev); 952 unregister_netdevice(tun->dev);
935 } 953 }
936 rtnl_unlock(); 954 rtnl_unlock();
937 955
956 kfree(tn);
957}
958
959static struct pernet_operations tun_net_ops = {
960 .init = tun_init_net,
961 .exit = tun_exit_net,
962};
963
964static int __init tun_init(void)
965{
966 int ret = 0;
967
968 printk(KERN_INFO "tun: %s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
969 printk(KERN_INFO "tun: %s\n", DRV_COPYRIGHT);
970
971 ret = register_pernet_gen_device(&tun_net_id, &tun_net_ops);
972 if (ret) {
973 printk(KERN_ERR "tun: Can't register pernet ops\n");
974 goto err_pernet;
975 }
976
977 ret = misc_register(&tun_miscdev);
978 if (ret) {
979 printk(KERN_ERR "tun: Can't register misc device %d\n", TUN_MINOR);
980 goto err_misc;
981 }
982 return 0;
983
984err_misc:
985 unregister_pernet_gen_device(tun_net_id, &tun_net_ops);
986err_pernet:
987 return ret;
988}
989
990static void tun_cleanup(void)
991{
992 misc_deregister(&tun_miscdev);
993 unregister_pernet_gen_device(tun_net_id, &tun_net_ops);
938} 994}
939 995
940module_init(tun_init); 996module_init(tun_init);
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index 0ee4c168e4c0..29a4d650e8a8 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -3954,7 +3954,7 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma
3954 if (err) 3954 if (err)
3955 return -1; 3955 return -1;
3956 3956
3957 ug_info->mdio_bus = res.start; 3957 snprintf(ug_info->mdio_bus, MII_BUS_ID_SIZE, "%x", res.start);
3958 } 3958 }
3959 3959
3960 /* get the phy interface type, or default to MII */ 3960 /* get the phy interface type, or default to MII */
diff --git a/drivers/net/ucc_geth.h b/drivers/net/ucc_geth.h
index 4fb95b3af948..9f8b7580a3a4 100644
--- a/drivers/net/ucc_geth.h
+++ b/drivers/net/ucc_geth.h
@@ -1156,7 +1156,7 @@ struct ucc_geth_info {
1156 u16 pausePeriod; 1156 u16 pausePeriod;
1157 u16 extensionField; 1157 u16 extensionField;
1158 u8 phy_address; 1158 u8 phy_address;
1159 u32 mdio_bus; 1159 char mdio_bus[MII_BUS_ID_SIZE];
1160 u8 weightfactor[NUM_TX_QUEUES]; 1160 u8 weightfactor[NUM_TX_QUEUES];
1161 u8 interruptcoalescingmaxvalue[NUM_RX_QUEUES]; 1161 u8 interruptcoalescingmaxvalue[NUM_RX_QUEUES];
1162 u8 l2qt[UCC_GETH_VLAN_PRIORITY_MAX]; 1162 u8 l2qt[UCC_GETH_VLAN_PRIORITY_MAX];
diff --git a/drivers/net/ucc_geth_mii.c b/drivers/net/ucc_geth_mii.c
index c69e654d539f..e4d3f330bac3 100644
--- a/drivers/net/ucc_geth_mii.c
+++ b/drivers/net/ucc_geth_mii.c
@@ -157,7 +157,7 @@ static int uec_mdio_probe(struct of_device *ofdev, const struct of_device_id *ma
157 if (err) 157 if (err)
158 goto reg_map_fail; 158 goto reg_map_fail;
159 159
160 new_bus->id = res.start; 160 snprintf(new_bus->id, MII_BUS_ID_SIZE, "%x", res.start);
161 161
162 new_bus->irq = kmalloc(32 * sizeof(int), GFP_KERNEL); 162 new_bus->irq = kmalloc(32 * sizeof(int), GFP_KERNEL);
163 163
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
index 01660f68943a..f7319d326912 100644
--- a/drivers/net/usb/dm9601.c
+++ b/drivers/net/usb/dm9601.c
@@ -155,7 +155,7 @@ static void dm_write_reg_async(struct usbnet *dev, u8 reg, u8 value)
155 dm_write_async_helper(dev, reg, value, 0, NULL); 155 dm_write_async_helper(dev, reg, value, 0, NULL);
156} 156}
157 157
158static int dm_read_shared_word(struct usbnet *dev, int phy, u8 reg, u16 *value) 158static int dm_read_shared_word(struct usbnet *dev, int phy, u8 reg, __le16 *value)
159{ 159{
160 int ret, i; 160 int ret, i;
161 161
@@ -194,7 +194,7 @@ static int dm_read_shared_word(struct usbnet *dev, int phy, u8 reg, u16 *value)
194 return ret; 194 return ret;
195} 195}
196 196
197static int dm_write_shared_word(struct usbnet *dev, int phy, u8 reg, u16 value) 197static int dm_write_shared_word(struct usbnet *dev, int phy, u8 reg, __le16 value)
198{ 198{
199 int ret, i; 199 int ret, i;
200 200
@@ -249,7 +249,7 @@ static int dm9601_get_eeprom(struct net_device *net,
249 struct ethtool_eeprom *eeprom, u8 * data) 249 struct ethtool_eeprom *eeprom, u8 * data)
250{ 250{
251 struct usbnet *dev = netdev_priv(net); 251 struct usbnet *dev = netdev_priv(net);
252 u16 *ebuf = (u16 *) data; 252 __le16 *ebuf = (__le16 *) data;
253 int i; 253 int i;
254 254
255 /* access is 16bit */ 255 /* access is 16bit */
@@ -268,7 +268,7 @@ static int dm9601_mdio_read(struct net_device *netdev, int phy_id, int loc)
268{ 268{
269 struct usbnet *dev = netdev_priv(netdev); 269 struct usbnet *dev = netdev_priv(netdev);
270 270
271 u16 res; 271 __le16 res;
272 272
273 if (phy_id) { 273 if (phy_id) {
274 devdbg(dev, "Only internal phy supported"); 274 devdbg(dev, "Only internal phy supported");
@@ -288,7 +288,7 @@ static void dm9601_mdio_write(struct net_device *netdev, int phy_id, int loc,
288 int val) 288 int val)
289{ 289{
290 struct usbnet *dev = netdev_priv(netdev); 290 struct usbnet *dev = netdev_priv(netdev);
291 u16 res = cpu_to_le16(val); 291 __le16 res = cpu_to_le16(val);
292 292
293 if (phy_id) { 293 if (phy_id) {
294 devdbg(dev, "Only internal phy supported"); 294 devdbg(dev, "Only internal phy supported");
diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c
index 369c731114b3..21a7785cb8b6 100644
--- a/drivers/net/usb/rndis_host.c
+++ b/drivers/net/usb/rndis_host.c
@@ -218,7 +218,7 @@ EXPORT_SYMBOL_GPL(rndis_command);
218 * ActiveSync 4.1 Windows driver. 218 * ActiveSync 4.1 Windows driver.
219 */ 219 */
220static int rndis_query(struct usbnet *dev, struct usb_interface *intf, 220static int rndis_query(struct usbnet *dev, struct usb_interface *intf,
221 void *buf, u32 oid, u32 in_len, 221 void *buf, __le32 oid, u32 in_len,
222 void **reply, int *reply_len) 222 void **reply, int *reply_len)
223{ 223{
224 int retval; 224 int retval;
@@ -283,7 +283,8 @@ generic_rndis_bind(struct usbnet *dev, struct usb_interface *intf, int flags)
283 struct rndis_set_c *set_c; 283 struct rndis_set_c *set_c;
284 struct rndis_halt *halt; 284 struct rndis_halt *halt;
285 } u; 285 } u;
286 u32 tmp, phym_unspec, *phym; 286 u32 tmp, phym_unspec;
287 __le32 *phym;
287 int reply_len; 288 int reply_len;
288 unsigned char *bp; 289 unsigned char *bp;
289 290
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index e2ad98bee6e7..31cd817f33f9 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -375,7 +375,7 @@ static int veth_newlink(struct net_device *dev,
375 else 375 else
376 snprintf(ifname, IFNAMSIZ, DRV_NAME "%%d"); 376 snprintf(ifname, IFNAMSIZ, DRV_NAME "%%d");
377 377
378 peer = rtnl_create_link(dev->nd_net, ifname, &veth_link_ops, tbp); 378 peer = rtnl_create_link(dev_net(dev), ifname, &veth_link_ops, tbp);
379 if (IS_ERR(peer)) 379 if (IS_ERR(peer))
380 return PTR_ERR(peer); 380 return PTR_ERR(peer);
381 381
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index cc0addb5640c..ed1afaf683a4 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -3460,21 +3460,22 @@ static int velocity_resume(struct pci_dev *pdev)
3460static int velocity_netdev_event(struct notifier_block *nb, unsigned long notification, void *ptr) 3460static int velocity_netdev_event(struct notifier_block *nb, unsigned long notification, void *ptr)
3461{ 3461{
3462 struct in_ifaddr *ifa = (struct in_ifaddr *) ptr; 3462 struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
3463 struct net_device *dev = ifa->ifa_dev->dev;
3464 struct velocity_info *vptr;
3465 unsigned long flags;
3463 3466
3464 if (ifa) { 3467 if (dev_net(dev) != &init_net)
3465 struct net_device *dev = ifa->ifa_dev->dev; 3468 return NOTIFY_DONE;
3466 struct velocity_info *vptr;
3467 unsigned long flags;
3468 3469
3469 spin_lock_irqsave(&velocity_dev_list_lock, flags); 3470 spin_lock_irqsave(&velocity_dev_list_lock, flags);
3470 list_for_each_entry(vptr, &velocity_dev_list, list) { 3471 list_for_each_entry(vptr, &velocity_dev_list, list) {
3471 if (vptr->dev == dev) { 3472 if (vptr->dev == dev) {
3472 velocity_get_ip(vptr); 3473 velocity_get_ip(vptr);
3473 break; 3474 break;
3474 }
3475 } 3475 }
3476 spin_unlock_irqrestore(&velocity_dev_list_lock, flags);
3477 } 3476 }
3477 spin_unlock_irqrestore(&velocity_dev_list_lock, flags);
3478
3478 return NOTIFY_DONE; 3479 return NOTIFY_DONE;
3479} 3480}
3480 3481
diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c
index 1d706eae3052..45ddfc9763cc 100644
--- a/drivers/net/wan/cosa.c
+++ b/drivers/net/wan/cosa.c
@@ -90,6 +90,7 @@
90#include <linux/ioport.h> 90#include <linux/ioport.h>
91#include <linux/netdevice.h> 91#include <linux/netdevice.h>
92#include <linux/spinlock.h> 92#include <linux/spinlock.h>
93#include <linux/mutex.h>
93#include <linux/device.h> 94#include <linux/device.h>
94 95
95#undef COSA_SLOW_IO /* for testing purposes only */ 96#undef COSA_SLOW_IO /* for testing purposes only */
@@ -127,7 +128,8 @@ struct channel_data {
127 int (*tx_done)(struct channel_data *channel, int size); 128 int (*tx_done)(struct channel_data *channel, int size);
128 129
129 /* Character device parts */ 130 /* Character device parts */
130 struct semaphore rsem, wsem; 131 struct mutex rlock;
132 struct semaphore wsem;
131 char *rxdata; 133 char *rxdata;
132 int rxsize; 134 int rxsize;
133 wait_queue_head_t txwaitq, rxwaitq; 135 wait_queue_head_t txwaitq, rxwaitq;
@@ -807,7 +809,7 @@ static struct net_device_stats *cosa_net_stats(struct net_device *dev)
807 809
808static void chardev_channel_init(struct channel_data *chan) 810static void chardev_channel_init(struct channel_data *chan)
809{ 811{
810 init_MUTEX(&chan->rsem); 812 mutex_init(&chan->rlock);
811 init_MUTEX(&chan->wsem); 813 init_MUTEX(&chan->wsem);
812} 814}
813 815
@@ -825,12 +827,12 @@ static ssize_t cosa_read(struct file *file,
825 cosa->name, cosa->firmware_status); 827 cosa->name, cosa->firmware_status);
826 return -EPERM; 828 return -EPERM;
827 } 829 }
828 if (down_interruptible(&chan->rsem)) 830 if (mutex_lock_interruptible(&chan->rlock))
829 return -ERESTARTSYS; 831 return -ERESTARTSYS;
830 832
831 if ((chan->rxdata = kmalloc(COSA_MTU, GFP_DMA|GFP_KERNEL)) == NULL) { 833 if ((chan->rxdata = kmalloc(COSA_MTU, GFP_DMA|GFP_KERNEL)) == NULL) {
832 printk(KERN_INFO "%s: cosa_read() - OOM\n", cosa->name); 834 printk(KERN_INFO "%s: cosa_read() - OOM\n", cosa->name);
833 up(&chan->rsem); 835 mutex_unlock(&chan->rlock);
834 return -ENOMEM; 836 return -ENOMEM;
835 } 837 }
836 838
@@ -848,7 +850,7 @@ static ssize_t cosa_read(struct file *file,
848 remove_wait_queue(&chan->rxwaitq, &wait); 850 remove_wait_queue(&chan->rxwaitq, &wait);
849 current->state = TASK_RUNNING; 851 current->state = TASK_RUNNING;
850 spin_unlock_irqrestore(&cosa->lock, flags); 852 spin_unlock_irqrestore(&cosa->lock, flags);
851 up(&chan->rsem); 853 mutex_unlock(&chan->rlock);
852 return -ERESTARTSYS; 854 return -ERESTARTSYS;
853 } 855 }
854 } 856 }
@@ -857,7 +859,7 @@ static ssize_t cosa_read(struct file *file,
857 kbuf = chan->rxdata; 859 kbuf = chan->rxdata;
858 count = chan->rxsize; 860 count = chan->rxsize;
859 spin_unlock_irqrestore(&cosa->lock, flags); 861 spin_unlock_irqrestore(&cosa->lock, flags);
860 up(&chan->rsem); 862 mutex_unlock(&chan->rlock);
861 863
862 if (copy_to_user(buf, kbuf, count)) { 864 if (copy_to_user(buf, kbuf, count)) {
863 kfree(kbuf); 865 kfree(kbuf);
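The cosa.c hunks above convert the read-side semaphore into a mutex; the mapping is one-for-one for a semaphore used purely as a lock. A minimal sketch under that assumption, with a hypothetical struct my_channel standing in for channel_data:

#include <linux/mutex.h>
#include <linux/errno.h>

struct my_channel {
	struct mutex rlock;		/* was: struct semaphore rsem */
	char *rxdata;
};

static void my_channel_init(struct my_channel *chan)
{
	mutex_init(&chan->rlock);	/* was: init_MUTEX(&chan->rsem) */
}

static int my_channel_read(struct my_channel *chan)
{
	if (mutex_lock_interruptible(&chan->rlock))	/* was: down_interruptible() */
		return -ERESTARTSYS;
	/* ... critical section using chan->rxdata ... */
	mutex_unlock(&chan->rlock);			/* was: up() */
	return 0;
}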
diff --git a/drivers/net/wan/dlci.c b/drivers/net/wan/dlci.c
index 96b232446c0b..b14242768fad 100644
--- a/drivers/net/wan/dlci.c
+++ b/drivers/net/wan/dlci.c
@@ -517,7 +517,7 @@ static int dlci_dev_event(struct notifier_block *unused,
517{ 517{
518 struct net_device *dev = (struct net_device *) ptr; 518 struct net_device *dev = (struct net_device *) ptr;
519 519
520 if (dev->nd_net != &init_net) 520 if (dev_net(dev) != &init_net)
521 return NOTIFY_DONE; 521 return NOTIFY_DONE;
522 522
523 if (event == NETDEV_UNREGISTER) { 523 if (event == NETDEV_UNREGISTER) {
diff --git a/drivers/net/wan/hdlc.c b/drivers/net/wan/hdlc.c
index 39951d0c34d6..9a83c9d5b8cf 100644
--- a/drivers/net/wan/hdlc.c
+++ b/drivers/net/wan/hdlc.c
@@ -68,7 +68,7 @@ static int hdlc_rcv(struct sk_buff *skb, struct net_device *dev,
68{ 68{
69 struct hdlc_device *hdlc = dev_to_hdlc(dev); 69 struct hdlc_device *hdlc = dev_to_hdlc(dev);
70 70
71 if (dev->nd_net != &init_net) { 71 if (dev_net(dev) != &init_net) {
72 kfree_skb(skb); 72 kfree_skb(skb);
73 return 0; 73 return 0;
74 } 74 }
@@ -105,7 +105,7 @@ static int hdlc_device_event(struct notifier_block *this, unsigned long event,
105 unsigned long flags; 105 unsigned long flags;
106 int on; 106 int on;
107 107
108 if (dev->nd_net != &init_net) 108 if (dev_net(dev) != &init_net)
109 return NOTIFY_DONE; 109 return NOTIFY_DONE;
110 110
111 if (dev->get_stats != hdlc_get_stats) 111 if (dev->get_stats != hdlc_get_stats)
diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c
index 824df3b5ea49..b5860b97a93e 100644
--- a/drivers/net/wan/lapbether.c
+++ b/drivers/net/wan/lapbether.c
@@ -91,7 +91,7 @@ static int lapbeth_rcv(struct sk_buff *skb, struct net_device *dev, struct packe
91 int len, err; 91 int len, err;
92 struct lapbethdev *lapbeth; 92 struct lapbethdev *lapbeth;
93 93
94 if (dev->nd_net != &init_net) 94 if (dev_net(dev) != &init_net)
95 goto drop; 95 goto drop;
96 96
97 if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) 97 if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
@@ -393,7 +393,7 @@ static int lapbeth_device_event(struct notifier_block *this,
393 struct lapbethdev *lapbeth; 393 struct lapbethdev *lapbeth;
394 struct net_device *dev = ptr; 394 struct net_device *dev = ptr;
395 395
396 if (dev->nd_net != &init_net) 396 if (dev_net(dev) != &init_net)
397 return NOTIFY_DONE; 397 return NOTIFY_DONE;
398 398
399 if (!dev_is_ethdev(dev)) 399 if (!dev_is_ethdev(dev))
diff --git a/drivers/net/wan/syncppp.c b/drivers/net/wan/syncppp.c
index 61e24b7a45a3..29b4b94e4947 100644
--- a/drivers/net/wan/syncppp.c
+++ b/drivers/net/wan/syncppp.c
@@ -1444,7 +1444,7 @@ static void sppp_print_bytes (u_char *p, u16 len)
1444 1444
1445static int sppp_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *p, struct net_device *orig_dev) 1445static int sppp_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *p, struct net_device *orig_dev)
1446{ 1446{
1447 if (dev->nd_net != &init_net) { 1447 if (dev_net(dev) != &init_net) {
1448 kfree_skb(skb); 1448 kfree_skb(skb);
1449 return 0; 1449 return 0;
1450 } 1450 }
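The dev->nd_net references in the WAN drivers above all become the dev_net(dev) accessor; a minimal sketch of the same check in a hypothetical packet_type handler (my_rcv) that only serves the initial network namespace:

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/net_namespace.h>

static int my_rcv(struct sk_buff *skb, struct net_device *dev,
		  struct packet_type *pt, struct net_device *orig_dev)
{
	if (dev_net(dev) != &init_net) {	/* was: dev->nd_net != &init_net */
		kfree_skb(skb);
		return 0;
	}

	/* ... normal receive path would go here ... */
	kfree_skb(skb);
	return 0;
}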
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index 714a6ca30ad2..fdf5aa8b8429 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -146,12 +146,15 @@ config IPW2100
146 configure your card: 146 configure your card:
147 147
148 <http://www.hpl.hp.com/personal/Jean_Tourrilhes/Linux/Tools.html>. 148 <http://www.hpl.hp.com/personal/Jean_Tourrilhes/Linux/Tools.html>.
149
150 It is recommended that you compile this driver as a module (M)
151 rather than built-in (Y). This driver requires firmware at device
152 initialization time, and when built-in this typically happens
153 before the filesystem is accessible (hence firmware will be
154 unavailable and initialization will fail). If you do choose to build
155 this driver into your kernel image, you can avoid this problem by
156 including the firmware and a firmware loader in an initramfs.
149 157
150 If you want to compile the driver as a module ( = code which can be
151 inserted in and removed from the running kernel whenever you want),
152 say M here and read <file:Documentation/kbuild/modules.txt>.
153 The module will be called ipw2100.ko.
154
155config IPW2100_MONITOR 158config IPW2100_MONITOR
156 bool "Enable promiscuous mode" 159 bool "Enable promiscuous mode"
157 depends on IPW2100 160 depends on IPW2100
@@ -201,11 +204,14 @@ config IPW2200
201 configure your card: 204 configure your card:
202 205
203 <http://www.hpl.hp.com/personal/Jean_Tourrilhes/Linux/Tools.html>. 206 <http://www.hpl.hp.com/personal/Jean_Tourrilhes/Linux/Tools.html>.
204 207
205 If you want to compile the driver as a module ( = code which can be 208 It is recommended that you compile this driver as a module (M)
206 inserted in and removed from the running kernel whenever you want), 209 rather than built-in (Y). This driver requires firmware at device
207 say M here and read <file:Documentation/kbuild/modules.txt>. 210 initialization time, and when built-in this typically happens
208 The module will be called ipw2200.ko. 211 before the filesystem is accessible (hence firmware will be
212 unavailable and initialization will fail). If you do choose to build
213 this driver into your kernel image, you can avoid this problem by
214 including the firmware and a firmware loader in an initramfs.
209 215
210config IPW2200_MONITOR 216config IPW2200_MONITOR
211 bool "Enable promiscuous mode" 217 bool "Enable promiscuous mode"
@@ -265,7 +271,6 @@ config LIBERTAS
265 tristate "Marvell 8xxx Libertas WLAN driver support" 271 tristate "Marvell 8xxx Libertas WLAN driver support"
266 depends on WLAN_80211 272 depends on WLAN_80211
267 select WIRELESS_EXT 273 select WIRELESS_EXT
268 select IEEE80211
269 select FW_LOADER 274 select FW_LOADER
270 ---help--- 275 ---help---
271 A library for Marvell Libertas 8xxx devices. 276 A library for Marvell Libertas 8xxx devices.
@@ -278,7 +283,7 @@ config LIBERTAS_USB
278 283
279config LIBERTAS_CS 284config LIBERTAS_CS
280 tristate "Marvell Libertas 8385 CompactFlash 802.11b/g cards" 285 tristate "Marvell Libertas 8385 CompactFlash 802.11b/g cards"
281 depends on LIBERTAS && PCMCIA && EXPERIMENTAL 286 depends on LIBERTAS && PCMCIA
282 select FW_LOADER 287 select FW_LOADER
283 ---help--- 288 ---help---
284 A driver for Marvell Libertas 8385 CompactFlash devices. 289 A driver for Marvell Libertas 8385 CompactFlash devices.
@@ -668,90 +673,10 @@ config ADM8211
668 673
669 Thanks to Infineon-ADMtek for their support of this driver. 674 Thanks to Infineon-ADMtek for their support of this driver.
670 675
671config P54_COMMON 676source "drivers/net/wireless/p54/Kconfig"
672 tristate "Softmac Prism54 support" 677source "drivers/net/wireless/ath5k/Kconfig"
673 depends on MAC80211 && WLAN_80211 && FW_LOADER && EXPERIMENTAL
674 ---help---
675 This is common code for isl38xx based cards.
676 This module does nothing by itself - the USB/PCI frontends
677 also need to be enabled in order to support any devices.
678
679 These devices require softmac firmware which can be found at
680 http://prism54.org/
681
682 If you choose to build a module, it'll be called p54common.
683
684config P54_USB
685 tristate "Prism54 USB support"
686 depends on P54_COMMON && USB
687 select CRC32
688 ---help---
689 This driver is for USB isl38xx based wireless cards.
690 These are USB based adapters found in devices such as:
691
692 3COM 3CRWE254G72
693 SMC 2862W-G
694 Accton 802.11g WN4501 USB
695 Siemens Gigaset USB
696 Netgear WG121
697 Netgear WG111
698 Medion 40900, Roper Europe
699 Shuttle PN15, Airvast WM168g, IOGear GWU513
700 Linksys WUSB54G
701 Linksys WUSB54G Portable
702 DLink DWL-G120 Spinnaker
703 DLink DWL-G122
704 Belkin F5D7050 ver 1000
705 Cohiba Proto board
706 SMC 2862W-G version 2
707 U.S. Robotics U5 802.11g Adapter
708 FUJITSU E-5400 USB D1700
709 Sagem XG703A
710 DLink DWL-G120 Cohiba
711 Spinnaker Proto board
712 Linksys WUSB54AG
713 Inventel UR054G
714 Spinnaker DUT
715
716 These devices require softmac firmware which can be found at
717 http://prism54.org/
718
719 If you choose to build a module, it'll be called p54usb.
720
721config P54_PCI
722 tristate "Prism54 PCI support"
723 depends on P54_COMMON && PCI
724 ---help---
725 This driver is for PCI isl38xx based wireless cards.
726 This driver supports most devices that are supported by the
727 fullmac prism54 driver plus many devices which are not
728 supported by the fullmac driver/firmware.
729
730 This driver requires softmac firmware which can be found at
731 http://prism54.org/
732
733 If you choose to build a module, it'll be called p54pci.
734
735config ATH5K
736 tristate "Atheros 5xxx wireless cards support"
737 depends on PCI && MAC80211 && WLAN_80211 && EXPERIMENTAL
738 ---help---
739 This module adds support for wireless adapters based on
740 Atheros 5xxx chipset.
741
742 Currently the following chip versions are supported:
743
744 MAC: AR5211 AR5212
745 PHY: RF5111/2111 RF5112/2112 RF5413/2413
746
747 This driver uses the kernel's mac80211 subsystem.
748
749 If you choose to build a module, it'll be called ath5k. Say M if
750 unsure.
751
752source "drivers/net/wireless/iwlwifi/Kconfig" 678source "drivers/net/wireless/iwlwifi/Kconfig"
753source "drivers/net/wireless/hostap/Kconfig" 679source "drivers/net/wireless/hostap/Kconfig"
754source "drivers/net/wireless/bcm43xx/Kconfig"
755source "drivers/net/wireless/b43/Kconfig" 680source "drivers/net/wireless/b43/Kconfig"
756source "drivers/net/wireless/b43legacy/Kconfig" 681source "drivers/net/wireless/b43legacy/Kconfig"
757source "drivers/net/wireless/zd1211rw/Kconfig" 682source "drivers/net/wireless/zd1211rw/Kconfig"
diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile
index 091dfe2e574e..70092191fc53 100644
--- a/drivers/net/wireless/Makefile
+++ b/drivers/net/wireless/Makefile
@@ -35,7 +35,6 @@ obj-$(CONFIG_PCMCIA_ATMEL) += atmel_cs.o
35obj-$(CONFIG_PRISM54) += prism54/ 35obj-$(CONFIG_PRISM54) += prism54/
36 36
37obj-$(CONFIG_HOSTAP) += hostap/ 37obj-$(CONFIG_HOSTAP) += hostap/
38obj-$(CONFIG_BCM43XX) += bcm43xx/
39obj-$(CONFIG_B43) += b43/ 38obj-$(CONFIG_B43) += b43/
40obj-$(CONFIG_B43LEGACY) += b43legacy/ 39obj-$(CONFIG_B43LEGACY) += b43legacy/
41obj-$(CONFIG_ZD1211RW) += zd1211rw/ 40obj-$(CONFIG_ZD1211RW) += zd1211rw/
@@ -61,8 +60,6 @@ obj-$(CONFIG_IWL3945) += iwlwifi/
61obj-$(CONFIG_IWL4965) += iwlwifi/ 60obj-$(CONFIG_IWL4965) += iwlwifi/
62obj-$(CONFIG_RT2X00) += rt2x00/ 61obj-$(CONFIG_RT2X00) += rt2x00/
63 62
64obj-$(CONFIG_P54_COMMON) += p54common.o 63obj-$(CONFIG_P54_COMMON) += p54/
65obj-$(CONFIG_P54_USB) += p54usb.o
66obj-$(CONFIG_P54_PCI) += p54pci.o
67 64
68obj-$(CONFIG_ATH5K) += ath5k/ 65obj-$(CONFIG_ATH5K) += ath5k/
diff --git a/drivers/net/wireless/adm8211.c b/drivers/net/wireless/adm8211.c
index 79796186713e..5c0d2b082750 100644
--- a/drivers/net/wireless/adm8211.c
+++ b/drivers/net/wireless/adm8211.c
@@ -48,6 +48,32 @@ static struct pci_device_id adm8211_pci_id_table[] __devinitdata = {
48 { 0 } 48 { 0 }
49}; 49};
50 50
51static struct ieee80211_rate adm8211_rates[] = {
52 { .bitrate = 10, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
53 { .bitrate = 20, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
54 { .bitrate = 55, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
55 { .bitrate = 110, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
56 { .bitrate = 220, .flags = IEEE80211_RATE_SHORT_PREAMBLE }, /* XX ?? */
57};
58
59static const struct ieee80211_channel adm8211_channels[] = {
60 { .center_freq = 2412},
61 { .center_freq = 2417},
62 { .center_freq = 2422},
63 { .center_freq = 2427},
64 { .center_freq = 2432},
65 { .center_freq = 2437},
66 { .center_freq = 2442},
67 { .center_freq = 2447},
68 { .center_freq = 2452},
69 { .center_freq = 2457},
70 { .center_freq = 2462},
71 { .center_freq = 2467},
72 { .center_freq = 2472},
73 { .center_freq = 2484},
74};
75
76
51static void adm8211_eeprom_register_read(struct eeprom_93cx6 *eeprom) 77static void adm8211_eeprom_register_read(struct eeprom_93cx6 *eeprom)
52{ 78{
53 struct adm8211_priv *priv = eeprom->data; 79 struct adm8211_priv *priv = eeprom->data;
@@ -155,17 +181,17 @@ static int adm8211_read_eeprom(struct ieee80211_hw *dev)
155 printk(KERN_DEBUG "%s (adm8211): Channel range: %d - %d\n", 181 printk(KERN_DEBUG "%s (adm8211): Channel range: %d - %d\n",
156 pci_name(priv->pdev), (int)chan_range.min, (int)chan_range.max); 182 pci_name(priv->pdev), (int)chan_range.min, (int)chan_range.max);
157 183
158 priv->modes[0].num_channels = chan_range.max - chan_range.min + 1; 184 BUILD_BUG_ON(sizeof(priv->channels) != sizeof(adm8211_channels));
159 priv->modes[0].channels = priv->channels;
160 185
161 memcpy(priv->channels, adm8211_channels, sizeof(adm8211_channels)); 186 memcpy(priv->channels, adm8211_channels, sizeof(priv->channels));
187 priv->band.channels = priv->channels;
188 priv->band.n_channels = ARRAY_SIZE(adm8211_channels);
189 priv->band.bitrates = adm8211_rates;
190 priv->band.n_bitrates = ARRAY_SIZE(adm8211_rates);
162 191
163 for (i = 1; i <= ARRAY_SIZE(adm8211_channels); i++) 192 for (i = 1; i <= ARRAY_SIZE(adm8211_channels); i++)
164 if (i >= chan_range.min && i <= chan_range.max) 193 if (i < chan_range.min || i > chan_range.max)
165 priv->channels[i - 1].flag = 194 priv->channels[i - 1].flags |= IEEE80211_CHAN_DISABLED;
166 IEEE80211_CHAN_W_SCAN |
167 IEEE80211_CHAN_W_ACTIVE_SCAN |
168 IEEE80211_CHAN_W_IBSS;
169 195
170 switch (priv->eeprom->specific_bbptype) { 196 switch (priv->eeprom->specific_bbptype) {
171 case ADM8211_BBP_RFMD3000: 197 case ADM8211_BBP_RFMD3000:
@@ -347,7 +373,6 @@ static void adm8211_interrupt_rci(struct ieee80211_hw *dev)
347 unsigned int pktlen; 373 unsigned int pktlen;
348 struct sk_buff *skb, *newskb; 374 struct sk_buff *skb, *newskb;
349 unsigned int limit = priv->rx_ring_size; 375 unsigned int limit = priv->rx_ring_size;
350 static const u8 rate_tbl[] = {10, 20, 55, 110, 220};
351 u8 rssi, rate; 376 u8 rssi, rate;
352 377
353 while (!(priv->rx_ring[entry].status & cpu_to_le32(RDES0_STATUS_OWN))) { 378 while (!(priv->rx_ring[entry].status & cpu_to_le32(RDES0_STATUS_OWN))) {
@@ -425,12 +450,10 @@ static void adm8211_interrupt_rci(struct ieee80211_hw *dev)
425 else 450 else
426 rx_status.ssi = 100 - rssi; 451 rx_status.ssi = 100 - rssi;
427 452
428 if (rate <= 4) 453 rx_status.rate_idx = rate;
429 rx_status.rate = rate_tbl[rate];
430 454
431 rx_status.channel = priv->channel; 455 rx_status.freq = adm8211_channels[priv->channel - 1].center_freq;
432 rx_status.freq = adm8211_channels[priv->channel - 1].freq; 456 rx_status.band = IEEE80211_BAND_2GHZ;
433 rx_status.phymode = MODE_IEEE80211B;
434 457
435 ieee80211_rx_irqsafe(dev, skb, &rx_status); 458 ieee80211_rx_irqsafe(dev, skb, &rx_status);
436 } 459 }
@@ -465,9 +488,6 @@ do { \
465 if (stsr & ADM8211_STSR_TCI) 488 if (stsr & ADM8211_STSR_TCI)
466 adm8211_interrupt_tci(dev); 489 adm8211_interrupt_tci(dev);
467 490
468 /*ADM8211_INT(LinkOn);*/
469 /*ADM8211_INT(LinkOff);*/
470
471 ADM8211_INT(PCF); 491 ADM8211_INT(PCF);
472 ADM8211_INT(BCNTC); 492 ADM8211_INT(BCNTC);
473 ADM8211_INT(GPINT); 493 ADM8211_INT(GPINT);
@@ -477,7 +497,6 @@ do { \
477 ADM8211_INT(SQL); 497 ADM8211_INT(SQL);
478 ADM8211_INT(WEPTD); 498 ADM8211_INT(WEPTD);
479 ADM8211_INT(ATIME); 499 ADM8211_INT(ATIME);
480 /*ADM8211_INT(TBTT);*/
481 ADM8211_INT(TEIS); 500 ADM8211_INT(TEIS);
482 ADM8211_INT(FBE); 501 ADM8211_INT(FBE);
483 ADM8211_INT(REIS); 502 ADM8211_INT(REIS);
@@ -485,9 +504,6 @@ do { \
485 ADM8211_INT(RPS); 504 ADM8211_INT(RPS);
486 ADM8211_INT(RDU); 505 ADM8211_INT(RDU);
487 ADM8211_INT(TUF); 506 ADM8211_INT(TUF);
488 /*ADM8211_INT(TRT);*/
489 /*ADM8211_INT(TLT);*/
490 /*ADM8211_INT(TDU);*/
491 ADM8211_INT(TPS); 507 ADM8211_INT(TPS);
492 508
493 return IRQ_HANDLED; 509 return IRQ_HANDLED;
@@ -1054,7 +1070,7 @@ static int adm8211_set_rate(struct ieee80211_hw *dev)
1054 if (priv->pdev->revision != ADM8211_REV_BA) { 1070 if (priv->pdev->revision != ADM8211_REV_BA) {
1055 rate_buf[0] = ARRAY_SIZE(adm8211_rates); 1071 rate_buf[0] = ARRAY_SIZE(adm8211_rates);
1056 for (i = 0; i < ARRAY_SIZE(adm8211_rates); i++) 1072 for (i = 0; i < ARRAY_SIZE(adm8211_rates); i++)
1057 rate_buf[i + 1] = (adm8211_rates[i].rate / 5) | 0x80; 1073 rate_buf[i + 1] = (adm8211_rates[i].bitrate / 5) | 0x80;
1058 } else { 1074 } else {
1059 /* workaround for rev BA specific bug */ 1075 /* workaround for rev BA specific bug */
1060 rate_buf[0] = 0x04; 1076 rate_buf[0] = 0x04;
@@ -1086,7 +1102,7 @@ static void adm8211_hw_init(struct ieee80211_hw *dev)
1086 u32 reg; 1102 u32 reg;
1087 u8 cline; 1103 u8 cline;
1088 1104
1089 reg = le32_to_cpu(ADM8211_CSR_READ(PAR)); 1105 reg = ADM8211_CSR_READ(PAR);
1090 reg |= ADM8211_PAR_MRLE | ADM8211_PAR_MRME; 1106 reg |= ADM8211_PAR_MRLE | ADM8211_PAR_MRME;
1091 reg &= ~(ADM8211_PAR_BAR | ADM8211_PAR_CAL); 1107 reg &= ~(ADM8211_PAR_BAR | ADM8211_PAR_CAL);
1092 1108
@@ -1303,9 +1319,10 @@ static int adm8211_set_ssid(struct ieee80211_hw *dev, u8 *ssid, size_t ssid_len)
1303static int adm8211_config(struct ieee80211_hw *dev, struct ieee80211_conf *conf) 1319static int adm8211_config(struct ieee80211_hw *dev, struct ieee80211_conf *conf)
1304{ 1320{
1305 struct adm8211_priv *priv = dev->priv; 1321 struct adm8211_priv *priv = dev->priv;
1322 int channel = ieee80211_frequency_to_channel(conf->channel->center_freq);
1306 1323
1307 if (conf->channel != priv->channel) { 1324 if (channel != priv->channel) {
1308 priv->channel = conf->channel; 1325 priv->channel = channel;
1309 adm8211_rf_set_channel(dev, priv->channel); 1326 adm8211_rf_set_channel(dev, priv->channel);
1310 } 1327 }
1311 1328
@@ -1678,13 +1695,9 @@ static int adm8211_tx(struct ieee80211_hw *dev, struct sk_buff *skb,
1678 int plcp, dur, len, plcp_signal, short_preamble; 1695 int plcp, dur, len, plcp_signal, short_preamble;
1679 struct ieee80211_hdr *hdr; 1696 struct ieee80211_hdr *hdr;
1680 1697
1681 if (control->tx_rate < 0) { 1698 short_preamble = !!(control->tx_rate->flags &
1682 short_preamble = 1; 1699 IEEE80211_TXCTL_SHORT_PREAMBLE);
1683 plcp_signal = -control->tx_rate; 1700 plcp_signal = control->tx_rate->bitrate;
1684 } else {
1685 short_preamble = 0;
1686 plcp_signal = control->tx_rate;
1687 }
1688 1701
1689 hdr = (struct ieee80211_hdr *)skb->data; 1702 hdr = (struct ieee80211_hdr *)skb->data;
1690 fc = le16_to_cpu(hdr->frame_control) & ~IEEE80211_FCTL_PROTECTED; 1703 fc = le16_to_cpu(hdr->frame_control) & ~IEEE80211_FCTL_PROTECTED;
@@ -1880,18 +1893,11 @@ static int __devinit adm8211_probe(struct pci_dev *pdev,
1880 SET_IEEE80211_PERM_ADDR(dev, perm_addr); 1893 SET_IEEE80211_PERM_ADDR(dev, perm_addr);
1881 1894
1882 dev->extra_tx_headroom = sizeof(struct adm8211_tx_hdr); 1895 dev->extra_tx_headroom = sizeof(struct adm8211_tx_hdr);
1883 dev->flags = IEEE80211_HW_DEFAULT_REG_DOMAIN_CONFIGURED; 1896 /* dev->flags = IEEE80211_HW_RX_INCLUDES_FCS in promisc mode */
1884 /* IEEE80211_HW_RX_INCLUDES_FCS in promisc mode */
1885 1897
1886 dev->channel_change_time = 1000; 1898 dev->channel_change_time = 1000;
1887 dev->max_rssi = 100; /* FIXME: find better value */ 1899 dev->max_rssi = 100; /* FIXME: find better value */
1888 1900
1889 priv->modes[0].mode = MODE_IEEE80211B;
1890 /* channel info filled in by adm8211_read_eeprom */
1891 memcpy(priv->rates, adm8211_rates, sizeof(adm8211_rates));
1892 priv->modes[0].num_rates = ARRAY_SIZE(adm8211_rates);
1893 priv->modes[0].rates = priv->rates;
1894
1895 dev->queues = 1; /* ADM8211C supports more, maybe ADM8211B too */ 1901 dev->queues = 1; /* ADM8211C supports more, maybe ADM8211B too */
1896 1902
1897 priv->retry_limit = 3; 1903 priv->retry_limit = 3;
@@ -1917,14 +1923,9 @@ static int __devinit adm8211_probe(struct pci_dev *pdev,
1917 goto err_free_desc; 1923 goto err_free_desc;
1918 } 1924 }
1919 1925
1920 priv->channel = priv->modes[0].channels[0].chan; 1926 priv->channel = 1;
1921 1927
1922 err = ieee80211_register_hwmode(dev, &priv->modes[0]); 1928 dev->wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band;
1923 if (err) {
1924 printk(KERN_ERR "%s (adm8211): Can't register hwmode\n",
1925 pci_name(pdev));
1926 goto err_free_desc;
1927 }
1928 1929
1929 err = ieee80211_register_hw(dev); 1930 err = ieee80211_register_hw(dev);
1930 if (err) { 1931 if (err) {
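The adm8211 changes above drop ieee80211_register_hwmode() in favour of a filled-in struct ieee80211_supported_band handed to mac80211 before registration. A minimal sketch of that registration step only, with hypothetical my_priv/my_setup_band names and array sizes (channel and rate entries themselves are assumed to be populated elsewhere):

#include <linux/kernel.h>
#include <net/mac80211.h>

struct my_priv {
	struct ieee80211_supported_band band;
	struct ieee80211_channel channels[14];
	struct ieee80211_rate rates[4];
};

static void my_setup_band(struct ieee80211_hw *hw, struct my_priv *priv)
{
	priv->band.channels = priv->channels;
	priv->band.n_channels = ARRAY_SIZE(priv->channels);
	priv->band.bitrates = priv->rates;
	priv->band.n_bitrates = ARRAY_SIZE(priv->rates);

	/* hand the band to mac80211 before calling ieee80211_register_hw() */
	hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band;
}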
diff --git a/drivers/net/wireless/adm8211.h b/drivers/net/wireless/adm8211.h
index ef326fed42e4..8d7c564b3b04 100644
--- a/drivers/net/wireless/adm8211.h
+++ b/drivers/net/wireless/adm8211.h
@@ -534,61 +534,6 @@ struct adm8211_eeprom {
534 u8 cis_data[0]; /* 0x80, 384 bytes */ 534 u8 cis_data[0]; /* 0x80, 384 bytes */
535} __attribute__ ((packed)); 535} __attribute__ ((packed));
536 536
537static const struct ieee80211_rate adm8211_rates[] = {
538 { .rate = 10,
539 .val = 10,
540 .val2 = -10,
541 .flags = IEEE80211_RATE_CCK_2 },
542 { .rate = 20,
543 .val = 20,
544 .val2 = -20,
545 .flags = IEEE80211_RATE_CCK_2 },
546 { .rate = 55,
547 .val = 55,
548 .val2 = -55,
549 .flags = IEEE80211_RATE_CCK_2 },
550 { .rate = 110,
551 .val = 110,
552 .val2 = -110,
553 .flags = IEEE80211_RATE_CCK_2 }
554};
555
556struct ieee80211_chan_range {
557 u8 min;
558 u8 max;
559};
560
561static const struct ieee80211_channel adm8211_channels[] = {
562 { .chan = 1,
563 .freq = 2412},
564 { .chan = 2,
565 .freq = 2417},
566 { .chan = 3,
567 .freq = 2422},
568 { .chan = 4,
569 .freq = 2427},
570 { .chan = 5,
571 .freq = 2432},
572 { .chan = 6,
573 .freq = 2437},
574 { .chan = 7,
575 .freq = 2442},
576 { .chan = 8,
577 .freq = 2447},
578 { .chan = 9,
579 .freq = 2452},
580 { .chan = 10,
581 .freq = 2457},
582 { .chan = 11,
583 .freq = 2462},
584 { .chan = 12,
585 .freq = 2467},
586 { .chan = 13,
587 .freq = 2472},
588 { .chan = 14,
589 .freq = 2484},
590};
591
592struct adm8211_priv { 537struct adm8211_priv {
593 struct pci_dev *pdev; 538 struct pci_dev *pdev;
594 spinlock_t lock; 539 spinlock_t lock;
@@ -603,9 +548,8 @@ struct adm8211_priv {
603 unsigned int cur_tx, dirty_tx, cur_rx; 548 unsigned int cur_tx, dirty_tx, cur_rx;
604 549
605 struct ieee80211_low_level_stats stats; 550 struct ieee80211_low_level_stats stats;
606 struct ieee80211_hw_mode modes[1]; 551 struct ieee80211_supported_band band;
607 struct ieee80211_channel channels[ARRAY_SIZE(adm8211_channels)]; 552 struct ieee80211_channel channels[14];
608 struct ieee80211_rate rates[ARRAY_SIZE(adm8211_rates)];
609 int mode; 553 int mode;
610 554
611 int channel; 555 int channel;
@@ -643,6 +587,11 @@ struct adm8211_priv {
643 } transceiver_type; 587 } transceiver_type;
644}; 588};
645 589
590struct ieee80211_chan_range {
591 u8 min;
592 u8 max;
593};
594
646static const struct ieee80211_chan_range cranges[] = { 595static const struct ieee80211_chan_range cranges[] = {
647 {1, 11}, /* FCC */ 596 {1, 11}, /* FCC */
648 {1, 11}, /* IC */ 597 {1, 11}, /* IC */
diff --git a/drivers/net/wireless/ath5k/Kconfig b/drivers/net/wireless/ath5k/Kconfig
new file mode 100644
index 000000000000..f1f2aea2eab4
--- /dev/null
+++ b/drivers/net/wireless/ath5k/Kconfig
@@ -0,0 +1,37 @@
1config ATH5K
2 tristate "Atheros 5xxx wireless cards support"
3 depends on PCI && MAC80211 && WLAN_80211 && EXPERIMENTAL
4 ---help---
5 This module adds support for wireless adapters based on
6 Atheros 5xxx chipset.
7
8 Currently the following chip versions are supported:
9
10 MAC: AR5211 AR5212
11 PHY: RF5111/2111 RF5112/2112 RF5413/2413
12
13 This driver uses the kernel's mac80211 subsystem.
14
15 If you choose to build a module, it'll be called ath5k. Say M if
16 unsure.
17
18config ATH5K_DEBUG
19 bool "Atheros 5xxx debugging"
20 depends on ATH5K
21 ---help---
22 Atheros 5xxx debugging messages.
23
 24	  Say Y, and you will get debug options for ath5k.
25 To use this, you need to mount debugfs:
26
27 mkdir /debug/
28 mount -t debugfs debug /debug/
29
30 You will get access to files under:
31 /debug/ath5k/phy0/
32
33 To enable debug, pass the debug level to the debug module
34 parameter. For example:
35
36 modprobe ath5k debug=0x00000400
37
diff --git a/drivers/net/wireless/ath5k/Makefile b/drivers/net/wireless/ath5k/Makefile
index 321641f99e13..564ecd0c5d4b 100644
--- a/drivers/net/wireless/ath5k/Makefile
+++ b/drivers/net/wireless/ath5k/Makefile
@@ -1,2 +1,6 @@
1ath5k-objs = base.o hw.o regdom.o initvals.o phy.o debug.o 1ath5k-y += base.o
2obj-$(CONFIG_ATH5K) += ath5k.o 2ath5k-y += hw.o
3ath5k-y += initvals.o
4ath5k-y += phy.o
5ath5k-$(CONFIG_ATH5K_DEBUG) += debug.o
6obj-$(CONFIG_ATH5K) += ath5k.o
diff --git a/drivers/net/wireless/ath5k/ath5k.h b/drivers/net/wireless/ath5k/ath5k.h
index 69dea3392612..ba35c30d203c 100644
--- a/drivers/net/wireless/ath5k/ath5k.h
+++ b/drivers/net/wireless/ath5k/ath5k.h
@@ -30,7 +30,6 @@
30#include <net/mac80211.h> 30#include <net/mac80211.h>
31 31
32#include "hw.h" 32#include "hw.h"
33#include "regdom.h"
34 33
35/* PCI IDs */ 34/* PCI IDs */
36#define PCI_DEVICE_ID_ATHEROS_AR5210 0x0007 /* AR5210 */ 35#define PCI_DEVICE_ID_ATHEROS_AR5210 0x0007 /* AR5210 */
@@ -141,7 +140,9 @@ enum ath5k_radio {
141 AR5K_RF5110 = 0, 140 AR5K_RF5110 = 0,
142 AR5K_RF5111 = 1, 141 AR5K_RF5111 = 1,
143 AR5K_RF5112 = 2, 142 AR5K_RF5112 = 2,
144 AR5K_RF5413 = 3, 143 AR5K_RF2413 = 3,
144 AR5K_RF5413 = 4,
145 AR5K_RF2425 = 5,
145}; 146};
146 147
147/* 148/*
@@ -169,12 +170,15 @@ struct ath5k_srev_name {
169#define AR5K_SREV_VER_AR5212 0x50 170#define AR5K_SREV_VER_AR5212 0x50
170#define AR5K_SREV_VER_AR5213 0x55 171#define AR5K_SREV_VER_AR5213 0x55
171#define AR5K_SREV_VER_AR5213A 0x59 172#define AR5K_SREV_VER_AR5213A 0x59
172#define AR5K_SREV_VER_AR2424 0xa0 173#define AR5K_SREV_VER_AR2413 0x78
173#define AR5K_SREV_VER_AR5424 0xa3 174#define AR5K_SREV_VER_AR2414 0x79
175#define AR5K_SREV_VER_AR2424 0xa0 /* PCI-E */
176#define AR5K_SREV_VER_AR5424 0xa3 /* PCI-E */
174#define AR5K_SREV_VER_AR5413 0xa4 177#define AR5K_SREV_VER_AR5413 0xa4
175#define AR5K_SREV_VER_AR5414 0xa5 178#define AR5K_SREV_VER_AR5414 0xa5
176#define AR5K_SREV_VER_AR5416 0xc0 /* ? */ 179#define AR5K_SREV_VER_AR5416 0xc0 /* PCI-E */
177#define AR5K_SREV_VER_AR5418 0xca 180#define AR5K_SREV_VER_AR5418 0xca /* PCI-E */
181#define AR5K_SREV_VER_AR2425 0xe2 /* PCI-E */
178 182
179#define AR5K_SREV_RAD_5110 0x00 183#define AR5K_SREV_RAD_5110 0x00
180#define AR5K_SREV_RAD_5111 0x10 184#define AR5K_SREV_RAD_5111 0x10
@@ -184,8 +188,9 @@ struct ath5k_srev_name {
184#define AR5K_SREV_RAD_5112A 0x35 188#define AR5K_SREV_RAD_5112A 0x35
185#define AR5K_SREV_RAD_2112 0x40 189#define AR5K_SREV_RAD_2112 0x40
186#define AR5K_SREV_RAD_2112A 0x45 190#define AR5K_SREV_RAD_2112A 0x45
191#define AR5K_SREV_RAD_SC0 0x56 /* Found on 2413/2414 */
187#define AR5K_SREV_RAD_SC1 0x63 /* Found on 5413/5414 */ 192#define AR5K_SREV_RAD_SC1 0x63 /* Found on 5413/5414 */
188#define AR5K_SREV_RAD_SC2 0xa2 /* Found on 2424/5424 */ 193#define AR5K_SREV_RAD_SC2 0xa2 /* Found on 2424-5/5424 */
189#define AR5K_SREV_RAD_5133 0xc0 /* MIMO found on 5418 */ 194#define AR5K_SREV_RAD_5133 0xc0 /* MIMO found on 5418 */
190 195
191/* IEEE defs */ 196/* IEEE defs */
@@ -251,26 +256,31 @@ struct ath5k_srev_name {
251 */ 256 */
252#define MODULATION_TURBO 0x00000080 257#define MODULATION_TURBO 0x00000080
253 258
254enum ath5k_vendor_mode { 259enum ath5k_driver_mode {
255 MODE_ATHEROS_TURBO = NUM_IEEE80211_MODES+1, 260 AR5K_MODE_11A = 0,
256 MODE_ATHEROS_TURBOG 261 AR5K_MODE_11A_TURBO = 1,
262 AR5K_MODE_11B = 2,
263 AR5K_MODE_11G = 3,
264 AR5K_MODE_11G_TURBO = 4,
265 AR5K_MODE_XR = 0,
266 AR5K_MODE_MAX = 5
257}; 267};
258 268
259/* Number of supported mac80211 enum ieee80211_phymode modes by this driver */
260#define NUM_DRIVER_MODES 3
261
262/* adding this flag to rate_code enables short preamble, see ar5212_reg.h */ 269/* adding this flag to rate_code enables short preamble, see ar5212_reg.h */
263#define AR5K_SET_SHORT_PREAMBLE 0x04 270#define AR5K_SET_SHORT_PREAMBLE 0x04
264 271
265#define HAS_SHPREAMBLE(_ix) (rt->rates[_ix].modulation == IEEE80211_RATE_CCK_2) 272#define HAS_SHPREAMBLE(_ix) \
266#define SHPREAMBLE_FLAG(_ix) (HAS_SHPREAMBLE(_ix) ? AR5K_SET_SHORT_PREAMBLE : 0) 273 (rt->rates[_ix].modulation == IEEE80211_RATE_SHORT_PREAMBLE)
274#define SHPREAMBLE_FLAG(_ix) \
275 (HAS_SHPREAMBLE(_ix) ? AR5K_SET_SHORT_PREAMBLE : 0)
276
267 277
268/****************\ 278/****************\
269 TX DEFINITIONS 279 TX DEFINITIONS
270\****************/ 280\****************/
271 281
272/* 282/*
273 * Tx Descriptor 283 * TX Status
274 */ 284 */
275struct ath5k_tx_status { 285struct ath5k_tx_status {
276 u16 ts_seqnum; 286 u16 ts_seqnum;
@@ -418,7 +428,7 @@ enum ath5k_dmasize {
418\****************/ 428\****************/
419 429
420/* 430/*
421 * Rx Descriptor 431 * RX Status
422 */ 432 */
423struct ath5k_rx_status { 433struct ath5k_rx_status {
424 u16 rs_datalen; 434 u16 rs_datalen;
@@ -440,16 +450,6 @@ struct ath5k_rx_status {
440#define AR5K_RXKEYIX_INVALID ((u8) - 1) 450#define AR5K_RXKEYIX_INVALID ((u8) - 1)
441#define AR5K_TXKEYIX_INVALID ((u32) - 1) 451#define AR5K_TXKEYIX_INVALID ((u32) - 1)
442 452
443struct ath5k_mib_stats {
444 u32 ackrcv_bad;
445 u32 rts_bad;
446 u32 rts_good;
447 u32 fcs_bad;
448 u32 beacons;
449};
450
451
452
453 453
454/**************************\ 454/**************************\
455 BEACON TIMERS DEFINITIONS 455 BEACON TIMERS DEFINITIONS
@@ -492,29 +492,23 @@ struct ath5k_beacon_state {
492#define TSF_TO_TU(_tsf) (u32)((_tsf) >> 10) 492#define TSF_TO_TU(_tsf) (u32)((_tsf) >> 10)
493 493
494 494
495
496/********************\ 495/********************\
497 COMMON DEFINITIONS 496 COMMON DEFINITIONS
498\********************/ 497\********************/
499 498
500/* 499/*
501 * Atheros descriptor 500 * Atheros hardware descriptor
501 * This is read and written to by the hardware
502 */ 502 */
503struct ath5k_desc { 503struct ath5k_desc {
504 u32 ds_link; 504 u32 ds_link; /* physical address of the next descriptor */
505 u32 ds_data; 505 u32 ds_data; /* physical address of data buffer (skb) */
506 u32 ds_ctl0;
507 u32 ds_ctl1;
508 u32 ds_hw[4];
509 506
510 union { 507 union {
511 struct ath5k_rx_status rx; 508 struct ath5k_hw_5210_tx_desc ds_tx5210;
512 struct ath5k_tx_status tx; 509 struct ath5k_hw_5212_tx_desc ds_tx5212;
513 } ds_us; 510 struct ath5k_hw_all_rx_desc ds_rx;
514 511 } ud;
515#define ds_rxstat ds_us.rx
516#define ds_txstat ds_us.tx
517
518} __packed; 512} __packed;
519 513
520#define AR5K_RXDESC_INTREQ 0x0020 514#define AR5K_RXDESC_INTREQ 0x0020
@@ -560,8 +554,8 @@ struct ath5k_desc {
560 * Used internally in OpenHAL (ar5211.c/ar5212.c 554 * Used internally in OpenHAL (ar5211.c/ar5212.c
561 * for reset_tx_queue). Also see struct ieee80211_channel. 555 * for reset_tx_queue). Also see struct ieee80211_channel.
562 */ 556 */
563#define IS_CHAN_XR(_c) ((_c.val & CHANNEL_XR) != 0) 557#define IS_CHAN_XR(_c) ((_c.hw_value & CHANNEL_XR) != 0)
564#define IS_CHAN_B(_c) ((_c.val & CHANNEL_B) != 0) 558#define IS_CHAN_B(_c) ((_c.hw_value & CHANNEL_B) != 0)
565 559
566/* 560/*
567 * The following structure will be used to map 2GHz channels to 561 * The following structure will be used to map 2GHz channels to
@@ -584,7 +578,7 @@ struct ath5k_athchan_2ghz {
584 578
585/** 579/**
586 * struct ath5k_rate - rate structure 580 * struct ath5k_rate - rate structure
587 * @valid: is this a valid rate for the current mode 581 * @valid: is this a valid rate for rate control (remove)
588 * @modulation: respective mac80211 modulation 582 * @modulation: respective mac80211 modulation
589 * @rate_kbps: rate in kbit/s 583 * @rate_kbps: rate in kbit/s
590 * @rate_code: hardware rate value, used in &struct ath5k_desc, on RX on 584 * @rate_code: hardware rate value, used in &struct ath5k_desc, on RX on
@@ -643,47 +637,48 @@ struct ath5k_rate_table {
643 637
644/* 638/*
645 * Rate tables... 639 * Rate tables...
640 * TODO: CLEAN THIS !!!
646 */ 641 */
647#define AR5K_RATES_11A { 8, { \ 642#define AR5K_RATES_11A { 8, { \
648 255, 255, 255, 255, 255, 255, 255, 255, 6, 4, 2, 0, \ 643 255, 255, 255, 255, 255, 255, 255, 255, 6, 4, 2, 0, \
649 7, 5, 3, 1, 255, 255, 255, 255, 255, 255, 255, 255, \ 644 7, 5, 3, 1, 255, 255, 255, 255, 255, 255, 255, 255, \
650 255, 255, 255, 255, 255, 255, 255, 255 }, { \ 645 255, 255, 255, 255, 255, 255, 255, 255 }, { \
651 { 1, IEEE80211_RATE_OFDM, 6000, 11, 140, 0 }, \ 646 { 1, 0, 6000, 11, 140, 0 }, \
652 { 1, IEEE80211_RATE_OFDM, 9000, 15, 18, 0 }, \ 647 { 1, 0, 9000, 15, 18, 0 }, \
653 { 1, IEEE80211_RATE_OFDM, 12000, 10, 152, 2 }, \ 648 { 1, 0, 12000, 10, 152, 2 }, \
654 { 1, IEEE80211_RATE_OFDM, 18000, 14, 36, 2 }, \ 649 { 1, 0, 18000, 14, 36, 2 }, \
655 { 1, IEEE80211_RATE_OFDM, 24000, 9, 176, 4 }, \ 650 { 1, 0, 24000, 9, 176, 4 }, \
656 { 1, IEEE80211_RATE_OFDM, 36000, 13, 72, 4 }, \ 651 { 1, 0, 36000, 13, 72, 4 }, \
657 { 1, IEEE80211_RATE_OFDM, 48000, 8, 96, 4 }, \ 652 { 1, 0, 48000, 8, 96, 4 }, \
658 { 1, IEEE80211_RATE_OFDM, 54000, 12, 108, 4 } } \ 653 { 1, 0, 54000, 12, 108, 4 } } \
659} 654}
660 655
661#define AR5K_RATES_11B { 4, { \ 656#define AR5K_RATES_11B { 4, { \
662 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, \ 657 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, \
663 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, \ 658 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, \
664 3, 2, 1, 0, 255, 255, 255, 255 }, { \ 659 3, 2, 1, 0, 255, 255, 255, 255 }, { \
665 { 1, IEEE80211_RATE_CCK, 1000, 27, 130, 0 }, \ 660 { 1, 0, 1000, 27, 130, 0 }, \
666 { 1, IEEE80211_RATE_CCK_2, 2000, 26, 132, 1 }, \ 661 { 1, IEEE80211_RATE_SHORT_PREAMBLE, 2000, 26, 132, 1 }, \
667 { 1, IEEE80211_RATE_CCK_2, 5500, 25, 139, 1 }, \ 662 { 1, IEEE80211_RATE_SHORT_PREAMBLE, 5500, 25, 139, 1 }, \
668 { 1, IEEE80211_RATE_CCK_2, 11000, 24, 150, 1 } } \ 663 { 1, IEEE80211_RATE_SHORT_PREAMBLE, 11000, 24, 150, 1 } } \
669} 664}
670 665
671#define AR5K_RATES_11G { 12, { \ 666#define AR5K_RATES_11G { 12, { \
672 255, 255, 255, 255, 255, 255, 255, 255, 10, 8, 6, 4, \ 667 255, 255, 255, 255, 255, 255, 255, 255, 10, 8, 6, 4, \
673 11, 9, 7, 5, 255, 255, 255, 255, 255, 255, 255, 255, \ 668 11, 9, 7, 5, 255, 255, 255, 255, 255, 255, 255, 255, \
674 3, 2, 1, 0, 255, 255, 255, 255 }, { \ 669 3, 2, 1, 0, 255, 255, 255, 255 }, { \
675 { 1, IEEE80211_RATE_CCK, 1000, 27, 2, 0 }, \ 670 { 1, 0, 1000, 27, 2, 0 }, \
676 { 1, IEEE80211_RATE_CCK_2, 2000, 26, 4, 1 }, \ 671 { 1, IEEE80211_RATE_SHORT_PREAMBLE, 2000, 26, 4, 1 }, \
677 { 1, IEEE80211_RATE_CCK_2, 5500, 25, 11, 1 }, \ 672 { 1, IEEE80211_RATE_SHORT_PREAMBLE, 5500, 25, 11, 1 }, \
678 { 1, IEEE80211_RATE_CCK_2, 11000, 24, 22, 1 }, \ 673 { 1, IEEE80211_RATE_SHORT_PREAMBLE, 11000, 24, 22, 1 }, \
679 { 0, IEEE80211_RATE_OFDM, 6000, 11, 12, 4 }, \ 674 { 0, 0, 6000, 11, 12, 4 }, \
680 { 0, IEEE80211_RATE_OFDM, 9000, 15, 18, 4 }, \ 675 { 0, 0, 9000, 15, 18, 4 }, \
681 { 1, IEEE80211_RATE_OFDM, 12000, 10, 24, 6 }, \ 676 { 1, 0, 12000, 10, 24, 6 }, \
682 { 1, IEEE80211_RATE_OFDM, 18000, 14, 36, 6 }, \ 677 { 1, 0, 18000, 14, 36, 6 }, \
683 { 1, IEEE80211_RATE_OFDM, 24000, 9, 48, 8 }, \ 678 { 1, 0, 24000, 9, 48, 8 }, \
684 { 1, IEEE80211_RATE_OFDM, 36000, 13, 72, 8 }, \ 679 { 1, 0, 36000, 13, 72, 8 }, \
685 { 1, IEEE80211_RATE_OFDM, 48000, 8, 96, 8 }, \ 680 { 1, 0, 48000, 8, 96, 8 }, \
686 { 1, IEEE80211_RATE_OFDM, 54000, 12, 108, 8 } } \ 681 { 1, 0, 54000, 12, 108, 8 } } \
687} 682}
688 683
689#define AR5K_RATES_TURBO { 8, { \ 684#define AR5K_RATES_TURBO { 8, { \
@@ -708,14 +703,14 @@ struct ath5k_rate_table {
708 { 1, MODULATION_XR, 1000, 2, 139, 1 }, \ 703 { 1, MODULATION_XR, 1000, 2, 139, 1 }, \
709 { 1, MODULATION_XR, 2000, 6, 150, 2 }, \ 704 { 1, MODULATION_XR, 2000, 6, 150, 2 }, \
710 { 1, MODULATION_XR, 3000, 1, 150, 3 }, \ 705 { 1, MODULATION_XR, 3000, 1, 150, 3 }, \
711 { 1, IEEE80211_RATE_OFDM, 6000, 11, 140, 4 }, \ 706 { 1, 0, 6000, 11, 140, 4 }, \
712 { 1, IEEE80211_RATE_OFDM, 9000, 15, 18, 4 }, \ 707 { 1, 0, 9000, 15, 18, 4 }, \
713 { 1, IEEE80211_RATE_OFDM, 12000, 10, 152, 6 }, \ 708 { 1, 0, 12000, 10, 152, 6 }, \
714 { 1, IEEE80211_RATE_OFDM, 18000, 14, 36, 6 }, \ 709 { 1, 0, 18000, 14, 36, 6 }, \
715 { 1, IEEE80211_RATE_OFDM, 24000, 9, 176, 8 }, \ 710 { 1, 0, 24000, 9, 176, 8 }, \
716 { 1, IEEE80211_RATE_OFDM, 36000, 13, 72, 8 }, \ 711 { 1, 0, 36000, 13, 72, 8 }, \
717 { 1, IEEE80211_RATE_OFDM, 48000, 8, 96, 8 }, \ 712 { 1, 0, 48000, 8, 96, 8 }, \
718 { 1, IEEE80211_RATE_OFDM, 54000, 12, 108, 8 } } \ 713 { 1, 0, 54000, 12, 108, 8 } } \
719} 714}
720 715
721/* 716/*
@@ -890,12 +885,14 @@ enum ath5k_capability_type {
890 AR5K_CAP_RFSILENT = 20, /* Supports RFsilent */ 885 AR5K_CAP_RFSILENT = 20, /* Supports RFsilent */
891}; 886};
892 887
888
889/* XXX: we *may* move cap_range stuff to struct wiphy */
893struct ath5k_capabilities { 890struct ath5k_capabilities {
894 /* 891 /*
895 * Supported PHY modes 892 * Supported PHY modes
896 * (ie. CHANNEL_A, CHANNEL_B, ...) 893 * (ie. CHANNEL_A, CHANNEL_B, ...)
897 */ 894 */
898 DECLARE_BITMAP(cap_mode, NUM_DRIVER_MODES); 895 DECLARE_BITMAP(cap_mode, AR5K_MODE_MAX);
899 896
900 /* 897 /*
901 * Frequency range (without regulation restrictions) 898 * Frequency range (without regulation restrictions)
@@ -908,14 +905,6 @@ struct ath5k_capabilities {
908 } cap_range; 905 } cap_range;
909 906
910 /* 907 /*
911 * Active regulation domain settings
912 */
913 struct {
914 enum ath5k_regdom reg_current;
915 enum ath5k_regdom reg_hw;
916 } cap_regdomain;
917
918 /*
919 * Values stored in the EEPROM (some of them...) 908 * Values stored in the EEPROM (some of them...)
920 */ 909 */
921 struct ath5k_eeprom_info cap_eeprom; 910 struct ath5k_eeprom_info cap_eeprom;
@@ -963,6 +952,7 @@ struct ath5k_hw {
963 u16 ah_phy_revision; 952 u16 ah_phy_revision;
964 u16 ah_radio_5ghz_revision; 953 u16 ah_radio_5ghz_revision;
965 u16 ah_radio_2ghz_revision; 954 u16 ah_radio_2ghz_revision;
955 u32 ah_phy_spending;
966 956
967 enum ath5k_version ah_version; 957 enum ath5k_version ah_version;
968 enum ath5k_radio ah_radio; 958 enum ath5k_radio ah_radio;
@@ -1038,8 +1028,10 @@ struct ath5k_hw {
1038 int (*ah_setup_xtx_desc)(struct ath5k_hw *, struct ath5k_desc *, 1028 int (*ah_setup_xtx_desc)(struct ath5k_hw *, struct ath5k_desc *,
1039 unsigned int, unsigned int, unsigned int, unsigned int, 1029 unsigned int, unsigned int, unsigned int, unsigned int,
1040 unsigned int, unsigned int); 1030 unsigned int, unsigned int);
1041 int (*ah_proc_tx_desc)(struct ath5k_hw *, struct ath5k_desc *); 1031 int (*ah_proc_tx_desc)(struct ath5k_hw *, struct ath5k_desc *,
1042 int (*ah_proc_rx_desc)(struct ath5k_hw *, struct ath5k_desc *); 1032 struct ath5k_tx_status *);
1033 int (*ah_proc_rx_desc)(struct ath5k_hw *, struct ath5k_desc *,
1034 struct ath5k_rx_status *);
1043}; 1035};
1044 1036
1045/* 1037/*
@@ -1070,6 +1062,7 @@ extern int ath5k_hw_update_tx_triglevel(struct ath5k_hw *ah, bool increase);
1070extern bool ath5k_hw_is_intr_pending(struct ath5k_hw *ah); 1062extern bool ath5k_hw_is_intr_pending(struct ath5k_hw *ah);
1071extern int ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask); 1063extern int ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask);
1072extern enum ath5k_int ath5k_hw_set_intr(struct ath5k_hw *ah, enum ath5k_int new_mask); 1064extern enum ath5k_int ath5k_hw_set_intr(struct ath5k_hw *ah, enum ath5k_int new_mask);
1065extern void ath5k_hw_update_mib_counters(struct ath5k_hw *ah, struct ieee80211_low_level_stats *stats);
1073/* EEPROM access functions */ 1066/* EEPROM access functions */
1074extern int ath5k_hw_set_regdomain(struct ath5k_hw *ah, u16 regdomain); 1067extern int ath5k_hw_set_regdomain(struct ath5k_hw *ah, u16 regdomain);
1075/* Protocol Control Unit Functions */ 1068/* Protocol Control Unit Functions */
@@ -1098,7 +1091,6 @@ extern int ath5k_hw_set_beacon_timers(struct ath5k_hw *ah, const struct ath5k_be
1098extern void ath5k_hw_reset_beacon(struct ath5k_hw *ah); 1091extern void ath5k_hw_reset_beacon(struct ath5k_hw *ah);
1099extern int ath5k_hw_beaconq_finish(struct ath5k_hw *ah, unsigned long phys_addr); 1092extern int ath5k_hw_beaconq_finish(struct ath5k_hw *ah, unsigned long phys_addr);
1100#endif 1093#endif
1101extern void ath5k_hw_update_mib_counters(struct ath5k_hw *ah, struct ath5k_mib_stats *statistics);
1102/* ACK bit rate */ 1094/* ACK bit rate */
1103void ath5k_hw_set_ack_bitrate_high(struct ath5k_hw *ah, bool high); 1095void ath5k_hw_set_ack_bitrate_high(struct ath5k_hw *ah, bool high);
1104/* ACK/CTS Timeouts */ 1096/* ACK/CTS Timeouts */
@@ -1129,8 +1121,6 @@ extern int ath5k_hw_set_gpio_input(struct ath5k_hw *ah, u32 gpio);
1129extern u32 ath5k_hw_get_gpio(struct ath5k_hw *ah, u32 gpio); 1121extern u32 ath5k_hw_get_gpio(struct ath5k_hw *ah, u32 gpio);
1130extern int ath5k_hw_set_gpio(struct ath5k_hw *ah, u32 gpio, u32 val); 1122extern int ath5k_hw_set_gpio(struct ath5k_hw *ah, u32 gpio, u32 val);
1131extern void ath5k_hw_set_gpio_intr(struct ath5k_hw *ah, unsigned int gpio, u32 interrupt_level); 1123extern void ath5k_hw_set_gpio_intr(struct ath5k_hw *ah, unsigned int gpio, u32 interrupt_level);
1132/* Regulatory Domain/Channels Setup */
1133extern u16 ath5k_get_regdomain(struct ath5k_hw *ah);
1134/* Misc functions */ 1124/* Misc functions */
1135extern int ath5k_hw_get_capability(struct ath5k_hw *ah, enum ath5k_capability_type cap_type, u32 capability, u32 *result); 1125extern int ath5k_hw_get_capability(struct ath5k_hw *ah, enum ath5k_capability_type cap_type, u32 capability, u32 *result);
1136 1126
diff --git a/drivers/net/wireless/ath5k/base.c b/drivers/net/wireless/ath5k/base.c
index bef967ce34a6..e18305b781c9 100644
--- a/drivers/net/wireless/ath5k/base.c
+++ b/drivers/net/wireless/ath5k/base.c
@@ -80,7 +80,7 @@ MODULE_AUTHOR("Nick Kossifidis");
80MODULE_DESCRIPTION("Support for 5xxx series of Atheros 802.11 wireless LAN cards."); 80MODULE_DESCRIPTION("Support for 5xxx series of Atheros 802.11 wireless LAN cards.");
81MODULE_SUPPORTED_DEVICE("Atheros 5xxx WLAN cards"); 81MODULE_SUPPORTED_DEVICE("Atheros 5xxx WLAN cards");
82MODULE_LICENSE("Dual BSD/GPL"); 82MODULE_LICENSE("Dual BSD/GPL");
83MODULE_VERSION("0.1.1 (EXPERIMENTAL)"); 83MODULE_VERSION("0.5.0 (EXPERIMENTAL)");
84 84
85 85
86/* Known PCI ids */ 86/* Known PCI ids */
@@ -118,12 +118,15 @@ static struct ath5k_srev_name srev_names[] = {
118 { "5212", AR5K_VERSION_VER, AR5K_SREV_VER_AR5212 }, 118 { "5212", AR5K_VERSION_VER, AR5K_SREV_VER_AR5212 },
119 { "5213", AR5K_VERSION_VER, AR5K_SREV_VER_AR5213 }, 119 { "5213", AR5K_VERSION_VER, AR5K_SREV_VER_AR5213 },
120 { "5213A", AR5K_VERSION_VER, AR5K_SREV_VER_AR5213A }, 120 { "5213A", AR5K_VERSION_VER, AR5K_SREV_VER_AR5213A },
121 { "2413", AR5K_VERSION_VER, AR5K_SREV_VER_AR2413 },
122 { "2414", AR5K_VERSION_VER, AR5K_SREV_VER_AR2414 },
121 { "2424", AR5K_VERSION_VER, AR5K_SREV_VER_AR2424 }, 123 { "2424", AR5K_VERSION_VER, AR5K_SREV_VER_AR2424 },
122 { "5424", AR5K_VERSION_VER, AR5K_SREV_VER_AR5424 }, 124 { "5424", AR5K_VERSION_VER, AR5K_SREV_VER_AR5424 },
123 { "5413", AR5K_VERSION_VER, AR5K_SREV_VER_AR5413 }, 125 { "5413", AR5K_VERSION_VER, AR5K_SREV_VER_AR5413 },
124 { "5414", AR5K_VERSION_VER, AR5K_SREV_VER_AR5414 }, 126 { "5414", AR5K_VERSION_VER, AR5K_SREV_VER_AR5414 },
125 { "5416", AR5K_VERSION_VER, AR5K_SREV_VER_AR5416 }, 127 { "5416", AR5K_VERSION_VER, AR5K_SREV_VER_AR5416 },
126 { "5418", AR5K_VERSION_VER, AR5K_SREV_VER_AR5418 }, 128 { "5418", AR5K_VERSION_VER, AR5K_SREV_VER_AR5418 },
129 { "2425", AR5K_VERSION_VER, AR5K_SREV_VER_AR2425 },
127 { "xxxxx", AR5K_VERSION_VER, AR5K_SREV_UNKNOWN }, 130 { "xxxxx", AR5K_VERSION_VER, AR5K_SREV_UNKNOWN },
128 { "5110", AR5K_VERSION_RAD, AR5K_SREV_RAD_5110 }, 131 { "5110", AR5K_VERSION_RAD, AR5K_SREV_RAD_5110 },
129 { "5111", AR5K_VERSION_RAD, AR5K_SREV_RAD_5111 }, 132 { "5111", AR5K_VERSION_RAD, AR5K_SREV_RAD_5111 },
@@ -132,6 +135,7 @@ static struct ath5k_srev_name srev_names[] = {
132 { "5112A", AR5K_VERSION_RAD, AR5K_SREV_RAD_5112A }, 135 { "5112A", AR5K_VERSION_RAD, AR5K_SREV_RAD_5112A },
133 { "2112", AR5K_VERSION_RAD, AR5K_SREV_RAD_2112 }, 136 { "2112", AR5K_VERSION_RAD, AR5K_SREV_RAD_2112 },
134 { "2112A", AR5K_VERSION_RAD, AR5K_SREV_RAD_2112A }, 137 { "2112A", AR5K_VERSION_RAD, AR5K_SREV_RAD_2112A },
138 { "SChip", AR5K_VERSION_RAD, AR5K_SREV_RAD_SC0 },
135 { "SChip", AR5K_VERSION_RAD, AR5K_SREV_RAD_SC1 }, 139 { "SChip", AR5K_VERSION_RAD, AR5K_SREV_RAD_SC1 },
136 { "SChip", AR5K_VERSION_RAD, AR5K_SREV_RAD_SC2 }, 140 { "SChip", AR5K_VERSION_RAD, AR5K_SREV_RAD_SC2 },
137 { "5133", AR5K_VERSION_RAD, AR5K_SREV_RAD_5133 }, 141 { "5133", AR5K_VERSION_RAD, AR5K_SREV_RAD_5133 },
@@ -240,6 +244,8 @@ static int ath5k_chan_set(struct ath5k_softc *sc,
240static void ath5k_setcurmode(struct ath5k_softc *sc, 244static void ath5k_setcurmode(struct ath5k_softc *sc,
241 unsigned int mode); 245 unsigned int mode);
242static void ath5k_mode_setup(struct ath5k_softc *sc); 246static void ath5k_mode_setup(struct ath5k_softc *sc);
247static void ath5k_set_total_hw_rates(struct ath5k_softc *sc);
248
243/* Descriptor setup */ 249/* Descriptor setup */
244static int ath5k_desc_alloc(struct ath5k_softc *sc, 250static int ath5k_desc_alloc(struct ath5k_softc *sc,
245 struct pci_dev *pdev); 251 struct pci_dev *pdev);
@@ -278,7 +284,8 @@ static int ath5k_rx_start(struct ath5k_softc *sc);
278static void ath5k_rx_stop(struct ath5k_softc *sc); 284static void ath5k_rx_stop(struct ath5k_softc *sc);
279static unsigned int ath5k_rx_decrypted(struct ath5k_softc *sc, 285static unsigned int ath5k_rx_decrypted(struct ath5k_softc *sc,
280 struct ath5k_desc *ds, 286 struct ath5k_desc *ds,
281 struct sk_buff *skb); 287 struct sk_buff *skb,
288 struct ath5k_rx_status *rs);
282static void ath5k_tasklet_rx(unsigned long data); 289static void ath5k_tasklet_rx(unsigned long data);
283/* Tx handling */ 290/* Tx handling */
284static void ath5k_tx_processq(struct ath5k_softc *sc, 291static void ath5k_tx_processq(struct ath5k_softc *sc,
@@ -511,35 +518,46 @@ ath5k_pci_probe(struct pci_dev *pdev,
511 sc->ah->ah_mac_srev, 518 sc->ah->ah_mac_srev,
512 sc->ah->ah_phy_revision); 519 sc->ah->ah_phy_revision);
513 520
514 if(!sc->ah->ah_single_chip){ 521 if (!sc->ah->ah_single_chip) {
515 /* Single chip radio (!RF5111) */ 522 /* Single chip radio (!RF5111) */
516 if(sc->ah->ah_radio_5ghz_revision && !sc->ah->ah_radio_2ghz_revision) { 523 if (sc->ah->ah_radio_5ghz_revision &&
524 !sc->ah->ah_radio_2ghz_revision) {
517 /* No 5GHz support -> report 2GHz radio */ 525 /* No 5GHz support -> report 2GHz radio */
518 if(!test_bit(MODE_IEEE80211A, sc->ah->ah_capabilities.cap_mode)){ 526 if (!test_bit(AR5K_MODE_11A,
527 sc->ah->ah_capabilities.cap_mode)) {
519 ATH5K_INFO(sc, "RF%s 2GHz radio found (0x%x)\n", 528 ATH5K_INFO(sc, "RF%s 2GHz radio found (0x%x)\n",
520 ath5k_chip_name(AR5K_VERSION_RAD,sc->ah->ah_radio_5ghz_revision), 529 ath5k_chip_name(AR5K_VERSION_RAD,
521 sc->ah->ah_radio_5ghz_revision); 530 sc->ah->ah_radio_5ghz_revision),
522 /* No 2GHz support (5110 and some 5Ghz only cards) -> report 5Ghz radio */ 531 sc->ah->ah_radio_5ghz_revision);
523 } else if(!test_bit(MODE_IEEE80211B, sc->ah->ah_capabilities.cap_mode)){ 532 /* No 2GHz support (5110 and some
533 * 5Ghz only cards) -> report 5Ghz radio */
534 } else if (!test_bit(AR5K_MODE_11B,
535 sc->ah->ah_capabilities.cap_mode)) {
524 ATH5K_INFO(sc, "RF%s 5GHz radio found (0x%x)\n", 536 ATH5K_INFO(sc, "RF%s 5GHz radio found (0x%x)\n",
525 ath5k_chip_name(AR5K_VERSION_RAD,sc->ah->ah_radio_5ghz_revision), 537 ath5k_chip_name(AR5K_VERSION_RAD,
526 sc->ah->ah_radio_5ghz_revision); 538 sc->ah->ah_radio_5ghz_revision),
539 sc->ah->ah_radio_5ghz_revision);
527 /* Multiband radio */ 540 /* Multiband radio */
528 } else { 541 } else {
529 ATH5K_INFO(sc, "RF%s multiband radio found" 542 ATH5K_INFO(sc, "RF%s multiband radio found"
530 " (0x%x)\n", 543 " (0x%x)\n",
531 ath5k_chip_name(AR5K_VERSION_RAD,sc->ah->ah_radio_5ghz_revision), 544 ath5k_chip_name(AR5K_VERSION_RAD,
532 sc->ah->ah_radio_5ghz_revision); 545 sc->ah->ah_radio_5ghz_revision),
546 sc->ah->ah_radio_5ghz_revision);
533 } 547 }
534 } 548 }
535 /* Multi chip radio (RF5111 - RF2111) -> report both 2GHz/5GHz radios */ 549 /* Multi chip radio (RF5111 - RF2111) ->
536 else if(sc->ah->ah_radio_5ghz_revision && sc->ah->ah_radio_2ghz_revision){ 550 * report both 2GHz/5GHz radios */
551 else if (sc->ah->ah_radio_5ghz_revision &&
552 sc->ah->ah_radio_2ghz_revision){
537 ATH5K_INFO(sc, "RF%s 5GHz radio found (0x%x)\n", 553 ATH5K_INFO(sc, "RF%s 5GHz radio found (0x%x)\n",
538 ath5k_chip_name(AR5K_VERSION_RAD,sc->ah->ah_radio_5ghz_revision), 554 ath5k_chip_name(AR5K_VERSION_RAD,
539 sc->ah->ah_radio_5ghz_revision); 555 sc->ah->ah_radio_5ghz_revision),
556 sc->ah->ah_radio_5ghz_revision);
540 ATH5K_INFO(sc, "RF%s 2GHz radio found (0x%x)\n", 557 ATH5K_INFO(sc, "RF%s 2GHz radio found (0x%x)\n",
541 ath5k_chip_name(AR5K_VERSION_RAD,sc->ah->ah_radio_2ghz_revision), 558 ath5k_chip_name(AR5K_VERSION_RAD,
542 sc->ah->ah_radio_2ghz_revision); 559 sc->ah->ah_radio_2ghz_revision),
560 sc->ah->ah_radio_2ghz_revision);
543 } 561 }
544 } 562 }
545 563
@@ -693,11 +711,14 @@ ath5k_attach(struct pci_dev *pdev, struct ieee80211_hw *hw)
693 goto err; 711 goto err;
694 } 712 }
695 713
714 /* Set *_rates so we can map hw rate index */
715 ath5k_set_total_hw_rates(sc);
716
696 /* NB: setup here so ath5k_rate_update is happy */ 717 /* NB: setup here so ath5k_rate_update is happy */
697 if (test_bit(MODE_IEEE80211A, ah->ah_modes)) 718 if (test_bit(AR5K_MODE_11A, ah->ah_modes))
698 ath5k_setcurmode(sc, MODE_IEEE80211A); 719 ath5k_setcurmode(sc, AR5K_MODE_11A);
699 else 720 else
700 ath5k_setcurmode(sc, MODE_IEEE80211B); 721 ath5k_setcurmode(sc, AR5K_MODE_11B);
701 722
702 /* 723 /*
703 * Allocate tx+rx descriptors and populate the lists. 724 * Allocate tx+rx descriptors and populate the lists.
@@ -837,12 +858,9 @@ ath5k_copy_rates(struct ieee80211_rate *rates,
837 return 0; 858 return 0;
838 859
839 for (i = 0, count = 0; i < rt->rate_count && max > 0; i++) { 860 for (i = 0, count = 0; i < rt->rate_count && max > 0; i++) {
840 if (!rt->rates[i].valid) 861 rates[count].bitrate = rt->rates[i].rate_kbps / 100;
841 continue; 862 rates[count].hw_value = rt->rates[i].rate_code;
842 rates->rate = rt->rates[i].rate_kbps / 100; 863 rates[count].flags = rt->rates[i].modulation;
843 rates->val = rt->rates[i].rate_code;
844 rates->flags = rt->rates[i].modulation;
845 rates++;
846 count++; 864 count++;
847 max--; 865 max--;
848 } 866 }
@@ -856,43 +874,22 @@ ath5k_copy_channels(struct ath5k_hw *ah,
856 unsigned int mode, 874 unsigned int mode,
857 unsigned int max) 875 unsigned int max)
858{ 876{
859 static const struct { unsigned int mode, mask, chan; } map[] = { 877 unsigned int i, count, size, chfreq, freq, ch;
860 [MODE_IEEE80211A] = { CHANNEL_OFDM, CHANNEL_OFDM | CHANNEL_TURBO, CHANNEL_A },
861 [MODE_ATHEROS_TURBO] = { CHANNEL_OFDM|CHANNEL_TURBO, CHANNEL_OFDM | CHANNEL_TURBO, CHANNEL_T },
862 [MODE_IEEE80211B] = { CHANNEL_CCK, CHANNEL_CCK, CHANNEL_B },
863 [MODE_IEEE80211G] = { CHANNEL_OFDM, CHANNEL_OFDM, CHANNEL_G },
864 [MODE_ATHEROS_TURBOG] = { CHANNEL_OFDM | CHANNEL_TURBO, CHANNEL_OFDM | CHANNEL_TURBO, CHANNEL_TG },
865 };
866 static const struct ath5k_regchannel chans_2ghz[] =
867 IEEE80211_CHANNELS_2GHZ;
868 static const struct ath5k_regchannel chans_5ghz[] =
869 IEEE80211_CHANNELS_5GHZ;
870 const struct ath5k_regchannel *chans;
871 enum ath5k_regdom dmn;
872 unsigned int i, count, size, chfreq, all, f, ch;
873 878
874 if (!test_bit(mode, ah->ah_modes)) 879 if (!test_bit(mode, ah->ah_modes))
875 return 0; 880 return 0;
876 881
877 all = ah->ah_regdomain == DMN_DEFAULT || CHAN_DEBUG == 1;
878
879 switch (mode) { 882 switch (mode) {
880 case MODE_IEEE80211A: 883 case AR5K_MODE_11A:
881 case MODE_ATHEROS_TURBO: 884 case AR5K_MODE_11A_TURBO:
882 /* 1..220, but 2GHz frequencies are filtered by check_channel */ 885 /* 1..220, but 2GHz frequencies are filtered by check_channel */
883 size = all ? 220 : ARRAY_SIZE(chans_5ghz); 886 size = 220 ;
884 chans = chans_5ghz;
885 dmn = ath5k_regdom2flag(ah->ah_regdomain,
886 IEEE80211_CHANNELS_5GHZ_MIN);
887 chfreq = CHANNEL_5GHZ; 887 chfreq = CHANNEL_5GHZ;
888 break; 888 break;
889 case MODE_IEEE80211B: 889 case AR5K_MODE_11B:
890 case MODE_IEEE80211G: 890 case AR5K_MODE_11G:
891 case MODE_ATHEROS_TURBOG: 891 case AR5K_MODE_11G_TURBO:
892 size = all ? 26 : ARRAY_SIZE(chans_2ghz); 892 size = 26;
893 chans = chans_2ghz;
894 dmn = ath5k_regdom2flag(ah->ah_regdomain,
895 IEEE80211_CHANNELS_2GHZ_MIN);
896 chfreq = CHANNEL_2GHZ; 893 chfreq = CHANNEL_2GHZ;
897 break; 894 break;
898 default: 895 default:
@@ -901,25 +898,31 @@ ath5k_copy_channels(struct ath5k_hw *ah,
901 } 898 }
902 899
903 for (i = 0, count = 0; i < size && max > 0; i++) { 900 for (i = 0, count = 0; i < size && max > 0; i++) {
904 ch = all ? i + 1 : chans[i].chan; 901 ch = i + 1 ;
905 f = ath5k_ieee2mhz(ch); 902 freq = ath5k_ieee2mhz(ch);
906 /* Check if channel is supported by the chipset */
907 if (!ath5k_channel_ok(ah, f, chfreq))
908 continue;
909 903
910 /* Match regulation domain */ 904 /* Check if channel is supported by the chipset */
911 if (!all && !(IEEE80211_DMN(chans[i].domain) & 905 if (!ath5k_channel_ok(ah, freq, chfreq))
912 IEEE80211_DMN(dmn)))
913 continue; 906 continue;
914 907
915 if (!all && (chans[i].mode & map[mode].mask) != map[mode].mode) 908 /* Write channel info and increment counter */
916 continue; 909 channels[count].center_freq = freq;
910 channels[count].band = (chfreq == CHANNEL_2GHZ) ?
911 IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
912 switch (mode) {
913 case AR5K_MODE_11A:
914 case AR5K_MODE_11G:
915 channels[count].hw_value = chfreq | CHANNEL_OFDM;
916 break;
917 case AR5K_MODE_11A_TURBO:
918 case AR5K_MODE_11G_TURBO:
919 channels[count].hw_value = chfreq |
920 CHANNEL_OFDM | CHANNEL_TURBO;
921 break;
922 case AR5K_MODE_11B:
923 channels[count].hw_value = CHANNEL_B;
924 }
917 925
918 /* Write channel and increment counter */
919 channels->chan = ch;
920 channels->freq = f;
921 channels->val = map[mode].chan;
922 channels++;
923 count++; 926 count++;
924 max--; 927 max--;
925 } 928 }
@@ -927,95 +930,78 @@ ath5k_copy_channels(struct ath5k_hw *ah,
927 return count; 930 return count;
928} 931}
929 932
930/* Only tries to register modes our EEPROM says it can support */
931#define REGISTER_MODE(m) do { \
932 ret = ath5k_register_mode(hw, m); \
933 if (ret) \
934 return ret; \
935} while (0) \
936
937static inline int
938ath5k_register_mode(struct ieee80211_hw *hw, u8 m)
939{
940 struct ath5k_softc *sc = hw->priv;
941 struct ieee80211_hw_mode *modes = sc->modes;
942 unsigned int i;
943 int ret;
944
945 if (!test_bit(m, sc->ah->ah_capabilities.cap_mode))
946 return 0;
947
948 for (i = 0; i < NUM_DRIVER_MODES; i++) {
949 if (modes[i].mode != m || !modes[i].num_channels)
950 continue;
951 ret = ieee80211_register_hwmode(hw, &modes[i]);
952 if (ret) {
953 ATH5K_ERR(sc, "can't register hwmode %u\n", m);
954 return ret;
955 }
956 return 0;
957 }
958 BUG();
959}
960
961static int 933static int
962ath5k_getchannels(struct ieee80211_hw *hw) 934ath5k_getchannels(struct ieee80211_hw *hw)
963{ 935{
964 struct ath5k_softc *sc = hw->priv; 936 struct ath5k_softc *sc = hw->priv;
965 struct ath5k_hw *ah = sc->ah; 937 struct ath5k_hw *ah = sc->ah;
966 struct ieee80211_hw_mode *modes = sc->modes; 938 struct ieee80211_supported_band *sbands = sc->sbands;
967 unsigned int i, max_r, max_c; 939 const struct ath5k_rate_table *hw_rates;
968 int ret; 940 unsigned int max_r, max_c, count_r, count_c;
941 int mode2g = AR5K_MODE_11G;
969 942
970 BUILD_BUG_ON(ARRAY_SIZE(sc->modes) < 3); 943 BUILD_BUG_ON(ARRAY_SIZE(sc->sbands) < IEEE80211_NUM_BANDS);
971
972 /* The order here does not matter */
973 modes[0].mode = MODE_IEEE80211G;
974 modes[1].mode = MODE_IEEE80211B;
975 modes[2].mode = MODE_IEEE80211A;
976 944
977 max_r = ARRAY_SIZE(sc->rates); 945 max_r = ARRAY_SIZE(sc->rates);
978 max_c = ARRAY_SIZE(sc->channels); 946 max_c = ARRAY_SIZE(sc->channels);
947 count_r = count_c = 0;
979 948
980 for (i = 0; i < NUM_DRIVER_MODES; i++) { 949 /* 2GHz band */
981 struct ieee80211_hw_mode *mode = &modes[i]; 950 if (!test_bit(AR5K_MODE_11G, sc->ah->ah_capabilities.cap_mode)) {
982 const struct ath5k_rate_table *hw_rates; 951 mode2g = AR5K_MODE_11B;
952 if (!test_bit(AR5K_MODE_11B,
953 sc->ah->ah_capabilities.cap_mode))
954 mode2g = -1;
955 }
983 956
984 if (i == 0) { 957 if (mode2g > 0) {
985 modes[0].rates = sc->rates; 958 struct ieee80211_supported_band *sband =
986 modes->channels = sc->channels; 959 &sbands[IEEE80211_BAND_2GHZ];
987 } else { 960
988 struct ieee80211_hw_mode *prev_mode = &modes[i-1]; 961 sband->bitrates = sc->rates;
989 int prev_num_r = prev_mode->num_rates; 962 sband->channels = sc->channels;
990 int prev_num_c = prev_mode->num_channels; 963
991 mode->rates = &prev_mode->rates[prev_num_r]; 964 sband->band = IEEE80211_BAND_2GHZ;
992 mode->channels = &prev_mode->channels[prev_num_c]; 965 sband->n_channels = ath5k_copy_channels(ah, sband->channels,
993 } 966 mode2g, max_c);
967
968 hw_rates = ath5k_hw_get_rate_table(ah, mode2g);
969 sband->n_bitrates = ath5k_copy_rates(sband->bitrates,
970 hw_rates, max_r);
971
972 count_c = sband->n_channels;
973 count_r = sband->n_bitrates;
974
975 hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband;
976
977 max_r -= count_r;
978 max_c -= count_c;
994 979
995 hw_rates = ath5k_hw_get_rate_table(ah, mode->mode);
996 mode->num_rates = ath5k_copy_rates(mode->rates, hw_rates,
997 max_r);
998 mode->num_channels = ath5k_copy_channels(ah, mode->channels,
999 mode->mode, max_c);
1000 max_r -= mode->num_rates;
1001 max_c -= mode->num_channels;
1002 } 980 }
1003 981
1004 /* We try to register all modes this driver supports. We don't bother 982 /* 5GHz band */
1005 * with MODE_IEEE80211B for AR5212 as MODE_IEEE80211G already accounts
1006 * for that as per mac80211. Then, REGISTER_MODE() will actually
1007 * check the eeprom reading for more reliable capability information.
1008 * Order matters here as per mac80211's latest preference. This will
1009 * all hopefully soon go away. */
1010 983
1011 REGISTER_MODE(MODE_IEEE80211G); 984 if (test_bit(AR5K_MODE_11A, sc->ah->ah_capabilities.cap_mode)) {
1012 if (ah->ah_version != AR5K_AR5212) 985 struct ieee80211_supported_band *sband =
1013 REGISTER_MODE(MODE_IEEE80211B); 986 &sbands[IEEE80211_BAND_5GHZ];
1014 REGISTER_MODE(MODE_IEEE80211A);
1015 987
1016 ath5k_debug_dump_modes(sc, modes); 988 sband->bitrates = &sc->rates[count_r];
989 sband->channels = &sc->channels[count_c];
1017 990
1018 return ret; 991 sband->band = IEEE80211_BAND_5GHZ;
992 sband->n_channels = ath5k_copy_channels(ah, sband->channels,
993 AR5K_MODE_11A, max_c);
994
995 hw_rates = ath5k_hw_get_rate_table(ah, AR5K_MODE_11A);
996 sband->n_bitrates = ath5k_copy_rates(sband->bitrates,
997 hw_rates, max_r);
998
999 hw->wiphy->bands[IEEE80211_BAND_5GHZ] = sband;
1000 }
1001
1002 ath5k_debug_dump_bands(sc);
1003
1004 return 0;
1019} 1005}
1020 1006
1021/* 1007/*
@@ -1030,11 +1016,15 @@ ath5k_chan_set(struct ath5k_softc *sc, struct ieee80211_channel *chan)
1030 struct ath5k_hw *ah = sc->ah; 1016 struct ath5k_hw *ah = sc->ah;
1031 int ret; 1017 int ret;
1032 1018
1033 ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "%u (%u MHz) -> %u (%u MHz)\n", 1019 ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "(%u MHz) -> (%u MHz)\n",
1034 sc->curchan->chan, sc->curchan->freq, 1020 sc->curchan->center_freq, chan->center_freq);
1035 chan->chan, chan->freq); 1021
1022 if (chan->center_freq != sc->curchan->center_freq ||
1023 chan->hw_value != sc->curchan->hw_value) {
1024
1025 sc->curchan = chan;
1026 sc->curband = &sc->sbands[chan->band];
1036 1027
1037 if (chan->freq != sc->curchan->freq || chan->val != sc->curchan->val) {
1038 /* 1028 /*
1039 * To switch channels clear any pending DMA operations; 1029 * To switch channels clear any pending DMA operations;
1040 * wait long enough for the RX fifo to drain, reset the 1030 * wait long enough for the RX fifo to drain, reset the
@@ -1044,13 +1034,13 @@ ath5k_chan_set(struct ath5k_softc *sc, struct ieee80211_channel *chan)
1044 ath5k_hw_set_intr(ah, 0); /* disable interrupts */ 1034 ath5k_hw_set_intr(ah, 0); /* disable interrupts */
1045 ath5k_txq_cleanup(sc); /* clear pending tx frames */ 1035 ath5k_txq_cleanup(sc); /* clear pending tx frames */
1046 ath5k_rx_stop(sc); /* turn off frame recv */ 1036 ath5k_rx_stop(sc); /* turn off frame recv */
1047 ret = ath5k_hw_reset(ah, sc->opmode, chan, true); 1037 ret = ath5k_hw_reset(ah, sc->opmode, sc->curchan, true);
1048 if (ret) { 1038 if (ret) {
1049 ATH5K_ERR(sc, "%s: unable to reset channel %u " 1039 ATH5K_ERR(sc, "%s: unable to reset channel "
1050 "(%u Mhz)\n", __func__, chan->chan, chan->freq); 1040 "(%u Mhz)\n", __func__, chan->center_freq);
1051 return ret; 1041 return ret;
1052 } 1042 }
1053 sc->curchan = chan; 1043
1054 ath5k_hw_set_txpower_limit(sc->ah, 0); 1044 ath5k_hw_set_txpower_limit(sc->ah, 0);
1055 1045
1056 /* 1046 /*
@@ -1081,6 +1071,9 @@ ath5k_chan_set(struct ath5k_softc *sc, struct ieee80211_channel *chan)
1081 return 0; 1071 return 0;
1082} 1072}
1083 1073
1074/*
1075 * TODO: CLEAN THIS !!!
1076 */
1084static void 1077static void
1085ath5k_setcurmode(struct ath5k_softc *sc, unsigned int mode) 1078ath5k_setcurmode(struct ath5k_softc *sc, unsigned int mode)
1086{ 1079{
@@ -1121,10 +1114,6 @@ ath5k_setcurmode(struct ath5k_softc *sc, unsigned int mode)
1121 continue; 1114 continue;
1122 } 1115 }
1123 sc->hwmap[i].txflags = IEEE80211_RADIOTAP_F_DATAPAD; 1116 sc->hwmap[i].txflags = IEEE80211_RADIOTAP_F_DATAPAD;
1124 if (SHPREAMBLE_FLAG(ix) || rt->rates[ix].modulation ==
1125 IEEE80211_RATE_OFDM)
1126 sc->hwmap[i].txflags |=
1127 IEEE80211_RADIOTAP_F_SHORTPRE;
1128 /* receive frames include FCS */ 1117 /* receive frames include FCS */
1129 sc->hwmap[i].rxflags = sc->hwmap[i].txflags | 1118 sc->hwmap[i].rxflags = sc->hwmap[i].txflags |
1130 IEEE80211_RADIOTAP_F_FCS; 1119 IEEE80211_RADIOTAP_F_FCS;
@@ -1142,6 +1131,12 @@ ath5k_setcurmode(struct ath5k_softc *sc, unsigned int mode)
1142 } 1131 }
1143 1132
1144 sc->curmode = mode; 1133 sc->curmode = mode;
1134
1135 if (mode == AR5K_MODE_11A) {
1136 sc->curband = &sc->sbands[IEEE80211_BAND_5GHZ];
1137 } else {
1138 sc->curband = &sc->sbands[IEEE80211_BAND_2GHZ];
1139 }
1145} 1140}
1146 1141
1147static void 1142static void
@@ -1164,6 +1159,72 @@ ath5k_mode_setup(struct ath5k_softc *sc)
1164 ATH5K_DBG(sc, ATH5K_DEBUG_MODE, "RX filter 0x%x\n", rfilt); 1159 ATH5K_DBG(sc, ATH5K_DEBUG_MODE, "RX filter 0x%x\n", rfilt);
1165} 1160}
1166 1161
1162/*
1163 * Match the hw provided rate index (through descriptors)
1164 * to an index for sc->curband->bitrates, so it can be used
1165 * by the stack.
1166 *
1167 * This one is a little bit tricky but I think I'm right
1168 * about this...
1169 *
1170 * We have 4 rate tables in the following order:
1171 * XR (4 rates)
1172 * 802.11a (8 rates)
1173 * 802.11b (4 rates)
1174 * 802.11g (12 rates)
1175 * that make the hw rate table.
1176 *
1177 * Let's take a 5211 for example that supports a and b modes only.
1178 * First comes the 802.11a table and then 802.11b (total 12 rates).
1179 * When hw returns e.g. 11 it points to the last 802.11b rate (11 Mbit);
1180 * if it returns 2 it points to the second 802.11a rate, etc.
1181 *
1182 * The same goes for the 5212, which has xr/a/b/g support (total 28 rates).
1183 * First comes the XR table, then 802.11a, 802.11b and 802.11g.
1184 * When hw returns e.g. 27 it points to the last 802.11g rate (54 Mbit), etc.
1185 */
1186static void
1187ath5k_set_total_hw_rates(struct ath5k_softc *sc) {
1188
1189 struct ath5k_hw *ah = sc->ah;
1190
1191 if (test_bit(AR5K_MODE_11A, ah->ah_modes))
1192 sc->a_rates = 8;
1193
1194 if (test_bit(AR5K_MODE_11B, ah->ah_modes))
1195 sc->b_rates = 4;
1196
1197 if (test_bit(AR5K_MODE_11G, ah->ah_modes))
1198 sc->g_rates = 12;
1199
1200 /* XXX: Need to see what happens when
1201 xr disable bits in eeprom are set */
1202 if (ah->ah_version >= AR5K_AR5212)
1203 sc->xr_rates = 4;
1204
1205}
1206
1207static inline int
1208ath5k_hw_to_driver_rix(struct ath5k_softc *sc, int hw_rix) {
1209
1210 int mac80211_rix;
1211
1212 if(sc->curband->band == IEEE80211_BAND_2GHZ) {
1213 /* We setup a g ratetable for both b/g modes */
1214 mac80211_rix =
1215 hw_rix - sc->b_rates - sc->a_rates - sc->xr_rates;
1216 } else {
1217 mac80211_rix = hw_rix - sc->xr_rates;
1218 }
1219
1220 /* Something went wrong, fallback to basic rate for this band */
1221 if ((mac80211_rix >= sc->curband->n_bitrates) ||
1222 (mac80211_rix <= 0 ))
1223 mac80211_rix = 1;
1224
1225 return mac80211_rix;
1226}
1227
1167 1228
1168 1229
1169 1230
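A short worked example of the hw_rix to mac80211 rate index mapping introduced above. The rate counts (4 XR, 8 802.11a, 4 802.11b, 12 802.11g) are taken from the comment in the hunk; the helper below only illustrates the arithmetic and is not part of the patch or the driver.

#include <stdio.h>

/* Illustration of the ath5k_hw_to_driver_rix() arithmetic with AR5212
 * rate counts hard-coded: 4 XR, 8 802.11a, 4 802.11b and 12 802.11g. */
static int example_hw_to_driver_rix(int hw_rix, int band_is_2ghz)
{
	const int xr_rates = 4, a_rates = 8, b_rates = 4;

	return band_is_2ghz ? hw_rix - b_rates - a_rates - xr_rates
			    : hw_rix - xr_rates;
}

int main(void)
{
	/* hw_rix 27 is the last entry of the combined hw table, i.e. the
	 * last 802.11g rate (54 Mbit): 27 - 4 - 8 - 4 = 11, which indexes
	 * the final entry of the 12-rate g bitrate table. */
	printf("2 GHz, hw_rix 27 -> %d\n", example_hw_to_driver_rix(27, 1));
	/* In the 5 GHz band only the XR table precedes the 802.11a rates,
	 * so hw_rix 5 maps to 5 - 4 = 1, the second 802.11a bitrate. */
	printf("5 GHz, hw_rix 5  -> %d\n", example_hw_to_driver_rix(5, 0));
	return 0;
}

As the hunk shows, the driver falls back to index 1 (the band's basic rate) whenever the result lands outside the valid range (<= 0 or >= n_bitrates).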
@@ -1268,7 +1329,8 @@ ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf,
1268 1329
1269 ret = ah->ah_setup_tx_desc(ah, ds, pktlen, 1330 ret = ah->ah_setup_tx_desc(ah, ds, pktlen,
1270 ieee80211_get_hdrlen_from_skb(skb), AR5K_PKT_TYPE_NORMAL, 1331 ieee80211_get_hdrlen_from_skb(skb), AR5K_PKT_TYPE_NORMAL,
1271 (ctl->power_level * 2), ctl->tx_rate, ctl->retry_limit, keyidx, 0, flags, 0, 0); 1332 (sc->power_level * 2), ctl->tx_rate->hw_value,
1333 ctl->retry_limit, keyidx, 0, flags, 0, 0);
1272 if (ret) 1334 if (ret)
1273 goto err_unmap; 1335 goto err_unmap;
1274 1336
@@ -1503,8 +1565,7 @@ ath5k_txq_drainq(struct ath5k_softc *sc, struct ath5k_txq *txq)
1503 */ 1565 */
1504 spin_lock_bh(&txq->lock); 1566 spin_lock_bh(&txq->lock);
1505 list_for_each_entry_safe(bf, bf0, &txq->q, list) { 1567 list_for_each_entry_safe(bf, bf0, &txq->q, list) {
1506 ath5k_debug_printtxbuf(sc, bf, !sc->ah->ah_proc_tx_desc(sc->ah, 1568 ath5k_debug_printtxbuf(sc, bf);
1507 bf->desc));
1508 1569
1509 ath5k_txbuf_free(sc, bf); 1570 ath5k_txbuf_free(sc, bf);
1510 1571
@@ -1629,20 +1690,20 @@ ath5k_rx_stop(struct ath5k_softc *sc)
1629 1690
1630static unsigned int 1691static unsigned int
1631ath5k_rx_decrypted(struct ath5k_softc *sc, struct ath5k_desc *ds, 1692ath5k_rx_decrypted(struct ath5k_softc *sc, struct ath5k_desc *ds,
1632 struct sk_buff *skb) 1693 struct sk_buff *skb, struct ath5k_rx_status *rs)
1633{ 1694{
1634 struct ieee80211_hdr *hdr = (void *)skb->data; 1695 struct ieee80211_hdr *hdr = (void *)skb->data;
1635 unsigned int keyix, hlen = ieee80211_get_hdrlen_from_skb(skb); 1696 unsigned int keyix, hlen = ieee80211_get_hdrlen_from_skb(skb);
1636 1697
1637 if (!(ds->ds_rxstat.rs_status & AR5K_RXERR_DECRYPT) && 1698 if (!(rs->rs_status & AR5K_RXERR_DECRYPT) &&
1638 ds->ds_rxstat.rs_keyix != AR5K_RXKEYIX_INVALID) 1699 rs->rs_keyix != AR5K_RXKEYIX_INVALID)
1639 return RX_FLAG_DECRYPTED; 1700 return RX_FLAG_DECRYPTED;
1640 1701
1641 /* Apparently when a default key is used to decrypt the packet 1702 /* Apparently when a default key is used to decrypt the packet
1642 the hw does not set the index used to decrypt. In such cases 1703 the hw does not set the index used to decrypt. In such cases
1643 get the index from the packet. */ 1704 get the index from the packet. */
1644 if ((le16_to_cpu(hdr->frame_control) & IEEE80211_FCTL_PROTECTED) && 1705 if ((le16_to_cpu(hdr->frame_control) & IEEE80211_FCTL_PROTECTED) &&
1645 !(ds->ds_rxstat.rs_status & AR5K_RXERR_DECRYPT) && 1706 !(rs->rs_status & AR5K_RXERR_DECRYPT) &&
1646 skb->len >= hlen + 4) { 1707 skb->len >= hlen + 4) {
1647 keyix = skb->data[hlen + 3] >> 6; 1708 keyix = skb->data[hlen + 3] >> 6;
1648 1709
@@ -1655,28 +1716,62 @@ ath5k_rx_decrypted(struct ath5k_softc *sc, struct ath5k_desc *ds,
1655 1716
1656 1717
1657static void 1718static void
1658ath5k_check_ibss_hw_merge(struct ath5k_softc *sc, struct sk_buff *skb) 1719ath5k_check_ibss_tsf(struct ath5k_softc *sc, struct sk_buff *skb,
1720 struct ieee80211_rx_status *rxs)
1659{ 1721{
1722 u64 tsf, bc_tstamp;
1660 u32 hw_tu; 1723 u32 hw_tu;
1661 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data; 1724 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
1662 1725
1663 if ((mgmt->frame_control & IEEE80211_FCTL_FTYPE) == 1726 if ((le16_to_cpu(mgmt->frame_control) & IEEE80211_FCTL_FTYPE) ==
1664 IEEE80211_FTYPE_MGMT && 1727 IEEE80211_FTYPE_MGMT &&
1665 (mgmt->frame_control & IEEE80211_FCTL_STYPE) == 1728 (le16_to_cpu(mgmt->frame_control) & IEEE80211_FCTL_STYPE) ==
1666 IEEE80211_STYPE_BEACON && 1729 IEEE80211_STYPE_BEACON &&
1667 mgmt->u.beacon.capab_info & WLAN_CAPABILITY_IBSS && 1730 le16_to_cpu(mgmt->u.beacon.capab_info) & WLAN_CAPABILITY_IBSS &&
1668 memcmp(mgmt->bssid, sc->ah->ah_bssid, ETH_ALEN) == 0) { 1731 memcmp(mgmt->bssid, sc->ah->ah_bssid, ETH_ALEN) == 0) {
1669 /* 1732 /*
1670 * Received an IBSS beacon with the same BSSID. Hardware might 1733 * Received an IBSS beacon with the same BSSID. Hardware *must*
1671 * have updated the TSF, check if we need to update timers. 1734 * have updated the local TSF. We have to work around various
1735 * hardware bugs, though...
1672 */ 1736 */
1673 hw_tu = TSF_TO_TU(ath5k_hw_get_tsf64(sc->ah)); 1737 tsf = ath5k_hw_get_tsf64(sc->ah);
1674 if (hw_tu >= sc->nexttbtt) { 1738 bc_tstamp = le64_to_cpu(mgmt->u.beacon.timestamp);
1675 ath5k_beacon_update_timers(sc, 1739 hw_tu = TSF_TO_TU(tsf);
1676 mgmt->u.beacon.timestamp); 1740
1741 ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON,
1742 "beacon %llx mactime %llx (diff %lld) tsf now %llx\n",
1743 (unsigned long long)bc_tstamp,
1744 (unsigned long long)rxs->mactime,
1745 (unsigned long long)(rxs->mactime - bc_tstamp),
1746 (unsigned long long)tsf);
1747
1748 /*
1749 * Sometimes the HW will give us a wrong tstamp in the rx
1750 * status, causing the timestamp extension to go wrong.
1751 * (This seems to happen especially with beacon frames bigger
1752 * than 78 bytes (incl. FCS))
1753 * But we know that the receive timestamp must be later than the
1754 * timestamp of the beacon since HW must have synced to that.
1755 *
1756 * NOTE: here we assume mactime to be after the frame was
1757 * received, not like mac80211 which defines it at the start.
1758 */
1759 if (bc_tstamp > rxs->mactime) {
1677 ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON, 1760 ATH5K_DBG_UNLIMIT(sc, ATH5K_DEBUG_BEACON,
1678 "detected HW merge from received beacon\n"); 1761 "fixing mactime from %llx to %llx\n",
1762 (unsigned long long)rxs->mactime,
1763 (unsigned long long)tsf);
1764 rxs->mactime = tsf;
1679 } 1765 }
1766
1767 /*
1768 * Local TSF might have moved higher than our beacon timers,
1769 * in that case we have to update them to continue sending
1770 * beacons. This also takes care of synchronizing beacon sending
1771 * times with other stations.
1772 */
1773 if (hw_tu >= sc->nexttbtt)
1774 ath5k_beacon_update_timers(sc, bc_tstamp);
1680 } 1775 }
1681} 1776}
1682 1777
@@ -1685,12 +1780,11 @@ static void
1685ath5k_tasklet_rx(unsigned long data) 1780ath5k_tasklet_rx(unsigned long data)
1686{ 1781{
1687 struct ieee80211_rx_status rxs = {}; 1782 struct ieee80211_rx_status rxs = {};
1783 struct ath5k_rx_status rs = {};
1688 struct sk_buff *skb; 1784 struct sk_buff *skb;
1689 struct ath5k_softc *sc = (void *)data; 1785 struct ath5k_softc *sc = (void *)data;
1690 struct ath5k_buf *bf; 1786 struct ath5k_buf *bf;
1691 struct ath5k_desc *ds; 1787 struct ath5k_desc *ds;
1692 u16 len;
1693 u8 stat;
1694 int ret; 1788 int ret;
1695 int hdrlen; 1789 int hdrlen;
1696 int pad; 1790 int pad;
@@ -1713,7 +1807,7 @@ ath5k_tasklet_rx(unsigned long data)
1713 if (unlikely(ds->ds_link == bf->daddr)) /* this is the end */ 1807 if (unlikely(ds->ds_link == bf->daddr)) /* this is the end */
1714 break; 1808 break;
1715 1809
1716 ret = sc->ah->ah_proc_rx_desc(sc->ah, ds); 1810 ret = sc->ah->ah_proc_rx_desc(sc->ah, ds, &rs);
1717 if (unlikely(ret == -EINPROGRESS)) 1811 if (unlikely(ret == -EINPROGRESS))
1718 break; 1812 break;
1719 else if (unlikely(ret)) { 1813 else if (unlikely(ret)) {
@@ -1722,16 +1816,15 @@ ath5k_tasklet_rx(unsigned long data)
1722 return; 1816 return;
1723 } 1817 }
1724 1818
1725 if (unlikely(ds->ds_rxstat.rs_more)) { 1819 if (unlikely(rs.rs_more)) {
1726 ATH5K_WARN(sc, "unsupported jumbo\n"); 1820 ATH5K_WARN(sc, "unsupported jumbo\n");
1727 goto next; 1821 goto next;
1728 } 1822 }
1729 1823
1730 stat = ds->ds_rxstat.rs_status; 1824 if (unlikely(rs.rs_status)) {
1731 if (unlikely(stat)) { 1825 if (rs.rs_status & AR5K_RXERR_PHY)
1732 if (stat & AR5K_RXERR_PHY)
1733 goto next; 1826 goto next;
1734 if (stat & AR5K_RXERR_DECRYPT) { 1827 if (rs.rs_status & AR5K_RXERR_DECRYPT) {
1735 /* 1828 /*
1736 * Decrypt error. If the error occurred 1829 * Decrypt error. If the error occurred
1737 * because there was no hardware key, then 1830 * because there was no hardware key, then
@@ -1742,30 +1835,29 @@ ath5k_tasklet_rx(unsigned long data)
1742 * 1835 *
1743 * XXX do key cache faulting 1836 * XXX do key cache faulting
1744 */ 1837 */
1745 if (ds->ds_rxstat.rs_keyix == 1838 if (rs.rs_keyix == AR5K_RXKEYIX_INVALID &&
1746 AR5K_RXKEYIX_INVALID && 1839 !(rs.rs_status & AR5K_RXERR_CRC))
1747 !(stat & AR5K_RXERR_CRC))
1748 goto accept; 1840 goto accept;
1749 } 1841 }
1750 if (stat & AR5K_RXERR_MIC) { 1842 if (rs.rs_status & AR5K_RXERR_MIC) {
1751 rxs.flag |= RX_FLAG_MMIC_ERROR; 1843 rxs.flag |= RX_FLAG_MMIC_ERROR;
1752 goto accept; 1844 goto accept;
1753 } 1845 }
1754 1846
1755 /* let crypto-error packets fall through in MNTR */ 1847 /* let crypto-error packets fall through in MNTR */
1756 if ((stat & ~(AR5K_RXERR_DECRYPT|AR5K_RXERR_MIC)) || 1848 if ((rs.rs_status &
1849 ~(AR5K_RXERR_DECRYPT|AR5K_RXERR_MIC)) ||
1757 sc->opmode != IEEE80211_IF_TYPE_MNTR) 1850 sc->opmode != IEEE80211_IF_TYPE_MNTR)
1758 goto next; 1851 goto next;
1759 } 1852 }
1760accept: 1853accept:
1761 len = ds->ds_rxstat.rs_datalen; 1854 pci_dma_sync_single_for_cpu(sc->pdev, bf->skbaddr,
1762 pci_dma_sync_single_for_cpu(sc->pdev, bf->skbaddr, len, 1855 rs.rs_datalen, PCI_DMA_FROMDEVICE);
1763 PCI_DMA_FROMDEVICE);
1764 pci_unmap_single(sc->pdev, bf->skbaddr, sc->rxbufsize, 1856 pci_unmap_single(sc->pdev, bf->skbaddr, sc->rxbufsize,
1765 PCI_DMA_FROMDEVICE); 1857 PCI_DMA_FROMDEVICE);
1766 bf->skb = NULL; 1858 bf->skb = NULL;
1767 1859
1768 skb_put(skb, len); 1860 skb_put(skb, rs.rs_datalen);
1769 1861
1770 /* 1862 /*
1771 * the hardware adds a padding to 4 byte boundaries between 1863 * the hardware adds a padding to 4 byte boundaries between
@@ -1787,13 +1879,23 @@ accept:
1787 * 15bit only. that means TSF extension has to be done within 1879 * 15bit only. that means TSF extension has to be done within
1788 * 32768usec (about 32ms). it might be necessary to move this to 1880 * 32768usec (about 32ms). it might be necessary to move this to
1789 * the interrupt handler, like it is done in madwifi. 1881 * the interrupt handler, like it is done in madwifi.
1882 *
1883 * Unfortunately we don't know when the hardware takes the rx
1884 * timestamp (beginning of phy frame, data frame, end of rx?).
1885 * The only thing we know is that it is hardware specific...
1886 * On AR5213 it seems the rx timestamp is at the end of the
1887 * frame, but I'm not sure.
1888 *
1889 * NOTE: mac80211 defines mactime at the beginning of the first
1890 * data symbol. Since we don't have any time references it's
1891 * impossible to comply with that. This affects IBSS merge only
1892 * right now, so it's not too bad...
1790 */ 1893 */
1791 rxs.mactime = ath5k_extend_tsf(sc->ah, ds->ds_rxstat.rs_tstamp); 1894 rxs.mactime = ath5k_extend_tsf(sc->ah, rs.rs_tstamp);
1792 rxs.flag |= RX_FLAG_TSFT; 1895 rxs.flag |= RX_FLAG_TSFT;
1793 1896
1794 rxs.freq = sc->curchan->freq; 1897 rxs.freq = sc->curchan->center_freq;
1795 rxs.channel = sc->curchan->chan; 1898 rxs.band = sc->curband->band;
1796 rxs.phymode = sc->curmode;
1797 1899
1798 /* 1900 /*
1799 * signal quality: 1901 * signal quality:
@@ -1803,25 +1905,25 @@ accept:
1803 /* noise floor in dBm, from the last noise calibration */ 1905 /* noise floor in dBm, from the last noise calibration */
1804 rxs.noise = sc->ah->ah_noise_floor; 1906 rxs.noise = sc->ah->ah_noise_floor;
1805 /* signal level in dBm */ 1907 /* signal level in dBm */
1806 rxs.ssi = rxs.noise + ds->ds_rxstat.rs_rssi; 1908 rxs.ssi = rxs.noise + rs.rs_rssi;
1807 /* 1909 /*
1808 * "signal" is actually displayed as Link Quality by iwconfig 1910 * "signal" is actually displayed as Link Quality by iwconfig
1809 * we provide a percentage based on rssi (assuming max rssi 64) 1911 * we provide a percentage based on rssi (assuming max rssi 64)
1810 */ 1912 */
1811 rxs.signal = ds->ds_rxstat.rs_rssi * 100 / 64; 1913 rxs.signal = rs.rs_rssi * 100 / 64;
1812 1914
1813 rxs.antenna = ds->ds_rxstat.rs_antenna; 1915 rxs.antenna = rs.rs_antenna;
1814 rxs.rate = ds->ds_rxstat.rs_rate; 1916 rxs.rate_idx = ath5k_hw_to_driver_rix(sc, rs.rs_rate);
1815 rxs.flag |= ath5k_rx_decrypted(sc, ds, skb); 1917 rxs.flag |= ath5k_rx_decrypted(sc, ds, skb, &rs);
1816 1918
1817 ath5k_debug_dump_skb(sc, skb, "RX ", 0); 1919 ath5k_debug_dump_skb(sc, skb, "RX ", 0);
1818 1920
1819 /* check beacons in IBSS mode */ 1921 /* check beacons in IBSS mode */
1820 if (sc->opmode == IEEE80211_IF_TYPE_IBSS) 1922 if (sc->opmode == IEEE80211_IF_TYPE_IBSS)
1821 ath5k_check_ibss_hw_merge(sc, skb); 1923 ath5k_check_ibss_tsf(sc, skb, &rxs);
1822 1924
1823 __ieee80211_rx(sc->hw, skb, &rxs); 1925 __ieee80211_rx(sc->hw, skb, &rxs);
1824 sc->led_rxrate = ds->ds_rxstat.rs_rate; 1926 sc->led_rxrate = rs.rs_rate;
1825 ath5k_led_event(sc, ATH_LED_RX); 1927 ath5k_led_event(sc, ATH_LED_RX);
1826next: 1928next:
1827 list_move_tail(&bf->list, &sc->rxbuf); 1929 list_move_tail(&bf->list, &sc->rxbuf);
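A sketch of the 15-bit timestamp extension discussed in the comment above: the short hardware rx timestamp is merged into the current 64-bit TSF, stepping back one 32768us period when the low TSF bits have already wrapped past the rx stamp. This only illustrates the idea behind ath5k_extend_tsf() (whose body is not shown in this hunk) under the assumption that it works this way; it is not code from the patch.

#include <stdint.h>

/* Illustrative only: extend a 15-bit rx timestamp against the current
 * 64-bit TSF. This has to run within 32768us of reception, otherwise
 * the wrap-around can no longer be resolved unambiguously. */
static uint64_t example_extend_tsf(uint64_t tsf, uint32_t rstamp)
{
	rstamp &= 0x7fff;		/* hardware provides 15 bits */
	if ((tsf & 0x7fff) < rstamp)	/* low TSF bits wrapped past the stamp */
		tsf -= 0x8000;
	return (tsf & ~(uint64_t)0x7fff) | rstamp;
}

For reference, the TU conversion used by the beacon code (TSF_TO_TU) is simply a right shift by 10, since 1 TU = 1024us.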
@@ -1840,6 +1942,7 @@ static void
1840ath5k_tx_processq(struct ath5k_softc *sc, struct ath5k_txq *txq) 1942ath5k_tx_processq(struct ath5k_softc *sc, struct ath5k_txq *txq)
1841{ 1943{
1842 struct ieee80211_tx_status txs = {}; 1944 struct ieee80211_tx_status txs = {};
1945 struct ath5k_tx_status ts = {};
1843 struct ath5k_buf *bf, *bf0; 1946 struct ath5k_buf *bf, *bf0;
1844 struct ath5k_desc *ds; 1947 struct ath5k_desc *ds;
1845 struct sk_buff *skb; 1948 struct sk_buff *skb;
@@ -1852,7 +1955,7 @@ ath5k_tx_processq(struct ath5k_softc *sc, struct ath5k_txq *txq)
1852 /* TODO only one segment */ 1955 /* TODO only one segment */
1853 pci_dma_sync_single_for_cpu(sc->pdev, sc->desc_daddr, 1956 pci_dma_sync_single_for_cpu(sc->pdev, sc->desc_daddr,
1854 sc->desc_len, PCI_DMA_FROMDEVICE); 1957 sc->desc_len, PCI_DMA_FROMDEVICE);
1855 ret = sc->ah->ah_proc_tx_desc(sc->ah, ds); 1958 ret = sc->ah->ah_proc_tx_desc(sc->ah, ds, &ts);
1856 if (unlikely(ret == -EINPROGRESS)) 1959 if (unlikely(ret == -EINPROGRESS))
1857 break; 1960 break;
1858 else if (unlikely(ret)) { 1961 else if (unlikely(ret)) {
@@ -1867,17 +1970,16 @@ ath5k_tx_processq(struct ath5k_softc *sc, struct ath5k_txq *txq)
1867 PCI_DMA_TODEVICE); 1970 PCI_DMA_TODEVICE);
1868 1971
1869 txs.control = bf->ctl; 1972 txs.control = bf->ctl;
1870 txs.retry_count = ds->ds_txstat.ts_shortretry + 1973 txs.retry_count = ts.ts_shortretry + ts.ts_longretry / 6;
1871 ds->ds_txstat.ts_longretry / 6; 1974 if (unlikely(ts.ts_status)) {
1872 if (unlikely(ds->ds_txstat.ts_status)) {
1873 sc->ll_stats.dot11ACKFailureCount++; 1975 sc->ll_stats.dot11ACKFailureCount++;
1874 if (ds->ds_txstat.ts_status & AR5K_TXERR_XRETRY) 1976 if (ts.ts_status & AR5K_TXERR_XRETRY)
1875 txs.excessive_retries = 1; 1977 txs.excessive_retries = 1;
1876 else if (ds->ds_txstat.ts_status & AR5K_TXERR_FILT) 1978 else if (ts.ts_status & AR5K_TXERR_FILT)
1877 txs.flags |= IEEE80211_TX_STATUS_TX_FILTERED; 1979 txs.flags |= IEEE80211_TX_STATUS_TX_FILTERED;
1878 } else { 1980 } else {
1879 txs.flags |= IEEE80211_TX_STATUS_ACK; 1981 txs.flags |= IEEE80211_TX_STATUS_ACK;
1880 txs.ack_signal = ds->ds_txstat.ts_rssi; 1982 txs.ack_signal = ts.ts_rssi;
1881 } 1983 }
1882 1984
1883 ieee80211_tx_status(sc->hw, skb, &txs); 1985 ieee80211_tx_status(sc->hw, skb, &txs);
@@ -1958,8 +2060,9 @@ ath5k_beacon_setup(struct ath5k_softc *sc, struct ath5k_buf *bf,
1958 ds->ds_data = bf->skbaddr; 2060 ds->ds_data = bf->skbaddr;
1959 ret = ah->ah_setup_tx_desc(ah, ds, skb->len, 2061 ret = ah->ah_setup_tx_desc(ah, ds, skb->len,
1960 ieee80211_get_hdrlen_from_skb(skb), 2062 ieee80211_get_hdrlen_from_skb(skb),
1961 AR5K_PKT_TYPE_BEACON, (ctl->power_level * 2), ctl->tx_rate, 1, 2063 AR5K_PKT_TYPE_BEACON, (sc->power_level * 2),
1962 AR5K_TXKEYIX_INVALID, antenna, flags, 0, 0); 2064 ctl->tx_rate->hw_value, 1, AR5K_TXKEYIX_INVALID,
2065 antenna, flags, 0, 0);
1963 if (ret) 2066 if (ret)
1964 goto err_unmap; 2067 goto err_unmap;
1965 2068
@@ -2050,7 +2153,7 @@ ath5k_beacon_send(struct ath5k_softc *sc)
2050 * beacon timer registers. 2153 * beacon timer registers.
2051 * 2154 *
2052 * This is called in a variety of situations, e.g. when a beacon is received, 2155 * This is called in a variety of situations, e.g. when a beacon is received,
2053 * when a HW merge has been detected, but also when a new IBSS is created or 2156 * when a TSF update has been detected, but also when a new IBSS is created or
2054 * when we otherwise know we have to update the timers, but we keep it in this 2157 * when we otherwise know we have to update the timers, but we keep it in this
2055 * function to have it all together in one place. 2158 * function to have it all together in one place.
2056 */ 2159 */
@@ -2150,7 +2253,7 @@ ath5k_beacon_update_timers(struct ath5k_softc *sc, u64 bc_tsf)
2150 * another AP to associate with. 2253 * another AP to associate with.
2151 * 2254 *
2152 * In IBSS mode we use a self-linked tx descriptor if possible. We enable SWBA 2255 * In IBSS mode we use a self-linked tx descriptor if possible. We enable SWBA
2153 * interrupts to detect HW merges only. 2256 * interrupts to detect TSF updates only.
2154 * 2257 *
2155 * AP mode is missing. 2258 * AP mode is missing.
2156 */ 2259 */
@@ -2170,7 +2273,7 @@ ath5k_beacon_config(struct ath5k_softc *sc)
2170 * hardware send the beacons automatically. We have to load it 2273 * hardware send the beacons automatically. We have to load it
2171 * only once here. 2274 * only once here.
2172 * We use the SWBA interrupt only to keep track of the beacon 2275 * We use the SWBA interrupt only to keep track of the beacon
2173 * timers in order to detect HW merges (automatic TSF updates). 2276 * timers in order to detect automatic TSF updates.
2174 */ 2277 */
2175 ath5k_beaconq_config(sc); 2278 ath5k_beaconq_config(sc);
2176 2279
@@ -2211,7 +2314,8 @@ ath5k_init(struct ath5k_softc *sc)
2211 * be followed by initialization of the appropriate bits 2314 * be followed by initialization of the appropriate bits
2212 * and then setup of the interrupt mask. 2315 * and then setup of the interrupt mask.
2213 */ 2316 */
2214 sc->curchan = sc->hw->conf.chan; 2317 sc->curchan = sc->hw->conf.channel;
2318 sc->curband = &sc->sbands[sc->curchan->band];
2215 ret = ath5k_hw_reset(sc->ah, sc->opmode, sc->curchan, false); 2319 ret = ath5k_hw_reset(sc->ah, sc->opmode, sc->curchan, false);
2216 if (ret) { 2320 if (ret) {
2217 ATH5K_ERR(sc, "unable to reset hardware: %d\n", ret); 2321 ATH5K_ERR(sc, "unable to reset hardware: %d\n", ret);
@@ -2238,7 +2342,8 @@ ath5k_init(struct ath5k_softc *sc)
2238 * Enable interrupts. 2342 * Enable interrupts.
2239 */ 2343 */
2240 sc->imask = AR5K_INT_RX | AR5K_INT_TX | AR5K_INT_RXEOL | 2344 sc->imask = AR5K_INT_RX | AR5K_INT_TX | AR5K_INT_RXEOL |
2241 AR5K_INT_RXORN | AR5K_INT_FATAL | AR5K_INT_GLOBAL; 2345 AR5K_INT_RXORN | AR5K_INT_FATAL | AR5K_INT_GLOBAL |
2346 AR5K_INT_MIB;
2242 2347
2243 ath5k_hw_set_intr(sc->ah, sc->imask); 2348 ath5k_hw_set_intr(sc->ah, sc->imask);
2244 /* Set ack to be sent at low bit-rates */ 2349 /* Set ack to be sent at low bit-rates */
@@ -2382,8 +2487,8 @@ ath5k_intr(int irq, void *dev_id)
2382 * 2487 *
2383 * In IBSS mode we use this interrupt just to 2488 * In IBSS mode we use this interrupt just to
2384 * keep track of the next TBTT (target beacon 2489 * keep track of the next TBTT (target beacon
2385 * transmission time) in order to detect hardware 2490 * transmission time) in order to detect whether
2386 * merges (TSF updates). 2491 * automatic TSF updates happened.
2387 */ 2492 */
2388 if (sc->opmode == IEEE80211_IF_TYPE_IBSS) { 2493 if (sc->opmode == IEEE80211_IF_TYPE_IBSS) {
2389 /* XXX: only if VEOL supported */ 2494 /* XXX: only if VEOL supported */
@@ -2418,7 +2523,11 @@ ath5k_intr(int irq, void *dev_id)
2418 if (status & AR5K_INT_BMISS) { 2523 if (status & AR5K_INT_BMISS) {
2419 } 2524 }
2420 if (status & AR5K_INT_MIB) { 2525 if (status & AR5K_INT_MIB) {
2421 /* TODO */ 2526 /*
2527 * These stats are also used for ANI, I think,
2528 * so how about updating them more often?
2529 */
2530 ath5k_hw_update_mib_counters(ah, &sc->ll_stats);
2422 } 2531 }
2423 } 2532 }
2424 } while (ath5k_hw_is_intr_pending(ah) && counter-- > 0); 2533 } while (ath5k_hw_is_intr_pending(ah) && counter-- > 0);
@@ -2448,7 +2557,8 @@ ath5k_calibrate(unsigned long data)
2448 struct ath5k_hw *ah = sc->ah; 2557 struct ath5k_hw *ah = sc->ah;
2449 2558
2450 ATH5K_DBG(sc, ATH5K_DEBUG_CALIBRATE, "channel %u/%x\n", 2559 ATH5K_DBG(sc, ATH5K_DEBUG_CALIBRATE, "channel %u/%x\n",
2451 sc->curchan->chan, sc->curchan->val); 2560 ieee80211_frequency_to_channel(sc->curchan->center_freq),
2561 sc->curchan->hw_value);
2452 2562
2453 if (ath5k_hw_get_rf_gain(ah) == AR5K_RFGAIN_NEED_CHANGE) { 2563 if (ath5k_hw_get_rf_gain(ah) == AR5K_RFGAIN_NEED_CHANGE) {
2454 /* 2564 /*
@@ -2460,7 +2570,8 @@ ath5k_calibrate(unsigned long data)
2460 } 2570 }
2461 if (ath5k_hw_phy_calibrate(ah, sc->curchan)) 2571 if (ath5k_hw_phy_calibrate(ah, sc->curchan))
2462 ATH5K_ERR(sc, "calibration of channel %u failed\n", 2572 ATH5K_ERR(sc, "calibration of channel %u failed\n",
2463 sc->curchan->chan); 2573 ieee80211_frequency_to_channel(
2574 sc->curchan->center_freq));
2464 2575
2465 mod_timer(&sc->calib_tim, round_jiffies(jiffies + 2576 mod_timer(&sc->calib_tim, round_jiffies(jiffies +
2466 msecs_to_jiffies(ath5k_calinterval * 1000))); 2577 msecs_to_jiffies(ath5k_calinterval * 1000)));
@@ -2558,7 +2669,7 @@ ath5k_tx(struct ieee80211_hw *hw, struct sk_buff *skb,
2558 memmove(skb->data, skb->data+pad, hdrlen); 2669 memmove(skb->data, skb->data+pad, hdrlen);
2559 } 2670 }
2560 2671
2561 sc->led_txrate = ctl->tx_rate; 2672 sc->led_txrate = ctl->tx_rate->hw_value;
2562 2673
2563 spin_lock_irqsave(&sc->txbuflock, flags); 2674 spin_lock_irqsave(&sc->txbuflock, flags);
2564 if (list_empty(&sc->txbuf)) { 2675 if (list_empty(&sc->txbuf)) {
@@ -2597,11 +2708,6 @@ ath5k_reset(struct ieee80211_hw *hw)
2597 int ret; 2708 int ret;
2598 2709
2599 ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "resetting\n"); 2710 ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "resetting\n");
2600 /*
2601 * Convert to a hw channel description with the flags
2602 * constrained to reflect the current operating mode.
2603 */
2604 sc->curchan = hw->conf.chan;
2605 2711
2606 ath5k_hw_set_intr(ah, 0); 2712 ath5k_hw_set_intr(ah, 0);
2607 ath5k_txq_cleanup(sc); 2713 ath5k_txq_cleanup(sc);
@@ -2692,6 +2798,9 @@ end:
2692 mutex_unlock(&sc->lock); 2798 mutex_unlock(&sc->lock);
2693} 2799}
2694 2800
2801/*
2802 * TODO: Phy disable/diversity etc
2803 */
2695static int 2804static int
2696ath5k_config(struct ieee80211_hw *hw, 2805ath5k_config(struct ieee80211_hw *hw,
2697 struct ieee80211_conf *conf) 2806 struct ieee80211_conf *conf)
@@ -2699,9 +2808,9 @@ ath5k_config(struct ieee80211_hw *hw,
2699 struct ath5k_softc *sc = hw->priv; 2808 struct ath5k_softc *sc = hw->priv;
2700 2809
2701 sc->bintval = conf->beacon_int; 2810 sc->bintval = conf->beacon_int;
2702 ath5k_setcurmode(sc, conf->phymode); 2811 sc->power_level = conf->power_level;
2703 2812
2704 return ath5k_chan_set(sc, conf->chan); 2813 return ath5k_chan_set(sc, conf->channel);
2705} 2814}
2706 2815
2707static int 2816static int
@@ -2869,7 +2978,9 @@ ath5k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
2869 2978
2870 switch(key->alg) { 2979 switch(key->alg) {
2871 case ALG_WEP: 2980 case ALG_WEP:
2872 break; 2981 /* XXX: fix hardware encryption, its not working. For now
2982 * allow software encryption */
2983 /* break; */
2873 case ALG_TKIP: 2984 case ALG_TKIP:
2874 case ALG_CCMP: 2985 case ALG_CCMP:
2875 return -EOPNOTSUPP; 2986 return -EOPNOTSUPP;
@@ -2909,6 +3020,10 @@ ath5k_get_stats(struct ieee80211_hw *hw,
2909 struct ieee80211_low_level_stats *stats) 3020 struct ieee80211_low_level_stats *stats)
2910{ 3021{
2911 struct ath5k_softc *sc = hw->priv; 3022 struct ath5k_softc *sc = hw->priv;
3023 struct ath5k_hw *ah = sc->ah;
3024
3025 /* Force update */
3026 ath5k_hw_update_mib_counters(ah, &sc->ll_stats);
2912 3027
2913 memcpy(stats, &sc->ll_stats, sizeof(sc->ll_stats)); 3028 memcpy(stats, &sc->ll_stats, sizeof(sc->ll_stats));
2914 3029
diff --git a/drivers/net/wireless/ath5k/base.h b/drivers/net/wireless/ath5k/base.h
index 8287ae787f12..3a9755893018 100644
--- a/drivers/net/wireless/ath5k/base.h
+++ b/drivers/net/wireless/ath5k/base.h
@@ -83,7 +83,7 @@ struct ath5k_txq {
83#if CHAN_DEBUG 83#if CHAN_DEBUG
84#define ATH_CHAN_MAX (26+26+26+200+200) 84#define ATH_CHAN_MAX (26+26+26+200+200)
85#else 85#else
86#define ATH_CHAN_MAX (14+14+14+252+20) /* XXX what's the max? */ 86#define ATH_CHAN_MAX (14+14+14+252+20)
87#endif 87#endif
88 88
89/* Software Carrier, keeps track of the driver state 89/* Software Carrier, keeps track of the driver state
@@ -95,15 +95,22 @@ struct ath5k_softc {
95 struct ieee80211_tx_queue_stats tx_stats; 95 struct ieee80211_tx_queue_stats tx_stats;
96 struct ieee80211_low_level_stats ll_stats; 96 struct ieee80211_low_level_stats ll_stats;
97 struct ieee80211_hw *hw; /* IEEE 802.11 common */ 97 struct ieee80211_hw *hw; /* IEEE 802.11 common */
98 struct ieee80211_hw_mode modes[NUM_DRIVER_MODES]; 98 struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS];
99 struct ieee80211_channel channels[ATH_CHAN_MAX]; 99 struct ieee80211_channel channels[ATH_CHAN_MAX];
100 struct ieee80211_rate rates[AR5K_MAX_RATES * NUM_DRIVER_MODES]; 100 struct ieee80211_rate rates[AR5K_MAX_RATES * IEEE80211_NUM_BANDS];
101 enum ieee80211_if_types opmode; 101 enum ieee80211_if_types opmode;
102 struct ath5k_hw *ah; /* Atheros HW */ 102 struct ath5k_hw *ah; /* Atheros HW */
103 103
104#if ATH5K_DEBUG 104 struct ieee80211_supported_band *curband;
105
106 u8 a_rates;
107 u8 b_rates;
108 u8 g_rates;
109 u8 xr_rates;
110
111#ifdef CONFIG_ATH5K_DEBUG
105 struct ath5k_dbg_info debug; /* debug info */ 112 struct ath5k_dbg_info debug; /* debug info */
106#endif 113#endif /* CONFIG_ATH5K_DEBUG */
107 114
108 struct ath5k_buf *bufptr; /* allocated buffer ptr */ 115 struct ath5k_buf *bufptr; /* allocated buffer ptr */
109 struct ath5k_desc *desc; /* TX/RX descriptors */ 116 struct ath5k_desc *desc; /* TX/RX descriptors */
@@ -169,6 +176,7 @@ struct ath5k_softc {
169 unsigned int nexttbtt; /* next beacon time in TU */ 176 unsigned int nexttbtt; /* next beacon time in TU */
170 177
171 struct timer_list calib_tim; /* calibration timer */ 178 struct timer_list calib_tim; /* calibration timer */
179 int power_level; /* Requested tx power in dbm */
172}; 180};
173 181
174#define ath5k_hw_hasbssidmask(_ah) \ 182#define ath5k_hw_hasbssidmask(_ah) \
diff --git a/drivers/net/wireless/ath5k/debug.c b/drivers/net/wireless/ath5k/debug.c
index bb581ef6d1ef..41d5fa34b544 100644
--- a/drivers/net/wireless/ath5k/debug.c
+++ b/drivers/net/wireless/ath5k/debug.c
@@ -65,7 +65,7 @@ static unsigned int ath5k_debug;
65module_param_named(debug, ath5k_debug, uint, 0); 65module_param_named(debug, ath5k_debug, uint, 0);
66 66
67 67
68#if ATH5K_DEBUG 68#ifdef CONFIG_ATH5K_DEBUG
69 69
70#include <linux/seq_file.h> 70#include <linux/seq_file.h>
71#include "reg.h" 71#include "reg.h"
@@ -200,7 +200,8 @@ static ssize_t read_file_tsf(struct file *file, char __user *user_buf,
200{ 200{
201 struct ath5k_softc *sc = file->private_data; 201 struct ath5k_softc *sc = file->private_data;
202 char buf[100]; 202 char buf[100];
203 snprintf(buf, sizeof(buf), "0x%016llx\n", ath5k_hw_get_tsf64(sc->ah)); 203 snprintf(buf, sizeof(buf), "0x%016llx\n",
204 (unsigned long long)ath5k_hw_get_tsf64(sc->ah));
204 return simple_read_from_buffer(user_buf, count, ppos, buf, 19); 205 return simple_read_from_buffer(user_buf, count, ppos, buf, 19);
205} 206}
206 207
@@ -271,7 +272,8 @@ static ssize_t read_file_beacon(struct file *file, char __user *user_buf,
271 272
272 tsf = ath5k_hw_get_tsf64(sc->ah); 273 tsf = ath5k_hw_get_tsf64(sc->ah);
273 len += snprintf(buf+len, sizeof(buf)-len, 274 len += snprintf(buf+len, sizeof(buf)-len,
274 "TSF\t\t0x%016llx\tTU: %08x\n", tsf, TSF_TO_TU(tsf)); 275 "TSF\t\t0x%016llx\tTU: %08x\n",
276 (unsigned long long)tsf, TSF_TO_TU(tsf));
275 277
276 return simple_read_from_buffer(user_buf, count, ppos, buf, len); 278 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
277} 279}
@@ -340,7 +342,7 @@ static struct {
340 { ATH5K_DEBUG_LED, "led", "LED management" }, 342 { ATH5K_DEBUG_LED, "led", "LED management" },
341 { ATH5K_DEBUG_DUMP_RX, "dumprx", "print received skb content" }, 343 { ATH5K_DEBUG_DUMP_RX, "dumprx", "print received skb content" },
342 { ATH5K_DEBUG_DUMP_TX, "dumptx", "print transmit skb content" }, 344 { ATH5K_DEBUG_DUMP_TX, "dumptx", "print transmit skb content" },
343 { ATH5K_DEBUG_DUMPMODES, "dumpmodes", "dump modes" }, 345 { ATH5K_DEBUG_DUMPBANDS, "dumpbands", "dump bands" },
344 { ATH5K_DEBUG_TRACE, "trace", "trace function calls" }, 346 { ATH5K_DEBUG_TRACE, "trace", "trace function calls" },
345 { ATH5K_DEBUG_ANY, "all", "show all debug levels" }, 347 { ATH5K_DEBUG_ANY, "all", "show all debug levels" },
346}; 348};
@@ -452,43 +454,63 @@ ath5k_debug_finish_device(struct ath5k_softc *sc)
452/* functions used in other places */ 454/* functions used in other places */
453 455
454void 456void
455ath5k_debug_dump_modes(struct ath5k_softc *sc, struct ieee80211_hw_mode *modes) 457ath5k_debug_dump_bands(struct ath5k_softc *sc)
456{ 458{
457 unsigned int m, i; 459 unsigned int b, i;
458 460
459 if (likely(!(sc->debug.level & ATH5K_DEBUG_DUMPMODES))) 461 if (likely(!(sc->debug.level & ATH5K_DEBUG_DUMPBANDS)))
460 return; 462 return;
461 463
462 for (m = 0; m < NUM_DRIVER_MODES; m++) { 464 BUG_ON(!sc->sbands);
463 printk(KERN_DEBUG "Mode %u: channels %d, rates %d\n", m, 465
464 modes[m].num_channels, modes[m].num_rates); 466 for (b = 0; b < IEEE80211_NUM_BANDS; b++) {
467 struct ieee80211_supported_band *band = &sc->sbands[b];
468 char bname[6];
469 switch (band->band) {
470 case IEEE80211_BAND_2GHZ:
471 strcpy(bname, "2 GHz");
472 break;
473 case IEEE80211_BAND_5GHZ:
474 strcpy(bname, "5 GHz");
475 break;
476 default:
477 printk(KERN_DEBUG "Band not supported: %d\n",
478 band->band);
479 return;
480 }
481 printk(KERN_DEBUG "Band %s: channels %d, rates %d\n", bname,
482 band->n_channels, band->n_bitrates);
465 printk(KERN_DEBUG " channels:\n"); 483 printk(KERN_DEBUG " channels:\n");
466 for (i = 0; i < modes[m].num_channels; i++) 484 for (i = 0; i < band->n_channels; i++)
467 printk(KERN_DEBUG " %3d %d %.4x %.4x\n", 485 printk(KERN_DEBUG " %3d %d %.4x %.4x\n",
468 modes[m].channels[i].chan, 486 ieee80211_frequency_to_channel(
469 modes[m].channels[i].freq, 487 band->channels[i].center_freq),
470 modes[m].channels[i].val, 488 band->channels[i].center_freq,
471 modes[m].channels[i].flag); 489 band->channels[i].hw_value,
490 band->channels[i].flags);
472 printk(KERN_DEBUG " rates:\n"); 491 printk(KERN_DEBUG " rates:\n");
473 for (i = 0; i < modes[m].num_rates; i++) 492 for (i = 0; i < band->n_bitrates; i++)
474 printk(KERN_DEBUG " %4d %.4x %.4x %.4x\n", 493 printk(KERN_DEBUG " %4d %.4x %.4x %.4x\n",
475 modes[m].rates[i].rate, 494 band->bitrates[i].bitrate,
476 modes[m].rates[i].val, 495 band->bitrates[i].hw_value,
477 modes[m].rates[i].flags, 496 band->bitrates[i].flags,
478 modes[m].rates[i].val2); 497 band->bitrates[i].hw_value_short);
479 } 498 }
480} 499}
481 500
482static inline void 501static inline void
483ath5k_debug_printrxbuf(struct ath5k_buf *bf, int done) 502ath5k_debug_printrxbuf(struct ath5k_buf *bf, int done,
503 struct ath5k_rx_status *rs)
484{ 504{
485 struct ath5k_desc *ds = bf->desc; 505 struct ath5k_desc *ds = bf->desc;
506 struct ath5k_hw_all_rx_desc *rd = &ds->ud.ds_rx;
486 507
487 printk(KERN_DEBUG "R (%p %llx) %08x %08x %08x %08x %08x %08x %c\n", 508 printk(KERN_DEBUG "R (%p %llx) %08x %08x %08x %08x %08x %08x %c\n",
488 ds, (unsigned long long)bf->daddr, 509 ds, (unsigned long long)bf->daddr,
489 ds->ds_link, ds->ds_data, ds->ds_ctl0, ds->ds_ctl1, 510 ds->ds_link, ds->ds_data,
490 ds->ds_hw[0], ds->ds_hw[1], 511 rd->rx_ctl.rx_control_0, rd->rx_ctl.rx_control_1,
491 !done ? ' ' : (ds->ds_rxstat.rs_status == 0) ? '*' : '!'); 512 rd->u.rx_stat.rx_status_0, rd->u.rx_stat.rx_status_1,
513 !done ? ' ' : (rs->rs_status == 0) ? '*' : '!');
492} 514}
493 515
494void 516void
@@ -496,6 +518,7 @@ ath5k_debug_printrxbuffs(struct ath5k_softc *sc, struct ath5k_hw *ah)
496{ 518{
497 struct ath5k_desc *ds; 519 struct ath5k_desc *ds;
498 struct ath5k_buf *bf; 520 struct ath5k_buf *bf;
521 struct ath5k_rx_status rs = {};
499 int status; 522 int status;
500 523
501 if (likely(!(sc->debug.level & ATH5K_DEBUG_RESET))) 524 if (likely(!(sc->debug.level & ATH5K_DEBUG_RESET)))
@@ -507,9 +530,9 @@ ath5k_debug_printrxbuffs(struct ath5k_softc *sc, struct ath5k_hw *ah)
507 spin_lock_bh(&sc->rxbuflock); 530 spin_lock_bh(&sc->rxbuflock);
508 list_for_each_entry(bf, &sc->rxbuf, list) { 531 list_for_each_entry(bf, &sc->rxbuf, list) {
509 ds = bf->desc; 532 ds = bf->desc;
510 status = ah->ah_proc_rx_desc(ah, ds); 533 status = ah->ah_proc_rx_desc(ah, ds, &rs);
511 if (!status) 534 if (!status)
512 ath5k_debug_printrxbuf(bf, status == 0); 535 ath5k_debug_printrxbuf(bf, status == 0, &rs);
513 } 536 }
514 spin_unlock_bh(&sc->rxbuflock); 537 spin_unlock_bh(&sc->rxbuflock);
515} 538}
@@ -533,19 +556,24 @@ ath5k_debug_dump_skb(struct ath5k_softc *sc,
533} 556}
534 557
535void 558void
536ath5k_debug_printtxbuf(struct ath5k_softc *sc, 559ath5k_debug_printtxbuf(struct ath5k_softc *sc, struct ath5k_buf *bf)
537 struct ath5k_buf *bf, int done)
538{ 560{
539 struct ath5k_desc *ds = bf->desc; 561 struct ath5k_desc *ds = bf->desc;
562 struct ath5k_hw_5212_tx_desc *td = &ds->ud.ds_tx5212;
563 struct ath5k_tx_status ts = {};
564 int done;
540 565
541 if (likely(!(sc->debug.level & ATH5K_DEBUG_RESET))) 566 if (likely(!(sc->debug.level & ATH5K_DEBUG_RESET)))
542 return; 567 return;
543 568
569 done = sc->ah->ah_proc_tx_desc(sc->ah, bf->desc, &ts);
570
544 printk(KERN_DEBUG "T (%p %llx) %08x %08x %08x %08x %08x %08x %08x " 571 printk(KERN_DEBUG "T (%p %llx) %08x %08x %08x %08x %08x %08x %08x "
545 "%08x %c\n", ds, (unsigned long long)bf->daddr, ds->ds_link, 572 "%08x %c\n", ds, (unsigned long long)bf->daddr, ds->ds_link,
546 ds->ds_data, ds->ds_ctl0, ds->ds_ctl1, 573 ds->ds_data, td->tx_ctl.tx_control_0, td->tx_ctl.tx_control_1,
547 ds->ds_hw[0], ds->ds_hw[1], ds->ds_hw[2], ds->ds_hw[3], 574 td->tx_ctl.tx_control_2, td->tx_ctl.tx_control_3,
548 !done ? ' ' : (ds->ds_txstat.ts_status == 0) ? '*' : '!'); 575 td->tx_stat.tx_status_0, td->tx_stat.tx_status_1,
576 done ? ' ' : (ts.ts_status == 0) ? '*' : '!');
549} 577}
550 578
551#endif /* if ATH5K_DEBUG */ 579#endif /* ifdef CONFIG_ATH5K_DEBUG */
diff --git a/drivers/net/wireless/ath5k/debug.h b/drivers/net/wireless/ath5k/debug.h
index c4fd8c43df0c..2cf8d18b10e3 100644
--- a/drivers/net/wireless/ath5k/debug.h
+++ b/drivers/net/wireless/ath5k/debug.h
@@ -61,11 +61,6 @@
61#ifndef _ATH5K_DEBUG_H 61#ifndef _ATH5K_DEBUG_H
62#define _ATH5K_DEBUG_H 62#define _ATH5K_DEBUG_H
63 63
64/* set this to 1 for debugging output */
65#ifndef ATH5K_DEBUG
66#define ATH5K_DEBUG 0
67#endif
68
69struct ath5k_softc; 64struct ath5k_softc;
70struct ath5k_hw; 65struct ath5k_hw;
71struct ieee80211_hw_mode; 66struct ieee80211_hw_mode;
@@ -96,7 +91,7 @@ struct ath5k_dbg_info {
96 * @ATH5K_DEBUG_LED: led management 91 * @ATH5K_DEBUG_LED: led management
97 * @ATH5K_DEBUG_DUMP_RX: print received skb content 92 * @ATH5K_DEBUG_DUMP_RX: print received skb content
98 * @ATH5K_DEBUG_DUMP_TX: print transmit skb content 93 * @ATH5K_DEBUG_DUMP_TX: print transmit skb content
99 * @ATH5K_DEBUG_DUMPMODES: dump modes 94 * @ATH5K_DEBUG_DUMPBANDS: dump bands
100 * @ATH5K_DEBUG_TRACE: trace function calls 95 * @ATH5K_DEBUG_TRACE: trace function calls
101 * @ATH5K_DEBUG_ANY: show at any debug level 96 * @ATH5K_DEBUG_ANY: show at any debug level
102 * 97 *
@@ -118,12 +113,12 @@ enum ath5k_debug_level {
118 ATH5K_DEBUG_LED = 0x00000080, 113 ATH5K_DEBUG_LED = 0x00000080,
119 ATH5K_DEBUG_DUMP_RX = 0x00000100, 114 ATH5K_DEBUG_DUMP_RX = 0x00000100,
120 ATH5K_DEBUG_DUMP_TX = 0x00000200, 115 ATH5K_DEBUG_DUMP_TX = 0x00000200,
121 ATH5K_DEBUG_DUMPMODES = 0x00000400, 116 ATH5K_DEBUG_DUMPBANDS = 0x00000400,
122 ATH5K_DEBUG_TRACE = 0x00001000, 117 ATH5K_DEBUG_TRACE = 0x00001000,
123 ATH5K_DEBUG_ANY = 0xffffffff 118 ATH5K_DEBUG_ANY = 0xffffffff
124}; 119};
125 120
126#if ATH5K_DEBUG 121#ifdef CONFIG_ATH5K_DEBUG
127 122
128#define ATH5K_TRACE(_sc) do { \ 123#define ATH5K_TRACE(_sc) do { \
129 if (unlikely((_sc)->debug.level & ATH5K_DEBUG_TRACE)) \ 124 if (unlikely((_sc)->debug.level & ATH5K_DEBUG_TRACE)) \
@@ -158,20 +153,20 @@ void
158ath5k_debug_printrxbuffs(struct ath5k_softc *sc, struct ath5k_hw *ah); 153ath5k_debug_printrxbuffs(struct ath5k_softc *sc, struct ath5k_hw *ah);
159 154
160void 155void
161ath5k_debug_dump_modes(struct ath5k_softc *sc, 156ath5k_debug_dump_bands(struct ath5k_softc *sc);
162 struct ieee80211_hw_mode *modes);
163 157
164void 158void
165ath5k_debug_dump_skb(struct ath5k_softc *sc, 159ath5k_debug_dump_skb(struct ath5k_softc *sc,
166 struct sk_buff *skb, const char *prefix, int tx); 160 struct sk_buff *skb, const char *prefix, int tx);
167 161
168void 162void
169ath5k_debug_printtxbuf(struct ath5k_softc *sc, 163ath5k_debug_printtxbuf(struct ath5k_softc *sc, struct ath5k_buf *bf);
170 struct ath5k_buf *bf, int done);
171 164
172#else /* no debugging */ 165#else /* no debugging */
173 166
174#define ATH5K_TRACE(_sc) /* empty */ 167#include <linux/compiler.h>
168
169#define ATH5K_TRACE(_sc) typecheck(struct ath5k_softc *, (_sc))
175 170
176static inline void __attribute__ ((format (printf, 3, 4))) 171static inline void __attribute__ ((format (printf, 3, 4)))
177ATH5K_DBG(struct ath5k_softc *sc, unsigned int m, const char *fmt, ...) {} 172ATH5K_DBG(struct ath5k_softc *sc, unsigned int m, const char *fmt, ...) {}
@@ -196,17 +191,15 @@ static inline void
196ath5k_debug_printrxbuffs(struct ath5k_softc *sc, struct ath5k_hw *ah) {} 191ath5k_debug_printrxbuffs(struct ath5k_softc *sc, struct ath5k_hw *ah) {}
197 192
198static inline void 193static inline void
199ath5k_debug_dump_modes(struct ath5k_softc *sc, 194ath5k_debug_dump_bands(struct ath5k_softc *sc) {}
200 struct ieee80211_hw_mode *modes) {}
201 195
202static inline void 196static inline void
203ath5k_debug_dump_skb(struct ath5k_softc *sc, 197ath5k_debug_dump_skb(struct ath5k_softc *sc,
204 struct sk_buff *skb, const char *prefix, int tx) {} 198 struct sk_buff *skb, const char *prefix, int tx) {}
205 199
206static inline void 200static inline void
207ath5k_debug_printtxbuf(struct ath5k_softc *sc, 201ath5k_debug_printtxbuf(struct ath5k_softc *sc, struct ath5k_buf *bf) {}
208 struct ath5k_buf *bf, int done) {}
209 202
210#endif /* if ATH5K_DEBUG */ 203#endif /* ifdef CONFIG_ATH5K_DEBUG */
211 204
212#endif /* ifndef _ATH5K_DEBUG_H */ 205#endif /* ifndef _ATH5K_DEBUG_H */
diff --git a/drivers/net/wireless/ath5k/hw.c b/drivers/net/wireless/ath5k/hw.c
index 01757436353d..87e782291a01 100644
--- a/drivers/net/wireless/ath5k/hw.c
+++ b/drivers/net/wireless/ath5k/hw.c
@@ -1,4 +1,4 @@
1 /* 1/*
2 * Copyright (c) 2004-2007 Reyk Floeter <reyk@openbsd.org> 2 * Copyright (c) 2004-2007 Reyk Floeter <reyk@openbsd.org>
3 * Copyright (c) 2006-2007 Nick Kossifidis <mickflemm@gmail.com> 3 * Copyright (c) 2006-2007 Nick Kossifidis <mickflemm@gmail.com>
4 * Copyright (c) 2007 Matthew W. S. Bell <mentor@madwifi.org> 4 * Copyright (c) 2007 Matthew W. S. Bell <mentor@madwifi.org>
@@ -48,14 +48,18 @@ static int ath5k_hw_setup_4word_tx_desc(struct ath5k_hw *, struct ath5k_desc *,
48static int ath5k_hw_setup_xr_tx_desc(struct ath5k_hw *, struct ath5k_desc *, 48static int ath5k_hw_setup_xr_tx_desc(struct ath5k_hw *, struct ath5k_desc *,
49 unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, 49 unsigned int, unsigned int, unsigned int, unsigned int, unsigned int,
50 unsigned int); 50 unsigned int);
51static int ath5k_hw_proc_4word_tx_status(struct ath5k_hw *, struct ath5k_desc *); 51static int ath5k_hw_proc_4word_tx_status(struct ath5k_hw *, struct ath5k_desc *,
52 struct ath5k_tx_status *);
52static int ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *, struct ath5k_desc *, 53static int ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *, struct ath5k_desc *,
53 unsigned int, unsigned int, enum ath5k_pkt_type, unsigned int, 54 unsigned int, unsigned int, enum ath5k_pkt_type, unsigned int,
54 unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, 55 unsigned int, unsigned int, unsigned int, unsigned int, unsigned int,
55 unsigned int, unsigned int); 56 unsigned int, unsigned int);
56static int ath5k_hw_proc_2word_tx_status(struct ath5k_hw *, struct ath5k_desc *); 57static int ath5k_hw_proc_2word_tx_status(struct ath5k_hw *, struct ath5k_desc *,
57static int ath5k_hw_proc_new_rx_status(struct ath5k_hw *, struct ath5k_desc *); 58 struct ath5k_tx_status *);
58static int ath5k_hw_proc_old_rx_status(struct ath5k_hw *, struct ath5k_desc *); 59static int ath5k_hw_proc_5212_rx_status(struct ath5k_hw *, struct ath5k_desc *,
60 struct ath5k_rx_status *);
61static int ath5k_hw_proc_5210_rx_status(struct ath5k_hw *, struct ath5k_desc *,
62 struct ath5k_rx_status *);
59static int ath5k_hw_get_capabilities(struct ath5k_hw *); 63static int ath5k_hw_get_capabilities(struct ath5k_hw *);
60 64
61static int ath5k_eeprom_init(struct ath5k_hw *); 65static int ath5k_eeprom_init(struct ath5k_hw *);
@@ -81,12 +85,12 @@ static int ath5k_hw_disable_pspoll(struct ath5k_hw *);
81 85
82static inline unsigned int ath5k_hw_htoclock(unsigned int usec, bool turbo) 86static inline unsigned int ath5k_hw_htoclock(unsigned int usec, bool turbo)
83{ 87{
84 return turbo == true ? (usec * 80) : (usec * 40); 88 return turbo ? (usec * 80) : (usec * 40);
85} 89}
86 90
87static inline unsigned int ath5k_hw_clocktoh(unsigned int clock, bool turbo) 91static inline unsigned int ath5k_hw_clocktoh(unsigned int clock, bool turbo)
88{ 92{
89 return turbo == true ? (clock / 80) : (clock / 40); 93 return turbo ? (clock / 80) : (clock / 40);
90} 94}
91 95
92/* 96/*
@@ -100,7 +104,7 @@ int ath5k_hw_register_timeout(struct ath5k_hw *ah, u32 reg, u32 flag, u32 val,
100 104
101 for (i = AR5K_TUNE_REGISTER_TIMEOUT; i > 0; i--) { 105 for (i = AR5K_TUNE_REGISTER_TIMEOUT; i > 0; i--) {
102 data = ath5k_hw_reg_read(ah, reg); 106 data = ath5k_hw_reg_read(ah, reg);
103 if ((is_set == true) && (data & flag)) 107 if (is_set && (data & flag))
104 break; 108 break;
105 else if ((data & flag) == val) 109 else if ((data & flag) == val)
106 break; 110 break;
@@ -116,11 +120,69 @@ int ath5k_hw_register_timeout(struct ath5k_hw *ah, u32 reg, u32 flag, u32 val,
116\***************************************/ 120\***************************************/
117 121
118/* 122/*
123 * Power On Self Test helper function
124 */
125static int ath5k_hw_post(struct ath5k_hw *ah)
126{
127
128 int i, c;
129 u16 cur_reg;
130 u16 regs[2] = {AR5K_STA_ID0, AR5K_PHY(8)};
131 u32 var_pattern;
132 u32 static_pattern[4] = {
133 0x55555555, 0xaaaaaaaa,
134 0x66666666, 0x99999999
135 };
136 u32 init_val;
137 u32 cur_val;
138
139 for (c = 0; c < 2; c++) {
140
141 cur_reg = regs[c];
142 init_val = ath5k_hw_reg_read(ah, cur_reg);
143
144 for (i = 0; i < 256; i++) {
145 var_pattern = i << 16 | i;
146 ath5k_hw_reg_write(ah, var_pattern, cur_reg);
147 cur_val = ath5k_hw_reg_read(ah, cur_reg);
148
149 if (cur_val != var_pattern) {
150 ATH5K_ERR(ah->ah_sc, "POST Failed !!!\n");
151 return -EAGAIN;
152 }
153
154 /* Found on ndiswrapper dumps */
155 var_pattern = 0x0039080f;
156 ath5k_hw_reg_write(ah, var_pattern, cur_reg);
157 }
158
159 for (i = 0; i < 4; i++) {
160 var_pattern = static_pattern[i];
161 ath5k_hw_reg_write(ah, var_pattern, cur_reg);
162 cur_val = ath5k_hw_reg_read(ah, cur_reg);
163
164 if (cur_val != var_pattern) {
165 ATH5K_ERR(ah->ah_sc, "POST Failed !!!\n");
166 return -EAGAIN;
167 }
168
169 /* Found on ndiswrapper dumps */
170 var_pattern = 0x003b080f;
171 ath5k_hw_reg_write(ah, var_pattern, cur_reg);
172 }
173 }
174
175 return 0;
176
177}
178
179/*
119 * Check if the device is supported and initialize the needed structs 180 * Check if the device is supported and initialize the needed structs
120 */ 181 */
121struct ath5k_hw *ath5k_hw_attach(struct ath5k_softc *sc, u8 mac_version) 182struct ath5k_hw *ath5k_hw_attach(struct ath5k_softc *sc, u8 mac_version)
122{ 183{
123 struct ath5k_hw *ah; 184 struct ath5k_hw *ah;
185 struct pci_dev *pdev = sc->pdev;
124 u8 mac[ETH_ALEN]; 186 u8 mac[ETH_ALEN];
125 int ret; 187 int ret;
126 u32 srev; 188 u32 srev;
@@ -140,9 +202,6 @@ struct ath5k_hw *ath5k_hw_attach(struct ath5k_softc *sc, u8 mac_version)
140 * HW information 202 * HW information
141 */ 203 */
142 204
143 /* Get reg domain from eeprom */
144 ath5k_get_regdomain(ah);
145
146 ah->ah_op_mode = IEEE80211_IF_TYPE_STA; 205 ah->ah_op_mode = IEEE80211_IF_TYPE_STA;
147 ah->ah_radar.r_enabled = AR5K_TUNE_RADAR_ALERT; 206 ah->ah_radar.r_enabled = AR5K_TUNE_RADAR_ALERT;
148 ah->ah_turbo = false; 207 ah->ah_turbo = false;
@@ -177,9 +236,9 @@ struct ath5k_hw *ath5k_hw_attach(struct ath5k_softc *sc, u8 mac_version)
177 } 236 }
178 237
179 if (ah->ah_version == AR5K_AR5212) 238 if (ah->ah_version == AR5K_AR5212)
180 ah->ah_proc_rx_desc = ath5k_hw_proc_new_rx_status; 239 ah->ah_proc_rx_desc = ath5k_hw_proc_5212_rx_status;
181 else if (ah->ah_version <= AR5K_AR5211) 240 else if (ah->ah_version <= AR5K_AR5211)
182 ah->ah_proc_rx_desc = ath5k_hw_proc_old_rx_status; 241 ah->ah_proc_rx_desc = ath5k_hw_proc_5210_rx_status;
183 242
184 /* Bring device out of sleep and reset its units */ 243 /* Bring device out of sleep and reset its units */
185 ret = ath5k_hw_nic_wakeup(ah, AR5K_INIT_MODE, true); 244 ret = ath5k_hw_nic_wakeup(ah, AR5K_INIT_MODE, true);
@@ -203,15 +262,19 @@ struct ath5k_hw *ath5k_hw_attach(struct ath5k_softc *sc, u8 mac_version)
203 CHANNEL_2GHZ); 262 CHANNEL_2GHZ);
204 263
205 /* Return on unsupported chips (unsupported eeprom etc) */ 264 /* Return on unsupported chips (unsupported eeprom etc) */
206 if(srev >= AR5K_SREV_VER_AR5416){ 265 if ((srev >= AR5K_SREV_VER_AR5416) &&
266 (srev < AR5K_SREV_VER_AR2425)) {
207 ATH5K_ERR(sc, "Device not yet supported.\n"); 267 ATH5K_ERR(sc, "Device not yet supported.\n");
208 ret = -ENODEV; 268 ret = -ENODEV;
209 goto err_free; 269 goto err_free;
270 } else if (srev == AR5K_SREV_VER_AR2425) {
271 ATH5K_WARN(sc, "Support for RF2425 is under development.\n");
210 } 272 }
211 273
212 /* Identify single chip solutions */ 274 /* Identify single chip solutions */
213 if((srev <= AR5K_SREV_VER_AR5414) && 275 if (((srev <= AR5K_SREV_VER_AR5414) &&
214 (srev >= AR5K_SREV_VER_AR2424)) { 276 (srev >= AR5K_SREV_VER_AR2413)) ||
277 (srev == AR5K_SREV_VER_AR2425)) {
215 ah->ah_single_chip = true; 278 ah->ah_single_chip = true;
216 } else { 279 } else {
217 ah->ah_single_chip = false; 280 ah->ah_single_chip = false;
@@ -226,15 +289,81 @@ struct ath5k_hw *ath5k_hw_attach(struct ath5k_softc *sc, u8 mac_version)
226 ah->ah_radio = AR5K_RF5110; 289 ah->ah_radio = AR5K_RF5110;
227 } else if (ah->ah_radio_5ghz_revision < AR5K_SREV_RAD_5112) { 290 } else if (ah->ah_radio_5ghz_revision < AR5K_SREV_RAD_5112) {
228 ah->ah_radio = AR5K_RF5111; 291 ah->ah_radio = AR5K_RF5111;
229 } else if (ah->ah_radio_5ghz_revision < AR5K_SREV_RAD_SC1) { 292 ah->ah_phy_spending = AR5K_PHY_SPENDING_RF5111;
293 } else if (ah->ah_radio_5ghz_revision < AR5K_SREV_RAD_SC0) {
294
230 ah->ah_radio = AR5K_RF5112; 295 ah->ah_radio = AR5K_RF5112;
231 } else { 296
297 if (ah->ah_radio_5ghz_revision < AR5K_SREV_RAD_5112A) {
298 ah->ah_phy_spending = AR5K_PHY_SPENDING_RF5112;
299 } else {
300 ah->ah_phy_spending = AR5K_PHY_SPENDING_RF5112A;
301 }
302
303 } else if (ah->ah_radio_5ghz_revision < AR5K_SREV_RAD_SC1) {
304 ah->ah_radio = AR5K_RF2413;
305 ah->ah_phy_spending = AR5K_PHY_SPENDING_RF5112A;
306 } else if (ah->ah_radio_5ghz_revision < AR5K_SREV_RAD_SC2) {
307
232 ah->ah_radio = AR5K_RF5413; 308 ah->ah_radio = AR5K_RF5413;
309
310 if (ah->ah_mac_srev <= AR5K_SREV_VER_AR5424 &&
311 ah->ah_mac_srev >= AR5K_SREV_VER_AR2424)
312 ah->ah_phy_spending = AR5K_PHY_SPENDING_RF5424;
313 else
314 ah->ah_phy_spending = AR5K_PHY_SPENDING_RF5112A;
315 /*
316 * Register returns 0x4 for radio revision
317 * so ath5k_hw_radio_revision doesn't parse the value
318 * correctly. For now we rely on the MAC srev to
319 * identify the RF2425 radio.
320 */
321 } else if (srev == AR5K_SREV_VER_AR2425) {
322 ah->ah_radio = AR5K_RF2425;
323 ah->ah_phy_spending = AR5K_PHY_SPENDING_RF5112;
233 } 324 }
234 325
235 ah->ah_phy = AR5K_PHY(0); 326 ah->ah_phy = AR5K_PHY(0);
236 327
237 /* 328 /*
329 * Identify AR5212-based PCI-E cards
330 * and write some initial settings.
331 *
332 * (doing a "strings" on ndis driver
333 * -ar5211.sys- reveals the following
334 * pci-e related functions:
335 *
336 * pcieClockReq
337 * pcieRxErrNotify
338 * pcieL1SKPEnable
339 * pcieAspm
340 * pcieDisableAspmOnRfWake
341 * pciePowerSaveEnable
342 *
343 * I guess these point to ClockReq but
344 * I'm not sure.)
345 */
346 if ((ah->ah_version == AR5K_AR5212) && (pdev->is_pcie)) {
347 ath5k_hw_reg_write(ah, 0x9248fc00, 0x4080);
348 ath5k_hw_reg_write(ah, 0x24924924, 0x4080);
349 ath5k_hw_reg_write(ah, 0x28000039, 0x4080);
350 ath5k_hw_reg_write(ah, 0x53160824, 0x4080);
351 ath5k_hw_reg_write(ah, 0xe5980579, 0x4080);
352 ath5k_hw_reg_write(ah, 0x001defff, 0x4080);
353 ath5k_hw_reg_write(ah, 0x1aaabe40, 0x4080);
354 ath5k_hw_reg_write(ah, 0xbe105554, 0x4080);
355 ath5k_hw_reg_write(ah, 0x000e3007, 0x4080);
356 ath5k_hw_reg_write(ah, 0x00000000, 0x4084);
357 }
358
359 /*
360 * POST
361 */
362 ret = ath5k_hw_post(ah);
363 if (ret)
364 goto err_free;
365
366 /*
238 * Get card capabilities, values, ... 367 * Get card capabilities, values, ...
239 */ 368 */
240 369
@@ -280,7 +409,8 @@ err:
280 */ 409 */
281static int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, int flags, bool initial) 410static int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, int flags, bool initial)
282{ 411{
283 u32 turbo, mode, clock; 412 struct pci_dev *pdev = ah->ah_sc->pdev;
413 u32 turbo, mode, clock, bus_flags;
284 int ret; 414 int ret;
285 415
286 turbo = 0; 416 turbo = 0;
@@ -357,10 +487,16 @@ static int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, int flags, bool initial)
357 AR5K_PHY_TURBO); 487 AR5K_PHY_TURBO);
358 } 488 }
359 489
360 /* ...reset chipset and PCI device */ 490 /* resetting PCI on PCI-E cards causes the card to hang
361 if (ah->ah_single_chip == false && ath5k_hw_nic_reset(ah, 491 * and always return 0xffff... so we ignore that flag
362 AR5K_RESET_CTL_CHIP | AR5K_RESET_CTL_PCI)) { 492 * for PCI-E cards */
363 ATH5K_ERR(ah->ah_sc, "failed to reset the MAC Chip + PCI\n"); 493 bus_flags = (pdev->is_pcie) ? 0 : AR5K_RESET_CTL_PCI;
494
495 /* Reset chipset */
496 ret = ath5k_hw_nic_reset(ah, AR5K_RESET_CTL_PCU |
497 AR5K_RESET_CTL_BASEBAND | bus_flags);
498 if (ret) {
499 ATH5K_ERR(ah->ah_sc, "failed to reset the MAC Chip\n");
364 return -EIO; 500 return -EIO;
365 } 501 }
366 502
@@ -405,15 +541,15 @@ const struct ath5k_rate_table *ath5k_hw_get_rate_table(struct ath5k_hw *ah,
405 541
406 /* Get rate tables */ 542 /* Get rate tables */
407 switch (mode) { 543 switch (mode) {
408 case MODE_IEEE80211A: 544 case AR5K_MODE_11A:
409 return &ath5k_rt_11a; 545 return &ath5k_rt_11a;
410 case MODE_ATHEROS_TURBO: 546 case AR5K_MODE_11A_TURBO:
411 return &ath5k_rt_turbo; 547 return &ath5k_rt_turbo;
412 case MODE_IEEE80211B: 548 case AR5K_MODE_11B:
413 return &ath5k_rt_11b; 549 return &ath5k_rt_11b;
414 case MODE_IEEE80211G: 550 case AR5K_MODE_11G:
415 return &ath5k_rt_11g; 551 return &ath5k_rt_11g;
416 case MODE_ATHEROS_TURBOG: 552 case AR5K_MODE_11G_TURBO:
417 return &ath5k_rt_xr; 553 return &ath5k_rt_xr;
418 } 554 }
419 555
@@ -459,15 +595,15 @@ static inline int ath5k_hw_write_ofdm_timings(struct ath5k_hw *ah,
459 ds_coef_exp, ds_coef_man, clock; 595 ds_coef_exp, ds_coef_man, clock;
460 596
461 if (!(ah->ah_version == AR5K_AR5212) || 597 if (!(ah->ah_version == AR5K_AR5212) ||
462 !(channel->val & CHANNEL_OFDM)) 598 !(channel->hw_value & CHANNEL_OFDM))
463 BUG(); 599 BUG();
464 600
465 /* Seems there are two PLLs, one for baseband sampling and one 601 /* Seems there are two PLLs, one for baseband sampling and one
466 * for tuning. Tuning basebands are 40 MHz or 80MHz when in 602 * for tuning. Tuning basebands are 40 MHz or 80MHz when in
467 * turbo. */ 603 * turbo. */
468 clock = channel->val & CHANNEL_TURBO ? 80 : 40; 604 clock = channel->hw_value & CHANNEL_TURBO ? 80 : 40;
469 coef_scaled = ((5 * (clock << 24)) / 2) / 605 coef_scaled = ((5 * (clock << 24)) / 2) /
470 channel->freq; 606 channel->center_freq;
471 607
472 for (coef_exp = 31; coef_exp > 0; coef_exp--) 608 for (coef_exp = 31; coef_exp > 0; coef_exp--)
473 if ((coef_scaled >> coef_exp) & 0x1) 609 if ((coef_scaled >> coef_exp) & 0x1)
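A worked example of the fixed-point step above may help. With a non-turbo channel the sampling clock is 40 MHz, so for a 2412 MHz channel coef_scaled = ((5 * (40 << 24)) / 2) / 2412 = 695573, and the loop then finds the highest set bit (19 here) to use as a binary exponent. The sketch below reproduces only this visible part of the computation; the subsequent mantissa/exponent packing into the PHY timing register falls outside the hunk and is not shown.

#include <stdio.h>

int main(void)
{
	/* Example inputs: 2412 MHz (2.4 GHz channel 1), non-turbo -> 40 MHz. */
	unsigned int center_freq = 2412;
	unsigned int clock = 40;		/* 80 when CHANNEL_TURBO is set */
	unsigned int coef_scaled, coef_exp;

	/* Same expression as the hunk: 2.5 * clock * 2^24 / freq, u32 math. */
	coef_scaled = ((5 * (clock << 24)) / 2) / center_freq;

	/* Highest set bit, i.e. floor(log2(coef_scaled)). */
	for (coef_exp = 31; coef_exp > 0; coef_exp--)
		if ((coef_scaled >> coef_exp) & 0x1)
			break;

	printf("coef_scaled = %u, coef_exp = %u\n", coef_scaled, coef_exp);
	/* Prints: coef_scaled = 695573, coef_exp = 19 */
	return 0;
}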
@@ -494,8 +630,7 @@ static inline int ath5k_hw_write_ofdm_timings(struct ath5k_hw *ah,
494 * ath5k_hw_write_rate_duration - set rate duration during hw resets 630 * ath5k_hw_write_rate_duration - set rate duration during hw resets
495 * 631 *
496 * @ah: the &struct ath5k_hw 632 * @ah: the &struct ath5k_hw
497 * @driver_mode: one of enum ieee80211_phymode or our one of our own 633 * @mode: one of enum ath5k_driver_mode
498 * vendor modes
499 * 634 *
500 * Write the rate duration table for the current mode upon hw reset. This 635 * Write the rate duration table for the current mode upon hw reset. This
501 * is a helper for ath5k_hw_reset(). It seems all this is doing is setting 636 * is a helper for ath5k_hw_reset(). It seems all this is doing is setting
@@ -506,19 +641,20 @@ static inline int ath5k_hw_write_ofdm_timings(struct ath5k_hw *ah,
506 * 641 *
507 */ 642 */
508static inline void ath5k_hw_write_rate_duration(struct ath5k_hw *ah, 643static inline void ath5k_hw_write_rate_duration(struct ath5k_hw *ah,
509 unsigned int driver_mode) 644 unsigned int mode)
510{ 645{
511 struct ath5k_softc *sc = ah->ah_sc; 646 struct ath5k_softc *sc = ah->ah_sc;
512 const struct ath5k_rate_table *rt; 647 const struct ath5k_rate_table *rt;
648 struct ieee80211_rate srate = {};
513 unsigned int i; 649 unsigned int i;
514 650
515 /* Get rate table for the current operating mode */ 651 /* Get rate table for the current operating mode */
516 rt = ath5k_hw_get_rate_table(ah, 652 rt = ath5k_hw_get_rate_table(ah, mode);
517 driver_mode);
518 653
519 /* Write rate duration table */ 654 /* Write rate duration table */
520 for (i = 0; i < rt->rate_count; i++) { 655 for (i = 0; i < rt->rate_count; i++) {
521 const struct ath5k_rate *rate, *control_rate; 656 const struct ath5k_rate *rate, *control_rate;
657
522 u32 reg; 658 u32 reg;
523 u16 tx_time; 659 u16 tx_time;
524 660
@@ -528,14 +664,16 @@ static inline void ath5k_hw_write_rate_duration(struct ath5k_hw *ah,
528 /* Set ACK timeout */ 664 /* Set ACK timeout */
529 reg = AR5K_RATE_DUR(rate->rate_code); 665 reg = AR5K_RATE_DUR(rate->rate_code);
530 666
667 srate.bitrate = control_rate->rate_kbps/100;
668
531 /* An ACK frame consists of 10 bytes. If you add the FCS, 669 /* An ACK frame consists of 10 bytes. If you add the FCS,
532 * which ieee80211_generic_frame_duration() adds, 670 * which ieee80211_generic_frame_duration() adds,
533 * its 14 bytes. Note we use the control rate and not the 671 * its 14 bytes. Note we use the control rate and not the
534 * actual rate for this rate. See mac80211 tx.c 672 * actual rate for this rate. See mac80211 tx.c
535 * ieee80211_duration() for a brief description of 673 * ieee80211_duration() for a brief description of
536 * what rate we should choose to TX ACKs. */ 674 * what rate we should choose to TX ACKs. */
537 tx_time = ieee80211_generic_frame_duration(sc->hw, 675 tx_time = le16_to_cpu(ieee80211_generic_frame_duration(sc->hw,
538 sc->vif, 10, control_rate->rate_kbps/100); 676 sc->vif, 10, &srate));
539 677
540 ath5k_hw_reg_write(ah, tx_time, reg); 678 ath5k_hw_reg_write(ah, tx_time, reg);
541 679
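The change above adapts the ACK-duration helper to a mac80211 API that takes a struct ieee80211_rate instead of a raw rate: the driver's control-rate value in kb/s is converted to mac80211's bitrate unit of 100 kb/s ("srate.bitrate = control_rate->rate_kbps/100"), and the added le16_to_cpu() indicates the helper now returns a little-endian __le16 that must be converted before being written to the rate-duration register. A trivial sketch of just the unit conversion (the duration math itself lives in mac80211 and is not reproduced here):

#include <stdio.h>

int main(void)
{
	/* Control-rate examples in kb/s, as the ath5k rate table stores them. */
	const unsigned int rate_kbps[] = { 1000, 2000, 6000, 24000 };
	unsigned int i;

	for (i = 0; i < sizeof(rate_kbps) / sizeof(rate_kbps[0]); i++) {
		/* mac80211's ieee80211_rate.bitrate is in units of 100 kb/s,
		 * so 1 Mb/s -> 10 and 24 Mb/s -> 240. */
		unsigned int bitrate = rate_kbps[i] / 100;

		printf("%5u kb/s -> bitrate field %u\n", rate_kbps[i], bitrate);
	}
	return 0;
}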
@@ -568,8 +706,9 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum ieee80211_if_types op_mode,
568 struct ieee80211_channel *channel, bool change_channel) 706 struct ieee80211_channel *channel, bool change_channel)
569{ 707{
570 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; 708 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
571 u32 data, s_seq, s_ant, s_led[3]; 709 struct pci_dev *pdev = ah->ah_sc->pdev;
572 unsigned int i, mode, freq, ee_mode, ant[2], driver_mode = -1; 710 u32 data, s_seq, s_ant, s_led[3], dma_size;
711 unsigned int i, mode, freq, ee_mode, ant[2];
573 int ret; 712 int ret;
574 713
575 ATH5K_TRACE(ah->ah_sc); 714 ATH5K_TRACE(ah->ah_sc);
@@ -585,7 +724,7 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum ieee80211_if_types op_mode,
585 */ 724 */
586 /*DCU/Antenna selection not available on 5210*/ 725 /*DCU/Antenna selection not available on 5210*/
587 if (ah->ah_version != AR5K_AR5210) { 726 if (ah->ah_version != AR5K_AR5210) {
588 if (change_channel == true) { 727 if (change_channel) {
589 /* Seq number for queue 0 -do this for all queues ? */ 728 /* Seq number for queue 0 -do this for all queues ? */
590 s_seq = ath5k_hw_reg_read(ah, 729 s_seq = ath5k_hw_reg_read(ah,
591 AR5K_QUEUE_DFS_SEQNUM(0)); 730 AR5K_QUEUE_DFS_SEQNUM(0));
@@ -599,12 +738,12 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum ieee80211_if_types op_mode,
599 s_led[1] = ath5k_hw_reg_read(ah, AR5K_GPIOCR); 738 s_led[1] = ath5k_hw_reg_read(ah, AR5K_GPIOCR);
600 s_led[2] = ath5k_hw_reg_read(ah, AR5K_GPIODO); 739 s_led[2] = ath5k_hw_reg_read(ah, AR5K_GPIODO);
601 740
602 if (change_channel == true && ah->ah_rf_banks != NULL) 741 if (change_channel && ah->ah_rf_banks != NULL)
603 ath5k_hw_get_rf_gain(ah); 742 ath5k_hw_get_rf_gain(ah);
604 743
605 744
606 /*Wakeup the device*/ 745 /*Wakeup the device*/
607 ret = ath5k_hw_nic_wakeup(ah, channel->val, false); 746 ret = ath5k_hw_nic_wakeup(ah, channel->hw_value, false);
608 if (ret) 747 if (ret)
609 return ret; 748 return ret;
610 749
@@ -620,43 +759,40 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum ieee80211_if_types op_mode,
620 if (ah->ah_version != AR5K_AR5210) { 759 if (ah->ah_version != AR5K_AR5210) {
621 if (ah->ah_radio != AR5K_RF5111 && 760 if (ah->ah_radio != AR5K_RF5111 &&
622 ah->ah_radio != AR5K_RF5112 && 761 ah->ah_radio != AR5K_RF5112 &&
623 ah->ah_radio != AR5K_RF5413) { 762 ah->ah_radio != AR5K_RF5413 &&
763 ah->ah_radio != AR5K_RF2413 &&
764 ah->ah_radio != AR5K_RF2425) {
624 ATH5K_ERR(ah->ah_sc, 765 ATH5K_ERR(ah->ah_sc,
625 "invalid phy radio: %u\n", ah->ah_radio); 766 "invalid phy radio: %u\n", ah->ah_radio);
626 return -EINVAL; 767 return -EINVAL;
627 } 768 }
628 769
629 switch (channel->val & CHANNEL_MODES) { 770 switch (channel->hw_value & CHANNEL_MODES) {
630 case CHANNEL_A: 771 case CHANNEL_A:
631 mode = AR5K_INI_VAL_11A; 772 mode = AR5K_MODE_11A;
632 freq = AR5K_INI_RFGAIN_5GHZ; 773 freq = AR5K_INI_RFGAIN_5GHZ;
633 ee_mode = AR5K_EEPROM_MODE_11A; 774 ee_mode = AR5K_EEPROM_MODE_11A;
634 driver_mode = MODE_IEEE80211A;
635 break; 775 break;
636 case CHANNEL_G: 776 case CHANNEL_G:
637 mode = AR5K_INI_VAL_11G; 777 mode = AR5K_MODE_11G;
638 freq = AR5K_INI_RFGAIN_2GHZ; 778 freq = AR5K_INI_RFGAIN_2GHZ;
639 ee_mode = AR5K_EEPROM_MODE_11G; 779 ee_mode = AR5K_EEPROM_MODE_11G;
640 driver_mode = MODE_IEEE80211G;
641 break; 780 break;
642 case CHANNEL_B: 781 case CHANNEL_B:
643 mode = AR5K_INI_VAL_11B; 782 mode = AR5K_MODE_11B;
644 freq = AR5K_INI_RFGAIN_2GHZ; 783 freq = AR5K_INI_RFGAIN_2GHZ;
645 ee_mode = AR5K_EEPROM_MODE_11B; 784 ee_mode = AR5K_EEPROM_MODE_11B;
646 driver_mode = MODE_IEEE80211B;
647 break; 785 break;
648 case CHANNEL_T: 786 case CHANNEL_T:
649 mode = AR5K_INI_VAL_11A_TURBO; 787 mode = AR5K_MODE_11A_TURBO;
650 freq = AR5K_INI_RFGAIN_5GHZ; 788 freq = AR5K_INI_RFGAIN_5GHZ;
651 ee_mode = AR5K_EEPROM_MODE_11A; 789 ee_mode = AR5K_EEPROM_MODE_11A;
652 driver_mode = MODE_ATHEROS_TURBO;
653 break; 790 break;
654 /*Is this ok on 5211 too ?*/ 791 /*Is this ok on 5211 too ?*/
655 case CHANNEL_TG: 792 case CHANNEL_TG:
656 mode = AR5K_INI_VAL_11G_TURBO; 793 mode = AR5K_MODE_11G_TURBO;
657 freq = AR5K_INI_RFGAIN_2GHZ; 794 freq = AR5K_INI_RFGAIN_2GHZ;
658 ee_mode = AR5K_EEPROM_MODE_11G; 795 ee_mode = AR5K_EEPROM_MODE_11G;
659 driver_mode = MODE_ATHEROS_TURBOG;
660 break; 796 break;
661 case CHANNEL_XR: 797 case CHANNEL_XR:
662 if (ah->ah_version == AR5K_AR5211) { 798 if (ah->ah_version == AR5K_AR5211) {
@@ -664,14 +800,13 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum ieee80211_if_types op_mode,
664 "XR mode not available on 5211"); 800 "XR mode not available on 5211");
665 return -EINVAL; 801 return -EINVAL;
666 } 802 }
667 mode = AR5K_INI_VAL_XR; 803 mode = AR5K_MODE_XR;
668 freq = AR5K_INI_RFGAIN_5GHZ; 804 freq = AR5K_INI_RFGAIN_5GHZ;
669 ee_mode = AR5K_EEPROM_MODE_11A; 805 ee_mode = AR5K_EEPROM_MODE_11A;
670 driver_mode = MODE_IEEE80211A;
671 break; 806 break;
672 default: 807 default:
673 ATH5K_ERR(ah->ah_sc, 808 ATH5K_ERR(ah->ah_sc,
674 "invalid channel: %d\n", channel->freq); 809 "invalid channel: %d\n", channel->center_freq);
675 return -EINVAL; 810 return -EINVAL;
676 } 811 }
677 812
@@ -701,15 +836,26 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum ieee80211_if_types op_mode,
701 /* 836 /*
702 * Write some more initial register settings 837 * Write some more initial register settings
703 */ 838 */
704 if (ah->ah_version > AR5K_AR5211){ /* found on 5213+ */ 839 if (ah->ah_version == AR5K_AR5212) {
705 ath5k_hw_reg_write(ah, 0x0002a002, AR5K_PHY(11)); 840 ath5k_hw_reg_write(ah, 0x0002a002, AR5K_PHY(11));
706 841
707 if (channel->val == CHANNEL_G) 842 if (channel->hw_value == CHANNEL_G)
708 ath5k_hw_reg_write(ah, 0x00f80d80, AR5K_PHY(83)); /* 0x00fc0ec0 */ 843 if (ah->ah_mac_srev < AR5K_SREV_VER_AR2413)
844 ath5k_hw_reg_write(ah, 0x00f80d80,
845 AR5K_PHY(83));
846 else if (ah->ah_mac_srev < AR5K_SREV_VER_AR2424)
847 ath5k_hw_reg_write(ah, 0x00380140,
848 AR5K_PHY(83));
849 else if (ah->ah_mac_srev < AR5K_SREV_VER_AR2425)
850 ath5k_hw_reg_write(ah, 0x00fc0ec0,
851 AR5K_PHY(83));
852 else /* 2425 */
853 ath5k_hw_reg_write(ah, 0x00fc0fc0,
854 AR5K_PHY(83));
709 else 855 else
710 ath5k_hw_reg_write(ah, 0x00000000, AR5K_PHY(83)); 856 ath5k_hw_reg_write(ah, 0x00000000,
857 AR5K_PHY(83));
711 858
712 ath5k_hw_reg_write(ah, 0x000001b5, 0xa228); /* 0x000009b5 */
713 ath5k_hw_reg_write(ah, 0x000009b5, 0xa228); 859 ath5k_hw_reg_write(ah, 0x000009b5, 0xa228);
714 ath5k_hw_reg_write(ah, 0x0000000f, 0x8060); 860 ath5k_hw_reg_write(ah, 0x0000000f, 0x8060);
715 ath5k_hw_reg_write(ah, 0x00000000, 0xa254); 861 ath5k_hw_reg_write(ah, 0x00000000, 0xa254);
@@ -722,7 +868,7 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum ieee80211_if_types op_mode,
722 AR5K_SREV_RAD_5112A) { 868 AR5K_SREV_RAD_5112A) {
723 ath5k_hw_reg_write(ah, AR5K_PHY_CCKTXCTL_WORLD, 869 ath5k_hw_reg_write(ah, AR5K_PHY_CCKTXCTL_WORLD,
724 AR5K_PHY_CCKTXCTL); 870 AR5K_PHY_CCKTXCTL);
725 if (channel->val & CHANNEL_5GHZ) 871 if (channel->hw_value & CHANNEL_5GHZ)
726 data = 0xffb81020; 872 data = 0xffb81020;
727 else 873 else
728 data = 0xffb80d20; 874 data = 0xffb80d20;
@@ -742,7 +888,7 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum ieee80211_if_types op_mode,
742 * mac80211 are integrated */ 888 * mac80211 are integrated */
743 if (ah->ah_version == AR5K_AR5212 && 889 if (ah->ah_version == AR5K_AR5212 &&
744 ah->ah_sc->vif != NULL) 890 ah->ah_sc->vif != NULL)
745 ath5k_hw_write_rate_duration(ah, driver_mode); 891 ath5k_hw_write_rate_duration(ah, mode);
746 892
747 /* 893 /*
748 * Write RF registers 894 * Write RF registers
@@ -758,7 +904,7 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum ieee80211_if_types op_mode,
758 904
759 /* Write OFDM timings on 5212*/ 905 /* Write OFDM timings on 5212*/
760 if (ah->ah_version == AR5K_AR5212 && 906 if (ah->ah_version == AR5K_AR5212 &&
761 channel->val & CHANNEL_OFDM) { 907 channel->hw_value & CHANNEL_OFDM) {
762 ret = ath5k_hw_write_ofdm_timings(ah, channel); 908 ret = ath5k_hw_write_ofdm_timings(ah, channel);
763 if (ret) 909 if (ret)
764 return ret; 910 return ret;
@@ -767,7 +913,7 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum ieee80211_if_types op_mode,
767 /*Enable/disable 802.11b mode on 5111 913 /*Enable/disable 802.11b mode on 5111
768 (enable 2111 frequency converter + CCK)*/ 914 (enable 2111 frequency converter + CCK)*/
769 if (ah->ah_radio == AR5K_RF5111) { 915 if (ah->ah_radio == AR5K_RF5111) {
770 if (driver_mode == MODE_IEEE80211B) 916 if (mode == AR5K_MODE_11B)
771 AR5K_REG_ENABLE_BITS(ah, AR5K_TXCFG, 917 AR5K_REG_ENABLE_BITS(ah, AR5K_TXCFG,
772 AR5K_TXCFG_B_MODE); 918 AR5K_TXCFG_B_MODE);
773 else 919 else
@@ -885,13 +1031,24 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum ieee80211_if_types op_mode,
885 1031
886 /* 1032 /*
887 * Set Rx/Tx DMA Configuration 1033 * Set Rx/Tx DMA Configuration
888 *(passing dma size not available on 5210) 1034 *
1035 * Set maximum DMA size (512) except for PCI-E cards since
1036 * it causes rx overruns and tx errors (tested on 5424; since
1037 * rx overruns also occur on 5416/5418 with madwifi we set 128
1038 * for all PCI-E cards to be safe).
1039 *
1040 * In dumps this is 128 for all chips.
1041 *
1042 * XXX: need to check 5210 for this
1043 * TODO: Check out tx trigger level, it's always 64 on dumps but I
1044 * guess we can tweak it and see how it goes ;-)
889 */ 1045 */
1046 dma_size = (pdev->is_pcie) ? AR5K_DMASIZE_128B : AR5K_DMASIZE_512B;
890 if (ah->ah_version != AR5K_AR5210) { 1047 if (ah->ah_version != AR5K_AR5210) {
891 AR5K_REG_WRITE_BITS(ah, AR5K_TXCFG, AR5K_TXCFG_SDMAMR, 1048 AR5K_REG_WRITE_BITS(ah, AR5K_TXCFG,
892 AR5K_DMASIZE_512B | AR5K_TXCFG_DMASIZE); 1049 AR5K_TXCFG_SDMAMR, dma_size);
893 AR5K_REG_WRITE_BITS(ah, AR5K_RXCFG, AR5K_RXCFG_SDMAMW, 1050 AR5K_REG_WRITE_BITS(ah, AR5K_RXCFG,
894 AR5K_DMASIZE_512B); 1051 AR5K_RXCFG_SDMAMW, dma_size);
895 } 1052 }
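The burst size above is programmed with AR5K_REG_WRITE_BITS(), which, as used here, updates only the SDMAMR/SDMAMW field of the Tx/Rx configuration registers rather than rewriting the whole word. The macro's definition is not part of this diff; a plausible user-space model of such a read-modify-write field update, with made-up mask and shift values, looks like this:

#include <stdio.h>

/* Hypothetical field occupying bits 2..4 of a config register. */
#define CFG_SDMAMR_MASK		0x0000001cu
#define CFG_SDMAMR_SHIFT	2

/* Update one field, leaving the other bits of the register untouched. */
static unsigned int write_bits(unsigned int reg, unsigned int mask,
			       int shift, unsigned int val)
{
	return (reg & ~mask) | ((val << shift) & mask);
}

int main(void)
{
	unsigned int txcfg = 0xdeadbee1;	/* pretend value read from hw */

	/* Program an example burst-size code of 4 into the field. */
	txcfg = write_bits(txcfg, CFG_SDMAMR_MASK, CFG_SDMAMR_SHIFT, 4);
	printf("txcfg = 0x%08x\n", txcfg);	/* 0xdeadbef1 */
	return 0;
}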
896 1053
897 /* 1054 /*
@@ -905,7 +1062,7 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum ieee80211_if_types op_mode,
905 if (ah->ah_version != AR5K_AR5210) { 1062 if (ah->ah_version != AR5K_AR5210) {
906 data = ath5k_hw_reg_read(ah, AR5K_PHY_RX_DELAY) & 1063 data = ath5k_hw_reg_read(ah, AR5K_PHY_RX_DELAY) &
907 AR5K_PHY_RX_DELAY_M; 1064 AR5K_PHY_RX_DELAY_M;
908 data = (channel->val & CHANNEL_CCK) ? 1065 data = (channel->hw_value & CHANNEL_CCK) ?
909 ((data << 2) / 22) : (data / 10); 1066 ((data << 2) / 22) : (data / 10);
910 1067
911 udelay(100 + data); 1068 udelay(100 + data);
@@ -922,11 +1079,11 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum ieee80211_if_types op_mode,
922 if (ath5k_hw_register_timeout(ah, AR5K_PHY_AGCCTL, 1079 if (ath5k_hw_register_timeout(ah, AR5K_PHY_AGCCTL,
923 AR5K_PHY_AGCCTL_CAL, 0, false)) { 1080 AR5K_PHY_AGCCTL_CAL, 0, false)) {
924 ATH5K_ERR(ah->ah_sc, "calibration timeout (%uMHz)\n", 1081 ATH5K_ERR(ah->ah_sc, "calibration timeout (%uMHz)\n",
925 channel->freq); 1082 channel->center_freq);
926 return -EAGAIN; 1083 return -EAGAIN;
927 } 1084 }
928 1085
929 ret = ath5k_hw_noise_floor_calibration(ah, channel->freq); 1086 ret = ath5k_hw_noise_floor_calibration(ah, channel->center_freq);
930 if (ret) 1087 if (ret)
931 return ret; 1088 return ret;
932 1089
@@ -934,7 +1091,7 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum ieee80211_if_types op_mode,
934 1091
935 /* A and G modes can use QAM modulation which requires enabling 1092 /* A and G modes can use QAM modulation which requires enabling
936 * I and Q calibration. Don't bother in B mode. */ 1093 * I and Q calibration. Don't bother in B mode. */
937 if (!(driver_mode == MODE_IEEE80211B)) { 1094 if (!(mode == AR5K_MODE_11B)) {
938 ah->ah_calibration = true; 1095 ah->ah_calibration = true;
939 AR5K_REG_WRITE_BITS(ah, AR5K_PHY_IQ, 1096 AR5K_REG_WRITE_BITS(ah, AR5K_PHY_IQ,
940 AR5K_PHY_IQ_CAL_NUM_LOG_MAX, 15); 1097 AR5K_PHY_IQ_CAL_NUM_LOG_MAX, 15);
@@ -981,6 +1138,8 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum ieee80211_if_types op_mode,
981 1138
982 /* 1139 /*
983 * Set the 32MHz reference clock on 5212 phy clock sleep register 1140 * Set the 32MHz reference clock on 5212 phy clock sleep register
1141 *
1142 * TODO: Find out how to switch to external 32kHz clock to save power
984 */ 1143 */
985 if (ah->ah_version == AR5K_AR5212) { 1144 if (ah->ah_version == AR5K_AR5212) {
986 ath5k_hw_reg_write(ah, AR5K_PHY_SCR_32MHZ, AR5K_PHY_SCR); 1145 ath5k_hw_reg_write(ah, AR5K_PHY_SCR_32MHZ, AR5K_PHY_SCR);
@@ -988,9 +1147,15 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum ieee80211_if_types op_mode,
988 ath5k_hw_reg_write(ah, AR5K_PHY_SCAL_32MHZ, AR5K_PHY_SCAL); 1147 ath5k_hw_reg_write(ah, AR5K_PHY_SCAL_32MHZ, AR5K_PHY_SCAL);
989 ath5k_hw_reg_write(ah, AR5K_PHY_SCLOCK_32MHZ, AR5K_PHY_SCLOCK); 1148 ath5k_hw_reg_write(ah, AR5K_PHY_SCLOCK_32MHZ, AR5K_PHY_SCLOCK);
990 ath5k_hw_reg_write(ah, AR5K_PHY_SDELAY_32MHZ, AR5K_PHY_SDELAY); 1149 ath5k_hw_reg_write(ah, AR5K_PHY_SDELAY_32MHZ, AR5K_PHY_SDELAY);
991 ath5k_hw_reg_write(ah, ah->ah_radio == AR5K_RF5111 ? 1150 ath5k_hw_reg_write(ah, ah->ah_phy_spending, AR5K_PHY_SPENDING);
992 AR5K_PHY_SPENDING_RF5111 : AR5K_PHY_SPENDING_RF5112, 1151 }
993 AR5K_PHY_SPENDING); 1152
1153 if (ah->ah_version == AR5K_AR5212) {
1154 ath5k_hw_reg_write(ah, 0x000100aa, 0x8118);
1155 ath5k_hw_reg_write(ah, 0x00003210, 0x811c);
1156 ath5k_hw_reg_write(ah, 0x00000052, 0x8108);
1157 if (ah->ah_mac_srev >= AR5K_SREV_VER_AR2413)
1158 ath5k_hw_reg_write(ah, 0x00000004, 0x8120);
994 } 1159 }
995 1160
996 /* 1161 /*
@@ -1065,7 +1230,7 @@ int ath5k_hw_set_power(struct ath5k_hw *ah, enum ath5k_power_mode mode,
1065 staid &= ~AR5K_STA_ID1_DEFAULT_ANTENNA; 1230 staid &= ~AR5K_STA_ID1_DEFAULT_ANTENNA;
1066 /* fallthrough */ 1231 /* fallthrough */
1067 case AR5K_PM_NETWORK_SLEEP: 1232 case AR5K_PM_NETWORK_SLEEP:
1068 if (set_chip == true) 1233 if (set_chip)
1069 ath5k_hw_reg_write(ah, 1234 ath5k_hw_reg_write(ah,
1070 AR5K_SLEEP_CTL_SLE | sleep_duration, 1235 AR5K_SLEEP_CTL_SLE | sleep_duration,
1071 AR5K_SLEEP_CTL); 1236 AR5K_SLEEP_CTL);
@@ -1074,7 +1239,7 @@ int ath5k_hw_set_power(struct ath5k_hw *ah, enum ath5k_power_mode mode,
1074 break; 1239 break;
1075 1240
1076 case AR5K_PM_FULL_SLEEP: 1241 case AR5K_PM_FULL_SLEEP:
1077 if (set_chip == true) 1242 if (set_chip)
1078 ath5k_hw_reg_write(ah, AR5K_SLEEP_CTL_SLE_SLP, 1243 ath5k_hw_reg_write(ah, AR5K_SLEEP_CTL_SLE_SLP,
1079 AR5K_SLEEP_CTL); 1244 AR5K_SLEEP_CTL);
1080 1245
@@ -1082,7 +1247,7 @@ int ath5k_hw_set_power(struct ath5k_hw *ah, enum ath5k_power_mode mode,
1082 break; 1247 break;
1083 1248
1084 case AR5K_PM_AWAKE: 1249 case AR5K_PM_AWAKE:
1085 if (set_chip == false) 1250 if (!set_chip)
1086 goto commit; 1251 goto commit;
1087 1252
1088 ath5k_hw_reg_write(ah, AR5K_SLEEP_CTL_SLE_WAKE, 1253 ath5k_hw_reg_write(ah, AR5K_SLEEP_CTL_SLE_WAKE,
@@ -1389,7 +1554,7 @@ int ath5k_hw_update_tx_triglevel(struct ath5k_hw *ah, bool increase)
1389 trigger_level = AR5K_REG_MS(ath5k_hw_reg_read(ah, AR5K_TXCFG), 1554 trigger_level = AR5K_REG_MS(ath5k_hw_reg_read(ah, AR5K_TXCFG),
1390 AR5K_TXCFG_TXFULL); 1555 AR5K_TXCFG_TXFULL);
1391 1556
1392 if (increase == false) { 1557 if (!increase) {
1393 if (--trigger_level < AR5K_TUNE_MIN_TX_FIFO_THRES) 1558 if (--trigger_level < AR5K_TUNE_MIN_TX_FIFO_THRES)
1394 goto done; 1559 goto done;
1395 } else 1560 } else
@@ -1592,9 +1757,10 @@ static int ath5k_hw_eeprom_read(struct ath5k_hw *ah, u32 offset, u16 *data)
1592/* 1757/*
1593 * Write to eeprom - currently disabled, use at your own risk 1758 * Write to eeprom - currently disabled, use at your own risk
1594 */ 1759 */
1760#if 0
1595static int ath5k_hw_eeprom_write(struct ath5k_hw *ah, u32 offset, u16 data) 1761static int ath5k_hw_eeprom_write(struct ath5k_hw *ah, u32 offset, u16 data)
1596{ 1762{
1597#if 0 1763
1598 u32 status, timeout; 1764 u32 status, timeout;
1599 1765
1600 ATH5K_TRACE(ah->ah_sc); 1766 ATH5K_TRACE(ah->ah_sc);
@@ -1636,10 +1802,11 @@ static int ath5k_hw_eeprom_write(struct ath5k_hw *ah, u32 offset, u16 data)
1636 } 1802 }
1637 udelay(15); 1803 udelay(15);
1638 } 1804 }
1639#endif 1805
1640 ATH5K_ERR(ah->ah_sc, "EEPROM Write is disabled!"); 1806 ATH5K_ERR(ah->ah_sc, "EEPROM Write is disabled!");
1641 return -EIO; 1807 return -EIO;
1642} 1808}
1809#endif
1643 1810
1644/* 1811/*
1645 * Translate binary channel representation in EEPROM to frequency 1812 * Translate binary channel representation in EEPROM to frequency
@@ -2045,50 +2212,6 @@ static int ath5k_eeprom_read_mac(struct ath5k_hw *ah, u8 *mac)
2045} 2212}
2046 2213
2047/* 2214/*
2048 * Read/Write regulatory domain
2049 */
2050static bool ath5k_eeprom_regulation_domain(struct ath5k_hw *ah, bool write,
2051 enum ath5k_regdom *regdomain)
2052{
2053 u16 ee_regdomain;
2054
2055 /* Read current value */
2056 if (write != true) {
2057 ee_regdomain = ah->ah_capabilities.cap_eeprom.ee_regdomain;
2058 *regdomain = ath5k_regdom_to_ieee(ee_regdomain);
2059 return true;
2060 }
2061
2062 ee_regdomain = ath5k_regdom_from_ieee(*regdomain);
2063
2064 /* Try to write a new value */
2065 if (ah->ah_capabilities.cap_eeprom.ee_protect &
2066 AR5K_EEPROM_PROTECT_WR_128_191)
2067 return false;
2068 if (ath5k_hw_eeprom_write(ah, AR5K_EEPROM_REG_DOMAIN, ee_regdomain)!=0)
2069 return false;
2070
2071 ah->ah_capabilities.cap_eeprom.ee_regdomain = ee_regdomain;
2072
2073 return true;
2074}
2075
2076/*
2077 * Use the above to write a new regulatory domain
2078 */
2079int ath5k_hw_set_regdomain(struct ath5k_hw *ah, u16 regdomain)
2080{
2081 enum ath5k_regdom ieee_regdomain;
2082
2083 ieee_regdomain = ath5k_regdom_to_ieee(regdomain);
2084
2085 if (ath5k_eeprom_regulation_domain(ah, true, &ieee_regdomain) == true)
2086 return 0;
2087
2088 return -EIO;
2089}
2090
2091/*
2092 * Fill the capabilities struct 2215 * Fill the capabilities struct
2093 */ 2216 */
2094static int ath5k_hw_get_capabilities(struct ath5k_hw *ah) 2217static int ath5k_hw_get_capabilities(struct ath5k_hw *ah)
@@ -2110,8 +2233,8 @@ static int ath5k_hw_get_capabilities(struct ath5k_hw *ah)
2110 ah->ah_capabilities.cap_range.range_2ghz_max = 0; 2233 ah->ah_capabilities.cap_range.range_2ghz_max = 0;
2111 2234
2112 /* Set supported modes */ 2235 /* Set supported modes */
2113 __set_bit(MODE_IEEE80211A, ah->ah_capabilities.cap_mode); 2236 __set_bit(AR5K_MODE_11A, ah->ah_capabilities.cap_mode);
2114 __set_bit(MODE_ATHEROS_TURBO, ah->ah_capabilities.cap_mode); 2237 __set_bit(AR5K_MODE_11A_TURBO, ah->ah_capabilities.cap_mode);
2115 } else { 2238 } else {
2116 /* 2239 /*
2117 * XXX The transceiver supports frequencies from 4920 to 6100MHz 2240 * XXX The transceiver supports frequencies from 4920 to 6100MHz
@@ -2133,12 +2256,12 @@ static int ath5k_hw_get_capabilities(struct ath5k_hw *ah)
2133 ah->ah_capabilities.cap_range.range_5ghz_max = 6100; 2256 ah->ah_capabilities.cap_range.range_5ghz_max = 6100;
2134 2257
2135 /* Set supported modes */ 2258 /* Set supported modes */
2136 __set_bit(MODE_IEEE80211A, 2259 __set_bit(AR5K_MODE_11A,
2137 ah->ah_capabilities.cap_mode); 2260 ah->ah_capabilities.cap_mode);
2138 __set_bit(MODE_ATHEROS_TURBO, 2261 __set_bit(AR5K_MODE_11A_TURBO,
2139 ah->ah_capabilities.cap_mode); 2262 ah->ah_capabilities.cap_mode);
2140 if (ah->ah_version == AR5K_AR5212) 2263 if (ah->ah_version == AR5K_AR5212)
2141 __set_bit(MODE_ATHEROS_TURBOG, 2264 __set_bit(AR5K_MODE_11G_TURBO,
2142 ah->ah_capabilities.cap_mode); 2265 ah->ah_capabilities.cap_mode);
2143 } 2266 }
2144 2267
@@ -2150,11 +2273,11 @@ static int ath5k_hw_get_capabilities(struct ath5k_hw *ah)
2150 ah->ah_capabilities.cap_range.range_2ghz_max = 2732; 2273 ah->ah_capabilities.cap_range.range_2ghz_max = 2732;
2151 2274
2152 if (AR5K_EEPROM_HDR_11B(ee_header)) 2275 if (AR5K_EEPROM_HDR_11B(ee_header))
2153 __set_bit(MODE_IEEE80211B, 2276 __set_bit(AR5K_MODE_11B,
2154 ah->ah_capabilities.cap_mode); 2277 ah->ah_capabilities.cap_mode);
2155 2278
2156 if (AR5K_EEPROM_HDR_11G(ee_header)) 2279 if (AR5K_EEPROM_HDR_11G(ee_header))
2157 __set_bit(MODE_IEEE80211G, 2280 __set_bit(AR5K_MODE_11G,
2158 ah->ah_capabilities.cap_mode); 2281 ah->ah_capabilities.cap_mode);
2159 } 2282 }
2160 } 2283 }
@@ -2279,8 +2402,8 @@ void ath5k_hw_set_associd(struct ath5k_hw *ah, const u8 *bssid, u16 assoc_id)
2279 * Set simple BSSID mask on 5212 2402 * Set simple BSSID mask on 5212
2280 */ 2403 */
2281 if (ah->ah_version == AR5K_AR5212) { 2404 if (ah->ah_version == AR5K_AR5212) {
2282 ath5k_hw_reg_write(ah, 0xfffffff, AR5K_BSS_IDM0); 2405 ath5k_hw_reg_write(ah, 0xffffffff, AR5K_BSS_IDM0);
2283 ath5k_hw_reg_write(ah, 0xfffffff, AR5K_BSS_IDM1); 2406 ath5k_hw_reg_write(ah, 0xffffffff, AR5K_BSS_IDM1);
2284 } 2407 }
2285 2408
2286 /* 2409 /*
@@ -2425,6 +2548,8 @@ void ath5k_hw_start_rx_pcu(struct ath5k_hw *ah)
2425{ 2548{
2426 ATH5K_TRACE(ah->ah_sc); 2549 ATH5K_TRACE(ah->ah_sc);
2427 AR5K_REG_DISABLE_BITS(ah, AR5K_DIAG_SW, AR5K_DIAG_SW_DIS_RX); 2550 AR5K_REG_DISABLE_BITS(ah, AR5K_DIAG_SW, AR5K_DIAG_SW_DIS_RX);
2551
2552 /* TODO: ANI Support */
2428} 2553}
2429 2554
2430/* 2555/*
@@ -2434,6 +2559,8 @@ void ath5k_hw_stop_pcu_recv(struct ath5k_hw *ah)
2434{ 2559{
2435 ATH5K_TRACE(ah->ah_sc); 2560 ATH5K_TRACE(ah->ah_sc);
2436 AR5K_REG_ENABLE_BITS(ah, AR5K_DIAG_SW, AR5K_DIAG_SW_DIS_RX); 2561 AR5K_REG_ENABLE_BITS(ah, AR5K_DIAG_SW, AR5K_DIAG_SW_DIS_RX);
2562
2563 /* TODO: ANI Support */
2437} 2564}
2438 2565
2439/* 2566/*
@@ -2828,15 +2955,19 @@ int ath5k_hw_beaconq_finish(struct ath5k_hw *ah, unsigned long phys_addr)
2828 * Update mib counters (statistics) 2955 * Update mib counters (statistics)
2829 */ 2956 */
2830void ath5k_hw_update_mib_counters(struct ath5k_hw *ah, 2957void ath5k_hw_update_mib_counters(struct ath5k_hw *ah,
2831 struct ath5k_mib_stats *statistics) 2958 struct ieee80211_low_level_stats *stats)
2832{ 2959{
2833 ATH5K_TRACE(ah->ah_sc); 2960 ATH5K_TRACE(ah->ah_sc);
2961
2834 /* Read-And-Clear */ 2962 /* Read-And-Clear */
2835 statistics->ackrcv_bad += ath5k_hw_reg_read(ah, AR5K_ACK_FAIL); 2963 stats->dot11ACKFailureCount += ath5k_hw_reg_read(ah, AR5K_ACK_FAIL);
2836 statistics->rts_bad += ath5k_hw_reg_read(ah, AR5K_RTS_FAIL); 2964 stats->dot11RTSFailureCount += ath5k_hw_reg_read(ah, AR5K_RTS_FAIL);
2837 statistics->rts_good += ath5k_hw_reg_read(ah, AR5K_RTS_OK); 2965 stats->dot11RTSSuccessCount += ath5k_hw_reg_read(ah, AR5K_RTS_OK);
2838 statistics->fcs_bad += ath5k_hw_reg_read(ah, AR5K_FCS_FAIL); 2966 stats->dot11FCSErrorCount += ath5k_hw_reg_read(ah, AR5K_FCS_FAIL);
2839 statistics->beacons += ath5k_hw_reg_read(ah, AR5K_BEACON_CNT); 2967
2968 /* XXX: Should we use this to track beacon count ?
2969 * -we read it anyway to clear the register */
2970 ath5k_hw_reg_read(ah, AR5K_BEACON_CNT);
2840 2971
2841 /* Reset profile count registers on 5212*/ 2972 /* Reset profile count registers on 5212*/
2842 if (ah->ah_version == AR5K_AR5212) { 2973 if (ah->ah_version == AR5K_AR5212) {
@@ -2937,8 +3068,16 @@ int ath5k_hw_reset_key(struct ath5k_hw *ah, u16 entry)
2937 for (i = 0; i < AR5K_KEYCACHE_SIZE; i++) 3068 for (i = 0; i < AR5K_KEYCACHE_SIZE; i++)
2938 ath5k_hw_reg_write(ah, 0, AR5K_KEYTABLE_OFF(entry, i)); 3069 ath5k_hw_reg_write(ah, 0, AR5K_KEYTABLE_OFF(entry, i));
2939 3070
2940 /* Set NULL encryption on non-5210*/ 3071 /*
2941 if (ah->ah_version != AR5K_AR5210) 3072 * Set NULL encryption on AR5212+
3073 *
3074 * Note: AR5K_KEYTABLE_TYPE -> AR5K_KEYTABLE_OFF(entry, 5)
3075 * AR5K_KEYTABLE_TYPE_NULL -> 0x00000007
3076 *
3077 * Note2: Windows driver (ndiswrapper) sets this to
3078 * 0x00000714 instead of 0x00000007
3079 */
3080 if (ah->ah_version > AR5K_AR5211)
2942 ath5k_hw_reg_write(ah, AR5K_KEYTABLE_TYPE_NULL, 3081 ath5k_hw_reg_write(ah, AR5K_KEYTABLE_TYPE_NULL,
2943 AR5K_KEYTABLE_TYPE(entry)); 3082 AR5K_KEYTABLE_TYPE(entry));
2944 3083
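Per the comment added above, a key-cache entry is a block of 32-bit words (AR5K_KEYCACHE_SIZE of them): resetting an entry zeroes every word and then, on AR5212-class MACs, marks it unencrypted by writing the NULL type value 0x00000007 into the entry's type word, which the comment identifies as word 5. The model below uses an invented flat-array layout in place of the real AR5K_KEYTABLE_OFF() register addressing and assumes 8 words per entry; both are illustrative assumptions, not values taken from this hunk.

#include <stdio.h>
#include <string.h>

#define KEYCACHE_WORDS		8		/* words per entry; assumed */
#define KEYTABLE_ENTRIES	4		/* keep the model tiny */
#define TYPE_WORD_INDEX		5		/* per the comment: OFF(entry, 5) */
#define KEYTABLE_TYPE_NULL	0x00000007u

/* The whole key cache modeled as a flat array of 32-bit "registers". */
static unsigned int keytable[KEYTABLE_ENTRIES * KEYCACHE_WORDS];

static unsigned int *key_word(unsigned int entry, unsigned int word)
{
	return &keytable[entry * KEYCACHE_WORDS + word];
}

static void reset_key(unsigned int entry)
{
	unsigned int i;

	for (i = 0; i < KEYCACHE_WORDS; i++)
		*key_word(entry, i) = 0;	/* zero the entry */
	*key_word(entry, TYPE_WORD_INDEX) = KEYTABLE_TYPE_NULL;
}

int main(void)
{
	memset(keytable, 0xff, sizeof(keytable));	/* pretend it held keys */
	reset_key(2);
	printf("entry 2, word %d = 0x%08x\n",
	       TYPE_WORD_INDEX, *key_word(2, TYPE_WORD_INDEX));
	return 0;
}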
@@ -3186,19 +3325,19 @@ int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
3186 return 0; 3325 return 0;
3187 3326
3188 /* Set Slot time */ 3327 /* Set Slot time */
3189 ath5k_hw_reg_write(ah, ah->ah_turbo == true ? 3328 ath5k_hw_reg_write(ah, ah->ah_turbo ?
3190 AR5K_INIT_SLOT_TIME_TURBO : AR5K_INIT_SLOT_TIME, 3329 AR5K_INIT_SLOT_TIME_TURBO : AR5K_INIT_SLOT_TIME,
3191 AR5K_SLOT_TIME); 3330 AR5K_SLOT_TIME);
3192 /* Set ACK_CTS timeout */ 3331 /* Set ACK_CTS timeout */
3193 ath5k_hw_reg_write(ah, ah->ah_turbo == true ? 3332 ath5k_hw_reg_write(ah, ah->ah_turbo ?
3194 AR5K_INIT_ACK_CTS_TIMEOUT_TURBO : 3333 AR5K_INIT_ACK_CTS_TIMEOUT_TURBO :
3195 AR5K_INIT_ACK_CTS_TIMEOUT, AR5K_SLOT_TIME); 3334 AR5K_INIT_ACK_CTS_TIMEOUT, AR5K_SLOT_TIME);
3196 /* Set Transmit Latency */ 3335 /* Set Transmit Latency */
3197 ath5k_hw_reg_write(ah, ah->ah_turbo == true ? 3336 ath5k_hw_reg_write(ah, ah->ah_turbo ?
3198 AR5K_INIT_TRANSMIT_LATENCY_TURBO : 3337 AR5K_INIT_TRANSMIT_LATENCY_TURBO :
3199 AR5K_INIT_TRANSMIT_LATENCY, AR5K_USEC_5210); 3338 AR5K_INIT_TRANSMIT_LATENCY, AR5K_USEC_5210);
3200 /* Set IFS0 */ 3339 /* Set IFS0 */
3201 if (ah->ah_turbo == true) 3340 if (ah->ah_turbo)
3202 ath5k_hw_reg_write(ah, ((AR5K_INIT_SIFS_TURBO + 3341 ath5k_hw_reg_write(ah, ((AR5K_INIT_SIFS_TURBO +
3203 (ah->ah_aifs + tq->tqi_aifs) * 3342 (ah->ah_aifs + tq->tqi_aifs) *
3204 AR5K_INIT_SLOT_TIME_TURBO) << 3343 AR5K_INIT_SLOT_TIME_TURBO) <<
@@ -3211,16 +3350,16 @@ int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
3211 AR5K_INIT_SIFS, AR5K_IFS0); 3350 AR5K_INIT_SIFS, AR5K_IFS0);
3212 3351
3213 /* Set IFS1 */ 3352 /* Set IFS1 */
3214 ath5k_hw_reg_write(ah, ah->ah_turbo == true ? 3353 ath5k_hw_reg_write(ah, ah->ah_turbo ?
3215 AR5K_INIT_PROTO_TIME_CNTRL_TURBO : 3354 AR5K_INIT_PROTO_TIME_CNTRL_TURBO :
3216 AR5K_INIT_PROTO_TIME_CNTRL, AR5K_IFS1); 3355 AR5K_INIT_PROTO_TIME_CNTRL, AR5K_IFS1);
3217 /* Set PHY register 0x9844 (??) */ 3356 /* Set PHY register 0x9844 (??) */
3218 ath5k_hw_reg_write(ah, ah->ah_turbo == true ? 3357 ath5k_hw_reg_write(ah, ah->ah_turbo ?
3219 (ath5k_hw_reg_read(ah, AR5K_PHY(17)) & ~0x7F) | 0x38 : 3358 (ath5k_hw_reg_read(ah, AR5K_PHY(17)) & ~0x7F) | 0x38 :
3220 (ath5k_hw_reg_read(ah, AR5K_PHY(17)) & ~0x7F) | 0x1C, 3359 (ath5k_hw_reg_read(ah, AR5K_PHY(17)) & ~0x7F) | 0x1C,
3221 AR5K_PHY(17)); 3360 AR5K_PHY(17));
3222 /* Set Frame Control Register */ 3361 /* Set Frame Control Register */
3223 ath5k_hw_reg_write(ah, ah->ah_turbo == true ? 3362 ath5k_hw_reg_write(ah, ah->ah_turbo ?
3224 (AR5K_PHY_FRAME_CTL_INI | AR5K_PHY_TURBO_MODE | 3363 (AR5K_PHY_FRAME_CTL_INI | AR5K_PHY_TURBO_MODE |
3225 AR5K_PHY_TURBO_SHORT | 0x2020) : 3364 AR5K_PHY_TURBO_SHORT | 0x2020) :
3226 (AR5K_PHY_FRAME_CTL_INI | 0x1020), 3365 (AR5K_PHY_FRAME_CTL_INI | 0x1020),
@@ -3259,7 +3398,7 @@ int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
3259 /* 3398 /*
3260 * Calculate and set retry limits 3399 * Calculate and set retry limits
3261 */ 3400 */
3262 if (ah->ah_software_retry == true) { 3401 if (ah->ah_software_retry) {
3263 /* XXX Need to test this */ 3402 /* XXX Need to test this */
3264 retry_lg = ah->ah_limit_tx_retries; 3403 retry_lg = ah->ah_limit_tx_retries;
3265 retry_sh = retry_lg = retry_lg > AR5K_DCU_RETRY_LMT_SH_RETRY ? 3404 retry_sh = retry_lg = retry_lg > AR5K_DCU_RETRY_LMT_SH_RETRY ?
@@ -3507,10 +3646,10 @@ ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
3507 unsigned int rtscts_rate, unsigned int rtscts_duration) 3646 unsigned int rtscts_rate, unsigned int rtscts_duration)
3508{ 3647{
3509 u32 frame_type; 3648 u32 frame_type;
3510 struct ath5k_hw_2w_tx_desc *tx_desc; 3649 struct ath5k_hw_2w_tx_ctl *tx_ctl;
3511 unsigned int frame_len; 3650 unsigned int frame_len;
3512 3651
3513 tx_desc = (struct ath5k_hw_2w_tx_desc *)&desc->ds_ctl0; 3652 tx_ctl = &desc->ud.ds_tx5210.tx_ctl;
3514 3653
3515 /* 3654 /*
3516 * Validate input 3655 * Validate input
@@ -3529,12 +3668,8 @@ ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
3529 return -EINVAL; 3668 return -EINVAL;
3530 } 3669 }
3531 3670
3532 /* Clear status descriptor */ 3671 /* Clear descriptor */
3533 memset(desc->ds_hw, 0, sizeof(struct ath5k_hw_tx_status)); 3672 memset(&desc->ud.ds_tx5210, 0, sizeof(struct ath5k_hw_5210_tx_desc));
3534
3535 /* Initialize control descriptor */
3536 tx_desc->tx_control_0 = 0;
3537 tx_desc->tx_control_1 = 0;
3538 3673
3539 /* Setup control descriptor */ 3674 /* Setup control descriptor */
3540 3675
@@ -3546,7 +3681,7 @@ ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
3546 if (frame_len & ~AR5K_2W_TX_DESC_CTL0_FRAME_LEN) 3681 if (frame_len & ~AR5K_2W_TX_DESC_CTL0_FRAME_LEN)
3547 return -EINVAL; 3682 return -EINVAL;
3548 3683
3549 tx_desc->tx_control_0 = frame_len & AR5K_2W_TX_DESC_CTL0_FRAME_LEN; 3684 tx_ctl->tx_control_0 = frame_len & AR5K_2W_TX_DESC_CTL0_FRAME_LEN;
3550 3685
3551 /* Verify and set buffer length */ 3686 /* Verify and set buffer length */
3552 3687
@@ -3557,7 +3692,7 @@ ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
3557 if (pkt_len & ~AR5K_2W_TX_DESC_CTL1_BUF_LEN) 3692 if (pkt_len & ~AR5K_2W_TX_DESC_CTL1_BUF_LEN)
3558 return -EINVAL; 3693 return -EINVAL;
3559 3694
3560 tx_desc->tx_control_1 = pkt_len & AR5K_2W_TX_DESC_CTL1_BUF_LEN; 3695 tx_ctl->tx_control_1 = pkt_len & AR5K_2W_TX_DESC_CTL1_BUF_LEN;
3561 3696
3562 /* 3697 /*
3563 * Verify and set header length 3698 * Verify and set header length
@@ -3566,7 +3701,7 @@ ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
3566 if (ah->ah_version == AR5K_AR5210) { 3701 if (ah->ah_version == AR5K_AR5210) {
3567 if (hdr_len & ~AR5K_2W_TX_DESC_CTL0_HEADER_LEN) 3702 if (hdr_len & ~AR5K_2W_TX_DESC_CTL0_HEADER_LEN)
3568 return -EINVAL; 3703 return -EINVAL;
3569 tx_desc->tx_control_0 |= 3704 tx_ctl->tx_control_0 |=
3570 AR5K_REG_SM(hdr_len, AR5K_2W_TX_DESC_CTL0_HEADER_LEN); 3705 AR5K_REG_SM(hdr_len, AR5K_2W_TX_DESC_CTL0_HEADER_LEN);
3571 } 3706 }
3572 3707
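The descriptor hunks in this area pack and unpack fields with AR5K_REG_SM() and AR5K_REG_MS(); their definitions are not part of this diff, but the call sites imply the usual pair of helpers: shift a value into a masked field, and mask a word then shift the field back down. A self-contained sketch with an invented field layout (the mask and shift below are not real ath5k constants):

#include <stdio.h>

/* Invented field for illustration: header length in bits 12..17. */
#define CTL0_HEADER_LEN_MASK	0x0003f000u
#define CTL0_HEADER_LEN_SHIFT	12

/* Pack a value into a field (the role AR5K_REG_SM() plays above). */
static unsigned int reg_sm(unsigned int val, unsigned int mask, int shift)
{
	return (val << shift) & mask;
}

/* Extract a field from a word (the role AR5K_REG_MS() plays elsewhere). */
static unsigned int reg_ms(unsigned int word, unsigned int mask, int shift)
{
	return (word & mask) >> shift;
}

int main(void)
{
	unsigned int ctl0 = 0;

	ctl0 |= reg_sm(24, CTL0_HEADER_LEN_MASK, CTL0_HEADER_LEN_SHIFT);
	printf("ctl0 = 0x%08x, header_len = %u\n",
	       ctl0, reg_ms(ctl0, CTL0_HEADER_LEN_MASK, CTL0_HEADER_LEN_SHIFT));
	return 0;
}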
@@ -3582,19 +3717,19 @@ ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
3582 frame_type = type /*<< 2 ?*/; 3717 frame_type = type /*<< 2 ?*/;
3583 } 3718 }
3584 3719
3585 tx_desc->tx_control_0 |= 3720 tx_ctl->tx_control_0 |=
3586 AR5K_REG_SM(frame_type, AR5K_2W_TX_DESC_CTL0_FRAME_TYPE) | 3721 AR5K_REG_SM(frame_type, AR5K_2W_TX_DESC_CTL0_FRAME_TYPE) |
3587 AR5K_REG_SM(tx_rate0, AR5K_2W_TX_DESC_CTL0_XMIT_RATE); 3722 AR5K_REG_SM(tx_rate0, AR5K_2W_TX_DESC_CTL0_XMIT_RATE);
3588 } else { 3723 } else {
3589 tx_desc->tx_control_0 |= 3724 tx_ctl->tx_control_0 |=
3590 AR5K_REG_SM(tx_rate0, AR5K_2W_TX_DESC_CTL0_XMIT_RATE) | 3725 AR5K_REG_SM(tx_rate0, AR5K_2W_TX_DESC_CTL0_XMIT_RATE) |
3591 AR5K_REG_SM(antenna_mode, AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT); 3726 AR5K_REG_SM(antenna_mode, AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT);
3592 tx_desc->tx_control_1 |= 3727 tx_ctl->tx_control_1 |=
3593 AR5K_REG_SM(type, AR5K_2W_TX_DESC_CTL1_FRAME_TYPE); 3728 AR5K_REG_SM(type, AR5K_2W_TX_DESC_CTL1_FRAME_TYPE);
3594 } 3729 }
3595#define _TX_FLAGS(_c, _flag) \ 3730#define _TX_FLAGS(_c, _flag) \
3596 if (flags & AR5K_TXDESC_##_flag) \ 3731 if (flags & AR5K_TXDESC_##_flag) \
3597 tx_desc->tx_control_##_c |= \ 3732 tx_ctl->tx_control_##_c |= \
3598 AR5K_2W_TX_DESC_CTL##_c##_##_flag 3733 AR5K_2W_TX_DESC_CTL##_c##_##_flag
3599 3734
3600 _TX_FLAGS(0, CLRDMASK); 3735 _TX_FLAGS(0, CLRDMASK);
@@ -3609,9 +3744,9 @@ ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
3609 * WEP crap 3744 * WEP crap
3610 */ 3745 */
3611 if (key_index != AR5K_TXKEYIX_INVALID) { 3746 if (key_index != AR5K_TXKEYIX_INVALID) {
3612 tx_desc->tx_control_0 |= 3747 tx_ctl->tx_control_0 |=
3613 AR5K_2W_TX_DESC_CTL0_ENCRYPT_KEY_VALID; 3748 AR5K_2W_TX_DESC_CTL0_ENCRYPT_KEY_VALID;
3614 tx_desc->tx_control_1 |= 3749 tx_ctl->tx_control_1 |=
3615 AR5K_REG_SM(key_index, 3750 AR5K_REG_SM(key_index,
3616 AR5K_2W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX); 3751 AR5K_2W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX);
3617 } 3752 }
@@ -3621,7 +3756,7 @@ ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
3621 */ 3756 */
3622 if ((ah->ah_version == AR5K_AR5210) && 3757 if ((ah->ah_version == AR5K_AR5210) &&
3623 (flags & (AR5K_TXDESC_RTSENA | AR5K_TXDESC_CTSENA))) 3758 (flags & (AR5K_TXDESC_RTSENA | AR5K_TXDESC_CTSENA)))
3624 tx_desc->tx_control_1 |= rtscts_duration & 3759 tx_ctl->tx_control_1 |= rtscts_duration &
3625 AR5K_2W_TX_DESC_CTL1_RTS_DURATION; 3760 AR5K_2W_TX_DESC_CTL1_RTS_DURATION;
3626 3761
3627 return 0; 3762 return 0;
@@ -3637,13 +3772,11 @@ static int ath5k_hw_setup_4word_tx_desc(struct ath5k_hw *ah,
3637 unsigned int antenna_mode, unsigned int flags, unsigned int rtscts_rate, 3772 unsigned int antenna_mode, unsigned int flags, unsigned int rtscts_rate,
3638 unsigned int rtscts_duration) 3773 unsigned int rtscts_duration)
3639{ 3774{
3640 struct ath5k_hw_4w_tx_desc *tx_desc; 3775 struct ath5k_hw_4w_tx_ctl *tx_ctl;
3641 struct ath5k_hw_tx_status *tx_status;
3642 unsigned int frame_len; 3776 unsigned int frame_len;
3643 3777
3644 ATH5K_TRACE(ah->ah_sc); 3778 ATH5K_TRACE(ah->ah_sc);
3645 tx_desc = (struct ath5k_hw_4w_tx_desc *)&desc->ds_ctl0; 3779 tx_ctl = &desc->ud.ds_tx5212.tx_ctl;
3646 tx_status = (struct ath5k_hw_tx_status *)&desc->ds_hw[2];
3647 3780
3648 /* 3781 /*
3649 * Validate input 3782 * Validate input
@@ -3662,14 +3795,8 @@ static int ath5k_hw_setup_4word_tx_desc(struct ath5k_hw *ah,
3662 return -EINVAL; 3795 return -EINVAL;
3663 } 3796 }
3664 3797
3665 /* Clear status descriptor */ 3798 /* Clear descriptor */
3666 memset(tx_status, 0, sizeof(struct ath5k_hw_tx_status)); 3799 memset(&desc->ud.ds_tx5212, 0, sizeof(struct ath5k_hw_5212_tx_desc));
3667
3668 /* Initialize control descriptor */
3669 tx_desc->tx_control_0 = 0;
3670 tx_desc->tx_control_1 = 0;
3671 tx_desc->tx_control_2 = 0;
3672 tx_desc->tx_control_3 = 0;
3673 3800
3674 /* Setup control descriptor */ 3801 /* Setup control descriptor */
3675 3802
@@ -3681,7 +3808,7 @@ static int ath5k_hw_setup_4word_tx_desc(struct ath5k_hw *ah,
3681 if (frame_len & ~AR5K_4W_TX_DESC_CTL0_FRAME_LEN) 3808 if (frame_len & ~AR5K_4W_TX_DESC_CTL0_FRAME_LEN)
3682 return -EINVAL; 3809 return -EINVAL;
3683 3810
3684 tx_desc->tx_control_0 = frame_len & AR5K_4W_TX_DESC_CTL0_FRAME_LEN; 3811 tx_ctl->tx_control_0 = frame_len & AR5K_4W_TX_DESC_CTL0_FRAME_LEN;
3685 3812
3686 /* Verify and set buffer length */ 3813 /* Verify and set buffer length */
3687 3814
@@ -3692,20 +3819,20 @@ static int ath5k_hw_setup_4word_tx_desc(struct ath5k_hw *ah,
3692 if (pkt_len & ~AR5K_4W_TX_DESC_CTL1_BUF_LEN) 3819 if (pkt_len & ~AR5K_4W_TX_DESC_CTL1_BUF_LEN)
3693 return -EINVAL; 3820 return -EINVAL;
3694 3821
3695 tx_desc->tx_control_1 = pkt_len & AR5K_4W_TX_DESC_CTL1_BUF_LEN; 3822 tx_ctl->tx_control_1 = pkt_len & AR5K_4W_TX_DESC_CTL1_BUF_LEN;
3696 3823
3697 tx_desc->tx_control_0 |= 3824 tx_ctl->tx_control_0 |=
3698 AR5K_REG_SM(tx_power, AR5K_4W_TX_DESC_CTL0_XMIT_POWER) | 3825 AR5K_REG_SM(tx_power, AR5K_4W_TX_DESC_CTL0_XMIT_POWER) |
3699 AR5K_REG_SM(antenna_mode, AR5K_4W_TX_DESC_CTL0_ANT_MODE_XMIT); 3826 AR5K_REG_SM(antenna_mode, AR5K_4W_TX_DESC_CTL0_ANT_MODE_XMIT);
3700 tx_desc->tx_control_1 |= AR5K_REG_SM(type, 3827 tx_ctl->tx_control_1 |= AR5K_REG_SM(type,
3701 AR5K_4W_TX_DESC_CTL1_FRAME_TYPE); 3828 AR5K_4W_TX_DESC_CTL1_FRAME_TYPE);
3702 tx_desc->tx_control_2 = AR5K_REG_SM(tx_tries0 + AR5K_TUNE_HWTXTRIES, 3829 tx_ctl->tx_control_2 = AR5K_REG_SM(tx_tries0 + AR5K_TUNE_HWTXTRIES,
3703 AR5K_4W_TX_DESC_CTL2_XMIT_TRIES0); 3830 AR5K_4W_TX_DESC_CTL2_XMIT_TRIES0);
3704 tx_desc->tx_control_3 = tx_rate0 & AR5K_4W_TX_DESC_CTL3_XMIT_RATE0; 3831 tx_ctl->tx_control_3 = tx_rate0 & AR5K_4W_TX_DESC_CTL3_XMIT_RATE0;
3705 3832
3706#define _TX_FLAGS(_c, _flag) \ 3833#define _TX_FLAGS(_c, _flag) \
3707 if (flags & AR5K_TXDESC_##_flag) \ 3834 if (flags & AR5K_TXDESC_##_flag) \
3708 tx_desc->tx_control_##_c |= \ 3835 tx_ctl->tx_control_##_c |= \
3709 AR5K_4W_TX_DESC_CTL##_c##_##_flag 3836 AR5K_4W_TX_DESC_CTL##_c##_##_flag
3710 3837
3711 _TX_FLAGS(0, CLRDMASK); 3838 _TX_FLAGS(0, CLRDMASK);
@@ -3721,8 +3848,8 @@ static int ath5k_hw_setup_4word_tx_desc(struct ath5k_hw *ah,
3721 * WEP crap 3848 * WEP crap
3722 */ 3849 */
3723 if (key_index != AR5K_TXKEYIX_INVALID) { 3850 if (key_index != AR5K_TXKEYIX_INVALID) {
3724 tx_desc->tx_control_0 |= AR5K_4W_TX_DESC_CTL0_ENCRYPT_KEY_VALID; 3851 tx_ctl->tx_control_0 |= AR5K_4W_TX_DESC_CTL0_ENCRYPT_KEY_VALID;
3725 tx_desc->tx_control_1 |= AR5K_REG_SM(key_index, 3852 tx_ctl->tx_control_1 |= AR5K_REG_SM(key_index,
3726 AR5K_4W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX); 3853 AR5K_4W_TX_DESC_CTL1_ENCRYPT_KEY_INDEX);
3727 } 3854 }
3728 3855
@@ -3733,9 +3860,9 @@ static int ath5k_hw_setup_4word_tx_desc(struct ath5k_hw *ah,
3733 if ((flags & AR5K_TXDESC_RTSENA) && 3860 if ((flags & AR5K_TXDESC_RTSENA) &&
3734 (flags & AR5K_TXDESC_CTSENA)) 3861 (flags & AR5K_TXDESC_CTSENA))
3735 return -EINVAL; 3862 return -EINVAL;
3736 tx_desc->tx_control_2 |= rtscts_duration & 3863 tx_ctl->tx_control_2 |= rtscts_duration &
3737 AR5K_4W_TX_DESC_CTL2_RTS_DURATION; 3864 AR5K_4W_TX_DESC_CTL2_RTS_DURATION;
3738 tx_desc->tx_control_3 |= AR5K_REG_SM(rtscts_rate, 3865 tx_ctl->tx_control_3 |= AR5K_REG_SM(rtscts_rate,
3739 AR5K_4W_TX_DESC_CTL3_RTS_CTS_RATE); 3866 AR5K_4W_TX_DESC_CTL3_RTS_CTS_RATE);
3740 } 3867 }
3741 3868
@@ -3750,7 +3877,7 @@ ath5k_hw_setup_xr_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
3750 unsigned int tx_rate1, u_int tx_tries1, u_int tx_rate2, u_int tx_tries2, 3877 unsigned int tx_rate1, u_int tx_tries1, u_int tx_rate2, u_int tx_tries2,
3751 unsigned int tx_rate3, u_int tx_tries3) 3878 unsigned int tx_rate3, u_int tx_tries3)
3752{ 3879{
3753 struct ath5k_hw_4w_tx_desc *tx_desc; 3880 struct ath5k_hw_4w_tx_ctl *tx_ctl;
3754 3881
3755 /* 3882 /*
3756 * Rates can be 0 as long as the retry count is 0 too. 3883 * Rates can be 0 as long as the retry count is 0 too.
@@ -3767,14 +3894,14 @@ ath5k_hw_setup_xr_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
3767 } 3894 }
3768 3895
3769 if (ah->ah_version == AR5K_AR5212) { 3896 if (ah->ah_version == AR5K_AR5212) {
3770 tx_desc = (struct ath5k_hw_4w_tx_desc *)&desc->ds_ctl0; 3897 tx_ctl = &desc->ud.ds_tx5212.tx_ctl;
3771 3898
3772#define _XTX_TRIES(_n) \ 3899#define _XTX_TRIES(_n) \
3773 if (tx_tries##_n) { \ 3900 if (tx_tries##_n) { \
3774 tx_desc->tx_control_2 |= \ 3901 tx_ctl->tx_control_2 |= \
3775 AR5K_REG_SM(tx_tries##_n, \ 3902 AR5K_REG_SM(tx_tries##_n, \
3776 AR5K_4W_TX_DESC_CTL2_XMIT_TRIES##_n); \ 3903 AR5K_4W_TX_DESC_CTL2_XMIT_TRIES##_n); \
3777 tx_desc->tx_control_3 |= \ 3904 tx_ctl->tx_control_3 |= \
3778 AR5K_REG_SM(tx_rate##_n, \ 3905 AR5K_REG_SM(tx_rate##_n, \
3779 AR5K_4W_TX_DESC_CTL3_XMIT_RATE##_n); \ 3906 AR5K_4W_TX_DESC_CTL3_XMIT_RATE##_n); \
3780 } 3907 }
@@ -3795,13 +3922,15 @@ ath5k_hw_setup_xr_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
3795 * Process the tx status descriptor on 5210/5211 3922 * Process the tx status descriptor on 5210/5211
3796 */ 3923 */
3797static int ath5k_hw_proc_2word_tx_status(struct ath5k_hw *ah, 3924static int ath5k_hw_proc_2word_tx_status(struct ath5k_hw *ah,
3798 struct ath5k_desc *desc) 3925 struct ath5k_desc *desc, struct ath5k_tx_status *ts)
3799{ 3926{
3927 struct ath5k_hw_2w_tx_ctl *tx_ctl;
3800 struct ath5k_hw_tx_status *tx_status; 3928 struct ath5k_hw_tx_status *tx_status;
3801 struct ath5k_hw_2w_tx_desc *tx_desc;
3802 3929
3803 tx_desc = (struct ath5k_hw_2w_tx_desc *)&desc->ds_ctl0; 3930 ATH5K_TRACE(ah->ah_sc);
3804 tx_status = (struct ath5k_hw_tx_status *)&desc->ds_hw[0]; 3931
3932 tx_ctl = &desc->ud.ds_tx5210.tx_ctl;
3933 tx_status = &desc->ud.ds_tx5210.tx_stat;
3805 3934
3806 /* No frame has been sent or error */ 3935 /* No frame has been sent or error */
3807 if (unlikely((tx_status->tx_status_1 & AR5K_DESC_TX_STATUS1_DONE) == 0)) 3936 if (unlikely((tx_status->tx_status_1 & AR5K_DESC_TX_STATUS1_DONE) == 0))
@@ -3810,32 +3939,32 @@ static int ath5k_hw_proc_2word_tx_status(struct ath5k_hw *ah,
3810 /* 3939 /*
3811 * Get descriptor status 3940 * Get descriptor status
3812 */ 3941 */
3813 desc->ds_us.tx.ts_tstamp = AR5K_REG_MS(tx_status->tx_status_0, 3942 ts->ts_tstamp = AR5K_REG_MS(tx_status->tx_status_0,
3814 AR5K_DESC_TX_STATUS0_SEND_TIMESTAMP); 3943 AR5K_DESC_TX_STATUS0_SEND_TIMESTAMP);
3815 desc->ds_us.tx.ts_shortretry = AR5K_REG_MS(tx_status->tx_status_0, 3944 ts->ts_shortretry = AR5K_REG_MS(tx_status->tx_status_0,
3816 AR5K_DESC_TX_STATUS0_SHORT_RETRY_COUNT); 3945 AR5K_DESC_TX_STATUS0_SHORT_RETRY_COUNT);
3817 desc->ds_us.tx.ts_longretry = AR5K_REG_MS(tx_status->tx_status_0, 3946 ts->ts_longretry = AR5K_REG_MS(tx_status->tx_status_0,
3818 AR5K_DESC_TX_STATUS0_LONG_RETRY_COUNT); 3947 AR5K_DESC_TX_STATUS0_LONG_RETRY_COUNT);
3819 /*TODO: desc->ds_us.tx.ts_virtcol + test*/ 3948 /*TODO: ts->ts_virtcol + test*/
3820 desc->ds_us.tx.ts_seqnum = AR5K_REG_MS(tx_status->tx_status_1, 3949 ts->ts_seqnum = AR5K_REG_MS(tx_status->tx_status_1,
3821 AR5K_DESC_TX_STATUS1_SEQ_NUM); 3950 AR5K_DESC_TX_STATUS1_SEQ_NUM);
3822 desc->ds_us.tx.ts_rssi = AR5K_REG_MS(tx_status->tx_status_1, 3951 ts->ts_rssi = AR5K_REG_MS(tx_status->tx_status_1,
3823 AR5K_DESC_TX_STATUS1_ACK_SIG_STRENGTH); 3952 AR5K_DESC_TX_STATUS1_ACK_SIG_STRENGTH);
3824 desc->ds_us.tx.ts_antenna = 1; 3953 ts->ts_antenna = 1;
3825 desc->ds_us.tx.ts_status = 0; 3954 ts->ts_status = 0;
3826 desc->ds_us.tx.ts_rate = AR5K_REG_MS(tx_desc->tx_control_0, 3955 ts->ts_rate = AR5K_REG_MS(tx_ctl->tx_control_0,
3827 AR5K_2W_TX_DESC_CTL0_XMIT_RATE); 3956 AR5K_2W_TX_DESC_CTL0_XMIT_RATE);
3828 3957
3829 if ((tx_status->tx_status_0 & AR5K_DESC_TX_STATUS0_FRAME_XMIT_OK) == 0){ 3958 if ((tx_status->tx_status_0 & AR5K_DESC_TX_STATUS0_FRAME_XMIT_OK) == 0){
3830 if (tx_status->tx_status_0 & 3959 if (tx_status->tx_status_0 &
3831 AR5K_DESC_TX_STATUS0_EXCESSIVE_RETRIES) 3960 AR5K_DESC_TX_STATUS0_EXCESSIVE_RETRIES)
3832 desc->ds_us.tx.ts_status |= AR5K_TXERR_XRETRY; 3961 ts->ts_status |= AR5K_TXERR_XRETRY;
3833 3962
3834 if (tx_status->tx_status_0 & AR5K_DESC_TX_STATUS0_FIFO_UNDERRUN) 3963 if (tx_status->tx_status_0 & AR5K_DESC_TX_STATUS0_FIFO_UNDERRUN)
3835 desc->ds_us.tx.ts_status |= AR5K_TXERR_FIFO; 3964 ts->ts_status |= AR5K_TXERR_FIFO;
3836 3965
3837 if (tx_status->tx_status_0 & AR5K_DESC_TX_STATUS0_FILTERED) 3966 if (tx_status->tx_status_0 & AR5K_DESC_TX_STATUS0_FILTERED)
3838 desc->ds_us.tx.ts_status |= AR5K_TXERR_FILT; 3967 ts->ts_status |= AR5K_TXERR_FILT;
3839 } 3968 }
3840 3969
3841 return 0; 3970 return 0;
@@ -3845,14 +3974,15 @@ static int ath5k_hw_proc_2word_tx_status(struct ath5k_hw *ah,
3845 * Process a tx descriptor on 5212 3974 * Process a tx descriptor on 5212
3846 */ 3975 */
3847static int ath5k_hw_proc_4word_tx_status(struct ath5k_hw *ah, 3976static int ath5k_hw_proc_4word_tx_status(struct ath5k_hw *ah,
3848 struct ath5k_desc *desc) 3977 struct ath5k_desc *desc, struct ath5k_tx_status *ts)
3849{ 3978{
3979 struct ath5k_hw_4w_tx_ctl *tx_ctl;
3850 struct ath5k_hw_tx_status *tx_status; 3980 struct ath5k_hw_tx_status *tx_status;
3851 struct ath5k_hw_4w_tx_desc *tx_desc;
3852 3981
3853 ATH5K_TRACE(ah->ah_sc); 3982 ATH5K_TRACE(ah->ah_sc);
3854 tx_desc = (struct ath5k_hw_4w_tx_desc *)&desc->ds_ctl0; 3983
3855 tx_status = (struct ath5k_hw_tx_status *)&desc->ds_hw[2]; 3984 tx_ctl = &desc->ud.ds_tx5212.tx_ctl;
3985 tx_status = &desc->ud.ds_tx5212.tx_stat;
3856 3986
3857 /* No frame has been sent or error */ 3987 /* No frame has been sent or error */
3858 if (unlikely((tx_status->tx_status_1 & AR5K_DESC_TX_STATUS1_DONE) == 0)) 3988 if (unlikely((tx_status->tx_status_1 & AR5K_DESC_TX_STATUS1_DONE) == 0))
@@ -3861,42 +3991,42 @@ static int ath5k_hw_proc_4word_tx_status(struct ath5k_hw *ah,
3861 /* 3991 /*
3862 * Get descriptor status 3992 * Get descriptor status
3863 */ 3993 */
3864 desc->ds_us.tx.ts_tstamp = AR5K_REG_MS(tx_status->tx_status_0, 3994 ts->ts_tstamp = AR5K_REG_MS(tx_status->tx_status_0,
3865 AR5K_DESC_TX_STATUS0_SEND_TIMESTAMP); 3995 AR5K_DESC_TX_STATUS0_SEND_TIMESTAMP);
3866 desc->ds_us.tx.ts_shortretry = AR5K_REG_MS(tx_status->tx_status_0, 3996 ts->ts_shortretry = AR5K_REG_MS(tx_status->tx_status_0,
3867 AR5K_DESC_TX_STATUS0_SHORT_RETRY_COUNT); 3997 AR5K_DESC_TX_STATUS0_SHORT_RETRY_COUNT);
3868 desc->ds_us.tx.ts_longretry = AR5K_REG_MS(tx_status->tx_status_0, 3998 ts->ts_longretry = AR5K_REG_MS(tx_status->tx_status_0,
3869 AR5K_DESC_TX_STATUS0_LONG_RETRY_COUNT); 3999 AR5K_DESC_TX_STATUS0_LONG_RETRY_COUNT);
3870 desc->ds_us.tx.ts_seqnum = AR5K_REG_MS(tx_status->tx_status_1, 4000 ts->ts_seqnum = AR5K_REG_MS(tx_status->tx_status_1,
3871 AR5K_DESC_TX_STATUS1_SEQ_NUM); 4001 AR5K_DESC_TX_STATUS1_SEQ_NUM);
3872 desc->ds_us.tx.ts_rssi = AR5K_REG_MS(tx_status->tx_status_1, 4002 ts->ts_rssi = AR5K_REG_MS(tx_status->tx_status_1,
3873 AR5K_DESC_TX_STATUS1_ACK_SIG_STRENGTH); 4003 AR5K_DESC_TX_STATUS1_ACK_SIG_STRENGTH);
3874 desc->ds_us.tx.ts_antenna = (tx_status->tx_status_1 & 4004 ts->ts_antenna = (tx_status->tx_status_1 &
3875 AR5K_DESC_TX_STATUS1_XMIT_ANTENNA) ? 2 : 1; 4005 AR5K_DESC_TX_STATUS1_XMIT_ANTENNA) ? 2 : 1;
3876 desc->ds_us.tx.ts_status = 0; 4006 ts->ts_status = 0;
3877 4007
3878 switch (AR5K_REG_MS(tx_status->tx_status_1, 4008 switch (AR5K_REG_MS(tx_status->tx_status_1,
3879 AR5K_DESC_TX_STATUS1_FINAL_TS_INDEX)) { 4009 AR5K_DESC_TX_STATUS1_FINAL_TS_INDEX)) {
3880 case 0: 4010 case 0:
3881 desc->ds_us.tx.ts_rate = tx_desc->tx_control_3 & 4011 ts->ts_rate = tx_ctl->tx_control_3 &
3882 AR5K_4W_TX_DESC_CTL3_XMIT_RATE0; 4012 AR5K_4W_TX_DESC_CTL3_XMIT_RATE0;
3883 break; 4013 break;
3884 case 1: 4014 case 1:
3885 desc->ds_us.tx.ts_rate = AR5K_REG_MS(tx_desc->tx_control_3, 4015 ts->ts_rate = AR5K_REG_MS(tx_ctl->tx_control_3,
3886 AR5K_4W_TX_DESC_CTL3_XMIT_RATE1); 4016 AR5K_4W_TX_DESC_CTL3_XMIT_RATE1);
3887 desc->ds_us.tx.ts_longretry +=AR5K_REG_MS(tx_desc->tx_control_2, 4017 ts->ts_longretry += AR5K_REG_MS(tx_ctl->tx_control_2,
3888 AR5K_4W_TX_DESC_CTL2_XMIT_TRIES1); 4018 AR5K_4W_TX_DESC_CTL2_XMIT_TRIES1);
3889 break; 4019 break;
3890 case 2: 4020 case 2:
3891 desc->ds_us.tx.ts_rate = AR5K_REG_MS(tx_desc->tx_control_3, 4021 ts->ts_rate = AR5K_REG_MS(tx_ctl->tx_control_3,
3892 AR5K_4W_TX_DESC_CTL3_XMIT_RATE2); 4022 AR5K_4W_TX_DESC_CTL3_XMIT_RATE2);
3893 desc->ds_us.tx.ts_longretry +=AR5K_REG_MS(tx_desc->tx_control_2, 4023 ts->ts_longretry += AR5K_REG_MS(tx_ctl->tx_control_2,
3894 AR5K_4W_TX_DESC_CTL2_XMIT_TRIES2); 4024 AR5K_4W_TX_DESC_CTL2_XMIT_TRIES2);
3895 break; 4025 break;
3896 case 3: 4026 case 3:
3897 desc->ds_us.tx.ts_rate = AR5K_REG_MS(tx_desc->tx_control_3, 4027 ts->ts_rate = AR5K_REG_MS(tx_ctl->tx_control_3,
3898 AR5K_4W_TX_DESC_CTL3_XMIT_RATE3); 4028 AR5K_4W_TX_DESC_CTL3_XMIT_RATE3);
3899 desc->ds_us.tx.ts_longretry +=AR5K_REG_MS(tx_desc->tx_control_2, 4029 ts->ts_longretry += AR5K_REG_MS(tx_ctl->tx_control_2,
3900 AR5K_4W_TX_DESC_CTL2_XMIT_TRIES3); 4030 AR5K_4W_TX_DESC_CTL2_XMIT_TRIES3);
3901 break; 4031 break;
3902 } 4032 }
@@ -3904,13 +4034,13 @@ static int ath5k_hw_proc_4word_tx_status(struct ath5k_hw *ah,
3904 if ((tx_status->tx_status_0 & AR5K_DESC_TX_STATUS0_FRAME_XMIT_OK) == 0){ 4034 if ((tx_status->tx_status_0 & AR5K_DESC_TX_STATUS0_FRAME_XMIT_OK) == 0){
3905 if (tx_status->tx_status_0 & 4035 if (tx_status->tx_status_0 &
3906 AR5K_DESC_TX_STATUS0_EXCESSIVE_RETRIES) 4036 AR5K_DESC_TX_STATUS0_EXCESSIVE_RETRIES)
3907 desc->ds_us.tx.ts_status |= AR5K_TXERR_XRETRY; 4037 ts->ts_status |= AR5K_TXERR_XRETRY;
3908 4038
3909 if (tx_status->tx_status_0 & AR5K_DESC_TX_STATUS0_FIFO_UNDERRUN) 4039 if (tx_status->tx_status_0 & AR5K_DESC_TX_STATUS0_FIFO_UNDERRUN)
3910 desc->ds_us.tx.ts_status |= AR5K_TXERR_FIFO; 4040 ts->ts_status |= AR5K_TXERR_FIFO;
3911 4041
3912 if (tx_status->tx_status_0 & AR5K_DESC_TX_STATUS0_FILTERED) 4042 if (tx_status->tx_status_0 & AR5K_DESC_TX_STATUS0_FILTERED)
3913 desc->ds_us.tx.ts_status |= AR5K_TXERR_FILT; 4043 ts->ts_status |= AR5K_TXERR_FILT;
3914 } 4044 }
3915 4045
3916 return 0; 4046 return 0;
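
Every field in the TX status words above is pulled out with the same mask-and-shift idiom: AR5K_REG_MS pairs a mask constant with a matching _S shift constant (the pairs are visible in the hw.h hunk further down). A minimal standalone sketch of that idiom, using a made-up field rather than the real ar5k definitions:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical 11-bit field occupying bits 2..12 of a status word. */
#define DEMO_STATUS0_SEQ_NUM    0x00001ffc
#define DEMO_STATUS0_SEQ_NUM_S  2

/* Equivalent of "(val & FIELD) >> FIELD_S": isolate the field with the mask,
 * then right-align it so the caller gets a plain integer. */
static uint32_t demo_reg_ms(uint32_t val, uint32_t mask, unsigned int shift)
{
        return (val & mask) >> shift;
}

int main(void)
{
        uint32_t status0 = 0x00000a54;  /* fabricated status word */

        printf("seq=0x%x\n", (unsigned)demo_reg_ms(status0,
                        DEMO_STATUS0_SEQ_NUM, DEMO_STATUS0_SEQ_NUM_S));
        /* (0xa54 & 0x1ffc) >> 2 == 0x295 */
        return 0;
}
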
@@ -3926,31 +4056,27 @@ static int ath5k_hw_proc_4word_tx_status(struct ath5k_hw *ah,
3926int ath5k_hw_setup_rx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc, 4056int ath5k_hw_setup_rx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
3927 u32 size, unsigned int flags) 4057 u32 size, unsigned int flags)
3928{ 4058{
3929 struct ath5k_rx_desc *rx_desc; 4059 struct ath5k_hw_rx_ctl *rx_ctl;
3930 4060
3931 ATH5K_TRACE(ah->ah_sc); 4061 ATH5K_TRACE(ah->ah_sc);
3932 rx_desc = (struct ath5k_rx_desc *)&desc->ds_ctl0; 4062 rx_ctl = &desc->ud.ds_rx.rx_ctl;
3933 4063
3934 /* 4064 /*
3935 *Clear ds_hw 4065 * Clear the descriptor
3936 * If we don't clean the status descriptor, 4066 * If we don't clean the status descriptor,
3937 * while scanning we get too many results, 4067 * while scanning we get too many results,
3938 * most of them virtual, after some secs 4068 * most of them virtual, after some secs
3939 * of scanning system hangs. M.F. 4069 * of scanning system hangs. M.F.
3940 */ 4070 */
3941 memset(desc->ds_hw, 0, sizeof(desc->ds_hw)); 4071 memset(&desc->ud.ds_rx, 0, sizeof(struct ath5k_hw_all_rx_desc));
3942
3943 /*Initialize rx descriptor*/
3944 rx_desc->rx_control_0 = 0;
3945 rx_desc->rx_control_1 = 0;
3946 4072
3947 /* Setup descriptor */ 4073 /* Setup descriptor */
3948 rx_desc->rx_control_1 = size & AR5K_DESC_RX_CTL1_BUF_LEN; 4074 rx_ctl->rx_control_1 = size & AR5K_DESC_RX_CTL1_BUF_LEN;
3949 if (unlikely(rx_desc->rx_control_1 != size)) 4075 if (unlikely(rx_ctl->rx_control_1 != size))
3950 return -EINVAL; 4076 return -EINVAL;
3951 4077
3952 if (flags & AR5K_RXDESC_INTREQ) 4078 if (flags & AR5K_RXDESC_INTREQ)
3953 rx_desc->rx_control_1 |= AR5K_DESC_RX_CTL1_INTREQ; 4079 rx_ctl->rx_control_1 |= AR5K_DESC_RX_CTL1_INTREQ;
3954 4080
3955 return 0; 4081 return 0;
3956} 4082}
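
The buffer-size check in ath5k_hw_setup_rx_desc above relies on a round trip through the length mask: if any bit of size falls outside AR5K_DESC_RX_CTL1_BUF_LEN, the masked value no longer equals size and the call fails with -EINVAL. A self-contained sketch of the same check, with an assumed 12-bit field width:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_RX_CTL1_BUF_LEN    0x00000fff      /* assumed 12-bit length field */

static int demo_setup_rx_len(uint32_t *ctl1, uint32_t size)
{
        *ctl1 = size & DEMO_RX_CTL1_BUF_LEN;
        /* If any bit of size fell outside the field, the round trip differs. */
        if (*ctl1 != size)
                return -EINVAL;
        return 0;
}

int main(void)
{
        uint32_t ctl1;

        printf("2400 bytes -> %d\n", demo_setup_rx_len(&ctl1, 2400)); /* fits: 0 */
        printf("8192 bytes -> %d\n", demo_setup_rx_len(&ctl1, 8192)); /* too big */
        return 0;
}
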
@@ -3958,67 +4084,68 @@ int ath5k_hw_setup_rx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
3958/* 4084/*
3959 * Process the rx status descriptor on 5210/5211 4085 * Process the rx status descriptor on 5210/5211
3960 */ 4086 */
3961static int ath5k_hw_proc_old_rx_status(struct ath5k_hw *ah, 4087static int ath5k_hw_proc_5210_rx_status(struct ath5k_hw *ah,
3962 struct ath5k_desc *desc) 4088 struct ath5k_desc *desc, struct ath5k_rx_status *rs)
3963{ 4089{
3964 struct ath5k_hw_old_rx_status *rx_status; 4090 struct ath5k_hw_rx_status *rx_status;
3965 4091
3966 rx_status = (struct ath5k_hw_old_rx_status *)&desc->ds_hw[0]; 4092 rx_status = &desc->ud.ds_rx.u.rx_stat;
3967 4093
3968 /* No frame received / not ready */ 4094 /* No frame received / not ready */
3969 if (unlikely((rx_status->rx_status_1 & AR5K_OLD_RX_DESC_STATUS1_DONE) 4095 if (unlikely((rx_status->rx_status_1 & AR5K_5210_RX_DESC_STATUS1_DONE)
3970 == 0)) 4096 == 0))
3971 return -EINPROGRESS; 4097 return -EINPROGRESS;
3972 4098
3973 /* 4099 /*
3974 * Frame receive status 4100 * Frame receive status
3975 */ 4101 */
3976 desc->ds_us.rx.rs_datalen = rx_status->rx_status_0 & 4102 rs->rs_datalen = rx_status->rx_status_0 &
3977 AR5K_OLD_RX_DESC_STATUS0_DATA_LEN; 4103 AR5K_5210_RX_DESC_STATUS0_DATA_LEN;
3978 desc->ds_us.rx.rs_rssi = AR5K_REG_MS(rx_status->rx_status_0, 4104 rs->rs_rssi = AR5K_REG_MS(rx_status->rx_status_0,
3979 AR5K_OLD_RX_DESC_STATUS0_RECEIVE_SIGNAL); 4105 AR5K_5210_RX_DESC_STATUS0_RECEIVE_SIGNAL);
3980 desc->ds_us.rx.rs_rate = AR5K_REG_MS(rx_status->rx_status_0, 4106 rs->rs_rate = AR5K_REG_MS(rx_status->rx_status_0,
3981 AR5K_OLD_RX_DESC_STATUS0_RECEIVE_RATE); 4107 AR5K_5210_RX_DESC_STATUS0_RECEIVE_RATE);
3982 desc->ds_us.rx.rs_antenna = rx_status->rx_status_0 & 4108 rs->rs_antenna = rx_status->rx_status_0 &
3983 AR5K_OLD_RX_DESC_STATUS0_RECEIVE_ANTENNA; 4109 AR5K_5210_RX_DESC_STATUS0_RECEIVE_ANTENNA;
3984 desc->ds_us.rx.rs_more = rx_status->rx_status_0 & 4110 rs->rs_more = rx_status->rx_status_0 &
3985 AR5K_OLD_RX_DESC_STATUS0_MORE; 4111 AR5K_5210_RX_DESC_STATUS0_MORE;
3986 desc->ds_us.rx.rs_tstamp = AR5K_REG_MS(rx_status->rx_status_1, 4112 /* TODO: this timestamp is 13 bit, later on we assume 15 bit */
3987 AR5K_OLD_RX_DESC_STATUS1_RECEIVE_TIMESTAMP); 4113 rs->rs_tstamp = AR5K_REG_MS(rx_status->rx_status_1,
3988 desc->ds_us.rx.rs_status = 0; 4114 AR5K_5210_RX_DESC_STATUS1_RECEIVE_TIMESTAMP);
4115 rs->rs_status = 0;
3989 4116
3990 /* 4117 /*
3991 * Key table status 4118 * Key table status
3992 */ 4119 */
3993 if (rx_status->rx_status_1 & AR5K_OLD_RX_DESC_STATUS1_KEY_INDEX_VALID) 4120 if (rx_status->rx_status_1 & AR5K_5210_RX_DESC_STATUS1_KEY_INDEX_VALID)
3994 desc->ds_us.rx.rs_keyix = AR5K_REG_MS(rx_status->rx_status_1, 4121 rs->rs_keyix = AR5K_REG_MS(rx_status->rx_status_1,
3995 AR5K_OLD_RX_DESC_STATUS1_KEY_INDEX); 4122 AR5K_5210_RX_DESC_STATUS1_KEY_INDEX);
3996 else 4123 else
3997 desc->ds_us.rx.rs_keyix = AR5K_RXKEYIX_INVALID; 4124 rs->rs_keyix = AR5K_RXKEYIX_INVALID;
3998 4125
3999 /* 4126 /*
4000 * Receive/descriptor errors 4127 * Receive/descriptor errors
4001 */ 4128 */
4002 if ((rx_status->rx_status_1 & AR5K_OLD_RX_DESC_STATUS1_FRAME_RECEIVE_OK) 4129 if ((rx_status->rx_status_1 &
4003 == 0) { 4130 AR5K_5210_RX_DESC_STATUS1_FRAME_RECEIVE_OK) == 0) {
4004 if (rx_status->rx_status_1 & AR5K_OLD_RX_DESC_STATUS1_CRC_ERROR) 4131 if (rx_status->rx_status_1 &
4005 desc->ds_us.rx.rs_status |= AR5K_RXERR_CRC; 4132 AR5K_5210_RX_DESC_STATUS1_CRC_ERROR)
4133 rs->rs_status |= AR5K_RXERR_CRC;
4006 4134
4007 if (rx_status->rx_status_1 & 4135 if (rx_status->rx_status_1 &
4008 AR5K_OLD_RX_DESC_STATUS1_FIFO_OVERRUN) 4136 AR5K_5210_RX_DESC_STATUS1_FIFO_OVERRUN)
4009 desc->ds_us.rx.rs_status |= AR5K_RXERR_FIFO; 4137 rs->rs_status |= AR5K_RXERR_FIFO;
4010 4138
4011 if (rx_status->rx_status_1 & 4139 if (rx_status->rx_status_1 &
4012 AR5K_OLD_RX_DESC_STATUS1_PHY_ERROR) { 4140 AR5K_5210_RX_DESC_STATUS1_PHY_ERROR) {
4013 desc->ds_us.rx.rs_status |= AR5K_RXERR_PHY; 4141 rs->rs_status |= AR5K_RXERR_PHY;
4014 desc->ds_us.rx.rs_phyerr = 4142 rs->rs_phyerr = AR5K_REG_MS(rx_status->rx_status_1,
4015 AR5K_REG_MS(rx_status->rx_status_1, 4143 AR5K_5210_RX_DESC_STATUS1_PHY_ERROR);
4016 AR5K_OLD_RX_DESC_STATUS1_PHY_ERROR);
4017 } 4144 }
4018 4145
4019 if (rx_status->rx_status_1 & 4146 if (rx_status->rx_status_1 &
4020 AR5K_OLD_RX_DESC_STATUS1_DECRYPT_CRC_ERROR) 4147 AR5K_5210_RX_DESC_STATUS1_DECRYPT_CRC_ERROR)
4021 desc->ds_us.rx.rs_status |= AR5K_RXERR_DECRYPT; 4148 rs->rs_status |= AR5K_RXERR_DECRYPT;
4022 } 4149 }
4023 4150
4024 return 0; 4151 return 0;
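
The TODO above notes that the 5210/5211 receive timestamp is only 13 bits wide while later code assumes 15. A truncated stamp like this is normally re-extended against the full TSF, and the assumed width decides where the wrap-around correction lands; the sketch below (not the driver's code) shows the two widths giving different answers for the same raw values:

#include <stdint.h>
#include <stdio.h>

/* Re-extend an n_bits-wide hardware timestamp "rstamp" against the 64-bit TSF,
 * assuming the frame arrived no more than one wrap period before the TSF read. */
static uint64_t demo_extend_tstamp(uint64_t tsf, uint32_t rstamp, unsigned int n_bits)
{
        uint64_t mask = ((uint64_t)1 << n_bits) - 1;
        uint64_t ext = (tsf & ~mask) | (rstamp & mask);

        /* If the low TSF bits already wrapped past rstamp, step back one period. */
        if ((tsf & mask) < (rstamp & mask))
                ext -= mask + 1;
        return ext;
}

int main(void)
{
        /* A 13-bit stamp taken at absolute time 0x5ff0; TSF read slightly later. */
        uint64_t tsf = 0x6010;
        uint32_t rstamp = 0x1ff0;

        printf("13-bit: 0x%llx\n",      /* 0x5ff0: matches the arrival time */
               (unsigned long long)demo_extend_tstamp(tsf, rstamp, 13));
        printf("15-bit: 0x%llx\n",      /* 0x1ff0: wrong by 0x4000 */
               (unsigned long long)demo_extend_tstamp(tsf, rstamp, 15));
        return 0;
}
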
@@ -4027,71 +4154,72 @@ static int ath5k_hw_proc_old_rx_status(struct ath5k_hw *ah,
4027/* 4154/*
4028 * Process the rx status descriptor on 5212 4155 * Process the rx status descriptor on 5212
4029 */ 4156 */
4030static int ath5k_hw_proc_new_rx_status(struct ath5k_hw *ah, 4157static int ath5k_hw_proc_5212_rx_status(struct ath5k_hw *ah,
4031 struct ath5k_desc *desc) 4158 struct ath5k_desc *desc, struct ath5k_rx_status *rs)
4032{ 4159{
4033 struct ath5k_hw_new_rx_status *rx_status; 4160 struct ath5k_hw_rx_status *rx_status;
4034 struct ath5k_hw_rx_error *rx_err; 4161 struct ath5k_hw_rx_error *rx_err;
4035 4162
4036 ATH5K_TRACE(ah->ah_sc); 4163 ATH5K_TRACE(ah->ah_sc);
4037 rx_status = (struct ath5k_hw_new_rx_status *)&desc->ds_hw[0]; 4164 rx_status = &desc->ud.ds_rx.u.rx_stat;
4038 4165
4039 /* Overlay on error */ 4166 /* Overlay on error */
4040 rx_err = (struct ath5k_hw_rx_error *)&desc->ds_hw[0]; 4167 rx_err = &desc->ud.ds_rx.u.rx_err;
4041 4168
4042 /* No frame received / not ready */ 4169 /* No frame received / not ready */
4043 if (unlikely((rx_status->rx_status_1 & AR5K_NEW_RX_DESC_STATUS1_DONE) 4170 if (unlikely((rx_status->rx_status_1 & AR5K_5212_RX_DESC_STATUS1_DONE)
4044 == 0)) 4171 == 0))
4045 return -EINPROGRESS; 4172 return -EINPROGRESS;
4046 4173
4047 /* 4174 /*
4048 * Frame receive status 4175 * Frame receive status
4049 */ 4176 */
4050 desc->ds_us.rx.rs_datalen = rx_status->rx_status_0 & 4177 rs->rs_datalen = rx_status->rx_status_0 &
4051 AR5K_NEW_RX_DESC_STATUS0_DATA_LEN; 4178 AR5K_5212_RX_DESC_STATUS0_DATA_LEN;
4052 desc->ds_us.rx.rs_rssi = AR5K_REG_MS(rx_status->rx_status_0, 4179 rs->rs_rssi = AR5K_REG_MS(rx_status->rx_status_0,
4053 AR5K_NEW_RX_DESC_STATUS0_RECEIVE_SIGNAL); 4180 AR5K_5212_RX_DESC_STATUS0_RECEIVE_SIGNAL);
4054 desc->ds_us.rx.rs_rate = AR5K_REG_MS(rx_status->rx_status_0, 4181 rs->rs_rate = AR5K_REG_MS(rx_status->rx_status_0,
4055 AR5K_NEW_RX_DESC_STATUS0_RECEIVE_RATE); 4182 AR5K_5212_RX_DESC_STATUS0_RECEIVE_RATE);
4056 desc->ds_us.rx.rs_antenna = rx_status->rx_status_0 & 4183 rs->rs_antenna = rx_status->rx_status_0 &
4057 AR5K_NEW_RX_DESC_STATUS0_RECEIVE_ANTENNA; 4184 AR5K_5212_RX_DESC_STATUS0_RECEIVE_ANTENNA;
4058 desc->ds_us.rx.rs_more = rx_status->rx_status_0 & 4185 rs->rs_more = rx_status->rx_status_0 &
4059 AR5K_NEW_RX_DESC_STATUS0_MORE; 4186 AR5K_5212_RX_DESC_STATUS0_MORE;
4060 desc->ds_us.rx.rs_tstamp = AR5K_REG_MS(rx_status->rx_status_1, 4187 rs->rs_tstamp = AR5K_REG_MS(rx_status->rx_status_1,
4061 AR5K_NEW_RX_DESC_STATUS1_RECEIVE_TIMESTAMP); 4188 AR5K_5212_RX_DESC_STATUS1_RECEIVE_TIMESTAMP);
4062 desc->ds_us.rx.rs_status = 0; 4189 rs->rs_status = 0;
4063 4190
4064 /* 4191 /*
4065 * Key table status 4192 * Key table status
4066 */ 4193 */
4067 if (rx_status->rx_status_1 & AR5K_NEW_RX_DESC_STATUS1_KEY_INDEX_VALID) 4194 if (rx_status->rx_status_1 & AR5K_5212_RX_DESC_STATUS1_KEY_INDEX_VALID)
4068 desc->ds_us.rx.rs_keyix = AR5K_REG_MS(rx_status->rx_status_1, 4195 rs->rs_keyix = AR5K_REG_MS(rx_status->rx_status_1,
4069 AR5K_NEW_RX_DESC_STATUS1_KEY_INDEX); 4196 AR5K_5212_RX_DESC_STATUS1_KEY_INDEX);
4070 else 4197 else
4071 desc->ds_us.rx.rs_keyix = AR5K_RXKEYIX_INVALID; 4198 rs->rs_keyix = AR5K_RXKEYIX_INVALID;
4072 4199
4073 /* 4200 /*
4074 * Receive/descriptor errors 4201 * Receive/descriptor errors
4075 */ 4202 */
4076 if ((rx_status->rx_status_1 & 4203 if ((rx_status->rx_status_1 &
4077 AR5K_NEW_RX_DESC_STATUS1_FRAME_RECEIVE_OK) == 0) { 4204 AR5K_5212_RX_DESC_STATUS1_FRAME_RECEIVE_OK) == 0) {
4078 if (rx_status->rx_status_1 & AR5K_NEW_RX_DESC_STATUS1_CRC_ERROR) 4205 if (rx_status->rx_status_1 &
4079 desc->ds_us.rx.rs_status |= AR5K_RXERR_CRC; 4206 AR5K_5212_RX_DESC_STATUS1_CRC_ERROR)
4207 rs->rs_status |= AR5K_RXERR_CRC;
4080 4208
4081 if (rx_status->rx_status_1 & 4209 if (rx_status->rx_status_1 &
4082 AR5K_NEW_RX_DESC_STATUS1_PHY_ERROR) { 4210 AR5K_5212_RX_DESC_STATUS1_PHY_ERROR) {
4083 desc->ds_us.rx.rs_status |= AR5K_RXERR_PHY; 4211 rs->rs_status |= AR5K_RXERR_PHY;
4084 desc->ds_us.rx.rs_phyerr = 4212 rs->rs_phyerr = AR5K_REG_MS(rx_err->rx_error_1,
4085 AR5K_REG_MS(rx_err->rx_error_1, 4213 AR5K_RX_DESC_ERROR1_PHY_ERROR_CODE);
4086 AR5K_RX_DESC_ERROR1_PHY_ERROR_CODE);
4087 } 4214 }
4088 4215
4089 if (rx_status->rx_status_1 & 4216 if (rx_status->rx_status_1 &
4090 AR5K_NEW_RX_DESC_STATUS1_DECRYPT_CRC_ERROR) 4217 AR5K_5212_RX_DESC_STATUS1_DECRYPT_CRC_ERROR)
4091 desc->ds_us.rx.rs_status |= AR5K_RXERR_DECRYPT; 4218 rs->rs_status |= AR5K_RXERR_DECRYPT;
4092 4219
4093 if (rx_status->rx_status_1 & AR5K_NEW_RX_DESC_STATUS1_MIC_ERROR) 4220 if (rx_status->rx_status_1 &
4094 desc->ds_us.rx.rs_status |= AR5K_RXERR_MIC; 4221 AR5K_5212_RX_DESC_STATUS1_MIC_ERROR)
4222 rs->rs_status |= AR5K_RXERR_MIC;
4095 } 4223 }
4096 4224
4097 return 0; 4225 return 0;
@@ -4250,35 +4378,6 @@ void ath5k_hw_set_gpio_intr(struct ath5k_hw *ah, unsigned int gpio,
4250} 4378}
4251 4379
4252 4380
4253/*********************************\
4254 Regulatory Domain/Channels Setup
4255\*********************************/
4256
4257u16 ath5k_get_regdomain(struct ath5k_hw *ah)
4258{
4259 u16 regdomain;
4260 enum ath5k_regdom ieee_regdomain;
4261#ifdef COUNTRYCODE
4262 u16 code;
4263#endif
4264
4265 ath5k_eeprom_regulation_domain(ah, false, &ieee_regdomain);
4266 ah->ah_capabilities.cap_regdomain.reg_hw = ieee_regdomain;
4267
4268#ifdef COUNTRYCODE
4269 /*
4270 * Get the regulation domain by country code. This will ignore
4271 * the settings found in the EEPROM.
4272 */
4273 code = ieee80211_name2countrycode(COUNTRYCODE);
4274 ieee_regdomain = ieee80211_countrycode2regdomain(code);
4275#endif
4276
4277 regdomain = ath5k_regdom_from_ieee(ieee_regdomain);
4278 ah->ah_capabilities.cap_regdomain.reg_current = regdomain;
4279
4280 return regdomain;
4281}
4282 4381
4283 4382
4284/****************\ 4383/****************\
diff --git a/drivers/net/wireless/ath5k/hw.h b/drivers/net/wireless/ath5k/hw.h
index d9a7c0973f53..64fca8dcb386 100644
--- a/drivers/net/wireless/ath5k/hw.h
+++ b/drivers/net/wireless/ath5k/hw.h
@@ -173,7 +173,10 @@ struct ath5k_eeprom_info {
173 * (rX: reserved fields possibly used by future versions of the ar5k chipset) 173 * (rX: reserved fields possibly used by future versions of the ar5k chipset)
174 */ 174 */
175 175
176struct ath5k_rx_desc { 176/*
177 * common hardware RX control descriptor
178 */
179struct ath5k_hw_rx_ctl {
177 u32 rx_control_0; /* RX control word 0 */ 180 u32 rx_control_0; /* RX control word 0 */
178 181
179#define AR5K_DESC_RX_CTL0 0x00000000 182#define AR5K_DESC_RX_CTL0 0x00000000
@@ -185,69 +188,63 @@ struct ath5k_rx_desc {
185} __packed; 188} __packed;
186 189
187/* 190/*
188 * 5210/5211 rx status descriptor 191 * common hardware RX status descriptor
192 * 5210/11 and 5212 differ only in the flags defined below
189 */ 193 */
190struct ath5k_hw_old_rx_status { 194struct ath5k_hw_rx_status {
191 u32 rx_status_0; /* RX status word 0 */ 195 u32 rx_status_0; /* RX status word 0 */
192
193#define AR5K_OLD_RX_DESC_STATUS0_DATA_LEN 0x00000fff
194#define AR5K_OLD_RX_DESC_STATUS0_MORE 0x00001000
195#define AR5K_OLD_RX_DESC_STATUS0_RECEIVE_RATE 0x00078000
196#define AR5K_OLD_RX_DESC_STATUS0_RECEIVE_RATE_S 15
197#define AR5K_OLD_RX_DESC_STATUS0_RECEIVE_SIGNAL 0x07f80000
198#define AR5K_OLD_RX_DESC_STATUS0_RECEIVE_SIGNAL_S 19
199#define AR5K_OLD_RX_DESC_STATUS0_RECEIVE_ANTENNA 0x38000000
200#define AR5K_OLD_RX_DESC_STATUS0_RECEIVE_ANTENNA_S 27
201
202 u32 rx_status_1; /* RX status word 1 */ 196 u32 rx_status_1; /* RX status word 1 */
203
204#define AR5K_OLD_RX_DESC_STATUS1_DONE 0x00000001
205#define AR5K_OLD_RX_DESC_STATUS1_FRAME_RECEIVE_OK 0x00000002
206#define AR5K_OLD_RX_DESC_STATUS1_CRC_ERROR 0x00000004
207#define AR5K_OLD_RX_DESC_STATUS1_FIFO_OVERRUN 0x00000008
208#define AR5K_OLD_RX_DESC_STATUS1_DECRYPT_CRC_ERROR 0x00000010
209#define AR5K_OLD_RX_DESC_STATUS1_PHY_ERROR 0x000000e0
210#define AR5K_OLD_RX_DESC_STATUS1_PHY_ERROR_S 5
211#define AR5K_OLD_RX_DESC_STATUS1_KEY_INDEX_VALID 0x00000100
212#define AR5K_OLD_RX_DESC_STATUS1_KEY_INDEX 0x00007e00
213#define AR5K_OLD_RX_DESC_STATUS1_KEY_INDEX_S 9
214#define AR5K_OLD_RX_DESC_STATUS1_RECEIVE_TIMESTAMP 0x0fff8000
215#define AR5K_OLD_RX_DESC_STATUS1_RECEIVE_TIMESTAMP_S 15
216#define AR5K_OLD_RX_DESC_STATUS1_KEY_CACHE_MISS 0x10000000
217} __packed; 197} __packed;
218 198
199/* 5210/5211 */
200#define AR5K_5210_RX_DESC_STATUS0_DATA_LEN 0x00000fff
201#define AR5K_5210_RX_DESC_STATUS0_MORE 0x00001000
202#define AR5K_5210_RX_DESC_STATUS0_RECEIVE_RATE 0x00078000
203#define AR5K_5210_RX_DESC_STATUS0_RECEIVE_RATE_S 15
204#define AR5K_5210_RX_DESC_STATUS0_RECEIVE_SIGNAL 0x07f80000
205#define AR5K_5210_RX_DESC_STATUS0_RECEIVE_SIGNAL_S 19
206#define AR5K_5210_RX_DESC_STATUS0_RECEIVE_ANTENNA 0x38000000
207#define AR5K_5210_RX_DESC_STATUS0_RECEIVE_ANTENNA_S 27
208#define AR5K_5210_RX_DESC_STATUS1_DONE 0x00000001
209#define AR5K_5210_RX_DESC_STATUS1_FRAME_RECEIVE_OK 0x00000002
210#define AR5K_5210_RX_DESC_STATUS1_CRC_ERROR 0x00000004
211#define AR5K_5210_RX_DESC_STATUS1_FIFO_OVERRUN 0x00000008
212#define AR5K_5210_RX_DESC_STATUS1_DECRYPT_CRC_ERROR 0x00000010
213#define AR5K_5210_RX_DESC_STATUS1_PHY_ERROR 0x000000e0
214#define AR5K_5210_RX_DESC_STATUS1_PHY_ERROR_S 5
215#define AR5K_5210_RX_DESC_STATUS1_KEY_INDEX_VALID 0x00000100
216#define AR5K_5210_RX_DESC_STATUS1_KEY_INDEX 0x00007e00
217#define AR5K_5210_RX_DESC_STATUS1_KEY_INDEX_S 9
218#define AR5K_5210_RX_DESC_STATUS1_RECEIVE_TIMESTAMP 0x0fff8000
219#define AR5K_5210_RX_DESC_STATUS1_RECEIVE_TIMESTAMP_S 15
220#define AR5K_5210_RX_DESC_STATUS1_KEY_CACHE_MISS 0x10000000
221
222/* 5212 */
223#define AR5K_5212_RX_DESC_STATUS0_DATA_LEN 0x00000fff
224#define AR5K_5212_RX_DESC_STATUS0_MORE 0x00001000
225#define AR5K_5212_RX_DESC_STATUS0_DECOMP_CRC_ERROR 0x00002000
226#define AR5K_5212_RX_DESC_STATUS0_RECEIVE_RATE 0x000f8000
227#define AR5K_5212_RX_DESC_STATUS0_RECEIVE_RATE_S 15
228#define AR5K_5212_RX_DESC_STATUS0_RECEIVE_SIGNAL 0x0ff00000
229#define AR5K_5212_RX_DESC_STATUS0_RECEIVE_SIGNAL_S 20
230#define AR5K_5212_RX_DESC_STATUS0_RECEIVE_ANTENNA 0xf0000000
231#define AR5K_5212_RX_DESC_STATUS0_RECEIVE_ANTENNA_S 28
232#define AR5K_5212_RX_DESC_STATUS1_DONE 0x00000001
233#define AR5K_5212_RX_DESC_STATUS1_FRAME_RECEIVE_OK 0x00000002
234#define AR5K_5212_RX_DESC_STATUS1_CRC_ERROR 0x00000004
235#define AR5K_5212_RX_DESC_STATUS1_DECRYPT_CRC_ERROR 0x00000008
236#define AR5K_5212_RX_DESC_STATUS1_PHY_ERROR 0x00000010
237#define AR5K_5212_RX_DESC_STATUS1_MIC_ERROR 0x00000020
238#define AR5K_5212_RX_DESC_STATUS1_KEY_INDEX_VALID 0x00000100
239#define AR5K_5212_RX_DESC_STATUS1_KEY_INDEX 0x0000fe00
240#define AR5K_5212_RX_DESC_STATUS1_KEY_INDEX_S 9
241#define AR5K_5212_RX_DESC_STATUS1_RECEIVE_TIMESTAMP 0x7fff0000
242#define AR5K_5212_RX_DESC_STATUS1_RECEIVE_TIMESTAMP_S 16
243#define AR5K_5212_RX_DESC_STATUS1_KEY_CACHE_MISS 0x80000000
244
219/* 245/*
220 * 5212 rx status descriptor 246 * common hardware RX error descriptor
221 */ 247 */
222struct ath5k_hw_new_rx_status {
223 u32 rx_status_0; /* RX status word 0 */
224
225#define AR5K_NEW_RX_DESC_STATUS0_DATA_LEN 0x00000fff
226#define AR5K_NEW_RX_DESC_STATUS0_MORE 0x00001000
227#define AR5K_NEW_RX_DESC_STATUS0_DECOMP_CRC_ERROR 0x00002000
228#define AR5K_NEW_RX_DESC_STATUS0_RECEIVE_RATE 0x000f8000
229#define AR5K_NEW_RX_DESC_STATUS0_RECEIVE_RATE_S 15
230#define AR5K_NEW_RX_DESC_STATUS0_RECEIVE_SIGNAL 0x0ff00000
231#define AR5K_NEW_RX_DESC_STATUS0_RECEIVE_SIGNAL_S 20
232#define AR5K_NEW_RX_DESC_STATUS0_RECEIVE_ANTENNA 0xf0000000
233#define AR5K_NEW_RX_DESC_STATUS0_RECEIVE_ANTENNA_S 28
234
235 u32 rx_status_1; /* RX status word 1 */
236
237#define AR5K_NEW_RX_DESC_STATUS1_DONE 0x00000001
238#define AR5K_NEW_RX_DESC_STATUS1_FRAME_RECEIVE_OK 0x00000002
239#define AR5K_NEW_RX_DESC_STATUS1_CRC_ERROR 0x00000004
240#define AR5K_NEW_RX_DESC_STATUS1_DECRYPT_CRC_ERROR 0x00000008
241#define AR5K_NEW_RX_DESC_STATUS1_PHY_ERROR 0x00000010
242#define AR5K_NEW_RX_DESC_STATUS1_MIC_ERROR 0x00000020
243#define AR5K_NEW_RX_DESC_STATUS1_KEY_INDEX_VALID 0x00000100
244#define AR5K_NEW_RX_DESC_STATUS1_KEY_INDEX 0x0000fe00
245#define AR5K_NEW_RX_DESC_STATUS1_KEY_INDEX_S 9
246#define AR5K_NEW_RX_DESC_STATUS1_RECEIVE_TIMESTAMP 0x7fff0000
247#define AR5K_NEW_RX_DESC_STATUS1_RECEIVE_TIMESTAMP_S 16
248#define AR5K_NEW_RX_DESC_STATUS1_KEY_CACHE_MISS 0x80000000
249} __packed;
250
251struct ath5k_hw_rx_error { 248struct ath5k_hw_rx_error {
252 u32 rx_error_0; /* RX error word 0 */ 249 u32 rx_error_0; /* RX error word 0 */
253 250
@@ -268,7 +265,10 @@ struct ath5k_hw_rx_error {
268#define AR5K_DESC_RX_PHY_ERROR_SERVICE 0xc0 265#define AR5K_DESC_RX_PHY_ERROR_SERVICE 0xc0
269#define AR5K_DESC_RX_PHY_ERROR_TRANSMITOVR 0xe0 266#define AR5K_DESC_RX_PHY_ERROR_TRANSMITOVR 0xe0
270 267
271struct ath5k_hw_2w_tx_desc { 268/*
269 * 5210/5211 hardware 2-word TX control descriptor
270 */
271struct ath5k_hw_2w_tx_ctl {
272 u32 tx_control_0; /* TX control word 0 */ 272 u32 tx_control_0; /* TX control word 0 */
273 273
274#define AR5K_2W_TX_DESC_CTL0_FRAME_LEN 0x00000fff 274#define AR5K_2W_TX_DESC_CTL0_FRAME_LEN 0x00000fff
@@ -314,9 +314,9 @@ struct ath5k_hw_2w_tx_desc {
314#define AR5K_AR5210_TX_DESC_FRAME_TYPE_PIFS 0x10 314#define AR5K_AR5210_TX_DESC_FRAME_TYPE_PIFS 0x10
315 315
316/* 316/*
317 * 5212 4-word tx control descriptor 317 * 5212 hardware 4-word TX control descriptor
318 */ 318 */
319struct ath5k_hw_4w_tx_desc { 319struct ath5k_hw_4w_tx_ctl {
320 u32 tx_control_0; /* TX control word 0 */ 320 u32 tx_control_0; /* TX control word 0 */
321 321
322#define AR5K_4W_TX_DESC_CTL0_FRAME_LEN 0x00000fff 322#define AR5K_4W_TX_DESC_CTL0_FRAME_LEN 0x00000fff
@@ -374,7 +374,7 @@ struct ath5k_hw_4w_tx_desc {
374} __packed; 374} __packed;
375 375
376/* 376/*
377 * Common tx status descriptor 377 * Common TX status descriptor
378 */ 378 */
379struct ath5k_hw_tx_status { 379struct ath5k_hw_tx_status {
380 u32 tx_status_0; /* TX status word 0 */ 380 u32 tx_status_0; /* TX status word 0 */
@@ -415,6 +415,34 @@ struct ath5k_hw_tx_status {
415 415
416 416
417/* 417/*
418 * 5210/5211 hardware TX descriptor
419 */
420struct ath5k_hw_5210_tx_desc {
421 struct ath5k_hw_2w_tx_ctl tx_ctl;
422 struct ath5k_hw_tx_status tx_stat;
423} __packed;
424
425/*
426 * 5212 hardware TX descriptor
427 */
428struct ath5k_hw_5212_tx_desc {
429 struct ath5k_hw_4w_tx_ctl tx_ctl;
430 struct ath5k_hw_tx_status tx_stat;
431} __packed;
432
433/*
434 * common hardware RX descriptor
435 */
436struct ath5k_hw_all_rx_desc {
437 struct ath5k_hw_rx_ctl rx_ctl;
438 union {
439 struct ath5k_hw_rx_status rx_stat;
440 struct ath5k_hw_rx_error rx_err;
441 } u;
442} __packed;
443
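
The union in ath5k_hw_all_rx_desc exists because the hardware reuses the same pair of descriptor words for either a status or an error layout; as the 5212 RX path above shows, the error view is only consulted once the status word flags a PHY error. A standalone sketch of that overlay pattern, with illustrative field names and masks:

#include <stdint.h>
#include <stdio.h>

#define DEMO_STATUS1_DONE       0x00000001
#define DEMO_STATUS1_PHY_ERROR  0x00000010
#define DEMO_ERROR1_PHY_CODE    0x0000ff00      /* illustrative layout */
#define DEMO_ERROR1_PHY_CODE_S  8

struct demo_rx_status { uint32_t rx_status_0, rx_status_1; };
struct demo_rx_error  { uint32_t rx_error_0,  rx_error_1;  };

struct demo_rx_desc {
        uint32_t rx_control_0, rx_control_1;
        union {                         /* same two words, two interpretations */
                struct demo_rx_status rx_stat;
                struct demo_rx_error  rx_err;
        } u;
};

int main(void)
{
        struct demo_rx_desc d = { 0 };

        /* Hardware would write these words; here status word 1 reports a PHY
         * error, so the same memory is re-read through the error view. */
        d.u.rx_stat.rx_status_1 = DEMO_STATUS1_DONE | DEMO_STATUS1_PHY_ERROR |
                                  0x00002a00;

        if (d.u.rx_stat.rx_status_1 & DEMO_STATUS1_PHY_ERROR) {
                unsigned int code = (d.u.rx_err.rx_error_1 & DEMO_ERROR1_PHY_CODE)
                                        >> DEMO_ERROR1_PHY_CODE_S;
                printf("phy error code 0x%02x\n", code);        /* prints 0x2a */
        }
        return 0;
}
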
444
445/*
418 * AR5K REGISTER ACCESS 446 * AR5K REGISTER ACCESS
419 */ 447 */
420 448
diff --git a/drivers/net/wireless/ath5k/initvals.c b/drivers/net/wireless/ath5k/initvals.c
index 2c22f1d4ee64..04c84e9da89d 100644
--- a/drivers/net/wireless/ath5k/initvals.c
+++ b/drivers/net/wireless/ath5k/initvals.c
@@ -678,8 +678,8 @@ static const struct ath5k_ini ar5212_ini[] = {
678 { AR5K_PHY(644), 0x00806333 }, 678 { AR5K_PHY(644), 0x00806333 },
679 { AR5K_PHY(645), 0x00106c10 }, 679 { AR5K_PHY(645), 0x00106c10 },
680 { AR5K_PHY(646), 0x009c4060 }, 680 { AR5K_PHY(646), 0x009c4060 },
681 /*{ AR5K_PHY(647), 0x1483800a },*/ /* Old value */
682 { AR5K_PHY(647), 0x1483800a }, 681 { AR5K_PHY(647), 0x1483800a },
682 /* { AR5K_PHY(648), 0x018830c6 },*/ /* 2413 */
683 { AR5K_PHY(648), 0x01831061 }, 683 { AR5K_PHY(648), 0x01831061 },
684 { AR5K_PHY(649), 0x00000400 }, 684 { AR5K_PHY(649), 0x00000400 },
685 /*{ AR5K_PHY(650), 0x000001b5 },*/ 685 /*{ AR5K_PHY(650), 0x000001b5 },*/
@@ -1081,6 +1081,414 @@ static const struct ath5k_ini_mode rf5413_ini_mode_end[] = {
1081 { 0xf3307ff0, 0xf3307ff0, 0xf3307ff0, 0xf3307ff0, 0xf3307ff0 } }, 1081 { 0xf3307ff0, 0xf3307ff0, 0xf3307ff0, 0xf3307ff0, 0xf3307ff0 } },
1082}; 1082};
1083 1083
1084/* Initial mode-specific settings for RF2413/2414 (Written after ar5212_ini) */
1085/* XXX: No dumps for turbog yet, so turbog is the same as g here, with some
1086 * minor tweaking based on dumps from other chips */
1087static const struct ath5k_ini_mode rf2413_ini_mode_end[] = {
1088 { AR5K_TXCFG,
1089 /* b g gTurbo */
1090 { 0x00000015, 0x00000015, 0x00000015 } },
1091 { AR5K_USEC_5211,
1092 { 0x04e01395, 0x12e013ab, 0x098813cf } },
1093 { AR5K_PHY(10),
1094 { 0x05020000, 0x0a020001, 0x0a020001 } },
1095 { AR5K_PHY(13),
1096 { 0x00000e00, 0x00000e00, 0x00000e00 } },
1097 { AR5K_PHY(14),
1098 { 0x0000000a, 0x0000000a, 0x0000000a } },
1099 { AR5K_PHY(18),
1100 { 0x001a6a64, 0x001a6a64, 0x001a6a64 } },
1101 { AR5K_PHY(20),
1102 { 0x0de8b0da, 0x0c98b0da, 0x0c98b0da } },
1103 { AR5K_PHY_SIG,
1104 { 0x7ee80d2e, 0x7ec80d2e, 0x7ec80d2e } },
1105 { AR5K_PHY_AGCCOARSE,
1106 { 0x3137665e, 0x3139605e, 0x3139605e } },
1107 { AR5K_PHY(27),
1108 { 0x050cb081, 0x050cb081, 0x050cb081 } },
1109 { AR5K_PHY_RX_DELAY,
1110 { 0x0000044c, 0x00000898, 0x000007d0 } },
1111 { AR5K_PHY_FRAME_CTL_5211,
1112 { 0xf7b80d00, 0xf7b81000, 0xf7b81000 } },
1113 { AR5K_PHY_CCKTXCTL,
1114 { 0x00000000, 0x00000000, 0x00000000 } },
1115 { AR5K_PHY(642),
1116 { 0xd03e6788, 0xd03e6788, 0xd03e6788 } },
1117 { AR5K_PHY_GAIN_2GHZ,
1118 { 0x0042c140, 0x0042c140, 0x0042c140 } },
1119 { 0xa21c,
1120 { 0x1863800a, 0x1883800a, 0x1883800a } },
1121 { AR5K_DCU_FP,
1122 { 0x000003e0, 0x000003e0, 0x000003e0 } },
1123 { 0x8060,
1124 { 0x0000000f, 0x0000000f, 0x0000000f } },
1125 { 0x8118,
1126 { 0x00000000, 0x00000000, 0x00000000 } },
1127 { 0x811c,
1128 { 0x00000000, 0x00000000, 0x00000000 } },
1129 { 0x8120,
1130 { 0x00000000, 0x00000000, 0x00000000 } },
1131 { 0x8124,
1132 { 0x00000000, 0x00000000, 0x00000000 } },
1133 { 0x8128,
1134 { 0x00000000, 0x00000000, 0x00000000 } },
1135 { 0x812c,
1136 { 0x00000000, 0x00000000, 0x00000000 } },
1137 { 0x8130,
1138 { 0x00000000, 0x00000000, 0x00000000 } },
1139 { 0x8134,
1140 { 0x00000000, 0x00000000, 0x00000000 } },
1141 { 0x8138,
1142 { 0x00000000, 0x00000000, 0x00000000 } },
1143 { 0x813c,
1144 { 0x00000000, 0x00000000, 0x00000000 } },
1145 { 0x8140,
1146 { 0x800000a8, 0x800000a8, 0x800000a8 } },
1147 { 0x8144,
1148 { 0x00000000, 0x00000000, 0x00000000 } },
1149 { AR5K_PHY_AGC,
1150 { 0x00000000, 0x00000000, 0x00000000 } },
1151 { AR5K_PHY(11),
1152 { 0x0000a000, 0x0000a000, 0x0000a000 } },
1153 { AR5K_PHY(15),
1154 { 0x00200400, 0x00200400, 0x00200400 } },
1155 { AR5K_PHY(19),
1156 { 0x1284233c, 0x1284233c, 0x1284233c } },
1157 { AR5K_PHY_SCR,
1158 { 0x0000001f, 0x0000001f, 0x0000001f } },
1159 { AR5K_PHY_SLMT,
1160 { 0x00000080, 0x00000080, 0x00000080 } },
1161 { AR5K_PHY_SCAL,
1162 { 0x0000000e, 0x0000000e, 0x0000000e } },
1163 { AR5K_PHY(86),
1164 { 0x000000ff, 0x000000ff, 0x000000ff } },
1165 { AR5K_PHY(96),
1166 { 0x00000000, 0x00000000, 0x00000000 } },
1167 { AR5K_PHY(97),
1168 { 0x02800000, 0x02800000, 0x02800000 } },
1169 { AR5K_PHY(104),
1170 { 0x00000000, 0x00000000, 0x00000000 } },
1171 { AR5K_PHY(120),
1172 { 0x00000000, 0x00000000, 0x00000000 } },
1173 { AR5K_PHY(121),
1174 { 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa } },
1175 { AR5K_PHY(122),
1176 { 0x3c466478, 0x3c466478, 0x3c466478 } },
1177 { AR5K_PHY(123),
1178 { 0x000000aa, 0x000000aa, 0x000000aa } },
1179 { AR5K_PHY_SCLOCK,
1180 { 0x0000000c, 0x0000000c, 0x0000000c } },
1181 { AR5K_PHY_SDELAY,
1182 { 0x000000ff, 0x000000ff, 0x000000ff } },
1183 { AR5K_PHY_SPENDING,
1184 { 0x00000014, 0x00000014, 0x00000014 } },
1185 { 0xa228,
1186 { 0x000009b5, 0x000009b5, 0x000009b5 } },
1187 { 0xa23c,
1188 { 0x93c889af, 0x93c889af, 0x93c889af } },
1189 { 0xa24c,
1190 { 0x00000001, 0x00000001, 0x00000001 } },
1191 { 0xa250,
1192 { 0x0000a000, 0x0000a000, 0x0000a000 } },
1193 { 0xa254,
1194 { 0x00000000, 0x00000000, 0x00000000 } },
1195 { 0xa258,
1196 { 0x0cc75380, 0x0cc75380, 0x0cc75380 } },
1197 { 0xa25c,
1198 { 0x0f0f0f01, 0x0f0f0f01, 0x0f0f0f01 } },
1199 { 0xa260,
1200 { 0x5f690f01, 0x5f690f01, 0x5f690f01 } },
1201 { 0xa264,
1202 { 0x00418a11, 0x00418a11, 0x00418a11 } },
1203 { 0xa268,
1204 { 0x00000000, 0x00000000, 0x00000000 } },
1205 { 0xa26c,
1206 { 0x0c30c16a, 0x0c30c16a, 0x0c30c16a } },
1207 { 0xa270,
1208 { 0x00820820, 0x00820820, 0x00820820 } },
1209 { 0xa274,
1210 { 0x001b7caa, 0x001b7caa, 0x001b7caa } },
1211 { 0xa278,
1212 { 0x1ce739ce, 0x1ce739ce, 0x1ce739ce } },
1213 { 0xa27c,
1214 { 0x051701ce, 0x051701ce, 0x051701ce } },
1215 { 0xa300,
1216 { 0x18010000, 0x18010000, 0x18010000 } },
1217 { 0xa304,
1218 { 0x30032602, 0x30032602, 0x30032602 } },
1219 { 0xa308,
1220 { 0x48073e06, 0x48073e06, 0x48073e06 } },
1221 { 0xa30c,
1222 { 0x560b4c0a, 0x560b4c0a, 0x560b4c0a } },
1223 { 0xa310,
1224 { 0x641a600f, 0x641a600f, 0x641a600f } },
1225 { 0xa314,
1226 { 0x784f6e1b, 0x784f6e1b, 0x784f6e1b } },
1227 { 0xa318,
1228 { 0x868f7c5a, 0x868f7c5a, 0x868f7c5a } },
1229 { 0xa31c,
1230 { 0x8ecf865b, 0x8ecf865b, 0x8ecf865b } },
1231 { 0xa320,
1232 { 0x9d4f970f, 0x9d4f970f, 0x9d4f970f } },
1233 { 0xa324,
1234 { 0xa5cfa18f, 0xa5cfa18f, 0xa5cfa18f } },
1235 { 0xa328,
1236 { 0xb55faf1f, 0xb55faf1f, 0xb55faf1f } },
1237 { 0xa32c,
1238 { 0xbddfb99f, 0xbddfb99f, 0xbddfb99f } },
1239 { 0xa330,
1240 { 0xcd7fc73f, 0xcd7fc73f, 0xcd7fc73f } },
1241 { 0xa334,
1242 { 0xd5ffd1bf, 0xd5ffd1bf, 0xd5ffd1bf } },
1243 { 0xa338,
1244 { 0x00000000, 0x00000000, 0x00000000 } },
1245 { 0xa33c,
1246 { 0x00000000, 0x00000000, 0x00000000 } },
1247 { 0xa340,
1248 { 0x00000000, 0x00000000, 0x00000000 } },
1249 { 0xa344,
1250 { 0x00000000, 0x00000000, 0x00000000 } },
1251 { 0xa348,
1252 { 0x3fffffff, 0x3fffffff, 0x3fffffff } },
1253 { 0xa34c,
1254 { 0x3fffffff, 0x3fffffff, 0x3fffffff } },
1255 { 0xa350,
1256 { 0x3fffffff, 0x3fffffff, 0x3fffffff } },
1257 { 0xa354,
1258 { 0x0003ffff, 0x0003ffff, 0x0003ffff } },
1259 { 0xa358,
1260 { 0x79a8aa1f, 0x79a8aa1f, 0x79a8aa1f } },
1261 { 0xa35c,
1262 { 0x066c420f, 0x066c420f, 0x066c420f } },
1263 { 0xa360,
1264 { 0x0f282207, 0x0f282207, 0x0f282207 } },
1265 { 0xa364,
1266 { 0x17601685, 0x17601685, 0x17601685 } },
1267 { 0xa368,
1268 { 0x1f801104, 0x1f801104, 0x1f801104 } },
1269 { 0xa36c,
1270 { 0x37a00c03, 0x37a00c03, 0x37a00c03 } },
1271 { 0xa370,
1272 { 0x3fc40883, 0x3fc40883, 0x3fc40883 } },
1273 { 0xa374,
1274 { 0x57c00803, 0x57c00803, 0x57c00803 } },
1275 { 0xa378,
1276 { 0x5fd80682, 0x5fd80682, 0x5fd80682 } },
1277 { 0xa37c,
1278 { 0x7fe00482, 0x7fe00482, 0x7fe00482 } },
1279 { 0xa380,
1280 { 0x7f3c7bba, 0x7f3c7bba, 0x7f3c7bba } },
1281 { 0xa384,
1282 { 0xf3307ff0, 0xf3307ff0, 0xf3307ff0 } },
1283};
1284
1285/* Initial mode-specific settings for RF2425 (Written after ar5212_ini) */
1286/* XXX: No dumps for turbog yet, so turbog is the same as g here, with some
1287 * minor tweaking based on dumps from other chips */
1288static const struct ath5k_ini_mode rf2425_ini_mode_end[] = {
1289 { AR5K_TXCFG,
1290 /* g gTurbo */
1291 { 0x00000015, 0x00000015 } },
1292 { AR5K_USEC_5211,
1293 { 0x12e013ab, 0x098813cf } },
1294 { AR5K_PHY_TURBO,
1295 { 0x00000000, 0x00000003 } },
1296 { AR5K_PHY(10),
1297 { 0x0a020001, 0x0a020001 } },
1298 { AR5K_PHY(13),
1299 { 0x00000e0e, 0x00000e0e } },
1300 { AR5K_PHY(14),
1301 { 0x0000000b, 0x0000000b } },
1302 { AR5K_PHY(17),
1303 { 0x13721422, 0x13721422 } },
1304 { AR5K_PHY(18),
1305 { 0x00199a65, 0x00199a65 } },
1306 { AR5K_PHY(20),
1307 { 0x0c98b0da, 0x0c98b0da } },
1308 { AR5K_PHY_SIG,
1309 { 0x7ec80d2e, 0x7ec80d2e } },
1310 { AR5K_PHY_AGCCOARSE,
1311 { 0x3139605e, 0x3139605e } },
1312 { AR5K_PHY(27),
1313 { 0x050cb081, 0x050cb081 } },
1314 { AR5K_PHY_RX_DELAY,
1315 { 0x00000898, 0x000007d0 } },
1316 { AR5K_PHY_FRAME_CTL_5211,
1317 { 0xf7b81000, 0xf7b81000 } },
1318 { AR5K_PHY_CCKTXCTL,
1319 { 0x00000000, 0x00000000 } },
1320 { AR5K_PHY(642),
1321 { 0xd03e6788, 0xd03e6788 } },
1322 { AR5K_PHY_GAIN_2GHZ,
1323 { 0x0052c140, 0x0052c140 } },
1324 { 0xa21c,
1325 { 0x1883800a, 0x1883800a } },
1326 { 0xa324,
1327 { 0xa7cfa7cf, 0xa7cfa7cf } },
1328 { 0xa328,
1329 { 0xa7cfa7cf, 0xa7cfa7cf } },
1330 { 0xa32c,
1331 { 0xa7cfa7cf, 0xa7cfa7cf } },
1332 { 0xa330,
1333 { 0xa7cfa7cf, 0xa7cfa7cf } },
1334 { 0xa334,
1335 { 0xa7cfa7cf, 0xa7cfa7cf } },
1336 { AR5K_DCU_FP,
1337 { 0x000003e0, 0x000003e0 } },
1338 { 0x8060,
1339 { 0x0000000f, 0x0000000f } },
1340 { 0x809c,
1341 { 0x00000000, 0x00000000 } },
1342 { 0x80a0,
1343 { 0x00000000, 0x00000000 } },
1344 { 0x8118,
1345 { 0x00000000, 0x00000000 } },
1346 { 0x811c,
1347 { 0x00000000, 0x00000000 } },
1348 { 0x8120,
1349 { 0x00000000, 0x00000000 } },
1350 { 0x8124,
1351 { 0x00000000, 0x00000000 } },
1352 { 0x8128,
1353 { 0x00000000, 0x00000000 } },
1354 { 0x812c,
1355 { 0x00000000, 0x00000000 } },
1356 { 0x8130,
1357 { 0x00000000, 0x00000000 } },
1358 { 0x8134,
1359 { 0x00000000, 0x00000000 } },
1360 { 0x8138,
1361 { 0x00000000, 0x00000000 } },
1362 { 0x813c,
1363 { 0x00000000, 0x00000000 } },
1364 { 0x8140,
1365 { 0x800003f9, 0x800003f9 } },
1366 { 0x8144,
1367 { 0x00000000, 0x00000000 } },
1368 { AR5K_PHY_AGC,
1369 { 0x00000000, 0x00000000 } },
1370 { AR5K_PHY(11),
1371 { 0x0000a000, 0x0000a000 } },
1372 { AR5K_PHY(15),
1373 { 0x00200400, 0x00200400 } },
1374 { AR5K_PHY(19),
1375 { 0x1284233c, 0x1284233c } },
1376 { AR5K_PHY_SCR,
1377 { 0x0000001f, 0x0000001f } },
1378 { AR5K_PHY_SLMT,
1379 { 0x00000080, 0x00000080 } },
1380 { AR5K_PHY_SCAL,
1381 { 0x0000000e, 0x0000000e } },
1382 { AR5K_PHY(86),
1383 { 0x00081fff, 0x00081fff } },
1384 { AR5K_PHY(96),
1385 { 0x00000000, 0x00000000 } },
1386 { AR5K_PHY(97),
1387 { 0x02800000, 0x02800000 } },
1388 { AR5K_PHY(104),
1389 { 0x00000000, 0x00000000 } },
1390 { AR5K_PHY(119),
1391 { 0xfebadbe8, 0xfebadbe8 } },
1392 { AR5K_PHY(120),
1393 { 0x00000000, 0x00000000 } },
1394 { AR5K_PHY(121),
1395 { 0xaaaaaaaa, 0xaaaaaaaa } },
1396 { AR5K_PHY(122),
1397 { 0x3c466478, 0x3c466478 } },
1398 { AR5K_PHY(123),
1399 { 0x000000aa, 0x000000aa } },
1400 { AR5K_PHY_SCLOCK,
1401 { 0x0000000c, 0x0000000c } },
1402 { AR5K_PHY_SDELAY,
1403 { 0x000000ff, 0x000000ff } },
1404 { AR5K_PHY_SPENDING,
1405 { 0x00000014, 0x00000014 } },
1406 { 0xa228,
1407 { 0x000009b5, 0x000009b5 } },
1408 { AR5K_PHY_TXPOWER_RATE3,
1409 { 0x20202020, 0x20202020 } },
1410 { AR5K_PHY_TXPOWER_RATE4,
1411 { 0x20202020, 0x20202020 } },
1412 { 0xa23c,
1413 { 0x93c889af, 0x93c889af } },
1414 { 0xa24c,
1415 { 0x00000001, 0x00000001 } },
1416 { 0xa250,
1417 { 0x0000a000, 0x0000a000 } },
1418 { 0xa254,
1419 { 0x00000000, 0x00000000 } },
1420 { 0xa258,
1421 { 0x0cc75380, 0x0cc75380 } },
1422 { 0xa25c,
1423 { 0x0f0f0f01, 0x0f0f0f01 } },
1424 { 0xa260,
1425 { 0x5f690f01, 0x5f690f01 } },
1426 { 0xa264,
1427 { 0x00418a11, 0x00418a11 } },
1428 { 0xa268,
1429 { 0x00000000, 0x00000000 } },
1430 { 0xa26c,
1431 { 0x0c30c166, 0x0c30c166 } },
1432 { 0xa270,
1433 { 0x00820820, 0x00820820 } },
1434 { 0xa274,
1435 { 0x081a3caa, 0x081a3caa } },
1436 { 0xa278,
1437 { 0x1ce739ce, 0x1ce739ce } },
1438 { 0xa27c,
1439 { 0x051701ce, 0x051701ce } },
1440 { 0xa300,
1441 { 0x16010000, 0x16010000 } },
1442 { 0xa304,
1443 { 0x2c032402, 0x2c032402 } },
1444 { 0xa308,
1445 { 0x48433e42, 0x48433e42 } },
1446 { 0xa30c,
1447 { 0x5a0f500b, 0x5a0f500b } },
1448 { 0xa310,
1449 { 0x6c4b624a, 0x6c4b624a } },
1450 { 0xa314,
1451 { 0x7e8b748a, 0x7e8b748a } },
1452 { 0xa318,
1453 { 0x96cf8ccb, 0x96cf8ccb } },
1454 { 0xa31c,
1455 { 0xa34f9d0f, 0xa34f9d0f } },
1456 { 0xa320,
1457 { 0xa7cfa58f, 0xa7cfa58f } },
1458 { 0xa348,
1459 { 0x3fffffff, 0x3fffffff } },
1460 { 0xa34c,
1461 { 0x3fffffff, 0x3fffffff } },
1462 { 0xa350,
1463 { 0x3fffffff, 0x3fffffff } },
1464 { 0xa354,
1465 { 0x0003ffff, 0x0003ffff } },
1466 { 0xa358,
1467 { 0x79a8aa1f, 0x79a8aa1f } },
1468 { 0xa35c,
1469 { 0x066c420f, 0x066c420f } },
1470 { 0xa360,
1471 { 0x0f282207, 0x0f282207 } },
1472 { 0xa364,
1473 { 0x17601685, 0x17601685 } },
1474 { 0xa368,
1475 { 0x1f801104, 0x1f801104 } },
1476 { 0xa36c,
1477 { 0x37a00c03, 0x37a00c03 } },
1478 { 0xa370,
1479 { 0x3fc40883, 0x3fc40883 } },
1480 { 0xa374,
1481 { 0x57c00803, 0x57c00803 } },
1482 { 0xa378,
1483 { 0x5fd80682, 0x5fd80682 } },
1484 { 0xa37c,
1485 { 0x7fe00482, 0x7fe00482 } },
1486 { 0xa380,
1487 { 0x7f3c7bba, 0x7f3c7bba } },
1488 { 0xa384,
1489 { 0xf3307ff0, 0xf3307ff0 } },
1490};
1491
1084/* 1492/*
1085 * Initial BaseBand Gain settings for RF5111/5112 (AR5210 comes with 1493 * Initial BaseBand Gain settings for RF5111/5112 (AR5210 comes with
1086 * RF5110 only so initial BB Gain settings are included in AR5K_AR5210_INI) 1494 * RF5110 only so initial BB Gain settings are included in AR5K_AR5210_INI)
@@ -1290,35 +1698,92 @@ int ath5k_hw_write_initvals(struct ath5k_hw *ah, u8 mode, bool change_channel)
1290 1698
1291 /* Second set of mode-specific settings */ 1699 /* Second set of mode-specific settings */
1292 if (ah->ah_radio == AR5K_RF5111){ 1700 if (ah->ah_radio == AR5K_RF5111){
1701
1293 ath5k_hw_ini_mode_registers(ah, 1702 ath5k_hw_ini_mode_registers(ah,
1294 ARRAY_SIZE(ar5212_rf5111_ini_mode_end), 1703 ARRAY_SIZE(ar5212_rf5111_ini_mode_end),
1295 ar5212_rf5111_ini_mode_end, mode); 1704 ar5212_rf5111_ini_mode_end, mode);
1705
1296 /* Baseband gain table */ 1706 /* Baseband gain table */
1297 ath5k_hw_ini_registers(ah, 1707 ath5k_hw_ini_registers(ah,
1298 ARRAY_SIZE(rf5111_ini_bbgain), 1708 ARRAY_SIZE(rf5111_ini_bbgain),
1299 rf5111_ini_bbgain, change_channel); 1709 rf5111_ini_bbgain, change_channel);
1710
1300 } else if (ah->ah_radio == AR5K_RF5112){ 1711 } else if (ah->ah_radio == AR5K_RF5112){
1712
1301 ath5k_hw_ini_mode_registers(ah, 1713 ath5k_hw_ini_mode_registers(ah,
1302 ARRAY_SIZE(ar5212_rf5112_ini_mode_end), 1714 ARRAY_SIZE(ar5212_rf5112_ini_mode_end),
1303 ar5212_rf5112_ini_mode_end, mode); 1715 ar5212_rf5112_ini_mode_end, mode);
1304 /* Baseband gain table */ 1716
1305 ath5k_hw_ini_registers(ah, 1717 ath5k_hw_ini_registers(ah,
1306 ARRAY_SIZE(rf5112_ini_bbgain), 1718 ARRAY_SIZE(rf5112_ini_bbgain),
1307 rf5112_ini_bbgain, change_channel); 1719 rf5112_ini_bbgain, change_channel);
1720
1308 } else if (ah->ah_radio == AR5K_RF5413){ 1721 } else if (ah->ah_radio == AR5K_RF5413){
1722
1309 ath5k_hw_ini_mode_registers(ah, 1723 ath5k_hw_ini_mode_registers(ah,
1310 ARRAY_SIZE(rf5413_ini_mode_end), 1724 ARRAY_SIZE(rf5413_ini_mode_end),
1311 rf5413_ini_mode_end, mode); 1725 rf5413_ini_mode_end, mode);
1726
1727 ath5k_hw_ini_registers(ah,
1728 ARRAY_SIZE(rf5112_ini_bbgain),
1729 rf5112_ini_bbgain, change_channel);
1730
1731 } else if (ah->ah_radio == AR5K_RF2413) {
1732
1733 if (mode < 2) {
1734 ATH5K_ERR(ah->ah_sc,
1735 "unsupported channel mode: %d\n", mode);
1736 return -EINVAL;
1737 }
1738 mode = mode - 2;
1739
1740 /* Override a setting from ar5212_ini */
1741 ath5k_hw_reg_write(ah, 0x018830c6, AR5K_PHY(648));
1742
1743 ath5k_hw_ini_mode_registers(ah,
1744 ARRAY_SIZE(rf2413_ini_mode_end),
1745 rf2413_ini_mode_end, mode);
1746
1747 /* Baseband gain table */
1748 ath5k_hw_ini_registers(ah,
1749 ARRAY_SIZE(rf5112_ini_bbgain),
1750 rf5112_ini_bbgain, change_channel);
1751
1752 } else if (ah->ah_radio == AR5K_RF2425) {
1753
1754 if (mode < 2) {
1755 ATH5K_ERR(ah->ah_sc,
1756 "unsupported channel mode: %d\n", mode);
1757 return -EINVAL;
1758 }
1759
1760 /* Map b to g */
1761 if (mode == 2)
1762 mode = 0;
1763 else
1764 mode = mode - 3;
1765
1766 /* Override a setting from ar5212_ini */
1767 ath5k_hw_reg_write(ah, 0x018830c6, AR5K_PHY(648));
1768
1769 ath5k_hw_ini_mode_registers(ah,
1770 ARRAY_SIZE(rf2425_ini_mode_end),
1771 rf2425_ini_mode_end, mode);
1772
1312 /* Baseband gain table */ 1773 /* Baseband gain table */
1313 ath5k_hw_ini_registers(ah, 1774 ath5k_hw_ini_registers(ah,
1314 ARRAY_SIZE(rf5112_ini_bbgain), 1775 ARRAY_SIZE(rf5112_ini_bbgain),
1315 rf5112_ini_bbgain, change_channel); 1776 rf5112_ini_bbgain, change_channel);
1777
1316 } 1778 }
1779
1317 /* For AR5211 */ 1780 /* For AR5211 */
1318 } else if (ah->ah_version == AR5K_AR5211) { 1781 } else if (ah->ah_version == AR5K_AR5211) {
1319 1782
1320 if(mode > 2){ /* AR5K_INI_VAL_11B */ 1783 /* AR5K_MODE_11B */
1321 ATH5K_ERR(ah->ah_sc,"unsupported channel mode: %d\n", mode); 1784 if (mode > 2) {
1785 ATH5K_ERR(ah->ah_sc,
1786 "unsupported channel mode: %d\n", mode);
1322 return -EINVAL; 1787 return -EINVAL;
1323 } 1788 }
1324 1789
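
The mode juggling above ("mode = mode - 2", "Map b to g", "mode = mode - 3") compresses the global mode index down to the column count of the 2GHz-only tables: rf2413_ini_mode_end carries b/g/gTurbo columns, rf2425_ini_mode_end only g/gTurbo. A sketch of that remapping, assuming the global order a, aTurbo, b, g, gTurbo implied by those checks (the enum names below are illustrative):

#include <stdio.h>

enum demo_mode { DEMO_11A, DEMO_11A_TURBO, DEMO_11B, DEMO_11G, DEMO_11G_TURBO };

/* Tables like rf2413_ini_mode_end only have b/g/gTurbo columns, so the global
 * index is shifted down by the two missing 5GHz entries. */
static int demo_rf2413_column(int mode)
{
        if (mode < DEMO_11B)
                return -1;              /* 5GHz modes unsupported */
        return mode - 2;                /* b -> 0, g -> 1, gTurbo -> 2 */
}

/* rf2425 tables have no b column at all, so b is folded onto g. */
static int demo_rf2425_column(int mode)
{
        if (mode < DEMO_11B)
                return -1;
        if (mode == DEMO_11B)
                return 0;               /* map b to g */
        return mode - 3;                /* g -> 0, gTurbo -> 1 */
}

int main(void)
{
        printf("2413: b=%d g=%d gT=%d\n", demo_rf2413_column(DEMO_11B),
               demo_rf2413_column(DEMO_11G), demo_rf2413_column(DEMO_11G_TURBO));
        printf("2425: b=%d g=%d gT=%d\n", demo_rf2425_column(DEMO_11B),
               demo_rf2425_column(DEMO_11G), demo_rf2425_column(DEMO_11G_TURBO));
        return 0;
}
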
diff --git a/drivers/net/wireless/ath5k/phy.c b/drivers/net/wireless/ath5k/phy.c
index b95941797141..afd8689e5c03 100644
--- a/drivers/net/wireless/ath5k/phy.c
+++ b/drivers/net/wireless/ath5k/phy.c
@@ -666,6 +666,153 @@ static const struct ath5k_ini_rf rfregs_5413[] = {
666 { 0x0000000e, 0x0000000e, 0x0000000e, 0x0000000e, 0x0000000e } }, 666 { 0x0000000e, 0x0000000e, 0x0000000e, 0x0000000e, 0x0000000e } },
667}; 667};
668 668
669/* RF2413/2414 mode-specific init registers */
670static const struct ath5k_ini_rf rfregs_2413[] = {
671 { 1, AR5K_RF_BUFFER_CONTROL_4,
672 /* mode b mode g mode gTurbo */
673 { 0x00000020, 0x00000020, 0x00000020 } },
674 { 2, AR5K_RF_BUFFER_CONTROL_3,
675 { 0x02001408, 0x02001408, 0x02001408 } },
676 { 3, AR5K_RF_BUFFER_CONTROL_6,
677 { 0x00e020c0, 0x00e020c0, 0x00e020c0 } },
678 { 6, AR5K_RF_BUFFER,
679 { 0xf0000000, 0xf0000000, 0xf0000000 } },
680 { 6, AR5K_RF_BUFFER,
681 { 0x00000000, 0x00000000, 0x00000000 } },
682 { 6, AR5K_RF_BUFFER,
683 { 0x03000000, 0x03000000, 0x03000000 } },
684 { 6, AR5K_RF_BUFFER,
685 { 0x00000000, 0x00000000, 0x00000000 } },
686 { 6, AR5K_RF_BUFFER,
687 { 0x00000000, 0x00000000, 0x00000000 } },
688 { 6, AR5K_RF_BUFFER,
689 { 0x00000000, 0x00000000, 0x00000000 } },
690 { 6, AR5K_RF_BUFFER,
691 { 0x00000000, 0x00000000, 0x00000000 } },
692 { 6, AR5K_RF_BUFFER,
693 { 0x00000000, 0x00000000, 0x00000000 } },
694 { 6, AR5K_RF_BUFFER,
695 { 0x40400000, 0x40400000, 0x40400000 } },
696 { 6, AR5K_RF_BUFFER,
697 { 0x65050000, 0x65050000, 0x65050000 } },
698 { 6, AR5K_RF_BUFFER,
699 { 0x00000000, 0x00000000, 0x00000000 } },
700 { 6, AR5K_RF_BUFFER,
701 { 0x00000000, 0x00000000, 0x00000000 } },
702 { 6, AR5K_RF_BUFFER,
703 { 0x00420000, 0x00420000, 0x00420000 } },
704 { 6, AR5K_RF_BUFFER,
705 { 0x00b50000, 0x00b50000, 0x00b50000 } },
706 { 6, AR5K_RF_BUFFER,
707 { 0x00030000, 0x00030000, 0x00030000 } },
708 { 6, AR5K_RF_BUFFER,
709 { 0x00f70000, 0x00f70000, 0x00f70000 } },
710 { 6, AR5K_RF_BUFFER,
711 { 0x009d0000, 0x009d0000, 0x009d0000 } },
712 { 6, AR5K_RF_BUFFER,
713 { 0x00220000, 0x00220000, 0x00220000 } },
714 { 6, AR5K_RF_BUFFER,
715 { 0x04220000, 0x04220000, 0x04220000 } },
716 { 6, AR5K_RF_BUFFER,
717 { 0x00230018, 0x00230018, 0x00230018 } },
718 { 6, AR5K_RF_BUFFER,
719 { 0x00280050, 0x00280050, 0x00280050 } },
720 { 6, AR5K_RF_BUFFER,
721 { 0x005000c3, 0x005000c3, 0x005000c3 } },
722 { 6, AR5K_RF_BUFFER,
723 { 0x0004007f, 0x0004007f, 0x0004007f } },
724 { 6, AR5K_RF_BUFFER,
725 { 0x00000458, 0x00000458, 0x00000458 } },
726 { 6, AR5K_RF_BUFFER,
727 { 0x00000000, 0x00000000, 0x00000000 } },
728 { 6, AR5K_RF_BUFFER,
729 { 0x0000c000, 0x0000c000, 0x0000c000 } },
730 { 6, AR5K_RF_BUFFER_CONTROL_5,
731 { 0x00400230, 0x00400230, 0x00400230 } },
732 { 7, AR5K_RF_BUFFER,
733 { 0x00006400, 0x00006400, 0x00006400 } },
734 { 7, AR5K_RF_BUFFER,
735 { 0x00000800, 0x00000800, 0x00000800 } },
736 { 7, AR5K_RF_BUFFER_CONTROL_2,
737 { 0x0000000e, 0x0000000e, 0x0000000e } },
738};
739
740/* RF2425 mode-specific init registers */
741static const struct ath5k_ini_rf rfregs_2425[] = {
742 { 1, AR5K_RF_BUFFER_CONTROL_4,
743 /* mode g mode gTurbo */
744 { 0x00000020, 0x00000020 } },
745 { 2, AR5K_RF_BUFFER_CONTROL_3,
746 { 0x02001408, 0x02001408 } },
747 { 3, AR5K_RF_BUFFER_CONTROL_6,
748 { 0x00e020c0, 0x00e020c0 } },
749 { 6, AR5K_RF_BUFFER,
750 { 0x10000000, 0x10000000 } },
751 { 6, AR5K_RF_BUFFER,
752 { 0x00000000, 0x00000000 } },
753 { 6, AR5K_RF_BUFFER,
754 { 0x00000000, 0x00000000 } },
755 { 6, AR5K_RF_BUFFER,
756 { 0x00000000, 0x00000000 } },
757 { 6, AR5K_RF_BUFFER,
758 { 0x00000000, 0x00000000 } },
759 { 6, AR5K_RF_BUFFER,
760 { 0x00000000, 0x00000000 } },
761 { 6, AR5K_RF_BUFFER,
762 { 0x00000000, 0x00000000 } },
763 { 6, AR5K_RF_BUFFER,
764 { 0x00000000, 0x00000000 } },
765 { 6, AR5K_RF_BUFFER,
766 { 0x00000000, 0x00000000 } },
767 { 6, AR5K_RF_BUFFER,
768 { 0x00000000, 0x00000000 } },
769 { 6, AR5K_RF_BUFFER,
770 { 0x00000000, 0x00000000 } },
771 { 6, AR5K_RF_BUFFER,
772 { 0x002a0000, 0x002a0000 } },
773 { 6, AR5K_RF_BUFFER,
774 { 0x00000000, 0x00000000 } },
775 { 6, AR5K_RF_BUFFER,
776 { 0x00000000, 0x00000000 } },
777 { 6, AR5K_RF_BUFFER,
778 { 0x00100000, 0x00100000 } },
779 { 6, AR5K_RF_BUFFER,
780 { 0x00020000, 0x00020000 } },
781 { 6, AR5K_RF_BUFFER,
782 { 0x00730000, 0x00730000 } },
783 { 6, AR5K_RF_BUFFER,
784 { 0x00f80000, 0x00f80000 } },
785 { 6, AR5K_RF_BUFFER,
786 { 0x00e70000, 0x00e70000 } },
787 { 6, AR5K_RF_BUFFER,
788 { 0x00140000, 0x00140000 } },
789 { 6, AR5K_RF_BUFFER,
790 { 0x00910040, 0x00910040 } },
791 { 6, AR5K_RF_BUFFER,
792 { 0x0007001a, 0x0007001a } },
793 { 6, AR5K_RF_BUFFER,
794 { 0x00410000, 0x00410000 } },
795 { 6, AR5K_RF_BUFFER,
796 { 0x00810060, 0x00810060 } },
797 { 6, AR5K_RF_BUFFER,
798 { 0x00020803, 0x00020803 } },
799 { 6, AR5K_RF_BUFFER,
800 { 0x00000000, 0x00000000 } },
801 { 6, AR5K_RF_BUFFER,
802 { 0x00000000, 0x00000000 } },
803 { 6, AR5K_RF_BUFFER,
804 { 0x00001660, 0x00001660 } },
805 { 6, AR5K_RF_BUFFER,
806 { 0x00001688, 0x00001688 } },
807 { 6, AR5K_RF_BUFFER_CONTROL_1,
808 { 0x00000001, 0x00000001 } },
809 { 7, AR5K_RF_BUFFER,
810 { 0x00006400, 0x00006400 } },
811 { 7, AR5K_RF_BUFFER,
812 { 0x00000800, 0x00000800 } },
813 { 7, AR5K_RF_BUFFER_CONTROL_2,
814 { 0x0000000e, 0x0000000e } },
815};
669 816
670/* Initial RF Gain settings for RF5112 */ 817/* Initial RF Gain settings for RF5112 */
671static const struct ath5k_ini_rfgain rfgain_5112[] = { 818static const struct ath5k_ini_rfgain rfgain_5112[] = {
@@ -805,6 +952,74 @@ static const struct ath5k_ini_rfgain rfgain_5413[] = {
805 { AR5K_RF_GAIN(63), { 0x000000f9, 0x000000f9 } }, 952 { AR5K_RF_GAIN(63), { 0x000000f9, 0x000000f9 } },
806}; 953};
807 954
955/* Initial RF Gain settings for RF2413 */
956static const struct ath5k_ini_rfgain rfgain_2413[] = {
957 { AR5K_RF_GAIN(0), { 0x00000000 } },
958 { AR5K_RF_GAIN(1), { 0x00000040 } },
959 { AR5K_RF_GAIN(2), { 0x00000080 } },
960 { AR5K_RF_GAIN(3), { 0x00000181 } },
961 { AR5K_RF_GAIN(4), { 0x000001c1 } },
962 { AR5K_RF_GAIN(5), { 0x00000001 } },
963 { AR5K_RF_GAIN(6), { 0x00000041 } },
964 { AR5K_RF_GAIN(7), { 0x00000081 } },
965 { AR5K_RF_GAIN(8), { 0x00000168 } },
966 { AR5K_RF_GAIN(9), { 0x000001a8 } },
967 { AR5K_RF_GAIN(10), { 0x000001e8 } },
968 { AR5K_RF_GAIN(11), { 0x00000028 } },
969 { AR5K_RF_GAIN(12), { 0x00000068 } },
970 { AR5K_RF_GAIN(13), { 0x00000189 } },
971 { AR5K_RF_GAIN(14), { 0x000001c9 } },
972 { AR5K_RF_GAIN(15), { 0x00000009 } },
973 { AR5K_RF_GAIN(16), { 0x00000049 } },
974 { AR5K_RF_GAIN(17), { 0x00000089 } },
975 { AR5K_RF_GAIN(18), { 0x00000190 } },
976 { AR5K_RF_GAIN(19), { 0x000001d0 } },
977 { AR5K_RF_GAIN(20), { 0x00000010 } },
978 { AR5K_RF_GAIN(21), { 0x00000050 } },
979 { AR5K_RF_GAIN(22), { 0x00000090 } },
980 { AR5K_RF_GAIN(23), { 0x00000191 } },
981 { AR5K_RF_GAIN(24), { 0x000001d1 } },
982 { AR5K_RF_GAIN(25), { 0x00000011 } },
983 { AR5K_RF_GAIN(26), { 0x00000051 } },
984 { AR5K_RF_GAIN(27), { 0x00000091 } },
985 { AR5K_RF_GAIN(28), { 0x00000178 } },
986 { AR5K_RF_GAIN(29), { 0x000001b8 } },
987 { AR5K_RF_GAIN(30), { 0x000001f8 } },
988 { AR5K_RF_GAIN(31), { 0x00000038 } },
989 { AR5K_RF_GAIN(32), { 0x00000078 } },
990 { AR5K_RF_GAIN(33), { 0x00000199 } },
991 { AR5K_RF_GAIN(34), { 0x000001d9 } },
992 { AR5K_RF_GAIN(35), { 0x00000019 } },
993 { AR5K_RF_GAIN(36), { 0x00000059 } },
994 { AR5K_RF_GAIN(37), { 0x00000099 } },
995 { AR5K_RF_GAIN(38), { 0x000000d9 } },
996 { AR5K_RF_GAIN(39), { 0x000000f9 } },
997 { AR5K_RF_GAIN(40), { 0x000000f9 } },
998 { AR5K_RF_GAIN(41), { 0x000000f9 } },
999 { AR5K_RF_GAIN(42), { 0x000000f9 } },
1000 { AR5K_RF_GAIN(43), { 0x000000f9 } },
1001 { AR5K_RF_GAIN(44), { 0x000000f9 } },
1002 { AR5K_RF_GAIN(45), { 0x000000f9 } },
1003 { AR5K_RF_GAIN(46), { 0x000000f9 } },
1004 { AR5K_RF_GAIN(47), { 0x000000f9 } },
1005 { AR5K_RF_GAIN(48), { 0x000000f9 } },
1006 { AR5K_RF_GAIN(49), { 0x000000f9 } },
1007 { AR5K_RF_GAIN(50), { 0x000000f9 } },
1008 { AR5K_RF_GAIN(51), { 0x000000f9 } },
1009 { AR5K_RF_GAIN(52), { 0x000000f9 } },
1010 { AR5K_RF_GAIN(53), { 0x000000f9 } },
1011 { AR5K_RF_GAIN(54), { 0x000000f9 } },
1012 { AR5K_RF_GAIN(55), { 0x000000f9 } },
1013 { AR5K_RF_GAIN(56), { 0x000000f9 } },
1014 { AR5K_RF_GAIN(57), { 0x000000f9 } },
1015 { AR5K_RF_GAIN(58), { 0x000000f9 } },
1016 { AR5K_RF_GAIN(59), { 0x000000f9 } },
1017 { AR5K_RF_GAIN(60), { 0x000000f9 } },
1018 { AR5K_RF_GAIN(61), { 0x000000f9 } },
1019 { AR5K_RF_GAIN(62), { 0x000000f9 } },
1020 { AR5K_RF_GAIN(63), { 0x000000f9 } },
1021};
1022
808static const struct ath5k_gain_opt rfgain_opt_5112 = { 1023static const struct ath5k_gain_opt rfgain_opt_5112 = {
809 1, 1024 1,
810 8, 1025 8,
@@ -844,14 +1059,14 @@ static unsigned int ath5k_hw_rfregs_op(u32 *rf, u32 offset, u32 reg, u32 bits,
844 entry = ((first - 1) / 8) + offset; 1059 entry = ((first - 1) / 8) + offset;
845 position = (first - 1) % 8; 1060 position = (first - 1) % 8;
846 1061
847 if (set == true) 1062 if (set)
848 data = ath5k_hw_bitswap(reg, bits); 1063 data = ath5k_hw_bitswap(reg, bits);
849 1064
850 for (i = shift = 0, left = bits; left > 0; position = 0, entry++, i++) { 1065 for (i = shift = 0, left = bits; left > 0; position = 0, entry++, i++) {
851 last = (position + left > 8) ? 8 : position + left; 1066 last = (position + left > 8) ? 8 : position + left;
852 mask = (((1 << last) - 1) ^ ((1 << position) - 1)) << (col * 8); 1067 mask = (((1 << last) - 1) ^ ((1 << position) - 1)) << (col * 8);
853 1068
854 if (set == true) { 1069 if (set) {
855 rf[entry] &= ~mask; 1070 rf[entry] &= ~mask;
856 rf[entry] |= ((data << position) << (col * 8)) & mask; 1071 rf[entry] |= ((data << position) << (col * 8)) & mask;
857 data >>= (8 - position); 1072 data >>= (8 - position);
@@ -864,7 +1079,7 @@ static unsigned int ath5k_hw_rfregs_op(u32 *rf, u32 offset, u32 reg, u32 bits,
864 left -= 8 - position; 1079 left -= 8 - position;
865 } 1080 }
866 1081
867 data = set == true ? 1 : ath5k_hw_bitswap(data, bits); 1082 data = set ? 1 : ath5k_hw_bitswap(data, bits);
868 1083
869 return data; 1084 return data;
870} 1085}
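
ath5k_hw_rfregs_op above packs a field into the byte-sliced RF bank after passing it through ath5k_hw_bitswap; the helper's body is not in this hunk, but its symmetric use on the read path ("data = set ? 1 : ath5k_hw_bitswap(data, bits)") suggests it simply reverses the low "bits" bits, so that applying it twice is the identity. A generic sketch under that assumption:

#include <stdint.h>
#include <stdio.h>

/* Reverse the order of the lowest "bits" bits of val. */
static uint32_t demo_bitswap(uint32_t val, unsigned int bits)
{
        uint32_t ret = 0;
        unsigned int i;

        for (i = 0; i < bits; i++) {
                ret = (ret << 1) | (val & 1);   /* push bit i onto the result */
                val >>= 1;
        }
        return ret;
}

int main(void)
{
        /* 3-bit value 0b011 becomes 0b110; swapping twice restores it, which is
         * why one helper can serve both the set and the get direction. */
        printf("0x%x -> 0x%x -> 0x%x\n", 3u, demo_bitswap(3, 3),
               demo_bitswap(demo_bitswap(3, 3), 3));
        return 0;
}
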
@@ -955,7 +1170,6 @@ static s32 ath5k_hw_rfregs_gain_adjust(struct ath5k_hw *ah)
955 go = &rfgain_opt_5111; 1170 go = &rfgain_opt_5111;
956 break; 1171 break;
957 case AR5K_RF5112: 1172 case AR5K_RF5112:
958 case AR5K_RF5413: /* ??? */
959 go = &rfgain_opt_5112; 1173 go = &rfgain_opt_5112;
960 break; 1174 break;
961 default: 1175 default:
@@ -1018,7 +1232,7 @@ static int ath5k_hw_rf5111_rfregs(struct ath5k_hw *ah,
1018 int obdb = -1, bank = -1; 1232 int obdb = -1, bank = -1;
1019 u32 ee_mode; 1233 u32 ee_mode;
1020 1234
1021 AR5K_ASSERT_ENTRY(mode, AR5K_INI_VAL_MAX); 1235 AR5K_ASSERT_ENTRY(mode, AR5K_MODE_MAX);
1022 1236
1023 rf = ah->ah_rf_banks; 1237 rf = ah->ah_rf_banks;
1024 1238
@@ -1038,8 +1252,8 @@ static int ath5k_hw_rf5111_rfregs(struct ath5k_hw *ah,
1038 } 1252 }
1039 1253
1040 /* Modify bank 0 */ 1254 /* Modify bank 0 */
1041 if (channel->val & CHANNEL_2GHZ) { 1255 if (channel->hw_value & CHANNEL_2GHZ) {
1042 if (channel->val & CHANNEL_CCK) 1256 if (channel->hw_value & CHANNEL_CCK)
1043 ee_mode = AR5K_EEPROM_MODE_11B; 1257 ee_mode = AR5K_EEPROM_MODE_11B;
1044 else 1258 else
1045 ee_mode = AR5K_EEPROM_MODE_11G; 1259 ee_mode = AR5K_EEPROM_MODE_11G;
@@ -1058,10 +1272,10 @@ static int ath5k_hw_rf5111_rfregs(struct ath5k_hw *ah,
1058 } else { 1272 } else {
1059 /* For 11a, Turbo and XR */ 1273 /* For 11a, Turbo and XR */
1060 ee_mode = AR5K_EEPROM_MODE_11A; 1274 ee_mode = AR5K_EEPROM_MODE_11A;
1061 obdb = channel->freq >= 5725 ? 3 : 1275 obdb = channel->center_freq >= 5725 ? 3 :
1062 (channel->freq >= 5500 ? 2 : 1276 (channel->center_freq >= 5500 ? 2 :
1063 (channel->freq >= 5260 ? 1 : 1277 (channel->center_freq >= 5260 ? 1 :
1064 (channel->freq > 4000 ? 0 : -1))); 1278 (channel->center_freq > 4000 ? 0 : -1)));
1065 1279
1066 if (!ath5k_hw_rfregs_op(rf, ah->ah_offset[6], 1280 if (!ath5k_hw_rfregs_op(rf, ah->ah_offset[6],
1067 ee->ee_pwd_84, 1, 51, 3, true)) 1281 ee->ee_pwd_84, 1, 51, 3, true))
@@ -1119,12 +1333,12 @@ static int ath5k_hw_rf5112_rfregs(struct ath5k_hw *ah,
1119 int obdb = -1, bank = -1; 1333 int obdb = -1, bank = -1;
1120 u32 ee_mode; 1334 u32 ee_mode;
1121 1335
1122 AR5K_ASSERT_ENTRY(mode, AR5K_INI_VAL_MAX); 1336 AR5K_ASSERT_ENTRY(mode, AR5K_MODE_MAX);
1123 1337
1124 rf = ah->ah_rf_banks; 1338 rf = ah->ah_rf_banks;
1125 1339
1126 if (ah->ah_radio_5ghz_revision >= AR5K_SREV_RAD_2112A 1340 if (ah->ah_radio_5ghz_revision >= AR5K_SREV_RAD_2112A
1127 && !test_bit(MODE_IEEE80211A, ah->ah_capabilities.cap_mode)){ 1341 && !test_bit(AR5K_MODE_11A, ah->ah_capabilities.cap_mode)) {
1128 rf_ini = rfregs_2112a; 1342 rf_ini = rfregs_2112a;
1129 rf_size = ARRAY_SIZE(rfregs_5112a); 1343 rf_size = ARRAY_SIZE(rfregs_5112a);
1130 if (mode < 2) { 1344 if (mode < 2) {
@@ -1156,8 +1370,8 @@ static int ath5k_hw_rf5112_rfregs(struct ath5k_hw *ah,
1156 } 1370 }
1157 1371
1158 /* Modify bank 6 */ 1372 /* Modify bank 6 */
1159 if (channel->val & CHANNEL_2GHZ) { 1373 if (channel->hw_value & CHANNEL_2GHZ) {
1160 if (channel->val & CHANNEL_OFDM) 1374 if (channel->hw_value & CHANNEL_OFDM)
1161 ee_mode = AR5K_EEPROM_MODE_11G; 1375 ee_mode = AR5K_EEPROM_MODE_11G;
1162 else 1376 else
1163 ee_mode = AR5K_EEPROM_MODE_11B; 1377 ee_mode = AR5K_EEPROM_MODE_11B;
@@ -1173,10 +1387,13 @@ static int ath5k_hw_rf5112_rfregs(struct ath5k_hw *ah,
1173 } else { 1387 } else {
1174 /* For 11a, Turbo and XR */ 1388 /* For 11a, Turbo and XR */
1175 ee_mode = AR5K_EEPROM_MODE_11A; 1389 ee_mode = AR5K_EEPROM_MODE_11A;
1176 obdb = channel->freq >= 5725 ? 3 : 1390 obdb = channel->center_freq >= 5725 ? 3 :
1177 (channel->freq >= 5500 ? 2 : 1391 (channel->center_freq >= 5500 ? 2 :
1178 (channel->freq >= 5260 ? 1 : 1392 (channel->center_freq >= 5260 ? 1 :
1179 (channel->freq > 4000 ? 0 : -1))); 1393 (channel->center_freq > 4000 ? 0 : -1)));
1394
1395 if (obdb == -1)
1396 return -EINVAL;
1180 1397
1181 if (!ath5k_hw_rfregs_op(rf, ah->ah_offset[6], 1398 if (!ath5k_hw_rfregs_op(rf, ah->ah_offset[6],
1182 ee->ee_ob[ee_mode][obdb], 3, 279, 0, true)) 1399 ee->ee_ob[ee_mode][obdb], 3, 279, 0, true))
@@ -1209,7 +1426,8 @@ static int ath5k_hw_rf5112_rfregs(struct ath5k_hw *ah,
1209} 1426}
1210 1427
1211/* 1428/*
1212 * Initialize RF5413/5414 1429 * Initialize RF5413/5414 and future chips
1430 * (until we come up with a better solution)
1213 */ 1431 */
1214static int ath5k_hw_rf5413_rfregs(struct ath5k_hw *ah, 1432static int ath5k_hw_rf5413_rfregs(struct ath5k_hw *ah,
1215 struct ieee80211_channel *channel, unsigned int mode) 1433 struct ieee80211_channel *channel, unsigned int mode)
@@ -1219,12 +1437,47 @@ static int ath5k_hw_rf5413_rfregs(struct ath5k_hw *ah,
1219 unsigned int rf_size, i; 1437 unsigned int rf_size, i;
1220 int bank = -1; 1438 int bank = -1;
1221 1439
1222 AR5K_ASSERT_ENTRY(mode, AR5K_INI_VAL_MAX); 1440 AR5K_ASSERT_ENTRY(mode, AR5K_MODE_MAX);
1223 1441
1224 rf = ah->ah_rf_banks; 1442 rf = ah->ah_rf_banks;
1225 1443
1226 rf_ini = rfregs_5413; 1444 switch (ah->ah_radio) {
1227 rf_size = ARRAY_SIZE(rfregs_5413); 1445 case AR5K_RF5413:
1446 rf_ini = rfregs_5413;
1447 rf_size = ARRAY_SIZE(rfregs_5413);
1448 break;
1449 case AR5K_RF2413:
1450 rf_ini = rfregs_2413;
1451 rf_size = ARRAY_SIZE(rfregs_2413);
1452
1453 if (mode < 2) {
1454 ATH5K_ERR(ah->ah_sc,
1455 "invalid channel mode: %i\n", mode);
1456 return -EINVAL;
1457 }
1458
1459 mode = mode - 2;
1460 break;
1461 case AR5K_RF2425:
1462 rf_ini = rfregs_2425;
1463 rf_size = ARRAY_SIZE(rfregs_2425);
1464
1465 if (mode < 2) {
1466 ATH5K_ERR(ah->ah_sc,
1467 "invalid channel mode: %i\n", mode);
1468 return -EINVAL;
1469 }
1470
1471 /* Map b to g */
1472 if (mode == 2)
1473 mode = 0;
1474 else
1475 mode = mode - 3;
1476
1477 break;
1478 default:
1479 return -EINVAL;
1480 }
1228 1481
1229 /* Copy values to modify them */ 1482 /* Copy values to modify them */
1230 for (i = 0; i < rf_size; i++) { 1483 for (i = 0; i < rf_size; i++) {
@@ -1283,6 +1536,14 @@ int ath5k_hw_rfregs(struct ath5k_hw *ah, struct ieee80211_channel *channel,
1283 ah->ah_rf_banks_size = sizeof(rfregs_5413); 1536 ah->ah_rf_banks_size = sizeof(rfregs_5413);
1284 func = ath5k_hw_rf5413_rfregs; 1537 func = ath5k_hw_rf5413_rfregs;
1285 break; 1538 break;
1539 case AR5K_RF2413:
1540 ah->ah_rf_banks_size = sizeof(rfregs_2413);
1541 func = ath5k_hw_rf5413_rfregs;
1542 break;
1543 case AR5K_RF2425:
1544 ah->ah_rf_banks_size = sizeof(rfregs_2425);
1545 func = ath5k_hw_rf5413_rfregs;
1546 break;
1286 default: 1547 default:
1287 return -EINVAL; 1548 return -EINVAL;
1288 } 1549 }
@@ -1321,6 +1582,16 @@ int ath5k_hw_rfgain(struct ath5k_hw *ah, unsigned int freq)
1321 ath5k_rfg = rfgain_5413; 1582 ath5k_rfg = rfgain_5413;
1322 size = ARRAY_SIZE(rfgain_5413); 1583 size = ARRAY_SIZE(rfgain_5413);
1323 break; 1584 break;
1585 case AR5K_RF2413:
1586 ath5k_rfg = rfgain_2413;
1587 size = ARRAY_SIZE(rfgain_2413);
1588 freq = 0; /* only 2Ghz */
1589 break;
1590 case AR5K_RF2425:
1591 ath5k_rfg = rfgain_2413;
1592 size = ARRAY_SIZE(rfgain_2413);
1593 freq = 0; /* only 2Ghz */
1594 break;
1324 default: 1595 default:
1325 return -EINVAL; 1596 return -EINVAL;
1326 } 1597 }
@@ -1395,7 +1666,6 @@ int ath5k_hw_set_rfgain_opt(struct ath5k_hw *ah)
1395 ah->ah_gain.g_active = 1; 1666 ah->ah_gain.g_active = 1;
1396 break; 1667 break;
1397 case AR5K_RF5112: 1668 case AR5K_RF5112:
1398 case AR5K_RF5413: /* ??? */
1399 ah->ah_gain.g_step_idx = rfgain_opt_5112.go_default; 1669 ah->ah_gain.g_step_idx = rfgain_opt_5112.go_default;
1400 ah->ah_gain.g_step = 1670 ah->ah_gain.g_step =
1401 &rfgain_opt_5112.go_step[ah->ah_gain.g_step_idx]; 1671 &rfgain_opt_5112.go_step[ah->ah_gain.g_step_idx];
@@ -1445,9 +1715,10 @@ static u32 ath5k_hw_rf5110_chan2athchan(struct ieee80211_channel *channel)
1445 * newer chipsets like the AR5212A who have a completely 1715 * newer chipsets like the AR5212A who have a completely
1446 * different RF/PHY part. 1716 * different RF/PHY part.
1447 */ 1717 */
1448 athchan = (ath5k_hw_bitswap((channel->chan - 24) / 2, 5) << 1) | 1718 athchan = (ath5k_hw_bitswap(
1449 (1 << 6) | 0x1; 1719 (ieee80211_frequency_to_channel(
1450 1720 channel->center_freq) - 24) / 2, 5)
1721 << 1) | (1 << 6) | 0x1;
1451 return athchan; 1722 return athchan;
1452} 1723}
1453 1724
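Several of these hunks replace the old channel->chan/channel->freq fields with mac80211's struct ieee80211_channel, so the raw channel number is now derived from center_freq via ieee80211_frequency_to_channel(). The mapping is roughly the one sketched below (worked examples: 2437 MHz -> channel 6, 5180 MHz -> channel 36); treat it as an illustration of the conversion, not a copy of the mac80211 implementation, and note that the 4.9GHz public-safety channels are omitted.

	/* Rough sketch of the IEEE 802.11 frequency-to-channel mapping. */
	static int freq_to_channel(int freq_mhz)
	{
		if (freq_mhz == 2484)			/* Japanese channel 14 */
			return 14;
		if (freq_mhz < 2484)			/* 2.4GHz: 5MHz steps from 2407 */
			return (freq_mhz - 2407) / 5;
		return (freq_mhz - 5000) / 5;		/* 5GHz: 5MHz steps from 5000 */
	}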
@@ -1506,7 +1777,8 @@ static int ath5k_hw_rf5111_channel(struct ath5k_hw *ah,
1506 struct ieee80211_channel *channel) 1777 struct ieee80211_channel *channel)
1507{ 1778{
1508 struct ath5k_athchan_2ghz ath5k_channel_2ghz; 1779 struct ath5k_athchan_2ghz ath5k_channel_2ghz;
1509 unsigned int ath5k_channel = channel->chan; 1780 unsigned int ath5k_channel =
1781 ieee80211_frequency_to_channel(channel->center_freq);
1510 u32 data0, data1, clock; 1782 u32 data0, data1, clock;
1511 int ret; 1783 int ret;
1512 1784
@@ -1515,10 +1787,11 @@ static int ath5k_hw_rf5111_channel(struct ath5k_hw *ah,
1515 */ 1787 */
1516 data0 = data1 = 0; 1788 data0 = data1 = 0;
1517 1789
1518 if (channel->val & CHANNEL_2GHZ) { 1790 if (channel->hw_value & CHANNEL_2GHZ) {
1519 /* Map 2GHz channel to 5GHz Atheros channel ID */ 1791 /* Map 2GHz channel to 5GHz Atheros channel ID */
1520 ret = ath5k_hw_rf5111_chan2athchan(channel->chan, 1792 ret = ath5k_hw_rf5111_chan2athchan(
1521 &ath5k_channel_2ghz); 1793 ieee80211_frequency_to_channel(channel->center_freq),
1794 &ath5k_channel_2ghz);
1522 if (ret) 1795 if (ret)
1523 return ret; 1796 return ret;
1524 1797
@@ -1555,7 +1828,7 @@ static int ath5k_hw_rf5112_channel(struct ath5k_hw *ah,
1555 u16 c; 1828 u16 c;
1556 1829
1557 data = data0 = data1 = data2 = 0; 1830 data = data0 = data1 = data2 = 0;
1558 c = channel->freq; 1831 c = channel->center_freq;
1559 1832
1560 /* 1833 /*
1561 * Set the channel on the RF5112 or newer 1834 * Set the channel on the RF5112 or newer
@@ -1599,19 +1872,17 @@ static int ath5k_hw_rf5112_channel(struct ath5k_hw *ah,
1599int ath5k_hw_channel(struct ath5k_hw *ah, struct ieee80211_channel *channel) 1872int ath5k_hw_channel(struct ath5k_hw *ah, struct ieee80211_channel *channel)
1600{ 1873{
1601 int ret; 1874 int ret;
1602
1603 /* 1875 /*
1604 * Check bounds supported by the PHY 1876 * (we don't care about regulatory
1605 * (don't care about regulation restrictions at this point) 1877 * restrictions at this point). Note: hw_value already carries the band
1606 */ 1878 * (CHANNEL_2GHZ or CHANNEL_5GHZ), so we pass it to ath5k_channel_ok()
1607 if ((channel->freq < ah->ah_capabilities.cap_range.range_2ghz_min || 1879 * to indicate the band */
1608 channel->freq > ah->ah_capabilities.cap_range.range_2ghz_max) && 1880 if (!ath5k_channel_ok(ah, channel->center_freq, channel->hw_value)) {
1609 (channel->freq < ah->ah_capabilities.cap_range.range_5ghz_min ||
1610 channel->freq > ah->ah_capabilities.cap_range.range_5ghz_max)) {
1611 ATH5K_ERR(ah->ah_sc, 1881 ATH5K_ERR(ah->ah_sc,
1612 "channel out of supported range (%u MHz)\n", 1882 "channel frequency (%u MHz) out of supported "
1613 channel->freq); 1883 "band range\n",
1614 return -EINVAL; 1884 channel->center_freq);
1885 return -EINVAL;
1615 } 1886 }
1616 1887
1617 /* 1888 /*
@@ -1632,9 +1903,9 @@ int ath5k_hw_channel(struct ath5k_hw *ah, struct ieee80211_channel *channel)
1632 if (ret) 1903 if (ret)
1633 return ret; 1904 return ret;
1634 1905
1635 ah->ah_current_channel.freq = channel->freq; 1906 ah->ah_current_channel.center_freq = channel->center_freq;
1636 ah->ah_current_channel.val = channel->val; 1907 ah->ah_current_channel.hw_value = channel->hw_value;
1637 ah->ah_turbo = channel->val == CHANNEL_T ? true : false; 1908 ah->ah_turbo = channel->hw_value == CHANNEL_T ? true : false;
1638 1909
1639 return 0; 1910 return 0;
1640} 1911}
@@ -1797,11 +2068,11 @@ static int ath5k_hw_rf5110_calibrate(struct ath5k_hw *ah,
1797 2068
1798 if (ret) { 2069 if (ret) {
1799 ATH5K_ERR(ah->ah_sc, "calibration timeout (%uMHz)\n", 2070 ATH5K_ERR(ah->ah_sc, "calibration timeout (%uMHz)\n",
1800 channel->freq); 2071 channel->center_freq);
1801 return ret; 2072 return ret;
1802 } 2073 }
1803 2074
1804 ret = ath5k_hw_noise_floor_calibration(ah, channel->freq); 2075 ret = ath5k_hw_noise_floor_calibration(ah, channel->center_freq);
1805 if (ret) 2076 if (ret)
1806 return ret; 2077 return ret;
1807 2078
@@ -1825,7 +2096,7 @@ static int ath5k_hw_rf511x_calibrate(struct ath5k_hw *ah,
1825 s32 iq_corr, i_coff, i_coffd, q_coff, q_coffd; 2096 s32 iq_corr, i_coff, i_coffd, q_coff, q_coffd;
1826 ATH5K_TRACE(ah->ah_sc); 2097 ATH5K_TRACE(ah->ah_sc);
1827 2098
1828 if (ah->ah_calibration == false || 2099 if (!ah->ah_calibration ||
1829 ath5k_hw_reg_read(ah, AR5K_PHY_IQ) & AR5K_PHY_IQ_RUN) 2100 ath5k_hw_reg_read(ah, AR5K_PHY_IQ) & AR5K_PHY_IQ_RUN)
1830 goto done; 2101 goto done;
1831 2102
@@ -1848,10 +2119,10 @@ static int ath5k_hw_rf511x_calibrate(struct ath5k_hw *ah,
1848 ((u32)q_coff) | ((u32)i_coff << AR5K_PHY_IQ_CORR_Q_I_COFF_S)); 2119 ((u32)q_coff) | ((u32)i_coff << AR5K_PHY_IQ_CORR_Q_I_COFF_S));
1849 2120
1850done: 2121done:
1851 ath5k_hw_noise_floor_calibration(ah, channel->freq); 2122 ath5k_hw_noise_floor_calibration(ah, channel->center_freq);
1852 2123
1853 /* Request RF gain */ 2124 /* Request RF gain */
1854 if (channel->val & CHANNEL_5GHZ) { 2125 if (channel->hw_value & CHANNEL_5GHZ) {
1855 ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txpower.txp_max, 2126 ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txpower.txp_max,
1856 AR5K_PHY_PAPD_PROBE_TXPOWER) | 2127 AR5K_PHY_PAPD_PROBE_TXPOWER) |
1857 AR5K_PHY_PAPD_PROBE_TX_NEXT, AR5K_PHY_PAPD_PROBE); 2128 AR5K_PHY_PAPD_PROBE_TX_NEXT, AR5K_PHY_PAPD_PROBE);
@@ -2015,6 +2286,18 @@ ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel,
2015 return -EINVAL; 2286 return -EINVAL;
2016 } 2287 }
2017 2288
2289 /*
2290 * RF2413 for some reason can't
2291 * transmit anything if we call
2292 * this function, so we skip it
2293 * until we fix txpower.
2294 *
2295 * XXX: Assume same for RF2425
2296 * to be safe.
2297 */
2298 if ((ah->ah_radio == AR5K_RF2413) || (ah->ah_radio == AR5K_RF2425))
2299 return 0;
2300
2018 /* Reset TX power values */ 2301 /* Reset TX power values */
2019 memset(&ah->ah_txpower, 0, sizeof(ah->ah_txpower)); 2302 memset(&ah->ah_txpower, 0, sizeof(ah->ah_txpower));
2020 ah->ah_txpower.txp_tpc = tpc; 2303 ah->ah_txpower.txp_tpc = tpc;
@@ -2048,7 +2331,7 @@ ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel,
2048 AR5K_TXPOWER_CCK(13, 16) | AR5K_TXPOWER_CCK(12, 8) | 2331 AR5K_TXPOWER_CCK(13, 16) | AR5K_TXPOWER_CCK(12, 8) |
2049 AR5K_TXPOWER_CCK(11, 0), AR5K_PHY_TXPOWER_RATE4); 2332 AR5K_TXPOWER_CCK(11, 0), AR5K_PHY_TXPOWER_RATE4);
2050 2333
2051 if (ah->ah_txpower.txp_tpc == true) 2334 if (ah->ah_txpower.txp_tpc)
2052 ath5k_hw_reg_write(ah, AR5K_PHY_TXPOWER_RATE_MAX_TPC_ENABLE | 2335 ath5k_hw_reg_write(ah, AR5K_PHY_TXPOWER_RATE_MAX_TPC_ENABLE |
2053 AR5K_TUNE_MAX_TXPOWER, AR5K_PHY_TXPOWER_RATE_MAX); 2336 AR5K_TUNE_MAX_TXPOWER, AR5K_PHY_TXPOWER_RATE_MAX);
2054 else 2337 else
diff --git a/drivers/net/wireless/ath5k/reg.h b/drivers/net/wireless/ath5k/reg.h
index 2f41c8398602..30629b3e37c2 100644
--- a/drivers/net/wireless/ath5k/reg.h
+++ b/drivers/net/wireless/ath5k/reg.h
@@ -1923,7 +1923,9 @@ after DFS is enabled */
1923#define AR5K_PHY_SDELAY_32MHZ 0x000000ff 1923#define AR5K_PHY_SDELAY_32MHZ 0x000000ff
1924#define AR5K_PHY_SPENDING 0x99f8 1924#define AR5K_PHY_SPENDING 0x99f8
1925#define AR5K_PHY_SPENDING_RF5111 0x00000018 1925#define AR5K_PHY_SPENDING_RF5111 0x00000018
1926#define AR5K_PHY_SPENDING_RF5112 0x00000014 1926#define AR5K_PHY_SPENDING_RF5112 0x00000014 /* <- I've only seen this on 2425 dumps! */
1927#define AR5K_PHY_SPENDING_RF5112A 0x0000000e /* but since I only have 5112A-based chips */
1928#define AR5K_PHY_SPENDING_RF5424 0x00000012 /* to test, this might also apply to old 5112. */
1927 1929
1928/* 1930/*
1929 * Misc PHY/radio registers [5110 - 5111] 1931 * Misc PHY/radio registers [5110 - 5111]
diff --git a/drivers/net/wireless/ath5k/regdom.c b/drivers/net/wireless/ath5k/regdom.c
deleted file mode 100644
index e851957dacfd..000000000000
--- a/drivers/net/wireless/ath5k/regdom.c
+++ /dev/null
@@ -1,121 +0,0 @@
1/*
2 * Copyright (c) 2004, 2005 Reyk Floeter <reyk@vantronix.net>
3 *
4 * Permission to use, copy, modify, and distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17/*
18 * Basic regulation domain extensions for the IEEE 802.11 stack
19 */
20
21#include <linux/kernel.h>
22#include <linux/string.h>
23
24#include "regdom.h"
25
26static const struct ath5k_regdommap {
27 enum ath5k_regdom dmn;
28 enum ath5k_regdom dmn5;
29 enum ath5k_regdom dmn2;
30} r_map[] = {
31 { DMN_DEFAULT, DMN_DEBUG, DMN_DEBUG },
32 { DMN_NULL_WORLD, DMN_NULL, DMN_WORLD },
33 { DMN_NULL_ETSIB, DMN_NULL, DMN_ETSIB },
34 { DMN_NULL_ETSIC, DMN_NULL, DMN_ETSIC },
35 { DMN_FCC1_FCCA, DMN_FCC1, DMN_FCCA },
36 { DMN_FCC1_WORLD, DMN_FCC1, DMN_WORLD },
37 { DMN_FCC2_FCCA, DMN_FCC2, DMN_FCCA },
38 { DMN_FCC2_WORLD, DMN_FCC2, DMN_WORLD },
39 { DMN_FCC2_ETSIC, DMN_FCC2, DMN_ETSIC },
40 { DMN_FRANCE_NULL, DMN_ETSI3, DMN_ETSI3 },
41 { DMN_FCC3_FCCA, DMN_FCC3, DMN_WORLD },
42 { DMN_ETSI1_WORLD, DMN_ETSI1, DMN_WORLD },
43 { DMN_ETSI3_ETSIA, DMN_ETSI3, DMN_WORLD },
44 { DMN_ETSI2_WORLD, DMN_ETSI2, DMN_WORLD },
45 { DMN_ETSI3_WORLD, DMN_ETSI3, DMN_WORLD },
46 { DMN_ETSI4_WORLD, DMN_ETSI4, DMN_WORLD },
47 { DMN_ETSI4_ETSIC, DMN_ETSI4, DMN_ETSIC },
48 { DMN_ETSI5_WORLD, DMN_ETSI5, DMN_WORLD },
49 { DMN_ETSI6_WORLD, DMN_ETSI6, DMN_WORLD },
50 { DMN_ETSI_NULL, DMN_ETSI1, DMN_ETSI1 },
51 { DMN_MKK1_MKKA, DMN_MKK1, DMN_MKKA },
52 { DMN_MKK1_MKKB, DMN_MKK1, DMN_MKKA },
53 { DMN_APL4_WORLD, DMN_APL4, DMN_WORLD },
54 { DMN_MKK2_MKKA, DMN_MKK2, DMN_MKKA },
55 { DMN_APL_NULL, DMN_APL1, DMN_NULL },
56 { DMN_APL2_WORLD, DMN_APL2, DMN_WORLD },
57 { DMN_APL2_APLC, DMN_APL2, DMN_WORLD },
58 { DMN_APL3_WORLD, DMN_APL3, DMN_WORLD },
59 { DMN_MKK1_FCCA, DMN_MKK1, DMN_FCCA },
60 { DMN_APL2_APLD, DMN_APL2, DMN_APLD },
61 { DMN_MKK1_MKKA1, DMN_MKK1, DMN_MKKA },
62 { DMN_MKK1_MKKA2, DMN_MKK1, DMN_MKKA },
63 { DMN_APL1_WORLD, DMN_APL1, DMN_WORLD },
64 { DMN_APL1_FCCA, DMN_APL1, DMN_FCCA },
65 { DMN_APL1_APLA, DMN_APL1, DMN_WORLD },
66 { DMN_APL1_ETSIC, DMN_APL1, DMN_ETSIC },
67 { DMN_APL2_ETSIC, DMN_APL2, DMN_ETSIC },
68 { DMN_APL5_WORLD, DMN_APL5, DMN_WORLD },
69 { DMN_WOR0_WORLD, DMN_WORLD, DMN_WORLD },
70 { DMN_WOR1_WORLD, DMN_WORLD, DMN_WORLD },
71 { DMN_WOR2_WORLD, DMN_WORLD, DMN_WORLD },
72 { DMN_WOR3_WORLD, DMN_WORLD, DMN_WORLD },
73 { DMN_WOR4_WORLD, DMN_WORLD, DMN_WORLD },
74 { DMN_WOR5_ETSIC, DMN_WORLD, DMN_WORLD },
75 { DMN_WOR01_WORLD, DMN_WORLD, DMN_WORLD },
76 { DMN_WOR02_WORLD, DMN_WORLD, DMN_WORLD },
77 { DMN_EU1_WORLD, DMN_ETSI1, DMN_WORLD },
78 { DMN_WOR9_WORLD, DMN_WORLD, DMN_WORLD },
79 { DMN_WORA_WORLD, DMN_WORLD, DMN_WORLD },
80};
81
82enum ath5k_regdom ath5k_regdom2flag(enum ath5k_regdom dmn, u16 mhz)
83{
84 unsigned int i;
85
86 for (i = 0; i < ARRAY_SIZE(r_map); i++) {
87 if (r_map[i].dmn == dmn) {
88 if (mhz >= 2000 && mhz <= 3000)
89 return r_map[i].dmn2;
90 if (mhz >= IEEE80211_CHANNELS_5GHZ_MIN &&
91 mhz <= IEEE80211_CHANNELS_5GHZ_MAX)
92 return r_map[i].dmn5;
93 }
94 }
95
96 return DMN_DEBUG;
97}
98
99u16 ath5k_regdom_from_ieee(enum ath5k_regdom ieee)
100{
101 u32 regdomain = (u32)ieee;
102
103 /*
104 * Use the default regulation domain if the value is empty
105 * or not supported by the net80211 regulation code.
106 */
107 if (ath5k_regdom2flag(regdomain, IEEE80211_CHANNELS_5GHZ_MIN) ==
108 DMN_DEBUG)
109 return (u16)AR5K_TUNE_REGDOMAIN;
110
111 /* It is supported, just return the value */
112 return regdomain;
113}
114
115enum ath5k_regdom ath5k_regdom_to_ieee(u16 regdomain)
116{
117 enum ath5k_regdom ieee = (enum ath5k_regdom)regdomain;
118
119 return ieee;
120}
121
diff --git a/drivers/net/wireless/ath5k/regdom.h b/drivers/net/wireless/ath5k/regdom.h
deleted file mode 100644
index f7d3c66e594e..000000000000
--- a/drivers/net/wireless/ath5k/regdom.h
+++ /dev/null
@@ -1,500 +0,0 @@
1/*
2 * Copyright (c) 2004, 2005 Reyk Floeter <reyk@openbsd.org>
3 *
4 * Permission to use, copy, modify, and distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#ifndef _IEEE80211_REGDOMAIN_H_
18#define _IEEE80211_REGDOMAIN_H_
19
20#include <linux/types.h>
21
22/* Default regulation domain if stored value EEPROM value is invalid */
23#define AR5K_TUNE_REGDOMAIN DMN_FCC2_FCCA /* Canada */
24#define AR5K_TUNE_CTRY CTRY_DEFAULT
25
26
27enum ath5k_regdom {
28 DMN_DEFAULT = 0x00,
29 DMN_NULL_WORLD = 0x03,
30 DMN_NULL_ETSIB = 0x07,
31 DMN_NULL_ETSIC = 0x08,
32 DMN_FCC1_FCCA = 0x10,
33 DMN_FCC1_WORLD = 0x11,
34 DMN_FCC2_FCCA = 0x20,
35 DMN_FCC2_WORLD = 0x21,
36 DMN_FCC2_ETSIC = 0x22,
37 DMN_FRANCE_NULL = 0x31,
38 DMN_FCC3_FCCA = 0x3A,
39 DMN_ETSI1_WORLD = 0x37,
40 DMN_ETSI3_ETSIA = 0x32,
41 DMN_ETSI2_WORLD = 0x35,
42 DMN_ETSI3_WORLD = 0x36,
43 DMN_ETSI4_WORLD = 0x30,
44 DMN_ETSI4_ETSIC = 0x38,
45 DMN_ETSI5_WORLD = 0x39,
46 DMN_ETSI6_WORLD = 0x34,
47 DMN_ETSI_NULL = 0x33,
48 DMN_MKK1_MKKA = 0x40,
49 DMN_MKK1_MKKB = 0x41,
50 DMN_APL4_WORLD = 0x42,
51 DMN_MKK2_MKKA = 0x43,
52 DMN_APL_NULL = 0x44,
53 DMN_APL2_WORLD = 0x45,
54 DMN_APL2_APLC = 0x46,
55 DMN_APL3_WORLD = 0x47,
56 DMN_MKK1_FCCA = 0x48,
57 DMN_APL2_APLD = 0x49,
58 DMN_MKK1_MKKA1 = 0x4A,
59 DMN_MKK1_MKKA2 = 0x4B,
60 DMN_APL1_WORLD = 0x52,
61 DMN_APL1_FCCA = 0x53,
62 DMN_APL1_APLA = 0x54,
63 DMN_APL1_ETSIC = 0x55,
64 DMN_APL2_ETSIC = 0x56,
65 DMN_APL5_WORLD = 0x58,
66 DMN_WOR0_WORLD = 0x60,
67 DMN_WOR1_WORLD = 0x61,
68 DMN_WOR2_WORLD = 0x62,
69 DMN_WOR3_WORLD = 0x63,
70 DMN_WOR4_WORLD = 0x64,
71 DMN_WOR5_ETSIC = 0x65,
72 DMN_WOR01_WORLD = 0x66,
73 DMN_WOR02_WORLD = 0x67,
74 DMN_EU1_WORLD = 0x68,
75 DMN_WOR9_WORLD = 0x69,
76 DMN_WORA_WORLD = 0x6A,
77
78 DMN_APL1 = 0xf0000001,
79 DMN_APL2 = 0xf0000002,
80 DMN_APL3 = 0xf0000004,
81 DMN_APL4 = 0xf0000008,
82 DMN_APL5 = 0xf0000010,
83 DMN_ETSI1 = 0xf0000020,
84 DMN_ETSI2 = 0xf0000040,
85 DMN_ETSI3 = 0xf0000080,
86 DMN_ETSI4 = 0xf0000100,
87 DMN_ETSI5 = 0xf0000200,
88 DMN_ETSI6 = 0xf0000400,
89 DMN_ETSIA = 0xf0000800,
90 DMN_ETSIB = 0xf0001000,
91 DMN_ETSIC = 0xf0002000,
92 DMN_FCC1 = 0xf0004000,
93 DMN_FCC2 = 0xf0008000,
94 DMN_FCC3 = 0xf0010000,
95 DMN_FCCA = 0xf0020000,
96 DMN_APLD = 0xf0040000,
97 DMN_MKK1 = 0xf0080000,
98 DMN_MKK2 = 0xf0100000,
99 DMN_MKKA = 0xf0200000,
100 DMN_NULL = 0xf0400000,
101 DMN_WORLD = 0xf0800000,
102 DMN_DEBUG = 0xf1000000 /* used for debugging */
103};
104
105#define IEEE80211_DMN(_d) ((_d) & ~0xf0000000)
106
107enum ath5k_countrycode {
108 CTRY_DEFAULT = 0, /* Default domain (NA) */
109 CTRY_ALBANIA = 8, /* Albania */
110 CTRY_ALGERIA = 12, /* Algeria */
111 CTRY_ARGENTINA = 32, /* Argentina */
112 CTRY_ARMENIA = 51, /* Armenia */
113 CTRY_AUSTRALIA = 36, /* Australia */
114 CTRY_AUSTRIA = 40, /* Austria */
115 CTRY_AZERBAIJAN = 31, /* Azerbaijan */
116 CTRY_BAHRAIN = 48, /* Bahrain */
117 CTRY_BELARUS = 112, /* Belarus */
118 CTRY_BELGIUM = 56, /* Belgium */
119 CTRY_BELIZE = 84, /* Belize */
120 CTRY_BOLIVIA = 68, /* Bolivia */
121 CTRY_BRAZIL = 76, /* Brazil */
122 CTRY_BRUNEI_DARUSSALAM = 96, /* Brunei Darussalam */
123 CTRY_BULGARIA = 100, /* Bulgaria */
124 CTRY_CANADA = 124, /* Canada */
125 CTRY_CHILE = 152, /* Chile */
126 CTRY_CHINA = 156, /* People's Republic of China */
127 CTRY_COLOMBIA = 170, /* Colombia */
128 CTRY_COSTA_RICA = 188, /* Costa Rica */
129 CTRY_CROATIA = 191, /* Croatia */
130 CTRY_CYPRUS = 196, /* Cyprus */
131 CTRY_CZECH = 203, /* Czech Republic */
132 CTRY_DENMARK = 208, /* Denmark */
133 CTRY_DOMINICAN_REPUBLIC = 214, /* Dominican Republic */
134 CTRY_ECUADOR = 218, /* Ecuador */
135 CTRY_EGYPT = 818, /* Egypt */
136 CTRY_EL_SALVADOR = 222, /* El Salvador */
137 CTRY_ESTONIA = 233, /* Estonia */
138 CTRY_FAEROE_ISLANDS = 234, /* Faeroe Islands */
139 CTRY_FINLAND = 246, /* Finland */
140 CTRY_FRANCE = 250, /* France */
141 CTRY_FRANCE2 = 255, /* France2 */
142 CTRY_GEORGIA = 268, /* Georgia */
143 CTRY_GERMANY = 276, /* Germany */
144 CTRY_GREECE = 300, /* Greece */
145 CTRY_GUATEMALA = 320, /* Guatemala */
146 CTRY_HONDURAS = 340, /* Honduras */
147 CTRY_HONG_KONG = 344, /* Hong Kong S.A.R., P.R.C. */
148 CTRY_HUNGARY = 348, /* Hungary */
149 CTRY_ICELAND = 352, /* Iceland */
150 CTRY_INDIA = 356, /* India */
151 CTRY_INDONESIA = 360, /* Indonesia */
152 CTRY_IRAN = 364, /* Iran */
153 CTRY_IRAQ = 368, /* Iraq */
154 CTRY_IRELAND = 372, /* Ireland */
155 CTRY_ISRAEL = 376, /* Israel */
156 CTRY_ITALY = 380, /* Italy */
157 CTRY_JAMAICA = 388, /* Jamaica */
158 CTRY_JAPAN = 392, /* Japan */
159 CTRY_JAPAN1 = 393, /* Japan (JP1) */
160 CTRY_JAPAN2 = 394, /* Japan (JP0) */
161 CTRY_JAPAN3 = 395, /* Japan (JP1-1) */
162 CTRY_JAPAN4 = 396, /* Japan (JE1) */
163 CTRY_JAPAN5 = 397, /* Japan (JE2) */
164 CTRY_JORDAN = 400, /* Jordan */
165 CTRY_KAZAKHSTAN = 398, /* Kazakhstan */
166 CTRY_KENYA = 404, /* Kenya */
167 CTRY_KOREA_NORTH = 408, /* North Korea */
168 CTRY_KOREA_ROC = 410, /* South Korea */
169 CTRY_KOREA_ROC2 = 411, /* South Korea */
170 CTRY_KUWAIT = 414, /* Kuwait */
171 CTRY_LATVIA = 428, /* Latvia */
172 CTRY_LEBANON = 422, /* Lebanon */
173 CTRY_LIBYA = 434, /* Libya */
174 CTRY_LIECHTENSTEIN = 438, /* Liechtenstein */
175 CTRY_LITHUANIA = 440, /* Lithuania */
176 CTRY_LUXEMBOURG = 442, /* Luxembourg */
177 CTRY_MACAU = 446, /* Macau */
178 CTRY_MACEDONIA = 807, /* Republic of Macedonia */
179 CTRY_MALAYSIA = 458, /* Malaysia */
180 CTRY_MEXICO = 484, /* Mexico */
181 CTRY_MONACO = 492, /* Principality of Monaco */
182 CTRY_MOROCCO = 504, /* Morocco */
183 CTRY_NETHERLANDS = 528, /* Netherlands */
184 CTRY_NEW_ZEALAND = 554, /* New Zealand */
185 CTRY_NICARAGUA = 558, /* Nicaragua */
186 CTRY_NORWAY = 578, /* Norway */
187 CTRY_OMAN = 512, /* Oman */
188 CTRY_PAKISTAN = 586, /* Islamic Republic of Pakistan */
189 CTRY_PANAMA = 591, /* Panama */
190 CTRY_PARAGUAY = 600, /* Paraguay */
191 CTRY_PERU = 604, /* Peru */
192 CTRY_PHILIPPINES = 608, /* Republic of the Philippines */
193 CTRY_POLAND = 616, /* Poland */
194 CTRY_PORTUGAL = 620, /* Portugal */
195 CTRY_PUERTO_RICO = 630, /* Puerto Rico */
196 CTRY_QATAR = 634, /* Qatar */
197 CTRY_ROMANIA = 642, /* Romania */
198 CTRY_RUSSIA = 643, /* Russia */
199 CTRY_SAUDI_ARABIA = 682, /* Saudi Arabia */
200 CTRY_SINGAPORE = 702, /* Singapore */
201 CTRY_SLOVAKIA = 703, /* Slovak Republic */
202 CTRY_SLOVENIA = 705, /* Slovenia */
203 CTRY_SOUTH_AFRICA = 710, /* South Africa */
204 CTRY_SPAIN = 724, /* Spain */
205 CTRY_SRI_LANKA = 728, /* Sri Lanka */
206 CTRY_SWEDEN = 752, /* Sweden */
207 CTRY_SWITZERLAND = 756, /* Switzerland */
208 CTRY_SYRIA = 760, /* Syria */
209 CTRY_TAIWAN = 158, /* Taiwan */
210 CTRY_THAILAND = 764, /* Thailand */
211 CTRY_TRINIDAD_Y_TOBAGO = 780, /* Trinidad y Tobago */
212 CTRY_TUNISIA = 788, /* Tunisia */
213 CTRY_TURKEY = 792, /* Turkey */
214 CTRY_UAE = 784, /* U.A.E. */
215 CTRY_UKRAINE = 804, /* Ukraine */
216 CTRY_UNITED_KINGDOM = 826, /* United Kingdom */
217 CTRY_UNITED_STATES = 840, /* United States */
218 CTRY_URUGUAY = 858, /* Uruguay */
219 CTRY_UZBEKISTAN = 860, /* Uzbekistan */
220 CTRY_VENEZUELA = 862, /* Venezuela */
221 CTRY_VIET_NAM = 704, /* Viet Nam */
222 CTRY_YEMEN = 887, /* Yemen */
223 CTRY_ZIMBABWE = 716, /* Zimbabwe */
224};
225
226#define IEEE80211_CHANNELS_2GHZ_MIN 2412 /* 2GHz channel 1 */
227#define IEEE80211_CHANNELS_2GHZ_MAX 2732 /* 2GHz channel 26 */
228#define IEEE80211_CHANNELS_5GHZ_MIN 5005 /* 5GHz channel 1 */
229#define IEEE80211_CHANNELS_5GHZ_MAX 6100 /* 5GHz channel 220 */
230
231struct ath5k_regchannel {
232 u16 chan;
233 enum ath5k_regdom domain;
234 u32 mode;
235};
236
237#define IEEE80211_CHANNELS_2GHZ { \
238/*2412*/ { 1, DMN_APLD, CHANNEL_CCK|CHANNEL_OFDM }, \
239/*2417*/ { 2, DMN_APLD, CHANNEL_CCK|CHANNEL_OFDM }, \
240/*2422*/ { 3, DMN_APLD, CHANNEL_CCK|CHANNEL_OFDM }, \
241/*2427*/ { 4, DMN_APLD, CHANNEL_CCK|CHANNEL_OFDM }, \
242/*2432*/ { 5, DMN_APLD, CHANNEL_CCK|CHANNEL_OFDM }, \
243/*2437*/ { 6, DMN_APLD, CHANNEL_CCK|CHANNEL_OFDM }, \
244/*2442*/ { 7, DMN_APLD, CHANNEL_CCK|CHANNEL_OFDM }, \
245/*2447*/ { 8, DMN_APLD, CHANNEL_CCK|CHANNEL_OFDM }, \
246/*2452*/ { 9, DMN_APLD, CHANNEL_CCK|CHANNEL_OFDM }, \
247/*2457*/ { 10, DMN_APLD, CHANNEL_CCK|CHANNEL_OFDM }, \
248/*2462*/ { 11, DMN_APLD, CHANNEL_CCK|CHANNEL_OFDM }, \
249/*2467*/ { 12, DMN_APLD, CHANNEL_CCK|CHANNEL_OFDM }, \
250/*2472*/ { 13, DMN_APLD, CHANNEL_CCK|CHANNEL_OFDM }, \
251 \
252/*2432*/ { 5, DMN_ETSIB, CHANNEL_CCK|CHANNEL_OFDM }, \
253/*2437*/ { 6, DMN_ETSIB, CHANNEL_CCK|CHANNEL_OFDM|CHANNEL_TURBO }, \
254/*2442*/ { 7, DMN_ETSIB, CHANNEL_CCK|CHANNEL_OFDM }, \
255 \
256/*2412*/ { 1, DMN_ETSIC, CHANNEL_CCK|CHANNEL_OFDM }, \
257/*2417*/ { 2, DMN_ETSIC, CHANNEL_CCK|CHANNEL_OFDM }, \
258/*2422*/ { 3, DMN_ETSIC, CHANNEL_CCK|CHANNEL_OFDM }, \
259/*2427*/ { 4, DMN_ETSIC, CHANNEL_CCK|CHANNEL_OFDM }, \
260/*2432*/ { 5, DMN_ETSIC, CHANNEL_CCK|CHANNEL_OFDM }, \
261/*2437*/ { 6, DMN_ETSIC, CHANNEL_CCK|CHANNEL_OFDM|CHANNEL_TURBO }, \
262/*2442*/ { 7, DMN_ETSIC, CHANNEL_CCK|CHANNEL_OFDM }, \
263/*2447*/ { 8, DMN_ETSIC, CHANNEL_CCK|CHANNEL_OFDM }, \
264/*2452*/ { 9, DMN_ETSIC, CHANNEL_CCK|CHANNEL_OFDM }, \
265/*2457*/ { 10, DMN_ETSIC, CHANNEL_CCK|CHANNEL_OFDM }, \
266/*2462*/ { 11, DMN_ETSIC, CHANNEL_CCK|CHANNEL_OFDM }, \
267/*2467*/ { 12, DMN_ETSIC, CHANNEL_CCK|CHANNEL_OFDM }, \
268/*2472*/ { 13, DMN_ETSIC, CHANNEL_CCK|CHANNEL_OFDM }, \
269 \
270/*2412*/ { 1, DMN_FCCA, CHANNEL_CCK|CHANNEL_OFDM }, \
271/*2417*/ { 2, DMN_FCCA, CHANNEL_CCK|CHANNEL_OFDM }, \
272/*2422*/ { 3, DMN_FCCA, CHANNEL_CCK|CHANNEL_OFDM }, \
273/*2427*/ { 4, DMN_FCCA, CHANNEL_CCK|CHANNEL_OFDM }, \
274/*2432*/ { 5, DMN_FCCA, CHANNEL_CCK|CHANNEL_OFDM }, \
275/*2437*/ { 6, DMN_FCCA, CHANNEL_CCK|CHANNEL_OFDM|CHANNEL_TURBO }, \
276/*2442*/ { 7, DMN_FCCA, CHANNEL_CCK|CHANNEL_OFDM }, \
277/*2447*/ { 8, DMN_FCCA, CHANNEL_CCK|CHANNEL_OFDM }, \
278/*2452*/ { 9, DMN_FCCA, CHANNEL_CCK|CHANNEL_OFDM }, \
279/*2457*/ { 10, DMN_FCCA, CHANNEL_CCK|CHANNEL_OFDM }, \
280/*2462*/ { 11, DMN_FCCA, CHANNEL_CCK|CHANNEL_OFDM }, \
281 \
282/*2412*/ { 1, DMN_MKKA, CHANNEL_CCK|CHANNEL_OFDM }, \
283/*2417*/ { 2, DMN_MKKA, CHANNEL_CCK|CHANNEL_OFDM }, \
284/*2422*/ { 3, DMN_MKKA, CHANNEL_CCK|CHANNEL_OFDM }, \
285/*2427*/ { 4, DMN_MKKA, CHANNEL_CCK|CHANNEL_OFDM }, \
286/*2432*/ { 5, DMN_MKKA, CHANNEL_CCK|CHANNEL_OFDM }, \
287/*2437*/ { 6, DMN_MKKA, CHANNEL_CCK|CHANNEL_OFDM }, \
288/*2442*/ { 7, DMN_MKKA, CHANNEL_CCK|CHANNEL_OFDM }, \
289/*2447*/ { 8, DMN_MKKA, CHANNEL_CCK|CHANNEL_OFDM }, \
290/*2452*/ { 9, DMN_MKKA, CHANNEL_CCK|CHANNEL_OFDM }, \
291/*2457*/ { 10, DMN_MKKA, CHANNEL_CCK|CHANNEL_OFDM }, \
292/*2462*/ { 11, DMN_MKKA, CHANNEL_CCK|CHANNEL_OFDM }, \
293/*2467*/ { 12, DMN_MKKA, CHANNEL_CCK|CHANNEL_OFDM }, \
294/*2472*/ { 13, DMN_MKKA, CHANNEL_CCK|CHANNEL_OFDM }, \
295/*2484*/ { 14, DMN_MKKA, CHANNEL_CCK }, \
296 \
297/*2412*/ { 1, DMN_WORLD, CHANNEL_CCK|CHANNEL_OFDM }, \
298/*2417*/ { 2, DMN_WORLD, CHANNEL_CCK|CHANNEL_OFDM }, \
299/*2422*/ { 3, DMN_WORLD, CHANNEL_CCK|CHANNEL_OFDM }, \
300/*2427*/ { 4, DMN_WORLD, CHANNEL_CCK|CHANNEL_OFDM }, \
301/*2432*/ { 5, DMN_WORLD, CHANNEL_CCK|CHANNEL_OFDM }, \
302/*2437*/ { 6, DMN_WORLD, CHANNEL_CCK|CHANNEL_OFDM|CHANNEL_TURBO }, \
303/*2442*/ { 7, DMN_WORLD, CHANNEL_CCK|CHANNEL_OFDM }, \
304/*2447*/ { 8, DMN_WORLD, CHANNEL_CCK|CHANNEL_OFDM }, \
305/*2452*/ { 9, DMN_WORLD, CHANNEL_CCK|CHANNEL_OFDM }, \
306/*2457*/ { 10, DMN_WORLD, CHANNEL_CCK|CHANNEL_OFDM }, \
307/*2462*/ { 11, DMN_WORLD, CHANNEL_CCK|CHANNEL_OFDM }, \
308/*2467*/ { 12, DMN_WORLD, CHANNEL_CCK|CHANNEL_OFDM }, \
309/*2472*/ { 13, DMN_WORLD, CHANNEL_CCK|CHANNEL_OFDM }, \
310}
311
312#define IEEE80211_CHANNELS_5GHZ { \
313/*5745*/ { 149, DMN_APL1, CHANNEL_OFDM }, \
314/*5765*/ { 153, DMN_APL1, CHANNEL_OFDM }, \
315/*5785*/ { 157, DMN_APL1, CHANNEL_OFDM }, \
316/*5805*/ { 161, DMN_APL1, CHANNEL_OFDM }, \
317/*5825*/ { 165, DMN_APL1, CHANNEL_OFDM }, \
318 \
319/*5745*/ { 149, DMN_APL2, CHANNEL_OFDM }, \
320/*5765*/ { 153, DMN_APL2, CHANNEL_OFDM }, \
321/*5785*/ { 157, DMN_APL2, CHANNEL_OFDM }, \
322/*5805*/ { 161, DMN_APL2, CHANNEL_OFDM }, \
323 \
324/*5280*/ { 56, DMN_APL3, CHANNEL_OFDM }, \
325/*5300*/ { 60, DMN_APL3, CHANNEL_OFDM }, \
326/*5320*/ { 64, DMN_APL3, CHANNEL_OFDM }, \
327/*5745*/ { 149, DMN_APL3, CHANNEL_OFDM }, \
328/*5765*/ { 153, DMN_APL3, CHANNEL_OFDM }, \
329/*5785*/ { 157, DMN_APL3, CHANNEL_OFDM }, \
330/*5805*/ { 161, DMN_APL3, CHANNEL_OFDM }, \
331 \
332/*5180*/ { 36, DMN_APL4, CHANNEL_OFDM }, \
333/*5200*/ { 40, DMN_APL4, CHANNEL_OFDM }, \
334/*5220*/ { 44, DMN_APL4, CHANNEL_OFDM }, \
335/*5240*/ { 48, DMN_APL4, CHANNEL_OFDM }, \
336/*5745*/ { 149, DMN_APL4, CHANNEL_OFDM }, \
337/*5765*/ { 153, DMN_APL4, CHANNEL_OFDM }, \
338/*5785*/ { 157, DMN_APL4, CHANNEL_OFDM }, \
339/*5805*/ { 161, DMN_APL4, CHANNEL_OFDM }, \
340/*5825*/ { 165, DMN_APL4, CHANNEL_OFDM }, \
341 \
342/*5745*/ { 149, DMN_APL5, CHANNEL_OFDM }, \
343/*5765*/ { 153, DMN_APL5, CHANNEL_OFDM }, \
344/*5785*/ { 157, DMN_APL5, CHANNEL_OFDM }, \
345/*5805*/ { 161, DMN_APL5, CHANNEL_OFDM }, \
346/*5825*/ { 165, DMN_APL5, CHANNEL_OFDM }, \
347 \
348/*5180*/ { 36, DMN_ETSI1, CHANNEL_OFDM }, \
349/*5200*/ { 40, DMN_ETSI1, CHANNEL_OFDM }, \
350/*5220*/ { 44, DMN_ETSI1, CHANNEL_OFDM }, \
351/*5240*/ { 48, DMN_ETSI1, CHANNEL_OFDM }, \
352/*5260*/ { 52, DMN_ETSI1, CHANNEL_OFDM }, \
353/*5280*/ { 56, DMN_ETSI1, CHANNEL_OFDM }, \
354/*5300*/ { 60, DMN_ETSI1, CHANNEL_OFDM }, \
355/*5320*/ { 64, DMN_ETSI1, CHANNEL_OFDM }, \
356/*5500*/ { 100, DMN_ETSI1, CHANNEL_OFDM }, \
357/*5520*/ { 104, DMN_ETSI1, CHANNEL_OFDM }, \
358/*5540*/ { 108, DMN_ETSI1, CHANNEL_OFDM }, \
359/*5560*/ { 112, DMN_ETSI1, CHANNEL_OFDM }, \
360/*5580*/ { 116, DMN_ETSI1, CHANNEL_OFDM }, \
361/*5600*/ { 120, DMN_ETSI1, CHANNEL_OFDM }, \
362/*5620*/ { 124, DMN_ETSI1, CHANNEL_OFDM }, \
363/*5640*/ { 128, DMN_ETSI1, CHANNEL_OFDM }, \
364/*5660*/ { 132, DMN_ETSI1, CHANNEL_OFDM }, \
365/*5680*/ { 136, DMN_ETSI1, CHANNEL_OFDM }, \
366/*5700*/ { 140, DMN_ETSI1, CHANNEL_OFDM }, \
367 \
368/*5180*/ { 36, DMN_ETSI2, CHANNEL_OFDM }, \
369/*5200*/ { 40, DMN_ETSI2, CHANNEL_OFDM }, \
370/*5220*/ { 44, DMN_ETSI2, CHANNEL_OFDM }, \
371/*5240*/ { 48, DMN_ETSI2, CHANNEL_OFDM }, \
372 \
373/*5180*/ { 36, DMN_ETSI3, CHANNEL_OFDM }, \
374/*5200*/ { 40, DMN_ETSI3, CHANNEL_OFDM }, \
375/*5220*/ { 44, DMN_ETSI3, CHANNEL_OFDM }, \
376/*5240*/ { 48, DMN_ETSI3, CHANNEL_OFDM }, \
377/*5260*/ { 52, DMN_ETSI3, CHANNEL_OFDM }, \
378/*5280*/ { 56, DMN_ETSI3, CHANNEL_OFDM }, \
379/*5300*/ { 60, DMN_ETSI3, CHANNEL_OFDM }, \
380/*5320*/ { 64, DMN_ETSI3, CHANNEL_OFDM }, \
381 \
382/*5180*/ { 36, DMN_ETSI4, CHANNEL_OFDM }, \
383/*5200*/ { 40, DMN_ETSI4, CHANNEL_OFDM }, \
384/*5220*/ { 44, DMN_ETSI4, CHANNEL_OFDM }, \
385/*5240*/ { 48, DMN_ETSI4, CHANNEL_OFDM }, \
386/*5260*/ { 52, DMN_ETSI4, CHANNEL_OFDM }, \
387/*5280*/ { 56, DMN_ETSI4, CHANNEL_OFDM }, \
388/*5300*/ { 60, DMN_ETSI4, CHANNEL_OFDM }, \
389/*5320*/ { 64, DMN_ETSI4, CHANNEL_OFDM }, \
390 \
391/*5180*/ { 36, DMN_ETSI5, CHANNEL_OFDM }, \
392/*5200*/ { 40, DMN_ETSI5, CHANNEL_OFDM }, \
393/*5220*/ { 44, DMN_ETSI5, CHANNEL_OFDM }, \
394/*5240*/ { 48, DMN_ETSI5, CHANNEL_OFDM }, \
395 \
396/*5180*/ { 36, DMN_ETSI6, CHANNEL_OFDM }, \
397/*5200*/ { 40, DMN_ETSI6, CHANNEL_OFDM }, \
398/*5220*/ { 44, DMN_ETSI6, CHANNEL_OFDM }, \
399/*5240*/ { 48, DMN_ETSI6, CHANNEL_OFDM }, \
400/*5260*/ { 52, DMN_ETSI6, CHANNEL_OFDM }, \
401/*5280*/ { 56, DMN_ETSI6, CHANNEL_OFDM }, \
402/*5500*/ { 100, DMN_ETSI6, CHANNEL_OFDM }, \
403/*5520*/ { 104, DMN_ETSI6, CHANNEL_OFDM }, \
404/*5540*/ { 108, DMN_ETSI6, CHANNEL_OFDM }, \
405/*5560*/ { 112, DMN_ETSI6, CHANNEL_OFDM }, \
406/*5580*/ { 116, DMN_ETSI6, CHANNEL_OFDM }, \
407/*5600*/ { 120, DMN_ETSI6, CHANNEL_OFDM }, \
408/*5620*/ { 124, DMN_ETSI6, CHANNEL_OFDM }, \
409/*5640*/ { 128, DMN_ETSI6, CHANNEL_OFDM }, \
410/*5660*/ { 132, DMN_ETSI6, CHANNEL_OFDM }, \
411/*5680*/ { 136, DMN_ETSI6, CHANNEL_OFDM }, \
412/*5700*/ { 140, DMN_ETSI6, CHANNEL_OFDM }, \
413 \
414/*5180*/ { 36, DMN_FCC1, CHANNEL_OFDM }, \
415/*5200*/ { 40, DMN_FCC1, CHANNEL_OFDM }, \
416/*5210*/ { 42, DMN_FCC1, CHANNEL_OFDM|CHANNEL_TURBO }, \
417/*5220*/ { 44, DMN_FCC1, CHANNEL_OFDM }, \
418/*5240*/ { 48, DMN_FCC1, CHANNEL_OFDM }, \
419/*5250*/ { 50, DMN_FCC1, CHANNEL_OFDM|CHANNEL_TURBO }, \
420/*5260*/ { 52, DMN_FCC1, CHANNEL_OFDM }, \
421/*5280*/ { 56, DMN_FCC1, CHANNEL_OFDM }, \
422/*5290*/ { 58, DMN_FCC1, CHANNEL_OFDM|CHANNEL_TURBO }, \
423/*5300*/ { 60, DMN_FCC1, CHANNEL_OFDM }, \
424/*5320*/ { 64, DMN_FCC1, CHANNEL_OFDM }, \
425/*5745*/ { 149, DMN_FCC1, CHANNEL_OFDM }, \
426/*5760*/ { 152, DMN_FCC1, CHANNEL_OFDM|CHANNEL_TURBO }, \
427/*5765*/ { 153, DMN_FCC1, CHANNEL_OFDM }, \
428/*5785*/ { 157, DMN_FCC1, CHANNEL_OFDM }, \
429/*5800*/ { 160, DMN_FCC1, CHANNEL_OFDM|CHANNEL_TURBO }, \
430/*5805*/ { 161, DMN_FCC1, CHANNEL_OFDM }, \
431/*5825*/ { 165, DMN_FCC1, CHANNEL_OFDM }, \
432 \
433/*5180*/ { 36, DMN_FCC2, CHANNEL_OFDM }, \
434/*5200*/ { 40, DMN_FCC2, CHANNEL_OFDM }, \
435/*5220*/ { 44, DMN_FCC2, CHANNEL_OFDM }, \
436/*5240*/ { 48, DMN_FCC2, CHANNEL_OFDM }, \
437/*5260*/ { 52, DMN_FCC2, CHANNEL_OFDM }, \
438/*5280*/ { 56, DMN_FCC2, CHANNEL_OFDM }, \
439/*5300*/ { 60, DMN_FCC2, CHANNEL_OFDM }, \
440/*5320*/ { 64, DMN_FCC2, CHANNEL_OFDM }, \
441/*5745*/ { 149, DMN_FCC2, CHANNEL_OFDM }, \
442/*5765*/ { 153, DMN_FCC2, CHANNEL_OFDM }, \
443/*5785*/ { 157, DMN_FCC2, CHANNEL_OFDM }, \
444/*5805*/ { 161, DMN_FCC2, CHANNEL_OFDM }, \
445/*5825*/ { 165, DMN_FCC2, CHANNEL_OFDM }, \
446 \
447/*5180*/ { 36, DMN_FCC3, CHANNEL_OFDM }, \
448/*5200*/ { 40, DMN_FCC3, CHANNEL_OFDM }, \
449/*5210*/ { 42, DMN_FCC3, CHANNEL_OFDM|CHANNEL_TURBO }, \
450/*5220*/ { 44, DMN_FCC3, CHANNEL_OFDM }, \
451/*5240*/ { 48, DMN_FCC3, CHANNEL_OFDM }, \
452/*5250*/ { 50, DMN_FCC3, CHANNEL_OFDM|CHANNEL_TURBO }, \
453/*5260*/ { 52, DMN_FCC3, CHANNEL_OFDM }, \
454/*5280*/ { 56, DMN_FCC3, CHANNEL_OFDM }, \
455/*5290*/ { 58, DMN_FCC3, CHANNEL_OFDM|CHANNEL_TURBO }, \
456/*5300*/ { 60, DMN_FCC3, CHANNEL_OFDM }, \
457/*5320*/ { 64, DMN_FCC3, CHANNEL_OFDM }, \
458/*5500*/ { 100, DMN_FCC3, CHANNEL_OFDM }, \
459/*5520*/ { 104, DMN_FCC3, CHANNEL_OFDM }, \
460/*5540*/ { 108, DMN_FCC3, CHANNEL_OFDM }, \
461/*5560*/ { 112, DMN_FCC3, CHANNEL_OFDM }, \
462/*5580*/ { 116, DMN_FCC3, CHANNEL_OFDM }, \
463/*5600*/ { 120, DMN_FCC3, CHANNEL_OFDM }, \
464/*5620*/ { 124, DMN_FCC3, CHANNEL_OFDM }, \
465/*5640*/ { 128, DMN_FCC3, CHANNEL_OFDM }, \
466/*5660*/ { 132, DMN_FCC3, CHANNEL_OFDM }, \
467/*5680*/ { 136, DMN_FCC3, CHANNEL_OFDM }, \
468/*5700*/ { 140, DMN_FCC3, CHANNEL_OFDM }, \
469/*5745*/ { 149, DMN_FCC3, CHANNEL_OFDM }, \
470/*5760*/ { 152, DMN_FCC3, CHANNEL_OFDM|CHANNEL_TURBO }, \
471/*5765*/ { 153, DMN_FCC3, CHANNEL_OFDM }, \
472/*5785*/ { 157, DMN_FCC3, CHANNEL_OFDM }, \
473/*5800*/ { 160, DMN_FCC3, CHANNEL_OFDM|CHANNEL_TURBO }, \
474/*5805*/ { 161, DMN_FCC3, CHANNEL_OFDM }, \
475/*5825*/ { 165, DMN_FCC3, CHANNEL_OFDM }, \
476 \
477/*5170*/ { 34, DMN_MKK1, CHANNEL_OFDM }, \
478/*5190*/ { 38, DMN_MKK1, CHANNEL_OFDM }, \
479/*5210*/ { 42, DMN_MKK1, CHANNEL_OFDM }, \
480/*5230*/ { 46, DMN_MKK1, CHANNEL_OFDM }, \
481 \
482/*5040*/ { 8, DMN_MKK2, CHANNEL_OFDM }, \
483/*5060*/ { 12, DMN_MKK2, CHANNEL_OFDM }, \
484/*5080*/ { 16, DMN_MKK2, CHANNEL_OFDM }, \
485/*5170*/ { 34, DMN_MKK2, CHANNEL_OFDM }, \
486/*5190*/ { 38, DMN_MKK2, CHANNEL_OFDM }, \
487/*5210*/ { 42, DMN_MKK2, CHANNEL_OFDM }, \
488/*5230*/ { 46, DMN_MKK2, CHANNEL_OFDM }, \
489 \
490/*5180*/ { 36, DMN_WORLD, CHANNEL_OFDM }, \
491/*5200*/ { 40, DMN_WORLD, CHANNEL_OFDM }, \
492/*5220*/ { 44, DMN_WORLD, CHANNEL_OFDM }, \
493/*5240*/ { 48, DMN_WORLD, CHANNEL_OFDM }, \
494}
495
496enum ath5k_regdom ath5k_regdom2flag(enum ath5k_regdom, u16);
497u16 ath5k_regdom_from_ieee(enum ath5k_regdom ieee);
498enum ath5k_regdom ath5k_regdom_to_ieee(u16 regdomain);
499
500#endif
diff --git a/drivers/net/wireless/atmel.c b/drivers/net/wireless/atmel.c
index 63ec7a70ee76..ef2da4023d68 100644
--- a/drivers/net/wireless/atmel.c
+++ b/drivers/net/wireless/atmel.c
@@ -66,6 +66,7 @@
66#include <linux/device.h> 66#include <linux/device.h>
67#include <linux/moduleparam.h> 67#include <linux/moduleparam.h>
68#include <linux/firmware.h> 68#include <linux/firmware.h>
69#include <linux/jiffies.h>
69#include <net/ieee80211.h> 70#include <net/ieee80211.h>
70#include "atmel.h" 71#include "atmel.h"
71 72
@@ -516,7 +517,7 @@ struct atmel_private {
516 SITE_SURVEY_IN_PROGRESS, 517 SITE_SURVEY_IN_PROGRESS,
517 SITE_SURVEY_COMPLETED 518 SITE_SURVEY_COMPLETED
518 } site_survey_state; 519 } site_survey_state;
519 time_t last_survey; 520 unsigned long last_survey;
520 521
521 int station_was_associated, station_is_associated; 522 int station_was_associated, station_is_associated;
522 int fast_scan; 523 int fast_scan;
@@ -2283,7 +2284,7 @@ static int atmel_set_scan(struct net_device *dev,
2283 return -EAGAIN; 2284 return -EAGAIN;
2284 2285
2285 /* Timeout old surveys. */ 2286 /* Timeout old surveys. */
2286 if ((jiffies - priv->last_survey) > (20 * HZ)) 2287 if (time_after(jiffies, priv->last_survey + 20 * HZ))
2287 priv->site_survey_state = SITE_SURVEY_IDLE; 2288 priv->site_survey_state = SITE_SURVEY_IDLE;
2288 priv->last_survey = jiffies; 2289 priv->last_survey = jiffies;
2289 2290
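The atmel change swaps a raw "jiffies - last_survey" comparison on a time_t for the wrap-safe time_after() helper on an unsigned long. A minimal usage sketch of that pattern; the helper and timeout names are placeholders, only time_after() and jiffies come from the kernel API used above.

	#include <linux/jiffies.h>

	#define SURVEY_TIMEOUT	(20 * HZ)	/* same 20 second window as above */

	static bool survey_expired(unsigned long last_survey)
	{
		/* time_after() handles jiffies wrap-around correctly, unlike
		 * open-coded subtraction on a signed or time_t value. */
		return time_after(jiffies, last_survey + SURVEY_TIMEOUT);
	}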
diff --git a/drivers/net/wireless/b43/Kconfig b/drivers/net/wireless/b43/Kconfig
index 8bc4bc4c330e..f51b2d9b085b 100644
--- a/drivers/net/wireless/b43/Kconfig
+++ b/drivers/net/wireless/b43/Kconfig
@@ -62,6 +62,14 @@ config B43_PCMCIA
62 62
63 If unsure, say N. 63 If unsure, say N.
64 64
65# Data transfers to the device via PIO
66# This is only needed on PCMCIA devices. All others can do DMA properly.
67config B43_PIO
68 bool
69 depends on B43 && (B43_PCMCIA || B43_FORCE_PIO)
70 select SSB_BLOCKIO
71 default y
72
65config B43_NPHY 73config B43_NPHY
66 bool "Pre IEEE 802.11n support (BROKEN)" 74 bool "Pre IEEE 802.11n support (BROKEN)"
67 depends on B43 && EXPERIMENTAL && BROKEN 75 depends on B43 && EXPERIMENTAL && BROKEN
@@ -94,3 +102,13 @@ config B43_DEBUG
94 102
95 Say Y, if you want to find out why the driver does not 103 Say Y, if you want to find out why the driver does not
96 work for you. 104 work for you.
105
106config B43_FORCE_PIO
107 bool "Force usage of PIO instead of DMA"
108 depends on B43 && B43_DEBUG
109 ---help---
110 This will disable DMA and always enable PIO instead.
111
112 Say N!
113 This is only for debugging the PIO engine code. You do
114 _NOT_ want to enable this.
diff --git a/drivers/net/wireless/b43/Makefile b/drivers/net/wireless/b43/Makefile
index ac1329dba045..8c52b0b9862a 100644
--- a/drivers/net/wireless/b43/Makefile
+++ b/drivers/net/wireless/b43/Makefile
@@ -1,13 +1,14 @@
1b43-y += main.o 1b43-y += main.o
2b43-y += tables.o 2b43-y += tables.o
3b43-y += tables_nphy.o 3b43-$(CONFIG_B43_NPHY) += tables_nphy.o
4b43-y += phy.o 4b43-y += phy.o
5b43-y += nphy.o 5b43-$(CONFIG_B43_NPHY) += nphy.o
6b43-y += sysfs.o 6b43-y += sysfs.o
7b43-y += xmit.o 7b43-y += xmit.o
8b43-y += lo.o 8b43-y += lo.o
9b43-y += wa.o 9b43-y += wa.o
10b43-y += dma.o 10b43-y += dma.o
11b43-$(CONFIG_B43_PIO) += pio.o
11b43-$(CONFIG_B43_RFKILL) += rfkill.o 12b43-$(CONFIG_B43_RFKILL) += rfkill.o
12b43-$(CONFIG_B43_LEDS) += leds.o 13b43-$(CONFIG_B43_LEDS) += leds.o
13b43-$(CONFIG_B43_PCMCIA) += pcmcia.o 14b43-$(CONFIG_B43_PCMCIA) += pcmcia.o
diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h
index f13346ba9dd2..eff2a158a411 100644
--- a/drivers/net/wireless/b43/b43.h
+++ b/drivers/net/wireless/b43/b43.h
@@ -75,6 +75,23 @@
75#define B43_MMIO_DMA64_BASE4 0x300 75#define B43_MMIO_DMA64_BASE4 0x300
76#define B43_MMIO_DMA64_BASE5 0x340 76#define B43_MMIO_DMA64_BASE5 0x340
77 77
78/* PIO on core rev < 11 */
79#define B43_MMIO_PIO_BASE0 0x300
80#define B43_MMIO_PIO_BASE1 0x310
81#define B43_MMIO_PIO_BASE2 0x320
82#define B43_MMIO_PIO_BASE3 0x330
83#define B43_MMIO_PIO_BASE4 0x340
84#define B43_MMIO_PIO_BASE5 0x350
85#define B43_MMIO_PIO_BASE6 0x360
86#define B43_MMIO_PIO_BASE7 0x370
87/* PIO on core rev >= 11 */
88#define B43_MMIO_PIO11_BASE0 0x200
89#define B43_MMIO_PIO11_BASE1 0x240
90#define B43_MMIO_PIO11_BASE2 0x280
91#define B43_MMIO_PIO11_BASE3 0x2C0
92#define B43_MMIO_PIO11_BASE4 0x300
93#define B43_MMIO_PIO11_BASE5 0x340
94
78#define B43_MMIO_PHY_VER 0x3E0 95#define B43_MMIO_PHY_VER 0x3E0
79#define B43_MMIO_PHY_RADIO 0x3E2 96#define B43_MMIO_PHY_RADIO 0x3E2
80#define B43_MMIO_PHY0 0x3E6 97#define B43_MMIO_PHY0 0x3E6
@@ -94,11 +111,14 @@
94#define B43_MMIO_GPIO_MASK 0x49E 111#define B43_MMIO_GPIO_MASK 0x49E
95#define B43_MMIO_TSF_CFP_START_LOW 0x604 112#define B43_MMIO_TSF_CFP_START_LOW 0x604
96#define B43_MMIO_TSF_CFP_START_HIGH 0x606 113#define B43_MMIO_TSF_CFP_START_HIGH 0x606
114#define B43_MMIO_TSF_CFP_PRETBTT 0x612
97#define B43_MMIO_TSF_0 0x632 /* core rev < 3 only */ 115#define B43_MMIO_TSF_0 0x632 /* core rev < 3 only */
98#define B43_MMIO_TSF_1 0x634 /* core rev < 3 only */ 116#define B43_MMIO_TSF_1 0x634 /* core rev < 3 only */
99#define B43_MMIO_TSF_2 0x636 /* core rev < 3 only */ 117#define B43_MMIO_TSF_2 0x636 /* core rev < 3 only */
100#define B43_MMIO_TSF_3 0x638 /* core rev < 3 only */ 118#define B43_MMIO_TSF_3 0x638 /* core rev < 3 only */
101#define B43_MMIO_RNG 0x65A 119#define B43_MMIO_RNG 0x65A
120#define B43_MMIO_IFSCTL 0x688 /* Interframe space control */
121#define B43_MMIO_IFSCTL_USE_EDCF 0x0004
102#define B43_MMIO_POWERUP_DELAY 0x6A8 122#define B43_MMIO_POWERUP_DELAY 0x6A8
103 123
104/* SPROM boardflags_lo values */ 124/* SPROM boardflags_lo values */
@@ -144,7 +164,8 @@ enum {
144#define B43_SHM_SH_PHYTYPE 0x0052 /* PHY type */ 164#define B43_SHM_SH_PHYTYPE 0x0052 /* PHY type */
145#define B43_SHM_SH_ANTSWAP 0x005C /* Antenna swap threshold */ 165#define B43_SHM_SH_ANTSWAP 0x005C /* Antenna swap threshold */
146#define B43_SHM_SH_HOSTFLO 0x005E /* Hostflags for ucode options (low) */ 166#define B43_SHM_SH_HOSTFLO 0x005E /* Hostflags for ucode options (low) */
147#define B43_SHM_SH_HOSTFHI 0x0060 /* Hostflags for ucode options (high) */ 167#define B43_SHM_SH_HOSTFMI 0x0060 /* Hostflags for ucode options (middle) */
168#define B43_SHM_SH_HOSTFHI 0x0062 /* Hostflags for ucode options (high) */
148#define B43_SHM_SH_RFATT 0x0064 /* Current radio attenuation value */ 169#define B43_SHM_SH_RFATT 0x0064 /* Current radio attenuation value */
149#define B43_SHM_SH_RADAR 0x0066 /* Radar register */ 170#define B43_SHM_SH_RADAR 0x0066 /* Radar register */
150#define B43_SHM_SH_PHYTXNOI 0x006E /* PHY noise directly after TX (lower 8bit only) */ 171#define B43_SHM_SH_PHYTXNOI 0x006E /* PHY noise directly after TX (lower 8bit only) */
@@ -232,31 +253,41 @@ enum {
232#define B43_MMIO_RADIO_HWENABLED_LO_MASK (1 << 4) 253#define B43_MMIO_RADIO_HWENABLED_LO_MASK (1 << 4)
233 254
234/* HostFlags. See b43_hf_read/write() */ 255/* HostFlags. See b43_hf_read/write() */
235#define B43_HF_ANTDIVHELP 0x00000001 /* ucode antenna div helper */ 256#define B43_HF_ANTDIVHELP 0x000000000001ULL /* ucode antenna div helper */
236#define B43_HF_SYMW 0x00000002 /* G-PHY SYM workaround */ 257#define B43_HF_SYMW 0x000000000002ULL /* G-PHY SYM workaround */
237#define B43_HF_RXPULLW 0x00000004 /* RX pullup workaround */ 258#define B43_HF_RXPULLW 0x000000000004ULL /* RX pullup workaround */
238#define B43_HF_CCKBOOST 0x00000008 /* 4dB CCK power boost (exclusive with OFDM boost) */ 259#define B43_HF_CCKBOOST 0x000000000008ULL /* 4dB CCK power boost (exclusive with OFDM boost) */
239#define B43_HF_BTCOEX 0x00000010 /* Bluetooth coexistance */ 260#define B43_HF_BTCOEX 0x000000000010ULL /* Bluetooth coexistance */
240#define B43_HF_GDCW 0x00000020 /* G-PHY DV canceller filter bw workaround */ 261#define B43_HF_GDCW 0x000000000020ULL /* G-PHY DC canceller filter bw workaround */
241#define B43_HF_OFDMPABOOST 0x00000040 /* Enable PA gain boost for OFDM */ 262#define B43_HF_OFDMPABOOST 0x000000000040ULL /* Enable PA gain boost for OFDM */
242#define B43_HF_ACPR 0x00000080 /* Disable for Japan, channel 14 */ 263#define B43_HF_ACPR 0x000000000080ULL /* Disable for Japan, channel 14 */
243#define B43_HF_EDCF 0x00000100 /* on if WME and MAC suspended */ 264#define B43_HF_EDCF 0x000000000100ULL /* on if WME and MAC suspended */
244#define B43_HF_TSSIRPSMW 0x00000200 /* TSSI reset PSM ucode workaround */ 265#define B43_HF_TSSIRPSMW 0x000000000200ULL /* TSSI reset PSM ucode workaround */
245#define B43_HF_DSCRQ 0x00000400 /* Disable slow clock request in ucode */ 266#define B43_HF_20IN40IQW 0x000000000200ULL /* 20 in 40 MHz I/Q workaround (rev >= 13 only) */
246#define B43_HF_ACIW 0x00000800 /* ACI workaround: shift bits by 2 on PHY CRS */ 267#define B43_HF_DSCRQ 0x000000000400ULL /* Disable slow clock request in ucode */
247#define B43_HF_2060W 0x00001000 /* 2060 radio workaround */ 268#define B43_HF_ACIW 0x000000000800ULL /* ACI workaround: shift bits by 2 on PHY CRS */
248#define B43_HF_RADARW 0x00002000 /* Radar workaround */ 269#define B43_HF_2060W 0x000000001000ULL /* 2060 radio workaround */
249#define B43_HF_USEDEFKEYS 0x00004000 /* Enable use of default keys */ 270#define B43_HF_RADARW 0x000000002000ULL /* Radar workaround */
250#define B43_HF_BT4PRIOCOEX 0x00010000 /* Bluetooth 2-priority coexistance */ 271#define B43_HF_USEDEFKEYS 0x000000004000ULL /* Enable use of default keys */
251#define B43_HF_FWKUP 0x00020000 /* Fast wake-up ucode */ 272#define B43_HF_AFTERBURNER 0x000000008000ULL /* Afterburner enabled */
252#define B43_HF_VCORECALC 0x00040000 /* Force VCO recalculation when powering up synthpu */ 273#define B43_HF_BT4PRIOCOEX 0x000000010000ULL /* Bluetooth 4-priority coexistance */
253#define B43_HF_PCISCW 0x00080000 /* PCI slow clock workaround */ 274#define B43_HF_FWKUP 0x000000020000ULL /* Fast wake-up ucode */
254#define B43_HF_4318TSSI 0x00200000 /* 4318 TSSI */ 275#define B43_HF_VCORECALC 0x000000040000ULL /* Force VCO recalculation when powering up synthpu */
255#define B43_HF_FBCMCFIFO 0x00400000 /* Flush bcast/mcast FIFO immediately */ 276#define B43_HF_PCISCW 0x000000080000ULL /* PCI slow clock workaround */
256#define B43_HF_HWPCTL 0x00800000 /* Enable hardwarre power control */ 277#define B43_HF_4318TSSI 0x000000200000ULL /* 4318 TSSI */
257#define B43_HF_BTCOEXALT 0x01000000 /* Bluetooth coexistance in alternate pins */ 278#define B43_HF_FBCMCFIFO 0x000000400000ULL /* Flush bcast/mcast FIFO immediately */
258#define B43_HF_TXBTCHECK 0x02000000 /* Bluetooth check during transmission */ 279#define B43_HF_HWPCTL 0x000000800000ULL /* Enable hardwarre power control */
259#define B43_HF_SKCFPUP 0x04000000 /* Skip CFP update */ 280#define B43_HF_BTCOEXALT 0x000001000000ULL /* Bluetooth coexistance in alternate pins */
281#define B43_HF_TXBTCHECK 0x000002000000ULL /* Bluetooth check during transmission */
282#define B43_HF_SKCFPUP 0x000004000000ULL /* Skip CFP update */
283#define B43_HF_N40W 0x000008000000ULL /* N PHY 40 MHz workaround (rev >= 13 only) */
284#define B43_HF_ANTSEL 0x000020000000ULL /* Antenna selection (for testing antenna div.) */
285#define B43_HF_BT3COEXT 0x000020000000ULL /* Bluetooth 3-wire coexistence (rev >= 13 only) */
286#define B43_HF_BTCANT 0x000040000000ULL /* Bluetooth coexistence (antenna mode) (rev >= 13 only) */
287#define B43_HF_ANTSELEN 0x000100000000ULL /* Antenna selection enabled (rev >= 13 only) */
288#define B43_HF_ANTSELMODE 0x000200000000ULL /* Antenna selection mode (rev >= 13 only) */
289#define B43_HF_MLADVW 0x001000000000ULL /* N PHY ML ADV workaround (rev >= 13 only) */
290#define B43_HF_PR45960W 0x080000000000ULL /* PR 45960 workaround (rev >= 13 only) */
260 291
261/* MacFilter offsets. */ 292/* MacFilter offsets. */
262#define B43_MACFILTER_SELF 0x0000 293#define B43_MACFILTER_SELF 0x0000
@@ -380,7 +411,6 @@ enum {
380 411
381#define B43_IRQ_ALL 0xFFFFFFFF 412#define B43_IRQ_ALL 0xFFFFFFFF
382#define B43_IRQ_MASKTEMPLATE (B43_IRQ_MAC_SUSPENDED | \ 413#define B43_IRQ_MASKTEMPLATE (B43_IRQ_MAC_SUSPENDED | \
383 B43_IRQ_BEACON | \
384 B43_IRQ_TBTT_INDI | \ 414 B43_IRQ_TBTT_INDI | \
385 B43_IRQ_ATIM_END | \ 415 B43_IRQ_ATIM_END | \
386 B43_IRQ_PMQ | \ 416 B43_IRQ_PMQ | \
@@ -429,7 +459,6 @@ enum {
429}; 459};
430 460
431struct b43_dmaring; 461struct b43_dmaring;
432struct b43_pioqueue;
433 462
434/* The firmware file header */ 463/* The firmware file header */
435#define B43_FW_TYPE_UCODE 'u' 464#define B43_FW_TYPE_UCODE 'u'
@@ -458,20 +487,13 @@ struct b43_iv {
458} __attribute__((__packed__)); 487} __attribute__((__packed__));
459 488
460 489
461#define B43_PHYMODE(phytype) (1 << (phytype))
462#define B43_PHYMODE_A B43_PHYMODE(B43_PHYTYPE_A)
463#define B43_PHYMODE_B B43_PHYMODE(B43_PHYTYPE_B)
464#define B43_PHYMODE_G B43_PHYMODE(B43_PHYTYPE_G)
465
466struct b43_phy { 490struct b43_phy {
467 /* Possible PHYMODEs on this PHY */ 491 /* Band support flags. */
468 u8 possible_phymodes; 492 bool supports_2ghz;
493 bool supports_5ghz;
494
469 /* GMODE bit enabled? */ 495 /* GMODE bit enabled? */
470 bool gmode; 496 bool gmode;
471 /* Possible ieee80211 subsystem hwmodes for this PHY.
472 * Which mode is selected, depends on thr GMODE enabled bit */
473#define B43_MAX_PHYHWMODES 2
474 struct ieee80211_hw_mode hwmodes[B43_MAX_PHYHWMODES];
475 497
476 /* Analog Type */ 498 /* Analog Type */
477 u8 analog; 499 u8 analog;
@@ -583,15 +605,27 @@ struct b43_phy {
583 605
584/* Data structures for DMA transmission, per 80211 core. */ 606/* Data structures for DMA transmission, per 80211 core. */
585struct b43_dma { 607struct b43_dma {
586 struct b43_dmaring *tx_ring0; 608 struct b43_dmaring *tx_ring_AC_BK; /* Background */
587 struct b43_dmaring *tx_ring1; 609 struct b43_dmaring *tx_ring_AC_BE; /* Best Effort */
588 struct b43_dmaring *tx_ring2; 610 struct b43_dmaring *tx_ring_AC_VI; /* Video */
589 struct b43_dmaring *tx_ring3; 611 struct b43_dmaring *tx_ring_AC_VO; /* Voice */
590 struct b43_dmaring *tx_ring4; 612 struct b43_dmaring *tx_ring_mcast; /* Multicast */
591 struct b43_dmaring *tx_ring5; 613
592 614 struct b43_dmaring *rx_ring;
593 struct b43_dmaring *rx_ring0; 615};
594 struct b43_dmaring *rx_ring3; /* only available on core.rev < 5 */ 616
617struct b43_pio_txqueue;
618struct b43_pio_rxqueue;
619
620/* Data structures for PIO transmission, per 80211 core. */
621struct b43_pio {
622 struct b43_pio_txqueue *tx_queue_AC_BK; /* Background */
623 struct b43_pio_txqueue *tx_queue_AC_BE; /* Best Effort */
624 struct b43_pio_txqueue *tx_queue_AC_VI; /* Video */
625 struct b43_pio_txqueue *tx_queue_AC_VO; /* Voice */
626 struct b43_pio_txqueue *tx_queue_mcast; /* Multicast */
627
628 struct b43_pio_rxqueue *rx_queue;
595}; 629};
596 630
597/* Context information for a noise calculation (Link Quality). */ 631/* Context information for a noise calculation (Link Quality). */
@@ -617,6 +651,35 @@ struct b43_key {
617 u8 algorithm; 651 u8 algorithm;
618}; 652};
619 653
654/* SHM offsets to the QOS data structures for the 4 different queues. */
655#define B43_QOS_PARAMS(queue) (B43_SHM_SH_EDCFQ + \
656 (B43_NR_QOSPARAMS * sizeof(u16) * (queue)))
657#define B43_QOS_BACKGROUND B43_QOS_PARAMS(0)
658#define B43_QOS_BESTEFFORT B43_QOS_PARAMS(1)
659#define B43_QOS_VIDEO B43_QOS_PARAMS(2)
660#define B43_QOS_VOICE B43_QOS_PARAMS(3)
661
662/* QOS parameter hardware data structure offsets. */
663#define B43_NR_QOSPARAMS 22
664enum {
665 B43_QOSPARAM_TXOP = 0,
666 B43_QOSPARAM_CWMIN,
667 B43_QOSPARAM_CWMAX,
668 B43_QOSPARAM_CWCUR,
669 B43_QOSPARAM_AIFS,
670 B43_QOSPARAM_BSLOTS,
671 B43_QOSPARAM_REGGAP,
672 B43_QOSPARAM_STATUS,
673};
674
675/* QOS parameters for a queue. */
676struct b43_qos_params {
677 /* The QOS parameters */
678 struct ieee80211_tx_queue_params p;
679 /* Does this need to get uploaded to hardware? */
680 bool need_hw_update;
681};
682
620struct b43_wldev; 683struct b43_wldev;
621 684
622/* Data structure for the WLAN parts (802.11 cores) of the b43 chip. */ 685/* Data structure for the WLAN parts (802.11 cores) of the b43 chip. */
@@ -667,8 +730,16 @@ struct b43_wl {
667 /* The beacon we are currently using (AP or IBSS mode). 730 /* The beacon we are currently using (AP or IBSS mode).
668 * This beacon stuff is protected by the irq_lock. */ 731 * This beacon stuff is protected by the irq_lock. */
669 struct sk_buff *current_beacon; 732 struct sk_buff *current_beacon;
733 struct ieee80211_tx_control beacon_txctl;
670 bool beacon0_uploaded; 734 bool beacon0_uploaded;
671 bool beacon1_uploaded; 735 bool beacon1_uploaded;
736 struct work_struct beacon_update_trigger;
737
738 /* The current QOS parameters for the 4 queues.
739 * This is protected by the irq_lock. */
740 struct b43_qos_params qos_params[4];
741 /* Workqueue for updating QOS parameters in hardware. */
742 struct work_struct qos_update_work;
672}; 743};
673 744
674/* In-memory representation of a cached microcode file. */ 745/* In-memory representation of a cached microcode file. */
@@ -727,7 +798,6 @@ struct b43_wldev {
727 798
728 bool bad_frames_preempt; /* Use "Bad Frames Preemption" (default off) */ 799 bool bad_frames_preempt; /* Use "Bad Frames Preemption" (default off) */
729 bool dfq_valid; /* Directed frame queue valid (IBSS PS mode, ATIM) */ 800 bool dfq_valid; /* Directed frame queue valid (IBSS PS mode, ATIM) */
730 bool short_preamble; /* TRUE, if short preamble is enabled. */
731 bool short_slot; /* TRUE, if short slot timing is enabled. */ 801 bool short_slot; /* TRUE, if short slot timing is enabled. */
732 bool radio_hw_enable; /* saved state of radio hardware enabled state */ 802 bool radio_hw_enable; /* saved state of radio hardware enabled state */
733 bool suspend_in_progress; /* TRUE, if we are in a suspend/resume cycle */ 803 bool suspend_in_progress; /* TRUE, if we are in a suspend/resume cycle */
@@ -735,8 +805,15 @@ struct b43_wldev {
735 /* PHY/Radio device. */ 805 /* PHY/Radio device. */
736 struct b43_phy phy; 806 struct b43_phy phy;
737 807
738 /* DMA engines. */ 808 union {
739 struct b43_dma dma; 809 /* DMA engines. */
810 struct b43_dma dma;
811 /* PIO engines. */
812 struct b43_pio pio;
813 };
814 /* Use b43_using_pio_transfers() to check whether we are using
815 * DMA or PIO data transfers. */
816 bool __using_pio_transfers;
740 817
741 /* Various statistics about the physical device. */ 818 /* Various statistics about the physical device. */
742 struct b43_stats stats; 819 struct b43_stats stats;
@@ -820,6 +897,22 @@ static inline void b43_write32(struct b43_wldev *dev, u16 offset, u32 value)
820 ssb_write32(dev->dev, offset, value); 897 ssb_write32(dev->dev, offset, value);
821} 898}
822 899
900static inline bool b43_using_pio_transfers(struct b43_wldev *dev)
901{
902#ifdef CONFIG_B43_PIO
903 return dev->__using_pio_transfers;
904#else
905 return 0;
906#endif
907}
908
909#ifdef CONFIG_B43_FORCE_PIO
910# define B43_FORCE_PIO 1
911#else
912# define B43_FORCE_PIO 0
913#endif
914
915
823/* Message printing */ 916/* Message printing */
824void b43info(struct b43_wl *wl, const char *fmt, ...) 917void b43info(struct b43_wl *wl, const char *fmt, ...)
825 __attribute__ ((format(printf, 2, 3))); 918 __attribute__ ((format(printf, 2, 3)));
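Because the DMA and PIO state now share a union in struct b43_wldev, only one of the two engines is valid at a time and data-path code is expected to branch on b43_using_pio_transfers(). A hedged sketch of that dispatch pattern; b43_pio_tx() and b43_dma_tx() are assumed transmit entry points and are not defined in this hunk.

	/* Sketch: hand a frame to whichever transfer engine is active. */
	static int tx_dispatch(struct b43_wldev *dev, struct sk_buff *skb)
	{
		if (b43_using_pio_transfers(dev))
			return b43_pio_tx(dev, skb);	/* assumed PIO entry point */
		return b43_dma_tx(dev, skb);		/* assumed DMA entry point */
	}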
diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
index 48e912487b16..21c886a9a1d9 100644
--- a/drivers/net/wireless/b43/dma.c
+++ b/drivers/net/wireless/b43/dma.c
@@ -38,6 +38,7 @@
38#include <linux/delay.h> 38#include <linux/delay.h>
39#include <linux/skbuff.h> 39#include <linux/skbuff.h>
40#include <linux/etherdevice.h> 40#include <linux/etherdevice.h>
41#include <asm/div64.h>
41 42
42 43
43/* 32bit DMA ops. */ 44/* 32bit DMA ops. */
@@ -291,52 +292,6 @@ static inline int request_slot(struct b43_dmaring *ring)
291 return slot; 292 return slot;
292} 293}
293 294
294/* Mac80211-queue to b43-ring mapping */
295static struct b43_dmaring *priority_to_txring(struct b43_wldev *dev,
296 int queue_priority)
297{
298 struct b43_dmaring *ring;
299
300/*FIXME: For now we always run on TX-ring-1 */
301 return dev->dma.tx_ring1;
302
303 /* 0 = highest priority */
304 switch (queue_priority) {
305 default:
306 B43_WARN_ON(1);
307 /* fallthrough */
308 case 0:
309 ring = dev->dma.tx_ring3;
310 break;
311 case 1:
312 ring = dev->dma.tx_ring2;
313 break;
314 case 2:
315 ring = dev->dma.tx_ring1;
316 break;
317 case 3:
318 ring = dev->dma.tx_ring0;
319 break;
320 }
321
322 return ring;
323}
324
325/* b43-ring to mac80211-queue mapping */
326static inline int txring_to_priority(struct b43_dmaring *ring)
327{
328 static const u8 idx_to_prio[] = { 3, 2, 1, 0, };
329 unsigned int index;
330
331/*FIXME: have only one queue, for now */
332 return 0;
333
334 index = ring->index;
335 if (B43_WARN_ON(index >= ARRAY_SIZE(idx_to_prio)))
336 index = 0;
337 return idx_to_prio[index];
338}
339
340static u16 b43_dmacontroller_base(enum b43_dmatype type, int controller_idx) 295static u16 b43_dmacontroller_base(enum b43_dmatype type, int controller_idx)
341{ 296{
342 static const u16 map64[] = { 297 static const u16 map64[] = {
@@ -596,7 +551,6 @@ static int setup_rx_descbuffer(struct b43_dmaring *ring,
596 struct b43_dmadesc_meta *meta, gfp_t gfp_flags) 551 struct b43_dmadesc_meta *meta, gfp_t gfp_flags)
597{ 552{
598 struct b43_rxhdr_fw4 *rxhdr; 553 struct b43_rxhdr_fw4 *rxhdr;
599 struct b43_hwtxstatus *txstat;
600 dma_addr_t dmaaddr; 554 dma_addr_t dmaaddr;
601 struct sk_buff *skb; 555 struct sk_buff *skb;
602 556
@@ -632,8 +586,6 @@ static int setup_rx_descbuffer(struct b43_dmaring *ring,
632 586
633 rxhdr = (struct b43_rxhdr_fw4 *)(skb->data); 587 rxhdr = (struct b43_rxhdr_fw4 *)(skb->data);
634 rxhdr->frame_len = 0; 588 rxhdr->frame_len = 0;
635 txstat = (struct b43_hwtxstatus *)(skb->data);
636 txstat->cookie = 0;
637 589
638 return 0; 590 return 0;
639} 591}
@@ -822,6 +774,18 @@ static u64 supported_dma_mask(struct b43_wldev *dev)
822 return DMA_30BIT_MASK; 774 return DMA_30BIT_MASK;
823} 775}
824 776
777static enum b43_dmatype dma_mask_to_engine_type(u64 dmamask)
778{
779 if (dmamask == DMA_30BIT_MASK)
780 return B43_DMA_30BIT;
781 if (dmamask == DMA_32BIT_MASK)
782 return B43_DMA_32BIT;
783 if (dmamask == DMA_64BIT_MASK)
784 return B43_DMA_64BIT;
785 B43_WARN_ON(1);
786 return B43_DMA_30BIT;
787}
788
825/* Main initialization function. */ 789/* Main initialization function. */
826static 790static
827struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev, 791struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
@@ -937,16 +901,52 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
937 goto out; 901 goto out;
938} 902}
939 903
904#define divide(a, b) ({ \
905 typeof(a) __a = a; \
906 do_div(__a, b); \
907 __a; \
908 })
909
910#define modulo(a, b) ({ \
911 typeof(a) __a = a; \
912 do_div(__a, b); \
913 })
914
940/* Main cleanup function. */ 915/* Main cleanup function. */
941static void b43_destroy_dmaring(struct b43_dmaring *ring) 916static void b43_destroy_dmaring(struct b43_dmaring *ring,
917 const char *ringname)
942{ 918{
943 if (!ring) 919 if (!ring)
944 return; 920 return;
945 921
946 b43dbg(ring->dev->wl, "DMA-%u 0x%04X (%s) max used slots: %d/%d\n", 922#ifdef CONFIG_B43_DEBUG
947 (unsigned int)(ring->type), 923 {
948 ring->mmio_base, 924 /* Print some statistics. */
949 (ring->tx) ? "TX" : "RX", ring->max_used_slots, ring->nr_slots); 925 u64 failed_packets = ring->nr_failed_tx_packets;
926 u64 succeed_packets = ring->nr_succeed_tx_packets;
927 u64 nr_packets = failed_packets + succeed_packets;
928 u64 permille_failed = 0, average_tries = 0;
929
930 if (nr_packets)
931 permille_failed = divide(failed_packets * 1000, nr_packets);
932 if (nr_packets)
933 average_tries = divide(ring->nr_total_packet_tries * 100, nr_packets);
934
935 b43dbg(ring->dev->wl, "DMA-%u %s: "
936 "Used slots %d/%d, Failed frames %llu/%llu = %llu.%01llu%%, "
937 "Average tries %llu.%02llu\n",
938 (unsigned int)(ring->type), ringname,
939 ring->max_used_slots,
940 ring->nr_slots,
941 (unsigned long long)failed_packets,
942 (unsigned long long)nr_packets,
943 (unsigned long long)divide(permille_failed, 10),
944 (unsigned long long)modulo(permille_failed, 10),
945 (unsigned long long)divide(average_tries, 100),
946 (unsigned long long)modulo(average_tries, 100));
947 }
948#endif /* DEBUG */
949
950 /* Device IRQs are disabled prior entering this function, 950 /* Device IRQs are disabled prior entering this function,
951 * so no need to take care of concurrency with rx handler stuff. 951 * so no need to take care of concurrency with rx handler stuff.
952 */ 952 */
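The divide()/modulo() helpers in the hunk above wrap the kernel's do_div(), which divides a u64 in place and evaluates to the remainder (plain 64-bit '/' is not usable on all 32-bit targets). As an aside, a standalone userspace sketch of the same permille/average-tries arithmetic, using ordinary C division purely for illustration and not part of the patch:

/* Illustrative sketch of the TX statistics math above; '/' and '%' stand in for do_div(). */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t failed = 37, succeed = 4963;   /* hypothetical counters */
        uint64_t total_tries = 6200;            /* hypothetical sum of retries */
        uint64_t nr = failed + succeed;
        uint64_t permille_failed = nr ? (failed * 1000) / nr : 0;
        uint64_t average_tries = nr ? (total_tries * 100) / nr : 0;

        printf("Failed frames %llu/%llu = %llu.%01llu%%, Average tries %llu.%02llu\n",
               (unsigned long long)failed, (unsigned long long)nr,
               (unsigned long long)(permille_failed / 10),
               (unsigned long long)(permille_failed % 10),
               (unsigned long long)(average_tries / 100),
               (unsigned long long)(average_tries % 100));
        return 0;
}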
@@ -959,51 +959,36 @@ static void b43_destroy_dmaring(struct b43_dmaring *ring)
959 kfree(ring); 959 kfree(ring);
960} 960}
961 961
962#define destroy_ring(dma, ring) do { \
963 b43_destroy_dmaring((dma)->ring, __stringify(ring)); \
964 (dma)->ring = NULL; \
965 } while (0)
966
962void b43_dma_free(struct b43_wldev *dev) 967void b43_dma_free(struct b43_wldev *dev)
963{ 968{
964 struct b43_dma *dma = &dev->dma; 969 struct b43_dma *dma;
965 970
966 b43_destroy_dmaring(dma->rx_ring3); 971 if (b43_using_pio_transfers(dev))
967 dma->rx_ring3 = NULL; 972 return;
968 b43_destroy_dmaring(dma->rx_ring0); 973 dma = &dev->dma;
969 dma->rx_ring0 = NULL; 974
970 975 destroy_ring(dma, rx_ring);
971 b43_destroy_dmaring(dma->tx_ring5); 976 destroy_ring(dma, tx_ring_AC_BK);
972 dma->tx_ring5 = NULL; 977 destroy_ring(dma, tx_ring_AC_BE);
973 b43_destroy_dmaring(dma->tx_ring4); 978 destroy_ring(dma, tx_ring_AC_VI);
974 dma->tx_ring4 = NULL; 979 destroy_ring(dma, tx_ring_AC_VO);
975 b43_destroy_dmaring(dma->tx_ring3); 980 destroy_ring(dma, tx_ring_mcast);
976 dma->tx_ring3 = NULL;
977 b43_destroy_dmaring(dma->tx_ring2);
978 dma->tx_ring2 = NULL;
979 b43_destroy_dmaring(dma->tx_ring1);
980 dma->tx_ring1 = NULL;
981 b43_destroy_dmaring(dma->tx_ring0);
982 dma->tx_ring0 = NULL;
983} 981}
984 982
985int b43_dma_init(struct b43_wldev *dev) 983int b43_dma_init(struct b43_wldev *dev)
986{ 984{
987 struct b43_dma *dma = &dev->dma; 985 struct b43_dma *dma = &dev->dma;
988 struct b43_dmaring *ring;
989 int err; 986 int err;
990 u64 dmamask; 987 u64 dmamask;
991 enum b43_dmatype type; 988 enum b43_dmatype type;
992 989
993 dmamask = supported_dma_mask(dev); 990 dmamask = supported_dma_mask(dev);
994 switch (dmamask) { 991 type = dma_mask_to_engine_type(dmamask);
995 default:
996 B43_WARN_ON(1);
997 case DMA_30BIT_MASK:
998 type = B43_DMA_30BIT;
999 break;
1000 case DMA_32BIT_MASK:
1001 type = B43_DMA_32BIT;
1002 break;
1003 case DMA_64BIT_MASK:
1004 type = B43_DMA_64BIT;
1005 break;
1006 }
1007 err = ssb_dma_set_mask(dev->dev, dmamask); 992 err = ssb_dma_set_mask(dev->dev, dmamask);
1008 if (err) { 993 if (err) {
1009 b43err(dev->wl, "The machine/kernel does not support " 994 b43err(dev->wl, "The machine/kernel does not support "
@@ -1015,83 +1000,57 @@ int b43_dma_init(struct b43_wldev *dev)
1015 1000
1016 err = -ENOMEM; 1001 err = -ENOMEM;
1017 /* setup TX DMA channels. */ 1002 /* setup TX DMA channels. */
1018 ring = b43_setup_dmaring(dev, 0, 1, type); 1003 dma->tx_ring_AC_BK = b43_setup_dmaring(dev, 0, 1, type);
1019 if (!ring) 1004 if (!dma->tx_ring_AC_BK)
1020 goto out; 1005 goto out;
1021 dma->tx_ring0 = ring;
1022 1006
1023 ring = b43_setup_dmaring(dev, 1, 1, type); 1007 dma->tx_ring_AC_BE = b43_setup_dmaring(dev, 1, 1, type);
1024 if (!ring) 1008 if (!dma->tx_ring_AC_BE)
1025 goto err_destroy_tx0; 1009 goto err_destroy_bk;
1026 dma->tx_ring1 = ring;
1027 1010
1028 ring = b43_setup_dmaring(dev, 2, 1, type); 1011 dma->tx_ring_AC_VI = b43_setup_dmaring(dev, 2, 1, type);
1029 if (!ring) 1012 if (!dma->tx_ring_AC_VI)
1030 goto err_destroy_tx1; 1013 goto err_destroy_be;
1031 dma->tx_ring2 = ring;
1032 1014
1033 ring = b43_setup_dmaring(dev, 3, 1, type); 1015 dma->tx_ring_AC_VO = b43_setup_dmaring(dev, 3, 1, type);
1034 if (!ring) 1016 if (!dma->tx_ring_AC_VO)
1035 goto err_destroy_tx2; 1017 goto err_destroy_vi;
1036 dma->tx_ring3 = ring;
1037 1018
1038 ring = b43_setup_dmaring(dev, 4, 1, type); 1019 dma->tx_ring_mcast = b43_setup_dmaring(dev, 4, 1, type);
1039 if (!ring) 1020 if (!dma->tx_ring_mcast)
1040 goto err_destroy_tx3; 1021 goto err_destroy_vo;
1041 dma->tx_ring4 = ring;
1042 1022
1043 ring = b43_setup_dmaring(dev, 5, 1, type); 1023 /* setup RX DMA channel. */
1044 if (!ring) 1024 dma->rx_ring = b43_setup_dmaring(dev, 0, 0, type);
1045 goto err_destroy_tx4; 1025 if (!dma->rx_ring)
1046 dma->tx_ring5 = ring; 1026 goto err_destroy_mcast;
1047 1027
1048 /* setup RX DMA channels. */ 1028 /* No support for the TX status DMA ring. */
1049 ring = b43_setup_dmaring(dev, 0, 0, type); 1029 B43_WARN_ON(dev->dev->id.revision < 5);
1050 if (!ring)
1051 goto err_destroy_tx5;
1052 dma->rx_ring0 = ring;
1053
1054 if (dev->dev->id.revision < 5) {
1055 ring = b43_setup_dmaring(dev, 3, 0, type);
1056 if (!ring)
1057 goto err_destroy_rx0;
1058 dma->rx_ring3 = ring;
1059 }
1060 1030
1061 b43dbg(dev->wl, "%u-bit DMA initialized\n", 1031 b43dbg(dev->wl, "%u-bit DMA initialized\n",
1062 (unsigned int)type); 1032 (unsigned int)type);
1063 err = 0; 1033 err = 0;
1064 out: 1034out:
1065 return err; 1035 return err;
1066 1036
1067 err_destroy_rx0: 1037err_destroy_mcast:
1068 b43_destroy_dmaring(dma->rx_ring0); 1038 destroy_ring(dma, tx_ring_mcast);
1069 dma->rx_ring0 = NULL; 1039err_destroy_vo:
1070 err_destroy_tx5: 1040 destroy_ring(dma, tx_ring_AC_VO);
1071 b43_destroy_dmaring(dma->tx_ring5); 1041err_destroy_vi:
1072 dma->tx_ring5 = NULL; 1042 destroy_ring(dma, tx_ring_AC_VI);
1073 err_destroy_tx4: 1043err_destroy_be:
1074 b43_destroy_dmaring(dma->tx_ring4); 1044 destroy_ring(dma, tx_ring_AC_BE);
1075 dma->tx_ring4 = NULL; 1045err_destroy_bk:
1076 err_destroy_tx3: 1046 destroy_ring(dma, tx_ring_AC_BK);
1077 b43_destroy_dmaring(dma->tx_ring3); 1047 return err;
1078 dma->tx_ring3 = NULL;
1079 err_destroy_tx2:
1080 b43_destroy_dmaring(dma->tx_ring2);
1081 dma->tx_ring2 = NULL;
1082 err_destroy_tx1:
1083 b43_destroy_dmaring(dma->tx_ring1);
1084 dma->tx_ring1 = NULL;
1085 err_destroy_tx0:
1086 b43_destroy_dmaring(dma->tx_ring0);
1087 dma->tx_ring0 = NULL;
1088 goto out;
1089} 1048}
1090 1049
1091/* Generate a cookie for the TX header. */ 1050/* Generate a cookie for the TX header. */
1092static u16 generate_cookie(struct b43_dmaring *ring, int slot) 1051static u16 generate_cookie(struct b43_dmaring *ring, int slot)
1093{ 1052{
1094 u16 cookie = 0x1000; 1053 u16 cookie;
1095 1054
1096 /* Use the upper 4 bits of the cookie as 1055 /* Use the upper 4 bits of the cookie as
1097 * DMA controller ID and store the slot number 1056 * DMA controller ID and store the slot number
@@ -1101,30 +1060,9 @@ static u16 generate_cookie(struct b43_dmaring *ring, int slot)
1101 * It can also not be 0xFFFF because that is special 1060 * It can also not be 0xFFFF because that is special
1102 * for multicast frames. 1061 * for multicast frames.
1103 */ 1062 */
1104 switch (ring->index) { 1063 cookie = (((u16)ring->index + 1) << 12);
1105 case 0:
1106 cookie = 0x1000;
1107 break;
1108 case 1:
1109 cookie = 0x2000;
1110 break;
1111 case 2:
1112 cookie = 0x3000;
1113 break;
1114 case 3:
1115 cookie = 0x4000;
1116 break;
1117 case 4:
1118 cookie = 0x5000;
1119 break;
1120 case 5:
1121 cookie = 0x6000;
1122 break;
1123 default:
1124 B43_WARN_ON(1);
1125 }
1126 B43_WARN_ON(slot & ~0x0FFF); 1064 B43_WARN_ON(slot & ~0x0FFF);
1127 cookie |= (u16) slot; 1065 cookie |= (u16)slot;
1128 1066
1129 return cookie; 1067 return cookie;
1130} 1068}
@@ -1138,22 +1076,19 @@ struct b43_dmaring *parse_cookie(struct b43_wldev *dev, u16 cookie, int *slot)
1138 1076
1139 switch (cookie & 0xF000) { 1077 switch (cookie & 0xF000) {
1140 case 0x1000: 1078 case 0x1000:
1141 ring = dma->tx_ring0; 1079 ring = dma->tx_ring_AC_BK;
1142 break; 1080 break;
1143 case 0x2000: 1081 case 0x2000:
1144 ring = dma->tx_ring1; 1082 ring = dma->tx_ring_AC_BE;
1145 break; 1083 break;
1146 case 0x3000: 1084 case 0x3000:
1147 ring = dma->tx_ring2; 1085 ring = dma->tx_ring_AC_VI;
1148 break; 1086 break;
1149 case 0x4000: 1087 case 0x4000:
1150 ring = dma->tx_ring3; 1088 ring = dma->tx_ring_AC_VO;
1151 break; 1089 break;
1152 case 0x5000: 1090 case 0x5000:
1153 ring = dma->tx_ring4; 1091 ring = dma->tx_ring_mcast;
1154 break;
1155 case 0x6000:
1156 ring = dma->tx_ring5;
1157 break; 1092 break;
1158 default: 1093 default:
1159 B43_WARN_ON(1); 1094 B43_WARN_ON(1);
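The cookie scheme simplified above packs the ring index plus one into the upper 4 bits (so 0x0000 stays unused and 0xFFFF stays reserved for multicast) and the slot into the lower 12 bits; parse_cookie() reverses it. A minimal round-trip sketch of that packing, independent of the driver structures and only meant to illustrate the bit layout:

/* Illustrative sketch of the TX-status cookie packing used above. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint16_t pack_cookie(unsigned ring_index, unsigned slot)
{
        assert(ring_index <= 4);        /* five TX rings: BK, BE, VI, VO, mcast */
        assert(slot <= 0x0FFF);         /* slot must fit in 12 bits */
        return (uint16_t)(((ring_index + 1) << 12) | slot);
}

static void unpack_cookie(uint16_t cookie, unsigned *ring_index, unsigned *slot)
{
        *ring_index = (cookie >> 12) - 1;
        *slot = cookie & 0x0FFF;
}

int main(void)
{
        unsigned ring, slot;
        uint16_t c = pack_cookie(3, 42);        /* ring 3 (AC_VO), slot 42 */

        unpack_cookie(c, &ring, &slot);
        printf("cookie=0x%04X ring=%u slot=%u\n", (unsigned)c, ring, slot);
        return 0;
}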
@@ -1180,7 +1115,6 @@ static int dma_tx_fragment(struct b43_dmaring *ring,
1180 size_t hdrsize = b43_txhdr_size(ring->dev); 1115 size_t hdrsize = b43_txhdr_size(ring->dev);
1181 1116
1182#define SLOTS_PER_PACKET 2 1117#define SLOTS_PER_PACKET 2
1183 B43_WARN_ON(skb_shinfo(skb)->nr_frags);
1184 1118
1185 old_top_slot = ring->current_slot; 1119 old_top_slot = ring->current_slot;
1186 old_used_slots = ring->used_slots; 1120 old_used_slots = ring->used_slots;
@@ -1285,6 +1219,37 @@ static inline int should_inject_overflow(struct b43_dmaring *ring)
1285 return 0; 1219 return 0;
1286} 1220}
1287 1221
1222/* Static mapping of mac80211's queues (priorities) to b43 DMA rings. */
1223static struct b43_dmaring * select_ring_by_priority(struct b43_wldev *dev,
1224 u8 queue_prio)
1225{
1226 struct b43_dmaring *ring;
1227
1228 if (b43_modparam_qos) {
1229 /* 0 = highest priority */
1230 switch (queue_prio) {
1231 default:
1232 B43_WARN_ON(1);
1233 /* fallthrough */
1234 case 0:
1235 ring = dev->dma.tx_ring_AC_VO;
1236 break;
1237 case 1:
1238 ring = dev->dma.tx_ring_AC_VI;
1239 break;
1240 case 2:
1241 ring = dev->dma.tx_ring_AC_BE;
1242 break;
1243 case 3:
1244 ring = dev->dma.tx_ring_AC_BK;
1245 break;
1246 }
1247 } else
1248 ring = dev->dma.tx_ring_AC_BE;
1249
1250 return ring;
1251}
1252
1288int b43_dma_tx(struct b43_wldev *dev, 1253int b43_dma_tx(struct b43_wldev *dev,
1289 struct sk_buff *skb, struct ieee80211_tx_control *ctl) 1254 struct sk_buff *skb, struct ieee80211_tx_control *ctl)
1290{ 1255{
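select_ring_by_priority() above keeps a static mapping from mac80211 queue priority (0 = highest) to the four access-category rings, and falls back to best effort when the qos module parameter is off. The same mapping can be expressed as a lookup table; the names below are stand-ins for the dev->dma ring pointers, not driver code:

/* Illustrative sketch of the static queue-priority -> AC ring mapping above. */
#include <stdio.h>

enum ac_ring { RING_AC_BK, RING_AC_BE, RING_AC_VI, RING_AC_VO };

static const char *ring_name[] = { "AC_BK", "AC_BE", "AC_VI", "AC_VO" };

static enum ac_ring ring_for_priority(unsigned queue_prio, int qos_enabled)
{
        /* 0 = highest priority (VO), 3 = lowest (BK); without QoS
         * everything goes to best effort, as in the driver. */
        static const enum ac_ring prio_to_ring[] = {
                RING_AC_VO, RING_AC_VI, RING_AC_BE, RING_AC_BK,
        };

        if (!qos_enabled || queue_prio > 3)
                return RING_AC_BE;
        return prio_to_ring[queue_prio];
}

int main(void)
{
        for (unsigned prio = 0; prio < 4; prio++)
                printf("prio %u -> %s\n", prio,
                       ring_name[ring_for_priority(prio, 1)]);
        return 0;
}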
@@ -1293,21 +1258,16 @@ int b43_dma_tx(struct b43_wldev *dev,
1293 int err = 0; 1258 int err = 0;
1294 unsigned long flags; 1259 unsigned long flags;
1295 1260
1296 if (unlikely(skb->len < 2 + 2 + 6)) {
1297 /* Too short, this can't be a valid frame. */
1298 return -EINVAL;
1299 }
1300
1301 hdr = (struct ieee80211_hdr *)skb->data; 1261 hdr = (struct ieee80211_hdr *)skb->data;
1302 if (ctl->flags & IEEE80211_TXCTL_SEND_AFTER_DTIM) { 1262 if (ctl->flags & IEEE80211_TXCTL_SEND_AFTER_DTIM) {
1303 /* The multicast ring will be sent after the DTIM */ 1263 /* The multicast ring will be sent after the DTIM */
1304 ring = dev->dma.tx_ring4; 1264 ring = dev->dma.tx_ring_mcast;
1305 /* Set the more-data bit. Ucode will clear it on 1265 /* Set the more-data bit. Ucode will clear it on
1306 * the last frame for us. */ 1266 * the last frame for us. */
1307 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA); 1267 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
1308 } else { 1268 } else {
1309 /* Decide by priority where to put this frame. */ 1269 /* Decide by priority where to put this frame. */
1310 ring = priority_to_txring(dev, ctl->queue); 1270 ring = select_ring_by_priority(dev, ctl->queue);
1311 } 1271 }
1312 1272
1313 spin_lock_irqsave(&ring->lock, flags); 1273 spin_lock_irqsave(&ring->lock, flags);
@@ -1322,6 +1282,11 @@ int b43_dma_tx(struct b43_wldev *dev,
1322 * That would be a mac80211 bug. */ 1282 * That would be a mac80211 bug. */
1323 B43_WARN_ON(ring->stopped); 1283 B43_WARN_ON(ring->stopped);
1324 1284
1285 /* Assign the queue number to the ring (if not already done before)
1286 * so TX status handling can use it. The queue to ring mapping is
1287 * static, so we don't need to store it per frame. */
1288 ring->queue_prio = ctl->queue;
1289
1325 err = dma_tx_fragment(ring, skb, ctl); 1290 err = dma_tx_fragment(ring, skb, ctl);
1326 if (unlikely(err == -ENOKEY)) { 1291 if (unlikely(err == -ENOKEY)) {
1327 /* Drop this packet, as we don't have the encryption key 1292 /* Drop this packet, as we don't have the encryption key
@@ -1338,7 +1303,7 @@ int b43_dma_tx(struct b43_wldev *dev,
1338 if ((free_slots(ring) < SLOTS_PER_PACKET) || 1303 if ((free_slots(ring) < SLOTS_PER_PACKET) ||
1339 should_inject_overflow(ring)) { 1304 should_inject_overflow(ring)) {
1340 /* This TX ring is full. */ 1305 /* This TX ring is full. */
1341 ieee80211_stop_queue(dev->wl->hw, txring_to_priority(ring)); 1306 ieee80211_stop_queue(dev->wl->hw, ctl->queue);
1342 ring->stopped = 1; 1307 ring->stopped = 1;
1343 if (b43_debug(dev, B43_DBG_DMAVERBOSE)) { 1308 if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
1344 b43dbg(dev->wl, "Stopped TX ring %d\n", ring->index); 1309 b43dbg(dev->wl, "Stopped TX ring %d\n", ring->index);
@@ -1359,6 +1324,7 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
1359 struct b43_dmadesc_generic *desc; 1324 struct b43_dmadesc_generic *desc;
1360 struct b43_dmadesc_meta *meta; 1325 struct b43_dmadesc_meta *meta;
1361 int slot; 1326 int slot;
1327 bool frame_succeed;
1362 1328
1363 ring = parse_cookie(dev, status->cookie, &slot); 1329 ring = parse_cookie(dev, status->cookie, &slot);
1364 if (unlikely(!ring)) 1330 if (unlikely(!ring))
@@ -1385,18 +1351,15 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
1385 * status of the transmission. 1351 * status of the transmission.
1386 * Some fields of txstat are already filled in dma_tx(). 1352 * Some fields of txstat are already filled in dma_tx().
1387 */ 1353 */
1388 if (status->acked) { 1354 frame_succeed = b43_fill_txstatus_report(
1389 meta->txstat.flags |= IEEE80211_TX_STATUS_ACK; 1355 &(meta->txstat), status);
1390 } else { 1356#ifdef CONFIG_B43_DEBUG
1391 if (!(meta->txstat.control.flags 1357 if (frame_succeed)
1392 & IEEE80211_TXCTL_NO_ACK)) 1358 ring->nr_succeed_tx_packets++;
1393 meta->txstat.excessive_retries = 1; 1359 else
1394 } 1360 ring->nr_failed_tx_packets++;
1395 if (status->frame_count == 0) { 1361 ring->nr_total_packet_tries += status->frame_count;
1396 /* The frame was not transmitted at all. */ 1362#endif /* DEBUG */
1397 meta->txstat.retry_count = 0;
1398 } else
1399 meta->txstat.retry_count = status->frame_count - 1;
1400 ieee80211_tx_status_irqsafe(dev->wl->hw, meta->skb, 1363 ieee80211_tx_status_irqsafe(dev->wl->hw, meta->skb,
1401 &(meta->txstat)); 1364 &(meta->txstat));
1402 /* skb is freed by ieee80211_tx_status_irqsafe() */ 1365 /* skb is freed by ieee80211_tx_status_irqsafe() */
@@ -1418,7 +1381,7 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
1418 dev->stats.last_tx = jiffies; 1381 dev->stats.last_tx = jiffies;
1419 if (ring->stopped) { 1382 if (ring->stopped) {
1420 B43_WARN_ON(free_slots(ring) < SLOTS_PER_PACKET); 1383 B43_WARN_ON(free_slots(ring) < SLOTS_PER_PACKET);
1421 ieee80211_wake_queue(dev->wl->hw, txring_to_priority(ring)); 1384 ieee80211_wake_queue(dev->wl->hw, ring->queue_prio);
1422 ring->stopped = 0; 1385 ring->stopped = 0;
1423 if (b43_debug(dev, B43_DBG_DMAVERBOSE)) { 1386 if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
1424 b43dbg(dev->wl, "Woke up TX ring %d\n", ring->index); 1387 b43dbg(dev->wl, "Woke up TX ring %d\n", ring->index);
@@ -1439,7 +1402,7 @@ void b43_dma_get_tx_stats(struct b43_wldev *dev,
1439 1402
1440 for (i = 0; i < nr_queues; i++) { 1403 for (i = 0; i < nr_queues; i++) {
1441 data = &(stats->data[i]); 1404 data = &(stats->data[i]);
1442 ring = priority_to_txring(dev, i); 1405 ring = select_ring_by_priority(dev, i);
1443 1406
1444 spin_lock_irqsave(&ring->lock, flags); 1407 spin_lock_irqsave(&ring->lock, flags);
1445 data->len = ring->used_slots / SLOTS_PER_PACKET; 1408 data->len = ring->used_slots / SLOTS_PER_PACKET;
@@ -1465,25 +1428,6 @@ static void dma_rx(struct b43_dmaring *ring, int *slot)
1465 sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize); 1428 sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize);
1466 skb = meta->skb; 1429 skb = meta->skb;
1467 1430
1468 if (ring->index == 3) {
1469 /* We received an xmit status. */
1470 struct b43_hwtxstatus *hw = (struct b43_hwtxstatus *)skb->data;
1471 int i = 0;
1472
1473 while (hw->cookie == 0) {
1474 if (i > 100)
1475 break;
1476 i++;
1477 udelay(2);
1478 barrier();
1479 }
1480 b43_handle_hwtxstatus(ring->dev, hw);
1481 /* recycle the descriptor buffer. */
1482 sync_descbuffer_for_device(ring, meta->dmaaddr,
1483 ring->rx_buffersize);
1484
1485 return;
1486 }
1487 rxhdr = (struct b43_rxhdr_fw4 *)skb->data; 1431 rxhdr = (struct b43_rxhdr_fw4 *)skb->data;
1488 len = le16_to_cpu(rxhdr->frame_len); 1432 len = le16_to_cpu(rxhdr->frame_len);
1489 if (len == 0) { 1433 if (len == 0) {
@@ -1540,7 +1484,7 @@ static void dma_rx(struct b43_dmaring *ring, int *slot)
1540 skb_pull(skb, ring->frameoffset); 1484 skb_pull(skb, ring->frameoffset);
1541 1485
1542 b43_rx(ring->dev, skb, rxhdr); 1486 b43_rx(ring->dev, skb, rxhdr);
1543 drop: 1487drop:
1544 return; 1488 return;
1545} 1489}
1546 1490
@@ -1586,21 +1530,55 @@ static void b43_dma_tx_resume_ring(struct b43_dmaring *ring)
1586void b43_dma_tx_suspend(struct b43_wldev *dev) 1530void b43_dma_tx_suspend(struct b43_wldev *dev)
1587{ 1531{
1588 b43_power_saving_ctl_bits(dev, B43_PS_AWAKE); 1532 b43_power_saving_ctl_bits(dev, B43_PS_AWAKE);
1589 b43_dma_tx_suspend_ring(dev->dma.tx_ring0); 1533 b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_BK);
1590 b43_dma_tx_suspend_ring(dev->dma.tx_ring1); 1534 b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_BE);
1591 b43_dma_tx_suspend_ring(dev->dma.tx_ring2); 1535 b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_VI);
1592 b43_dma_tx_suspend_ring(dev->dma.tx_ring3); 1536 b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_VO);
1593 b43_dma_tx_suspend_ring(dev->dma.tx_ring4); 1537 b43_dma_tx_suspend_ring(dev->dma.tx_ring_mcast);
1594 b43_dma_tx_suspend_ring(dev->dma.tx_ring5);
1595} 1538}
1596 1539
1597void b43_dma_tx_resume(struct b43_wldev *dev) 1540void b43_dma_tx_resume(struct b43_wldev *dev)
1598{ 1541{
1599 b43_dma_tx_resume_ring(dev->dma.tx_ring5); 1542 b43_dma_tx_resume_ring(dev->dma.tx_ring_mcast);
1600 b43_dma_tx_resume_ring(dev->dma.tx_ring4); 1543 b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_VO);
1601 b43_dma_tx_resume_ring(dev->dma.tx_ring3); 1544 b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_VI);
1602 b43_dma_tx_resume_ring(dev->dma.tx_ring2); 1545 b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_BE);
1603 b43_dma_tx_resume_ring(dev->dma.tx_ring1); 1546 b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_BK);
1604 b43_dma_tx_resume_ring(dev->dma.tx_ring0);
1605 b43_power_saving_ctl_bits(dev, 0); 1547 b43_power_saving_ctl_bits(dev, 0);
1606} 1548}
1549
1550#ifdef CONFIG_B43_PIO
1551static void direct_fifo_rx(struct b43_wldev *dev, enum b43_dmatype type,
1552 u16 mmio_base, bool enable)
1553{
1554 u32 ctl;
1555
1556 if (type == B43_DMA_64BIT) {
1557 ctl = b43_read32(dev, mmio_base + B43_DMA64_RXCTL);
1558 ctl &= ~B43_DMA64_RXDIRECTFIFO;
1559 if (enable)
1560 ctl |= B43_DMA64_RXDIRECTFIFO;
1561 b43_write32(dev, mmio_base + B43_DMA64_RXCTL, ctl);
1562 } else {
1563 ctl = b43_read32(dev, mmio_base + B43_DMA32_RXCTL);
1564 ctl &= ~B43_DMA32_RXDIRECTFIFO;
1565 if (enable)
1566 ctl |= B43_DMA32_RXDIRECTFIFO;
1567 b43_write32(dev, mmio_base + B43_DMA32_RXCTL, ctl);
1568 }
1569}
1570
1571/* Enable/Disable Direct FIFO Receive Mode (PIO) on a RX engine.
1572 * This is called from PIO code, so DMA structures are not available. */
1573void b43_dma_direct_fifo_rx(struct b43_wldev *dev,
1574 unsigned int engine_index, bool enable)
1575{
1576 enum b43_dmatype type;
1577 u16 mmio_base;
1578
1579 type = dma_mask_to_engine_type(supported_dma_mask(dev));
1580
1581 mmio_base = b43_dmacontroller_base(type, engine_index);
1582 direct_fifo_rx(dev, type, mmio_base, enable);
1583}
1584#endif /* CONFIG_B43_PIO */
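The new direct_fifo_rx() helper above is a plain read-modify-write of one RX control bit, with the register offset and bit position chosen by DMA engine type. A generic sketch of that pattern follows; the register access is simulated and the bit value is hypothetical, the real B43_DMA32/64_RXDIRECTFIFO definitions live in dma.h:

/* Illustrative sketch of the read-modify-write pattern in direct_fifo_rx(). */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t fake_reg;                       /* stands in for the MMIO register */

static uint32_t reg_read(void) { return fake_reg; }
static void reg_write(uint32_t v) { fake_reg = v; }

static void set_direct_fifo(bool enable, uint32_t direct_fifo_bit)
{
        uint32_t ctl = reg_read();

        ctl &= ~direct_fifo_bit;                /* always clear first */
        if (enable)
                ctl |= direct_fifo_bit;         /* then set when requested */
        reg_write(ctl);
}

int main(void)
{
        const uint32_t DIRECT_FIFO = 0x00000100;        /* hypothetical bit */

        set_direct_fifo(true, DIRECT_FIFO);
        printf("ctl=0x%08X\n", (unsigned)fake_reg);
        set_direct_fifo(false, DIRECT_FIFO);
        printf("ctl=0x%08X\n", (unsigned)fake_reg);
        return 0;
}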
diff --git a/drivers/net/wireless/b43/dma.h b/drivers/net/wireless/b43/dma.h
index c0d6b69e6501..20acf885dba5 100644
--- a/drivers/net/wireless/b43/dma.h
+++ b/drivers/net/wireless/b43/dma.h
@@ -245,6 +245,9 @@ struct b43_dmaring {
245 enum b43_dmatype type; 245 enum b43_dmatype type;
246 /* Boolean. Is this ring stopped at ieee80211 level? */ 246 /* Boolean. Is this ring stopped at ieee80211 level? */
247 bool stopped; 247 bool stopped;
248 /* The QOS priority assigned to this ring. Only used for TX rings.
249 * This is the mac80211 "queue" value. */
250 u8 queue_prio;
248 /* Lock, only used for TX. */ 251 /* Lock, only used for TX. */
249 spinlock_t lock; 252 spinlock_t lock;
250 struct b43_wldev *dev; 253 struct b43_wldev *dev;
@@ -253,7 +256,13 @@ struct b43_dmaring {
253 int max_used_slots; 256 int max_used_slots;
254 /* Last time we injected a ring overflow. */ 257 /* Last time we injected a ring overflow. */
255 unsigned long last_injected_overflow; 258 unsigned long last_injected_overflow;
256#endif /* CONFIG_B43_DEBUG */ 259 /* Statistics: Number of successfully transmitted packets */
260 u64 nr_succeed_tx_packets;
261 /* Statistics: Number of failed TX packets */
262 u64 nr_failed_tx_packets;
263 /* Statistics: Total number of TX plus all retries. */
264 u64 nr_total_packet_tries;
265#endif /* CONFIG_B43_DEBUG */
257}; 266};
258 267
259static inline u32 b43_dma_read(struct b43_dmaring *ring, u16 offset) 268static inline u32 b43_dma_read(struct b43_dmaring *ring, u16 offset)
@@ -282,4 +291,7 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
282 291
283void b43_dma_rx(struct b43_dmaring *ring); 292void b43_dma_rx(struct b43_dmaring *ring);
284 293
294void b43_dma_direct_fifo_rx(struct b43_wldev *dev,
295 unsigned int engine_index, bool enable);
296
285#endif /* B43_DMA_H_ */ 297#endif /* B43_DMA_H_ */
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index c73a75b24cd6..cf5c046c9fa8 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -46,7 +46,9 @@
46#include "main.h" 46#include "main.h"
47#include "debugfs.h" 47#include "debugfs.h"
48#include "phy.h" 48#include "phy.h"
49#include "nphy.h"
49#include "dma.h" 50#include "dma.h"
51#include "pio.h"
50#include "sysfs.h" 52#include "sysfs.h"
51#include "xmit.h" 53#include "xmit.h"
52#include "lo.h" 54#include "lo.h"
@@ -78,6 +80,11 @@ static int modparam_nohwcrypt;
78module_param_named(nohwcrypt, modparam_nohwcrypt, int, 0444); 80module_param_named(nohwcrypt, modparam_nohwcrypt, int, 0444);
79MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption."); 81MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
80 82
83int b43_modparam_qos = 1;
84module_param_named(qos, b43_modparam_qos, int, 0444);
85MODULE_PARM_DESC(qos, "Enable QOS support (default on)");
86
87
81static const struct ssb_device_id b43_ssb_tbl[] = { 88static const struct ssb_device_id b43_ssb_tbl[] = {
82 SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 5), 89 SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 5),
83 SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 6), 90 SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 6),
@@ -96,25 +103,29 @@ MODULE_DEVICE_TABLE(ssb, b43_ssb_tbl);
96 * data in there. This data is the same for all devices, so we don't 103 * data in there. This data is the same for all devices, so we don't
97 * get concurrency issues */ 104 * get concurrency issues */
98#define RATETAB_ENT(_rateid, _flags) \ 105#define RATETAB_ENT(_rateid, _flags) \
99 { \ 106 { \
100 .rate = B43_RATE_TO_BASE100KBPS(_rateid), \ 107 .bitrate = B43_RATE_TO_BASE100KBPS(_rateid), \
101 .val = (_rateid), \ 108 .hw_value = (_rateid), \
102 .val2 = (_rateid), \ 109 .flags = (_flags), \
103 .flags = (_flags), \
104 } 110 }
111
112/*
113 * NOTE: When changing this, sync with xmit.c's
114 * b43_plcp_get_bitrate_idx_* functions!
115 */
105static struct ieee80211_rate __b43_ratetable[] = { 116static struct ieee80211_rate __b43_ratetable[] = {
106 RATETAB_ENT(B43_CCK_RATE_1MB, IEEE80211_RATE_CCK), 117 RATETAB_ENT(B43_CCK_RATE_1MB, 0),
107 RATETAB_ENT(B43_CCK_RATE_2MB, IEEE80211_RATE_CCK_2), 118 RATETAB_ENT(B43_CCK_RATE_2MB, IEEE80211_RATE_SHORT_PREAMBLE),
108 RATETAB_ENT(B43_CCK_RATE_5MB, IEEE80211_RATE_CCK_2), 119 RATETAB_ENT(B43_CCK_RATE_5MB, IEEE80211_RATE_SHORT_PREAMBLE),
109 RATETAB_ENT(B43_CCK_RATE_11MB, IEEE80211_RATE_CCK_2), 120 RATETAB_ENT(B43_CCK_RATE_11MB, IEEE80211_RATE_SHORT_PREAMBLE),
110 RATETAB_ENT(B43_OFDM_RATE_6MB, IEEE80211_RATE_OFDM), 121 RATETAB_ENT(B43_OFDM_RATE_6MB, 0),
111 RATETAB_ENT(B43_OFDM_RATE_9MB, IEEE80211_RATE_OFDM), 122 RATETAB_ENT(B43_OFDM_RATE_9MB, 0),
112 RATETAB_ENT(B43_OFDM_RATE_12MB, IEEE80211_RATE_OFDM), 123 RATETAB_ENT(B43_OFDM_RATE_12MB, 0),
113 RATETAB_ENT(B43_OFDM_RATE_18MB, IEEE80211_RATE_OFDM), 124 RATETAB_ENT(B43_OFDM_RATE_18MB, 0),
114 RATETAB_ENT(B43_OFDM_RATE_24MB, IEEE80211_RATE_OFDM), 125 RATETAB_ENT(B43_OFDM_RATE_24MB, 0),
115 RATETAB_ENT(B43_OFDM_RATE_36MB, IEEE80211_RATE_OFDM), 126 RATETAB_ENT(B43_OFDM_RATE_36MB, 0),
116 RATETAB_ENT(B43_OFDM_RATE_48MB, IEEE80211_RATE_OFDM), 127 RATETAB_ENT(B43_OFDM_RATE_48MB, 0),
117 RATETAB_ENT(B43_OFDM_RATE_54MB, IEEE80211_RATE_OFDM), 128 RATETAB_ENT(B43_OFDM_RATE_54MB, 0),
118}; 129};
119 130
120#define b43_a_ratetable (__b43_ratetable + 4) 131#define b43_a_ratetable (__b43_ratetable + 4)
@@ -124,53 +135,144 @@ static struct ieee80211_rate __b43_ratetable[] = {
124#define b43_g_ratetable (__b43_ratetable + 0) 135#define b43_g_ratetable (__b43_ratetable + 0)
125#define b43_g_ratetable_size 12 136#define b43_g_ratetable_size 12
126 137
127#define CHANTAB_ENT(_chanid, _freq) \ 138#define CHAN4G(_channel, _freq, _flags) { \
128 { \ 139 .band = IEEE80211_BAND_2GHZ, \
129 .chan = (_chanid), \ 140 .center_freq = (_freq), \
130 .freq = (_freq), \ 141 .hw_value = (_channel), \
131 .val = (_chanid), \ 142 .flags = (_flags), \
132 .flag = IEEE80211_CHAN_W_SCAN | \ 143 .max_antenna_gain = 0, \
133 IEEE80211_CHAN_W_ACTIVE_SCAN | \ 144 .max_power = 30, \
134 IEEE80211_CHAN_W_IBSS, \ 145}
135 .power_level = 0xFF, \
136 .antenna_max = 0xFF, \
137 }
138static struct ieee80211_channel b43_2ghz_chantable[] = { 146static struct ieee80211_channel b43_2ghz_chantable[] = {
139 CHANTAB_ENT(1, 2412), 147 CHAN4G(1, 2412, 0),
140 CHANTAB_ENT(2, 2417), 148 CHAN4G(2, 2417, 0),
141 CHANTAB_ENT(3, 2422), 149 CHAN4G(3, 2422, 0),
142 CHANTAB_ENT(4, 2427), 150 CHAN4G(4, 2427, 0),
143 CHANTAB_ENT(5, 2432), 151 CHAN4G(5, 2432, 0),
144 CHANTAB_ENT(6, 2437), 152 CHAN4G(6, 2437, 0),
145 CHANTAB_ENT(7, 2442), 153 CHAN4G(7, 2442, 0),
146 CHANTAB_ENT(8, 2447), 154 CHAN4G(8, 2447, 0),
147 CHANTAB_ENT(9, 2452), 155 CHAN4G(9, 2452, 0),
148 CHANTAB_ENT(10, 2457), 156 CHAN4G(10, 2457, 0),
149 CHANTAB_ENT(11, 2462), 157 CHAN4G(11, 2462, 0),
150 CHANTAB_ENT(12, 2467), 158 CHAN4G(12, 2467, 0),
151 CHANTAB_ENT(13, 2472), 159 CHAN4G(13, 2472, 0),
152 CHANTAB_ENT(14, 2484), 160 CHAN4G(14, 2484, 0),
153}; 161};
154#define b43_2ghz_chantable_size ARRAY_SIZE(b43_2ghz_chantable) 162#undef CHAN4G
155 163
156#if 0 164#define CHAN5G(_channel, _flags) { \
157static struct ieee80211_channel b43_5ghz_chantable[] = { 165 .band = IEEE80211_BAND_5GHZ, \
158 CHANTAB_ENT(36, 5180), 166 .center_freq = 5000 + (5 * (_channel)), \
159 CHANTAB_ENT(40, 5200), 167 .hw_value = (_channel), \
160 CHANTAB_ENT(44, 5220), 168 .flags = (_flags), \
161 CHANTAB_ENT(48, 5240), 169 .max_antenna_gain = 0, \
162 CHANTAB_ENT(52, 5260), 170 .max_power = 30, \
163 CHANTAB_ENT(56, 5280), 171}
164 CHANTAB_ENT(60, 5300), 172static struct ieee80211_channel b43_5ghz_nphy_chantable[] = {
165 CHANTAB_ENT(64, 5320), 173 CHAN5G(32, 0), CHAN5G(34, 0),
166 CHANTAB_ENT(149, 5745), 174 CHAN5G(36, 0), CHAN5G(38, 0),
167 CHANTAB_ENT(153, 5765), 175 CHAN5G(40, 0), CHAN5G(42, 0),
168 CHANTAB_ENT(157, 5785), 176 CHAN5G(44, 0), CHAN5G(46, 0),
169 CHANTAB_ENT(161, 5805), 177 CHAN5G(48, 0), CHAN5G(50, 0),
170 CHANTAB_ENT(165, 5825), 178 CHAN5G(52, 0), CHAN5G(54, 0),
179 CHAN5G(56, 0), CHAN5G(58, 0),
180 CHAN5G(60, 0), CHAN5G(62, 0),
181 CHAN5G(64, 0), CHAN5G(66, 0),
182 CHAN5G(68, 0), CHAN5G(70, 0),
183 CHAN5G(72, 0), CHAN5G(74, 0),
184 CHAN5G(76, 0), CHAN5G(78, 0),
185 CHAN5G(80, 0), CHAN5G(82, 0),
186 CHAN5G(84, 0), CHAN5G(86, 0),
187 CHAN5G(88, 0), CHAN5G(90, 0),
188 CHAN5G(92, 0), CHAN5G(94, 0),
189 CHAN5G(96, 0), CHAN5G(98, 0),
190 CHAN5G(100, 0), CHAN5G(102, 0),
191 CHAN5G(104, 0), CHAN5G(106, 0),
192 CHAN5G(108, 0), CHAN5G(110, 0),
193 CHAN5G(112, 0), CHAN5G(114, 0),
194 CHAN5G(116, 0), CHAN5G(118, 0),
195 CHAN5G(120, 0), CHAN5G(122, 0),
196 CHAN5G(124, 0), CHAN5G(126, 0),
197 CHAN5G(128, 0), CHAN5G(130, 0),
198 CHAN5G(132, 0), CHAN5G(134, 0),
199 CHAN5G(136, 0), CHAN5G(138, 0),
200 CHAN5G(140, 0), CHAN5G(142, 0),
201 CHAN5G(144, 0), CHAN5G(145, 0),
202 CHAN5G(146, 0), CHAN5G(147, 0),
203 CHAN5G(148, 0), CHAN5G(149, 0),
204 CHAN5G(150, 0), CHAN5G(151, 0),
205 CHAN5G(152, 0), CHAN5G(153, 0),
206 CHAN5G(154, 0), CHAN5G(155, 0),
207 CHAN5G(156, 0), CHAN5G(157, 0),
208 CHAN5G(158, 0), CHAN5G(159, 0),
209 CHAN5G(160, 0), CHAN5G(161, 0),
210 CHAN5G(162, 0), CHAN5G(163, 0),
211 CHAN5G(164, 0), CHAN5G(165, 0),
212 CHAN5G(166, 0), CHAN5G(168, 0),
213 CHAN5G(170, 0), CHAN5G(172, 0),
214 CHAN5G(174, 0), CHAN5G(176, 0),
215 CHAN5G(178, 0), CHAN5G(180, 0),
216 CHAN5G(182, 0), CHAN5G(184, 0),
217 CHAN5G(186, 0), CHAN5G(188, 0),
218 CHAN5G(190, 0), CHAN5G(192, 0),
219 CHAN5G(194, 0), CHAN5G(196, 0),
220 CHAN5G(198, 0), CHAN5G(200, 0),
221 CHAN5G(202, 0), CHAN5G(204, 0),
222 CHAN5G(206, 0), CHAN5G(208, 0),
223 CHAN5G(210, 0), CHAN5G(212, 0),
224 CHAN5G(214, 0), CHAN5G(216, 0),
225 CHAN5G(218, 0), CHAN5G(220, 0),
226 CHAN5G(222, 0), CHAN5G(224, 0),
227 CHAN5G(226, 0), CHAN5G(228, 0),
228};
229
230static struct ieee80211_channel b43_5ghz_aphy_chantable[] = {
231 CHAN5G(34, 0), CHAN5G(36, 0),
232 CHAN5G(38, 0), CHAN5G(40, 0),
233 CHAN5G(42, 0), CHAN5G(44, 0),
234 CHAN5G(46, 0), CHAN5G(48, 0),
235 CHAN5G(52, 0), CHAN5G(56, 0),
236 CHAN5G(60, 0), CHAN5G(64, 0),
237 CHAN5G(100, 0), CHAN5G(104, 0),
238 CHAN5G(108, 0), CHAN5G(112, 0),
239 CHAN5G(116, 0), CHAN5G(120, 0),
240 CHAN5G(124, 0), CHAN5G(128, 0),
241 CHAN5G(132, 0), CHAN5G(136, 0),
242 CHAN5G(140, 0), CHAN5G(149, 0),
243 CHAN5G(153, 0), CHAN5G(157, 0),
244 CHAN5G(161, 0), CHAN5G(165, 0),
245 CHAN5G(184, 0), CHAN5G(188, 0),
246 CHAN5G(192, 0), CHAN5G(196, 0),
247 CHAN5G(200, 0), CHAN5G(204, 0),
248 CHAN5G(208, 0), CHAN5G(212, 0),
249 CHAN5G(216, 0),
250};
251#undef CHAN5G
252
253static struct ieee80211_supported_band b43_band_5GHz_nphy = {
254 .band = IEEE80211_BAND_5GHZ,
255 .channels = b43_5ghz_nphy_chantable,
256 .n_channels = ARRAY_SIZE(b43_5ghz_nphy_chantable),
257 .bitrates = b43_a_ratetable,
258 .n_bitrates = b43_a_ratetable_size,
259};
260
261static struct ieee80211_supported_band b43_band_5GHz_aphy = {
262 .band = IEEE80211_BAND_5GHZ,
263 .channels = b43_5ghz_aphy_chantable,
264 .n_channels = ARRAY_SIZE(b43_5ghz_aphy_chantable),
265 .bitrates = b43_a_ratetable,
266 .n_bitrates = b43_a_ratetable_size,
267};
268
269static struct ieee80211_supported_band b43_band_2GHz = {
270 .band = IEEE80211_BAND_2GHZ,
271 .channels = b43_2ghz_chantable,
272 .n_channels = ARRAY_SIZE(b43_2ghz_chantable),
273 .bitrates = b43_g_ratetable,
274 .n_bitrates = b43_g_ratetable_size,
171}; 275};
172#define b43_5ghz_chantable_size ARRAY_SIZE(b43_5ghz_chantable)
173#endif
174 276
175static void b43_wireless_core_exit(struct b43_wldev *dev); 277static void b43_wireless_core_exit(struct b43_wldev *dev);
176static int b43_wireless_core_init(struct b43_wldev *dev); 278static int b43_wireless_core_init(struct b43_wldev *dev);
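The CHAN4G/CHAN5G macros introduced above derive hw_value and center_freq per channel; for the 5 GHz tables the frequency is simply 5000 + 5 * channel MHz, while the 2.4 GHz table is listed explicitly (channel 14 at 2484 MHz breaks the 5 MHz spacing). A small standalone sketch of that conversion, checked against the values in the removed CHANTAB_ENT table:

/* Illustrative sketch of the channel -> center frequency math behind CHAN5G(). */
#include <stdio.h>

static int chan5g_center_freq(int channel)
{
        return 5000 + 5 * channel;      /* MHz, as in the CHAN5G() macro */
}

int main(void)
{
        int channels[] = { 36, 64, 149, 165 };

        for (unsigned i = 0; i < sizeof(channels) / sizeof(channels[0]); i++)
                printf("channel %3d -> %d MHz\n",
                       channels[i], chan5g_center_freq(channels[i]));
        return 0;
}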
@@ -370,24 +472,30 @@ out:
370} 472}
371 473
372/* Read HostFlags */ 474/* Read HostFlags */
373u32 b43_hf_read(struct b43_wldev * dev) 475u64 b43_hf_read(struct b43_wldev * dev)
374{ 476{
375 u32 ret; 477 u64 ret;
376 478
377 ret = b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTFHI); 479 ret = b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTFHI);
378 ret <<= 16; 480 ret <<= 16;
481 ret |= b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTFMI);
482 ret <<= 16;
379 ret |= b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTFLO); 483 ret |= b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTFLO);
380 484
381 return ret; 485 return ret;
382} 486}
383 487
384/* Write HostFlags */ 488/* Write HostFlags */
385void b43_hf_write(struct b43_wldev *dev, u32 value) 489void b43_hf_write(struct b43_wldev *dev, u64 value)
386{ 490{
387 b43_shm_write16(dev, B43_SHM_SHARED, 491 u16 lo, mi, hi;
388 B43_SHM_SH_HOSTFLO, (value & 0x0000FFFF)); 492
389 b43_shm_write16(dev, B43_SHM_SHARED, 493 lo = (value & 0x00000000FFFFULL);
390 B43_SHM_SH_HOSTFHI, ((value & 0xFFFF0000) >> 16)); 494 mi = (value & 0x0000FFFF0000ULL) >> 16;
495 hi = (value & 0xFFFF00000000ULL) >> 32;
496 b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTFLO, lo);
497 b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTFMI, mi);
498 b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_HOSTFHI, hi);
391} 499}
392 500
393void b43_tsf_read(struct b43_wldev *dev, u64 * tsf) 501void b43_tsf_read(struct b43_wldev *dev, u64 * tsf)
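The host-flags accessors above grow from 32 to 48 significant bits, split across three 16-bit shared-memory words (LO/MI/HI). A standalone sketch of the same split and reassembly, with a plain array standing in for SHM; it is only an illustration of the bit layout, not the driver's SHM access path:

/* Illustrative sketch of 48-bit host flags split into three 16-bit words. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint16_t shm[3];         /* [0]=LO, [1]=MI, [2]=HI -- stand-in for SHM */

static void hf_write(uint64_t value)
{
        shm[0] = value & 0xFFFF;                /* bits 0..15  */
        shm[1] = (value >> 16) & 0xFFFF;        /* bits 16..31 */
        shm[2] = (value >> 32) & 0xFFFF;        /* bits 32..47 */
}

static uint64_t hf_read(void)
{
        uint64_t ret;

        ret = shm[2];
        ret <<= 16;
        ret |= shm[1];
        ret <<= 16;
        ret |= shm[0];
        return ret;
}

int main(void)
{
        hf_write(0x0000123456789ABCULL);
        printf("hostflags = 0x%012" PRIX64 "\n", hf_read());
        return 0;
}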
@@ -912,7 +1020,18 @@ void b43_power_saving_ctl_bits(struct b43_wldev *dev, unsigned int ps_flags)
912/* Turn the Analog ON/OFF */ 1020/* Turn the Analog ON/OFF */
913static void b43_switch_analog(struct b43_wldev *dev, int on) 1021static void b43_switch_analog(struct b43_wldev *dev, int on)
914{ 1022{
915 b43_write16(dev, B43_MMIO_PHY0, on ? 0 : 0xF4); 1023 switch (dev->phy.type) {
1024 case B43_PHYTYPE_A:
1025 case B43_PHYTYPE_G:
1026 b43_write16(dev, B43_MMIO_PHY0, on ? 0 : 0xF4);
1027 break;
1028 case B43_PHYTYPE_N:
1029 b43_phy_write(dev, B43_NPHY_AFECTL_OVER,
1030 on ? 0 : 0x7FFF);
1031 break;
1032 default:
1033 B43_WARN_ON(1);
1034 }
916} 1035}
917 1036
918void b43_wireless_core_reset(struct b43_wldev *dev, u32 flags) 1037void b43_wireless_core_reset(struct b43_wldev *dev, u32 flags)
@@ -1162,22 +1281,107 @@ static void b43_write_template_common(struct b43_wldev *dev,
1162 size + sizeof(struct b43_plcp_hdr6)); 1281 size + sizeof(struct b43_plcp_hdr6));
1163} 1282}
1164 1283
1284/* Check if the use of the antenna that ieee80211 told us to
1285 * use is possible. This will fall back to DEFAULT.
1286 * "antenna_nr" is the antenna identifier we got from ieee80211. */
1287u8 b43_ieee80211_antenna_sanitize(struct b43_wldev *dev,
1288 u8 antenna_nr)
1289{
1290 u8 antenna_mask;
1291
1292 if (antenna_nr == 0) {
1293 /* Zero means "use default antenna". That's always OK. */
1294 return 0;
1295 }
1296
1297 /* Get the mask of available antennas. */
1298 if (dev->phy.gmode)
1299 antenna_mask = dev->dev->bus->sprom.ant_available_bg;
1300 else
1301 antenna_mask = dev->dev->bus->sprom.ant_available_a;
1302
1303 if (!(antenna_mask & (1 << (antenna_nr - 1)))) {
1304 /* This antenna is not available. Fall back to default. */
1305 return 0;
1306 }
1307
1308 return antenna_nr;
1309}
1310
1311static int b43_antenna_from_ieee80211(struct b43_wldev *dev, u8 antenna)
1312{
1313 antenna = b43_ieee80211_antenna_sanitize(dev, antenna);
1314 switch (antenna) {
1315 case 0: /* default/diversity */
1316 return B43_ANTENNA_DEFAULT;
1317 case 1: /* Antenna 0 */
1318 return B43_ANTENNA0;
1319 case 2: /* Antenna 1 */
1320 return B43_ANTENNA1;
1321 case 3: /* Antenna 2 */
1322 return B43_ANTENNA2;
1323 case 4: /* Antenna 3 */
1324 return B43_ANTENNA3;
1325 default:
1326 return B43_ANTENNA_DEFAULT;
1327 }
1328}
1329
1330/* Convert a b43 antenna number value to the PHY TX control value. */
1331static u16 b43_antenna_to_phyctl(int antenna)
1332{
1333 switch (antenna) {
1334 case B43_ANTENNA0:
1335 return B43_TXH_PHY_ANT0;
1336 case B43_ANTENNA1:
1337 return B43_TXH_PHY_ANT1;
1338 case B43_ANTENNA2:
1339 return B43_TXH_PHY_ANT2;
1340 case B43_ANTENNA3:
1341 return B43_TXH_PHY_ANT3;
1342 case B43_ANTENNA_AUTO:
1343 return B43_TXH_PHY_ANT01AUTO;
1344 }
1345 B43_WARN_ON(1);
1346 return 0;
1347}
1348
1165static void b43_write_beacon_template(struct b43_wldev *dev, 1349static void b43_write_beacon_template(struct b43_wldev *dev,
1166 u16 ram_offset, 1350 u16 ram_offset,
1167 u16 shm_size_offset, u8 rate) 1351 u16 shm_size_offset)
1168{ 1352{
1169 unsigned int i, len, variable_len; 1353 unsigned int i, len, variable_len;
1170 const struct ieee80211_mgmt *bcn; 1354 const struct ieee80211_mgmt *bcn;
1171 const u8 *ie; 1355 const u8 *ie;
1172 bool tim_found = 0; 1356 bool tim_found = 0;
1357 unsigned int rate;
1358 u16 ctl;
1359 int antenna;
1173 1360
1174 bcn = (const struct ieee80211_mgmt *)(dev->wl->current_beacon->data); 1361 bcn = (const struct ieee80211_mgmt *)(dev->wl->current_beacon->data);
1175 len = min((size_t) dev->wl->current_beacon->len, 1362 len = min((size_t) dev->wl->current_beacon->len,
1176 0x200 - sizeof(struct b43_plcp_hdr6)); 1363 0x200 - sizeof(struct b43_plcp_hdr6));
1364 rate = dev->wl->beacon_txctl.tx_rate->hw_value;
1177 1365
1178 b43_write_template_common(dev, (const u8 *)bcn, 1366 b43_write_template_common(dev, (const u8 *)bcn,
1179 len, ram_offset, shm_size_offset, rate); 1367 len, ram_offset, shm_size_offset, rate);
1180 1368
1369 /* Write the PHY TX control parameters. */
1370 antenna = b43_antenna_from_ieee80211(dev,
1371 dev->wl->beacon_txctl.antenna_sel_tx);
1372 antenna = b43_antenna_to_phyctl(antenna);
1373 ctl = b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_BEACPHYCTL);
1374 /* We can't send beacons with short preamble. Would get PHY errors. */
1375 ctl &= ~B43_TXH_PHY_SHORTPRMBL;
1376 ctl &= ~B43_TXH_PHY_ANT;
1377 ctl &= ~B43_TXH_PHY_ENC;
1378 ctl |= antenna;
1379 if (b43_is_cck_rate(rate))
1380 ctl |= B43_TXH_PHY_ENC_CCK;
1381 else
1382 ctl |= B43_TXH_PHY_ENC_OFDM;
1383 b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_BEACPHYCTL, ctl);
1384
1181 /* Find the position of the TIM and the DTIM_period value 1385 /* Find the position of the TIM and the DTIM_period value
1182 * and write them to SHM. */ 1386 * and write them to SHM. */
1183 ie = bcn->u.beacon.variable; 1387 ie = bcn->u.beacon.variable;
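b43_ieee80211_antenna_sanitize() above validates the requested antenna against the SPROM availability bitmask (bit N-1 set means antenna N is populated) and falls back to the default antenna otherwise. A self-contained sketch of that check; the mask value below is made up for demonstration:

/* Illustrative sketch of the antenna availability check above. */
#include <stdint.h>
#include <stdio.h>

/* Returns the antenna number to use; 0 means "default/diversity". */
static uint8_t sanitize_antenna(uint8_t antenna_nr, uint8_t antenna_mask)
{
        if (antenna_nr == 0)
                return 0;                       /* default is always fine */
        if (!(antenna_mask & (1 << (antenna_nr - 1))))
                return 0;                       /* not populated, fall back */
        return antenna_nr;
}

int main(void)
{
        const uint8_t mask = 0x03;              /* hypothetical: antennas 1 and 2 only */

        for (uint8_t nr = 0; nr <= 4; nr++)
                printf("requested %u -> using %u\n",
                       (unsigned)nr, (unsigned)sanitize_antenna(nr, mask));
        return 0;
}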
@@ -1218,21 +1422,23 @@ static void b43_write_beacon_template(struct b43_wldev *dev,
1218 b43warn(dev->wl, "Did not find a valid TIM IE in " 1422 b43warn(dev->wl, "Did not find a valid TIM IE in "
1219 "the beacon template packet. AP or IBSS operation " 1423 "the beacon template packet. AP or IBSS operation "
1220 "may be broken.\n"); 1424 "may be broken.\n");
1221 } 1425 } else
1426 b43dbg(dev->wl, "Updated beacon template\n");
1222} 1427}
1223 1428
1224static void b43_write_probe_resp_plcp(struct b43_wldev *dev, 1429static void b43_write_probe_resp_plcp(struct b43_wldev *dev,
1225 u16 shm_offset, u16 size, u8 rate) 1430 u16 shm_offset, u16 size,
1431 struct ieee80211_rate *rate)
1226{ 1432{
1227 struct b43_plcp_hdr4 plcp; 1433 struct b43_plcp_hdr4 plcp;
1228 u32 tmp; 1434 u32 tmp;
1229 __le16 dur; 1435 __le16 dur;
1230 1436
1231 plcp.data = 0; 1437 plcp.data = 0;
1232 b43_generate_plcp_hdr(&plcp, size + FCS_LEN, rate); 1438 b43_generate_plcp_hdr(&plcp, size + FCS_LEN, rate->hw_value);
1233 dur = ieee80211_generic_frame_duration(dev->wl->hw, 1439 dur = ieee80211_generic_frame_duration(dev->wl->hw,
1234 dev->wl->vif, size, 1440 dev->wl->vif, size,
1235 B43_RATE_TO_BASE100KBPS(rate)); 1441 rate);
1236 /* Write PLCP in two parts and timing for packet transfer */ 1442 /* Write PLCP in two parts and timing for packet transfer */
1237 tmp = le32_to_cpu(plcp.data); 1443 tmp = le32_to_cpu(plcp.data);
1238 b43_shm_write16(dev, B43_SHM_SHARED, shm_offset, tmp & 0xFFFF); 1444 b43_shm_write16(dev, B43_SHM_SHARED, shm_offset, tmp & 0xFFFF);
@@ -1247,7 +1453,8 @@ static void b43_write_probe_resp_plcp(struct b43_wldev *dev,
1247 * 3) Stripping TIM 1453 * 3) Stripping TIM
1248 */ 1454 */
1249static const u8 * b43_generate_probe_resp(struct b43_wldev *dev, 1455static const u8 * b43_generate_probe_resp(struct b43_wldev *dev,
1250 u16 *dest_size, u8 rate) 1456 u16 *dest_size,
1457 struct ieee80211_rate *rate)
1251{ 1458{
1252 const u8 *src_data; 1459 const u8 *src_data;
1253 u8 *dest_data; 1460 u8 *dest_data;
@@ -1292,7 +1499,7 @@ static const u8 * b43_generate_probe_resp(struct b43_wldev *dev,
1292 IEEE80211_STYPE_PROBE_RESP); 1499 IEEE80211_STYPE_PROBE_RESP);
1293 dur = ieee80211_generic_frame_duration(dev->wl->hw, 1500 dur = ieee80211_generic_frame_duration(dev->wl->hw,
1294 dev->wl->vif, *dest_size, 1501 dev->wl->vif, *dest_size,
1295 B43_RATE_TO_BASE100KBPS(rate)); 1502 rate);
1296 hdr->duration_id = dur; 1503 hdr->duration_id = dur;
1297 1504
1298 return dest_data; 1505 return dest_data;
@@ -1300,7 +1507,8 @@ static const u8 * b43_generate_probe_resp(struct b43_wldev *dev,
1300 1507
1301static void b43_write_probe_resp_template(struct b43_wldev *dev, 1508static void b43_write_probe_resp_template(struct b43_wldev *dev,
1302 u16 ram_offset, 1509 u16 ram_offset,
1303 u16 shm_size_offset, u8 rate) 1510 u16 shm_size_offset,
1511 struct ieee80211_rate *rate)
1304{ 1512{
1305 const u8 *probe_resp_data; 1513 const u8 *probe_resp_data;
1306 u16 size; 1514 u16 size;
@@ -1313,20 +1521,89 @@ static void b43_write_probe_resp_template(struct b43_wldev *dev,
1313 /* Looks like PLCP headers plus packet timings are stored for 1521 /* Looks like PLCP headers plus packet timings are stored for
1314 * all possible basic rates 1522 * all possible basic rates
1315 */ 1523 */
1316 b43_write_probe_resp_plcp(dev, 0x31A, size, B43_CCK_RATE_1MB); 1524 b43_write_probe_resp_plcp(dev, 0x31A, size, &b43_b_ratetable[0]);
1317 b43_write_probe_resp_plcp(dev, 0x32C, size, B43_CCK_RATE_2MB); 1525 b43_write_probe_resp_plcp(dev, 0x32C, size, &b43_b_ratetable[1]);
1318 b43_write_probe_resp_plcp(dev, 0x33E, size, B43_CCK_RATE_5MB); 1526 b43_write_probe_resp_plcp(dev, 0x33E, size, &b43_b_ratetable[2]);
1319 b43_write_probe_resp_plcp(dev, 0x350, size, B43_CCK_RATE_11MB); 1527 b43_write_probe_resp_plcp(dev, 0x350, size, &b43_b_ratetable[3]);
1320 1528
1321 size = min((size_t) size, 0x200 - sizeof(struct b43_plcp_hdr6)); 1529 size = min((size_t) size, 0x200 - sizeof(struct b43_plcp_hdr6));
1322 b43_write_template_common(dev, probe_resp_data, 1530 b43_write_template_common(dev, probe_resp_data,
1323 size, ram_offset, shm_size_offset, rate); 1531 size, ram_offset, shm_size_offset,
1532 rate->hw_value);
1324 kfree(probe_resp_data); 1533 kfree(probe_resp_data);
1325} 1534}
1326 1535
1536static void handle_irq_beacon(struct b43_wldev *dev)
1537{
1538 struct b43_wl *wl = dev->wl;
1539 u32 cmd, beacon0_valid, beacon1_valid;
1540
1541 if (!b43_is_mode(wl, IEEE80211_IF_TYPE_AP))
1542 return;
1543
1544 /* This is the bottom half of the asynchronous beacon update. */
1545
1546 /* Ignore interrupt in the future. */
1547 dev->irq_savedstate &= ~B43_IRQ_BEACON;
1548
1549 cmd = b43_read32(dev, B43_MMIO_MACCMD);
1550 beacon0_valid = (cmd & B43_MACCMD_BEACON0_VALID);
1551 beacon1_valid = (cmd & B43_MACCMD_BEACON1_VALID);
1552
1553 /* Schedule interrupt manually, if busy. */
1554 if (beacon0_valid && beacon1_valid) {
1555 b43_write32(dev, B43_MMIO_GEN_IRQ_REASON, B43_IRQ_BEACON);
1556 dev->irq_savedstate |= B43_IRQ_BEACON;
1557 return;
1558 }
1559
1560 if (!beacon0_valid) {
1561 if (!wl->beacon0_uploaded) {
1562 b43_write_beacon_template(dev, 0x68, 0x18);
1563 b43_write_probe_resp_template(dev, 0x268, 0x4A,
1564 &__b43_ratetable[3]);
1565 wl->beacon0_uploaded = 1;
1566 }
1567 cmd = b43_read32(dev, B43_MMIO_MACCMD);
1568 cmd |= B43_MACCMD_BEACON0_VALID;
1569 b43_write32(dev, B43_MMIO_MACCMD, cmd);
1570 } else if (!beacon1_valid) {
1571 if (!wl->beacon1_uploaded) {
1572 b43_write_beacon_template(dev, 0x468, 0x1A);
1573 wl->beacon1_uploaded = 1;
1574 }
1575 cmd = b43_read32(dev, B43_MMIO_MACCMD);
1576 cmd |= B43_MACCMD_BEACON1_VALID;
1577 b43_write32(dev, B43_MMIO_MACCMD, cmd);
1578 }
1579}
1580
1581static void b43_beacon_update_trigger_work(struct work_struct *work)
1582{
1583 struct b43_wl *wl = container_of(work, struct b43_wl,
1584 beacon_update_trigger);
1585 struct b43_wldev *dev;
1586
1587 mutex_lock(&wl->mutex);
1588 dev = wl->current_dev;
1589 if (likely(dev && (b43_status(dev) >= B43_STAT_INITIALIZED))) {
1590 spin_lock_irq(&wl->irq_lock);
1591 /* update beacon right away or defer to irq */
1592 dev->irq_savedstate = b43_read32(dev, B43_MMIO_GEN_IRQ_MASK);
1593 handle_irq_beacon(dev);
1594 /* The handler might have updated the IRQ mask. */
1595 b43_write32(dev, B43_MMIO_GEN_IRQ_MASK,
1596 dev->irq_savedstate);
1597 mmiowb();
1598 spin_unlock_irq(&wl->irq_lock);
1599 }
1600 mutex_unlock(&wl->mutex);
1601}
1602
1327/* Asynchronously update the packet templates in template RAM. 1603/* Asynchronously update the packet templates in template RAM.
1328 * Locking: Requires wl->irq_lock to be locked. */ 1604 * Locking: Requires wl->irq_lock to be locked. */
1329static void b43_update_templates(struct b43_wl *wl, struct sk_buff *beacon) 1605static void b43_update_templates(struct b43_wl *wl, struct sk_buff *beacon,
1606 const struct ieee80211_tx_control *txctl)
1330{ 1607{
1331 /* This is the top half of the ansynchronous beacon update. 1608 /* This is the top half of the ansynchronous beacon update.
1332 * The bottom half is the beacon IRQ. 1609 * The bottom half is the beacon IRQ.
@@ -1337,8 +1614,10 @@ static void b43_update_templates(struct b43_wl *wl, struct sk_buff *beacon)
1337 if (wl->current_beacon) 1614 if (wl->current_beacon)
1338 dev_kfree_skb_any(wl->current_beacon); 1615 dev_kfree_skb_any(wl->current_beacon);
1339 wl->current_beacon = beacon; 1616 wl->current_beacon = beacon;
1617 memcpy(&wl->beacon_txctl, txctl, sizeof(wl->beacon_txctl));
1340 wl->beacon0_uploaded = 0; 1618 wl->beacon0_uploaded = 0;
1341 wl->beacon1_uploaded = 0; 1619 wl->beacon1_uploaded = 0;
1620 queue_work(wl->hw->workqueue, &wl->beacon_update_trigger);
1342} 1621}
1343 1622
1344static void b43_set_ssid(struct b43_wldev *dev, const u8 * ssid, u8 ssid_len) 1623static void b43_set_ssid(struct b43_wldev *dev, const u8 * ssid, u8 ssid_len)
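handle_irq_beacon(), as reworked above, is the bottom half of a double-buffered beacon update: if both template slots are still marked valid the IRQ is re-raised and retried later, otherwise the first invalid slot is uploaded (if needed) and marked valid. A small sketch of that decision logic with booleans standing in for the MACCMD valid bits and the upload side effects:

/* Illustrative sketch of the double-buffered beacon update decision above. */
#include <stdbool.h>
#include <stdio.h>

struct beacon_state {
        bool beacon0_valid, beacon1_valid;      /* MACCMD valid bits */
        bool beacon0_uploaded, beacon1_uploaded;
};

/* Returns true if the update must be retried later (both slots busy). */
static bool beacon_irq_bottom_half(struct beacon_state *s)
{
        if (s->beacon0_valid && s->beacon1_valid)
                return true;                    /* busy: reschedule the IRQ */

        if (!s->beacon0_valid) {
                if (!s->beacon0_uploaded)
                        s->beacon0_uploaded = true;     /* write template 0 */
                s->beacon0_valid = true;
        } else {
                if (!s->beacon1_uploaded)
                        s->beacon1_uploaded = true;     /* write template 1 */
                s->beacon1_valid = true;
        }
        return false;
}

int main(void)
{
        struct beacon_state s = { false, false, false, false };

        printf("retry=%d\n", beacon_irq_bottom_half(&s));       /* uploads slot 0 */
        printf("retry=%d\n", beacon_irq_bottom_half(&s));       /* uploads slot 1 */
        printf("retry=%d\n", beacon_irq_bottom_half(&s));       /* both busy -> retry */
        return 0;
}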
@@ -1364,44 +1643,14 @@ static void b43_set_beacon_int(struct b43_wldev *dev, u16 beacon_int)
1364{ 1643{
1365 b43_time_lock(dev); 1644 b43_time_lock(dev);
1366 if (dev->dev->id.revision >= 3) { 1645 if (dev->dev->id.revision >= 3) {
1367 b43_write32(dev, 0x188, (beacon_int << 16)); 1646 b43_write32(dev, B43_MMIO_TSF_CFP_REP, (beacon_int << 16));
1647 b43_write32(dev, B43_MMIO_TSF_CFP_START, (beacon_int << 10));
1368 } else { 1648 } else {
1369 b43_write16(dev, 0x606, (beacon_int >> 6)); 1649 b43_write16(dev, 0x606, (beacon_int >> 6));
1370 b43_write16(dev, 0x610, beacon_int); 1650 b43_write16(dev, 0x610, beacon_int);
1371 } 1651 }
1372 b43_time_unlock(dev); 1652 b43_time_unlock(dev);
1373} 1653 b43dbg(dev->wl, "Set beacon interval to %u\n", beacon_int);
1374
1375static void handle_irq_beacon(struct b43_wldev *dev)
1376{
1377 struct b43_wl *wl = dev->wl;
1378 u32 cmd;
1379
1380 if (!b43_is_mode(wl, IEEE80211_IF_TYPE_AP))
1381 return;
1382
1383 /* This is the bottom half of the asynchronous beacon update. */
1384
1385 cmd = b43_read32(dev, B43_MMIO_MACCMD);
1386 if (!(cmd & B43_MACCMD_BEACON0_VALID)) {
1387 if (!wl->beacon0_uploaded) {
1388 b43_write_beacon_template(dev, 0x68, 0x18,
1389 B43_CCK_RATE_1MB);
1390 b43_write_probe_resp_template(dev, 0x268, 0x4A,
1391 B43_CCK_RATE_11MB);
1392 wl->beacon0_uploaded = 1;
1393 }
1394 cmd |= B43_MACCMD_BEACON0_VALID;
1395 }
1396 if (!(cmd & B43_MACCMD_BEACON1_VALID)) {
1397 if (!wl->beacon1_uploaded) {
1398 b43_write_beacon_template(dev, 0x468, 0x1A,
1399 B43_CCK_RATE_1MB);
1400 wl->beacon1_uploaded = 1;
1401 }
1402 cmd |= B43_MACCMD_BEACON1_VALID;
1403 }
1404 b43_write32(dev, B43_MMIO_MACCMD, cmd);
1405} 1654}
1406 1655
1407static void handle_irq_ucode_debug(struct b43_wldev *dev) 1656static void handle_irq_ucode_debug(struct b43_wldev *dev)
@@ -1483,12 +1732,15 @@ static void b43_interrupt_tasklet(struct b43_wldev *dev)
1483 handle_irq_noise(dev); 1732 handle_irq_noise(dev);
1484 1733
1485 /* Check the DMA reason registers for received data. */ 1734 /* Check the DMA reason registers for received data. */
1486 if (dma_reason[0] & B43_DMAIRQ_RX_DONE) 1735 if (dma_reason[0] & B43_DMAIRQ_RX_DONE) {
1487 b43_dma_rx(dev->dma.rx_ring0); 1736 if (b43_using_pio_transfers(dev))
1488 if (dma_reason[3] & B43_DMAIRQ_RX_DONE) 1737 b43_pio_rx(dev->pio.rx_queue);
1489 b43_dma_rx(dev->dma.rx_ring3); 1738 else
1739 b43_dma_rx(dev->dma.rx_ring);
1740 }
1490 B43_WARN_ON(dma_reason[1] & B43_DMAIRQ_RX_DONE); 1741 B43_WARN_ON(dma_reason[1] & B43_DMAIRQ_RX_DONE);
1491 B43_WARN_ON(dma_reason[2] & B43_DMAIRQ_RX_DONE); 1742 B43_WARN_ON(dma_reason[2] & B43_DMAIRQ_RX_DONE);
1743 B43_WARN_ON(dma_reason[3] & B43_DMAIRQ_RX_DONE);
1492 B43_WARN_ON(dma_reason[4] & B43_DMAIRQ_RX_DONE); 1744 B43_WARN_ON(dma_reason[4] & B43_DMAIRQ_RX_DONE);
1493 B43_WARN_ON(dma_reason[5] & B43_DMAIRQ_RX_DONE); 1745 B43_WARN_ON(dma_reason[5] & B43_DMAIRQ_RX_DONE);
1494 1746
@@ -2045,7 +2297,7 @@ static void b43_gpio_cleanup(struct b43_wldev *dev)
2045} 2297}
2046 2298
2047/* http://bcm-specs.sipsolutions.net/EnableMac */ 2299/* http://bcm-specs.sipsolutions.net/EnableMac */
2048void b43_mac_enable(struct b43_wldev *dev) 2300static void b43_mac_enable(struct b43_wldev *dev)
2049{ 2301{
2050 dev->mac_suspended--; 2302 dev->mac_suspended--;
2051 B43_WARN_ON(dev->mac_suspended < 0); 2303 B43_WARN_ON(dev->mac_suspended < 0);
@@ -2068,7 +2320,7 @@ void b43_mac_enable(struct b43_wldev *dev)
2068} 2320}
2069 2321
2070/* http://bcm-specs.sipsolutions.net/SuspendMAC */ 2322/* http://bcm-specs.sipsolutions.net/SuspendMAC */
2071void b43_mac_suspend(struct b43_wldev *dev) 2323static void b43_mac_suspend(struct b43_wldev *dev)
2072{ 2324{
2073 int i; 2325 int i;
2074 u32 tmp; 2326 u32 tmp;
@@ -2091,6 +2343,13 @@ void b43_mac_suspend(struct b43_wldev *dev)
2091 & ~B43_MACCTL_ENABLED); 2343 & ~B43_MACCTL_ENABLED);
2092 /* force pci to flush the write */ 2344 /* force pci to flush the write */
2093 b43_read32(dev, B43_MMIO_MACCTL); 2345 b43_read32(dev, B43_MMIO_MACCTL);
2346 for (i = 35; i; i--) {
2347 tmp = b43_read32(dev, B43_MMIO_GEN_IRQ_REASON);
2348 if (tmp & B43_IRQ_MAC_SUSPENDED)
2349 goto out;
2350 udelay(10);
2351 }
2352 /* Hm, it seems this will take some time. Use msleep(). */
2094 for (i = 40; i; i--) { 2353 for (i = 40; i; i--) {
2095 tmp = b43_read32(dev, B43_MMIO_GEN_IRQ_REASON); 2354 tmp = b43_read32(dev, B43_MMIO_GEN_IRQ_REASON);
2096 if (tmp & B43_IRQ_MAC_SUSPENDED) 2355 if (tmp & B43_IRQ_MAC_SUSPENDED)
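The MAC-suspend change above adds a short busy-wait phase (35 iterations of udelay(10)) before falling back to the existing, slower sleep-based loop, so fast hardware does not pay the sleeping cost. A userspace sketch of that two-phase polling pattern, with a fake status function standing in for the IRQ-reason register read and usleep() approximating udelay()/msleep():

/* Illustrative sketch of the two-phase poll (busy-wait, then sleep) above. */
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static int polls_until_ready = 50;      /* pretend hardware: ready after 50 reads */

static bool mac_suspended(void)
{
        return --polls_until_ready <= 0;
}

static int wait_for_mac_suspend(void)
{
        int i;

        /* Fast path: poll with tiny delays for quick completions. */
        for (i = 35; i; i--) {
                if (mac_suspended())
                        return 0;
                usleep(10);                     /* ~udelay(10) */
        }
        /* Slow path: the hardware is taking longer, so sleep between polls. */
        for (i = 40; i; i--) {
                if (mac_suspended())
                        return 0;
                usleep(1000);                   /* ~msleep() */
        }
        return -1;                              /* timed out */
}

int main(void)
{
        printf("wait_for_mac_suspend() = %d\n", wait_for_mac_suspend());
        return 0;
}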
@@ -2196,38 +2455,28 @@ static void b43_rate_memory_init(struct b43_wldev *dev)
2196 } 2455 }
2197} 2456}
2198 2457
2458/* Set the default values for the PHY TX Control Words. */
2459static void b43_set_phytxctl_defaults(struct b43_wldev *dev)
2460{
2461 u16 ctl = 0;
2462
2463 ctl |= B43_TXH_PHY_ENC_CCK;
2464 ctl |= B43_TXH_PHY_ANT01AUTO;
2465 ctl |= B43_TXH_PHY_TXPWR;
2466
2467 b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_BEACPHYCTL, ctl);
2468 b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_ACKCTSPHYCTL, ctl);
2469 b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_PRPHYCTL, ctl);
2470}
2471
2199/* Set the TX-Antenna for management frames sent by firmware. */ 2472/* Set the TX-Antenna for management frames sent by firmware. */
2200static void b43_mgmtframe_txantenna(struct b43_wldev *dev, int antenna) 2473static void b43_mgmtframe_txantenna(struct b43_wldev *dev, int antenna)
2201{ 2474{
2202 u16 ant = 0; 2475 u16 ant;
2203 u16 tmp; 2476 u16 tmp;
2204 2477
2205 switch (antenna) { 2478 ant = b43_antenna_to_phyctl(antenna);
2206 case B43_ANTENNA0:
2207 ant |= B43_TXH_PHY_ANT0;
2208 break;
2209 case B43_ANTENNA1:
2210 ant |= B43_TXH_PHY_ANT1;
2211 break;
2212 case B43_ANTENNA2:
2213 ant |= B43_TXH_PHY_ANT2;
2214 break;
2215 case B43_ANTENNA3:
2216 ant |= B43_TXH_PHY_ANT3;
2217 break;
2218 case B43_ANTENNA_AUTO:
2219 ant |= B43_TXH_PHY_ANT01AUTO;
2220 break;
2221 default:
2222 B43_WARN_ON(1);
2223 }
2224
2225 /* FIXME We also need to set the other flags of the PHY control field somewhere. */
2226 2479
2227 /* For Beacons */
2228 tmp = b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_BEACPHYCTL);
2229 tmp = (tmp & ~B43_TXH_PHY_ANT) | ant;
2230 b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_BEACPHYCTL, tmp);
2231 /* For ACK/CTS */ 2480 /* For ACK/CTS */
2232 tmp = b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_ACKCTSPHYCTL); 2481 tmp = b43_shm_read16(dev, B43_SHM_SHARED, B43_SHM_SH_ACKCTSPHYCTL);
2233 tmp = (tmp & ~B43_TXH_PHY_ANT) | ant; 2482 tmp = (tmp & ~B43_TXH_PHY_ANT) | ant;
@@ -2589,22 +2838,199 @@ static int b43_op_tx(struct ieee80211_hw *hw,
2589 struct b43_wldev *dev = wl->current_dev; 2838 struct b43_wldev *dev = wl->current_dev;
2590 int err = -ENODEV; 2839 int err = -ENODEV;
2591 2840
2841 if (unlikely(skb->len < 2 + 2 + 6)) {
2842 /* Too short, this can't be a valid frame. */
2843 return -EINVAL;
2844 }
2845 B43_WARN_ON(skb_shinfo(skb)->nr_frags);
2846
2592 if (unlikely(!dev)) 2847 if (unlikely(!dev))
2593 goto out; 2848 goto out;
2594 if (unlikely(b43_status(dev) < B43_STAT_STARTED)) 2849 if (unlikely(b43_status(dev) < B43_STAT_STARTED))
2595 goto out; 2850 goto out;
2596 /* DMA-TX is done without a global lock. */ 2851 /* TX is done without a global lock. */
2597 err = b43_dma_tx(dev, skb, ctl); 2852 if (b43_using_pio_transfers(dev))
2853 err = b43_pio_tx(dev, skb, ctl);
2854 else
2855 err = b43_dma_tx(dev, skb, ctl);
2598out: 2856out:
2599 if (unlikely(err)) 2857 if (unlikely(err))
2600 return NETDEV_TX_BUSY; 2858 return NETDEV_TX_BUSY;
2601 return NETDEV_TX_OK; 2859 return NETDEV_TX_OK;
2602} 2860}
2603 2861
2862/* Locking: wl->irq_lock */
2863static void b43_qos_params_upload(struct b43_wldev *dev,
2864 const struct ieee80211_tx_queue_params *p,
2865 u16 shm_offset)
2866{
2867 u16 params[B43_NR_QOSPARAMS];
2868 int cw_min, cw_max, aifs, bslots, tmp;
2869 unsigned int i;
2870
2871 const u16 aCWmin = 0x0001;
2872 const u16 aCWmax = 0x03FF;
2873
2874 /* Calculate the default values for the parameters, if needed. */
2875 switch (shm_offset) {
2876 case B43_QOS_VOICE:
2877 aifs = (p->aifs == -1) ? 2 : p->aifs;
2878 cw_min = (p->cw_min == 0) ? ((aCWmin + 1) / 4 - 1) : p->cw_min;
2879 cw_max = (p->cw_max == 0) ? ((aCWmin + 1) / 2 - 1) : p->cw_max;
2880 break;
2881 case B43_QOS_VIDEO:
2882 aifs = (p->aifs == -1) ? 2 : p->aifs;
2883 cw_min = (p->cw_min == 0) ? ((aCWmin + 1) / 2 - 1) : p->cw_min;
2884 cw_max = (p->cw_max == 0) ? aCWmin : p->cw_max;
2885 break;
2886 case B43_QOS_BESTEFFORT:
2887 aifs = (p->aifs == -1) ? 3 : p->aifs;
2888 cw_min = (p->cw_min == 0) ? aCWmin : p->cw_min;
2889 cw_max = (p->cw_max == 0) ? aCWmax : p->cw_max;
2890 break;
2891 case B43_QOS_BACKGROUND:
2892 aifs = (p->aifs == -1) ? 7 : p->aifs;
2893 cw_min = (p->cw_min == 0) ? aCWmin : p->cw_min;
2894 cw_max = (p->cw_max == 0) ? aCWmax : p->cw_max;
2895 break;
2896 default:
2897 B43_WARN_ON(1);
2898 return;
2899 }
2900 if (cw_min <= 0)
2901 cw_min = aCWmin;
2902 if (cw_max <= 0)
2903 cw_max = aCWmin;
2904 bslots = b43_read16(dev, B43_MMIO_RNG) % cw_min;
2905
2906 memset(&params, 0, sizeof(params));
2907
2908 params[B43_QOSPARAM_TXOP] = p->txop * 32;
2909 params[B43_QOSPARAM_CWMIN] = cw_min;
2910 params[B43_QOSPARAM_CWMAX] = cw_max;
2911 params[B43_QOSPARAM_CWCUR] = cw_min;
2912 params[B43_QOSPARAM_AIFS] = aifs;
2913 params[B43_QOSPARAM_BSLOTS] = bslots;
2914 params[B43_QOSPARAM_REGGAP] = bslots + aifs;
2915
2916 for (i = 0; i < ARRAY_SIZE(params); i++) {
2917 if (i == B43_QOSPARAM_STATUS) {
2918 tmp = b43_shm_read16(dev, B43_SHM_SHARED,
2919 shm_offset + (i * 2));
2920 /* Mark the parameters as updated. */
2921 tmp |= 0x100;
2922 b43_shm_write16(dev, B43_SHM_SHARED,
2923 shm_offset + (i * 2),
2924 tmp);
2925 } else {
2926 b43_shm_write16(dev, B43_SHM_SHARED,
2927 shm_offset + (i * 2),
2928 params[i]);
2929 }
2930 }
2931}
2932
2933/* Update the QOS parameters in hardware. */
2934static void b43_qos_update(struct b43_wldev *dev)
2935{
2936 struct b43_wl *wl = dev->wl;
2937 struct b43_qos_params *params;
2938 unsigned long flags;
2939 unsigned int i;
2940
2941 /* Mapping of mac80211 queues to b43 SHM offsets. */
2942 static const u16 qos_shm_offsets[] = {
2943 [0] = B43_QOS_VOICE,
2944 [1] = B43_QOS_VIDEO,
2945 [2] = B43_QOS_BESTEFFORT,
2946 [3] = B43_QOS_BACKGROUND,
2947 };
2948 BUILD_BUG_ON(ARRAY_SIZE(qos_shm_offsets) != ARRAY_SIZE(wl->qos_params));
2949
2950 b43_mac_suspend(dev);
2951 spin_lock_irqsave(&wl->irq_lock, flags);
2952
2953 for (i = 0; i < ARRAY_SIZE(wl->qos_params); i++) {
2954 params = &(wl->qos_params[i]);
2955 if (params->need_hw_update) {
2956 b43_qos_params_upload(dev, &(params->p),
2957 qos_shm_offsets[i]);
2958 params->need_hw_update = 0;
2959 }
2960 }
2961
2962 spin_unlock_irqrestore(&wl->irq_lock, flags);
2963 b43_mac_enable(dev);
2964}
2965
2966static void b43_qos_clear(struct b43_wl *wl)
2967{
2968 struct b43_qos_params *params;
2969 unsigned int i;
2970
2971 for (i = 0; i < ARRAY_SIZE(wl->qos_params); i++) {
2972 params = &(wl->qos_params[i]);
2973
2974 memset(&(params->p), 0, sizeof(params->p));
2975 params->p.aifs = -1;
2976 params->need_hw_update = 1;
2977 }
2978}
2979
2980/* Initialize the core's QOS capabilities */
2981static void b43_qos_init(struct b43_wldev *dev)
2982{
2983 struct b43_wl *wl = dev->wl;
2984 unsigned int i;
2985
2986 /* Upload the current QOS parameters. */
2987 for (i = 0; i < ARRAY_SIZE(wl->qos_params); i++)
2988 wl->qos_params[i].need_hw_update = 1;
2989 b43_qos_update(dev);
2990
2991 /* Enable QOS support. */
2992 b43_hf_write(dev, b43_hf_read(dev) | B43_HF_EDCF);
2993 b43_write16(dev, B43_MMIO_IFSCTL,
2994 b43_read16(dev, B43_MMIO_IFSCTL)
2995 | B43_MMIO_IFSCTL_USE_EDCF);
2996}
2997
2998static void b43_qos_update_work(struct work_struct *work)
2999{
3000 struct b43_wl *wl = container_of(work, struct b43_wl, qos_update_work);
3001 struct b43_wldev *dev;
3002
3003 mutex_lock(&wl->mutex);
3004 dev = wl->current_dev;
3005 if (likely(dev && (b43_status(dev) >= B43_STAT_INITIALIZED)))
3006 b43_qos_update(dev);
3007 mutex_unlock(&wl->mutex);
3008}
3009
2604static int b43_op_conf_tx(struct ieee80211_hw *hw, 3010static int b43_op_conf_tx(struct ieee80211_hw *hw,
2605 int queue, 3011 int _queue,
2606 const struct ieee80211_tx_queue_params *params) 3012 const struct ieee80211_tx_queue_params *params)
2607{ 3013{
3014 struct b43_wl *wl = hw_to_b43_wl(hw);
3015 unsigned long flags;
3016 unsigned int queue = (unsigned int)_queue;
3017 struct b43_qos_params *p;
3018
3019 if (queue >= ARRAY_SIZE(wl->qos_params)) {
 3020 /* Queue not available, or setting params on this
 3021 * queue is not supported. Return success so we do
 3022 * not confuse mac80211. */
3023 return 0;
3024 }
3025
3026 spin_lock_irqsave(&wl->irq_lock, flags);
3027 p = &(wl->qos_params[queue]);
3028 memcpy(&(p->p), params, sizeof(p->p));
3029 p->need_hw_update = 1;
3030 spin_unlock_irqrestore(&wl->irq_lock, flags);
3031
3032 queue_work(hw->workqueue, &wl->qos_update_work);
3033
2608 return 0; 3034 return 0;
2609} 3035}
2610 3036
@@ -2620,7 +3046,10 @@ static int b43_op_get_tx_stats(struct ieee80211_hw *hw,
2620 goto out; 3046 goto out;
2621 spin_lock_irqsave(&wl->irq_lock, flags); 3047 spin_lock_irqsave(&wl->irq_lock, flags);
2622 if (likely(b43_status(dev) >= B43_STAT_STARTED)) { 3048 if (likely(b43_status(dev) >= B43_STAT_STARTED)) {
2623 b43_dma_get_tx_stats(dev, stats); 3049 if (b43_using_pio_transfers(dev))
3050 b43_pio_get_tx_stats(dev, stats);
3051 else
3052 b43_dma_get_tx_stats(dev, stats);
2624 err = 0; 3053 err = 0;
2625 } 3054 }
2626 spin_unlock_irqrestore(&wl->irq_lock, flags); 3055 spin_unlock_irqrestore(&wl->irq_lock, flags);
@@ -2641,45 +3070,6 @@ static int b43_op_get_stats(struct ieee80211_hw *hw,
2641 return 0; 3070 return 0;
2642} 3071}
2643 3072
2644static const char *phymode_to_string(unsigned int phymode)
2645{
2646 switch (phymode) {
2647 case B43_PHYMODE_A:
2648 return "A";
2649 case B43_PHYMODE_B:
2650 return "B";
2651 case B43_PHYMODE_G:
2652 return "G";
2653 default:
2654 B43_WARN_ON(1);
2655 }
2656 return "";
2657}
2658
2659static int find_wldev_for_phymode(struct b43_wl *wl,
2660 unsigned int phymode,
2661 struct b43_wldev **dev, bool * gmode)
2662{
2663 struct b43_wldev *d;
2664
2665 list_for_each_entry(d, &wl->devlist, list) {
2666 if (d->phy.possible_phymodes & phymode) {
2667 /* Ok, this device supports the PHY-mode.
2668 * Now figure out how the gmode bit has to be
2669 * set to support it. */
2670 if (phymode == B43_PHYMODE_A)
2671 *gmode = 0;
2672 else
2673 *gmode = 1;
2674 *dev = d;
2675
2676 return 0;
2677 }
2678 }
2679
2680 return -ESRCH;
2681}
2682
2683static void b43_put_phy_into_reset(struct b43_wldev *dev) 3073static void b43_put_phy_into_reset(struct b43_wldev *dev)
2684{ 3074{
2685 struct ssb_device *sdev = dev->dev; 3075 struct ssb_device *sdev = dev->dev;
@@ -2699,28 +3089,64 @@ static void b43_put_phy_into_reset(struct b43_wldev *dev)
2699 msleep(1); 3089 msleep(1);
2700} 3090}
2701 3091
3092static const char * band_to_string(enum ieee80211_band band)
3093{
3094 switch (band) {
3095 case IEEE80211_BAND_5GHZ:
3096 return "5";
3097 case IEEE80211_BAND_2GHZ:
3098 return "2.4";
3099 default:
3100 break;
3101 }
3102 B43_WARN_ON(1);
3103 return "";
3104}
3105
2702/* Expects wl->mutex locked */ 3106/* Expects wl->mutex locked */
2703static int b43_switch_phymode(struct b43_wl *wl, unsigned int new_mode) 3107static int b43_switch_band(struct b43_wl *wl, struct ieee80211_channel *chan)
2704{ 3108{
2705 struct b43_wldev *up_dev; 3109 struct b43_wldev *up_dev = NULL;
2706 struct b43_wldev *down_dev; 3110 struct b43_wldev *down_dev;
3111 struct b43_wldev *d;
2707 int err; 3112 int err;
2708 bool gmode = 0; 3113 bool gmode;
2709 int prev_status; 3114 int prev_status;
2710 3115
2711 err = find_wldev_for_phymode(wl, new_mode, &up_dev, &gmode); 3116 /* Find a device and PHY which supports the band. */
2712 if (err) { 3117 list_for_each_entry(d, &wl->devlist, list) {
2713 b43err(wl, "Could not find a device for %s-PHY mode\n", 3118 switch (chan->band) {
2714 phymode_to_string(new_mode)); 3119 case IEEE80211_BAND_5GHZ:
2715 return err; 3120 if (d->phy.supports_5ghz) {
3121 up_dev = d;
3122 gmode = 0;
3123 }
3124 break;
3125 case IEEE80211_BAND_2GHZ:
3126 if (d->phy.supports_2ghz) {
3127 up_dev = d;
3128 gmode = 1;
3129 }
3130 break;
3131 default:
3132 B43_WARN_ON(1);
3133 return -EINVAL;
3134 }
3135 if (up_dev)
3136 break;
3137 }
3138 if (!up_dev) {
3139 b43err(wl, "Could not find a device for %s-GHz band operation\n",
3140 band_to_string(chan->band));
3141 return -ENODEV;
2716 } 3142 }
2717 if ((up_dev == wl->current_dev) && 3143 if ((up_dev == wl->current_dev) &&
2718 (!!wl->current_dev->phy.gmode == !!gmode)) { 3144 (!!wl->current_dev->phy.gmode == !!gmode)) {
2719 /* This device is already running. */ 3145 /* This device is already running. */
2720 return 0; 3146 return 0;
2721 } 3147 }
2722 b43dbg(wl, "Reconfiguring PHYmode to %s-PHY\n", 3148 b43dbg(wl, "Switching to %s-GHz band\n",
2723 phymode_to_string(new_mode)); 3149 band_to_string(chan->band));
2724 down_dev = wl->current_dev; 3150 down_dev = wl->current_dev;
2725 3151
2726 prev_status = b43_status(down_dev); 3152 prev_status = b43_status(down_dev);
@@ -2742,8 +3168,8 @@ static int b43_switch_phymode(struct b43_wl *wl, unsigned int new_mode)
2742 err = b43_wireless_core_init(up_dev); 3168 err = b43_wireless_core_init(up_dev);
2743 if (err) { 3169 if (err) {
2744 b43err(wl, "Fatal: Could not initialize device for " 3170 b43err(wl, "Fatal: Could not initialize device for "
2745 "newly selected %s-PHY mode\n", 3171 "selected %s-GHz band\n",
2746 phymode_to_string(new_mode)); 3172 band_to_string(chan->band));
2747 goto init_failure; 3173 goto init_failure;
2748 } 3174 }
2749 } 3175 }
@@ -2751,8 +3177,8 @@ static int b43_switch_phymode(struct b43_wl *wl, unsigned int new_mode)
2751 err = b43_wireless_core_start(up_dev); 3177 err = b43_wireless_core_start(up_dev);
2752 if (err) { 3178 if (err) {
2753 b43err(wl, "Fatal: Coult not start device for " 3179 b43err(wl, "Fatal: Coult not start device for "
2754 "newly selected %s-PHY mode\n", 3180 "selected %s-GHz band\n",
2755 phymode_to_string(new_mode)); 3181 band_to_string(chan->band));
2756 b43_wireless_core_exit(up_dev); 3182 b43_wireless_core_exit(up_dev);
2757 goto init_failure; 3183 goto init_failure;
2758 } 3184 }
@@ -2762,86 +3188,26 @@ static int b43_switch_phymode(struct b43_wl *wl, unsigned int new_mode)
2762 wl->current_dev = up_dev; 3188 wl->current_dev = up_dev;
2763 3189
2764 return 0; 3190 return 0;
2765 init_failure: 3191init_failure:
2766 /* Whoops, failed to init the new core. No core is operating now. */ 3192 /* Whoops, failed to init the new core. No core is operating now. */
2767 wl->current_dev = NULL; 3193 wl->current_dev = NULL;
2768 return err; 3194 return err;
2769} 3195}
2770 3196
2771/* Check if the use of the antenna that ieee80211 told us to
2772 * use is possible. This will fall back to DEFAULT.
2773 * "antenna_nr" is the antenna identifier we got from ieee80211. */
2774u8 b43_ieee80211_antenna_sanitize(struct b43_wldev *dev,
2775 u8 antenna_nr)
2776{
2777 u8 antenna_mask;
2778
2779 if (antenna_nr == 0) {
2780 /* Zero means "use default antenna". That's always OK. */
2781 return 0;
2782 }
2783
2784 /* Get the mask of available antennas. */
2785 if (dev->phy.gmode)
2786 antenna_mask = dev->dev->bus->sprom.ant_available_bg;
2787 else
2788 antenna_mask = dev->dev->bus->sprom.ant_available_a;
2789
2790 if (!(antenna_mask & (1 << (antenna_nr - 1)))) {
2791 /* This antenna is not available. Fall back to default. */
2792 return 0;
2793 }
2794
2795 return antenna_nr;
2796}
2797
2798static int b43_antenna_from_ieee80211(struct b43_wldev *dev, u8 antenna)
2799{
2800 antenna = b43_ieee80211_antenna_sanitize(dev, antenna);
2801 switch (antenna) {
2802 case 0: /* default/diversity */
2803 return B43_ANTENNA_DEFAULT;
2804 case 1: /* Antenna 0 */
2805 return B43_ANTENNA0;
2806 case 2: /* Antenna 1 */
2807 return B43_ANTENNA1;
2808 case 3: /* Antenna 2 */
2809 return B43_ANTENNA2;
2810 case 4: /* Antenna 3 */
2811 return B43_ANTENNA3;
2812 default:
2813 return B43_ANTENNA_DEFAULT;
2814 }
2815}
2816
2817static int b43_op_config(struct ieee80211_hw *hw, struct ieee80211_conf *conf) 3197static int b43_op_config(struct ieee80211_hw *hw, struct ieee80211_conf *conf)
2818{ 3198{
2819 struct b43_wl *wl = hw_to_b43_wl(hw); 3199 struct b43_wl *wl = hw_to_b43_wl(hw);
2820 struct b43_wldev *dev; 3200 struct b43_wldev *dev;
2821 struct b43_phy *phy; 3201 struct b43_phy *phy;
2822 unsigned long flags; 3202 unsigned long flags;
2823 unsigned int new_phymode = 0xFFFF;
2824 int antenna; 3203 int antenna;
2825 int err = 0; 3204 int err = 0;
2826 u32 savedirqs; 3205 u32 savedirqs;
2827 3206
2828 mutex_lock(&wl->mutex); 3207 mutex_lock(&wl->mutex);
2829 3208
2830 /* Switch the PHY mode (if necessary). */ 3209 /* Switch the band (if necessary). This might change the active core. */
2831 switch (conf->phymode) { 3210 err = b43_switch_band(wl, conf->channel);
2832 case MODE_IEEE80211A:
2833 new_phymode = B43_PHYMODE_A;
2834 break;
2835 case MODE_IEEE80211B:
2836 new_phymode = B43_PHYMODE_B;
2837 break;
2838 case MODE_IEEE80211G:
2839 new_phymode = B43_PHYMODE_G;
2840 break;
2841 default:
2842 B43_WARN_ON(1);
2843 }
2844 err = b43_switch_phymode(wl, new_phymode);
2845 if (err) 3211 if (err)
2846 goto out_unlock_mutex; 3212 goto out_unlock_mutex;
2847 dev = wl->current_dev; 3213 dev = wl->current_dev;
@@ -2861,8 +3227,8 @@ static int b43_op_config(struct ieee80211_hw *hw, struct ieee80211_conf *conf)
2861 3227
2862 /* Switch to the requested channel. 3228 /* Switch to the requested channel.
2863 * The firmware takes care of races with the TX handler. */ 3229 * The firmware takes care of races with the TX handler. */
2864 if (conf->channel_val != phy->channel) 3230 if (conf->channel->hw_value != phy->channel)
2865 b43_radio_selectchannel(dev, conf->channel_val, 0); 3231 b43_radio_selectchannel(dev, conf->channel->hw_value, 0);
2866 3232
2867 /* Enable/Disable ShortSlot timing. */ 3233 /* Enable/Disable ShortSlot timing. */
2868 if ((!!(conf->flags & IEEE80211_CONF_SHORT_SLOT_TIME)) != 3234 if ((!!(conf->flags & IEEE80211_CONF_SHORT_SLOT_TIME)) !=
@@ -3075,8 +3441,10 @@ static int b43_op_config_interface(struct ieee80211_hw *hw,
3075 if (b43_is_mode(wl, IEEE80211_IF_TYPE_AP)) { 3441 if (b43_is_mode(wl, IEEE80211_IF_TYPE_AP)) {
3076 B43_WARN_ON(conf->type != IEEE80211_IF_TYPE_AP); 3442 B43_WARN_ON(conf->type != IEEE80211_IF_TYPE_AP);
3077 b43_set_ssid(dev, conf->ssid, conf->ssid_len); 3443 b43_set_ssid(dev, conf->ssid, conf->ssid_len);
3078 if (conf->beacon) 3444 if (conf->beacon) {
3079 b43_update_templates(wl, conf->beacon); 3445 b43_update_templates(wl, conf->beacon,
3446 conf->beacon_control);
3447 }
3080 } 3448 }
3081 b43_write_mac_bssid_templates(dev); 3449 b43_write_mac_bssid_templates(dev);
3082 } 3450 }
@@ -3106,6 +3474,7 @@ static void b43_wireless_core_stop(struct b43_wldev *dev)
3106 3474
3107 b43_set_status(dev, B43_STAT_INITIALIZED); 3475 b43_set_status(dev, B43_STAT_INITIALIZED);
3108 3476
3477 b43_pio_stop(dev);
3109 mutex_unlock(&wl->mutex); 3478 mutex_unlock(&wl->mutex);
3110 /* Must unlock as it would otherwise deadlock. No races here. 3479 /* Must unlock as it would otherwise deadlock. No races here.
3111 * Cancel the possibly running self-rearming periodic work. */ 3480 * Cancel the possibly running self-rearming periodic work. */
@@ -3400,6 +3769,41 @@ static void b43_set_retry_limits(struct b43_wldev *dev,
3400 long_retry); 3769 long_retry);
3401} 3770}
3402 3771
3772static void b43_set_synth_pu_delay(struct b43_wldev *dev, bool idle)
3773{
3774 u16 pu_delay;
3775
3776 /* The time value is in microseconds. */
3777 if (dev->phy.type == B43_PHYTYPE_A)
3778 pu_delay = 3700;
3779 else
3780 pu_delay = 1050;
3781 if (b43_is_mode(dev->wl, IEEE80211_IF_TYPE_IBSS) || idle)
3782 pu_delay = 500;
3783 if ((dev->phy.radio_ver == 0x2050) && (dev->phy.radio_rev == 8))
3784 pu_delay = max(pu_delay, (u16)2400);
3785
3786 b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_SPUWKUP, pu_delay);
3787}
3788
3789/* Set the TSF CFP pre-TargetBeaconTransmissionTime. */
3790static void b43_set_pretbtt(struct b43_wldev *dev)
3791{
3792 u16 pretbtt;
3793
3794 /* The time value is in microseconds. */
3795 if (b43_is_mode(dev->wl, IEEE80211_IF_TYPE_IBSS)) {
3796 pretbtt = 2;
3797 } else {
3798 if (dev->phy.type == B43_PHYTYPE_A)
3799 pretbtt = 120;
3800 else
3801 pretbtt = 250;
3802 }
3803 b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_PRETBTT, pretbtt);
3804 b43_write16(dev, B43_MMIO_TSF_CFP_PRETBTT, pretbtt);
3805}
3806
3403/* Shutdown a wireless core */ 3807/* Shutdown a wireless core */
3404/* Locking: wl->mutex */ 3808/* Locking: wl->mutex */
3405static void b43_wireless_core_exit(struct b43_wldev *dev) 3809static void b43_wireless_core_exit(struct b43_wldev *dev)
@@ -3423,6 +3827,7 @@ static void b43_wireless_core_exit(struct b43_wldev *dev)
3423 b43_rng_exit(dev->wl, false); 3827 b43_rng_exit(dev->wl, false);
3424 } 3828 }
3425 b43_dma_free(dev); 3829 b43_dma_free(dev);
3830 b43_pio_free(dev);
3426 b43_chip_exit(dev); 3831 b43_chip_exit(dev);
3427 b43_radio_turn_off(dev, 1); 3832 b43_radio_turn_off(dev, 1);
3428 b43_switch_analog(dev, 0); 3833 b43_switch_analog(dev, 0);
@@ -3510,6 +3915,7 @@ static int b43_wireless_core_init(struct b43_wldev *dev)
3510 b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_PRMAXTIME, 1); 3915 b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_PRMAXTIME, 1);
3511 3916
3512 b43_rate_memory_init(dev); 3917 b43_rate_memory_init(dev);
3918 b43_set_phytxctl_defaults(dev);
3513 3919
3514 /* Minimum Contention Window */ 3920 /* Minimum Contention Window */
3515 if (phy->type == B43_PHYTYPE_B) { 3921 if (phy->type == B43_PHYTYPE_B) {
@@ -3520,18 +3926,17 @@ static int b43_wireless_core_init(struct b43_wldev *dev)
3520 /* Maximum Contention Window */ 3926 /* Maximum Contention Window */
3521 b43_shm_write16(dev, B43_SHM_SCRATCH, B43_SHM_SC_MAXCONT, 0x3FF); 3927 b43_shm_write16(dev, B43_SHM_SCRATCH, B43_SHM_SC_MAXCONT, 0x3FF);
3522 3928
3523 err = b43_dma_init(dev); 3929 if ((dev->dev->bus->bustype == SSB_BUSTYPE_PCMCIA) || B43_FORCE_PIO) {
3930 dev->__using_pio_transfers = 1;
3931 err = b43_pio_init(dev);
3932 } else {
3933 dev->__using_pio_transfers = 0;
3934 err = b43_dma_init(dev);
3935 }
3524 if (err) 3936 if (err)
3525 goto err_chip_exit; 3937 goto err_chip_exit;
3526 b43_qos_init(dev); 3938 b43_qos_init(dev);
3527 3939 b43_set_synth_pu_delay(dev, 1);
3528//FIXME
3529#if 1
3530 b43_write16(dev, 0x0612, 0x0050);
3531 b43_shm_write16(dev, B43_SHM_SHARED, 0x0416, 0x0050);
3532 b43_shm_write16(dev, B43_SHM_SHARED, 0x0414, 0x01F4);
3533#endif
3534
3535 b43_bluetooth_coext_enable(dev); 3940 b43_bluetooth_coext_enable(dev);
3536 3941
3537 ssb_bus_powerup(bus, 1); /* Enable dynamic PCTL */ 3942 ssb_bus_powerup(bus, 1); /* Enable dynamic PCTL */
@@ -3591,6 +3996,8 @@ static int b43_op_add_interface(struct ieee80211_hw *hw,
3591 3996
3592 spin_lock_irqsave(&wl->irq_lock, flags); 3997 spin_lock_irqsave(&wl->irq_lock, flags);
3593 b43_adjust_opmode(dev); 3998 b43_adjust_opmode(dev);
3999 b43_set_pretbtt(dev);
4000 b43_set_synth_pu_delay(dev, 0);
3594 b43_upload_card_macaddress(dev); 4001 b43_upload_card_macaddress(dev);
3595 spin_unlock_irqrestore(&wl->irq_lock, flags); 4002 spin_unlock_irqrestore(&wl->irq_lock, flags);
3596 4003
@@ -3642,6 +4049,7 @@ static int b43_op_start(struct ieee80211_hw *hw)
3642 memset(wl->mac_addr, 0, ETH_ALEN); 4049 memset(wl->mac_addr, 0, ETH_ALEN);
3643 wl->filter_flags = 0; 4050 wl->filter_flags = 0;
3644 wl->radiotap_enabled = 0; 4051 wl->radiotap_enabled = 0;
4052 b43_qos_clear(wl);
3645 4053
3646 /* First register RFkill. 4054 /* First register RFkill.
3647 * LEDs that are registered later depend on it. */ 4055 * LEDs that are registered later depend on it. */
@@ -3683,6 +4091,8 @@ static void b43_op_stop(struct ieee80211_hw *hw)
3683 struct b43_wldev *dev = wl->current_dev; 4091 struct b43_wldev *dev = wl->current_dev;
3684 4092
3685 b43_rfkill_exit(dev); 4093 b43_rfkill_exit(dev);
4094 cancel_work_sync(&(wl->qos_update_work));
4095 cancel_work_sync(&(wl->beacon_update_trigger));
3686 4096
3687 mutex_lock(&wl->mutex); 4097 mutex_lock(&wl->mutex);
3688 if (b43_status(dev) >= B43_STAT_STARTED) 4098 if (b43_status(dev) >= B43_STAT_STARTED)
@@ -3716,16 +4126,17 @@ static int b43_op_beacon_set_tim(struct ieee80211_hw *hw, int aid, int set)
3716 struct b43_wl *wl = hw_to_b43_wl(hw); 4126 struct b43_wl *wl = hw_to_b43_wl(hw);
3717 struct sk_buff *beacon; 4127 struct sk_buff *beacon;
3718 unsigned long flags; 4128 unsigned long flags;
4129 struct ieee80211_tx_control txctl;
3719 4130
3720 /* We could modify the existing beacon and set the aid bit in 4131 /* We could modify the existing beacon and set the aid bit in
3721 * the TIM field, but that would probably require resizing and 4132 * the TIM field, but that would probably require resizing and
3722 * moving of data within the beacon template. 4133 * moving of data within the beacon template.
3723 * Simply request a new beacon and let mac80211 do the hard work. */ 4134 * Simply request a new beacon and let mac80211 do the hard work. */
3724 beacon = ieee80211_beacon_get(hw, wl->vif, NULL); 4135 beacon = ieee80211_beacon_get(hw, wl->vif, &txctl);
3725 if (unlikely(!beacon)) 4136 if (unlikely(!beacon))
3726 return -ENOMEM; 4137 return -ENOMEM;
3727 spin_lock_irqsave(&wl->irq_lock, flags); 4138 spin_lock_irqsave(&wl->irq_lock, flags);
3728 b43_update_templates(wl, beacon); 4139 b43_update_templates(wl, beacon, &txctl);
3729 spin_unlock_irqrestore(&wl->irq_lock, flags); 4140 spin_unlock_irqrestore(&wl->irq_lock, flags);
3730 4141
3731 return 0; 4142 return 0;
@@ -3739,12 +4150,22 @@ static int b43_op_ibss_beacon_update(struct ieee80211_hw *hw,
3739 unsigned long flags; 4150 unsigned long flags;
3740 4151
3741 spin_lock_irqsave(&wl->irq_lock, flags); 4152 spin_lock_irqsave(&wl->irq_lock, flags);
3742 b43_update_templates(wl, beacon); 4153 b43_update_templates(wl, beacon, ctl);
3743 spin_unlock_irqrestore(&wl->irq_lock, flags); 4154 spin_unlock_irqrestore(&wl->irq_lock, flags);
3744 4155
3745 return 0; 4156 return 0;
3746} 4157}
3747 4158
4159static void b43_op_sta_notify(struct ieee80211_hw *hw,
4160 struct ieee80211_vif *vif,
4161 enum sta_notify_cmd notify_cmd,
4162 const u8 *addr)
4163{
4164 struct b43_wl *wl = hw_to_b43_wl(hw);
4165
4166 B43_WARN_ON(!vif || wl->vif != vif);
4167}
4168
3748static const struct ieee80211_ops b43_hw_ops = { 4169static const struct ieee80211_ops b43_hw_ops = {
3749 .tx = b43_op_tx, 4170 .tx = b43_op_tx,
3750 .conf_tx = b43_op_conf_tx, 4171 .conf_tx = b43_op_conf_tx,
@@ -3761,6 +4182,7 @@ static const struct ieee80211_ops b43_hw_ops = {
3761 .set_retry_limit = b43_op_set_retry_limit, 4182 .set_retry_limit = b43_op_set_retry_limit,
3762 .set_tim = b43_op_beacon_set_tim, 4183 .set_tim = b43_op_beacon_set_tim,
3763 .beacon_update = b43_op_ibss_beacon_update, 4184 .beacon_update = b43_op_ibss_beacon_update,
4185 .sta_notify = b43_op_sta_notify,
3764}; 4186};
3765 4187
3766/* Hard-reset the chip. Do not call this directly. 4188/* Hard-reset the chip. Do not call this directly.
@@ -3804,31 +4226,23 @@ static void b43_chip_reset(struct work_struct *work)
3804 b43info(wl, "Controller restarted\n"); 4226 b43info(wl, "Controller restarted\n");
3805} 4227}
3806 4228
3807static int b43_setup_modes(struct b43_wldev *dev, 4229static int b43_setup_bands(struct b43_wldev *dev,
3808 bool have_2ghz_phy, bool have_5ghz_phy) 4230 bool have_2ghz_phy, bool have_5ghz_phy)
3809{ 4231{
3810 struct ieee80211_hw *hw = dev->wl->hw; 4232 struct ieee80211_hw *hw = dev->wl->hw;
3811 struct ieee80211_hw_mode *mode;
3812 struct b43_phy *phy = &dev->phy;
3813 int err;
3814 4233
3815 /* XXX: This function will go away soon, when mac80211 4234 if (have_2ghz_phy)
3816 * band stuff is rewritten. So this is just a hack. 4235 hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &b43_band_2GHz;
3817 * For now we always claim GPHY mode, as there is no 4236 if (dev->phy.type == B43_PHYTYPE_N) {
3818 * support for NPHY and APHY in the device, yet. 4237 if (have_5ghz_phy)
3819 * This assumption is OK, as any B, N or A PHY will already 4238 hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &b43_band_5GHz_nphy;
3820 * have died a horrible sanity check death earlier. */ 4239 } else {
3821 4240 if (have_5ghz_phy)
3822 mode = &phy->hwmodes[0]; 4241 hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &b43_band_5GHz_aphy;
3823 mode->mode = MODE_IEEE80211G; 4242 }
3824 mode->num_channels = b43_2ghz_chantable_size; 4243
3825 mode->channels = b43_2ghz_chantable; 4244 dev->phy.supports_2ghz = have_2ghz_phy;
3826 mode->num_rates = b43_g_ratetable_size; 4245 dev->phy.supports_5ghz = have_5ghz_phy;
3827 mode->rates = b43_g_ratetable;
3828 err = ieee80211_register_hwmode(hw, mode);
3829 if (err)
3830 return err;
3831 phy->possible_phymodes |= B43_PHYMODE_G;
3832 4246
3833 return 0; 4247 return 0;
3834} 4248}
@@ -3910,7 +4324,7 @@ static int b43_wireless_core_attach(struct b43_wldev *dev)
3910 err = b43_validate_chipaccess(dev); 4324 err = b43_validate_chipaccess(dev);
3911 if (err) 4325 if (err)
3912 goto err_powerdown; 4326 goto err_powerdown;
3913 err = b43_setup_modes(dev, have_2ghz_phy, have_5ghz_phy); 4327 err = b43_setup_bands(dev, have_2ghz_phy, have_5ghz_phy);
3914 if (err) 4328 if (err)
3915 goto err_powerdown; 4329 goto err_powerdown;
3916 4330
@@ -4040,7 +4454,7 @@ static int b43_wireless_init(struct ssb_device *dev)
4040 hw->max_signal = 100; 4454 hw->max_signal = 100;
4041 hw->max_rssi = -110; 4455 hw->max_rssi = -110;
4042 hw->max_noise = -110; 4456 hw->max_noise = -110;
4043 hw->queues = 1; /* FIXME: hardware has more queues */ 4457 hw->queues = b43_modparam_qos ? 4 : 1;
4044 SET_IEEE80211_DEV(hw, dev->dev); 4458 SET_IEEE80211_DEV(hw, dev->dev);
4045 if (is_valid_ether_addr(sprom->et1mac)) 4459 if (is_valid_ether_addr(sprom->et1mac))
4046 SET_IEEE80211_PERM_ADDR(hw, sprom->et1mac); 4460 SET_IEEE80211_PERM_ADDR(hw, sprom->et1mac);
@@ -4056,6 +4470,8 @@ static int b43_wireless_init(struct ssb_device *dev)
4056 spin_lock_init(&wl->shm_lock); 4470 spin_lock_init(&wl->shm_lock);
4057 mutex_init(&wl->mutex); 4471 mutex_init(&wl->mutex);
4058 INIT_LIST_HEAD(&wl->devlist); 4472 INIT_LIST_HEAD(&wl->devlist);
4473 INIT_WORK(&wl->qos_update_work, b43_qos_update_work);
4474 INIT_WORK(&wl->beacon_update_trigger, b43_beacon_update_trigger_work);
4059 4475
4060 ssb_set_devtypedata(dev, wl); 4476 ssb_set_devtypedata(dev, wl);
4061 b43info(wl, "Broadcom %04X WLAN found\n", dev->bus->chip_id); 4477 b43info(wl, "Broadcom %04X WLAN found\n", dev->bus->chip_id);
diff --git a/drivers/net/wireless/b43/main.h b/drivers/net/wireless/b43/main.h
index 2d52d9de9305..5230aeca78bf 100644
--- a/drivers/net/wireless/b43/main.h
+++ b/drivers/net/wireless/b43/main.h
@@ -38,6 +38,10 @@
38/* Magic helper macro to pad structures. Ignore those above. It's magic. */ 38/* Magic helper macro to pad structures. Ignore those above. It's magic. */
39#define PAD_BYTES(nr_bytes) P4D_BYTES( __LINE__ , (nr_bytes)) 39#define PAD_BYTES(nr_bytes) P4D_BYTES( __LINE__ , (nr_bytes))
40 40
41
42extern int b43_modparam_qos;
43
44
41/* Lightweight function to convert a frequency (in Mhz) to a channel number. */ 45/* Lightweight function to convert a frequency (in Mhz) to a channel number. */
42static inline u8 b43_freq_to_channel_5ghz(int freq) 46static inline u8 b43_freq_to_channel_5ghz(int freq)
43{ 47{
@@ -95,16 +99,13 @@ u16 b43_shm_read16(struct b43_wldev *dev, u16 routing, u16 offset);
95void b43_shm_write32(struct b43_wldev *dev, u16 routing, u16 offset, u32 value); 99void b43_shm_write32(struct b43_wldev *dev, u16 routing, u16 offset, u32 value);
96void b43_shm_write16(struct b43_wldev *dev, u16 routing, u16 offset, u16 value); 100void b43_shm_write16(struct b43_wldev *dev, u16 routing, u16 offset, u16 value);
97 101
98u32 b43_hf_read(struct b43_wldev *dev); 102u64 b43_hf_read(struct b43_wldev *dev);
99void b43_hf_write(struct b43_wldev *dev, u32 value); 103void b43_hf_write(struct b43_wldev *dev, u64 value);
100 104
101void b43_dummy_transmission(struct b43_wldev *dev); 105void b43_dummy_transmission(struct b43_wldev *dev);
102 106
103void b43_wireless_core_reset(struct b43_wldev *dev, u32 flags); 107void b43_wireless_core_reset(struct b43_wldev *dev, u32 flags);
104 108
105void b43_mac_suspend(struct b43_wldev *dev);
106void b43_mac_enable(struct b43_wldev *dev);
107
108void b43_controller_restart(struct b43_wldev *dev, const char *reason); 109void b43_controller_restart(struct b43_wldev *dev, const char *reason);
109 110
110#define B43_PS_ENABLED (1 << 0) /* Force enable hardware power saving */ 111#define B43_PS_ENABLED (1 << 0) /* Force enable hardware power saving */
diff --git a/drivers/net/wireless/b43/nphy.c b/drivers/net/wireless/b43/nphy.c
index 705131ef4bfa..8695eb223476 100644
--- a/drivers/net/wireless/b43/nphy.c
+++ b/drivers/net/wireless/b43/nphy.c
@@ -240,7 +240,6 @@ static void b43_nphy_workarounds(struct b43_wldev *dev)
240 240
241 b43_phy_set(dev, B43_NPHY_IQFLIP, 241 b43_phy_set(dev, B43_NPHY_IQFLIP,
242 B43_NPHY_IQFLIP_ADC1 | B43_NPHY_IQFLIP_ADC2); 242 B43_NPHY_IQFLIP_ADC1 | B43_NPHY_IQFLIP_ADC2);
243 //FIXME the following condition is different in the specs.
244 if (1 /* FIXME band is 2.4GHz */) { 243 if (1 /* FIXME band is 2.4GHz */) {
245 b43_phy_set(dev, B43_NPHY_CLASSCTL, 244 b43_phy_set(dev, B43_NPHY_CLASSCTL,
246 B43_NPHY_CLASSCTL_CCKEN); 245 B43_NPHY_CLASSCTL_CCKEN);
diff --git a/drivers/net/wireless/b43/nphy.h b/drivers/net/wireless/b43/nphy.h
index 5d95118b8193..faf46b9cbf1b 100644
--- a/drivers/net/wireless/b43/nphy.h
+++ b/drivers/net/wireless/b43/nphy.h
@@ -919,6 +919,10 @@
919 919
920struct b43_wldev; 920struct b43_wldev;
921 921
922
923#ifdef CONFIG_B43_NPHY
924/* N-PHY support enabled */
925
922int b43_phy_initn(struct b43_wldev *dev); 926int b43_phy_initn(struct b43_wldev *dev);
923 927
924void b43_nphy_radio_turn_on(struct b43_wldev *dev); 928void b43_nphy_radio_turn_on(struct b43_wldev *dev);
@@ -929,4 +933,40 @@ int b43_nphy_selectchannel(struct b43_wldev *dev, u8 channel);
929void b43_nphy_xmitpower(struct b43_wldev *dev); 933void b43_nphy_xmitpower(struct b43_wldev *dev);
930void b43_nphy_set_rxantenna(struct b43_wldev *dev, int antenna); 934void b43_nphy_set_rxantenna(struct b43_wldev *dev, int antenna);
931 935
936
937#else /* CONFIG_B43_NPHY */
938/* N-PHY support disabled */
939
940
941static inline
942int b43_phy_initn(struct b43_wldev *dev)
943{
944 return -EOPNOTSUPP;
945}
946
947static inline
948void b43_nphy_radio_turn_on(struct b43_wldev *dev)
949{
950}
951static inline
952void b43_nphy_radio_turn_off(struct b43_wldev *dev)
953{
954}
955
956static inline
957int b43_nphy_selectchannel(struct b43_wldev *dev, u8 channel)
958{
959 return -ENOSYS;
960}
961
962static inline
963void b43_nphy_xmitpower(struct b43_wldev *dev)
964{
965}
966static inline
967void b43_nphy_set_rxantenna(struct b43_wldev *dev, int antenna)
968{
969}
970
971#endif /* CONFIG_B43_NPHY */
932#endif /* B43_NPHY_H_ */ 972#endif /* B43_NPHY_H_ */
diff --git a/drivers/net/wireless/b43/pcmcia.c b/drivers/net/wireless/b43/pcmcia.c
index 371e4a119511..b8aa16307f79 100644
--- a/drivers/net/wireless/b43/pcmcia.c
+++ b/drivers/net/wireless/b43/pcmcia.c
@@ -43,14 +43,16 @@ MODULE_DEVICE_TABLE(pcmcia, b43_pcmcia_tbl);
43#ifdef CONFIG_PM 43#ifdef CONFIG_PM
44static int b43_pcmcia_suspend(struct pcmcia_device *dev) 44static int b43_pcmcia_suspend(struct pcmcia_device *dev)
45{ 45{
46 //TODO 46 struct ssb_bus *ssb = dev->priv;
47 return 0; 47
48 return ssb_bus_suspend(ssb);
48} 49}
49 50
50static int b43_pcmcia_resume(struct pcmcia_device *dev) 51static int b43_pcmcia_resume(struct pcmcia_device *dev)
51{ 52{
52 //TODO 53 struct ssb_bus *ssb = dev->priv;
53 return 0; 54
55 return ssb_bus_resume(ssb);
54} 56}
55#else /* CONFIG_PM */ 57#else /* CONFIG_PM */
56# define b43_pcmcia_suspend NULL 58# define b43_pcmcia_suspend NULL
diff --git a/drivers/net/wireless/b43/pio.c b/drivers/net/wireless/b43/pio.c
new file mode 100644
index 000000000000..fcacafb04346
--- /dev/null
+++ b/drivers/net/wireless/b43/pio.c
@@ -0,0 +1,842 @@
1/*
2
3 Broadcom B43 wireless driver
4
5 PIO data transfer
6
7 Copyright (c) 2005-2008 Michael Buesch <mb@bu3sch.de>
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 2 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program; see the file COPYING. If not, write to
21 the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
22 Boston, MA 02110-1301, USA.
23
24*/
25
26#include "b43.h"
27#include "pio.h"
28#include "dma.h"
29#include "main.h"
30#include "xmit.h"
31
32#include <linux/delay.h>
33
34
35static void b43_pio_rx_work(struct work_struct *work);
36
37
38static u16 generate_cookie(struct b43_pio_txqueue *q,
39 struct b43_pio_txpacket *pack)
40{
41 u16 cookie;
42
43 /* Use the upper 4 bits of the cookie as
44 * PIO controller ID and store the packet index number
45 * in the lower 12 bits.
46 * Note that the cookie must never be 0, as this
 47 * is a special value used in the RX path.
 48 * It also must not be 0xFFFF, because that value is special
49 * for multicast frames.
50 */
51 cookie = (((u16)q->index + 1) << 12);
52 cookie |= pack->index;
53
54 return cookie;
55}
56
57static
58struct b43_pio_txqueue * parse_cookie(struct b43_wldev *dev,
59 u16 cookie,
60 struct b43_pio_txpacket **pack)
61{
62 struct b43_pio *pio = &dev->pio;
63 struct b43_pio_txqueue *q = NULL;
64 unsigned int pack_index;
65
66 switch (cookie & 0xF000) {
67 case 0x1000:
68 q = pio->tx_queue_AC_BK;
69 break;
70 case 0x2000:
71 q = pio->tx_queue_AC_BE;
72 break;
73 case 0x3000:
74 q = pio->tx_queue_AC_VI;
75 break;
76 case 0x4000:
77 q = pio->tx_queue_AC_VO;
78 break;
79 case 0x5000:
80 q = pio->tx_queue_mcast;
81 break;
82 }
83 if (B43_WARN_ON(!q))
84 return NULL;
85 pack_index = (cookie & 0x0FFF);
86 if (B43_WARN_ON(pack_index >= ARRAY_SIZE(q->packets)))
87 return NULL;
88 *pack = &q->packets[pack_index];
89
90 return q;
91}
92
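The cookie format documented above can be exercised outside the driver. The following is a minimal stand-alone sketch (plain user-space C, not driver code) that assumes only what the comment states: a 1-based queue index in bits 12-15 and the packet-slot index in bits 0-11.

#include <stdio.h>
#include <stdint.h>

/* Pack a 1-based queue index into bits 12-15 and the packet slot
 * into bits 0-11, mirroring generate_cookie(). */
static uint16_t make_cookie(unsigned int queue_index, unsigned int pack_index)
{
        return (uint16_t)(((queue_index + 1) << 12) | (pack_index & 0x0FFF));
}

int main(void)
{
        unsigned int cookie = make_cookie(1, 5); /* AC_BE queue (index 1), slot 5 */

        /* Undo the packing the way parse_cookie() does. */
        printf("cookie=0x%04X queue=%u slot=%u\n",
               cookie, (cookie >> 12) - 1, cookie & 0x0FFFu);
        return 0;
}

With the queue indices assigned later in b43_pio_init() (0 = AC_BK through 4 = multicast), a cookie of 0x2005 therefore resolves to the AC_BE queue and slot 5, matching the 0x2000 case in parse_cookie().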
93static u16 index_to_pioqueue_base(struct b43_wldev *dev,
94 unsigned int index)
95{
96 static const u16 bases[] = {
97 B43_MMIO_PIO_BASE0,
98 B43_MMIO_PIO_BASE1,
99 B43_MMIO_PIO_BASE2,
100 B43_MMIO_PIO_BASE3,
101 B43_MMIO_PIO_BASE4,
102 B43_MMIO_PIO_BASE5,
103 B43_MMIO_PIO_BASE6,
104 B43_MMIO_PIO_BASE7,
105 };
106 static const u16 bases_rev11[] = {
107 B43_MMIO_PIO11_BASE0,
108 B43_MMIO_PIO11_BASE1,
109 B43_MMIO_PIO11_BASE2,
110 B43_MMIO_PIO11_BASE3,
111 B43_MMIO_PIO11_BASE4,
112 B43_MMIO_PIO11_BASE5,
113 };
114
115 if (dev->dev->id.revision >= 11) {
116 B43_WARN_ON(index >= ARRAY_SIZE(bases_rev11));
117 return bases_rev11[index];
118 }
119 B43_WARN_ON(index >= ARRAY_SIZE(bases));
120 return bases[index];
121}
122
123static u16 pio_txqueue_offset(struct b43_wldev *dev)
124{
125 if (dev->dev->id.revision >= 11)
126 return 0x18;
127 return 0;
128}
129
130static u16 pio_rxqueue_offset(struct b43_wldev *dev)
131{
132 if (dev->dev->id.revision >= 11)
133 return 0x38;
134 return 8;
135}
136
137static struct b43_pio_txqueue * b43_setup_pioqueue_tx(struct b43_wldev *dev,
138 unsigned int index)
139{
140 struct b43_pio_txqueue *q;
141 struct b43_pio_txpacket *p;
142 unsigned int i;
143
144 q = kzalloc(sizeof(*q), GFP_KERNEL);
145 if (!q)
146 return NULL;
147 spin_lock_init(&q->lock);
148 q->dev = dev;
149 q->rev = dev->dev->id.revision;
150 q->mmio_base = index_to_pioqueue_base(dev, index) +
151 pio_txqueue_offset(dev);
152 q->index = index;
153
154 q->free_packet_slots = B43_PIO_MAX_NR_TXPACKETS;
155 if (q->rev >= 8) {
156 q->buffer_size = 1920; //FIXME this constant is wrong.
157 } else {
158 q->buffer_size = b43_piotx_read16(q, B43_PIO_TXQBUFSIZE);
159 q->buffer_size -= 80;
160 }
161
162 INIT_LIST_HEAD(&q->packets_list);
163 for (i = 0; i < ARRAY_SIZE(q->packets); i++) {
164 p = &(q->packets[i]);
165 INIT_LIST_HEAD(&p->list);
166 p->index = i;
167 p->queue = q;
168 list_add(&p->list, &q->packets_list);
169 }
170
171 return q;
172}
173
174static struct b43_pio_rxqueue * b43_setup_pioqueue_rx(struct b43_wldev *dev,
175 unsigned int index)
176{
177 struct b43_pio_rxqueue *q;
178
179 q = kzalloc(sizeof(*q), GFP_KERNEL);
180 if (!q)
181 return NULL;
182 spin_lock_init(&q->lock);
183 q->dev = dev;
184 q->rev = dev->dev->id.revision;
185 q->mmio_base = index_to_pioqueue_base(dev, index) +
186 pio_rxqueue_offset(dev);
187 INIT_WORK(&q->rx_work, b43_pio_rx_work);
188
189 /* Enable Direct FIFO RX (PIO) on the engine. */
190 b43_dma_direct_fifo_rx(dev, index, 1);
191
192 return q;
193}
194
195static void b43_pio_cancel_tx_packets(struct b43_pio_txqueue *q)
196{
197 struct b43_pio_txpacket *pack;
198 unsigned int i;
199
200 for (i = 0; i < ARRAY_SIZE(q->packets); i++) {
201 pack = &(q->packets[i]);
202 if (pack->skb) {
203 dev_kfree_skb_any(pack->skb);
204 pack->skb = NULL;
205 }
206 }
207}
208
209static void b43_destroy_pioqueue_tx(struct b43_pio_txqueue *q,
210 const char *name)
211{
212 if (!q)
213 return;
214 b43_pio_cancel_tx_packets(q);
215 kfree(q);
216}
217
218static void b43_destroy_pioqueue_rx(struct b43_pio_rxqueue *q,
219 const char *name)
220{
221 if (!q)
222 return;
223 kfree(q);
224}
225
226#define destroy_queue_tx(pio, queue) do { \
227 b43_destroy_pioqueue_tx((pio)->queue, __stringify(queue)); \
228 (pio)->queue = NULL; \
229 } while (0)
230
231#define destroy_queue_rx(pio, queue) do { \
232 b43_destroy_pioqueue_rx((pio)->queue, __stringify(queue)); \
233 (pio)->queue = NULL; \
234 } while (0)
235
236void b43_pio_free(struct b43_wldev *dev)
237{
238 struct b43_pio *pio;
239
240 if (!b43_using_pio_transfers(dev))
241 return;
242 pio = &dev->pio;
243
244 destroy_queue_rx(pio, rx_queue);
245 destroy_queue_tx(pio, tx_queue_mcast);
246 destroy_queue_tx(pio, tx_queue_AC_VO);
247 destroy_queue_tx(pio, tx_queue_AC_VI);
248 destroy_queue_tx(pio, tx_queue_AC_BE);
249 destroy_queue_tx(pio, tx_queue_AC_BK);
250}
251
252void b43_pio_stop(struct b43_wldev *dev)
253{
254 if (!b43_using_pio_transfers(dev))
255 return;
256 cancel_work_sync(&dev->pio.rx_queue->rx_work);
257}
258
259int b43_pio_init(struct b43_wldev *dev)
260{
261 struct b43_pio *pio = &dev->pio;
262 int err = -ENOMEM;
263
264 b43_write32(dev, B43_MMIO_MACCTL, b43_read32(dev, B43_MMIO_MACCTL)
265 & ~B43_MACCTL_BE);
266 b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_RXPADOFF, 0);
267
268 pio->tx_queue_AC_BK = b43_setup_pioqueue_tx(dev, 0);
269 if (!pio->tx_queue_AC_BK)
270 goto out;
271
272 pio->tx_queue_AC_BE = b43_setup_pioqueue_tx(dev, 1);
273 if (!pio->tx_queue_AC_BE)
274 goto err_destroy_bk;
275
276 pio->tx_queue_AC_VI = b43_setup_pioqueue_tx(dev, 2);
277 if (!pio->tx_queue_AC_VI)
278 goto err_destroy_be;
279
280 pio->tx_queue_AC_VO = b43_setup_pioqueue_tx(dev, 3);
281 if (!pio->tx_queue_AC_VO)
282 goto err_destroy_vi;
283
284 pio->tx_queue_mcast = b43_setup_pioqueue_tx(dev, 4);
285 if (!pio->tx_queue_mcast)
286 goto err_destroy_vo;
287
288 pio->rx_queue = b43_setup_pioqueue_rx(dev, 0);
289 if (!pio->rx_queue)
290 goto err_destroy_mcast;
291
292 b43dbg(dev->wl, "PIO initialized\n");
293 err = 0;
294out:
295 return err;
296
297err_destroy_mcast:
298 destroy_queue_tx(pio, tx_queue_mcast);
299err_destroy_vo:
300 destroy_queue_tx(pio, tx_queue_AC_VO);
301err_destroy_vi:
302 destroy_queue_tx(pio, tx_queue_AC_VI);
303err_destroy_be:
304 destroy_queue_tx(pio, tx_queue_AC_BE);
305err_destroy_bk:
306 destroy_queue_tx(pio, tx_queue_AC_BK);
307 return err;
308}
309
310/* Static mapping of mac80211's queues (priorities) to b43 PIO queues. */
311static struct b43_pio_txqueue * select_queue_by_priority(struct b43_wldev *dev,
312 u8 queue_prio)
313{
314 struct b43_pio_txqueue *q;
315
316 if (b43_modparam_qos) {
317 /* 0 = highest priority */
318 switch (queue_prio) {
319 default:
320 B43_WARN_ON(1);
321 /* fallthrough */
322 case 0:
323 q = dev->pio.tx_queue_AC_VO;
324 break;
325 case 1:
326 q = dev->pio.tx_queue_AC_VI;
327 break;
328 case 2:
329 q = dev->pio.tx_queue_AC_BE;
330 break;
331 case 3:
332 q = dev->pio.tx_queue_AC_BK;
333 break;
334 }
335 } else
336 q = dev->pio.tx_queue_AC_BE;
337
338 return q;
339}
340
341static u16 tx_write_2byte_queue(struct b43_pio_txqueue *q,
342 u16 ctl,
343 const void *_data,
344 unsigned int data_len)
345{
346 struct b43_wldev *dev = q->dev;
347 const u8 *data = _data;
348
349 ctl |= B43_PIO_TXCTL_WRITELO | B43_PIO_TXCTL_WRITEHI;
350 b43_piotx_write16(q, B43_PIO_TXCTL, ctl);
351
352 ssb_block_write(dev->dev, data, (data_len & ~1),
353 q->mmio_base + B43_PIO_TXDATA,
354 sizeof(u16));
355 if (data_len & 1) {
356 /* Write the last byte. */
357 ctl &= ~B43_PIO_TXCTL_WRITEHI;
358 b43_piotx_write16(q, B43_PIO_TXCTL, ctl);
359 b43_piotx_write16(q, B43_PIO_TXDATA, data[data_len - 1]);
360 }
361
362 return ctl;
363}
364
365static void pio_tx_frame_2byte_queue(struct b43_pio_txpacket *pack,
366 const u8 *hdr, unsigned int hdrlen)
367{
368 struct b43_pio_txqueue *q = pack->queue;
369 const char *frame = pack->skb->data;
370 unsigned int frame_len = pack->skb->len;
371 u16 ctl;
372
373 ctl = b43_piotx_read16(q, B43_PIO_TXCTL);
374 ctl |= B43_PIO_TXCTL_FREADY;
375 ctl &= ~B43_PIO_TXCTL_EOF;
376
377 /* Transfer the header data. */
378 ctl = tx_write_2byte_queue(q, ctl, hdr, hdrlen);
379 /* Transfer the frame data. */
380 ctl = tx_write_2byte_queue(q, ctl, frame, frame_len);
381
382 ctl |= B43_PIO_TXCTL_EOF;
383 b43_piotx_write16(q, B43_PIO_TXCTL, ctl);
384}
385
386static u32 tx_write_4byte_queue(struct b43_pio_txqueue *q,
387 u32 ctl,
388 const void *_data,
389 unsigned int data_len)
390{
391 struct b43_wldev *dev = q->dev;
392 const u8 *data = _data;
393
394 ctl |= B43_PIO8_TXCTL_0_7 | B43_PIO8_TXCTL_8_15 |
395 B43_PIO8_TXCTL_16_23 | B43_PIO8_TXCTL_24_31;
396 b43_piotx_write32(q, B43_PIO8_TXCTL, ctl);
397
398 ssb_block_write(dev->dev, data, (data_len & ~3),
399 q->mmio_base + B43_PIO8_TXDATA,
400 sizeof(u32));
401 if (data_len & 3) {
402 u32 value = 0;
403
404 /* Write the last few bytes. */
405 ctl &= ~(B43_PIO8_TXCTL_8_15 | B43_PIO8_TXCTL_16_23 |
406 B43_PIO8_TXCTL_24_31);
407 data = &(data[data_len - 1]);
408 switch (data_len & 3) {
409 case 3:
410 ctl |= B43_PIO8_TXCTL_16_23;
411 value |= (u32)(*data) << 16;
412 data--;
413 case 2:
414 ctl |= B43_PIO8_TXCTL_8_15;
415 value |= (u32)(*data) << 8;
416 data--;
417 case 1:
418 value |= (u32)(*data);
419 }
420 b43_piotx_write32(q, B43_PIO8_TXCTL, ctl);
421 b43_piotx_write32(q, B43_PIO8_TXDATA, value);
422 }
423
424 return ctl;
425}
426
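The tail handling above packs the final one to three bytes into a single 32-bit data-register write, with the TXCTL byte-enable bits telling the hardware which lanes of that word are valid. A stand-alone sketch of just the byte packing (plain C, no hardware access, not driver code):

#include <stdio.h>
#include <stdint.h>

/* Pack the trailing (data_len & 3) bytes of a buffer into one 32-bit
 * word, walking backwards from the last byte as the driver does.
 * Assumes data_len % 4 != 0, as guaranteed by the caller. */
static uint32_t pack_tail(const uint8_t *data, unsigned int data_len)
{
        uint32_t value = 0;
        const uint8_t *p = &data[data_len - 1];

        switch (data_len & 3) {
        case 3:
                value |= (uint32_t)(*p) << 16;
                p--;
                /* fall through */
        case 2:
                value |= (uint32_t)(*p) << 8;
                p--;
                /* fall through */
        case 1:
                value |= (uint32_t)(*p);
        }
        return value;
}

int main(void)
{
        const uint8_t buf[] = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77 };

        /* 7 bytes: 0x11..0x44 go out as a full word; 0x55 0x66 0x77 are
         * packed into the low 24 bits of the final write. */
        printf("tail word: 0x%08X\n", (unsigned int)pack_tail(buf, sizeof(buf)));
        return 0;
}

For a 7-byte buffer the first four bytes go out as a full word and the remaining three land in the low 24 bits of the final write, which is why the rev-8 path re-enables only B43_PIO8_TXCTL_8_15 and B43_PIO8_TXCTL_16_23 for that case (the 0-7 lane is never cleared).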
427static void pio_tx_frame_4byte_queue(struct b43_pio_txpacket *pack,
428 const u8 *hdr, unsigned int hdrlen)
429{
430 struct b43_pio_txqueue *q = pack->queue;
431 const char *frame = pack->skb->data;
432 unsigned int frame_len = pack->skb->len;
433 u32 ctl;
434
435 ctl = b43_piotx_read32(q, B43_PIO8_TXCTL);
436 ctl |= B43_PIO8_TXCTL_FREADY;
437 ctl &= ~B43_PIO8_TXCTL_EOF;
438
439 /* Transfer the header data. */
440 ctl = tx_write_4byte_queue(q, ctl, hdr, hdrlen);
441 /* Transfer the frame data. */
442 ctl = tx_write_4byte_queue(q, ctl, frame, frame_len);
443
444 ctl |= B43_PIO8_TXCTL_EOF;
 445 b43_piotx_write32(q, B43_PIO8_TXCTL, ctl);
446}
447
448static int pio_tx_frame(struct b43_pio_txqueue *q,
449 struct sk_buff *skb,
450 struct ieee80211_tx_control *ctl)
451{
452 struct b43_pio_txpacket *pack;
453 struct b43_txhdr txhdr;
454 u16 cookie;
455 int err;
456 unsigned int hdrlen;
457
458 B43_WARN_ON(list_empty(&q->packets_list));
459 pack = list_entry(q->packets_list.next,
460 struct b43_pio_txpacket, list);
461 memset(&pack->txstat, 0, sizeof(pack->txstat));
462 memcpy(&pack->txstat.control, ctl, sizeof(*ctl));
463
464 cookie = generate_cookie(q, pack);
465 hdrlen = b43_txhdr_size(q->dev);
466 err = b43_generate_txhdr(q->dev, (u8 *)&txhdr, skb->data,
467 skb->len, ctl, cookie);
468 if (err)
469 return err;
470
471 if (ctl->flags & IEEE80211_TXCTL_SEND_AFTER_DTIM) {
472 /* Tell the firmware about the cookie of the last
473 * mcast frame, so it can clear the more-data bit in it. */
474 b43_shm_write16(q->dev, B43_SHM_SHARED,
475 B43_SHM_SH_MCASTCOOKIE, cookie);
476 }
477
478 pack->skb = skb;
479 if (q->rev >= 8)
480 pio_tx_frame_4byte_queue(pack, (const u8 *)&txhdr, hdrlen);
481 else
482 pio_tx_frame_2byte_queue(pack, (const u8 *)&txhdr, hdrlen);
483
484 /* Remove it from the list of available packet slots.
485 * It will be put back when we receive the status report. */
486 list_del(&pack->list);
487
488 /* Update the queue statistics. */
489 q->buffer_used += roundup(skb->len + hdrlen, 4);
490 q->free_packet_slots -= 1;
491
492 return 0;
493}
494
495int b43_pio_tx(struct b43_wldev *dev,
496 struct sk_buff *skb, struct ieee80211_tx_control *ctl)
497{
498 struct b43_pio_txqueue *q;
499 struct ieee80211_hdr *hdr;
500 unsigned long flags;
501 unsigned int hdrlen, total_len;
502 int err = 0;
503
504 hdr = (struct ieee80211_hdr *)skb->data;
505 if (ctl->flags & IEEE80211_TXCTL_SEND_AFTER_DTIM) {
506 /* The multicast queue will be sent after the DTIM. */
507 q = dev->pio.tx_queue_mcast;
508 /* Set the frame More-Data bit. Ucode will clear it
509 * for us on the last frame. */
510 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
511 } else {
512 /* Decide by priority where to put this frame. */
513 q = select_queue_by_priority(dev, ctl->queue);
514 }
515
516 spin_lock_irqsave(&q->lock, flags);
517
518 hdrlen = b43_txhdr_size(dev);
519 total_len = roundup(skb->len + hdrlen, 4);
520
521 if (unlikely(total_len > q->buffer_size)) {
522 err = -ENOBUFS;
523 b43dbg(dev->wl, "PIO: TX packet longer than queue.\n");
524 goto out_unlock;
525 }
526 if (unlikely(q->free_packet_slots == 0)) {
527 err = -ENOBUFS;
528 b43warn(dev->wl, "PIO: TX packet overflow.\n");
529 goto out_unlock;
530 }
531 B43_WARN_ON(q->buffer_used > q->buffer_size);
532
533 if (total_len > (q->buffer_size - q->buffer_used)) {
534 /* Not enough memory on the queue. */
535 err = -EBUSY;
536 ieee80211_stop_queue(dev->wl->hw, ctl->queue);
537 q->stopped = 1;
538 goto out_unlock;
539 }
540
 541 /* Assign the mac80211 queue number to this PIO queue (if not already
 542 * done before) so TX status handling can use it. The mac80211-queue
 543 * to b43-queue mapping is static, so we don't need to store it per frame. */
544 q->queue_prio = ctl->queue;
545
546 err = pio_tx_frame(q, skb, ctl);
547 if (unlikely(err == -ENOKEY)) {
548 /* Drop this packet, as we don't have the encryption key
549 * anymore and must not transmit it unencrypted. */
550 dev_kfree_skb_any(skb);
551 err = 0;
552 goto out_unlock;
553 }
554 if (unlikely(err)) {
555 b43err(dev->wl, "PIO transmission failure\n");
556 goto out_unlock;
557 }
558 q->nr_tx_packets++;
559
560 B43_WARN_ON(q->buffer_used > q->buffer_size);
561 if (((q->buffer_size - q->buffer_used) < roundup(2 + 2 + 6, 4)) ||
562 (q->free_packet_slots == 0)) {
563 /* The queue is full. */
564 ieee80211_stop_queue(dev->wl->hw, ctl->queue);
565 q->stopped = 1;
566 }
567
568out_unlock:
569 spin_unlock_irqrestore(&q->lock, flags);
570
571 return err;
572}
573
574/* Called with IRQs disabled. */
575void b43_pio_handle_txstatus(struct b43_wldev *dev,
576 const struct b43_txstatus *status)
577{
578 struct b43_pio_txqueue *q;
579 struct b43_pio_txpacket *pack = NULL;
580 unsigned int total_len;
581
582 q = parse_cookie(dev, status->cookie, &pack);
583 if (unlikely(!q))
584 return;
585 B43_WARN_ON(!pack);
586
587 spin_lock(&q->lock); /* IRQs are already disabled. */
588
589 b43_fill_txstatus_report(&(pack->txstat), status);
590
591 total_len = pack->skb->len + b43_txhdr_size(dev);
592 total_len = roundup(total_len, 4);
593 q->buffer_used -= total_len;
594 q->free_packet_slots += 1;
595
596 ieee80211_tx_status_irqsafe(dev->wl->hw, pack->skb,
597 &(pack->txstat));
598 pack->skb = NULL;
599 list_add(&pack->list, &q->packets_list);
600
601 if (q->stopped) {
602 ieee80211_wake_queue(dev->wl->hw, q->queue_prio);
603 q->stopped = 0;
604 }
605
606 spin_unlock(&q->lock);
607}
608
609void b43_pio_get_tx_stats(struct b43_wldev *dev,
610 struct ieee80211_tx_queue_stats *stats)
611{
612 const int nr_queues = dev->wl->hw->queues;
613 struct b43_pio_txqueue *q;
614 struct ieee80211_tx_queue_stats_data *data;
615 unsigned long flags;
616 int i;
617
618 for (i = 0; i < nr_queues; i++) {
619 data = &(stats->data[i]);
620 q = select_queue_by_priority(dev, i);
621
622 spin_lock_irqsave(&q->lock, flags);
623 data->len = B43_PIO_MAX_NR_TXPACKETS - q->free_packet_slots;
624 data->limit = B43_PIO_MAX_NR_TXPACKETS;
625 data->count = q->nr_tx_packets;
626 spin_unlock_irqrestore(&q->lock, flags);
627 }
628}
629
630/* Returns whether we should fetch another frame. */
631static bool pio_rx_frame(struct b43_pio_rxqueue *q)
632{
633 struct b43_wldev *dev = q->dev;
634 struct b43_rxhdr_fw4 rxhdr;
635 u16 len;
636 u32 macstat;
637 unsigned int i, padding;
638 struct sk_buff *skb;
639 const char *err_msg = NULL;
640
641 memset(&rxhdr, 0, sizeof(rxhdr));
642
643 /* Check if we have data and wait for it to get ready. */
644 if (q->rev >= 8) {
645 u32 ctl;
646
647 ctl = b43_piorx_read32(q, B43_PIO8_RXCTL);
648 if (!(ctl & B43_PIO8_RXCTL_FRAMERDY))
649 return 0;
650 b43_piorx_write32(q, B43_PIO8_RXCTL,
651 B43_PIO8_RXCTL_FRAMERDY);
652 for (i = 0; i < 10; i++) {
653 ctl = b43_piorx_read32(q, B43_PIO8_RXCTL);
654 if (ctl & B43_PIO8_RXCTL_DATARDY)
655 goto data_ready;
656 udelay(10);
657 }
658 } else {
659 u16 ctl;
660
661 ctl = b43_piorx_read16(q, B43_PIO_RXCTL);
662 if (!(ctl & B43_PIO_RXCTL_FRAMERDY))
663 return 0;
664 b43_piorx_write16(q, B43_PIO_RXCTL,
665 B43_PIO_RXCTL_FRAMERDY);
666 for (i = 0; i < 10; i++) {
667 ctl = b43_piorx_read16(q, B43_PIO_RXCTL);
668 if (ctl & B43_PIO_RXCTL_DATARDY)
669 goto data_ready;
670 udelay(10);
671 }
672 }
673 b43dbg(q->dev->wl, "PIO RX timed out\n");
674 return 1;
675data_ready:
676
677 /* Get the preamble (RX header) */
678 if (q->rev >= 8) {
679 ssb_block_read(dev->dev, &rxhdr, sizeof(rxhdr),
680 q->mmio_base + B43_PIO8_RXDATA,
681 sizeof(u32));
682 } else {
683 ssb_block_read(dev->dev, &rxhdr, sizeof(rxhdr),
684 q->mmio_base + B43_PIO_RXDATA,
685 sizeof(u16));
686 }
687 /* Sanity checks. */
688 len = le16_to_cpu(rxhdr.frame_len);
689 if (unlikely(len > 0x700)) {
690 err_msg = "len > 0x700";
691 goto rx_error;
692 }
693 if (unlikely(len == 0)) {
694 err_msg = "len == 0";
695 goto rx_error;
696 }
697
698 macstat = le32_to_cpu(rxhdr.mac_status);
699 if (macstat & B43_RX_MAC_FCSERR) {
700 if (!(q->dev->wl->filter_flags & FIF_FCSFAIL)) {
701 /* Drop frames with failed FCS. */
702 err_msg = "Frame FCS error";
703 goto rx_error;
704 }
705 }
706
707 /* We always pad 2 bytes, as that's what upstream code expects
708 * due to the RX-header being 30 bytes. In case the frame is
709 * unaligned, we pad another 2 bytes. */
710 padding = (macstat & B43_RX_MAC_PADDING) ? 2 : 0;
711 skb = dev_alloc_skb(len + padding + 2);
712 if (unlikely(!skb)) {
713 err_msg = "Out of memory";
714 goto rx_error;
715 }
716 skb_reserve(skb, 2);
717 skb_put(skb, len + padding);
718 if (q->rev >= 8) {
719 ssb_block_read(dev->dev, skb->data + padding, (len & ~3),
720 q->mmio_base + B43_PIO8_RXDATA,
721 sizeof(u32));
722 if (len & 3) {
723 u32 value;
724 char *data;
725
726 /* Read the last few bytes. */
727 value = b43_piorx_read32(q, B43_PIO8_RXDATA);
728 data = &(skb->data[len + padding - 1]);
729 switch (len & 3) {
730 case 3:
731 *data = (value >> 16);
732 data--;
733 case 2:
734 *data = (value >> 8);
735 data--;
736 case 1:
737 *data = value;
738 }
739 }
740 } else {
741 ssb_block_read(dev->dev, skb->data + padding, (len & ~1),
742 q->mmio_base + B43_PIO_RXDATA,
743 sizeof(u16));
744 if (len & 1) {
745 u16 value;
746
747 /* Read the last byte. */
748 value = b43_piorx_read16(q, B43_PIO_RXDATA);
749 skb->data[len + padding - 1] = value;
750 }
751 }
752
753 b43_rx(q->dev, skb, &rxhdr);
754
755 return 1;
756
757rx_error:
758 if (err_msg)
759 b43dbg(q->dev->wl, "PIO RX error: %s\n", err_msg);
760 b43_piorx_write16(q, B43_PIO_RXCTL, B43_PIO_RXCTL_DATARDY);
761 return 1;
762}
763
764/* RX workqueue. We can sleep, yay! */
765static void b43_pio_rx_work(struct work_struct *work)
766{
767 struct b43_pio_rxqueue *q = container_of(work, struct b43_pio_rxqueue,
768 rx_work);
769 unsigned int budget = 50;
770 bool stop;
771
772 do {
773 spin_lock_irq(&q->lock);
774 stop = (pio_rx_frame(q) == 0);
775 spin_unlock_irq(&q->lock);
776 cond_resched();
777 if (stop)
778 break;
779 } while (--budget);
780}
781
782/* Called with IRQs disabled. */
783void b43_pio_rx(struct b43_pio_rxqueue *q)
784{
785 /* Due to latency issues we must run the RX path in
786 * a workqueue to be able to schedule between packets. */
787 queue_work(q->dev->wl->hw->workqueue, &q->rx_work);
788}
789
790static void b43_pio_tx_suspend_queue(struct b43_pio_txqueue *q)
791{
792 unsigned long flags;
793
794 spin_lock_irqsave(&q->lock, flags);
795 if (q->rev >= 8) {
796 b43_piotx_write32(q, B43_PIO8_TXCTL,
797 b43_piotx_read32(q, B43_PIO8_TXCTL)
798 | B43_PIO8_TXCTL_SUSPREQ);
799 } else {
800 b43_piotx_write16(q, B43_PIO_TXCTL,
801 b43_piotx_read16(q, B43_PIO_TXCTL)
802 | B43_PIO_TXCTL_SUSPREQ);
803 }
804 spin_unlock_irqrestore(&q->lock, flags);
805}
806
807static void b43_pio_tx_resume_queue(struct b43_pio_txqueue *q)
808{
809 unsigned long flags;
810
811 spin_lock_irqsave(&q->lock, flags);
812 if (q->rev >= 8) {
813 b43_piotx_write32(q, B43_PIO8_TXCTL,
814 b43_piotx_read32(q, B43_PIO8_TXCTL)
815 & ~B43_PIO8_TXCTL_SUSPREQ);
816 } else {
817 b43_piotx_write16(q, B43_PIO_TXCTL,
818 b43_piotx_read16(q, B43_PIO_TXCTL)
819 & ~B43_PIO_TXCTL_SUSPREQ);
820 }
821 spin_unlock_irqrestore(&q->lock, flags);
822}
823
824void b43_pio_tx_suspend(struct b43_wldev *dev)
825{
826 b43_power_saving_ctl_bits(dev, B43_PS_AWAKE);
827 b43_pio_tx_suspend_queue(dev->pio.tx_queue_AC_BK);
828 b43_pio_tx_suspend_queue(dev->pio.tx_queue_AC_BE);
829 b43_pio_tx_suspend_queue(dev->pio.tx_queue_AC_VI);
830 b43_pio_tx_suspend_queue(dev->pio.tx_queue_AC_VO);
831 b43_pio_tx_suspend_queue(dev->pio.tx_queue_mcast);
832}
833
834void b43_pio_tx_resume(struct b43_wldev *dev)
835{
836 b43_pio_tx_resume_queue(dev->pio.tx_queue_mcast);
837 b43_pio_tx_resume_queue(dev->pio.tx_queue_AC_VO);
838 b43_pio_tx_resume_queue(dev->pio.tx_queue_AC_VI);
839 b43_pio_tx_resume_queue(dev->pio.tx_queue_AC_BE);
840 b43_pio_tx_resume_queue(dev->pio.tx_queue_AC_BK);
841 b43_power_saving_ctl_bits(dev, 0);
842}
diff --git a/drivers/net/wireless/b43/pio.h b/drivers/net/wireless/b43/pio.h
new file mode 100644
index 000000000000..e2ec676cc9e4
--- /dev/null
+++ b/drivers/net/wireless/b43/pio.h
@@ -0,0 +1,220 @@
1#ifndef B43_PIO_H_
2#define B43_PIO_H_
3
4#include "b43.h"
5
6#include <linux/interrupt.h>
7#include <linux/io.h>
8#include <linux/list.h>
9#include <linux/skbuff.h>
10
11
12/*** Registers for PIO queues up to revision 7. ***/
13/* TX queue. */
14#define B43_PIO_TXCTL 0x00
15#define B43_PIO_TXCTL_WRITELO 0x0001
16#define B43_PIO_TXCTL_WRITEHI 0x0002
17#define B43_PIO_TXCTL_EOF 0x0004
18#define B43_PIO_TXCTL_FREADY 0x0008
19#define B43_PIO_TXCTL_FLUSHREQ 0x0020
20#define B43_PIO_TXCTL_FLUSHPEND 0x0040
21#define B43_PIO_TXCTL_SUSPREQ 0x0080
22#define B43_PIO_TXCTL_QSUSP 0x0100
23#define B43_PIO_TXCTL_COMMCNT 0xFC00
24#define B43_PIO_TXCTL_COMMCNT_SHIFT 10
25#define B43_PIO_TXDATA 0x02
26#define B43_PIO_TXQBUFSIZE 0x04
27/* RX queue. */
28#define B43_PIO_RXCTL 0x00
29#define B43_PIO_RXCTL_FRAMERDY 0x0001
30#define B43_PIO_RXCTL_DATARDY 0x0002
31#define B43_PIO_RXDATA 0x02
32
33/*** Registers for PIO queues revision 8 and later. ***/
34/* TX queue */
35#define B43_PIO8_TXCTL 0x00
36#define B43_PIO8_TXCTL_0_7 0x00000001
37#define B43_PIO8_TXCTL_8_15 0x00000002
38#define B43_PIO8_TXCTL_16_23 0x00000004
39#define B43_PIO8_TXCTL_24_31 0x00000008
40#define B43_PIO8_TXCTL_EOF 0x00000010
41#define B43_PIO8_TXCTL_FREADY 0x00000080
42#define B43_PIO8_TXCTL_SUSPREQ 0x00000100
43#define B43_PIO8_TXCTL_QSUSP 0x00000200
44#define B43_PIO8_TXCTL_FLUSHREQ 0x00000400
45#define B43_PIO8_TXCTL_FLUSHPEND 0x00000800
46#define B43_PIO8_TXDATA 0x04
47/* RX queue */
48#define B43_PIO8_RXCTL 0x00
49#define B43_PIO8_RXCTL_FRAMERDY 0x00000001
50#define B43_PIO8_RXCTL_DATARDY 0x00000002
51#define B43_PIO8_RXDATA 0x04
52
53
54/* The maximum number of TX-packets the HW can handle. */
55#define B43_PIO_MAX_NR_TXPACKETS 32
56
57
58#ifdef CONFIG_B43_PIO
59
60struct b43_pio_txpacket {
61 /* Pointer to the TX queue we belong to. */
62 struct b43_pio_txqueue *queue;
63 /* The TX data packet. */
64 struct sk_buff *skb;
65 /* The status meta data. */
66 struct ieee80211_tx_status txstat;
67 /* Index in the (struct b43_pio_txqueue)->packets array. */
68 u8 index;
69
70 struct list_head list;
71};
72
73struct b43_pio_txqueue {
74 struct b43_wldev *dev;
75 spinlock_t lock;
76 u16 mmio_base;
77
78 /* The device queue buffer size in bytes. */
79 u16 buffer_size;
80 /* The number of used bytes in the device queue buffer. */
81 u16 buffer_used;
82 /* The number of packets that can still get queued.
83 * This is decremented on queueing a packet and incremented
84 * after receiving the transmit status. */
85 u16 free_packet_slots;
86
87 /* True, if the mac80211 queue was stopped due to overflow at TX. */
88 bool stopped;
89 /* Our b43 queue index number */
90 u8 index;
91 /* The mac80211 QoS queue priority. */
92 u8 queue_prio;
93
94 /* Buffer for TX packet meta data. */
95 struct b43_pio_txpacket packets[B43_PIO_MAX_NR_TXPACKETS];
96 struct list_head packets_list;
97
98 /* Total number of transmitted packets. */
99 unsigned int nr_tx_packets;
100
101 /* Shortcut to the 802.11 core revision. This is to
102 * avoid horrible pointer dereferencing in the fastpaths. */
103 u8 rev;
104};
105
106struct b43_pio_rxqueue {
107 struct b43_wldev *dev;
108 spinlock_t lock;
109 u16 mmio_base;
110
111 /* Work to reduce latency issues on RX. */
112 struct work_struct rx_work;
113
114 /* Shortcut to the 802.11 core revision. This is to
115 * avoid horrible pointer dereferencing in the fastpaths. */
116 u8 rev;
117};
118
119
120static inline u16 b43_piotx_read16(struct b43_pio_txqueue *q, u16 offset)
121{
122 return b43_read16(q->dev, q->mmio_base + offset);
123}
124
125static inline u32 b43_piotx_read32(struct b43_pio_txqueue *q, u16 offset)
126{
127 return b43_read32(q->dev, q->mmio_base + offset);
128}
129
130static inline void b43_piotx_write16(struct b43_pio_txqueue *q,
131 u16 offset, u16 value)
132{
133 b43_write16(q->dev, q->mmio_base + offset, value);
134}
135
136static inline void b43_piotx_write32(struct b43_pio_txqueue *q,
137 u16 offset, u32 value)
138{
139 b43_write32(q->dev, q->mmio_base + offset, value);
140}
141
142
143static inline u16 b43_piorx_read16(struct b43_pio_rxqueue *q, u16 offset)
144{
145 return b43_read16(q->dev, q->mmio_base + offset);
146}
147
148static inline u32 b43_piorx_read32(struct b43_pio_rxqueue *q, u16 offset)
149{
150 return b43_read32(q->dev, q->mmio_base + offset);
151}
152
153static inline void b43_piorx_write16(struct b43_pio_rxqueue *q,
154 u16 offset, u16 value)
155{
156 b43_write16(q->dev, q->mmio_base + offset, value);
157}
158
159static inline void b43_piorx_write32(struct b43_pio_rxqueue *q,
160 u16 offset, u32 value)
161{
162 b43_write32(q->dev, q->mmio_base + offset, value);
163}
164
165
166int b43_pio_init(struct b43_wldev *dev);
167void b43_pio_stop(struct b43_wldev *dev);
168void b43_pio_free(struct b43_wldev *dev);
169
170int b43_pio_tx(struct b43_wldev *dev,
171 struct sk_buff *skb, struct ieee80211_tx_control *ctl);
172void b43_pio_handle_txstatus(struct b43_wldev *dev,
173 const struct b43_txstatus *status);
174void b43_pio_get_tx_stats(struct b43_wldev *dev,
175 struct ieee80211_tx_queue_stats *stats);
176void b43_pio_rx(struct b43_pio_rxqueue *q);
177
178void b43_pio_tx_suspend(struct b43_wldev *dev);
179void b43_pio_tx_resume(struct b43_wldev *dev);
180
181
182#else /* CONFIG_B43_PIO */
183
184
185static inline int b43_pio_init(struct b43_wldev *dev)
186{
187 return 0;
188}
189static inline void b43_pio_free(struct b43_wldev *dev)
190{
191}
192static inline void b43_pio_stop(struct b43_wldev *dev)
193{
194}
195static inline int b43_pio_tx(struct b43_wldev *dev,
196 struct sk_buff *skb,
197 struct ieee80211_tx_control *ctl)
198{
199 return 0;
200}
201static inline void b43_pio_handle_txstatus(struct b43_wldev *dev,
202 const struct b43_txstatus *status)
203{
204}
205static inline void b43_pio_get_tx_stats(struct b43_wldev *dev,
206 struct ieee80211_tx_queue_stats *stats)
207{
208}
209static inline void b43_pio_rx(struct b43_pio_rxqueue *q)
210{
211}
212static inline void b43_pio_tx_suspend(struct b43_wldev *dev)
213{
214}
215static inline void b43_pio_tx_resume(struct b43_wldev *dev)
216{
217}
218
219#endif /* CONFIG_B43_PIO */
220#endif /* B43_PIO_H_ */
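
A minimal usage sketch for the rev-8 PIO RX definitions above, not part of the patch: it only assumes the b43_piorx_read32() accessor, the B43_PIO8_RXCTL offset and the FRAMERDY/DATARDY flags declared in this header; the helper name itself is hypothetical.

static bool b43_pio_rx_frame_ready_sketch(struct b43_pio_rxqueue *q)
{
	u32 ctl = b43_piorx_read32(q, B43_PIO8_RXCTL);

	/* FRAMERDY reports that a received frame header is available;
	 * DATARDY (defined above) gates access to the frame data itself.
	 * The actual RX handshake lives in pio.c. */
	return !!(ctl & B43_PIO8_RXCTL_FRAMERDY);
}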
diff --git a/drivers/net/wireless/b43/sysfs.c b/drivers/net/wireless/b43/sysfs.c
index f4faff6a7d6c..275095b8cbe7 100644
--- a/drivers/net/wireless/b43/sysfs.c
+++ b/drivers/net/wireless/b43/sysfs.c
@@ -47,29 +47,6 @@ static int get_integer(const char *buf, size_t count)
47 return ret; 47 return ret;
48} 48}
49 49
50static int get_boolean(const char *buf, size_t count)
51{
52 if (count != 0) {
53 if (buf[0] == '1')
54 return 1;
55 if (buf[0] == '0')
56 return 0;
57 if (count >= 4 && memcmp(buf, "true", 4) == 0)
58 return 1;
59 if (count >= 5 && memcmp(buf, "false", 5) == 0)
60 return 0;
61 if (count >= 3 && memcmp(buf, "yes", 3) == 0)
62 return 1;
63 if (count >= 2 && memcmp(buf, "no", 2) == 0)
64 return 0;
65 if (count >= 2 && memcmp(buf, "on", 2) == 0)
66 return 1;
67 if (count >= 3 && memcmp(buf, "off", 3) == 0)
68 return 0;
69 }
70 return -EINVAL;
71}
72
73static ssize_t b43_attr_interfmode_show(struct device *dev, 50static ssize_t b43_attr_interfmode_show(struct device *dev,
74 struct device_attribute *attr, 51 struct device_attribute *attr,
75 char *buf) 52 char *buf)
@@ -155,82 +132,18 @@ static ssize_t b43_attr_interfmode_store(struct device *dev,
155static DEVICE_ATTR(interference, 0644, 132static DEVICE_ATTR(interference, 0644,
156 b43_attr_interfmode_show, b43_attr_interfmode_store); 133 b43_attr_interfmode_show, b43_attr_interfmode_store);
157 134
158static ssize_t b43_attr_preamble_show(struct device *dev,
159 struct device_attribute *attr, char *buf)
160{
161 struct b43_wldev *wldev = dev_to_b43_wldev(dev);
162 ssize_t count;
163
164 if (!capable(CAP_NET_ADMIN))
165 return -EPERM;
166
167 mutex_lock(&wldev->wl->mutex);
168
169 if (wldev->short_preamble)
170 count =
171 snprintf(buf, PAGE_SIZE, "1 (Short Preamble enabled)\n");
172 else
173 count =
174 snprintf(buf, PAGE_SIZE, "0 (Short Preamble disabled)\n");
175
176 mutex_unlock(&wldev->wl->mutex);
177
178 return count;
179}
180
181static ssize_t b43_attr_preamble_store(struct device *dev,
182 struct device_attribute *attr,
183 const char *buf, size_t count)
184{
185 struct b43_wldev *wldev = dev_to_b43_wldev(dev);
186 unsigned long flags;
187 int value;
188
189 if (!capable(CAP_NET_ADMIN))
190 return -EPERM;
191
192 value = get_boolean(buf, count);
193 if (value < 0)
194 return value;
195 mutex_lock(&wldev->wl->mutex);
196 spin_lock_irqsave(&wldev->wl->irq_lock, flags);
197
198 wldev->short_preamble = !!value;
199
200 spin_unlock_irqrestore(&wldev->wl->irq_lock, flags);
201 mutex_unlock(&wldev->wl->mutex);
202
203 return count;
204}
205
206static DEVICE_ATTR(shortpreamble, 0644,
207 b43_attr_preamble_show, b43_attr_preamble_store);
208
209int b43_sysfs_register(struct b43_wldev *wldev) 135int b43_sysfs_register(struct b43_wldev *wldev)
210{ 136{
211 struct device *dev = wldev->dev->dev; 137 struct device *dev = wldev->dev->dev;
212 int err;
213 138
214 B43_WARN_ON(b43_status(wldev) != B43_STAT_INITIALIZED); 139 B43_WARN_ON(b43_status(wldev) != B43_STAT_INITIALIZED);
215 140
216 err = device_create_file(dev, &dev_attr_interference); 141 return device_create_file(dev, &dev_attr_interference);
217 if (err)
218 goto out;
219 err = device_create_file(dev, &dev_attr_shortpreamble);
220 if (err)
221 goto err_remove_interfmode;
222
223 out:
224 return err;
225 err_remove_interfmode:
226 device_remove_file(dev, &dev_attr_interference);
227 goto out;
228} 142}
229 143
230void b43_sysfs_unregister(struct b43_wldev *wldev) 144void b43_sysfs_unregister(struct b43_wldev *wldev)
231{ 145{
232 struct device *dev = wldev->dev->dev; 146 struct device *dev = wldev->dev->dev;
233 147
234 device_remove_file(dev, &dev_attr_shortpreamble);
235 device_remove_file(dev, &dev_attr_interference); 148 device_remove_file(dev, &dev_attr_interference);
236} 149}
diff --git a/drivers/net/wireless/b43/wa.c b/drivers/net/wireless/b43/wa.c
index e632125cb772..daa94211f838 100644
--- a/drivers/net/wireless/b43/wa.c
+++ b/drivers/net/wireless/b43/wa.c
@@ -204,42 +204,43 @@ static void b43_wa_rt(struct b43_wldev *dev) /* Rotor table */
204 b43_ofdmtab_write32(dev, B43_OFDMTAB_ROTOR, i, b43_tab_rotor[i]); 204 b43_ofdmtab_write32(dev, B43_OFDMTAB_ROTOR, i, b43_tab_rotor[i]);
205} 205}
206 206
207static void b43_write_null_nst(struct b43_wldev *dev)
208{
209 int i;
210
211 for (i = 0; i < B43_TAB_NOISESCALE_SIZE; i++)
212 b43_ofdmtab_write16(dev, B43_OFDMTAB_NOISESCALE, i, 0);
213}
214
215static void b43_write_nst(struct b43_wldev *dev, const u16 *nst)
216{
217 int i;
218
219 for (i = 0; i < B43_TAB_NOISESCALE_SIZE; i++)
220 b43_ofdmtab_write16(dev, B43_OFDMTAB_NOISESCALE, i, nst[i]);
221}
222
207static void b43_wa_nst(struct b43_wldev *dev) /* Noise scale table */ 223static void b43_wa_nst(struct b43_wldev *dev) /* Noise scale table */
208{ 224{
209 struct b43_phy *phy = &dev->phy; 225 struct b43_phy *phy = &dev->phy;
210 int i;
211 226
212 if (phy->type == B43_PHYTYPE_A) { 227 if (phy->type == B43_PHYTYPE_A) {
213 if (phy->rev <= 1) 228 if (phy->rev <= 1)
214 for (i = 0; i < B43_TAB_NOISESCALE_SIZE; i++) 229 b43_write_null_nst(dev);
215 b43_ofdmtab_write16(dev, B43_OFDMTAB_NOISESCALE,
216 i, 0);
217 else if (phy->rev == 2) 230 else if (phy->rev == 2)
218 for (i = 0; i < B43_TAB_NOISESCALE_SIZE; i++) 231 b43_write_nst(dev, b43_tab_noisescalea2);
219 b43_ofdmtab_write16(dev, B43_OFDMTAB_NOISESCALE,
220 i, b43_tab_noisescalea2[i]);
221 else if (phy->rev == 3) 232 else if (phy->rev == 3)
222 for (i = 0; i < B43_TAB_NOISESCALE_SIZE; i++) 233 b43_write_nst(dev, b43_tab_noisescalea3);
223 b43_ofdmtab_write16(dev, B43_OFDMTAB_NOISESCALE,
224 i, b43_tab_noisescalea3[i]);
225 else 234 else
226 for (i = 0; i < B43_TAB_NOISESCALE_SIZE; i++) 235 b43_write_nst(dev, b43_tab_noisescaleg3);
227 b43_ofdmtab_write16(dev, B43_OFDMTAB_NOISESCALE,
228 i, b43_tab_noisescaleg3[i]);
229 } else { 236 } else {
230 if (phy->rev >= 6) { 237 if (phy->rev >= 6) {
231 if (b43_phy_read(dev, B43_PHY_ENCORE) & B43_PHY_ENCORE_EN) 238 if (b43_phy_read(dev, B43_PHY_ENCORE) & B43_PHY_ENCORE_EN)
232 for (i = 0; i < B43_TAB_NOISESCALE_SIZE; i++) 239 b43_write_nst(dev, b43_tab_noisescaleg3);
233 b43_ofdmtab_write16(dev, B43_OFDMTAB_NOISESCALE,
234 i, b43_tab_noisescaleg3[i]);
235 else 240 else
236 for (i = 0; i < B43_TAB_NOISESCALE_SIZE; i++) 241 b43_write_nst(dev, b43_tab_noisescaleg2);
237 b43_ofdmtab_write16(dev, B43_OFDMTAB_NOISESCALE,
238 i, b43_tab_noisescaleg2[i]);
239 } else { 242 } else {
240 for (i = 0; i < B43_TAB_NOISESCALE_SIZE; i++) 243 b43_write_nst(dev, b43_tab_noisescaleg1);
241 b43_ofdmtab_write16(dev, B43_OFDMTAB_NOISESCALE,
242 i, b43_tab_noisescaleg1[i]);
243 } 244 }
244 } 245 }
245} 246}
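
An illustrative sketch of the helper pattern this wa.c cleanup introduces, not part of the patch: b43_write_nst() and the noise-scale tables are taken from the diff above, while the helper name and the revision cut-off used here are made up for the example.

static void b43_load_gphy_noise_scale_sketch(struct b43_wldev *dev)
{
	const u16 *table;

	/* Pick one of the G-PHY noise-scale tables and let the shared
	 * helper write all B43_TAB_NOISESCALE_SIZE entries. */
	if (dev->phy.rev >= 6)
		table = b43_tab_noisescaleg3;
	else
		table = b43_tab_noisescaleg1;

	b43_write_nst(dev, table);
}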
diff --git a/drivers/net/wireless/b43/xmit.c b/drivers/net/wireless/b43/xmit.c
index 7caa26eb4105..19aefbfb2c93 100644
--- a/drivers/net/wireless/b43/xmit.c
+++ b/drivers/net/wireless/b43/xmit.c
@@ -30,48 +30,51 @@
30#include "xmit.h" 30#include "xmit.h"
31#include "phy.h" 31#include "phy.h"
32#include "dma.h" 32#include "dma.h"
33#include "pio.h"
33 34
34 35
35/* Extract the bitrate out of a CCK PLCP header. */ 36/* Extract the bitrate index out of a CCK PLCP header. */
36static u8 b43_plcp_get_bitrate_cck(struct b43_plcp_hdr6 *plcp) 37static int b43_plcp_get_bitrate_idx_cck(struct b43_plcp_hdr6 *plcp)
37{ 38{
38 switch (plcp->raw[0]) { 39 switch (plcp->raw[0]) {
39 case 0x0A: 40 case 0x0A:
40 return B43_CCK_RATE_1MB; 41 return 0;
41 case 0x14: 42 case 0x14:
42 return B43_CCK_RATE_2MB; 43 return 1;
43 case 0x37: 44 case 0x37:
44 return B43_CCK_RATE_5MB; 45 return 2;
45 case 0x6E: 46 case 0x6E:
46 return B43_CCK_RATE_11MB; 47 return 3;
47 } 48 }
48 B43_WARN_ON(1); 49 B43_WARN_ON(1);
49 return 0; 50 return -1;
50} 51}
51 52
52/* Extract the bitrate out of an OFDM PLCP header. */ 53/* Extract the bitrate index out of an OFDM PLCP header. */
53static u8 b43_plcp_get_bitrate_ofdm(struct b43_plcp_hdr6 *plcp) 54static u8 b43_plcp_get_bitrate_idx_ofdm(struct b43_plcp_hdr6 *plcp, bool aphy)
54{ 55{
56 int base = aphy ? 0 : 4;
57
55 switch (plcp->raw[0] & 0xF) { 58 switch (plcp->raw[0] & 0xF) {
56 case 0xB: 59 case 0xB:
57 return B43_OFDM_RATE_6MB; 60 return base + 0;
58 case 0xF: 61 case 0xF:
59 return B43_OFDM_RATE_9MB; 62 return base + 1;
60 case 0xA: 63 case 0xA:
61 return B43_OFDM_RATE_12MB; 64 return base + 2;
62 case 0xE: 65 case 0xE:
63 return B43_OFDM_RATE_18MB; 66 return base + 3;
64 case 0x9: 67 case 0x9:
65 return B43_OFDM_RATE_24MB; 68 return base + 4;
66 case 0xD: 69 case 0xD:
67 return B43_OFDM_RATE_36MB; 70 return base + 5;
68 case 0x8: 71 case 0x8:
69 return B43_OFDM_RATE_48MB; 72 return base + 6;
70 case 0xC: 73 case 0xC:
71 return B43_OFDM_RATE_54MB; 74 return base + 7;
72 } 75 }
73 B43_WARN_ON(1); 76 B43_WARN_ON(1);
74 return 0; 77 return -1;
75} 78}
76 79
77u8 b43_plcp_get_ratecode_cck(const u8 bitrate) 80u8 b43_plcp_get_ratecode_cck(const u8 bitrate)
@@ -191,6 +194,7 @@ int b43_generate_txhdr(struct b43_wldev *dev,
191 (const struct ieee80211_hdr *)fragment_data; 194 (const struct ieee80211_hdr *)fragment_data;
192 int use_encryption = (!(txctl->flags & IEEE80211_TXCTL_DO_NOT_ENCRYPT)); 195 int use_encryption = (!(txctl->flags & IEEE80211_TXCTL_DO_NOT_ENCRYPT));
193 u16 fctl = le16_to_cpu(wlhdr->frame_control); 196 u16 fctl = le16_to_cpu(wlhdr->frame_control);
197 struct ieee80211_rate *fbrate;
194 u8 rate, rate_fb; 198 u8 rate, rate_fb;
195 int rate_ofdm, rate_fb_ofdm; 199 int rate_ofdm, rate_fb_ofdm;
196 unsigned int plcp_fragment_len; 200 unsigned int plcp_fragment_len;
@@ -200,9 +204,11 @@ int b43_generate_txhdr(struct b43_wldev *dev,
200 204
201 memset(txhdr, 0, sizeof(*txhdr)); 205 memset(txhdr, 0, sizeof(*txhdr));
202 206
203 rate = txctl->tx_rate; 207 WARN_ON(!txctl->tx_rate);
208 rate = txctl->tx_rate ? txctl->tx_rate->hw_value : B43_CCK_RATE_1MB;
204 rate_ofdm = b43_is_ofdm_rate(rate); 209 rate_ofdm = b43_is_ofdm_rate(rate);
205 rate_fb = (txctl->alt_retry_rate == -1) ? rate : txctl->alt_retry_rate; 210 fbrate = txctl->alt_retry_rate ? : txctl->tx_rate;
211 rate_fb = fbrate->hw_value;
206 rate_fb_ofdm = b43_is_ofdm_rate(rate_fb); 212 rate_fb_ofdm = b43_is_ofdm_rate(rate_fb);
207 213
208 if (rate_ofdm) 214 if (rate_ofdm)
@@ -221,11 +227,10 @@ int b43_generate_txhdr(struct b43_wldev *dev,
221 * use the original dur_id field. */ 227 * use the original dur_id field. */
222 txhdr->dur_fb = wlhdr->duration_id; 228 txhdr->dur_fb = wlhdr->duration_id;
223 } else { 229 } else {
224 int fbrate_base100kbps = B43_RATE_TO_BASE100KBPS(rate_fb);
225 txhdr->dur_fb = ieee80211_generic_frame_duration(dev->wl->hw, 230 txhdr->dur_fb = ieee80211_generic_frame_duration(dev->wl->hw,
226 txctl->vif, 231 txctl->vif,
227 fragment_len, 232 fragment_len,
228 fbrate_base100kbps); 233 fbrate);
229 } 234 }
230 235
231 plcp_fragment_len = fragment_len + FCS_LEN; 236 plcp_fragment_len = fragment_len + FCS_LEN;
@@ -287,7 +292,7 @@ int b43_generate_txhdr(struct b43_wldev *dev,
287 phy_ctl |= B43_TXH_PHY_ENC_OFDM; 292 phy_ctl |= B43_TXH_PHY_ENC_OFDM;
288 else 293 else
289 phy_ctl |= B43_TXH_PHY_ENC_CCK; 294 phy_ctl |= B43_TXH_PHY_ENC_CCK;
290 if (dev->short_preamble) 295 if (txctl->flags & IEEE80211_TXCTL_SHORT_PREAMBLE)
291 phy_ctl |= B43_TXH_PHY_SHORTPRMBL; 296 phy_ctl |= B43_TXH_PHY_SHORTPRMBL;
292 297
293 switch (b43_ieee80211_antenna_sanitize(dev, txctl->antenna_sel_tx)) { 298 switch (b43_ieee80211_antenna_sanitize(dev, txctl->antenna_sel_tx)) {
@@ -332,7 +337,8 @@ int b43_generate_txhdr(struct b43_wldev *dev,
332 int rts_rate_ofdm, rts_rate_fb_ofdm; 337 int rts_rate_ofdm, rts_rate_fb_ofdm;
333 struct b43_plcp_hdr6 *plcp; 338 struct b43_plcp_hdr6 *plcp;
334 339
335 rts_rate = txctl->rts_cts_rate; 340 WARN_ON(!txctl->rts_cts_rate);
341 rts_rate = txctl->rts_cts_rate ? txctl->rts_cts_rate->hw_value : B43_CCK_RATE_1MB;
336 rts_rate_ofdm = b43_is_ofdm_rate(rts_rate); 342 rts_rate_ofdm = b43_is_ofdm_rate(rts_rate);
337 rts_rate_fb = b43_calc_fallback_rate(rts_rate); 343 rts_rate_fb = b43_calc_fallback_rate(rts_rate);
338 rts_rate_fb_ofdm = b43_is_ofdm_rate(rts_rate_fb); 344 rts_rate_fb_ofdm = b43_is_ofdm_rate(rts_rate_fb);
@@ -506,7 +512,7 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr)
506 u16 phystat0, phystat3, chanstat, mactime; 512 u16 phystat0, phystat3, chanstat, mactime;
507 u32 macstat; 513 u32 macstat;
508 u16 chanid; 514 u16 chanid;
509 u8 jssi; 515 u16 phytype;
510 int padding; 516 int padding;
511 517
512 memset(&status, 0, sizeof(status)); 518 memset(&status, 0, sizeof(status));
@@ -514,10 +520,10 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr)
514 /* Get metadata about the frame from the header. */ 520 /* Get metadata about the frame from the header. */
515 phystat0 = le16_to_cpu(rxhdr->phy_status0); 521 phystat0 = le16_to_cpu(rxhdr->phy_status0);
516 phystat3 = le16_to_cpu(rxhdr->phy_status3); 522 phystat3 = le16_to_cpu(rxhdr->phy_status3);
517 jssi = rxhdr->jssi;
518 macstat = le32_to_cpu(rxhdr->mac_status); 523 macstat = le32_to_cpu(rxhdr->mac_status);
519 mactime = le16_to_cpu(rxhdr->mac_time); 524 mactime = le16_to_cpu(rxhdr->mac_time);
520 chanstat = le16_to_cpu(rxhdr->channel); 525 chanstat = le16_to_cpu(rxhdr->channel);
526 phytype = chanstat & B43_RX_CHAN_PHYTYPE;
521 527
522 if (macstat & B43_RX_MAC_FCSERR) 528 if (macstat & B43_RX_MAC_FCSERR)
523 dev->wl->ieee_stats.dot11FCSErrorCount++; 529 dev->wl->ieee_stats.dot11FCSErrorCount++;
@@ -567,26 +573,40 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr)
567 } 573 }
568 } 574 }
569 575
570 status.ssi = b43_rssi_postprocess(dev, jssi, 576 /* Link quality statistics */
571 (phystat0 & B43_RX_PHYST0_OFDM),
572 (phystat0 & B43_RX_PHYST0_GAINCTL),
573 (phystat3 & B43_RX_PHYST3_TRSTATE));
574 status.noise = dev->stats.link_noise; 577 status.noise = dev->stats.link_noise;
575 /* the next line looks wrong, but is what mac80211 wants */ 578 if ((chanstat & B43_RX_CHAN_PHYTYPE) == B43_PHYTYPE_N) {
576 status.signal = (jssi * 100) / B43_RX_MAX_SSI; 579// s8 rssi = max(rxhdr->power0, rxhdr->power1);
580 //TODO: Find out what the rssi value is (dBm or percentage?)
581 // and also find out what the maximum possible value is.
582 // Fill status.ssi and status.signal fields.
583 } else {
584 status.ssi = b43_rssi_postprocess(dev, rxhdr->jssi,
585 (phystat0 & B43_RX_PHYST0_OFDM),
586 (phystat0 & B43_RX_PHYST0_GAINCTL),
587 (phystat3 & B43_RX_PHYST3_TRSTATE));
588 /* the next line looks wrong, but is what mac80211 wants */
589 status.signal = (rxhdr->jssi * 100) / B43_RX_MAX_SSI;
590 }
591
577 if (phystat0 & B43_RX_PHYST0_OFDM) 592 if (phystat0 & B43_RX_PHYST0_OFDM)
578 status.rate = b43_plcp_get_bitrate_ofdm(plcp); 593 status.rate_idx = b43_plcp_get_bitrate_idx_ofdm(plcp,
594 phytype == B43_PHYTYPE_A);
579 else 595 else
580 status.rate = b43_plcp_get_bitrate_cck(plcp); 596 status.rate_idx = b43_plcp_get_bitrate_idx_cck(plcp);
581 status.antenna = !!(phystat0 & B43_RX_PHYST0_ANT); 597 status.antenna = !!(phystat0 & B43_RX_PHYST0_ANT);
582 598
583 /* 599 /*
584 * If monitors are present get full 64-bit timestamp. This 600 * All frames on monitor interfaces and beacons always need a full
585 * code assumes we get to process the packet within 16 bits 601 * 64-bit timestamp. Monitor interfaces need it for diagnostic
586 * of timestamp, i.e. about 65 milliseconds after the PHY 602 * purposes and beacons for IBSS merging.
587 * received the first symbol. 603 * This code assumes we get to process the packet within 16 bits
604 * of timestamp, i.e. about 65 milliseconds after the PHY received
605 * the first symbol.
588 */ 606 */
589 if (dev->wl->radiotap_enabled) { 607 if (((fctl & (IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE))
608 == (IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_BEACON)) ||
609 dev->wl->radiotap_enabled) {
590 u16 low_mactime_now; 610 u16 low_mactime_now;
591 611
592 b43_tsf_read(dev, &status.mactime); 612 b43_tsf_read(dev, &status.mactime);
@@ -601,29 +621,28 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr)
601 chanid = (chanstat & B43_RX_CHAN_ID) >> B43_RX_CHAN_ID_SHIFT; 621 chanid = (chanstat & B43_RX_CHAN_ID) >> B43_RX_CHAN_ID_SHIFT;
602 switch (chanstat & B43_RX_CHAN_PHYTYPE) { 622 switch (chanstat & B43_RX_CHAN_PHYTYPE) {
603 case B43_PHYTYPE_A: 623 case B43_PHYTYPE_A:
604 status.phymode = MODE_IEEE80211A; 624 status.band = IEEE80211_BAND_5GHZ;
605 B43_WARN_ON(1); 625 B43_WARN_ON(1);
606 /* FIXME: We don't really know which value the "chanid" contains. 626 /* FIXME: We don't really know which value the "chanid" contains.
607 * So the following assignment might be wrong. */ 627 * So the following assignment might be wrong. */
608 status.channel = chanid; 628 status.freq = b43_channel_to_freq_5ghz(chanid);
609 status.freq = b43_channel_to_freq_5ghz(status.channel);
610 break; 629 break;
611 case B43_PHYTYPE_G: 630 case B43_PHYTYPE_G:
612 status.phymode = MODE_IEEE80211G; 631 status.band = IEEE80211_BAND_2GHZ;
613 /* chanid is the radio channel cookie value as used 632 /* chanid is the radio channel cookie value as used
614 * to tune the radio. */ 633 * to tune the radio. */
615 status.freq = chanid + 2400; 634 status.freq = chanid + 2400;
616 status.channel = b43_freq_to_channel_2ghz(status.freq);
617 break; 635 break;
618 case B43_PHYTYPE_N: 636 case B43_PHYTYPE_N:
619 status.phymode = 0xDEAD /*FIXME MODE_IEEE80211N*/;
620 /* chanid is the SHM channel cookie. Which is the plain 637 /* chanid is the SHM channel cookie. Which is the plain
621 * channel number in b43. */ 638 * channel number in b43. */
622 status.channel = chanid; 639 if (chanstat & B43_RX_CHAN_5GHZ) {
623 if (chanstat & B43_RX_CHAN_5GHZ) 640 status.band = IEEE80211_BAND_5GHZ;
624 status.freq = b43_freq_to_channel_5ghz(status.freq); 641 status.freq = b43_freq_to_channel_5ghz(chanid);
625 else 642 } else {
626 status.freq = b43_freq_to_channel_2ghz(status.freq); 643 status.band = IEEE80211_BAND_2GHZ;
644 status.freq = b43_freq_to_channel_2ghz(chanid);
645 }
627 break; 646 break;
628 default: 647 default:
629 B43_WARN_ON(1); 648 B43_WARN_ON(1);
@@ -657,67 +676,54 @@ void b43_handle_txstatus(struct b43_wldev *dev,
657 dev->wl->ieee_stats.dot11RTSSuccessCount++; 676 dev->wl->ieee_stats.dot11RTSSuccessCount++;
658 } 677 }
659 678
660 b43_dma_handle_txstatus(dev, status); 679 if (b43_using_pio_transfers(dev))
680 b43_pio_handle_txstatus(dev, status);
681 else
682 b43_dma_handle_txstatus(dev, status);
661} 683}
662 684
663/* Handle TX status report as received through DMA/PIO queues */ 685/* Fill out the mac80211 TXstatus report based on the b43-specific
664void b43_handle_hwtxstatus(struct b43_wldev *dev, 686 * txstatus report data. This returns a boolean indicating whether the frame was
665 const struct b43_hwtxstatus *hw) 687 * successfully transmitted. */
688bool b43_fill_txstatus_report(struct ieee80211_tx_status *report,
689 const struct b43_txstatus *status)
666{ 690{
667 struct b43_txstatus status; 691 bool frame_success = 1;
668 u8 tmp; 692
669 693 if (status->acked) {
670 status.cookie = le16_to_cpu(hw->cookie); 694 /* The frame was ACKed. */
671 status.seq = le16_to_cpu(hw->seq); 695 report->flags |= IEEE80211_TX_STATUS_ACK;
672 status.phy_stat = hw->phy_stat; 696 } else {
673 tmp = hw->count; 697 /* The frame was not ACKed... */
674 status.frame_count = (tmp >> 4); 698 if (!(report->control.flags & IEEE80211_TXCTL_NO_ACK)) {
675 status.rts_count = (tmp & 0x0F); 699 /* ...but we expected an ACK. */
676 tmp = hw->flags; 700 frame_success = 0;
677 status.supp_reason = ((tmp & 0x1C) >> 2); 701 report->excessive_retries = 1;
678 status.pm_indicated = !!(tmp & 0x80); 702 }
679 status.intermediate = !!(tmp & 0x40); 703 }
680 status.for_ampdu = !!(tmp & 0x20); 704 if (status->frame_count == 0) {
681 status.acked = !!(tmp & 0x02); 705 /* The frame was not transmitted at all. */
682 706 report->retry_count = 0;
683 b43_handle_txstatus(dev, &status); 707 } else
708 report->retry_count = status->frame_count - 1;
709
710 return frame_success;
684} 711}
685 712
686/* Stop any TX operation on the device (suspend the hardware queues) */ 713/* Stop any TX operation on the device (suspend the hardware queues) */
687void b43_tx_suspend(struct b43_wldev *dev) 714void b43_tx_suspend(struct b43_wldev *dev)
688{ 715{
689 b43_dma_tx_suspend(dev); 716 if (b43_using_pio_transfers(dev))
717 b43_pio_tx_suspend(dev);
718 else
719 b43_dma_tx_suspend(dev);
690} 720}
691 721
692/* Resume any TX operation on the device (resume the hardware queues) */ 722/* Resume any TX operation on the device (resume the hardware queues) */
693void b43_tx_resume(struct b43_wldev *dev) 723void b43_tx_resume(struct b43_wldev *dev)
694{ 724{
695 b43_dma_tx_resume(dev); 725 if (b43_using_pio_transfers(dev))
696} 726 b43_pio_tx_resume(dev);
697 727 else
698#if 0 728 b43_dma_tx_resume(dev);
699static void upload_qos_parms(struct b43_wldev *dev,
700 const u16 * parms, u16 offset)
701{
702 int i;
703
704 for (i = 0; i < B43_NR_QOSPARMS; i++) {
705 b43_shm_write16(dev, B43_SHM_SHARED,
706 offset + (i * 2), parms[i]);
707 }
708}
709#endif
710
711/* Initialize the QoS parameters */
712void b43_qos_init(struct b43_wldev *dev)
713{
714 /* FIXME: This function must probably be called from the mac80211
715 * config callback. */
716 return;
717
718 b43_hf_write(dev, b43_hf_read(dev) | B43_HF_EDCF);
719 //FIXME kill magic
720 b43_write16(dev, 0x688, b43_read16(dev, 0x688) | 0x4);
721
722 /*TODO: We might need some stack support here to get the values. */
723} 729}
diff --git a/drivers/net/wireless/b43/xmit.h b/drivers/net/wireless/b43/xmit.h
index 41765039552b..b05f44e0d626 100644
--- a/drivers/net/wireless/b43/xmit.h
+++ b/drivers/net/wireless/b43/xmit.h
@@ -207,25 +207,24 @@ enum {
207 B43_TXST_SUPP_ABNACK, /* Afterburner NACK */ 207 B43_TXST_SUPP_ABNACK, /* Afterburner NACK */
208}; 208};
209 209
210/* Transmit Status as received through DMA/PIO on old chips */
211struct b43_hwtxstatus {
212 PAD_BYTES(4);
213 __le16 cookie;
214 u8 flags;
215 u8 count;
216 PAD_BYTES(2);
217 __le16 seq;
218 u8 phy_stat;
219 PAD_BYTES(1);
220} __attribute__ ((__packed__));
221
222/* Receive header for v4 firmware. */ 210/* Receive header for v4 firmware. */
223struct b43_rxhdr_fw4 { 211struct b43_rxhdr_fw4 {
224 __le16 frame_len; /* Frame length */ 212 __le16 frame_len; /* Frame length */
225 PAD_BYTES(2); 213 PAD_BYTES(2);
226 __le16 phy_status0; /* PHY RX Status 0 */ 214 __le16 phy_status0; /* PHY RX Status 0 */
227 __u8 jssi; /* PHY RX Status 1: JSSI */ 215 union {
228 __u8 sig_qual; /* PHY RX Status 1: Signal Quality */ 216 /* RSSI for A/B/G-PHYs */
217 struct {
218 __u8 jssi; /* PHY RX Status 1: JSSI */
219 __u8 sig_qual; /* PHY RX Status 1: Signal Quality */
220 } __attribute__ ((__packed__));
221
222 /* RSSI for N-PHYs */
223 struct {
224 __s8 power0; /* PHY RX Status 1: Power 0 */
225 __s8 power1; /* PHY RX Status 1: Power 1 */
226 } __attribute__ ((__packed__));
227 } __attribute__ ((__packed__));
229 __le16 phy_status2; /* PHY RX Status 2 */ 228 __le16 phy_status2; /* PHY RX Status 2 */
230 __le16 phy_status3; /* PHY RX Status 3 */ 229 __le16 phy_status3; /* PHY RX Status 3 */
231 __le32 mac_status; /* MAC RX status */ 230 __le32 mac_status; /* MAC RX status */
@@ -295,25 +294,12 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr);
295 294
296void b43_handle_txstatus(struct b43_wldev *dev, 295void b43_handle_txstatus(struct b43_wldev *dev,
297 const struct b43_txstatus *status); 296 const struct b43_txstatus *status);
298 297bool b43_fill_txstatus_report(struct ieee80211_tx_status *report,
299void b43_handle_hwtxstatus(struct b43_wldev *dev, 298 const struct b43_txstatus *status);
300 const struct b43_hwtxstatus *hw);
301 299
302void b43_tx_suspend(struct b43_wldev *dev); 300void b43_tx_suspend(struct b43_wldev *dev);
303void b43_tx_resume(struct b43_wldev *dev); 301void b43_tx_resume(struct b43_wldev *dev);
304 302
305#define B43_NR_QOSPARMS 22
306enum {
307 B43_QOSPARM_TXOP = 0,
308 B43_QOSPARM_CWMIN,
309 B43_QOSPARM_CWMAX,
310 B43_QOSPARM_CWCUR,
311 B43_QOSPARM_AIFS,
312 B43_QOSPARM_BSLOTS,
313 B43_QOSPARM_REGGAP,
314 B43_QOSPARM_STATUS,
315};
316void b43_qos_init(struct b43_wldev *dev);
317 303
318/* Helper functions for converting the key-table index from "firmware-format" 304/* Helper functions for converting the key-table index from "firmware-format"
319 * to "raw-format" and back. The firmware API changed for this at some revision. 305 * to "raw-format" and back. The firmware API changed for this at some revision.
diff --git a/drivers/net/wireless/b43legacy/b43legacy.h b/drivers/net/wireless/b43legacy/b43legacy.h
index 93d45b71799a..ded3cd31b3df 100644
--- a/drivers/net/wireless/b43legacy/b43legacy.h
+++ b/drivers/net/wireless/b43legacy/b43legacy.h
@@ -97,6 +97,7 @@
97#define B43legacy_MMIO_RADIO_HWENABLED_LO 0x49A 97#define B43legacy_MMIO_RADIO_HWENABLED_LO 0x49A
98#define B43legacy_MMIO_GPIO_CONTROL 0x49C 98#define B43legacy_MMIO_GPIO_CONTROL 0x49C
99#define B43legacy_MMIO_GPIO_MASK 0x49E 99#define B43legacy_MMIO_GPIO_MASK 0x49E
100#define B43legacy_MMIO_TSF_CFP_PRETBTT 0x612
100#define B43legacy_MMIO_TSF_0 0x632 /* core rev < 3 only */ 101#define B43legacy_MMIO_TSF_0 0x632 /* core rev < 3 only */
101#define B43legacy_MMIO_TSF_1 0x634 /* core rev < 3 only */ 102#define B43legacy_MMIO_TSF_1 0x634 /* core rev < 3 only */
102#define B43legacy_MMIO_TSF_2 0x636 /* core rev < 3 only */ 103#define B43legacy_MMIO_TSF_2 0x636 /* core rev < 3 only */
@@ -130,19 +131,27 @@
130#define B43legacy_SHM_SH_HOSTFHI 0x0060 /* Hostflags ucode opts (high) */ 131#define B43legacy_SHM_SH_HOSTFHI 0x0060 /* Hostflags ucode opts (high) */
131/* SHM_SHARED crypto engine */ 132/* SHM_SHARED crypto engine */
132#define B43legacy_SHM_SH_KEYIDXBLOCK 0x05D4 /* Key index/algorithm block */ 133#define B43legacy_SHM_SH_KEYIDXBLOCK 0x05D4 /* Key index/algorithm block */
133/* SHM_SHARED beacon variables */ 134/* SHM_SHARED beacon/AP variables */
135#define B43legacy_SHM_SH_DTIMP 0x0012 /* DTIM period */
136#define B43legacy_SHM_SH_BTL0 0x0018 /* Beacon template length 0 */
137#define B43legacy_SHM_SH_BTL1 0x001A /* Beacon template length 1 */
138#define B43legacy_SHM_SH_BTSFOFF 0x001C /* Beacon TSF offset */
139#define B43legacy_SHM_SH_TIMPOS 0x001E /* TIM position in beacon */
134#define B43legacy_SHM_SH_BEACPHYCTL 0x0054 /* Beacon PHY TX control word */ 140#define B43legacy_SHM_SH_BEACPHYCTL 0x0054 /* Beacon PHY TX control word */
135/* SHM_SHARED ACK/CTS control */ 141/* SHM_SHARED ACK/CTS control */
136#define B43legacy_SHM_SH_ACKCTSPHYCTL 0x0022 /* ACK/CTS PHY control word */ 142#define B43legacy_SHM_SH_ACKCTSPHYCTL 0x0022 /* ACK/CTS PHY control word */
137/* SHM_SHARED probe response variables */ 143/* SHM_SHARED probe response variables */
138#define B43legacy_SHM_SH_PRPHYCTL 0x0188 /* Probe Resp PHY TX control */ 144#define B43legacy_SHM_SH_PRTLEN 0x004A /* Probe Response template length */
139#define B43legacy_SHM_SH_PRMAXTIME 0x0074 /* Probe Response max time */ 145#define B43legacy_SHM_SH_PRMAXTIME 0x0074 /* Probe Response max time */
146#define B43legacy_SHM_SH_PRPHYCTL 0x0188 /* Probe Resp PHY TX control */
140/* SHM_SHARED rate tables */ 147/* SHM_SHARED rate tables */
141/* SHM_SHARED microcode soft registers */ 148/* SHM_SHARED microcode soft registers */
142#define B43legacy_SHM_SH_UCODEREV 0x0000 /* Microcode revision */ 149#define B43legacy_SHM_SH_UCODEREV 0x0000 /* Microcode revision */
143#define B43legacy_SHM_SH_UCODEPATCH 0x0002 /* Microcode patchlevel */ 150#define B43legacy_SHM_SH_UCODEPATCH 0x0002 /* Microcode patchlevel */
144#define B43legacy_SHM_SH_UCODEDATE 0x0004 /* Microcode date */ 151#define B43legacy_SHM_SH_UCODEDATE 0x0004 /* Microcode date */
145#define B43legacy_SHM_SH_UCODETIME 0x0006 /* Microcode time */ 152#define B43legacy_SHM_SH_UCODETIME 0x0006 /* Microcode time */
153#define B43legacy_SHM_SH_SPUWKUP 0x0094 /* pre-wakeup for synth PU in us */
154#define B43legacy_SHM_SH_PRETBTT 0x0096 /* pre-TBTT in us */
146 155
147#define B43legacy_UCODEFLAGS_OFFSET 0x005E 156#define B43legacy_UCODEFLAGS_OFFSET 0x005E
148 157
@@ -199,6 +208,13 @@
199#define B43legacy_MACCTL_TBTTHOLD 0x10000000 /* TBTT Hold */ 208#define B43legacy_MACCTL_TBTTHOLD 0x10000000 /* TBTT Hold */
200#define B43legacy_MACCTL_GMODE 0x80000000 /* G Mode */ 209#define B43legacy_MACCTL_GMODE 0x80000000 /* G Mode */
201 210
211/* MAC Command bitfield */
212#define B43legacy_MACCMD_BEACON0_VALID 0x00000001 /* Beacon 0 in template RAM is busy/valid */
213#define B43legacy_MACCMD_BEACON1_VALID 0x00000002 /* Beacon 1 in template RAM is busy/valid */
214#define B43legacy_MACCMD_DFQ_VALID 0x00000004 /* Directed frame queue valid (IBSS PS mode, ATIM) */
215#define B43legacy_MACCMD_CCA 0x00000008 /* Clear channel assessment */
216#define B43legacy_MACCMD_BGNOISE 0x00000010 /* Background noise */
217
202/* 802.11 core specific TM State Low flags */ 218/* 802.11 core specific TM State Low flags */
203#define B43legacy_TMSLOW_GMODE 0x20000000 /* G Mode Enable */ 219#define B43legacy_TMSLOW_GMODE 0x20000000 /* G Mode Enable */
204#define B43legacy_TMSLOW_PLLREFSEL 0x00200000 /* PLL Freq Ref Select */ 220#define B43legacy_TMSLOW_PLLREFSEL 0x00200000 /* PLL Freq Ref Select */
@@ -317,15 +333,7 @@ enum {
317# undef assert 333# undef assert
318#endif 334#endif
319#ifdef CONFIG_B43LEGACY_DEBUG 335#ifdef CONFIG_B43LEGACY_DEBUG
320# define B43legacy_WARN_ON(expr) \ 336# define B43legacy_WARN_ON(x) WARN_ON(x)
321 do { \
322 if (unlikely((expr))) { \
323 printk(KERN_INFO PFX "Test (%s) failed at:" \
324 " %s:%d:%s()\n", \
325 #expr, __FILE__, \
326 __LINE__, __FUNCTION__); \
327 } \
328 } while (0)
329# define B43legacy_BUG_ON(expr) \ 337# define B43legacy_BUG_ON(expr) \
330 do { \ 338 do { \
331 if (unlikely((expr))) { \ 339 if (unlikely((expr))) { \
@@ -336,7 +344,9 @@ enum {
336 } while (0) 344 } while (0)
337# define B43legacy_DEBUG 1 345# define B43legacy_DEBUG 1
338#else 346#else
339# define B43legacy_WARN_ON(x) do { /* nothing */ } while (0) 347/* This will evaluate the argument even if debugging is disabled. */
348static inline bool __b43legacy_warn_on_dummy(bool x) { return x; }
349# define B43legacy_WARN_ON(x) __b43legacy_warn_on_dummy(unlikely(!!(x)))
340# define B43legacy_BUG_ON(x) do { /* nothing */ } while (0) 350# define B43legacy_BUG_ON(x) do { /* nothing */ } while (0)
341# define B43legacy_DEBUG 0 351# define B43legacy_DEBUG 0
342#endif 352#endif
@@ -392,10 +402,6 @@ struct b43legacy_phy {
392 u8 possible_phymodes; 402 u8 possible_phymodes;
393 /* GMODE bit enabled in MACCTL? */ 403 /* GMODE bit enabled in MACCTL? */
394 bool gmode; 404 bool gmode;
395 /* Possible ieee80211 subsystem hwmodes for this PHY.
396 * Which mode is selected depends on the GMODE enabled bit */
397#define B43legacy_MAX_PHYHWMODES 2
398 struct ieee80211_hw_mode hwmodes[B43legacy_MAX_PHYHWMODES];
399 405
400 /* Analog Type */ 406 /* Analog Type */
401 u8 analog; 407 u8 analog;
@@ -598,6 +604,12 @@ struct b43legacy_wl {
598 u8 nr_devs; 604 u8 nr_devs;
599 605
600 bool radiotap_enabled; 606 bool radiotap_enabled;
607
608 /* The beacon we are currently using (AP or IBSS mode).
609 * This beacon stuff is protected by the irq_lock. */
610 struct sk_buff *current_beacon;
611 bool beacon0_uploaded;
612 bool beacon1_uploaded;
601}; 613};
602 614
603/* Pointers to the firmware data and meta information about it. */ 615/* Pointers to the firmware data and meta information about it. */
@@ -649,7 +661,7 @@ struct b43legacy_wldev {
649 661
650 bool __using_pio; /* Using pio rather than dma. */ 662 bool __using_pio; /* Using pio rather than dma. */
651 bool bad_frames_preempt;/* Use "Bad Frames Preemption". */ 663 bool bad_frames_preempt;/* Use "Bad Frames Preemption". */
652 bool reg124_set_0x4; /* Variable to keep track of IRQ. */ 664 bool dfq_valid; /* Directed frame queue valid (IBSS PS mode, ATIM). */
653 bool short_preamble; /* TRUE if using short preamble. */ 665 bool short_preamble; /* TRUE if using short preamble. */
654 bool short_slot; /* TRUE if using short slot timing. */ 666 bool short_slot; /* TRUE if using short slot timing. */
655 bool radio_hw_enable; /* State of radio hardware enable bit. */ 667 bool radio_hw_enable; /* State of radio hardware enable bit. */
@@ -696,9 +708,6 @@ struct b43legacy_wldev {
696 u8 max_nr_keys; 708 u8 max_nr_keys;
697 struct b43legacy_key key[58]; 709 struct b43legacy_key key[58];
698 710
699 /* Cached beacon template while uploading the template. */
700 struct sk_buff *cached_beacon;
701
702 /* Firmware data */ 711 /* Firmware data */
703 struct b43legacy_firmware fw; 712 struct b43legacy_firmware fw;
704 713
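
A minimal sketch of the read-modify-write pattern the new B43legacy_MACCMD_* names are used with in main.c, not part of the patch; only the helper name is invented, the register accessors and bit definitions appear in the diff.

static void b43legacy_maccmd_set_sketch(struct b43legacy_wldev *dev, u32 flags)
{
	u32 cmd;

	cmd = b43legacy_read32(dev, B43legacy_MMIO_MACCMD);
	cmd |= flags;	/* e.g. B43legacy_MACCMD_BGNOISE or _DFQ_VALID */
	b43legacy_write32(dev, B43legacy_MMIO_MACCMD, cmd);
}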
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c
index 0f7a6e7bd96a..ef829ee8ffd4 100644
--- a/drivers/net/wireless/b43legacy/main.c
+++ b/drivers/net/wireless/b43legacy/main.c
@@ -95,28 +95,29 @@ MODULE_DEVICE_TABLE(ssb, b43legacy_ssb_tbl);
95 * data in there. This data is the same for all devices, so we don't 95 * data in there. This data is the same for all devices, so we don't
96 * get concurrency issues */ 96 * get concurrency issues */
97#define RATETAB_ENT(_rateid, _flags) \ 97#define RATETAB_ENT(_rateid, _flags) \
98 { \ 98 { \
99 .rate = B43legacy_RATE_TO_100KBPS(_rateid), \ 99 .bitrate = B43legacy_RATE_TO_100KBPS(_rateid), \
100 .val = (_rateid), \ 100 .hw_value = (_rateid), \
101 .val2 = (_rateid), \ 101 .flags = (_flags), \
102 .flags = (_flags), \
103 } 102 }
103/*
104 * NOTE: When changing this, sync with xmit.c's
105 * b43legacy_plcp_get_bitrate_idx_* functions!
106 */
104static struct ieee80211_rate __b43legacy_ratetable[] = { 107static struct ieee80211_rate __b43legacy_ratetable[] = {
105 RATETAB_ENT(B43legacy_CCK_RATE_1MB, IEEE80211_RATE_CCK), 108 RATETAB_ENT(B43legacy_CCK_RATE_1MB, 0),
106 RATETAB_ENT(B43legacy_CCK_RATE_2MB, IEEE80211_RATE_CCK_2), 109 RATETAB_ENT(B43legacy_CCK_RATE_2MB, IEEE80211_RATE_SHORT_PREAMBLE),
107 RATETAB_ENT(B43legacy_CCK_RATE_5MB, IEEE80211_RATE_CCK_2), 110 RATETAB_ENT(B43legacy_CCK_RATE_5MB, IEEE80211_RATE_SHORT_PREAMBLE),
108 RATETAB_ENT(B43legacy_CCK_RATE_11MB, IEEE80211_RATE_CCK_2), 111 RATETAB_ENT(B43legacy_CCK_RATE_11MB, IEEE80211_RATE_SHORT_PREAMBLE),
109 RATETAB_ENT(B43legacy_OFDM_RATE_6MB, IEEE80211_RATE_OFDM), 112 RATETAB_ENT(B43legacy_OFDM_RATE_6MB, 0),
110 RATETAB_ENT(B43legacy_OFDM_RATE_9MB, IEEE80211_RATE_OFDM), 113 RATETAB_ENT(B43legacy_OFDM_RATE_9MB, 0),
111 RATETAB_ENT(B43legacy_OFDM_RATE_12MB, IEEE80211_RATE_OFDM), 114 RATETAB_ENT(B43legacy_OFDM_RATE_12MB, 0),
112 RATETAB_ENT(B43legacy_OFDM_RATE_18MB, IEEE80211_RATE_OFDM), 115 RATETAB_ENT(B43legacy_OFDM_RATE_18MB, 0),
113 RATETAB_ENT(B43legacy_OFDM_RATE_24MB, IEEE80211_RATE_OFDM), 116 RATETAB_ENT(B43legacy_OFDM_RATE_24MB, 0),
114 RATETAB_ENT(B43legacy_OFDM_RATE_36MB, IEEE80211_RATE_OFDM), 117 RATETAB_ENT(B43legacy_OFDM_RATE_36MB, 0),
115 RATETAB_ENT(B43legacy_OFDM_RATE_48MB, IEEE80211_RATE_OFDM), 118 RATETAB_ENT(B43legacy_OFDM_RATE_48MB, 0),
116 RATETAB_ENT(B43legacy_OFDM_RATE_54MB, IEEE80211_RATE_OFDM), 119 RATETAB_ENT(B43legacy_OFDM_RATE_54MB, 0),
117}; 120};
118#define b43legacy_a_ratetable (__b43legacy_ratetable + 4)
119#define b43legacy_a_ratetable_size 8
120#define b43legacy_b_ratetable (__b43legacy_ratetable + 0) 121#define b43legacy_b_ratetable (__b43legacy_ratetable + 0)
121#define b43legacy_b_ratetable_size 4 122#define b43legacy_b_ratetable_size 4
122#define b43legacy_g_ratetable (__b43legacy_ratetable + 0) 123#define b43legacy_g_ratetable (__b43legacy_ratetable + 0)
@@ -124,14 +125,8 @@ static struct ieee80211_rate __b43legacy_ratetable[] = {
124 125
125#define CHANTAB_ENT(_chanid, _freq) \ 126#define CHANTAB_ENT(_chanid, _freq) \
126 { \ 127 { \
127 .chan = (_chanid), \ 128 .center_freq = (_freq), \
128 .freq = (_freq), \ 129 .hw_value = (_chanid), \
129 .val = (_chanid), \
130 .flag = IEEE80211_CHAN_W_SCAN | \
131 IEEE80211_CHAN_W_ACTIVE_SCAN | \
132 IEEE80211_CHAN_W_IBSS, \
133 .power_level = 0x0A, \
134 .antenna_max = 0xFF, \
135 } 130 }
136static struct ieee80211_channel b43legacy_bg_chantable[] = { 131static struct ieee80211_channel b43legacy_bg_chantable[] = {
137 CHANTAB_ENT(1, 2412), 132 CHANTAB_ENT(1, 2412),
@@ -149,7 +144,20 @@ static struct ieee80211_channel b43legacy_bg_chantable[] = {
149 CHANTAB_ENT(13, 2472), 144 CHANTAB_ENT(13, 2472),
150 CHANTAB_ENT(14, 2484), 145 CHANTAB_ENT(14, 2484),
151}; 146};
152#define b43legacy_bg_chantable_size ARRAY_SIZE(b43legacy_bg_chantable) 147
148static struct ieee80211_supported_band b43legacy_band_2GHz_BPHY = {
149 .channels = b43legacy_bg_chantable,
150 .n_channels = ARRAY_SIZE(b43legacy_bg_chantable),
151 .bitrates = b43legacy_b_ratetable,
152 .n_bitrates = b43legacy_b_ratetable_size,
153};
154
155static struct ieee80211_supported_band b43legacy_band_2GHz_GPHY = {
156 .channels = b43legacy_bg_chantable,
157 .n_channels = ARRAY_SIZE(b43legacy_bg_chantable),
158 .bitrates = b43legacy_g_ratetable,
159 .n_bitrates = b43legacy_g_ratetable_size,
160};
153 161
154static void b43legacy_wireless_core_exit(struct b43legacy_wldev *dev); 162static void b43legacy_wireless_core_exit(struct b43legacy_wldev *dev);
155static int b43legacy_wireless_core_init(struct b43legacy_wldev *dev); 163static int b43legacy_wireless_core_init(struct b43legacy_wldev *dev);
@@ -797,9 +805,8 @@ static void b43legacy_generate_noise_sample(struct b43legacy_wldev *dev)
797{ 805{
798 b43legacy_jssi_write(dev, 0x7F7F7F7F); 806 b43legacy_jssi_write(dev, 0x7F7F7F7F);
799 b43legacy_write32(dev, B43legacy_MMIO_MACCMD, 807 b43legacy_write32(dev, B43legacy_MMIO_MACCMD,
800 b43legacy_read32(dev, 808 b43legacy_read32(dev, B43legacy_MMIO_MACCMD)
801 B43legacy_MMIO_MACCMD) 809 | B43legacy_MACCMD_BGNOISE);
802 | (1 << 4));
803 B43legacy_WARN_ON(dev->noisecalc.channel_at_start != 810 B43legacy_WARN_ON(dev->noisecalc.channel_at_start !=
804 dev->phy.channel); 811 dev->phy.channel);
805} 812}
@@ -888,18 +895,18 @@ static void handle_irq_tbtt_indication(struct b43legacy_wldev *dev)
888 if (1/*FIXME: the last PSpoll frame was sent successfully */) 895 if (1/*FIXME: the last PSpoll frame was sent successfully */)
889 b43legacy_power_saving_ctl_bits(dev, -1, -1); 896 b43legacy_power_saving_ctl_bits(dev, -1, -1);
890 } 897 }
891 dev->reg124_set_0x4 = 0;
892 if (b43legacy_is_mode(dev->wl, IEEE80211_IF_TYPE_IBSS)) 898 if (b43legacy_is_mode(dev->wl, IEEE80211_IF_TYPE_IBSS))
893 dev->reg124_set_0x4 = 1; 899 dev->dfq_valid = 1;
894} 900}
895 901
896static void handle_irq_atim_end(struct b43legacy_wldev *dev) 902static void handle_irq_atim_end(struct b43legacy_wldev *dev)
897{ 903{
898 if (!dev->reg124_set_0x4) /*FIXME rename this variable*/ 904 if (dev->dfq_valid) {
899 return; 905 b43legacy_write32(dev, B43legacy_MMIO_MACCMD,
900 b43legacy_write32(dev, B43legacy_MMIO_MACCMD, 906 b43legacy_read32(dev, B43legacy_MMIO_MACCMD)
901 b43legacy_read32(dev, B43legacy_MMIO_MACCMD) 907 | B43legacy_MACCMD_DFQ_VALID);
902 | 0x4); 908 dev->dfq_valid = 0;
909 }
903} 910}
904 911
905static void handle_irq_pmq(struct b43legacy_wldev *dev) 912static void handle_irq_pmq(struct b43legacy_wldev *dev)
@@ -955,32 +962,77 @@ static void b43legacy_write_beacon_template(struct b43legacy_wldev *dev,
955 u16 ram_offset, 962 u16 ram_offset,
956 u16 shm_size_offset, u8 rate) 963 u16 shm_size_offset, u8 rate)
957{ 964{
958 int len;
959 const u8 *data;
960 965
961 B43legacy_WARN_ON(!dev->cached_beacon); 966 unsigned int i, len, variable_len;
962 len = min((size_t)dev->cached_beacon->len, 967 const struct ieee80211_mgmt *bcn;
968 const u8 *ie;
969 bool tim_found = 0;
970
971 bcn = (const struct ieee80211_mgmt *)(dev->wl->current_beacon->data);
972 len = min((size_t)dev->wl->current_beacon->len,
963 0x200 - sizeof(struct b43legacy_plcp_hdr6)); 973 0x200 - sizeof(struct b43legacy_plcp_hdr6));
964 data = (const u8 *)(dev->cached_beacon->data); 974
965 b43legacy_write_template_common(dev, data, 975 b43legacy_write_template_common(dev, (const u8 *)bcn, len, ram_offset,
966 len, ram_offset,
967 shm_size_offset, rate); 976 shm_size_offset, rate);
977
978 /* Find the position of the TIM and the DTIM_period value
979 * and write them to SHM. */
980 ie = bcn->u.beacon.variable;
981 variable_len = len - offsetof(struct ieee80211_mgmt, u.beacon.variable);
982 for (i = 0; i < variable_len - 2; ) {
983 uint8_t ie_id, ie_len;
984
985 ie_id = ie[i];
986 ie_len = ie[i + 1];
987 if (ie_id == 5) {
988 u16 tim_position;
989 u16 dtim_period;
990 /* This is the TIM Information Element */
991
992 /* Check whether the ie_len is in the beacon data range. */
993 if (variable_len < ie_len + 2 + i)
994 break;
995 /* A valid TIM is at least 4 bytes long. */
996 if (ie_len < 4)
997 break;
998 tim_found = 1;
999
1000 tim_position = sizeof(struct b43legacy_plcp_hdr6);
1001 tim_position += offsetof(struct ieee80211_mgmt,
1002 u.beacon.variable);
1003 tim_position += i;
1004
1005 dtim_period = ie[i + 3];
1006
1007 b43legacy_shm_write16(dev, B43legacy_SHM_SHARED,
1008 B43legacy_SHM_SH_TIMPOS, tim_position);
1009 b43legacy_shm_write16(dev, B43legacy_SHM_SHARED,
1010 B43legacy_SHM_SH_DTIMP, dtim_period);
1011 break;
1012 }
1013 i += ie_len + 2;
1014 }
1015 if (!tim_found) {
1016 b43legacywarn(dev->wl, "Did not find a valid TIM IE in the "
1017 "beacon template packet. AP or IBSS operation "
1018 "may be broken.\n");
1019 }
968} 1020}
969 1021
970static void b43legacy_write_probe_resp_plcp(struct b43legacy_wldev *dev, 1022static void b43legacy_write_probe_resp_plcp(struct b43legacy_wldev *dev,
971 u16 shm_offset, u16 size, 1023 u16 shm_offset, u16 size,
972 u8 rate) 1024 struct ieee80211_rate *rate)
973{ 1025{
974 struct b43legacy_plcp_hdr4 plcp; 1026 struct b43legacy_plcp_hdr4 plcp;
975 u32 tmp; 1027 u32 tmp;
976 __le16 dur; 1028 __le16 dur;
977 1029
978 plcp.data = 0; 1030 plcp.data = 0;
979 b43legacy_generate_plcp_hdr(&plcp, size + FCS_LEN, rate); 1031 b43legacy_generate_plcp_hdr(&plcp, size + FCS_LEN, rate->bitrate);
980 dur = ieee80211_generic_frame_duration(dev->wl->hw, 1032 dur = ieee80211_generic_frame_duration(dev->wl->hw,
981 dev->wl->vif, 1033 dev->wl->vif,
982 size, 1034 size,
983 B43legacy_RATE_TO_100KBPS(rate)); 1035 rate);
984 /* Write PLCP in two parts and timing for packet transfer */ 1036 /* Write PLCP in two parts and timing for packet transfer */
985 tmp = le32_to_cpu(plcp.data); 1037 tmp = le32_to_cpu(plcp.data);
986 b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, shm_offset, 1038 b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, shm_offset,
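
For reference, and not part of the patch: the TIM search in b43legacy_write_beacon_template() above walks the beacon's variable part as standard 802.11 information elements, a 1-byte element ID, a 1-byte length and then the payload, which is why the cursor advances by ie_len + 2. Element ID 5 is the TIM; its payload starts with the DTIM count and DTIM period, so the period is read at ie[i + 3]. A tiny layout sketch with a hypothetical struct name:

struct b43legacy_beacon_ie_sketch {
	u8 id;		/* 5 == TIM */
	u8 len;		/* >= 4 for a valid TIM */
	u8 payload[];	/* payload[0] = DTIM count, payload[1] = DTIM period,
			 * then bitmap control and the partial virtual bitmap */
} __attribute__ ((__packed__));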
@@ -997,45 +1049,44 @@ static void b43legacy_write_probe_resp_plcp(struct b43legacy_wldev *dev,
997 * 2) Patching duration field 1049 * 2) Patching duration field
998 * 3) Stripping TIM 1050 * 3) Stripping TIM
999 */ 1051 */
1000static u8 *b43legacy_generate_probe_resp(struct b43legacy_wldev *dev, 1052static const u8 *b43legacy_generate_probe_resp(struct b43legacy_wldev *dev,
1001 u16 *dest_size, u8 rate) 1053 u16 *dest_size,
1054 struct ieee80211_rate *rate)
1002{ 1055{
1003 const u8 *src_data; 1056 const u8 *src_data;
1004 u8 *dest_data; 1057 u8 *dest_data;
1005 u16 src_size; 1058 u16 src_size, elem_size, src_pos, dest_pos;
1006 u16 elem_size;
1007 u16 src_pos;
1008 u16 dest_pos;
1009 __le16 dur; 1059 __le16 dur;
1010 struct ieee80211_hdr *hdr; 1060 struct ieee80211_hdr *hdr;
1061 size_t ie_start;
1062
1063 src_size = dev->wl->current_beacon->len;
1064 src_data = (const u8 *)dev->wl->current_beacon->data;
1011 1065
1012 B43legacy_WARN_ON(!dev->cached_beacon); 1066 /* Get the start offset of the variable IEs in the packet. */
1013 src_size = dev->cached_beacon->len; 1067 ie_start = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
1014 src_data = (const u8 *)dev->cached_beacon->data; 1068 B43legacy_WARN_ON(ie_start != offsetof(struct ieee80211_mgmt,
1069 u.beacon.variable));
1015 1070
1016 if (unlikely(src_size < 0x24)) { 1071 if (B43legacy_WARN_ON(src_size < ie_start))
1017 b43legacydbg(dev->wl, "b43legacy_generate_probe_resp: "
1018 "invalid beacon\n");
1019 return NULL; 1072 return NULL;
1020 }
1021 1073
1022 dest_data = kmalloc(src_size, GFP_ATOMIC); 1074 dest_data = kmalloc(src_size, GFP_ATOMIC);
1023 if (unlikely(!dest_data)) 1075 if (unlikely(!dest_data))
1024 return NULL; 1076 return NULL;
1025 1077
1026 /* 0x24 is offset of first variable-len Information-Element 1078 /* Copy the static data and all Information Elements, except the TIM. */
1027 * in beacon frame. 1079 memcpy(dest_data, src_data, ie_start);
1028 */ 1080 src_pos = ie_start;
1029 memcpy(dest_data, src_data, 0x24); 1081 dest_pos = ie_start;
1030 src_pos = 0x24; 1082 for ( ; src_pos < src_size - 2; src_pos += elem_size) {
1031 dest_pos = 0x24;
1032 for (; src_pos < src_size - 2; src_pos += elem_size) {
1033 elem_size = src_data[src_pos + 1] + 2; 1083 elem_size = src_data[src_pos + 1] + 2;
1034 if (src_data[src_pos] != 0x05) { /* TIM */ 1084 if (src_data[src_pos] == 5) {
1035 memcpy(dest_data + dest_pos, src_data + src_pos, 1085 /* This is the TIM. */
1036 elem_size); 1086 continue;
1037 dest_pos += elem_size;
1038 } 1087 }
1088 memcpy(dest_data + dest_pos, src_data + src_pos, elem_size);
1089 dest_pos += elem_size;
1039 } 1090 }
1040 *dest_size = dest_pos; 1091 *dest_size = dest_pos;
1041 hdr = (struct ieee80211_hdr *)dest_data; 1092 hdr = (struct ieee80211_hdr *)dest_data;
@@ -1046,7 +1097,7 @@ static u8 *b43legacy_generate_probe_resp(struct b43legacy_wldev *dev,
1046 dur = ieee80211_generic_frame_duration(dev->wl->hw, 1097 dur = ieee80211_generic_frame_duration(dev->wl->hw,
1047 dev->wl->vif, 1098 dev->wl->vif,
1048 *dest_size, 1099 *dest_size,
1049 B43legacy_RATE_TO_100KBPS(rate)); 1100 rate);
1050 hdr->duration_id = dur; 1101 hdr->duration_id = dur;
1051 1102
1052 return dest_data; 1103 return dest_data;
@@ -1054,13 +1105,13 @@ static u8 *b43legacy_generate_probe_resp(struct b43legacy_wldev *dev,
1054 1105
1055static void b43legacy_write_probe_resp_template(struct b43legacy_wldev *dev, 1106static void b43legacy_write_probe_resp_template(struct b43legacy_wldev *dev,
1056 u16 ram_offset, 1107 u16 ram_offset,
1057 u16 shm_size_offset, u8 rate) 1108 u16 shm_size_offset,
1109 struct ieee80211_rate *rate)
1058{ 1110{
1059 u8 *probe_resp_data; 1111 const u8 *probe_resp_data;
1060 u16 size; 1112 u16 size;
1061 1113
1062 B43legacy_WARN_ON(!dev->cached_beacon); 1114 size = dev->wl->current_beacon->len;
1063 size = dev->cached_beacon->len;
1064 probe_resp_data = b43legacy_generate_probe_resp(dev, &size, rate); 1115 probe_resp_data = b43legacy_generate_probe_resp(dev, &size, rate);
1065 if (unlikely(!probe_resp_data)) 1116 if (unlikely(!probe_resp_data))
1066 return; 1117 return;
@@ -1069,59 +1120,37 @@ static void b43legacy_write_probe_resp_template(struct b43legacy_wldev *dev,
1069 * all possible basic rates 1120 * all possible basic rates
1070 */ 1121 */
1071 b43legacy_write_probe_resp_plcp(dev, 0x31A, size, 1122 b43legacy_write_probe_resp_plcp(dev, 0x31A, size,
1072 B43legacy_CCK_RATE_1MB); 1123 &b43legacy_b_ratetable[0]);
1073 b43legacy_write_probe_resp_plcp(dev, 0x32C, size, 1124 b43legacy_write_probe_resp_plcp(dev, 0x32C, size,
1074 B43legacy_CCK_RATE_2MB); 1125 &b43legacy_b_ratetable[1]);
1075 b43legacy_write_probe_resp_plcp(dev, 0x33E, size, 1126 b43legacy_write_probe_resp_plcp(dev, 0x33E, size,
1076 B43legacy_CCK_RATE_5MB); 1127 &b43legacy_b_ratetable[2]);
1077 b43legacy_write_probe_resp_plcp(dev, 0x350, size, 1128 b43legacy_write_probe_resp_plcp(dev, 0x350, size,
1078 B43legacy_CCK_RATE_11MB); 1129 &b43legacy_b_ratetable[3]);
1079 1130
1080 size = min((size_t)size, 1131 size = min((size_t)size,
1081 0x200 - sizeof(struct b43legacy_plcp_hdr6)); 1132 0x200 - sizeof(struct b43legacy_plcp_hdr6));
1082 b43legacy_write_template_common(dev, probe_resp_data, 1133 b43legacy_write_template_common(dev, probe_resp_data,
1083 size, ram_offset, 1134 size, ram_offset,
1084 shm_size_offset, rate); 1135 shm_size_offset, rate->bitrate);
1085 kfree(probe_resp_data); 1136 kfree(probe_resp_data);
1086} 1137}
1087 1138
1088static int b43legacy_refresh_cached_beacon(struct b43legacy_wldev *dev, 1139/* Asynchronously update the packet templates in template RAM.
1089 struct sk_buff *beacon) 1140 * Locking: Requires wl->irq_lock to be locked. */
1141static void b43legacy_update_templates(struct b43legacy_wl *wl,
1142 struct sk_buff *beacon)
1090{ 1143{
1091 if (dev->cached_beacon) 1144 /* This is the top half of the ansynchronous beacon update. The bottom
1092 kfree_skb(dev->cached_beacon); 1145 * half is the beacon IRQ. Beacon update must be asynchronous to avoid
1093 dev->cached_beacon = beacon; 1146 * sending an invalid beacon. This can happen for example, if the
1147 * firmware transmits a beacon while we are updating it. */
1094 1148
1095 return 0; 1149 if (wl->current_beacon)
1096} 1150 dev_kfree_skb_any(wl->current_beacon);
1097 1151 wl->current_beacon = beacon;
1098static void b43legacy_update_templates(struct b43legacy_wldev *dev) 1152 wl->beacon0_uploaded = 0;
1099{ 1153 wl->beacon1_uploaded = 0;
1100 u32 status;
1101
1102 B43legacy_WARN_ON(!dev->cached_beacon);
1103
1104 b43legacy_write_beacon_template(dev, 0x68, 0x18,
1105 B43legacy_CCK_RATE_1MB);
1106 b43legacy_write_beacon_template(dev, 0x468, 0x1A,
1107 B43legacy_CCK_RATE_1MB);
1108 b43legacy_write_probe_resp_template(dev, 0x268, 0x4A,
1109 B43legacy_CCK_RATE_11MB);
1110
1111 status = b43legacy_read32(dev, B43legacy_MMIO_MACCMD);
1112 status |= 0x03;
1113 b43legacy_write32(dev, B43legacy_MMIO_MACCMD, status);
1114}
1115
1116static void b43legacy_refresh_templates(struct b43legacy_wldev *dev,
1117 struct sk_buff *beacon)
1118{
1119 int err;
1120
1121 err = b43legacy_refresh_cached_beacon(dev, beacon);
1122 if (unlikely(err))
1123 return;
1124 b43legacy_update_templates(dev);
1125} 1154}
1126 1155
1127static void b43legacy_set_ssid(struct b43legacy_wldev *dev, 1156static void b43legacy_set_ssid(struct b43legacy_wldev *dev,
@@ -1162,38 +1191,37 @@ static void b43legacy_set_beacon_int(struct b43legacy_wldev *dev,
1162 1191
1163static void handle_irq_beacon(struct b43legacy_wldev *dev) 1192static void handle_irq_beacon(struct b43legacy_wldev *dev)
1164{ 1193{
1165 u32 status; 1194 struct b43legacy_wl *wl = dev->wl;
1195 u32 cmd;
1166 1196
1167 if (!b43legacy_is_mode(dev->wl, IEEE80211_IF_TYPE_AP)) 1197 if (!b43legacy_is_mode(wl, IEEE80211_IF_TYPE_AP))
1168 return; 1198 return;
1169 1199
1170 dev->irq_savedstate &= ~B43legacy_IRQ_BEACON; 1200 /* This is the bottom half of the asynchronous beacon update. */
1171 status = b43legacy_read32(dev, B43legacy_MMIO_MACCMD); 1201
1172 1202 cmd = b43legacy_read32(dev, B43legacy_MMIO_MACCMD);
1173 if (!dev->cached_beacon || ((status & 0x1) && (status & 0x2))) { 1203 if (!(cmd & B43legacy_MACCMD_BEACON0_VALID)) {
1174 /* ACK beacon IRQ. */ 1204 if (!wl->beacon0_uploaded) {
1175 b43legacy_write32(dev, B43legacy_MMIO_GEN_IRQ_REASON, 1205 b43legacy_write_beacon_template(dev, 0x68,
1176 B43legacy_IRQ_BEACON); 1206 B43legacy_SHM_SH_BTL0,
1177 dev->irq_savedstate |= B43legacy_IRQ_BEACON; 1207 B43legacy_CCK_RATE_1MB);
1178 if (dev->cached_beacon) 1208 b43legacy_write_probe_resp_template(dev, 0x268,
1179 kfree_skb(dev->cached_beacon); 1209 B43legacy_SHM_SH_PRTLEN,
1180 dev->cached_beacon = NULL; 1210 &__b43legacy_ratetable[3]);
1181 return; 1211 wl->beacon0_uploaded = 1;
1182 } 1212 }
1183 if (!(status & 0x1)) { 1213 cmd |= B43legacy_MACCMD_BEACON0_VALID;
1184 b43legacy_write_beacon_template(dev, 0x68, 0x18, 1214 }
1185 B43legacy_CCK_RATE_1MB); 1215 if (!(cmd & B43legacy_MACCMD_BEACON1_VALID)) {
1186 status |= 0x1; 1216 if (!wl->beacon1_uploaded) {
1187 b43legacy_write32(dev, B43legacy_MMIO_MACCMD, 1217 b43legacy_write_beacon_template(dev, 0x468,
1188 status); 1218 B43legacy_SHM_SH_BTL1,
1189 } 1219 B43legacy_CCK_RATE_1MB);
1190 if (!(status & 0x2)) { 1220 wl->beacon1_uploaded = 1;
1191 b43legacy_write_beacon_template(dev, 0x468, 0x1A, 1221 }
1192 B43legacy_CCK_RATE_1MB); 1222 cmd |= B43legacy_MACCMD_BEACON1_VALID;
1193 status |= 0x2;
1194 b43legacy_write32(dev, B43legacy_MMIO_MACCMD,
1195 status);
1196 } 1223 }
1224 b43legacy_write32(dev, B43legacy_MMIO_MACCMD, cmd);
1197} 1225}
1198 1226
1199static void handle_irq_ucode_debug(struct b43legacy_wldev *dev) 1227static void handle_irq_ucode_debug(struct b43legacy_wldev *dev)
@@ -2552,14 +2580,16 @@ static int b43legacy_op_dev_config(struct ieee80211_hw *hw,
2552 antenna_rx = b43legacy_antenna_from_ieee80211(conf->antenna_sel_rx); 2580 antenna_rx = b43legacy_antenna_from_ieee80211(conf->antenna_sel_rx);
2553 2581
2554 mutex_lock(&wl->mutex); 2582 mutex_lock(&wl->mutex);
2583 dev = wl->current_dev;
2584 phy = &dev->phy;
2555 2585
2556 /* Switch the PHY mode (if necessary). */ 2586 /* Switch the PHY mode (if necessary). */
2557 switch (conf->phymode) { 2587 switch (conf->channel->band) {
2558 case MODE_IEEE80211B: 2588 case IEEE80211_BAND_2GHZ:
2559 new_phymode = B43legacy_PHYMODE_B; 2589 if (phy->type == B43legacy_PHYTYPE_B)
2560 break; 2590 new_phymode = B43legacy_PHYMODE_B;
2561 case MODE_IEEE80211G: 2591 else
2562 new_phymode = B43legacy_PHYMODE_G; 2592 new_phymode = B43legacy_PHYMODE_G;
2563 break; 2593 break;
2564 default: 2594 default:
2565 B43legacy_WARN_ON(1); 2595 B43legacy_WARN_ON(1);
@@ -2567,8 +2597,6 @@ static int b43legacy_op_dev_config(struct ieee80211_hw *hw,
2567 err = b43legacy_switch_phymode(wl, new_phymode); 2597 err = b43legacy_switch_phymode(wl, new_phymode);
2568 if (err) 2598 if (err)
2569 goto out_unlock_mutex; 2599 goto out_unlock_mutex;
2570 dev = wl->current_dev;
2571 phy = &dev->phy;
2572 2600
2573 /* Disable IRQs while reconfiguring the device. 2601 /* Disable IRQs while reconfiguring the device.
2574 * This makes it possible to drop the spinlock throughout 2602 * This makes it possible to drop the spinlock throughout
@@ -2584,8 +2612,8 @@ static int b43legacy_op_dev_config(struct ieee80211_hw *hw,
2584 2612
2585 /* Switch to the requested channel. 2613 /* Switch to the requested channel.
2586 * The firmware takes care of races with the TX handler. */ 2614 * The firmware takes care of races with the TX handler. */
2587 if (conf->channel_val != phy->channel) 2615 if (conf->channel->hw_value != phy->channel)
2588 b43legacy_radio_selectchannel(dev, conf->channel_val, 0); 2616 b43legacy_radio_selectchannel(dev, conf->channel->hw_value, 0);
2589 2617
2590 /* Enable/Disable ShortSlot timing. */ 2618 /* Enable/Disable ShortSlot timing. */
2591 if ((!!(conf->flags & IEEE80211_CONF_SHORT_SLOT_TIME)) 2619 if ((!!(conf->flags & IEEE80211_CONF_SHORT_SLOT_TIME))
@@ -2702,7 +2730,7 @@ static int b43legacy_op_config_interface(struct ieee80211_hw *hw,
2702 B43legacy_WARN_ON(conf->type != IEEE80211_IF_TYPE_AP); 2730 B43legacy_WARN_ON(conf->type != IEEE80211_IF_TYPE_AP);
2703 b43legacy_set_ssid(dev, conf->ssid, conf->ssid_len); 2731 b43legacy_set_ssid(dev, conf->ssid, conf->ssid_len);
2704 if (conf->beacon) 2732 if (conf->beacon)
2705 b43legacy_refresh_templates(dev, conf->beacon); 2733 b43legacy_update_templates(wl, conf->beacon);
2706 } 2734 }
2707 b43legacy_write_mac_bssid_templates(dev); 2735 b43legacy_write_mac_bssid_templates(dev);
2708 } 2736 }
@@ -2920,7 +2948,7 @@ static void setup_struct_phy_for_init(struct b43legacy_wldev *dev,
2920static void setup_struct_wldev_for_init(struct b43legacy_wldev *dev) 2948static void setup_struct_wldev_for_init(struct b43legacy_wldev *dev)
2921{ 2949{
2922 /* Flags */ 2950 /* Flags */
2923 dev->reg124_set_0x4 = 0; 2951 dev->dfq_valid = 0;
2924 2952
2925 /* Stats */ 2953 /* Stats */
2926 memset(&dev->stats, 0, sizeof(dev->stats)); 2954 memset(&dev->stats, 0, sizeof(dev->stats));
@@ -2979,6 +3007,34 @@ static void b43legacy_set_retry_limits(struct b43legacy_wldev *dev,
2979 b43legacy_shm_write16(dev, B43legacy_SHM_WIRELESS, 0x0007, long_retry); 3007 b43legacy_shm_write16(dev, B43legacy_SHM_WIRELESS, 0x0007, long_retry);
2980} 3008}
2981 3009
3010static void b43legacy_set_synth_pu_delay(struct b43legacy_wldev *dev,
3011 bool idle) {
3012 u16 pu_delay = 1050;
3013
3014 if (b43legacy_is_mode(dev->wl, IEEE80211_IF_TYPE_IBSS) || idle)
3015 pu_delay = 500;
3016 if ((dev->phy.radio_ver == 0x2050) && (dev->phy.radio_rev == 8))
3017 pu_delay = max(pu_delay, (u16)2400);
3018
3019 b43legacy_shm_write16(dev, B43legacy_SHM_SHARED,
3020 B43legacy_SHM_SH_SPUWKUP, pu_delay);
3021}
3022
3023/* Set the TSF CFP pre-TargetBeaconTransmissionTime. */
3024static void b43legacy_set_pretbtt(struct b43legacy_wldev *dev)
3025{
3026 u16 pretbtt;
3027
3028 /* The time value is in microseconds. */
3029 if (b43legacy_is_mode(dev->wl, IEEE80211_IF_TYPE_IBSS))
3030 pretbtt = 2;
3031 else
3032 pretbtt = 250;
3033 b43legacy_shm_write16(dev, B43legacy_SHM_SHARED,
3034 B43legacy_SHM_SH_PRETBTT, pretbtt);
3035 b43legacy_write16(dev, B43legacy_MMIO_TSF_CFP_PRETBTT, pretbtt);
3036}
3037
2982/* Shutdown a wireless core */ 3038/* Shutdown a wireless core */
2983/* Locking: wl->mutex */ 3039/* Locking: wl->mutex */
2984static void b43legacy_wireless_core_exit(struct b43legacy_wldev *dev) 3040static void b43legacy_wireless_core_exit(struct b43legacy_wldev *dev)
@@ -3015,6 +3071,11 @@ static void b43legacy_wireless_core_exit(struct b43legacy_wldev *dev)
3015 kfree(phy->tssi2dbm); 3071 kfree(phy->tssi2dbm);
3016 kfree(phy->lo_control); 3072 kfree(phy->lo_control);
3017 phy->lo_control = NULL; 3073 phy->lo_control = NULL;
3074 if (dev->wl->current_beacon) {
3075 dev_kfree_skb_any(dev->wl->current_beacon);
3076 dev->wl->current_beacon = NULL;
3077 }
3078
3018 ssb_device_disable(dev->dev, 0); 3079 ssb_device_disable(dev->dev, 0);
3019 ssb_bus_may_powerdown(dev->dev->bus); 3080 ssb_bus_may_powerdown(dev->dev->bus);
3020} 3081}
@@ -3160,9 +3221,7 @@ static int b43legacy_wireless_core_init(struct b43legacy_wldev *dev)
3160 if (err) 3221 if (err)
3161 goto err_chip_exit; 3222 goto err_chip_exit;
3162 3223
3163 b43legacy_write16(dev, 0x0612, 0x0050); 3224 b43legacy_set_synth_pu_delay(dev, 1);
3164 b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, 0x0416, 0x0050);
3165 b43legacy_shm_write16(dev, B43legacy_SHM_SHARED, 0x0414, 0x01F4);
3166 3225
3167 ssb_bus_powerup(bus, 1); /* Enable dynamic PCTL */ 3226 ssb_bus_powerup(bus, 1); /* Enable dynamic PCTL */
3168 b43legacy_upload_card_macaddress(dev); 3227 b43legacy_upload_card_macaddress(dev);
@@ -3218,6 +3277,8 @@ static int b43legacy_op_add_interface(struct ieee80211_hw *hw,
3218 3277
3219 spin_lock_irqsave(&wl->irq_lock, flags); 3278 spin_lock_irqsave(&wl->irq_lock, flags);
3220 b43legacy_adjust_opmode(dev); 3279 b43legacy_adjust_opmode(dev);
3280 b43legacy_set_pretbtt(dev);
3281 b43legacy_set_synth_pu_delay(dev, 0);
3221 b43legacy_upload_card_macaddress(dev); 3282 b43legacy_upload_card_macaddress(dev);
3222 spin_unlock_irqrestore(&wl->irq_lock, flags); 3283 spin_unlock_irqrestore(&wl->irq_lock, flags);
3223 3284
@@ -3339,6 +3400,41 @@ out_unlock:
3339 return err; 3400 return err;
3340} 3401}
3341 3402
3403static int b43legacy_op_beacon_set_tim(struct ieee80211_hw *hw,
3404 int aid, int set)
3405{
3406 struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw);
3407 struct sk_buff *beacon;
3408 unsigned long flags;
3409
3410 /* We could modify the existing beacon and set the aid bit in the TIM
3411 * field, but that would probably require resizing and moving of data
3412 * within the beacon template. Simply request a new beacon and let
3413 * mac80211 do the hard work. */
3414 beacon = ieee80211_beacon_get(hw, wl->vif, NULL);
3415 if (unlikely(!beacon))
3416 return -ENOMEM;
3417 spin_lock_irqsave(&wl->irq_lock, flags);
3418 b43legacy_update_templates(wl, beacon);
3419 spin_unlock_irqrestore(&wl->irq_lock, flags);
3420
3421 return 0;
3422}
3423
3424static int b43legacy_op_ibss_beacon_update(struct ieee80211_hw *hw,
3425 struct sk_buff *beacon,
3426 struct ieee80211_tx_control *ctl)
3427{
3428 struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw);
3429 unsigned long flags;
3430
3431 spin_lock_irqsave(&wl->irq_lock, flags);
3432 b43legacy_update_templates(wl, beacon);
3433 spin_unlock_irqrestore(&wl->irq_lock, flags);
3434
3435 return 0;
3436}
3437
3342static const struct ieee80211_ops b43legacy_hw_ops = { 3438static const struct ieee80211_ops b43legacy_hw_ops = {
3343 .tx = b43legacy_op_tx, 3439 .tx = b43legacy_op_tx,
3344 .conf_tx = b43legacy_op_conf_tx, 3440 .conf_tx = b43legacy_op_conf_tx,
@@ -3352,6 +3448,8 @@ static const struct ieee80211_ops b43legacy_hw_ops = {
3352 .start = b43legacy_op_start, 3448 .start = b43legacy_op_start,
3353 .stop = b43legacy_op_stop, 3449 .stop = b43legacy_op_stop,
3354 .set_retry_limit = b43legacy_op_set_retry_limit, 3450 .set_retry_limit = b43legacy_op_set_retry_limit,
3451 .set_tim = b43legacy_op_beacon_set_tim,
3452 .beacon_update = b43legacy_op_ibss_beacon_update,
3355}; 3453};
3356 3454
3357/* Hard-reset the chip. Do not call this directly. 3455/* Hard-reset the chip. Do not call this directly.
@@ -3400,48 +3498,19 @@ static int b43legacy_setup_modes(struct b43legacy_wldev *dev,
3400 int have_gphy) 3498 int have_gphy)
3401{ 3499{
3402 struct ieee80211_hw *hw = dev->wl->hw; 3500 struct ieee80211_hw *hw = dev->wl->hw;
3403 struct ieee80211_hw_mode *mode;
3404 struct b43legacy_phy *phy = &dev->phy; 3501 struct b43legacy_phy *phy = &dev->phy;
3405 int cnt = 0;
3406 int err;
3407 3502
3408 phy->possible_phymodes = 0; 3503 phy->possible_phymodes = 0;
3409 for (; 1; cnt++) { 3504 if (have_bphy) {
3410 if (have_bphy) { 3505 hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
3411 B43legacy_WARN_ON(cnt >= B43legacy_MAX_PHYHWMODES); 3506 &b43legacy_band_2GHz_BPHY;
3412 mode = &phy->hwmodes[cnt]; 3507 phy->possible_phymodes |= B43legacy_PHYMODE_B;
3413 3508 }
3414 mode->mode = MODE_IEEE80211B; 3509
3415 mode->num_channels = b43legacy_bg_chantable_size; 3510 if (have_gphy) {
3416 mode->channels = b43legacy_bg_chantable; 3511 hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
3417 mode->num_rates = b43legacy_b_ratetable_size; 3512 &b43legacy_band_2GHz_GPHY;
3418 mode->rates = b43legacy_b_ratetable; 3513 phy->possible_phymodes |= B43legacy_PHYMODE_G;
3419 err = ieee80211_register_hwmode(hw, mode);
3420 if (err)
3421 return err;
3422
3423 phy->possible_phymodes |= B43legacy_PHYMODE_B;
3424 have_bphy = 0;
3425 continue;
3426 }
3427 if (have_gphy) {
3428 B43legacy_WARN_ON(cnt >= B43legacy_MAX_PHYHWMODES);
3429 mode = &phy->hwmodes[cnt];
3430
3431 mode->mode = MODE_IEEE80211G;
3432 mode->num_channels = b43legacy_bg_chantable_size;
3433 mode->channels = b43legacy_bg_chantable;
3434 mode->num_rates = b43legacy_g_ratetable_size;
3435 mode->rates = b43legacy_g_ratetable;
3436 err = ieee80211_register_hwmode(hw, mode);
3437 if (err)
3438 return err;
3439
3440 phy->possible_phymodes |= B43legacy_PHYMODE_G;
3441 have_gphy = 0;
3442 continue;
3443 }
3444 break;
3445 } 3514 }
3446 3515
3447 return 0; 3516 return 0;
diff --git a/drivers/net/wireless/b43legacy/xmit.c b/drivers/net/wireless/b43legacy/xmit.c
index d84408a82db9..dcad2491a606 100644
--- a/drivers/net/wireless/b43legacy/xmit.c
+++ b/drivers/net/wireless/b43legacy/xmit.c
@@ -37,45 +37,48 @@
37 37
38 38
39/* Extract the bitrate out of a CCK PLCP header. */ 39/* Extract the bitrate out of a CCK PLCP header. */
40static u8 b43legacy_plcp_get_bitrate_cck(struct b43legacy_plcp_hdr6 *plcp) 40static u8 b43legacy_plcp_get_bitrate_idx_cck(struct b43legacy_plcp_hdr6 *plcp)
41{ 41{
42 switch (plcp->raw[0]) { 42 switch (plcp->raw[0]) {
43 case 0x0A: 43 case 0x0A:
44 return B43legacy_CCK_RATE_1MB; 44 return 0;
45 case 0x14: 45 case 0x14:
46 return B43legacy_CCK_RATE_2MB; 46 return 1;
47 case 0x37: 47 case 0x37:
48 return B43legacy_CCK_RATE_5MB; 48 return 2;
49 case 0x6E: 49 case 0x6E:
50 return B43legacy_CCK_RATE_11MB; 50 return 3;
51 } 51 }
52 B43legacy_BUG_ON(1); 52 B43legacy_BUG_ON(1);
53 return 0; 53 return -1;
54} 54}
55 55
56/* Extract the bitrate out of an OFDM PLCP header. */ 56/* Extract the bitrate out of an OFDM PLCP header. */
57static u8 b43legacy_plcp_get_bitrate_ofdm(struct b43legacy_plcp_hdr6 *plcp) 57static u8 b43legacy_plcp_get_bitrate_idx_ofdm(struct b43legacy_plcp_hdr6 *plcp,
58 bool aphy)
58{ 59{
60 int base = aphy ? 0 : 4;
61
59 switch (plcp->raw[0] & 0xF) { 62 switch (plcp->raw[0] & 0xF) {
60 case 0xB: 63 case 0xB:
61 return B43legacy_OFDM_RATE_6MB; 64 return base + 0;
62 case 0xF: 65 case 0xF:
63 return B43legacy_OFDM_RATE_9MB; 66 return base + 1;
64 case 0xA: 67 case 0xA:
65 return B43legacy_OFDM_RATE_12MB; 68 return base + 2;
66 case 0xE: 69 case 0xE:
67 return B43legacy_OFDM_RATE_18MB; 70 return base + 3;
68 case 0x9: 71 case 0x9:
69 return B43legacy_OFDM_RATE_24MB; 72 return base + 4;
70 case 0xD: 73 case 0xD:
71 return B43legacy_OFDM_RATE_36MB; 74 return base + 5;
72 case 0x8: 75 case 0x8:
73 return B43legacy_OFDM_RATE_48MB; 76 return base + 6;
74 case 0xC: 77 case 0xC:
75 return B43legacy_OFDM_RATE_54MB; 78 return base + 7;
76 } 79 }
77 B43legacy_BUG_ON(1); 80 B43legacy_BUG_ON(1);
78 return 0; 81 return -1;
79} 82}
80 83
81u8 b43legacy_plcp_get_ratecode_cck(const u8 bitrate) 84u8 b43legacy_plcp_get_ratecode_cck(const u8 bitrate)
@@ -192,7 +195,7 @@ static int generate_txhdr_fw3(struct b43legacy_wldev *dev,
192 int use_encryption = (!(txctl->flags & IEEE80211_TXCTL_DO_NOT_ENCRYPT)); 195 int use_encryption = (!(txctl->flags & IEEE80211_TXCTL_DO_NOT_ENCRYPT));
193 u16 fctl; 196 u16 fctl;
194 u8 rate; 197 u8 rate;
195 u8 rate_fb; 198 struct ieee80211_rate *rate_fb;
196 int rate_ofdm; 199 int rate_ofdm;
197 int rate_fb_ofdm; 200 int rate_fb_ofdm;
198 unsigned int plcp_fragment_len; 201 unsigned int plcp_fragment_len;
@@ -204,16 +207,16 @@ static int generate_txhdr_fw3(struct b43legacy_wldev *dev,
204 207
205 memset(txhdr, 0, sizeof(*txhdr)); 208 memset(txhdr, 0, sizeof(*txhdr));
206 209
207 rate = txctl->tx_rate; 210 rate = txctl->tx_rate->hw_value;
208 rate_ofdm = b43legacy_is_ofdm_rate(rate); 211 rate_ofdm = b43legacy_is_ofdm_rate(rate);
209 rate_fb = (txctl->alt_retry_rate == -1) ? rate : txctl->alt_retry_rate; 212 rate_fb = txctl->alt_retry_rate ? : txctl->tx_rate;
210 rate_fb_ofdm = b43legacy_is_ofdm_rate(rate_fb); 213 rate_fb_ofdm = b43legacy_is_ofdm_rate(rate_fb->hw_value);
211 214
212 txhdr->mac_frame_ctl = wlhdr->frame_control; 215 txhdr->mac_frame_ctl = wlhdr->frame_control;
213 memcpy(txhdr->tx_receiver, wlhdr->addr1, 6); 216 memcpy(txhdr->tx_receiver, wlhdr->addr1, 6);
214 217
215 /* Calculate duration for fallback rate */ 218 /* Calculate duration for fallback rate */
216 if ((rate_fb == rate) || 219 if ((rate_fb->hw_value == rate) ||
217 (wlhdr->duration_id & cpu_to_le16(0x8000)) || 220 (wlhdr->duration_id & cpu_to_le16(0x8000)) ||
218 (wlhdr->duration_id == cpu_to_le16(0))) { 221 (wlhdr->duration_id == cpu_to_le16(0))) {
219 /* If the fallback rate equals the normal rate or the 222 /* If the fallback rate equals the normal rate or the
@@ -221,11 +224,10 @@ static int generate_txhdr_fw3(struct b43legacy_wldev *dev,
221 * use the original dur_id field. */ 224 * use the original dur_id field. */
222 txhdr->dur_fb = wlhdr->duration_id; 225 txhdr->dur_fb = wlhdr->duration_id;
223 } else { 226 } else {
224 int fbrate_base100kbps = B43legacy_RATE_TO_100KBPS(rate_fb);
225 txhdr->dur_fb = ieee80211_generic_frame_duration(dev->wl->hw, 227 txhdr->dur_fb = ieee80211_generic_frame_duration(dev->wl->hw,
226 txctl->vif, 228 txctl->vif,
227 fragment_len, 229 fragment_len,
228 fbrate_base100kbps); 230 rate_fb);
229 } 231 }
230 232
231 plcp_fragment_len = fragment_len + FCS_LEN; 233 plcp_fragment_len = fragment_len + FCS_LEN;
@@ -266,7 +268,7 @@ static int generate_txhdr_fw3(struct b43legacy_wldev *dev,
266 rate); 268 rate);
267 b43legacy_generate_plcp_hdr((struct b43legacy_plcp_hdr4 *) 269 b43legacy_generate_plcp_hdr((struct b43legacy_plcp_hdr4 *)
268 (&txhdr->plcp_fb), plcp_fragment_len, 270 (&txhdr->plcp_fb), plcp_fragment_len,
269 rate_fb); 271 rate_fb->hw_value);
270 272
271 /* PHY TX Control word */ 273 /* PHY TX Control word */
272 if (rate_ofdm) 274 if (rate_ofdm)
@@ -310,7 +312,7 @@ static int generate_txhdr_fw3(struct b43legacy_wldev *dev,
310 int rts_rate_ofdm; 312 int rts_rate_ofdm;
311 int rts_rate_fb_ofdm; 313 int rts_rate_fb_ofdm;
312 314
313 rts_rate = txctl->rts_cts_rate; 315 rts_rate = txctl->rts_cts_rate->hw_value;
314 rts_rate_ofdm = b43legacy_is_ofdm_rate(rts_rate); 316 rts_rate_ofdm = b43legacy_is_ofdm_rate(rts_rate);
315 rts_rate_fb = b43legacy_calc_fallback_rate(rts_rate); 317 rts_rate_fb = b43legacy_calc_fallback_rate(rts_rate);
316 rts_rate_fb_ofdm = b43legacy_is_ofdm_rate(rts_rate_fb); 318 rts_rate_fb_ofdm = b43legacy_is_ofdm_rate(rts_rate_fb);
@@ -536,19 +538,24 @@ void b43legacy_rx(struct b43legacy_wldev *dev,
536 (phystat3 & B43legacy_RX_PHYST3_TRSTATE)); 538 (phystat3 & B43legacy_RX_PHYST3_TRSTATE));
537 status.noise = dev->stats.link_noise; 539 status.noise = dev->stats.link_noise;
538 status.signal = (jssi * 100) / B43legacy_RX_MAX_SSI; 540 status.signal = (jssi * 100) / B43legacy_RX_MAX_SSI;
541 /* change to support A PHY */
539 if (phystat0 & B43legacy_RX_PHYST0_OFDM) 542 if (phystat0 & B43legacy_RX_PHYST0_OFDM)
540 status.rate = b43legacy_plcp_get_bitrate_ofdm(plcp); 543 status.rate_idx = b43legacy_plcp_get_bitrate_idx_ofdm(plcp, false);
541 else 544 else
542 status.rate = b43legacy_plcp_get_bitrate_cck(plcp); 545 status.rate_idx = b43legacy_plcp_get_bitrate_idx_cck(plcp);
543 status.antenna = !!(phystat0 & B43legacy_RX_PHYST0_ANT); 546 status.antenna = !!(phystat0 & B43legacy_RX_PHYST0_ANT);
544 547
545 /* 548 /*
546 * If monitors are present get full 64-bit timestamp. This 549 * All frames on monitor interfaces and beacons always need a full
547 * code assumes we get to process the packet within 16 bits 550 * 64-bit timestamp. Monitor interfaces need it for diagnostic
548 * of timestamp, i.e. about 65 milliseconds after the PHY 551 * purposes and beacons for IBSS merging.
549 * received the first symbol. 552 * This code assumes we get to process the packet within 16 bits
553 * of timestamp, i.e. about 65 milliseconds after the PHY received
554 * the first symbol.
550 */ 555 */
551 if (dev->wl->radiotap_enabled) { 556 if (((fctl & (IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE))
557 == (IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_BEACON)) ||
558 dev->wl->radiotap_enabled) {
552 u16 low_mactime_now; 559 u16 low_mactime_now;
553 560
554 b43legacy_tsf_read(dev, &status.mactime); 561 b43legacy_tsf_read(dev, &status.mactime);
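For reference, a minimal sketch (not part of the patch) of how a 16-bit RX timestamp can be extended to the full 64-bit mactime from the current TSF, under the assumption stated in the comment above that the frame is processed within one 16-bit window (about 65 milliseconds). The function name and parameters are illustrative only.

static u64 extend_rx_mactime(u64 tsf_now, u16 rx_mactime)
{
	u16 low_now = tsf_now & 0xFFFF;		/* low 16 bits of the current TSF */
	u64 mactime = tsf_now & ~0xFFFFULL;	/* keep the upper 48 bits */

	/* If the low word already wrapped past the RX stamp, the frame was
	 * received in the previous 65536 us window. */
	if (low_now < rx_mactime)
		mactime -= 0x10000;

	return mactime | rx_mactime;
}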
@@ -564,14 +571,9 @@ void b43legacy_rx(struct b43legacy_wldev *dev,
564 B43legacy_RX_CHAN_ID_SHIFT; 571 B43legacy_RX_CHAN_ID_SHIFT;
565 switch (chanstat & B43legacy_RX_CHAN_PHYTYPE) { 572 switch (chanstat & B43legacy_RX_CHAN_PHYTYPE) {
566 case B43legacy_PHYTYPE_B: 573 case B43legacy_PHYTYPE_B:
567 status.phymode = MODE_IEEE80211B;
568 status.freq = chanid + 2400;
569 status.channel = b43legacy_freq_to_channel_bg(chanid + 2400);
570 break;
571 case B43legacy_PHYTYPE_G: 574 case B43legacy_PHYTYPE_G:
572 status.phymode = MODE_IEEE80211G; 575 status.band = IEEE80211_BAND_2GHZ;
573 status.freq = chanid + 2400; 576 status.freq = chanid + 2400;
574 status.channel = b43legacy_freq_to_channel_bg(chanid + 2400);
575 break; 577 break;
576 default: 578 default:
577 b43legacywarn(dev->wl, "Unexpected value for chanstat (0x%X)\n", 579 b43legacywarn(dev->wl, "Unexpected value for chanstat (0x%X)\n",
diff --git a/drivers/net/wireless/bcm43xx/Kconfig b/drivers/net/wireless/bcm43xx/Kconfig
deleted file mode 100644
index afb8f4305c24..000000000000
--- a/drivers/net/wireless/bcm43xx/Kconfig
+++ /dev/null
@@ -1,70 +0,0 @@
1config BCM43XX
2 tristate "Broadcom BCM43xx wireless support (DEPRECATED)"
3 depends on PCI && IEEE80211 && IEEE80211_SOFTMAC && WLAN_80211 && (!SSB_B43_PCI_BRIDGE || SSB != y) && EXPERIMENTAL
4 select WIRELESS_EXT
5 select FW_LOADER
6 select HW_RANDOM
7 ---help---
8 This is an experimental driver for the Broadcom 43xx wireless
9 chip, found in the Apple Airport Extreme and various other
10 devices. This driver is deprecated and will be removed
11 from the kernel in the near future. It has been replaced
12 by the b43 and b43legacy drivers.
13
14config BCM43XX_DEBUG
15 bool "Broadcom BCM43xx debugging (RECOMMENDED)"
16 depends on BCM43XX
17 default y
18 ---help---
19 Broadcom 43xx debugging messages.
20 Say Y, because the driver is still very experimental and
21 this will help you get it running.
22
23config BCM43XX_DMA
24 bool
25 depends on BCM43XX
26
27config BCM43XX_PIO
28 bool
29 depends on BCM43XX
30
31choice
32 prompt "BCM43xx data transfer mode"
33 depends on BCM43XX
34 default BCM43XX_DMA_AND_PIO_MODE
35
36config BCM43XX_DMA_AND_PIO_MODE
37 bool "DMA + PIO"
38 select BCM43XX_DMA
39 select BCM43XX_PIO
40 ---help---
 41	 Include both Direct Memory Access (DMA) and Programmed I/O (PIO)
 42	 data transfer modes.
 43	 The mode actually used is selected through the module
 44	 parameter "pio". If the module parameter is pio=0, DMA is used;
 45	 otherwise PIO is used. DMA is the default.
46
47 If unsure, choose this option.
48
49config BCM43XX_DMA_MODE
50 bool "DMA (Direct Memory Access) only"
51 select BCM43XX_DMA
52 ---help---
53 Only include Direct Memory Access (DMA).
54 This reduces the size of the driver module, by omitting the PIO code.
55
56config BCM43XX_PIO_MODE
57 bool "PIO (Programmed I/O) only"
58 select BCM43XX_PIO
59 ---help---
60 Only include Programmed I/O (PIO).
61 This reduces the size of the driver module, by omitting the DMA code.
62 Please note that PIO transfers are slow (compared to DMA).
63
64 Also note that not all devices of the 43xx series support PIO.
65 The 4306 (Apple Airport Extreme and others) supports PIO, while
66 the 4318 is known to _not_ support PIO.
67
 68	 Only use PIO if DMA does not work for you.
69
70endchoice
diff --git a/drivers/net/wireless/bcm43xx/Makefile b/drivers/net/wireless/bcm43xx/Makefile
deleted file mode 100644
index bb5220c629d2..000000000000
--- a/drivers/net/wireless/bcm43xx/Makefile
+++ /dev/null
@@ -1,12 +0,0 @@
1obj-$(CONFIG_BCM43XX) += bcm43xx.o
2bcm43xx-obj-$(CONFIG_BCM43XX_DEBUG) += bcm43xx_debugfs.o
3
4bcm43xx-obj-$(CONFIG_BCM43XX_DMA) += bcm43xx_dma.o
5bcm43xx-obj-$(CONFIG_BCM43XX_PIO) += bcm43xx_pio.o
6
7bcm43xx-objs := bcm43xx_main.o bcm43xx_ilt.o \
8 bcm43xx_radio.o bcm43xx_phy.o \
9 bcm43xx_power.o bcm43xx_wx.o \
10 bcm43xx_leds.o bcm43xx_ethtool.o \
11 bcm43xx_xmit.o bcm43xx_sysfs.o \
12 $(bcm43xx-obj-y)
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx.h b/drivers/net/wireless/bcm43xx/bcm43xx.h
deleted file mode 100644
index 2ebd2edf5862..000000000000
--- a/drivers/net/wireless/bcm43xx/bcm43xx.h
+++ /dev/null
@@ -1,997 +0,0 @@
1#ifndef BCM43xx_H_
2#define BCM43xx_H_
3
4#include <linux/hw_random.h>
5#include <linux/version.h>
6#include <linux/kernel.h>
7#include <linux/spinlock.h>
8#include <linux/interrupt.h>
9#include <linux/stringify.h>
10#include <linux/pci.h>
11#include <net/ieee80211.h>
12#include <net/ieee80211softmac.h>
13#include <asm/atomic.h>
14#include <asm/io.h>
15
16
17#include "bcm43xx_debugfs.h"
18#include "bcm43xx_leds.h"
19
20
21#define PFX KBUILD_MODNAME ": "
22
23#define BCM43xx_SWITCH_CORE_MAX_RETRIES 50
24#define BCM43xx_IRQWAIT_MAX_RETRIES 100
25
26#define BCM43xx_IO_SIZE 8192
27
28/* Active Core PCI Configuration Register. */
29#define BCM43xx_PCICFG_ACTIVE_CORE 0x80
30/* SPROM control register. */
31#define BCM43xx_PCICFG_SPROMCTL 0x88
32/* Interrupt Control PCI Configuration Register. (Only on PCI cores with rev >= 6) */
33#define BCM43xx_PCICFG_ICR 0x94
34
35/* MMIO offsets */
36#define BCM43xx_MMIO_DMA0_REASON 0x20
37#define BCM43xx_MMIO_DMA0_IRQ_MASK 0x24
38#define BCM43xx_MMIO_DMA1_REASON 0x28
39#define BCM43xx_MMIO_DMA1_IRQ_MASK 0x2C
40#define BCM43xx_MMIO_DMA2_REASON 0x30
41#define BCM43xx_MMIO_DMA2_IRQ_MASK 0x34
42#define BCM43xx_MMIO_DMA3_REASON 0x38
43#define BCM43xx_MMIO_DMA3_IRQ_MASK 0x3C
44#define BCM43xx_MMIO_DMA4_REASON 0x40
45#define BCM43xx_MMIO_DMA4_IRQ_MASK 0x44
46#define BCM43xx_MMIO_DMA5_REASON 0x48
47#define BCM43xx_MMIO_DMA5_IRQ_MASK 0x4C
48#define BCM43xx_MMIO_STATUS_BITFIELD 0x120
49#define BCM43xx_MMIO_STATUS2_BITFIELD 0x124
50#define BCM43xx_MMIO_GEN_IRQ_REASON 0x128
51#define BCM43xx_MMIO_GEN_IRQ_MASK 0x12C
52#define BCM43xx_MMIO_RAM_CONTROL 0x130
53#define BCM43xx_MMIO_RAM_DATA 0x134
54#define BCM43xx_MMIO_PS_STATUS 0x140
55#define BCM43xx_MMIO_RADIO_HWENABLED_HI 0x158
56#define BCM43xx_MMIO_SHM_CONTROL 0x160
57#define BCM43xx_MMIO_SHM_DATA 0x164
58#define BCM43xx_MMIO_SHM_DATA_UNALIGNED 0x166
59#define BCM43xx_MMIO_XMITSTAT_0 0x170
60#define BCM43xx_MMIO_XMITSTAT_1 0x174
61#define BCM43xx_MMIO_REV3PLUS_TSF_LOW 0x180 /* core rev >= 3 only */
62#define BCM43xx_MMIO_REV3PLUS_TSF_HIGH 0x184 /* core rev >= 3 only */
63
64/* 32-bit DMA */
65#define BCM43xx_MMIO_DMA32_BASE0 0x200
66#define BCM43xx_MMIO_DMA32_BASE1 0x220
67#define BCM43xx_MMIO_DMA32_BASE2 0x240
68#define BCM43xx_MMIO_DMA32_BASE3 0x260
69#define BCM43xx_MMIO_DMA32_BASE4 0x280
70#define BCM43xx_MMIO_DMA32_BASE5 0x2A0
71/* 64-bit DMA */
72#define BCM43xx_MMIO_DMA64_BASE0 0x200
73#define BCM43xx_MMIO_DMA64_BASE1 0x240
74#define BCM43xx_MMIO_DMA64_BASE2 0x280
75#define BCM43xx_MMIO_DMA64_BASE3 0x2C0
76#define BCM43xx_MMIO_DMA64_BASE4 0x300
77#define BCM43xx_MMIO_DMA64_BASE5 0x340
78/* PIO */
79#define BCM43xx_MMIO_PIO1_BASE 0x300
80#define BCM43xx_MMIO_PIO2_BASE 0x310
81#define BCM43xx_MMIO_PIO3_BASE 0x320
82#define BCM43xx_MMIO_PIO4_BASE 0x330
83
84#define BCM43xx_MMIO_PHY_VER 0x3E0
85#define BCM43xx_MMIO_PHY_RADIO 0x3E2
86#define BCM43xx_MMIO_ANTENNA 0x3E8
87#define BCM43xx_MMIO_CHANNEL 0x3F0
88#define BCM43xx_MMIO_CHANNEL_EXT 0x3F4
89#define BCM43xx_MMIO_RADIO_CONTROL 0x3F6
90#define BCM43xx_MMIO_RADIO_DATA_HIGH 0x3F8
91#define BCM43xx_MMIO_RADIO_DATA_LOW 0x3FA
92#define BCM43xx_MMIO_PHY_CONTROL 0x3FC
93#define BCM43xx_MMIO_PHY_DATA 0x3FE
94#define BCM43xx_MMIO_MACFILTER_CONTROL 0x420
95#define BCM43xx_MMIO_MACFILTER_DATA 0x422
96#define BCM43xx_MMIO_RADIO_HWENABLED_LO 0x49A
97#define BCM43xx_MMIO_GPIO_CONTROL 0x49C
98#define BCM43xx_MMIO_GPIO_MASK 0x49E
99#define BCM43xx_MMIO_TSF_0 0x632 /* core rev < 3 only */
100#define BCM43xx_MMIO_TSF_1 0x634 /* core rev < 3 only */
101#define BCM43xx_MMIO_TSF_2 0x636 /* core rev < 3 only */
102#define BCM43xx_MMIO_TSF_3 0x638 /* core rev < 3 only */
103#define BCM43xx_MMIO_RNG 0x65A
104#define BCM43xx_MMIO_POWERUP_DELAY 0x6A8
105
106/* SPROM offsets. */
107#define BCM43xx_SPROM_BASE 0x1000
108#define BCM43xx_SPROM_BOARDFLAGS2 0x1c
109#define BCM43xx_SPROM_IL0MACADDR 0x24
110#define BCM43xx_SPROM_ET0MACADDR 0x27
111#define BCM43xx_SPROM_ET1MACADDR 0x2a
112#define BCM43xx_SPROM_ETHPHY 0x2d
113#define BCM43xx_SPROM_BOARDREV 0x2e
114#define BCM43xx_SPROM_PA0B0 0x2f
115#define BCM43xx_SPROM_PA0B1 0x30
116#define BCM43xx_SPROM_PA0B2 0x31
117#define BCM43xx_SPROM_WL0GPIO0 0x32
118#define BCM43xx_SPROM_WL0GPIO2 0x33
119#define BCM43xx_SPROM_MAXPWR 0x34
120#define BCM43xx_SPROM_PA1B0 0x35
121#define BCM43xx_SPROM_PA1B1 0x36
122#define BCM43xx_SPROM_PA1B2 0x37
123#define BCM43xx_SPROM_IDL_TSSI_TGT 0x38
124#define BCM43xx_SPROM_BOARDFLAGS 0x39
125#define BCM43xx_SPROM_ANTENNA_GAIN 0x3a
126#define BCM43xx_SPROM_VERSION 0x3f
127
128/* BCM43xx_SPROM_BOARDFLAGS values */
 129#define BCM43xx_BFL_BTCOEXIST 0x0001 /* implements Bluetooth coexistence */
130#define BCM43xx_BFL_PACTRL 0x0002 /* GPIO 9 controlling the PA */
131#define BCM43xx_BFL_AIRLINEMODE 0x0004 /* implements GPIO 13 radio disable indication */
132#define BCM43xx_BFL_RSSI 0x0008 /* software calculates nrssi slope. */
133#define BCM43xx_BFL_ENETSPI 0x0010 /* has ephy roboswitch spi */
134#define BCM43xx_BFL_XTAL_NOSLOW 0x0020 /* no slow clock available */
135#define BCM43xx_BFL_CCKHIPWR 0x0040 /* can do high power CCK transmission */
136#define BCM43xx_BFL_ENETADM 0x0080 /* has ADMtek switch */
137#define BCM43xx_BFL_ENETVLAN 0x0100 /* can do vlan */
138#define BCM43xx_BFL_AFTERBURNER 0x0200 /* supports Afterburner mode */
139#define BCM43xx_BFL_NOPCI 0x0400 /* leaves PCI floating */
140#define BCM43xx_BFL_FEM 0x0800 /* supports the Front End Module */
141#define BCM43xx_BFL_EXTLNA 0x1000 /* has an external LNA */
 142#define BCM43xx_BFL_HGPA 0x2000 /* has a high-gain PA */
143#define BCM43xx_BFL_BTCMOD 0x4000 /* BFL_BTCOEXIST is given in alternate GPIOs */
144#define BCM43xx_BFL_ALTIQ 0x8000 /* alternate I/Q settings */
145
146/* GPIO register offset, in both ChipCommon and PCI core. */
147#define BCM43xx_GPIO_CONTROL 0x6c
148
149/* SHM Routing */
150#define BCM43xx_SHM_SHARED 0x0001
151#define BCM43xx_SHM_WIRELESS 0x0002
152#define BCM43xx_SHM_PCM 0x0003
153#define BCM43xx_SHM_HWMAC 0x0004
154#define BCM43xx_SHM_UCODE 0x0300
155
156/* MacFilter offsets. */
157#define BCM43xx_MACFILTER_SELF 0x0000
158#define BCM43xx_MACFILTER_ASSOC 0x0003
159
160/* Chipcommon registers. */
161#define BCM43xx_CHIPCOMMON_CAPABILITIES 0x04
162#define BCM43xx_CHIPCOMMON_CTL 0x28
163#define BCM43xx_CHIPCOMMON_PLLONDELAY 0xB0
164#define BCM43xx_CHIPCOMMON_FREFSELDELAY 0xB4
165#define BCM43xx_CHIPCOMMON_SLOWCLKCTL 0xB8
166#define BCM43xx_CHIPCOMMON_SYSCLKCTL 0xC0
167
168/* PCI core specific registers. */
169#define BCM43xx_PCICORE_BCAST_ADDR 0x50
170#define BCM43xx_PCICORE_BCAST_DATA 0x54
171#define BCM43xx_PCICORE_SBTOPCI2 0x108
172
173/* SBTOPCI2 values. */
174#define BCM43xx_SBTOPCI2_PREFETCH 0x4
175#define BCM43xx_SBTOPCI2_BURST 0x8
176#define BCM43xx_SBTOPCI2_MEMREAD_MULTI 0x20
177
178/* PCI-E core registers. */
179#define BCM43xx_PCIECORE_REG_ADDR 0x0130
180#define BCM43xx_PCIECORE_REG_DATA 0x0134
181#define BCM43xx_PCIECORE_MDIO_CTL 0x0128
182#define BCM43xx_PCIECORE_MDIO_DATA 0x012C
183
184/* PCI-E registers. */
185#define BCM43xx_PCIE_TLP_WORKAROUND 0x0004
186#define BCM43xx_PCIE_DLLP_LINKCTL 0x0100
187
188/* PCI-E MDIO bits. */
189#define BCM43xx_PCIE_MDIO_ST 0x40000000
190#define BCM43xx_PCIE_MDIO_WT 0x10000000
191#define BCM43xx_PCIE_MDIO_DEV 22
192#define BCM43xx_PCIE_MDIO_REG 18
193#define BCM43xx_PCIE_MDIO_TA 0x00020000
194#define BCM43xx_PCIE_MDIO_TC 0x0100
195
196/* MDIO devices. */
197#define BCM43xx_MDIO_SERDES_RX 0x1F
198
199/* SERDES RX registers. */
200#define BCM43xx_SERDES_RXTIMER 0x2
201#define BCM43xx_SERDES_CDR 0x6
202#define BCM43xx_SERDES_CDR_BW 0x7
203
204/* Chipcommon capabilities. */
205#define BCM43xx_CAPABILITIES_PCTL 0x00040000
206#define BCM43xx_CAPABILITIES_PLLMASK 0x00030000
207#define BCM43xx_CAPABILITIES_PLLSHIFT 16
208#define BCM43xx_CAPABILITIES_FLASHMASK 0x00000700
209#define BCM43xx_CAPABILITIES_FLASHSHIFT 8
210#define BCM43xx_CAPABILITIES_EXTBUSPRESENT 0x00000040
211#define BCM43xx_CAPABILITIES_UARTGPIO 0x00000020
212#define BCM43xx_CAPABILITIES_UARTCLOCKMASK 0x00000018
213#define BCM43xx_CAPABILITIES_UARTCLOCKSHIFT 3
214#define BCM43xx_CAPABILITIES_MIPSBIGENDIAN 0x00000004
215#define BCM43xx_CAPABILITIES_NRUARTSMASK 0x00000003
216
217/* PowerControl */
218#define BCM43xx_PCTL_IN 0xB0
219#define BCM43xx_PCTL_OUT 0xB4
220#define BCM43xx_PCTL_OUTENABLE 0xB8
221#define BCM43xx_PCTL_XTAL_POWERUP 0x40
222#define BCM43xx_PCTL_PLL_POWERDOWN 0x80
223
224/* PowerControl Clock Modes */
225#define BCM43xx_PCTL_CLK_FAST 0x00
226#define BCM43xx_PCTL_CLK_SLOW 0x01
227#define BCM43xx_PCTL_CLK_DYNAMIC 0x02
228
229#define BCM43xx_PCTL_FORCE_SLOW 0x0800
230#define BCM43xx_PCTL_FORCE_PLL 0x1000
231#define BCM43xx_PCTL_DYN_XTAL 0x2000
232
233/* COREIDs */
234#define BCM43xx_COREID_CHIPCOMMON 0x800
235#define BCM43xx_COREID_ILINE20 0x801
236#define BCM43xx_COREID_SDRAM 0x803
237#define BCM43xx_COREID_PCI 0x804
238#define BCM43xx_COREID_MIPS 0x805
239#define BCM43xx_COREID_ETHERNET 0x806
240#define BCM43xx_COREID_V90 0x807
241#define BCM43xx_COREID_USB11_HOSTDEV 0x80a
242#define BCM43xx_COREID_IPSEC 0x80b
243#define BCM43xx_COREID_PCMCIA 0x80d
244#define BCM43xx_COREID_EXT_IF 0x80f
245#define BCM43xx_COREID_80211 0x812
246#define BCM43xx_COREID_MIPS_3302 0x816
247#define BCM43xx_COREID_USB11_HOST 0x817
248#define BCM43xx_COREID_USB11_DEV 0x818
249#define BCM43xx_COREID_USB20_HOST 0x819
250#define BCM43xx_COREID_USB20_DEV 0x81a
251#define BCM43xx_COREID_SDIO_HOST 0x81b
252#define BCM43xx_COREID_PCIE 0x820
253
254/* Core Information Registers */
255#define BCM43xx_CIR_BASE 0xf00
256#define BCM43xx_CIR_SBTPSFLAG (BCM43xx_CIR_BASE + 0x18)
257#define BCM43xx_CIR_SBIMSTATE (BCM43xx_CIR_BASE + 0x90)
258#define BCM43xx_CIR_SBINTVEC (BCM43xx_CIR_BASE + 0x94)
259#define BCM43xx_CIR_SBTMSTATELOW (BCM43xx_CIR_BASE + 0x98)
260#define BCM43xx_CIR_SBTMSTATEHIGH (BCM43xx_CIR_BASE + 0x9c)
261#define BCM43xx_CIR_SBIMCONFIGLOW (BCM43xx_CIR_BASE + 0xa8)
262#define BCM43xx_CIR_SB_ID_HI (BCM43xx_CIR_BASE + 0xfc)
263
264/* Mask to get the Backplane Flag Number from SBTPSFLAG. */
265#define BCM43xx_BACKPLANE_FLAG_NR_MASK 0x3f
266
267/* SBIMCONFIGLOW values/masks. */
268#define BCM43xx_SBIMCONFIGLOW_SERVICE_TOUT_MASK 0x00000007
269#define BCM43xx_SBIMCONFIGLOW_SERVICE_TOUT_SHIFT 0
270#define BCM43xx_SBIMCONFIGLOW_REQUEST_TOUT_MASK 0x00000070
271#define BCM43xx_SBIMCONFIGLOW_REQUEST_TOUT_SHIFT 4
272#define BCM43xx_SBIMCONFIGLOW_CONNID_MASK 0x00ff0000
273#define BCM43xx_SBIMCONFIGLOW_CONNID_SHIFT 16
274
275/* sbtmstatelow state flags */
276#define BCM43xx_SBTMSTATELOW_RESET 0x01
277#define BCM43xx_SBTMSTATELOW_REJECT 0x02
278#define BCM43xx_SBTMSTATELOW_CLOCK 0x10000
279#define BCM43xx_SBTMSTATELOW_FORCE_GATE_CLOCK 0x20000
280#define BCM43xx_SBTMSTATELOW_G_MODE_ENABLE 0x20000000
281
282/* sbtmstatehigh state flags */
283#define BCM43xx_SBTMSTATEHIGH_SERROR 0x00000001
284#define BCM43xx_SBTMSTATEHIGH_BUSY 0x00000004
285#define BCM43xx_SBTMSTATEHIGH_TIMEOUT 0x00000020
286#define BCM43xx_SBTMSTATEHIGH_G_PHY_AVAIL 0x00010000
287#define BCM43xx_SBTMSTATEHIGH_A_PHY_AVAIL 0x00020000
288#define BCM43xx_SBTMSTATEHIGH_COREFLAGS 0x1FFF0000
289#define BCM43xx_SBTMSTATEHIGH_DMA64BIT 0x10000000
290#define BCM43xx_SBTMSTATEHIGH_GATEDCLK 0x20000000
291#define BCM43xx_SBTMSTATEHIGH_BISTFAILED 0x40000000
292#define BCM43xx_SBTMSTATEHIGH_BISTCOMPLETE 0x80000000
293
294/* sbimstate flags */
295#define BCM43xx_SBIMSTATE_IB_ERROR 0x20000
296#define BCM43xx_SBIMSTATE_TIMEOUT 0x40000
297
298/* PHYVersioning */
299#define BCM43xx_PHYTYPE_A 0x00
300#define BCM43xx_PHYTYPE_B 0x01
301#define BCM43xx_PHYTYPE_G 0x02
302
303/* PHYRegisters */
304#define BCM43xx_PHY_ILT_A_CTRL 0x0072
305#define BCM43xx_PHY_ILT_A_DATA1 0x0073
306#define BCM43xx_PHY_ILT_A_DATA2 0x0074
307#define BCM43xx_PHY_G_LO_CONTROL 0x0810
308#define BCM43xx_PHY_ILT_G_CTRL 0x0472
309#define BCM43xx_PHY_ILT_G_DATA1 0x0473
310#define BCM43xx_PHY_ILT_G_DATA2 0x0474
311#define BCM43xx_PHY_A_PCTL 0x007B
312#define BCM43xx_PHY_G_PCTL 0x0029
313#define BCM43xx_PHY_A_CRS 0x0029
314#define BCM43xx_PHY_RADIO_BITFIELD 0x0401
315#define BCM43xx_PHY_G_CRS 0x0429
316#define BCM43xx_PHY_NRSSILT_CTRL 0x0803
317#define BCM43xx_PHY_NRSSILT_DATA 0x0804
318
319/* RadioRegisters */
320#define BCM43xx_RADIOCTL_ID 0x01
321
322/* StatusBitField */
323#define BCM43xx_SBF_MAC_ENABLED 0x00000001
324#define BCM43xx_SBF_2 0x00000002 /*FIXME: fix name*/
325#define BCM43xx_SBF_CORE_READY 0x00000004
326#define BCM43xx_SBF_400 0x00000400 /*FIXME: fix name*/
327#define BCM43xx_SBF_4000 0x00004000 /*FIXME: fix name*/
328#define BCM43xx_SBF_8000 0x00008000 /*FIXME: fix name*/
329#define BCM43xx_SBF_XFER_REG_BYTESWAP 0x00010000
330#define BCM43xx_SBF_MODE_NOTADHOC 0x00020000
331#define BCM43xx_SBF_MODE_AP 0x00040000
332#define BCM43xx_SBF_RADIOREG_LOCK 0x00080000
333#define BCM43xx_SBF_MODE_MONITOR 0x00400000
334#define BCM43xx_SBF_MODE_PROMISC 0x01000000
335#define BCM43xx_SBF_PS1 0x02000000
336#define BCM43xx_SBF_PS2 0x04000000
337#define BCM43xx_SBF_NO_SSID_BCAST 0x08000000
338#define BCM43xx_SBF_TIME_UPDATE 0x10000000
339#define BCM43xx_SBF_MODE_G 0x80000000
340
341/* Microcode */
342#define BCM43xx_UCODE_REVISION 0x0000
343#define BCM43xx_UCODE_PATCHLEVEL 0x0002
344#define BCM43xx_UCODE_DATE 0x0004
345#define BCM43xx_UCODE_TIME 0x0006
346#define BCM43xx_UCODE_STATUS 0x0040
347
348/* MicrocodeFlagsBitfield (addr + lo-word values?)*/
349#define BCM43xx_UCODEFLAGS_OFFSET 0x005E
350
351#define BCM43xx_UCODEFLAG_AUTODIV 0x0001
352#define BCM43xx_UCODEFLAG_UNKBGPHY 0x0002
353#define BCM43xx_UCODEFLAG_UNKBPHY 0x0004
354#define BCM43xx_UCODEFLAG_UNKGPHY 0x0020
355#define BCM43xx_UCODEFLAG_UNKPACTRL 0x0040
356#define BCM43xx_UCODEFLAG_JAPAN 0x0080
357
358/* Hardware Radio Enable masks */
359#define BCM43xx_MMIO_RADIO_HWENABLED_HI_MASK (1 << 16)
360#define BCM43xx_MMIO_RADIO_HWENABLED_LO_MASK (1 << 4)
361
362/* Generic-Interrupt reasons. */
363#define BCM43xx_IRQ_READY (1 << 0)
364#define BCM43xx_IRQ_BEACON (1 << 1)
365#define BCM43xx_IRQ_PS (1 << 2)
366#define BCM43xx_IRQ_REG124 (1 << 5)
367#define BCM43xx_IRQ_PMQ (1 << 6)
368#define BCM43xx_IRQ_PIO_WORKAROUND (1 << 8)
369#define BCM43xx_IRQ_XMIT_ERROR (1 << 11)
370#define BCM43xx_IRQ_RX (1 << 15)
371#define BCM43xx_IRQ_SCAN (1 << 16)
372#define BCM43xx_IRQ_NOISE (1 << 18)
373#define BCM43xx_IRQ_XMIT_STATUS (1 << 29)
374
375#define BCM43xx_IRQ_ALL 0xffffffff
376#define BCM43xx_IRQ_INITIAL (BCM43xx_IRQ_PS | \
377 BCM43xx_IRQ_REG124 | \
378 BCM43xx_IRQ_PMQ | \
379 BCM43xx_IRQ_XMIT_ERROR | \
380 BCM43xx_IRQ_RX | \
381 BCM43xx_IRQ_SCAN | \
382 BCM43xx_IRQ_NOISE | \
383 BCM43xx_IRQ_XMIT_STATUS)
384
385
386/* Initial default iw_mode */
387#define BCM43xx_INITIAL_IWMODE IW_MODE_INFRA
388
389/* Bus type PCI. */
390#define BCM43xx_BUSTYPE_PCI 0
 391/* Bus type Silicon Backplane Bus. */
392#define BCM43xx_BUSTYPE_SB 1
393/* Bus type PCMCIA. */
394#define BCM43xx_BUSTYPE_PCMCIA 2
395
396/* Threshold values. */
397#define BCM43xx_MIN_RTS_THRESHOLD 1U
398#define BCM43xx_MAX_RTS_THRESHOLD 2304U
399#define BCM43xx_DEFAULT_RTS_THRESHOLD BCM43xx_MAX_RTS_THRESHOLD
400
401#define BCM43xx_DEFAULT_SHORT_RETRY_LIMIT 7
402#define BCM43xx_DEFAULT_LONG_RETRY_LIMIT 4
403
404/* FIXME: the next line is a guess as to what the maximum RSSI value might be */
405#define RX_RSSI_MAX 60
406
407/* Max size of a security key */
408#define BCM43xx_SEC_KEYSIZE 16
409/* Security algorithms. */
410enum {
411 BCM43xx_SEC_ALGO_NONE = 0, /* unencrypted, as of TX header. */
412 BCM43xx_SEC_ALGO_WEP,
413 BCM43xx_SEC_ALGO_UNKNOWN,
414 BCM43xx_SEC_ALGO_AES,
415 BCM43xx_SEC_ALGO_WEP104,
416 BCM43xx_SEC_ALGO_TKIP,
417};
418
419#ifdef assert
420# undef assert
421#endif
422#ifdef CONFIG_BCM43XX_DEBUG
423#define assert(expr) \
424 do { \
425 if (unlikely(!(expr))) { \
426 printk(KERN_ERR PFX "ASSERTION FAILED (%s) at: %s:%d:%s()\n", \
427 #expr, __FILE__, __LINE__, __FUNCTION__); \
428 } \
429 } while (0)
430#else
431#define assert(expr) do { /* nothing */ } while (0)
432#endif
433
434/* rate limited printk(). */
435#ifdef printkl
436# undef printkl
437#endif
438#define printkl(f, x...) do { if (printk_ratelimit()) printk(f ,##x); } while (0)
439/* rate limited printk() for debugging */
440#ifdef dprintkl
441# undef dprintkl
442#endif
443#ifdef CONFIG_BCM43XX_DEBUG
444# define dprintkl printkl
445#else
446# define dprintkl(f, x...) do { /* nothing */ } while (0)
447#endif
448
449/* Helper macro for if branches.
450 * An if branch marked with this macro is only taken in DEBUG mode.
451 * Example:
452 * if (DEBUG_ONLY(foo == bar)) {
453 * do something
454 * }
455 * In DEBUG mode, the branch will be taken if (foo == bar).
456 * In non-DEBUG mode, the branch will never be taken.
457 */
458#ifdef DEBUG_ONLY
459# undef DEBUG_ONLY
460#endif
461#ifdef CONFIG_BCM43XX_DEBUG
462# define DEBUG_ONLY(x) (x)
463#else
464# define DEBUG_ONLY(x) 0
465#endif
466
467/* debugging printk() */
468#ifdef dprintk
469# undef dprintk
470#endif
471#ifdef CONFIG_BCM43XX_DEBUG
472# define dprintk(f, x...) do { printk(f ,##x); } while (0)
473#else
474# define dprintk(f, x...) do { /* nothing */ } while (0)
475#endif
476
477
478struct net_device;
479struct pci_dev;
480struct bcm43xx_dmaring;
481struct bcm43xx_pioqueue;
482
483struct bcm43xx_initval {
484 __be16 offset;
485 __be16 size;
486 __be32 value;
487} __attribute__((__packed__));
488
 489/* Values for bcm43xx_sprominfo.locale */
490enum {
491 BCM43xx_LOCALE_WORLD = 0,
492 BCM43xx_LOCALE_THAILAND,
493 BCM43xx_LOCALE_ISRAEL,
494 BCM43xx_LOCALE_JORDAN,
495 BCM43xx_LOCALE_CHINA,
496 BCM43xx_LOCALE_JAPAN,
497 BCM43xx_LOCALE_USA_CANADA_ANZ,
498 BCM43xx_LOCALE_EUROPE,
499 BCM43xx_LOCALE_USA_LOW,
500 BCM43xx_LOCALE_JAPAN_HIGH,
501 BCM43xx_LOCALE_ALL,
502 BCM43xx_LOCALE_NONE,
503};
504
505#define BCM43xx_SPROM_SIZE 64 /* in 16-bit words. */
506struct bcm43xx_sprominfo {
507 u16 boardflags2;
508 u8 il0macaddr[6];
509 u8 et0macaddr[6];
510 u8 et1macaddr[6];
511 u8 et0phyaddr:5;
512 u8 et1phyaddr:5;
513 u8 boardrev;
514 u8 locale:4;
515 u8 antennas_aphy:2;
516 u8 antennas_bgphy:2;
517 u16 pa0b0;
518 u16 pa0b1;
519 u16 pa0b2;
520 u8 wl0gpio0;
521 u8 wl0gpio1;
522 u8 wl0gpio2;
523 u8 wl0gpio3;
524 u8 maxpower_aphy;
525 u8 maxpower_bgphy;
526 u16 pa1b0;
527 u16 pa1b1;
528 u16 pa1b2;
529 u8 idle_tssi_tgt_aphy;
530 u8 idle_tssi_tgt_bgphy;
531 u16 boardflags;
532 u16 antennagain_aphy;
533 u16 antennagain_bgphy;
534};
535
536/* Value pair to measure the LocalOscillator. */
537struct bcm43xx_lopair {
538 s8 low;
539 s8 high;
540 u8 used:1;
541};
542#define BCM43xx_LO_COUNT (14*4)
543
544struct bcm43xx_phyinfo {
545 /* Hardware Data */
546 u8 analog;
547 u8 type;
548 u8 rev;
549 u16 antenna_diversity;
550 u16 savedpctlreg;
551 u16 minlowsig[2];
552 u16 minlowsigpos[2];
553 u8 connected:1,
554 calibrated:1,
555 is_locked:1, /* used in bcm43xx_phy_{un}lock() */
556 dyn_tssi_tbl:1; /* used in bcm43xx_phy_init_tssi2dbm_table() */
557 /* LO Measurement Data.
558 * Use bcm43xx_get_lopair() to get a value.
559 */
560 struct bcm43xx_lopair *_lo_pairs;
561
562 /* TSSI to dBm table in use */
563 const s8 *tssi2dbm;
564 /* idle TSSI value */
565 s8 idle_tssi;
566
567 /* Values from bcm43xx_calc_loopback_gain() */
568 u16 loopback_gain[2];
569
570 /* PHY lock for core.rev < 3
571 * This lock is only used by bcm43xx_phy_{un}lock()
572 */
573 spinlock_t lock;
574
575 /* Firmware. */
576 const struct firmware *ucode;
577 const struct firmware *pcm;
578 const struct firmware *initvals0;
579 const struct firmware *initvals1;
580};
581
582
583struct bcm43xx_radioinfo {
584 u16 manufact;
585 u16 version;
586 u8 revision;
587
588 /* Desired TX power in dBm Q5.2 */
589 u16 txpower_desired;
590 /* TX Power control values. */
591 union {
592 /* B/G PHY */
593 struct {
594 u16 baseband_atten;
595 u16 radio_atten;
596 u16 txctl1;
597 u16 txctl2;
598 };
599 /* A PHY */
600 struct {
601 u16 txpwr_offset;
602 };
603 };
604
605 /* Current Interference Mitigation mode */
606 int interfmode;
607 /* Stack of saved values from the Interference Mitigation code.
 608 * Each value in the stack is laid out as follows
 609 * (see the packing sketch after this structure):
 610 * bits 0-11: offset
 611 * bits 12-15: register ID (0x1 PHY, 0x2 Radio, 0x3 ILT)
 612 * bits 16-31: value
 613 */
614#define BCM43xx_INTERFSTACK_SIZE 26
615 u32 interfstack[BCM43xx_INTERFSTACK_SIZE];
616
617 /* Saved values from the NRSSI Slope calculation */
618 s16 nrssi[2];
619 s32 nrssislope;
620 /* In memory nrssi lookup table. */
621 s8 nrssi_lt[64];
622
623 /* current channel */
624 u8 channel;
625 u8 initial_channel;
626
627 u16 lofcal;
628
629 u16 initval;
630
631 u8 enabled:1;
632 /* ACI (adjacent channel interference) flags. */
633 u8 aci_enable:1,
634 aci_wlan_automatic:1,
635 aci_hw_rssi:1;
636};
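The interfstack entry layout documented in the structure above can be expressed as a small packing sketch; these macros are hypothetical and do not exist in the driver, they only restate the bit layout given in the comment.

#define INTERFSTACK_PACK(reg_id, offset, value) \
	(((u32)(value) << 16) | (((u32)(reg_id) & 0xF) << 12) | ((u32)(offset) & 0xFFF))
#define INTERFSTACK_OFFSET(e)	((e) & 0xFFF)		/* bits 0-11 */
#define INTERFSTACK_REG_ID(e)	(((e) >> 12) & 0xF)	/* bits 12-15 */
#define INTERFSTACK_VALUE(e)	((u16)((e) >> 16))	/* bits 16-31 */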
637
638/* Data structures for DMA transmission, per 80211 core. */
639struct bcm43xx_dma {
640 struct bcm43xx_dmaring *tx_ring0;
641 struct bcm43xx_dmaring *tx_ring1;
642 struct bcm43xx_dmaring *tx_ring2;
643 struct bcm43xx_dmaring *tx_ring3;
644 struct bcm43xx_dmaring *tx_ring4;
645 struct bcm43xx_dmaring *tx_ring5;
646
647 struct bcm43xx_dmaring *rx_ring0;
648 struct bcm43xx_dmaring *rx_ring3; /* only available on core.rev < 5 */
649};
650
651/* Data structures for PIO transmission, per 80211 core. */
652struct bcm43xx_pio {
653 struct bcm43xx_pioqueue *queue0;
654 struct bcm43xx_pioqueue *queue1;
655 struct bcm43xx_pioqueue *queue2;
656 struct bcm43xx_pioqueue *queue3;
657};
658
659#define BCM43xx_MAX_80211_CORES 2
660
661/* Generic information about a core. */
662struct bcm43xx_coreinfo {
663 u8 available:1,
664 enabled:1,
665 initialized:1;
666 /** core_rev revision number */
667 u8 rev;
668 /** Index number for _switch_core() */
669 u8 index;
670 /** core_id ID number */
671 u16 id;
672 /** Core-specific data. */
673 void *priv;
674};
675
676/* Additional information for each 80211 core. */
677struct bcm43xx_coreinfo_80211 {
678 /* PHY device. */
679 struct bcm43xx_phyinfo phy;
680 /* Radio device. */
681 struct bcm43xx_radioinfo radio;
682 union {
683 /* DMA context. */
684 struct bcm43xx_dma dma;
685 /* PIO context. */
686 struct bcm43xx_pio pio;
687 };
688};
689
690/* Context information for a noise calculation (Link Quality). */
691struct bcm43xx_noise_calculation {
692 struct bcm43xx_coreinfo *core_at_start;
693 u8 channel_at_start;
694 u8 calculation_running:1;
695 u8 nr_samples;
696 s8 samples[8][4];
697};
698
699struct bcm43xx_stats {
700 u8 noise;
701 struct iw_statistics wstats;
702 /* Store the last TX/RX times here for updating the leds. */
703 unsigned long last_tx;
704 unsigned long last_rx;
705};
706
707struct bcm43xx_key {
708 u8 enabled:1;
709 u8 algorithm;
710};
711
712/* Driver initialization status. */
713enum {
714 BCM43xx_STAT_UNINIT, /* Uninitialized. */
715 BCM43xx_STAT_INITIALIZING, /* init_board() in progress. */
716 BCM43xx_STAT_INITIALIZED, /* Fully operational. */
717 BCM43xx_STAT_SHUTTINGDOWN, /* free_board() in progress. */
718 BCM43xx_STAT_RESTARTING, /* controller_restart() called. */
719};
720#define bcm43xx_status(bcm) atomic_read(&(bcm)->init_status)
721#define bcm43xx_set_status(bcm, stat) do { \
722 atomic_set(&(bcm)->init_status, (stat)); \
723 smp_wmb(); \
724 } while (0)
725
726/* *** THEORY OF LOCKING ***
727 *
728 * We have two different locks in the bcm43xx driver.
729 * => bcm->mutex: General sleeping mutex. Protects struct bcm43xx_private
730 * and the device registers. This mutex does _not_ protect
731 * against concurrency from the IRQ handler.
732 * => bcm->irq_lock: IRQ spinlock. Protects against IRQ handler concurrency.
733 *
734 * Please note that, if you only take the irq_lock, you are not protected
735 * against concurrency from the periodic work handlers.
736 * Most times you want to take _both_ locks.
737 */
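A minimal sketch of the pattern that follows from these rules when device state is reconfigured from process context; bcm43xx_example_reconfigure() is a hypothetical helper, not a function in this driver.

static void bcm43xx_example_reconfigure(struct bcm43xx_private *bcm)
{
	unsigned long flags;

	mutex_lock(&bcm->mutex);			/* serialize against other sleepers */
	spin_lock_irqsave(&bcm->irq_lock, flags);	/* shut out the IRQ handler */

	/* ... access device registers and struct bcm43xx_private here ... */

	spin_unlock_irqrestore(&bcm->irq_lock, flags);
	mutex_unlock(&bcm->mutex);
}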
738
739struct bcm43xx_private {
740 struct ieee80211_device *ieee;
741 struct ieee80211softmac_device *softmac;
742
743 struct net_device *net_dev;
744 struct pci_dev *pci_dev;
745 unsigned int irq;
746
747 void __iomem *mmio_addr;
748
749 spinlock_t irq_lock;
750 struct mutex mutex;
751
752 /* Driver initialization status BCM43xx_STAT_*** */
753 atomic_t init_status;
754
755 u16 was_initialized:1, /* for PCI suspend/resume. */
756 __using_pio:1, /* Internal, use bcm43xx_using_pio(). */
757 bad_frames_preempt:1, /* Use "Bad Frames Preemption" (default off) */
758 reg124_set_0x4:1, /* Some variable to keep track of IRQ stuff. */
759 short_preamble:1, /* TRUE, if short preamble is enabled. */
760 firmware_norelease:1, /* Do not release the firmware. Used on suspend. */
761 radio_hw_enable:1; /* TRUE if radio is hardware enabled */
762
763 struct bcm43xx_stats stats;
764
765 /* Bus type we are connected to.
766 * This is currently always BCM43xx_BUSTYPE_PCI
767 */
768 u8 bustype;
769 u64 dma_mask;
770
771 u16 board_vendor;
772 u16 board_type;
773 u16 board_revision;
774
775 u16 chip_id;
776 u8 chip_rev;
777 u8 chip_package;
778
779 struct bcm43xx_sprominfo sprom;
780#define BCM43xx_NR_LEDS 4
781 struct bcm43xx_led leds[BCM43xx_NR_LEDS];
782 spinlock_t leds_lock;
783
784 /* The currently active core. */
785 struct bcm43xx_coreinfo *current_core;
786 struct bcm43xx_coreinfo *active_80211_core;
787 /* coreinfo structs for all possible cores follow.
788 * Note that a core might not exist.
789 * So check the coreinfo flags before using it.
790 */
791 struct bcm43xx_coreinfo core_chipcommon;
792 struct bcm43xx_coreinfo core_pci;
793 struct bcm43xx_coreinfo core_80211[ BCM43xx_MAX_80211_CORES ];
794 /* Additional information, specific to the 80211 cores. */
795 struct bcm43xx_coreinfo_80211 core_80211_ext[ BCM43xx_MAX_80211_CORES ];
796 /* Number of available 80211 cores. */
797 int nr_80211_available;
798
799 u32 chipcommon_capabilities;
800
801 /* Reason code of the last interrupt. */
802 u32 irq_reason;
803 u32 dma_reason[6];
804 /* saved irq enable/disable state bitfield. */
805 u32 irq_savedstate;
806 /* Link Quality calculation context. */
807 struct bcm43xx_noise_calculation noisecalc;
808 /* if > 0 MAC is suspended. if == 0 MAC is enabled. */
809 int mac_suspended;
810
811 /* Threshold values. */
812 //TODO: The RTS thr has to be _used_. Currently, it is only set via WX.
813 u32 rts_threshold;
814
815 /* Interrupt Service Routine tasklet (bottom-half) */
816 struct tasklet_struct isr_tasklet;
817
818 /* Periodic tasks */
819 struct delayed_work periodic_work;
820 unsigned int periodic_state;
821
822 struct work_struct restart_work;
823
824 /* Informational stuff. */
825 char nick[IW_ESSID_MAX_SIZE + 1];
826
827 /* encryption/decryption */
828 u16 security_offset;
829 struct bcm43xx_key key[54];
830 u8 default_key_idx;
831
832 /* Random Number Generator. */
833 struct hwrng rng;
834 char rng_name[20 + 1];
835
836 /* Debugging stuff follows. */
837#ifdef CONFIG_BCM43XX_DEBUG
838 struct bcm43xx_dfsentry *dfsentry;
839#endif
840};
841
842
843static inline
844struct bcm43xx_private * bcm43xx_priv(struct net_device *dev)
845{
846 return ieee80211softmac_priv(dev);
847}
848
849struct device;
850
851static inline
852struct bcm43xx_private * dev_to_bcm(struct device *dev)
853{
854 struct net_device *net_dev;
855 struct bcm43xx_private *bcm;
856
857 net_dev = dev_get_drvdata(dev);
858 bcm = bcm43xx_priv(net_dev);
859
860 return bcm;
861}
862
863
 864/* Helper function which returns a boolean:
 865 * TRUE if PIO is used; FALSE if DMA is used.
 866 */
867#if defined(CONFIG_BCM43XX_DMA) && defined(CONFIG_BCM43XX_PIO)
868static inline
869int bcm43xx_using_pio(struct bcm43xx_private *bcm)
870{
871 return bcm->__using_pio;
872}
873#elif defined(CONFIG_BCM43XX_DMA)
874static inline
875int bcm43xx_using_pio(struct bcm43xx_private *bcm)
876{
877 return 0;
878}
879#elif defined(CONFIG_BCM43XX_PIO)
880static inline
881int bcm43xx_using_pio(struct bcm43xx_private *bcm)
882{
883 return 1;
884}
885#else
886# error "Using neither DMA nor PIO? Confused..."
887#endif
888
889/* Helper functions to access data structures private to the 80211 cores.
890 * Note that we _must_ have an 80211 core mapped when calling
891 * any of these functions.
892 */
893static inline
894struct bcm43xx_coreinfo_80211 *
895bcm43xx_current_80211_priv(struct bcm43xx_private *bcm)
896{
897 assert(bcm->current_core->id == BCM43xx_COREID_80211);
898 return bcm->current_core->priv;
899}
900static inline
901struct bcm43xx_pio * bcm43xx_current_pio(struct bcm43xx_private *bcm)
902{
903 assert(bcm43xx_using_pio(bcm));
904 return &(bcm43xx_current_80211_priv(bcm)->pio);
905}
906static inline
907struct bcm43xx_dma * bcm43xx_current_dma(struct bcm43xx_private *bcm)
908{
909 assert(!bcm43xx_using_pio(bcm));
910 return &(bcm43xx_current_80211_priv(bcm)->dma);
911}
912static inline
913struct bcm43xx_phyinfo * bcm43xx_current_phy(struct bcm43xx_private *bcm)
914{
915 return &(bcm43xx_current_80211_priv(bcm)->phy);
916}
917static inline
918struct bcm43xx_radioinfo * bcm43xx_current_radio(struct bcm43xx_private *bcm)
919{
920 return &(bcm43xx_current_80211_priv(bcm)->radio);
921}
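An illustrative caller, assuming an 80211 core has already been mapped as required above; example_dump_phy() is hypothetical.

static void example_dump_phy(struct bcm43xx_private *bcm)
{
	/* Only valid while an 80211 core is the currently mapped core. */
	struct bcm43xx_phyinfo *phy = bcm43xx_current_phy(bcm);
	struct bcm43xx_radioinfo *radio = bcm43xx_current_radio(bcm);

	printk(KERN_DEBUG PFX "PHY type 0x%02x rev 0x%02x, radio 0x%04x/0x%02x\n",
	       phy->type, phy->rev, radio->version, radio->revision);
}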
922
923
924static inline
925struct bcm43xx_lopair * bcm43xx_get_lopair(struct bcm43xx_phyinfo *phy,
926 u16 radio_attenuation,
927 u16 baseband_attenuation)
928{
929 return phy->_lo_pairs + (radio_attenuation + 14 * (baseband_attenuation / 2));
930}
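A worked example of the indexing above, with values chosen purely for illustration:

/* With radio_attenuation = 3 and baseband_attenuation = 4, the returned
 * pair is at index 3 + 14 * (4 / 2) = 31, within the table of
 * BCM43xx_LO_COUNT = 14 * 4 = 56 entries. */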
931
932
933static inline
934u16 bcm43xx_read16(struct bcm43xx_private *bcm, u16 offset)
935{
936 return ioread16(bcm->mmio_addr + offset);
937}
938
939static inline
940void bcm43xx_write16(struct bcm43xx_private *bcm, u16 offset, u16 value)
941{
942 iowrite16(value, bcm->mmio_addr + offset);
943}
944
945static inline
946u32 bcm43xx_read32(struct bcm43xx_private *bcm, u16 offset)
947{
948 return ioread32(bcm->mmio_addr + offset);
949}
950
951static inline
952void bcm43xx_write32(struct bcm43xx_private *bcm, u16 offset, u32 value)
953{
954 iowrite32(value, bcm->mmio_addr + offset);
955}
956
957static inline
958int bcm43xx_pci_read_config16(struct bcm43xx_private *bcm, int offset, u16 *value)
959{
960 return pci_read_config_word(bcm->pci_dev, offset, value);
961}
962
963static inline
964int bcm43xx_pci_read_config32(struct bcm43xx_private *bcm, int offset, u32 *value)
965{
966 return pci_read_config_dword(bcm->pci_dev, offset, value);
967}
968
969static inline
970int bcm43xx_pci_write_config16(struct bcm43xx_private *bcm, int offset, u16 value)
971{
972 return pci_write_config_word(bcm->pci_dev, offset, value);
973}
974
975static inline
976int bcm43xx_pci_write_config32(struct bcm43xx_private *bcm, int offset, u32 value)
977{
978 return pci_write_config_dword(bcm->pci_dev, offset, value);
979}
980
 981/** Clamp a value to the range [min, max]. */
982#ifdef limit_value
983# undef limit_value
984#endif
985#define limit_value(value, min, max) \
986 ({ \
987 typeof(value) __value = (value); \
988 typeof(value) __min = (min); \
989 typeof(value) __max = (max); \
990 if (__value < __min) \
991 __value = __min; \
992 else if (__value > __max) \
993 __value = __max; \
994 __value; \
995 })
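A usage sketch; the value range here is hypothetical and only shows the macro's intent.

	/* Clamp a computed attenuation into a hypothetical valid range. */
	att = limit_value(att, 0, 11);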
996
997#endif /* BCM43xx_H_ */
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_debugfs.c b/drivers/net/wireless/bcm43xx/bcm43xx_debugfs.c
deleted file mode 100644
index 76e9dd843faa..000000000000
--- a/drivers/net/wireless/bcm43xx/bcm43xx_debugfs.c
+++ /dev/null
@@ -1,556 +0,0 @@
1/*
2
3 Broadcom BCM43xx wireless driver
4
5 debugfs driver debugging code
6
7 Copyright (c) 2005 Michael Buesch <mbuesch@freenet.de>
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 2 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program; see the file COPYING. If not, write to
 21 the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
22 Boston, MA 02110-1301, USA.
23
24*/
25
26
27
28#include <linux/fs.h>
29#include <linux/debugfs.h>
30#include <linux/slab.h>
31#include <linux/netdevice.h>
32#include <linux/pci.h>
33#include <asm/io.h>
34
35#include "bcm43xx.h"
36#include "bcm43xx_main.h"
37#include "bcm43xx_debugfs.h"
38#include "bcm43xx_dma.h"
39#include "bcm43xx_pio.h"
40#include "bcm43xx_xmit.h"
41
42#define REALLY_BIG_BUFFER_SIZE (1024*256)
43
44static struct bcm43xx_debugfs fs;
45static char really_big_buffer[REALLY_BIG_BUFFER_SIZE];
46static DECLARE_MUTEX(big_buffer_sem);
47
48
49static ssize_t write_file_dummy(struct file *file, const char __user *buf,
50 size_t count, loff_t *ppos)
51{
52 return count;
53}
54
55static int open_file_generic(struct inode *inode, struct file *file)
56{
57 file->private_data = inode->i_private;
58 return 0;
59}
60
61#define fappend(fmt, x...) pos += snprintf(buf + pos, len - pos, fmt , ##x)
62
63static ssize_t devinfo_read_file(struct file *file, char __user *userbuf,
64 size_t count, loff_t *ppos)
65{
66 const size_t len = REALLY_BIG_BUFFER_SIZE;
67
68 struct bcm43xx_private *bcm = file->private_data;
69 char *buf = really_big_buffer;
70 size_t pos = 0;
71 ssize_t res;
72 struct net_device *net_dev;
73 struct pci_dev *pci_dev;
74 unsigned long flags;
75 u16 tmp16;
76 int i;
77
78 down(&big_buffer_sem);
79
80 mutex_lock(&bcm->mutex);
81 spin_lock_irqsave(&bcm->irq_lock, flags);
82 if (bcm43xx_status(bcm) != BCM43xx_STAT_INITIALIZED) {
83 fappend("Board not initialized.\n");
84 goto out;
85 }
86 net_dev = bcm->net_dev;
87 pci_dev = bcm->pci_dev;
88
89 /* This is where the information is written to the "devinfo" file */
90 fappend("*** %s devinfo ***\n", net_dev->name);
91 fappend("vendor: 0x%04x device: 0x%04x\n",
92 pci_dev->vendor, pci_dev->device);
93 fappend("subsystem_vendor: 0x%04x subsystem_device: 0x%04x\n",
94 pci_dev->subsystem_vendor, pci_dev->subsystem_device);
95 fappend("IRQ: %d\n", bcm->irq);
96 fappend("mmio_addr: 0x%p\n", bcm->mmio_addr);
97 fappend("chip_id: 0x%04x chip_rev: 0x%02x\n", bcm->chip_id, bcm->chip_rev);
98 if ((bcm->core_80211[0].rev >= 3) && (bcm43xx_read32(bcm, 0x0158) & (1 << 16)))
99 fappend("Radio disabled by hardware!\n");
100 if ((bcm->core_80211[0].rev < 3) && !(bcm43xx_read16(bcm, 0x049A) & (1 << 4)))
101 fappend("Radio disabled by hardware!\n");
102 fappend("board_vendor: 0x%04x board_type: 0x%04x\n", bcm->board_vendor,
103 bcm->board_type);
104
105 fappend("\nCores:\n");
106#define fappend_core(name, info) fappend("core \"" name "\" %s, %s, id: 0x%04x, " \
107 "rev: 0x%02x, index: 0x%02x\n", \
108 (info).available \
109 ? "available" : "nonavailable", \
110 (info).enabled \
111 ? "enabled" : "disabled", \
112 (info).id, (info).rev, (info).index)
113 fappend_core("CHIPCOMMON", bcm->core_chipcommon);
114 fappend_core("PCI", bcm->core_pci);
115 fappend_core("first 80211", bcm->core_80211[0]);
116 fappend_core("second 80211", bcm->core_80211[1]);
117#undef fappend_core
118 tmp16 = bcm43xx_read16(bcm, BCM43xx_MMIO_GPIO_CONTROL);
119 fappend("LEDs: ");
120 for (i = 0; i < BCM43xx_NR_LEDS; i++)
121 fappend("%d ", !!(tmp16 & (1 << i)));
122 fappend("\n");
123
124out:
125 spin_unlock_irqrestore(&bcm->irq_lock, flags);
126 mutex_unlock(&bcm->mutex);
127 res = simple_read_from_buffer(userbuf, count, ppos, buf, pos);
128 up(&big_buffer_sem);
129 return res;
130}
131
132static ssize_t drvinfo_read_file(struct file *file, char __user *userbuf,
133 size_t count, loff_t *ppos)
134{
135 const size_t len = REALLY_BIG_BUFFER_SIZE;
136
137 char *buf = really_big_buffer;
138 size_t pos = 0;
139 ssize_t res;
140
141 down(&big_buffer_sem);
142
143 /* This is where the information is written to the "driver" file */
144 fappend(KBUILD_MODNAME " driver\n");
145 fappend("Compiled at: %s %s\n", __DATE__, __TIME__);
146
147 res = simple_read_from_buffer(userbuf, count, ppos, buf, pos);
148 up(&big_buffer_sem);
149 return res;
150}
151
152static ssize_t spromdump_read_file(struct file *file, char __user *userbuf,
153 size_t count, loff_t *ppos)
154{
155 const size_t len = REALLY_BIG_BUFFER_SIZE;
156
157 struct bcm43xx_private *bcm = file->private_data;
158 char *buf = really_big_buffer;
159 size_t pos = 0;
160 ssize_t res;
161 unsigned long flags;
162
163 down(&big_buffer_sem);
164 mutex_lock(&bcm->mutex);
165 spin_lock_irqsave(&bcm->irq_lock, flags);
166 if (bcm43xx_status(bcm) != BCM43xx_STAT_INITIALIZED) {
167 fappend("Board not initialized.\n");
168 goto out;
169 }
170
171 /* This is where the information is written to the "sprom_dump" file */
172 fappend("boardflags: 0x%04x\n", bcm->sprom.boardflags);
173
174out:
175 spin_unlock_irqrestore(&bcm->irq_lock, flags);
176 mutex_unlock(&bcm->mutex);
177 res = simple_read_from_buffer(userbuf, count, ppos, buf, pos);
178 up(&big_buffer_sem);
179 return res;
180}
181
182static ssize_t tsf_read_file(struct file *file, char __user *userbuf,
183 size_t count, loff_t *ppos)
184{
185 const size_t len = REALLY_BIG_BUFFER_SIZE;
186
187 struct bcm43xx_private *bcm = file->private_data;
188 char *buf = really_big_buffer;
189 size_t pos = 0;
190 ssize_t res;
191 unsigned long flags;
192 u64 tsf;
193
194 down(&big_buffer_sem);
195 mutex_lock(&bcm->mutex);
196 spin_lock_irqsave(&bcm->irq_lock, flags);
197 if (bcm43xx_status(bcm) != BCM43xx_STAT_INITIALIZED) {
198 fappend("Board not initialized.\n");
199 goto out;
200 }
201 bcm43xx_tsf_read(bcm, &tsf);
202 fappend("0x%08x%08x\n",
203 (unsigned int)((tsf & 0xFFFFFFFF00000000ULL) >> 32),
204 (unsigned int)(tsf & 0xFFFFFFFFULL));
205
206out:
207 spin_unlock_irqrestore(&bcm->irq_lock, flags);
208 mutex_unlock(&bcm->mutex);
209 res = simple_read_from_buffer(userbuf, count, ppos, buf, pos);
210 up(&big_buffer_sem);
211 return res;
212}
213
214static ssize_t tsf_write_file(struct file *file, const char __user *user_buf,
215 size_t count, loff_t *ppos)
216{
217 struct bcm43xx_private *bcm = file->private_data;
218 char *buf = really_big_buffer;
219 ssize_t buf_size;
220 ssize_t res;
221 unsigned long flags;
222 unsigned long long tsf;
223
224 buf_size = min(count, sizeof (really_big_buffer) - 1);
225 down(&big_buffer_sem);
226 if (copy_from_user(buf, user_buf, buf_size)) {
227 res = -EFAULT;
228 goto out_up;
229 }
230 mutex_lock(&bcm->mutex);
231 spin_lock_irqsave(&bcm->irq_lock, flags);
232 if (bcm43xx_status(bcm) != BCM43xx_STAT_INITIALIZED) {
233 printk(KERN_INFO PFX "debugfs: Board not initialized.\n");
234 res = -EFAULT;
235 goto out_unlock;
236 }
237 if (sscanf(buf, "%lli", &tsf) != 1) {
238 printk(KERN_INFO PFX "debugfs: invalid values for \"tsf\"\n");
239 res = -EINVAL;
240 goto out_unlock;
241 }
242 bcm43xx_tsf_write(bcm, tsf);
243 mmiowb();
244 res = buf_size;
245
246out_unlock:
247 spin_unlock_irqrestore(&bcm->irq_lock, flags);
248 mutex_unlock(&bcm->mutex);
249out_up:
250 up(&big_buffer_sem);
251 return res;
252}
253
254static ssize_t txstat_read_file(struct file *file, char __user *userbuf,
255 size_t count, loff_t *ppos)
256{
257 const size_t len = REALLY_BIG_BUFFER_SIZE;
258
259 struct bcm43xx_private *bcm = file->private_data;
260 char *buf = really_big_buffer;
261 size_t pos = 0;
262 ssize_t res;
263 unsigned long flags;
264 struct bcm43xx_dfsentry *e;
265 struct bcm43xx_xmitstatus *status;
266 int i, cnt, j = 0;
267
268 down(&big_buffer_sem);
269 mutex_lock(&bcm->mutex);
270 spin_lock_irqsave(&bcm->irq_lock, flags);
271
272 fappend("Last %d logged xmitstatus blobs (Latest first):\n\n",
273 BCM43xx_NR_LOGGED_XMITSTATUS);
274 e = bcm->dfsentry;
275 if (e->xmitstatus_printing == 0) {
276 /* At the beginning, make a copy of all data to avoid
277 * concurrency, as this function is called multiple
278 * times for big logs. Without copying, the data might
 279		 * change between reads and produce garbled output.
280 */
281 e->xmitstatus_printing = 1;
282 e->saved_xmitstatus_ptr = e->xmitstatus_ptr;
283 e->saved_xmitstatus_cnt = e->xmitstatus_cnt;
284 memcpy(e->xmitstatus_print_buffer, e->xmitstatus_buffer,
285 BCM43xx_NR_LOGGED_XMITSTATUS * sizeof(*(e->xmitstatus_buffer)));
286 }
287 i = e->saved_xmitstatus_ptr - 1;
288 if (i < 0)
289 i = BCM43xx_NR_LOGGED_XMITSTATUS - 1;
290 cnt = e->saved_xmitstatus_cnt;
291 while (cnt) {
292 status = e->xmitstatus_print_buffer + i;
293 fappend("0x%02x: cookie: 0x%04x, flags: 0x%02x, "
294 "cnt1: 0x%02x, cnt2: 0x%02x, seq: 0x%04x, "
295 "unk: 0x%04x\n", j,
296 status->cookie, status->flags,
297 status->cnt1, status->cnt2, status->seq,
298 status->unknown);
299 j++;
300 cnt--;
301 i--;
302 if (i < 0)
303 i = BCM43xx_NR_LOGGED_XMITSTATUS - 1;
304 }
305
306 spin_unlock_irqrestore(&bcm->irq_lock, flags);
307 res = simple_read_from_buffer(userbuf, count, ppos, buf, pos);
308 spin_lock_irqsave(&bcm->irq_lock, flags);
309 if (*ppos == pos) {
310 /* Done. Drop the copied data. */
311 e->xmitstatus_printing = 0;
312 }
313 spin_unlock_irqrestore(&bcm->irq_lock, flags);
314 mutex_unlock(&bcm->mutex);
315 up(&big_buffer_sem);
316 return res;
317}
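
The tx_status file above reads from a fixed-size circular log: the writer (bcm43xx_debugfs_log_txstat, further down in this file) advances a write pointer with wraparound, and the reader snapshots the log and walks it newest-first, starting one slot behind the write pointer. A standalone sketch of that walk, using a plain int ring in place of the xmitstatus entries (illustrative only):

	#include <stdio.h>

	#define NR_LOGGED 8	/* stands in for BCM43xx_NR_LOGGED_XMITSTATUS */

	int main(void)
	{
		int log[NR_LOGGED];
		int ptr = 0;	/* next slot to write, like xmitstatus_ptr */
		int cnt = 0;	/* number of valid entries, like xmitstatus_cnt */
		int i, v;

		/* Write side: log 11 entries into an 8-entry ring. */
		for (v = 1; v <= 11; v++) {
			log[ptr] = v;
			if (++ptr >= NR_LOGGED)
				ptr = 0;
			if (cnt < NR_LOGGED)
				cnt++;
		}

		/* Read side: start one slot behind the write pointer and walk
		 * backwards with wraparound, newest entry first. */
		i = ptr - 1;
		if (i < 0)
			i = NR_LOGGED - 1;
		while (cnt--) {
			printf("%d ", log[i]);
			if (--i < 0)
				i = NR_LOGGED - 1;
		}
		printf("\n");	/* prints: 11 10 9 8 7 6 5 4 */
		return 0;
	}
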
318
319static ssize_t restart_write_file(struct file *file, const char __user *user_buf,
320 size_t count, loff_t *ppos)
321{
322 struct bcm43xx_private *bcm = file->private_data;
323 char *buf = really_big_buffer;
324 ssize_t buf_size;
325 ssize_t res;
326 unsigned long flags;
327
328 buf_size = min(count, sizeof (really_big_buffer) - 1);
329 down(&big_buffer_sem);
330 if (copy_from_user(buf, user_buf, buf_size)) {
331 res = -EFAULT;
332 goto out_up;
333 }
334 mutex_lock(&(bcm)->mutex);
335 spin_lock_irqsave(&(bcm)->irq_lock, flags);
336 if (bcm43xx_status(bcm) != BCM43xx_STAT_INITIALIZED) {
337 printk(KERN_INFO PFX "debugfs: Board not initialized.\n");
338 res = -EFAULT;
339 goto out_unlock;
340 }
341 if (count > 0 && buf[0] == '1') {
342 bcm43xx_controller_restart(bcm, "manually restarted");
343 res = count;
344 } else
345 res = -EINVAL;
346
347out_unlock:
348 spin_unlock_irqrestore(&(bcm)->irq_lock, flags);
349 mutex_unlock(&(bcm)->mutex);
350out_up:
351 up(&big_buffer_sem);
352 return res;
353}
354
355#undef fappend
356
357
358static const struct file_operations devinfo_fops = {
359 .read = devinfo_read_file,
360 .write = write_file_dummy,
361 .open = open_file_generic,
362};
363
364static const struct file_operations spromdump_fops = {
365 .read = spromdump_read_file,
366 .write = write_file_dummy,
367 .open = open_file_generic,
368};
369
370static const struct file_operations drvinfo_fops = {
371 .read = drvinfo_read_file,
372 .write = write_file_dummy,
373 .open = open_file_generic,
374};
375
376static const struct file_operations tsf_fops = {
377 .read = tsf_read_file,
378 .write = tsf_write_file,
379 .open = open_file_generic,
380};
381
382static const struct file_operations txstat_fops = {
383 .read = txstat_read_file,
384 .write = write_file_dummy,
385 .open = open_file_generic,
386};
387
388static const struct file_operations restart_fops = {
389 .write = restart_write_file,
390 .open = open_file_generic,
391};
392
393
394void bcm43xx_debugfs_add_device(struct bcm43xx_private *bcm)
395{
396 struct bcm43xx_dfsentry *e;
397 char devdir[IFNAMSIZ];
398
399 assert(bcm);
400 e = kzalloc(sizeof(*e), GFP_KERNEL);
401 if (!e) {
402 printk(KERN_ERR PFX "out of memory\n");
403 return;
404 }
405 e->bcm = bcm;
406 e->xmitstatus_buffer = kzalloc(BCM43xx_NR_LOGGED_XMITSTATUS
407 * sizeof(*(e->xmitstatus_buffer)),
408 GFP_KERNEL);
409 if (!e->xmitstatus_buffer) {
410 printk(KERN_ERR PFX "out of memory\n");
411 kfree(e);
412 return;
413 }
414 e->xmitstatus_print_buffer = kzalloc(BCM43xx_NR_LOGGED_XMITSTATUS
415 * sizeof(*(e->xmitstatus_buffer)),
416 GFP_KERNEL);
417 if (!e->xmitstatus_print_buffer) {
418 printk(KERN_ERR PFX "out of memory\n");
419 kfree(e);
420 return;
421 }
422
423
424 bcm->dfsentry = e;
425
426 strncpy(devdir, bcm->net_dev->name, ARRAY_SIZE(devdir));
427 e->subdir = debugfs_create_dir(devdir, fs.root);
428 e->dentry_devinfo = debugfs_create_file("devinfo", 0444, e->subdir,
429 bcm, &devinfo_fops);
430 if (!e->dentry_devinfo)
431 printk(KERN_ERR PFX "debugfs: creating \"devinfo\" for \"%s\" failed!\n", devdir);
432 e->dentry_spromdump = debugfs_create_file("sprom_dump", 0444, e->subdir,
433 bcm, &spromdump_fops);
434 if (!e->dentry_spromdump)
435 printk(KERN_ERR PFX "debugfs: creating \"sprom_dump\" for \"%s\" failed!\n", devdir);
436 e->dentry_tsf = debugfs_create_file("tsf", 0666, e->subdir,
437 bcm, &tsf_fops);
438 if (!e->dentry_tsf)
439 printk(KERN_ERR PFX "debugfs: creating \"tsf\" for \"%s\" failed!\n", devdir);
440 e->dentry_txstat = debugfs_create_file("tx_status", 0444, e->subdir,
441 bcm, &txstat_fops);
442 if (!e->dentry_txstat)
443 printk(KERN_ERR PFX "debugfs: creating \"tx_status\" for \"%s\" failed!\n", devdir);
444 e->dentry_restart = debugfs_create_file("restart", 0222, e->subdir,
445 bcm, &restart_fops);
446 if (!e->dentry_restart)
447 printk(KERN_ERR PFX "debugfs: creating \"restart\" for \"%s\" failed!\n", devdir);
448}
449
450void bcm43xx_debugfs_remove_device(struct bcm43xx_private *bcm)
451{
452 struct bcm43xx_dfsentry *e;
453
454 if (!bcm)
455 return;
456
457 e = bcm->dfsentry;
458 assert(e);
459 debugfs_remove(e->dentry_spromdump);
460 debugfs_remove(e->dentry_devinfo);
461 debugfs_remove(e->dentry_tsf);
462 debugfs_remove(e->dentry_txstat);
463 debugfs_remove(e->dentry_restart);
464 debugfs_remove(e->subdir);
465 kfree(e->xmitstatus_buffer);
466 kfree(e->xmitstatus_print_buffer);
467 kfree(e);
468}
469
470void bcm43xx_debugfs_log_txstat(struct bcm43xx_private *bcm,
471 struct bcm43xx_xmitstatus *status)
472{
473 struct bcm43xx_dfsentry *e;
474 struct bcm43xx_xmitstatus *savedstatus;
475
 476	/* This is protected by bcm->irq_lock */
477 e = bcm->dfsentry;
478 assert(e);
479 savedstatus = e->xmitstatus_buffer + e->xmitstatus_ptr;
480 memcpy(savedstatus, status, sizeof(*status));
481 e->xmitstatus_ptr++;
482 if (e->xmitstatus_ptr >= BCM43xx_NR_LOGGED_XMITSTATUS)
483 e->xmitstatus_ptr = 0;
484 if (e->xmitstatus_cnt < BCM43xx_NR_LOGGED_XMITSTATUS)
485 e->xmitstatus_cnt++;
486}
487
488void bcm43xx_debugfs_init(void)
489{
490 memset(&fs, 0, sizeof(fs));
491 fs.root = debugfs_create_dir(KBUILD_MODNAME, NULL);
492 if (!fs.root)
493 printk(KERN_ERR PFX "debugfs: creating \"" KBUILD_MODNAME "\" subdir failed!\n");
494 fs.dentry_driverinfo = debugfs_create_file("driver", 0444, fs.root, NULL, &drvinfo_fops);
495 if (!fs.dentry_driverinfo)
496 printk(KERN_ERR PFX "debugfs: creating \"" KBUILD_MODNAME "/driver\" failed!\n");
497}
498
499void bcm43xx_debugfs_exit(void)
500{
501 debugfs_remove(fs.dentry_driverinfo);
502 debugfs_remove(fs.root);
503}
504
505void bcm43xx_printk_dump(const char *data,
506 size_t size,
507 const char *description)
508{
509 size_t i;
510 char c;
511
512 printk(KERN_INFO PFX "Data dump (%s, %zd bytes):",
513 description, size);
514 for (i = 0; i < size; i++) {
515 c = data[i];
516 if (i % 8 == 0)
517 printk("\n" KERN_INFO PFX "0x%08zx: 0x%02x, ", i, c & 0xff);
518 else
519 printk("0x%02x, ", c & 0xff);
520 }
521 printk("\n");
522}
523
524void bcm43xx_printk_bitdump(const unsigned char *data,
525 size_t bytes, int msb_to_lsb,
526 const char *description)
527{
528 size_t i;
529 int j;
530 const unsigned char *d;
531
532 printk(KERN_INFO PFX "*** Bitdump (%s, %zd bytes, %s) ***",
533 description, bytes, msb_to_lsb ? "MSB to LSB" : "LSB to MSB");
534 for (i = 0; i < bytes; i++) {
535 d = data + i;
536 if (i % 8 == 0)
537 printk("\n" KERN_INFO PFX "0x%08zx: ", i);
538 if (msb_to_lsb) {
539 for (j = 7; j >= 0; j--) {
540 if (*d & (1 << j))
541 printk("1");
542 else
543 printk("0");
544 }
545 } else {
546 for (j = 0; j < 8; j++) {
547 if (*d & (1 << j))
548 printk("1");
549 else
550 printk("0");
551 }
552 }
553 printk(" ");
554 }
555 printk("\n");
556}
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_debugfs.h b/drivers/net/wireless/bcm43xx/bcm43xx_debugfs.h
deleted file mode 100644
index a40d1af35545..000000000000
--- a/drivers/net/wireless/bcm43xx/bcm43xx_debugfs.h
+++ /dev/null
@@ -1,118 +0,0 @@
1#ifndef BCM43xx_DEBUGFS_H_
2#define BCM43xx_DEBUGFS_H_
3
4struct bcm43xx_private;
5struct bcm43xx_xmitstatus;
6
7#ifdef CONFIG_BCM43XX_DEBUG
8
9#include <linux/list.h>
10#include <asm/semaphore.h>
11
12struct dentry;
13
14/* limited by the size of the "really_big_buffer" */
15#define BCM43xx_NR_LOGGED_XMITSTATUS 100
16
17struct bcm43xx_dfsentry {
18 struct dentry *subdir;
19 struct dentry *dentry_devinfo;
20 struct dentry *dentry_spromdump;
21 struct dentry *dentry_tsf;
22 struct dentry *dentry_txstat;
23 struct dentry *dentry_restart;
24
25 struct bcm43xx_private *bcm;
26
27 /* saved xmitstatus. */
28 struct bcm43xx_xmitstatus *xmitstatus_buffer;
29 int xmitstatus_ptr;
30 int xmitstatus_cnt;
 31	/* We need a separate buffer while printing to avoid
32 * concurrency issues. (New xmitstatus can arrive
33 * while we are printing).
34 */
35 struct bcm43xx_xmitstatus *xmitstatus_print_buffer;
36 int saved_xmitstatus_ptr;
37 int saved_xmitstatus_cnt;
38 int xmitstatus_printing;
39};
40
41struct bcm43xx_debugfs {
42 struct dentry *root;
43 struct dentry *dentry_driverinfo;
44};
45
46void bcm43xx_debugfs_init(void);
47void bcm43xx_debugfs_exit(void);
48void bcm43xx_debugfs_add_device(struct bcm43xx_private *bcm);
49void bcm43xx_debugfs_remove_device(struct bcm43xx_private *bcm);
50void bcm43xx_debugfs_log_txstat(struct bcm43xx_private *bcm,
51 struct bcm43xx_xmitstatus *status);
52
53/* Debug helper: Dump binary data through printk. */
54void bcm43xx_printk_dump(const char *data,
55 size_t size,
56 const char *description);
57/* Debug helper: Dump bitwise binary data through printk. */
58void bcm43xx_printk_bitdump(const unsigned char *data,
59 size_t bytes, int msb_to_lsb,
60 const char *description);
61#define bcm43xx_printk_bitdumpt(pointer, msb_to_lsb, description) \
62 do { \
63 bcm43xx_printk_bitdump((const unsigned char *)(pointer), \
64 sizeof(*(pointer)), \
65 (msb_to_lsb), \
66 (description)); \
67 } while (0)
68
69#else /* CONFIG_BCM43XX_DEBUG*/
70
71static inline
72void bcm43xx_debugfs_init(void) { }
73static inline
74void bcm43xx_debugfs_exit(void) { }
75static inline
76void bcm43xx_debugfs_add_device(struct bcm43xx_private *bcm) { }
77static inline
78void bcm43xx_debugfs_remove_device(struct bcm43xx_private *bcm) { }
79static inline
80void bcm43xx_debugfs_log_txstat(struct bcm43xx_private *bcm,
81 struct bcm43xx_xmitstatus *status) { }
82
83static inline
84void bcm43xx_printk_dump(const char *data,
85 size_t size,
86 const char *description)
87{
88}
89static inline
90void bcm43xx_printk_bitdump(const unsigned char *data,
91 size_t bytes, int msb_to_lsb,
92 const char *description)
93{
94}
95#define bcm43xx_printk_bitdumpt(pointer, msb_to_lsb, description) do { /* nothing */ } while (0)
96
97#endif /* CONFIG_BCM43XX_DEBUG*/
98
 99/* Ugly helper macros to make incomplete code more verbose at runtime */
100#ifdef TODO
101# undef TODO
102#endif
103#define TODO() \
104 do { \
105 printk(KERN_INFO PFX "TODO: Incomplete code in %s() at %s:%d\n", \
106 __FUNCTION__, __FILE__, __LINE__); \
107 } while (0)
108
109#ifdef FIXME
110# undef FIXME
111#endif
112#define FIXME() \
113 do { \
114 printk(KERN_INFO PFX "FIXME: Possibly broken code in %s() at %s:%d\n", \
115 __FUNCTION__, __FILE__, __LINE__); \
116 } while (0)
117
118#endif /* BCM43xx_DEBUGFS_H_ */
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_dma.c b/drivers/net/wireless/bcm43xx/bcm43xx_dma.c
deleted file mode 100644
index 1f7731fcfbd5..000000000000
--- a/drivers/net/wireless/bcm43xx/bcm43xx_dma.c
+++ /dev/null
@@ -1,1263 +0,0 @@
1/*
2
3 Broadcom BCM43xx wireless driver
4
5 DMA ringbuffer and descriptor allocation/management
6
7 Copyright (c) 2005, 2006 Michael Buesch <mbuesch@freenet.de>
8
9 Some code in this file is derived from the b44.c driver
10 Copyright (C) 2002 David S. Miller
11 Copyright (C) Pekka Pietikainen
12
13 This program is free software; you can redistribute it and/or modify
14 it under the terms of the GNU General Public License as published by
15 the Free Software Foundation; either version 2 of the License, or
16 (at your option) any later version.
17
18 This program is distributed in the hope that it will be useful,
19 but WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 GNU General Public License for more details.
22
23 You should have received a copy of the GNU General Public License
24 along with this program; see the file COPYING. If not, write to
 25  the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
26 Boston, MA 02110-1301, USA.
27
28*/
29
30#include "bcm43xx.h"
31#include "bcm43xx_dma.h"
32#include "bcm43xx_main.h"
33#include "bcm43xx_debugfs.h"
34#include "bcm43xx_power.h"
35#include "bcm43xx_xmit.h"
36
37#include <linux/dma-mapping.h>
38#include <linux/pci.h>
39#include <linux/delay.h>
40#include <linux/skbuff.h>
41
42
43static inline int free_slots(struct bcm43xx_dmaring *ring)
44{
45 return (ring->nr_slots - ring->used_slots);
46}
47
48static inline int next_slot(struct bcm43xx_dmaring *ring, int slot)
49{
50 assert(slot >= -1 && slot <= ring->nr_slots - 1);
51 if (slot == ring->nr_slots - 1)
52 return 0;
53 return slot + 1;
54}
55
56static inline int prev_slot(struct bcm43xx_dmaring *ring, int slot)
57{
58 assert(slot >= 0 && slot <= ring->nr_slots - 1);
59 if (slot == 0)
60 return ring->nr_slots - 1;
61 return slot - 1;
62}
63
64/* Request a slot for usage. */
65static inline
66int request_slot(struct bcm43xx_dmaring *ring)
67{
68 int slot;
69
70 assert(ring->tx);
71 assert(!ring->suspended);
72 assert(free_slots(ring) != 0);
73
74 slot = next_slot(ring, ring->current_slot);
75 ring->current_slot = slot;
76 ring->used_slots++;
77
78 /* Check the number of available slots and suspend TX,
79 * if we are running low on free slots.
80 */
81 if (unlikely(free_slots(ring) < ring->suspend_mark)) {
82 netif_stop_queue(ring->bcm->net_dev);
83 ring->suspended = 1;
84 }
85#ifdef CONFIG_BCM43XX_DEBUG
86 if (ring->used_slots > ring->max_used_slots)
87 ring->max_used_slots = ring->used_slots;
88#endif /* CONFIG_BCM43XX_DEBUG*/
89
90 return slot;
91}
92
93/* Return a slot to the free slots. */
94static inline
95void return_slot(struct bcm43xx_dmaring *ring, int slot)
96{
97 assert(ring->tx);
98
99 ring->used_slots--;
100
101 /* Check if TX is suspended and check if we have
102 * enough free slots to resume it again.
103 */
104 if (unlikely(ring->suspended)) {
105 if (free_slots(ring) >= ring->resume_mark) {
106 ring->suspended = 0;
107 netif_wake_queue(ring->bcm->net_dev);
108 }
109 }
110}
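
request_slot() stops the network queue once the number of free slots drops below suspend_mark, and return_slot() wakes it again only once resume_mark free slots are available, so the queue gets hysteresis instead of flapping around a single threshold. A standalone sketch of that behaviour, using the 20%/50% thresholds defined in bcm43xx_dma.h further down (illustrative only):

	#include <stdio.h>

	#define NR_SLOTS	512	/* like BCM43xx_TXRING_SLOTS */
	#define SUSPEND_PCT	20	/* like BCM43xx_TXSUSPEND_PERCENT */
	#define RESUME_PCT	50	/* like BCM43xx_TXRESUME_PERCENT */

	int main(void)
	{
		int suspend_mark = NR_SLOTS * SUSPEND_PCT / 100;	/* 102 */
		int resume_mark = NR_SLOTS * RESUME_PCT / 100;		/* 256 */
		int used = 0, suspended = 0;

		/* Fill the ring until the queue would be stopped... */
		while (!suspended) {
			used++;
			if (NR_SLOTS - used < suspend_mark)
				suspended = 1;	/* netif_stop_queue() in the driver */
		}
		printf("suspended with %d used slots\n", used);	/* 411 */

		/* ...then drain it until the queue would be woken again. */
		while (suspended) {
			used--;
			if (NR_SLOTS - used >= resume_mark)
				suspended = 0;	/* netif_wake_queue() in the driver */
		}
		printf("resumed with %d used slots\n", used);	/* 256 */
		return 0;
	}
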
111
112u16 bcm43xx_dmacontroller_base(int dma64bit, int controller_idx)
113{
114 static const u16 map64[] = {
115 BCM43xx_MMIO_DMA64_BASE0,
116 BCM43xx_MMIO_DMA64_BASE1,
117 BCM43xx_MMIO_DMA64_BASE2,
118 BCM43xx_MMIO_DMA64_BASE3,
119 BCM43xx_MMIO_DMA64_BASE4,
120 BCM43xx_MMIO_DMA64_BASE5,
121 };
122 static const u16 map32[] = {
123 BCM43xx_MMIO_DMA32_BASE0,
124 BCM43xx_MMIO_DMA32_BASE1,
125 BCM43xx_MMIO_DMA32_BASE2,
126 BCM43xx_MMIO_DMA32_BASE3,
127 BCM43xx_MMIO_DMA32_BASE4,
128 BCM43xx_MMIO_DMA32_BASE5,
129 };
130
131 if (dma64bit) {
132 assert(controller_idx >= 0 &&
133 controller_idx < ARRAY_SIZE(map64));
134 return map64[controller_idx];
135 }
136 assert(controller_idx >= 0 &&
137 controller_idx < ARRAY_SIZE(map32));
138 return map32[controller_idx];
139}
140
141static inline
142dma_addr_t map_descbuffer(struct bcm43xx_dmaring *ring,
143 unsigned char *buf,
144 size_t len,
145 int tx)
146{
147 dma_addr_t dmaaddr;
148 int direction = PCI_DMA_FROMDEVICE;
149
150 if (tx)
151 direction = PCI_DMA_TODEVICE;
152
153 dmaaddr = pci_map_single(ring->bcm->pci_dev,
154 buf, len,
155 direction);
156
157 return dmaaddr;
158}
159
160static inline
161void unmap_descbuffer(struct bcm43xx_dmaring *ring,
162 dma_addr_t addr,
163 size_t len,
164 int tx)
165{
166 if (tx) {
167 pci_unmap_single(ring->bcm->pci_dev,
168 addr, len,
169 PCI_DMA_TODEVICE);
170 } else {
171 pci_unmap_single(ring->bcm->pci_dev,
172 addr, len,
173 PCI_DMA_FROMDEVICE);
174 }
175}
176
177static inline
178void sync_descbuffer_for_cpu(struct bcm43xx_dmaring *ring,
179 dma_addr_t addr,
180 size_t len)
181{
182 assert(!ring->tx);
183
184 pci_dma_sync_single_for_cpu(ring->bcm->pci_dev,
185 addr, len, PCI_DMA_FROMDEVICE);
186}
187
188static inline
189void sync_descbuffer_for_device(struct bcm43xx_dmaring *ring,
190 dma_addr_t addr,
191 size_t len)
192{
193 assert(!ring->tx);
194
 195	pci_dma_sync_single_for_device(ring->bcm->pci_dev,
 196				       addr, len, PCI_DMA_FROMDEVICE);
197}
198
199/* Unmap and free a descriptor buffer. */
200static inline
201void free_descriptor_buffer(struct bcm43xx_dmaring *ring,
202 struct bcm43xx_dmadesc_meta *meta,
203 int irq_context)
204{
205 assert(meta->skb);
206 if (irq_context)
207 dev_kfree_skb_irq(meta->skb);
208 else
209 dev_kfree_skb(meta->skb);
210 meta->skb = NULL;
211}
212
213static int alloc_ringmemory(struct bcm43xx_dmaring *ring)
214{
215 ring->descbase = pci_alloc_consistent(ring->bcm->pci_dev, BCM43xx_DMA_RINGMEMSIZE,
216 &(ring->dmabase));
217 if (!ring->descbase) {
218 /* Allocation may have failed due to pci_alloc_consistent
219 insisting on use of GFP_DMA, which is more restrictive
220 than necessary... */
221 struct dma_desc *rx_ring;
222 dma_addr_t rx_ring_dma;
223
224 rx_ring = kzalloc(BCM43xx_DMA_RINGMEMSIZE, GFP_KERNEL);
225 if (!rx_ring)
226 goto out_err;
227
228 rx_ring_dma = pci_map_single(ring->bcm->pci_dev, rx_ring,
229 BCM43xx_DMA_RINGMEMSIZE,
230 PCI_DMA_BIDIRECTIONAL);
231
232 if (pci_dma_mapping_error(rx_ring_dma) ||
233 rx_ring_dma + BCM43xx_DMA_RINGMEMSIZE > ring->bcm->dma_mask) {
234 /* Sigh... */
235 if (!pci_dma_mapping_error(rx_ring_dma))
236 pci_unmap_single(ring->bcm->pci_dev,
237 rx_ring_dma, BCM43xx_DMA_RINGMEMSIZE,
238 PCI_DMA_BIDIRECTIONAL);
239 rx_ring_dma = pci_map_single(ring->bcm->pci_dev,
240 rx_ring, BCM43xx_DMA_RINGMEMSIZE,
241 PCI_DMA_BIDIRECTIONAL);
242 if (pci_dma_mapping_error(rx_ring_dma) ||
243 rx_ring_dma + BCM43xx_DMA_RINGMEMSIZE > ring->bcm->dma_mask) {
244 assert(0);
245 if (!pci_dma_mapping_error(rx_ring_dma))
246 pci_unmap_single(ring->bcm->pci_dev,
247 rx_ring_dma, BCM43xx_DMA_RINGMEMSIZE,
248 PCI_DMA_BIDIRECTIONAL);
249 goto out_err;
250 }
251 }
252
253 ring->descbase = rx_ring;
254 ring->dmabase = rx_ring_dma;
255 }
256 memset(ring->descbase, 0, BCM43xx_DMA_RINGMEMSIZE);
257
258 return 0;
259out_err:
260 printk(KERN_ERR PFX "DMA ringmemory allocation failed\n");
261 return -ENOMEM;
262}
263
264static void free_ringmemory(struct bcm43xx_dmaring *ring)
265{
266 struct device *dev = &(ring->bcm->pci_dev->dev);
267
268 dma_free_coherent(dev, BCM43xx_DMA_RINGMEMSIZE,
269 ring->descbase, ring->dmabase);
270}
271
272/* Reset the RX DMA channel */
273int bcm43xx_dmacontroller_rx_reset(struct bcm43xx_private *bcm,
274 u16 mmio_base, int dma64)
275{
276 int i;
277 u32 value;
278 u16 offset;
279
280 offset = dma64 ? BCM43xx_DMA64_RXCTL : BCM43xx_DMA32_RXCTL;
281 bcm43xx_write32(bcm, mmio_base + offset, 0);
282 for (i = 0; i < 1000; i++) {
283 offset = dma64 ? BCM43xx_DMA64_RXSTATUS : BCM43xx_DMA32_RXSTATUS;
284 value = bcm43xx_read32(bcm, mmio_base + offset);
285 if (dma64) {
286 value &= BCM43xx_DMA64_RXSTAT;
287 if (value == BCM43xx_DMA64_RXSTAT_DISABLED) {
288 i = -1;
289 break;
290 }
291 } else {
292 value &= BCM43xx_DMA32_RXSTATE;
293 if (value == BCM43xx_DMA32_RXSTAT_DISABLED) {
294 i = -1;
295 break;
296 }
297 }
298 udelay(10);
299 }
300 if (i != -1) {
301 printk(KERN_ERR PFX "Error: Wait on DMA RX status timed out.\n");
302 return -ENODEV;
303 }
304
305 return 0;
306}
307
 308/* Reset the TX DMA channel */
309int bcm43xx_dmacontroller_tx_reset(struct bcm43xx_private *bcm,
310 u16 mmio_base, int dma64)
311{
312 int i;
313 u32 value;
314 u16 offset;
315
316 for (i = 0; i < 1000; i++) {
317 offset = dma64 ? BCM43xx_DMA64_TXSTATUS : BCM43xx_DMA32_TXSTATUS;
318 value = bcm43xx_read32(bcm, mmio_base + offset);
319 if (dma64) {
320 value &= BCM43xx_DMA64_TXSTAT;
321 if (value == BCM43xx_DMA64_TXSTAT_DISABLED ||
322 value == BCM43xx_DMA64_TXSTAT_IDLEWAIT ||
323 value == BCM43xx_DMA64_TXSTAT_STOPPED)
324 break;
325 } else {
326 value &= BCM43xx_DMA32_TXSTATE;
327 if (value == BCM43xx_DMA32_TXSTAT_DISABLED ||
328 value == BCM43xx_DMA32_TXSTAT_IDLEWAIT ||
329 value == BCM43xx_DMA32_TXSTAT_STOPPED)
330 break;
331 }
332 udelay(10);
333 }
334 offset = dma64 ? BCM43xx_DMA64_TXCTL : BCM43xx_DMA32_TXCTL;
335 bcm43xx_write32(bcm, mmio_base + offset, 0);
336 for (i = 0; i < 1000; i++) {
337 offset = dma64 ? BCM43xx_DMA64_TXSTATUS : BCM43xx_DMA32_TXSTATUS;
338 value = bcm43xx_read32(bcm, mmio_base + offset);
339 if (dma64) {
340 value &= BCM43xx_DMA64_TXSTAT;
341 if (value == BCM43xx_DMA64_TXSTAT_DISABLED) {
342 i = -1;
343 break;
344 }
345 } else {
346 value &= BCM43xx_DMA32_TXSTATE;
347 if (value == BCM43xx_DMA32_TXSTAT_DISABLED) {
348 i = -1;
349 break;
350 }
351 }
352 udelay(10);
353 }
354 if (i != -1) {
355 printk(KERN_ERR PFX "Error: Wait on DMA TX status timed out.\n");
356 return -ENODEV;
357 }
358 /* ensure the reset is completed. */
359 udelay(300);
360
361 return 0;
362}
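
Both reset helpers above poll a status register up to 1000 times with a short delay and reuse the loop counter as a sentinel: i is forced to -1 as soon as the desired state is seen, so i != -1 after the loop means the wait timed out. A standalone sketch of that pattern with a stubbed status read (illustrative only):

	#include <stdio.h>

	/* Stand-in for reading the DMA status register; this fake
	 * "hardware" reports the disabled state after a few polls. */
	static int read_status_stub(void)
	{
		static int polls;
		return ++polls >= 5 ? 0 /* disabled */ : 1 /* busy */;
	}

	int main(void)
	{
		int i;

		for (i = 0; i < 1000; i++) {
			if (read_status_stub() == 0) {
				i = -1;		/* sentinel: condition met */
				break;
			}
			/* the driver would udelay(10) here */
		}
		if (i != -1) {
			printf("timed out\n");
			return 1;
		}
		printf("controller disabled\n");
		return 0;
	}
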
363
364static void fill_descriptor(struct bcm43xx_dmaring *ring,
365 struct bcm43xx_dmadesc_generic *desc,
366 dma_addr_t dmaaddr,
367 u16 bufsize,
368 int start, int end, int irq)
369{
370 int slot;
371
372 slot = bcm43xx_dma_desc2idx(ring, desc);
373 assert(slot >= 0 && slot < ring->nr_slots);
374
375 if (ring->dma64) {
376 u32 ctl0 = 0, ctl1 = 0;
377 u32 addrlo, addrhi;
378 u32 addrext;
379
380 addrlo = (u32)(dmaaddr & 0xFFFFFFFF);
381 addrhi = (((u64)dmaaddr >> 32) & ~BCM43xx_DMA64_ROUTING);
382 addrext = (((u64)dmaaddr >> 32) >> BCM43xx_DMA64_ROUTING_SHIFT);
383 addrhi |= ring->routing;
384 if (slot == ring->nr_slots - 1)
385 ctl0 |= BCM43xx_DMA64_DCTL0_DTABLEEND;
386 if (start)
387 ctl0 |= BCM43xx_DMA64_DCTL0_FRAMESTART;
388 if (end)
389 ctl0 |= BCM43xx_DMA64_DCTL0_FRAMEEND;
390 if (irq)
391 ctl0 |= BCM43xx_DMA64_DCTL0_IRQ;
392 ctl1 |= (bufsize - ring->frameoffset)
393 & BCM43xx_DMA64_DCTL1_BYTECNT;
394 ctl1 |= (addrext << BCM43xx_DMA64_DCTL1_ADDREXT_SHIFT)
395 & BCM43xx_DMA64_DCTL1_ADDREXT_MASK;
396
397 desc->dma64.control0 = cpu_to_le32(ctl0);
398 desc->dma64.control1 = cpu_to_le32(ctl1);
399 desc->dma64.address_low = cpu_to_le32(addrlo);
400 desc->dma64.address_high = cpu_to_le32(addrhi);
401 } else {
402 u32 ctl;
403 u32 addr;
404 u32 addrext;
405
406 addr = (u32)(dmaaddr & ~BCM43xx_DMA32_ROUTING);
407 addrext = (u32)(dmaaddr & BCM43xx_DMA32_ROUTING)
408 >> BCM43xx_DMA32_ROUTING_SHIFT;
409 addr |= ring->routing;
410 ctl = (bufsize - ring->frameoffset)
411 & BCM43xx_DMA32_DCTL_BYTECNT;
412 if (slot == ring->nr_slots - 1)
413 ctl |= BCM43xx_DMA32_DCTL_DTABLEEND;
414 if (start)
415 ctl |= BCM43xx_DMA32_DCTL_FRAMESTART;
416 if (end)
417 ctl |= BCM43xx_DMA32_DCTL_FRAMEEND;
418 if (irq)
419 ctl |= BCM43xx_DMA32_DCTL_IRQ;
420 ctl |= (addrext << BCM43xx_DMA32_DCTL_ADDREXT_SHIFT)
421 & BCM43xx_DMA32_DCTL_ADDREXT_MASK;
422
423 desc->dma32.control = cpu_to_le32(ctl);
424 desc->dma32.address = cpu_to_le32(addr);
425 }
426}
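
fill_descriptor() splits the DMA bus address into three descriptor fields: the low 32 bits go in unchanged, the two routing bits are masked out of the high word and carried separately in addrext, and the client-translation routing value is OR-ed into the high word in their place. A standalone sketch of the 64-bit variant, using the register constants from bcm43xx_dma.h further down (illustrative only):

	#include <stdio.h>
	#include <inttypes.h>

	/* Mirrors the 64-bit engine constants from bcm43xx_dma.h. */
	#define DMA64_ROUTING		0xC0000000u
	#define DMA64_ROUTING_SHIFT	30
	#define DMA64_CLIENTTRANS	0x80000000u

	int main(void)
	{
		uint64_t dmaaddr = 0x000000012345A000ULL;	/* example bus address */

		uint32_t addrlo = (uint32_t)(dmaaddr & 0xFFFFFFFFULL);
		uint32_t addrhi = (uint32_t)(dmaaddr >> 32) & ~DMA64_ROUTING;
		uint32_t addrext = (uint32_t)((dmaaddr >> 32) >> DMA64_ROUTING_SHIFT);

		/* The routing value replaces the two top address bits, which
		 * travel separately in the descriptor's addrext field. */
		addrhi |= DMA64_CLIENTTRANS;

		printf("lo=0x%08" PRIx32 " hi=0x%08" PRIx32 " ext=%" PRIu32 "\n",
		       addrlo, addrhi, addrext);
		return 0;
	}
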
427
428static int setup_rx_descbuffer(struct bcm43xx_dmaring *ring,
429 struct bcm43xx_dmadesc_generic *desc,
430 struct bcm43xx_dmadesc_meta *meta,
431 gfp_t gfp_flags)
432{
433 struct bcm43xx_rxhdr *rxhdr;
434 struct bcm43xx_hwxmitstatus *xmitstat;
435 dma_addr_t dmaaddr;
436 struct sk_buff *skb;
437
438 assert(!ring->tx);
439
440 skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
441 if (unlikely(!skb))
442 return -ENOMEM;
443 dmaaddr = map_descbuffer(ring, skb->data, ring->rx_buffersize, 0);
 444	/* This hardware bug work-around was adapted from the b44 driver.
445 The chip may be unable to do PCI DMA to/from anything above 1GB */
446 if (pci_dma_mapping_error(dmaaddr) ||
447 dmaaddr + ring->rx_buffersize > ring->bcm->dma_mask) {
448 /* This one has 30-bit addressing... */
449 if (!pci_dma_mapping_error(dmaaddr))
450 pci_unmap_single(ring->bcm->pci_dev,
451 dmaaddr, ring->rx_buffersize,
452 PCI_DMA_FROMDEVICE);
453 dev_kfree_skb_any(skb);
454 skb = __dev_alloc_skb(ring->rx_buffersize,GFP_DMA);
455 if (skb == NULL)
456 return -ENOMEM;
457 dmaaddr = pci_map_single(ring->bcm->pci_dev,
458 skb->data, ring->rx_buffersize,
459 PCI_DMA_FROMDEVICE);
460 if (pci_dma_mapping_error(dmaaddr) ||
461 dmaaddr + ring->rx_buffersize > ring->bcm->dma_mask) {
462 assert(0);
463 dev_kfree_skb_any(skb);
464 return -ENOMEM;
465 }
466 }
467 meta->skb = skb;
468 meta->dmaaddr = dmaaddr;
469 skb->dev = ring->bcm->net_dev;
470
471 fill_descriptor(ring, desc, dmaaddr,
472 ring->rx_buffersize, 0, 0, 0);
473
474 rxhdr = (struct bcm43xx_rxhdr *)(skb->data);
475 rxhdr->frame_length = 0;
476 rxhdr->flags1 = 0;
477 xmitstat = (struct bcm43xx_hwxmitstatus *)(skb->data);
478 xmitstat->cookie = 0;
479
480 return 0;
481}
482
483/* Allocate the initial descbuffers.
484 * This is used for an RX ring only.
485 */
486static int alloc_initial_descbuffers(struct bcm43xx_dmaring *ring)
487{
488 int i, err = -ENOMEM;
489 struct bcm43xx_dmadesc_generic *desc;
490 struct bcm43xx_dmadesc_meta *meta;
491
492 for (i = 0; i < ring->nr_slots; i++) {
493 desc = bcm43xx_dma_idx2desc(ring, i, &meta);
494
495 err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL);
496 if (err)
497 goto err_unwind;
498 }
499 mb();
500 ring->used_slots = ring->nr_slots;
501 err = 0;
502out:
503 return err;
504
505err_unwind:
506 for (i--; i >= 0; i--) {
507 desc = bcm43xx_dma_idx2desc(ring, i, &meta);
508
509 unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0);
510 dev_kfree_skb(meta->skb);
511 }
512 goto out;
513}
514
515/* Do initial setup of the DMA controller.
516 * Reset the controller, write the ring busaddress
517 * and switch the "enable" bit on.
518 */
519static int dmacontroller_setup(struct bcm43xx_dmaring *ring)
520{
521 int err = 0;
522 u32 value;
523 u32 addrext;
524
525 if (ring->tx) {
526 if (ring->dma64) {
527 u64 ringbase = (u64)(ring->dmabase);
528
529 addrext = ((ringbase >> 32) >> BCM43xx_DMA64_ROUTING_SHIFT);
530 value = BCM43xx_DMA64_TXENABLE;
531 value |= (addrext << BCM43xx_DMA64_TXADDREXT_SHIFT)
532 & BCM43xx_DMA64_TXADDREXT_MASK;
533 bcm43xx_dma_write(ring, BCM43xx_DMA64_TXCTL, value);
534 bcm43xx_dma_write(ring, BCM43xx_DMA64_TXRINGLO,
535 (ringbase & 0xFFFFFFFF));
536 bcm43xx_dma_write(ring, BCM43xx_DMA64_TXRINGHI,
537 ((ringbase >> 32) & ~BCM43xx_DMA64_ROUTING)
538 | ring->routing);
539 } else {
540 u32 ringbase = (u32)(ring->dmabase);
541
542 addrext = (ringbase >> BCM43xx_DMA32_ROUTING_SHIFT);
543 value = BCM43xx_DMA32_TXENABLE;
544 value |= (addrext << BCM43xx_DMA32_TXADDREXT_SHIFT)
545 & BCM43xx_DMA32_TXADDREXT_MASK;
546 bcm43xx_dma_write(ring, BCM43xx_DMA32_TXCTL, value);
547 bcm43xx_dma_write(ring, BCM43xx_DMA32_TXRING,
548 (ringbase & ~BCM43xx_DMA32_ROUTING)
549 | ring->routing);
550 }
551 } else {
552 err = alloc_initial_descbuffers(ring);
553 if (err)
554 goto out;
555 if (ring->dma64) {
556 u64 ringbase = (u64)(ring->dmabase);
557
558 addrext = ((ringbase >> 32) >> BCM43xx_DMA64_ROUTING_SHIFT);
559 value = (ring->frameoffset << BCM43xx_DMA64_RXFROFF_SHIFT);
560 value |= BCM43xx_DMA64_RXENABLE;
561 value |= (addrext << BCM43xx_DMA64_RXADDREXT_SHIFT)
562 & BCM43xx_DMA64_RXADDREXT_MASK;
563 bcm43xx_dma_write(ring, BCM43xx_DMA64_RXCTL, value);
564 bcm43xx_dma_write(ring, BCM43xx_DMA64_RXRINGLO,
565 (ringbase & 0xFFFFFFFF));
566 bcm43xx_dma_write(ring, BCM43xx_DMA64_RXRINGHI,
567 ((ringbase >> 32) & ~BCM43xx_DMA64_ROUTING)
568 | ring->routing);
569 bcm43xx_dma_write(ring, BCM43xx_DMA64_RXINDEX, 200);
570 } else {
571 u32 ringbase = (u32)(ring->dmabase);
572
573 addrext = (ringbase >> BCM43xx_DMA32_ROUTING_SHIFT);
574 value = (ring->frameoffset << BCM43xx_DMA32_RXFROFF_SHIFT);
575 value |= BCM43xx_DMA32_RXENABLE;
576 value |= (addrext << BCM43xx_DMA32_RXADDREXT_SHIFT)
577 & BCM43xx_DMA32_RXADDREXT_MASK;
578 bcm43xx_dma_write(ring, BCM43xx_DMA32_RXCTL, value);
579 bcm43xx_dma_write(ring, BCM43xx_DMA32_RXRING,
580 (ringbase & ~BCM43xx_DMA32_ROUTING)
581 | ring->routing);
582 bcm43xx_dma_write(ring, BCM43xx_DMA32_RXINDEX, 200);
583 }
584 }
585
586out:
587 return err;
588}
589
590/* Shutdown the DMA controller. */
591static void dmacontroller_cleanup(struct bcm43xx_dmaring *ring)
592{
593 if (ring->tx) {
594 bcm43xx_dmacontroller_tx_reset(ring->bcm, ring->mmio_base, ring->dma64);
595 if (ring->dma64) {
596 bcm43xx_dma_write(ring, BCM43xx_DMA64_TXRINGLO, 0);
597 bcm43xx_dma_write(ring, BCM43xx_DMA64_TXRINGHI, 0);
598 } else
599 bcm43xx_dma_write(ring, BCM43xx_DMA32_TXRING, 0);
600 } else {
601 bcm43xx_dmacontroller_rx_reset(ring->bcm, ring->mmio_base, ring->dma64);
602 if (ring->dma64) {
603 bcm43xx_dma_write(ring, BCM43xx_DMA64_RXRINGLO, 0);
604 bcm43xx_dma_write(ring, BCM43xx_DMA64_RXRINGHI, 0);
605 } else
606 bcm43xx_dma_write(ring, BCM43xx_DMA32_RXRING, 0);
607 }
608}
609
610static void free_all_descbuffers(struct bcm43xx_dmaring *ring)
611{
612 struct bcm43xx_dmadesc_generic *desc;
613 struct bcm43xx_dmadesc_meta *meta;
614 int i;
615
616 if (!ring->used_slots)
617 return;
618 for (i = 0; i < ring->nr_slots; i++) {
619 desc = bcm43xx_dma_idx2desc(ring, i, &meta);
620
621 if (!meta->skb) {
622 assert(ring->tx);
623 continue;
624 }
625 if (ring->tx) {
626 unmap_descbuffer(ring, meta->dmaaddr,
627 meta->skb->len, 1);
628 } else {
629 unmap_descbuffer(ring, meta->dmaaddr,
630 ring->rx_buffersize, 0);
631 }
632 free_descriptor_buffer(ring, meta, 0);
633 }
634}
635
636/* Main initialization function. */
637static
638struct bcm43xx_dmaring * bcm43xx_setup_dmaring(struct bcm43xx_private *bcm,
639 int controller_index,
640 int for_tx,
641 int dma64)
642{
643 struct bcm43xx_dmaring *ring;
644 int err;
645 int nr_slots;
646
647 ring = kzalloc(sizeof(*ring), GFP_KERNEL);
648 if (!ring)
649 goto out;
650
651 nr_slots = BCM43xx_RXRING_SLOTS;
652 if (for_tx)
653 nr_slots = BCM43xx_TXRING_SLOTS;
654
655 ring->meta = kcalloc(nr_slots, sizeof(struct bcm43xx_dmadesc_meta),
656 GFP_KERNEL);
657 if (!ring->meta)
658 goto err_kfree_ring;
659
660 ring->routing = BCM43xx_DMA32_CLIENTTRANS;
661 if (dma64)
662 ring->routing = BCM43xx_DMA64_CLIENTTRANS;
663
664 ring->bcm = bcm;
665 ring->nr_slots = nr_slots;
666 ring->suspend_mark = ring->nr_slots * BCM43xx_TXSUSPEND_PERCENT / 100;
667 ring->resume_mark = ring->nr_slots * BCM43xx_TXRESUME_PERCENT / 100;
668 assert(ring->suspend_mark < ring->resume_mark);
669 ring->mmio_base = bcm43xx_dmacontroller_base(dma64, controller_index);
670 ring->index = controller_index;
671 ring->dma64 = !!dma64;
672 if (for_tx) {
673 ring->tx = 1;
674 ring->current_slot = -1;
675 } else {
676 if (ring->index == 0) {
677 ring->rx_buffersize = BCM43xx_DMA0_RX_BUFFERSIZE;
678 ring->frameoffset = BCM43xx_DMA0_RX_FRAMEOFFSET;
679 } else if (ring->index == 3) {
680 ring->rx_buffersize = BCM43xx_DMA3_RX_BUFFERSIZE;
681 ring->frameoffset = BCM43xx_DMA3_RX_FRAMEOFFSET;
682 } else
683 assert(0);
684 }
685
686 err = alloc_ringmemory(ring);
687 if (err)
688 goto err_kfree_meta;
689 err = dmacontroller_setup(ring);
690 if (err)
691 goto err_free_ringmemory;
692 return ring;
693
694out:
695 printk(KERN_ERR PFX "Error in bcm43xx_setup_dmaring\n");
696 return ring;
697
698err_free_ringmemory:
699 free_ringmemory(ring);
700err_kfree_meta:
701 kfree(ring->meta);
702err_kfree_ring:
703 kfree(ring);
704 ring = NULL;
705 goto out;
706}
707
708/* Main cleanup function. */
709static void bcm43xx_destroy_dmaring(struct bcm43xx_dmaring *ring)
710{
711 if (!ring)
712 return;
713
714 dprintk(KERN_INFO PFX "DMA-%s 0x%04X (%s) max used slots: %d/%d\n",
715 (ring->dma64) ? "64" : "32",
716 ring->mmio_base,
717 (ring->tx) ? "TX" : "RX",
718 ring->max_used_slots, ring->nr_slots);
 719	/* Device IRQs are disabled prior to entering this function,
 720	 * so there is no need to worry about concurrency with the RX handler.
721 */
722 dmacontroller_cleanup(ring);
723 free_all_descbuffers(ring);
724 free_ringmemory(ring);
725
726 kfree(ring->meta);
727 kfree(ring);
728}
729
730void bcm43xx_dma_free(struct bcm43xx_private *bcm)
731{
732 struct bcm43xx_dma *dma;
733
734 if (bcm43xx_using_pio(bcm))
735 return;
736 dma = bcm43xx_current_dma(bcm);
737
738 bcm43xx_destroy_dmaring(dma->rx_ring3);
739 dma->rx_ring3 = NULL;
740 bcm43xx_destroy_dmaring(dma->rx_ring0);
741 dma->rx_ring0 = NULL;
742
743 bcm43xx_destroy_dmaring(dma->tx_ring5);
744 dma->tx_ring5 = NULL;
745 bcm43xx_destroy_dmaring(dma->tx_ring4);
746 dma->tx_ring4 = NULL;
747 bcm43xx_destroy_dmaring(dma->tx_ring3);
748 dma->tx_ring3 = NULL;
749 bcm43xx_destroy_dmaring(dma->tx_ring2);
750 dma->tx_ring2 = NULL;
751 bcm43xx_destroy_dmaring(dma->tx_ring1);
752 dma->tx_ring1 = NULL;
753 bcm43xx_destroy_dmaring(dma->tx_ring0);
754 dma->tx_ring0 = NULL;
755}
756
757int bcm43xx_dma_init(struct bcm43xx_private *bcm)
758{
759 struct bcm43xx_dma *dma = bcm43xx_current_dma(bcm);
760 struct bcm43xx_dmaring *ring;
761 int err = -ENOMEM;
762 int dma64 = 0;
763
764 bcm->dma_mask = bcm43xx_get_supported_dma_mask(bcm);
765 if (bcm->dma_mask == DMA_64BIT_MASK)
766 dma64 = 1;
767 err = pci_set_dma_mask(bcm->pci_dev, bcm->dma_mask);
768 if (err)
769 goto no_dma;
770 err = pci_set_consistent_dma_mask(bcm->pci_dev, bcm->dma_mask);
771 if (err)
772 goto no_dma;
773
774 /* setup TX DMA channels. */
775 ring = bcm43xx_setup_dmaring(bcm, 0, 1, dma64);
776 if (!ring)
777 goto out;
778 dma->tx_ring0 = ring;
779
780 ring = bcm43xx_setup_dmaring(bcm, 1, 1, dma64);
781 if (!ring)
782 goto err_destroy_tx0;
783 dma->tx_ring1 = ring;
784
785 ring = bcm43xx_setup_dmaring(bcm, 2, 1, dma64);
786 if (!ring)
787 goto err_destroy_tx1;
788 dma->tx_ring2 = ring;
789
790 ring = bcm43xx_setup_dmaring(bcm, 3, 1, dma64);
791 if (!ring)
792 goto err_destroy_tx2;
793 dma->tx_ring3 = ring;
794
795 ring = bcm43xx_setup_dmaring(bcm, 4, 1, dma64);
796 if (!ring)
797 goto err_destroy_tx3;
798 dma->tx_ring4 = ring;
799
800 ring = bcm43xx_setup_dmaring(bcm, 5, 1, dma64);
801 if (!ring)
802 goto err_destroy_tx4;
803 dma->tx_ring5 = ring;
804
805 /* setup RX DMA channels. */
806 ring = bcm43xx_setup_dmaring(bcm, 0, 0, dma64);
807 if (!ring)
808 goto err_destroy_tx5;
809 dma->rx_ring0 = ring;
810
811 if (bcm->current_core->rev < 5) {
812 ring = bcm43xx_setup_dmaring(bcm, 3, 0, dma64);
813 if (!ring)
814 goto err_destroy_rx0;
815 dma->rx_ring3 = ring;
816 }
817
818 dprintk(KERN_INFO PFX "%d-bit DMA initialized\n",
819 (bcm->dma_mask == DMA_64BIT_MASK) ? 64 :
820 (bcm->dma_mask == DMA_32BIT_MASK) ? 32 : 30);
821 err = 0;
822out:
823 return err;
824
825err_destroy_rx0:
826 bcm43xx_destroy_dmaring(dma->rx_ring0);
827 dma->rx_ring0 = NULL;
828err_destroy_tx5:
829 bcm43xx_destroy_dmaring(dma->tx_ring5);
830 dma->tx_ring5 = NULL;
831err_destroy_tx4:
832 bcm43xx_destroy_dmaring(dma->tx_ring4);
833 dma->tx_ring4 = NULL;
834err_destroy_tx3:
835 bcm43xx_destroy_dmaring(dma->tx_ring3);
836 dma->tx_ring3 = NULL;
837err_destroy_tx2:
838 bcm43xx_destroy_dmaring(dma->tx_ring2);
839 dma->tx_ring2 = NULL;
840err_destroy_tx1:
841 bcm43xx_destroy_dmaring(dma->tx_ring1);
842 dma->tx_ring1 = NULL;
843err_destroy_tx0:
844 bcm43xx_destroy_dmaring(dma->tx_ring0);
845 dma->tx_ring0 = NULL;
846no_dma:
847#ifdef CONFIG_BCM43XX_PIO
848 printk(KERN_WARNING PFX "DMA not supported on this device."
849 " Falling back to PIO.\n");
850 bcm->__using_pio = 1;
851 return -ENOSYS;
852#else
853 printk(KERN_ERR PFX "FATAL: DMA not supported and PIO not configured. "
854 "Please recompile the driver with PIO support.\n");
855 return -ENODEV;
856#endif /* CONFIG_BCM43XX_PIO */
857}
858
859/* Generate a cookie for the TX header. */
860static u16 generate_cookie(struct bcm43xx_dmaring *ring,
861 int slot)
862{
863 u16 cookie = 0x1000;
864
865 /* Use the upper 4 bits of the cookie as
866 * DMA controller ID and store the slot number
867 * in the lower 12 bits.
868 * Note that the cookie must never be 0, as this
 869	 * is a special value used in the RX path.
870 */
871 switch (ring->index) {
872 case 0:
873 cookie = 0xA000;
874 break;
875 case 1:
876 cookie = 0xB000;
877 break;
878 case 2:
879 cookie = 0xC000;
880 break;
881 case 3:
882 cookie = 0xD000;
883 break;
884 case 4:
885 cookie = 0xE000;
886 break;
887 case 5:
888 cookie = 0xF000;
889 break;
890 }
891 assert(((u16)slot & 0xF000) == 0x0000);
892 cookie |= (u16)slot;
893
894 return cookie;
895}
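
generate_cookie() packs the TX ring identity into the upper 4 bits (0xA-0xF for rings 0-5) and the slot into the lower 12 bits, so parse_cookie() below can recover both; 0 is reserved for the RX path. A compact standalone equivalent of that mapping (illustrative only, assuming the contiguous 0xA000-0xF000 values used in the switch above):

	#include <stdio.h>
	#include <assert.h>
	#include <stdint.h>

	/* Upper 4 bits select the TX ring, lower 12 bits carry the slot. */
	static uint16_t make_cookie(int ring_index, int slot)
	{
		assert(ring_index >= 0 && ring_index <= 5);
		assert((slot & 0xF000) == 0);
		return (uint16_t)(((0xA + ring_index) << 12) | slot);
	}

	static void split_cookie(uint16_t cookie, int *ring_index, int *slot)
	{
		*ring_index = (cookie >> 12) - 0xA;
		*slot = cookie & 0x0FFF;
	}

	int main(void)
	{
		int ring, slot;

		split_cookie(make_cookie(3, 511), &ring, &slot);
		printf("ring %d, slot %d\n", ring, slot);	/* ring 3, slot 511 */
		return 0;
	}
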
896
897/* Inspect a cookie and find out to which controller/slot it belongs. */
898static
899struct bcm43xx_dmaring * parse_cookie(struct bcm43xx_private *bcm,
900 u16 cookie, int *slot)
901{
902 struct bcm43xx_dma *dma = bcm43xx_current_dma(bcm);
903 struct bcm43xx_dmaring *ring = NULL;
904
905 switch (cookie & 0xF000) {
906 case 0xA000:
907 ring = dma->tx_ring0;
908 break;
909 case 0xB000:
910 ring = dma->tx_ring1;
911 break;
912 case 0xC000:
913 ring = dma->tx_ring2;
914 break;
915 case 0xD000:
916 ring = dma->tx_ring3;
917 break;
918 case 0xE000:
919 ring = dma->tx_ring4;
920 break;
921 case 0xF000:
922 ring = dma->tx_ring5;
923 break;
924 default:
925 assert(0);
926 }
927 *slot = (cookie & 0x0FFF);
928 assert(*slot >= 0 && *slot < ring->nr_slots);
929
930 return ring;
931}
932
933static void dmacontroller_poke_tx(struct bcm43xx_dmaring *ring,
934 int slot)
935{
936 u16 offset;
937 int descsize;
938
939 /* Everything is ready to start. Buffers are DMA mapped and
940 * associated with slots.
941 * "slot" is the last slot of the new frame we want to transmit.
 942	 * Fasten your seat belts now, please.
943 */
944 wmb();
945 slot = next_slot(ring, slot);
946 offset = (ring->dma64) ? BCM43xx_DMA64_TXINDEX : BCM43xx_DMA32_TXINDEX;
947 descsize = (ring->dma64) ? sizeof(struct bcm43xx_dmadesc64)
948 : sizeof(struct bcm43xx_dmadesc32);
949 bcm43xx_dma_write(ring, offset,
950 (u32)(slot * descsize));
951}
952
953static void dma_tx_fragment(struct bcm43xx_dmaring *ring,
954 struct sk_buff *skb,
955 u8 cur_frag)
956{
957 int slot;
958 struct bcm43xx_dmadesc_generic *desc;
959 struct bcm43xx_dmadesc_meta *meta;
960 dma_addr_t dmaaddr;
961 struct sk_buff *bounce_skb;
962
963 assert(skb_shinfo(skb)->nr_frags == 0);
964
965 slot = request_slot(ring);
966 desc = bcm43xx_dma_idx2desc(ring, slot, &meta);
967
968 /* Add a device specific TX header. */
969 assert(skb_headroom(skb) >= sizeof(struct bcm43xx_txhdr));
970 /* Reserve enough headroom for the device tx header. */
971 __skb_push(skb, sizeof(struct bcm43xx_txhdr));
972 /* Now calculate and add the tx header.
973 * The tx header includes the PLCP header.
974 */
975 bcm43xx_generate_txhdr(ring->bcm,
976 (struct bcm43xx_txhdr *)skb->data,
977 skb->data + sizeof(struct bcm43xx_txhdr),
978 skb->len - sizeof(struct bcm43xx_txhdr),
979 (cur_frag == 0),
980 generate_cookie(ring, slot));
981 dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
982 if (dma_mapping_error(dmaaddr) || dmaaddr + skb->len > ring->bcm->dma_mask) {
983 /* chip cannot handle DMA to/from > 1GB, use bounce buffer (copied from b44 driver) */
984 if (!dma_mapping_error(dmaaddr))
985 unmap_descbuffer(ring, dmaaddr, skb->len, 1);
986 bounce_skb = __dev_alloc_skb(skb->len, GFP_ATOMIC|GFP_DMA);
987 if (!bounce_skb)
988 return;
989 dmaaddr = map_descbuffer(ring, bounce_skb->data, bounce_skb->len, 1);
990 if (dma_mapping_error(dmaaddr) || dmaaddr + skb->len > ring->bcm->dma_mask) {
991 if (!dma_mapping_error(dmaaddr))
992 unmap_descbuffer(ring, dmaaddr, skb->len, 1);
993 dev_kfree_skb_any(bounce_skb);
994 assert(0);
995 return;
996 }
997 skb_copy_from_linear_data(skb, skb_put(bounce_skb, skb->len),
998 skb->len);
999 dev_kfree_skb_any(skb);
1000 skb = bounce_skb;
1001 }
1002
1003 meta->skb = skb;
1004 meta->dmaaddr = dmaaddr;
1005
1006 fill_descriptor(ring, desc, dmaaddr,
1007 skb->len, 1, 1, 1);
1008
1009 /* Now transfer the whole frame. */
1010 dmacontroller_poke_tx(ring, slot);
1011}
1012
1013int bcm43xx_dma_tx(struct bcm43xx_private *bcm,
1014 struct ieee80211_txb *txb)
1015{
1016 /* We just received a packet from the kernel network subsystem.
1017 * Add headers and DMA map the memory. Poke
 1018	 * Add headers and DMA map the memory, then poke
 1019	 * the device to start the transmission.
1020 */
1021 struct bcm43xx_dmaring *ring = bcm43xx_current_dma(bcm)->tx_ring1;
1022 u8 i;
1023 struct sk_buff *skb;
1024
1025 assert(ring->tx);
1026 if (unlikely(free_slots(ring) < txb->nr_frags)) {
1027 /* The queue should be stopped,
1028 * if we are low on free slots.
1029 * If this ever triggers, we have to lower the suspend_mark.
1030 */
1031 dprintkl(KERN_ERR PFX "Out of DMA descriptor slots!\n");
1032 return -ENOMEM;
1033 }
1034
1035 for (i = 0; i < txb->nr_frags; i++) {
1036 skb = txb->fragments[i];
 1037		/* Detach the skb so ieee80211_txb_free() below does not free it. */
1038 txb->fragments[i] = NULL;
1039 dma_tx_fragment(ring, skb, i);
1040 }
1041 ieee80211_txb_free(txb);
1042
1043 return 0;
1044}
1045
1046void bcm43xx_dma_handle_xmitstatus(struct bcm43xx_private *bcm,
1047 struct bcm43xx_xmitstatus *status)
1048{
1049 struct bcm43xx_dmaring *ring;
1050 struct bcm43xx_dmadesc_generic *desc;
1051 struct bcm43xx_dmadesc_meta *meta;
1052 int is_last_fragment;
1053 int slot;
1054 u32 tmp;
1055
1056 ring = parse_cookie(bcm, status->cookie, &slot);
1057 assert(ring);
1058 assert(ring->tx);
1059 while (1) {
1060 assert(slot >= 0 && slot < ring->nr_slots);
1061 desc = bcm43xx_dma_idx2desc(ring, slot, &meta);
1062
1063 if (ring->dma64) {
1064 tmp = le32_to_cpu(desc->dma64.control0);
1065 is_last_fragment = !!(tmp & BCM43xx_DMA64_DCTL0_FRAMEEND);
1066 } else {
1067 tmp = le32_to_cpu(desc->dma32.control);
1068 is_last_fragment = !!(tmp & BCM43xx_DMA32_DCTL_FRAMEEND);
1069 }
1070 unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len, 1);
1071 free_descriptor_buffer(ring, meta, 1);
1072 /* Everything belonging to the slot is unmapped
1073 * and freed, so we can return it.
1074 */
1075 return_slot(ring, slot);
1076
1077 if (is_last_fragment)
1078 break;
1079 slot = next_slot(ring, slot);
1080 }
1081 bcm->stats.last_tx = jiffies;
1082}
1083
1084static void dma_rx(struct bcm43xx_dmaring *ring,
1085 int *slot)
1086{
1087 struct bcm43xx_dmadesc_generic *desc;
1088 struct bcm43xx_dmadesc_meta *meta;
1089 struct bcm43xx_rxhdr *rxhdr;
1090 struct sk_buff *skb;
1091 u16 len;
1092 int err;
1093 dma_addr_t dmaaddr;
1094
1095 desc = bcm43xx_dma_idx2desc(ring, *slot, &meta);
1096
1097 sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize);
1098 skb = meta->skb;
1099
1100 if (ring->index == 3) {
1101 /* We received an xmit status. */
1102 struct bcm43xx_hwxmitstatus *hw = (struct bcm43xx_hwxmitstatus *)skb->data;
1103 struct bcm43xx_xmitstatus stat;
1104 int i = 0;
1105
1106 stat.cookie = le16_to_cpu(hw->cookie);
1107 while (stat.cookie == 0) {
1108 if (unlikely(++i >= 10000)) {
1109 assert(0);
1110 break;
1111 }
1112 udelay(2);
1113 barrier();
1114 stat.cookie = le16_to_cpu(hw->cookie);
1115 }
1116 stat.flags = hw->flags;
1117 stat.cnt1 = hw->cnt1;
1118 stat.cnt2 = hw->cnt2;
1119 stat.seq = le16_to_cpu(hw->seq);
1120 stat.unknown = le16_to_cpu(hw->unknown);
1121
1122 bcm43xx_debugfs_log_txstat(ring->bcm, &stat);
1123 bcm43xx_dma_handle_xmitstatus(ring->bcm, &stat);
1124 /* recycle the descriptor buffer. */
1125 sync_descbuffer_for_device(ring, meta->dmaaddr, ring->rx_buffersize);
1126
1127 return;
1128 }
1129 rxhdr = (struct bcm43xx_rxhdr *)skb->data;
1130 len = le16_to_cpu(rxhdr->frame_length);
1131 if (len == 0) {
1132 int i = 0;
1133
1134 do {
1135 udelay(2);
1136 barrier();
1137 len = le16_to_cpu(rxhdr->frame_length);
1138 } while (len == 0 && i++ < 5);
1139 if (unlikely(len == 0)) {
1140 /* recycle the descriptor buffer. */
1141 sync_descbuffer_for_device(ring, meta->dmaaddr,
1142 ring->rx_buffersize);
1143 goto drop;
1144 }
1145 }
1146 if (unlikely(len > ring->rx_buffersize)) {
1147 /* The data did not fit into one descriptor buffer
1148 * and is split over multiple buffers.
1149 * This should never happen, as we try to allocate buffers
1150 * big enough. So simply ignore this packet.
1151 */
1152 int cnt = 0;
1153 s32 tmp = len;
1154
1155 while (1) {
1156 desc = bcm43xx_dma_idx2desc(ring, *slot, &meta);
1157 /* recycle the descriptor buffer. */
1158 sync_descbuffer_for_device(ring, meta->dmaaddr,
1159 ring->rx_buffersize);
1160 *slot = next_slot(ring, *slot);
1161 cnt++;
1162 tmp -= ring->rx_buffersize;
1163 if (tmp <= 0)
1164 break;
1165 }
1166 printkl(KERN_ERR PFX "DMA RX buffer too small "
1167 "(len: %u, buffer: %u, nr-dropped: %d)\n",
1168 len, ring->rx_buffersize, cnt);
1169 goto drop;
1170 }
1171 len -= IEEE80211_FCS_LEN;
1172
1173 dmaaddr = meta->dmaaddr;
1174 err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC);
1175 if (unlikely(err)) {
1176 dprintkl(KERN_ERR PFX "DMA RX: setup_rx_descbuffer() failed\n");
1177 sync_descbuffer_for_device(ring, dmaaddr,
1178 ring->rx_buffersize);
1179 goto drop;
1180 }
1181
1182 unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
1183 skb_put(skb, len + ring->frameoffset);
1184 skb_pull(skb, ring->frameoffset);
1185
1186 err = bcm43xx_rx(ring->bcm, skb, rxhdr);
1187 if (err) {
1188 dev_kfree_skb_irq(skb);
1189 goto drop;
1190 }
1191
1192drop:
1193 return;
1194}
1195
1196void bcm43xx_dma_rx(struct bcm43xx_dmaring *ring)
1197{
1198 u32 status;
1199 u16 descptr;
1200 int slot, current_slot;
1201#ifdef CONFIG_BCM43XX_DEBUG
1202 int used_slots = 0;
1203#endif
1204
1205 assert(!ring->tx);
1206 if (ring->dma64) {
1207 status = bcm43xx_dma_read(ring, BCM43xx_DMA64_RXSTATUS);
1208 descptr = (status & BCM43xx_DMA64_RXSTATDPTR);
1209 current_slot = descptr / sizeof(struct bcm43xx_dmadesc64);
1210 } else {
1211 status = bcm43xx_dma_read(ring, BCM43xx_DMA32_RXSTATUS);
1212 descptr = (status & BCM43xx_DMA32_RXDPTR);
1213 current_slot = descptr / sizeof(struct bcm43xx_dmadesc32);
1214 }
1215 assert(current_slot >= 0 && current_slot < ring->nr_slots);
1216
1217 slot = ring->current_slot;
1218 for ( ; slot != current_slot; slot = next_slot(ring, slot)) {
1219 dma_rx(ring, &slot);
1220#ifdef CONFIG_BCM43XX_DEBUG
1221 if (++used_slots > ring->max_used_slots)
1222 ring->max_used_slots = used_slots;
1223#endif
1224 }
1225 if (ring->dma64) {
1226 bcm43xx_dma_write(ring, BCM43xx_DMA64_RXINDEX,
1227 (u32)(slot * sizeof(struct bcm43xx_dmadesc64)));
1228 } else {
1229 bcm43xx_dma_write(ring, BCM43xx_DMA32_RXINDEX,
1230 (u32)(slot * sizeof(struct bcm43xx_dmadesc32)));
1231 }
1232 ring->current_slot = slot;
1233}
1234
1235void bcm43xx_dma_tx_suspend(struct bcm43xx_dmaring *ring)
1236{
1237 assert(ring->tx);
1238 bcm43xx_power_saving_ctl_bits(ring->bcm, -1, 1);
1239 if (ring->dma64) {
1240 bcm43xx_dma_write(ring, BCM43xx_DMA64_TXCTL,
1241 bcm43xx_dma_read(ring, BCM43xx_DMA64_TXCTL)
1242 | BCM43xx_DMA64_TXSUSPEND);
1243 } else {
1244 bcm43xx_dma_write(ring, BCM43xx_DMA32_TXCTL,
1245 bcm43xx_dma_read(ring, BCM43xx_DMA32_TXCTL)
1246 | BCM43xx_DMA32_TXSUSPEND);
1247 }
1248}
1249
1250void bcm43xx_dma_tx_resume(struct bcm43xx_dmaring *ring)
1251{
1252 assert(ring->tx);
1253 if (ring->dma64) {
1254 bcm43xx_dma_write(ring, BCM43xx_DMA64_TXCTL,
1255 bcm43xx_dma_read(ring, BCM43xx_DMA64_TXCTL)
1256 & ~BCM43xx_DMA64_TXSUSPEND);
1257 } else {
1258 bcm43xx_dma_write(ring, BCM43xx_DMA32_TXCTL,
1259 bcm43xx_dma_read(ring, BCM43xx_DMA32_TXCTL)
1260 & ~BCM43xx_DMA32_TXSUSPEND);
1261 }
1262 bcm43xx_power_saving_ctl_bits(ring->bcm, -1, -1);
1263}
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_dma.h b/drivers/net/wireless/bcm43xx/bcm43xx_dma.h
deleted file mode 100644
index d1105e569a41..000000000000
--- a/drivers/net/wireless/bcm43xx/bcm43xx_dma.h
+++ /dev/null
@@ -1,386 +0,0 @@
1#ifndef BCM43xx_DMA_H_
2#define BCM43xx_DMA_H_
3
4#include <linux/list.h>
5#include <linux/spinlock.h>
6#include <linux/workqueue.h>
7#include <linux/dma-mapping.h>
8#include <linux/linkage.h>
9#include <asm/atomic.h>
10
11
12/* DMA-Interrupt reasons. */
13#define BCM43xx_DMAIRQ_FATALMASK ((1 << 10) | (1 << 11) | (1 << 12) \
14 | (1 << 14) | (1 << 15))
15#define BCM43xx_DMAIRQ_NONFATALMASK (1 << 13)
16#define BCM43xx_DMAIRQ_RX_DONE (1 << 16)
17
18
19/*** 32-bit DMA Engine. ***/
20
21/* 32-bit DMA controller registers. */
22#define BCM43xx_DMA32_TXCTL 0x00
23#define BCM43xx_DMA32_TXENABLE 0x00000001
24#define BCM43xx_DMA32_TXSUSPEND 0x00000002
25#define BCM43xx_DMA32_TXLOOPBACK 0x00000004
26#define BCM43xx_DMA32_TXFLUSH 0x00000010
27#define BCM43xx_DMA32_TXADDREXT_MASK 0x00030000
28#define BCM43xx_DMA32_TXADDREXT_SHIFT 16
29#define BCM43xx_DMA32_TXRING 0x04
30#define BCM43xx_DMA32_TXINDEX 0x08
31#define BCM43xx_DMA32_TXSTATUS 0x0C
32#define BCM43xx_DMA32_TXDPTR 0x00000FFF
33#define BCM43xx_DMA32_TXSTATE 0x0000F000
34#define BCM43xx_DMA32_TXSTAT_DISABLED 0x00000000
35#define BCM43xx_DMA32_TXSTAT_ACTIVE 0x00001000
36#define BCM43xx_DMA32_TXSTAT_IDLEWAIT 0x00002000
37#define BCM43xx_DMA32_TXSTAT_STOPPED 0x00003000
38#define BCM43xx_DMA32_TXSTAT_SUSP 0x00004000
39#define BCM43xx_DMA32_TXERROR 0x000F0000
40#define BCM43xx_DMA32_TXERR_NOERR 0x00000000
41#define BCM43xx_DMA32_TXERR_PROT 0x00010000
42#define BCM43xx_DMA32_TXERR_UNDERRUN 0x00020000
43#define BCM43xx_DMA32_TXERR_BUFREAD 0x00030000
44#define BCM43xx_DMA32_TXERR_DESCREAD 0x00040000
45#define BCM43xx_DMA32_TXACTIVE 0xFFF00000
46#define BCM43xx_DMA32_RXCTL 0x10
47#define BCM43xx_DMA32_RXENABLE 0x00000001
48#define BCM43xx_DMA32_RXFROFF_MASK 0x000000FE
49#define BCM43xx_DMA32_RXFROFF_SHIFT 1
50#define BCM43xx_DMA32_RXDIRECTFIFO 0x00000100
51#define BCM43xx_DMA32_RXADDREXT_MASK 0x00030000
52#define BCM43xx_DMA32_RXADDREXT_SHIFT 16
53#define BCM43xx_DMA32_RXRING 0x14
54#define BCM43xx_DMA32_RXINDEX 0x18
55#define BCM43xx_DMA32_RXSTATUS 0x1C
56#define BCM43xx_DMA32_RXDPTR 0x00000FFF
57#define BCM43xx_DMA32_RXSTATE 0x0000F000
58#define BCM43xx_DMA32_RXSTAT_DISABLED 0x00000000
59#define BCM43xx_DMA32_RXSTAT_ACTIVE 0x00001000
60#define BCM43xx_DMA32_RXSTAT_IDLEWAIT 0x00002000
61#define BCM43xx_DMA32_RXSTAT_STOPPED 0x00003000
62#define BCM43xx_DMA32_RXERROR 0x000F0000
63#define BCM43xx_DMA32_RXERR_NOERR 0x00000000
64#define BCM43xx_DMA32_RXERR_PROT 0x00010000
65#define BCM43xx_DMA32_RXERR_OVERFLOW 0x00020000
66#define BCM43xx_DMA32_RXERR_BUFWRITE 0x00030000
67#define BCM43xx_DMA32_RXERR_DESCREAD 0x00040000
68#define BCM43xx_DMA32_RXACTIVE 0xFFF00000
69
70/* 32-bit DMA descriptor. */
71struct bcm43xx_dmadesc32 {
72 __le32 control;
73 __le32 address;
74} __attribute__((__packed__));
75#define BCM43xx_DMA32_DCTL_BYTECNT 0x00001FFF
76#define BCM43xx_DMA32_DCTL_ADDREXT_MASK 0x00030000
77#define BCM43xx_DMA32_DCTL_ADDREXT_SHIFT 16
78#define BCM43xx_DMA32_DCTL_DTABLEEND 0x10000000
79#define BCM43xx_DMA32_DCTL_IRQ 0x20000000
80#define BCM43xx_DMA32_DCTL_FRAMEEND 0x40000000
81#define BCM43xx_DMA32_DCTL_FRAMESTART 0x80000000
82
83/* Address field Routing value. */
84#define BCM43xx_DMA32_ROUTING 0xC0000000
85#define BCM43xx_DMA32_ROUTING_SHIFT 30
86#define BCM43xx_DMA32_NOTRANS 0x00000000
87#define BCM43xx_DMA32_CLIENTTRANS 0x40000000
88
89
90
91/*** 64-bit DMA Engine. ***/
92
93/* 64-bit DMA controller registers. */
94#define BCM43xx_DMA64_TXCTL 0x00
95#define BCM43xx_DMA64_TXENABLE 0x00000001
96#define BCM43xx_DMA64_TXSUSPEND 0x00000002
97#define BCM43xx_DMA64_TXLOOPBACK 0x00000004
98#define BCM43xx_DMA64_TXFLUSH 0x00000010
99#define BCM43xx_DMA64_TXADDREXT_MASK 0x00030000
100#define BCM43xx_DMA64_TXADDREXT_SHIFT 16
101#define BCM43xx_DMA64_TXINDEX 0x04
102#define BCM43xx_DMA64_TXRINGLO 0x08
103#define BCM43xx_DMA64_TXRINGHI 0x0C
104#define BCM43xx_DMA64_TXSTATUS 0x10
105#define BCM43xx_DMA64_TXSTATDPTR 0x00001FFF
106#define BCM43xx_DMA64_TXSTAT 0xF0000000
107#define BCM43xx_DMA64_TXSTAT_DISABLED 0x00000000
108#define BCM43xx_DMA64_TXSTAT_ACTIVE 0x10000000
109#define BCM43xx_DMA64_TXSTAT_IDLEWAIT 0x20000000
110#define BCM43xx_DMA64_TXSTAT_STOPPED 0x30000000
111#define BCM43xx_DMA64_TXSTAT_SUSP 0x40000000
112#define BCM43xx_DMA64_TXERROR 0x14
113#define BCM43xx_DMA64_TXERRDPTR 0x0001FFFF
114#define BCM43xx_DMA64_TXERR 0xF0000000
115#define BCM43xx_DMA64_TXERR_NOERR 0x00000000
116#define BCM43xx_DMA64_TXERR_PROT 0x10000000
117#define BCM43xx_DMA64_TXERR_UNDERRUN 0x20000000
118#define BCM43xx_DMA64_TXERR_TRANSFER 0x30000000
119#define BCM43xx_DMA64_TXERR_DESCREAD 0x40000000
120#define BCM43xx_DMA64_TXERR_CORE 0x50000000
121#define BCM43xx_DMA64_RXCTL 0x20
122#define BCM43xx_DMA64_RXENABLE 0x00000001
123#define BCM43xx_DMA64_RXFROFF_MASK 0x000000FE
124#define BCM43xx_DMA64_RXFROFF_SHIFT 1
125#define BCM43xx_DMA64_RXDIRECTFIFO 0x00000100
126#define BCM43xx_DMA64_RXADDREXT_MASK 0x00030000
127#define BCM43xx_DMA64_RXADDREXT_SHIFT 16
128#define BCM43xx_DMA64_RXINDEX 0x24
129#define BCM43xx_DMA64_RXRINGLO 0x28
130#define BCM43xx_DMA64_RXRINGHI 0x2C
131#define BCM43xx_DMA64_RXSTATUS 0x30
132#define BCM43xx_DMA64_RXSTATDPTR 0x00001FFF
133#define BCM43xx_DMA64_RXSTAT 0xF0000000
134#define BCM43xx_DMA64_RXSTAT_DISABLED 0x00000000
135#define BCM43xx_DMA64_RXSTAT_ACTIVE 0x10000000
136#define BCM43xx_DMA64_RXSTAT_IDLEWAIT 0x20000000
137#define BCM43xx_DMA64_RXSTAT_STOPPED 0x30000000
138#define BCM43xx_DMA64_RXSTAT_SUSP 0x40000000
139#define BCM43xx_DMA64_RXERROR 0x34
140#define BCM43xx_DMA64_RXERRDPTR 0x0001FFFF
141#define BCM43xx_DMA64_RXERR 0xF0000000
142#define BCM43xx_DMA64_RXERR_NOERR 0x00000000
143#define BCM43xx_DMA64_RXERR_PROT 0x10000000
144#define BCM43xx_DMA64_RXERR_UNDERRUN 0x20000000
145#define BCM43xx_DMA64_RXERR_TRANSFER 0x30000000
146#define BCM43xx_DMA64_RXERR_DESCREAD 0x40000000
147#define BCM43xx_DMA64_RXERR_CORE 0x50000000
148
149/* 64-bit DMA descriptor. */
150struct bcm43xx_dmadesc64 {
151 __le32 control0;
152 __le32 control1;
153 __le32 address_low;
154 __le32 address_high;
155} __attribute__((__packed__));
156#define BCM43xx_DMA64_DCTL0_DTABLEEND 0x10000000
157#define BCM43xx_DMA64_DCTL0_IRQ 0x20000000
158#define BCM43xx_DMA64_DCTL0_FRAMEEND 0x40000000
159#define BCM43xx_DMA64_DCTL0_FRAMESTART 0x80000000
160#define BCM43xx_DMA64_DCTL1_BYTECNT 0x00001FFF
161#define BCM43xx_DMA64_DCTL1_ADDREXT_MASK 0x00030000
162#define BCM43xx_DMA64_DCTL1_ADDREXT_SHIFT 16
163
164/* Address field Routing value. */
165#define BCM43xx_DMA64_ROUTING 0xC0000000
166#define BCM43xx_DMA64_ROUTING_SHIFT 30
167#define BCM43xx_DMA64_NOTRANS 0x00000000
168#define BCM43xx_DMA64_CLIENTTRANS 0x80000000
169
170
171
172struct bcm43xx_dmadesc_generic {
173 union {
174 struct bcm43xx_dmadesc32 dma32;
175 struct bcm43xx_dmadesc64 dma64;
176 } __attribute__((__packed__));
177} __attribute__((__packed__));
178
179
180/* Misc DMA constants */
181#define BCM43xx_DMA_RINGMEMSIZE PAGE_SIZE
182#define BCM43xx_DMA0_RX_FRAMEOFFSET 30
183#define BCM43xx_DMA3_RX_FRAMEOFFSET 0
184
185
186/* DMA engine tuning knobs */
187#define BCM43xx_TXRING_SLOTS 512
188#define BCM43xx_RXRING_SLOTS 64
189#define BCM43xx_DMA0_RX_BUFFERSIZE (2304 + 100)
190#define BCM43xx_DMA3_RX_BUFFERSIZE 16
 191/* Suspend the TX queue if less than this percentage of slots is free. */
192#define BCM43xx_TXSUSPEND_PERCENT 20
 193/* Resume the TX queue if more than this percentage of slots is free. */
194#define BCM43xx_TXRESUME_PERCENT 50
195
196
197
198#ifdef CONFIG_BCM43XX_DMA
199
200
201struct sk_buff;
202struct bcm43xx_private;
203struct bcm43xx_xmitstatus;
204
205
206struct bcm43xx_dmadesc_meta {
207 /* The kernel DMA-able buffer. */
208 struct sk_buff *skb;
209 /* DMA base bus-address of the descriptor buffer. */
210 dma_addr_t dmaaddr;
211};
212
213struct bcm43xx_dmaring {
214 /* Kernel virtual base address of the ring memory. */
215 void *descbase;
216 /* Meta data about all descriptors. */
217 struct bcm43xx_dmadesc_meta *meta;
218 /* DMA Routing value. */
219 u32 routing;
220 /* (Unadjusted) DMA base bus-address of the ring memory. */
221 dma_addr_t dmabase;
222 /* Number of descriptor slots in the ring. */
223 int nr_slots;
224 /* Number of used descriptor slots. */
225 int used_slots;
226 /* Currently used slot in the ring. */
227 int current_slot;
228 /* Marks to suspend/resume the queue. */
229 int suspend_mark;
230 int resume_mark;
231 /* Frameoffset in octets. */
232 u32 frameoffset;
233 /* Descriptor buffer size. */
234 u16 rx_buffersize;
235 /* The MMIO base register of the DMA controller. */
236 u16 mmio_base;
237 /* DMA controller index number (0-5). */
238 int index;
239 /* Boolean. Is this a TX ring? */
240 u8 tx;
241 /* Boolean. 64bit DMA if true, 32bit DMA otherwise. */
242 u8 dma64;
243 /* Boolean. Are transfers suspended on this ring? */
244 u8 suspended;
245 struct bcm43xx_private *bcm;
246#ifdef CONFIG_BCM43XX_DEBUG
247 /* Maximum number of used slots. */
248 int max_used_slots;
249#endif /* CONFIG_BCM43XX_DEBUG */
250};
251
252
253static inline
254int bcm43xx_dma_desc2idx(struct bcm43xx_dmaring *ring,
255 struct bcm43xx_dmadesc_generic *desc)
256{
257 if (ring->dma64) {
258 struct bcm43xx_dmadesc64 *dd64 = ring->descbase;
259 return (int)(&(desc->dma64) - dd64);
260 } else {
261 struct bcm43xx_dmadesc32 *dd32 = ring->descbase;
262 return (int)(&(desc->dma32) - dd32);
263 }
264}
265
266static inline
267struct bcm43xx_dmadesc_generic * bcm43xx_dma_idx2desc(struct bcm43xx_dmaring *ring,
268 int slot,
269 struct bcm43xx_dmadesc_meta **meta)
270{
271 *meta = &(ring->meta[slot]);
272 if (ring->dma64) {
273 struct bcm43xx_dmadesc64 *dd64 = ring->descbase;
274 return (struct bcm43xx_dmadesc_generic *)(&(dd64[slot]));
275 } else {
276 struct bcm43xx_dmadesc32 *dd32 = ring->descbase;
277 return (struct bcm43xx_dmadesc_generic *)(&(dd32[slot]));
278 }
279}
280
281static inline
282u32 bcm43xx_dma_read(struct bcm43xx_dmaring *ring,
283 u16 offset)
284{
285 return bcm43xx_read32(ring->bcm, ring->mmio_base + offset);
286}
287
288static inline
289void bcm43xx_dma_write(struct bcm43xx_dmaring *ring,
290 u16 offset, u32 value)
291{
292 bcm43xx_write32(ring->bcm, ring->mmio_base + offset, value);
293}
294
295
296int bcm43xx_dma_init(struct bcm43xx_private *bcm);
297void bcm43xx_dma_free(struct bcm43xx_private *bcm);
298
299int bcm43xx_dmacontroller_rx_reset(struct bcm43xx_private *bcm,
300 u16 dmacontroller_mmio_base,
301 int dma64);
302int bcm43xx_dmacontroller_tx_reset(struct bcm43xx_private *bcm,
303 u16 dmacontroller_mmio_base,
304 int dma64);
305
306u16 bcm43xx_dmacontroller_base(int dma64bit, int dmacontroller_idx);
307
308void bcm43xx_dma_tx_suspend(struct bcm43xx_dmaring *ring);
309void bcm43xx_dma_tx_resume(struct bcm43xx_dmaring *ring);
310
311void bcm43xx_dma_handle_xmitstatus(struct bcm43xx_private *bcm,
312 struct bcm43xx_xmitstatus *status);
313
314int bcm43xx_dma_tx(struct bcm43xx_private *bcm,
315 struct ieee80211_txb *txb);
316void bcm43xx_dma_rx(struct bcm43xx_dmaring *ring);
317
318/* Helper function that returns the dma mask for this device. */
319static inline
320u64 bcm43xx_get_supported_dma_mask(struct bcm43xx_private *bcm)
321{
322 int dma64 = bcm43xx_read32(bcm, BCM43xx_CIR_SBTMSTATEHIGH) &
323 BCM43xx_SBTMSTATEHIGH_DMA64BIT;
324 u16 mmio_base = bcm43xx_dmacontroller_base(dma64, 0);
325 u32 mask = BCM43xx_DMA32_TXADDREXT_MASK;
326
327 if (dma64)
328 return DMA_64BIT_MASK;
329 bcm43xx_write32(bcm, mmio_base + BCM43xx_DMA32_TXCTL, mask);
330 if (bcm43xx_read32(bcm, mmio_base + BCM43xx_DMA32_TXCTL) & mask)
331 return DMA_32BIT_MASK;
332 return DMA_30BIT_MASK;
333}
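/* A minimal usage sketch (illustrative, not taken from the original header):
 * the probed mask would typically be handed to the PCI DMA API before any
 * rings are allocated.  Assumes <linux/pci.h> is available and that
 * bcm->pci_dev points at the device, as it does elsewhere in this driver;
 * the helper name is hypothetical. */
static inline int bcm43xx_apply_dma_mask(struct bcm43xx_private *bcm)
{
	u64 mask = bcm43xx_get_supported_dma_mask(bcm);
	int err;

	err = pci_set_dma_mask(bcm->pci_dev, mask);
	if (err)
		return err;
	return pci_set_consistent_dma_mask(bcm->pci_dev, mask);
}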
334
335#else /* CONFIG_BCM43XX_DMA */
336
337
338static inline
339int bcm43xx_dma_init(struct bcm43xx_private *bcm)
340{
341 return 0;
342}
343static inline
344void bcm43xx_dma_free(struct bcm43xx_private *bcm)
345{
346}
347static inline
348int bcm43xx_dmacontroller_rx_reset(struct bcm43xx_private *bcm,
349 u16 dmacontroller_mmio_base,
350 int dma64)
351{
352 return 0;
353}
354static inline
355int bcm43xx_dmacontroller_tx_reset(struct bcm43xx_private *bcm,
356 u16 dmacontroller_mmio_base,
357 int dma64)
358{
359 return 0;
360}
361static inline
362int bcm43xx_dma_tx(struct bcm43xx_private *bcm,
363 struct ieee80211_txb *txb)
364{
365 return 0;
366}
367static inline
368void bcm43xx_dma_handle_xmitstatus(struct bcm43xx_private *bcm,
369 struct bcm43xx_xmitstatus *status)
370{
371}
372static inline
373void bcm43xx_dma_rx(struct bcm43xx_dmaring *ring)
374{
375}
376static inline
377void bcm43xx_dma_tx_suspend(struct bcm43xx_dmaring *ring)
378{
379}
380static inline
381void bcm43xx_dma_tx_resume(struct bcm43xx_dmaring *ring)
382{
383}
384
385#endif /* CONFIG_BCM43XX_DMA */
386#endif /* BCM43xx_DMA_H_ */
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_ethtool.c b/drivers/net/wireless/bcm43xx/bcm43xx_ethtool.c
deleted file mode 100644
index d2df6a0100a1..000000000000
--- a/drivers/net/wireless/bcm43xx/bcm43xx_ethtool.c
+++ /dev/null
@@ -1,50 +0,0 @@
1/*
2
3 Broadcom BCM43xx wireless driver
4
5 ethtool support
6
7 Copyright (c) 2006 Jason Lunz <lunz@falooley.org>
8
9 Some code in this file is derived from the 8139too.c driver
10 Copyright (C) 2002 Jeff Garzik
11
12 This program is free software; you can redistribute it and/or modify
13 it under the terms of the GNU General Public License as published by
14 the Free Software Foundation; either version 2 of the License, or
15 (at your option) any later version.
16
17 This program is distributed in the hope that it will be useful,
18 but WITHOUT ANY WARRANTY; without even the implied warranty of
19 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 GNU General Public License for more details.
21
22 You should have received a copy of the GNU General Public License
23 along with this program; see the file COPYING. If not, write to
24 the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
25 Boston, MA 02110-1301, USA.
26
27*/
28
29#include "bcm43xx.h"
30#include "bcm43xx_ethtool.h"
31
32#include <linux/netdevice.h>
33#include <linux/pci.h>
34#include <linux/string.h>
35#include <linux/utsname.h>
36
37
38static void bcm43xx_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
39{
40 struct bcm43xx_private *bcm = bcm43xx_priv(dev);
41
42 strncpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
43 strncpy(info->version, utsname()->release, sizeof(info->version));
44 strncpy(info->bus_info, pci_name(bcm->pci_dev), ETHTOOL_BUSINFO_LEN);
45}
46
47const struct ethtool_ops bcm43xx_ethtool_ops = {
48 .get_drvinfo = bcm43xx_get_drvinfo,
49 .get_link = ethtool_op_get_link,
50};
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_ethtool.h b/drivers/net/wireless/bcm43xx/bcm43xx_ethtool.h
deleted file mode 100644
index 6f8d42d3cdf5..000000000000
--- a/drivers/net/wireless/bcm43xx/bcm43xx_ethtool.h
+++ /dev/null
@@ -1,8 +0,0 @@
1#ifndef BCM43xx_ETHTOOL_H_
2#define BCM43xx_ETHTOOL_H_
3
4#include <linux/ethtool.h>
5
6extern const struct ethtool_ops bcm43xx_ethtool_ops;
7
8#endif /* BCM43xx_ETHTOOL_H_ */
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_ilt.c b/drivers/net/wireless/bcm43xx/bcm43xx_ilt.c
deleted file mode 100644
index f2b8dbac55a4..000000000000
--- a/drivers/net/wireless/bcm43xx/bcm43xx_ilt.c
+++ /dev/null
@@ -1,352 +0,0 @@
1/*
2
3 Broadcom BCM43xx wireless driver
4
5 Copyright (c) 2005 Martin Langer <martin-langer@gmx.de>,
6 Stefano Brivio <st3@riseup.net>
7 Michael Buesch <mbuesch@freenet.de>
8 Danny van Dyk <kugelfang@gentoo.org>
9 Andreas Jaggi <andreas.jaggi@waterwave.ch>
10
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License as published by
13 the Free Software Foundation; either version 2 of the License, or
14 (at your option) any later version.
15
16 This program is distributed in the hope that it will be useful,
17 but WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 GNU General Public License for more details.
20
21 You should have received a copy of the GNU General Public License
22 along with this program; see the file COPYING. If not, write to
23 the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
24 Boston, MA 02110-1301, USA.
25
26*/
27
28#include "bcm43xx.h"
29#include "bcm43xx_ilt.h"
30#include "bcm43xx_phy.h"
31
32
33/**** Initial Internal Lookup Tables ****/
34
35const u32 bcm43xx_ilt_rotor[BCM43xx_ILT_ROTOR_SIZE] = {
36 0xFEB93FFD, 0xFEC63FFD, /* 0 */
37 0xFED23FFD, 0xFEDF3FFD,
38 0xFEEC3FFE, 0xFEF83FFE,
39 0xFF053FFE, 0xFF113FFE,
40 0xFF1E3FFE, 0xFF2A3FFF, /* 8 */
41 0xFF373FFF, 0xFF443FFF,
42 0xFF503FFF, 0xFF5D3FFF,
43 0xFF693FFF, 0xFF763FFF,
44 0xFF824000, 0xFF8F4000, /* 16 */
45 0xFF9B4000, 0xFFA84000,
46 0xFFB54000, 0xFFC14000,
47 0xFFCE4000, 0xFFDA4000,
48 0xFFE74000, 0xFFF34000, /* 24 */
49 0x00004000, 0x000D4000,
50 0x00194000, 0x00264000,
51 0x00324000, 0x003F4000,
52 0x004B4000, 0x00584000, /* 32 */
53 0x00654000, 0x00714000,
54 0x007E4000, 0x008A3FFF,
55 0x00973FFF, 0x00A33FFF,
56 0x00B03FFF, 0x00BC3FFF, /* 40 */
57 0x00C93FFF, 0x00D63FFF,
58 0x00E23FFE, 0x00EF3FFE,
59 0x00FB3FFE, 0x01083FFE,
60 0x01143FFE, 0x01213FFD, /* 48 */
61 0x012E3FFD, 0x013A3FFD,
62 0x01473FFD,
63};
64
65const u32 bcm43xx_ilt_retard[BCM43xx_ILT_RETARD_SIZE] = {
66 0xDB93CB87, 0xD666CF64, /* 0 */
67 0xD1FDD358, 0xCDA6D826,
68 0xCA38DD9F, 0xC729E2B4,
69 0xC469E88E, 0xC26AEE2B,
70 0xC0DEF46C, 0xC073FA62, /* 8 */
71 0xC01D00D5, 0xC0760743,
72 0xC1560D1E, 0xC2E51369,
73 0xC4ED18FF, 0xC7AC1ED7,
74 0xCB2823B2, 0xCEFA28D9, /* 16 */
75 0xD2F62D3F, 0xD7BB3197,
76 0xDCE53568, 0xE1FE3875,
77 0xE7D13B35, 0xED663D35,
78 0xF39B3EC4, 0xF98E3FA7, /* 24 */
79 0x00004000, 0x06723FA7,
80 0x0C653EC4, 0x129A3D35,
81 0x182F3B35, 0x1E023875,
82 0x231B3568, 0x28453197, /* 32 */
83 0x2D0A2D3F, 0x310628D9,
84 0x34D823B2, 0x38541ED7,
85 0x3B1318FF, 0x3D1B1369,
86 0x3EAA0D1E, 0x3F8A0743, /* 40 */
87 0x3FE300D5, 0x3F8DFA62,
88 0x3F22F46C, 0x3D96EE2B,
89 0x3B97E88E, 0x38D7E2B4,
90 0x35C8DD9F, 0x325AD826, /* 48 */
91 0x2E03D358, 0x299ACF64,
92 0x246DCB87,
93};
94
95const u16 bcm43xx_ilt_finefreqa[BCM43xx_ILT_FINEFREQA_SIZE] = {
96 0x0082, 0x0082, 0x0102, 0x0182, /* 0 */
97 0x0202, 0x0282, 0x0302, 0x0382,
98 0x0402, 0x0482, 0x0502, 0x0582,
99 0x05E2, 0x0662, 0x06E2, 0x0762,
100 0x07E2, 0x0842, 0x08C2, 0x0942, /* 16 */
101 0x09C2, 0x0A22, 0x0AA2, 0x0B02,
102 0x0B82, 0x0BE2, 0x0C62, 0x0CC2,
103 0x0D42, 0x0DA2, 0x0E02, 0x0E62,
104 0x0EE2, 0x0F42, 0x0FA2, 0x1002, /* 32 */
105 0x1062, 0x10C2, 0x1122, 0x1182,
106 0x11E2, 0x1242, 0x12A2, 0x12E2,
107 0x1342, 0x13A2, 0x1402, 0x1442,
108 0x14A2, 0x14E2, 0x1542, 0x1582, /* 48 */
109 0x15E2, 0x1622, 0x1662, 0x16C1,
110 0x1701, 0x1741, 0x1781, 0x17E1,
111 0x1821, 0x1861, 0x18A1, 0x18E1,
112 0x1921, 0x1961, 0x19A1, 0x19E1, /* 64 */
113 0x1A21, 0x1A61, 0x1AA1, 0x1AC1,
114 0x1B01, 0x1B41, 0x1B81, 0x1BA1,
115 0x1BE1, 0x1C21, 0x1C41, 0x1C81,
116 0x1CA1, 0x1CE1, 0x1D01, 0x1D41, /* 80 */
117 0x1D61, 0x1DA1, 0x1DC1, 0x1E01,
118 0x1E21, 0x1E61, 0x1E81, 0x1EA1,
119 0x1EE1, 0x1F01, 0x1F21, 0x1F41,
120 0x1F81, 0x1FA1, 0x1FC1, 0x1FE1, /* 96 */
121 0x2001, 0x2041, 0x2061, 0x2081,
122 0x20A1, 0x20C1, 0x20E1, 0x2101,
123 0x2121, 0x2141, 0x2161, 0x2181,
124 0x21A1, 0x21C1, 0x21E1, 0x2201, /* 112 */
125 0x2221, 0x2241, 0x2261, 0x2281,
126 0x22A1, 0x22C1, 0x22C1, 0x22E1,
127 0x2301, 0x2321, 0x2341, 0x2361,
128 0x2361, 0x2381, 0x23A1, 0x23C1, /* 128 */
129 0x23E1, 0x23E1, 0x2401, 0x2421,
130 0x2441, 0x2441, 0x2461, 0x2481,
131 0x2481, 0x24A1, 0x24C1, 0x24C1,
132 0x24E1, 0x2501, 0x2501, 0x2521, /* 144 */
133 0x2541, 0x2541, 0x2561, 0x2561,
134 0x2581, 0x25A1, 0x25A1, 0x25C1,
135 0x25C1, 0x25E1, 0x2601, 0x2601,
136 0x2621, 0x2621, 0x2641, 0x2641, /* 160 */
137 0x2661, 0x2661, 0x2681, 0x2681,
138 0x26A1, 0x26A1, 0x26C1, 0x26C1,
139 0x26E1, 0x26E1, 0x2701, 0x2701,
140 0x2721, 0x2721, 0x2740, 0x2740, /* 176 */
141 0x2760, 0x2760, 0x2780, 0x2780,
142 0x2780, 0x27A0, 0x27A0, 0x27C0,
143 0x27C0, 0x27E0, 0x27E0, 0x27E0,
144 0x2800, 0x2800, 0x2820, 0x2820, /* 192 */
145 0x2820, 0x2840, 0x2840, 0x2840,
146 0x2860, 0x2860, 0x2880, 0x2880,
147 0x2880, 0x28A0, 0x28A0, 0x28A0,
148 0x28C0, 0x28C0, 0x28C0, 0x28E0, /* 208 */
149 0x28E0, 0x28E0, 0x2900, 0x2900,
150 0x2900, 0x2920, 0x2920, 0x2920,
151 0x2940, 0x2940, 0x2940, 0x2960,
152 0x2960, 0x2960, 0x2960, 0x2980, /* 224 */
153 0x2980, 0x2980, 0x29A0, 0x29A0,
154 0x29A0, 0x29A0, 0x29C0, 0x29C0,
155 0x29C0, 0x29E0, 0x29E0, 0x29E0,
156 0x29E0, 0x2A00, 0x2A00, 0x2A00, /* 240 */
157 0x2A00, 0x2A20, 0x2A20, 0x2A20,
158 0x2A20, 0x2A40, 0x2A40, 0x2A40,
159 0x2A40, 0x2A60, 0x2A60, 0x2A60,
160};
161
162const u16 bcm43xx_ilt_finefreqg[BCM43xx_ILT_FINEFREQG_SIZE] = {
163 0x0089, 0x02E9, 0x0409, 0x04E9, /* 0 */
164 0x05A9, 0x0669, 0x0709, 0x0789,
165 0x0829, 0x08A9, 0x0929, 0x0989,
166 0x0A09, 0x0A69, 0x0AC9, 0x0B29,
167 0x0BA9, 0x0BE9, 0x0C49, 0x0CA9, /* 16 */
168 0x0D09, 0x0D69, 0x0DA9, 0x0E09,
169 0x0E69, 0x0EA9, 0x0F09, 0x0F49,
170 0x0FA9, 0x0FE9, 0x1029, 0x1089,
171 0x10C9, 0x1109, 0x1169, 0x11A9, /* 32 */
172 0x11E9, 0x1229, 0x1289, 0x12C9,
173 0x1309, 0x1349, 0x1389, 0x13C9,
174 0x1409, 0x1449, 0x14A9, 0x14E9,
175 0x1529, 0x1569, 0x15A9, 0x15E9, /* 48 */
176 0x1629, 0x1669, 0x16A9, 0x16E8,
177 0x1728, 0x1768, 0x17A8, 0x17E8,
178 0x1828, 0x1868, 0x18A8, 0x18E8,
179 0x1928, 0x1968, 0x19A8, 0x19E8, /* 64 */
180 0x1A28, 0x1A68, 0x1AA8, 0x1AE8,
181 0x1B28, 0x1B68, 0x1BA8, 0x1BE8,
182 0x1C28, 0x1C68, 0x1CA8, 0x1CE8,
183 0x1D28, 0x1D68, 0x1DC8, 0x1E08, /* 80 */
184 0x1E48, 0x1E88, 0x1EC8, 0x1F08,
185 0x1F48, 0x1F88, 0x1FE8, 0x2028,
186 0x2068, 0x20A8, 0x2108, 0x2148,
187 0x2188, 0x21C8, 0x2228, 0x2268, /* 96 */
188 0x22C8, 0x2308, 0x2348, 0x23A8,
189 0x23E8, 0x2448, 0x24A8, 0x24E8,
190 0x2548, 0x25A8, 0x2608, 0x2668,
191 0x26C8, 0x2728, 0x2787, 0x27E7, /* 112 */
192 0x2847, 0x28C7, 0x2947, 0x29A7,
193 0x2A27, 0x2AC7, 0x2B47, 0x2BE7,
194 0x2CA7, 0x2D67, 0x2E47, 0x2F67,
195 0x3247, 0x3526, 0x3646, 0x3726, /* 128 */
196 0x3806, 0x38A6, 0x3946, 0x39E6,
197 0x3A66, 0x3AE6, 0x3B66, 0x3BC6,
198 0x3C45, 0x3CA5, 0x3D05, 0x3D85,
199 0x3DE5, 0x3E45, 0x3EA5, 0x3EE5, /* 144 */
200 0x3F45, 0x3FA5, 0x4005, 0x4045,
201 0x40A5, 0x40E5, 0x4145, 0x4185,
202 0x41E5, 0x4225, 0x4265, 0x42C5,
203 0x4305, 0x4345, 0x43A5, 0x43E5, /* 160 */
204 0x4424, 0x4464, 0x44C4, 0x4504,
205 0x4544, 0x4584, 0x45C4, 0x4604,
206 0x4644, 0x46A4, 0x46E4, 0x4724,
207 0x4764, 0x47A4, 0x47E4, 0x4824, /* 176 */
208 0x4864, 0x48A4, 0x48E4, 0x4924,
209 0x4964, 0x49A4, 0x49E4, 0x4A24,
210 0x4A64, 0x4AA4, 0x4AE4, 0x4B23,
211 0x4B63, 0x4BA3, 0x4BE3, 0x4C23, /* 192 */
212 0x4C63, 0x4CA3, 0x4CE3, 0x4D23,
213 0x4D63, 0x4DA3, 0x4DE3, 0x4E23,
214 0x4E63, 0x4EA3, 0x4EE3, 0x4F23,
215 0x4F63, 0x4FC3, 0x5003, 0x5043, /* 208 */
216 0x5083, 0x50C3, 0x5103, 0x5143,
217 0x5183, 0x51E2, 0x5222, 0x5262,
218 0x52A2, 0x52E2, 0x5342, 0x5382,
219 0x53C2, 0x5402, 0x5462, 0x54A2, /* 224 */
220 0x5502, 0x5542, 0x55A2, 0x55E2,
221 0x5642, 0x5682, 0x56E2, 0x5722,
222 0x5782, 0x57E1, 0x5841, 0x58A1,
223 0x5901, 0x5961, 0x59C1, 0x5A21, /* 240 */
224 0x5AA1, 0x5B01, 0x5B81, 0x5BE1,
225 0x5C61, 0x5D01, 0x5D80, 0x5E20,
226 0x5EE0, 0x5FA0, 0x6080, 0x61C0,
227};
228
229const u16 bcm43xx_ilt_noisea2[BCM43xx_ILT_NOISEA2_SIZE] = {
230 0x0001, 0x0001, 0x0001, 0xFFFE,
231 0xFFFE, 0x3FFF, 0x1000, 0x0393,
232};
233
234const u16 bcm43xx_ilt_noisea3[BCM43xx_ILT_NOISEA3_SIZE] = {
235 0x4C4C, 0x4C4C, 0x4C4C, 0x2D36,
236 0x4C4C, 0x4C4C, 0x4C4C, 0x2D36,
237};
238
239const u16 bcm43xx_ilt_noiseg1[BCM43xx_ILT_NOISEG1_SIZE] = {
240 0x013C, 0x01F5, 0x031A, 0x0631,
241 0x0001, 0x0001, 0x0001, 0x0001,
242};
243
244const u16 bcm43xx_ilt_noiseg2[BCM43xx_ILT_NOISEG2_SIZE] = {
245 0x5484, 0x3C40, 0x0000, 0x0000,
246 0x0000, 0x0000, 0x0000, 0x0000,
247};
248
249const u16 bcm43xx_ilt_noisescaleg1[BCM43xx_ILT_NOISESCALEG_SIZE] = {
250 0x6C77, 0x5162, 0x3B40, 0x3335, /* 0 */
251 0x2F2D, 0x2A2A, 0x2527, 0x1F21,
252 0x1A1D, 0x1719, 0x1616, 0x1414,
253 0x1414, 0x1400, 0x1414, 0x1614,
254 0x1716, 0x1A19, 0x1F1D, 0x2521, /* 16 */
255 0x2A27, 0x2F2A, 0x332D, 0x3B35,
256 0x5140, 0x6C62, 0x0077,
257};
258
259const u16 bcm43xx_ilt_noisescaleg2[BCM43xx_ILT_NOISESCALEG_SIZE] = {
260 0xD8DD, 0xCBD4, 0xBCC0, 0xB6B7, /* 0 */
261 0xB2B0, 0xADAD, 0xA7A9, 0x9FA1,
262 0x969B, 0x9195, 0x8F8F, 0x8A8A,
263 0x8A8A, 0x8A00, 0x8A8A, 0x8F8A,
264 0x918F, 0x9695, 0x9F9B, 0xA7A1, /* 16 */
265 0xADA9, 0xB2AD, 0xB6B0, 0xBCB7,
266 0xCBC0, 0xD8D4, 0x00DD,
267};
268
269const u16 bcm43xx_ilt_noisescaleg3[BCM43xx_ILT_NOISESCALEG_SIZE] = {
270 0xA4A4, 0xA4A4, 0xA4A4, 0xA4A4, /* 0 */
271 0xA4A4, 0xA4A4, 0xA4A4, 0xA4A4,
272 0xA4A4, 0xA4A4, 0xA4A4, 0xA4A4,
273 0xA4A4, 0xA400, 0xA4A4, 0xA4A4,
274 0xA4A4, 0xA4A4, 0xA4A4, 0xA4A4, /* 16 */
275 0xA4A4, 0xA4A4, 0xA4A4, 0xA4A4,
276 0xA4A4, 0xA4A4, 0x00A4,
277};
278
279const u16 bcm43xx_ilt_sigmasqr1[BCM43xx_ILT_SIGMASQR_SIZE] = {
280 0x007A, 0x0075, 0x0071, 0x006C, /* 0 */
281 0x0067, 0x0063, 0x005E, 0x0059,
282 0x0054, 0x0050, 0x004B, 0x0046,
283 0x0042, 0x003D, 0x003D, 0x003D,
284 0x003D, 0x003D, 0x003D, 0x003D, /* 16 */
285 0x003D, 0x003D, 0x003D, 0x003D,
286 0x003D, 0x003D, 0x0000, 0x003D,
287 0x003D, 0x003D, 0x003D, 0x003D,
288 0x003D, 0x003D, 0x003D, 0x003D, /* 32 */
289 0x003D, 0x003D, 0x003D, 0x003D,
290 0x0042, 0x0046, 0x004B, 0x0050,
291 0x0054, 0x0059, 0x005E, 0x0063,
292 0x0067, 0x006C, 0x0071, 0x0075, /* 48 */
293 0x007A,
294};
295
296const u16 bcm43xx_ilt_sigmasqr2[BCM43xx_ILT_SIGMASQR_SIZE] = {
297 0x00DE, 0x00DC, 0x00DA, 0x00D8, /* 0 */
298 0x00D6, 0x00D4, 0x00D2, 0x00CF,
299 0x00CD, 0x00CA, 0x00C7, 0x00C4,
300 0x00C1, 0x00BE, 0x00BE, 0x00BE,
301 0x00BE, 0x00BE, 0x00BE, 0x00BE, /* 16 */
302 0x00BE, 0x00BE, 0x00BE, 0x00BE,
303 0x00BE, 0x00BE, 0x0000, 0x00BE,
304 0x00BE, 0x00BE, 0x00BE, 0x00BE,
305 0x00BE, 0x00BE, 0x00BE, 0x00BE, /* 32 */
306 0x00BE, 0x00BE, 0x00BE, 0x00BE,
307 0x00C1, 0x00C4, 0x00C7, 0x00CA,
308 0x00CD, 0x00CF, 0x00D2, 0x00D4,
309 0x00D6, 0x00D8, 0x00DA, 0x00DC, /* 48 */
310 0x00DE,
311};
312
313/**** Helper functions to access the device Internal Lookup Tables ****/
314
315void bcm43xx_ilt_write(struct bcm43xx_private *bcm, u16 offset, u16 val)
316{
317 if (bcm43xx_current_phy(bcm)->type == BCM43xx_PHYTYPE_A) {
318 bcm43xx_phy_write(bcm, BCM43xx_PHY_ILT_A_CTRL, offset);
319 mmiowb();
320 bcm43xx_phy_write(bcm, BCM43xx_PHY_ILT_A_DATA1, val);
321 } else {
322 bcm43xx_phy_write(bcm, BCM43xx_PHY_ILT_G_CTRL, offset);
323 mmiowb();
324 bcm43xx_phy_write(bcm, BCM43xx_PHY_ILT_G_DATA1, val);
325 }
326}
327
328void bcm43xx_ilt_write32(struct bcm43xx_private *bcm, u16 offset, u32 val)
329{
330 if (bcm43xx_current_phy(bcm)->type == BCM43xx_PHYTYPE_A) {
331 bcm43xx_phy_write(bcm, BCM43xx_PHY_ILT_A_CTRL, offset);
332 mmiowb();
333 bcm43xx_phy_write(bcm, BCM43xx_PHY_ILT_A_DATA2, (val & 0xFFFF0000) >> 16);
334 bcm43xx_phy_write(bcm, BCM43xx_PHY_ILT_A_DATA1, val & 0x0000FFFF);
335 } else {
336 bcm43xx_phy_write(bcm, BCM43xx_PHY_ILT_G_CTRL, offset);
337 mmiowb();
338 bcm43xx_phy_write(bcm, BCM43xx_PHY_ILT_G_DATA2, (val & 0xFFFF0000) >> 16);
339 bcm43xx_phy_write(bcm, BCM43xx_PHY_ILT_G_DATA1, val & 0x0000FFFF);
340 }
341}
342
343u16 bcm43xx_ilt_read(struct bcm43xx_private *bcm, u16 offset)
344{
345 if (bcm43xx_current_phy(bcm)->type == BCM43xx_PHYTYPE_A) {
346 bcm43xx_phy_write(bcm, BCM43xx_PHY_ILT_A_CTRL, offset);
347 return bcm43xx_phy_read(bcm, BCM43xx_PHY_ILT_A_DATA1);
348 } else {
349 bcm43xx_phy_write(bcm, BCM43xx_PHY_ILT_G_CTRL, offset);
350 return bcm43xx_phy_read(bcm, BCM43xx_PHY_ILT_G_DATA1);
351 }
352}
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_ilt.h b/drivers/net/wireless/bcm43xx/bcm43xx_ilt.h
deleted file mode 100644
index d7eaf5f25b7f..000000000000
--- a/drivers/net/wireless/bcm43xx/bcm43xx_ilt.h
+++ /dev/null
@@ -1,33 +0,0 @@
1#ifndef BCM43xx_ILT_H_
2#define BCM43xx_ILT_H_
3
4#define BCM43xx_ILT_ROTOR_SIZE 53
5extern const u32 bcm43xx_ilt_rotor[BCM43xx_ILT_ROTOR_SIZE];
6#define BCM43xx_ILT_RETARD_SIZE 53
7extern const u32 bcm43xx_ilt_retard[BCM43xx_ILT_RETARD_SIZE];
8#define BCM43xx_ILT_FINEFREQA_SIZE 256
9extern const u16 bcm43xx_ilt_finefreqa[BCM43xx_ILT_FINEFREQA_SIZE];
10#define BCM43xx_ILT_FINEFREQG_SIZE 256
11extern const u16 bcm43xx_ilt_finefreqg[BCM43xx_ILT_FINEFREQG_SIZE];
12#define BCM43xx_ILT_NOISEA2_SIZE 8
13extern const u16 bcm43xx_ilt_noisea2[BCM43xx_ILT_NOISEA2_SIZE];
14#define BCM43xx_ILT_NOISEA3_SIZE 8
15extern const u16 bcm43xx_ilt_noisea3[BCM43xx_ILT_NOISEA3_SIZE];
16#define BCM43xx_ILT_NOISEG1_SIZE 8
17extern const u16 bcm43xx_ilt_noiseg1[BCM43xx_ILT_NOISEG1_SIZE];
18#define BCM43xx_ILT_NOISEG2_SIZE 8
19extern const u16 bcm43xx_ilt_noiseg2[BCM43xx_ILT_NOISEG2_SIZE];
20#define BCM43xx_ILT_NOISESCALEG_SIZE 27
21extern const u16 bcm43xx_ilt_noisescaleg1[BCM43xx_ILT_NOISESCALEG_SIZE];
22extern const u16 bcm43xx_ilt_noisescaleg2[BCM43xx_ILT_NOISESCALEG_SIZE];
23extern const u16 bcm43xx_ilt_noisescaleg3[BCM43xx_ILT_NOISESCALEG_SIZE];
24#define BCM43xx_ILT_SIGMASQR_SIZE 53
25extern const u16 bcm43xx_ilt_sigmasqr1[BCM43xx_ILT_SIGMASQR_SIZE];
26extern const u16 bcm43xx_ilt_sigmasqr2[BCM43xx_ILT_SIGMASQR_SIZE];
27
28
29void bcm43xx_ilt_write(struct bcm43xx_private *bcm, u16 offset, u16 val);
30void bcm43xx_ilt_write32(struct bcm43xx_private *bcm, u16 offset, u32 val);
31u16 bcm43xx_ilt_read(struct bcm43xx_private *bcm, u16 offset);
32
33#endif /* BCM43xx_ILT_H_ */
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_leds.c b/drivers/net/wireless/bcm43xx/bcm43xx_leds.c
deleted file mode 100644
index cb51dc51cce6..000000000000
--- a/drivers/net/wireless/bcm43xx/bcm43xx_leds.c
+++ /dev/null
@@ -1,307 +0,0 @@
1/*
2
3 Broadcom BCM43xx wireless driver
4
5 Copyright (c) 2005 Martin Langer <martin-langer@gmx.de>,
6 Stefano Brivio <st3@riseup.net>
7 Michael Buesch <mbuesch@freenet.de>
8 Danny van Dyk <kugelfang@gentoo.org>
9 Andreas Jaggi <andreas.jaggi@waterwave.ch>
10
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License as published by
13 the Free Software Foundation; either version 2 of the License, or
14 (at your option) any later version.
15
16 This program is distributed in the hope that it will be useful,
17 but WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 GNU General Public License for more details.
20
21 You should have received a copy of the GNU General Public License
22 along with this program; see the file COPYING. If not, write to
23 the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
24 Boston, MA 02110-1301, USA.
25
26*/
27
28#include "bcm43xx_leds.h"
29#include "bcm43xx_radio.h"
30#include "bcm43xx.h"
31
32#include <linux/bitops.h>
33
34
35static void bcm43xx_led_changestate(struct bcm43xx_led *led)
36{
37 struct bcm43xx_private *bcm = led->bcm;
38 const int index = bcm43xx_led_index(led);
39 const u16 mask = (1 << index);
40 u16 ledctl;
41
42 assert(index >= 0 && index < BCM43xx_NR_LEDS);
43 assert(led->blink_interval);
44 ledctl = bcm43xx_read16(bcm, BCM43xx_MMIO_GPIO_CONTROL);
45 ledctl = (ledctl & mask) ? (ledctl & ~mask) : (ledctl | mask);
46 bcm43xx_write16(bcm, BCM43xx_MMIO_GPIO_CONTROL, ledctl);
47}
48
49static void bcm43xx_led_blink(unsigned long d)
50{
51 struct bcm43xx_led *led = (struct bcm43xx_led *)d;
52 struct bcm43xx_private *bcm = led->bcm;
53 unsigned long flags;
54
55 spin_lock_irqsave(&bcm->leds_lock, flags);
56 if (led->blink_interval) {
57 bcm43xx_led_changestate(led);
58 mod_timer(&led->blink_timer, jiffies + led->blink_interval);
59 }
60 spin_unlock_irqrestore(&bcm->leds_lock, flags);
61}
62
63static void bcm43xx_led_blink_start(struct bcm43xx_led *led,
64 unsigned long interval)
65{
66 if (led->blink_interval)
67 return;
68 led->blink_interval = interval;
69 bcm43xx_led_changestate(led);
70 led->blink_timer.expires = jiffies + interval;
71 add_timer(&led->blink_timer);
72}
73
74static void bcm43xx_led_blink_stop(struct bcm43xx_led *led, int sync)
75{
76 struct bcm43xx_private *bcm = led->bcm;
77 const int index = bcm43xx_led_index(led);
78 u16 ledctl;
79
80 if (!led->blink_interval)
81 return;
82 if (unlikely(sync))
83 del_timer_sync(&led->blink_timer);
84 else
85 del_timer(&led->blink_timer);
86 led->blink_interval = 0;
87
88 /* Make sure the LED is turned off. */
89 assert(index >= 0 && index < BCM43xx_NR_LEDS);
90 ledctl = bcm43xx_read16(bcm, BCM43xx_MMIO_GPIO_CONTROL);
91 if (led->activelow)
92 ledctl |= (1 << index);
93 else
94 ledctl &= ~(1 << index);
95 bcm43xx_write16(bcm, BCM43xx_MMIO_GPIO_CONTROL, ledctl);
96}
97
98static void bcm43xx_led_init_hardcoded(struct bcm43xx_private *bcm,
99 struct bcm43xx_led *led,
100 int led_index)
101{
102 /* This function is called if the behaviour (and activelow)
103 * information for a LED is missing in the SPROM.
104 * We hardcode the behaviour values for various devices here.
105 * Note that the BCM43xx_LED_TEST_XXX behaviour values can
106 * be used to figure out which led is mapped to which index.
107 */
108
109 switch (led_index) {
110 case 0:
111 led->behaviour = BCM43xx_LED_ACTIVITY;
112 led->activelow = 1;
113 if (bcm->board_vendor == PCI_VENDOR_ID_COMPAQ)
114 led->behaviour = BCM43xx_LED_RADIO_ALL;
115 break;
116 case 1:
117 led->behaviour = BCM43xx_LED_RADIO_B;
118 if (bcm->board_vendor == PCI_VENDOR_ID_ASUSTEK)
119 led->behaviour = BCM43xx_LED_ASSOC;
120 break;
121 case 2:
122 led->behaviour = BCM43xx_LED_RADIO_A;
123 break;
124 case 3:
125 led->behaviour = BCM43xx_LED_OFF;
126 break;
127 default:
128 assert(0);
129 }
130}
131
132int bcm43xx_leds_init(struct bcm43xx_private *bcm)
133{
134 struct bcm43xx_led *led;
135 u8 sprom[4];
136 int i;
137
138 sprom[0] = bcm->sprom.wl0gpio0;
139 sprom[1] = bcm->sprom.wl0gpio1;
140 sprom[2] = bcm->sprom.wl0gpio2;
141 sprom[3] = bcm->sprom.wl0gpio3;
142
143 for (i = 0; i < BCM43xx_NR_LEDS; i++) {
144 led = &(bcm->leds[i]);
145 led->bcm = bcm;
146 setup_timer(&led->blink_timer,
147 bcm43xx_led_blink,
148 (unsigned long)led);
149
150 if (sprom[i] == 0xFF) {
151 bcm43xx_led_init_hardcoded(bcm, led, i);
152 } else {
153 led->behaviour = sprom[i] & BCM43xx_LED_BEHAVIOUR;
154 led->activelow = !!(sprom[i] & BCM43xx_LED_ACTIVELOW);
155 }
156 }
157
158 return 0;
159}
160
161void bcm43xx_leds_exit(struct bcm43xx_private *bcm)
162{
163 struct bcm43xx_led *led;
164 int i;
165
166 for (i = 0; i < BCM43xx_NR_LEDS; i++) {
167 led = &(bcm->leds[i]);
168 bcm43xx_led_blink_stop(led, 1);
169 }
170 bcm43xx_leds_switch_all(bcm, 0);
171}
172
173void bcm43xx_leds_update(struct bcm43xx_private *bcm, int activity)
174{
175 struct bcm43xx_led *led;
176 struct bcm43xx_radioinfo *radio = bcm43xx_current_radio(bcm);
177 struct bcm43xx_phyinfo *phy = bcm43xx_current_phy(bcm);
178 const int transferring = (jiffies - bcm->stats.last_tx) < BCM43xx_LED_XFER_THRES;
179 int i, turn_on;
180 unsigned long interval = 0;
181 u16 ledctl;
182 unsigned long flags;
183
184 spin_lock_irqsave(&bcm->leds_lock, flags);
185 ledctl = bcm43xx_read16(bcm, BCM43xx_MMIO_GPIO_CONTROL);
186 for (i = 0; i < BCM43xx_NR_LEDS; i++) {
187 led = &(bcm->leds[i]);
188
189 turn_on = 0;
190 switch (led->behaviour) {
191 case BCM43xx_LED_INACTIVE:
192 continue;
193 case BCM43xx_LED_OFF:
194 case BCM43xx_LED_BCM4303_3:
195 break;
196 case BCM43xx_LED_ON:
197 turn_on = 1;
198 break;
199 case BCM43xx_LED_ACTIVITY:
200 case BCM43xx_LED_BCM4303_0:
201 turn_on = activity;
202 break;
203 case BCM43xx_LED_RADIO_ALL:
204 turn_on = radio->enabled && bcm43xx_is_hw_radio_enabled(bcm);
205 break;
206 case BCM43xx_LED_RADIO_A:
207 case BCM43xx_LED_BCM4303_2:
208 turn_on = (radio->enabled && bcm43xx_is_hw_radio_enabled(bcm) &&
209 phy->type == BCM43xx_PHYTYPE_A);
210 break;
211 case BCM43xx_LED_RADIO_B:
212 case BCM43xx_LED_BCM4303_1:
213 turn_on = (radio->enabled && bcm43xx_is_hw_radio_enabled(bcm) &&
214 (phy->type == BCM43xx_PHYTYPE_B ||
215 phy->type == BCM43xx_PHYTYPE_G));
216 break;
217 case BCM43xx_LED_MODE_BG:
218 if (phy->type == BCM43xx_PHYTYPE_G && bcm43xx_is_hw_radio_enabled(bcm) &&
219 1/*FIXME: using G rates.*/)
220 turn_on = 1;
221 break;
222 case BCM43xx_LED_TRANSFER:
223 if (transferring)
224 bcm43xx_led_blink_start(led, BCM43xx_LEDBLINK_MEDIUM);
225 else
226 bcm43xx_led_blink_stop(led, 0);
227 continue;
228 case BCM43xx_LED_APTRANSFER:
229 if (bcm->ieee->iw_mode == IW_MODE_MASTER) {
230 if (transferring) {
231 interval = BCM43xx_LEDBLINK_FAST;
232 turn_on = 1;
233 }
234 } else {
235 turn_on = 1;
236 if (0/*TODO: not assoc*/)
237 interval = BCM43xx_LEDBLINK_SLOW;
238 else if (transferring)
239 interval = BCM43xx_LEDBLINK_FAST;
240 else
241 turn_on = 0;
242 }
243 if (turn_on)
244 bcm43xx_led_blink_start(led, interval);
245 else
246 bcm43xx_led_blink_stop(led, 0);
247 continue;
248 case BCM43xx_LED_WEIRD:
249 //TODO
250 break;
251 case BCM43xx_LED_ASSOC:
252 if (bcm->softmac->associnfo.associated)
253 turn_on = 1;
254 break;
255#ifdef CONFIG_BCM43XX_DEBUG
256 case BCM43xx_LED_TEST_BLINKSLOW:
257 bcm43xx_led_blink_start(led, BCM43xx_LEDBLINK_SLOW);
258 continue;
259 case BCM43xx_LED_TEST_BLINKMEDIUM:
260 bcm43xx_led_blink_start(led, BCM43xx_LEDBLINK_MEDIUM);
261 continue;
262 case BCM43xx_LED_TEST_BLINKFAST:
263 bcm43xx_led_blink_start(led, BCM43xx_LEDBLINK_FAST);
264 continue;
265#endif /* CONFIG_BCM43XX_DEBUG */
266 default:
267 dprintkl(KERN_INFO PFX "Bad value in leds_update,"
268 " led->behaviour: 0x%x\n", led->behaviour);
269 }
270
271 if (led->activelow)
272 turn_on = !turn_on;
273 if (turn_on)
274 ledctl |= (1 << i);
275 else
276 ledctl &= ~(1 << i);
277 }
278 bcm43xx_write16(bcm, BCM43xx_MMIO_GPIO_CONTROL, ledctl);
279 spin_unlock_irqrestore(&bcm->leds_lock, flags);
280}
281
282void bcm43xx_leds_switch_all(struct bcm43xx_private *bcm, int on)
283{
284 struct bcm43xx_led *led;
285 u16 ledctl;
286 int i;
287 int bit_on;
288 unsigned long flags;
289
290 spin_lock_irqsave(&bcm->leds_lock, flags);
291 ledctl = bcm43xx_read16(bcm, BCM43xx_MMIO_GPIO_CONTROL);
292 for (i = 0; i < BCM43xx_NR_LEDS; i++) {
293 led = &(bcm->leds[i]);
294 if (led->behaviour == BCM43xx_LED_INACTIVE)
295 continue;
296 if (on)
297 bit_on = led->activelow ? 0 : 1;
298 else
299 bit_on = led->activelow ? 1 : 0;
300 if (bit_on)
301 ledctl |= (1 << i);
302 else
303 ledctl &= ~(1 << i);
304 }
305 bcm43xx_write16(bcm, BCM43xx_MMIO_GPIO_CONTROL, ledctl);
306 spin_unlock_irqrestore(&bcm->leds_lock, flags);
307}
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_leds.h b/drivers/net/wireless/bcm43xx/bcm43xx_leds.h
deleted file mode 100644
index 811e14a81198..000000000000
--- a/drivers/net/wireless/bcm43xx/bcm43xx_leds.h
+++ /dev/null
@@ -1,62 +0,0 @@
1#ifndef BCM43xx_LEDS_H_
2#define BCM43xx_LEDS_H_
3
4#include <linux/types.h>
5#include <linux/timer.h>
6
7
8struct bcm43xx_led {
9 u8 behaviour:7;
10 u8 activelow:1;
11
12 struct bcm43xx_private *bcm;
13 struct timer_list blink_timer;
14 unsigned long blink_interval;
15};
16#define bcm43xx_led_index(led) ((int)((led) - (led)->bcm->leds))
17
18/* Delay, in jiffies, between state changes when blinking. */
19#define BCM43xx_LEDBLINK_SLOW (HZ / 1)
20#define BCM43xx_LEDBLINK_MEDIUM (HZ / 4)
21#define BCM43xx_LEDBLINK_FAST (HZ / 8)
22
23#define BCM43xx_LED_XFER_THRES (HZ / 100)
24
25#define BCM43xx_LED_BEHAVIOUR 0x7F
26#define BCM43xx_LED_ACTIVELOW 0x80
27enum { /* LED behaviour values */
28 BCM43xx_LED_OFF,
29 BCM43xx_LED_ON,
30 BCM43xx_LED_ACTIVITY,
31 BCM43xx_LED_RADIO_ALL,
32 BCM43xx_LED_RADIO_A,
33 BCM43xx_LED_RADIO_B,
34 BCM43xx_LED_MODE_BG,
35 BCM43xx_LED_TRANSFER,
36 BCM43xx_LED_APTRANSFER,
37 BCM43xx_LED_WEIRD,//FIXME
38 BCM43xx_LED_ASSOC,
39 BCM43xx_LED_INACTIVE,
40
41 /* Behaviour values for testing.
42 * With these values it is easier to figure out
43 * the real behaviour of leds, in case the SPROM
44 * is missing information.
45 */
46 BCM43xx_LED_TEST_BLINKSLOW,
47 BCM43xx_LED_TEST_BLINKMEDIUM,
48 BCM43xx_LED_TEST_BLINKFAST,
49
50 /* Misc values for BCM4303 */
51 BCM43xx_LED_BCM4303_0 = 0x2B,
52 BCM43xx_LED_BCM4303_1 = 0x78,
53 BCM43xx_LED_BCM4303_2 = 0x2E,
54 BCM43xx_LED_BCM4303_3 = 0x19,
55};
56
57int bcm43xx_leds_init(struct bcm43xx_private *bcm);
58void bcm43xx_leds_exit(struct bcm43xx_private *bcm);
59void bcm43xx_leds_update(struct bcm43xx_private *bcm, int activity);
60void bcm43xx_leds_switch_all(struct bcm43xx_private *bcm, int on);
61
62#endif /* BCM43xx_LEDS_H_ */
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_main.c b/drivers/net/wireless/bcm43xx/bcm43xx_main.c
deleted file mode 100644
index b96a325b6ec8..000000000000
--- a/drivers/net/wireless/bcm43xx/bcm43xx_main.c
+++ /dev/null
@@ -1,4281 +0,0 @@
1/*
2
3 Broadcom BCM43xx wireless driver
4
5 Copyright (c) 2005 Martin Langer <martin-langer@gmx.de>,
6 Stefano Brivio <st3@riseup.net>
7 Michael Buesch <mbuesch@freenet.de>
8 Danny van Dyk <kugelfang@gentoo.org>
9 Andreas Jaggi <andreas.jaggi@waterwave.ch>
10
11 Some parts of the code in this file are derived from the ipw2200
12 driver Copyright(c) 2003 - 2004 Intel Corporation.
13
14 This program is free software; you can redistribute it and/or modify
15 it under the terms of the GNU General Public License as published by
16 the Free Software Foundation; either version 2 of the License, or
17 (at your option) any later version.
18
19 This program is distributed in the hope that it will be useful,
20 but WITHOUT ANY WARRANTY; without even the implied warranty of
21 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 GNU General Public License for more details.
23
24 You should have received a copy of the GNU General Public License
25 along with this program; see the file COPYING. If not, write to
26 the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
27 Boston, MA 02110-1301, USA.
28
29*/
30
31#include <linux/delay.h>
32#include <linux/init.h>
33#include <linux/moduleparam.h>
34#include <linux/if_arp.h>
35#include <linux/etherdevice.h>
36#include <linux/version.h>
37#include <linux/firmware.h>
38#include <linux/wireless.h>
39#include <linux/workqueue.h>
40#include <linux/skbuff.h>
41#include <linux/dma-mapping.h>
42#include <net/iw_handler.h>
43
44#include "bcm43xx.h"
45#include "bcm43xx_main.h"
46#include "bcm43xx_debugfs.h"
47#include "bcm43xx_radio.h"
48#include "bcm43xx_phy.h"
49#include "bcm43xx_dma.h"
50#include "bcm43xx_pio.h"
51#include "bcm43xx_power.h"
52#include "bcm43xx_wx.h"
53#include "bcm43xx_ethtool.h"
54#include "bcm43xx_xmit.h"
55#include "bcm43xx_sysfs.h"
56
57
58MODULE_DESCRIPTION("Broadcom BCM43xx wireless driver");
59MODULE_AUTHOR("Martin Langer");
60MODULE_AUTHOR("Stefano Brivio");
61MODULE_AUTHOR("Michael Buesch");
62MODULE_LICENSE("GPL");
63
64#if defined(CONFIG_BCM43XX_DMA) && defined(CONFIG_BCM43XX_PIO)
65static int modparam_pio;
66module_param_named(pio, modparam_pio, int, 0444);
67MODULE_PARM_DESC(pio, "enable(1) / disable(0) PIO mode");
68#elif defined(CONFIG_BCM43XX_DMA)
69# define modparam_pio 0
70#elif defined(CONFIG_BCM43XX_PIO)
71# define modparam_pio 1
72#endif
73
74static int modparam_bad_frames_preempt;
75module_param_named(bad_frames_preempt, modparam_bad_frames_preempt, int, 0444);
76MODULE_PARM_DESC(bad_frames_preempt, "enable(1) / disable(0) Bad Frames Preemption");
77
78static int modparam_short_retry = BCM43xx_DEFAULT_SHORT_RETRY_LIMIT;
79module_param_named(short_retry, modparam_short_retry, int, 0444);
80MODULE_PARM_DESC(short_retry, "Short-Retry-Limit (0 - 15)");
81
82static int modparam_long_retry = BCM43xx_DEFAULT_LONG_RETRY_LIMIT;
83module_param_named(long_retry, modparam_long_retry, int, 0444);
84MODULE_PARM_DESC(long_retry, "Long-Retry-Limit (0 - 15)");
85
86static int modparam_locale = -1;
87module_param_named(locale, modparam_locale, int, 0444);
88MODULE_PARM_DESC(locale, "Select LocaleCode 0-11 (For travelers)");
89
90static int modparam_noleds;
91module_param_named(noleds, modparam_noleds, int, 0444);
92MODULE_PARM_DESC(noleds, "Turn off all LED activity");
93
94static char modparam_fwpostfix[64];
95module_param_string(fwpostfix, modparam_fwpostfix, 64, 0444);
96MODULE_PARM_DESC(fwpostfix, "Postfix for .fw files. Useful for using multiple firmware image versions.");
97
98
99/* If you want to debug with just a single device, enable this,
100 * where the string is the pci device ID (as given by the kernel's
101 * pci_name function) of the device to be used.
102 */
103//#define DEBUG_SINGLE_DEVICE_ONLY "0001:11:00.0"
104
105/* If you want to enable printing of each MMIO access, enable this. */
106//#define DEBUG_ENABLE_MMIO_PRINT
107
108/* If you want to enable printing of MMIO access within
109 * ucode/pcm upload and initvals writes, enable this.
110 */
111//#define DEBUG_ENABLE_UCODE_MMIO_PRINT
112
113/* If you want to enable printing of PCI Config Space access, enable this */
114//#define DEBUG_ENABLE_PCILOG
115
116
117/* Detailed list maintained at:
118 * http://openfacts.berlios.de/index-en.phtml?title=Bcm43xxDevices
119 */
120 static struct pci_device_id bcm43xx_pci_tbl[] = {
121 /* Broadcom 4303 802.11b */
122 { PCI_VENDOR_ID_BROADCOM, 0x4301, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
123 /* Broadcom 4307 802.11b */
124 { PCI_VENDOR_ID_BROADCOM, 0x4307, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
125 /* Broadcom 4311 802.11(a)/b/g */
126 { PCI_VENDOR_ID_BROADCOM, 0x4311, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
127 /* Broadcom 4312 802.11a/b/g */
128 { PCI_VENDOR_ID_BROADCOM, 0x4312, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
129 /* Broadcom 4318 802.11b/g */
130 { PCI_VENDOR_ID_BROADCOM, 0x4318, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
131 /* Broadcom 4319 802.11a/b/g */
132 { PCI_VENDOR_ID_BROADCOM, 0x4319, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
133 /* Broadcom 4306 802.11b/g */
134 { PCI_VENDOR_ID_BROADCOM, 0x4320, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
135 /* Broadcom 4306 802.11a */
136// { PCI_VENDOR_ID_BROADCOM, 0x4321, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
137 /* Broadcom 4309 802.11a/b/g */
138 { PCI_VENDOR_ID_BROADCOM, 0x4324, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
139 /* Broadcom 43XG 802.11b/g */
140 { PCI_VENDOR_ID_BROADCOM, 0x4325, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
141 { 0 },
142};
143MODULE_DEVICE_TABLE(pci, bcm43xx_pci_tbl);
144
145static void bcm43xx_ram_write(struct bcm43xx_private *bcm, u16 offset, u32 val)
146{
147 u32 status;
148
149 status = bcm43xx_read32(bcm, BCM43xx_MMIO_STATUS_BITFIELD);
150 if (!(status & BCM43xx_SBF_XFER_REG_BYTESWAP))
151 val = swab32(val);
152
153 bcm43xx_write32(bcm, BCM43xx_MMIO_RAM_CONTROL, offset);
154 mmiowb();
155 bcm43xx_write32(bcm, BCM43xx_MMIO_RAM_DATA, val);
156}
157
158static inline
159void bcm43xx_shm_control_word(struct bcm43xx_private *bcm,
160 u16 routing, u16 offset)
161{
162 u32 control;
163
164 /* "offset" is the WORD offset. */
165
166 control = routing;
167 control <<= 16;
168 control |= offset;
169 bcm43xx_write32(bcm, BCM43xx_MMIO_SHM_CONTROL, control);
170}
171
172u32 bcm43xx_shm_read32(struct bcm43xx_private *bcm,
173 u16 routing, u16 offset)
174{
175 u32 ret;
176
177 if (routing == BCM43xx_SHM_SHARED) {
178 if (offset & 0x0003) {
179 /* Unaligned access */
180 bcm43xx_shm_control_word(bcm, routing, offset >> 2);
181 ret = bcm43xx_read16(bcm, BCM43xx_MMIO_SHM_DATA_UNALIGNED);
182 ret <<= 16;
183 bcm43xx_shm_control_word(bcm, routing, (offset >> 2) + 1);
184 ret |= bcm43xx_read16(bcm, BCM43xx_MMIO_SHM_DATA);
185
186 return ret;
187 }
188 offset >>= 2;
189 }
190 bcm43xx_shm_control_word(bcm, routing, offset);
191 ret = bcm43xx_read32(bcm, BCM43xx_MMIO_SHM_DATA);
192
193 return ret;
194}
195
196u16 bcm43xx_shm_read16(struct bcm43xx_private *bcm,
197 u16 routing, u16 offset)
198{
199 u16 ret;
200
201 if (routing == BCM43xx_SHM_SHARED) {
202 if (offset & 0x0003) {
203 /* Unaligned access */
204 bcm43xx_shm_control_word(bcm, routing, offset >> 2);
205 ret = bcm43xx_read16(bcm, BCM43xx_MMIO_SHM_DATA_UNALIGNED);
206
207 return ret;
208 }
209 offset >>= 2;
210 }
211 bcm43xx_shm_control_word(bcm, routing, offset);
212 ret = bcm43xx_read16(bcm, BCM43xx_MMIO_SHM_DATA);
213
214 return ret;
215}
216
217void bcm43xx_shm_write32(struct bcm43xx_private *bcm,
218 u16 routing, u16 offset,
219 u32 value)
220{
221 if (routing == BCM43xx_SHM_SHARED) {
222 if (offset & 0x0003) {
223 /* Unaligned access */
224 bcm43xx_shm_control_word(bcm, routing, offset >> 2);
225 mmiowb();
226 bcm43xx_write16(bcm, BCM43xx_MMIO_SHM_DATA_UNALIGNED,
227 (value >> 16) & 0xffff);
228 mmiowb();
229 bcm43xx_shm_control_word(bcm, routing, (offset >> 2) + 1);
230 mmiowb();
231 bcm43xx_write16(bcm, BCM43xx_MMIO_SHM_DATA,
232 value & 0xffff);
233 return;
234 }
235 offset >>= 2;
236 }
237 bcm43xx_shm_control_word(bcm, routing, offset);
238 mmiowb();
239 bcm43xx_write32(bcm, BCM43xx_MMIO_SHM_DATA, value);
240}
241
242void bcm43xx_shm_write16(struct bcm43xx_private *bcm,
243 u16 routing, u16 offset,
244 u16 value)
245{
246 if (routing == BCM43xx_SHM_SHARED) {
247 if (offset & 0x0003) {
248 /* Unaligned access */
249 bcm43xx_shm_control_word(bcm, routing, offset >> 2);
250 mmiowb();
251 bcm43xx_write16(bcm, BCM43xx_MMIO_SHM_DATA_UNALIGNED,
252 value);
253 return;
254 }
255 offset >>= 2;
256 }
257 bcm43xx_shm_control_word(bcm, routing, offset);
258 mmiowb();
259 bcm43xx_write16(bcm, BCM43xx_MMIO_SHM_DATA, value);
260}
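/* A usage sketch (illustrative, mirroring the bcm43xx_set_slot_time() helper
 * further down in this file): the SHM accessors above take a routing space
 * and a byte offset; unaligned shared-memory offsets are handled
 * transparently.  The helper name is hypothetical. */
static inline u16 bcm43xx_read_slot_time_sketch(struct bcm43xx_private *bcm)
{
	/* Slot time lives at byte offset 0x0010 of shared memory. */
	return bcm43xx_shm_read16(bcm, BCM43xx_SHM_SHARED, 0x0010);
}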
261
262void bcm43xx_tsf_read(struct bcm43xx_private *bcm, u64 *tsf)
263{
264 /* We need to be careful. As we read the TSF from multiple
265 * registers, we should take care of register overflows.
266 * In theory, the whole tsf read process should be atomic.
267 * We try to be atomic here by restarting the read process
268 * if any of the high registers changed (overflowed).
269 */
270 if (bcm->current_core->rev >= 3) {
271 u32 low, high, high2;
272
273 do {
274 high = bcm43xx_read32(bcm, BCM43xx_MMIO_REV3PLUS_TSF_HIGH);
275 low = bcm43xx_read32(bcm, BCM43xx_MMIO_REV3PLUS_TSF_LOW);
276 high2 = bcm43xx_read32(bcm, BCM43xx_MMIO_REV3PLUS_TSF_HIGH);
277 } while (unlikely(high != high2));
278
279 *tsf = high;
280 *tsf <<= 32;
281 *tsf |= low;
282 } else {
283 u64 tmp;
284 u16 v0, v1, v2, v3;
285 u16 test1, test2, test3;
286
287 do {
288 v3 = bcm43xx_read16(bcm, BCM43xx_MMIO_TSF_3);
289 v2 = bcm43xx_read16(bcm, BCM43xx_MMIO_TSF_2);
290 v1 = bcm43xx_read16(bcm, BCM43xx_MMIO_TSF_1);
291 v0 = bcm43xx_read16(bcm, BCM43xx_MMIO_TSF_0);
292
293 test3 = bcm43xx_read16(bcm, BCM43xx_MMIO_TSF_3);
294 test2 = bcm43xx_read16(bcm, BCM43xx_MMIO_TSF_2);
295 test1 = bcm43xx_read16(bcm, BCM43xx_MMIO_TSF_1);
296 } while (v3 != test3 || v2 != test2 || v1 != test1);
297
298 *tsf = v3;
299 *tsf <<= 48;
300 tmp = v2;
301 tmp <<= 32;
302 *tsf |= tmp;
303 tmp = v1;
304 tmp <<= 16;
305 *tsf |= tmp;
306 *tsf |= v0;
307 }
308}
309
310void bcm43xx_tsf_write(struct bcm43xx_private *bcm, u64 tsf)
311{
312 u32 status;
313
314 status = bcm43xx_read32(bcm, BCM43xx_MMIO_STATUS_BITFIELD);
315 status |= BCM43xx_SBF_TIME_UPDATE;
316 bcm43xx_write32(bcm, BCM43xx_MMIO_STATUS_BITFIELD, status);
317 mmiowb();
318
319 /* Be careful with the in-progress timer.
320 * First zero out the low register, so we have a full
321 * register-overflow duration to complete the operation.
322 */
323 if (bcm->current_core->rev >= 3) {
324 u32 lo = (tsf & 0x00000000FFFFFFFFULL);
325 u32 hi = (tsf & 0xFFFFFFFF00000000ULL) >> 32;
326
327 bcm43xx_write32(bcm, BCM43xx_MMIO_REV3PLUS_TSF_LOW, 0);
328 mmiowb();
329 bcm43xx_write32(bcm, BCM43xx_MMIO_REV3PLUS_TSF_HIGH, hi);
330 mmiowb();
331 bcm43xx_write32(bcm, BCM43xx_MMIO_REV3PLUS_TSF_LOW, lo);
332 } else {
333 u16 v0 = (tsf & 0x000000000000FFFFULL);
334 u16 v1 = (tsf & 0x00000000FFFF0000ULL) >> 16;
335 u16 v2 = (tsf & 0x0000FFFF00000000ULL) >> 32;
336 u16 v3 = (tsf & 0xFFFF000000000000ULL) >> 48;
337
338 bcm43xx_write16(bcm, BCM43xx_MMIO_TSF_0, 0);
339 mmiowb();
340 bcm43xx_write16(bcm, BCM43xx_MMIO_TSF_3, v3);
341 mmiowb();
342 bcm43xx_write16(bcm, BCM43xx_MMIO_TSF_2, v2);
343 mmiowb();
344 bcm43xx_write16(bcm, BCM43xx_MMIO_TSF_1, v1);
345 mmiowb();
346 bcm43xx_write16(bcm, BCM43xx_MMIO_TSF_0, v0);
347 }
348
349 status = bcm43xx_read32(bcm, BCM43xx_MMIO_STATUS_BITFIELD);
350 status &= ~BCM43xx_SBF_TIME_UPDATE;
351 bcm43xx_write32(bcm, BCM43xx_MMIO_STATUS_BITFIELD, status);
352}
353
354static
355void bcm43xx_macfilter_set(struct bcm43xx_private *bcm,
356 u16 offset,
357 const u8 *mac)
358{
359 u16 data;
360
361 offset |= 0x0020;
362 bcm43xx_write16(bcm, BCM43xx_MMIO_MACFILTER_CONTROL, offset);
363
364 data = mac[0];
365 data |= mac[1] << 8;
366 bcm43xx_write16(bcm, BCM43xx_MMIO_MACFILTER_DATA, data);
367 data = mac[2];
368 data |= mac[3] << 8;
369 bcm43xx_write16(bcm, BCM43xx_MMIO_MACFILTER_DATA, data);
370 data = mac[4];
371 data |= mac[5] << 8;
372 bcm43xx_write16(bcm, BCM43xx_MMIO_MACFILTER_DATA, data);
373}
374
375static void bcm43xx_macfilter_clear(struct bcm43xx_private *bcm,
376 u16 offset)
377{
378 const u8 zero_addr[ETH_ALEN] = { 0 };
379
380 bcm43xx_macfilter_set(bcm, offset, zero_addr);
381}
382
383static void bcm43xx_write_mac_bssid_templates(struct bcm43xx_private *bcm)
384{
385 const u8 *mac = (const u8 *)(bcm->net_dev->dev_addr);
386 const u8 *bssid = (const u8 *)(bcm->ieee->bssid);
387 u8 mac_bssid[ETH_ALEN * 2];
388 int i;
389
390 memcpy(mac_bssid, mac, ETH_ALEN);
391 memcpy(mac_bssid + ETH_ALEN, bssid, ETH_ALEN);
392
393 /* Write our MAC address and BSSID to template ram */
394 for (i = 0; i < ARRAY_SIZE(mac_bssid); i += sizeof(u32))
395 bcm43xx_ram_write(bcm, 0x20 + i, *((u32 *)(mac_bssid + i)));
396 for (i = 0; i < ARRAY_SIZE(mac_bssid); i += sizeof(u32))
397 bcm43xx_ram_write(bcm, 0x78 + i, *((u32 *)(mac_bssid + i)));
398 for (i = 0; i < ARRAY_SIZE(mac_bssid); i += sizeof(u32))
399 bcm43xx_ram_write(bcm, 0x478 + i, *((u32 *)(mac_bssid + i)));
400}
401
402//FIXME: Well, we should probably call them from somewhere.
403#if 0
404static void bcm43xx_set_slot_time(struct bcm43xx_private *bcm, u16 slot_time)
405{
406 /* slot_time is in usec. */
407 if (bcm43xx_current_phy(bcm)->type != BCM43xx_PHYTYPE_G)
408 return;
409 bcm43xx_write16(bcm, 0x684, 510 + slot_time);
410 bcm43xx_shm_write16(bcm, BCM43xx_SHM_SHARED, 0x0010, slot_time);
411}
412
413static void bcm43xx_short_slot_timing_enable(struct bcm43xx_private *bcm)
414{
415 bcm43xx_set_slot_time(bcm, 9);
416}
417
418static void bcm43xx_short_slot_timing_disable(struct bcm43xx_private *bcm)
419{
420 bcm43xx_set_slot_time(bcm, 20);
421}
422#endif
423
424/* FIXME: To get the MAC-filter working, we need to implement the
425 * following functions (and rename them :)
426 */
427#if 0
428static void bcm43xx_disassociate(struct bcm43xx_private *bcm)
429{
430 bcm43xx_mac_suspend(bcm);
431 bcm43xx_macfilter_clear(bcm, BCM43xx_MACFILTER_ASSOC);
432
433 bcm43xx_ram_write(bcm, 0x0026, 0x0000);
434 bcm43xx_ram_write(bcm, 0x0028, 0x0000);
435 bcm43xx_ram_write(bcm, 0x007E, 0x0000);
436 bcm43xx_ram_write(bcm, 0x0080, 0x0000);
437 bcm43xx_ram_write(bcm, 0x047E, 0x0000);
438 bcm43xx_ram_write(bcm, 0x0480, 0x0000);
439
440 if (bcm->current_core->rev < 3) {
441 bcm43xx_write16(bcm, 0x0610, 0x8000);
442 bcm43xx_write16(bcm, 0x060E, 0x0000);
443 } else
444 bcm43xx_write32(bcm, 0x0188, 0x80000000);
445
446 bcm43xx_shm_write32(bcm, BCM43xx_SHM_WIRELESS, 0x0004, 0x000003ff);
447
448 if (bcm43xx_current_phy(bcm)->type == BCM43xx_PHYTYPE_G &&
449 ieee80211_is_ofdm_rate(bcm->softmac->txrates.default_rate))
450 bcm43xx_short_slot_timing_enable(bcm);
451
452 bcm43xx_mac_enable(bcm);
453}
454
455static void bcm43xx_associate(struct bcm43xx_private *bcm,
456 const u8 *mac)
457{
458 memcpy(bcm->ieee->bssid, mac, ETH_ALEN);
459
460 bcm43xx_mac_suspend(bcm);
461 bcm43xx_macfilter_set(bcm, BCM43xx_MACFILTER_ASSOC, mac);
462 bcm43xx_write_mac_bssid_templates(bcm);
463 bcm43xx_mac_enable(bcm);
464}
465#endif
466
467/* Enable a Generic IRQ. "mask" is the mask of which IRQs to enable.
468 * Returns the _previously_ enabled IRQ mask.
469 */
470static inline u32 bcm43xx_interrupt_enable(struct bcm43xx_private *bcm, u32 mask)
471{
472 u32 old_mask;
473
474 old_mask = bcm43xx_read32(bcm, BCM43xx_MMIO_GEN_IRQ_MASK);
475 bcm43xx_write32(bcm, BCM43xx_MMIO_GEN_IRQ_MASK, old_mask | mask);
476
477 return old_mask;
478}
479
480/* Disable a Generic IRQ. "mask" is the mask of which IRQs to disable.
481 * Returns the _previously_ enabled IRQ mask.
482 */
483static inline u32 bcm43xx_interrupt_disable(struct bcm43xx_private *bcm, u32 mask)
484{
485 u32 old_mask;
486
487 old_mask = bcm43xx_read32(bcm, BCM43xx_MMIO_GEN_IRQ_MASK);
488 bcm43xx_write32(bcm, BCM43xx_MMIO_GEN_IRQ_MASK, old_mask & ~mask);
489
490 return old_mask;
491}
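/* A minimal sketch of the save/restore pattern that the returned old mask
 * makes possible (illustrative helper, not part of the original driver):
 * disabling BCM43xx_IRQ_ALL leaves the mask register at 0, so OR-ing the
 * old mask back in via bcm43xx_interrupt_enable() restores it. */
static void bcm43xx_irq_mask_save_restore_sketch(struct bcm43xx_private *bcm)
{
	u32 old_mask;

	old_mask = bcm43xx_interrupt_disable(bcm, BCM43xx_IRQ_ALL);
	/* ... touch the hardware with device interrupts masked ... */
	bcm43xx_interrupt_enable(bcm, old_mask);
}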
492
493/* Synchronize IRQ top- and bottom-half.
494 * IRQs must be masked before calling this.
495 * This must not be called with the irq_lock held.
496 */
497static void bcm43xx_synchronize_irq(struct bcm43xx_private *bcm)
498{
499 synchronize_irq(bcm->irq);
500 tasklet_disable(&bcm->isr_tasklet);
501}
502
503/* Make sure we don't receive more data from the device. */
504static int bcm43xx_disable_interrupts_sync(struct bcm43xx_private *bcm)
505{
506 unsigned long flags;
507
508 spin_lock_irqsave(&bcm->irq_lock, flags);
509 if (unlikely(bcm43xx_status(bcm) != BCM43xx_STAT_INITIALIZED)) {
510 spin_unlock_irqrestore(&bcm->irq_lock, flags);
511 return -EBUSY;
512 }
513 bcm43xx_interrupt_disable(bcm, BCM43xx_IRQ_ALL);
514 bcm43xx_read32(bcm, BCM43xx_MMIO_GEN_IRQ_MASK); /* flush */
515 spin_unlock_irqrestore(&bcm->irq_lock, flags);
516 bcm43xx_synchronize_irq(bcm);
517
518 return 0;
519}
520
521static int bcm43xx_read_radioinfo(struct bcm43xx_private *bcm)
522{
523 struct bcm43xx_radioinfo *radio = bcm43xx_current_radio(bcm);
524 struct bcm43xx_phyinfo *phy = bcm43xx_current_phy(bcm);
525 u32 radio_id;
526 u16 manufact;
527 u16 version;
528 u8 revision;
529
530 if (bcm->chip_id == 0x4317) {
531 if (bcm->chip_rev == 0x00)
532 radio_id = 0x3205017F;
533 else if (bcm->chip_rev == 0x01)
534 radio_id = 0x4205017F;
535 else
536 radio_id = 0x5205017F;
537 } else {
538 bcm43xx_write16(bcm, BCM43xx_MMIO_RADIO_CONTROL, BCM43xx_RADIOCTL_ID);
539 radio_id = bcm43xx_read16(bcm, BCM43xx_MMIO_RADIO_DATA_HIGH);
540 radio_id <<= 16;
541 bcm43xx_write16(bcm, BCM43xx_MMIO_RADIO_CONTROL, BCM43xx_RADIOCTL_ID);
542 radio_id |= bcm43xx_read16(bcm, BCM43xx_MMIO_RADIO_DATA_LOW);
543 }
544
545 manufact = (radio_id & 0x00000FFF);
546 version = (radio_id & 0x0FFFF000) >> 12;
547 revision = (radio_id & 0xF0000000) >> 28;
548
549 dprintk(KERN_INFO PFX "Detected Radio: ID: %x (Manuf: %x Ver: %x Rev: %x)\n",
550 radio_id, manufact, version, revision);
551
552 switch (phy->type) {
553 case BCM43xx_PHYTYPE_A:
554 if ((version != 0x2060) || (revision != 1) || (manufact != 0x17f))
555 goto err_unsupported_radio;
556 break;
557 case BCM43xx_PHYTYPE_B:
558 if ((version & 0xFFF0) != 0x2050)
559 goto err_unsupported_radio;
560 break;
561 case BCM43xx_PHYTYPE_G:
562 if (version != 0x2050)
563 goto err_unsupported_radio;
564 break;
565 }
566
567 radio->manufact = manufact;
568 radio->version = version;
569 radio->revision = revision;
570
571 if (phy->type == BCM43xx_PHYTYPE_A)
572 radio->txpower_desired = bcm->sprom.maxpower_aphy;
573 else
574 radio->txpower_desired = bcm->sprom.maxpower_bgphy;
575
576 return 0;
577
578err_unsupported_radio:
579 printk(KERN_ERR PFX "Unsupported Radio connected to the PHY!\n");
580 return -ENODEV;
581}
582
583static const char * bcm43xx_locale_iso(u8 locale)
584{
585 /* ISO 3166-1 country codes.
586 * Note that there aren't ISO 3166-1 codes for
587 * all locales. (Not all locales are countries.)
588 */
589 switch (locale) {
590 case BCM43xx_LOCALE_WORLD:
591 case BCM43xx_LOCALE_ALL:
592 return "XX";
593 case BCM43xx_LOCALE_THAILAND:
594 return "TH";
595 case BCM43xx_LOCALE_ISRAEL:
596 return "IL";
597 case BCM43xx_LOCALE_JORDAN:
598 return "JO";
599 case BCM43xx_LOCALE_CHINA:
600 return "CN";
601 case BCM43xx_LOCALE_JAPAN:
602 case BCM43xx_LOCALE_JAPAN_HIGH:
603 return "JP";
604 case BCM43xx_LOCALE_USA_CANADA_ANZ:
605 case BCM43xx_LOCALE_USA_LOW:
606 return "US";
607 case BCM43xx_LOCALE_EUROPE:
608 return "EU";
609 case BCM43xx_LOCALE_NONE:
610 return " ";
611 }
612 assert(0);
613 return " ";
614}
615
616static const char * bcm43xx_locale_string(u8 locale)
617{
618 switch (locale) {
619 case BCM43xx_LOCALE_WORLD:
620 return "World";
621 case BCM43xx_LOCALE_THAILAND:
622 return "Thailand";
623 case BCM43xx_LOCALE_ISRAEL:
624 return "Israel";
625 case BCM43xx_LOCALE_JORDAN:
626 return "Jordan";
627 case BCM43xx_LOCALE_CHINA:
628 return "China";
629 case BCM43xx_LOCALE_JAPAN:
630 return "Japan";
631 case BCM43xx_LOCALE_USA_CANADA_ANZ:
632 return "USA/Canada/ANZ";
633 case BCM43xx_LOCALE_EUROPE:
634 return "Europe";
635 case BCM43xx_LOCALE_USA_LOW:
636 return "USAlow";
637 case BCM43xx_LOCALE_JAPAN_HIGH:
638 return "JapanHigh";
639 case BCM43xx_LOCALE_ALL:
640 return "All";
641 case BCM43xx_LOCALE_NONE:
642 return "None";
643 }
644 assert(0);
645 return "";
646}
647
648static inline u8 bcm43xx_crc8(u8 crc, u8 data)
649{
650 static const u8 t[] = {
651 0x00, 0xF7, 0xB9, 0x4E, 0x25, 0xD2, 0x9C, 0x6B,
652 0x4A, 0xBD, 0xF3, 0x04, 0x6F, 0x98, 0xD6, 0x21,
653 0x94, 0x63, 0x2D, 0xDA, 0xB1, 0x46, 0x08, 0xFF,
654 0xDE, 0x29, 0x67, 0x90, 0xFB, 0x0C, 0x42, 0xB5,
655 0x7F, 0x88, 0xC6, 0x31, 0x5A, 0xAD, 0xE3, 0x14,
656 0x35, 0xC2, 0x8C, 0x7B, 0x10, 0xE7, 0xA9, 0x5E,
657 0xEB, 0x1C, 0x52, 0xA5, 0xCE, 0x39, 0x77, 0x80,
658 0xA1, 0x56, 0x18, 0xEF, 0x84, 0x73, 0x3D, 0xCA,
659 0xFE, 0x09, 0x47, 0xB0, 0xDB, 0x2C, 0x62, 0x95,
660 0xB4, 0x43, 0x0D, 0xFA, 0x91, 0x66, 0x28, 0xDF,
661 0x6A, 0x9D, 0xD3, 0x24, 0x4F, 0xB8, 0xF6, 0x01,
662 0x20, 0xD7, 0x99, 0x6E, 0x05, 0xF2, 0xBC, 0x4B,
663 0x81, 0x76, 0x38, 0xCF, 0xA4, 0x53, 0x1D, 0xEA,
664 0xCB, 0x3C, 0x72, 0x85, 0xEE, 0x19, 0x57, 0xA0,
665 0x15, 0xE2, 0xAC, 0x5B, 0x30, 0xC7, 0x89, 0x7E,
666 0x5F, 0xA8, 0xE6, 0x11, 0x7A, 0x8D, 0xC3, 0x34,
667 0xAB, 0x5C, 0x12, 0xE5, 0x8E, 0x79, 0x37, 0xC0,
668 0xE1, 0x16, 0x58, 0xAF, 0xC4, 0x33, 0x7D, 0x8A,
669 0x3F, 0xC8, 0x86, 0x71, 0x1A, 0xED, 0xA3, 0x54,
670 0x75, 0x82, 0xCC, 0x3B, 0x50, 0xA7, 0xE9, 0x1E,
671 0xD4, 0x23, 0x6D, 0x9A, 0xF1, 0x06, 0x48, 0xBF,
672 0x9E, 0x69, 0x27, 0xD0, 0xBB, 0x4C, 0x02, 0xF5,
673 0x40, 0xB7, 0xF9, 0x0E, 0x65, 0x92, 0xDC, 0x2B,
674 0x0A, 0xFD, 0xB3, 0x44, 0x2F, 0xD8, 0x96, 0x61,
675 0x55, 0xA2, 0xEC, 0x1B, 0x70, 0x87, 0xC9, 0x3E,
676 0x1F, 0xE8, 0xA6, 0x51, 0x3A, 0xCD, 0x83, 0x74,
677 0xC1, 0x36, 0x78, 0x8F, 0xE4, 0x13, 0x5D, 0xAA,
678 0x8B, 0x7C, 0x32, 0xC5, 0xAE, 0x59, 0x17, 0xE0,
679 0x2A, 0xDD, 0x93, 0x64, 0x0F, 0xF8, 0xB6, 0x41,
680 0x60, 0x97, 0xD9, 0x2E, 0x45, 0xB2, 0xFC, 0x0B,
681 0xBE, 0x49, 0x07, 0xF0, 0x9B, 0x6C, 0x22, 0xD5,
682 0xF4, 0x03, 0x4D, 0xBA, 0xD1, 0x26, 0x68, 0x9F,
683 };
684 return t[crc ^ data];
685}
686
687static u8 bcm43xx_sprom_crc(const u16 *sprom)
688{
689 int word;
690 u8 crc = 0xFF;
691
692 for (word = 0; word < BCM43xx_SPROM_SIZE - 1; word++) {
693 crc = bcm43xx_crc8(crc, sprom[word] & 0x00FF);
694 crc = bcm43xx_crc8(crc, (sprom[word] & 0xFF00) >> 8);
695 }
696 crc = bcm43xx_crc8(crc, sprom[BCM43xx_SPROM_VERSION] & 0x00FF);
697 crc ^= 0xFF;
698
699 return crc;
700}
701
702int bcm43xx_sprom_read(struct bcm43xx_private *bcm, u16 *sprom)
703{
704 int i;
705 u8 crc, expected_crc;
706
707 for (i = 0; i < BCM43xx_SPROM_SIZE; i++)
708 sprom[i] = bcm43xx_read16(bcm, BCM43xx_SPROM_BASE + (i * 2));
709 /* CRC-8 check. */
710 crc = bcm43xx_sprom_crc(sprom);
711 expected_crc = (sprom[BCM43xx_SPROM_VERSION] & 0xFF00) >> 8;
712 if (crc != expected_crc) {
713 printk(KERN_WARNING PFX "WARNING: Invalid SPROM checksum "
714 "(0x%02X, expected: 0x%02X)\n",
715 crc, expected_crc);
716 return -EINVAL;
717 }
718
719 return 0;
720}
721
722int bcm43xx_sprom_write(struct bcm43xx_private *bcm, const u16 *sprom)
723{
724 int i, err;
725 u8 crc, expected_crc;
726 u32 spromctl;
727
728 /* CRC-8 validation of the input data. */
729 crc = bcm43xx_sprom_crc(sprom);
730 expected_crc = (sprom[BCM43xx_SPROM_VERSION] & 0xFF00) >> 8;
731 if (crc != expected_crc) {
732 printk(KERN_ERR PFX "SPROM input data: Invalid CRC\n");
733 return -EINVAL;
734 }
735
736 printk(KERN_INFO PFX "Writing SPROM. Do NOT turn off the power! Please stand by...\n");
737 err = bcm43xx_pci_read_config32(bcm, BCM43xx_PCICFG_SPROMCTL, &spromctl);
738 if (err)
739 goto err_ctlreg;
740 spromctl |= 0x10; /* SPROM WRITE enable. */
741 err = bcm43xx_pci_write_config32(bcm, BCM43xx_PCICFG_SPROMCTL, spromctl);
742 if (err)
743 goto err_ctlreg;
744 /* We must burn lots of CPU cycles here, but that does not
745 * really matter as one does not write the SPROM every other minute...
746 */
747 printk(KERN_INFO PFX "[ 0%%");
748 mdelay(500);
749 for (i = 0; i < BCM43xx_SPROM_SIZE; i++) {
750 if (i == 16)
751 printk("25%%");
752 else if (i == 32)
753 printk("50%%");
754 else if (i == 48)
755 printk("75%%");
756 else if (i % 2)
757 printk(".");
758 bcm43xx_write16(bcm, BCM43xx_SPROM_BASE + (i * 2), sprom[i]);
759 mmiowb();
760 mdelay(20);
761 }
762 spromctl &= ~0x10; /* SPROM WRITE enable. */
763 err = bcm43xx_pci_write_config32(bcm, BCM43xx_PCICFG_SPROMCTL, spromctl);
764 if (err)
765 goto err_ctlreg;
766 mdelay(500);
767 printk("100%% ]\n");
768 printk(KERN_INFO PFX "SPROM written.\n");
769 bcm43xx_controller_restart(bcm, "SPROM update");
770
771 return 0;
772err_ctlreg:
773 printk(KERN_ERR PFX "Could not access SPROM control register.\n");
774 return -ENODEV;
775}
776
777static int bcm43xx_sprom_extract(struct bcm43xx_private *bcm)
778{
779 u16 value;
780 u16 *sprom;
781
782 sprom = kzalloc(BCM43xx_SPROM_SIZE * sizeof(u16),
783 GFP_KERNEL);
784 if (!sprom) {
785 printk(KERN_ERR PFX "sprom_extract OOM\n");
786 return -ENOMEM;
787 }
788 bcm43xx_sprom_read(bcm, sprom);
789
790 /* boardflags2 */
791 value = sprom[BCM43xx_SPROM_BOARDFLAGS2];
792 bcm->sprom.boardflags2 = value;
793
794 /* il0macaddr */
795 value = sprom[BCM43xx_SPROM_IL0MACADDR + 0];
796 *(((__be16 *)bcm->sprom.il0macaddr) + 0) = cpu_to_be16(value);
797 value = sprom[BCM43xx_SPROM_IL0MACADDR + 1];
798 *(((__be16 *)bcm->sprom.il0macaddr) + 1) = cpu_to_be16(value);
799 value = sprom[BCM43xx_SPROM_IL0MACADDR + 2];
800 *(((__be16 *)bcm->sprom.il0macaddr) + 2) = cpu_to_be16(value);
801
802 /* et0macaddr */
803 value = sprom[BCM43xx_SPROM_ET0MACADDR + 0];
804 *(((__be16 *)bcm->sprom.et0macaddr) + 0) = cpu_to_be16(value);
805 value = sprom[BCM43xx_SPROM_ET0MACADDR + 1];
806 *(((__be16 *)bcm->sprom.et0macaddr) + 1) = cpu_to_be16(value);
807 value = sprom[BCM43xx_SPROM_ET0MACADDR + 2];
808 *(((__be16 *)bcm->sprom.et0macaddr) + 2) = cpu_to_be16(value);
809
810 /* et1macaddr */
811 value = sprom[BCM43xx_SPROM_ET1MACADDR + 0];
812 *(((__be16 *)bcm->sprom.et1macaddr) + 0) = cpu_to_be16(value);
813 value = sprom[BCM43xx_SPROM_ET1MACADDR + 1];
814 *(((__be16 *)bcm->sprom.et1macaddr) + 1) = cpu_to_be16(value);
815 value = sprom[BCM43xx_SPROM_ET1MACADDR + 2];
816 *(((__be16 *)bcm->sprom.et1macaddr) + 2) = cpu_to_be16(value);
817
818 /* ethernet phy settings */
819 value = sprom[BCM43xx_SPROM_ETHPHY];
820 bcm->sprom.et0phyaddr = (value & 0x001F);
821 bcm->sprom.et1phyaddr = (value & 0x03E0) >> 5;
822
823 /* boardrev, antennas, locale */
824 value = sprom[BCM43xx_SPROM_BOARDREV];
825 bcm->sprom.boardrev = (value & 0x00FF);
826 bcm->sprom.locale = (value & 0x0F00) >> 8;
827 bcm->sprom.antennas_aphy = (value & 0x3000) >> 12;
828 bcm->sprom.antennas_bgphy = (value & 0xC000) >> 14;
829 if (modparam_locale != -1) {
830 if (modparam_locale >= 0 && modparam_locale <= 11) {
831 bcm->sprom.locale = modparam_locale;
832 printk(KERN_WARNING PFX "Operating with modified "
833 "LocaleCode %u (%s)\n",
834 bcm->sprom.locale,
835 bcm43xx_locale_string(bcm->sprom.locale));
836 } else {
837			printk(KERN_WARNING PFX "Invalid value for module parameter "
838			       "\"locale\" (valid range: 0 - 11)\n");
839 }
840 }
841
842 /* pa0b* */
843 value = sprom[BCM43xx_SPROM_PA0B0];
844 bcm->sprom.pa0b0 = value;
845 value = sprom[BCM43xx_SPROM_PA0B1];
846 bcm->sprom.pa0b1 = value;
847 value = sprom[BCM43xx_SPROM_PA0B2];
848 bcm->sprom.pa0b2 = value;
849
850 /* wl0gpio* */
851 value = sprom[BCM43xx_SPROM_WL0GPIO0];
852 if (value == 0x0000)
853 value = 0xFFFF;
854 bcm->sprom.wl0gpio0 = value & 0x00FF;
855 bcm->sprom.wl0gpio1 = (value & 0xFF00) >> 8;
856 value = sprom[BCM43xx_SPROM_WL0GPIO2];
857 if (value == 0x0000)
858 value = 0xFFFF;
859 bcm->sprom.wl0gpio2 = value & 0x00FF;
860 bcm->sprom.wl0gpio3 = (value & 0xFF00) >> 8;
861
862 /* maxpower */
863 value = sprom[BCM43xx_SPROM_MAXPWR];
864 bcm->sprom.maxpower_aphy = (value & 0xFF00) >> 8;
865 bcm->sprom.maxpower_bgphy = value & 0x00FF;
866
867 /* pa1b* */
868 value = sprom[BCM43xx_SPROM_PA1B0];
869 bcm->sprom.pa1b0 = value;
870 value = sprom[BCM43xx_SPROM_PA1B1];
871 bcm->sprom.pa1b1 = value;
872 value = sprom[BCM43xx_SPROM_PA1B2];
873 bcm->sprom.pa1b2 = value;
874
875 /* idle tssi target */
876 value = sprom[BCM43xx_SPROM_IDL_TSSI_TGT];
877 bcm->sprom.idle_tssi_tgt_aphy = value & 0x00FF;
878 bcm->sprom.idle_tssi_tgt_bgphy = (value & 0xFF00) >> 8;
879
880 /* boardflags */
881 value = sprom[BCM43xx_SPROM_BOARDFLAGS];
882 if (value == 0xFFFF)
883 value = 0x0000;
884 bcm->sprom.boardflags = value;
885 /* boardflags workarounds */
886 if (bcm->board_vendor == PCI_VENDOR_ID_DELL &&
887 bcm->chip_id == 0x4301 &&
888 bcm->board_revision == 0x74)
889 bcm->sprom.boardflags |= BCM43xx_BFL_BTCOEXIST;
890 if (bcm->board_vendor == PCI_VENDOR_ID_APPLE &&
891 bcm->board_type == 0x4E &&
892 bcm->board_revision > 0x40)
893 bcm->sprom.boardflags |= BCM43xx_BFL_PACTRL;
894
895 /* antenna gain */
896 value = sprom[BCM43xx_SPROM_ANTENNA_GAIN];
897 if (value == 0x0000 || value == 0xFFFF)
898 value = 0x0202;
899 /* convert values to Q5.2 */
900 bcm->sprom.antennagain_aphy = ((value & 0xFF00) >> 8) * 4;
901 bcm->sprom.antennagain_bgphy = (value & 0x00FF) * 4;
902
903 kfree(sprom);
904
905 return 0;
906}
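
/* Standalone sketch (not driver code): the MAC addresses above are stored
 * in the SPROM as three 16-bit words, and cpu_to_be16() lays each word
 * down with the high byte first.  A plain byte-oriented equivalent of
 * that conversion looks like this:
 */
#include <stdint.h>

static void sprom_words_to_mac(const uint16_t *words, uint8_t mac[6])
{
	int i;

	for (i = 0; i < 3; i++) {
		mac[i * 2 + 0] = (words[i] & 0xFF00) >> 8;	/* high byte first */
		mac[i * 2 + 1] = words[i] & 0x00FF;
	}
}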
907
908static int bcm43xx_geo_init(struct bcm43xx_private *bcm)
909{
910 struct ieee80211_geo *geo;
911 struct ieee80211_channel *chan;
912 int have_a = 0, have_bg = 0;
913 int i;
914 u8 channel;
915 struct bcm43xx_phyinfo *phy;
916 const char *iso_country;
917 u8 max_bg_channel;
918
919 geo = kzalloc(sizeof(*geo), GFP_KERNEL);
920 if (!geo)
921 return -ENOMEM;
922
923 for (i = 0; i < bcm->nr_80211_available; i++) {
924 phy = &(bcm->core_80211_ext[i].phy);
925 switch (phy->type) {
926 case BCM43xx_PHYTYPE_B:
927 case BCM43xx_PHYTYPE_G:
928 have_bg = 1;
929 break;
930 case BCM43xx_PHYTYPE_A:
931 have_a = 1;
932 break;
933 default:
934 assert(0);
935 }
936 }
937 iso_country = bcm43xx_locale_iso(bcm->sprom.locale);
938
939	/* Set the maximum channel based on the locale set in the SPROM or via the locale module parameter. */
940 switch (bcm->sprom.locale) {
941 case BCM43xx_LOCALE_THAILAND:
942 case BCM43xx_LOCALE_ISRAEL:
943 case BCM43xx_LOCALE_JORDAN:
944 case BCM43xx_LOCALE_USA_CANADA_ANZ:
945 case BCM43xx_LOCALE_USA_LOW:
946 max_bg_channel = 11;
947 break;
948 case BCM43xx_LOCALE_JAPAN:
949 case BCM43xx_LOCALE_JAPAN_HIGH:
950 max_bg_channel = 14;
951 break;
952 default:
953 max_bg_channel = 13;
954 }
955
956 if (have_a) {
957 for (i = 0, channel = IEEE80211_52GHZ_MIN_CHANNEL;
958 channel <= IEEE80211_52GHZ_MAX_CHANNEL; channel++) {
959 chan = &geo->a[i++];
960 chan->freq = bcm43xx_channel_to_freq_a(channel);
961 chan->channel = channel;
962 }
963 geo->a_channels = i;
964 }
965 if (have_bg) {
966 for (i = 0, channel = IEEE80211_24GHZ_MIN_CHANNEL;
967 channel <= max_bg_channel; channel++) {
968 chan = &geo->bg[i++];
969 chan->freq = bcm43xx_channel_to_freq_bg(channel);
970 chan->channel = channel;
971 }
972 geo->bg_channels = i;
973 }
974 memcpy(geo->name, iso_country, 2);
975 if (0 /*TODO: Outdoor use only */)
976 geo->name[2] = 'O';
977 else if (0 /*TODO: Indoor use only */)
978 geo->name[2] = 'I';
979 else
980 geo->name[2] = ' ';
981 geo->name[3] = '\0';
982
983 ieee80211_set_geo(bcm->ieee, geo);
984 kfree(geo);
985
986 return 0;
987}
988
989/* DummyTransmission function, as documented on
990 * http://bcm-specs.sipsolutions.net/DummyTransmission
991 */
992void bcm43xx_dummy_transmission(struct bcm43xx_private *bcm)
993{
994 struct bcm43xx_phyinfo *phy = bcm43xx_current_phy(bcm);
995 struct bcm43xx_radioinfo *radio = bcm43xx_current_radio(bcm);
996 unsigned int i, max_loop;
997 u16 value = 0;
998 u32 buffer[5] = {
999 0x00000000,
1000 0x0000D400,
1001 0x00000000,
1002 0x00000001,
1003 0x00000000,
1004 };
1005
1006 switch (phy->type) {
1007 case BCM43xx_PHYTYPE_A:
1008 max_loop = 0x1E;
1009 buffer[0] = 0xCC010200;
1010 break;
1011 case BCM43xx_PHYTYPE_B:
1012 case BCM43xx_PHYTYPE_G:
1013 max_loop = 0xFA;
1014 buffer[0] = 0x6E840B00;
1015 break;
1016 default:
1017 assert(0);
1018 return;
1019 }
1020
1021 for (i = 0; i < 5; i++)
1022 bcm43xx_ram_write(bcm, i * 4, buffer[i]);
1023
1024 bcm43xx_read32(bcm, BCM43xx_MMIO_STATUS_BITFIELD); /* dummy read */
1025
1026 bcm43xx_write16(bcm, 0x0568, 0x0000);
1027 bcm43xx_write16(bcm, 0x07C0, 0x0000);
1028 bcm43xx_write16(bcm, 0x050C, ((phy->type == BCM43xx_PHYTYPE_A) ? 1 : 0));
1029 bcm43xx_write16(bcm, 0x0508, 0x0000);
1030 bcm43xx_write16(bcm, 0x050A, 0x0000);
1031 bcm43xx_write16(bcm, 0x054C, 0x0000);
1032 bcm43xx_write16(bcm, 0x056A, 0x0014);
1033 bcm43xx_write16(bcm, 0x0568, 0x0826);
1034 bcm43xx_write16(bcm, 0x0500, 0x0000);
1035 bcm43xx_write16(bcm, 0x0502, 0x0030);
1036
1037 if (radio->version == 0x2050 && radio->revision <= 0x5)
1038 bcm43xx_radio_write16(bcm, 0x0051, 0x0017);
1039 for (i = 0x00; i < max_loop; i++) {
1040 value = bcm43xx_read16(bcm, 0x050E);
1041 if (value & 0x0080)
1042 break;
1043 udelay(10);
1044 }
1045 for (i = 0x00; i < 0x0A; i++) {
1046 value = bcm43xx_read16(bcm, 0x050E);
1047 if (value & 0x0400)
1048 break;
1049 udelay(10);
1050 }
1051 for (i = 0x00; i < 0x0A; i++) {
1052 value = bcm43xx_read16(bcm, 0x0690);
1053 if (!(value & 0x0100))
1054 break;
1055 udelay(10);
1056 }
1057 if (radio->version == 0x2050 && radio->revision <= 0x5)
1058 bcm43xx_radio_write16(bcm, 0x0051, 0x0037);
1059}
1060
1061static void key_write(struct bcm43xx_private *bcm,
1062 u8 index, u8 algorithm, const __le16 *key)
1063{
1064 unsigned int i, basic_wep = 0;
1065 u32 offset;
1066 u16 value;
1067
1068 /* Write associated key information */
1069 bcm43xx_shm_write16(bcm, BCM43xx_SHM_SHARED, 0x100 + (index * 2),
1070 ((index << 4) | (algorithm & 0x0F)));
1071
1072 /* The first 4 WEP keys need extra love */
1073 if (((algorithm == BCM43xx_SEC_ALGO_WEP) ||
1074 (algorithm == BCM43xx_SEC_ALGO_WEP104)) && (index < 4))
1075 basic_wep = 1;
1076
1077 /* Write key payload, 8 little endian words */
1078 offset = bcm->security_offset + (index * BCM43xx_SEC_KEYSIZE);
1079 for (i = 0; i < (BCM43xx_SEC_KEYSIZE / sizeof(u16)); i++) {
1080 value = le16_to_cpu(key[i]);
1081 bcm43xx_shm_write16(bcm, BCM43xx_SHM_SHARED,
1082 offset + (i * 2), value);
1083
1084 if (!basic_wep)
1085 continue;
1086
1087 bcm43xx_shm_write16(bcm, BCM43xx_SHM_SHARED,
1088 offset + (i * 2) + 4 * BCM43xx_SEC_KEYSIZE,
1089 value);
1090 }
1091}
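
/* Standalone sketch (not driver code): the shared-memory offsets used by
 * key_write() above.  The per-key descriptor lives at 0x100 + index * 2,
 * the payload at security_offset + index * keysize, and the first four
 * (default WEP) keys are mirrored a further 4 * keysize bytes in.  The
 * keysize value is a parameter here; the driver uses BCM43xx_SEC_KEYSIZE.
 */
#include <stdint.h>

static uint16_t key_descriptor_offset(unsigned int index)
{
	return 0x100 + index * 2;
}

static uint32_t key_payload_offset(uint32_t security_offset,
				   unsigned int index, unsigned int keysize)
{
	return security_offset + index * keysize;
}

static uint32_t wep_mirror_offset(uint32_t security_offset,
				  unsigned int index, unsigned int keysize)
{
	return security_offset + index * keysize + 4 * keysize;
}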
1092
1093static void keymac_write(struct bcm43xx_private *bcm,
1094 u8 index, const __be32 *addr)
1095{
1096	/* For keys 0-3 there is no associated MAC address. */
1097 if (index < 4)
1098 return;
1099
1100 index -= 4;
1101 if (bcm->current_core->rev >= 5) {
1102 bcm43xx_shm_write32(bcm,
1103 BCM43xx_SHM_HWMAC,
1104 index * 2,
1105 be32_to_cpu(*addr));
1106 bcm43xx_shm_write16(bcm,
1107 BCM43xx_SHM_HWMAC,
1108 (index * 2) + 1,
1109 be16_to_cpu(*((__be16 *)(addr + 1))));
1110 } else {
1111 if (index < 8) {
1112 TODO(); /* Put them in the macaddress filter */
1113 } else {
1114 TODO();
1115			/* Put them into BCM43xx_SHM_SHARED, starting at index 0x0120.
1116			   Keep in mind to update the count of key MACs at 0x003E as well! */
1117 }
1118 }
1119}
1120
1121static int bcm43xx_key_write(struct bcm43xx_private *bcm,
1122 u8 index, u8 algorithm,
1123 const u8 *_key, int key_len,
1124 const u8 *mac_addr)
1125{
1126 u8 key[BCM43xx_SEC_KEYSIZE] = { 0 };
1127
1128 if (index >= ARRAY_SIZE(bcm->key))
1129 return -EINVAL;
1130 if (key_len > ARRAY_SIZE(key))
1131 return -EINVAL;
1132 if (algorithm < 1 || algorithm > 5)
1133 return -EINVAL;
1134
1135 memcpy(key, _key, key_len);
1136 key_write(bcm, index, algorithm, (const __le16 *)key);
1137 keymac_write(bcm, index, (const __be32 *)mac_addr);
1138
1139 bcm->key[index].algorithm = algorithm;
1140
1141 return 0;
1142}
1143
1144static void bcm43xx_clear_keys(struct bcm43xx_private *bcm)
1145{
1146 static const __be32 zero_mac[2] = { 0 };
1147	unsigned int i, j, nr_keys = 54;
1148 u16 offset;
1149
1150 if (bcm->current_core->rev < 5)
1151 nr_keys = 16;
1152 assert(nr_keys <= ARRAY_SIZE(bcm->key));
1153
1154 for (i = 0; i < nr_keys; i++) {
1155 bcm->key[i].enabled = 0;
1156		/* keymac_write() returns immediately for i < 4. */
1157 keymac_write(bcm, i, zero_mac);
1158 bcm43xx_shm_write16(bcm, BCM43xx_SHM_SHARED,
1159 0x100 + (i * 2), 0x0000);
1160 for (j = 0; j < 8; j++) {
1161 offset = bcm->security_offset + (j * 4) + (i * BCM43xx_SEC_KEYSIZE);
1162 bcm43xx_shm_write16(bcm, BCM43xx_SHM_SHARED,
1163 offset, 0x0000);
1164 }
1165 }
1166 dprintk(KERN_INFO PFX "Keys cleared\n");
1167}
1168
1169/* Lowlevel core-switch function. This is only to be used in
1170 * bcm43xx_switch_core() and bcm43xx_probe_cores()
1171 */
1172static int _switch_core(struct bcm43xx_private *bcm, int core)
1173{
1174 int err;
1175 int attempts = 0;
1176 u32 current_core;
1177
1178 assert(core >= 0);
1179 while (1) {
1180 err = bcm43xx_pci_write_config32(bcm, BCM43xx_PCICFG_ACTIVE_CORE,
1181 (core * 0x1000) + 0x18000000);
1182 if (unlikely(err))
1183 goto error;
1184 err = bcm43xx_pci_read_config32(bcm, BCM43xx_PCICFG_ACTIVE_CORE,
1185 &current_core);
1186 if (unlikely(err))
1187 goto error;
1188 current_core = (current_core - 0x18000000) / 0x1000;
1189 if (current_core == core)
1190 break;
1191
1192 if (unlikely(attempts++ > BCM43xx_SWITCH_CORE_MAX_RETRIES))
1193 goto error;
1194 udelay(10);
1195 }
1196
1197 return 0;
1198error:
1199 printk(KERN_ERR PFX "Failed to switch to core %d\n", core);
1200 return -ENODEV;
1201}
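
/* Standalone sketch (not driver code): the address arithmetic behind
 * _switch_core().  Each backplane core occupies a 0x1000-byte window
 * starting at 0x18000000, so selecting core N means writing
 * 0x18000000 + N * 0x1000 to the config register, and the readback is
 * decoded with the inverse calculation.
 */
#include <stdint.h>

static uint32_t core_to_window(unsigned int core)
{
	return 0x18000000u + core * 0x1000u;
}

static unsigned int window_to_core(uint32_t window)
{
	return (window - 0x18000000u) / 0x1000u;
}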
1202
1203int bcm43xx_switch_core(struct bcm43xx_private *bcm, struct bcm43xx_coreinfo *new_core)
1204{
1205 int err;
1206
1207 if (unlikely(!new_core))
1208 return 0;
1209 if (!new_core->available)
1210 return -ENODEV;
1211 if (bcm->current_core == new_core)
1212 return 0;
1213 err = _switch_core(bcm, new_core->index);
1214 if (unlikely(err))
1215 goto out;
1216
1217 bcm->current_core = new_core;
1218out:
1219 return err;
1220}
1221
1222static int bcm43xx_core_enabled(struct bcm43xx_private *bcm)
1223{
1224 u32 value;
1225
1226 value = bcm43xx_read32(bcm, BCM43xx_CIR_SBTMSTATELOW);
1227 value &= BCM43xx_SBTMSTATELOW_CLOCK | BCM43xx_SBTMSTATELOW_RESET
1228 | BCM43xx_SBTMSTATELOW_REJECT;
1229
1230 return (value == BCM43xx_SBTMSTATELOW_CLOCK);
1231}
1232
1233/* disable current core */
1234static int bcm43xx_core_disable(struct bcm43xx_private *bcm, u32 core_flags)
1235{
1236 u32 sbtmstatelow;
1237 u32 sbtmstatehigh;
1238 int i;
1239
1240 /* fetch sbtmstatelow from core information registers */
1241 sbtmstatelow = bcm43xx_read32(bcm, BCM43xx_CIR_SBTMSTATELOW);
1242
1243 /* core is already in reset */
1244 if (sbtmstatelow & BCM43xx_SBTMSTATELOW_RESET)
1245 goto out;
1246
1247 if (sbtmstatelow & BCM43xx_SBTMSTATELOW_CLOCK) {
1248 sbtmstatelow = BCM43xx_SBTMSTATELOW_CLOCK |
1249 BCM43xx_SBTMSTATELOW_REJECT;
1250 bcm43xx_write32(bcm, BCM43xx_CIR_SBTMSTATELOW, sbtmstatelow);
1251
1252 for (i = 0; i < 1000; i++) {
1253 sbtmstatelow = bcm43xx_read32(bcm, BCM43xx_CIR_SBTMSTATELOW);
1254 if (sbtmstatelow & BCM43xx_SBTMSTATELOW_REJECT) {
1255 i = -1;
1256 break;
1257 }
1258 udelay(10);
1259 }
1260 if (i != -1) {
1261 printk(KERN_ERR PFX "Error: core_disable() REJECT timeout!\n");
1262 return -EBUSY;
1263 }
1264
1265 for (i = 0; i < 1000; i++) {
1266 sbtmstatehigh = bcm43xx_read32(bcm, BCM43xx_CIR_SBTMSTATEHIGH);
1267 if (!(sbtmstatehigh & BCM43xx_SBTMSTATEHIGH_BUSY)) {
1268 i = -1;
1269 break;
1270 }
1271 udelay(10);
1272 }
1273 if (i != -1) {
1274 printk(KERN_ERR PFX "Error: core_disable() BUSY timeout!\n");
1275 return -EBUSY;
1276 }
1277
1278 sbtmstatelow = BCM43xx_SBTMSTATELOW_FORCE_GATE_CLOCK |
1279 BCM43xx_SBTMSTATELOW_REJECT |
1280 BCM43xx_SBTMSTATELOW_RESET |
1281 BCM43xx_SBTMSTATELOW_CLOCK |
1282 core_flags;
1283 bcm43xx_write32(bcm, BCM43xx_CIR_SBTMSTATELOW, sbtmstatelow);
1284 udelay(10);
1285 }
1286
1287 sbtmstatelow = BCM43xx_SBTMSTATELOW_RESET |
1288 BCM43xx_SBTMSTATELOW_REJECT |
1289 core_flags;
1290 bcm43xx_write32(bcm, BCM43xx_CIR_SBTMSTATELOW, sbtmstatelow);
1291
1292out:
1293 bcm->current_core->enabled = 0;
1294
1295 return 0;
1296}
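
/* Standalone sketch (not driver code): the two wait loops above use
 * "i = -1" as a success marker and re-test i after the loop.  The same
 * logic can be written as a small poll helper; cond_met() is a
 * hypothetical callback standing in for the register test, and
 * delay_us() for udelay().
 */
#include <stdbool.h>

static bool poll_until(bool (*cond_met)(void *ctx), void *ctx,
		       unsigned int attempts,
		       void (*delay_us)(unsigned int us))
{
	unsigned int i;

	for (i = 0; i < attempts; i++) {
		if (cond_met(ctx))
			return true;
		delay_us(10);
	}
	return false;	/* timed out */
}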
1297
1298/* enable (reset) current core */
1299static int bcm43xx_core_enable(struct bcm43xx_private *bcm, u32 core_flags)
1300{
1301 u32 sbtmstatelow;
1302 u32 sbtmstatehigh;
1303 u32 sbimstate;
1304 int err;
1305
1306 err = bcm43xx_core_disable(bcm, core_flags);
1307 if (err)
1308 goto out;
1309
1310 sbtmstatelow = BCM43xx_SBTMSTATELOW_CLOCK |
1311 BCM43xx_SBTMSTATELOW_RESET |
1312 BCM43xx_SBTMSTATELOW_FORCE_GATE_CLOCK |
1313 core_flags;
1314 bcm43xx_write32(bcm, BCM43xx_CIR_SBTMSTATELOW, sbtmstatelow);
1315 udelay(1);
1316
1317 sbtmstatehigh = bcm43xx_read32(bcm, BCM43xx_CIR_SBTMSTATEHIGH);
1318 if (sbtmstatehigh & BCM43xx_SBTMSTATEHIGH_SERROR) {
1319 sbtmstatehigh = 0x00000000;
1320 bcm43xx_write32(bcm, BCM43xx_CIR_SBTMSTATEHIGH, sbtmstatehigh);
1321 }
1322
1323 sbimstate = bcm43xx_read32(bcm, BCM43xx_CIR_SBIMSTATE);
1324 if (sbimstate & (BCM43xx_SBIMSTATE_IB_ERROR | BCM43xx_SBIMSTATE_TIMEOUT)) {
1325 sbimstate &= ~(BCM43xx_SBIMSTATE_IB_ERROR | BCM43xx_SBIMSTATE_TIMEOUT);
1326 bcm43xx_write32(bcm, BCM43xx_CIR_SBIMSTATE, sbimstate);
1327 }
1328
1329 sbtmstatelow = BCM43xx_SBTMSTATELOW_CLOCK |
1330 BCM43xx_SBTMSTATELOW_FORCE_GATE_CLOCK |
1331 core_flags;
1332 bcm43xx_write32(bcm, BCM43xx_CIR_SBTMSTATELOW, sbtmstatelow);
1333 udelay(1);
1334
1335 sbtmstatelow = BCM43xx_SBTMSTATELOW_CLOCK | core_flags;
1336 bcm43xx_write32(bcm, BCM43xx_CIR_SBTMSTATELOW, sbtmstatelow);
1337 udelay(1);
1338
1339 bcm->current_core->enabled = 1;
1340 assert(err == 0);
1341out:
1342 return err;
1343}
1344
1345/* http://bcm-specs.sipsolutions.net/80211CoreReset */
1346void bcm43xx_wireless_core_reset(struct bcm43xx_private *bcm, int connect_phy)
1347{
1348 u32 flags = 0x00040000;
1349
1350 if ((bcm43xx_core_enabled(bcm)) &&
1351 !bcm43xx_using_pio(bcm)) {
1352 }
1353 if (bcm43xx_status(bcm) == BCM43xx_STAT_SHUTTINGDOWN) {
1354 bcm43xx_write32(bcm, BCM43xx_MMIO_STATUS_BITFIELD,
1355 bcm43xx_read32(bcm, BCM43xx_MMIO_STATUS_BITFIELD)
1356 & ~(BCM43xx_SBF_MAC_ENABLED | 0x00000002));
1357 } else {
1358 if (connect_phy)
1359 flags |= BCM43xx_SBTMSTATELOW_G_MODE_ENABLE;
1360 bcm43xx_phy_connect(bcm, connect_phy);
1361 bcm43xx_core_enable(bcm, flags);
1362 bcm43xx_write16(bcm, 0x03E6, 0x0000);
1363 bcm43xx_write32(bcm, BCM43xx_MMIO_STATUS_BITFIELD,
1364 bcm43xx_read32(bcm, BCM43xx_MMIO_STATUS_BITFIELD)
1365 | BCM43xx_SBF_400);
1366 }
1367}
1368
1369static void bcm43xx_wireless_core_disable(struct bcm43xx_private *bcm)
1370{
1371 bcm43xx_radio_turn_off(bcm);
1372 bcm43xx_write16(bcm, 0x03E6, 0x00F4);
1373 bcm43xx_core_disable(bcm, 0);
1374}
1375
1376/* Mark the current 80211 core inactive. */
1377static void bcm43xx_wireless_core_mark_inactive(struct bcm43xx_private *bcm)
1378{
1379 u32 sbtmstatelow;
1380
1381 bcm43xx_interrupt_disable(bcm, BCM43xx_IRQ_ALL);
1382 bcm43xx_radio_turn_off(bcm);
1383 sbtmstatelow = bcm43xx_read32(bcm, BCM43xx_CIR_SBTMSTATELOW);
1384 sbtmstatelow &= 0xDFF5FFFF;
1385 sbtmstatelow |= 0x000A0000;
1386 bcm43xx_write32(bcm, BCM43xx_CIR_SBTMSTATELOW, sbtmstatelow);
1387 udelay(1);
1388 sbtmstatelow = bcm43xx_read32(bcm, BCM43xx_CIR_SBTMSTATELOW);
1389 sbtmstatelow &= 0xFFF5FFFF;
1390 sbtmstatelow |= 0x00080000;
1391 bcm43xx_write32(bcm, BCM43xx_CIR_SBTMSTATELOW, sbtmstatelow);
1392 udelay(1);
1393}
1394
1395static void handle_irq_transmit_status(struct bcm43xx_private *bcm)
1396{
1397 u32 v0, v1;
1398 u16 tmp;
1399 struct bcm43xx_xmitstatus stat;
1400
1401 while (1) {
1402 v0 = bcm43xx_read32(bcm, BCM43xx_MMIO_XMITSTAT_0);
1403 if (!v0)
1404 break;
1405 v1 = bcm43xx_read32(bcm, BCM43xx_MMIO_XMITSTAT_1);
1406
1407 stat.cookie = (v0 >> 16) & 0x0000FFFF;
1408 tmp = (u16)((v0 & 0xFFF0) | ((v0 & 0xF) >> 1));
1409 stat.flags = tmp & 0xFF;
1410 stat.cnt1 = (tmp & 0x0F00) >> 8;
1411 stat.cnt2 = (tmp & 0xF000) >> 12;
1412 stat.seq = (u16)(v1 & 0xFFFF);
1413 stat.unknown = (u16)((v1 >> 16) & 0xFF);
1414
1415 bcm43xx_debugfs_log_txstat(bcm, &stat);
1416
1417 if (stat.flags & BCM43xx_TXSTAT_FLAG_AMPDU)
1418 continue;
1419 if (stat.flags & BCM43xx_TXSTAT_FLAG_INTER)
1420 continue;
1421
1422 if (bcm43xx_using_pio(bcm))
1423 bcm43xx_pio_handle_xmitstatus(bcm, &stat);
1424 else
1425 bcm43xx_dma_handle_xmitstatus(bcm, &stat);
1426 }
1427}
1428
1429static void drain_txstatus_queue(struct bcm43xx_private *bcm)
1430{
1431 u32 dummy;
1432
1433 if (bcm->current_core->rev < 5)
1434 return;
1435 /* Read all entries from the microcode TXstatus FIFO
1436 * and throw them away.
1437 */
1438 while (1) {
1439 dummy = bcm43xx_read32(bcm, BCM43xx_MMIO_XMITSTAT_0);
1440 if (!dummy)
1441 break;
1442 dummy = bcm43xx_read32(bcm, BCM43xx_MMIO_XMITSTAT_1);
1443 }
1444}
1445
1446static void bcm43xx_generate_noise_sample(struct bcm43xx_private *bcm)
1447{
1448 bcm43xx_shm_write16(bcm, BCM43xx_SHM_SHARED, 0x408, 0x7F7F);
1449 bcm43xx_shm_write16(bcm, BCM43xx_SHM_SHARED, 0x40A, 0x7F7F);
1450 bcm43xx_write32(bcm, BCM43xx_MMIO_STATUS2_BITFIELD,
1451 bcm43xx_read32(bcm, BCM43xx_MMIO_STATUS2_BITFIELD) | (1 << 4));
1452 assert(bcm->noisecalc.core_at_start == bcm->current_core);
1453 assert(bcm->noisecalc.channel_at_start == bcm43xx_current_radio(bcm)->channel);
1454}
1455
1456static void bcm43xx_calculate_link_quality(struct bcm43xx_private *bcm)
1457{
1458 /* Top half of Link Quality calculation. */
1459
1460 if (bcm->noisecalc.calculation_running)
1461 return;
1462 bcm->noisecalc.core_at_start = bcm->current_core;
1463 bcm->noisecalc.channel_at_start = bcm43xx_current_radio(bcm)->channel;
1464 bcm->noisecalc.calculation_running = 1;
1465 bcm->noisecalc.nr_samples = 0;
1466
1467 bcm43xx_generate_noise_sample(bcm);
1468}
1469
1470static void handle_irq_noise(struct bcm43xx_private *bcm)
1471{
1472 struct bcm43xx_radioinfo *radio = bcm43xx_current_radio(bcm);
1473 u16 tmp;
1474 u8 noise[4];
1475 u8 i, j;
1476 s32 average;
1477
1478 /* Bottom half of Link Quality calculation. */
1479
1480 assert(bcm->noisecalc.calculation_running);
1481 if (bcm->noisecalc.core_at_start != bcm->current_core ||
1482 bcm->noisecalc.channel_at_start != radio->channel)
1483 goto drop_calculation;
1484 tmp = bcm43xx_shm_read16(bcm, BCM43xx_SHM_SHARED, 0x408);
1485 noise[0] = (tmp & 0x00FF);
1486 noise[1] = (tmp & 0xFF00) >> 8;
1487 tmp = bcm43xx_shm_read16(bcm, BCM43xx_SHM_SHARED, 0x40A);
1488 noise[2] = (tmp & 0x00FF);
1489 noise[3] = (tmp & 0xFF00) >> 8;
1490 if (noise[0] == 0x7F || noise[1] == 0x7F ||
1491 noise[2] == 0x7F || noise[3] == 0x7F)
1492 goto generate_new;
1493
1494 /* Get the noise samples. */
1495 assert(bcm->noisecalc.nr_samples < 8);
1496 i = bcm->noisecalc.nr_samples;
1497 noise[0] = limit_value(noise[0], 0, ARRAY_SIZE(radio->nrssi_lt) - 1);
1498 noise[1] = limit_value(noise[1], 0, ARRAY_SIZE(radio->nrssi_lt) - 1);
1499 noise[2] = limit_value(noise[2], 0, ARRAY_SIZE(radio->nrssi_lt) - 1);
1500 noise[3] = limit_value(noise[3], 0, ARRAY_SIZE(radio->nrssi_lt) - 1);
1501 bcm->noisecalc.samples[i][0] = radio->nrssi_lt[noise[0]];
1502 bcm->noisecalc.samples[i][1] = radio->nrssi_lt[noise[1]];
1503 bcm->noisecalc.samples[i][2] = radio->nrssi_lt[noise[2]];
1504 bcm->noisecalc.samples[i][3] = radio->nrssi_lt[noise[3]];
1505 bcm->noisecalc.nr_samples++;
1506 if (bcm->noisecalc.nr_samples == 8) {
1507 /* Calculate the Link Quality by the noise samples. */
1508 average = 0;
1509 for (i = 0; i < 8; i++) {
1510 for (j = 0; j < 4; j++)
1511 average += bcm->noisecalc.samples[i][j];
1512 }
1513 average /= (8 * 4);
1514 average *= 125;
1515 average += 64;
1516 average /= 128;
1517
1518 tmp = bcm43xx_shm_read16(bcm, BCM43xx_SHM_SHARED, 0x40C);
1519 tmp = (tmp / 128) & 0x1F;
1520 if (tmp >= 8)
1521 average += 2;
1522 else
1523 average -= 25;
1524 if (tmp == 8)
1525 average -= 72;
1526 else
1527 average -= 48;
1528
1529 bcm->stats.noise = average;
1530drop_calculation:
1531 bcm->noisecalc.calculation_running = 0;
1532 return;
1533 }
1534generate_new:
1535 bcm43xx_generate_noise_sample(bcm);
1536}
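
/* Standalone sketch (not driver code): the scaling applied to the noise
 * average above.  After summing the 8 x 4 samples and dividing by 32, the
 * result is multiplied by 125/128, with "+ 64" adding half of the divisor
 * so the division rounds rather than truncates (for non-negative values).
 */
#include <stdint.h>

static int32_t scale_by_125_over_128(int32_t average)
{
	return (average * 125 + 64) / 128;
}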
1537
1538static void handle_irq_ps(struct bcm43xx_private *bcm)
1539{
1540 if (bcm->ieee->iw_mode == IW_MODE_MASTER) {
1541 ///TODO: PS TBTT
1542 } else {
1543 if (1/*FIXME: the last PSpoll frame was sent successfully */)
1544 bcm43xx_power_saving_ctl_bits(bcm, -1, -1);
1545 }
1546 if (bcm->ieee->iw_mode == IW_MODE_ADHOC)
1547 bcm->reg124_set_0x4 = 1;
1548 //FIXME else set to false?
1549}
1550
1551static void handle_irq_reg124(struct bcm43xx_private *bcm)
1552{
1553 if (!bcm->reg124_set_0x4)
1554 return;
1555 bcm43xx_write32(bcm, BCM43xx_MMIO_STATUS2_BITFIELD,
1556 bcm43xx_read32(bcm, BCM43xx_MMIO_STATUS2_BITFIELD)
1557 | 0x4);
1558 //FIXME: reset reg124_set_0x4 to false?
1559}
1560
1561static void handle_irq_pmq(struct bcm43xx_private *bcm)
1562{
1563 u32 tmp;
1564
1565 //TODO: AP mode.
1566
1567 while (1) {
1568 tmp = bcm43xx_read32(bcm, BCM43xx_MMIO_PS_STATUS);
1569 if (!(tmp & 0x00000008))
1570 break;
1571 }
1572	/* A 16-bit write is odd here, but correct. */
1573 bcm43xx_write16(bcm, BCM43xx_MMIO_PS_STATUS, 0x0002);
1574}
1575
1576static void bcm43xx_generate_beacon_template(struct bcm43xx_private *bcm,
1577 u16 ram_offset, u16 shm_size_offset)
1578{
1579 u32 value;
1580 u16 size = 0;
1581
1582 /* Timestamp. */
1583 //FIXME: assumption: The chip sets the timestamp
1584 value = 0;
1585 bcm43xx_ram_write(bcm, ram_offset++, value);
1586 bcm43xx_ram_write(bcm, ram_offset++, value);
1587 size += 8;
1588
1589 /* Beacon Interval / Capability Information */
1590 value = 0x0000;//FIXME: Which interval?
1591 value |= (1 << 0) << 16; /* ESS */
1592 value |= (1 << 2) << 16; /* CF Pollable */ //FIXME?
1593 value |= (1 << 3) << 16; /* CF Poll Request */ //FIXME?
1594 if (!bcm->ieee->open_wep)
1595 value |= (1 << 4) << 16; /* Privacy */
1596 bcm43xx_ram_write(bcm, ram_offset++, value);
1597 size += 4;
1598
1599 /* SSID */
1600 //TODO
1601
1602 /* FH Parameter Set */
1603 //TODO
1604
1605 /* DS Parameter Set */
1606 //TODO
1607
1608 /* CF Parameter Set */
1609 //TODO
1610
1611 /* TIM */
1612 //TODO
1613
1614 bcm43xx_shm_write16(bcm, BCM43xx_SHM_SHARED, shm_size_offset, size);
1615}
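
/* Standalone sketch (not driver code): the packing used for the
 * "Beacon Interval / Capability Information" word above.  The beacon
 * interval sits in the low 16 bits of the template RAM word and the
 * 802.11 capability field in the high 16 bits, which is why each
 * capability bit is shifted left by 16.
 */
#include <stdint.h>

static uint32_t pack_interval_and_caps(uint16_t beacon_interval, uint16_t caps)
{
	return (uint32_t)beacon_interval | ((uint32_t)caps << 16);
}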
1616
1617static void handle_irq_beacon(struct bcm43xx_private *bcm)
1618{
1619 u32 status;
1620
1621 bcm->irq_savedstate &= ~BCM43xx_IRQ_BEACON;
1622 status = bcm43xx_read32(bcm, BCM43xx_MMIO_STATUS2_BITFIELD);
1623
1624 if ((status & 0x1) && (status & 0x2)) {
1625 /* ACK beacon IRQ. */
1626 bcm43xx_write32(bcm, BCM43xx_MMIO_GEN_IRQ_REASON,
1627 BCM43xx_IRQ_BEACON);
1628 bcm->irq_savedstate |= BCM43xx_IRQ_BEACON;
1629 return;
1630 }
1631 if (!(status & 0x1)) {
1632 bcm43xx_generate_beacon_template(bcm, 0x68, 0x18);
1633 status |= 0x1;
1634 bcm43xx_write32(bcm, BCM43xx_MMIO_STATUS2_BITFIELD, status);
1635 }
1636 if (!(status & 0x2)) {
1637 bcm43xx_generate_beacon_template(bcm, 0x468, 0x1A);
1638 status |= 0x2;
1639 bcm43xx_write32(bcm, BCM43xx_MMIO_STATUS2_BITFIELD, status);
1640 }
1641}
1642
1643/* Interrupt handler bottom-half */
1644static void bcm43xx_interrupt_tasklet(struct bcm43xx_private *bcm)
1645{
1646 u32 reason;
1647 u32 dma_reason[6];
1648 u32 merged_dma_reason = 0;
1649 int i, activity = 0;
1650 unsigned long flags;
1651
1652#ifdef CONFIG_BCM43XX_DEBUG
1653 u32 _handled = 0x00000000;
1654# define bcmirq_handled(irq) do { _handled |= (irq); } while (0)
1655#else
1656# define bcmirq_handled(irq) do { /* nothing */ } while (0)
1657#endif /* CONFIG_BCM43XX_DEBUG*/
1658
1659 spin_lock_irqsave(&bcm->irq_lock, flags);
1660 reason = bcm->irq_reason;
1661 for (i = 5; i >= 0; i--) {
1662 dma_reason[i] = bcm->dma_reason[i];
1663 merged_dma_reason |= dma_reason[i];
1664 }
1665
1666 if (unlikely(reason & BCM43xx_IRQ_XMIT_ERROR)) {
1667		/* TX error. We get this when the template RAM is written with the wrong
1668		 * endianness in dummy_tx(). We also get this if something is wrong with the TX header
1669 * on DMA or PIO queues.
1670 * Maybe we get this in other error conditions, too.
1671 */
1672 printkl(KERN_ERR PFX "FATAL ERROR: BCM43xx_IRQ_XMIT_ERROR\n");
1673 bcmirq_handled(BCM43xx_IRQ_XMIT_ERROR);
1674 }
1675 if (unlikely(merged_dma_reason & BCM43xx_DMAIRQ_FATALMASK)) {
1676 printkl(KERN_ERR PFX "FATAL ERROR: Fatal DMA error: "
1677 "0x%08X, 0x%08X, 0x%08X, "
1678 "0x%08X, 0x%08X, 0x%08X\n",
1679 dma_reason[0], dma_reason[1],
1680 dma_reason[2], dma_reason[3],
1681 dma_reason[4], dma_reason[5]);
1682 bcm43xx_controller_restart(bcm, "DMA error");
1683 mmiowb();
1684 spin_unlock_irqrestore(&bcm->irq_lock, flags);
1685 return;
1686 }
1687 if (unlikely(merged_dma_reason & BCM43xx_DMAIRQ_NONFATALMASK)) {
1688 printkl(KERN_ERR PFX "DMA error: "
1689 "0x%08X, 0x%08X, 0x%08X, "
1690 "0x%08X, 0x%08X, 0x%08X\n",
1691 dma_reason[0], dma_reason[1],
1692 dma_reason[2], dma_reason[3],
1693 dma_reason[4], dma_reason[5]);
1694 }
1695
1696 if (reason & BCM43xx_IRQ_PS) {
1697 handle_irq_ps(bcm);
1698 bcmirq_handled(BCM43xx_IRQ_PS);
1699 }
1700
1701 if (reason & BCM43xx_IRQ_REG124) {
1702 handle_irq_reg124(bcm);
1703 bcmirq_handled(BCM43xx_IRQ_REG124);
1704 }
1705
1706 if (reason & BCM43xx_IRQ_BEACON) {
1707 if (bcm->ieee->iw_mode == IW_MODE_MASTER)
1708 handle_irq_beacon(bcm);
1709 bcmirq_handled(BCM43xx_IRQ_BEACON);
1710 }
1711
1712 if (reason & BCM43xx_IRQ_PMQ) {
1713 handle_irq_pmq(bcm);
1714 bcmirq_handled(BCM43xx_IRQ_PMQ);
1715 }
1716
1717 if (reason & BCM43xx_IRQ_SCAN) {
1718 /*TODO*/
1719 //bcmirq_handled(BCM43xx_IRQ_SCAN);
1720 }
1721
1722 if (reason & BCM43xx_IRQ_NOISE) {
1723 handle_irq_noise(bcm);
1724 bcmirq_handled(BCM43xx_IRQ_NOISE);
1725 }
1726
1727 /* Check the DMA reason registers for received data. */
1728 if (dma_reason[0] & BCM43xx_DMAIRQ_RX_DONE) {
1729 if (bcm43xx_using_pio(bcm))
1730 bcm43xx_pio_rx(bcm43xx_current_pio(bcm)->queue0);
1731 else
1732 bcm43xx_dma_rx(bcm43xx_current_dma(bcm)->rx_ring0);
1733		/* We intentionally don't set "activity" to 1 here. */
1734 }
1735 assert(!(dma_reason[1] & BCM43xx_DMAIRQ_RX_DONE));
1736 assert(!(dma_reason[2] & BCM43xx_DMAIRQ_RX_DONE));
1737 if (dma_reason[3] & BCM43xx_DMAIRQ_RX_DONE) {
1738 if (bcm43xx_using_pio(bcm))
1739 bcm43xx_pio_rx(bcm43xx_current_pio(bcm)->queue3);
1740 else
1741 bcm43xx_dma_rx(bcm43xx_current_dma(bcm)->rx_ring3);
1742 activity = 1;
1743 }
1744 assert(!(dma_reason[4] & BCM43xx_DMAIRQ_RX_DONE));
1745 assert(!(dma_reason[5] & BCM43xx_DMAIRQ_RX_DONE));
1746 bcmirq_handled(BCM43xx_IRQ_RX);
1747
1748 if (reason & BCM43xx_IRQ_XMIT_STATUS) {
1749 handle_irq_transmit_status(bcm);
1750 activity = 1;
1751 //TODO: In AP mode, this also causes sending of powersave responses.
1752 bcmirq_handled(BCM43xx_IRQ_XMIT_STATUS);
1753 }
1754
1755 /* IRQ_PIO_WORKAROUND is handled in the top-half. */
1756 bcmirq_handled(BCM43xx_IRQ_PIO_WORKAROUND);
1757#ifdef CONFIG_BCM43XX_DEBUG
1758 if (unlikely(reason & ~_handled)) {
1759 printkl(KERN_WARNING PFX
1760 "Unhandled IRQ! Reason: 0x%08x, Unhandled: 0x%08x, "
1761 "DMA: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
1762 reason, (reason & ~_handled),
1763 dma_reason[0], dma_reason[1],
1764 dma_reason[2], dma_reason[3]);
1765 }
1766#endif
1767#undef bcmirq_handled
1768
1769 if (!modparam_noleds)
1770 bcm43xx_leds_update(bcm, activity);
1771 bcm43xx_interrupt_enable(bcm, bcm->irq_savedstate);
1772 mmiowb();
1773 spin_unlock_irqrestore(&bcm->irq_lock, flags);
1774}
1775
1776static void pio_irq_workaround(struct bcm43xx_private *bcm,
1777 u16 base, int queueidx)
1778{
1779 u16 rxctl;
1780
1781 rxctl = bcm43xx_read16(bcm, base + BCM43xx_PIO_RXCTL);
1782 if (rxctl & BCM43xx_PIO_RXCTL_DATAAVAILABLE)
1783 bcm->dma_reason[queueidx] |= BCM43xx_DMAIRQ_RX_DONE;
1784 else
1785 bcm->dma_reason[queueidx] &= ~BCM43xx_DMAIRQ_RX_DONE;
1786}
1787
1788static void bcm43xx_interrupt_ack(struct bcm43xx_private *bcm, u32 reason)
1789{
1790 if (bcm43xx_using_pio(bcm) &&
1791 (bcm->current_core->rev < 3) &&
1792 (!(reason & BCM43xx_IRQ_PIO_WORKAROUND))) {
1793 /* Apply a PIO specific workaround to the dma_reasons */
1794 pio_irq_workaround(bcm, BCM43xx_MMIO_PIO1_BASE, 0);
1795 pio_irq_workaround(bcm, BCM43xx_MMIO_PIO2_BASE, 1);
1796 pio_irq_workaround(bcm, BCM43xx_MMIO_PIO3_BASE, 2);
1797 pio_irq_workaround(bcm, BCM43xx_MMIO_PIO4_BASE, 3);
1798 }
1799
1800 bcm43xx_write32(bcm, BCM43xx_MMIO_GEN_IRQ_REASON, reason);
1801
1802 bcm43xx_write32(bcm, BCM43xx_MMIO_DMA0_REASON,
1803 bcm->dma_reason[0]);
1804 bcm43xx_write32(bcm, BCM43xx_MMIO_DMA1_REASON,
1805 bcm->dma_reason[1]);
1806 bcm43xx_write32(bcm, BCM43xx_MMIO_DMA2_REASON,
1807 bcm->dma_reason[2]);
1808 bcm43xx_write32(bcm, BCM43xx_MMIO_DMA3_REASON,
1809 bcm->dma_reason[3]);
1810 bcm43xx_write32(bcm, BCM43xx_MMIO_DMA4_REASON,
1811 bcm->dma_reason[4]);
1812 bcm43xx_write32(bcm, BCM43xx_MMIO_DMA5_REASON,
1813 bcm->dma_reason[5]);
1814}
1815
1816/* Interrupt handler top-half */
1817static irqreturn_t bcm43xx_interrupt_handler(int irq, void *dev_id)
1818{
1819 irqreturn_t ret = IRQ_HANDLED;
1820 struct bcm43xx_private *bcm = dev_id;
1821 u32 reason;
1822
1823 if (!bcm)
1824 return IRQ_NONE;
1825
1826 spin_lock(&bcm->irq_lock);
1827
1828 reason = bcm43xx_read32(bcm, BCM43xx_MMIO_GEN_IRQ_REASON);
1829 if (reason == 0xffffffff) {
1830 /* irq not for us (shared irq) */
1831 ret = IRQ_NONE;
1832 goto out;
1833 }
1834 reason &= bcm43xx_read32(bcm, BCM43xx_MMIO_GEN_IRQ_MASK);
1835 if (!reason)
1836 goto out;
1837
1838 assert(bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED);
1839 assert(bcm->current_core->id == BCM43xx_COREID_80211);
1840
1841 bcm->dma_reason[0] = bcm43xx_read32(bcm, BCM43xx_MMIO_DMA0_REASON)
1842 & 0x0001DC00;
1843 bcm->dma_reason[1] = bcm43xx_read32(bcm, BCM43xx_MMIO_DMA1_REASON)
1844 & 0x0000DC00;
1845 bcm->dma_reason[2] = bcm43xx_read32(bcm, BCM43xx_MMIO_DMA2_REASON)
1846 & 0x0000DC00;
1847 bcm->dma_reason[3] = bcm43xx_read32(bcm, BCM43xx_MMIO_DMA3_REASON)
1848 & 0x0001DC00;
1849 bcm->dma_reason[4] = bcm43xx_read32(bcm, BCM43xx_MMIO_DMA4_REASON)
1850 & 0x0000DC00;
1851 bcm->dma_reason[5] = bcm43xx_read32(bcm, BCM43xx_MMIO_DMA5_REASON)
1852 & 0x0000DC00;
1853
1854 bcm43xx_interrupt_ack(bcm, reason);
1855
1856 /* disable all IRQs. They are enabled again in the bottom half. */
1857 bcm->irq_savedstate = bcm43xx_interrupt_disable(bcm, BCM43xx_IRQ_ALL);
1858 /* save the reason code and call our bottom half. */
1859 bcm->irq_reason = reason;
1860 tasklet_schedule(&bcm->isr_tasklet);
1861
1862out:
1863 mmiowb();
1864 spin_unlock(&bcm->irq_lock);
1865
1866 return ret;
1867}
1868
1869static void bcm43xx_release_firmware(struct bcm43xx_private *bcm, int force)
1870{
1871 struct bcm43xx_phyinfo *phy = bcm43xx_current_phy(bcm);
1872
1873 if (bcm->firmware_norelease && !force)
1874 return; /* Suspending or controller reset. */
1875 release_firmware(phy->ucode);
1876 phy->ucode = NULL;
1877 release_firmware(phy->pcm);
1878 phy->pcm = NULL;
1879 release_firmware(phy->initvals0);
1880 phy->initvals0 = NULL;
1881 release_firmware(phy->initvals1);
1882 phy->initvals1 = NULL;
1883}
1884
1885static int bcm43xx_request_firmware(struct bcm43xx_private *bcm)
1886{
1887 struct bcm43xx_phyinfo *phy = bcm43xx_current_phy(bcm);
1888 u8 rev = bcm->current_core->rev;
1889 int err = 0;
1890 int nr;
1891 char buf[22 + sizeof(modparam_fwpostfix) - 1] = { 0 };
1892
1893 if (!phy->ucode) {
1894 snprintf(buf, ARRAY_SIZE(buf), "bcm43xx_microcode%d%s.fw",
1895 (rev >= 5 ? 5 : rev),
1896 modparam_fwpostfix);
1897 err = request_firmware(&phy->ucode, buf, &bcm->pci_dev->dev);
1898 if (err) {
1899 printk(KERN_ERR PFX
1900 "Error: Microcode \"%s\" not available or load failed.\n",
1901 buf);
1902 goto error;
1903 }
1904 }
1905
1906 if (!phy->pcm) {
1907 snprintf(buf, ARRAY_SIZE(buf),
1908 "bcm43xx_pcm%d%s.fw",
1909 (rev < 5 ? 4 : 5),
1910 modparam_fwpostfix);
1911 err = request_firmware(&phy->pcm, buf, &bcm->pci_dev->dev);
1912 if (err) {
1913 printk(KERN_ERR PFX
1914 "Error: PCM \"%s\" not available or load failed.\n",
1915 buf);
1916 goto error;
1917 }
1918 }
1919
1920 if (!phy->initvals0) {
1921 if (rev == 2 || rev == 4) {
1922 switch (phy->type) {
1923 case BCM43xx_PHYTYPE_A:
1924 nr = 3;
1925 break;
1926 case BCM43xx_PHYTYPE_B:
1927 case BCM43xx_PHYTYPE_G:
1928 nr = 1;
1929 break;
1930 default:
1931 goto err_noinitval;
1932 }
1933
1934 } else if (rev >= 5) {
1935 switch (phy->type) {
1936 case BCM43xx_PHYTYPE_A:
1937 nr = 7;
1938 break;
1939 case BCM43xx_PHYTYPE_B:
1940 case BCM43xx_PHYTYPE_G:
1941 nr = 5;
1942 break;
1943 default:
1944 goto err_noinitval;
1945 }
1946 } else
1947 goto err_noinitval;
1948 snprintf(buf, ARRAY_SIZE(buf), "bcm43xx_initval%02d%s.fw",
1949 nr, modparam_fwpostfix);
1950
1951 err = request_firmware(&phy->initvals0, buf, &bcm->pci_dev->dev);
1952 if (err) {
1953 printk(KERN_ERR PFX
1954 "Error: InitVals \"%s\" not available or load failed.\n",
1955 buf);
1956 goto error;
1957 }
1958 if (phy->initvals0->size % sizeof(struct bcm43xx_initval)) {
1959 printk(KERN_ERR PFX "InitVals fileformat error.\n");
1960 goto error;
1961 }
1962 }
1963
1964 if (!phy->initvals1) {
1965 if (rev >= 5) {
1966 u32 sbtmstatehigh;
1967
1968 switch (phy->type) {
1969 case BCM43xx_PHYTYPE_A:
1970 sbtmstatehigh = bcm43xx_read32(bcm, BCM43xx_CIR_SBTMSTATEHIGH);
1971 if (sbtmstatehigh & 0x00010000)
1972 nr = 9;
1973 else
1974 nr = 10;
1975 break;
1976 case BCM43xx_PHYTYPE_B:
1977 case BCM43xx_PHYTYPE_G:
1978 nr = 6;
1979 break;
1980 default:
1981 goto err_noinitval;
1982 }
1983 snprintf(buf, ARRAY_SIZE(buf), "bcm43xx_initval%02d%s.fw",
1984 nr, modparam_fwpostfix);
1985
1986 err = request_firmware(&phy->initvals1, buf, &bcm->pci_dev->dev);
1987 if (err) {
1988 printk(KERN_ERR PFX
1989 "Error: InitVals \"%s\" not available or load failed.\n",
1990 buf);
1991 goto error;
1992 }
1993 if (phy->initvals1->size % sizeof(struct bcm43xx_initval)) {
1994 printk(KERN_ERR PFX "InitVals fileformat error.\n");
1995 goto error;
1996 }
1997 }
1998 }
1999
2000out:
2001 return err;
2002error:
2003 bcm43xx_release_firmware(bcm, 1);
2004 goto out;
2005err_noinitval:
2006 printk(KERN_ERR PFX "Error: No InitVals available!\n");
2007 err = -ENOENT;
2008 goto error;
2009}
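
/* Standalone sketch (not driver code): how the microcode file name above
 * is derived from the 802.11 core revision.  Cores newer than rev 5 reuse
 * the rev-5 image; the optional postfix comes from the fwpostfix module
 * parameter.
 */
#include <stdio.h>

static void microcode_filename(char *buf, size_t len, int core_rev,
			       const char *postfix)
{
	snprintf(buf, len, "bcm43xx_microcode%d%s.fw",
		 core_rev >= 5 ? 5 : core_rev, postfix);
}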
2010
2011static void bcm43xx_upload_microcode(struct bcm43xx_private *bcm)
2012{
2013 struct bcm43xx_phyinfo *phy = bcm43xx_current_phy(bcm);
2014 const __be32 *data;
2015 unsigned int i, len;
2016
2017 /* Upload Microcode. */
2018 data = (__be32 *)(phy->ucode->data);
2019 len = phy->ucode->size / sizeof(u32);
2020 bcm43xx_shm_control_word(bcm, BCM43xx_SHM_UCODE, 0x0000);
2021 for (i = 0; i < len; i++) {
2022 bcm43xx_write32(bcm, BCM43xx_MMIO_SHM_DATA,
2023 be32_to_cpu(data[i]));
2024 udelay(10);
2025 }
2026
2027 /* Upload PCM data. */
2028 data = (__be32 *)(phy->pcm->data);
2029 len = phy->pcm->size / sizeof(u32);
2030 bcm43xx_shm_control_word(bcm, BCM43xx_SHM_PCM, 0x01ea);
2031 bcm43xx_write32(bcm, BCM43xx_MMIO_SHM_DATA, 0x00004000);
2032 bcm43xx_shm_control_word(bcm, BCM43xx_SHM_PCM, 0x01eb);
2033 for (i = 0; i < len; i++) {
2034 bcm43xx_write32(bcm, BCM43xx_MMIO_SHM_DATA,
2035 be32_to_cpu(data[i]));
2036 udelay(10);
2037 }
2038}
2039
2040static int bcm43xx_write_initvals(struct bcm43xx_private *bcm,
2041 const struct bcm43xx_initval *data,
2042 const unsigned int len)
2043{
2044 u16 offset, size;
2045 u32 value;
2046 unsigned int i;
2047
2048 for (i = 0; i < len; i++) {
2049 offset = be16_to_cpu(data[i].offset);
2050 size = be16_to_cpu(data[i].size);
2051 value = be32_to_cpu(data[i].value);
2052
2053 if (unlikely(offset >= 0x1000))
2054 goto err_format;
2055 if (size == 2) {
2056 if (unlikely(value & 0xFFFF0000))
2057 goto err_format;
2058 bcm43xx_write16(bcm, offset, (u16)value);
2059 } else if (size == 4) {
2060 bcm43xx_write32(bcm, offset, value);
2061 } else
2062 goto err_format;
2063 }
2064
2065 return 0;
2066
2067err_format:
2068 printk(KERN_ERR PFX "InitVals (bcm43xx_initvalXX.fw) file-format error. "
2069 "Please fix your bcm43xx firmware files.\n");
2070 return -EPROTO;
2071}
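
/* Standalone sketch (not driver code): the record format consumed by
 * bcm43xx_write_initvals() above -- a big-endian (offset, size, value)
 * triple per entry, where size selects a 16- or 32-bit register write
 * and offsets must stay below 0x1000.  The field order shown is only
 * illustrative; struct bcm43xx_initval in the driver headers is
 * authoritative.
 */
#include <stdint.h>

struct initval_record {
	uint16_t offset;	/* big-endian in the firmware file */
	uint16_t size;		/* 2 or 4 */
	uint32_t value;		/* big-endian in the firmware file */
} __attribute__((packed));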
2072
2073static int bcm43xx_upload_initvals(struct bcm43xx_private *bcm)
2074{
2075 struct bcm43xx_phyinfo *phy = bcm43xx_current_phy(bcm);
2076 int err;
2077
2078 err = bcm43xx_write_initvals(bcm, (struct bcm43xx_initval *)phy->initvals0->data,
2079 phy->initvals0->size / sizeof(struct bcm43xx_initval));
2080 if (err)
2081 goto out;
2082 if (phy->initvals1) {
2083 err = bcm43xx_write_initvals(bcm, (struct bcm43xx_initval *)phy->initvals1->data,
2084 phy->initvals1->size / sizeof(struct bcm43xx_initval));
2085 if (err)
2086 goto out;
2087 }
2088out:
2089 return err;
2090}
2091
2092static int bcm43xx_initialize_irq(struct bcm43xx_private *bcm)
2093{
2094 int err;
2095
2096 bcm->irq = bcm->pci_dev->irq;
2097 err = request_irq(bcm->irq, bcm43xx_interrupt_handler,
2098 IRQF_SHARED, KBUILD_MODNAME, bcm);
2099 if (err)
2100 printk(KERN_ERR PFX "Cannot register IRQ%d\n", bcm->irq);
2101
2102 return err;
2103}
2104
2105/* Switch to the core used to write the GPIO register.
2106 * This is either the ChipCommon, or the PCI core.
2107 */
2108static int switch_to_gpio_core(struct bcm43xx_private *bcm)
2109{
2110 int err;
2111
2112 /* Where to find the GPIO register depends on the chipset.
2113 * If it has a ChipCommon, its register at offset 0x6c is the GPIO
2114 * control register. Otherwise the register at offset 0x6c in the
2115 * PCI core is the GPIO control register.
2116 */
2117 err = bcm43xx_switch_core(bcm, &bcm->core_chipcommon);
2118 if (err == -ENODEV) {
2119 err = bcm43xx_switch_core(bcm, &bcm->core_pci);
2120 if (unlikely(err == -ENODEV)) {
2121 printk(KERN_ERR PFX "gpio error: "
2122 "Neither ChipCommon nor PCI core available!\n");
2123 }
2124 }
2125
2126 return err;
2127}
2128
2129/* Initialize the GPIOs
2130 * http://bcm-specs.sipsolutions.net/GPIO
2131 */
2132static int bcm43xx_gpio_init(struct bcm43xx_private *bcm)
2133{
2134 struct bcm43xx_coreinfo *old_core;
2135 int err;
2136 u32 mask, set;
2137
2138 bcm43xx_write32(bcm, BCM43xx_MMIO_STATUS_BITFIELD,
2139 bcm43xx_read32(bcm, BCM43xx_MMIO_STATUS_BITFIELD)
2140 & 0xFFFF3FFF);
2141
2142 bcm43xx_leds_switch_all(bcm, 0);
2143 bcm43xx_write16(bcm, BCM43xx_MMIO_GPIO_MASK,
2144 bcm43xx_read16(bcm, BCM43xx_MMIO_GPIO_MASK) | 0x000F);
2145
2146 mask = 0x0000001F;
2147 set = 0x0000000F;
2148 if (bcm->chip_id == 0x4301) {
2149 mask |= 0x0060;
2150 set |= 0x0060;
2151 }
2152 if (0 /* FIXME: conditional unknown */) {
2153 bcm43xx_write16(bcm, BCM43xx_MMIO_GPIO_MASK,
2154 bcm43xx_read16(bcm, BCM43xx_MMIO_GPIO_MASK)
2155 | 0x0100);
2156 mask |= 0x0180;
2157 set |= 0x0180;
2158 }
2159 if (bcm->sprom.boardflags & BCM43xx_BFL_PACTRL) {
2160 bcm43xx_write16(bcm, BCM43xx_MMIO_GPIO_MASK,
2161 bcm43xx_read16(bcm, BCM43xx_MMIO_GPIO_MASK)
2162 | 0x0200);
2163 mask |= 0x0200;
2164 set |= 0x0200;
2165 }
2166 if (bcm->current_core->rev >= 2)
2167 mask |= 0x0010; /* FIXME: This is redundant. */
2168
2169 old_core = bcm->current_core;
2170 err = switch_to_gpio_core(bcm);
2171 if (err)
2172 goto out;
2173 bcm43xx_write32(bcm, BCM43xx_GPIO_CONTROL,
2174 (bcm43xx_read32(bcm, BCM43xx_GPIO_CONTROL) & mask) | set);
2175 err = bcm43xx_switch_core(bcm, old_core);
2176out:
2177 return err;
2178}
2179
2180/* Turn off all GPIO stuff. Call this on module unload, for example. */
2181static int bcm43xx_gpio_cleanup(struct bcm43xx_private *bcm)
2182{
2183 struct bcm43xx_coreinfo *old_core;
2184 int err;
2185
2186 old_core = bcm->current_core;
2187 err = switch_to_gpio_core(bcm);
2188 if (err)
2189 return err;
2190 bcm43xx_write32(bcm, BCM43xx_GPIO_CONTROL, 0x00000000);
2191 err = bcm43xx_switch_core(bcm, old_core);
2192 assert(err == 0);
2193
2194 return 0;
2195}
2196
2197/* http://bcm-specs.sipsolutions.net/EnableMac */
2198void bcm43xx_mac_enable(struct bcm43xx_private *bcm)
2199{
2200 bcm->mac_suspended--;
2201 assert(bcm->mac_suspended >= 0);
2202 if (bcm->mac_suspended == 0) {
2203 bcm43xx_write32(bcm, BCM43xx_MMIO_STATUS_BITFIELD,
2204 bcm43xx_read32(bcm, BCM43xx_MMIO_STATUS_BITFIELD)
2205 | BCM43xx_SBF_MAC_ENABLED);
2206 bcm43xx_write32(bcm, BCM43xx_MMIO_GEN_IRQ_REASON, BCM43xx_IRQ_READY);
2207 bcm43xx_read32(bcm, BCM43xx_MMIO_STATUS_BITFIELD); /* dummy read */
2208 bcm43xx_read32(bcm, BCM43xx_MMIO_GEN_IRQ_REASON); /* dummy read */
2209 bcm43xx_power_saving_ctl_bits(bcm, -1, -1);
2210 }
2211}
2212
2213/* http://bcm-specs.sipsolutions.net/SuspendMAC */
2214void bcm43xx_mac_suspend(struct bcm43xx_private *bcm)
2215{
2216 int i;
2217 u32 tmp;
2218
2219 assert(bcm->mac_suspended >= 0);
2220 if (bcm->mac_suspended == 0) {
2221 bcm43xx_power_saving_ctl_bits(bcm, -1, 1);
2222 bcm43xx_write32(bcm, BCM43xx_MMIO_STATUS_BITFIELD,
2223 bcm43xx_read32(bcm, BCM43xx_MMIO_STATUS_BITFIELD)
2224 & ~BCM43xx_SBF_MAC_ENABLED);
2225 bcm43xx_read32(bcm, BCM43xx_MMIO_GEN_IRQ_REASON); /* dummy read */
2226 for (i = 10000; i; i--) {
2227 tmp = bcm43xx_read32(bcm, BCM43xx_MMIO_GEN_IRQ_REASON);
2228 if (tmp & BCM43xx_IRQ_READY)
2229 goto out;
2230 udelay(1);
2231 }
2232 printkl(KERN_ERR PFX "MAC suspend failed\n");
2233 }
2234out:
2235 bcm->mac_suspended++;
2236}
2237
2238void bcm43xx_set_iwmode(struct bcm43xx_private *bcm,
2239 int iw_mode)
2240{
2241 unsigned long flags;
2242 struct net_device *net_dev = bcm->net_dev;
2243 u32 status;
2244 u16 value;
2245
2246 spin_lock_irqsave(&bcm->ieee->lock, flags);
2247 bcm->ieee->iw_mode = iw_mode;
2248 spin_unlock_irqrestore(&bcm->ieee->lock, flags);
2249 if (iw_mode == IW_MODE_MONITOR)
2250 net_dev->type = ARPHRD_IEEE80211;
2251 else
2252 net_dev->type = ARPHRD_ETHER;
2253
2254 status = bcm43xx_read32(bcm, BCM43xx_MMIO_STATUS_BITFIELD);
2255	/* Reset status to infrastructure mode. */
2256 status &= ~(BCM43xx_SBF_MODE_AP | BCM43xx_SBF_MODE_MONITOR);
2257 status &= ~BCM43xx_SBF_MODE_PROMISC;
2258 status |= BCM43xx_SBF_MODE_NOTADHOC;
2259
2260	/* FIXME: Always enable promisc mode, until we get the MAC filters working correctly. */
2261	status |= BCM43xx_SBF_MODE_PROMISC;
2262
2263 switch (iw_mode) {
2264 case IW_MODE_MONITOR:
2265 status |= BCM43xx_SBF_MODE_MONITOR;
2266 status |= BCM43xx_SBF_MODE_PROMISC;
2267 break;
2268 case IW_MODE_ADHOC:
2269 status &= ~BCM43xx_SBF_MODE_NOTADHOC;
2270 break;
2271 case IW_MODE_MASTER:
2272 status |= BCM43xx_SBF_MODE_AP;
2273 break;
2274 case IW_MODE_SECOND:
2275 case IW_MODE_REPEAT:
2276 TODO(); /* TODO */
2277 break;
2278 case IW_MODE_INFRA:
2279 /* nothing to be done here... */
2280 break;
2281 default:
2282 dprintk(KERN_ERR PFX "Unknown mode in set_iwmode: %d\n", iw_mode);
2283 }
2284 if (net_dev->flags & IFF_PROMISC)
2285 status |= BCM43xx_SBF_MODE_PROMISC;
2286 bcm43xx_write32(bcm, BCM43xx_MMIO_STATUS_BITFIELD, status);
2287
2288 value = 0x0002;
2289 if (iw_mode != IW_MODE_ADHOC && iw_mode != IW_MODE_MASTER) {
2290 if (bcm->chip_id == 0x4306 && bcm->chip_rev == 3)
2291 value = 0x0064;
2292 else
2293 value = 0x0032;
2294 }
2295 bcm43xx_write16(bcm, 0x0612, value);
2296}
2297
2298/* This is the opposite of bcm43xx_chip_init() */
2299static void bcm43xx_chip_cleanup(struct bcm43xx_private *bcm)
2300{
2301 bcm43xx_radio_turn_off(bcm);
2302 if (!modparam_noleds)
2303 bcm43xx_leds_exit(bcm);
2304 bcm43xx_gpio_cleanup(bcm);
2305 bcm43xx_release_firmware(bcm, 0);
2306}
2307
2308/* Initialize the chip
2309 * http://bcm-specs.sipsolutions.net/ChipInit
2310 */
2311static int bcm43xx_chip_init(struct bcm43xx_private *bcm)
2312{
2313 struct bcm43xx_radioinfo *radio = bcm43xx_current_radio(bcm);
2314 struct bcm43xx_phyinfo *phy = bcm43xx_current_phy(bcm);
2315 int err;
2316 int i, tmp;
2317 u32 value32;
2318 u16 value16;
2319
2320 bcm43xx_write32(bcm, BCM43xx_MMIO_STATUS_BITFIELD,
2321 BCM43xx_SBF_CORE_READY
2322 | BCM43xx_SBF_400);
2323
2324 err = bcm43xx_request_firmware(bcm);
2325 if (err)
2326 goto out;
2327 bcm43xx_upload_microcode(bcm);
2328
2329 bcm43xx_write32(bcm, BCM43xx_MMIO_GEN_IRQ_REASON, 0xFFFFFFFF);
2330 bcm43xx_write32(bcm, BCM43xx_MMIO_STATUS_BITFIELD, 0x00020402);
2331 i = 0;
2332 while (1) {
2333 value32 = bcm43xx_read32(bcm, BCM43xx_MMIO_GEN_IRQ_REASON);
2334 if (value32 == BCM43xx_IRQ_READY)
2335 break;
2336 i++;
2337 if (i >= BCM43xx_IRQWAIT_MAX_RETRIES) {
2338 printk(KERN_ERR PFX "IRQ_READY timeout\n");
2339 err = -ENODEV;
2340 goto err_release_fw;
2341 }
2342 udelay(10);
2343 }
2344 bcm43xx_read32(bcm, BCM43xx_MMIO_GEN_IRQ_REASON); /* dummy read */
2345
2346 value16 = bcm43xx_shm_read16(bcm, BCM43xx_SHM_SHARED,
2347 BCM43xx_UCODE_REVISION);
2348
2349 dprintk(KERN_INFO PFX "Microcode rev 0x%x, pl 0x%x "
2350 "(20%.2i-%.2i-%.2i %.2i:%.2i:%.2i)\n", value16,
2351 bcm43xx_shm_read16(bcm, BCM43xx_SHM_SHARED,
2352 BCM43xx_UCODE_PATCHLEVEL),
2353 (bcm43xx_shm_read16(bcm, BCM43xx_SHM_SHARED,
2354 BCM43xx_UCODE_DATE) >> 12) & 0xf,
2355 (bcm43xx_shm_read16(bcm, BCM43xx_SHM_SHARED,
2356 BCM43xx_UCODE_DATE) >> 8) & 0xf,
2357 bcm43xx_shm_read16(bcm, BCM43xx_SHM_SHARED,
2358 BCM43xx_UCODE_DATE) & 0xff,
2359 (bcm43xx_shm_read16(bcm, BCM43xx_SHM_SHARED,
2360 BCM43xx_UCODE_TIME) >> 11) & 0x1f,
2361 (bcm43xx_shm_read16(bcm, BCM43xx_SHM_SHARED,
2362 BCM43xx_UCODE_TIME) >> 5) & 0x3f,
2363 bcm43xx_shm_read16(bcm, BCM43xx_SHM_SHARED,
2364 BCM43xx_UCODE_TIME) & 0x1f);
2365
2366	if (value16 > 0x128) {
2367 printk(KERN_ERR PFX
2368 "Firmware: no support for microcode extracted "
2369 "from version 4.x binary drivers.\n");
2370 err = -EOPNOTSUPP;
2371 goto err_release_fw;
2372 }
2373
2374 err = bcm43xx_gpio_init(bcm);
2375 if (err)
2376 goto err_release_fw;
2377
2378 err = bcm43xx_upload_initvals(bcm);
2379 if (err)
2380 goto err_gpio_cleanup;
2381 bcm43xx_radio_turn_on(bcm);
2382 bcm->radio_hw_enable = bcm43xx_is_hw_radio_enabled(bcm);
2383 printk(KERN_INFO PFX "Radio %s by hardware\n",
2384 (bcm->radio_hw_enable == 0) ? "disabled" : "enabled");
2385
2386 bcm43xx_write16(bcm, 0x03E6, 0x0000);
2387 err = bcm43xx_phy_init(bcm);
2388 if (err)
2389 goto err_radio_off;
2390
2391 /* Select initial Interference Mitigation. */
2392 tmp = radio->interfmode;
2393 radio->interfmode = BCM43xx_RADIO_INTERFMODE_NONE;
2394 bcm43xx_radio_set_interference_mitigation(bcm, tmp);
2395
2396 bcm43xx_phy_set_antenna_diversity(bcm);
2397 bcm43xx_radio_set_txantenna(bcm, BCM43xx_RADIO_TXANTENNA_DEFAULT);
2398 if (phy->type == BCM43xx_PHYTYPE_B) {
2399 value16 = bcm43xx_read16(bcm, 0x005E);
2400 value16 |= 0x0004;
2401 bcm43xx_write16(bcm, 0x005E, value16);
2402 }
2403 bcm43xx_write32(bcm, 0x0100, 0x01000000);
2404 if (bcm->current_core->rev < 5)
2405 bcm43xx_write32(bcm, 0x010C, 0x01000000);
2406
2407 value32 = bcm43xx_read32(bcm, BCM43xx_MMIO_STATUS_BITFIELD);
2408	value32 &= ~BCM43xx_SBF_MODE_NOTADHOC;
2409 bcm43xx_write32(bcm, BCM43xx_MMIO_STATUS_BITFIELD, value32);
2410 value32 = bcm43xx_read32(bcm, BCM43xx_MMIO_STATUS_BITFIELD);
2411 value32 |= BCM43xx_SBF_MODE_NOTADHOC;
2412 bcm43xx_write32(bcm, BCM43xx_MMIO_STATUS_BITFIELD, value32);
2413
2414 value32 = bcm43xx_read32(bcm, BCM43xx_MMIO_STATUS_BITFIELD);
2415 value32 |= 0x100000;
2416 bcm43xx_write32(bcm, BCM43xx_MMIO_STATUS_BITFIELD, value32);
2417
2418 if (bcm43xx_using_pio(bcm)) {
2419 bcm43xx_write32(bcm, 0x0210, 0x00000100);
2420 bcm43xx_write32(bcm, 0x0230, 0x00000100);
2421 bcm43xx_write32(bcm, 0x0250, 0x00000100);
2422 bcm43xx_write32(bcm, 0x0270, 0x00000100);
2423 bcm43xx_shm_write16(bcm, BCM43xx_SHM_SHARED, 0x0034, 0x0000);
2424 }
2425
2426 /* Probe Response Timeout value */
2427 /* FIXME: Default to 0, has to be set by ioctl probably... :-/ */
2428 bcm43xx_shm_write16(bcm, BCM43xx_SHM_SHARED, 0x0074, 0x0000);
2429
2430 /* Initially set the wireless operation mode. */
2431 bcm43xx_set_iwmode(bcm, bcm->ieee->iw_mode);
2432
2433 if (bcm->current_core->rev < 3) {
2434 bcm43xx_write16(bcm, 0x060E, 0x0000);
2435 bcm43xx_write16(bcm, 0x0610, 0x8000);
2436 bcm43xx_write16(bcm, 0x0604, 0x0000);
2437 bcm43xx_write16(bcm, 0x0606, 0x0200);
2438 } else {
2439 bcm43xx_write32(bcm, 0x0188, 0x80000000);
2440 bcm43xx_write32(bcm, 0x018C, 0x02000000);
2441 }
2442 bcm43xx_write32(bcm, BCM43xx_MMIO_GEN_IRQ_REASON, 0x00004000);
2443 bcm43xx_write32(bcm, BCM43xx_MMIO_DMA0_IRQ_MASK, 0x0001DC00);
2444 bcm43xx_write32(bcm, BCM43xx_MMIO_DMA1_IRQ_MASK, 0x0000DC00);
2445 bcm43xx_write32(bcm, BCM43xx_MMIO_DMA2_IRQ_MASK, 0x0000DC00);
2446 bcm43xx_write32(bcm, BCM43xx_MMIO_DMA3_IRQ_MASK, 0x0001DC00);
2447 bcm43xx_write32(bcm, BCM43xx_MMIO_DMA4_IRQ_MASK, 0x0000DC00);
2448 bcm43xx_write32(bcm, BCM43xx_MMIO_DMA5_IRQ_MASK, 0x0000DC00);
2449
2450 value32 = bcm43xx_read32(bcm, BCM43xx_CIR_SBTMSTATELOW);
2451 value32 |= 0x00100000;
2452 bcm43xx_write32(bcm, BCM43xx_CIR_SBTMSTATELOW, value32);
2453
2454 bcm43xx_write16(bcm, BCM43xx_MMIO_POWERUP_DELAY, bcm43xx_pctl_powerup_delay(bcm));
2455
2456 assert(err == 0);
2457 dprintk(KERN_INFO PFX "Chip initialized\n");
2458out:
2459 return err;
2460
2461err_radio_off:
2462 bcm43xx_radio_turn_off(bcm);
2463err_gpio_cleanup:
2464 bcm43xx_gpio_cleanup(bcm);
2465err_release_fw:
2466 bcm43xx_release_firmware(bcm, 1);
2467 goto out;
2468}
2469
2470/* Validate chip access
2471 * http://bcm-specs.sipsolutions.net/ValidateChipAccess */
2472static int bcm43xx_validate_chip(struct bcm43xx_private *bcm)
2473{
2474 u32 value;
2475 u32 shm_backup;
2476
2477 shm_backup = bcm43xx_shm_read32(bcm, BCM43xx_SHM_SHARED, 0x0000);
2478 bcm43xx_shm_write32(bcm, BCM43xx_SHM_SHARED, 0x0000, 0xAA5555AA);
2479 if (bcm43xx_shm_read32(bcm, BCM43xx_SHM_SHARED, 0x0000) != 0xAA5555AA)
2480 goto error;
2481 bcm43xx_shm_write32(bcm, BCM43xx_SHM_SHARED, 0x0000, 0x55AAAA55);
2482 if (bcm43xx_shm_read32(bcm, BCM43xx_SHM_SHARED, 0x0000) != 0x55AAAA55)
2483 goto error;
2484 bcm43xx_shm_write32(bcm, BCM43xx_SHM_SHARED, 0x0000, shm_backup);
2485
2486 value = bcm43xx_read32(bcm, BCM43xx_MMIO_STATUS_BITFIELD);
2487 if ((value | 0x80000000) != 0x80000400)
2488 goto error;
2489
2490 value = bcm43xx_read32(bcm, BCM43xx_MMIO_GEN_IRQ_REASON);
2491 if (value != 0x00000000)
2492 goto error;
2493
2494 return 0;
2495error:
2496 printk(KERN_ERR PFX "Failed to validate the chipaccess\n");
2497 return -ENODEV;
2498}
2499
2500static void bcm43xx_init_struct_phyinfo(struct bcm43xx_phyinfo *phy)
2501{
2502 /* Initialize a "phyinfo" structure. The structure is already
2503 * zeroed out.
2504	 * This is called at insmod time to initialize members.
2505 */
2506 phy->savedpctlreg = 0xFFFF;
2507 spin_lock_init(&phy->lock);
2508}
2509
2510static void bcm43xx_init_struct_radioinfo(struct bcm43xx_radioinfo *radio)
2511{
2512 /* Initialize a "radioinfo" structure. The structure is already
2513 * zeroed out.
2514	 * This is called at insmod time to initialize members.
2515 */
2516 radio->interfmode = BCM43xx_RADIO_INTERFMODE_NONE;
2517 radio->channel = 0xFF;
2518 radio->initial_channel = 0xFF;
2519}
2520
2521static int bcm43xx_probe_cores(struct bcm43xx_private *bcm)
2522{
2523 int err, i;
2524 int current_core;
2525 u32 core_vendor, core_id, core_rev;
2526 u32 sb_id_hi, chip_id_32 = 0;
2527 u16 pci_device, chip_id_16;
2528 u8 core_count;
2529
2530 memset(&bcm->core_chipcommon, 0, sizeof(struct bcm43xx_coreinfo));
2531 memset(&bcm->core_pci, 0, sizeof(struct bcm43xx_coreinfo));
2532 memset(&bcm->core_80211, 0, sizeof(struct bcm43xx_coreinfo)
2533 * BCM43xx_MAX_80211_CORES);
2534 memset(&bcm->core_80211_ext, 0, sizeof(struct bcm43xx_coreinfo_80211)
2535 * BCM43xx_MAX_80211_CORES);
2536 bcm->nr_80211_available = 0;
2537 bcm->current_core = NULL;
2538 bcm->active_80211_core = NULL;
2539
2540 /* map core 0 */
2541 err = _switch_core(bcm, 0);
2542 if (err)
2543 goto out;
2544
2545 /* fetch sb_id_hi from core information registers */
2546 sb_id_hi = bcm43xx_read32(bcm, BCM43xx_CIR_SB_ID_HI);
2547
2548 core_id = (sb_id_hi & 0x8FF0) >> 4;
2549 core_rev = (sb_id_hi & 0x7000) >> 8;
2550 core_rev |= (sb_id_hi & 0xF);
2551 core_vendor = (sb_id_hi & 0xFFFF0000) >> 16;
2552
2553 /* if present, chipcommon is always core 0; read the chipid from it */
2554 if (core_id == BCM43xx_COREID_CHIPCOMMON) {
2555 chip_id_32 = bcm43xx_read32(bcm, 0);
2556 chip_id_16 = chip_id_32 & 0xFFFF;
2557 bcm->core_chipcommon.available = 1;
2558 bcm->core_chipcommon.id = core_id;
2559 bcm->core_chipcommon.rev = core_rev;
2560 bcm->core_chipcommon.index = 0;
2561 /* While we are at it, also read the capabilities. */
2562 bcm->chipcommon_capabilities = bcm43xx_read32(bcm, BCM43xx_CHIPCOMMON_CAPABILITIES);
2563 } else {
2564		/* Without a ChipCommon core, use a hardcoded table. */
2565 pci_device = bcm->pci_dev->device;
2566 if (pci_device == 0x4301)
2567 chip_id_16 = 0x4301;
2568 else if ((pci_device >= 0x4305) && (pci_device <= 0x4307))
2569 chip_id_16 = 0x4307;
2570 else if ((pci_device >= 0x4402) && (pci_device <= 0x4403))
2571 chip_id_16 = 0x4402;
2572 else if ((pci_device >= 0x4610) && (pci_device <= 0x4615))
2573 chip_id_16 = 0x4610;
2574 else if ((pci_device >= 0x4710) && (pci_device <= 0x4715))
2575 chip_id_16 = 0x4710;
2576 else {
2577 printk(KERN_ERR PFX "Could not determine Chip ID\n");
2578 return -ENODEV;
2579 }
2580 }
2581
2582	/* A ChipCommon core with rev >= 4 encodes the number of cores;
2583	 * otherwise consult the hardcoded table. */
2584 if ((core_id == BCM43xx_COREID_CHIPCOMMON) && (core_rev >= 4)) {
2585 core_count = (chip_id_32 & 0x0F000000) >> 24;
2586 } else {
2587 switch (chip_id_16) {
2588 case 0x4610:
2589 case 0x4704:
2590 case 0x4710:
2591 core_count = 9;
2592 break;
2593 case 0x4310:
2594 core_count = 8;
2595 break;
2596 case 0x5365:
2597 core_count = 7;
2598 break;
2599 case 0x4306:
2600 core_count = 6;
2601 break;
2602 case 0x4301:
2603 case 0x4307:
2604 core_count = 5;
2605 break;
2606 case 0x4402:
2607 core_count = 3;
2608 break;
2609 default:
2610			/* We are out of luck if we get here. */
2611 assert(0);
2612 core_count = 1;
2613 }
2614 }
2615
2616 bcm->chip_id = chip_id_16;
2617 bcm->chip_rev = (chip_id_32 & 0x000F0000) >> 16;
2618 bcm->chip_package = (chip_id_32 & 0x00F00000) >> 20;
2619
2620 dprintk(KERN_INFO PFX "Chip ID 0x%x, rev 0x%x\n",
2621 bcm->chip_id, bcm->chip_rev);
2622 dprintk(KERN_INFO PFX "Number of cores: %d\n", core_count);
2623 if (bcm->core_chipcommon.available) {
2624 dprintk(KERN_INFO PFX "Core 0: ID 0x%x, rev 0x%x, vendor 0x%x\n",
2625 core_id, core_rev, core_vendor);
2626 current_core = 1;
2627 } else
2628 current_core = 0;
2629 for ( ; current_core < core_count; current_core++) {
2630 struct bcm43xx_coreinfo *core;
2631 struct bcm43xx_coreinfo_80211 *ext_80211;
2632
2633 err = _switch_core(bcm, current_core);
2634 if (err)
2635 goto out;
2636 /* Gather information */
2637 /* fetch sb_id_hi from core information registers */
2638 sb_id_hi = bcm43xx_read32(bcm, BCM43xx_CIR_SB_ID_HI);
2639
2640 /* extract core_id, core_rev, core_vendor */
2641 core_id = (sb_id_hi & 0x8FF0) >> 4;
2642 core_rev = ((sb_id_hi & 0xF) | ((sb_id_hi & 0x7000) >> 8));
2643 core_vendor = (sb_id_hi & 0xFFFF0000) >> 16;
2644
2645 dprintk(KERN_INFO PFX "Core %d: ID 0x%x, rev 0x%x, vendor 0x%x\n",
2646 current_core, core_id, core_rev, core_vendor);
2647
2648 core = NULL;
2649 switch (core_id) {
2650 case BCM43xx_COREID_PCI:
2651 case BCM43xx_COREID_PCIE:
2652 core = &bcm->core_pci;
2653 if (core->available) {
2654 printk(KERN_WARNING PFX "Multiple PCI cores found.\n");
2655 continue;
2656 }
2657 break;
2658 case BCM43xx_COREID_80211:
2659 for (i = 0; i < BCM43xx_MAX_80211_CORES; i++) {
2660 core = &(bcm->core_80211[i]);
2661 ext_80211 = &(bcm->core_80211_ext[i]);
2662 if (!core->available)
2663 break;
2664 core = NULL;
2665 }
2666 if (!core) {
2667 printk(KERN_WARNING PFX "More than %d cores of type 802.11 found.\n",
2668 BCM43xx_MAX_80211_CORES);
2669 continue;
2670 }
2671 if (i != 0) {
2672 /* More than one 80211 core is only supported
2673 * by special chips.
2674 * There are chips with two 80211 cores, but with
2675 * dangling pins on the second core. Be careful
2676 * and ignore these cores here.
2677 */
2678 if (1 /*bcm->pci_dev->device != 0x4324*/ ) {
2679 /* TODO: A PHY */
2680 dprintk(KERN_INFO PFX "Ignoring additional 802.11a core.\n");
2681 continue;
2682 }
2683 }
2684 switch (core_rev) {
2685 case 2:
2686 case 4:
2687 case 5:
2688 case 6:
2689 case 7:
2690 case 9:
2691 case 10:
2692 break;
2693 default:
2694 printk(KERN_WARNING PFX
2695 "Unsupported 80211 core revision %u\n",
2696 core_rev);
2697 }
2698 bcm->nr_80211_available++;
2699 core->priv = ext_80211;
2700 bcm43xx_init_struct_phyinfo(&ext_80211->phy);
2701 bcm43xx_init_struct_radioinfo(&ext_80211->radio);
2702 break;
2703 case BCM43xx_COREID_CHIPCOMMON:
2704 printk(KERN_WARNING PFX "Multiple CHIPCOMMON cores found.\n");
2705 break;
2706 }
2707 if (core) {
2708 core->available = 1;
2709 core->id = core_id;
2710 core->rev = core_rev;
2711 core->index = current_core;
2712 }
2713 }
2714
2715 if (!bcm->core_80211[0].available) {
2716 printk(KERN_ERR PFX "Error: No 80211 core found!\n");
2717 err = -ENODEV;
2718 goto out;
2719 }
2720
2721 err = bcm43xx_switch_core(bcm, &bcm->core_80211[0]);
2722
2723 assert(err == 0);
2724out:
2725 return err;
2726}
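/* Illustrative sketch, not from the original driver: bcm43xx_probe_cores()
 * above decodes the 32-bit SB_ID_HI core-identification register twice with
 * the same masks. The split it uses is:
 *   bits 31-16        -> core vendor
 *   bit 15, bits 11-4 -> core ID       (mask 0x8FF0, shifted right by 4)
 *   bits 14-12, 3-0   -> core revision ((0x7000 >> 8) OR'd with 0xF)
 * A minimal helper doing only that decode could look like this (names are
 * hypothetical):
 */
static void sb_id_hi_decode(u32 sb_id_hi, u32 *id, u32 *rev, u32 *vendor)
{
	*id = (sb_id_hi & 0x8FF0) >> 4;
	*rev = ((sb_id_hi & 0x7000) >> 8) | (sb_id_hi & 0xF);
	*vendor = (sb_id_hi & 0xFFFF0000) >> 16;
}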
2727
2728static void bcm43xx_gen_bssid(struct bcm43xx_private *bcm)
2729{
2730 const u8 *mac = (const u8*)(bcm->net_dev->dev_addr);
2731 u8 *bssid = bcm->ieee->bssid;
2732
2733 switch (bcm->ieee->iw_mode) {
2734 case IW_MODE_ADHOC:
2735 random_ether_addr(bssid);
2736 break;
2737 case IW_MODE_MASTER:
2738 case IW_MODE_INFRA:
2739 case IW_MODE_REPEAT:
2740 case IW_MODE_SECOND:
2741 case IW_MODE_MONITOR:
2742 memcpy(bssid, mac, ETH_ALEN);
2743 break;
2744 default:
2745 assert(0);
2746 }
2747}
2748
2749static void bcm43xx_rate_memory_write(struct bcm43xx_private *bcm,
2750 u16 rate,
2751 int is_ofdm)
2752{
2753 u16 offset;
2754
2755 if (is_ofdm) {
2756 offset = 0x480;
2757 offset += (bcm43xx_plcp_get_ratecode_ofdm(rate) & 0x000F) * 2;
2758 }
2759 else {
2760 offset = 0x4C0;
2761 offset += (bcm43xx_plcp_get_ratecode_cck(rate) & 0x000F) * 2;
2762 }
2763 bcm43xx_shm_write16(bcm, BCM43xx_SHM_SHARED, offset + 0x20,
2764 bcm43xx_shm_read16(bcm, BCM43xx_SHM_SHARED, offset));
2765}
2766
2767static void bcm43xx_rate_memory_init(struct bcm43xx_private *bcm)
2768{
2769 switch (bcm43xx_current_phy(bcm)->type) {
2770 case BCM43xx_PHYTYPE_A:
2771 case BCM43xx_PHYTYPE_G:
2772 bcm43xx_rate_memory_write(bcm, IEEE80211_OFDM_RATE_6MB, 1);
2773 bcm43xx_rate_memory_write(bcm, IEEE80211_OFDM_RATE_12MB, 1);
2774 bcm43xx_rate_memory_write(bcm, IEEE80211_OFDM_RATE_18MB, 1);
2775 bcm43xx_rate_memory_write(bcm, IEEE80211_OFDM_RATE_24MB, 1);
2776 bcm43xx_rate_memory_write(bcm, IEEE80211_OFDM_RATE_36MB, 1);
2777 bcm43xx_rate_memory_write(bcm, IEEE80211_OFDM_RATE_48MB, 1);
2778 bcm43xx_rate_memory_write(bcm, IEEE80211_OFDM_RATE_54MB, 1);
2779 case BCM43xx_PHYTYPE_B:
2780 bcm43xx_rate_memory_write(bcm, IEEE80211_CCK_RATE_1MB, 0);
2781 bcm43xx_rate_memory_write(bcm, IEEE80211_CCK_RATE_2MB, 0);
2782 bcm43xx_rate_memory_write(bcm, IEEE80211_CCK_RATE_5MB, 0);
2783 bcm43xx_rate_memory_write(bcm, IEEE80211_CCK_RATE_11MB, 0);
2784 break;
2785 default:
2786 assert(0);
2787 }
2788}
2789
2790static void bcm43xx_wireless_core_cleanup(struct bcm43xx_private *bcm)
2791{
2792 bcm43xx_chip_cleanup(bcm);
2793 bcm43xx_pio_free(bcm);
2794 bcm43xx_dma_free(bcm);
2795
2796 bcm->current_core->initialized = 0;
2797}
2798
2799/* http://bcm-specs.sipsolutions.net/80211Init */
2800static int bcm43xx_wireless_core_init(struct bcm43xx_private *bcm,
2801 int active_wlcore)
2802{
2803 struct bcm43xx_phyinfo *phy = bcm43xx_current_phy(bcm);
2804 struct bcm43xx_radioinfo *radio = bcm43xx_current_radio(bcm);
2805 u32 ucodeflags;
2806 int err;
2807 u32 sbimconfiglow;
2808 u8 limit;
2809
2810 if (bcm->core_pci.rev <= 5 && bcm->core_pci.id != BCM43xx_COREID_PCIE) {
2811 sbimconfiglow = bcm43xx_read32(bcm, BCM43xx_CIR_SBIMCONFIGLOW);
2812 sbimconfiglow &= ~ BCM43xx_SBIMCONFIGLOW_REQUEST_TOUT_MASK;
2813 sbimconfiglow &= ~ BCM43xx_SBIMCONFIGLOW_SERVICE_TOUT_MASK;
2814 if (bcm->bustype == BCM43xx_BUSTYPE_PCI)
2815 sbimconfiglow |= 0x32;
2816 else
2817 sbimconfiglow |= 0x53;
2818 bcm43xx_write32(bcm, BCM43xx_CIR_SBIMCONFIGLOW, sbimconfiglow);
2819 }
2820
2821 bcm43xx_phy_calibrate(bcm);
2822 err = bcm43xx_chip_init(bcm);
2823 if (err)
2824 goto out;
2825
2826 bcm43xx_shm_write16(bcm, BCM43xx_SHM_SHARED, 0x0016, bcm->current_core->rev);
2827 ucodeflags = bcm43xx_shm_read32(bcm, BCM43xx_SHM_SHARED, BCM43xx_UCODEFLAGS_OFFSET);
2828
2829 if (0 /*FIXME: which condition has to be used here? */)
2830 ucodeflags |= 0x00000010;
2831
2832 /* HW decryption needs to be set now */
2833 ucodeflags |= 0x40000000;
2834
2835 if (phy->type == BCM43xx_PHYTYPE_G) {
2836 ucodeflags |= BCM43xx_UCODEFLAG_UNKBGPHY;
2837 if (phy->rev == 1)
2838 ucodeflags |= BCM43xx_UCODEFLAG_UNKGPHY;
2839 if (bcm->sprom.boardflags & BCM43xx_BFL_PACTRL)
2840 ucodeflags |= BCM43xx_UCODEFLAG_UNKPACTRL;
2841 } else if (phy->type == BCM43xx_PHYTYPE_B) {
2842 ucodeflags |= BCM43xx_UCODEFLAG_UNKBGPHY;
2843 if (phy->rev >= 2 && radio->version == 0x2050)
2844 ucodeflags &= ~BCM43xx_UCODEFLAG_UNKGPHY;
2845 }
2846
2847 if (ucodeflags != bcm43xx_shm_read32(bcm, BCM43xx_SHM_SHARED,
2848 BCM43xx_UCODEFLAGS_OFFSET)) {
2849 bcm43xx_shm_write32(bcm, BCM43xx_SHM_SHARED,
2850 BCM43xx_UCODEFLAGS_OFFSET, ucodeflags);
2851 }
2852
2853 /* Short/Long Retry Limit.
2854	 * The retry limit is a 4-bit counter. Clamp the module parameters
2855	 * to 4 bits to avoid overflowing the chip-internal counter.
2856 */
2857 limit = limit_value(modparam_short_retry, 0, 0xF);
2858 bcm43xx_shm_write32(bcm, BCM43xx_SHM_WIRELESS, 0x0006, limit);
2859 limit = limit_value(modparam_long_retry, 0, 0xF);
2860 bcm43xx_shm_write32(bcm, BCM43xx_SHM_WIRELESS, 0x0007, limit);
2861
2862 bcm43xx_shm_write16(bcm, BCM43xx_SHM_SHARED, 0x0044, 3);
2863 bcm43xx_shm_write16(bcm, BCM43xx_SHM_SHARED, 0x0046, 2);
2864
2865 bcm43xx_rate_memory_init(bcm);
2866
2867 /* Minimum Contention Window */
2868 if (phy->type == BCM43xx_PHYTYPE_B)
2869 bcm43xx_shm_write32(bcm, BCM43xx_SHM_WIRELESS, 0x0003, 0x0000001f);
2870 else
2871 bcm43xx_shm_write32(bcm, BCM43xx_SHM_WIRELESS, 0x0003, 0x0000000f);
2872 /* Maximum Contention Window */
2873 bcm43xx_shm_write32(bcm, BCM43xx_SHM_WIRELESS, 0x0004, 0x000003ff);
2874
2875 bcm43xx_gen_bssid(bcm);
2876 bcm43xx_write_mac_bssid_templates(bcm);
2877
2878 if (bcm->current_core->rev >= 5)
2879 bcm43xx_write16(bcm, 0x043C, 0x000C);
2880
2881 if (active_wlcore) {
2882 if (bcm43xx_using_pio(bcm)) {
2883 err = bcm43xx_pio_init(bcm);
2884 } else {
2885 err = bcm43xx_dma_init(bcm);
2886 if (err == -ENOSYS)
2887 err = bcm43xx_pio_init(bcm);
2888 }
2889 if (err)
2890 goto err_chip_cleanup;
2891 }
2892 bcm43xx_write16(bcm, 0x0612, 0x0050);
2893 bcm43xx_shm_write16(bcm, BCM43xx_SHM_SHARED, 0x0416, 0x0050);
2894 bcm43xx_shm_write16(bcm, BCM43xx_SHM_SHARED, 0x0414, 0x01F4);
2895
2896 if (active_wlcore) {
2897 if (radio->initial_channel != 0xFF)
2898 bcm43xx_radio_selectchannel(bcm, radio->initial_channel, 0);
2899 }
2900
2901 /* Don't enable MAC/IRQ here, as it will race with the IRQ handler.
2902 * We enable it later.
2903 */
2904 bcm->current_core->initialized = 1;
2905out:
2906 return err;
2907
2908err_chip_cleanup:
2909 bcm43xx_chip_cleanup(bcm);
2910 goto out;
2911}
2912
2913static int bcm43xx_chipset_attach(struct bcm43xx_private *bcm)
2914{
2915 int err;
2916 u16 pci_status;
2917
2918 err = bcm43xx_pctl_set_crystal(bcm, 1);
2919 if (err)
2920 goto out;
2921 err = bcm43xx_pci_read_config16(bcm, PCI_STATUS, &pci_status);
2922 if (err)
2923 goto out;
2924 err = bcm43xx_pci_write_config16(bcm, PCI_STATUS, pci_status & ~PCI_STATUS_SIG_TARGET_ABORT);
2925
2926out:
2927 return err;
2928}
2929
2930static void bcm43xx_chipset_detach(struct bcm43xx_private *bcm)
2931{
2932 bcm43xx_pctl_set_clock(bcm, BCM43xx_PCTL_CLK_SLOW);
2933 bcm43xx_pctl_set_crystal(bcm, 0);
2934}
2935
2936static void bcm43xx_pcicore_broadcast_value(struct bcm43xx_private *bcm,
2937 u32 address,
2938 u32 data)
2939{
2940 bcm43xx_write32(bcm, BCM43xx_PCICORE_BCAST_ADDR, address);
2941 bcm43xx_write32(bcm, BCM43xx_PCICORE_BCAST_DATA, data);
2942}
2943
2944static int bcm43xx_pcicore_commit_settings(struct bcm43xx_private *bcm)
2945{
2946 int err = 0;
2947
2948 bcm->irq_savedstate = bcm43xx_interrupt_disable(bcm, BCM43xx_IRQ_ALL);
2949
2950 if (bcm->core_chipcommon.available) {
2951 err = bcm43xx_switch_core(bcm, &bcm->core_chipcommon);
2952 if (err)
2953 goto out;
2954
2955 bcm43xx_pcicore_broadcast_value(bcm, 0xfd8, 0x00000000);
2956
2957 /* this function is always called when a PCI core is mapped */
2958 err = bcm43xx_switch_core(bcm, &bcm->core_pci);
2959 if (err)
2960 goto out;
2961 } else
2962 bcm43xx_pcicore_broadcast_value(bcm, 0xfd8, 0x00000000);
2963
2964 bcm43xx_interrupt_enable(bcm, bcm->irq_savedstate);
2965
2966out:
2967 return err;
2968}
2969
2970static u32 bcm43xx_pcie_reg_read(struct bcm43xx_private *bcm, u32 address)
2971{
2972 bcm43xx_write32(bcm, BCM43xx_PCIECORE_REG_ADDR, address);
2973 return bcm43xx_read32(bcm, BCM43xx_PCIECORE_REG_DATA);
2974}
2975
2976static void bcm43xx_pcie_reg_write(struct bcm43xx_private *bcm, u32 address,
2977 u32 data)
2978{
2979 bcm43xx_write32(bcm, BCM43xx_PCIECORE_REG_ADDR, address);
2980 bcm43xx_write32(bcm, BCM43xx_PCIECORE_REG_DATA, data);
2981}
2982
2983static void bcm43xx_pcie_mdio_write(struct bcm43xx_private *bcm, u8 dev, u8 reg,
2984 u16 data)
2985{
2986 int i;
2987
2988 bcm43xx_write32(bcm, BCM43xx_PCIECORE_MDIO_CTL, 0x0082);
2989 bcm43xx_write32(bcm, BCM43xx_PCIECORE_MDIO_DATA, BCM43xx_PCIE_MDIO_ST |
2990 BCM43xx_PCIE_MDIO_WT | (dev << BCM43xx_PCIE_MDIO_DEV) |
2991 (reg << BCM43xx_PCIE_MDIO_REG) | BCM43xx_PCIE_MDIO_TA |
2992 data);
2993 udelay(10);
2994
2995 for (i = 0; i < 10; i++) {
2996 if (bcm43xx_read32(bcm, BCM43xx_PCIECORE_MDIO_CTL) &
2997 BCM43xx_PCIE_MDIO_TC)
2998 break;
2999 msleep(1);
3000 }
3001 bcm43xx_write32(bcm, BCM43xx_PCIECORE_MDIO_CTL, 0);
3002}
3003
3004/* Make an I/O core usable. "core_mask" is the bitmask of the cores to enable.
3005 * To enable core 0, pass a core_mask of 1<<0.
3006 */
3007static int bcm43xx_setup_backplane_pci_connection(struct bcm43xx_private *bcm,
3008 u32 core_mask)
3009{
3010 u32 backplane_flag_nr;
3011 u32 value;
3012 struct bcm43xx_coreinfo *old_core;
3013 int err = 0;
3014
3015 value = bcm43xx_read32(bcm, BCM43xx_CIR_SBTPSFLAG);
3016 backplane_flag_nr = value & BCM43xx_BACKPLANE_FLAG_NR_MASK;
3017
3018 old_core = bcm->current_core;
3019 err = bcm43xx_switch_core(bcm, &bcm->core_pci);
3020 if (err)
3021 goto out;
3022
3023 if (bcm->current_core->rev < 6 &&
3024 bcm->current_core->id == BCM43xx_COREID_PCI) {
3025 value = bcm43xx_read32(bcm, BCM43xx_CIR_SBINTVEC);
3026 value |= (1 << backplane_flag_nr);
3027 bcm43xx_write32(bcm, BCM43xx_CIR_SBINTVEC, value);
3028 } else {
3029 err = bcm43xx_pci_read_config32(bcm, BCM43xx_PCICFG_ICR, &value);
3030 if (err) {
3031 printk(KERN_ERR PFX "Error: ICR setup failure!\n");
3032 goto out_switch_back;
3033 }
3034 value |= core_mask << 8;
3035 err = bcm43xx_pci_write_config32(bcm, BCM43xx_PCICFG_ICR, value);
3036 if (err) {
3037 printk(KERN_ERR PFX "Error: ICR setup failure!\n");
3038 goto out_switch_back;
3039 }
3040 }
3041
3042 if (bcm->current_core->id == BCM43xx_COREID_PCI) {
3043 value = bcm43xx_read32(bcm, BCM43xx_PCICORE_SBTOPCI2);
3044 value |= BCM43xx_SBTOPCI2_PREFETCH | BCM43xx_SBTOPCI2_BURST;
3045 bcm43xx_write32(bcm, BCM43xx_PCICORE_SBTOPCI2, value);
3046
3047 if (bcm->current_core->rev < 5) {
3048 value = bcm43xx_read32(bcm, BCM43xx_CIR_SBIMCONFIGLOW);
3049 value |= (2 << BCM43xx_SBIMCONFIGLOW_SERVICE_TOUT_SHIFT)
3050 & BCM43xx_SBIMCONFIGLOW_SERVICE_TOUT_MASK;
3051 value |= (3 << BCM43xx_SBIMCONFIGLOW_REQUEST_TOUT_SHIFT)
3052 & BCM43xx_SBIMCONFIGLOW_REQUEST_TOUT_MASK;
3053 bcm43xx_write32(bcm, BCM43xx_CIR_SBIMCONFIGLOW, value);
3054 err = bcm43xx_pcicore_commit_settings(bcm);
3055 assert(err == 0);
3056 } else if (bcm->current_core->rev >= 11) {
3057 value = bcm43xx_read32(bcm, BCM43xx_PCICORE_SBTOPCI2);
3058 value |= BCM43xx_SBTOPCI2_MEMREAD_MULTI;
3059 bcm43xx_write32(bcm, BCM43xx_PCICORE_SBTOPCI2, value);
3060 }
3061 } else {
3062 if (bcm->current_core->rev == 0 || bcm->current_core->rev == 1) {
3063 value = bcm43xx_pcie_reg_read(bcm, BCM43xx_PCIE_TLP_WORKAROUND);
3064 value |= 0x8;
3065 bcm43xx_pcie_reg_write(bcm, BCM43xx_PCIE_TLP_WORKAROUND,
3066 value);
3067 }
3068 if (bcm->current_core->rev == 0) {
3069 bcm43xx_pcie_mdio_write(bcm, BCM43xx_MDIO_SERDES_RX,
3070 BCM43xx_SERDES_RXTIMER, 0x8128);
3071 bcm43xx_pcie_mdio_write(bcm, BCM43xx_MDIO_SERDES_RX,
3072 BCM43xx_SERDES_CDR, 0x0100);
3073 bcm43xx_pcie_mdio_write(bcm, BCM43xx_MDIO_SERDES_RX,
3074 BCM43xx_SERDES_CDR_BW, 0x1466);
3075 } else if (bcm->current_core->rev == 1) {
3076 value = bcm43xx_pcie_reg_read(bcm, BCM43xx_PCIE_DLLP_LINKCTL);
3077 value |= 0x40;
3078 bcm43xx_pcie_reg_write(bcm, BCM43xx_PCIE_DLLP_LINKCTL,
3079 value);
3080 }
3081 }
3082out_switch_back:
3083 err = bcm43xx_switch_core(bcm, old_core);
3084out:
3085 return err;
3086}
3087
3088static void bcm43xx_periodic_every120sec(struct bcm43xx_private *bcm)
3089{
3090 struct bcm43xx_phyinfo *phy = bcm43xx_current_phy(bcm);
3091
3092 if (phy->type != BCM43xx_PHYTYPE_G || phy->rev < 2)
3093 return;
3094
3095 bcm43xx_mac_suspend(bcm);
3096 bcm43xx_phy_lo_g_measure(bcm);
3097 bcm43xx_mac_enable(bcm);
3098}
3099
3100static void bcm43xx_periodic_every60sec(struct bcm43xx_private *bcm)
3101{
3102 bcm43xx_phy_lo_mark_all_unused(bcm);
3103 if (bcm->sprom.boardflags & BCM43xx_BFL_RSSI) {
3104 bcm43xx_mac_suspend(bcm);
3105 bcm43xx_calc_nrssi_slope(bcm);
3106 bcm43xx_mac_enable(bcm);
3107 }
3108}
3109
3110static void bcm43xx_periodic_every30sec(struct bcm43xx_private *bcm)
3111{
3112 /* Update device statistics. */
3113 bcm43xx_calculate_link_quality(bcm);
3114}
3115
3116static void bcm43xx_periodic_every15sec(struct bcm43xx_private *bcm)
3117{
3118 bcm43xx_phy_xmitpower(bcm); //FIXME: unless scanning?
3119 //TODO for APHY (temperature?)
3120}
3121
3122static void bcm43xx_periodic_every1sec(struct bcm43xx_private *bcm)
3123{
3124 struct bcm43xx_phyinfo *phy = bcm43xx_current_phy(bcm);
3125 struct bcm43xx_radioinfo *radio = bcm43xx_current_radio(bcm);
3126 int radio_hw_enable;
3127
3128 /* check if radio hardware enabled status changed */
3129 radio_hw_enable = bcm43xx_is_hw_radio_enabled(bcm);
3130 if (unlikely(bcm->radio_hw_enable != radio_hw_enable)) {
3131 bcm->radio_hw_enable = radio_hw_enable;
3132 printk(KERN_INFO PFX "Radio hardware status changed to %s\n",
3133 (radio_hw_enable == 0) ? "disabled" : "enabled");
3134 bcm43xx_leds_update(bcm, 0);
3135 }
3136 if (phy->type == BCM43xx_PHYTYPE_G) {
3137 //TODO: update_aci_moving_average
3138 if (radio->aci_enable && radio->aci_wlan_automatic) {
3139 bcm43xx_mac_suspend(bcm);
3140 if (!radio->aci_enable && 1 /*TODO: not scanning? */) {
3141 if (0 /*TODO: bunch of conditions*/) {
3142 bcm43xx_radio_set_interference_mitigation(bcm,
3143 BCM43xx_RADIO_INTERFMODE_MANUALWLAN);
3144 }
3145 } else if (1/*TODO*/) {
3146 /*
3147 if ((aci_average > 1000) && !(bcm43xx_radio_aci_scan(bcm))) {
3148 bcm43xx_radio_set_interference_mitigation(bcm,
3149 BCM43xx_RADIO_INTERFMODE_NONE);
3150 }
3151 */
3152 }
3153 bcm43xx_mac_enable(bcm);
3154 } else if (radio->interfmode == BCM43xx_RADIO_INTERFMODE_NONWLAN &&
3155 phy->rev == 1) {
3156 //TODO: implement rev1 workaround
3157 }
3158 }
3159}
3160
3161static void do_periodic_work(struct bcm43xx_private *bcm)
3162{
3163 if (bcm->periodic_state % 120 == 0)
3164 bcm43xx_periodic_every120sec(bcm);
3165 if (bcm->periodic_state % 60 == 0)
3166 bcm43xx_periodic_every60sec(bcm);
3167 if (bcm->periodic_state % 30 == 0)
3168 bcm43xx_periodic_every30sec(bcm);
3169 if (bcm->periodic_state % 15 == 0)
3170 bcm43xx_periodic_every15sec(bcm);
3171 bcm43xx_periodic_every1sec(bcm);
3172
3173 schedule_delayed_work(&bcm->periodic_work, HZ);
3174}
3175
3176static void bcm43xx_periodic_work_handler(struct work_struct *work)
3177{
3178 struct bcm43xx_private *bcm =
3179 container_of(work, struct bcm43xx_private, periodic_work.work);
3180 struct net_device *net_dev = bcm->net_dev;
3181 unsigned long flags;
3182 u32 savedirqs = 0;
3183 unsigned long orig_trans_start = 0;
3184
3185 mutex_lock(&bcm->mutex);
3186	/* Don't run or re-arm the periodic work if we are shutting down. */
3187 if (bcm43xx_status(bcm) == BCM43xx_STAT_UNINIT)
3188 goto unlock_mutex;
3189 if (unlikely(bcm->periodic_state % 60 == 0)) {
3190 /* Periodic work will take a long time, so we want it to
3191		 * be preemptible.
3192 */
3193
3194 netif_tx_lock_bh(net_dev);
3195 /* We must fake a started transmission here, as we are going to
3196		 * disable TX. If we did not fake a TX, the netdev watchdog could
3197		 * be triggered if the last real TX already lies some time in the
3198		 * past (slightly less than 5 seconds).
3199 */
3200 orig_trans_start = net_dev->trans_start;
3201 net_dev->trans_start = jiffies;
3202 netif_stop_queue(net_dev);
3203 netif_tx_unlock_bh(net_dev);
3204
3205 spin_lock_irqsave(&bcm->irq_lock, flags);
3206 bcm43xx_mac_suspend(bcm);
3207 if (bcm43xx_using_pio(bcm))
3208 bcm43xx_pio_freeze_txqueues(bcm);
3209 savedirqs = bcm43xx_interrupt_disable(bcm, BCM43xx_IRQ_ALL);
3210 spin_unlock_irqrestore(&bcm->irq_lock, flags);
3211 bcm43xx_synchronize_irq(bcm);
3212 } else {
3213		/* Periodic work should take a short time, so we want low
3214 * locking overhead.
3215 */
3216 spin_lock_irqsave(&bcm->irq_lock, flags);
3217 }
3218
3219 do_periodic_work(bcm);
3220
3221 if (unlikely(bcm->periodic_state % 60 == 0)) {
3222 spin_lock_irqsave(&bcm->irq_lock, flags);
3223 tasklet_enable(&bcm->isr_tasklet);
3224 bcm43xx_interrupt_enable(bcm, savedirqs);
3225 if (bcm43xx_using_pio(bcm))
3226 bcm43xx_pio_thaw_txqueues(bcm);
3227 bcm43xx_mac_enable(bcm);
3228 netif_wake_queue(bcm->net_dev);
3229 net_dev->trans_start = orig_trans_start;
3230 }
3231 mmiowb();
3232 bcm->periodic_state++;
3233 spin_unlock_irqrestore(&bcm->irq_lock, flags);
3234unlock_mutex:
3235 mutex_unlock(&bcm->mutex);
3236}
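/* Illustrative note, not from the original driver: the handler above is
 * re-armed by do_periodic_work() with a delay of HZ jiffies, i.e. roughly
 * once per second, and bcm->periodic_state counts those one-second ticks.
 * The modulo tests in do_periodic_work() therefore fire every 15, 30, 60
 * and 120 seconds; at tick 60, for example, the 15s, 30s and 60s handlers
 * all run in the same pass, and that pass also takes the heavyweight
 * "% 60" path above (TX queue stopped, PIO queues frozen, IRQs disabled,
 * MAC suspended) before the work is done.
 */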
3237
3238void bcm43xx_periodic_tasks_setup(struct bcm43xx_private *bcm)
3239{
3240 struct delayed_work *work = &bcm->periodic_work;
3241
3242 assert(bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED);
3243 INIT_DELAYED_WORK(work, bcm43xx_periodic_work_handler);
3244 schedule_delayed_work(work, 0);
3245}
3246
3247static void bcm43xx_security_init(struct bcm43xx_private *bcm)
3248{
3249 bcm->security_offset = bcm43xx_shm_read16(bcm, BCM43xx_SHM_SHARED,
3250 0x0056) * 2;
3251 bcm43xx_clear_keys(bcm);
3252}
3253
3254static int bcm43xx_rng_read(struct hwrng *rng, u32 *data)
3255{
3256 struct bcm43xx_private *bcm = (struct bcm43xx_private *)rng->priv;
3257 unsigned long flags;
3258
3259 spin_lock_irqsave(&(bcm)->irq_lock, flags);
3260 *data = bcm43xx_read16(bcm, BCM43xx_MMIO_RNG);
3261 spin_unlock_irqrestore(&(bcm)->irq_lock, flags);
3262
3263 return (sizeof(u16));
3264}
3265
3266static void bcm43xx_rng_exit(struct bcm43xx_private *bcm)
3267{
3268 hwrng_unregister(&bcm->rng);
3269}
3270
3271static int bcm43xx_rng_init(struct bcm43xx_private *bcm)
3272{
3273 int err;
3274
3275 snprintf(bcm->rng_name, ARRAY_SIZE(bcm->rng_name),
3276 "%s_%s", KBUILD_MODNAME, bcm->net_dev->name);
3277 bcm->rng.name = bcm->rng_name;
3278 bcm->rng.data_read = bcm43xx_rng_read;
3279 bcm->rng.priv = (unsigned long)bcm;
3280 err = hwrng_register(&bcm->rng);
3281 if (err)
3282 printk(KERN_ERR PFX "RNG init failed (%d)\n", err);
3283
3284 return err;
3285}
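/* Illustrative note, not from the original driver: once registered, the
 * hwrng core invokes bcm43xx_rng_read() on demand; each call reads the
 * 16-bit MMIO RNG register under irq_lock and reports sizeof(u16) == 2
 * bytes of entropy, with rng->priv carrying the bcm43xx_private pointer
 * back to the driver. Userspace would typically consume this through the
 * hwrng character device, e.g. (sketch, device path assumed):
 *
 *	dd if=/dev/hwrng bs=2 count=4 2>/dev/null | od -An -tx1
 */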
3286
3287void bcm43xx_cancel_work(struct bcm43xx_private *bcm)
3288{
3289	/* The system must be unlocked when this routine is entered.
3290	 * If not, the next two steps may deadlock. */
3291 cancel_work_sync(&bcm->restart_work);
3292 cancel_delayed_work_sync(&bcm->periodic_work);
3293}
3294
3295static int bcm43xx_shutdown_all_wireless_cores(struct bcm43xx_private *bcm)
3296{
3297 int ret = 0;
3298 int i, err;
3299 struct bcm43xx_coreinfo *core;
3300
3301 bcm43xx_set_status(bcm, BCM43xx_STAT_SHUTTINGDOWN);
3302 for (i = 0; i < bcm->nr_80211_available; i++) {
3303 core = &(bcm->core_80211[i]);
3304 assert(core->available);
3305 if (!core->initialized)
3306 continue;
3307 err = bcm43xx_switch_core(bcm, core);
3308 if (err) {
3309 dprintk(KERN_ERR PFX "shutdown_all_wireless_cores "
3310 "switch_core failed (%d)\n", err);
3311 ret = err;
3312 continue;
3313 }
3314 bcm43xx_interrupt_disable(bcm, BCM43xx_IRQ_ALL);
3315 bcm43xx_read32(bcm, BCM43xx_MMIO_GEN_IRQ_REASON); /* dummy read */
3316 bcm43xx_wireless_core_cleanup(bcm);
3317 if (core == bcm->active_80211_core)
3318 bcm->active_80211_core = NULL;
3319 }
3320 free_irq(bcm->irq, bcm);
3321 bcm43xx_set_status(bcm, BCM43xx_STAT_UNINIT);
3322
3323 return ret;
3324}
3325
3326/* This is the opposite of bcm43xx_init_board() */
3327static void bcm43xx_free_board(struct bcm43xx_private *bcm)
3328{
3329 bcm43xx_rng_exit(bcm);
3330 bcm43xx_sysfs_unregister(bcm);
3331
3332 mutex_lock(&(bcm)->mutex);
3333 bcm43xx_set_status(bcm, BCM43xx_STAT_UNINIT);
3334 mutex_unlock(&(bcm)->mutex);
3335
3336 bcm43xx_cancel_work(bcm);
3337
3338 mutex_lock(&(bcm)->mutex);
3339 bcm43xx_shutdown_all_wireless_cores(bcm);
3340 bcm43xx_pctl_set_crystal(bcm, 0);
3341 mutex_unlock(&(bcm)->mutex);
3342}
3343
3344static void prepare_phydata_for_init(struct bcm43xx_phyinfo *phy)
3345{
3346 phy->antenna_diversity = 0xFFFF;
3347 memset(phy->minlowsig, 0xFF, sizeof(phy->minlowsig));
3348 memset(phy->minlowsigpos, 0, sizeof(phy->minlowsigpos));
3349
3350 /* Flags */
3351 phy->calibrated = 0;
3352 phy->is_locked = 0;
3353
3354 if (phy->_lo_pairs) {
3355 memset(phy->_lo_pairs, 0,
3356 sizeof(struct bcm43xx_lopair) * BCM43xx_LO_COUNT);
3357 }
3358 memset(phy->loopback_gain, 0, sizeof(phy->loopback_gain));
3359}
3360
3361static void prepare_radiodata_for_init(struct bcm43xx_private *bcm,
3362 struct bcm43xx_radioinfo *radio)
3363{
3364 int i;
3365
3366 /* Set default attenuation values. */
3367 radio->baseband_atten = bcm43xx_default_baseband_attenuation(bcm);
3368 radio->radio_atten = bcm43xx_default_radio_attenuation(bcm);
3369 radio->txctl1 = bcm43xx_default_txctl1(bcm);
3370 radio->txctl2 = 0xFFFF;
3371 radio->txpwr_offset = 0;
3372
3373 /* NRSSI */
3374 radio->nrssislope = 0;
3375 for (i = 0; i < ARRAY_SIZE(radio->nrssi); i++)
3376 radio->nrssi[i] = -1000;
3377 for (i = 0; i < ARRAY_SIZE(radio->nrssi_lt); i++)
3378 radio->nrssi_lt[i] = i;
3379
3380 radio->lofcal = 0xFFFF;
3381 radio->initval = 0xFFFF;
3382
3383 radio->aci_enable = 0;
3384 radio->aci_wlan_automatic = 0;
3385 radio->aci_hw_rssi = 0;
3386}
3387
3388static void prepare_priv_for_init(struct bcm43xx_private *bcm)
3389{
3390 int i;
3391 struct bcm43xx_coreinfo *core;
3392 struct bcm43xx_coreinfo_80211 *wlext;
3393
3394 assert(!bcm->active_80211_core);
3395
3396 bcm43xx_set_status(bcm, BCM43xx_STAT_INITIALIZING);
3397
3398 /* Flags */
3399 bcm->was_initialized = 0;
3400 bcm->reg124_set_0x4 = 0;
3401
3402 /* Stats */
3403 memset(&bcm->stats, 0, sizeof(bcm->stats));
3404
3405 /* Wireless core data */
3406 for (i = 0; i < BCM43xx_MAX_80211_CORES; i++) {
3407 core = &(bcm->core_80211[i]);
3408 wlext = core->priv;
3409
3410 if (!core->available)
3411 continue;
3412 assert(wlext == &(bcm->core_80211_ext[i]));
3413
3414 prepare_phydata_for_init(&wlext->phy);
3415 prepare_radiodata_for_init(bcm, &wlext->radio);
3416 }
3417
3418 /* IRQ related flags */
3419 bcm->irq_reason = 0;
3420 memset(bcm->dma_reason, 0, sizeof(bcm->dma_reason));
3421 bcm->irq_savedstate = BCM43xx_IRQ_INITIAL;
3422
3423 bcm->mac_suspended = 1;
3424
3425 /* Noise calculation context */
3426 memset(&bcm->noisecalc, 0, sizeof(bcm->noisecalc));
3427
3428 /* Periodic work context */
3429 bcm->periodic_state = 0;
3430}
3431
3432static int wireless_core_up(struct bcm43xx_private *bcm,
3433 int active_wlcore)
3434{
3435 int err;
3436
3437 if (!bcm43xx_core_enabled(bcm))
3438 bcm43xx_wireless_core_reset(bcm, 1);
3439 if (!active_wlcore)
3440 bcm43xx_wireless_core_mark_inactive(bcm);
3441 err = bcm43xx_wireless_core_init(bcm, active_wlcore);
3442 if (err)
3443 goto out;
3444 if (!active_wlcore)
3445 bcm43xx_radio_turn_off(bcm);
3446out:
3447 return err;
3448}
3449
3450/* Select and enable the "to be used" wireless core.
3451 * Locking: bcm->mutex must be acquired before calling this.
3452 * bcm->irq_lock must not be acquired.
3453 */
3454int bcm43xx_select_wireless_core(struct bcm43xx_private *bcm,
3455 int phytype)
3456{
3457 int i, err;
3458 struct bcm43xx_coreinfo *active_core = NULL;
3459 struct bcm43xx_coreinfo_80211 *active_wlext = NULL;
3460 struct bcm43xx_coreinfo *core;
3461 struct bcm43xx_coreinfo_80211 *wlext;
3462 int adjust_active_sbtmstatelow = 0;
3463
3464 might_sleep();
3465
3466 if (phytype < 0) {
3467 /* If no phytype is requested, select the first core. */
3468 assert(bcm->core_80211[0].available);
3469 wlext = bcm->core_80211[0].priv;
3470 phytype = wlext->phy.type;
3471 }
3472 /* Find the requested core. */
3473 for (i = 0; i < bcm->nr_80211_available; i++) {
3474 core = &(bcm->core_80211[i]);
3475 wlext = core->priv;
3476 if (wlext->phy.type == phytype) {
3477 active_core = core;
3478 active_wlext = wlext;
3479 break;
3480 }
3481 }
3482 if (!active_core)
3483 return -ESRCH; /* No such PHYTYPE on this board. */
3484
3485 if (bcm->active_80211_core) {
3486 /* We already selected a wl core in the past.
3487 * So first clean up everything.
3488 */
3489 dprintk(KERN_INFO PFX "select_wireless_core: cleanup\n");
3490 ieee80211softmac_stop(bcm->net_dev);
3491 bcm43xx_set_status(bcm, BCM43xx_STAT_INITIALIZED);
3492 err = bcm43xx_disable_interrupts_sync(bcm);
3493 assert(!err);
3494 tasklet_enable(&bcm->isr_tasklet);
3495 err = bcm43xx_shutdown_all_wireless_cores(bcm);
3496 if (err)
3497 goto error;
3498 /* Ok, everything down, continue to re-initialize. */
3499 bcm43xx_set_status(bcm, BCM43xx_STAT_INITIALIZING);
3500 }
3501
3502 /* Reset all data structures. */
3503 prepare_priv_for_init(bcm);
3504
3505 err = bcm43xx_pctl_set_clock(bcm, BCM43xx_PCTL_CLK_FAST);
3506 if (err)
3507 goto error;
3508
3509 /* Mark all unused cores "inactive". */
3510 for (i = 0; i < bcm->nr_80211_available; i++) {
3511 core = &(bcm->core_80211[i]);
3512 wlext = core->priv;
3513
3514 if (core == active_core)
3515 continue;
3516 err = bcm43xx_switch_core(bcm, core);
3517 if (err) {
3518 dprintk(KERN_ERR PFX "Could not switch to inactive "
3519 "802.11 core (%d)\n", err);
3520 goto error;
3521 }
3522 err = wireless_core_up(bcm, 0);
3523 if (err) {
3524 dprintk(KERN_ERR PFX "core_up for inactive 802.11 core "
3525 "failed (%d)\n", err);
3526 goto error;
3527 }
3528 adjust_active_sbtmstatelow = 1;
3529 }
3530
3531 /* Now initialize the active 802.11 core. */
3532 err = bcm43xx_switch_core(bcm, active_core);
3533 if (err) {
3534 dprintk(KERN_ERR PFX "Could not switch to active "
3535 "802.11 core (%d)\n", err);
3536 goto error;
3537 }
3538 if (adjust_active_sbtmstatelow &&
3539 active_wlext->phy.type == BCM43xx_PHYTYPE_G) {
3540 u32 sbtmstatelow;
3541
3542 sbtmstatelow = bcm43xx_read32(bcm, BCM43xx_CIR_SBTMSTATELOW);
3543 sbtmstatelow |= BCM43xx_SBTMSTATELOW_G_MODE_ENABLE;
3544 bcm43xx_write32(bcm, BCM43xx_CIR_SBTMSTATELOW, sbtmstatelow);
3545 }
3546 err = wireless_core_up(bcm, 1);
3547 if (err) {
3548 dprintk(KERN_ERR PFX "core_up for active 802.11 core "
3549 "failed (%d)\n", err);
3550 goto error;
3551 }
3552 err = bcm43xx_pctl_set_clock(bcm, BCM43xx_PCTL_CLK_DYNAMIC);
3553 if (err)
3554 goto error;
3555 bcm->active_80211_core = active_core;
3556
3557 bcm43xx_macfilter_clear(bcm, BCM43xx_MACFILTER_ASSOC);
3558 bcm43xx_macfilter_set(bcm, BCM43xx_MACFILTER_SELF, (u8 *)(bcm->net_dev->dev_addr));
3559 bcm43xx_security_init(bcm);
3560 drain_txstatus_queue(bcm);
3561 ieee80211softmac_start(bcm->net_dev);
3562
3563 /* Let's go! Be careful after enabling the IRQs.
3564 * Don't switch cores, for example.
3565 */
3566 bcm43xx_mac_enable(bcm);
3567 bcm43xx_set_status(bcm, BCM43xx_STAT_INITIALIZED);
3568 err = bcm43xx_initialize_irq(bcm);
3569 if (err)
3570 goto error;
3571 bcm43xx_interrupt_enable(bcm, bcm->irq_savedstate);
3572
3573 dprintk(KERN_INFO PFX "Selected 802.11 core (phytype %d)\n",
3574 active_wlext->phy.type);
3575
3576 return 0;
3577
3578error:
3579 bcm43xx_set_status(bcm, BCM43xx_STAT_UNINIT);
3580 bcm43xx_pctl_set_clock(bcm, BCM43xx_PCTL_CLK_SLOW);
3581 return err;
3582}
3583
3584static int bcm43xx_init_board(struct bcm43xx_private *bcm)
3585{
3586 int err;
3587
3588 mutex_lock(&(bcm)->mutex);
3589
3590 tasklet_enable(&bcm->isr_tasklet);
3591 err = bcm43xx_pctl_set_crystal(bcm, 1);
3592 if (err)
3593 goto err_tasklet;
3594 err = bcm43xx_pctl_init(bcm);
3595 if (err)
3596 goto err_crystal_off;
3597 err = bcm43xx_select_wireless_core(bcm, -1);
3598 if (err)
3599 goto err_crystal_off;
3600 err = bcm43xx_sysfs_register(bcm);
3601 if (err)
3602 goto err_wlshutdown;
3603 err = bcm43xx_rng_init(bcm);
3604 if (err)
3605 goto err_sysfs_unreg;
3606 bcm43xx_periodic_tasks_setup(bcm);
3607
3608 /*FIXME: This should be handled by softmac instead. */
3609 schedule_delayed_work(&bcm->softmac->associnfo.work, 0);
3610
3611out:
3612 mutex_unlock(&(bcm)->mutex);
3613
3614 return err;
3615
3616err_sysfs_unreg:
3617 bcm43xx_sysfs_unregister(bcm);
3618err_wlshutdown:
3619 bcm43xx_shutdown_all_wireless_cores(bcm);
3620err_crystal_off:
3621 bcm43xx_pctl_set_crystal(bcm, 0);
3622err_tasklet:
3623 tasklet_disable(&bcm->isr_tasklet);
3624 goto out;
3625}
3626
3627static void bcm43xx_detach_board(struct bcm43xx_private *bcm)
3628{
3629 struct pci_dev *pci_dev = bcm->pci_dev;
3630 int i;
3631
3632 bcm43xx_chipset_detach(bcm);
3633	/* Do _not_ access the chip after it is detached. */
3634 pci_iounmap(pci_dev, bcm->mmio_addr);
3635 pci_release_regions(pci_dev);
3636 pci_disable_device(pci_dev);
3637
3638 /* Free allocated structures/fields */
3639 for (i = 0; i < BCM43xx_MAX_80211_CORES; i++) {
3640 kfree(bcm->core_80211_ext[i].phy._lo_pairs);
3641 if (bcm->core_80211_ext[i].phy.dyn_tssi_tbl)
3642 kfree(bcm->core_80211_ext[i].phy.tssi2dbm);
3643 }
3644}
3645
3646static int bcm43xx_read_phyinfo(struct bcm43xx_private *bcm)
3647{
3648 struct bcm43xx_phyinfo *phy = bcm43xx_current_phy(bcm);
3649 u16 value;
3650 u8 phy_analog;
3651 u8 phy_type;
3652 u8 phy_rev;
3653 int phy_rev_ok = 1;
3654 void *p;
3655
3656 value = bcm43xx_read16(bcm, BCM43xx_MMIO_PHY_VER);
3657
3658 phy_analog = (value & 0xF000) >> 12;
3659 phy_type = (value & 0x0F00) >> 8;
3660 phy_rev = (value & 0x000F);
3661
3662 dprintk(KERN_INFO PFX "Detected PHY: Analog: %x, Type %x, Revision %x\n",
3663 phy_analog, phy_type, phy_rev);
3664
3665 switch (phy_type) {
3666 case BCM43xx_PHYTYPE_A:
3667 if (phy_rev >= 4)
3668 phy_rev_ok = 0;
3669		/* FIXME: We need to switch the ieee->modulation, etc. flags,
3670 * if we switch 80211 cores after init is done.
3671		 * As we do not implement on-the-fly switching between
3672 * wireless cores, I will leave this as a future task.
3673 */
3674 bcm->ieee->modulation = IEEE80211_OFDM_MODULATION;
3675 bcm->ieee->mode = IEEE_A;
3676 bcm->ieee->freq_band = IEEE80211_52GHZ_BAND |
3677 IEEE80211_24GHZ_BAND;
3678 break;
3679 case BCM43xx_PHYTYPE_B:
3680 if (phy_rev != 2 && phy_rev != 4 && phy_rev != 6 && phy_rev != 7)
3681 phy_rev_ok = 0;
3682 bcm->ieee->modulation = IEEE80211_CCK_MODULATION;
3683 bcm->ieee->mode = IEEE_B;
3684 bcm->ieee->freq_band = IEEE80211_24GHZ_BAND;
3685 break;
3686 case BCM43xx_PHYTYPE_G:
3687 if (phy_rev > 8)
3688 phy_rev_ok = 0;
3689 bcm->ieee->modulation = IEEE80211_OFDM_MODULATION |
3690 IEEE80211_CCK_MODULATION;
3691 bcm->ieee->mode = IEEE_G;
3692 bcm->ieee->freq_band = IEEE80211_24GHZ_BAND;
3693 break;
3694 default:
3695 printk(KERN_ERR PFX "Error: Unknown PHY Type %x\n",
3696 phy_type);
3697 return -ENODEV;
3698 };
3699 bcm->ieee->perfect_rssi = RX_RSSI_MAX;
3700 bcm->ieee->worst_rssi = 0;
3701 if (!phy_rev_ok) {
3702 printk(KERN_WARNING PFX "Invalid PHY Revision %x\n",
3703 phy_rev);
3704 }
3705
3706 phy->analog = phy_analog;
3707 phy->type = phy_type;
3708 phy->rev = phy_rev;
3709 if ((phy_type == BCM43xx_PHYTYPE_B) || (phy_type == BCM43xx_PHYTYPE_G)) {
3710 p = kzalloc(sizeof(struct bcm43xx_lopair) * BCM43xx_LO_COUNT,
3711 GFP_KERNEL);
3712 if (!p)
3713 return -ENOMEM;
3714 phy->_lo_pairs = p;
3715 }
3716
3717 return 0;
3718}
3719
3720static int bcm43xx_attach_board(struct bcm43xx_private *bcm)
3721{
3722 struct pci_dev *pci_dev = bcm->pci_dev;
3723 struct net_device *net_dev = bcm->net_dev;
3724 int err;
3725 int i;
3726 u32 coremask;
3727
3728 err = pci_enable_device(pci_dev);
3729 if (err) {
3730 printk(KERN_ERR PFX "pci_enable_device() failed\n");
3731 goto out;
3732 }
3733 err = pci_request_regions(pci_dev, KBUILD_MODNAME);
3734 if (err) {
3735 printk(KERN_ERR PFX "pci_request_regions() failed\n");
3736 goto err_pci_disable;
3737 }
3738 /* enable PCI bus-mastering */
3739 pci_set_master(pci_dev);
3740 bcm->mmio_addr = pci_iomap(pci_dev, 0, ~0UL);
3741 if (!bcm->mmio_addr) {
3742 printk(KERN_ERR PFX "pci_iomap() failed\n");
3743 err = -EIO;
3744 goto err_pci_release;
3745 }
3746 net_dev->base_addr = (unsigned long)bcm->mmio_addr;
3747
3748 err = bcm43xx_pci_read_config16(bcm, PCI_SUBSYSTEM_VENDOR_ID,
3749 &bcm->board_vendor);
3750 if (err)
3751 goto err_iounmap;
3752 err = bcm43xx_pci_read_config16(bcm, PCI_SUBSYSTEM_ID,
3753 &bcm->board_type);
3754 if (err)
3755 goto err_iounmap;
3756
3757 bcm->board_revision = bcm->pci_dev->revision;
3758
3759 err = bcm43xx_chipset_attach(bcm);
3760 if (err)
3761 goto err_iounmap;
3762 err = bcm43xx_pctl_init(bcm);
3763 if (err)
3764 goto err_chipset_detach;
3765 err = bcm43xx_probe_cores(bcm);
3766 if (err)
3767 goto err_chipset_detach;
3768
3769 /* Attach all IO cores to the backplane. */
3770 coremask = 0;
3771 for (i = 0; i < bcm->nr_80211_available; i++)
3772 coremask |= (1 << bcm->core_80211[i].index);
3773 //FIXME: Also attach some non80211 cores?
3774 err = bcm43xx_setup_backplane_pci_connection(bcm, coremask);
3775 if (err) {
3776 printk(KERN_ERR PFX "Backplane->PCI connection failed!\n");
3777 goto err_chipset_detach;
3778 }
3779
3780 err = bcm43xx_sprom_extract(bcm);
3781 if (err)
3782 goto err_chipset_detach;
3783 err = bcm43xx_leds_init(bcm);
3784 if (err)
3785 goto err_chipset_detach;
3786
3787 for (i = 0; i < bcm->nr_80211_available; i++) {
3788 err = bcm43xx_switch_core(bcm, &bcm->core_80211[i]);
3789 assert(err != -ENODEV);
3790 if (err)
3791 goto err_80211_unwind;
3792
3793 /* Enable the selected wireless core.
3794 * Connect PHY only on the first core.
3795 */
3796 bcm43xx_wireless_core_reset(bcm, (i == 0));
3797
3798 err = bcm43xx_read_phyinfo(bcm);
3799 if (err && (i == 0))
3800 goto err_80211_unwind;
3801
3802 err = bcm43xx_read_radioinfo(bcm);
3803 if (err && (i == 0))
3804 goto err_80211_unwind;
3805
3806 err = bcm43xx_validate_chip(bcm);
3807 if (err && (i == 0))
3808 goto err_80211_unwind;
3809
3810 bcm43xx_radio_turn_off(bcm);
3811 err = bcm43xx_phy_init_tssi2dbm_table(bcm);
3812 if (err)
3813 goto err_80211_unwind;
3814 bcm43xx_wireless_core_disable(bcm);
3815 }
3816 err = bcm43xx_geo_init(bcm);
3817 if (err)
3818 goto err_80211_unwind;
3819 bcm43xx_pctl_set_crystal(bcm, 0);
3820
3821 /* Set the MAC address in the networking subsystem */
3822 if (is_valid_ether_addr(bcm->sprom.et1macaddr))
3823 memcpy(bcm->net_dev->dev_addr, bcm->sprom.et1macaddr, 6);
3824 else
3825 memcpy(bcm->net_dev->dev_addr, bcm->sprom.il0macaddr, 6);
3826
3827 snprintf(bcm->nick, IW_ESSID_MAX_SIZE,
3828 "Broadcom %04X", bcm->chip_id);
3829
3830 assert(err == 0);
3831out:
3832 return err;
3833
3834err_80211_unwind:
3835 for (i = 0; i < BCM43xx_MAX_80211_CORES; i++) {
3836 kfree(bcm->core_80211_ext[i].phy._lo_pairs);
3837 if (bcm->core_80211_ext[i].phy.dyn_tssi_tbl)
3838 kfree(bcm->core_80211_ext[i].phy.tssi2dbm);
3839 }
3840err_chipset_detach:
3841 bcm43xx_chipset_detach(bcm);
3842err_iounmap:
3843 pci_iounmap(pci_dev, bcm->mmio_addr);
3844err_pci_release:
3845 pci_release_regions(pci_dev);
3846err_pci_disable:
3847 pci_disable_device(pci_dev);
3848 printk(KERN_ERR PFX "Unable to attach board\n");
3849 goto out;
3850}
3851
3852/* Do the hardware I/O operations to send the txb. */
3853static inline int bcm43xx_tx(struct bcm43xx_private *bcm,
3854 struct ieee80211_txb *txb)
3855{
3856 int err = -ENODEV;
3857
3858 if (bcm43xx_using_pio(bcm))
3859 err = bcm43xx_pio_tx(bcm, txb);
3860 else
3861 err = bcm43xx_dma_tx(bcm, txb);
3862 bcm->net_dev->trans_start = jiffies;
3863
3864 return err;
3865}
3866
3867static void bcm43xx_ieee80211_set_chan(struct net_device *net_dev,
3868 u8 channel)
3869{
3870 struct bcm43xx_private *bcm = bcm43xx_priv(net_dev);
3871 struct bcm43xx_radioinfo *radio;
3872 unsigned long flags;
3873
3874 mutex_lock(&bcm->mutex);
3875 spin_lock_irqsave(&bcm->irq_lock, flags);
3876 if (bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED) {
3877 bcm43xx_mac_suspend(bcm);
3878 bcm43xx_radio_selectchannel(bcm, channel, 0);
3879 bcm43xx_mac_enable(bcm);
3880 } else {
3881 radio = bcm43xx_current_radio(bcm);
3882 radio->initial_channel = channel;
3883 }
3884 spin_unlock_irqrestore(&bcm->irq_lock, flags);
3885 mutex_unlock(&bcm->mutex);
3886}
3887
3888/* set_security() callback in struct ieee80211_device */
3889static void bcm43xx_ieee80211_set_security(struct net_device *net_dev,
3890 struct ieee80211_security *sec)
3891{
3892 struct bcm43xx_private *bcm = bcm43xx_priv(net_dev);
3893 struct ieee80211_security *secinfo = &bcm->ieee->sec;
3894 unsigned long flags;
3895 int keyidx;
3896
3897 dprintk(KERN_INFO PFX "set security called");
3898
3899 mutex_lock(&bcm->mutex);
3900 spin_lock_irqsave(&bcm->irq_lock, flags);
3901
3902 for (keyidx = 0; keyidx<WEP_KEYS; keyidx++)
3903 if (sec->flags & (1<<keyidx)) {
3904 secinfo->encode_alg[keyidx] = sec->encode_alg[keyidx];
3905 secinfo->key_sizes[keyidx] = sec->key_sizes[keyidx];
3906 memcpy(secinfo->keys[keyidx], sec->keys[keyidx], SCM_KEY_LEN);
3907 }
3908
3909 if (sec->flags & SEC_ACTIVE_KEY) {
3910 secinfo->active_key = sec->active_key;
3911 dprintk(", .active_key = %d", sec->active_key);
3912 }
3913 if (sec->flags & SEC_UNICAST_GROUP) {
3914 secinfo->unicast_uses_group = sec->unicast_uses_group;
3915 dprintk(", .unicast_uses_group = %d", sec->unicast_uses_group);
3916 }
3917 if (sec->flags & SEC_LEVEL) {
3918 secinfo->level = sec->level;
3919 dprintk(", .level = %d", sec->level);
3920 }
3921 if (sec->flags & SEC_ENABLED) {
3922 secinfo->enabled = sec->enabled;
3923 dprintk(", .enabled = %d", sec->enabled);
3924 }
3925 if (sec->flags & SEC_ENCRYPT) {
3926 secinfo->encrypt = sec->encrypt;
3927 dprintk(", .encrypt = %d", sec->encrypt);
3928 }
3929 if (sec->flags & SEC_AUTH_MODE) {
3930 secinfo->auth_mode = sec->auth_mode;
3931 dprintk(", .auth_mode = %d", sec->auth_mode);
3932 }
3933 dprintk("\n");
3934 if (bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED &&
3935 !bcm->ieee->host_encrypt) {
3936 if (secinfo->enabled) {
3937 /* upload WEP keys to hardware */
3938 char null_address[6] = { 0 };
3939 u8 algorithm = 0;
3940 for (keyidx = 0; keyidx<WEP_KEYS; keyidx++) {
3941 if (!(sec->flags & (1<<keyidx)))
3942 continue;
3943 switch (sec->encode_alg[keyidx]) {
3944 case SEC_ALG_NONE: algorithm = BCM43xx_SEC_ALGO_NONE; break;
3945 case SEC_ALG_WEP:
3946 algorithm = BCM43xx_SEC_ALGO_WEP;
3947 if (secinfo->key_sizes[keyidx] == 13)
3948 algorithm = BCM43xx_SEC_ALGO_WEP104;
3949 break;
3950 case SEC_ALG_TKIP:
3951 FIXME();
3952 algorithm = BCM43xx_SEC_ALGO_TKIP;
3953 break;
3954 case SEC_ALG_CCMP:
3955 FIXME();
3956 algorithm = BCM43xx_SEC_ALGO_AES;
3957 break;
3958 default:
3959 assert(0);
3960 break;
3961 }
3962 bcm43xx_key_write(bcm, keyidx, algorithm, sec->keys[keyidx], secinfo->key_sizes[keyidx], &null_address[0]);
3963 bcm->key[keyidx].enabled = 1;
3964 bcm->key[keyidx].algorithm = algorithm;
3965 }
3966 } else
3967 bcm43xx_clear_keys(bcm);
3968 }
3969 spin_unlock_irqrestore(&bcm->irq_lock, flags);
3970 mutex_unlock(&bcm->mutex);
3971}
3972
3973/* hard_start_xmit() callback in struct ieee80211_device */
3974static int bcm43xx_ieee80211_hard_start_xmit(struct ieee80211_txb *txb,
3975 struct net_device *net_dev,
3976 int pri)
3977{
3978 struct bcm43xx_private *bcm = bcm43xx_priv(net_dev);
3979 int err = -ENODEV;
3980 unsigned long flags;
3981
3982 spin_lock_irqsave(&bcm->irq_lock, flags);
3983 if (likely(bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED))
3984 err = bcm43xx_tx(bcm, txb);
3985 spin_unlock_irqrestore(&bcm->irq_lock, flags);
3986
3987 if (unlikely(err))
3988 return NETDEV_TX_BUSY;
3989 return NETDEV_TX_OK;
3990}
3991
3992static void bcm43xx_net_tx_timeout(struct net_device *net_dev)
3993{
3994 struct bcm43xx_private *bcm = bcm43xx_priv(net_dev);
3995 unsigned long flags;
3996
3997 spin_lock_irqsave(&bcm->irq_lock, flags);
3998 bcm43xx_controller_restart(bcm, "TX timeout");
3999 spin_unlock_irqrestore(&bcm->irq_lock, flags);
4000}
4001
4002#ifdef CONFIG_NET_POLL_CONTROLLER
4003static void bcm43xx_net_poll_controller(struct net_device *net_dev)
4004{
4005 struct bcm43xx_private *bcm = bcm43xx_priv(net_dev);
4006 unsigned long flags;
4007
4008 local_irq_save(flags);
4009 if (bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED)
4010 bcm43xx_interrupt_handler(bcm->irq, bcm);
4011 local_irq_restore(flags);
4012}
4013#endif /* CONFIG_NET_POLL_CONTROLLER */
4014
4015static int bcm43xx_net_open(struct net_device *net_dev)
4016{
4017 struct bcm43xx_private *bcm = bcm43xx_priv(net_dev);
4018
4019 return bcm43xx_init_board(bcm);
4020}
4021
4022static int bcm43xx_net_stop(struct net_device *net_dev)
4023{
4024 struct bcm43xx_private *bcm = bcm43xx_priv(net_dev);
4025 int err;
4026
4027 ieee80211softmac_stop(net_dev);
4028 err = bcm43xx_disable_interrupts_sync(bcm);
4029 assert(!err);
4030 bcm43xx_free_board(bcm);
4031 bcm43xx_cancel_work(bcm);
4032
4033 return 0;
4034}
4035
4036static int bcm43xx_init_private(struct bcm43xx_private *bcm,
4037 struct net_device *net_dev,
4038 struct pci_dev *pci_dev)
4039{
4040 bcm43xx_set_status(bcm, BCM43xx_STAT_UNINIT);
4041 bcm->ieee = netdev_priv(net_dev);
4042 bcm->softmac = ieee80211_priv(net_dev);
4043 bcm->softmac->set_channel = bcm43xx_ieee80211_set_chan;
4044
4045 bcm->irq_savedstate = BCM43xx_IRQ_INITIAL;
4046 bcm->mac_suspended = 1;
4047 bcm->pci_dev = pci_dev;
4048 bcm->net_dev = net_dev;
4049 bcm->bad_frames_preempt = modparam_bad_frames_preempt;
4050 spin_lock_init(&bcm->irq_lock);
4051 spin_lock_init(&bcm->leds_lock);
4052 mutex_init(&bcm->mutex);
4053 tasklet_init(&bcm->isr_tasklet,
4054 (void (*)(unsigned long))bcm43xx_interrupt_tasklet,
4055 (unsigned long)bcm);
4056 tasklet_disable_nosync(&bcm->isr_tasklet);
4057 if (modparam_pio)
4058 bcm->__using_pio = 1;
4059 bcm->rts_threshold = BCM43xx_DEFAULT_RTS_THRESHOLD;
4060
4061 /* default to sw encryption for now */
4062 bcm->ieee->host_build_iv = 0;
4063 bcm->ieee->host_encrypt = 1;
4064 bcm->ieee->host_decrypt = 1;
4065
4066 bcm->ieee->iw_mode = BCM43xx_INITIAL_IWMODE;
4067 bcm->ieee->tx_headroom = sizeof(struct bcm43xx_txhdr);
4068 bcm->ieee->set_security = bcm43xx_ieee80211_set_security;
4069 bcm->ieee->hard_start_xmit = bcm43xx_ieee80211_hard_start_xmit;
4070
4071 return 0;
4072}
4073
4074static int __devinit bcm43xx_init_one(struct pci_dev *pdev,
4075 const struct pci_device_id *ent)
4076{
4077 struct net_device *net_dev;
4078 struct bcm43xx_private *bcm;
4079 int err;
4080
4081#ifdef DEBUG_SINGLE_DEVICE_ONLY
4082 if (strcmp(pci_name(pdev), DEBUG_SINGLE_DEVICE_ONLY))
4083 return -ENODEV;
4084#endif
4085
4086 net_dev = alloc_ieee80211softmac(sizeof(*bcm));
4087 if (!net_dev) {
4088 printk(KERN_ERR PFX
4089 "could not allocate ieee80211 device %s\n",
4090 pci_name(pdev));
4091 err = -ENOMEM;
4092 goto out;
4093 }
4094 /* initialize the net_device struct */
4095 SET_NETDEV_DEV(net_dev, &pdev->dev);
4096
4097 net_dev->open = bcm43xx_net_open;
4098 net_dev->stop = bcm43xx_net_stop;
4099 net_dev->tx_timeout = bcm43xx_net_tx_timeout;
4100#ifdef CONFIG_NET_POLL_CONTROLLER
4101 net_dev->poll_controller = bcm43xx_net_poll_controller;
4102#endif
4103 net_dev->wireless_handlers = &bcm43xx_wx_handlers_def;
4104 net_dev->irq = pdev->irq;
4105 SET_ETHTOOL_OPS(net_dev, &bcm43xx_ethtool_ops);
4106
4107 /* initialize the bcm43xx_private struct */
4108 bcm = bcm43xx_priv(net_dev);
4109 memset(bcm, 0, sizeof(*bcm));
4110 err = bcm43xx_init_private(bcm, net_dev, pdev);
4111 if (err)
4112 goto err_free_netdev;
4113
4114 pci_set_drvdata(pdev, net_dev);
4115
4116 err = bcm43xx_attach_board(bcm);
4117 if (err)
4118 goto err_free_netdev;
4119
4120 err = register_netdev(net_dev);
4121 if (err) {
4122 printk(KERN_ERR PFX "Cannot register net device, "
4123 "aborting.\n");
4124 err = -ENOMEM;
4125 goto err_detach_board;
4126 }
4127
4128 bcm43xx_debugfs_add_device(bcm);
4129
4130 assert(err == 0);
4131out:
4132 return err;
4133
4134err_detach_board:
4135 bcm43xx_detach_board(bcm);
4136err_free_netdev:
4137 free_ieee80211softmac(net_dev);
4138 goto out;
4139}
4140
4141static void __devexit bcm43xx_remove_one(struct pci_dev *pdev)
4142{
4143 struct net_device *net_dev = pci_get_drvdata(pdev);
4144 struct bcm43xx_private *bcm = bcm43xx_priv(net_dev);
4145
4146 bcm43xx_debugfs_remove_device(bcm);
4147 unregister_netdev(net_dev);
4148 bcm43xx_detach_board(bcm);
4149 free_ieee80211softmac(net_dev);
4150}
4151
4152/* Hard-reset the chip. Do not call this directly.
4153 * Use bcm43xx_controller_restart()
4154 */
4155static void bcm43xx_chip_reset(struct work_struct *work)
4156{
4157 struct bcm43xx_private *bcm =
4158 container_of(work, struct bcm43xx_private, restart_work);
4159 struct bcm43xx_phyinfo *phy;
4160 int err = -ENODEV;
4161
4162 bcm43xx_cancel_work(bcm);
4163 mutex_lock(&(bcm)->mutex);
4164 if (bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED) {
4165 phy = bcm43xx_current_phy(bcm);
4166 err = bcm43xx_select_wireless_core(bcm, phy->type);
4167 if (!err)
4168 bcm43xx_periodic_tasks_setup(bcm);
4169 }
4170 mutex_unlock(&(bcm)->mutex);
4171
4172 printk(KERN_ERR PFX "Controller restart%s\n",
4173 (err == 0) ? "ed" : " failed");
4174}
4175
4176/* Hard-reset the chip.
4177 * This can be called from interrupt or process context.
4178 * bcm->irq_lock must be locked.
4179 */
4180void bcm43xx_controller_restart(struct bcm43xx_private *bcm, const char *reason)
4181{
4182 if (bcm43xx_status(bcm) != BCM43xx_STAT_INITIALIZED)
4183 return;
4184 printk(KERN_ERR PFX "Controller RESET (%s) ...\n", reason);
4185 INIT_WORK(&bcm->restart_work, bcm43xx_chip_reset);
4186 schedule_work(&bcm->restart_work);
4187}
4188
4189#ifdef CONFIG_PM
4190
4191static int bcm43xx_suspend(struct pci_dev *pdev, pm_message_t state)
4192{
4193 struct net_device *net_dev = pci_get_drvdata(pdev);
4194 struct bcm43xx_private *bcm = bcm43xx_priv(net_dev);
4195 int err;
4196
4197 dprintk(KERN_INFO PFX "Suspending...\n");
4198
4199 netif_device_detach(net_dev);
4200 bcm->was_initialized = 0;
4201 if (bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED) {
4202 bcm->was_initialized = 1;
4203 ieee80211softmac_stop(net_dev);
4204 err = bcm43xx_disable_interrupts_sync(bcm);
4205 if (unlikely(err)) {
4206 dprintk(KERN_ERR PFX "Suspend failed.\n");
4207 return -EAGAIN;
4208 }
4209 bcm->firmware_norelease = 1;
4210 bcm43xx_free_board(bcm);
4211 bcm->firmware_norelease = 0;
4212 }
4213 bcm43xx_chipset_detach(bcm);
4214
4215 pci_save_state(pdev);
4216 pci_disable_device(pdev);
4217 pci_set_power_state(pdev, pci_choose_state(pdev, state));
4218
4219 dprintk(KERN_INFO PFX "Device suspended.\n");
4220
4221 return 0;
4222}
4223
4224static int bcm43xx_resume(struct pci_dev *pdev)
4225{
4226 struct net_device *net_dev = pci_get_drvdata(pdev);
4227 struct bcm43xx_private *bcm = bcm43xx_priv(net_dev);
4228 int err = 0;
4229
4230 dprintk(KERN_INFO PFX "Resuming...\n");
4231
4232 pci_set_power_state(pdev, 0);
4233 err = pci_enable_device(pdev);
4234 if (err) {
4235 printk(KERN_ERR PFX "Failure with pci_enable_device!\n");
4236 return err;
4237 }
4238 pci_restore_state(pdev);
4239
4240 bcm43xx_chipset_attach(bcm);
4241 if (bcm->was_initialized)
4242 err = bcm43xx_init_board(bcm);
4243 if (err) {
4244 printk(KERN_ERR PFX "Resume failed!\n");
4245 return err;
4246 }
4247 netif_device_attach(net_dev);
4248
4249 dprintk(KERN_INFO PFX "Device resumed.\n");
4250
4251 return 0;
4252}
4253
4254#endif /* CONFIG_PM */
4255
4256static struct pci_driver bcm43xx_pci_driver = {
4257 .name = KBUILD_MODNAME,
4258 .id_table = bcm43xx_pci_tbl,
4259 .probe = bcm43xx_init_one,
4260 .remove = __devexit_p(bcm43xx_remove_one),
4261#ifdef CONFIG_PM
4262 .suspend = bcm43xx_suspend,
4263 .resume = bcm43xx_resume,
4264#endif /* CONFIG_PM */
4265};
4266
4267static int __init bcm43xx_init(void)
4268{
4269 printk(KERN_INFO KBUILD_MODNAME " driver\n");
4270 bcm43xx_debugfs_init();
4271 return pci_register_driver(&bcm43xx_pci_driver);
4272}
4273
4274static void __exit bcm43xx_exit(void)
4275{
4276 pci_unregister_driver(&bcm43xx_pci_driver);
4277 bcm43xx_debugfs_exit();
4278}
4279
4280module_init(bcm43xx_init)
4281module_exit(bcm43xx_exit)
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_main.h b/drivers/net/wireless/bcm43xx/bcm43xx_main.h
deleted file mode 100644
index 14cfbeb582ef..000000000000
--- a/drivers/net/wireless/bcm43xx/bcm43xx_main.h
+++ /dev/null
@@ -1,133 +0,0 @@
1/*
2
3 Broadcom BCM43xx wireless driver
4
5 Copyright (c) 2005 Martin Langer <martin-langer@gmx.de>,
6 Stefano Brivio <st3@riseup.net>
7 Michael Buesch <mbuesch@freenet.de>
8 Danny van Dyk <kugelfang@gentoo.org>
9 Andreas Jaggi <andreas.jaggi@waterwave.ch>
10
11 Some parts of the code in this file are derived from the ipw2200
12 driver Copyright(c) 2003 - 2004 Intel Corporation.
13
14 This program is free software; you can redistribute it and/or modify
15 it under the terms of the GNU General Public License as published by
16 the Free Software Foundation; either version 2 of the License, or
17 (at your option) any later version.
18
19 This program is distributed in the hope that it will be useful,
20 but WITHOUT ANY WARRANTY; without even the implied warranty of
21 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 GNU General Public License for more details.
23
24 You should have received a copy of the GNU General Public License
25 along with this program; see the file COPYING. If not, write to
26 the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
27 Boston, MA 02110-1301, USA.
28
29*/
30
31#ifndef BCM43xx_MAIN_H_
32#define BCM43xx_MAIN_H_
33
34#include "bcm43xx.h"
35
36#define P4D_BYT3S(magic, nr_bytes) u8 __p4dding##magic[nr_bytes]
37#define P4D_BYTES(line, nr_bytes) P4D_BYT3S(line, nr_bytes)
38/* Magic helper macro to pad structures. Ignore those above. It's magic. */
39#define PAD_BYTES(nr_bytes) P4D_BYTES( __LINE__ , (nr_bytes))
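/* Illustrative example, not from the original header: PAD_BYTES(n) pastes
 * the current __LINE__ into the field name, so each use yields a uniquely
 * named "u8 __p4dding<line>[n]" member. Hypothetical usage sketch:
 */
struct example_hdr {
	u8 flags;
	PAD_BYTES(3);		/* becomes: u8 __p4dding<line>[3]; */
	u32 payload_len;
} __attribute__((__packed__));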
40
41
42/* Lightweight function to convert a frequency (in MHz) to a channel number. */
43static inline
44u8 bcm43xx_freq_to_channel_a(int freq)
45{
46 return ((freq - 5000) / 5);
47}
48static inline
49u8 bcm43xx_freq_to_channel_bg(int freq)
50{
51 u8 channel;
52
53 if (freq == 2484)
54 channel = 14;
55 else
56 channel = (freq - 2407) / 5;
57
58 return channel;
59}
60static inline
61u8 bcm43xx_freq_to_channel(struct bcm43xx_private *bcm,
62 int freq)
63{
64 if (bcm43xx_current_phy(bcm)->type == BCM43xx_PHYTYPE_A)
65 return bcm43xx_freq_to_channel_a(freq);
66 return bcm43xx_freq_to_channel_bg(freq);
67}
68
69/* Lightweight function to convert a channel number to a frequency (in MHz). */
70static inline
71int bcm43xx_channel_to_freq_a(u8 channel)
72{
73 return (5000 + (5 * channel));
74}
75static inline
76int bcm43xx_channel_to_freq_bg(u8 channel)
77{
78 int freq;
79
80 if (channel == 14)
81 freq = 2484;
82 else
83 freq = 2407 + (5 * channel);
84
85 return freq;
86}
87static inline
88int bcm43xx_channel_to_freq(struct bcm43xx_private *bcm,
89 u8 channel)
90{
91 if (bcm43xx_current_phy(bcm)->type == BCM43xx_PHYTYPE_A)
92 return bcm43xx_channel_to_freq_a(channel);
93 return bcm43xx_channel_to_freq_bg(channel);
94}
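/* Illustrative worked examples, not from the original header:
 *   2.4 GHz: channel 6 -> 2407 + 5 * 6 = 2437 MHz, and 2484 MHz maps back
 *            to the special-cased channel 14.
 *   5 GHz:   channel 36 -> 5000 + 5 * 36 = 5180 MHz, and
 *            (5180 - 5000) / 5 = 36 on the reverse path.
 */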
95
96void bcm43xx_tsf_read(struct bcm43xx_private *bcm, u64 *tsf);
97void bcm43xx_tsf_write(struct bcm43xx_private *bcm, u64 tsf);
98
99void bcm43xx_set_iwmode(struct bcm43xx_private *bcm,
100 int iw_mode);
101
102u32 bcm43xx_shm_read32(struct bcm43xx_private *bcm,
103 u16 routing, u16 offset);
104u16 bcm43xx_shm_read16(struct bcm43xx_private *bcm,
105 u16 routing, u16 offset);
106void bcm43xx_shm_write32(struct bcm43xx_private *bcm,
107 u16 routing, u16 offset,
108 u32 value);
109void bcm43xx_shm_write16(struct bcm43xx_private *bcm,
110 u16 routing, u16 offset,
111 u16 value);
112
113void bcm43xx_dummy_transmission(struct bcm43xx_private *bcm);
114
115int bcm43xx_switch_core(struct bcm43xx_private *bcm, struct bcm43xx_coreinfo *new_core);
116
117int bcm43xx_select_wireless_core(struct bcm43xx_private *bcm,
118 int phytype);
119
120void bcm43xx_wireless_core_reset(struct bcm43xx_private *bcm, int connect_phy);
121
122void bcm43xx_mac_suspend(struct bcm43xx_private *bcm);
123void bcm43xx_mac_enable(struct bcm43xx_private *bcm);
124
125void bcm43xx_cancel_work(struct bcm43xx_private *bcm);
126void bcm43xx_periodic_tasks_setup(struct bcm43xx_private *bcm);
127
128void bcm43xx_controller_restart(struct bcm43xx_private *bcm, const char *reason);
129
130int bcm43xx_sprom_read(struct bcm43xx_private *bcm, u16 *sprom);
131int bcm43xx_sprom_write(struct bcm43xx_private *bcm, const u16 *sprom);
132
133#endif /* BCM43xx_MAIN_H_ */
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_phy.c b/drivers/net/wireless/bcm43xx/bcm43xx_phy.c
deleted file mode 100644
index af3de3343650..000000000000
--- a/drivers/net/wireless/bcm43xx/bcm43xx_phy.c
+++ /dev/null
@@ -1,2346 +0,0 @@
1/*
2
3 Broadcom BCM43xx wireless driver
4
5 Copyright (c) 2005 Martin Langer <martin-langer@gmx.de>,
6 Stefano Brivio <st3@riseup.net>
7 Michael Buesch <mbuesch@freenet.de>
8 Danny van Dyk <kugelfang@gentoo.org>
9 Andreas Jaggi <andreas.jaggi@waterwave.ch>
10
11 Some parts of the code in this file are derived from the ipw2200
12 driver Copyright(c) 2003 - 2004 Intel Corporation.
13
14 This program is free software; you can redistribute it and/or modify
15 it under the terms of the GNU General Public License as published by
16 the Free Software Foundation; either version 2 of the License, or
17 (at your option) any later version.
18
19 This program is distributed in the hope that it will be useful,
20 but WITHOUT ANY WARRANTY; without even the implied warranty of
21 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 GNU General Public License for more details.
23
24 You should have received a copy of the GNU General Public License
25 along with this program; see the file COPYING. If not, write to
26 the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
27 Boston, MA 02110-1301, USA.
28
29*/
30
31#include <linux/delay.h>
32#include <linux/pci.h>
33#include <linux/types.h>
34
35#include "bcm43xx.h"
36#include "bcm43xx_phy.h"
37#include "bcm43xx_main.h"
38#include "bcm43xx_radio.h"
39#include "bcm43xx_ilt.h"
40#include "bcm43xx_power.h"
41
42
43static const s8 bcm43xx_tssi2dbm_b_table[] = {
44 0x4D, 0x4C, 0x4B, 0x4A,
45 0x4A, 0x49, 0x48, 0x47,
46 0x47, 0x46, 0x45, 0x45,
47 0x44, 0x43, 0x42, 0x42,
48 0x41, 0x40, 0x3F, 0x3E,
49 0x3D, 0x3C, 0x3B, 0x3A,
50 0x39, 0x38, 0x37, 0x36,
51 0x35, 0x34, 0x32, 0x31,
52 0x30, 0x2F, 0x2D, 0x2C,
53 0x2B, 0x29, 0x28, 0x26,
54 0x25, 0x23, 0x21, 0x1F,
55 0x1D, 0x1A, 0x17, 0x14,
56 0x10, 0x0C, 0x06, 0x00,
57 -7, -7, -7, -7,
58 -7, -7, -7, -7,
59 -7, -7, -7, -7,
60};
61
62static const s8 bcm43xx_tssi2dbm_g_table[] = {
63 77, 77, 77, 76,
64 76, 76, 75, 75,
65 74, 74, 73, 73,
66 73, 72, 72, 71,
67 71, 70, 70, 69,
68 68, 68, 67, 67,
69 66, 65, 65, 64,
70 63, 63, 62, 61,
71 60, 59, 58, 57,
72 56, 55, 54, 53,
73 52, 50, 49, 47,
74 45, 43, 40, 37,
75 33, 28, 22, 14,
76 5, -7, -20, -20,
77 -20, -20, -20, -20,
78 -20, -20, -20, -20,
79};
80
81static void bcm43xx_phy_initg(struct bcm43xx_private *bcm);
82
83
84static inline
85void bcm43xx_voluntary_preempt(void)
86{
87 assert(!in_atomic() && !in_irq() &&
88 !in_interrupt() && !irqs_disabled());
89#ifndef CONFIG_PREEMPT
90 cond_resched();
91#endif /* CONFIG_PREEMPT */
92}
93
94void bcm43xx_raw_phy_lock(struct bcm43xx_private *bcm)
95{
96 struct bcm43xx_phyinfo *phy = bcm43xx_current_phy(bcm);
97
98 assert(irqs_disabled());
99 if (bcm43xx_read32(bcm, BCM43xx_MMIO_STATUS_BITFIELD) == 0x00000000) {
100 phy->is_locked = 0;
101 return;
102 }
103 if (bcm->current_core->rev < 3) {
104 bcm43xx_mac_suspend(bcm);
105 spin_lock(&phy->lock);
106 } else {
107 if (bcm->ieee->iw_mode != IW_MODE_MASTER)
108 bcm43xx_power_saving_ctl_bits(bcm, -1, 1);
109 }
110 phy->is_locked = 1;
111}
112
113void bcm43xx_raw_phy_unlock(struct bcm43xx_private *bcm)
114{
115 struct bcm43xx_phyinfo *phy = bcm43xx_current_phy(bcm);
116
117 assert(irqs_disabled());
118 if (bcm->current_core->rev < 3) {
119 if (phy->is_locked) {
120 spin_unlock(&phy->lock);
121 bcm43xx_mac_enable(bcm);
122 }
123 } else {
124 if (bcm->ieee->iw_mode != IW_MODE_MASTER)
125 bcm43xx_power_saving_ctl_bits(bcm, -1, -1);
126 }
127 phy->is_locked = 0;
128}
129
130u16 bcm43xx_phy_read(struct bcm43xx_private *bcm, u16 offset)
131{
132 bcm43xx_write16(bcm, BCM43xx_MMIO_PHY_CONTROL, offset);
133 return bcm43xx_read16(bcm, BCM43xx_MMIO_PHY_DATA);
134}
135
136void bcm43xx_phy_write(struct bcm43xx_private *bcm, u16 offset, u16 val)
137{
138 bcm43xx_write16(bcm, BCM43xx_MMIO_PHY_CONTROL, offset);
139 mmiowb();
140 bcm43xx_write16(bcm, BCM43xx_MMIO_PHY_DATA, val);
141}
142
143void bcm43xx_phy_calibrate(struct bcm43xx_private *bcm)
144{
145 struct bcm43xx_phyinfo *phy = bcm43xx_current_phy(bcm);
146
147 bcm43xx_read32(bcm, BCM43xx_MMIO_STATUS_BITFIELD); /* Dummy read. */
148 if (phy->calibrated)
149 return;
150 if (phy->type == BCM43xx_PHYTYPE_G && phy->rev == 1) {
151 bcm43xx_wireless_core_reset(bcm, 0);
152 bcm43xx_phy_initg(bcm);
153 bcm43xx_wireless_core_reset(bcm, 1);
154 }
155 phy->calibrated = 1;
156}
157
158/* Connect the PHY
159 * http://bcm-specs.sipsolutions.net/SetPHY
160 */
161int bcm43xx_phy_connect(struct bcm43xx_private *bcm, int connect)
162{
163 struct bcm43xx_phyinfo *phy = bcm43xx_current_phy(bcm);
164 u32 flags;
165
166 if (bcm->current_core->rev < 5)
167 goto out;
168
169 flags = bcm43xx_read32(bcm, BCM43xx_CIR_SBTMSTATEHIGH);
170 if (connect) {
171 if (!(flags & BCM43xx_SBTMSTATEHIGH_G_PHY_AVAIL))
172 return -ENODEV;
173 flags = bcm43xx_read32(bcm, BCM43xx_CIR_SBTMSTATELOW);
174 flags |= BCM43xx_SBTMSTATELOW_G_MODE_ENABLE;
175 bcm43xx_write32(bcm, BCM43xx_CIR_SBTMSTATELOW, flags);
176 } else {
177 if (!(flags & BCM43xx_SBTMSTATEHIGH_A_PHY_AVAIL))
178 return -ENODEV;
179 flags = bcm43xx_read32(bcm, BCM43xx_CIR_SBTMSTATELOW);
180 flags &= ~BCM43xx_SBTMSTATELOW_G_MODE_ENABLE;
181 bcm43xx_write32(bcm, BCM43xx_CIR_SBTMSTATELOW, flags);
182 }
183out:
184 phy->connected = connect;
185 if (connect)
186 dprintk(KERN_INFO PFX "PHY connected\n");
187 else
188 dprintk(KERN_INFO PFX "PHY disconnected\n");
189
190 return 0;
191}
192
193/* Initialize B PHY power control
194 * as described in http://bcm-specs.sipsolutions.net/InitPowerControl
195 */
196static void bcm43xx_phy_init_pctl(struct bcm43xx_private *bcm)
197{
198 struct bcm43xx_phyinfo *phy = bcm43xx_current_phy(bcm);
199 struct bcm43xx_radioinfo *radio = bcm43xx_current_radio(bcm);
200 u16 saved_batt = 0, saved_ratt = 0, saved_txctl1 = 0;
201 int must_reset_txpower = 0;
202
203 assert(phy->type != BCM43xx_PHYTYPE_A);
204 if ((bcm->board_vendor == PCI_VENDOR_ID_BROADCOM) &&
205 (bcm->board_type == 0x0416))
206 return;
207
208 bcm43xx_phy_write(bcm, 0x0028, 0x8018);
209 bcm43xx_write16(bcm, 0x03E6, bcm43xx_read16(bcm, 0x03E6) & 0xFFDF);
210
211 if (phy->type == BCM43xx_PHYTYPE_G) {
212 if (!phy->connected)
213 return;
214 bcm43xx_phy_write(bcm, 0x047A, 0xC111);
215 }
216 if (phy->savedpctlreg != 0xFFFF)
217 return;
218
219 if (phy->type == BCM43xx_PHYTYPE_B &&
220 phy->rev >= 2 &&
221 radio->version == 0x2050) {
222 bcm43xx_radio_write16(bcm, 0x0076,
223 bcm43xx_radio_read16(bcm, 0x0076) | 0x0084);
224 } else {
225 saved_batt = radio->baseband_atten;
226 saved_ratt = radio->radio_atten;
227 saved_txctl1 = radio->txctl1;
228 if ((radio->revision >= 6) && (radio->revision <= 8)
229 && /*FIXME: incomplete specs for 5 < revision < 9 */ 0)
230 bcm43xx_radio_set_txpower_bg(bcm, 0xB, 0x1F, 0);
231 else
232 bcm43xx_radio_set_txpower_bg(bcm, 0xB, 9, 0);
233 must_reset_txpower = 1;
234 }
235 bcm43xx_dummy_transmission(bcm);
236
237 phy->savedpctlreg = bcm43xx_phy_read(bcm, BCM43xx_PHY_G_PCTL);
238
239 if (must_reset_txpower)
240 bcm43xx_radio_set_txpower_bg(bcm, saved_batt, saved_ratt, saved_txctl1);
241 else
242 bcm43xx_radio_write16(bcm, 0x0076, bcm43xx_radio_read16(bcm, 0x0076) & 0xFF7B);
243 bcm43xx_radio_clear_tssi(bcm);
244}
245
246static void bcm43xx_phy_agcsetup(struct bcm43xx_private *bcm)
247{
248 struct bcm43xx_phyinfo *phy = bcm43xx_current_phy(bcm);
249 u16 offset = 0x0000;
250
251 if (phy->rev == 1)
252 offset = 0x4C00;
253
254 bcm43xx_ilt_write(bcm, offset, 0x00FE);
255 bcm43xx_ilt_write(bcm, offset + 1, 0x000D);
256 bcm43xx_ilt_write(bcm, offset + 2, 0x0013);
257 bcm43xx_ilt_write(bcm, offset + 3, 0x0019);
258
259 if (phy->rev == 1) {
260 bcm43xx_ilt_write(bcm, 0x1800, 0x2710);
261 bcm43xx_ilt_write(bcm, 0x1801, 0x9B83);
262 bcm43xx_ilt_write(bcm, 0x1802, 0x9B83);
263 bcm43xx_ilt_write(bcm, 0x1803, 0x0F8D);
264 bcm43xx_phy_write(bcm, 0x0455, 0x0004);
265 }
266
267 bcm43xx_phy_write(bcm, 0x04A5, (bcm43xx_phy_read(bcm, 0x04A5) & 0x00FF) | 0x5700);
268 bcm43xx_phy_write(bcm, 0x041A, (bcm43xx_phy_read(bcm, 0x041A) & 0xFF80) | 0x000F);
269 bcm43xx_phy_write(bcm, 0x041A, (bcm43xx_phy_read(bcm, 0x041A) & 0xC07F) | 0x2B80);
270 bcm43xx_phy_write(bcm, 0x048C, (bcm43xx_phy_read(bcm, 0x048C) & 0xF0FF) | 0x0300);
271
272 bcm43xx_radio_write16(bcm, 0x007A, bcm43xx_radio_read16(bcm, 0x007A) | 0x0008);
273
274 bcm43xx_phy_write(bcm, 0x04A0, (bcm43xx_phy_read(bcm, 0x04A0) & 0xFFF0) | 0x0008);
275 bcm43xx_phy_write(bcm, 0x04A1, (bcm43xx_phy_read(bcm, 0x04A1) & 0xF0FF) | 0x0600);
276 bcm43xx_phy_write(bcm, 0x04A2, (bcm43xx_phy_read(bcm, 0x04A2) & 0xF0FF) | 0x0700);
277 bcm43xx_phy_write(bcm, 0x04A0, (bcm43xx_phy_read(bcm, 0x04A0) & 0xF0FF) | 0x0100);
278
279 if (phy->rev == 1)
280 bcm43xx_phy_write(bcm, 0x04A2, (bcm43xx_phy_read(bcm, 0x04A2) & 0xFFF0) | 0x0007);
281
282 bcm43xx_phy_write(bcm, 0x0488, (bcm43xx_phy_read(bcm, 0x0488) & 0xFF00) | 0x001C);
283 bcm43xx_phy_write(bcm, 0x0488, (bcm43xx_phy_read(bcm, 0x0488) & 0xC0FF) | 0x0200);
284 bcm43xx_phy_write(bcm, 0x0496, (bcm43xx_phy_read(bcm, 0x0496) & 0xFF00) | 0x001C);
285 bcm43xx_phy_write(bcm, 0x0489, (bcm43xx_phy_read(bcm, 0x0489) & 0xFF00) | 0x0020);
286 bcm43xx_phy_write(bcm, 0x0489, (bcm43xx_phy_read(bcm, 0x0489) & 0xC0FF) | 0x0200);
287 bcm43xx_phy_write(bcm, 0x0482, (bcm43xx_phy_read(bcm, 0x0482) & 0xFF00) | 0x002E);
288 bcm43xx_phy_write(bcm, 0x0496, (bcm43xx_phy_read(bcm, 0x0496) & 0x00FF) | 0x1A00);
289 bcm43xx_phy_write(bcm, 0x0481, (bcm43xx_phy_read(bcm, 0x0481) & 0xFF00) | 0x0028);
290 bcm43xx_phy_write(bcm, 0x0481, (bcm43xx_phy_read(bcm, 0x0481) & 0x00FF) | 0x2C00);
291
292 if (phy->rev == 1) {
293 bcm43xx_phy_write(bcm, 0x0430, 0x092B);
294 bcm43xx_phy_write(bcm, 0x041B, (bcm43xx_phy_read(bcm, 0x041B) & 0xFFE1) | 0x0002);
295 } else {
296 bcm43xx_phy_write(bcm, 0x041B, bcm43xx_phy_read(bcm, 0x041B) & 0xFFE1);
297 bcm43xx_phy_write(bcm, 0x041F, 0x287A);
298 bcm43xx_phy_write(bcm, 0x0420, (bcm43xx_phy_read(bcm, 0x0420) & 0xFFF0) | 0x0004);
299 }
300
301 if (phy->rev > 2) {
302 bcm43xx_phy_write(bcm, 0x0422, 0x287A);
303 bcm43xx_phy_write(bcm, 0x0420, (bcm43xx_phy_read(bcm, 0x0420)
304 & 0x0FFF) | 0x3000);
305 }
306
307 bcm43xx_phy_write(bcm, 0x04A8, (bcm43xx_phy_read(bcm, 0x04A8) & 0x8080)
308 | 0x7874);
309 bcm43xx_phy_write(bcm, 0x048E, 0x1C00);
310
311 if (phy->rev == 1) {
312 bcm43xx_phy_write(bcm, 0x04AB, (bcm43xx_phy_read(bcm, 0x04AB)
313 & 0xF0FF) | 0x0600);
314 bcm43xx_phy_write(bcm, 0x048B, 0x005E);
315 bcm43xx_phy_write(bcm, 0x048C, (bcm43xx_phy_read(bcm, 0x048C)
316 & 0xFF00) | 0x001E);
317 bcm43xx_phy_write(bcm, 0x048D, 0x0002);
318 }
319
320 bcm43xx_ilt_write(bcm, offset + 0x0800, 0);
321 bcm43xx_ilt_write(bcm, offset + 0x0801, 7);
322 bcm43xx_ilt_write(bcm, offset + 0x0802, 16);
323 bcm43xx_ilt_write(bcm, offset + 0x0803, 28);
324
325 if (phy->rev >= 6) {
326 bcm43xx_phy_write(bcm, 0x0426, (bcm43xx_phy_read(bcm, 0x0426)
327 & 0xFFFC));
328 bcm43xx_phy_write(bcm, 0x0426, (bcm43xx_phy_read(bcm, 0x0426)
329 & 0xEFFF));
330 }
331}
332
333static void bcm43xx_phy_setupg(struct bcm43xx_private *bcm)
334{
335 struct bcm43xx_phyinfo *phy = bcm43xx_current_phy(bcm);
336 u16 i;
337
338 assert(phy->type == BCM43xx_PHYTYPE_G);
339 if (phy->rev == 1) {
340 bcm43xx_phy_write(bcm, 0x0406, 0x4F19);
341 bcm43xx_phy_write(bcm, BCM43xx_PHY_G_CRS,
342 (bcm43xx_phy_read(bcm, BCM43xx_PHY_G_CRS)
343 & 0xFC3F) | 0x0340);
344 bcm43xx_phy_write(bcm, 0x042C, 0x005A);
345 bcm43xx_phy_write(bcm, 0x0427, 0x001A);
346
347 for (i = 0; i < BCM43xx_ILT_FINEFREQG_SIZE; i++)
348 bcm43xx_ilt_write(bcm, 0x5800 + i, bcm43xx_ilt_finefreqg[i]);
349 for (i = 0; i < BCM43xx_ILT_NOISEG1_SIZE; i++)
350 bcm43xx_ilt_write(bcm, 0x1800 + i, bcm43xx_ilt_noiseg1[i]);
351 for (i = 0; i < BCM43xx_ILT_ROTOR_SIZE; i++)
352 bcm43xx_ilt_write32(bcm, 0x2000 + i, bcm43xx_ilt_rotor[i]);
353 } else {
354 /* nrssi values are signed 6-bit values. Not sure why we write 0x7654 here... */
355 bcm43xx_nrssi_hw_write(bcm, 0xBA98, (s16)0x7654);
356
357 if (phy->rev == 2) {
358 bcm43xx_phy_write(bcm, 0x04C0, 0x1861);
359 bcm43xx_phy_write(bcm, 0x04C1, 0x0271);
360 } else if (phy->rev > 2) {
361 bcm43xx_phy_write(bcm, 0x04C0, 0x0098);
362 bcm43xx_phy_write(bcm, 0x04C1, 0x0070);
363 bcm43xx_phy_write(bcm, 0x04C9, 0x0080);
364 }
365 bcm43xx_phy_write(bcm, 0x042B, bcm43xx_phy_read(bcm, 0x042B) | 0x800);
366
367 for (i = 0; i < 64; i++)
368 bcm43xx_ilt_write(bcm, 0x4000 + i, i);
369 for (i = 0; i < BCM43xx_ILT_NOISEG2_SIZE; i++)
370 bcm43xx_ilt_write(bcm, 0x1800 + i, bcm43xx_ilt_noiseg2[i]);
371 }
372
373 if (phy->rev <= 2)
374 for (i = 0; i < BCM43xx_ILT_NOISESCALEG_SIZE; i++)
375 bcm43xx_ilt_write(bcm, 0x1400 + i, bcm43xx_ilt_noisescaleg1[i]);
376 else if ((phy->rev >= 7) && (bcm43xx_phy_read(bcm, 0x0449) & 0x0200))
377 for (i = 0; i < BCM43xx_ILT_NOISESCALEG_SIZE; i++)
378 bcm43xx_ilt_write(bcm, 0x1400 + i, bcm43xx_ilt_noisescaleg3[i]);
379 else
380 for (i = 0; i < BCM43xx_ILT_NOISESCALEG_SIZE; i++)
381 bcm43xx_ilt_write(bcm, 0x1400 + i, bcm43xx_ilt_noisescaleg2[i]);
382
383 if (phy->rev == 2)
384 for (i = 0; i < BCM43xx_ILT_SIGMASQR_SIZE; i++)
385 bcm43xx_ilt_write(bcm, 0x5000 + i, bcm43xx_ilt_sigmasqr1[i]);
386 else if ((phy->rev > 2) && (phy->rev <= 8))
387 for (i = 0; i < BCM43xx_ILT_SIGMASQR_SIZE; i++)
388 bcm43xx_ilt_write(bcm, 0x5000 + i, bcm43xx_ilt_sigmasqr2[i]);
389
390 if (phy->rev == 1) {
391 for (i = 0; i < BCM43xx_ILT_RETARD_SIZE; i++)
392 bcm43xx_ilt_write32(bcm, 0x2400 + i, bcm43xx_ilt_retard[i]);
393 for (i = 0; i < 4; i++) {
394 bcm43xx_ilt_write(bcm, 0x5404 + i, 0x0020);
395 bcm43xx_ilt_write(bcm, 0x5408 + i, 0x0020);
396 bcm43xx_ilt_write(bcm, 0x540C + i, 0x0020);
397 bcm43xx_ilt_write(bcm, 0x5410 + i, 0x0020);
398 }
399 bcm43xx_phy_agcsetup(bcm);
400
401 if ((bcm->board_vendor == PCI_VENDOR_ID_BROADCOM) &&
402 (bcm->board_type == 0x0416) &&
403 (bcm->board_revision == 0x0017))
404 return;
405
406 bcm43xx_ilt_write(bcm, 0x5001, 0x0002);
407 bcm43xx_ilt_write(bcm, 0x5002, 0x0001);
408 } else {
409 for (i = 0; i <= 0x2F; i++)
410 bcm43xx_ilt_write(bcm, 0x1000 + i, 0x0820);
411 bcm43xx_phy_agcsetup(bcm);
412 bcm43xx_phy_read(bcm, 0x0400); /* dummy read */
413 bcm43xx_phy_write(bcm, 0x0403, 0x1000);
414 bcm43xx_ilt_write(bcm, 0x3C02, 0x000F);
415 bcm43xx_ilt_write(bcm, 0x3C03, 0x0014);
416
417 if ((bcm->board_vendor == PCI_VENDOR_ID_BROADCOM) &&
418 (bcm->board_type == 0x0416) &&
419 (bcm->board_revision == 0x0017))
420 return;
421
422 bcm43xx_ilt_write(bcm, 0x0401, 0x0002);
423 bcm43xx_ilt_write(bcm, 0x0402, 0x0001);
424 }
425}
426
427/* Initialize the noisescaletable for APHY */
428static void bcm43xx_phy_init_noisescaletbl(struct bcm43xx_private *bcm)
429{
430 struct bcm43xx_phyinfo *phy = bcm43xx_current_phy(bcm);
431 int i;
432
433 bcm43xx_phy_write(bcm, BCM43xx_PHY_ILT_A_CTRL, 0x1400);
434 for (i = 0; i < 12; i++) {
435 if (phy->rev == 2)
436 bcm43xx_phy_write(bcm, BCM43xx_PHY_ILT_A_DATA1, 0x6767);
437 else
438 bcm43xx_phy_write(bcm, BCM43xx_PHY_ILT_A_DATA1, 0x2323);
439 }
440 if (phy->rev == 2)
441 bcm43xx_phy_write(bcm, BCM43xx_PHY_ILT_A_DATA1, 0x6700);
442 else
443 bcm43xx_phy_write(bcm, BCM43xx_PHY_ILT_A_DATA1, 0x2300);
444 for (i = 0; i < 11; i++) {
445 if (phy->rev == 2)
446 bcm43xx_phy_write(bcm, BCM43xx_PHY_ILT_A_DATA1, 0x6767);
447 else
448 bcm43xx_phy_write(bcm, BCM43xx_PHY_ILT_A_DATA1, 0x2323);
449 }
450 if (phy->rev == 2)
451 bcm43xx_phy_write(bcm, BCM43xx_PHY_ILT_A_DATA1, 0x0067);
452 else
453 bcm43xx_phy_write(bcm, BCM43xx_PHY_ILT_A_DATA1, 0x0023);
454}
455
456static void bcm43xx_phy_setupa(struct bcm43xx_private *bcm)
457{
458 struct bcm43xx_phyinfo *phy = bcm43xx_current_phy(bcm);
459 u16 i;
460
461 assert(phy->type == BCM43xx_PHYTYPE_A);
462 switch (phy->rev) {
463 case 2:
464 bcm43xx_phy_write(bcm, 0x008E, 0x3800);
465 bcm43xx_phy_write(bcm, 0x0035, 0x03FF);
466 bcm43xx_phy_write(bcm, 0x0036, 0x0400);
467
468 bcm43xx_ilt_write(bcm, 0x3807, 0x0051);
469
470 bcm43xx_phy_write(bcm, 0x001C, 0x0FF9);
471 bcm43xx_phy_write(bcm, 0x0020, bcm43xx_phy_read(bcm, 0x0020) & 0xFF0F);
472 bcm43xx_ilt_write(bcm, 0x3C0C, 0x07BF);
473 bcm43xx_radio_write16(bcm, 0x0002, 0x07BF);
474
475 bcm43xx_phy_write(bcm, 0x0024, 0x4680);
476 bcm43xx_phy_write(bcm, 0x0020, 0x0003);
477 bcm43xx_phy_write(bcm, 0x001D, 0x0F40);
478 bcm43xx_phy_write(bcm, 0x001F, 0x1C00);
479
480 bcm43xx_phy_write(bcm, 0x002A, (bcm43xx_phy_read(bcm, 0x002A) & 0x00FF) | 0x0400);
481 bcm43xx_phy_write(bcm, 0x002B, bcm43xx_phy_read(bcm, 0x002B) & 0xFBFF);
482 bcm43xx_phy_write(bcm, 0x008E, 0x58C1);
483
484 bcm43xx_ilt_write(bcm, 0x0803, 0x000F);
485 bcm43xx_ilt_write(bcm, 0x0804, 0x001F);
486 bcm43xx_ilt_write(bcm, 0x0805, 0x002A);
487 bcm43xx_ilt_write(bcm, 0x0805, 0x0030);
488 bcm43xx_ilt_write(bcm, 0x0807, 0x003A);
489
490 bcm43xx_ilt_write(bcm, 0x0000, 0x0013);
491 bcm43xx_ilt_write(bcm, 0x0001, 0x0013);
492 bcm43xx_ilt_write(bcm, 0x0002, 0x0013);
493 bcm43xx_ilt_write(bcm, 0x0003, 0x0013);
494 bcm43xx_ilt_write(bcm, 0x0004, 0x0015);
495 bcm43xx_ilt_write(bcm, 0x0005, 0x0015);
496 bcm43xx_ilt_write(bcm, 0x0006, 0x0019);
497
498 bcm43xx_ilt_write(bcm, 0x0404, 0x0003);
499 bcm43xx_ilt_write(bcm, 0x0405, 0x0003);
500 bcm43xx_ilt_write(bcm, 0x0406, 0x0007);
501
502 for (i = 0; i < 16; i++)
503 bcm43xx_ilt_write(bcm, 0x4000 + i, (0x8 + i) & 0x000F);
504
505 bcm43xx_ilt_write(bcm, 0x3003, 0x1044);
506 bcm43xx_ilt_write(bcm, 0x3004, 0x7201);
507 bcm43xx_ilt_write(bcm, 0x3006, 0x0040);
508 bcm43xx_ilt_write(bcm, 0x3001, (bcm43xx_ilt_read(bcm, 0x3001) & 0x0010) | 0x0008);
509
510 for (i = 0; i < BCM43xx_ILT_FINEFREQA_SIZE; i++)
511 bcm43xx_ilt_write(bcm, 0x5800 + i, bcm43xx_ilt_finefreqa[i]);
512 for (i = 0; i < BCM43xx_ILT_NOISEA2_SIZE; i++)
513 bcm43xx_ilt_write(bcm, 0x1800 + i, bcm43xx_ilt_noisea2[i]);
514 for (i = 0; i < BCM43xx_ILT_ROTOR_SIZE; i++)
515 bcm43xx_ilt_write32(bcm, 0x2000 + i, bcm43xx_ilt_rotor[i]);
516 bcm43xx_phy_init_noisescaletbl(bcm);
517 for (i = 0; i < BCM43xx_ILT_RETARD_SIZE; i++)
518 bcm43xx_ilt_write32(bcm, 0x2400 + i, bcm43xx_ilt_retard[i]);
519 break;
520 case 3:
521 for (i = 0; i < 64; i++)
522 bcm43xx_ilt_write(bcm, 0x4000 + i, i);
523
524 bcm43xx_ilt_write(bcm, 0x3807, 0x0051);
525
526 bcm43xx_phy_write(bcm, 0x001C, 0x0FF9);
527 bcm43xx_phy_write(bcm, 0x0020, bcm43xx_phy_read(bcm, 0x0020) & 0xFF0F);
528 bcm43xx_radio_write16(bcm, 0x0002, 0x07BF);
529
530 bcm43xx_phy_write(bcm, 0x0024, 0x4680);
531 bcm43xx_phy_write(bcm, 0x0020, 0x0003);
532 bcm43xx_phy_write(bcm, 0x001D, 0x0F40);
533 bcm43xx_phy_write(bcm, 0x001F, 0x1C00);
534 bcm43xx_phy_write(bcm, 0x002A, (bcm43xx_phy_read(bcm, 0x002A) & 0x00FF) | 0x0400);
535
536 bcm43xx_ilt_write(bcm, 0x3001, (bcm43xx_ilt_read(bcm, 0x3001) & 0x0010) | 0x0008);
537 for (i = 0; i < BCM43xx_ILT_NOISEA3_SIZE; i++)
538 bcm43xx_ilt_write(bcm, 0x1800 + i, bcm43xx_ilt_noisea3[i]);
539 bcm43xx_phy_init_noisescaletbl(bcm);
540 for (i = 0; i < BCM43xx_ILT_SIGMASQR_SIZE; i++)
541 bcm43xx_ilt_write(bcm, 0x5000 + i, bcm43xx_ilt_sigmasqr1[i]);
542
543 bcm43xx_phy_write(bcm, 0x0003, 0x1808);
544
545 bcm43xx_ilt_write(bcm, 0x0803, 0x000F);
546 bcm43xx_ilt_write(bcm, 0x0804, 0x001F);
547 bcm43xx_ilt_write(bcm, 0x0805, 0x002A);
548 bcm43xx_ilt_write(bcm, 0x0805, 0x0030);
549 bcm43xx_ilt_write(bcm, 0x0807, 0x003A);
550
551 bcm43xx_ilt_write(bcm, 0x0000, 0x0013);
552 bcm43xx_ilt_write(bcm, 0x0001, 0x0013);
553 bcm43xx_ilt_write(bcm, 0x0002, 0x0013);
554 bcm43xx_ilt_write(bcm, 0x0003, 0x0013);
555 bcm43xx_ilt_write(bcm, 0x0004, 0x0015);
556 bcm43xx_ilt_write(bcm, 0x0005, 0x0015);
557 bcm43xx_ilt_write(bcm, 0x0006, 0x0019);
558
559 bcm43xx_ilt_write(bcm, 0x0404, 0x0003);
560 bcm43xx_ilt_write(bcm, 0x0405, 0x0003);
561 bcm43xx_ilt_write(bcm, 0x0406, 0x0007);
562
563 bcm43xx_ilt_write(bcm, 0x3C02, 0x000F);
564 bcm43xx_ilt_write(bcm, 0x3C03, 0x0014);
565 break;
566 default:
567 assert(0);
568 }
569}
570
571/* Initialize APHY. This is also called for the GPHY in some cases. */
572static void bcm43xx_phy_inita(struct bcm43xx_private *bcm)
573{
574 struct bcm43xx_phyinfo *phy = bcm43xx_current_phy(bcm);
575 struct bcm43xx_radioinfo *radio = bcm43xx_current_radio(bcm);
576 u16 tval;
577
578 if (phy->type == BCM43xx_PHYTYPE_A) {
579 bcm43xx_phy_setupa(bcm);
580 } else {
581 bcm43xx_phy_setupg(bcm);
582 if (bcm->sprom.boardflags & BCM43xx_BFL_PACTRL)
583 bcm43xx_phy_write(bcm, 0x046E, 0x03CF);
584 return;
585 }
586
587 bcm43xx_phy_write(bcm, BCM43xx_PHY_A_CRS,
588 (bcm43xx_phy_read(bcm, BCM43xx_PHY_A_CRS) & 0xF83C) | 0x0340);
589 bcm43xx_phy_write(bcm, 0x0034, 0x0001);
590
591 TODO();//TODO: RSSI AGC
592 bcm43xx_phy_write(bcm, BCM43xx_PHY_A_CRS,
593 bcm43xx_phy_read(bcm, BCM43xx_PHY_A_CRS) | (1 << 14));
594 bcm43xx_radio_init2060(bcm);
595
596 if ((bcm->board_vendor == PCI_VENDOR_ID_BROADCOM)
597 && ((bcm->board_type == 0x0416) || (bcm->board_type == 0x040A))) {
598 if (radio->lofcal == 0xFFFF) {
599 TODO();//TODO: LOF Cal
600 bcm43xx_radio_set_tx_iq(bcm);
601 } else
602 bcm43xx_radio_write16(bcm, 0x001E, radio->lofcal);
603 }
604
605 bcm43xx_phy_write(bcm, 0x007A, 0xF111);
606
607 if (phy->savedpctlreg == 0xFFFF) {
608 bcm43xx_radio_write16(bcm, 0x0019, 0x0000);
609 bcm43xx_radio_write16(bcm, 0x0017, 0x0020);
610
611 tval = bcm43xx_ilt_read(bcm, 0x3001);
612 if (phy->rev == 1) {
613 bcm43xx_ilt_write(bcm, 0x3001,
614 (bcm43xx_ilt_read(bcm, 0x3001) & 0xFF87)
615 | 0x0058);
616 } else {
617 bcm43xx_ilt_write(bcm, 0x3001,
618 (bcm43xx_ilt_read(bcm, 0x3001) & 0xFFC3)
619 | 0x002C);
620 }
621 bcm43xx_dummy_transmission(bcm);
622 phy->savedpctlreg = bcm43xx_phy_read(bcm, BCM43xx_PHY_A_PCTL);
623 bcm43xx_ilt_write(bcm, 0x3001, tval);
624
625 bcm43xx_radio_set_txpower_a(bcm, 0x0018);
626 }
627 bcm43xx_radio_clear_tssi(bcm);
628}
629
630static void bcm43xx_phy_initb2(struct bcm43xx_private *bcm)
631{
632 struct bcm43xx_radioinfo *radio = bcm43xx_current_radio(bcm);
633 u16 offset, val;
634
635 bcm43xx_write16(bcm, 0x03EC, 0x3F22);
636 bcm43xx_phy_write(bcm, 0x0020, 0x301C);
637 bcm43xx_phy_write(bcm, 0x0026, 0x0000);
638 bcm43xx_phy_write(bcm, 0x0030, 0x00C6);
639 bcm43xx_phy_write(bcm, 0x0088, 0x3E00);
640 val = 0x3C3D;
641 for (offset = 0x0089; offset < 0x00A7; offset++) {
642 bcm43xx_phy_write(bcm, offset, val);
643 val -= 0x0202;
644 }
645 bcm43xx_phy_write(bcm, 0x03E4, 0x3000);
646 if (radio->channel == 0xFF)
647 bcm43xx_radio_selectchannel(bcm, BCM43xx_RADIO_DEFAULT_CHANNEL_BG, 0);
648 else
649 bcm43xx_radio_selectchannel(bcm, radio->channel, 0);
650 if (radio->version != 0x2050) {
651 bcm43xx_radio_write16(bcm, 0x0075, 0x0080);
652 bcm43xx_radio_write16(bcm, 0x0079, 0x0081);
653 }
654 bcm43xx_radio_write16(bcm, 0x0050, 0x0020);
655 bcm43xx_radio_write16(bcm, 0x0050, 0x0023);
656 if (radio->version == 0x2050) {
657 bcm43xx_radio_write16(bcm, 0x0050, 0x0020);
658 bcm43xx_radio_write16(bcm, 0x005A, 0x0070);
659 bcm43xx_radio_write16(bcm, 0x005B, 0x007B);
660 bcm43xx_radio_write16(bcm, 0x005C, 0x00B0);
661 bcm43xx_radio_write16(bcm, 0x007A, 0x000F);
662 bcm43xx_phy_write(bcm, 0x0038, 0x0677);
663 bcm43xx_radio_init2050(bcm);
664 }
665 bcm43xx_phy_write(bcm, 0x0014, 0x0080);
666 bcm43xx_phy_write(bcm, 0x0032, 0x00CA);
667 bcm43xx_phy_write(bcm, 0x0032, 0x00CC);
668 bcm43xx_phy_write(bcm, 0x0035, 0x07C2);
669 bcm43xx_phy_lo_b_measure(bcm);
670 bcm43xx_phy_write(bcm, 0x0026, 0xCC00);
671 if (radio->version != 0x2050)
672 bcm43xx_phy_write(bcm, 0x0026, 0xCE00);
673 bcm43xx_write16(bcm, BCM43xx_MMIO_CHANNEL_EXT, 0x1000);
674 bcm43xx_phy_write(bcm, 0x002A, 0x88A3);
675 if (radio->version != 0x2050)
676 bcm43xx_phy_write(bcm, 0x002A, 0x88C2);
677 bcm43xx_radio_set_txpower_bg(bcm, 0xFFFF, 0xFFFF, 0xFFFF);
678 bcm43xx_phy_init_pctl(bcm);
679}
680
681static void bcm43xx_phy_initb4(struct bcm43xx_private *bcm)
682{
683 struct bcm43xx_radioinfo *radio = bcm43xx_current_radio(bcm);
684 u16 offset, val;
685
686 bcm43xx_write16(bcm, 0x03EC, 0x3F22);
687 bcm43xx_phy_write(bcm, 0x0020, 0x301C);
688 bcm43xx_phy_write(bcm, 0x0026, 0x0000);
689 bcm43xx_phy_write(bcm, 0x0030, 0x00C6);
690 bcm43xx_phy_write(bcm, 0x0088, 0x3E00);
691 val = 0x3C3D;
692 for (offset = 0x0089; offset < 0x00A7; offset++) {
693 bcm43xx_phy_write(bcm, offset, val);
694 val -= 0x0202;
695 }
696 bcm43xx_phy_write(bcm, 0x03E4, 0x3000);
697 if (radio->channel == 0xFF)
698 bcm43xx_radio_selectchannel(bcm, BCM43xx_RADIO_DEFAULT_CHANNEL_BG, 0);
699 else
700 bcm43xx_radio_selectchannel(bcm, radio->channel, 0);
701 if (radio->version != 0x2050) {
702 bcm43xx_radio_write16(bcm, 0x0075, 0x0080);
703 bcm43xx_radio_write16(bcm, 0x0079, 0x0081);
704 }
705 bcm43xx_radio_write16(bcm, 0x0050, 0x0020);
706 bcm43xx_radio_write16(bcm, 0x0050, 0x0023);
707 if (radio->version == 0x2050) {
708 bcm43xx_radio_write16(bcm, 0x0050, 0x0020);
709 bcm43xx_radio_write16(bcm, 0x005A, 0x0070);
710 bcm43xx_radio_write16(bcm, 0x005B, 0x007B);
711 bcm43xx_radio_write16(bcm, 0x005C, 0x00B0);
712 bcm43xx_radio_write16(bcm, 0x007A, 0x000F);
713 bcm43xx_phy_write(bcm, 0x0038, 0x0677);
714 bcm43xx_radio_init2050(bcm);
715 }
716 bcm43xx_phy_write(bcm, 0x0014, 0x0080);
717 bcm43xx_phy_write(bcm, 0x0032, 0x00CA);
718 if (radio->version == 0x2050)
719 bcm43xx_phy_write(bcm, 0x0032, 0x00E0);
720 bcm43xx_phy_write(bcm, 0x0035, 0x07C2);
721
722 bcm43xx_phy_lo_b_measure(bcm);
723
724 bcm43xx_phy_write(bcm, 0x0026, 0xCC00);
725 if (radio->version == 0x2050)
726 bcm43xx_phy_write(bcm, 0x0026, 0xCE00);
727 bcm43xx_write16(bcm, BCM43xx_MMIO_CHANNEL_EXT, 0x1100);
728 bcm43xx_phy_write(bcm, 0x002A, 0x88A3);
729 if (radio->version == 0x2050)
730 bcm43xx_phy_write(bcm, 0x002A, 0x88C2);
731 bcm43xx_radio_set_txpower_bg(bcm, 0xFFFF, 0xFFFF, 0xFFFF);
732 if (bcm->sprom.boardflags & BCM43xx_BFL_RSSI) {
733 bcm43xx_calc_nrssi_slope(bcm);
734 bcm43xx_calc_nrssi_threshold(bcm);
735 }
736 bcm43xx_phy_init_pctl(bcm);
737}
738
739static void bcm43xx_phy_initb5(struct bcm43xx_private *bcm)
740{
741 struct bcm43xx_phyinfo *phy = bcm43xx_current_phy(bcm);
742 struct bcm43xx_radioinfo *radio = bcm43xx_current_radio(bcm);
743 u16 offset;
744 u16 value;
745 u8 old_channel;
746
747 if (phy->analog == 1)
748 bcm43xx_radio_write16(bcm, 0x007A,
749 bcm43xx_radio_read16(bcm, 0x007A)
750 | 0x0050);
751 if ((bcm->board_vendor != PCI_VENDOR_ID_BROADCOM) &&
752 (bcm->board_type != 0x0416)) {
753 value = 0x2120;
754 for (offset = 0x00A8 ; offset < 0x00C7; offset++) {
755 bcm43xx_phy_write(bcm, offset, value);
756 value += 0x0202;
757 }
758 }
759 bcm43xx_phy_write(bcm, 0x0035,
760 (bcm43xx_phy_read(bcm, 0x0035) & 0xF0FF)
761 | 0x0700);
762 if (radio->version == 0x2050)
763 bcm43xx_phy_write(bcm, 0x0038, 0x0667);
764
765 if (phy->connected) {
766 if (radio->version == 0x2050) {
767 bcm43xx_radio_write16(bcm, 0x007A,
768 bcm43xx_radio_read16(bcm, 0x007A)
769 | 0x0020);
770 bcm43xx_radio_write16(bcm, 0x0051,
771 bcm43xx_radio_read16(bcm, 0x0051)
772 | 0x0004);
773 }
774 bcm43xx_write16(bcm, BCM43xx_MMIO_PHY_RADIO, 0x0000);
775
776 bcm43xx_phy_write(bcm, 0x0802, bcm43xx_phy_read(bcm, 0x0802) | 0x0100);
777 bcm43xx_phy_write(bcm, 0x042B, bcm43xx_phy_read(bcm, 0x042B) | 0x2000);
778
779 bcm43xx_phy_write(bcm, 0x001C, 0x186A);
780
781 bcm43xx_phy_write(bcm, 0x0013, (bcm43xx_phy_read(bcm, 0x0013) & 0x00FF) | 0x1900);
782 bcm43xx_phy_write(bcm, 0x0035, (bcm43xx_phy_read(bcm, 0x0035) & 0xFFC0) | 0x0064);
783 bcm43xx_phy_write(bcm, 0x005D, (bcm43xx_phy_read(bcm, 0x005D) & 0xFF80) | 0x000A);
784 }
785
786 if (bcm->bad_frames_preempt) {
787 bcm43xx_phy_write(bcm, BCM43xx_PHY_RADIO_BITFIELD,
788 bcm43xx_phy_read(bcm, BCM43xx_PHY_RADIO_BITFIELD) | (1 << 11));
789 }
790
791 if (phy->analog == 1) {
792 bcm43xx_phy_write(bcm, 0x0026, 0xCE00);
793 bcm43xx_phy_write(bcm, 0x0021, 0x3763);
794 bcm43xx_phy_write(bcm, 0x0022, 0x1BC3);
795 bcm43xx_phy_write(bcm, 0x0023, 0x06F9);
796 bcm43xx_phy_write(bcm, 0x0024, 0x037E);
797 } else
798 bcm43xx_phy_write(bcm, 0x0026, 0xCC00);
799 bcm43xx_phy_write(bcm, 0x0030, 0x00C6);
800 bcm43xx_write16(bcm, 0x03EC, 0x3F22);
801
802 if (phy->analog == 1)
803 bcm43xx_phy_write(bcm, 0x0020, 0x3E1C);
804 else
805 bcm43xx_phy_write(bcm, 0x0020, 0x301C);
806
807 if (phy->analog == 0)
808 bcm43xx_write16(bcm, 0x03E4, 0x3000);
809
810 old_channel = radio->channel;
811 /* Force to channel 7, even if not supported. */
812 bcm43xx_radio_selectchannel(bcm, 7, 0);
813
814 if (radio->version != 0x2050) {
815 bcm43xx_radio_write16(bcm, 0x0075, 0x0080);
816 bcm43xx_radio_write16(bcm, 0x0079, 0x0081);
817 }
818
819 bcm43xx_radio_write16(bcm, 0x0050, 0x0020);
820 bcm43xx_radio_write16(bcm, 0x0050, 0x0023);
821
822 if (radio->version == 0x2050) {
823 bcm43xx_radio_write16(bcm, 0x0050, 0x0020);
824 bcm43xx_radio_write16(bcm, 0x005A, 0x0070);
825 }
826
827 bcm43xx_radio_write16(bcm, 0x005B, 0x007B);
828 bcm43xx_radio_write16(bcm, 0x005C, 0x00B0);
829
830 bcm43xx_radio_write16(bcm, 0x007A, bcm43xx_radio_read16(bcm, 0x007A) | 0x0007);
831
832 bcm43xx_radio_selectchannel(bcm, old_channel, 0);
833
834 bcm43xx_phy_write(bcm, 0x0014, 0x0080);
835 bcm43xx_phy_write(bcm, 0x0032, 0x00CA);
836 bcm43xx_phy_write(bcm, 0x002A, 0x88A3);
837
838 bcm43xx_radio_set_txpower_bg(bcm, 0xFFFF, 0xFFFF, 0xFFFF);
839
840 if (radio->version == 0x2050)
841 bcm43xx_radio_write16(bcm, 0x005D, 0x000D);
842
843 bcm43xx_write16(bcm, 0x03E4, (bcm43xx_read16(bcm, 0x03E4) & 0xFFC0) | 0x0004);
844}
845
846static void bcm43xx_phy_initb6(struct bcm43xx_private *bcm)
847{
848 struct bcm43xx_phyinfo *phy = bcm43xx_current_phy(bcm);
849 struct bcm43xx_radioinfo *radio = bcm43xx_current_radio(bcm);
850 u16 offset, val;
851 u8 old_channel;
852
853 bcm43xx_phy_write(bcm, 0x003E, 0x817A);
854 bcm43xx_radio_write16(bcm, 0x007A,
855 (bcm43xx_radio_read16(bcm, 0x007A) | 0x0058));
856 if (radio->revision == 4 ||
857 radio->revision == 5) {
858 bcm43xx_radio_write16(bcm, 0x0051, 0x0037);
859 bcm43xx_radio_write16(bcm, 0x0052, 0x0070);
860 bcm43xx_radio_write16(bcm, 0x0053, 0x00B3);
861 bcm43xx_radio_write16(bcm, 0x0054, 0x009B);
862 bcm43xx_radio_write16(bcm, 0x005A, 0x0088);
863 bcm43xx_radio_write16(bcm, 0x005B, 0x0088);
864 bcm43xx_radio_write16(bcm, 0x005D, 0x0088);
865 bcm43xx_radio_write16(bcm, 0x005E, 0x0088);
866 bcm43xx_radio_write16(bcm, 0x007D, 0x0088);
867 bcm43xx_shm_write32(bcm, BCM43xx_SHM_SHARED,
868 BCM43xx_UCODEFLAGS_OFFSET,
869 (bcm43xx_shm_read32(bcm, BCM43xx_SHM_SHARED,
870 BCM43xx_UCODEFLAGS_OFFSET)
871 | 0x00000200));
872 }
873 if (radio->revision == 8) {
874 bcm43xx_radio_write16(bcm, 0x0051, 0x0000);
875 bcm43xx_radio_write16(bcm, 0x0052, 0x0040);
876 bcm43xx_radio_write16(bcm, 0x0053, 0x00B7);
877 bcm43xx_radio_write16(bcm, 0x0054, 0x0098);
878 bcm43xx_radio_write16(bcm, 0x005A, 0x0088);
879 bcm43xx_radio_write16(bcm, 0x005B, 0x006B);
880 bcm43xx_radio_write16(bcm, 0x005C, 0x000F);
881 if (bcm->sprom.boardflags & 0x8000) {
882 bcm43xx_radio_write16(bcm, 0x005D, 0x00FA);
883 bcm43xx_radio_write16(bcm, 0x005E, 0x00D8);
884 } else {
885 bcm43xx_radio_write16(bcm, 0x005D, 0x00F5);
886 bcm43xx_radio_write16(bcm, 0x005E, 0x00B8);
887 }
888 bcm43xx_radio_write16(bcm, 0x0073, 0x0003);
889 bcm43xx_radio_write16(bcm, 0x007D, 0x00A8);
890 bcm43xx_radio_write16(bcm, 0x007C, 0x0001);
891 bcm43xx_radio_write16(bcm, 0x007E, 0x0008);
892 }
893 val = 0x1E1F;
894 for (offset = 0x0088; offset < 0x0098; offset++) {
895 bcm43xx_phy_write(bcm, offset, val);
896 val -= 0x0202;
897 }
898 val = 0x3E3F;
899 for (offset = 0x0098; offset < 0x00A8; offset++) {
900 bcm43xx_phy_write(bcm, offset, val);
901 val -= 0x0202;
902 }
903 val = 0x2120;
904 for (offset = 0x00A8; offset < 0x00C8; offset++) {
905 bcm43xx_phy_write(bcm, offset, (val & 0x3F3F));
906 val += 0x0202;
907 }
908 if (phy->type == BCM43xx_PHYTYPE_G) {
909 bcm43xx_radio_write16(bcm, 0x007A,
910 bcm43xx_radio_read16(bcm, 0x007A) | 0x0020);
911 bcm43xx_radio_write16(bcm, 0x0051,
912 bcm43xx_radio_read16(bcm, 0x0051) | 0x0004);
913 bcm43xx_phy_write(bcm, 0x0802,
914 bcm43xx_phy_read(bcm, 0x0802) | 0x0100);
915 bcm43xx_phy_write(bcm, 0x042B,
916 bcm43xx_phy_read(bcm, 0x042B) | 0x2000);
917 bcm43xx_phy_write(bcm, 0x5B, 0x0000);
918 bcm43xx_phy_write(bcm, 0x5C, 0x0000);
919 }
920
921 old_channel = radio->channel;
922 if (old_channel >= 8)
923 bcm43xx_radio_selectchannel(bcm, 1, 0);
924 else
925 bcm43xx_radio_selectchannel(bcm, 13, 0);
926
927 bcm43xx_radio_write16(bcm, 0x0050, 0x0020);
928 bcm43xx_radio_write16(bcm, 0x0050, 0x0023);
929 udelay(40);
930 if (radio->revision < 6 || radio->revision == 8) {
931 bcm43xx_radio_write16(bcm, 0x007C, (bcm43xx_radio_read16(bcm, 0x007C)
932 | 0x0002));
933 bcm43xx_radio_write16(bcm, 0x0050, 0x0020);
934 }
935 if (radio->revision <= 2) {
936 bcm43xx_radio_write16(bcm, 0x007C, 0x0020);
937 bcm43xx_radio_write16(bcm, 0x005A, 0x0070);
938 bcm43xx_radio_write16(bcm, 0x005B, 0x007B);
939 bcm43xx_radio_write16(bcm, 0x005C, 0x00B0);
940 }
941 bcm43xx_radio_write16(bcm, 0x007A,
942 (bcm43xx_radio_read16(bcm, 0x007A) & 0x00F8) | 0x0007);
943
944 bcm43xx_radio_selectchannel(bcm, old_channel, 0);
945
946 bcm43xx_phy_write(bcm, 0x0014, 0x0200);
947 if (radio->revision >= 6)
948 bcm43xx_phy_write(bcm, 0x002A, 0x88C2);
949 else
950 bcm43xx_phy_write(bcm, 0x002A, 0x8AC0);
951 bcm43xx_phy_write(bcm, 0x0038, 0x0668);
952 bcm43xx_radio_set_txpower_bg(bcm, 0xFFFF, 0xFFFF, 0xFFFF);
953 if (radio->revision <= 5)
954 bcm43xx_phy_write(bcm, 0x005D, (bcm43xx_phy_read(bcm, 0x005D)
955 & 0xFF80) | 0x0003);
956 if (radio->revision <= 2)
957 bcm43xx_radio_write16(bcm, 0x005D, 0x000D);
958
959 if (phy->analog == 4) {
960 bcm43xx_write16(bcm, 0x03E4, 0x0009);
961 bcm43xx_phy_write(bcm, 0x61, bcm43xx_phy_read(bcm, 0x61) & 0xFFF);
962 } else {
963 bcm43xx_phy_write(bcm, 0x0002, (bcm43xx_phy_read(bcm, 0x0002) & 0xFFC0) | 0x0004);
964 }
965 if (phy->type == BCM43xx_PHYTYPE_G)
966 bcm43xx_write16(bcm, 0x03E6, 0x0);
967 if (phy->type == BCM43xx_PHYTYPE_B) {
968 bcm43xx_write16(bcm, 0x03E6, 0x8140);
969 bcm43xx_phy_write(bcm, 0x0016, 0x0410);
970 bcm43xx_phy_write(bcm, 0x0017, 0x0820);
971 bcm43xx_phy_write(bcm, 0x0062, 0x0007);
972 bcm43xx_radio_init2050(bcm);
973 bcm43xx_phy_lo_g_measure(bcm);
974 if (bcm->sprom.boardflags & BCM43xx_BFL_RSSI) {
975 bcm43xx_calc_nrssi_slope(bcm);
976 bcm43xx_calc_nrssi_threshold(bcm);
977 }
978 bcm43xx_phy_init_pctl(bcm);
979 }
980}
981
982static void bcm43xx_calc_loopback_gain(struct bcm43xx_private *bcm)
983{
984 struct bcm43xx_phyinfo *phy = bcm43xx_current_phy(bcm);
985 struct bcm43xx_radioinfo *radio = bcm43xx_current_radio(bcm);
986 u16 backup_phy[15] = {0};
987 u16 backup_radio[3];
988 u16 backup_bband;
989 u16 i;
990 u16 loop1_cnt, loop1_done, loop1_omitted;
991 u16 loop2_done;
992
993 backup_phy[0] = bcm43xx_phy_read(bcm, 0x0429);
994 backup_phy[1] = bcm43xx_phy_read(bcm, 0x0001);
995 backup_phy[2] = bcm43xx_phy_read(bcm, 0x0811);
996 backup_phy[3] = bcm43xx_phy_read(bcm, 0x0812);
997 if (phy->rev != 1) {
998 backup_phy[4] = bcm43xx_phy_read(bcm, 0x0814);
999 backup_phy[5] = bcm43xx_phy_read(bcm, 0x0815);
1000 }
1001 backup_phy[6] = bcm43xx_phy_read(bcm, 0x005A);
1002 backup_phy[7] = bcm43xx_phy_read(bcm, 0x0059);
1003 backup_phy[8] = bcm43xx_phy_read(bcm, 0x0058);
1004 backup_phy[9] = bcm43xx_phy_read(bcm, 0x000A);
1005 backup_phy[10] = bcm43xx_phy_read(bcm, 0x0003);
1006 backup_phy[11] = bcm43xx_phy_read(bcm, 0x080F);
1007 backup_phy[12] = bcm43xx_phy_read(bcm, 0x0810);
1008 backup_phy[13] = bcm43xx_phy_read(bcm, 0x002B);
1009 backup_phy[14] = bcm43xx_phy_read(bcm, 0x0015);
1010 bcm43xx_phy_read(bcm, 0x002D); /* dummy read */
1011 backup_bband = radio->baseband_atten;
1012 backup_radio[0] = bcm43xx_radio_read16(bcm, 0x0052);
1013 backup_radio[1] = bcm43xx_radio_read16(bcm, 0x0043);
1014 backup_radio[2] = bcm43xx_radio_read16(bcm, 0x007A);
1015
1016 bcm43xx_phy_write(bcm, 0x0429,
1017 bcm43xx_phy_read(bcm, 0x0429) & 0x3FFF);
1018 bcm43xx_phy_write(bcm, 0x0001,
1019 bcm43xx_phy_read(bcm, 0x0001) & 0x8000);
1020 bcm43xx_phy_write(bcm, 0x0811,
1021 bcm43xx_phy_read(bcm, 0x0811) | 0x0002);
1022 bcm43xx_phy_write(bcm, 0x0812,
1023 bcm43xx_phy_read(bcm, 0x0812) & 0xFFFD);
1024 bcm43xx_phy_write(bcm, 0x0811,
1025 bcm43xx_phy_read(bcm, 0x0811) | 0x0001);
1026 bcm43xx_phy_write(bcm, 0x0812,
1027 bcm43xx_phy_read(bcm, 0x0812) & 0xFFFE);
1028 if (phy->rev != 1) {
1029 bcm43xx_phy_write(bcm, 0x0814,
1030 bcm43xx_phy_read(bcm, 0x0814) | 0x0001);
1031 bcm43xx_phy_write(bcm, 0x0815,
1032 bcm43xx_phy_read(bcm, 0x0815) & 0xFFFE);
1033 bcm43xx_phy_write(bcm, 0x0814,
1034 bcm43xx_phy_read(bcm, 0x0814) | 0x0002);
1035 bcm43xx_phy_write(bcm, 0x0815,
1036 bcm43xx_phy_read(bcm, 0x0815) & 0xFFFD);
1037 }
1038 bcm43xx_phy_write(bcm, 0x0811,
1039 bcm43xx_phy_read(bcm, 0x0811) | 0x000C);
1040 bcm43xx_phy_write(bcm, 0x0812,
1041 bcm43xx_phy_read(bcm, 0x0812) | 0x000C);
1042
1043 bcm43xx_phy_write(bcm, 0x0811,
1044 (bcm43xx_phy_read(bcm, 0x0811)
1045 & 0xFFCF) | 0x0030);
1046 bcm43xx_phy_write(bcm, 0x0812,
1047 (bcm43xx_phy_read(bcm, 0x0812)
1048 & 0xFFCF) | 0x0010);
1049
1050 bcm43xx_phy_write(bcm, 0x005A, 0x0780);
1051 bcm43xx_phy_write(bcm, 0x0059, 0xC810);
1052 bcm43xx_phy_write(bcm, 0x0058, 0x000D);
1053 if (phy->analog == 0) {
1054 bcm43xx_phy_write(bcm, 0x0003, 0x0122);
1055 } else {
1056 bcm43xx_phy_write(bcm, 0x000A,
1057 bcm43xx_phy_read(bcm, 0x000A)
1058 | 0x2000);
1059 }
1060 if (phy->rev != 1) {
1061 bcm43xx_phy_write(bcm, 0x0814,
1062 bcm43xx_phy_read(bcm, 0x0814) | 0x0004);
1063 bcm43xx_phy_write(bcm, 0x0815,
1064 bcm43xx_phy_read(bcm, 0x0815) & 0xFFFB);
1065 }
1066 bcm43xx_phy_write(bcm, 0x0003,
1067 (bcm43xx_phy_read(bcm, 0x0003)
1068 & 0xFF9F) | 0x0040);
1069 if (radio->version == 0x2050 && radio->revision == 2) {
1070 bcm43xx_radio_write16(bcm, 0x0052, 0x0000);
1071 bcm43xx_radio_write16(bcm, 0x0043,
1072 (bcm43xx_radio_read16(bcm, 0x0043)
1073 & 0xFFF0) | 0x0009);
1074 loop1_cnt = 9;
1075 } else if (radio->revision == 8) {
1076 bcm43xx_radio_write16(bcm, 0x0043, 0x000F);
1077 loop1_cnt = 15;
1078 } else
1079 loop1_cnt = 0;
1080
1081 bcm43xx_phy_set_baseband_attenuation(bcm, 11);
1082
1083 if (phy->rev >= 3)
1084 bcm43xx_phy_write(bcm, 0x080F, 0xC020);
1085 else
1086 bcm43xx_phy_write(bcm, 0x080F, 0x8020);
1087 bcm43xx_phy_write(bcm, 0x0810, 0x0000);
1088
1089 bcm43xx_phy_write(bcm, 0x002B,
1090 (bcm43xx_phy_read(bcm, 0x002B)
1091 & 0xFFC0) | 0x0001);
1092 bcm43xx_phy_write(bcm, 0x002B,
1093 (bcm43xx_phy_read(bcm, 0x002B)
1094 & 0xC0FF) | 0x0800);
1095 bcm43xx_phy_write(bcm, 0x0811,
1096 bcm43xx_phy_read(bcm, 0x0811) | 0x0100);
1097 bcm43xx_phy_write(bcm, 0x0812,
1098 bcm43xx_phy_read(bcm, 0x0812) & 0xCFFF);
1099 if (bcm->sprom.boardflags & BCM43xx_BFL_EXTLNA) {
1100 if (phy->rev >= 7) {
1101 bcm43xx_phy_write(bcm, 0x0811,
1102 bcm43xx_phy_read(bcm, 0x0811)
1103 | 0x0800);
1104 bcm43xx_phy_write(bcm, 0x0812,
1105 bcm43xx_phy_read(bcm, 0x0812)
1106 | 0x8000);
1107 }
1108 }
1109 bcm43xx_radio_write16(bcm, 0x007A,
1110 bcm43xx_radio_read16(bcm, 0x007A)
1111 & 0x00F7);
1112
1113 for (i = 0; i < loop1_cnt; i++) {
1114 bcm43xx_radio_write16(bcm, 0x0043, loop1_cnt);
1115 bcm43xx_phy_write(bcm, 0x0812,
1116 (bcm43xx_phy_read(bcm, 0x0812)
1117 & 0xF0FF) | (i << 8));
1118 bcm43xx_phy_write(bcm, 0x0015,
1119 (bcm43xx_phy_read(bcm, 0x0015)
1120 & 0x0FFF) | 0xA000);
1121 bcm43xx_phy_write(bcm, 0x0015,
1122 (bcm43xx_phy_read(bcm, 0x0015)
1123 & 0x0FFF) | 0xF000);
1124 udelay(20);
1125 if (bcm43xx_phy_read(bcm, 0x002D) >= 0x0DFC)
1126 break;
1127 }
1128 loop1_done = i;
1129 loop1_omitted = loop1_cnt - loop1_done;
1130
1131 loop2_done = 0;
1132 if (loop1_done >= 8) {
1133 bcm43xx_phy_write(bcm, 0x0812,
1134 bcm43xx_phy_read(bcm, 0x0812)
1135 | 0x0030);
1136 for (i = loop1_done - 8; i < 16; i++) {
1137 bcm43xx_phy_write(bcm, 0x0812,
1138 (bcm43xx_phy_read(bcm, 0x0812)
1139 & 0xF0FF) | (i << 8));
1140 bcm43xx_phy_write(bcm, 0x0015,
1141 (bcm43xx_phy_read(bcm, 0x0015)
1142 & 0x0FFF) | 0xA000);
1143 bcm43xx_phy_write(bcm, 0x0015,
1144 (bcm43xx_phy_read(bcm, 0x0015)
1145 & 0x0FFF) | 0xF000);
1146 udelay(20);
1147 if (bcm43xx_phy_read(bcm, 0x002D) >= 0x0DFC)
1148 break;
1149 }
1150 }
1151
1152 if (phy->rev != 1) {
1153 bcm43xx_phy_write(bcm, 0x0814, backup_phy[4]);
1154 bcm43xx_phy_write(bcm, 0x0815, backup_phy[5]);
1155 }
1156 bcm43xx_phy_write(bcm, 0x005A, backup_phy[6]);
1157 bcm43xx_phy_write(bcm, 0x0059, backup_phy[7]);
1158 bcm43xx_phy_write(bcm, 0x0058, backup_phy[8]);
1159 bcm43xx_phy_write(bcm, 0x000A, backup_phy[9]);
1160 bcm43xx_phy_write(bcm, 0x0003, backup_phy[10]);
1161 bcm43xx_phy_write(bcm, 0x080F, backup_phy[11]);
1162 bcm43xx_phy_write(bcm, 0x0810, backup_phy[12]);
1163 bcm43xx_phy_write(bcm, 0x002B, backup_phy[13]);
1164 bcm43xx_phy_write(bcm, 0x0015, backup_phy[14]);
1165
1166 bcm43xx_phy_set_baseband_attenuation(bcm, backup_bband);
1167
1168 bcm43xx_radio_write16(bcm, 0x0052, backup_radio[0]);
1169 bcm43xx_radio_write16(bcm, 0x0043, backup_radio[1]);
1170 bcm43xx_radio_write16(bcm, 0x007A, backup_radio[2]);
1171
1172 bcm43xx_phy_write(bcm, 0x0811, backup_phy[2] | 0x0003);
1173 udelay(10);
1174 bcm43xx_phy_write(bcm, 0x0811, backup_phy[2]);
1175 bcm43xx_phy_write(bcm, 0x0812, backup_phy[3]);
1176 bcm43xx_phy_write(bcm, 0x0429, backup_phy[0]);
1177 bcm43xx_phy_write(bcm, 0x0001, backup_phy[1]);
1178
1179 phy->loopback_gain[0] = ((loop1_done * 6) - (loop1_omitted * 4)) - 11;
1180 phy->loopback_gain[1] = (24 - (3 * loop2_done)) * 2;
1181}
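/* Worked example for the two gain formulas above (illustrative only; the
 * numbers are hypothetical, not measured): with loop1_cnt = 15 (radio
 * revision 8) and the 0x0DFC threshold reached at i = 10, loop1_done = 10
 * and loop1_omitted = 5, giving
 * loopback_gain[0] = (10 * 6) - (5 * 4) - 11 = 29.  A hypothetical
 * loop2_done = 2 would give loopback_gain[1] = (24 - 3 * 2) * 2 = 36.
 */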
1182
1183static void bcm43xx_phy_initg(struct bcm43xx_private *bcm)
1184{
1185 struct bcm43xx_phyinfo *phy = bcm43xx_current_phy(bcm);
1186 struct bcm43xx_radioinfo *radio = bcm43xx_current_radio(bcm);
1187 u16 tmp;
1188
1189 if (phy->rev == 1)
1190 bcm43xx_phy_initb5(bcm);
1191 else
1192 bcm43xx_phy_initb6(bcm);
1193 if (phy->rev >= 2 || phy->connected)
1194 bcm43xx_phy_inita(bcm);
1195
1196 if (phy->rev >= 2) {
1197 bcm43xx_phy_write(bcm, 0x0814, 0x0000);
1198 bcm43xx_phy_write(bcm, 0x0815, 0x0000);
1199 }
1200 if (phy->rev == 2) {
1201 bcm43xx_phy_write(bcm, 0x0811, 0x0000);
1202 bcm43xx_phy_write(bcm, 0x0015, 0x00C0);
1203 }
1204 if (phy->rev > 5) {
1205 bcm43xx_phy_write(bcm, 0x0811, 0x0400);
1206 bcm43xx_phy_write(bcm, 0x0015, 0x00C0);
1207 }
1208 if (phy->rev >= 2 && phy->connected) {
1209 tmp = bcm43xx_phy_read(bcm, 0x0400) & 0xFF;
1210 if (tmp == 3 || tmp == 5) {
1211 bcm43xx_phy_write(bcm, 0x04C2, 0x1816);
1212 bcm43xx_phy_write(bcm, 0x04C3, 0x8006);
1213 if (tmp == 5) {
1214 bcm43xx_phy_write(bcm, 0x04CC,
1215 (bcm43xx_phy_read(bcm, 0x04CC)
1216 & 0x00FF) | 0x1F00);
1217 }
1218 }
1219 bcm43xx_phy_write(bcm, 0x047E, 0x0078);
1220 }
1221 if (radio->revision == 8) {
1222 bcm43xx_phy_write(bcm, 0x0801, bcm43xx_phy_read(bcm, 0x0801) | 0x0080);
1223 bcm43xx_phy_write(bcm, 0x043E, bcm43xx_phy_read(bcm, 0x043E) | 0x0004);
1224 }
1225 if (phy->rev >= 2 && phy->connected)
1226 bcm43xx_calc_loopback_gain(bcm);
1227 if (radio->revision != 8) {
1228 if (radio->initval == 0xFFFF)
1229 radio->initval = bcm43xx_radio_init2050(bcm);
1230 else
1231 bcm43xx_radio_write16(bcm, 0x0078, radio->initval);
1232 }
1233 if (radio->txctl2 == 0xFFFF) {
1234 bcm43xx_phy_lo_g_measure(bcm);
1235 } else {
1236 if (radio->version == 0x2050 && radio->revision == 8) {
1237 bcm43xx_radio_write16(bcm, 0x0052,
1238 (radio->txctl1 << 4) | radio->txctl2);
1239 } else {
1240 bcm43xx_radio_write16(bcm, 0x0052,
1241 (bcm43xx_radio_read16(bcm, 0x0052)
1242 & 0xFFF0) | radio->txctl1);
1243 }
1244 if (phy->rev >= 6) {
1245 bcm43xx_phy_write(bcm, 0x0036,
1246 (bcm43xx_phy_read(bcm, 0x0036)
1247 & 0x0FFF) | (radio->txctl2 << 12));
1248 }
1249 if (bcm->sprom.boardflags & BCM43xx_BFL_PACTRL)
1250 bcm43xx_phy_write(bcm, 0x002E, 0x8075);
1251 else
1252 bcm43xx_phy_write(bcm, 0x002E, 0x807F);
1253 if (phy->rev < 2)
1254 bcm43xx_phy_write(bcm, 0x002F, 0x0101);
1255 else
1256 bcm43xx_phy_write(bcm, 0x002F, 0x0202);
1257 }
1258 if (phy->connected || phy->rev >= 2) {
1259 bcm43xx_phy_lo_adjust(bcm, 0);
1260 bcm43xx_phy_write(bcm, 0x080F, 0x8078);
1261 }
1262
1263 if (!(bcm->sprom.boardflags & BCM43xx_BFL_RSSI)) {
1264 /* The specs state to update the NRSSI LT with
1265 * the value 0x7FFFFFFF here. I think that is some weird
1266 * compiler optimization in the original driver.
1267 * Essentially, what we do here is reset all NRSSI LT
1268 * entries to -32 (see the limit_value() in nrssi_hw_update()).
1269 */
1270 bcm43xx_nrssi_hw_update(bcm, 0xFFFF);
1271 bcm43xx_calc_nrssi_threshold(bcm);
1272 } else if (phy->connected || phy->rev >= 2) {
1273 if (radio->nrssi[0] == -1000) {
1274 assert(radio->nrssi[1] == -1000);
1275 bcm43xx_calc_nrssi_slope(bcm);
1276 } else {
1277 assert(radio->nrssi[1] != -1000);
1278 bcm43xx_calc_nrssi_threshold(bcm);
1279 }
1280 }
1281 if (radio->revision == 8)
1282 bcm43xx_phy_write(bcm, 0x0805, 0x3230);
1283 bcm43xx_phy_init_pctl(bcm);
1284 if (bcm->chip_id == 0x4306 && bcm->chip_package == 2) {
1285 bcm43xx_phy_write(bcm, 0x0429,
1286 bcm43xx_phy_read(bcm, 0x0429) & 0xBFFF);
1287 bcm43xx_phy_write(bcm, 0x04C3,
1288 bcm43xx_phy_read(bcm, 0x04C3) & 0x7FFF);
1289 }
1290}
1291
1292static u16 bcm43xx_phy_lo_b_r15_loop(struct bcm43xx_private *bcm)
1293{
1294 int i;
1295 u16 ret = 0;
1296 unsigned long flags;
1297
1298 local_irq_save(flags);
1299 for (i = 0; i < 10; i++) {
1300 bcm43xx_phy_write(bcm, 0x0015, 0xAFA0);
1301 udelay(1);
1302 bcm43xx_phy_write(bcm, 0x0015, 0xEFA0);
1303 udelay(10);
1304 bcm43xx_phy_write(bcm, 0x0015, 0xFFA0);
1305 udelay(40);
1306 ret += bcm43xx_phy_read(bcm, 0x002C);
1307 }
1308 local_irq_restore(flags);
1309 bcm43xx_voluntary_preempt();
1310
1311 return ret;
1312}
1313
1314void bcm43xx_phy_lo_b_measure(struct bcm43xx_private *bcm)
1315{
1316 struct bcm43xx_radioinfo *radio = bcm43xx_current_radio(bcm);
1317 struct bcm43xx_phyinfo *phy = bcm43xx_current_phy(bcm);
1318 u16 regstack[12] = { 0 };
1319 u16 mls;
1320 u16 fval;
1321 int i, j;
1322
1323 regstack[0] = bcm43xx_phy_read(bcm, 0x0015);
1324 regstack[1] = bcm43xx_radio_read16(bcm, 0x0052) & 0xFFF0;
1325
1326 if (radio->version == 0x2053) {
1327 regstack[2] = bcm43xx_phy_read(bcm, 0x000A);
1328 regstack[3] = bcm43xx_phy_read(bcm, 0x002A);
1329 regstack[4] = bcm43xx_phy_read(bcm, 0x0035);
1330 regstack[5] = bcm43xx_phy_read(bcm, 0x0003);
1331 regstack[6] = bcm43xx_phy_read(bcm, 0x0001);
1332 regstack[7] = bcm43xx_phy_read(bcm, 0x0030);
1333
1334 regstack[8] = bcm43xx_radio_read16(bcm, 0x0043);
1335 regstack[9] = bcm43xx_radio_read16(bcm, 0x007A);
1336 regstack[10] = bcm43xx_read16(bcm, 0x03EC);
1337 regstack[11] = bcm43xx_radio_read16(bcm, 0x0052) & 0x00F0;
1338
1339 bcm43xx_phy_write(bcm, 0x0030, 0x00FF);
1340 bcm43xx_write16(bcm, 0x03EC, 0x3F3F);
1341 bcm43xx_phy_write(bcm, 0x0035, regstack[4] & 0xFF7F);
1342 bcm43xx_radio_write16(bcm, 0x007A, regstack[9] & 0xFFF0);
1343 }
1344 bcm43xx_phy_write(bcm, 0x0015, 0xB000);
1345 bcm43xx_phy_write(bcm, 0x002B, 0x0004);
1346
1347 if (radio->version == 0x2053) {
1348 bcm43xx_phy_write(bcm, 0x002B, 0x0203);
1349 bcm43xx_phy_write(bcm, 0x002A, 0x08A3);
1350 }
1351
1352 phy->minlowsig[0] = 0xFFFF;
1353
1354 for (i = 0; i < 4; i++) {
1355 bcm43xx_radio_write16(bcm, 0x0052, regstack[1] | i);
1356 bcm43xx_phy_lo_b_r15_loop(bcm);
1357 }
1358 for (i = 0; i < 10; i++) {
1359 bcm43xx_radio_write16(bcm, 0x0052, regstack[1] | i);
1360 mls = bcm43xx_phy_lo_b_r15_loop(bcm) / 10;
1361 if (mls < phy->minlowsig[0]) {
1362 phy->minlowsig[0] = mls;
1363 phy->minlowsigpos[0] = i;
1364 }
1365 }
1366 bcm43xx_radio_write16(bcm, 0x0052, regstack[1] | phy->minlowsigpos[0]);
1367
1368 phy->minlowsig[1] = 0xFFFF;
1369
1370 for (i = -4; i < 5; i += 2) {
1371 for (j = -4; j < 5; j += 2) {
1372 if (j < 0)
1373 fval = (0x0100 * i) + j + 0x0100;
1374 else
1375 fval = (0x0100 * i) + j;
1376 bcm43xx_phy_write(bcm, 0x002F, fval);
1377 mls = bcm43xx_phy_lo_b_r15_loop(bcm) / 10;
1378 if (mls < phy->minlowsig[1]) {
1379 phy->minlowsig[1] = mls;
1380 phy->minlowsigpos[1] = fval;
1381 }
1382 }
1383 }
1384 phy->minlowsigpos[1] += 0x0101;
1385
1386 bcm43xx_phy_write(bcm, 0x002F, phy->minlowsigpos[1]);
1387 if (radio->version == 0x2053) {
1388 bcm43xx_phy_write(bcm, 0x000A, regstack[2]);
1389 bcm43xx_phy_write(bcm, 0x002A, regstack[3]);
1390 bcm43xx_phy_write(bcm, 0x0035, regstack[4]);
1391 bcm43xx_phy_write(bcm, 0x0003, regstack[5]);
1392 bcm43xx_phy_write(bcm, 0x0001, regstack[6]);
1393 bcm43xx_phy_write(bcm, 0x0030, regstack[7]);
1394
1395 bcm43xx_radio_write16(bcm, 0x0043, regstack[8]);
1396 bcm43xx_radio_write16(bcm, 0x007A, regstack[9]);
1397
1398 bcm43xx_radio_write16(bcm, 0x0052,
1399 (bcm43xx_radio_read16(bcm, 0x0052) & 0x000F)
1400 | regstack[11]);
1401
1402 bcm43xx_write16(bcm, 0x03EC, regstack[10]);
1403 }
1404 bcm43xx_phy_write(bcm, 0x0015, regstack[0]);
1405}
1406
1407static inline
1408u16 bcm43xx_phy_lo_g_deviation_subval(struct bcm43xx_private *bcm, u16 control)
1409{
1410 struct bcm43xx_phyinfo *phy = bcm43xx_current_phy(bcm);
1411 u16 ret;
1412 unsigned long flags;
1413
1414 local_irq_save(flags);
1415 if (phy->connected) {
1416 bcm43xx_phy_write(bcm, 0x15, 0xE300);
1417 control <<= 8;
1418 bcm43xx_phy_write(bcm, 0x0812, control | 0x00B0);
1419 udelay(5);
1420 bcm43xx_phy_write(bcm, 0x0812, control | 0x00B2);
1421 udelay(2);
1422 bcm43xx_phy_write(bcm, 0x0812, control | 0x00B3);
1423 udelay(4);
1424 bcm43xx_phy_write(bcm, 0x0015, 0xF300);
1425 udelay(8);
1426 } else {
1427 bcm43xx_phy_write(bcm, 0x0015, control | 0xEFA0);
1428 udelay(2);
1429 bcm43xx_phy_write(bcm, 0x0015, control | 0xEFE0);
1430 udelay(4);
1431 bcm43xx_phy_write(bcm, 0x0015, control | 0xFFE0);
1432 udelay(8);
1433 }
1434 ret = bcm43xx_phy_read(bcm, 0x002D);
1435 local_irq_restore(flags);
1436 bcm43xx_voluntary_preempt();
1437
1438 return ret;
1439}
1440
1441static u32 bcm43xx_phy_lo_g_singledeviation(struct bcm43xx_private *bcm, u16 control)
1442{
1443 int i;
1444 u32 ret = 0;
1445
1446 for (i = 0; i < 8; i++)
1447 ret += bcm43xx_phy_lo_g_deviation_subval(bcm, control);
1448
1449 return ret;
1450}
1451
1452/* Write the LocalOscillator CONTROL */
1453static inline
1454void bcm43xx_lo_write(struct bcm43xx_private *bcm,
1455 struct bcm43xx_lopair *pair)
1456{
1457 u16 value;
1458
1459 value = (u8)(pair->low);
1460 value |= ((u8)(pair->high)) << 8;
1461
1462#ifdef CONFIG_BCM43XX_DEBUG
1463 /* Sanity check. */
1464 if (pair->low < -8 || pair->low > 8 ||
1465 pair->high < -8 || pair->high > 8) {
1466 printk(KERN_WARNING PFX
1467 "WARNING: Writing invalid LOpair "
1468 "(low: %d, high: %d, index: %lu)\n",
1469 pair->low, pair->high,
1470 (unsigned long)(pair - bcm43xx_current_phy(bcm)->_lo_pairs));
1471 dump_stack();
1472 }
1473#endif
1474
1475 bcm43xx_phy_write(bcm, BCM43xx_PHY_G_LO_CONTROL, value);
1476}
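/* Minimal sketch (hypothetical helper, not part of the driver) of the
 * packing used above: the signed "low" byte lands in bits 7:0 and the
 * signed "high" byte in bits 15:8, so low = -2 ((u8)-2 == 0xFE) and
 * high = 3 pack to 0x03FE.
 */
static inline u16 bcm43xx_lo_pack_value_example(s8 low, s8 high)
{
	return (u16)((u8)low | (((u8)high) << 8));
}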
1477
1478static inline
1479struct bcm43xx_lopair * bcm43xx_find_lopair(struct bcm43xx_private *bcm,
1480 u16 baseband_attenuation,
1481 u16 radio_attenuation,
1482 u16 tx)
1483{
1484 static const u8 dict[10] = { 11, 10, 11, 12, 13, 12, 13, 12, 13, 12 };
1485 struct bcm43xx_phyinfo *phy = bcm43xx_current_phy(bcm);
1486
1487 if (baseband_attenuation > 6)
1488 baseband_attenuation = 6;
1489 assert(radio_attenuation < 10);
1490
1491 if (tx == 3) {
1492 return bcm43xx_get_lopair(phy,
1493 radio_attenuation,
1494 baseband_attenuation);
1495 }
1496 return bcm43xx_get_lopair(phy, dict[radio_attenuation], baseband_attenuation);
1497}
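/* Worked example for the lookup above (hypothetical input values): with
 * tx != 3, radio_attenuation = 4 and baseband_attenuation = 9, the
 * baseband value is clamped to 6 and dict[4] = 13, so the pair returned
 * is bcm43xx_get_lopair(phy, 13, 6).  Only tx == 3 uses the radio
 * attenuation index directly.
 */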
1498
1499static inline
1500struct bcm43xx_lopair * bcm43xx_current_lopair(struct bcm43xx_private *bcm)
1501{
1502 struct bcm43xx_radioinfo *radio = bcm43xx_current_radio(bcm);
1503
1504 return bcm43xx_find_lopair(bcm,
1505 radio->baseband_atten,
1506 radio->radio_atten,
1507 radio->txctl1);
1508}
1509
1510/* Adjust B/G LO */
1511void bcm43xx_phy_lo_adjust(struct bcm43xx_private *bcm, int fixed)
1512{
1513 struct bcm43xx_lopair *pair;
1514
1515 if (fixed) {
1516 /* Use fixed values. Only for initialization. */
1517 pair = bcm43xx_find_lopair(bcm, 2, 3, 0);
1518 } else
1519 pair = bcm43xx_current_lopair(bcm);
1520 bcm43xx_lo_write(bcm, pair);
1521}
1522
1523static void bcm43xx_phy_lo_g_measure_txctl2(struct bcm43xx_private *bcm)
1524{
1525 struct bcm43xx_radioinfo *radio = bcm43xx_current_radio(bcm);
1526 u16 txctl2 = 0, i;
1527 u32 smallest, tmp;
1528
1529 bcm43xx_radio_write16(bcm, 0x0052, 0x0000);
1530 udelay(10);
1531 smallest = bcm43xx_phy_lo_g_singledeviation(bcm, 0);
1532 for (i = 0; i < 16; i++) {
1533 bcm43xx_radio_write16(bcm, 0x0052, i);
1534 udelay(10);
1535 tmp = bcm43xx_phy_lo_g_singledeviation(bcm, 0);
1536 if (tmp < smallest) {
1537 smallest = tmp;
1538 txctl2 = i;
1539 }
1540 }
1541 radio->txctl2 = txctl2;
1542}
1543
1544static
1545void bcm43xx_phy_lo_g_state(struct bcm43xx_private *bcm,
1546 const struct bcm43xx_lopair *in_pair,
1547 struct bcm43xx_lopair *out_pair,
1548 u16 r27)
1549{
1550 static const struct bcm43xx_lopair transitions[8] = {
1551 { .high = 1, .low = 1, },
1552 { .high = 1, .low = 0, },
1553 { .high = 1, .low = -1, },
1554 { .high = 0, .low = -1, },
1555 { .high = -1, .low = -1, },
1556 { .high = -1, .low = 0, },
1557 { .high = -1, .low = 1, },
1558 { .high = 0, .low = 1, },
1559 };
1560 struct bcm43xx_lopair lowest_transition = {
1561 .high = in_pair->high,
1562 .low = in_pair->low,
1563 };
1564 struct bcm43xx_lopair tmp_pair;
1565 struct bcm43xx_lopair transition;
1566 int i = 12;
1567 int state = 0;
1568 int found_lower;
1569 int j, begin, end;
1570 u32 lowest_deviation;
1571 u32 tmp;
1572
1573 /* Note that in_pair and out_pair can point to the same pair. Be careful. */
1574
1575 bcm43xx_lo_write(bcm, &lowest_transition);
1576 lowest_deviation = bcm43xx_phy_lo_g_singledeviation(bcm, r27);
1577 do {
1578 found_lower = 0;
1579 assert(state >= 0 && state <= 8);
1580 if (state == 0) {
1581 begin = 1;
1582 end = 8;
1583 } else if (state % 2 == 0) {
1584 begin = state - 1;
1585 end = state + 1;
1586 } else {
1587 begin = state - 2;
1588 end = state + 2;
1589 }
1590 if (begin < 1)
1591 begin += 8;
1592 if (end > 8)
1593 end -= 8;
1594
1595 j = begin;
1596 tmp_pair.high = lowest_transition.high;
1597 tmp_pair.low = lowest_transition.low;
1598 while (1) {
1599 assert(j >= 1 && j <= 8);
1600 transition.high = tmp_pair.high + transitions[j - 1].high;
1601 transition.low = tmp_pair.low + transitions[j - 1].low;
1602 if ((abs(transition.low) < 9) && (abs(transition.high) < 9)) {
1603 bcm43xx_lo_write(bcm, &transition);
1604 tmp = bcm43xx_phy_lo_g_singledeviation(bcm, r27);
1605 if (tmp < lowest_deviation) {
1606 lowest_deviation = tmp;
1607 state = j;
1608 found_lower = 1;
1609
1610 lowest_transition.high = transition.high;
1611 lowest_transition.low = transition.low;
1612 }
1613 }
1614 if (j == end)
1615 break;
1616 if (j == 8)
1617 j = 1;
1618 else
1619 j++;
1620 }
1621 } while (i-- && found_lower);
1622
1623 out_pair->high = lowest_transition.high;
1624 out_pair->low = lowest_transition.low;
1625}
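/* Illustrative walk-through of the neighbourhood search above (the state
 * value is hypothetical): from state 0 all eight transitions are probed;
 * afterwards only the directions around the last improving transition are
 * retried, with indices wrapping within 1..8.  For state = 1 (odd),
 * begin = -1 wraps to 7 and end = 3, so transitions 7, 8, 1, 2 and 3 are
 * evaluated in that order.
 */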
1626
1627/* Set the baseband attenuation value on chip. */
1628void bcm43xx_phy_set_baseband_attenuation(struct bcm43xx_private *bcm,
1629 u16 baseband_attenuation)
1630{
1631 struct bcm43xx_phyinfo *phy = bcm43xx_current_phy(bcm);
1632 u16 value;
1633
1634 if (phy->analog == 0) {
1635 value = (bcm43xx_read16(bcm, 0x03E6) & 0xFFF0);
1636 value |= (baseband_attenuation & 0x000F);
1637 bcm43xx_write16(bcm, 0x03E6, value);
1638 return;
1639 }
1640
1641 if (phy->analog > 1) {
1642 value = bcm43xx_phy_read(bcm, 0x0060) & ~0x003C;
1643 value |= (baseband_attenuation << 2) & 0x003C;
1644 } else {
1645 value = bcm43xx_phy_read(bcm, 0x0060) & ~0x0078;
1646 value |= (baseband_attenuation << 3) & 0x0078;
1647 }
1648 bcm43xx_phy_write(bcm, 0x0060, value);
1649}
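/* Minimal sketch (hypothetical helper, not part of the driver) of the
 * field placement used above: attenuation 11 becomes 0x000B for analog 0
 * (low nibble of register 0x03E6), 0x0058 for analog 1 (bits 6:3 of PHY
 * register 0x0060) and 0x002C for analog > 1 (bits 5:2 of PHY 0x0060).
 */
static inline u16 bcm43xx_bbatt_field_example(u8 analog, u16 atten)
{
	if (analog == 0)
		return atten & 0x000F;
	if (analog > 1)
		return (atten << 2) & 0x003C;
	return (atten << 3) & 0x0078;
}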
1650
1651/* http://bcm-specs.sipsolutions.net/LocalOscillator/Measure */
1652void bcm43xx_phy_lo_g_measure(struct bcm43xx_private *bcm)
1653{
1654 static const u8 pairorder[10] = { 3, 1, 5, 7, 9, 2, 0, 4, 6, 8 };
1655 const int is_initializing = (bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZING);
1656 struct bcm43xx_phyinfo *phy = bcm43xx_current_phy(bcm);
1657 struct bcm43xx_radioinfo *radio = bcm43xx_current_radio(bcm);
1658 u16 h, i, oldi = 0, j;
1659 struct bcm43xx_lopair control;
1660 struct bcm43xx_lopair *tmp_control;
1661 u16 tmp;
1662 u16 regstack[16] = { 0 };
1663 u8 oldchannel;
1664
1665 //XXX: What are these?
1666 u8 r27 = 0, r31;
1667
1668 oldchannel = radio->channel;
1669 /* Setup */
1670 if (phy->connected) {
1671 regstack[0] = bcm43xx_phy_read(bcm, BCM43xx_PHY_G_CRS);
1672 regstack[1] = bcm43xx_phy_read(bcm, 0x0802);
1673 bcm43xx_phy_write(bcm, BCM43xx_PHY_G_CRS, regstack[0] & 0x7FFF);
1674 bcm43xx_phy_write(bcm, 0x0802, regstack[1] & 0xFFFC);
1675 }
1676 regstack[3] = bcm43xx_read16(bcm, 0x03E2);
1677 bcm43xx_write16(bcm, 0x03E2, regstack[3] | 0x8000);
1678 regstack[4] = bcm43xx_read16(bcm, BCM43xx_MMIO_CHANNEL_EXT);
1679 regstack[5] = bcm43xx_phy_read(bcm, 0x15);
1680 regstack[6] = bcm43xx_phy_read(bcm, 0x2A);
1681 regstack[7] = bcm43xx_phy_read(bcm, 0x35);
1682 regstack[8] = bcm43xx_phy_read(bcm, 0x60);
1683 regstack[9] = bcm43xx_radio_read16(bcm, 0x43);
1684 regstack[10] = bcm43xx_radio_read16(bcm, 0x7A);
1685 regstack[11] = bcm43xx_radio_read16(bcm, 0x52);
1686 if (phy->connected) {
1687 regstack[12] = bcm43xx_phy_read(bcm, 0x0811);
1688 regstack[13] = bcm43xx_phy_read(bcm, 0x0812);
1689 regstack[14] = bcm43xx_phy_read(bcm, 0x0814);
1690 regstack[15] = bcm43xx_phy_read(bcm, 0x0815);
1691 }
1692 bcm43xx_radio_selectchannel(bcm, 6, 0);
1693 if (phy->connected) {
1694 bcm43xx_phy_write(bcm, BCM43xx_PHY_G_CRS, regstack[0] & 0x7FFF);
1695 bcm43xx_phy_write(bcm, 0x0802, regstack[1] & 0xFFFC);
1696 bcm43xx_dummy_transmission(bcm);
1697 }
1698 bcm43xx_radio_write16(bcm, 0x0043, 0x0006);
1699
1700 bcm43xx_phy_set_baseband_attenuation(bcm, 2);
1701
1702 bcm43xx_write16(bcm, BCM43xx_MMIO_CHANNEL_EXT, 0x0000);
1703 bcm43xx_phy_write(bcm, 0x002E, 0x007F);
1704 bcm43xx_phy_write(bcm, 0x080F, 0x0078);
1705 bcm43xx_phy_write(bcm, 0x0035, regstack[7] & ~(1 << 7));
1706 bcm43xx_radio_write16(bcm, 0x007A, regstack[10] & 0xFFF0);
1707 bcm43xx_phy_write(bcm, 0x002B, 0x0203);
1708 bcm43xx_phy_write(bcm, 0x002A, 0x08A3);
1709 if (phy->connected) {
1710 bcm43xx_phy_write(bcm, 0x0814, regstack[14] | 0x0003);
1711 bcm43xx_phy_write(bcm, 0x0815, regstack[15] & 0xFFFC);
1712 bcm43xx_phy_write(bcm, 0x0811, 0x01B3);
1713 bcm43xx_phy_write(bcm, 0x0812, 0x00B2);
1714 }
1715 if (is_initializing)
1716 bcm43xx_phy_lo_g_measure_txctl2(bcm);
1717 bcm43xx_phy_write(bcm, 0x080F, 0x8078);
1718
1719 /* Measure */
1720 control.low = 0;
1721 control.high = 0;
1722 for (h = 0; h < 10; h++) {
1723 /* Loop over each possible RadioAttenuation (0-9) */
1724 i = pairorder[h];
1725 if (is_initializing) {
1726 if (i == 3) {
1727 control.low = 0;
1728 control.high = 0;
1729 } else if (((i % 2 == 1) && (oldi % 2 == 1)) ||
1730 ((i % 2 == 0) && (oldi % 2 == 0))) {
1731 tmp_control = bcm43xx_get_lopair(phy, oldi, 0);
1732 memcpy(&control, tmp_control, sizeof(control));
1733 } else {
1734 tmp_control = bcm43xx_get_lopair(phy, 3, 0);
1735 memcpy(&control, tmp_control, sizeof(control));
1736 }
1737 }
1738 /* Loop over each possible BasebandAttenuation/2 */
1739 for (j = 0; j < 4; j++) {
1740 if (is_initializing) {
1741 tmp = i * 2 + j;
1742 r27 = 0;
1743 r31 = 0;
1744 if (tmp > 14) {
1745 r31 = 1;
1746 if (tmp > 17)
1747 r27 = 1;
1748 if (tmp > 19)
1749 r27 = 2;
1750 }
1751 } else {
1752 tmp_control = bcm43xx_get_lopair(phy, i, j * 2);
1753 if (!tmp_control->used)
1754 continue;
1755 memcpy(&control, tmp_control, sizeof(control));
1756 r27 = 3;
1757 r31 = 0;
1758 }
1759 bcm43xx_radio_write16(bcm, 0x43, i);
1760 bcm43xx_radio_write16(bcm, 0x52, radio->txctl2);
1761 udelay(10);
1762 bcm43xx_voluntary_preempt();
1763
1764 bcm43xx_phy_set_baseband_attenuation(bcm, j * 2);
1765
1766 tmp = (regstack[10] & 0xFFF0);
1767 if (r31)
1768 tmp |= 0x0008;
1769 bcm43xx_radio_write16(bcm, 0x007A, tmp);
1770
1771 tmp_control = bcm43xx_get_lopair(phy, i, j * 2);
1772 bcm43xx_phy_lo_g_state(bcm, &control, tmp_control, r27);
1773 }
1774 oldi = i;
1775 }
1776 /* Loop over each possible RadioAttenuation (10-13) */
1777 for (i = 10; i < 14; i++) {
1778 /* Loop over each possible BasebandAttenuation/2 */
1779 for (j = 0; j < 4; j++) {
1780 if (is_initializing) {
1781 tmp_control = bcm43xx_get_lopair(phy, i - 9, j * 2);
1782 memcpy(&control, tmp_control, sizeof(control));
1783 tmp = (i - 9) * 2 + j - 5;//FIXME: This is wrong, as the following if statement can never trigger.
1784 r27 = 0;
1785 r31 = 0;
1786 if (tmp > 14) {
1787 r31 = 1;
1788 if (tmp > 17)
1789 r27 = 1;
1790 if (tmp > 19)
1791 r27 = 2;
1792 }
1793 } else {
1794 tmp_control = bcm43xx_get_lopair(phy, i - 9, j * 2);
1795 if (!tmp_control->used)
1796 continue;
1797 memcpy(&control, tmp_control, sizeof(control));
1798 r27 = 3;
1799 r31 = 0;
1800 }
1801 bcm43xx_radio_write16(bcm, 0x43, i - 9);
1802 bcm43xx_radio_write16(bcm, 0x52,
1803 radio->txctl2
1804 | (3/*txctl1*/ << 4));//FIXME: shouldn't txctl1 be zero here and 3 in the loop above?
1805 udelay(10);
1806 bcm43xx_voluntary_preempt();
1807
1808 bcm43xx_phy_set_baseband_attenuation(bcm, j * 2);
1809
1810 tmp = (regstack[10] & 0xFFF0);
1811 if (r31)
1812 tmp |= 0x0008;
1813 bcm43xx_radio_write16(bcm, 0x7A, tmp);
1814
1815 tmp_control = bcm43xx_get_lopair(phy, i, j * 2);
1816 bcm43xx_phy_lo_g_state(bcm, &control, tmp_control, r27);
1817 }
1818 }
1819
1820 /* Restoration */
1821 if (phy->connected) {
1822 bcm43xx_phy_write(bcm, 0x0015, 0xE300);
1823 bcm43xx_phy_write(bcm, 0x0812, (r27 << 8) | 0xA0);
1824 udelay(5);
1825 bcm43xx_phy_write(bcm, 0x0812, (r27 << 8) | 0xA2);
1826 udelay(2);
1827 bcm43xx_phy_write(bcm, 0x0812, (r27 << 8) | 0xA3);
1828 bcm43xx_voluntary_preempt();
1829 } else
1830 bcm43xx_phy_write(bcm, 0x0015, r27 | 0xEFA0);
1831 bcm43xx_phy_lo_adjust(bcm, is_initializing);
1832 bcm43xx_phy_write(bcm, 0x002E, 0x807F);
1833 if (phy->connected)
1834 bcm43xx_phy_write(bcm, 0x002F, 0x0202);
1835 else
1836 bcm43xx_phy_write(bcm, 0x002F, 0x0101);
1837 bcm43xx_write16(bcm, BCM43xx_MMIO_CHANNEL_EXT, regstack[4]);
1838 bcm43xx_phy_write(bcm, 0x0015, regstack[5]);
1839 bcm43xx_phy_write(bcm, 0x002A, regstack[6]);
1840 bcm43xx_phy_write(bcm, 0x0035, regstack[7]);
1841 bcm43xx_phy_write(bcm, 0x0060, regstack[8]);
1842 bcm43xx_radio_write16(bcm, 0x0043, regstack[9]);
1843 bcm43xx_radio_write16(bcm, 0x007A, regstack[10]);
1844 regstack[11] &= 0x00F0;
1845 regstack[11] |= (bcm43xx_radio_read16(bcm, 0x52) & 0x000F);
1846 bcm43xx_radio_write16(bcm, 0x52, regstack[11]);
1847 bcm43xx_write16(bcm, 0x03E2, regstack[3]);
1848 if (phy->connected) {
1849 bcm43xx_phy_write(bcm, 0x0811, regstack[12]);
1850 bcm43xx_phy_write(bcm, 0x0812, regstack[13]);
1851 bcm43xx_phy_write(bcm, 0x0814, regstack[14]);
1852 bcm43xx_phy_write(bcm, 0x0815, regstack[15]);
1853 bcm43xx_phy_write(bcm, BCM43xx_PHY_G_CRS, regstack[0]);
1854 bcm43xx_phy_write(bcm, 0x0802, regstack[1]);
1855 }
1856 bcm43xx_radio_selectchannel(bcm, oldchannel, 1);
1857
1858#ifdef CONFIG_BCM43XX_DEBUG
1859 {
1860 /* Sanity check for all lopairs. */
1861 for (i = 0; i < BCM43xx_LO_COUNT; i++) {
1862 tmp_control = phy->_lo_pairs + i;
1863 if (tmp_control->low < -8 || tmp_control->low > 8 ||
1864 tmp_control->high < -8 || tmp_control->high > 8) {
1865 printk(KERN_WARNING PFX
1866 "WARNING: Invalid LOpair (low: %d, high: %d, index: %d)\n",
1867 tmp_control->low, tmp_control->high, i);
1868 }
1869 }
1870 }
1871#endif /* CONFIG_BCM43XX_DEBUG */
1872}
1873
1874static
1875void bcm43xx_phy_lo_mark_current_used(struct bcm43xx_private *bcm)
1876{
1877 struct bcm43xx_lopair *pair;
1878
1879 pair = bcm43xx_current_lopair(bcm);
1880 pair->used = 1;
1881}
1882
1883void bcm43xx_phy_lo_mark_all_unused(struct bcm43xx_private *bcm)
1884{
1885 struct bcm43xx_phyinfo *phy = bcm43xx_current_phy(bcm);
1886 struct bcm43xx_lopair *pair;
1887 int i;
1888
1889 for (i = 0; i < BCM43xx_LO_COUNT; i++) {
1890 pair = phy->_lo_pairs + i;
1891 pair->used = 0;
1892 }
1893}
1894
1895/* http://bcm-specs.sipsolutions.net/EstimatePowerOut
1896 * This function converts a TSSI value to dBm in Q5.2
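 * (Q5.2 means two fractional bits, i.e. quarter-dBm steps: a returned
 *  value of 54 would correspond to 13.5 dBm.)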
1897 */
1898static s8 bcm43xx_phy_estimate_power_out(struct bcm43xx_private *bcm, s8 tssi)
1899{
1900 struct bcm43xx_phyinfo *phy = bcm43xx_current_phy(bcm);
1901 s8 dbm = 0;
1902 s32 tmp;
1903
1904 tmp = phy->idle_tssi;
1905 tmp += tssi;
1906 tmp -= phy->savedpctlreg;
1907
1908 switch (phy->type) {
1909 case BCM43xx_PHYTYPE_A:
1910 tmp += 0x80;
1911 tmp = limit_value(tmp, 0x00, 0xFF);
1912 dbm = phy->tssi2dbm[tmp];
1913 TODO(); //TODO: There's a FIXME on the specs
1914 break;
1915 case BCM43xx_PHYTYPE_B:
1916 case BCM43xx_PHYTYPE_G:
1917 tmp = limit_value(tmp, 0x00, 0x3F);
1918 dbm = phy->tssi2dbm[tmp];
1919 break;
1920 default:
1921 assert(0);
1922 }
1923
1924 return dbm;
1925}
1926
1927/* http://bcm-specs.sipsolutions.net/RecalculateTransmissionPower */
1928void bcm43xx_phy_xmitpower(struct bcm43xx_private *bcm)
1929{
1930 struct bcm43xx_radioinfo *radio = bcm43xx_current_radio(bcm);
1931 struct bcm43xx_phyinfo *phy = bcm43xx_current_phy(bcm);
1932
1933 if (phy->savedpctlreg == 0xFFFF)
1934 return;
1935 if ((bcm->board_type == 0x0416) &&
1936 (bcm->board_vendor == PCI_VENDOR_ID_BROADCOM))
1937 return;
1938
1939 switch (phy->type) {
1940 case BCM43xx_PHYTYPE_A: {
1941
1942 TODO(); //TODO: Nothing for A PHYs yet :-/
1943
1944 break;
1945 }
1946 case BCM43xx_PHYTYPE_B:
1947 case BCM43xx_PHYTYPE_G: {
1948 u16 tmp;
1949 u16 txpower;
1950 s8 v0, v1, v2, v3;
1951 s8 average;
1952 u8 max_pwr;
1953 s16 desired_pwr, estimated_pwr, pwr_adjust;
1954 s16 radio_att_delta, baseband_att_delta;
1955 s16 radio_attenuation, baseband_attenuation;
1956 unsigned long phylock_flags;
1957
1958 tmp = bcm43xx_shm_read16(bcm, BCM43xx_SHM_SHARED, 0x0058);
1959 v0 = (s8)(tmp & 0x00FF);
1960 v1 = (s8)((tmp & 0xFF00) >> 8);
1961 tmp = bcm43xx_shm_read16(bcm, BCM43xx_SHM_SHARED, 0x005A);
1962 v2 = (s8)(tmp & 0x00FF);
1963 v3 = (s8)((tmp & 0xFF00) >> 8);
1964 tmp = 0;
1965
1966 if (v0 == 0x7F || v1 == 0x7F || v2 == 0x7F || v3 == 0x7F) {
1967 tmp = bcm43xx_shm_read16(bcm, BCM43xx_SHM_SHARED, 0x0070);
1968 v0 = (s8)(tmp & 0x00FF);
1969 v1 = (s8)((tmp & 0xFF00) >> 8);
1970 tmp = bcm43xx_shm_read16(bcm, BCM43xx_SHM_SHARED, 0x0072);
1971 v2 = (s8)(tmp & 0x00FF);
1972 v3 = (s8)((tmp & 0xFF00) >> 8);
1973 if (v0 == 0x7F || v1 == 0x7F || v2 == 0x7F || v3 == 0x7F)
1974 return;
1975 v0 = (v0 + 0x20) & 0x3F;
1976 v1 = (v1 + 0x20) & 0x3F;
1977 v2 = (v2 + 0x20) & 0x3F;
1978 v3 = (v3 + 0x20) & 0x3F;
1979 tmp = 1;
1980 }
1981 bcm43xx_radio_clear_tssi(bcm);
1982
1983 average = (v0 + v1 + v2 + v3 + 2) / 4;
1984
1985 if (tmp && (bcm43xx_shm_read16(bcm, BCM43xx_SHM_SHARED, 0x005E) & 0x8))
1986 average -= 13;
1987
1988 estimated_pwr = bcm43xx_phy_estimate_power_out(bcm, average);
1989
1990 max_pwr = bcm->sprom.maxpower_bgphy;
1991
1992 if ((bcm->sprom.boardflags & BCM43xx_BFL_PACTRL) &&
1993 (phy->type == BCM43xx_PHYTYPE_G))
1994 max_pwr -= 0x3;
1995
1996 /*TODO:
1997 max_pwr = min(REG - bcm->sprom.antennagain_bgphy - 0x6, max_pwr)
1998 where REG is the max power as per the regulatory domain
1999 */
2000
2001 desired_pwr = limit_value(radio->txpower_desired, 0, max_pwr);
2002 /* Check if we need to adjust the current power. */
2003 pwr_adjust = desired_pwr - estimated_pwr;
2004 radio_att_delta = -(pwr_adjust + 7) >> 3;
2005 baseband_att_delta = -(pwr_adjust >> 1) - (4 * radio_att_delta);
2006 if ((radio_att_delta == 0) && (baseband_att_delta == 0)) {
2007 bcm43xx_phy_lo_mark_current_used(bcm);
2008 return;
2009 }
2010
2011 /* Calculate the new attenuation values. */
2012 baseband_attenuation = radio->baseband_atten;
2013 baseband_attenuation += baseband_att_delta;
2014 radio_attenuation = radio->radio_atten;
2015 radio_attenuation += radio_att_delta;
2016
2017 /* Get baseband and radio attenuation values into their permitted ranges.
2018 * baseband 0-11, radio 0-9.
2019 * Radio attenuation affects power level 4 times as much as baseband.
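	 * (Hence the loops below trade one radio attenuation step against
	 *  four baseband attenuation steps.)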
2020 */
2021 if (radio_attenuation < 0) {
2022 baseband_attenuation -= (4 * -radio_attenuation);
2023 radio_attenuation = 0;
2024 } else if (radio_attenuation > 9) {
2025 baseband_attenuation += (4 * (radio_attenuation - 9));
2026 radio_attenuation = 9;
2027 } else {
2028 while (baseband_attenuation < 0 && radio_attenuation > 0) {
2029 baseband_attenuation += 4;
2030 radio_attenuation--;
2031 }
2032 while (baseband_attenuation > 11 && radio_attenuation < 9) {
2033 baseband_attenuation -= 4;
2034 radio_attenuation++;
2035 }
2036 }
2037 baseband_attenuation = limit_value(baseband_attenuation, 0, 11);
2038
2039 txpower = radio->txctl1;
2040 if ((radio->version == 0x2050) && (radio->revision == 2)) {
2041 if (radio_attenuation <= 1) {
2042 if (txpower == 0) {
2043 txpower = 3;
2044 radio_attenuation += 2;
2045 baseband_attenuation += 2;
2046 } else if (bcm->sprom.boardflags & BCM43xx_BFL_PACTRL) {
2047 baseband_attenuation += 4 * (radio_attenuation - 2);
2048 radio_attenuation = 2;
2049 }
2050 } else if (radio_attenuation > 4 && txpower != 0) {
2051 txpower = 0;
2052 if (baseband_attenuation < 3) {
2053 radio_attenuation -= 3;
2054 baseband_attenuation += 2;
2055 } else {
2056 radio_attenuation -= 2;
2057 baseband_attenuation -= 2;
2058 }
2059 }
2060 }
2061 radio->txctl1 = txpower;
2062 baseband_attenuation = limit_value(baseband_attenuation, 0, 11);
2063 radio_attenuation = limit_value(radio_attenuation, 0, 9);
2064
2065 bcm43xx_phy_lock(bcm, phylock_flags);
2066 bcm43xx_radio_lock(bcm);
2067 bcm43xx_radio_set_txpower_bg(bcm, baseband_attenuation,
2068 radio_attenuation, txpower);
2069 bcm43xx_phy_lo_mark_current_used(bcm);
2070 bcm43xx_radio_unlock(bcm);
2071 bcm43xx_phy_unlock(bcm, phylock_flags);
2072 break;
2073 }
2074 default:
2075 assert(0);
2076 }
2077}
2078
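/* Divide num by den, rounding to the nearest integer for num >= 0
 * and truncating towards zero for num < 0. */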
2079static inline
2080s32 bcm43xx_tssi2dbm_ad(s32 num, s32 den)
2081{
2082 if (num < 0)
2083 return num/den;
2084 else
2085 return (num+den/2)/den;
2086}
2087
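/* Compute one entry of the TSSI-to-dBm table from the SPROM pab0/pab1/pab2
 * coefficients by iterative approximation (gives up with -EINVAL after
 * 16 iterations). */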
2088static inline
2089s8 bcm43xx_tssi2dbm_entry(s8 entry [], u8 index, s16 pab0, s16 pab1, s16 pab2)
2090{
2091 s32 m1, m2, f = 256, q, delta;
2092 s8 i = 0;
2093
2094 m1 = bcm43xx_tssi2dbm_ad(16 * pab0 + index * pab1, 32);
2095 m2 = max(bcm43xx_tssi2dbm_ad(32768 + index * pab2, 256), 1);
2096 do {
2097 if (i > 15)
2098 return -EINVAL;
2099 q = bcm43xx_tssi2dbm_ad(f * 4096 -
2100 bcm43xx_tssi2dbm_ad(m2 * f, 16) * f, 2048);
2101 delta = abs(q - f);
2102 f = q;
2103 i++;
2104 } while (delta >= 2);
2105 entry[index] = limit_value(bcm43xx_tssi2dbm_ad(m1 * f, 8192), -127, 128);
2106 return 0;
2107}
2108
2109/* http://bcm-specs.sipsolutions.net/TSSI_to_DBM_Table */
2110int bcm43xx_phy_init_tssi2dbm_table(struct bcm43xx_private *bcm)
2111{
2112 struct bcm43xx_phyinfo *phy = bcm43xx_current_phy(bcm);
2113 struct bcm43xx_radioinfo *radio = bcm43xx_current_radio(bcm);
2114 s16 pab0, pab1, pab2;
2115 u8 idx;
2116 s8 *dyn_tssi2dbm;
2117
2118 if (phy->type == BCM43xx_PHYTYPE_A) {
2119 pab0 = (s16)(bcm->sprom.pa1b0);
2120 pab1 = (s16)(bcm->sprom.pa1b1);
2121 pab2 = (s16)(bcm->sprom.pa1b2);
2122 } else {
2123 pab0 = (s16)(bcm->sprom.pa0b0);
2124 pab1 = (s16)(bcm->sprom.pa0b1);
2125 pab2 = (s16)(bcm->sprom.pa0b2);
2126 }
2127
2128 if ((bcm->chip_id == 0x4301) && (radio->version != 0x2050)) {
2129 phy->idle_tssi = 0x34;
2130 phy->tssi2dbm = bcm43xx_tssi2dbm_b_table;
2131 return 0;
2132 }
2133
2134 if (pab0 != 0 && pab1 != 0 && pab2 != 0 &&
2135 pab0 != -1 && pab1 != -1 && pab2 != -1) {
2136 /* The pabX values are set in SPROM. Use them. */
2137 if (phy->type == BCM43xx_PHYTYPE_A) {
2138 if ((s8)bcm->sprom.idle_tssi_tgt_aphy != 0 &&
2139 (s8)bcm->sprom.idle_tssi_tgt_aphy != -1)
2140 phy->idle_tssi = (s8)(bcm->sprom.idle_tssi_tgt_aphy);
2141 else
2142 phy->idle_tssi = 62;
2143 } else {
2144 if ((s8)bcm->sprom.idle_tssi_tgt_bgphy != 0 &&
2145 (s8)bcm->sprom.idle_tssi_tgt_bgphy != -1)
2146 phy->idle_tssi = (s8)(bcm->sprom.idle_tssi_tgt_bgphy);
2147 else
2148 phy->idle_tssi = 62;
2149 }
2150 dyn_tssi2dbm = kmalloc(64, GFP_KERNEL);
2151 if (dyn_tssi2dbm == NULL) {
2152 printk(KERN_ERR PFX "Could not allocate memory "
2153 "for tssi2dbm table\n");
2154 return -ENOMEM;
2155 }
2156 for (idx = 0; idx < 64; idx++)
2157 if (bcm43xx_tssi2dbm_entry(dyn_tssi2dbm, idx, pab0, pab1, pab2)) {
2158 phy->tssi2dbm = NULL;
2159 printk(KERN_ERR PFX "Could not generate "
2160 "tssi2dBm table\n");
2161 kfree(dyn_tssi2dbm);
2162 return -ENODEV;
2163 }
2164 phy->tssi2dbm = dyn_tssi2dbm;
2165 phy->dyn_tssi_tbl = 1;
2166 } else {
2167 /* pabX values not set in SPROM. */
2168 switch (phy->type) {
2169 case BCM43xx_PHYTYPE_A:
2170 /* APHY needs a generated table. */
2171 phy->tssi2dbm = NULL;
2172 printk(KERN_ERR PFX "Could not generate tssi2dBm "
2173 "table (wrong SPROM info)!\n");
2174 return -ENODEV;
2175 case BCM43xx_PHYTYPE_B:
2176 phy->idle_tssi = 0x34;
2177 phy->tssi2dbm = bcm43xx_tssi2dbm_b_table;
2178 break;
2179 case BCM43xx_PHYTYPE_G:
2180 phy->idle_tssi = 0x34;
2181 phy->tssi2dbm = bcm43xx_tssi2dbm_g_table;
2182 break;
2183 }
2184 }
2185
2186 return 0;
2187}
2188
2189int bcm43xx_phy_init(struct bcm43xx_private *bcm)
2190{
2191 struct bcm43xx_phyinfo *phy = bcm43xx_current_phy(bcm);
2192 int err = -ENODEV;
2193
2194 switch (phy->type) {
2195 case BCM43xx_PHYTYPE_A:
2196 if (phy->rev == 2 || phy->rev == 3) {
2197 bcm43xx_phy_inita(bcm);
2198 err = 0;
2199 }
2200 break;
2201 case BCM43xx_PHYTYPE_B:
2202 switch (phy->rev) {
2203 case 2:
2204 bcm43xx_phy_initb2(bcm);
2205 err = 0;
2206 break;
2207 case 4:
2208 bcm43xx_phy_initb4(bcm);
2209 err = 0;
2210 break;
2211 case 5:
2212 bcm43xx_phy_initb5(bcm);
2213 err = 0;
2214 break;
2215 case 6:
2216 bcm43xx_phy_initb6(bcm);
2217 err = 0;
2218 break;
2219 }
2220 break;
2221 case BCM43xx_PHYTYPE_G:
2222 bcm43xx_phy_initg(bcm);
2223 err = 0;
2224 break;
2225 }
2226 if (err)
2227 printk(KERN_WARNING PFX "Unknown PHYTYPE found!\n");
2228
2229 return err;
2230}
2231
2232void bcm43xx_phy_set_antenna_diversity(struct bcm43xx_private *bcm)
2233{
2234 struct bcm43xx_phyinfo *phy = bcm43xx_current_phy(bcm);
2235 u16 antennadiv;
2236 u16 offset;
2237 u16 value;
2238 u32 ucodeflags;
2239
2240 antennadiv = phy->antenna_diversity;
2241
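	/* 0xFFFF means "not set"; fall back to automatic diversity (3). */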
2242 if (antennadiv == 0xFFFF)
2243 antennadiv = 3;
2244 assert(antennadiv <= 3);
2245
2246 ucodeflags = bcm43xx_shm_read32(bcm, BCM43xx_SHM_SHARED,
2247 BCM43xx_UCODEFLAGS_OFFSET);
2248 bcm43xx_shm_write32(bcm, BCM43xx_SHM_SHARED,
2249 BCM43xx_UCODEFLAGS_OFFSET,
2250 ucodeflags & ~BCM43xx_UCODEFLAG_AUTODIV);
2251
2252 switch (phy->type) {
2253 case BCM43xx_PHYTYPE_A:
2254 case BCM43xx_PHYTYPE_G:
2255 if (phy->type == BCM43xx_PHYTYPE_A)
2256 offset = 0x0000;
2257 else
2258 offset = 0x0400;
2259
2260 if (antennadiv == 2)
2261 value = (3/*automatic*/ << 7);
2262 else
2263 value = (antennadiv << 7);
2264 bcm43xx_phy_write(bcm, offset + 1,
2265 (bcm43xx_phy_read(bcm, offset + 1)
2266 & 0x7E7F) | value);
2267
2268 if (antennadiv >= 2) {
2269 if (antennadiv == 2)
2270 value = (antennadiv << 7);
2271 else
2272 value = (0/*force0*/ << 7);
2273 bcm43xx_phy_write(bcm, offset + 0x2B,
2274 (bcm43xx_phy_read(bcm, offset + 0x2B)
2275 & 0xFEFF) | value);
2276 }
2277
2278 if (phy->type == BCM43xx_PHYTYPE_G) {
2279 if (antennadiv >= 2)
2280 bcm43xx_phy_write(bcm, 0x048C,
2281 bcm43xx_phy_read(bcm, 0x048C)
2282 | 0x2000);
2283 else
2284 bcm43xx_phy_write(bcm, 0x048C,
2285 bcm43xx_phy_read(bcm, 0x048C)
2286 & ~0x2000);
2287 if (phy->rev >= 2) {
2288 bcm43xx_phy_write(bcm, 0x0461,
2289 bcm43xx_phy_read(bcm, 0x0461)
2290 | 0x0010);
2291 bcm43xx_phy_write(bcm, 0x04AD,
2292 (bcm43xx_phy_read(bcm, 0x04AD)
2293 & 0x00FF) | 0x0015);
2294 if (phy->rev == 2)
2295 bcm43xx_phy_write(bcm, 0x0427, 0x0008);
2296 else
2297 bcm43xx_phy_write(bcm, 0x0427,
2298 (bcm43xx_phy_read(bcm, 0x0427)
2299 & 0x00FF) | 0x0008);
2300 }
2301 else if (phy->rev >= 6)
2302 bcm43xx_phy_write(bcm, 0x049B, 0x00DC);
2303 } else {
2304 if (phy->rev < 3)
2305 bcm43xx_phy_write(bcm, 0x002B,
2306 (bcm43xx_phy_read(bcm, 0x002B)
2307 & 0x00FF) | 0x0024);
2308 else {
2309 bcm43xx_phy_write(bcm, 0x0061,
2310 bcm43xx_phy_read(bcm, 0x0061)
2311 | 0x0010);
2312 if (phy->rev == 3) {
2313 bcm43xx_phy_write(bcm, 0x0093, 0x001D);
2314 bcm43xx_phy_write(bcm, 0x0027, 0x0008);
2315 } else {
2316 bcm43xx_phy_write(bcm, 0x0093, 0x003A);
2317 bcm43xx_phy_write(bcm, 0x0027,
2318 (bcm43xx_phy_read(bcm, 0x0027)
2319 & 0x00FF) | 0x0008);
2320 }
2321 }
2322 }
2323 break;
2324 case BCM43xx_PHYTYPE_B:
2325 if (bcm->current_core->rev == 2)
2326 value = (3/*automatic*/ << 7);
2327 else
2328 value = (antennadiv << 7);
2329 bcm43xx_phy_write(bcm, 0x03E2,
2330 (bcm43xx_phy_read(bcm, 0x03E2)
2331 & 0xFE7F) | value);
2332 break;
2333 default:
2334 assert(0);
2335 }
2336
2337 if (antennadiv >= 2) {
2338 ucodeflags = bcm43xx_shm_read32(bcm, BCM43xx_SHM_SHARED,
2339 BCM43xx_UCODEFLAGS_OFFSET);
2340 bcm43xx_shm_write32(bcm, BCM43xx_SHM_SHARED,
2341 BCM43xx_UCODEFLAGS_OFFSET,
2342 ucodeflags | BCM43xx_UCODEFLAG_AUTODIV);
2343 }
2344
2345 phy->antenna_diversity = antennadiv;
2346}
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_phy.h b/drivers/net/wireless/bcm43xx/bcm43xx_phy.h
deleted file mode 100644
index 73118364b552..000000000000
--- a/drivers/net/wireless/bcm43xx/bcm43xx_phy.h
+++ /dev/null
@@ -1,78 +0,0 @@
1/*
2
3 Broadcom BCM43xx wireless driver
4
5 Copyright (c) 2005 Martin Langer <martin-langer@gmx.de>,
6 Stefano Brivio <st3@riseup.net>
7 Michael Buesch <mbuesch@freenet.de>
8 Danny van Dyk <kugelfang@gentoo.org>
9 Andreas Jaggi <andreas.jaggi@waterwave.ch>
10
11 Some parts of the code in this file are derived from the ipw2200
12 driver Copyright(c) 2003 - 2004 Intel Corporation.
13
14 This program is free software; you can redistribute it and/or modify
15 it under the terms of the GNU General Public License as published by
16 the Free Software Foundation; either version 2 of the License, or
17 (at your option) any later version.
18
19 This program is distributed in the hope that it will be useful,
20 but WITHOUT ANY WARRANTY; without even the implied warranty of
21 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 GNU General Public License for more details.
23
24 You should have received a copy of the GNU General Public License
25 along with this program; see the file COPYING. If not, write to
26  the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
27 Boston, MA 02110-1301, USA.
28
29*/
30
31#ifndef BCM43xx_PHY_H_
32#define BCM43xx_PHY_H_
33
34#include <linux/types.h>
35
36struct bcm43xx_private;
37
38void bcm43xx_raw_phy_lock(struct bcm43xx_private *bcm);
39#define bcm43xx_phy_lock(bcm, flags) \
40 do { \
41 local_irq_save(flags); \
42 bcm43xx_raw_phy_lock(bcm); \
43 } while (0)
44void bcm43xx_raw_phy_unlock(struct bcm43xx_private *bcm);
45#define bcm43xx_phy_unlock(bcm, flags) \
46 do { \
47 bcm43xx_raw_phy_unlock(bcm); \
48 local_irq_restore(flags); \
49 } while (0)
50
51/* Card uses the loopback gain stuff */
52#define has_loopback_gain(phy) \
53 (((phy)->rev > 1) || ((phy)->connected))
54
55u16 bcm43xx_phy_read(struct bcm43xx_private *bcm, u16 offset);
56void bcm43xx_phy_write(struct bcm43xx_private *bcm, u16 offset, u16 val);
57
58int bcm43xx_phy_init_tssi2dbm_table(struct bcm43xx_private *bcm);
59int bcm43xx_phy_init(struct bcm43xx_private *bcm);
60
61void bcm43xx_phy_set_antenna_diversity(struct bcm43xx_private *bcm);
62void bcm43xx_phy_calibrate(struct bcm43xx_private *bcm);
63int bcm43xx_phy_connect(struct bcm43xx_private *bcm, int connect);
64
65void bcm43xx_phy_lo_b_measure(struct bcm43xx_private *bcm);
66void bcm43xx_phy_lo_g_measure(struct bcm43xx_private *bcm);
67void bcm43xx_phy_xmitpower(struct bcm43xx_private *bcm);
68
69/* Adjust the LocalOscillator to the saved values.
70 * "fixed" is only set to 1 once in initialization. Set to 0 otherwise.
71 */
72void bcm43xx_phy_lo_adjust(struct bcm43xx_private *bcm, int fixed);
73void bcm43xx_phy_lo_mark_all_unused(struct bcm43xx_private *bcm);
74
75void bcm43xx_phy_set_baseband_attenuation(struct bcm43xx_private *bcm,
76 u16 baseband_attenuation);
77
78#endif /* BCM43xx_PHY_H_ */
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_pio.c b/drivers/net/wireless/bcm43xx/bcm43xx_pio.c
deleted file mode 100644
index 76ab109cd2db..000000000000
--- a/drivers/net/wireless/bcm43xx/bcm43xx_pio.c
+++ /dev/null
@@ -1,674 +0,0 @@
1/*
2
3 Broadcom BCM43xx wireless driver
4
5 PIO Transmission
6
7 Copyright (c) 2005 Michael Buesch <mbuesch@freenet.de>
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 2 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program; see the file COPYING. If not, write to
21  the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
22 Boston, MA 02110-1301, USA.
23
24*/
25
26#include "bcm43xx.h"
27#include "bcm43xx_pio.h"
28#include "bcm43xx_main.h"
29#include "bcm43xx_xmit.h"
30#include "bcm43xx_power.h"
31
32#include <linux/delay.h>
33
34
35static void tx_start(struct bcm43xx_pioqueue *queue)
36{
37 bcm43xx_pio_write(queue, BCM43xx_PIO_TXCTL,
38 BCM43xx_PIO_TXCTL_INIT);
39}
40
41static void tx_octet(struct bcm43xx_pioqueue *queue,
42 u8 octet)
43{
44 if (queue->need_workarounds) {
45 bcm43xx_pio_write(queue, BCM43xx_PIO_TXDATA,
46 octet);
47 bcm43xx_pio_write(queue, BCM43xx_PIO_TXCTL,
48 BCM43xx_PIO_TXCTL_WRITELO);
49 } else {
50 bcm43xx_pio_write(queue, BCM43xx_PIO_TXCTL,
51 BCM43xx_PIO_TXCTL_WRITELO);
52 bcm43xx_pio_write(queue, BCM43xx_PIO_TXDATA,
53 octet);
54 }
55}
56
57static u16 tx_get_next_word(struct bcm43xx_txhdr *txhdr,
58 const u8 *packet,
59 unsigned int *pos)
60{
61 const u8 *source;
62 unsigned int i = *pos;
63 u16 ret;
64
65 if (i < sizeof(*txhdr)) {
66 source = (const u8 *)txhdr;
67 } else {
68 source = packet;
69 i -= sizeof(*txhdr);
70 }
71 ret = le16_to_cpu( *((__le16 *)(source + i)) );
72 *pos += 2;
73
74 return ret;
75}
76
77static void tx_data(struct bcm43xx_pioqueue *queue,
78 struct bcm43xx_txhdr *txhdr,
79 const u8 *packet,
80 unsigned int octets)
81{
82 u16 data;
83 unsigned int i = 0;
84
85 if (queue->need_workarounds) {
86 data = tx_get_next_word(txhdr, packet, &i);
87 bcm43xx_pio_write(queue, BCM43xx_PIO_TXDATA, data);
88 }
89 bcm43xx_pio_write(queue, BCM43xx_PIO_TXCTL,
90 BCM43xx_PIO_TXCTL_WRITELO |
91 BCM43xx_PIO_TXCTL_WRITEHI);
92 while (i < octets - 1) {
93 data = tx_get_next_word(txhdr, packet, &i);
94 bcm43xx_pio_write(queue, BCM43xx_PIO_TXDATA, data);
95 }
96 if (octets % 2)
97 tx_octet(queue, packet[octets - sizeof(*txhdr) - 1]);
98}
99
100static void tx_complete(struct bcm43xx_pioqueue *queue,
101 struct sk_buff *skb)
102{
103 if (queue->need_workarounds) {
104 bcm43xx_pio_write(queue, BCM43xx_PIO_TXDATA,
105 skb->data[skb->len - 1]);
106 bcm43xx_pio_write(queue, BCM43xx_PIO_TXCTL,
107 BCM43xx_PIO_TXCTL_WRITELO |
108 BCM43xx_PIO_TXCTL_COMPLETE);
109 } else {
110 bcm43xx_pio_write(queue, BCM43xx_PIO_TXCTL,
111 BCM43xx_PIO_TXCTL_COMPLETE);
112 }
113}
114
115static u16 generate_cookie(struct bcm43xx_pioqueue *queue,
116 struct bcm43xx_pio_txpacket *packet)
117{
118 u16 cookie = 0x0000;
119 int packetindex;
120
121 /* We use the upper 4 bits for the PIO
122 * controller ID and the lower 12 bits
123 * for the packet index (in the cache).
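	 * e.g. packet index 5 on the BCM43xx_MMIO_PIO2_BASE queue
	 * yields the cookie 0x1005.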
124 */
125 switch (queue->mmio_base) {
126 case BCM43xx_MMIO_PIO1_BASE:
127 break;
128 case BCM43xx_MMIO_PIO2_BASE:
129 cookie = 0x1000;
130 break;
131 case BCM43xx_MMIO_PIO3_BASE:
132 cookie = 0x2000;
133 break;
134 case BCM43xx_MMIO_PIO4_BASE:
135 cookie = 0x3000;
136 break;
137 default:
138 assert(0);
139 }
140 packetindex = pio_txpacket_getindex(packet);
141 assert(((u16)packetindex & 0xF000) == 0x0000);
142 cookie |= (u16)packetindex;
143
144 return cookie;
145}
146
147static
148struct bcm43xx_pioqueue * parse_cookie(struct bcm43xx_private *bcm,
149 u16 cookie,
150 struct bcm43xx_pio_txpacket **packet)
151{
152 struct bcm43xx_pio *pio = bcm43xx_current_pio(bcm);
153 struct bcm43xx_pioqueue *queue = NULL;
154 int packetindex;
155
156 switch (cookie & 0xF000) {
157 case 0x0000:
158 queue = pio->queue0;
159 break;
160 case 0x1000:
161 queue = pio->queue1;
162 break;
163 case 0x2000:
164 queue = pio->queue2;
165 break;
166 case 0x3000:
167 queue = pio->queue3;
168 break;
169 default:
170 assert(0);
171 }
172 packetindex = (cookie & 0x0FFF);
173 assert(packetindex >= 0 && packetindex < BCM43xx_PIO_MAXTXPACKETS);
174 *packet = &(queue->tx_packets_cache[packetindex]);
175
176 return queue;
177}
178
179static void pio_tx_write_fragment(struct bcm43xx_pioqueue *queue,
180 struct sk_buff *skb,
181 struct bcm43xx_pio_txpacket *packet)
182{
183 struct bcm43xx_txhdr txhdr;
184 unsigned int octets;
185
186 assert(skb_shinfo(skb)->nr_frags == 0);
187 bcm43xx_generate_txhdr(queue->bcm,
188 &txhdr, skb->data, skb->len,
189 (packet->xmitted_frags == 0),
190 generate_cookie(queue, packet));
191
192 tx_start(queue);
193 octets = skb->len + sizeof(txhdr);
194 if (queue->need_workarounds)
195 octets--;
196 tx_data(queue, &txhdr, (u8 *)skb->data, octets);
197 tx_complete(queue, skb);
198}
199
200static void free_txpacket(struct bcm43xx_pio_txpacket *packet,
201 int irq_context)
202{
203 struct bcm43xx_pioqueue *queue = packet->queue;
204
205 ieee80211_txb_free(packet->txb);
206 list_move(&packet->list, &queue->txfree);
207 queue->nr_txfree++;
208
209 assert(queue->tx_devq_used >= packet->xmitted_octets);
210 assert(queue->tx_devq_packets >= packet->xmitted_frags);
211 queue->tx_devq_used -= packet->xmitted_octets;
212 queue->tx_devq_packets -= packet->xmitted_frags;
213}
214
215static int pio_tx_packet(struct bcm43xx_pio_txpacket *packet)
216{
217 struct bcm43xx_pioqueue *queue = packet->queue;
218 struct ieee80211_txb *txb = packet->txb;
219 struct sk_buff *skb;
220 u16 octets;
221 int i;
222
223 for (i = packet->xmitted_frags; i < txb->nr_frags; i++) {
224 skb = txb->fragments[i];
225
226 octets = (u16)skb->len + sizeof(struct bcm43xx_txhdr);
227 assert(queue->tx_devq_size >= octets);
228 assert(queue->tx_devq_packets <= BCM43xx_PIO_MAXTXDEVQPACKETS);
229 assert(queue->tx_devq_used <= queue->tx_devq_size);
230 /* Check if there is sufficient free space on the device
231 * TX queue. If not, return and let the TX tasklet
232 * retry later.
233 */
234 if (queue->tx_devq_packets == BCM43xx_PIO_MAXTXDEVQPACKETS)
235 return -EBUSY;
236 if (queue->tx_devq_used + octets > queue->tx_devq_size)
237 return -EBUSY;
238 /* Now poke the device. */
239 pio_tx_write_fragment(queue, skb, packet);
240
241 /* Account for the packet size.
242 * (We must not overflow the device TX queue)
243 */
244 queue->tx_devq_packets++;
245 queue->tx_devq_used += octets;
246
247 assert(packet->xmitted_frags < packet->txb->nr_frags);
248 packet->xmitted_frags++;
249 packet->xmitted_octets += octets;
250 }
251 list_move_tail(&packet->list, &queue->txrunning);
252
253 return 0;
254}
255
256static void tx_tasklet(unsigned long d)
257{
258 struct bcm43xx_pioqueue *queue = (struct bcm43xx_pioqueue *)d;
259 struct bcm43xx_private *bcm = queue->bcm;
260 unsigned long flags;
261 struct bcm43xx_pio_txpacket *packet, *tmp_packet;
262 int err;
263 u16 txctl;
264
265 spin_lock_irqsave(&bcm->irq_lock, flags);
266
267 if (queue->tx_frozen)
268 goto out_unlock;
269 txctl = bcm43xx_pio_read(queue, BCM43xx_PIO_TXCTL);
270 if (txctl & BCM43xx_PIO_TXCTL_SUSPEND)
271 goto out_unlock;
272
273 list_for_each_entry_safe(packet, tmp_packet, &queue->txqueue, list) {
274 assert(packet->xmitted_frags < packet->txb->nr_frags);
275 if (packet->xmitted_frags == 0) {
276 int i;
277 struct sk_buff *skb;
278
279 /* Check if the device queue is big
280 * enough for every fragment. If not, drop the
281 * whole packet.
282 */
283 for (i = 0; i < packet->txb->nr_frags; i++) {
284 skb = packet->txb->fragments[i];
285 if (unlikely(skb->len > queue->tx_devq_size)) {
286 dprintkl(KERN_ERR PFX "PIO TX device queue too small. "
287 "Dropping packet.\n");
288 free_txpacket(packet, 1);
289 goto next_packet;
290 }
291 }
292 }
293 /* Try to transmit the packet.
294 * This may not completely succeed.
295 */
296 err = pio_tx_packet(packet);
297 if (err)
298 break;
299 next_packet:
300 continue;
301 }
302out_unlock:
303 spin_unlock_irqrestore(&bcm->irq_lock, flags);
304}
305
306static void setup_txqueues(struct bcm43xx_pioqueue *queue)
307{
308 struct bcm43xx_pio_txpacket *packet;
309 int i;
310
311 queue->nr_txfree = BCM43xx_PIO_MAXTXPACKETS;
312 for (i = 0; i < BCM43xx_PIO_MAXTXPACKETS; i++) {
313 packet = &(queue->tx_packets_cache[i]);
314
315 packet->queue = queue;
316 INIT_LIST_HEAD(&packet->list);
317
318 list_add(&packet->list, &queue->txfree);
319 }
320}
321
322static
323struct bcm43xx_pioqueue * bcm43xx_setup_pioqueue(struct bcm43xx_private *bcm,
324 u16 pio_mmio_base)
325{
326 struct bcm43xx_pioqueue *queue;
327 u32 value;
328 u16 qsize;
329
330 queue = kzalloc(sizeof(*queue), GFP_KERNEL);
331 if (!queue)
332 goto out;
333
334 queue->bcm = bcm;
335 queue->mmio_base = pio_mmio_base;
336 queue->need_workarounds = (bcm->current_core->rev < 3);
337
338 INIT_LIST_HEAD(&queue->txfree);
339 INIT_LIST_HEAD(&queue->txqueue);
340 INIT_LIST_HEAD(&queue->txrunning);
341 tasklet_init(&queue->txtask, tx_tasklet,
342 (unsigned long)queue);
343
344 value = bcm43xx_read32(bcm, BCM43xx_MMIO_STATUS_BITFIELD);
345 value &= ~BCM43xx_SBF_XFER_REG_BYTESWAP;
346 bcm43xx_write32(bcm, BCM43xx_MMIO_STATUS_BITFIELD, value);
347
348 qsize = bcm43xx_read16(bcm, queue->mmio_base + BCM43xx_PIO_TXQBUFSIZE);
349 if (qsize == 0) {
350 printk(KERN_ERR PFX "ERROR: This card does not support PIO "
351 "operation mode. Please use DMA mode "
352 "(module parameter pio=0).\n");
353 goto err_freequeue;
354 }
355 if (qsize <= BCM43xx_PIO_TXQADJUST) {
356 printk(KERN_ERR PFX "PIO tx device-queue too small (%u)\n",
357 qsize);
358 goto err_freequeue;
359 }
360 qsize -= BCM43xx_PIO_TXQADJUST;
361 queue->tx_devq_size = qsize;
362
363 setup_txqueues(queue);
364
365out:
366 return queue;
367
368err_freequeue:
369 kfree(queue);
370 queue = NULL;
371 goto out;
372}
373
374static void cancel_transfers(struct bcm43xx_pioqueue *queue)
375{
376 struct bcm43xx_pio_txpacket *packet, *tmp_packet;
377
378 netif_tx_disable(queue->bcm->net_dev);
379 tasklet_disable(&queue->txtask);
380
381 list_for_each_entry_safe(packet, tmp_packet, &queue->txrunning, list)
382 free_txpacket(packet, 0);
383 list_for_each_entry_safe(packet, tmp_packet, &queue->txqueue, list)
384 free_txpacket(packet, 0);
385}
386
387static void bcm43xx_destroy_pioqueue(struct bcm43xx_pioqueue *queue)
388{
389 if (!queue)
390 return;
391
392 cancel_transfers(queue);
393 kfree(queue);
394}
395
396void bcm43xx_pio_free(struct bcm43xx_private *bcm)
397{
398 struct bcm43xx_pio *pio;
399
400 if (!bcm43xx_using_pio(bcm))
401 return;
402 pio = bcm43xx_current_pio(bcm);
403
404 bcm43xx_destroy_pioqueue(pio->queue3);
405 pio->queue3 = NULL;
406 bcm43xx_destroy_pioqueue(pio->queue2);
407 pio->queue2 = NULL;
408 bcm43xx_destroy_pioqueue(pio->queue1);
409 pio->queue1 = NULL;
410 bcm43xx_destroy_pioqueue(pio->queue0);
411 pio->queue0 = NULL;
412}
413
414int bcm43xx_pio_init(struct bcm43xx_private *bcm)
415{
416 struct bcm43xx_pio *pio = bcm43xx_current_pio(bcm);
417 struct bcm43xx_pioqueue *queue;
418 int err = -ENOMEM;
419
420 queue = bcm43xx_setup_pioqueue(bcm, BCM43xx_MMIO_PIO1_BASE);
421 if (!queue)
422 goto out;
423 pio->queue0 = queue;
424
425 queue = bcm43xx_setup_pioqueue(bcm, BCM43xx_MMIO_PIO2_BASE);
426 if (!queue)
427 goto err_destroy0;
428 pio->queue1 = queue;
429
430 queue = bcm43xx_setup_pioqueue(bcm, BCM43xx_MMIO_PIO3_BASE);
431 if (!queue)
432 goto err_destroy1;
433 pio->queue2 = queue;
434
435 queue = bcm43xx_setup_pioqueue(bcm, BCM43xx_MMIO_PIO4_BASE);
436 if (!queue)
437 goto err_destroy2;
438 pio->queue3 = queue;
439
440 if (bcm->current_core->rev < 3)
441 bcm->irq_savedstate |= BCM43xx_IRQ_PIO_WORKAROUND;
442
443 dprintk(KERN_INFO PFX "PIO initialized\n");
444 err = 0;
445out:
446 return err;
447
448err_destroy2:
449 bcm43xx_destroy_pioqueue(pio->queue2);
450 pio->queue2 = NULL;
451err_destroy1:
452 bcm43xx_destroy_pioqueue(pio->queue1);
453 pio->queue1 = NULL;
454err_destroy0:
455 bcm43xx_destroy_pioqueue(pio->queue0);
456 pio->queue0 = NULL;
457 goto out;
458}
459
460int bcm43xx_pio_tx(struct bcm43xx_private *bcm,
461 struct ieee80211_txb *txb)
462{
463 struct bcm43xx_pioqueue *queue = bcm43xx_current_pio(bcm)->queue1;
464 struct bcm43xx_pio_txpacket *packet;
465
466 assert(!queue->tx_suspended);
467 assert(!list_empty(&queue->txfree));
468
469 packet = list_entry(queue->txfree.next, struct bcm43xx_pio_txpacket, list);
470 packet->txb = txb;
471 packet->xmitted_frags = 0;
472 packet->xmitted_octets = 0;
473 list_move_tail(&packet->list, &queue->txqueue);
474 queue->nr_txfree--;
475 assert(queue->nr_txfree < BCM43xx_PIO_MAXTXPACKETS);
476
477 /* Suspend TX, if we are out of packets in the "free" queue. */
478 if (list_empty(&queue->txfree)) {
479 netif_stop_queue(queue->bcm->net_dev);
480 queue->tx_suspended = 1;
481 }
482
483 tasklet_schedule(&queue->txtask);
484
485 return 0;
486}
487
488void bcm43xx_pio_handle_xmitstatus(struct bcm43xx_private *bcm,
489 struct bcm43xx_xmitstatus *status)
490{
491 struct bcm43xx_pioqueue *queue;
492 struct bcm43xx_pio_txpacket *packet;
493
494 queue = parse_cookie(bcm, status->cookie, &packet);
495 assert(queue);
496
497 free_txpacket(packet, 1);
498 if (queue->tx_suspended) {
499 queue->tx_suspended = 0;
500 netif_wake_queue(queue->bcm->net_dev);
501 }
502 /* If there are packets on the txqueue, poke the tasklet
503 * to transmit them.
504 */
505 if (!list_empty(&queue->txqueue))
506 tasklet_schedule(&queue->txtask);
507}
508
509static void pio_rx_error(struct bcm43xx_pioqueue *queue,
510 int clear_buffers,
511 const char *error)
512{
513 int i;
514
515 printkl("PIO RX error: %s\n", error);
516 bcm43xx_pio_write(queue, BCM43xx_PIO_RXCTL,
517 BCM43xx_PIO_RXCTL_READY);
518 if (clear_buffers) {
519 assert(queue->mmio_base == BCM43xx_MMIO_PIO1_BASE);
520 for (i = 0; i < 15; i++) {
521 /* Dummy read. */
522 bcm43xx_pio_read(queue, BCM43xx_PIO_RXDATA);
523 }
524 }
525}
526
527void bcm43xx_pio_rx(struct bcm43xx_pioqueue *queue)
528{
529 __le16 preamble[21] = { 0 };
530 struct bcm43xx_rxhdr *rxhdr;
531 u16 tmp, len, rxflags2;
532 int i, preamble_readwords;
533 struct sk_buff *skb;
534
535 tmp = bcm43xx_pio_read(queue, BCM43xx_PIO_RXCTL);
536 if (!(tmp & BCM43xx_PIO_RXCTL_DATAAVAILABLE))
537 return;
538 bcm43xx_pio_write(queue, BCM43xx_PIO_RXCTL,
539 BCM43xx_PIO_RXCTL_DATAAVAILABLE);
540
541 for (i = 0; i < 10; i++) {
542 tmp = bcm43xx_pio_read(queue, BCM43xx_PIO_RXCTL);
543 if (tmp & BCM43xx_PIO_RXCTL_READY)
544 goto data_ready;
545 udelay(10);
546 }
547 dprintkl(KERN_ERR PFX "PIO RX timed out\n");
548 return;
549data_ready:
550
551 len = bcm43xx_pio_read(queue, BCM43xx_PIO_RXDATA);
552 if (unlikely(len > 0x700)) {
553 pio_rx_error(queue, 0, "len > 0x700");
554 return;
555 }
556 if (unlikely(len == 0 && queue->mmio_base != BCM43xx_MMIO_PIO4_BASE)) {
557 pio_rx_error(queue, 0, "len == 0");
558 return;
559 }
560 preamble[0] = cpu_to_le16(len);
561 if (queue->mmio_base == BCM43xx_MMIO_PIO4_BASE)
562 preamble_readwords = 14 / sizeof(u16);
563 else
564 preamble_readwords = 18 / sizeof(u16);
565 for (i = 0; i < preamble_readwords; i++) {
566 tmp = bcm43xx_pio_read(queue, BCM43xx_PIO_RXDATA);
567 preamble[i + 1] = cpu_to_le16(tmp);
568 }
569 rxhdr = (struct bcm43xx_rxhdr *)preamble;
570 rxflags2 = le16_to_cpu(rxhdr->flags2);
571 if (unlikely(rxflags2 & BCM43xx_RXHDR_FLAGS2_INVALIDFRAME)) {
572 pio_rx_error(queue,
573 (queue->mmio_base == BCM43xx_MMIO_PIO1_BASE),
574 "invalid frame");
575 return;
576 }
577 if (queue->mmio_base == BCM43xx_MMIO_PIO4_BASE) {
578 /* We received an xmit status. */
579 struct bcm43xx_hwxmitstatus *hw;
580 struct bcm43xx_xmitstatus stat;
581
582 hw = (struct bcm43xx_hwxmitstatus *)(preamble + 1);
583 stat.cookie = le16_to_cpu(hw->cookie);
584 stat.flags = hw->flags;
585 stat.cnt1 = hw->cnt1;
586 stat.cnt2 = hw->cnt2;
587 stat.seq = le16_to_cpu(hw->seq);
588 stat.unknown = le16_to_cpu(hw->unknown);
589
590 bcm43xx_debugfs_log_txstat(queue->bcm, &stat);
591 bcm43xx_pio_handle_xmitstatus(queue->bcm, &stat);
592
593 return;
594 }
595
596 skb = dev_alloc_skb(len);
597 if (unlikely(!skb)) {
598 pio_rx_error(queue, 1, "OOM");
599 return;
600 }
601 skb_put(skb, len);
602 for (i = 0; i < len - 1; i += 2) {
603 tmp = bcm43xx_pio_read(queue, BCM43xx_PIO_RXDATA);
604 *((__le16 *)(skb->data + i)) = cpu_to_le16(tmp);
605 }
606 if (len % 2) {
607 tmp = bcm43xx_pio_read(queue, BCM43xx_PIO_RXDATA);
608 skb->data[len - 1] = (tmp & 0x00FF);
609/* The specs say the following is required, but
610 * it is wrong and corrupts the PLCP. If we don't do
611 * this, the PLCP seems to be correct. So ifdef it out for now.
612 */
613#if 0
614 if (rxflags2 & BCM43xx_RXHDR_FLAGS2_TYPE2FRAME)
615 skb->data[2] = (tmp & 0xFF00) >> 8;
616 else
617 skb->data[0] = (tmp & 0xFF00) >> 8;
618#endif
619 }
620 skb_trim(skb, len - IEEE80211_FCS_LEN);
621 bcm43xx_rx(queue->bcm, skb, rxhdr);
622}
623
624void bcm43xx_pio_tx_suspend(struct bcm43xx_pioqueue *queue)
625{
626 bcm43xx_power_saving_ctl_bits(queue->bcm, -1, 1);
627 bcm43xx_pio_write(queue, BCM43xx_PIO_TXCTL,
628 bcm43xx_pio_read(queue, BCM43xx_PIO_TXCTL)
629 | BCM43xx_PIO_TXCTL_SUSPEND);
630}
631
632void bcm43xx_pio_tx_resume(struct bcm43xx_pioqueue *queue)
633{
634 bcm43xx_pio_write(queue, BCM43xx_PIO_TXCTL,
635 bcm43xx_pio_read(queue, BCM43xx_PIO_TXCTL)
636 & ~BCM43xx_PIO_TXCTL_SUSPEND);
637 bcm43xx_power_saving_ctl_bits(queue->bcm, -1, -1);
638 if (!list_empty(&queue->txqueue))
639 tasklet_schedule(&queue->txtask);
640}
641
642void bcm43xx_pio_freeze_txqueues(struct bcm43xx_private *bcm)
643{
644 struct bcm43xx_pio *pio;
645
646 assert(bcm43xx_using_pio(bcm));
647 pio = bcm43xx_current_pio(bcm);
648 pio->queue0->tx_frozen = 1;
649 pio->queue1->tx_frozen = 1;
650 pio->queue2->tx_frozen = 1;
651 pio->queue3->tx_frozen = 1;
652}
653
654void bcm43xx_pio_thaw_txqueues(struct bcm43xx_private *bcm)
655{
656 struct bcm43xx_pio *pio;
657
658 assert(bcm43xx_using_pio(bcm));
659 pio = bcm43xx_current_pio(bcm);
660 pio->queue0->tx_frozen = 0;
661 pio->queue1->tx_frozen = 0;
662 pio->queue2->tx_frozen = 0;
663 pio->queue3->tx_frozen = 0;
664 if (!list_empty(&pio->queue0->txqueue))
665 tasklet_schedule(&pio->queue0->txtask);
666 if (!list_empty(&pio->queue1->txqueue))
667 tasklet_schedule(&pio->queue1->txtask);
668 if (!list_empty(&pio->queue2->txqueue))
669 tasklet_schedule(&pio->queue2->txtask);
670 if (!list_empty(&pio->queue3->txqueue))
671 tasklet_schedule(&pio->queue3->txtask);
672}
673
674
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_pio.h b/drivers/net/wireless/bcm43xx/bcm43xx_pio.h
deleted file mode 100644
index bc78a3c2cafb..000000000000
--- a/drivers/net/wireless/bcm43xx/bcm43xx_pio.h
+++ /dev/null
@@ -1,163 +0,0 @@
1#ifndef BCM43xx_PIO_H_
2#define BCM43xx_PIO_H_
3
4#include "bcm43xx.h"
5
6#include <linux/interrupt.h>
7#include <linux/list.h>
8#include <linux/skbuff.h>
9
10
11#define BCM43xx_PIO_TXCTL 0x00
12#define BCM43xx_PIO_TXDATA 0x02
13#define BCM43xx_PIO_TXQBUFSIZE 0x04
14#define BCM43xx_PIO_RXCTL 0x08
15#define BCM43xx_PIO_RXDATA 0x0A
16
17#define BCM43xx_PIO_TXCTL_WRITELO (1 << 0)
18#define BCM43xx_PIO_TXCTL_WRITEHI (1 << 1)
19#define BCM43xx_PIO_TXCTL_COMPLETE (1 << 2)
20#define BCM43xx_PIO_TXCTL_INIT (1 << 3)
21#define BCM43xx_PIO_TXCTL_SUSPEND (1 << 7)
22
23#define BCM43xx_PIO_RXCTL_DATAAVAILABLE (1 << 0)
24#define BCM43xx_PIO_RXCTL_READY (1 << 1)
25
26/* PIO constants */
27#define BCM43xx_PIO_MAXTXDEVQPACKETS 31
28#define BCM43xx_PIO_TXQADJUST 80
29
30/* PIO tuning knobs */
31#define BCM43xx_PIO_MAXTXPACKETS 256
32
33
34
35#ifdef CONFIG_BCM43XX_PIO
36
37
38struct bcm43xx_pioqueue;
39struct bcm43xx_xmitstatus;
40
41struct bcm43xx_pio_txpacket {
42 struct bcm43xx_pioqueue *queue;
43 struct ieee80211_txb *txb;
44 struct list_head list;
45
46 u8 xmitted_frags;
47 u16 xmitted_octets;
48};
49
50#define pio_txpacket_getindex(packet) ((int)((packet) - (packet)->queue->tx_packets_cache))
51
52struct bcm43xx_pioqueue {
53 struct bcm43xx_private *bcm;
54 u16 mmio_base;
55
56 u8 tx_suspended:1,
57 tx_frozen:1,
58 need_workarounds:1; /* Workarounds needed for core.rev < 3 */
59
60 /* Adjusted size of the device internal TX buffer. */
61 u16 tx_devq_size;
62 /* Used octets of the device internal TX buffer. */
63 u16 tx_devq_used;
64 /* Used packet slots in the device internal TX buffer. */
65 u8 tx_devq_packets;
66 /* Packets from the txfree list can
67 * be taken on incoming TX requests.
68 */
69 struct list_head txfree;
70 unsigned int nr_txfree;
71 /* Packets on the txqueue are queued,
72	 * but not yet completely written to the chip.
73 */
74 struct list_head txqueue;
75 /* Packets on the txrunning queue are completely
76 * posted to the device. We are waiting for the txstatus.
77 */
78 struct list_head txrunning;
79	/* Total number of packets sent.
80 * (This counter can obviously wrap).
81 */
82 unsigned int nr_tx_packets;
83 struct tasklet_struct txtask;
84 struct bcm43xx_pio_txpacket tx_packets_cache[BCM43xx_PIO_MAXTXPACKETS];
85};
86
87static inline
88u16 bcm43xx_pio_read(struct bcm43xx_pioqueue *queue,
89 u16 offset)
90{
91 return bcm43xx_read16(queue->bcm, queue->mmio_base + offset);
92}
93
94static inline
95void bcm43xx_pio_write(struct bcm43xx_pioqueue *queue,
96 u16 offset, u16 value)
97{
98 bcm43xx_write16(queue->bcm, queue->mmio_base + offset, value);
99 mmiowb();
100}
101
102
103int bcm43xx_pio_init(struct bcm43xx_private *bcm);
104void bcm43xx_pio_free(struct bcm43xx_private *bcm);
105
106int bcm43xx_pio_tx(struct bcm43xx_private *bcm,
107 struct ieee80211_txb *txb);
108void bcm43xx_pio_handle_xmitstatus(struct bcm43xx_private *bcm,
109 struct bcm43xx_xmitstatus *status);
110void bcm43xx_pio_rx(struct bcm43xx_pioqueue *queue);
111
112/* Suspend a TX queue on hardware level. */
113void bcm43xx_pio_tx_suspend(struct bcm43xx_pioqueue *queue);
114void bcm43xx_pio_tx_resume(struct bcm43xx_pioqueue *queue);
115/* Suspend (freeze) the TX tasklet (software level). */
116void bcm43xx_pio_freeze_txqueues(struct bcm43xx_private *bcm);
117void bcm43xx_pio_thaw_txqueues(struct bcm43xx_private *bcm);
118
119#else /* CONFIG_BCM43XX_PIO */
120
121static inline
122int bcm43xx_pio_init(struct bcm43xx_private *bcm)
123{
124 return 0;
125}
126static inline
127void bcm43xx_pio_free(struct bcm43xx_private *bcm)
128{
129}
130static inline
131int bcm43xx_pio_tx(struct bcm43xx_private *bcm,
132 struct ieee80211_txb *txb)
133{
134 return 0;
135}
136static inline
137void bcm43xx_pio_handle_xmitstatus(struct bcm43xx_private *bcm,
138 struct bcm43xx_xmitstatus *status)
139{
140}
141static inline
142void bcm43xx_pio_rx(struct bcm43xx_pioqueue *queue)
143{
144}
145static inline
146void bcm43xx_pio_tx_suspend(struct bcm43xx_pioqueue *queue)
147{
148}
149static inline
150void bcm43xx_pio_tx_resume(struct bcm43xx_pioqueue *queue)
151{
152}
153static inline
154void bcm43xx_pio_freeze_txqueues(struct bcm43xx_private *bcm)
155{
156}
157static inline
158void bcm43xx_pio_thaw_txqueues(struct bcm43xx_private *bcm)
159{
160}
161
162#endif /* CONFIG_BCM43XX_PIO */
163#endif /* BCM43xx_PIO_H_ */
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_power.c b/drivers/net/wireless/bcm43xx/bcm43xx_power.c
deleted file mode 100644
index 7e774f410953..000000000000
--- a/drivers/net/wireless/bcm43xx/bcm43xx_power.c
+++ /dev/null
@@ -1,393 +0,0 @@
1/*
2
3 Broadcom BCM43xx wireless driver
4
5 Copyright (c) 2005 Martin Langer <martin-langer@gmx.de>,
6 Stefano Brivio <st3@riseup.net>
7 Michael Buesch <mbuesch@freenet.de>
8 Danny van Dyk <kugelfang@gentoo.org>
9 Andreas Jaggi <andreas.jaggi@waterwave.ch>
10
11 Some parts of the code in this file are derived from the ipw2200
12 driver Copyright(c) 2003 - 2004 Intel Corporation.
13
14 This program is free software; you can redistribute it and/or modify
15 it under the terms of the GNU General Public License as published by
16 the Free Software Foundation; either version 2 of the License, or
17 (at your option) any later version.
18
19 This program is distributed in the hope that it will be useful,
20 but WITHOUT ANY WARRANTY; without even the implied warranty of
21 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 GNU General Public License for more details.
23
24 You should have received a copy of the GNU General Public License
25 along with this program; see the file COPYING. If not, write to
26  the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
27 Boston, MA 02110-1301, USA.
28
29*/
30
31#include <linux/delay.h>
32
33#include "bcm43xx.h"
34#include "bcm43xx_power.h"
35#include "bcm43xx_main.h"
36
37
38/* Get the Slow Clock Source */
39static int bcm43xx_pctl_get_slowclksrc(struct bcm43xx_private *bcm)
40{
41 u32 tmp;
42 int err;
43
44 assert(bcm->current_core == &bcm->core_chipcommon);
45 if (bcm->current_core->rev < 6) {
46 if (bcm->bustype == BCM43xx_BUSTYPE_PCMCIA ||
47 bcm->bustype == BCM43xx_BUSTYPE_SB)
48 return BCM43xx_PCTL_CLKSRC_XTALOS;
49 if (bcm->bustype == BCM43xx_BUSTYPE_PCI) {
50 err = bcm43xx_pci_read_config32(bcm, BCM43xx_PCTL_OUT, &tmp);
51 assert(!err);
52 if (tmp & 0x10)
53 return BCM43xx_PCTL_CLKSRC_PCI;
54 return BCM43xx_PCTL_CLKSRC_XTALOS;
55 }
56 }
57 if (bcm->current_core->rev < 10) {
58 tmp = bcm43xx_read32(bcm, BCM43xx_CHIPCOMMON_SLOWCLKCTL);
59 tmp &= 0x7;
60 if (tmp == 0)
61 return BCM43xx_PCTL_CLKSRC_LOPWROS;
62 if (tmp == 1)
63 return BCM43xx_PCTL_CLKSRC_XTALOS;
64 if (tmp == 2)
65 return BCM43xx_PCTL_CLKSRC_PCI;
66 }
67
68 return BCM43xx_PCTL_CLKSRC_XTALOS;
69}
70
71/* Get max/min slowclock frequency
72 * as described in http://bcm-specs.sipsolutions.net/PowerControl
73 */
74static int bcm43xx_pctl_clockfreqlimit(struct bcm43xx_private *bcm,
75 int get_max)
76{
77 int limit;
78 int clocksrc;
79 int divisor;
80 u32 tmp;
81
82 assert(bcm->chipcommon_capabilities & BCM43xx_CAPABILITIES_PCTL);
83 assert(bcm->current_core == &bcm->core_chipcommon);
84
85 clocksrc = bcm43xx_pctl_get_slowclksrc(bcm);
86 if (bcm->current_core->rev < 6) {
87 switch (clocksrc) {
88 case BCM43xx_PCTL_CLKSRC_PCI:
89 divisor = 64;
90 break;
91 case BCM43xx_PCTL_CLKSRC_XTALOS:
92 divisor = 32;
93 break;
94 default:
95 assert(0);
96 divisor = 1;
97 }
98 } else if (bcm->current_core->rev < 10) {
99 switch (clocksrc) {
100 case BCM43xx_PCTL_CLKSRC_LOPWROS:
101 divisor = 1;
102 break;
103 case BCM43xx_PCTL_CLKSRC_XTALOS:
104 case BCM43xx_PCTL_CLKSRC_PCI:
105 tmp = bcm43xx_read32(bcm, BCM43xx_CHIPCOMMON_SLOWCLKCTL);
106 divisor = ((tmp & 0xFFFF0000) >> 16) + 1;
107 divisor *= 4;
108 break;
109 default:
110 assert(0);
111 divisor = 1;
112 }
113 } else {
114 tmp = bcm43xx_read32(bcm, BCM43xx_CHIPCOMMON_SYSCLKCTL);
115 divisor = ((tmp & 0xFFFF0000) >> 16) + 1;
116 divisor *= 4;
117 }
118
119 switch (clocksrc) {
120 case BCM43xx_PCTL_CLKSRC_LOPWROS:
121 if (get_max)
122 limit = 43000;
123 else
124 limit = 25000;
125 break;
126 case BCM43xx_PCTL_CLKSRC_XTALOS:
127 if (get_max)
128 limit = 20200000;
129 else
130 limit = 19800000;
131 break;
132 case BCM43xx_PCTL_CLKSRC_PCI:
133 if (get_max)
134 limit = 34000000;
135 else
136 limit = 25000000;
137 break;
138 default:
139 assert(0);
140 limit = 0;
141 }
142 limit /= divisor;
143
144 return limit;
145}
146
147
148/* init power control
149 * as described in http://bcm-specs.sipsolutions.net/PowerControl
150 */
151int bcm43xx_pctl_init(struct bcm43xx_private *bcm)
152{
153 int err, maxfreq;
154 struct bcm43xx_coreinfo *old_core;
155
156 old_core = bcm->current_core;
157 err = bcm43xx_switch_core(bcm, &bcm->core_chipcommon);
158 if (err == -ENODEV)
159 return 0;
160 if (err)
161 goto out;
162
163 if (bcm->chip_id == 0x4321) {
164 if (bcm->chip_rev == 0)
165 bcm43xx_write32(bcm, BCM43xx_CHIPCOMMON_CTL, 0x03A4);
166 if (bcm->chip_rev == 1)
167 bcm43xx_write32(bcm, BCM43xx_CHIPCOMMON_CTL, 0x00A4);
168 }
169
170 if (bcm->chipcommon_capabilities & BCM43xx_CAPABILITIES_PCTL) {
171 if (bcm->current_core->rev >= 10) {
172			/* Set Idle Power clock rate to 1 MHz */
173 bcm43xx_write32(bcm, BCM43xx_CHIPCOMMON_SYSCLKCTL,
174 (bcm43xx_read32(bcm, BCM43xx_CHIPCOMMON_SYSCLKCTL)
175 & 0x0000FFFF) | 0x40000);
176 } else {
177 maxfreq = bcm43xx_pctl_clockfreqlimit(bcm, 1);
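			/* Presumably 150us and 15us worth of slow-clock ticks at the
			 * maximum slow clock frequency, rounded up. */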
178 bcm43xx_write32(bcm, BCM43xx_CHIPCOMMON_PLLONDELAY,
179 (maxfreq * 150 + 999999) / 1000000);
180 bcm43xx_write32(bcm, BCM43xx_CHIPCOMMON_FREFSELDELAY,
181 (maxfreq * 15 + 999999) / 1000000);
182 }
183 }
184
185 err = bcm43xx_switch_core(bcm, old_core);
186 assert(err == 0);
187
188out:
189 return err;
190}
191
192u16 bcm43xx_pctl_powerup_delay(struct bcm43xx_private *bcm)
193{
194 u16 delay = 0;
195 int err;
196 u32 pll_on_delay;
197 struct bcm43xx_coreinfo *old_core;
198 int minfreq;
199
200 if (bcm->bustype != BCM43xx_BUSTYPE_PCI)
201 goto out;
202 if (!(bcm->chipcommon_capabilities & BCM43xx_CAPABILITIES_PCTL))
203 goto out;
204 old_core = bcm->current_core;
205 err = bcm43xx_switch_core(bcm, &bcm->core_chipcommon);
206 if (err == -ENODEV)
207 goto out;
208
209 minfreq = bcm43xx_pctl_clockfreqlimit(bcm, 0);
210 pll_on_delay = bcm43xx_read32(bcm, BCM43xx_CHIPCOMMON_PLLONDELAY);
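	/* Convert (pll_on_delay + 2) slow-clock ticks to microseconds, rounding up. */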
211 delay = (((pll_on_delay + 2) * 1000000) + (minfreq - 1)) / minfreq;
212
213 err = bcm43xx_switch_core(bcm, old_core);
214 assert(err == 0);
215
216out:
217 return delay;
218}
219
220/* set the powercontrol clock
221 * as described in http://bcm-specs.sipsolutions.net/PowerControl
222 */
223int bcm43xx_pctl_set_clock(struct bcm43xx_private *bcm, u16 mode)
224{
225 int err;
226 struct bcm43xx_coreinfo *old_core;
227 u32 tmp;
228
229 old_core = bcm->current_core;
230 err = bcm43xx_switch_core(bcm, &bcm->core_chipcommon);
231 if (err == -ENODEV)
232 return 0;
233 if (err)
234 goto out;
235
236 if (bcm->core_chipcommon.rev < 6) {
237 if (mode == BCM43xx_PCTL_CLK_FAST) {
238 err = bcm43xx_pctl_set_crystal(bcm, 1);
239 if (err)
240 goto out;
241 }
242 } else {
243 if ((bcm->chipcommon_capabilities & BCM43xx_CAPABILITIES_PCTL) &&
244 (bcm->core_chipcommon.rev < 10)) {
245 switch (mode) {
246 case BCM43xx_PCTL_CLK_FAST:
247 tmp = bcm43xx_read32(bcm, BCM43xx_CHIPCOMMON_SLOWCLKCTL);
248 tmp = (tmp & ~BCM43xx_PCTL_FORCE_SLOW) | BCM43xx_PCTL_FORCE_PLL;
249 bcm43xx_write32(bcm, BCM43xx_CHIPCOMMON_SLOWCLKCTL, tmp);
250 break;
251 case BCM43xx_PCTL_CLK_SLOW:
252 tmp = bcm43xx_read32(bcm, BCM43xx_CHIPCOMMON_SLOWCLKCTL);
253 tmp |= BCM43xx_PCTL_FORCE_SLOW;
254 bcm43xx_write32(bcm, BCM43xx_CHIPCOMMON_SLOWCLKCTL, tmp);
255 break;
256 case BCM43xx_PCTL_CLK_DYNAMIC:
257 tmp = bcm43xx_read32(bcm, BCM43xx_CHIPCOMMON_SLOWCLKCTL);
258 tmp &= ~BCM43xx_PCTL_FORCE_SLOW;
259 tmp |= BCM43xx_PCTL_FORCE_PLL;
260 tmp &= ~BCM43xx_PCTL_DYN_XTAL;
261 bcm43xx_write32(bcm, BCM43xx_CHIPCOMMON_SLOWCLKCTL, tmp);
262 }
263 }
264 }
265
266 err = bcm43xx_switch_core(bcm, old_core);
267 assert(err == 0);
268
269out:
270 return err;
271}
272
273int bcm43xx_pctl_set_crystal(struct bcm43xx_private *bcm, int on)
274{
275 int err;
276 u32 in, out, outenable;
277
278 err = bcm43xx_pci_read_config32(bcm, BCM43xx_PCTL_IN, &in);
279 if (err)
280 goto err_pci;
281 err = bcm43xx_pci_read_config32(bcm, BCM43xx_PCTL_OUT, &out);
282 if (err)
283 goto err_pci;
284 err = bcm43xx_pci_read_config32(bcm, BCM43xx_PCTL_OUTENABLE, &outenable);
285 if (err)
286 goto err_pci;
287
288 outenable |= (BCM43xx_PCTL_XTAL_POWERUP | BCM43xx_PCTL_PLL_POWERDOWN);
289
290 if (on) {
291 if (in & 0x40)
292 return 0;
293
294 out |= (BCM43xx_PCTL_XTAL_POWERUP | BCM43xx_PCTL_PLL_POWERDOWN);
295
296 err = bcm43xx_pci_write_config32(bcm, BCM43xx_PCTL_OUT, out);
297 if (err)
298 goto err_pci;
299 err = bcm43xx_pci_write_config32(bcm, BCM43xx_PCTL_OUTENABLE, outenable);
300 if (err)
301 goto err_pci;
302 udelay(1000);
303
304 out &= ~BCM43xx_PCTL_PLL_POWERDOWN;
305 err = bcm43xx_pci_write_config32(bcm, BCM43xx_PCTL_OUT, out);
306 if (err)
307 goto err_pci;
308 udelay(5000);
309 } else {
310 if (bcm->current_core->rev < 5)
311 return 0;
312 if (bcm->sprom.boardflags & BCM43xx_BFL_XTAL_NOSLOW)
313 return 0;
314
315/* XXX: Why can't BCM43xx_MMIO_RADIO_HWENABLED_xx be read at this time?
316 * err = bcm43xx_switch_core(bcm, bcm->active_80211_core);
317 * if (err)
318 * return err;
319 * if (((bcm->current_core->rev >= 3) &&
320 * (bcm43xx_read32(bcm, BCM43xx_MMIO_RADIO_HWENABLED_HI) & (1 << 16))) ||
321 * ((bcm->current_core->rev < 3) &&
322 * !(bcm43xx_read16(bcm, BCM43xx_MMIO_RADIO_HWENABLED_LO) & (1 << 4))))
323 * return 0;
324 * err = bcm43xx_switch_core(bcm, &bcm->core_chipcommon);
325 * if (err)
326 * return err;
327 */
328
329 err = bcm43xx_pctl_set_clock(bcm, BCM43xx_PCTL_CLK_SLOW);
330 if (err)
331 goto out;
332 out &= ~BCM43xx_PCTL_XTAL_POWERUP;
333 out |= BCM43xx_PCTL_PLL_POWERDOWN;
334 err = bcm43xx_pci_write_config32(bcm, BCM43xx_PCTL_OUT, out);
335 if (err)
336 goto err_pci;
337 err = bcm43xx_pci_write_config32(bcm, BCM43xx_PCTL_OUTENABLE, outenable);
338 if (err)
339 goto err_pci;
340 }
341
342out:
343 return err;
344
345err_pci:
346	printk(KERN_ERR PFX "Error: pctl_set_crystal() could not access PCI config space!\n");
347 err = -EBUSY;
348 goto out;
349}
350
351/* Set the PowerSavingControlBits.
352 * Bitvalues:
353 * 0 => unset the bit
354 * 1 => set the bit
355 * -1 => calculate the bit
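 *  e.g. bcm43xx_pio_tx_suspend() passes (bit25 = -1, bit26 = 1).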
356 */
357void bcm43xx_power_saving_ctl_bits(struct bcm43xx_private *bcm,
358 int bit25, int bit26)
359{
360 int i;
361 u32 status;
362
363//FIXME: Force 25 to off and 26 to on for now:
364bit25 = 0;
365bit26 = 1;
366
367 if (bit25 == -1) {
368 //TODO: If powersave is not off and FIXME is not set and we are not in adhoc
369 // and thus is not an AP and we are associated, set bit 25
370 }
371 if (bit26 == -1) {
372 //TODO: If the device is awake or this is an AP, or we are scanning, or FIXME,
373 // or we are associated, or FIXME, or the latest PS-Poll packet sent was
374 // successful, set bit26
375 }
376 status = bcm43xx_read32(bcm, BCM43xx_MMIO_STATUS_BITFIELD);
377 if (bit25)
378 status |= BCM43xx_SBF_PS1;
379 else
380 status &= ~BCM43xx_SBF_PS1;
381 if (bit26)
382 status |= BCM43xx_SBF_PS2;
383 else
384 status &= ~BCM43xx_SBF_PS2;
385 bcm43xx_write32(bcm, BCM43xx_MMIO_STATUS_BITFIELD, status);
386 if (bit26 && bcm->current_core->rev >= 5) {
387 for (i = 0; i < 100; i++) {
388 if (bcm43xx_shm_read32(bcm, BCM43xx_SHM_SHARED, 0x0040) != 4)
389 break;
390 udelay(10);
391 }
392 }
393}
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_power.h b/drivers/net/wireless/bcm43xx/bcm43xx_power.h
deleted file mode 100644
index c966ab3a5a8c..000000000000
--- a/drivers/net/wireless/bcm43xx/bcm43xx_power.h
+++ /dev/null
@@ -1,56 +0,0 @@
1/*
2
3 Broadcom BCM43xx wireless driver
4
5 Copyright (c) 2005 Martin Langer <martin-langer@gmx.de>,
6 Stefano Brivio <st3@riseup.net>
7 Michael Buesch <mbuesch@freenet.de>
8 Danny van Dyk <kugelfang@gentoo.org>
9 Andreas Jaggi <andreas.jaggi@waterwave.ch>
10
11 Some parts of the code in this file are derived from the ipw2200
12 driver Copyright(c) 2003 - 2004 Intel Corporation.
13
14 This program is free software; you can redistribute it and/or modify
15 it under the terms of the GNU General Public License as published by
16 the Free Software Foundation; either version 2 of the License, or
17 (at your option) any later version.
18
19 This program is distributed in the hope that it will be useful,
20 but WITHOUT ANY WARRANTY; without even the implied warranty of
21 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 GNU General Public License for more details.
23
24 You should have received a copy of the GNU General Public License
25 along with this program; see the file COPYING. If not, write to
26  the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
27 Boston, MA 02110-1301, USA.
28
29*/
30
31#ifndef BCM43xx_POWER_H_
32#define BCM43xx_POWER_H_
33
34#include <linux/types.h>
35
36/* Clock sources */
37enum {
38 /* PCI clock */
39 BCM43xx_PCTL_CLKSRC_PCI,
40 /* Crystal slow clock oscillator */
41 BCM43xx_PCTL_CLKSRC_XTALOS,
42 /* Low power oscillator */
43 BCM43xx_PCTL_CLKSRC_LOPWROS,
44};
45
46struct bcm43xx_private;
47
48int bcm43xx_pctl_init(struct bcm43xx_private *bcm);
49int bcm43xx_pctl_set_clock(struct bcm43xx_private *bcm, u16 mode);
50int bcm43xx_pctl_set_crystal(struct bcm43xx_private *bcm, int on);
51u16 bcm43xx_pctl_powerup_delay(struct bcm43xx_private *bcm);
52
53void bcm43xx_power_saving_ctl_bits(struct bcm43xx_private *bcm,
54 int bit25, int bit26);
55
56#endif /* BCM43xx_POWER_H_ */
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_radio.c b/drivers/net/wireless/bcm43xx/bcm43xx_radio.c
deleted file mode 100644
index c605099c9baf..000000000000
--- a/drivers/net/wireless/bcm43xx/bcm43xx_radio.c
+++ /dev/null
@@ -1,2170 +0,0 @@
1/*
2
3 Broadcom BCM43xx wireless driver
4
5 Copyright (c) 2005 Martin Langer <martin-langer@gmx.de>,
6 Stefano Brivio <st3@riseup.net>
7 Michael Buesch <mbuesch@freenet.de>
8 Danny van Dyk <kugelfang@gentoo.org>
9 Andreas Jaggi <andreas.jaggi@waterwave.ch>
10
11 Some parts of the code in this file are derived from the ipw2200
12 driver Copyright(c) 2003 - 2004 Intel Corporation.
13
14 This program is free software; you can redistribute it and/or modify
15 it under the terms of the GNU General Public License as published by
16 the Free Software Foundation; either version 2 of the License, or
17 (at your option) any later version.
18
19 This program is distributed in the hope that it will be useful,
20 but WITHOUT ANY WARRANTY; without even the implied warranty of
21 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 GNU General Public License for more details.
23
24 You should have received a copy of the GNU General Public License
25 along with this program; see the file COPYING. If not, write to
26  the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
27 Boston, MA 02110-1301, USA.
28
29*/
30
31#include <linux/delay.h>
32
33#include "bcm43xx.h"
34#include "bcm43xx_main.h"
35#include "bcm43xx_phy.h"
36#include "bcm43xx_radio.h"
37#include "bcm43xx_ilt.h"
38
39
40/* Table for bcm43xx_radio_calibrationvalue() */
41static const u16 rcc_table[16] = {
42 0x0002, 0x0003, 0x0001, 0x000F,
43 0x0006, 0x0007, 0x0005, 0x000F,
44 0x000A, 0x000B, 0x0009, 0x000F,
45 0x000E, 0x000F, 0x000D, 0x000F,
46};
47
48/* Reverse the bits of a 4bit value.
49 * Example: 1101 is flipped to 1011.
50 */
51static u16 flip_4bit(u16 value)
52{
53 u16 flipped = 0x0000;
54
55 assert((value & ~0x000F) == 0x0000);
56
57 flipped |= (value & 0x0001) << 3;
58 flipped |= (value & 0x0002) << 1;
59 flipped |= (value & 0x0004) >> 1;
60 flipped |= (value & 0x0008) >> 3;
61
62 return flipped;
63}
64
65/* Get the freq, as it has to be written to the device. */
66static inline
67u16 channel2freq_bg(u8 channel)
68{
69	/* Frequencies are given as frequencies_bg[index] MHz + 2.4GHz,
70	 * starting with channel 1.
71 */
72 static const u16 frequencies_bg[14] = {
73 12, 17, 22, 27,
74 32, 37, 42, 47,
75 52, 57, 62, 67,
76 72, 84,
77 };
78
79 assert(channel >= 1 && channel <= 14);
80
81 return frequencies_bg[channel - 1];
82}
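/* Editor's worked example (illustrative, not from the original file): the
 * table above stores MHz offsets from 2.4GHz, indexed by channel - 1, so
 * channel 1 -> 2412 MHz, channel 6 -> 2437 MHz and channel 14 -> 2484 MHz
 * (the only non-5MHz step). A standalone check of that mapping:
 */
#include <assert.h>
#include <stdint.h>

static uint16_t bg_channel_to_mhz(uint8_t channel)
{
	static const uint16_t offsets_mhz[14] = {
		12, 17, 22, 27, 32, 37, 42, 47, 52, 57, 62, 67, 72, 84,
	};

	assert(channel >= 1 && channel <= 14);
	return 2400 + offsets_mhz[channel - 1];
}

int main(void)
{
	assert(bg_channel_to_mhz(1) == 2412);
	assert(bg_channel_to_mhz(6) == 2437);
	assert(bg_channel_to_mhz(14) == 2484);
	return 0;
}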
83
84/* Get the freq, as it has to be written to the device. */
85static inline
86u16 channel2freq_a(u8 channel)
87{
88 assert(channel <= 200);
89
90 return (5000 + 5 * channel);
91}
92
93void bcm43xx_radio_lock(struct bcm43xx_private *bcm)
94{
95 u32 status;
96
97 status = bcm43xx_read32(bcm, BCM43xx_MMIO_STATUS_BITFIELD);
98 status |= BCM43xx_SBF_RADIOREG_LOCK;
99 bcm43xx_write32(bcm, BCM43xx_MMIO_STATUS_BITFIELD, status);
100 mmiowb();
101 udelay(10);
102}
103
104void bcm43xx_radio_unlock(struct bcm43xx_private *bcm)
105{
106 u32 status;
107
108 bcm43xx_read16(bcm, BCM43xx_MMIO_PHY_VER); /* dummy read */
109 status = bcm43xx_read32(bcm, BCM43xx_MMIO_STATUS_BITFIELD);
110 status &= ~BCM43xx_SBF_RADIOREG_LOCK;
111 bcm43xx_write32(bcm, BCM43xx_MMIO_STATUS_BITFIELD, status);
112 mmiowb();
113}
114
115u16 bcm43xx_radio_read16(struct bcm43xx_private *bcm, u16 offset)
116{
117 struct bcm43xx_phyinfo *phy = bcm43xx_current_phy(bcm);
118 struct bcm43xx_radioinfo *radio = bcm43xx_current_radio(bcm);
119
120 switch (phy->type) {
121 case BCM43xx_PHYTYPE_A:
122 offset |= 0x0040;
123 break;
124 case BCM43xx_PHYTYPE_B:
125 if (radio->version == 0x2053) {
126 if (offset < 0x70)
127 offset += 0x80;
128 else if (offset < 0x80)
129 offset += 0x70;
130 } else if (radio->version == 0x2050) {
131 offset |= 0x80;
132 } else
133 assert(0);
134 break;
135 case BCM43xx_PHYTYPE_G:
136 offset |= 0x80;
137 break;
138 }
139
140 bcm43xx_write16(bcm, BCM43xx_MMIO_RADIO_CONTROL, offset);
141 return bcm43xx_read16(bcm, BCM43xx_MMIO_RADIO_DATA_LOW);
142}
143
144void bcm43xx_radio_write16(struct bcm43xx_private *bcm, u16 offset, u16 val)
145{
146 bcm43xx_write16(bcm, BCM43xx_MMIO_RADIO_CONTROL, offset);
147 mmiowb();
148 bcm43xx_write16(bcm, BCM43xx_MMIO_RADIO_DATA_LOW, val);
149}
150
151static void bcm43xx_set_all_gains(struct bcm43xx_private *bcm,
152 s16 first, s16 second, s16 third)
153{
154 struct bcm43xx_phyinfo *phy = bcm43xx_current_phy(bcm);
155 u16 i;
156 u16 start = 0x08, end = 0x18;
157 u16 offset = 0x0400;
158 u16 tmp;
159
160 if (phy->rev <= 1) {
161 offset = 0x5000;
162 start = 0x10;
163 end = 0x20;
164 }
165
166 for (i = 0; i < 4; i++)
167 bcm43xx_ilt_write(bcm, offset + i, first);
168
169 for (i = start; i < end; i++)
170 bcm43xx_ilt_write(bcm, offset + i, second);
171
172 if (third != -1) {
173 tmp = ((u16)third << 14) | ((u16)third << 6);
174 bcm43xx_phy_write(bcm, 0x04A0,
175 (bcm43xx_phy_read(bcm, 0x04A0) & 0xBFBF) | tmp);
176 bcm43xx_phy_write(bcm, 0x04A1,
177 (bcm43xx_phy_read(bcm, 0x04A1) & 0xBFBF) | tmp);
178 bcm43xx_phy_write(bcm, 0x04A2,
179 (bcm43xx_phy_read(bcm, 0x04A2) & 0xBFBF) | tmp);
180 }
181 bcm43xx_dummy_transmission(bcm);
182}
183
184static void bcm43xx_set_original_gains(struct bcm43xx_private *bcm)
185{
186 struct bcm43xx_phyinfo *phy = bcm43xx_current_phy(bcm);
187 u16 i, tmp;
188 u16 offset = 0x0400;
189 u16 start = 0x0008, end = 0x0018;
190
191 if (phy->rev <= 1) {
192 offset = 0x5000;
193 start = 0x0010;
194 end = 0x0020;
195 }
196
197 for (i = 0; i < 4; i++) {
198 tmp = (i & 0xFFFC);
199 tmp |= (i & 0x0001) << 1;
200 tmp |= (i & 0x0002) >> 1;
201
202 bcm43xx_ilt_write(bcm, offset + i, tmp);
203 }
204
205 for (i = start; i < end; i++)
206 bcm43xx_ilt_write(bcm, offset + i, i - start);
207
208 bcm43xx_phy_write(bcm, 0x04A0,
209 (bcm43xx_phy_read(bcm, 0x04A0) & 0xBFBF) | 0x4040);
210 bcm43xx_phy_write(bcm, 0x04A1,
211 (bcm43xx_phy_read(bcm, 0x04A1) & 0xBFBF) | 0x4040);
212 bcm43xx_phy_write(bcm, 0x04A2,
213 (bcm43xx_phy_read(bcm, 0x04A2) & 0xBFBF) | 0x4000);
214 bcm43xx_dummy_transmission(bcm);
215}
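/* Editor's aside (sketch only): the tmp expression above swaps the two low
 * bits of the index, so i = 0..3 maps to 0, 2, 1, 3 before being written to
 * the gain table. A standalone check of that bit swap:
 */
#include <assert.h>
#include <stdint.h>

static uint16_t swap_low_two_bits(uint16_t i)
{
	return (i & 0xFFFC) | ((i & 0x0001) << 1) | ((i & 0x0002) >> 1);
}

int main(void)
{
	assert(swap_low_two_bits(0) == 0);
	assert(swap_low_two_bits(1) == 2);
	assert(swap_low_two_bits(2) == 1);
	assert(swap_low_two_bits(3) == 3);
	return 0;
}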
216
217/* Synthetic PU workaround */
218static void bcm43xx_synth_pu_workaround(struct bcm43xx_private *bcm, u8 channel)
219{
220 struct bcm43xx_radioinfo *radio = bcm43xx_current_radio(bcm);
221
222 if (radio->version != 0x2050 || radio->revision >= 6) {
223 /* We do not need the workaround. */
224 return;
225 }
226
227 if (channel <= 10) {
228 bcm43xx_write16(bcm, BCM43xx_MMIO_CHANNEL,
229 channel2freq_bg(channel + 4));
230 } else {
231 bcm43xx_write16(bcm, BCM43xx_MMIO_CHANNEL,
232 channel2freq_bg(1));
233 }
234 udelay(100);
235 bcm43xx_write16(bcm, BCM43xx_MMIO_CHANNEL,
236 channel2freq_bg(channel));
237}
238
239u8 bcm43xx_radio_aci_detect(struct bcm43xx_private *bcm, u8 channel)
240{
241 struct bcm43xx_radioinfo *radio = bcm43xx_current_radio(bcm);
242 u8 ret = 0;
243 u16 saved, rssi, temp;
244 int i, j = 0;
245
246 saved = bcm43xx_phy_read(bcm, 0x0403);
247 bcm43xx_radio_selectchannel(bcm, channel, 0);
248 bcm43xx_phy_write(bcm, 0x0403, (saved & 0xFFF8) | 5);
249 if (radio->aci_hw_rssi)
250 rssi = bcm43xx_phy_read(bcm, 0x048A) & 0x3F;
251 else
252 rssi = saved & 0x3F;
253	/* sign-extend the 6-bit RSSI field to a signed value */
254 if (rssi > 32)
255 rssi -= 64;
256	for (i = 0; i < 100; i++) {
257 temp = (bcm43xx_phy_read(bcm, 0x047F) >> 8) & 0x3F;
258 if (temp > 32)
259 temp -= 64;
260 if (temp < rssi)
261 j++;
262 if (j >= 20)
263 ret = 1;
264 }
265 bcm43xx_phy_write(bcm, 0x0403, saved);
266
267 return ret;
268}
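/* Editor's note (hedged aside, not driver code): the "subtract 64" adjustment
 * above treats the 6-bit hardware RSSI field as a two's-complement value. A
 * generic sign extension of an n-bit field looks like this:
 */
#include <assert.h>
#include <stdint.h>

/* Sign-extend the low `bits` bits of value into a full int. */
static int sign_extend(uint16_t value, unsigned int bits)
{
	uint16_t sign = 1u << (bits - 1);

	value &= (1u << bits) - 1;
	return (value ^ sign) - sign;
}

int main(void)
{
	assert(sign_extend(0x3F, 6) == -1);	/* 111111 -> -1 */
	assert(sign_extend(0x20, 6) == -32);	/* 100000 -> -32 */
	assert(sign_extend(0x1F, 6) == 31);	/* 011111 -> 31 */
	return 0;
}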
269
270u8 bcm43xx_radio_aci_scan(struct bcm43xx_private *bcm)
271{
272 struct bcm43xx_phyinfo *phy = bcm43xx_current_phy(bcm);
273 struct bcm43xx_radioinfo *radio = bcm43xx_current_radio(bcm);
274 u8 ret[13];
275 unsigned int channel = radio->channel;
276 unsigned int i, j, start, end;
277 unsigned long phylock_flags;
278
279 if (!((phy->type == BCM43xx_PHYTYPE_G) && (phy->rev > 0)))
280 return 0;
281
282 bcm43xx_phy_lock(bcm, phylock_flags);
283 bcm43xx_radio_lock(bcm);
284 bcm43xx_phy_write(bcm, 0x0802,
285 bcm43xx_phy_read(bcm, 0x0802) & 0xFFFC);
286 bcm43xx_phy_write(bcm, BCM43xx_PHY_G_CRS,
287 bcm43xx_phy_read(bcm, BCM43xx_PHY_G_CRS) & 0x7FFF);
288 bcm43xx_set_all_gains(bcm, 3, 8, 1);
289
290 start = (channel - 5 > 0) ? channel - 5 : 1;
291 end = (channel + 5 < 14) ? channel + 5 : 13;
292
293 for (i = start; i <= end; i++) {
294 if (abs(channel - i) > 2)
295 ret[i-1] = bcm43xx_radio_aci_detect(bcm, i);
296 }
297 bcm43xx_radio_selectchannel(bcm, channel, 0);
298 bcm43xx_phy_write(bcm, 0x0802,
299 (bcm43xx_phy_read(bcm, 0x0802) & 0xFFFC) | 0x0003);
300 bcm43xx_phy_write(bcm, 0x0403,
301 bcm43xx_phy_read(bcm, 0x0403) & 0xFFF8);
302 bcm43xx_phy_write(bcm, BCM43xx_PHY_G_CRS,
303 bcm43xx_phy_read(bcm, BCM43xx_PHY_G_CRS) | 0x8000);
304 bcm43xx_set_original_gains(bcm);
305 for (i = 0; i < 13; i++) {
306 if (!ret[i])
307 continue;
308 end = (i + 5 < 13) ? i + 5 : 13;
309 for (j = i; j < end; j++)
310 ret[j] = 1;
311 }
312 bcm43xx_radio_unlock(bcm);
313 bcm43xx_phy_unlock(bcm, phylock_flags);
314
315 return ret[channel - 1];
316}
317
318/* http://bcm-specs.sipsolutions.net/NRSSILookupTable */
319void bcm43xx_nrssi_hw_write(struct bcm43xx_private *bcm, u16 offset, s16 val)
320{
321 bcm43xx_phy_write(bcm, BCM43xx_PHY_NRSSILT_CTRL, offset);
322 mmiowb();
323 bcm43xx_phy_write(bcm, BCM43xx_PHY_NRSSILT_DATA, (u16)val);
324}
325
326/* http://bcm-specs.sipsolutions.net/NRSSILookupTable */
327s16 bcm43xx_nrssi_hw_read(struct bcm43xx_private *bcm, u16 offset)
328{
329 u16 val;
330
331 bcm43xx_phy_write(bcm, BCM43xx_PHY_NRSSILT_CTRL, offset);
332 val = bcm43xx_phy_read(bcm, BCM43xx_PHY_NRSSILT_DATA);
333
334 return (s16)val;
335}
336
337/* http://bcm-specs.sipsolutions.net/NRSSILookupTable */
338void bcm43xx_nrssi_hw_update(struct bcm43xx_private *bcm, u16 val)
339{
340 u16 i;
341 s16 tmp;
342
343 for (i = 0; i < 64; i++) {
344 tmp = bcm43xx_nrssi_hw_read(bcm, i);
345 tmp -= val;
346 tmp = limit_value(tmp, -32, 31);
347 bcm43xx_nrssi_hw_write(bcm, i, tmp);
348 }
349}
350
351/* http://bcm-specs.sipsolutions.net/NRSSILookupTable */
352void bcm43xx_nrssi_mem_update(struct bcm43xx_private *bcm)
353{
354 struct bcm43xx_radioinfo *radio = bcm43xx_current_radio(bcm);
355 s16 i, delta;
356 s32 tmp;
357
358 delta = 0x1F - radio->nrssi[0];
359 for (i = 0; i < 64; i++) {
360 tmp = (i - delta) * radio->nrssislope;
361 tmp /= 0x10000;
362 tmp += 0x3A;
363 tmp = limit_value(tmp, 0, 0x3F);
364 radio->nrssi_lt[i] = tmp;
365 }
366}
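/* Editor's sketch (assumptions flagged, not original driver code): the table
 * update above appears to work in 16.16 fixed point -- nrssislope holds
 * 64 / (nrssi0 - nrssi1) in Q16 form (see bcm43xx_calc_nrssi_slope() below)
 * and the division by 0x10000 drops the fractional bits. With made-up
 * calibration readings, the same arithmetic standalone:
 */
#include <stdint.h>
#include <stdio.h>

static int32_t clamp_s32(int32_t v, int32_t lo, int32_t hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
	int16_t nrssi0 = -10, nrssi1 = -26;		/* hypothetical readings */
	int32_t slope = 0x00400000 / (nrssi0 - nrssi1);	/* Q16 value of 64/16 = 4.0 */
	int16_t delta = 0x1F - nrssi0;			/* 41 */
	uint8_t lt[64];
	int i;

	for (i = 0; i < 64; i++)
		lt[i] = clamp_s32((i - delta) * slope / 0x10000 + 0x3A, 0, 0x3F);

	printf("slope=0x%08x lt[0]=%d lt[63]=%d\n",
	       (unsigned int)slope, lt[0], lt[63]);	/* 0x00040000, 0, 63 */
	return 0;
}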
367
368static void bcm43xx_calc_nrssi_offset(struct bcm43xx_private *bcm)
369{
370 struct bcm43xx_phyinfo *phy = bcm43xx_current_phy(bcm);
371 u16 backup[20] = { 0 };
372 s16 v47F;
373 u16 i;
374 u16 saved = 0xFFFF;
375
376 backup[0] = bcm43xx_phy_read(bcm, 0x0001);
377 backup[1] = bcm43xx_phy_read(bcm, 0x0811);
378 backup[2] = bcm43xx_phy_read(bcm, 0x0812);
379 backup[3] = bcm43xx_phy_read(bcm, 0x0814);
380 backup[4] = bcm43xx_phy_read(bcm, 0x0815);
381 backup[5] = bcm43xx_phy_read(bcm, 0x005A);
382 backup[6] = bcm43xx_phy_read(bcm, 0x0059);
383 backup[7] = bcm43xx_phy_read(bcm, 0x0058);
384 backup[8] = bcm43xx_phy_read(bcm, 0x000A);
385 backup[9] = bcm43xx_phy_read(bcm, 0x0003);
386 backup[10] = bcm43xx_radio_read16(bcm, 0x007A);
387 backup[11] = bcm43xx_radio_read16(bcm, 0x0043);
388
389 bcm43xx_phy_write(bcm, 0x0429,
390 bcm43xx_phy_read(bcm, 0x0429) & 0x7FFF);
391 bcm43xx_phy_write(bcm, 0x0001,
392 (bcm43xx_phy_read(bcm, 0x0001) & 0x3FFF) | 0x4000);
393 bcm43xx_phy_write(bcm, 0x0811,
394 bcm43xx_phy_read(bcm, 0x0811) | 0x000C);
395 bcm43xx_phy_write(bcm, 0x0812,
396 (bcm43xx_phy_read(bcm, 0x0812) & 0xFFF3) | 0x0004);
397 bcm43xx_phy_write(bcm, 0x0802,
398 bcm43xx_phy_read(bcm, 0x0802) & ~(0x1 | 0x2));
399 if (phy->rev >= 6) {
400 backup[12] = bcm43xx_phy_read(bcm, 0x002E);
401 backup[13] = bcm43xx_phy_read(bcm, 0x002F);
402 backup[14] = bcm43xx_phy_read(bcm, 0x080F);
403 backup[15] = bcm43xx_phy_read(bcm, 0x0810);
404 backup[16] = bcm43xx_phy_read(bcm, 0x0801);
405 backup[17] = bcm43xx_phy_read(bcm, 0x0060);
406 backup[18] = bcm43xx_phy_read(bcm, 0x0014);
407 backup[19] = bcm43xx_phy_read(bcm, 0x0478);
408
409 bcm43xx_phy_write(bcm, 0x002E, 0);
410 bcm43xx_phy_write(bcm, 0x002F, 0);
411 bcm43xx_phy_write(bcm, 0x080F, 0);
412 bcm43xx_phy_write(bcm, 0x0810, 0);
413 bcm43xx_phy_write(bcm, 0x0478,
414 bcm43xx_phy_read(bcm, 0x0478) | 0x0100);
415 bcm43xx_phy_write(bcm, 0x0801,
416 bcm43xx_phy_read(bcm, 0x0801) | 0x0040);
417 bcm43xx_phy_write(bcm, 0x0060,
418 bcm43xx_phy_read(bcm, 0x0060) | 0x0040);
419 bcm43xx_phy_write(bcm, 0x0014,
420 bcm43xx_phy_read(bcm, 0x0014) | 0x0200);
421 }
422 bcm43xx_radio_write16(bcm, 0x007A,
423 bcm43xx_radio_read16(bcm, 0x007A) | 0x0070);
424 bcm43xx_radio_write16(bcm, 0x007A,
425 bcm43xx_radio_read16(bcm, 0x007A) | 0x0080);
426 udelay(30);
427
428 v47F = (s16)((bcm43xx_phy_read(bcm, 0x047F) >> 8) & 0x003F);
429 if (v47F >= 0x20)
430 v47F -= 0x40;
431 if (v47F == 31) {
432 for (i = 7; i >= 4; i--) {
433 bcm43xx_radio_write16(bcm, 0x007B, i);
434 udelay(20);
435 v47F = (s16)((bcm43xx_phy_read(bcm, 0x047F) >> 8) & 0x003F);
436 if (v47F >= 0x20)
437 v47F -= 0x40;
438 if (v47F < 31 && saved == 0xFFFF)
439 saved = i;
440 }
441 if (saved == 0xFFFF)
442 saved = 4;
443 } else {
444 bcm43xx_radio_write16(bcm, 0x007A,
445 bcm43xx_radio_read16(bcm, 0x007A) & 0x007F);
446 bcm43xx_phy_write(bcm, 0x0814,
447 bcm43xx_phy_read(bcm, 0x0814) | 0x0001);
448 bcm43xx_phy_write(bcm, 0x0815,
449 bcm43xx_phy_read(bcm, 0x0815) & 0xFFFE);
450 bcm43xx_phy_write(bcm, 0x0811,
451 bcm43xx_phy_read(bcm, 0x0811) | 0x000C);
452 bcm43xx_phy_write(bcm, 0x0812,
453 bcm43xx_phy_read(bcm, 0x0812) | 0x000C);
454 bcm43xx_phy_write(bcm, 0x0811,
455 bcm43xx_phy_read(bcm, 0x0811) | 0x0030);
456 bcm43xx_phy_write(bcm, 0x0812,
457 bcm43xx_phy_read(bcm, 0x0812) | 0x0030);
458 bcm43xx_phy_write(bcm, 0x005A, 0x0480);
459 bcm43xx_phy_write(bcm, 0x0059, 0x0810);
460 bcm43xx_phy_write(bcm, 0x0058, 0x000D);
461 if (phy->analog == 0) {
462 bcm43xx_phy_write(bcm, 0x0003, 0x0122);
463 } else {
464 bcm43xx_phy_write(bcm, 0x000A,
465 bcm43xx_phy_read(bcm, 0x000A)
466 | 0x2000);
467 }
468 bcm43xx_phy_write(bcm, 0x0814,
469 bcm43xx_phy_read(bcm, 0x0814) | 0x0004);
470 bcm43xx_phy_write(bcm, 0x0815,
471 bcm43xx_phy_read(bcm, 0x0815) & 0xFFFB);
472 bcm43xx_phy_write(bcm, 0x0003,
473 (bcm43xx_phy_read(bcm, 0x0003) & 0xFF9F)
474 | 0x0040);
475 bcm43xx_radio_write16(bcm, 0x007A,
476 bcm43xx_radio_read16(bcm, 0x007A) | 0x000F);
477 bcm43xx_set_all_gains(bcm, 3, 0, 1);
478 bcm43xx_radio_write16(bcm, 0x0043,
479 (bcm43xx_radio_read16(bcm, 0x0043)
480 & 0x00F0) | 0x000F);
481 udelay(30);
482 v47F = (s16)((bcm43xx_phy_read(bcm, 0x047F) >> 8) & 0x003F);
483 if (v47F >= 0x20)
484 v47F -= 0x40;
485 if (v47F == -32) {
486 for (i = 0; i < 4; i++) {
487 bcm43xx_radio_write16(bcm, 0x007B, i);
488 udelay(20);
489 v47F = (s16)((bcm43xx_phy_read(bcm, 0x047F) >> 8) & 0x003F);
490 if (v47F >= 0x20)
491 v47F -= 0x40;
492 if (v47F > -31 && saved == 0xFFFF)
493 saved = i;
494 }
495 if (saved == 0xFFFF)
496 saved = 3;
497 } else
498 saved = 0;
499 }
500 bcm43xx_radio_write16(bcm, 0x007B, saved);
501
502 if (phy->rev >= 6) {
503 bcm43xx_phy_write(bcm, 0x002E, backup[12]);
504 bcm43xx_phy_write(bcm, 0x002F, backup[13]);
505 bcm43xx_phy_write(bcm, 0x080F, backup[14]);
506 bcm43xx_phy_write(bcm, 0x0810, backup[15]);
507 }
508 bcm43xx_phy_write(bcm, 0x0814, backup[3]);
509 bcm43xx_phy_write(bcm, 0x0815, backup[4]);
510 bcm43xx_phy_write(bcm, 0x005A, backup[5]);
511 bcm43xx_phy_write(bcm, 0x0059, backup[6]);
512 bcm43xx_phy_write(bcm, 0x0058, backup[7]);
513 bcm43xx_phy_write(bcm, 0x000A, backup[8]);
514 bcm43xx_phy_write(bcm, 0x0003, backup[9]);
515 bcm43xx_radio_write16(bcm, 0x0043, backup[11]);
516 bcm43xx_radio_write16(bcm, 0x007A, backup[10]);
517 bcm43xx_phy_write(bcm, 0x0802,
518 bcm43xx_phy_read(bcm, 0x0802) | 0x1 | 0x2);
519 bcm43xx_phy_write(bcm, 0x0429,
520 bcm43xx_phy_read(bcm, 0x0429) | 0x8000);
521 bcm43xx_set_original_gains(bcm);
522 if (phy->rev >= 6) {
523 bcm43xx_phy_write(bcm, 0x0801, backup[16]);
524 bcm43xx_phy_write(bcm, 0x0060, backup[17]);
525 bcm43xx_phy_write(bcm, 0x0014, backup[18]);
526 bcm43xx_phy_write(bcm, 0x0478, backup[19]);
527 }
528 bcm43xx_phy_write(bcm, 0x0001, backup[0]);
529 bcm43xx_phy_write(bcm, 0x0812, backup[2]);
530 bcm43xx_phy_write(bcm, 0x0811, backup[1]);
531}
532
533void bcm43xx_calc_nrssi_slope(struct bcm43xx_private *bcm)
534{
535 struct bcm43xx_phyinfo *phy = bcm43xx_current_phy(bcm);
536 struct bcm43xx_radioinfo *radio = bcm43xx_current_radio(bcm);
537 u16 backup[18] = { 0 };
538 u16 tmp;
539 s16 nrssi0, nrssi1;
540
541 switch (phy->type) {
542 case BCM43xx_PHYTYPE_B:
543 backup[0] = bcm43xx_radio_read16(bcm, 0x007A);
544 backup[1] = bcm43xx_radio_read16(bcm, 0x0052);
545 backup[2] = bcm43xx_radio_read16(bcm, 0x0043);
546 backup[3] = bcm43xx_phy_read(bcm, 0x0030);
547 backup[4] = bcm43xx_phy_read(bcm, 0x0026);
548 backup[5] = bcm43xx_phy_read(bcm, 0x0015);
549 backup[6] = bcm43xx_phy_read(bcm, 0x002A);
550 backup[7] = bcm43xx_phy_read(bcm, 0x0020);
551 backup[8] = bcm43xx_phy_read(bcm, 0x005A);
552 backup[9] = bcm43xx_phy_read(bcm, 0x0059);
553 backup[10] = bcm43xx_phy_read(bcm, 0x0058);
554 backup[11] = bcm43xx_read16(bcm, 0x03E2);
555 backup[12] = bcm43xx_read16(bcm, 0x03E6);
556 backup[13] = bcm43xx_read16(bcm, BCM43xx_MMIO_CHANNEL_EXT);
557
558 tmp = bcm43xx_radio_read16(bcm, 0x007A);
559 tmp &= (phy->rev >= 5) ? 0x007F : 0x000F;
560 bcm43xx_radio_write16(bcm, 0x007A, tmp);
561 bcm43xx_phy_write(bcm, 0x0030, 0x00FF);
562 bcm43xx_write16(bcm, 0x03EC, 0x7F7F);
563 bcm43xx_phy_write(bcm, 0x0026, 0x0000);
564 bcm43xx_phy_write(bcm, 0x0015,
565 bcm43xx_phy_read(bcm, 0x0015) | 0x0020);
566 bcm43xx_phy_write(bcm, 0x002A, 0x08A3);
567 bcm43xx_radio_write16(bcm, 0x007A,
568 bcm43xx_radio_read16(bcm, 0x007A) | 0x0080);
569
570 nrssi0 = (s16)bcm43xx_phy_read(bcm, 0x0027);
571 bcm43xx_radio_write16(bcm, 0x007A,
572 bcm43xx_radio_read16(bcm, 0x007A) & 0x007F);
573 if (phy->analog >= 2) {
574 bcm43xx_write16(bcm, 0x03E6, 0x0040);
575 } else if (phy->analog == 0) {
576 bcm43xx_write16(bcm, 0x03E6, 0x0122);
577 } else {
578 bcm43xx_write16(bcm, BCM43xx_MMIO_CHANNEL_EXT,
579 bcm43xx_read16(bcm, BCM43xx_MMIO_CHANNEL_EXT) & 0x2000);
580 }
581 bcm43xx_phy_write(bcm, 0x0020, 0x3F3F);
582 bcm43xx_phy_write(bcm, 0x0015, 0xF330);
583 bcm43xx_radio_write16(bcm, 0x005A, 0x0060);
584 bcm43xx_radio_write16(bcm, 0x0043,
585 bcm43xx_radio_read16(bcm, 0x0043) & 0x00F0);
586 bcm43xx_phy_write(bcm, 0x005A, 0x0480);
587 bcm43xx_phy_write(bcm, 0x0059, 0x0810);
588 bcm43xx_phy_write(bcm, 0x0058, 0x000D);
589 udelay(20);
590
591 nrssi1 = (s16)bcm43xx_phy_read(bcm, 0x0027);
592 bcm43xx_phy_write(bcm, 0x0030, backup[3]);
593 bcm43xx_radio_write16(bcm, 0x007A, backup[0]);
594 bcm43xx_write16(bcm, 0x03E2, backup[11]);
595 bcm43xx_phy_write(bcm, 0x0026, backup[4]);
596 bcm43xx_phy_write(bcm, 0x0015, backup[5]);
597 bcm43xx_phy_write(bcm, 0x002A, backup[6]);
598 bcm43xx_synth_pu_workaround(bcm, radio->channel);
599 if (phy->analog != 0)
600 bcm43xx_write16(bcm, 0x03F4, backup[13]);
601
602 bcm43xx_phy_write(bcm, 0x0020, backup[7]);
603 bcm43xx_phy_write(bcm, 0x005A, backup[8]);
604 bcm43xx_phy_write(bcm, 0x0059, backup[9]);
605 bcm43xx_phy_write(bcm, 0x0058, backup[10]);
606 bcm43xx_radio_write16(bcm, 0x0052, backup[1]);
607 bcm43xx_radio_write16(bcm, 0x0043, backup[2]);
608
609 if (nrssi0 == nrssi1)
610 radio->nrssislope = 0x00010000;
611 else
612 radio->nrssislope = 0x00400000 / (nrssi0 - nrssi1);
613
614 if (nrssi0 <= -4) {
615 radio->nrssi[0] = nrssi0;
616 radio->nrssi[1] = nrssi1;
617 }
618 break;
619 case BCM43xx_PHYTYPE_G:
620 if (radio->revision >= 9)
621 return;
622 if (radio->revision == 8)
623 bcm43xx_calc_nrssi_offset(bcm);
624
625 bcm43xx_phy_write(bcm, BCM43xx_PHY_G_CRS,
626 bcm43xx_phy_read(bcm, BCM43xx_PHY_G_CRS) & 0x7FFF);
627 bcm43xx_phy_write(bcm, 0x0802,
628 bcm43xx_phy_read(bcm, 0x0802) & 0xFFFC);
629 backup[7] = bcm43xx_read16(bcm, 0x03E2);
630 bcm43xx_write16(bcm, 0x03E2,
631 bcm43xx_read16(bcm, 0x03E2) | 0x8000);
632 backup[0] = bcm43xx_radio_read16(bcm, 0x007A);
633 backup[1] = bcm43xx_radio_read16(bcm, 0x0052);
634 backup[2] = bcm43xx_radio_read16(bcm, 0x0043);
635 backup[3] = bcm43xx_phy_read(bcm, 0x0015);
636 backup[4] = bcm43xx_phy_read(bcm, 0x005A);
637 backup[5] = bcm43xx_phy_read(bcm, 0x0059);
638 backup[6] = bcm43xx_phy_read(bcm, 0x0058);
639 backup[8] = bcm43xx_read16(bcm, 0x03E6);
640 backup[9] = bcm43xx_read16(bcm, BCM43xx_MMIO_CHANNEL_EXT);
641 if (phy->rev >= 3) {
642 backup[10] = bcm43xx_phy_read(bcm, 0x002E);
643 backup[11] = bcm43xx_phy_read(bcm, 0x002F);
644 backup[12] = bcm43xx_phy_read(bcm, 0x080F);
645 backup[13] = bcm43xx_phy_read(bcm, BCM43xx_PHY_G_LO_CONTROL);
646 backup[14] = bcm43xx_phy_read(bcm, 0x0801);
647 backup[15] = bcm43xx_phy_read(bcm, 0x0060);
648 backup[16] = bcm43xx_phy_read(bcm, 0x0014);
649 backup[17] = bcm43xx_phy_read(bcm, 0x0478);
650 bcm43xx_phy_write(bcm, 0x002E, 0);
651 bcm43xx_phy_write(bcm, BCM43xx_PHY_G_LO_CONTROL, 0);
652 switch (phy->rev) {
653 case 4: case 6: case 7:
654 bcm43xx_phy_write(bcm, 0x0478,
655 bcm43xx_phy_read(bcm, 0x0478)
656 | 0x0100);
657 bcm43xx_phy_write(bcm, 0x0801,
658 bcm43xx_phy_read(bcm, 0x0801)
659 | 0x0040);
660 break;
661 case 3: case 5:
662 bcm43xx_phy_write(bcm, 0x0801,
663 bcm43xx_phy_read(bcm, 0x0801)
664 & 0xFFBF);
665 break;
666 }
667 bcm43xx_phy_write(bcm, 0x0060,
668 bcm43xx_phy_read(bcm, 0x0060)
669 | 0x0040);
670 bcm43xx_phy_write(bcm, 0x0014,
671 bcm43xx_phy_read(bcm, 0x0014)
672 | 0x0200);
673 }
674 bcm43xx_radio_write16(bcm, 0x007A,
675 bcm43xx_radio_read16(bcm, 0x007A) | 0x0070);
676 bcm43xx_set_all_gains(bcm, 0, 8, 0);
677 bcm43xx_radio_write16(bcm, 0x007A,
678 bcm43xx_radio_read16(bcm, 0x007A) & 0x00F7);
679 if (phy->rev >= 2) {
680 bcm43xx_phy_write(bcm, 0x0811,
681 (bcm43xx_phy_read(bcm, 0x0811) & 0xFFCF) | 0x0030);
682 bcm43xx_phy_write(bcm, 0x0812,
683 (bcm43xx_phy_read(bcm, 0x0812) & 0xFFCF) | 0x0010);
684 }
685 bcm43xx_radio_write16(bcm, 0x007A,
686 bcm43xx_radio_read16(bcm, 0x007A) | 0x0080);
687 udelay(20);
688
689 nrssi0 = (s16)((bcm43xx_phy_read(bcm, 0x047F) >> 8) & 0x003F);
690 if (nrssi0 >= 0x0020)
691 nrssi0 -= 0x0040;
692
693 bcm43xx_radio_write16(bcm, 0x007A,
694 bcm43xx_radio_read16(bcm, 0x007A) & 0x007F);
695 if (phy->analog >= 2) {
696 bcm43xx_phy_write(bcm, 0x0003,
697 (bcm43xx_phy_read(bcm, 0x0003)
698 & 0xFF9F) | 0x0040);
699 }
700
701 bcm43xx_write16(bcm, BCM43xx_MMIO_CHANNEL_EXT,
702 bcm43xx_read16(bcm, BCM43xx_MMIO_CHANNEL_EXT)
703 | 0x2000);
704 bcm43xx_radio_write16(bcm, 0x007A,
705 bcm43xx_radio_read16(bcm, 0x007A) | 0x000F);
706 bcm43xx_phy_write(bcm, 0x0015, 0xF330);
707 if (phy->rev >= 2) {
708 bcm43xx_phy_write(bcm, 0x0812,
709 (bcm43xx_phy_read(bcm, 0x0812) & 0xFFCF) | 0x0020);
710 bcm43xx_phy_write(bcm, 0x0811,
711 (bcm43xx_phy_read(bcm, 0x0811) & 0xFFCF) | 0x0020);
712 }
713
714 bcm43xx_set_all_gains(bcm, 3, 0, 1);
715 if (radio->revision == 8) {
716 bcm43xx_radio_write16(bcm, 0x0043, 0x001F);
717 } else {
718 tmp = bcm43xx_radio_read16(bcm, 0x0052) & 0xFF0F;
719 bcm43xx_radio_write16(bcm, 0x0052, tmp | 0x0060);
720 tmp = bcm43xx_radio_read16(bcm, 0x0043) & 0xFFF0;
721 bcm43xx_radio_write16(bcm, 0x0043, tmp | 0x0009);
722 }
723 bcm43xx_phy_write(bcm, 0x005A, 0x0480);
724 bcm43xx_phy_write(bcm, 0x0059, 0x0810);
725 bcm43xx_phy_write(bcm, 0x0058, 0x000D);
726 udelay(20);
727 nrssi1 = (s16)((bcm43xx_phy_read(bcm, 0x047F) >> 8) & 0x003F);
728 if (nrssi1 >= 0x0020)
729 nrssi1 -= 0x0040;
730 if (nrssi0 == nrssi1)
731 radio->nrssislope = 0x00010000;
732 else
733 radio->nrssislope = 0x00400000 / (nrssi0 - nrssi1);
734 if (nrssi0 >= -4) {
735 radio->nrssi[0] = nrssi1;
736 radio->nrssi[1] = nrssi0;
737 }
738 if (phy->rev >= 3) {
739 bcm43xx_phy_write(bcm, 0x002E, backup[10]);
740 bcm43xx_phy_write(bcm, 0x002F, backup[11]);
741 bcm43xx_phy_write(bcm, 0x080F, backup[12]);
742 bcm43xx_phy_write(bcm, BCM43xx_PHY_G_LO_CONTROL, backup[13]);
743 }
744 if (phy->rev >= 2) {
745 bcm43xx_phy_write(bcm, 0x0812,
746 bcm43xx_phy_read(bcm, 0x0812) & 0xFFCF);
747 bcm43xx_phy_write(bcm, 0x0811,
748 bcm43xx_phy_read(bcm, 0x0811) & 0xFFCF);
749 }
750
751 bcm43xx_radio_write16(bcm, 0x007A, backup[0]);
752 bcm43xx_radio_write16(bcm, 0x0052, backup[1]);
753 bcm43xx_radio_write16(bcm, 0x0043, backup[2]);
754 bcm43xx_write16(bcm, 0x03E2, backup[7]);
755 bcm43xx_write16(bcm, 0x03E6, backup[8]);
756 bcm43xx_write16(bcm, BCM43xx_MMIO_CHANNEL_EXT, backup[9]);
757 bcm43xx_phy_write(bcm, 0x0015, backup[3]);
758 bcm43xx_phy_write(bcm, 0x005A, backup[4]);
759 bcm43xx_phy_write(bcm, 0x0059, backup[5]);
760 bcm43xx_phy_write(bcm, 0x0058, backup[6]);
761 bcm43xx_synth_pu_workaround(bcm, radio->channel);
762 bcm43xx_phy_write(bcm, 0x0802,
763 bcm43xx_phy_read(bcm, 0x0802) | (0x0001 | 0x0002));
764 bcm43xx_set_original_gains(bcm);
765 bcm43xx_phy_write(bcm, BCM43xx_PHY_G_CRS,
766 bcm43xx_phy_read(bcm, BCM43xx_PHY_G_CRS) | 0x8000);
767 if (phy->rev >= 3) {
768 bcm43xx_phy_write(bcm, 0x0801, backup[14]);
769 bcm43xx_phy_write(bcm, 0x0060, backup[15]);
770 bcm43xx_phy_write(bcm, 0x0014, backup[16]);
771 bcm43xx_phy_write(bcm, 0x0478, backup[17]);
772 }
773 bcm43xx_nrssi_mem_update(bcm);
774 bcm43xx_calc_nrssi_threshold(bcm);
775 break;
776 default:
777 assert(0);
778 }
779}
780
781void bcm43xx_calc_nrssi_threshold(struct bcm43xx_private *bcm)
782{
783 struct bcm43xx_phyinfo *phy = bcm43xx_current_phy(bcm);
784 struct bcm43xx_radioinfo *radio = bcm43xx_current_radio(bcm);
785 s32 threshold;
786 s32 a, b;
787 s16 tmp16;
788 u16 tmp_u16;
789
790 switch (phy->type) {
791 case BCM43xx_PHYTYPE_B: {
792 if (radio->version != 0x2050)
793 return;
794 if (!(bcm->sprom.boardflags & BCM43xx_BFL_RSSI))
795 return;
796
797 if (radio->revision >= 6) {
798 threshold = (radio->nrssi[1] - radio->nrssi[0]) * 32;
799 threshold += 20 * (radio->nrssi[0] + 1);
800 threshold /= 40;
801 } else
802 threshold = radio->nrssi[1] - 5;
803
804 threshold = limit_value(threshold, 0, 0x3E);
805 bcm43xx_phy_read(bcm, 0x0020); /* dummy read */
806 bcm43xx_phy_write(bcm, 0x0020, (((u16)threshold) << 8) | 0x001C);
807
808 if (radio->revision >= 6) {
809 bcm43xx_phy_write(bcm, 0x0087, 0x0E0D);
810 bcm43xx_phy_write(bcm, 0x0086, 0x0C0B);
811 bcm43xx_phy_write(bcm, 0x0085, 0x0A09);
812 bcm43xx_phy_write(bcm, 0x0084, 0x0808);
813 bcm43xx_phy_write(bcm, 0x0083, 0x0808);
814 bcm43xx_phy_write(bcm, 0x0082, 0x0604);
815 bcm43xx_phy_write(bcm, 0x0081, 0x0302);
816 bcm43xx_phy_write(bcm, 0x0080, 0x0100);
817 }
818 break;
819 }
820 case BCM43xx_PHYTYPE_G:
821 if (!phy->connected ||
822 !(bcm->sprom.boardflags & BCM43xx_BFL_RSSI)) {
823 tmp16 = bcm43xx_nrssi_hw_read(bcm, 0x20);
824 if (tmp16 >= 0x20)
825 tmp16 -= 0x40;
826 if (tmp16 < 3) {
827 bcm43xx_phy_write(bcm, 0x048A,
828 (bcm43xx_phy_read(bcm, 0x048A)
829 & 0xF000) | 0x09EB);
830 } else {
831 bcm43xx_phy_write(bcm, 0x048A,
832 (bcm43xx_phy_read(bcm, 0x048A)
833 & 0xF000) | 0x0AED);
834 }
835 } else {
836 if (radio->interfmode == BCM43xx_RADIO_INTERFMODE_NONWLAN) {
837 a = 0xE;
838 b = 0xA;
839 } else if (!radio->aci_wlan_automatic && radio->aci_enable) {
840 a = 0x13;
841 b = 0x12;
842 } else {
843 a = 0xE;
844 b = 0x11;
845 }
846
847 a = a * (radio->nrssi[1] - radio->nrssi[0]);
848 a += (radio->nrssi[0] << 6);
849 if (a < 32)
850 a += 31;
851 else
852 a += 32;
853 a = a >> 6;
854 a = limit_value(a, -31, 31);
855
856 b = b * (radio->nrssi[1] - radio->nrssi[0]);
857 b += (radio->nrssi[0] << 6);
858 if (b < 32)
859 b += 31;
860 else
861 b += 32;
862 b = b >> 6;
863 b = limit_value(b, -31, 31);
864
865 tmp_u16 = bcm43xx_phy_read(bcm, 0x048A) & 0xF000;
866 tmp_u16 |= ((u32)b & 0x0000003F);
867 tmp_u16 |= (((u32)a & 0x0000003F) << 6);
868 bcm43xx_phy_write(bcm, 0x048A, tmp_u16);
869 }
870 break;
871 default:
872 assert(0);
873 }
874}
875
876/* Stack implementation to save/restore values from the
877 * interference mitigation code.
878 * It is safe to restore values in random order.
879 */
880static void _stack_save(u32 *_stackptr, size_t *stackidx,
881 u8 id, u16 offset, u16 value)
882{
883 u32 *stackptr = &(_stackptr[*stackidx]);
884
885 assert((offset & 0xE000) == 0x0000);
886 assert((id & 0xF8) == 0x00);
887 *stackptr = offset;
888 *stackptr |= ((u32)id) << 13;
889 *stackptr |= ((u32)value) << 16;
890 (*stackidx)++;
891 assert(*stackidx < BCM43xx_INTERFSTACK_SIZE);
892}
893
894static u16 _stack_restore(u32 *stackptr,
895 u8 id, u16 offset)
896{
897 size_t i;
898
899 assert((offset & 0xE000) == 0x0000);
900 assert((id & 0xF8) == 0x00);
901 for (i = 0; i < BCM43xx_INTERFSTACK_SIZE; i++, stackptr++) {
902 if ((*stackptr & 0x00001FFF) != offset)
903 continue;
904 if (((*stackptr & 0x00007000) >> 13) != id)
905 continue;
906 return ((*stackptr & 0xFFFF0000) >> 16);
907 }
908 assert(0);
909
910 return 0;
911}
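/* Editor's sketch (field widths inferred from the asserts above, not copied
 * from a spec): each save-stack entry packs the register value, a 3-bit id
 * and a 13-bit register offset into one u32 -- [31:16] value, [15:13] id,
 * [12:0] offset -- which is why entries can be looked up and restored in any
 * order. Standalone illustration of that layout:
 */
#include <assert.h>
#include <stdint.h>

static uint32_t pack_entry(uint8_t id, uint16_t offset, uint16_t value)
{
	assert((offset & 0xE000) == 0);	/* offset fits in 13 bits */
	assert((id & 0xF8) == 0);	/* id fits in 3 bits */
	return (uint32_t)offset | ((uint32_t)id << 13) | ((uint32_t)value << 16);
}

int main(void)
{
	uint32_t e = pack_entry(0x2 /* radio */, 0x0078, 0xBEEF);

	assert((e & 0x00001FFF) == 0x0078);	/* offset field */
	assert(((e >> 13) & 0x7) == 0x2);	/* id field */
	assert((e >> 16) == 0xBEEF);		/* saved register value */
	return 0;
}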
912
913#define phy_stacksave(offset) \
914 do { \
915 _stack_save(stack, &stackidx, 0x1, (offset), \
916 bcm43xx_phy_read(bcm, (offset))); \
917 } while (0)
918#define phy_stackrestore(offset) \
919 do { \
920 bcm43xx_phy_write(bcm, (offset), \
921 _stack_restore(stack, 0x1, \
922 (offset))); \
923 } while (0)
924#define radio_stacksave(offset) \
925 do { \
926 _stack_save(stack, &stackidx, 0x2, (offset), \
927 bcm43xx_radio_read16(bcm, (offset))); \
928 } while (0)
929#define radio_stackrestore(offset) \
930 do { \
931 bcm43xx_radio_write16(bcm, (offset), \
932 _stack_restore(stack, 0x2, \
933 (offset))); \
934 } while (0)
935#define ilt_stacksave(offset) \
936 do { \
937 _stack_save(stack, &stackidx, 0x3, (offset), \
938 bcm43xx_ilt_read(bcm, (offset))); \
939 } while (0)
940#define ilt_stackrestore(offset) \
941 do { \
942 bcm43xx_ilt_write(bcm, (offset), \
943 _stack_restore(stack, 0x3, \
944 (offset))); \
945 } while (0)
946
947static void
948bcm43xx_radio_interference_mitigation_enable(struct bcm43xx_private *bcm,
949 int mode)
950{
951 struct bcm43xx_phyinfo *phy = bcm43xx_current_phy(bcm);
952 struct bcm43xx_radioinfo *radio = bcm43xx_current_radio(bcm);
953 u16 tmp, flipped;
954 u32 tmp32;
955 size_t stackidx = 0;
956 u32 *stack = radio->interfstack;
957
958 switch (mode) {
959 case BCM43xx_RADIO_INTERFMODE_NONWLAN:
960 if (phy->rev != 1) {
961 bcm43xx_phy_write(bcm, 0x042B,
962 bcm43xx_phy_read(bcm, 0x042B) | 0x0800);
963 bcm43xx_phy_write(bcm, BCM43xx_PHY_G_CRS,
964 bcm43xx_phy_read(bcm, BCM43xx_PHY_G_CRS) & ~0x4000);
965 break;
966 }
967 radio_stacksave(0x0078);
968 tmp = (bcm43xx_radio_read16(bcm, 0x0078) & 0x001E);
969 flipped = flip_4bit(tmp);
970 if (flipped < 10 && flipped >= 8)
971 flipped = 7;
972 else if (flipped >= 10)
973 flipped -= 3;
974 flipped = flip_4bit(flipped);
975 flipped = (flipped << 1) | 0x0020;
976 bcm43xx_radio_write16(bcm, 0x0078, flipped);
977
978 bcm43xx_calc_nrssi_threshold(bcm);
979
980 phy_stacksave(0x0406);
981 bcm43xx_phy_write(bcm, 0x0406, 0x7E28);
982
983 bcm43xx_phy_write(bcm, 0x042B,
984 bcm43xx_phy_read(bcm, 0x042B) | 0x0800);
985 bcm43xx_phy_write(bcm, BCM43xx_PHY_RADIO_BITFIELD,
986 bcm43xx_phy_read(bcm, BCM43xx_PHY_RADIO_BITFIELD) | 0x1000);
987
988 phy_stacksave(0x04A0);
989 bcm43xx_phy_write(bcm, 0x04A0,
990 (bcm43xx_phy_read(bcm, 0x04A0) & 0xC0C0) | 0x0008);
991 phy_stacksave(0x04A1);
992 bcm43xx_phy_write(bcm, 0x04A1,
993 (bcm43xx_phy_read(bcm, 0x04A1) & 0xC0C0) | 0x0605);
994 phy_stacksave(0x04A2);
995 bcm43xx_phy_write(bcm, 0x04A2,
996 (bcm43xx_phy_read(bcm, 0x04A2) & 0xC0C0) | 0x0204);
997 phy_stacksave(0x04A8);
998 bcm43xx_phy_write(bcm, 0x04A8,
999 (bcm43xx_phy_read(bcm, 0x04A8) & 0xC0C0) | 0x0803);
1000 phy_stacksave(0x04AB);
1001 bcm43xx_phy_write(bcm, 0x04AB,
1002 (bcm43xx_phy_read(bcm, 0x04AB) & 0xC0C0) | 0x0605);
1003
1004 phy_stacksave(0x04A7);
1005 bcm43xx_phy_write(bcm, 0x04A7, 0x0002);
1006 phy_stacksave(0x04A3);
1007 bcm43xx_phy_write(bcm, 0x04A3, 0x287A);
1008 phy_stacksave(0x04A9);
1009 bcm43xx_phy_write(bcm, 0x04A9, 0x2027);
1010 phy_stacksave(0x0493);
1011 bcm43xx_phy_write(bcm, 0x0493, 0x32F5);
1012 phy_stacksave(0x04AA);
1013 bcm43xx_phy_write(bcm, 0x04AA, 0x2027);
1014 phy_stacksave(0x04AC);
1015 bcm43xx_phy_write(bcm, 0x04AC, 0x32F5);
1016 break;
1017 case BCM43xx_RADIO_INTERFMODE_MANUALWLAN:
1018 if (bcm43xx_phy_read(bcm, 0x0033) & 0x0800)
1019 break;
1020
1021 radio->aci_enable = 1;
1022
1023 phy_stacksave(BCM43xx_PHY_RADIO_BITFIELD);
1024 phy_stacksave(BCM43xx_PHY_G_CRS);
1025 if (phy->rev < 2) {
1026 phy_stacksave(0x0406);
1027 } else {
1028 phy_stacksave(0x04C0);
1029 phy_stacksave(0x04C1);
1030 }
1031 phy_stacksave(0x0033);
1032 phy_stacksave(0x04A7);
1033 phy_stacksave(0x04A3);
1034 phy_stacksave(0x04A9);
1035 phy_stacksave(0x04AA);
1036 phy_stacksave(0x04AC);
1037 phy_stacksave(0x0493);
1038 phy_stacksave(0x04A1);
1039 phy_stacksave(0x04A0);
1040 phy_stacksave(0x04A2);
1041 phy_stacksave(0x048A);
1042 phy_stacksave(0x04A8);
1043 phy_stacksave(0x04AB);
1044 if (phy->rev == 2) {
1045 phy_stacksave(0x04AD);
1046 phy_stacksave(0x04AE);
1047 } else if (phy->rev >= 3) {
1048 phy_stacksave(0x04AD);
1049 phy_stacksave(0x0415);
1050 phy_stacksave(0x0416);
1051 phy_stacksave(0x0417);
1052 ilt_stacksave(0x1A00 + 0x2);
1053 ilt_stacksave(0x1A00 + 0x3);
1054 }
1055 phy_stacksave(0x042B);
1056 phy_stacksave(0x048C);
1057
1058 bcm43xx_phy_write(bcm, BCM43xx_PHY_RADIO_BITFIELD,
1059 bcm43xx_phy_read(bcm, BCM43xx_PHY_RADIO_BITFIELD)
1060 & ~0x1000);
1061 bcm43xx_phy_write(bcm, BCM43xx_PHY_G_CRS,
1062 (bcm43xx_phy_read(bcm, BCM43xx_PHY_G_CRS)
1063 & 0xFFFC) | 0x0002);
1064
1065 bcm43xx_phy_write(bcm, 0x0033, 0x0800);
1066 bcm43xx_phy_write(bcm, 0x04A3, 0x2027);
1067 bcm43xx_phy_write(bcm, 0x04A9, 0x1CA8);
1068 bcm43xx_phy_write(bcm, 0x0493, 0x287A);
1069 bcm43xx_phy_write(bcm, 0x04AA, 0x1CA8);
1070 bcm43xx_phy_write(bcm, 0x04AC, 0x287A);
1071
1072 bcm43xx_phy_write(bcm, 0x04A0,
1073 (bcm43xx_phy_read(bcm, 0x04A0)
1074 & 0xFFC0) | 0x001A);
1075 bcm43xx_phy_write(bcm, 0x04A7, 0x000D);
1076
1077 if (phy->rev < 2) {
1078 bcm43xx_phy_write(bcm, 0x0406, 0xFF0D);
1079 } else if (phy->rev == 2) {
1080 bcm43xx_phy_write(bcm, 0x04C0, 0xFFFF);
1081 bcm43xx_phy_write(bcm, 0x04C1, 0x00A9);
1082 } else {
1083 bcm43xx_phy_write(bcm, 0x04C0, 0x00C1);
1084 bcm43xx_phy_write(bcm, 0x04C1, 0x0059);
1085 }
1086
1087 bcm43xx_phy_write(bcm, 0x04A1,
1088 (bcm43xx_phy_read(bcm, 0x04A1)
1089 & 0xC0FF) | 0x1800);
1090 bcm43xx_phy_write(bcm, 0x04A1,
1091 (bcm43xx_phy_read(bcm, 0x04A1)
1092 & 0xFFC0) | 0x0015);
1093 bcm43xx_phy_write(bcm, 0x04A8,
1094 (bcm43xx_phy_read(bcm, 0x04A8)
1095 & 0xCFFF) | 0x1000);
1096 bcm43xx_phy_write(bcm, 0x04A8,
1097 (bcm43xx_phy_read(bcm, 0x04A8)
1098 & 0xF0FF) | 0x0A00);
1099 bcm43xx_phy_write(bcm, 0x04AB,
1100 (bcm43xx_phy_read(bcm, 0x04AB)
1101 & 0xCFFF) | 0x1000);
1102 bcm43xx_phy_write(bcm, 0x04AB,
1103 (bcm43xx_phy_read(bcm, 0x04AB)
1104 & 0xF0FF) | 0x0800);
1105 bcm43xx_phy_write(bcm, 0x04AB,
1106 (bcm43xx_phy_read(bcm, 0x04AB)
1107 & 0xFFCF) | 0x0010);
1108 bcm43xx_phy_write(bcm, 0x04AB,
1109 (bcm43xx_phy_read(bcm, 0x04AB)
1110 & 0xFFF0) | 0x0005);
1111 bcm43xx_phy_write(bcm, 0x04A8,
1112 (bcm43xx_phy_read(bcm, 0x04A8)
1113 & 0xFFCF) | 0x0010);
1114 bcm43xx_phy_write(bcm, 0x04A8,
1115 (bcm43xx_phy_read(bcm, 0x04A8)
1116 & 0xFFF0) | 0x0006);
1117 bcm43xx_phy_write(bcm, 0x04A2,
1118 (bcm43xx_phy_read(bcm, 0x04A2)
1119 & 0xF0FF) | 0x0800);
1120 bcm43xx_phy_write(bcm, 0x04A0,
1121 (bcm43xx_phy_read(bcm, 0x04A0)
1122 & 0xF0FF) | 0x0500);
1123 bcm43xx_phy_write(bcm, 0x04A2,
1124 (bcm43xx_phy_read(bcm, 0x04A2)
1125 & 0xFFF0) | 0x000B);
1126
1127 if (phy->rev >= 3) {
1128 bcm43xx_phy_write(bcm, 0x048A,
1129 bcm43xx_phy_read(bcm, 0x048A)
1130 & ~0x8000);
1131 bcm43xx_phy_write(bcm, 0x0415,
1132 (bcm43xx_phy_read(bcm, 0x0415)
1133 & 0x8000) | 0x36D8);
1134 bcm43xx_phy_write(bcm, 0x0416,
1135 (bcm43xx_phy_read(bcm, 0x0416)
1136 & 0x8000) | 0x36D8);
1137 bcm43xx_phy_write(bcm, 0x0417,
1138 (bcm43xx_phy_read(bcm, 0x0417)
1139 & 0xFE00) | 0x016D);
1140 } else {
1141 bcm43xx_phy_write(bcm, 0x048A,
1142 bcm43xx_phy_read(bcm, 0x048A)
1143 | 0x1000);
1144 bcm43xx_phy_write(bcm, 0x048A,
1145 (bcm43xx_phy_read(bcm, 0x048A)
1146 & 0x9FFF) | 0x2000);
1147 tmp32 = bcm43xx_shm_read32(bcm, BCM43xx_SHM_SHARED,
1148 BCM43xx_UCODEFLAGS_OFFSET);
1149 if (!(tmp32 & 0x800)) {
1150 tmp32 |= 0x800;
1151 bcm43xx_shm_write32(bcm, BCM43xx_SHM_SHARED,
1152 BCM43xx_UCODEFLAGS_OFFSET,
1153 tmp32);
1154 }
1155 }
1156 if (phy->rev >= 2) {
1157 bcm43xx_phy_write(bcm, 0x042B,
1158 bcm43xx_phy_read(bcm, 0x042B)
1159 | 0x0800);
1160 }
1161 bcm43xx_phy_write(bcm, 0x048C,
1162 (bcm43xx_phy_read(bcm, 0x048C)
1163 & 0xF0FF) | 0x0200);
1164 if (phy->rev == 2) {
1165 bcm43xx_phy_write(bcm, 0x04AE,
1166 (bcm43xx_phy_read(bcm, 0x04AE)
1167 & 0xFF00) | 0x007F);
1168 bcm43xx_phy_write(bcm, 0x04AD,
1169 (bcm43xx_phy_read(bcm, 0x04AD)
1170 & 0x00FF) | 0x1300);
1171 } else if (phy->rev >= 6) {
1172 bcm43xx_ilt_write(bcm, 0x1A00 + 0x3, 0x007F);
1173 bcm43xx_ilt_write(bcm, 0x1A00 + 0x2, 0x007F);
1174 bcm43xx_phy_write(bcm, 0x04AD,
1175 bcm43xx_phy_read(bcm, 0x04AD)
1176 & 0x00FF);
1177 }
1178 bcm43xx_calc_nrssi_slope(bcm);
1179 break;
1180 default:
1181 assert(0);
1182 }
1183}
1184
1185static void
1186bcm43xx_radio_interference_mitigation_disable(struct bcm43xx_private *bcm,
1187 int mode)
1188{
1189 struct bcm43xx_phyinfo *phy = bcm43xx_current_phy(bcm);
1190 struct bcm43xx_radioinfo *radio = bcm43xx_current_radio(bcm);
1191 u32 tmp32;
1192 u32 *stack = radio->interfstack;
1193
1194 switch (mode) {
1195 case BCM43xx_RADIO_INTERFMODE_NONWLAN:
1196 if (phy->rev != 1) {
1197 bcm43xx_phy_write(bcm, 0x042B,
1198 bcm43xx_phy_read(bcm, 0x042B) & ~0x0800);
1199 bcm43xx_phy_write(bcm, BCM43xx_PHY_G_CRS,
1200 bcm43xx_phy_read(bcm, BCM43xx_PHY_G_CRS) | 0x4000);
1201 break;
1202 }
1203 phy_stackrestore(0x0078);
1204 bcm43xx_calc_nrssi_threshold(bcm);
1205 phy_stackrestore(0x0406);
1206 bcm43xx_phy_write(bcm, 0x042B,
1207 bcm43xx_phy_read(bcm, 0x042B) & ~0x0800);
1208 if (!bcm->bad_frames_preempt) {
1209 bcm43xx_phy_write(bcm, BCM43xx_PHY_RADIO_BITFIELD,
1210 bcm43xx_phy_read(bcm, BCM43xx_PHY_RADIO_BITFIELD)
1211 & ~(1 << 11));
1212 }
1213 bcm43xx_phy_write(bcm, BCM43xx_PHY_G_CRS,
1214 bcm43xx_phy_read(bcm, BCM43xx_PHY_G_CRS) | 0x4000);
1215 phy_stackrestore(0x04A0);
1216 phy_stackrestore(0x04A1);
1217 phy_stackrestore(0x04A2);
1218 phy_stackrestore(0x04A8);
1219 phy_stackrestore(0x04AB);
1220 phy_stackrestore(0x04A7);
1221 phy_stackrestore(0x04A3);
1222 phy_stackrestore(0x04A9);
1223 phy_stackrestore(0x0493);
1224 phy_stackrestore(0x04AA);
1225 phy_stackrestore(0x04AC);
1226 break;
1227 case BCM43xx_RADIO_INTERFMODE_MANUALWLAN:
1228 if (!(bcm43xx_phy_read(bcm, 0x0033) & 0x0800))
1229 break;
1230
1231 radio->aci_enable = 0;
1232
1233 phy_stackrestore(BCM43xx_PHY_RADIO_BITFIELD);
1234 phy_stackrestore(BCM43xx_PHY_G_CRS);
1235 phy_stackrestore(0x0033);
1236 phy_stackrestore(0x04A3);
1237 phy_stackrestore(0x04A9);
1238 phy_stackrestore(0x0493);
1239 phy_stackrestore(0x04AA);
1240 phy_stackrestore(0x04AC);
1241 phy_stackrestore(0x04A0);
1242 phy_stackrestore(0x04A7);
1243 if (phy->rev >= 2) {
1244 phy_stackrestore(0x04C0);
1245 phy_stackrestore(0x04C1);
1246 } else
1247 phy_stackrestore(0x0406);
1248 phy_stackrestore(0x04A1);
1249 phy_stackrestore(0x04AB);
1250 phy_stackrestore(0x04A8);
1251 if (phy->rev == 2) {
1252 phy_stackrestore(0x04AD);
1253 phy_stackrestore(0x04AE);
1254 } else if (phy->rev >= 3) {
1255 phy_stackrestore(0x04AD);
1256 phy_stackrestore(0x0415);
1257 phy_stackrestore(0x0416);
1258 phy_stackrestore(0x0417);
1259 ilt_stackrestore(0x1A00 + 0x2);
1260 ilt_stackrestore(0x1A00 + 0x3);
1261 }
1262 phy_stackrestore(0x04A2);
1263 phy_stackrestore(0x04A8);
1264 phy_stackrestore(0x042B);
1265 phy_stackrestore(0x048C);
1266 tmp32 = bcm43xx_shm_read32(bcm, BCM43xx_SHM_SHARED,
1267 BCM43xx_UCODEFLAGS_OFFSET);
1268 if (tmp32 & 0x800) {
1269 tmp32 &= ~0x800;
1270 bcm43xx_shm_write32(bcm, BCM43xx_SHM_SHARED,
1271 BCM43xx_UCODEFLAGS_OFFSET,
1272 tmp32);
1273 }
1274 bcm43xx_calc_nrssi_slope(bcm);
1275 break;
1276 default:
1277 assert(0);
1278 }
1279}
1280
1281#undef phy_stacksave
1282#undef phy_stackrestore
1283#undef radio_stacksave
1284#undef radio_stackrestore
1285#undef ilt_stacksave
1286#undef ilt_stackrestore
1287
1288int bcm43xx_radio_set_interference_mitigation(struct bcm43xx_private *bcm,
1289 int mode)
1290{
1291 struct bcm43xx_phyinfo *phy = bcm43xx_current_phy(bcm);
1292 struct bcm43xx_radioinfo *radio = bcm43xx_current_radio(bcm);
1293 int currentmode;
1294
1295 if ((phy->type != BCM43xx_PHYTYPE_G) ||
1296 (phy->rev == 0) ||
1297 (!phy->connected))
1298 return -ENODEV;
1299
1300 radio->aci_wlan_automatic = 0;
1301 switch (mode) {
1302 case BCM43xx_RADIO_INTERFMODE_AUTOWLAN:
1303 radio->aci_wlan_automatic = 1;
1304 if (radio->aci_enable)
1305 mode = BCM43xx_RADIO_INTERFMODE_MANUALWLAN;
1306 else
1307 mode = BCM43xx_RADIO_INTERFMODE_NONE;
1308 break;
1309 case BCM43xx_RADIO_INTERFMODE_NONE:
1310 case BCM43xx_RADIO_INTERFMODE_NONWLAN:
1311 case BCM43xx_RADIO_INTERFMODE_MANUALWLAN:
1312 break;
1313 default:
1314 return -EINVAL;
1315 }
1316
1317 currentmode = radio->interfmode;
1318 if (currentmode == mode)
1319 return 0;
1320 if (currentmode != BCM43xx_RADIO_INTERFMODE_NONE)
1321 bcm43xx_radio_interference_mitigation_disable(bcm, currentmode);
1322
1323 if (mode == BCM43xx_RADIO_INTERFMODE_NONE) {
1324 radio->aci_enable = 0;
1325 radio->aci_hw_rssi = 0;
1326 } else
1327 bcm43xx_radio_interference_mitigation_enable(bcm, mode);
1328 radio->interfmode = mode;
1329
1330 return 0;
1331}
1332
1333u16 bcm43xx_radio_calibrationvalue(struct bcm43xx_private *bcm)
1334{
1335 u16 reg, index, ret;
1336
1337 reg = bcm43xx_radio_read16(bcm, 0x0060);
1338 index = (reg & 0x001E) >> 1;
1339 ret = rcc_table[index] << 1;
1340 ret |= (reg & 0x0001);
1341 ret |= 0x0020;
1342
1343 return ret;
1344}
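/* Editor's worked example (illustrative only): the calibration value above is
 * built from bits 4:1 of radio register 0x60 (used as an rcc_table index),
 * keeps bit 0 of that register and forces bit 5. With a made-up register
 * reading of 0x0009:
 */
#include <assert.h>
#include <stdint.h>

static const uint16_t rcc_table[16] = {
	0x0002, 0x0003, 0x0001, 0x000F,
	0x0006, 0x0007, 0x0005, 0x000F,
	0x000A, 0x000B, 0x0009, 0x000F,
	0x000E, 0x000F, 0x000D, 0x000F,
};

int main(void)
{
	uint16_t reg = 0x0009;			/* hypothetical value of register 0x60 */
	uint16_t index = (reg & 0x001E) >> 1;	/* == 4 */
	uint16_t ret = (rcc_table[index] << 1) | (reg & 0x0001) | 0x0020;

	assert(index == 4);
	assert(ret == 0x002D);			/* (0x0006 << 1) | 0x0001 | 0x0020 */
	return 0;
}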
1345
1346#define LPD(L, P, D) (((L) << 2) | ((P) << 1) | ((D) << 0))
1347static u16 bcm43xx_get_812_value(struct bcm43xx_private *bcm, u8 lpd)
1348{
1349 struct bcm43xx_phyinfo *phy = bcm43xx_current_phy(bcm);
1350 struct bcm43xx_radioinfo *radio = bcm43xx_current_radio(bcm);
1351 u16 loop_or = 0;
1352 u16 adj_loopback_gain = phy->loopback_gain[0];
1353 u8 loop;
1354 u16 extern_lna_control;
1355
1356 if (!phy->connected)
1357 return 0;
1358 if (!has_loopback_gain(phy)) {
1359 if (phy->rev < 7 || !(bcm->sprom.boardflags
1360 & BCM43xx_BFL_EXTLNA)) {
1361 switch (lpd) {
1362 case LPD(0, 1, 1):
1363 return 0x0FB2;
1364 case LPD(0, 0, 1):
1365 return 0x00B2;
1366 case LPD(1, 0, 1):
1367 return 0x30B2;
1368 case LPD(1, 0, 0):
1369 return 0x30B3;
1370 default:
1371 assert(0);
1372 }
1373 } else {
1374 switch (lpd) {
1375 case LPD(0, 1, 1):
1376 return 0x8FB2;
1377 case LPD(0, 0, 1):
1378 return 0x80B2;
1379 case LPD(1, 0, 1):
1380 return 0x20B2;
1381 case LPD(1, 0, 0):
1382 return 0x20B3;
1383 default:
1384 assert(0);
1385 }
1386 }
1387 } else {
1388 if (radio->revision == 8)
1389 adj_loopback_gain += 0x003E;
1390 else
1391 adj_loopback_gain += 0x0026;
1392 if (adj_loopback_gain >= 0x46) {
1393 adj_loopback_gain -= 0x46;
1394 extern_lna_control = 0x3000;
1395 } else if (adj_loopback_gain >= 0x3A) {
1396 adj_loopback_gain -= 0x3A;
1397 extern_lna_control = 0x2000;
1398 } else if (adj_loopback_gain >= 0x2E) {
1399 adj_loopback_gain -= 0x2E;
1400 extern_lna_control = 0x1000;
1401 } else {
1402 adj_loopback_gain -= 0x10;
1403 extern_lna_control = 0x0000;
1404 }
1405 for (loop = 0; loop < 16; loop++) {
1406 u16 tmp = adj_loopback_gain - 6 * loop;
1407 if (tmp < 6)
1408 break;
1409 }
1410
1411 loop_or = (loop << 8) | extern_lna_control;
1412 if (phy->rev >= 7 && bcm->sprom.boardflags
1413 & BCM43xx_BFL_EXTLNA) {
1414 if (extern_lna_control)
1415 loop_or |= 0x8000;
1416 switch (lpd) {
1417 case LPD(0, 1, 1):
1418 return 0x8F92;
1419 case LPD(0, 0, 1):
1420 return (0x8092 | loop_or);
1421 case LPD(1, 0, 1):
1422 return (0x2092 | loop_or);
1423 case LPD(1, 0, 0):
1424 return (0x2093 | loop_or);
1425 default:
1426 assert(0);
1427 }
1428 } else {
1429 switch (lpd) {
1430 case LPD(0, 1, 1):
1431 return 0x0F92;
1432 case LPD(0, 0, 1):
1433 case LPD(1, 0, 1):
1434 return (0x0092 | loop_or);
1435 case LPD(1, 0, 0):
1436 return (0x0093 | loop_or);
1437 default:
1438 assert(0);
1439 }
1440 }
1441 }
1442 return 0;
1443}
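/* Editor's note (sketch, not from the driver): LPD(L, P, D) above simply
 * packs three flag bits into a 3-bit selector, so the case labels in
 * bcm43xx_get_812_value() are the values 0..7. Quick standalone check of
 * the encoding:
 */
#include <assert.h>

#define LPD(L, P, D) (((L) << 2) | ((P) << 1) | ((D) << 0))

int main(void)
{
	assert(LPD(0, 1, 1) == 3);
	assert(LPD(0, 0, 1) == 1);
	assert(LPD(1, 0, 1) == 5);
	assert(LPD(1, 0, 0) == 4);
	return 0;
}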
1444
1445u16 bcm43xx_radio_init2050(struct bcm43xx_private *bcm)
1446{
1447 struct bcm43xx_phyinfo *phy = bcm43xx_current_phy(bcm);
1448 struct bcm43xx_radioinfo *radio = bcm43xx_current_radio(bcm);
1449 u16 backup[21] = { 0 };
1450 u16 ret;
1451 u16 i, j;
1452 u32 tmp1 = 0, tmp2 = 0;
1453
1454 backup[0] = bcm43xx_radio_read16(bcm, 0x0043);
1455 backup[14] = bcm43xx_radio_read16(bcm, 0x0051);
1456 backup[15] = bcm43xx_radio_read16(bcm, 0x0052);
1457 backup[1] = bcm43xx_phy_read(bcm, 0x0015);
1458 backup[16] = bcm43xx_phy_read(bcm, 0x005A);
1459 backup[17] = bcm43xx_phy_read(bcm, 0x0059);
1460 backup[18] = bcm43xx_phy_read(bcm, 0x0058);
1461 if (phy->type == BCM43xx_PHYTYPE_B) {
1462 backup[2] = bcm43xx_phy_read(bcm, 0x0030);
1463 backup[3] = bcm43xx_read16(bcm, 0x03EC);
1464 bcm43xx_phy_write(bcm, 0x0030, 0x00FF);
1465 bcm43xx_write16(bcm, 0x03EC, 0x3F3F);
1466 } else {
1467 if (phy->connected) {
1468 backup[4] = bcm43xx_phy_read(bcm, 0x0811);
1469 backup[5] = bcm43xx_phy_read(bcm, 0x0812);
1470 backup[6] = bcm43xx_phy_read(bcm, 0x0814);
1471 backup[7] = bcm43xx_phy_read(bcm, 0x0815);
1472 backup[8] = bcm43xx_phy_read(bcm, BCM43xx_PHY_G_CRS);
1473 backup[9] = bcm43xx_phy_read(bcm, 0x0802);
1474 bcm43xx_phy_write(bcm, 0x0814,
1475 (bcm43xx_phy_read(bcm, 0x0814)
1476 | 0x0003));
1477 bcm43xx_phy_write(bcm, 0x0815,
1478 (bcm43xx_phy_read(bcm, 0x0815)
1479 & 0xFFFC));
1480 bcm43xx_phy_write(bcm, BCM43xx_PHY_G_CRS,
1481 (bcm43xx_phy_read(bcm, BCM43xx_PHY_G_CRS)
1482 & 0x7FFF));
1483 bcm43xx_phy_write(bcm, 0x0802,
1484 (bcm43xx_phy_read(bcm, 0x0802) & 0xFFFC));
1485 if (phy->rev > 1) { /* loopback gain enabled */
1486 backup[19] = bcm43xx_phy_read(bcm, 0x080F);
1487 backup[20] = bcm43xx_phy_read(bcm, 0x0810);
1488 if (phy->rev >= 3)
1489 bcm43xx_phy_write(bcm, 0x080F, 0xC020);
1490 else
1491 bcm43xx_phy_write(bcm, 0x080F, 0x8020);
1492 bcm43xx_phy_write(bcm, 0x0810, 0x0000);
1493 }
1494 bcm43xx_phy_write(bcm, 0x0812,
1495 bcm43xx_get_812_value(bcm, LPD(0, 1, 1)));
1496 if (phy->rev < 7 || !(bcm->sprom.boardflags
1497 & BCM43xx_BFL_EXTLNA))
1498 bcm43xx_phy_write(bcm, 0x0811, 0x01B3);
1499 else
1500 bcm43xx_phy_write(bcm, 0x0811, 0x09B3);
1501 }
1502 }
1503 bcm43xx_write16(bcm, BCM43xx_MMIO_PHY_RADIO,
1504 (bcm43xx_read16(bcm, BCM43xx_MMIO_PHY_RADIO) | 0x8000));
1505 backup[10] = bcm43xx_phy_read(bcm, 0x0035);
1506 bcm43xx_phy_write(bcm, 0x0035,
1507 (bcm43xx_phy_read(bcm, 0x0035) & 0xFF7F));
1508 backup[11] = bcm43xx_read16(bcm, 0x03E6);
1509 backup[12] = bcm43xx_read16(bcm, BCM43xx_MMIO_CHANNEL_EXT);
1510
1511 // Initialization
1512 if (phy->analog == 0) {
1513 bcm43xx_write16(bcm, 0x03E6, 0x0122);
1514 } else {
1515 if (phy->analog >= 2)
1516 bcm43xx_phy_write(bcm, 0x0003,
1517 (bcm43xx_phy_read(bcm, 0x0003)
1518 & 0xFFBF) | 0x0040);
1519 bcm43xx_write16(bcm, BCM43xx_MMIO_CHANNEL_EXT,
1520 (bcm43xx_read16(bcm, BCM43xx_MMIO_CHANNEL_EXT)
1521 | 0x2000));
1522 }
1523
1524 ret = bcm43xx_radio_calibrationvalue(bcm);
1525
1526 if (phy->type == BCM43xx_PHYTYPE_B)
1527 bcm43xx_radio_write16(bcm, 0x0078, 0x0026);
1528
1529 if (phy->connected)
1530 bcm43xx_phy_write(bcm, 0x0812,
1531 bcm43xx_get_812_value(bcm, LPD(0, 1, 1)));
1532 bcm43xx_phy_write(bcm, 0x0015, 0xBFAF);
1533 bcm43xx_phy_write(bcm, 0x002B, 0x1403);
1534 if (phy->connected)
1535 bcm43xx_phy_write(bcm, 0x0812,
1536 bcm43xx_get_812_value(bcm, LPD(0, 0, 1)));
1537 bcm43xx_phy_write(bcm, 0x0015, 0xBFA0);
1538 bcm43xx_radio_write16(bcm, 0x0051,
1539 (bcm43xx_radio_read16(bcm, 0x0051) | 0x0004));
1540 if (radio->revision == 8)
1541 bcm43xx_radio_write16(bcm, 0x0043, 0x001F);
1542 else {
1543 bcm43xx_radio_write16(bcm, 0x0052, 0x0000);
1544 bcm43xx_radio_write16(bcm, 0x0043,
1545 (bcm43xx_radio_read16(bcm, 0x0043) & 0xFFF0)
1546 | 0x0009);
1547 }
1548 bcm43xx_phy_write(bcm, 0x0058, 0x0000);
1549
1550 for (i = 0; i < 16; i++) {
1551 bcm43xx_phy_write(bcm, 0x005A, 0x0480);
1552 bcm43xx_phy_write(bcm, 0x0059, 0xC810);
1553 bcm43xx_phy_write(bcm, 0x0058, 0x000D);
1554 if (phy->connected)
1555 bcm43xx_phy_write(bcm, 0x0812,
1556 bcm43xx_get_812_value(bcm, LPD(1, 0, 1)));
1557 bcm43xx_phy_write(bcm, 0x0015, 0xAFB0);
1558 udelay(10);
1559 if (phy->connected)
1560 bcm43xx_phy_write(bcm, 0x0812,
1561 bcm43xx_get_812_value(bcm, LPD(1, 0, 1)));
1562 bcm43xx_phy_write(bcm, 0x0015, 0xEFB0);
1563 udelay(10);
1564 if (phy->connected)
1565 bcm43xx_phy_write(bcm, 0x0812,
1566 bcm43xx_get_812_value(bcm, LPD(1, 0, 0)));
1567 bcm43xx_phy_write(bcm, 0x0015, 0xFFF0);
1568 udelay(20);
1569 tmp1 += bcm43xx_phy_read(bcm, 0x002D);
1570 bcm43xx_phy_write(bcm, 0x0058, 0x0000);
1571 if (phy->connected)
1572 bcm43xx_phy_write(bcm, 0x0812,
1573 bcm43xx_get_812_value(bcm, LPD(1, 0, 1)));
1574 bcm43xx_phy_write(bcm, 0x0015, 0xAFB0);
1575 }
1576
1577 tmp1++;
1578 tmp1 >>= 9;
1579 udelay(10);
1580 bcm43xx_phy_write(bcm, 0x0058, 0x0000);
1581
1582 for (i = 0; i < 16; i++) {
1583 bcm43xx_radio_write16(bcm, 0x0078, (flip_4bit(i) << 1) | 0x0020);
1584 backup[13] = bcm43xx_radio_read16(bcm, 0x0078);
1585 udelay(10);
1586 for (j = 0; j < 16; j++) {
1587 bcm43xx_phy_write(bcm, 0x005A, 0x0D80);
1588 bcm43xx_phy_write(bcm, 0x0059, 0xC810);
1589 bcm43xx_phy_write(bcm, 0x0058, 0x000D);
1590 if (phy->connected)
1591 bcm43xx_phy_write(bcm, 0x0812,
1592 bcm43xx_get_812_value(bcm,
1593 LPD(1, 0, 1)));
1594 bcm43xx_phy_write(bcm, 0x0015, 0xAFB0);
1595 udelay(10);
1596 if (phy->connected)
1597 bcm43xx_phy_write(bcm, 0x0812,
1598 bcm43xx_get_812_value(bcm,
1599 LPD(1, 0, 1)));
1600 bcm43xx_phy_write(bcm, 0x0015, 0xEFB0);
1601 udelay(10);
1602 if (phy->connected)
1603 bcm43xx_phy_write(bcm, 0x0812,
1604 bcm43xx_get_812_value(bcm,
1605 LPD(1, 0, 0)));
1606 bcm43xx_phy_write(bcm, 0x0015, 0xFFF0);
1607 udelay(10);
1608 tmp2 += bcm43xx_phy_read(bcm, 0x002D);
1609 bcm43xx_phy_write(bcm, 0x0058, 0x0000);
1610 if (phy->connected)
1611 bcm43xx_phy_write(bcm, 0x0812,
1612 bcm43xx_get_812_value(bcm,
1613 LPD(1, 0, 1)));
1614 bcm43xx_phy_write(bcm, 0x0015, 0xAFB0);
1615 }
1616 tmp2++;
1617 tmp2 >>= 8;
1618 if (tmp1 < tmp2)
1619 break;
1620 }
1621
1622 /* Restore the registers */
1623 bcm43xx_phy_write(bcm, 0x0015, backup[1]);
1624 bcm43xx_radio_write16(bcm, 0x0051, backup[14]);
1625 bcm43xx_radio_write16(bcm, 0x0052, backup[15]);
1626 bcm43xx_radio_write16(bcm, 0x0043, backup[0]);
1627 bcm43xx_phy_write(bcm, 0x005A, backup[16]);
1628 bcm43xx_phy_write(bcm, 0x0059, backup[17]);
1629 bcm43xx_phy_write(bcm, 0x0058, backup[18]);
1630 bcm43xx_write16(bcm, 0x03E6, backup[11]);
1631 if (phy->analog != 0)
1632 bcm43xx_write16(bcm, BCM43xx_MMIO_CHANNEL_EXT, backup[12]);
1633 bcm43xx_phy_write(bcm, 0x0035, backup[10]);
1634 bcm43xx_radio_selectchannel(bcm, radio->channel, 1);
1635 if (phy->type == BCM43xx_PHYTYPE_B) {
1636 bcm43xx_phy_write(bcm, 0x0030, backup[2]);
1637 bcm43xx_write16(bcm, 0x03EC, backup[3]);
1638 } else {
1639 if (phy->connected) {
1640 bcm43xx_write16(bcm, BCM43xx_MMIO_PHY_RADIO,
1641 (bcm43xx_read16(bcm,
1642 BCM43xx_MMIO_PHY_RADIO) & 0x7FFF));
1643 bcm43xx_phy_write(bcm, 0x0811, backup[4]);
1644 bcm43xx_phy_write(bcm, 0x0812, backup[5]);
1645 bcm43xx_phy_write(bcm, 0x0814, backup[6]);
1646 bcm43xx_phy_write(bcm, 0x0815, backup[7]);
1647 bcm43xx_phy_write(bcm, BCM43xx_PHY_G_CRS, backup[8]);
1648 bcm43xx_phy_write(bcm, 0x0802, backup[9]);
1649 if (phy->rev > 1) {
1650 bcm43xx_phy_write(bcm, 0x080F, backup[19]);
1651 bcm43xx_phy_write(bcm, 0x0810, backup[20]);
1652 }
1653 }
1654 }
1655 if (i >= 15)
1656 ret = backup[13];
1657
1658 return ret;
1659}
1660
1661void bcm43xx_radio_init2060(struct bcm43xx_private *bcm)
1662{
1663 int err;
1664
1665 bcm43xx_radio_write16(bcm, 0x0004, 0x00C0);
1666 bcm43xx_radio_write16(bcm, 0x0005, 0x0008);
1667 bcm43xx_radio_write16(bcm, 0x0009, 0x0040);
1668 bcm43xx_radio_write16(bcm, 0x0005, 0x00AA);
1669 bcm43xx_radio_write16(bcm, 0x0032, 0x008F);
1670 bcm43xx_radio_write16(bcm, 0x0006, 0x008F);
1671 bcm43xx_radio_write16(bcm, 0x0034, 0x008F);
1672 bcm43xx_radio_write16(bcm, 0x002C, 0x0007);
1673 bcm43xx_radio_write16(bcm, 0x0082, 0x0080);
1674 bcm43xx_radio_write16(bcm, 0x0080, 0x0000);
1675 bcm43xx_radio_write16(bcm, 0x003F, 0x00DA);
1676 bcm43xx_radio_write16(bcm, 0x0005, bcm43xx_radio_read16(bcm, 0x0005) & ~0x0008);
1677 bcm43xx_radio_write16(bcm, 0x0081, bcm43xx_radio_read16(bcm, 0x0081) & ~0x0010);
1678 bcm43xx_radio_write16(bcm, 0x0081, bcm43xx_radio_read16(bcm, 0x0081) & ~0x0020);
1679 bcm43xx_radio_write16(bcm, 0x0081, bcm43xx_radio_read16(bcm, 0x0081) & ~0x0020);
1680 udelay(400);
1681
1682 bcm43xx_radio_write16(bcm, 0x0081, (bcm43xx_radio_read16(bcm, 0x0081) & ~0x0020) | 0x0010);
1683 udelay(400);
1684
1685 bcm43xx_radio_write16(bcm, 0x0005, (bcm43xx_radio_read16(bcm, 0x0005) & ~0x0008) | 0x0008);
1686 bcm43xx_radio_write16(bcm, 0x0085, bcm43xx_radio_read16(bcm, 0x0085) & ~0x0010);
1687 bcm43xx_radio_write16(bcm, 0x0005, bcm43xx_radio_read16(bcm, 0x0005) & ~0x0008);
1688 bcm43xx_radio_write16(bcm, 0x0081, bcm43xx_radio_read16(bcm, 0x0081) & ~0x0040);
1689 bcm43xx_radio_write16(bcm, 0x0081, (bcm43xx_radio_read16(bcm, 0x0081) & ~0x0040) | 0x0040);
1690 bcm43xx_radio_write16(bcm, 0x0005, (bcm43xx_radio_read16(bcm, 0x0081) & ~0x0008) | 0x0008);
1691 bcm43xx_phy_write(bcm, 0x0063, 0xDDC6);
1692 bcm43xx_phy_write(bcm, 0x0069, 0x07BE);
1693 bcm43xx_phy_write(bcm, 0x006A, 0x0000);
1694
1695 err = bcm43xx_radio_selectchannel(bcm, BCM43xx_RADIO_DEFAULT_CHANNEL_A, 0);
1696 assert(err == 0);
1697 udelay(1000);
1698}
1699
1700static inline
1701u16 freq_r3A_value(u16 frequency)
1702{
1703 u16 value;
1704
1705 if (frequency < 5091)
1706 value = 0x0040;
1707 else if (frequency < 5321)
1708 value = 0x0000;
1709 else if (frequency < 5806)
1710 value = 0x0080;
1711 else
1712 value = 0x0040;
1713
1714 return value;
1715}
1716
1717void bcm43xx_radio_set_tx_iq(struct bcm43xx_private *bcm)
1718{
1719 static const u8 data_high[5] = { 0x00, 0x40, 0x80, 0x90, 0xD0 };
1720 static const u8 data_low[5] = { 0x00, 0x01, 0x05, 0x06, 0x0A };
1721 u16 tmp = bcm43xx_radio_read16(bcm, 0x001E);
1722 int i, j;
1723
1724 for (i = 0; i < 5; i++) {
1725 for (j = 0; j < 5; j++) {
1726 if (tmp == (data_high[i] | data_low[j])) {
1727 bcm43xx_phy_write(bcm, 0x0069, (i - j) << 8 | 0x00C0);
1728 return;
1729 }
1730 }
1731 }
1732}
1733
1734int bcm43xx_radio_selectchannel(struct bcm43xx_private *bcm,
1735 u8 channel,
1736 int synthetic_pu_workaround)
1737{
1738 struct bcm43xx_radioinfo *radio = bcm43xx_current_radio(bcm);
1739 u16 r8, tmp;
1740 u16 freq;
1741
1742 if (!ieee80211_is_valid_channel(bcm->ieee, channel))
1743 return -EINVAL;
1744 if ((radio->manufact == 0x17F) &&
1745 (radio->version == 0x2060) &&
1746 (radio->revision == 1)) {
1747 freq = channel2freq_a(channel);
1748
1749 r8 = bcm43xx_radio_read16(bcm, 0x0008);
1750 bcm43xx_write16(bcm, 0x03F0, freq);
1751 bcm43xx_radio_write16(bcm, 0x0008, r8);
1752
1753 TODO();//TODO: write max channel TX power? to Radio 0x2D
1754 tmp = bcm43xx_radio_read16(bcm, 0x002E);
1755 tmp &= 0x0080;
1756 TODO();//TODO: OR tmp with the Power out estimation for this channel?
1757 bcm43xx_radio_write16(bcm, 0x002E, tmp);
1758
1759 if (freq >= 4920 && freq <= 5500) {
1760 /*
1761 * r8 = (((freq * 15 * 0xE1FC780F) >> 32) / 29) & 0x0F;
1762 * = (freq * 0.025862069
1763 */
1764 r8 = 3 * freq / 116; /* is equal to r8 = freq * 0.025862 */
1765 }
1766 bcm43xx_radio_write16(bcm, 0x0007, (r8 << 4) | r8);
1767 bcm43xx_radio_write16(bcm, 0x0020, (r8 << 4) | r8);
1768 bcm43xx_radio_write16(bcm, 0x0021, (r8 << 4) | r8);
1769 bcm43xx_radio_write16(bcm, 0x0022,
1770 (bcm43xx_radio_read16(bcm, 0x0022)
1771 & 0x000F) | (r8 << 4));
1772 bcm43xx_radio_write16(bcm, 0x002A, (r8 << 4));
1773 bcm43xx_radio_write16(bcm, 0x002B, (r8 << 4));
1774 bcm43xx_radio_write16(bcm, 0x0008,
1775 (bcm43xx_radio_read16(bcm, 0x0008)
1776 & 0x00F0) | (r8 << 4));
1777 bcm43xx_radio_write16(bcm, 0x0029,
1778 (bcm43xx_radio_read16(bcm, 0x0029)
1779 & 0xFF0F) | 0x00B0);
1780 bcm43xx_radio_write16(bcm, 0x0035, 0x00AA);
1781 bcm43xx_radio_write16(bcm, 0x0036, 0x0085);
1782 bcm43xx_radio_write16(bcm, 0x003A,
1783 (bcm43xx_radio_read16(bcm, 0x003A)
1784 & 0xFF20) | freq_r3A_value(freq));
1785 bcm43xx_radio_write16(bcm, 0x003D,
1786 bcm43xx_radio_read16(bcm, 0x003D) & 0x00FF);
1787 bcm43xx_radio_write16(bcm, 0x0081,
1788 (bcm43xx_radio_read16(bcm, 0x0081)
1789 & 0xFF7F) | 0x0080);
1790 bcm43xx_radio_write16(bcm, 0x0035,
1791 bcm43xx_radio_read16(bcm, 0x0035) & 0xFFEF);
1792 bcm43xx_radio_write16(bcm, 0x0035,
1793 (bcm43xx_radio_read16(bcm, 0x0035)
1794 & 0xFFEF) | 0x0010);
1795 bcm43xx_radio_set_tx_iq(bcm);
1796 TODO(); //TODO: TSSI2dbm workaround
1797 bcm43xx_phy_xmitpower(bcm);//FIXME correct?
1798 } else {
1799 if (synthetic_pu_workaround)
1800 bcm43xx_synth_pu_workaround(bcm, channel);
1801
1802 bcm43xx_write16(bcm, BCM43xx_MMIO_CHANNEL,
1803 channel2freq_bg(channel));
1804
1805 if (channel == 14) {
1806 if (bcm->sprom.locale == BCM43xx_LOCALE_JAPAN) {
1807 bcm43xx_shm_write32(bcm, BCM43xx_SHM_SHARED,
1808 BCM43xx_UCODEFLAGS_OFFSET,
1809 bcm43xx_shm_read32(bcm, BCM43xx_SHM_SHARED,
1810 BCM43xx_UCODEFLAGS_OFFSET)
1811 & ~(1 << 7));
1812 } else {
1813 bcm43xx_shm_write32(bcm, BCM43xx_SHM_SHARED,
1814 BCM43xx_UCODEFLAGS_OFFSET,
1815 bcm43xx_shm_read32(bcm, BCM43xx_SHM_SHARED,
1816 BCM43xx_UCODEFLAGS_OFFSET)
1817 | (1 << 7));
1818 }
1819 bcm43xx_write16(bcm, BCM43xx_MMIO_CHANNEL_EXT,
1820 bcm43xx_read16(bcm, BCM43xx_MMIO_CHANNEL_EXT)
1821 | (1 << 11));
1822 } else {
1823 bcm43xx_write16(bcm, BCM43xx_MMIO_CHANNEL_EXT,
1824 bcm43xx_read16(bcm, BCM43xx_MMIO_CHANNEL_EXT)
1825 & 0xF7BF);
1826 }
1827 }
1828
1829 radio->channel = channel;
1830	//XXX: Using the longer of 2 timeouts (8000 vs 2000 usecs). The specs state
1831 // that 2000 usecs might suffice.
1832 udelay(8000);
1833
1834 return 0;
1835}
1836
1837void bcm43xx_radio_set_txantenna(struct bcm43xx_private *bcm, u32 val)
1838{
1839 u16 tmp;
1840
1841 val <<= 8;
1842 tmp = bcm43xx_shm_read16(bcm, BCM43xx_SHM_SHARED, 0x0022) & 0xFCFF;
1843 bcm43xx_shm_write16(bcm, BCM43xx_SHM_SHARED, 0x0022, tmp | val);
1844 tmp = bcm43xx_shm_read16(bcm, BCM43xx_SHM_SHARED, 0x03A8) & 0xFCFF;
1845 bcm43xx_shm_write16(bcm, BCM43xx_SHM_SHARED, 0x03A8, tmp | val);
1846 tmp = bcm43xx_shm_read16(bcm, BCM43xx_SHM_SHARED, 0x0054) & 0xFCFF;
1847 bcm43xx_shm_write16(bcm, BCM43xx_SHM_SHARED, 0x0054, tmp | val);
1848}
1849
1850/* http://bcm-specs.sipsolutions.net/TX_Gain_Base_Band */
1851static u16 bcm43xx_get_txgain_base_band(u16 txpower)
1852{
1853 u16 ret;
1854
1855 assert(txpower <= 63);
1856
1857 if (txpower >= 54)
1858 ret = 2;
1859 else if (txpower >= 49)
1860 ret = 4;
1861 else if (txpower >= 44)
1862 ret = 5;
1863 else
1864 ret = 6;
1865
1866 return ret;
1867}
1868
1869/* http://bcm-specs.sipsolutions.net/TX_Gain_Radio_Frequency_Power_Amplifier */
1870static u16 bcm43xx_get_txgain_freq_power_amp(u16 txpower)
1871{
1872 u16 ret;
1873
1874 assert(txpower <= 63);
1875
1876 if (txpower >= 32)
1877 ret = 0;
1878 else if (txpower >= 25)
1879 ret = 1;
1880 else if (txpower >= 20)
1881 ret = 2;
1882 else if (txpower >= 12)
1883 ret = 3;
1884 else
1885 ret = 4;
1886
1887 return ret;
1888}
1889
1890/* http://bcm-specs.sipsolutions.net/TX_Gain_Digital_Analog_Converter */
1891static u16 bcm43xx_get_txgain_dac(u16 txpower)
1892{
1893 u16 ret;
1894
1895 assert(txpower <= 63);
1896
1897 if (txpower >= 54)
1898 ret = txpower - 53;
1899 else if (txpower >= 49)
1900 ret = txpower - 42;
1901 else if (txpower >= 44)
1902 ret = txpower - 37;
1903 else if (txpower >= 32)
1904 ret = txpower - 32;
1905 else if (txpower >= 25)
1906 ret = txpower - 20;
1907 else if (txpower >= 20)
1908 ret = txpower - 13;
1909 else if (txpower >= 12)
1910 ret = txpower - 8;
1911 else
1912 ret = txpower;
1913
1914 return ret;
1915}
1916
1917void bcm43xx_radio_set_txpower_a(struct bcm43xx_private *bcm, u16 txpower)
1918{
1919 struct bcm43xx_radioinfo *radio = bcm43xx_current_radio(bcm);
1920 u16 pamp, base, dac, ilt;
1921
1922 txpower = limit_value(txpower, 0, 63);
1923
1924 pamp = bcm43xx_get_txgain_freq_power_amp(txpower);
1925 pamp <<= 5;
1926 pamp &= 0x00E0;
1927 bcm43xx_phy_write(bcm, 0x0019, pamp);
1928
1929 base = bcm43xx_get_txgain_base_band(txpower);
1930 base &= 0x000F;
1931 bcm43xx_phy_write(bcm, 0x0017, base | 0x0020);
1932
1933 ilt = bcm43xx_ilt_read(bcm, 0x3001);
1934 ilt &= 0x0007;
1935
1936 dac = bcm43xx_get_txgain_dac(txpower);
1937 dac <<= 3;
1938 dac |= ilt;
1939
1940 bcm43xx_ilt_write(bcm, 0x3001, dac);
1941
1942 radio->txpwr_offset = txpower;
1943
1944 TODO();
1945 //TODO: FuncPlaceholder (Adjust BB loft cancel)
1946}
1947
1948void bcm43xx_radio_set_txpower_bg(struct bcm43xx_private *bcm,
1949 u16 baseband_attenuation, u16 radio_attenuation,
1950 u16 txpower)
1951{
1952 struct bcm43xx_radioinfo *radio = bcm43xx_current_radio(bcm);
1953 struct bcm43xx_phyinfo *phy = bcm43xx_current_phy(bcm);
1954
1955 if (baseband_attenuation == 0xFFFF)
1956 baseband_attenuation = radio->baseband_atten;
1957 if (radio_attenuation == 0xFFFF)
1958 radio_attenuation = radio->radio_atten;
1959 if (txpower == 0xFFFF)
1960 txpower = radio->txctl1;
1961 radio->baseband_atten = baseband_attenuation;
1962 radio->radio_atten = radio_attenuation;
1963 radio->txctl1 = txpower;
1964
1965 assert(/*baseband_attenuation >= 0 &&*/ baseband_attenuation <= 11);
1966 if (radio->revision < 6)
1967 assert(/*radio_attenuation >= 0 &&*/ radio_attenuation <= 9);
1968 else
1969 assert(/* radio_attenuation >= 0 &&*/ radio_attenuation <= 31);
1970 assert(/*txpower >= 0 &&*/ txpower <= 7);
1971
1972 bcm43xx_phy_set_baseband_attenuation(bcm, baseband_attenuation);
1973 bcm43xx_radio_write16(bcm, 0x0043, radio_attenuation);
1974 bcm43xx_shm_write16(bcm, BCM43xx_SHM_SHARED, 0x0064, radio_attenuation);
1975 if (radio->version == 0x2050) {
1976 bcm43xx_radio_write16(bcm, 0x0052,
1977 (bcm43xx_radio_read16(bcm, 0x0052) & ~0x0070)
1978 | ((txpower << 4) & 0x0070));
1979 }
1980 //FIXME: The spec is very weird and unclear here.
1981 if (phy->type == BCM43xx_PHYTYPE_G)
1982 bcm43xx_phy_lo_adjust(bcm, 0);
1983}
1984
1985u16 bcm43xx_default_baseband_attenuation(struct bcm43xx_private *bcm)
1986{
1987 struct bcm43xx_radioinfo *radio = bcm43xx_current_radio(bcm);
1988
1989 if (radio->version == 0x2050 && radio->revision < 6)
1990 return 0;
1991 return 2;
1992}
1993
1994u16 bcm43xx_default_radio_attenuation(struct bcm43xx_private *bcm)
1995{
1996 struct bcm43xx_phyinfo *phy = bcm43xx_current_phy(bcm);
1997 struct bcm43xx_radioinfo *radio = bcm43xx_current_radio(bcm);
1998 u16 att = 0xFFFF;
1999
2000 if (phy->type == BCM43xx_PHYTYPE_A)
2001 return 0x60;
2002
2003 switch (radio->version) {
2004 case 0x2053:
2005 switch (radio->revision) {
2006 case 1:
2007 att = 6;
2008 break;
2009 }
2010 break;
2011 case 0x2050:
2012 switch (radio->revision) {
2013 case 0:
2014 att = 5;
2015 break;
2016 case 1:
2017 if (phy->type == BCM43xx_PHYTYPE_G) {
2018 if (bcm->board_vendor == PCI_VENDOR_ID_BROADCOM &&
2019 bcm->board_type == 0x421 &&
2020 bcm->board_revision >= 30)
2021 att = 3;
2022 else if (bcm->board_vendor == PCI_VENDOR_ID_BROADCOM &&
2023 bcm->board_type == 0x416)
2024 att = 3;
2025 else
2026 att = 1;
2027 } else {
2028 if (bcm->board_vendor == PCI_VENDOR_ID_BROADCOM &&
2029 bcm->board_type == 0x421 &&
2030 bcm->board_revision >= 30)
2031 att = 7;
2032 else
2033 att = 6;
2034 }
2035 break;
2036 case 2:
2037 if (phy->type == BCM43xx_PHYTYPE_G) {
2038 if (bcm->board_vendor == PCI_VENDOR_ID_BROADCOM &&
2039 bcm->board_type == 0x421 &&
2040 bcm->board_revision >= 30)
2041 att = 3;
2042 else if (bcm->board_vendor == PCI_VENDOR_ID_BROADCOM &&
2043 bcm->board_type == 0x416)
2044 att = 5;
2045 else if (bcm->chip_id == 0x4320)
2046 att = 4;
2047 else
2048 att = 3;
2049 } else
2050 att = 6;
2051 break;
2052 case 3:
2053 att = 5;
2054 break;
2055 case 4:
2056 case 5:
2057 att = 1;
2058 break;
2059 case 6:
2060 case 7:
2061 att = 5;
2062 break;
2063 case 8:
2064 att = 0x1A;
2065 break;
2066 case 9:
2067 default:
2068 att = 5;
2069 }
2070 }
2071 if (bcm->board_vendor == PCI_VENDOR_ID_BROADCOM &&
2072 bcm->board_type == 0x421) {
2073 if (bcm->board_revision < 0x43)
2074 att = 2;
2075 else if (bcm->board_revision < 0x51)
2076 att = 3;
2077 }
2078 if (att == 0xFFFF)
2079 att = 5;
2080
2081 return att;
2082}
2083
2084u16 bcm43xx_default_txctl1(struct bcm43xx_private *bcm)
2085{
2086 struct bcm43xx_radioinfo *radio = bcm43xx_current_radio(bcm);
2087
2088 if (radio->version != 0x2050)
2089 return 0;
2090 if (radio->revision == 1)
2091 return 3;
2092 if (radio->revision < 6)
2093 return 2;
2094 if (radio->revision == 8)
2095 return 1;
2096 return 0;
2097}
2098
2099void bcm43xx_radio_turn_on(struct bcm43xx_private *bcm)
2100{
2101 struct bcm43xx_phyinfo *phy = bcm43xx_current_phy(bcm);
2102 struct bcm43xx_radioinfo *radio = bcm43xx_current_radio(bcm);
2103 int err;
2104
2105 if (radio->enabled)
2106 return;
2107
2108 switch (phy->type) {
2109 case BCM43xx_PHYTYPE_A:
2110 bcm43xx_radio_write16(bcm, 0x0004, 0x00C0);
2111 bcm43xx_radio_write16(bcm, 0x0005, 0x0008);
2112 bcm43xx_phy_write(bcm, 0x0010, bcm43xx_phy_read(bcm, 0x0010) & 0xFFF7);
2113 bcm43xx_phy_write(bcm, 0x0011, bcm43xx_phy_read(bcm, 0x0011) & 0xFFF7);
2114 bcm43xx_radio_init2060(bcm);
2115 break;
2116 case BCM43xx_PHYTYPE_B:
2117 case BCM43xx_PHYTYPE_G:
2118 bcm43xx_phy_write(bcm, 0x0015, 0x8000);
2119 bcm43xx_phy_write(bcm, 0x0015, 0xCC00);
2120 bcm43xx_phy_write(bcm, 0x0015, (phy->connected ? 0x00C0 : 0x0000));
2121 err = bcm43xx_radio_selectchannel(bcm, BCM43xx_RADIO_DEFAULT_CHANNEL_BG, 1);
2122 assert(err == 0);
2123 break;
2124 default:
2125 assert(0);
2126 }
2127 radio->enabled = 1;
2128 dprintk(KERN_INFO PFX "Radio turned on\n");
2129 bcm43xx_leds_update(bcm, 0);
2130}
2131
2132void bcm43xx_radio_turn_off(struct bcm43xx_private *bcm)
2133{
2134 struct bcm43xx_phyinfo *phy = bcm43xx_current_phy(bcm);
2135 struct bcm43xx_radioinfo *radio = bcm43xx_current_radio(bcm);
2136
2137 if (phy->type == BCM43xx_PHYTYPE_A) {
2138 bcm43xx_radio_write16(bcm, 0x0004, 0x00FF);
2139 bcm43xx_radio_write16(bcm, 0x0005, 0x00FB);
2140 bcm43xx_phy_write(bcm, 0x0010, bcm43xx_phy_read(bcm, 0x0010) | 0x0008);
2141 bcm43xx_phy_write(bcm, 0x0011, bcm43xx_phy_read(bcm, 0x0011) | 0x0008);
2142 }
2143 if (phy->type == BCM43xx_PHYTYPE_G && bcm->current_core->rev >= 5) {
2144 bcm43xx_phy_write(bcm, 0x0811, bcm43xx_phy_read(bcm, 0x0811) | 0x008C);
2145 bcm43xx_phy_write(bcm, 0x0812, bcm43xx_phy_read(bcm, 0x0812) & 0xFF73);
2146 } else
2147 bcm43xx_phy_write(bcm, 0x0015, 0xAA00);
2148 radio->enabled = 0;
2149	dprintk(KERN_INFO PFX "Radio turned off\n");
2150 bcm43xx_leds_update(bcm, 0);
2151}
2152
2153void bcm43xx_radio_clear_tssi(struct bcm43xx_private *bcm)
2154{
2155 struct bcm43xx_phyinfo *phy = bcm43xx_current_phy(bcm);
2156
2157 switch (phy->type) {
2158 case BCM43xx_PHYTYPE_A:
2159 bcm43xx_shm_write16(bcm, BCM43xx_SHM_SHARED, 0x0068, 0x7F7F);
2160 bcm43xx_shm_write16(bcm, BCM43xx_SHM_SHARED, 0x006a, 0x7F7F);
2161 break;
2162 case BCM43xx_PHYTYPE_B:
2163 case BCM43xx_PHYTYPE_G:
2164 bcm43xx_shm_write16(bcm, BCM43xx_SHM_SHARED, 0x0058, 0x7F7F);
2165 bcm43xx_shm_write16(bcm, BCM43xx_SHM_SHARED, 0x005a, 0x7F7F);
2166 bcm43xx_shm_write16(bcm, BCM43xx_SHM_SHARED, 0x0070, 0x7F7F);
2167 bcm43xx_shm_write16(bcm, BCM43xx_SHM_SHARED, 0x0072, 0x7F7F);
2168 break;
2169 }
2170}
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_radio.h b/drivers/net/wireless/bcm43xx/bcm43xx_radio.h
deleted file mode 100644
index 77a98a53a2e2..000000000000
--- a/drivers/net/wireless/bcm43xx/bcm43xx_radio.h
+++ /dev/null
@@ -1,115 +0,0 @@
1/*
2
3 Broadcom BCM43xx wireless driver
4
5 Copyright (c) 2005 Martin Langer <martin-langer@gmx.de>,
6 Stefano Brivio <st3@riseup.net>
7 Michael Buesch <mbuesch@freenet.de>
8 Danny van Dyk <kugelfang@gentoo.org>
9 Andreas Jaggi <andreas.jaggi@waterwave.ch>
10
11 Some parts of the code in this file are derived from the ipw2200
12 driver Copyright(c) 2003 - 2004 Intel Corporation.
13
14 This program is free software; you can redistribute it and/or modify
15 it under the terms of the GNU General Public License as published by
16 the Free Software Foundation; either version 2 of the License, or
17 (at your option) any later version.
18
19 This program is distributed in the hope that it will be useful,
20 but WITHOUT ANY WARRANTY; without even the implied warranty of
21 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 GNU General Public License for more details.
23
24 You should have received a copy of the GNU General Public License
25 along with this program; see the file COPYING. If not, write to
26    the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
27 Boston, MA 02110-1301, USA.
28
29*/
30
31#ifndef BCM43xx_RADIO_H_
32#define BCM43xx_RADIO_H_
33
34#include "bcm43xx.h"
35
36
37#define BCM43xx_RADIO_DEFAULT_CHANNEL_A 36
38#define BCM43xx_RADIO_DEFAULT_CHANNEL_BG 6
39
40/* Force antenna 0. */
41#define BCM43xx_RADIO_TXANTENNA_0 0
42/* Force antenna 1. */
43#define BCM43xx_RADIO_TXANTENNA_1 1
44/* Use the RX antenna, that was selected for the most recently
45 * received good PLCP header.
46 */
47#define BCM43xx_RADIO_TXANTENNA_LASTPLCP 3
48#define BCM43xx_RADIO_TXANTENNA_DEFAULT BCM43xx_RADIO_TXANTENNA_LASTPLCP
49
50#define BCM43xx_RADIO_INTERFMODE_NONE 0
51#define BCM43xx_RADIO_INTERFMODE_NONWLAN 1
52#define BCM43xx_RADIO_INTERFMODE_MANUALWLAN 2
53#define BCM43xx_RADIO_INTERFMODE_AUTOWLAN 3
54
55
56void bcm43xx_radio_lock(struct bcm43xx_private *bcm);
57void bcm43xx_radio_unlock(struct bcm43xx_private *bcm);
58
59u16 bcm43xx_radio_read16(struct bcm43xx_private *bcm, u16 offset);
60void bcm43xx_radio_write16(struct bcm43xx_private *bcm, u16 offset, u16 val);
61
62u16 bcm43xx_radio_init2050(struct bcm43xx_private *bcm);
63void bcm43xx_radio_init2060(struct bcm43xx_private *bcm);
64
65void bcm43xx_radio_turn_on(struct bcm43xx_private *bcm);
66void bcm43xx_radio_turn_off(struct bcm43xx_private *bcm);
67
68static inline
69int bcm43xx_is_hw_radio_enabled(struct bcm43xx_private *bcm)
70{
71 /* function to return state of hardware enable of radio
72 * returns 0 if radio disabled, 1 if radio enabled
73 */
74 if (bcm->current_core->rev >= 3)
75 return ((bcm43xx_read32(bcm, BCM43xx_MMIO_RADIO_HWENABLED_HI)
76 & BCM43xx_MMIO_RADIO_HWENABLED_HI_MASK)
77 == 0) ? 1 : 0;
78 else
79 return ((bcm43xx_read16(bcm, BCM43xx_MMIO_RADIO_HWENABLED_LO)
80 & BCM43xx_MMIO_RADIO_HWENABLED_LO_MASK)
81 == 0) ? 0 : 1;
82}
83
84int bcm43xx_radio_selectchannel(struct bcm43xx_private *bcm, u8 channel,
85 int synthetic_pu_workaround);
86
87void bcm43xx_radio_set_txpower_a(struct bcm43xx_private *bcm, u16 txpower);
88void bcm43xx_radio_set_txpower_bg(struct bcm43xx_private *bcm,
89 u16 baseband_attenuation, u16 attenuation,
90 u16 txpower);
91
92u16 bcm43xx_default_baseband_attenuation(struct bcm43xx_private *bcm);
93u16 bcm43xx_default_radio_attenuation(struct bcm43xx_private *bcm);
94u16 bcm43xx_default_txctl1(struct bcm43xx_private *bcm);
95
96void bcm43xx_radio_set_txantenna(struct bcm43xx_private *bcm, u32 val);
97
98void bcm43xx_radio_clear_tssi(struct bcm43xx_private *bcm);
99
100u8 bcm43xx_radio_aci_detect(struct bcm43xx_private *bcm, u8 channel);
101u8 bcm43xx_radio_aci_scan(struct bcm43xx_private *bcm);
102
103int bcm43xx_radio_set_interference_mitigation(struct bcm43xx_private *bcm, int mode);
104
105void bcm43xx_calc_nrssi_slope(struct bcm43xx_private *bcm);
106void bcm43xx_calc_nrssi_threshold(struct bcm43xx_private *bcm);
107s16 bcm43xx_nrssi_hw_read(struct bcm43xx_private *bcm, u16 offset);
108void bcm43xx_nrssi_hw_write(struct bcm43xx_private *bcm, u16 offset, s16 val);
109void bcm43xx_nrssi_hw_update(struct bcm43xx_private *bcm, u16 val);
110void bcm43xx_nrssi_mem_update(struct bcm43xx_private *bcm);
111
112void bcm43xx_radio_set_tx_iq(struct bcm43xx_private *bcm);
113u16 bcm43xx_radio_calibrationvalue(struct bcm43xx_private *bcm);
114
115#endif /* BCM43xx_RADIO_H_ */
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_sysfs.c b/drivers/net/wireless/bcm43xx/bcm43xx_sysfs.c
deleted file mode 100644
index 8ab5f93d192a..000000000000
--- a/drivers/net/wireless/bcm43xx/bcm43xx_sysfs.c
+++ /dev/null
@@ -1,471 +0,0 @@
1/*
2
3 Broadcom BCM43xx wireless driver
4
5 SYSFS support routines
6
7 Copyright (c) 2006 Michael Buesch <mbuesch@freenet.de>
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 2 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program; see the file COPYING. If not, write to
21    the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
22 Boston, MA 02110-1301, USA.
23
24*/
25
26#include "bcm43xx_sysfs.h"
27#include "bcm43xx.h"
28#include "bcm43xx_main.h"
29#include "bcm43xx_radio.h"
30
31#include <linux/capability.h>
32
33
34#define GENERIC_FILESIZE 64
35
36
37static int get_integer(const char *buf, size_t count)
38{
39 char tmp[10 + 1] = { 0 };
40 int ret = -EINVAL;
41
42 if (count == 0)
43 goto out;
44 count = min(count, (size_t)10);
45 memcpy(tmp, buf, count);
46 ret = simple_strtol(tmp, NULL, 10);
47out:
48 return ret;
49}
50
51static int get_boolean(const char *buf, size_t count)
52{
53 if (count != 0) {
54 if (buf[0] == '1')
55 return 1;
56 if (buf[0] == '0')
57 return 0;
58 if (count >= 4 && memcmp(buf, "true", 4) == 0)
59 return 1;
60 if (count >= 5 && memcmp(buf, "false", 5) == 0)
61 return 0;
62 if (count >= 3 && memcmp(buf, "yes", 3) == 0)
63 return 1;
64 if (count >= 2 && memcmp(buf, "no", 2) == 0)
65 return 0;
66 if (count >= 2 && memcmp(buf, "on", 2) == 0)
67 return 1;
68 if (count >= 3 && memcmp(buf, "off", 3) == 0)
69 return 0;
70 }
71 return -EINVAL;
72}
73
74static int sprom2hex(const u16 *sprom, char *buf, size_t buf_len)
75{
76 int i, pos = 0;
77
78 for (i = 0; i < BCM43xx_SPROM_SIZE; i++) {
79 pos += snprintf(buf + pos, buf_len - pos - 1,
80 "%04X", swab16(sprom[i]) & 0xFFFF);
81 }
82 pos += snprintf(buf + pos, buf_len - pos - 1, "\n");
83
84 return pos + 1;
85}
86
87static int hex2sprom(u16 *sprom, const char *dump, size_t len)
88{
89 char tmp[5] = { 0 };
90 int cnt = 0;
91 unsigned long parsed;
92
93 if (len < BCM43xx_SPROM_SIZE * sizeof(u16) * 2)
94 return -EINVAL;
95
96 while (cnt < BCM43xx_SPROM_SIZE) {
97 memcpy(tmp, dump, 4);
98 dump += 4;
99 parsed = simple_strtoul(tmp, NULL, 16);
100 sprom[cnt++] = swab16((u16)parsed);
101 }
102
103 return 0;
104}
105
106static ssize_t bcm43xx_attr_sprom_show(struct device *dev,
107 struct device_attribute *attr,
108 char *buf)
109{
110 struct bcm43xx_private *bcm = dev_to_bcm(dev);
111 u16 *sprom;
112 unsigned long flags;
113 int err;
114
115 if (!capable(CAP_NET_ADMIN))
116 return -EPERM;
117
118 assert(BCM43xx_SPROM_SIZE * sizeof(u16) <= PAGE_SIZE);
119 sprom = kmalloc(BCM43xx_SPROM_SIZE * sizeof(*sprom),
120 GFP_KERNEL);
121 if (!sprom)
122 return -ENOMEM;
123 mutex_lock(&bcm->mutex);
124 spin_lock_irqsave(&bcm->irq_lock, flags);
125 err = bcm43xx_sprom_read(bcm, sprom);
126 if (!err)
127 err = sprom2hex(sprom, buf, PAGE_SIZE);
128 mmiowb();
129 spin_unlock_irqrestore(&bcm->irq_lock, flags);
130 mutex_unlock(&bcm->mutex);
131 kfree(sprom);
132
133 return err;
134}
135
136static ssize_t bcm43xx_attr_sprom_store(struct device *dev,
137 struct device_attribute *attr,
138 const char *buf, size_t count)
139{
140 struct bcm43xx_private *bcm = dev_to_bcm(dev);
141 u16 *sprom;
142 unsigned long flags;
143 int err;
144
145 if (!capable(CAP_NET_ADMIN))
146 return -EPERM;
147
148 sprom = kmalloc(BCM43xx_SPROM_SIZE * sizeof(*sprom),
149 GFP_KERNEL);
150 if (!sprom)
151 return -ENOMEM;
152 err = hex2sprom(sprom, buf, count);
153 if (err)
154 goto out_kfree;
155 mutex_lock(&bcm->mutex);
156 spin_lock_irqsave(&bcm->irq_lock, flags);
157 spin_lock(&bcm->leds_lock);
158 err = bcm43xx_sprom_write(bcm, sprom);
159 mmiowb();
160 spin_unlock(&bcm->leds_lock);
161 spin_unlock_irqrestore(&bcm->irq_lock, flags);
162 mutex_unlock(&bcm->mutex);
163out_kfree:
164 kfree(sprom);
165
166 return err ? err : count;
167
168}
169
170static DEVICE_ATTR(sprom, 0600,
171 bcm43xx_attr_sprom_show,
172 bcm43xx_attr_sprom_store);
173
174static ssize_t bcm43xx_attr_interfmode_show(struct device *dev,
175 struct device_attribute *attr,
176 char *buf)
177{
178 struct bcm43xx_private *bcm = dev_to_bcm(dev);
179 ssize_t count = 0;
180
181 if (!capable(CAP_NET_ADMIN))
182 return -EPERM;
183
184 mutex_lock(&bcm->mutex);
185
186 switch (bcm43xx_current_radio(bcm)->interfmode) {
187 case BCM43xx_RADIO_INTERFMODE_NONE:
188 count = snprintf(buf, PAGE_SIZE, "0 (No Interference Mitigation)\n");
189 break;
190 case BCM43xx_RADIO_INTERFMODE_NONWLAN:
191 count = snprintf(buf, PAGE_SIZE, "1 (Non-WLAN Interference Mitigation)\n");
192 break;
193 case BCM43xx_RADIO_INTERFMODE_MANUALWLAN:
194 count = snprintf(buf, PAGE_SIZE, "2 (WLAN Interference Mitigation)\n");
195 break;
196 default:
197 assert(0);
198 }
199
200 mutex_unlock(&bcm->mutex);
201
202 return count;
203
204}
205
206static ssize_t bcm43xx_attr_interfmode_store(struct device *dev,
207 struct device_attribute *attr,
208 const char *buf, size_t count)
209{
210 struct bcm43xx_private *bcm = dev_to_bcm(dev);
211 unsigned long flags;
212 int err;
213 int mode;
214
215 if (!capable(CAP_NET_ADMIN))
216 return -EPERM;
217
218 mode = get_integer(buf, count);
219 switch (mode) {
220 case 0:
221 mode = BCM43xx_RADIO_INTERFMODE_NONE;
222 break;
223 case 1:
224 mode = BCM43xx_RADIO_INTERFMODE_NONWLAN;
225 break;
226 case 2:
227 mode = BCM43xx_RADIO_INTERFMODE_MANUALWLAN;
228 break;
229 case 3:
230 mode = BCM43xx_RADIO_INTERFMODE_AUTOWLAN;
231 break;
232 default:
233 return -EINVAL;
234 }
235
236 mutex_lock(&bcm->mutex);
237 spin_lock_irqsave(&bcm->irq_lock, flags);
238
239 err = bcm43xx_radio_set_interference_mitigation(bcm, mode);
240 if (err) {
241 printk(KERN_ERR PFX "Interference Mitigation not "
242 "supported by device\n");
243 }
244 mmiowb();
245 spin_unlock_irqrestore(&bcm->irq_lock, flags);
246 mutex_unlock(&bcm->mutex);
247
248 return err ? err : count;
249}
250
251static DEVICE_ATTR(interference, 0644,
252 bcm43xx_attr_interfmode_show,
253 bcm43xx_attr_interfmode_store);
254
255static ssize_t bcm43xx_attr_preamble_show(struct device *dev,
256 struct device_attribute *attr,
257 char *buf)
258{
259 struct bcm43xx_private *bcm = dev_to_bcm(dev);
260 ssize_t count;
261
262 if (!capable(CAP_NET_ADMIN))
263 return -EPERM;
264
265 mutex_lock(&bcm->mutex);
266
267 if (bcm->short_preamble)
268 count = snprintf(buf, PAGE_SIZE, "1 (Short Preamble enabled)\n");
269 else
270 count = snprintf(buf, PAGE_SIZE, "0 (Short Preamble disabled)\n");
271
272 mutex_unlock(&bcm->mutex);
273
274 return count;
275}
276
277static ssize_t bcm43xx_attr_preamble_store(struct device *dev,
278 struct device_attribute *attr,
279 const char *buf, size_t count)
280{
281 struct bcm43xx_private *bcm = dev_to_bcm(dev);
282 unsigned long flags;
283 int value;
284
285 if (!capable(CAP_NET_ADMIN))
286 return -EPERM;
287
288 value = get_boolean(buf, count);
289 if (value < 0)
290 return value;
291 mutex_lock(&bcm->mutex);
292 spin_lock_irqsave(&bcm->irq_lock, flags);
293
294 bcm->short_preamble = !!value;
295
296 spin_unlock_irqrestore(&bcm->irq_lock, flags);
297 mutex_unlock(&bcm->mutex);
298
299 return count;
300}
301
302static DEVICE_ATTR(shortpreamble, 0644,
303 bcm43xx_attr_preamble_show,
304 bcm43xx_attr_preamble_store);
305
306static ssize_t bcm43xx_attr_phymode_store(struct device *dev,
307 struct device_attribute *attr,
308 const char *buf, size_t count)
309{
310 struct bcm43xx_private *bcm = dev_to_bcm(dev);
311 int phytype;
312 int err = -EINVAL;
313
314 if (count < 1)
315 goto out;
316 switch (buf[0]) {
317 case 'a': case 'A':
318 phytype = BCM43xx_PHYTYPE_A;
319 break;
320 case 'b': case 'B':
321 phytype = BCM43xx_PHYTYPE_B;
322 break;
323 case 'g': case 'G':
324 phytype = BCM43xx_PHYTYPE_G;
325 break;
326 default:
327 goto out;
328 }
329
330 bcm43xx_cancel_work(bcm);
331 mutex_lock(&(bcm)->mutex);
332 err = bcm43xx_select_wireless_core(bcm, phytype);
333 if (!err)
334 bcm43xx_periodic_tasks_setup(bcm);
335 mutex_unlock(&(bcm)->mutex);
336 if (err == -ESRCH)
337 err = -ENODEV;
338
339out:
340 return err ? err : count;
341}
342
343static ssize_t bcm43xx_attr_phymode_show(struct device *dev,
344 struct device_attribute *attr,
345 char *buf)
346{
347 struct bcm43xx_private *bcm = dev_to_bcm(dev);
348 ssize_t count = 0;
349
350 mutex_lock(&(bcm)->mutex);
351 switch (bcm43xx_current_phy(bcm)->type) {
352 case BCM43xx_PHYTYPE_A:
353		count = snprintf(buf, PAGE_SIZE, "A");
354 break;
355 case BCM43xx_PHYTYPE_B:
356		count = snprintf(buf, PAGE_SIZE, "B");
357 break;
358 case BCM43xx_PHYTYPE_G:
359		count = snprintf(buf, PAGE_SIZE, "G");
360 break;
361 default:
362 assert(0);
363 }
364 mutex_unlock(&(bcm)->mutex);
365
366 return count;
367}
368
369static DEVICE_ATTR(phymode, 0644,
370 bcm43xx_attr_phymode_show,
371 bcm43xx_attr_phymode_store);
372
373static ssize_t bcm43xx_attr_microcode_show(struct device *dev,
374 struct device_attribute *attr,
375 char *buf)
376{
377 unsigned long flags;
378 struct bcm43xx_private *bcm = dev_to_bcm(dev);
379 ssize_t count = 0;
380 u16 status;
381
382 if (!capable(CAP_NET_ADMIN))
383 return -EPERM;
384
385 mutex_lock(&(bcm)->mutex);
386 spin_lock_irqsave(&bcm->irq_lock, flags);
387 status = bcm43xx_shm_read16(bcm, BCM43xx_SHM_SHARED,
388 BCM43xx_UCODE_STATUS);
389
390 spin_unlock_irqrestore(&bcm->irq_lock, flags);
391 mutex_unlock(&(bcm)->mutex);
392 switch (status) {
393 case 0x0000:
394 count = snprintf(buf, PAGE_SIZE, "0x%.4x (invalid)\n",
395 status);
396 break;
397 case 0x0001:
398 count = snprintf(buf, PAGE_SIZE, "0x%.4x (init)\n",
399 status);
400 break;
401 case 0x0002:
402 count = snprintf(buf, PAGE_SIZE, "0x%.4x (active)\n",
403 status);
404 break;
405 case 0x0003:
406 count = snprintf(buf, PAGE_SIZE, "0x%.4x (suspended)\n",
407 status);
408 break;
409 case 0x0004:
410 count = snprintf(buf, PAGE_SIZE, "0x%.4x (asleep)\n",
411 status);
412 break;
413 default:
414 count = snprintf(buf, PAGE_SIZE, "0x%.4x (unknown)\n",
415 status);
416 break;
417 }
418
419 return count;
420}
421
422static DEVICE_ATTR(microcodestatus, 0444,
423 bcm43xx_attr_microcode_show,
424 NULL);
425
426int bcm43xx_sysfs_register(struct bcm43xx_private *bcm)
427{
428 struct device *dev = &bcm->pci_dev->dev;
429 int err;
430
431 assert(bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED);
432
433 err = device_create_file(dev, &dev_attr_sprom);
434 if (err)
435 goto out;
436 err = device_create_file(dev, &dev_attr_interference);
437 if (err)
438 goto err_remove_sprom;
439 err = device_create_file(dev, &dev_attr_shortpreamble);
440 if (err)
441 goto err_remove_interfmode;
442 err = device_create_file(dev, &dev_attr_phymode);
443 if (err)
444 goto err_remove_shortpreamble;
445 err = device_create_file(dev, &dev_attr_microcodestatus);
446 if (err)
447 goto err_remove_phymode;
448
449out:
450 return err;
451err_remove_phymode:
452 device_remove_file(dev, &dev_attr_phymode);
453err_remove_shortpreamble:
454 device_remove_file(dev, &dev_attr_shortpreamble);
455err_remove_interfmode:
456 device_remove_file(dev, &dev_attr_interference);
457err_remove_sprom:
458 device_remove_file(dev, &dev_attr_sprom);
459 goto out;
460}
461
462void bcm43xx_sysfs_unregister(struct bcm43xx_private *bcm)
463{
464 struct device *dev = &bcm->pci_dev->dev;
465
466 device_remove_file(dev, &dev_attr_microcodestatus);
467 device_remove_file(dev, &dev_attr_phymode);
468 device_remove_file(dev, &dev_attr_shortpreamble);
469 device_remove_file(dev, &dev_attr_interference);
470 device_remove_file(dev, &dev_attr_sprom);
471}
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_sysfs.h b/drivers/net/wireless/bcm43xx/bcm43xx_sysfs.h
deleted file mode 100644
index cc701df71e2a..000000000000
--- a/drivers/net/wireless/bcm43xx/bcm43xx_sysfs.h
+++ /dev/null
@@ -1,9 +0,0 @@
1#ifndef BCM43xx_SYSFS_H_
2#define BCM43xx_SYSFS_H_
3
4struct bcm43xx_private;
5
6int bcm43xx_sysfs_register(struct bcm43xx_private *bcm);
7void bcm43xx_sysfs_unregister(struct bcm43xx_private *bcm);
8
9#endif /* BCM43xx_SYSFS_H_ */
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_wx.c b/drivers/net/wireless/bcm43xx/bcm43xx_wx.c
deleted file mode 100644
index 6acfdc49dccd..000000000000
--- a/drivers/net/wireless/bcm43xx/bcm43xx_wx.c
+++ /dev/null
@@ -1,1035 +0,0 @@
1/*
2
3 Broadcom BCM43xx wireless driver
4
5 Copyright (c) 2005 Martin Langer <martin-langer@gmx.de>,
6 Stefano Brivio <st3@riseup.net>
7 Michael Buesch <mbuesch@freenet.de>
8 Danny van Dyk <kugelfang@gentoo.org>
9 Andreas Jaggi <andreas.jaggi@waterwave.ch>
10
11 Some parts of the code in this file are derived from the ipw2200
12 driver Copyright(c) 2003 - 2004 Intel Corporation.
13
14 This program is free software; you can redistribute it and/or modify
15 it under the terms of the GNU General Public License as published by
16 the Free Software Foundation; either version 2 of the License, or
17 (at your option) any later version.
18
19 This program is distributed in the hope that it will be useful,
20 but WITHOUT ANY WARRANTY; without even the implied warranty of
21 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 GNU General Public License for more details.
23
24 You should have received a copy of the GNU General Public License
25 along with this program; see the file COPYING. If not, write to
26    the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
27 Boston, MA 02110-1301, USA.
28
29*/
30
31#include <linux/wireless.h>
32#include <net/iw_handler.h>
33#include <net/ieee80211softmac.h>
34#include <net/ieee80211softmac_wx.h>
35#include <linux/capability.h>
36#include <linux/delay.h>
37
38#include "bcm43xx.h"
39#include "bcm43xx_wx.h"
40#include "bcm43xx_main.h"
41#include "bcm43xx_radio.h"
42#include "bcm43xx_phy.h"
43
44
45/* The WIRELESS_EXT version, which is implemented by this driver. */
46#define BCM43xx_WX_VERSION 18
47
48#define MAX_WX_STRING 80
49
50static int bcm43xx_wx_get_name(struct net_device *net_dev,
51 struct iw_request_info *info,
52 union iwreq_data *data,
53 char *extra)
54{
55 struct bcm43xx_private *bcm = bcm43xx_priv(net_dev);
56 int i;
57 struct bcm43xx_phyinfo *phy;
58 char suffix[7] = { 0 };
59 int have_a = 0, have_b = 0, have_g = 0;
60
61 mutex_lock(&bcm->mutex);
62 for (i = 0; i < bcm->nr_80211_available; i++) {
63 phy = &(bcm->core_80211_ext[i].phy);
64 switch (phy->type) {
65 case BCM43xx_PHYTYPE_A:
66 have_a = 1;
67 break;
68 case BCM43xx_PHYTYPE_G:
69			have_g = 1; /* fall through: a G PHY also supports the B rates */
70 case BCM43xx_PHYTYPE_B:
71 have_b = 1;
72 break;
73 default:
74 assert(0);
75 }
76 }
77 mutex_unlock(&bcm->mutex);
78
79 i = 0;
80 if (have_a) {
81 suffix[i++] = 'a';
82 suffix[i++] = '/';
83 }
84 if (have_b) {
85 suffix[i++] = 'b';
86 suffix[i++] = '/';
87 }
88 if (have_g) {
89 suffix[i++] = 'g';
90 suffix[i++] = '/';
91 }
92 if (i != 0)
93 suffix[i - 1] = '\0';
94
95 snprintf(data->name, IFNAMSIZ, "IEEE 802.11%s", suffix);
96
97 return 0;
98}
99
100static int bcm43xx_wx_set_channelfreq(struct net_device *net_dev,
101 struct iw_request_info *info,
102 union iwreq_data *data,
103 char *extra)
104{
105 struct bcm43xx_private *bcm = bcm43xx_priv(net_dev);
106 unsigned long flags;
107 u8 channel;
108 s8 expon;
109 int freq;
110 int err = -EINVAL;
111
112 mutex_lock(&bcm->mutex);
113 spin_lock_irqsave(&bcm->irq_lock, flags);
114
115 if ((data->freq.e == 0) &&
116 (data->freq.m >= 0) && (data->freq.m <= 1000)) {
117 channel = data->freq.m;
118 freq = bcm43xx_channel_to_freq(bcm, channel);
119 } else {
120 freq = data->freq.m;
121 expon = 6 - data->freq.e;
122 while (--expon >= 0) /* scale down the frequency to MHz */
123 freq /= 10;
124 assert(freq > 1000);
125 channel = bcm43xx_freq_to_channel(bcm, freq);
126 }
127 if (!ieee80211_is_valid_channel(bcm->ieee, channel))
128 goto out_unlock;
129 if (bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED) {
130 //ieee80211softmac_disassoc(softmac, $REASON);
131 bcm43xx_mac_suspend(bcm);
132 err = bcm43xx_radio_selectchannel(bcm, channel, 0);
133 bcm43xx_mac_enable(bcm);
134 } else {
135 bcm43xx_current_radio(bcm)->initial_channel = channel;
136 err = 0;
137 }
138out_unlock:
139 spin_unlock_irqrestore(&bcm->irq_lock, flags);
140 mutex_unlock(&bcm->mutex);
141
142 return err;
143}
144
145static int bcm43xx_wx_get_channelfreq(struct net_device *net_dev,
146 struct iw_request_info *info,
147 union iwreq_data *data,
148 char *extra)
149{
150 struct bcm43xx_private *bcm = bcm43xx_priv(net_dev);
151 struct bcm43xx_radioinfo *radio;
152 int err = -ENODEV;
153 u16 channel;
154
155 mutex_lock(&bcm->mutex);
156 radio = bcm43xx_current_radio(bcm);
157 channel = radio->channel;
158 if (channel == 0xFF) {
159 channel = radio->initial_channel;
160 if (channel == 0xFF)
161 goto out_unlock;
162 }
163 assert(channel > 0 && channel <= 1000);
164 data->freq.e = 1;
165 data->freq.m = bcm43xx_channel_to_freq(bcm, channel) * 100000;
166 data->freq.flags = 1;
167
168 err = 0;
169out_unlock:
170 mutex_unlock(&bcm->mutex);
171
172 return err;
173}
174
175static int bcm43xx_wx_set_mode(struct net_device *net_dev,
176 struct iw_request_info *info,
177 union iwreq_data *data,
178 char *extra)
179{
180 struct bcm43xx_private *bcm = bcm43xx_priv(net_dev);
181 unsigned long flags;
182 int mode;
183
184 mode = data->mode;
185 if (mode == IW_MODE_AUTO)
186 mode = BCM43xx_INITIAL_IWMODE;
187
188 mutex_lock(&bcm->mutex);
189 spin_lock_irqsave(&bcm->irq_lock, flags);
190 if (bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED) {
191 if (bcm->ieee->iw_mode != mode)
192 bcm43xx_set_iwmode(bcm, mode);
193 } else
194 bcm->ieee->iw_mode = mode;
195 spin_unlock_irqrestore(&bcm->irq_lock, flags);
196 mutex_unlock(&bcm->mutex);
197
198 return 0;
199}
200
201static int bcm43xx_wx_get_mode(struct net_device *net_dev,
202 struct iw_request_info *info,
203 union iwreq_data *data,
204 char *extra)
205{
206 struct bcm43xx_private *bcm = bcm43xx_priv(net_dev);
207
208 mutex_lock(&bcm->mutex);
209 data->mode = bcm->ieee->iw_mode;
210 mutex_unlock(&bcm->mutex);
211
212 return 0;
213}
214
215static int bcm43xx_wx_get_rangeparams(struct net_device *net_dev,
216 struct iw_request_info *info,
217 union iwreq_data *data,
218 char *extra)
219{
220 struct bcm43xx_private *bcm = bcm43xx_priv(net_dev);
221 struct iw_range *range = (struct iw_range *)extra;
222 const struct ieee80211_geo *geo;
223 int i, j;
224 struct bcm43xx_phyinfo *phy;
225
226 data->data.length = sizeof(*range);
227 memset(range, 0, sizeof(*range));
228
229 //TODO: What about 802.11b?
230 /* 54Mb/s == ~27Mb/s payload throughput (802.11g) */
231 range->throughput = 27 * 1000 * 1000;
232
233 range->max_qual.qual = 100;
234 range->max_qual.level = 146; /* set floor at -110 dBm (146 - 256) */
235 range->max_qual.noise = 146;
236 range->max_qual.updated = IW_QUAL_ALL_UPDATED;
237
238 range->avg_qual.qual = 50;
239 range->avg_qual.level = 0;
240 range->avg_qual.noise = 0;
241 range->avg_qual.updated = IW_QUAL_ALL_UPDATED;
242
243 range->min_rts = BCM43xx_MIN_RTS_THRESHOLD;
244 range->max_rts = BCM43xx_MAX_RTS_THRESHOLD;
245 range->min_frag = MIN_FRAG_THRESHOLD;
246 range->max_frag = MAX_FRAG_THRESHOLD;
247
248 range->encoding_size[0] = 5;
249 range->encoding_size[1] = 13;
250 range->num_encoding_sizes = 2;
251 range->max_encoding_tokens = WEP_KEYS;
252
253 range->we_version_compiled = WIRELESS_EXT;
254 range->we_version_source = BCM43xx_WX_VERSION;
255
256 range->enc_capa = IW_ENC_CAPA_WPA |
257 IW_ENC_CAPA_WPA2 |
258 IW_ENC_CAPA_CIPHER_TKIP |
259 IW_ENC_CAPA_CIPHER_CCMP;
260
261 mutex_lock(&bcm->mutex);
262 phy = bcm43xx_current_phy(bcm);
263
264 range->num_bitrates = 0;
265 i = 0;
266 if (phy->type == BCM43xx_PHYTYPE_A ||
267 phy->type == BCM43xx_PHYTYPE_G) {
268 range->num_bitrates = 8;
269 range->bitrate[i++] = IEEE80211_OFDM_RATE_6MB * 500000;
270 range->bitrate[i++] = IEEE80211_OFDM_RATE_9MB * 500000;
271 range->bitrate[i++] = IEEE80211_OFDM_RATE_12MB * 500000;
272 range->bitrate[i++] = IEEE80211_OFDM_RATE_18MB * 500000;
273 range->bitrate[i++] = IEEE80211_OFDM_RATE_24MB * 500000;
274 range->bitrate[i++] = IEEE80211_OFDM_RATE_36MB * 500000;
275 range->bitrate[i++] = IEEE80211_OFDM_RATE_48MB * 500000;
276 range->bitrate[i++] = IEEE80211_OFDM_RATE_54MB * 500000;
277 }
278 if (phy->type == BCM43xx_PHYTYPE_B ||
279 phy->type == BCM43xx_PHYTYPE_G) {
280 range->num_bitrates += 4;
281 range->bitrate[i++] = IEEE80211_CCK_RATE_1MB * 500000;
282 range->bitrate[i++] = IEEE80211_CCK_RATE_2MB * 500000;
283 range->bitrate[i++] = IEEE80211_CCK_RATE_5MB * 500000;
284 range->bitrate[i++] = IEEE80211_CCK_RATE_11MB * 500000;
285 }
286
287 geo = ieee80211_get_geo(bcm->ieee);
288 range->num_channels = geo->a_channels + geo->bg_channels;
289 j = 0;
290 for (i = 0; i < geo->a_channels; i++) {
291 if (j == IW_MAX_FREQUENCIES)
292 break;
293 range->freq[j].i = j + 1;
294 range->freq[j].m = geo->a[i].freq * 100000;
295 range->freq[j].e = 1;
296 j++;
297 }
298 for (i = 0; i < geo->bg_channels; i++) {
299 if (j == IW_MAX_FREQUENCIES)
300 break;
301 range->freq[j].i = j + 1;
302 range->freq[j].m = geo->bg[i].freq * 100000;
303 range->freq[j].e = 1;
304 j++;
305 }
306 range->num_frequency = j;
307
308 mutex_unlock(&bcm->mutex);
309
310 return 0;
311}
312
313static int bcm43xx_wx_set_nick(struct net_device *net_dev,
314 struct iw_request_info *info,
315 union iwreq_data *data,
316 char *extra)
317{
318 struct bcm43xx_private *bcm = bcm43xx_priv(net_dev);
319 size_t len;
320
321 mutex_lock(&bcm->mutex);
322 len = min((size_t)data->data.length, (size_t)IW_ESSID_MAX_SIZE);
323 memcpy(bcm->nick, extra, len);
324 bcm->nick[len] = '\0';
325 mutex_unlock(&bcm->mutex);
326
327 return 0;
328}
329
330static int bcm43xx_wx_get_nick(struct net_device *net_dev,
331 struct iw_request_info *info,
332 union iwreq_data *data,
333 char *extra)
334{
335 struct bcm43xx_private *bcm = bcm43xx_priv(net_dev);
336 size_t len;
337
338 mutex_lock(&bcm->mutex);
339 len = strlen(bcm->nick);
340 memcpy(extra, bcm->nick, len);
341 data->data.length = (__u16)len;
342 data->data.flags = 1;
343 mutex_unlock(&bcm->mutex);
344
345 return 0;
346}
347
348static int bcm43xx_wx_set_rts(struct net_device *net_dev,
349 struct iw_request_info *info,
350 union iwreq_data *data,
351 char *extra)
352{
353 struct bcm43xx_private *bcm = bcm43xx_priv(net_dev);
354 unsigned long flags;
355 int err = -EINVAL;
356
357 mutex_lock(&bcm->mutex);
358 spin_lock_irqsave(&bcm->irq_lock, flags);
359 if (data->rts.disabled) {
360 bcm->rts_threshold = BCM43xx_MAX_RTS_THRESHOLD;
361 err = 0;
362 } else {
363 if (data->rts.value >= BCM43xx_MIN_RTS_THRESHOLD &&
364 data->rts.value <= BCM43xx_MAX_RTS_THRESHOLD) {
365 bcm->rts_threshold = data->rts.value;
366 err = 0;
367 }
368 }
369 spin_unlock_irqrestore(&bcm->irq_lock, flags);
370 mutex_unlock(&bcm->mutex);
371
372 return err;
373}
374
375static int bcm43xx_wx_get_rts(struct net_device *net_dev,
376 struct iw_request_info *info,
377 union iwreq_data *data,
378 char *extra)
379{
380 struct bcm43xx_private *bcm = bcm43xx_priv(net_dev);
381
382 mutex_lock(&bcm->mutex);
383 data->rts.value = bcm->rts_threshold;
384 data->rts.fixed = 0;
385 data->rts.disabled = (bcm->rts_threshold == BCM43xx_MAX_RTS_THRESHOLD);
386 mutex_unlock(&bcm->mutex);
387
388 return 0;
389}
390
391static int bcm43xx_wx_set_frag(struct net_device *net_dev,
392 struct iw_request_info *info,
393 union iwreq_data *data,
394 char *extra)
395{
396 struct bcm43xx_private *bcm = bcm43xx_priv(net_dev);
397 unsigned long flags;
398 int err = -EINVAL;
399
400 mutex_lock(&bcm->mutex);
401 spin_lock_irqsave(&bcm->irq_lock, flags);
402 if (data->frag.disabled) {
403 bcm->ieee->fts = MAX_FRAG_THRESHOLD;
404 err = 0;
405 } else {
406 if (data->frag.value >= MIN_FRAG_THRESHOLD &&
407 data->frag.value <= MAX_FRAG_THRESHOLD) {
408 bcm->ieee->fts = data->frag.value & ~0x1;
409 err = 0;
410 }
411 }
412 spin_unlock_irqrestore(&bcm->irq_lock, flags);
413 mutex_unlock(&bcm->mutex);
414
415 return err;
416}
417
418static int bcm43xx_wx_get_frag(struct net_device *net_dev,
419 struct iw_request_info *info,
420 union iwreq_data *data,
421 char *extra)
422{
423 struct bcm43xx_private *bcm = bcm43xx_priv(net_dev);
424
425 mutex_lock(&bcm->mutex);
426 data->frag.value = bcm->ieee->fts;
427 data->frag.fixed = 0;
428 data->frag.disabled = (bcm->ieee->fts == MAX_FRAG_THRESHOLD);
429 mutex_unlock(&bcm->mutex);
430
431 return 0;
432}
433
434static int bcm43xx_wx_set_xmitpower(struct net_device *net_dev,
435 struct iw_request_info *info,
436 union iwreq_data *data,
437 char *extra)
438{
439 struct bcm43xx_private *bcm = bcm43xx_priv(net_dev);
440 struct bcm43xx_radioinfo *radio;
441 struct bcm43xx_phyinfo *phy;
442 unsigned long flags;
443 int err = -ENODEV;
444 u16 maxpower;
445
446 if ((data->txpower.flags & IW_TXPOW_TYPE) != IW_TXPOW_DBM) {
447 printk(KERN_ERR PFX "TX power not in dBm.\n");
448 return -EOPNOTSUPP;
449 }
450
451 mutex_lock(&bcm->mutex);
452 spin_lock_irqsave(&bcm->irq_lock, flags);
453 if (bcm43xx_status(bcm) != BCM43xx_STAT_INITIALIZED)
454 goto out_unlock;
455 radio = bcm43xx_current_radio(bcm);
456 phy = bcm43xx_current_phy(bcm);
457 if (data->txpower.disabled != (!(radio->enabled))) {
458 if (data->txpower.disabled)
459 bcm43xx_radio_turn_off(bcm);
460 else
461 bcm43xx_radio_turn_on(bcm);
462 }
463 if (data->txpower.value > 0) {
464 /* desired and maxpower dBm values are in Q5.2 */
465 if (phy->type == BCM43xx_PHYTYPE_A)
466 maxpower = bcm->sprom.maxpower_aphy;
467 else
468 maxpower = bcm->sprom.maxpower_bgphy;
469 radio->txpower_desired = limit_value(data->txpower.value << 2,
470 0, maxpower);
471 bcm43xx_phy_xmitpower(bcm);
472 }
473 err = 0;
474
475out_unlock:
476 spin_unlock_irqrestore(&bcm->irq_lock, flags);
477 mutex_unlock(&bcm->mutex);
478
479 return err;
480}
481
482static int bcm43xx_wx_get_xmitpower(struct net_device *net_dev,
483 struct iw_request_info *info,
484 union iwreq_data *data,
485 char *extra)
486{
487 struct bcm43xx_private *bcm = bcm43xx_priv(net_dev);
488 struct bcm43xx_radioinfo *radio;
489 int err = -ENODEV;
490
491 mutex_lock(&bcm->mutex);
492 if (bcm43xx_status(bcm) != BCM43xx_STAT_INITIALIZED)
493 goto out_unlock;
494 radio = bcm43xx_current_radio(bcm);
495 /* desired dBm value is in Q5.2 */
496 data->txpower.value = radio->txpower_desired >> 2;
497 data->txpower.fixed = 1;
498 data->txpower.flags = IW_TXPOW_DBM;
499 data->txpower.disabled = !(radio->enabled);
500
501 err = 0;
502out_unlock:
503 mutex_unlock(&bcm->mutex);
504
505 return err;
506}
507
508static int bcm43xx_wx_set_encoding(struct net_device *net_dev,
509 struct iw_request_info *info,
510 union iwreq_data *data,
511 char *extra)
512{
513 struct bcm43xx_private *bcm = bcm43xx_priv(net_dev);
514 int err;
515
516 err = ieee80211_wx_set_encode(bcm->ieee, info, data, extra);
517
518 return err;
519}
520
521static int bcm43xx_wx_set_encodingext(struct net_device *net_dev,
522 struct iw_request_info *info,
523 union iwreq_data *data,
524 char *extra)
525{
526 struct bcm43xx_private *bcm = bcm43xx_priv(net_dev);
527 int err;
528
529 err = ieee80211_wx_set_encodeext(bcm->ieee, info, data, extra);
530
531 return err;
532}
533
534static int bcm43xx_wx_get_encoding(struct net_device *net_dev,
535 struct iw_request_info *info,
536 union iwreq_data *data,
537 char *extra)
538{
539 struct bcm43xx_private *bcm = bcm43xx_priv(net_dev);
540 int err;
541
542 err = ieee80211_wx_get_encode(bcm->ieee, info, data, extra);
543
544 return err;
545}
546
547static int bcm43xx_wx_get_encodingext(struct net_device *net_dev,
548 struct iw_request_info *info,
549 union iwreq_data *data,
550 char *extra)
551{
552 struct bcm43xx_private *bcm = bcm43xx_priv(net_dev);
553 int err;
554
555 err = ieee80211_wx_get_encodeext(bcm->ieee, info, data, extra);
556
557 return err;
558}
559
560static int bcm43xx_wx_set_interfmode(struct net_device *net_dev,
561 struct iw_request_info *info,
562 union iwreq_data *data,
563 char *extra)
564{
565 struct bcm43xx_private *bcm = bcm43xx_priv(net_dev);
566 unsigned long flags;
567 int mode, err = 0;
568
569 mode = *((int *)extra);
570 switch (mode) {
571 case 0:
572 mode = BCM43xx_RADIO_INTERFMODE_NONE;
573 break;
574 case 1:
575 mode = BCM43xx_RADIO_INTERFMODE_NONWLAN;
576 break;
577 case 2:
578 mode = BCM43xx_RADIO_INTERFMODE_MANUALWLAN;
579 break;
580 case 3:
581 mode = BCM43xx_RADIO_INTERFMODE_AUTOWLAN;
582 break;
583 default:
584 printk(KERN_ERR PFX "set_interfmode allowed parameters are: "
585 "0 => None, 1 => Non-WLAN, 2 => WLAN, "
586 "3 => Auto-WLAN\n");
587 return -EINVAL;
588 }
589
590 mutex_lock(&bcm->mutex);
591 spin_lock_irqsave(&bcm->irq_lock, flags);
592 if (bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED) {
593 err = bcm43xx_radio_set_interference_mitigation(bcm, mode);
594 if (err) {
595 printk(KERN_ERR PFX "Interference Mitigation not "
596 "supported by device\n");
597 }
598 } else {
599 if (mode == BCM43xx_RADIO_INTERFMODE_AUTOWLAN) {
600 printk(KERN_ERR PFX "Interference Mitigation mode Auto-WLAN "
601 "not supported while the interface is down.\n");
602 err = -ENODEV;
603 } else
604 bcm43xx_current_radio(bcm)->interfmode = mode;
605 }
606 spin_unlock_irqrestore(&bcm->irq_lock, flags);
607 mutex_unlock(&bcm->mutex);
608
609 return err;
610}
611
612static int bcm43xx_wx_get_interfmode(struct net_device *net_dev,
613 struct iw_request_info *info,
614 union iwreq_data *data,
615 char *extra)
616{
617 struct bcm43xx_private *bcm = bcm43xx_priv(net_dev);
618 int mode;
619
620 mutex_lock(&bcm->mutex);
621 mode = bcm43xx_current_radio(bcm)->interfmode;
622 mutex_unlock(&bcm->mutex);
623
624 switch (mode) {
625 case BCM43xx_RADIO_INTERFMODE_NONE:
626 strncpy(extra, "0 (No Interference Mitigation)", MAX_WX_STRING);
627 break;
628 case BCM43xx_RADIO_INTERFMODE_NONWLAN:
629 strncpy(extra, "1 (Non-WLAN Interference Mitigation)", MAX_WX_STRING);
630 break;
631 case BCM43xx_RADIO_INTERFMODE_MANUALWLAN:
632 strncpy(extra, "2 (WLAN Interference Mitigation)", MAX_WX_STRING);
633 break;
634 default:
635 assert(0);
636 }
637 data->data.length = strlen(extra) + 1;
638
639 return 0;
640}
641
642static int bcm43xx_wx_set_shortpreamble(struct net_device *net_dev,
643 struct iw_request_info *info,
644 union iwreq_data *data,
645 char *extra)
646{
647 struct bcm43xx_private *bcm = bcm43xx_priv(net_dev);
648 unsigned long flags;
649 int on;
650
651 on = *((int *)extra);
652 mutex_lock(&bcm->mutex);
653 spin_lock_irqsave(&bcm->irq_lock, flags);
654 bcm->short_preamble = !!on;
655 spin_unlock_irqrestore(&bcm->irq_lock, flags);
656 mutex_unlock(&bcm->mutex);
657
658 return 0;
659}
660
661static int bcm43xx_wx_get_shortpreamble(struct net_device *net_dev,
662 struct iw_request_info *info,
663 union iwreq_data *data,
664 char *extra)
665{
666 struct bcm43xx_private *bcm = bcm43xx_priv(net_dev);
667 int on;
668
669 mutex_lock(&bcm->mutex);
670 on = bcm->short_preamble;
671 mutex_unlock(&bcm->mutex);
672
673 if (on)
674 strncpy(extra, "1 (Short Preamble enabled)", MAX_WX_STRING);
675 else
676 strncpy(extra, "0 (Short Preamble disabled)", MAX_WX_STRING);
677 data->data.length = strlen(extra) + 1;
678
679 return 0;
680}
681
682static int bcm43xx_wx_set_swencryption(struct net_device *net_dev,
683 struct iw_request_info *info,
684 union iwreq_data *data,
685 char *extra)
686{
687 struct bcm43xx_private *bcm = bcm43xx_priv(net_dev);
688 unsigned long flags;
689 int on;
690
691 on = *((int *)extra);
692
693 mutex_lock(&bcm->mutex);
694 spin_lock_irqsave(&bcm->irq_lock, flags);
695 bcm->ieee->host_encrypt = !!on;
696 bcm->ieee->host_decrypt = !!on;
697 bcm->ieee->host_build_iv = !on;
698 bcm->ieee->host_strip_iv_icv = !on;
699 spin_unlock_irqrestore(&bcm->irq_lock, flags);
700 mutex_unlock(&bcm->mutex);
701
702 return 0;
703}
704
705static int bcm43xx_wx_get_swencryption(struct net_device *net_dev,
706 struct iw_request_info *info,
707 union iwreq_data *data,
708 char *extra)
709{
710 struct bcm43xx_private *bcm = bcm43xx_priv(net_dev);
711 int on;
712
713 mutex_lock(&bcm->mutex);
714 on = bcm->ieee->host_encrypt;
715 mutex_unlock(&bcm->mutex);
716
717 if (on)
718 strncpy(extra, "1 (SW encryption enabled) ", MAX_WX_STRING);
719 else
720 strncpy(extra, "0 (SW encryption disabled) ", MAX_WX_STRING);
721	data->data.length = strlen(extra) + 1;
722
723 return 0;
724}
725
726/* Enough buffer to hold a hexdump of the sprom data. */
727#define SPROM_BUFFERSIZE 512
728
729static int sprom2hex(const u16 *sprom, char *dump)
730{
731 int i, pos = 0;
732
733 for (i = 0; i < BCM43xx_SPROM_SIZE; i++) {
734 pos += snprintf(dump + pos, SPROM_BUFFERSIZE - pos - 1,
735 "%04X", swab16(sprom[i]) & 0xFFFF);
736 }
737
738 return pos + 1;
739}
740
741static int hex2sprom(u16 *sprom, const char *dump, unsigned int len)
742{
743 char tmp[5] = { 0 };
744 int cnt = 0;
745 unsigned long parsed;
746
747 if (len < BCM43xx_SPROM_SIZE * sizeof(u16) * 2)
748 return -EINVAL;
749 while (cnt < BCM43xx_SPROM_SIZE) {
750 memcpy(tmp, dump, 4);
751 dump += 4;
752 parsed = simple_strtoul(tmp, NULL, 16);
753 sprom[cnt++] = swab16((u16)parsed);
754 }
755
756 return 0;
757}
758
759static int bcm43xx_wx_sprom_read(struct net_device *net_dev,
760 struct iw_request_info *info,
761 union iwreq_data *data,
762 char *extra)
763{
764 struct bcm43xx_private *bcm = bcm43xx_priv(net_dev);
765 int err = -EPERM;
766 u16 *sprom;
767 unsigned long flags;
768
769 if (!capable(CAP_SYS_RAWIO))
770 goto out;
771
772 err = -ENOMEM;
773 sprom = kmalloc(BCM43xx_SPROM_SIZE * sizeof(*sprom),
774 GFP_KERNEL);
775 if (!sprom)
776 goto out;
777
778 mutex_lock(&bcm->mutex);
779 spin_lock_irqsave(&bcm->irq_lock, flags);
780 err = -ENODEV;
781 if (bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED)
782 err = bcm43xx_sprom_read(bcm, sprom);
783 spin_unlock_irqrestore(&bcm->irq_lock, flags);
784 mutex_unlock(&bcm->mutex);
785 if (!err)
786 data->data.length = sprom2hex(sprom, extra);
787 kfree(sprom);
788out:
789 return err;
790}
791
792static int bcm43xx_wx_sprom_write(struct net_device *net_dev,
793 struct iw_request_info *info,
794 union iwreq_data *data,
795 char *extra)
796{
797 struct bcm43xx_private *bcm = bcm43xx_priv(net_dev);
798 int err = -EPERM;
799 u16 *sprom;
800 unsigned long flags;
801 char *input;
802 unsigned int len;
803
804 if (!capable(CAP_SYS_RAWIO))
805 goto out;
806
807 err = -ENOMEM;
808 sprom = kmalloc(BCM43xx_SPROM_SIZE * sizeof(*sprom),
809 GFP_KERNEL);
810 if (!sprom)
811 goto out;
812
813 len = data->data.length;
814 extra[len - 1] = '\0';
815 input = strchr(extra, ':');
816 if (input) {
817 input++;
818 len -= input - extra;
819 } else
820 input = extra;
821 err = hex2sprom(sprom, input, len);
822 if (err)
823 goto out_kfree;
824
825 mutex_lock(&bcm->mutex);
826 spin_lock_irqsave(&bcm->irq_lock, flags);
827 spin_lock(&bcm->leds_lock);
828 err = -ENODEV;
829 if (bcm43xx_status(bcm) == BCM43xx_STAT_INITIALIZED)
830 err = bcm43xx_sprom_write(bcm, sprom);
831 spin_unlock(&bcm->leds_lock);
832 spin_unlock_irqrestore(&bcm->irq_lock, flags);
833 mutex_unlock(&bcm->mutex);
834out_kfree:
835 kfree(sprom);
836out:
837 return err;
838}
839
840/* Get wireless statistics. Called by /proc/net/wireless and by SIOCGIWSTATS */
841
842static struct iw_statistics *bcm43xx_get_wireless_stats(struct net_device *net_dev)
843{
844 struct bcm43xx_private *bcm = bcm43xx_priv(net_dev);
845 struct ieee80211softmac_device *mac = ieee80211_priv(net_dev);
846 struct iw_statistics *wstats;
847 struct ieee80211_network *network = NULL;
848 static int tmp_level = 0;
849 static int tmp_qual = 0;
850 unsigned long flags;
851
852 wstats = &bcm->stats.wstats;
853 if (!mac->associnfo.associated) {
854 wstats->miss.beacon = 0;
855// bcm->ieee->ieee_stats.tx_retry_limit_exceeded = 0; // FIXME: should this be cleared here?
856 wstats->discard.retries = 0;
857// bcm->ieee->ieee_stats.tx_discards_wrong_sa = 0; // FIXME: same question
858 wstats->discard.nwid = 0;
859// bcm->ieee->ieee_stats.rx_discards_undecryptable = 0; // FIXME: ditto
860 wstats->discard.code = 0;
861// bcm->ieee->ieee_stats.rx_fragments = 0; // FIXME: same here
862 wstats->discard.fragment = 0;
863 wstats->discard.misc = 0;
864 wstats->qual.qual = 0;
865 wstats->qual.level = 0;
866 wstats->qual.noise = 0;
867 wstats->qual.updated = 7;
868 wstats->qual.updated |= IW_QUAL_ALL_UPDATED | IW_QUAL_DBM;
869 return wstats;
870 }
871 /* fill in the real statistics when iface associated */
872 spin_lock_irqsave(&mac->ieee->lock, flags);
873 list_for_each_entry(network, &mac->ieee->network_list, list) {
874 if (!memcmp(mac->associnfo.bssid, network->bssid, ETH_ALEN)) {
875 if (!tmp_level) { /* get initial values */
876 tmp_level = network->stats.signal;
877 tmp_qual = network->stats.rssi;
878 } else { /* smooth results */
879 tmp_level = (15 * tmp_level + network->stats.signal)/16;
880 tmp_qual = (15 * tmp_qual + network->stats.rssi)/16;
881 }
882 break;
883 }
884 }
885 spin_unlock_irqrestore(&mac->ieee->lock, flags);
886 wstats->qual.level = tmp_level;
887 wstats->qual.qual = 100 * tmp_qual / RX_RSSI_MAX;
888 wstats->qual.noise = bcm->stats.noise;
889 wstats->qual.updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM;
890 wstats->discard.code = bcm->ieee->ieee_stats.rx_discards_undecryptable;
891 wstats->discard.retries = bcm->ieee->ieee_stats.tx_retry_limit_exceeded;
892 wstats->discard.nwid = bcm->ieee->ieee_stats.tx_discards_wrong_sa;
893 wstats->discard.fragment = bcm->ieee->ieee_stats.rx_fragments;
894 wstats->discard.misc = 0; // FIXME
895 wstats->miss.beacon = 0; // FIXME
896 return wstats;
897}
898
899
900#ifdef WX
901# undef WX
902#endif
903#define WX(ioctl) [(ioctl) - SIOCSIWCOMMIT]
904static const iw_handler bcm43xx_wx_handlers[] = {
905 /* Wireless Identification */
906 WX(SIOCGIWNAME) = bcm43xx_wx_get_name,
907 /* Basic operations */
908 WX(SIOCSIWFREQ) = bcm43xx_wx_set_channelfreq,
909 WX(SIOCGIWFREQ) = bcm43xx_wx_get_channelfreq,
910 WX(SIOCSIWMODE) = bcm43xx_wx_set_mode,
911 WX(SIOCGIWMODE) = bcm43xx_wx_get_mode,
912 /* Informative stuff */
913 WX(SIOCGIWRANGE) = bcm43xx_wx_get_rangeparams,
914 /* Access Point manipulation */
915 WX(SIOCSIWAP) = ieee80211softmac_wx_set_wap,
916 WX(SIOCGIWAP) = ieee80211softmac_wx_get_wap,
917 WX(SIOCSIWSCAN) = ieee80211softmac_wx_trigger_scan,
918 WX(SIOCGIWSCAN) = ieee80211softmac_wx_get_scan_results,
919 /* 802.11 specific support */
920 WX(SIOCSIWESSID) = ieee80211softmac_wx_set_essid,
921 WX(SIOCGIWESSID) = ieee80211softmac_wx_get_essid,
922 WX(SIOCSIWNICKN) = bcm43xx_wx_set_nick,
923 WX(SIOCGIWNICKN) = bcm43xx_wx_get_nick,
924 /* Other parameters */
925 WX(SIOCSIWRATE) = ieee80211softmac_wx_set_rate,
926 WX(SIOCGIWRATE) = ieee80211softmac_wx_get_rate,
927 WX(SIOCSIWRTS) = bcm43xx_wx_set_rts,
928 WX(SIOCGIWRTS) = bcm43xx_wx_get_rts,
929 WX(SIOCSIWFRAG) = bcm43xx_wx_set_frag,
930 WX(SIOCGIWFRAG) = bcm43xx_wx_get_frag,
931 WX(SIOCSIWTXPOW) = bcm43xx_wx_set_xmitpower,
932 WX(SIOCGIWTXPOW) = bcm43xx_wx_get_xmitpower,
933//TODO WX(SIOCSIWRETRY) = bcm43xx_wx_set_retry,
934//TODO WX(SIOCGIWRETRY) = bcm43xx_wx_get_retry,
935 /* Encoding */
936 WX(SIOCSIWENCODE) = bcm43xx_wx_set_encoding,
937 WX(SIOCGIWENCODE) = bcm43xx_wx_get_encoding,
938 WX(SIOCSIWENCODEEXT) = bcm43xx_wx_set_encodingext,
939 WX(SIOCGIWENCODEEXT) = bcm43xx_wx_get_encodingext,
940 /* Power saving */
941//TODO WX(SIOCSIWPOWER) = bcm43xx_wx_set_power,
942//TODO WX(SIOCGIWPOWER) = bcm43xx_wx_get_power,
943 WX(SIOCSIWGENIE) = ieee80211softmac_wx_set_genie,
944 WX(SIOCGIWGENIE) = ieee80211softmac_wx_get_genie,
945 WX(SIOCSIWAUTH) = ieee80211_wx_set_auth,
946 WX(SIOCGIWAUTH) = ieee80211_wx_get_auth,
947};
948#undef WX
949
950static const iw_handler bcm43xx_priv_wx_handlers[] = {
951 /* Set Interference Mitigation Mode. */
952 bcm43xx_wx_set_interfmode,
953 /* Get Interference Mitigation Mode. */
954 bcm43xx_wx_get_interfmode,
955 /* Enable/Disable Short Preamble mode. */
956 bcm43xx_wx_set_shortpreamble,
957 /* Get Short Preamble mode. */
958 bcm43xx_wx_get_shortpreamble,
959 /* Enable/Disable Software Encryption mode */
960 bcm43xx_wx_set_swencryption,
961 /* Get Software Encryption mode */
962 bcm43xx_wx_get_swencryption,
963	/* Write SPROM data. */
964 bcm43xx_wx_sprom_write,
965 /* Read SPROM data. */
966 bcm43xx_wx_sprom_read,
967};
968
969#define PRIV_WX_SET_INTERFMODE (SIOCIWFIRSTPRIV + 0)
970#define PRIV_WX_GET_INTERFMODE (SIOCIWFIRSTPRIV + 1)
971#define PRIV_WX_SET_SHORTPREAMBLE (SIOCIWFIRSTPRIV + 2)
972#define PRIV_WX_GET_SHORTPREAMBLE (SIOCIWFIRSTPRIV + 3)
973#define PRIV_WX_SET_SWENCRYPTION (SIOCIWFIRSTPRIV + 4)
974#define PRIV_WX_GET_SWENCRYPTION (SIOCIWFIRSTPRIV + 5)
975#define PRIV_WX_SPROM_WRITE (SIOCIWFIRSTPRIV + 6)
976#define PRIV_WX_SPROM_READ (SIOCIWFIRSTPRIV + 7)
977
978#define PRIV_WX_DUMMY(ioctl) \
979 { \
980 .cmd = (ioctl), \
981 .name = "__unused" \
982 }
983
984static const struct iw_priv_args bcm43xx_priv_wx_args[] = {
985 {
986 .cmd = PRIV_WX_SET_INTERFMODE,
987 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
988 .name = "set_interfmode",
989 },
990 {
991 .cmd = PRIV_WX_GET_INTERFMODE,
992 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
993 .name = "get_interfmode",
994 },
995 {
996 .cmd = PRIV_WX_SET_SHORTPREAMBLE,
997 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
998 .name = "set_shortpreamb",
999 },
1000 {
1001 .cmd = PRIV_WX_GET_SHORTPREAMBLE,
1002 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
1003 .name = "get_shortpreamb",
1004 },
1005 {
1006 .cmd = PRIV_WX_SET_SWENCRYPTION,
1007 .set_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
1008 .name = "set_swencrypt",
1009 },
1010 {
1011 .cmd = PRIV_WX_GET_SWENCRYPTION,
1012 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_WX_STRING,
1013 .name = "get_swencrypt",
1014 },
1015 {
1016 .cmd = PRIV_WX_SPROM_WRITE,
1017 .set_args = IW_PRIV_TYPE_CHAR | SPROM_BUFFERSIZE,
1018 .name = "write_sprom",
1019 },
1020 {
1021 .cmd = PRIV_WX_SPROM_READ,
1022 .get_args = IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | SPROM_BUFFERSIZE,
1023 .name = "read_sprom",
1024 },
1025};
1026
1027const struct iw_handler_def bcm43xx_wx_handlers_def = {
1028 .standard = bcm43xx_wx_handlers,
1029 .num_standard = ARRAY_SIZE(bcm43xx_wx_handlers),
1030 .num_private = ARRAY_SIZE(bcm43xx_priv_wx_handlers),
1031 .num_private_args = ARRAY_SIZE(bcm43xx_priv_wx_args),
1032 .private = bcm43xx_priv_wx_handlers,
1033 .private_args = bcm43xx_priv_wx_args,
1034 .get_wireless_stats = bcm43xx_get_wireless_stats,
1035};
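/*
 * Illustrative usage note, not part of the driver: the private handlers and
 * iw_priv_args entries above are what wireless-tools' iwpriv exposes, so on
 * some interface (the name below is only an example) they would typically be
 * driven as:
 *
 *	iwpriv eth1 set_interfmode 1
 *	iwpriv eth1 get_shortpreamb
 *
 * The command names come from the .name fields in bcm43xx_priv_wx_args.
 */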
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_wx.h b/drivers/net/wireless/bcm43xx/bcm43xx_wx.h
deleted file mode 100644
index 1f29ff3aa4c3..000000000000
--- a/drivers/net/wireless/bcm43xx/bcm43xx_wx.h
+++ /dev/null
@@ -1,36 +0,0 @@
1/*
2
3 Broadcom BCM43xx wireless driver
4
5 Copyright (c) 2005 Martin Langer <martin-langer@gmx.de>,
6 Stefano Brivio <st3@riseup.net>
7 Michael Buesch <mbuesch@freenet.de>
8 Danny van Dyk <kugelfang@gentoo.org>
9 Andreas Jaggi <andreas.jaggi@waterwave.ch>
10
11 Some parts of the code in this file are derived from the ipw2200
12 driver Copyright(c) 2003 - 2004 Intel Corporation.
13
14 This program is free software; you can redistribute it and/or modify
15 it under the terms of the GNU General Public License as published by
16 the Free Software Foundation; either version 2 of the License, or
17 (at your option) any later version.
18
19 This program is distributed in the hope that it will be useful,
20 but WITHOUT ANY WARRANTY; without even the implied warranty of
21 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 GNU General Public License for more details.
23
24 You should have received a copy of the GNU General Public License
25 along with this program; see the file COPYING. If not, write to
26 the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
27 Boston, MA 02110-1301, USA.
28
29*/
30
31#ifndef BCM43xx_WX_H_
32#define BCM43xx_WX_H_
33
34extern const struct iw_handler_def bcm43xx_wx_handlers_def;
35
36#endif /* BCM43xx_WX_H_ */
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_xmit.c b/drivers/net/wireless/bcm43xx/bcm43xx_xmit.c
deleted file mode 100644
index f79fe11f9e81..000000000000
--- a/drivers/net/wireless/bcm43xx/bcm43xx_xmit.c
+++ /dev/null
@@ -1,565 +0,0 @@
1/*
2
3 Broadcom BCM43xx wireless driver
4
5 Transmission (TX/RX) related functions.
6
7 Copyright (c) 2005 Martin Langer <martin-langer@gmx.de>,
8 Stefano Brivio <st3@riseup.net>
9 Michael Buesch <mbuesch@freenet.de>
10 Danny van Dyk <kugelfang@gentoo.org>
11 Andreas Jaggi <andreas.jaggi@waterwave.ch>
12
13 This program is free software; you can redistribute it and/or modify
14 it under the terms of the GNU General Public License as published by
15 the Free Software Foundation; either version 2 of the License, or
16 (at your option) any later version.
17
18 This program is distributed in the hope that it will be useful,
19 but WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 GNU General Public License for more details.
22
23 You should have received a copy of the GNU General Public License
24 along with this program; see the file COPYING. If not, write to
25 the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
26 Boston, MA 02110-1301, USA.
27
28*/
29
30#include "bcm43xx_xmit.h"
31
32#include <linux/etherdevice.h>
33
34
35/* Extract the bitrate out of a CCK PLCP header. */
36static u8 bcm43xx_plcp_get_bitrate_cck(struct bcm43xx_plcp_hdr4 *plcp)
37{
38 switch (plcp->raw[0]) {
39 case 0x0A:
40 return IEEE80211_CCK_RATE_1MB;
41 case 0x14:
42 return IEEE80211_CCK_RATE_2MB;
43 case 0x37:
44 return IEEE80211_CCK_RATE_5MB;
45 case 0x6E:
46 return IEEE80211_CCK_RATE_11MB;
47 }
48 assert(0);
49 return 0;
50}
51
52/* Extract the bitrate out of an OFDM PLCP header. */
53static u8 bcm43xx_plcp_get_bitrate_ofdm(struct bcm43xx_plcp_hdr4 *plcp)
54{
55 switch (plcp->raw[0] & 0xF) {
56 case 0xB:
57 return IEEE80211_OFDM_RATE_6MB;
58 case 0xF:
59 return IEEE80211_OFDM_RATE_9MB;
60 case 0xA:
61 return IEEE80211_OFDM_RATE_12MB;
62 case 0xE:
63 return IEEE80211_OFDM_RATE_18MB;
64 case 0x9:
65 return IEEE80211_OFDM_RATE_24MB;
66 case 0xD:
67 return IEEE80211_OFDM_RATE_36MB;
68 case 0x8:
69 return IEEE80211_OFDM_RATE_48MB;
70 case 0xC:
71 return IEEE80211_OFDM_RATE_54MB;
72 }
73 assert(0);
74 return 0;
75}
76
77u8 bcm43xx_plcp_get_ratecode_cck(const u8 bitrate)
78{
79 switch (bitrate) {
80 case IEEE80211_CCK_RATE_1MB:
81 return 0x0A;
82 case IEEE80211_CCK_RATE_2MB:
83 return 0x14;
84 case IEEE80211_CCK_RATE_5MB:
85 return 0x37;
86 case IEEE80211_CCK_RATE_11MB:
87 return 0x6E;
88 }
89 assert(0);
90 return 0;
91}
92
93u8 bcm43xx_plcp_get_ratecode_ofdm(const u8 bitrate)
94{
95 switch (bitrate) {
96 case IEEE80211_OFDM_RATE_6MB:
97 return 0xB;
98 case IEEE80211_OFDM_RATE_9MB:
99 return 0xF;
100 case IEEE80211_OFDM_RATE_12MB:
101 return 0xA;
102 case IEEE80211_OFDM_RATE_18MB:
103 return 0xE;
104 case IEEE80211_OFDM_RATE_24MB:
105 return 0x9;
106 case IEEE80211_OFDM_RATE_36MB:
107 return 0xD;
108 case IEEE80211_OFDM_RATE_48MB:
109 return 0x8;
110 case IEEE80211_OFDM_RATE_54MB:
111 return 0xC;
112 }
113 assert(0);
114 return 0;
115}
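/*
 * Illustrative sketch, not part of the driver: the two lookup pairs above are
 * mutual inverses between ieee80211 rate codes (assumed to be in the usual
 * 500 kbps units, e.g. IEEE80211_CCK_RATE_11MB == 0x16 == 22) and the on-air
 * CCK PLCP signal byte, which encodes the rate in 100 kbps units:
 */
static const unsigned char demo_cck_rate_to_plcp[][2] = {
	/* { ieee80211 rate code, PLCP signal byte } */
	{ 0x02, 0x0A },		/*  1 Mbps   ->  10 x 100 kbps */
	{ 0x04, 0x14 },		/*  2 Mbps   ->  20 x 100 kbps */
	{ 0x0B, 0x37 },		/*  5.5 Mbps ->  55 x 100 kbps */
	{ 0x16, 0x6E },		/* 11 Mbps   -> 110 x 100 kbps */
};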
116
117static void bcm43xx_generate_plcp_hdr(struct bcm43xx_plcp_hdr4 *plcp,
118 const u16 octets, const u8 bitrate,
119 const int ofdm_modulation)
120{
121 __le32 *data = &(plcp->data);
122 __u8 *raw = plcp->raw;
123
124 if (ofdm_modulation) {
125 u32 val = bcm43xx_plcp_get_ratecode_ofdm(bitrate);
126 assert(!(octets & 0xF000));
127 val |= (octets << 5);
128 *data = cpu_to_le32(val);
129 } else {
130 u32 plen;
131
132 plen = octets * 16 / bitrate;
133 if ((octets * 16 % bitrate) > 0) {
134 plen++;
135 if ((bitrate == IEEE80211_CCK_RATE_11MB)
136 && ((octets * 8 % 11) < 4)) {
137 raw[1] = 0x84;
138 } else
139 raw[1] = 0x04;
140 } else
141 raw[1] = 0x04;
142 *data |= cpu_to_le32(plen << 16);
143 raw[0] = bcm43xx_plcp_get_ratecode_cck(bitrate);
144 }
145}
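/*
 * Illustrative sketch, not part of the driver: in the CCK branch above,
 * "octets * 16 / bitrate" is the frame airtime in microseconds (assuming the
 * rate code is in 500 kbps units), and raw[1] = 0x84 marks the 11 Mbps
 * length-extension case. The same arithmetic as a standalone helper:
 */
static unsigned int demo_cck_plcp_length_us(unsigned int octets,
					    unsigned int rate /* 500 kbps units */)
{
	unsigned int plen = octets * 16 / rate;

	if (octets * 16 % rate)
		plen++;		/* round the airtime up to a whole microsecond */
	return plen;
}
/*
 * Example: 40 octets at 11 Mbps (rate code 22): 640 / 22 rounds up to 30 us,
 * and 40 * 8 % 11 == 1 (< 4), so the code above would also set raw[1] = 0x84.
 */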
146
147static u8 bcm43xx_calc_fallback_rate(u8 bitrate)
148{
149 switch (bitrate) {
150 case IEEE80211_CCK_RATE_1MB:
151 return IEEE80211_CCK_RATE_1MB;
152 case IEEE80211_CCK_RATE_2MB:
153 return IEEE80211_CCK_RATE_1MB;
154 case IEEE80211_CCK_RATE_5MB:
155 return IEEE80211_CCK_RATE_2MB;
156 case IEEE80211_CCK_RATE_11MB:
157 return IEEE80211_CCK_RATE_5MB;
158 case IEEE80211_OFDM_RATE_6MB:
159 return IEEE80211_CCK_RATE_5MB;
160 case IEEE80211_OFDM_RATE_9MB:
161 return IEEE80211_OFDM_RATE_6MB;
162 case IEEE80211_OFDM_RATE_12MB:
163 return IEEE80211_OFDM_RATE_9MB;
164 case IEEE80211_OFDM_RATE_18MB:
165 return IEEE80211_OFDM_RATE_12MB;
166 case IEEE80211_OFDM_RATE_24MB:
167 return IEEE80211_OFDM_RATE_18MB;
168 case IEEE80211_OFDM_RATE_36MB:
169 return IEEE80211_OFDM_RATE_24MB;
170 case IEEE80211_OFDM_RATE_48MB:
171 return IEEE80211_OFDM_RATE_36MB;
172 case IEEE80211_OFDM_RATE_54MB:
173 return IEEE80211_OFDM_RATE_48MB;
174 }
175 assert(0);
176 return 0;
177}
178
179static
180__le16 bcm43xx_calc_duration_id(const struct ieee80211_hdr *wireless_header,
181 u8 bitrate)
182{
183 const u16 frame_ctl = le16_to_cpu(wireless_header->frame_ctl);
184 __le16 duration_id = wireless_header->duration_id;
185
186 switch (WLAN_FC_GET_TYPE(frame_ctl)) {
187 case IEEE80211_FTYPE_DATA:
188 case IEEE80211_FTYPE_MGMT:
189 //TODO: Steal the code from ieee80211, once it is completed there.
190 break;
191 case IEEE80211_FTYPE_CTL:
192 /* Use the original duration/id. */
193 break;
194 default:
195 assert(0);
196 }
197
198 return duration_id;
199}
200
201static inline
202u16 ceiling_div(u16 dividend, u16 divisor)
203{
204 return ((dividend + divisor - 1) / divisor);
205}
206
207static void bcm43xx_generate_rts(const struct bcm43xx_phyinfo *phy,
208 struct bcm43xx_txhdr *txhdr,
209 u16 *flags,
210 u8 bitrate,
211 const struct ieee80211_hdr_4addr *wlhdr)
212{
213 u16 fctl;
214 u16 dur;
215 u8 fallback_bitrate;
216 int ofdm_modulation;
217 int fallback_ofdm_modulation;
218// u8 *sa, *da;
219 u16 flen;
220
221//FIXME sa = ieee80211_get_SA((struct ieee80211_hdr *)wlhdr);
222//FIXME da = ieee80211_get_DA((struct ieee80211_hdr *)wlhdr);
223 fallback_bitrate = bcm43xx_calc_fallback_rate(bitrate);
224 ofdm_modulation = !(ieee80211_is_cck_rate(bitrate));
225 fallback_ofdm_modulation = !(ieee80211_is_cck_rate(fallback_bitrate));
226
227 flen = sizeof(u16) + sizeof(u16) + ETH_ALEN + ETH_ALEN + IEEE80211_FCS_LEN,
228 bcm43xx_generate_plcp_hdr((struct bcm43xx_plcp_hdr4 *)(&txhdr->rts_cts_plcp),
229 flen, bitrate,
230 !ieee80211_is_cck_rate(bitrate));
231 bcm43xx_generate_plcp_hdr((struct bcm43xx_plcp_hdr4 *)(&txhdr->rts_cts_fallback_plcp),
232 flen, fallback_bitrate,
233 !ieee80211_is_cck_rate(fallback_bitrate));
234 fctl = IEEE80211_FTYPE_CTL;
235 fctl |= IEEE80211_STYPE_RTS;
236 dur = le16_to_cpu(wlhdr->duration_id);
237/*FIXME: should we test for dur==0 here and leave it unmodified in this case?
238 * The following assert checks for this case...
239 */
240assert(dur);
241/*FIXME: The duration calculation is not really correct.
242 * I am not 100% sure which bitrate to use. We use the RTS rate here,
243 * but this is likely to be wrong.
244 */
245 if (phy->type == BCM43xx_PHYTYPE_A) {
246 /* Three times SIFS */
247 dur += 16 * 3;
248 /* Add ACK duration. */
249 dur += ceiling_div((16 + 8 * (14 /*bytes*/) + 6) * 10,
250 bitrate * 4);
251 /* Add CTS duration. */
252 dur += ceiling_div((16 + 8 * (14 /*bytes*/) + 6) * 10,
253 bitrate * 4);
254 } else {
255 /* Three times SIFS */
256 dur += 10 * 3;
257 /* Add ACK duration. */
258 dur += ceiling_div(8 * (14 /*bytes*/) * 10,
259 bitrate);
260 /* Add CTS duration. */
261 dur += ceiling_div(8 * (14 /*bytes*/) * 10,
262 bitrate);
263 }
264
265 txhdr->rts_cts_frame_control = cpu_to_le16(fctl);
266 txhdr->rts_cts_dur = cpu_to_le16(dur);
267//printk(BCM43xx_MACFMT " " BCM43xx_MACFMT " " BCM43xx_MACFMT "\n", BCM43xx_MACARG(wlhdr->addr1), BCM43xx_MACARG(wlhdr->addr2), BCM43xx_MACARG(wlhdr->addr3));
268//printk(BCM43xx_MACFMT " " BCM43xx_MACFMT "\n", BCM43xx_MACARG(sa), BCM43xx_MACARG(da));
269 memcpy(txhdr->rts_cts_mac1, wlhdr->addr1, ETH_ALEN);//FIXME!
270// memcpy(txhdr->rts_cts_mac2, sa, ETH_ALEN);
271
272 *flags |= BCM43xx_TXHDRFLAG_RTSCTS;
273 *flags |= BCM43xx_TXHDRFLAG_RTS;
274 if (ofdm_modulation)
275 *flags |= BCM43xx_TXHDRFLAG_RTSCTS_OFDM;
276 if (fallback_ofdm_modulation)
277 *flags |= BCM43xx_TXHDRFLAG_RTSCTSFALLBACK_OFDM;
278}
279
280void bcm43xx_generate_txhdr(struct bcm43xx_private *bcm,
281 struct bcm43xx_txhdr *txhdr,
282 const unsigned char *fragment_data,
283 const unsigned int fragment_len,
284 const int is_first_fragment,
285 const u16 cookie)
286{
287 const struct bcm43xx_phyinfo *phy = bcm43xx_current_phy(bcm);
288 const struct ieee80211_hdr_4addr *wireless_header = (const struct ieee80211_hdr_4addr *)fragment_data;
289 const struct ieee80211_security *secinfo = &bcm->ieee->sec;
290 u8 bitrate;
291 u8 fallback_bitrate;
292 int ofdm_modulation;
293 int fallback_ofdm_modulation;
294 u16 plcp_fragment_len = fragment_len;
295 u16 flags = 0;
296 u16 control = 0;
297 u16 wsec_rate = 0;
298 u16 encrypt_frame;
299 const u16 ftype = WLAN_FC_GET_TYPE(le16_to_cpu(wireless_header->frame_ctl));
300 const int is_mgt = (ftype == IEEE80211_FTYPE_MGMT);
301
302 /* Now construct the TX header. */
303 memset(txhdr, 0, sizeof(*txhdr));
304
305 bitrate = ieee80211softmac_suggest_txrate(bcm->softmac,
306 is_multicast_ether_addr(wireless_header->addr1), is_mgt);
307 ofdm_modulation = !(ieee80211_is_cck_rate(bitrate));
308 fallback_bitrate = bcm43xx_calc_fallback_rate(bitrate);
309 fallback_ofdm_modulation = !(ieee80211_is_cck_rate(fallback_bitrate));
310
311 /* Set Frame Control from 80211 header. */
312 txhdr->frame_control = wireless_header->frame_ctl;
313 /* Copy address1 from 80211 header. */
314 memcpy(txhdr->mac1, wireless_header->addr1, 6);
315 /* Set the fallback duration ID. */
316 txhdr->fallback_dur_id = bcm43xx_calc_duration_id((const struct ieee80211_hdr *)wireless_header,
317 fallback_bitrate);
318 /* Set the cookie (used as driver internal ID for the frame) */
319 txhdr->cookie = cpu_to_le16(cookie);
320
321 /* Hardware appends FCS. */
322 plcp_fragment_len += IEEE80211_FCS_LEN;
323
324 /* Hardware encryption. */
325 encrypt_frame = le16_to_cpup(&wireless_header->frame_ctl) & IEEE80211_FCTL_PROTECTED;
326 if (encrypt_frame && !bcm->ieee->host_encrypt) {
327 const struct ieee80211_hdr_3addr *hdr = (struct ieee80211_hdr_3addr *)wireless_header;
328 memcpy(txhdr->wep_iv, hdr->payload, 4);
329 /* Hardware appends ICV. */
330 plcp_fragment_len += 4;
331
332 wsec_rate |= (bcm->key[secinfo->active_key].algorithm << BCM43xx_TXHDR_WSEC_ALGO_SHIFT)
333 & BCM43xx_TXHDR_WSEC_ALGO_MASK;
334 wsec_rate |= (secinfo->active_key << BCM43xx_TXHDR_WSEC_KEYINDEX_SHIFT)
335 & BCM43xx_TXHDR_WSEC_KEYINDEX_MASK;
336 }
337
338 /* Generate the PLCP header and the fallback PLCP header. */
339 bcm43xx_generate_plcp_hdr((struct bcm43xx_plcp_hdr4 *)(&txhdr->plcp),
340 plcp_fragment_len,
341 bitrate, ofdm_modulation);
342 bcm43xx_generate_plcp_hdr(&txhdr->fallback_plcp, plcp_fragment_len,
343 fallback_bitrate, fallback_ofdm_modulation);
344
345 /* Set the CONTROL field */
346 if (ofdm_modulation)
347 control |= BCM43xx_TXHDRCTL_OFDM;
348 if (bcm->short_preamble) //FIXME: could be the other way around, please test
349 control |= BCM43xx_TXHDRCTL_SHORT_PREAMBLE;
350 control |= (phy->antenna_diversity << BCM43xx_TXHDRCTL_ANTENNADIV_SHIFT)
351 & BCM43xx_TXHDRCTL_ANTENNADIV_MASK;
352
353 /* Set the FLAGS field */
354 if (!is_multicast_ether_addr(wireless_header->addr1) &&
355 !is_broadcast_ether_addr(wireless_header->addr1))
356 flags |= BCM43xx_TXHDRFLAG_EXPECTACK;
357 if (1 /* FIXME: PS poll?? */)
358 flags |= 0x10; // FIXME: unknown meaning.
359 if (fallback_ofdm_modulation)
360 flags |= BCM43xx_TXHDRFLAG_FALLBACKOFDM;
361 if (is_first_fragment)
362 flags |= BCM43xx_TXHDRFLAG_FIRSTFRAGMENT;
363
364 /* Set WSEC/RATE field */
365 wsec_rate |= (txhdr->plcp.raw[0] << BCM43xx_TXHDR_RATE_SHIFT)
366 & BCM43xx_TXHDR_RATE_MASK;
367
368 /* Generate the RTS/CTS packet, if required. */
369 /* FIXME: We should first try with CTS-to-self,
370 * if we are on 80211g. If we get too many
371 * failures (hidden nodes), we should switch back to RTS/CTS.
372 */
373 if (0/*FIXME txctl->use_rts_cts*/) {
374 bcm43xx_generate_rts(phy, txhdr, &flags,
375 0/*FIXME txctl->rts_cts_rate*/,
376 wireless_header);
377 }
378
379 txhdr->flags = cpu_to_le16(flags);
380 txhdr->control = cpu_to_le16(control);
381 txhdr->wsec_rate = cpu_to_le16(wsec_rate);
382}
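/*
 * Illustrative worked example, not part of the driver: the WSEC/RATE word is
 * packed with the shift/mask values from bcm43xx_xmit.h. With example inputs
 * (algorithm 2, key index 1, CCK PLCP byte 0x6E for 11 Mbps -- values chosen
 * only to make the arithmetic concrete):
 *
 *	(2    << 0) & 0x0003  = 0x0002
 *	(1    << 4) & 0x00F0  = 0x0010
 *	(0x6E << 8) & 0x0F00  = 0x0E00
 *	---------------------------------
 *	wsec_rate             = 0x0E12
 */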
383
384static s8 bcm43xx_rssi_postprocess(struct bcm43xx_private *bcm,
385 u8 in_rssi, int ofdm,
386 int adjust_2053, int adjust_2050)
387{
388 struct bcm43xx_radioinfo *radio = bcm43xx_current_radio(bcm);
389 struct bcm43xx_phyinfo *phy = bcm43xx_current_phy(bcm);
390 s32 tmp;
391
392 switch (radio->version) {
393 case 0x2050:
394 if (ofdm) {
395 tmp = in_rssi;
396 if (tmp > 127)
397 tmp -= 256;
398 tmp *= 73;
399 tmp /= 64;
400 if (adjust_2050)
401 tmp += 25;
402 else
403 tmp -= 3;
404 } else {
405 if (bcm->sprom.boardflags & BCM43xx_BFL_RSSI) {
406 if (in_rssi > 63)
407 in_rssi = 63;
408 tmp = radio->nrssi_lt[in_rssi];
409 tmp = 31 - tmp;
410 tmp *= -131;
411 tmp /= 128;
412 tmp -= 57;
413 } else {
414 tmp = in_rssi;
415 tmp = 31 - tmp;
416 tmp *= -149;
417 tmp /= 128;
418 tmp -= 68;
419 }
420 if (phy->type == BCM43xx_PHYTYPE_G &&
421 adjust_2050)
422 tmp += 25;
423 }
424 break;
425 case 0x2060:
426 if (in_rssi > 127)
427 tmp = in_rssi - 256;
428 else
429 tmp = in_rssi;
430 break;
431 default:
432 tmp = in_rssi;
433 tmp -= 11;
434 tmp *= 103;
435 tmp /= 64;
436 if (adjust_2053)
437 tmp -= 109;
438 else
439 tmp -= 83;
440 }
441
442 return (s8)tmp;
443}
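/*
 * Illustrative worked example, not part of the driver: for the default
 * (neither 0x2050 nor 0x2060) branch above, with in_rssi = 50 and no 2053
 * adjustment:
 *
 *	(50 - 11) * 103 / 64 = 62	(integer division)
 *	62 - 83              = -21	(returned as s8)
 */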
444
445//TODO
446#if 0
447static s8 bcm43xx_rssinoise_postprocess(struct bcm43xx_private *bcm,
448 u8 in_rssi)
449{
450 struct bcm43xx_phyinfo *phy = bcm43xx_current_phy(bcm);
451 s8 ret;
452
453 if (phy->type == BCM43xx_PHYTYPE_A) {
454 //TODO: Incomplete specs.
455 ret = 0;
456 } else
457 ret = bcm43xx_rssi_postprocess(bcm, in_rssi, 0, 1, 1);
458
459 return ret;
460}
461#endif
462
463int bcm43xx_rx(struct bcm43xx_private *bcm,
464 struct sk_buff *skb,
465 struct bcm43xx_rxhdr *rxhdr)
466{
467 struct bcm43xx_radioinfo *radio = bcm43xx_current_radio(bcm);
468 struct bcm43xx_phyinfo *phy = bcm43xx_current_phy(bcm);
469 struct bcm43xx_plcp_hdr4 *plcp;
470 struct ieee80211_rx_stats stats;
471 struct ieee80211_hdr_4addr *wlhdr;
472 u16 frame_ctl;
473 int is_packet_for_us = 0;
474 int err = -EINVAL;
475 const u16 rxflags1 = le16_to_cpu(rxhdr->flags1);
476 const u16 rxflags2 = le16_to_cpu(rxhdr->flags2);
477 const u16 rxflags3 = le16_to_cpu(rxhdr->flags3);
478 const int is_ofdm = !!(rxflags1 & BCM43xx_RXHDR_FLAGS1_OFDM);
479
480 if (rxflags2 & BCM43xx_RXHDR_FLAGS2_TYPE2FRAME) {
481 plcp = (struct bcm43xx_plcp_hdr4 *)(skb->data + 2);
482 /* Skip two unknown bytes and the PLCP header. */
483 skb_pull(skb, 2 + sizeof(struct bcm43xx_plcp_hdr6));
484 } else {
485 plcp = (struct bcm43xx_plcp_hdr4 *)(skb->data);
486 /* Skip the PLCP header. */
487 skb_pull(skb, sizeof(struct bcm43xx_plcp_hdr6));
488 }
489 /* The SKB contains the PAYLOAD (wireless header + data)
490 * at this point. The FCS at the end is stripped.
491 */
492
493 memset(&stats, 0, sizeof(stats));
494 stats.mac_time = le16_to_cpu(rxhdr->mactime);
495 stats.rssi = rxhdr->rssi;
496 stats.signal = bcm43xx_rssi_postprocess(bcm, rxhdr->rssi, is_ofdm,
497 !!(rxflags1 & BCM43xx_RXHDR_FLAGS1_2053RSSIADJ),
498 !!(rxflags3 & BCM43xx_RXHDR_FLAGS3_2050RSSIADJ));
499 stats.noise = bcm->stats.noise;
500 if (is_ofdm)
501 stats.rate = bcm43xx_plcp_get_bitrate_ofdm(plcp);
502 else
503 stats.rate = bcm43xx_plcp_get_bitrate_cck(plcp);
504 stats.received_channel = radio->channel;
505 stats.mask = IEEE80211_STATMASK_SIGNAL |
506 IEEE80211_STATMASK_NOISE |
507 IEEE80211_STATMASK_RATE |
508 IEEE80211_STATMASK_RSSI;
509 if (phy->type == BCM43xx_PHYTYPE_A)
510 stats.freq = IEEE80211_52GHZ_BAND;
511 else
512 stats.freq = IEEE80211_24GHZ_BAND;
513 stats.len = skb->len;
514
515 bcm->stats.last_rx = jiffies;
516 if (bcm->ieee->iw_mode == IW_MODE_MONITOR) {
517 err = ieee80211_rx(bcm->ieee, skb, &stats);
518 return (err == 0) ? -EINVAL : 0;
519 }
520
521 wlhdr = (struct ieee80211_hdr_4addr *)(skb->data);
522
523 switch (bcm->ieee->iw_mode) {
524 case IW_MODE_ADHOC:
525 if (memcmp(wlhdr->addr1, bcm->net_dev->dev_addr, ETH_ALEN) == 0 ||
526 memcmp(wlhdr->addr3, bcm->ieee->bssid, ETH_ALEN) == 0 ||
527 is_broadcast_ether_addr(wlhdr->addr1) ||
528 is_multicast_ether_addr(wlhdr->addr1) ||
529 bcm->net_dev->flags & IFF_PROMISC)
530 is_packet_for_us = 1;
531 break;
532 case IW_MODE_INFRA:
533 default:
534 /* When receiving multicast or broadcast packets, filter out
535 the packets we send ourselves; we shouldn't see those */
536 if (memcmp(wlhdr->addr3, bcm->ieee->bssid, ETH_ALEN) == 0 ||
537 memcmp(wlhdr->addr1, bcm->net_dev->dev_addr, ETH_ALEN) == 0 ||
538 (memcmp(wlhdr->addr3, bcm->net_dev->dev_addr, ETH_ALEN) &&
539 (is_broadcast_ether_addr(wlhdr->addr1) ||
540 is_multicast_ether_addr(wlhdr->addr1) ||
541 bcm->net_dev->flags & IFF_PROMISC)))
542 is_packet_for_us = 1;
543 break;
544 }
545
546 frame_ctl = le16_to_cpu(wlhdr->frame_ctl);
547 switch (WLAN_FC_GET_TYPE(frame_ctl)) {
548 case IEEE80211_FTYPE_MGMT:
549 ieee80211_rx_mgt(bcm->ieee, wlhdr, &stats);
550 break;
551 case IEEE80211_FTYPE_DATA:
552 if (is_packet_for_us) {
553 err = ieee80211_rx(bcm->ieee, skb, &stats);
554 err = (err == 0) ? -EINVAL : 0;
555 }
556 break;
557 case IEEE80211_FTYPE_CTL:
558 break;
559 default:
560 assert(0);
561 return -EINVAL;
562 }
563
564 return err;
565}
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_xmit.h b/drivers/net/wireless/bcm43xx/bcm43xx_xmit.h
deleted file mode 100644
index 47c135a7f4dc..000000000000
--- a/drivers/net/wireless/bcm43xx/bcm43xx_xmit.h
+++ /dev/null
@@ -1,150 +0,0 @@
1#ifndef BCM43xx_XMIT_H_
2#define BCM43xx_XMIT_H_
3
4#include "bcm43xx_main.h"
5
6
7#define _bcm43xx_declare_plcp_hdr(size) \
8 struct bcm43xx_plcp_hdr##size { \
9 union { \
10 __le32 data; \
11 __u8 raw[size]; \
12 } __attribute__((__packed__)); \
13 } __attribute__((__packed__))
14
15/* struct bcm43xx_plcp_hdr4 */
16_bcm43xx_declare_plcp_hdr(4);
17/* struct bcm43xx_plcp_hdr6 */
18_bcm43xx_declare_plcp_hdr(6);
19
20#undef _bcm43xx_declare_plcp_hdr
21
22/* Device specific TX header. To be prepended to TX frames. */
23struct bcm43xx_txhdr {
24 union {
25 struct {
26 __le16 flags;
27 __le16 wsec_rate;
28 __le16 frame_control;
29 u16 unknown_zeroed_0;
30 __le16 control;
31 u8 wep_iv[10];
32 u8 unknown_wsec_tkip_data[3]; //FIXME
33 PAD_BYTES(3);
34 u8 mac1[6];
35 u16 unknown_zeroed_1;
36 struct bcm43xx_plcp_hdr4 rts_cts_fallback_plcp;
37 __le16 rts_cts_dur_fallback;
38 struct bcm43xx_plcp_hdr4 fallback_plcp;
39 __le16 fallback_dur_id;
40 PAD_BYTES(2);
41 __le16 cookie;
42 __le16 unknown_scb_stuff; //FIXME
43 struct bcm43xx_plcp_hdr6 rts_cts_plcp;
44 __le16 rts_cts_frame_control;
45 __le16 rts_cts_dur;
46 u8 rts_cts_mac1[6];
47 u8 rts_cts_mac2[6];
48 PAD_BYTES(2);
49 struct bcm43xx_plcp_hdr6 plcp;
50 } __attribute__((__packed__));
51 u8 raw[82];
52 } __attribute__((__packed__));
53} __attribute__((__packed__));
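/*
 * Illustrative sketch, not part of the driver: the raw[82] union view above
 * pins the expected on-the-wire size of the TX header, so (assuming the
 * packed fields really do add up to 82 bytes) a build-time check along these
 * lines would catch accidental layout drift:
 *
 *	BUILD_BUG_ON(sizeof(struct bcm43xx_txhdr) != 82);
 */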
54
55/* Values/Masks for the device TX header */
56#define BCM43xx_TXHDRFLAG_EXPECTACK 0x0001
57#define BCM43xx_TXHDRFLAG_RTSCTS 0x0002
58#define BCM43xx_TXHDRFLAG_RTS 0x0004
59#define BCM43xx_TXHDRFLAG_FIRSTFRAGMENT 0x0008
60#define BCM43xx_TXHDRFLAG_DESTPSMODE 0x0020
61#define BCM43xx_TXHDRFLAG_RTSCTS_OFDM 0x0080
62#define BCM43xx_TXHDRFLAG_FALLBACKOFDM 0x0100
63#define BCM43xx_TXHDRFLAG_RTSCTSFALLBACK_OFDM 0x0200
64#define BCM43xx_TXHDRFLAG_CTS 0x0400
65#define BCM43xx_TXHDRFLAG_FRAMEBURST 0x0800
66
67#define BCM43xx_TXHDRCTL_OFDM 0x0001
68#define BCM43xx_TXHDRCTL_SHORT_PREAMBLE 0x0010
69#define BCM43xx_TXHDRCTL_ANTENNADIV_MASK 0x0030
70#define BCM43xx_TXHDRCTL_ANTENNADIV_SHIFT 8
71
72#define BCM43xx_TXHDR_RATE_MASK 0x0F00
73#define BCM43xx_TXHDR_RATE_SHIFT 8
74#define BCM43xx_TXHDR_RTSRATE_MASK 0xF000
75#define BCM43xx_TXHDR_RTSRATE_SHIFT 12
76#define BCM43xx_TXHDR_WSEC_KEYINDEX_MASK 0x00F0
77#define BCM43xx_TXHDR_WSEC_KEYINDEX_SHIFT 4
78#define BCM43xx_TXHDR_WSEC_ALGO_MASK 0x0003
79#define BCM43xx_TXHDR_WSEC_ALGO_SHIFT 0
80
81void bcm43xx_generate_txhdr(struct bcm43xx_private *bcm,
82 struct bcm43xx_txhdr *txhdr,
83 const unsigned char *fragment_data,
84 const unsigned int fragment_len,
85 const int is_first_fragment,
86 const u16 cookie);
87
88/* RX header as received from the hardware. */
89struct bcm43xx_rxhdr {
90 /* Frame Length. Must be generated explicitly in PIO mode. */
91 __le16 frame_length;
92 PAD_BYTES(2);
93 /* Flags field 1 */
94 __le16 flags1;
95 u8 rssi;
96 u8 signal_quality;
97 PAD_BYTES(2);
98 /* Flags field 3 */
99 __le16 flags3;
100 /* Flags field 2 */
101 __le16 flags2;
102 /* Lower 16bits of the TSF at the time the frame started. */
103 __le16 mactime;
104 PAD_BYTES(14);
105} __attribute__((__packed__));
106
107#define BCM43xx_RXHDR_FLAGS1_OFDM (1 << 0)
108/*#define BCM43xx_RXHDR_FLAGS1_SIGNAL??? (1 << 3) FIXME */
109#define BCM43xx_RXHDR_FLAGS1_SHORTPREAMBLE (1 << 7)
110#define BCM43xx_RXHDR_FLAGS1_2053RSSIADJ (1 << 14)
111
112#define BCM43xx_RXHDR_FLAGS2_INVALIDFRAME (1 << 0)
113#define BCM43xx_RXHDR_FLAGS2_TYPE2FRAME (1 << 2)
114/*FIXME: WEP related flags */
115
116#define BCM43xx_RXHDR_FLAGS3_2050RSSIADJ (1 << 10)
117
118/* Transmit Status as received from the hardware. */
119struct bcm43xx_hwxmitstatus {
120 PAD_BYTES(4);
121 __le16 cookie;
122 u8 flags;
123 u8 cnt1:4,
124 cnt2:4;
125 PAD_BYTES(2);
126 __le16 seq;
127 __le16 unknown; //FIXME
128} __attribute__((__packed__));
129
130/* Transmit Status in CPU byteorder. */
131struct bcm43xx_xmitstatus {
132 u16 cookie;
133 u8 flags;
134 u8 cnt1:4,
135 cnt2:4;
136 u16 seq;
137 u16 unknown; //FIXME
138};
139
140#define BCM43xx_TXSTAT_FLAG_AMPDU 0x10
141#define BCM43xx_TXSTAT_FLAG_INTER 0x20
142
143u8 bcm43xx_plcp_get_ratecode_cck(const u8 bitrate);
144u8 bcm43xx_plcp_get_ratecode_ofdm(const u8 bitrate);
145
146int bcm43xx_rx(struct bcm43xx_private *bcm,
147 struct sk_buff *skb,
148 struct bcm43xx_rxhdr *rxhdr);
149
150#endif /* BCM43xx_XMIT_H_ */
diff --git a/drivers/net/wireless/ipw2200.c b/drivers/net/wireless/ipw2200.c
index 98d6ff69d375..fa87c5c2ae0b 100644
--- a/drivers/net/wireless/ipw2200.c
+++ b/drivers/net/wireless/ipw2200.c
@@ -4495,9 +4495,9 @@ static void ipw_rx_notification(struct ipw_priv *priv,
 priv->
 essid_len),
 print_mac(mac, priv->bssid),
-ntohs(auth->status),
+le16_to_cpu(auth->status),
 ipw_get_status_code
-(ntohs
+(le16_to_cpu
 (auth->status)));

 priv->status &=
@@ -4532,9 +4532,9 @@ static void ipw_rx_notification(struct ipw_priv *priv,
 IPW_DL_STATE |
 IPW_DL_ASSOC,
 "association failed (0x%04X): %s\n",
-ntohs(resp->status),
+le16_to_cpu(resp->status),
 ipw_get_status_code
-(ntohs
+(le16_to_cpu
 (resp->status)));
 }

@@ -4591,8 +4591,8 @@ static void ipw_rx_notification(struct ipw_priv *priv,
 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
 IPW_DL_ASSOC,
 "authentication failed (0x%04X): %s\n",
-ntohs(auth->status),
-ipw_get_status_code(ntohs
+le16_to_cpu(auth->status),
+ipw_get_status_code(le16_to_cpu
 (auth->
 status)));
 }
@@ -10350,9 +10350,7 @@ static int ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb,
 remaining_bytes,
 PCI_DMA_TODEVICE));

-tfd->u.data.num_chunks =
-    cpu_to_le32(le32_to_cpu(tfd->u.data.num_chunks) +
-    1);
+le32_add_cpu(&tfd->u.data.num_chunks, 1);
 }
 }

diff --git a/drivers/net/wireless/ipw2200.h b/drivers/net/wireless/ipw2200.h
index fdc187e0769d..cd3295b66dd6 100644
--- a/drivers/net/wireless/ipw2200.h
+++ b/drivers/net/wireless/ipw2200.h
@@ -385,73 +385,73 @@ struct clx2_queue {
385 dma_addr_t dma_addr; /**< physical addr for BD's */ 385 dma_addr_t dma_addr; /**< physical addr for BD's */
386 int low_mark; /**< low watermark, resume queue if free space more than this */ 386 int low_mark; /**< low watermark, resume queue if free space more than this */
387 int high_mark; /**< high watermark, stop queue if free space less than this */ 387 int high_mark; /**< high watermark, stop queue if free space less than this */
388} __attribute__ ((packed)); 388} __attribute__ ((packed)); /* XXX */
389 389
390struct machdr32 { 390struct machdr32 {
391 __le16 frame_ctl; 391 __le16 frame_ctl;
392 u16 duration; // watch out for endians! 392 __le16 duration; // watch out for endians!
393 u8 addr1[MACADRR_BYTE_LEN]; 393 u8 addr1[MACADRR_BYTE_LEN];
394 u8 addr2[MACADRR_BYTE_LEN]; 394 u8 addr2[MACADRR_BYTE_LEN];
395 u8 addr3[MACADRR_BYTE_LEN]; 395 u8 addr3[MACADRR_BYTE_LEN];
396 u16 seq_ctrl; // more endians! 396 __le16 seq_ctrl; // more endians!
397 u8 addr4[MACADRR_BYTE_LEN]; 397 u8 addr4[MACADRR_BYTE_LEN];
398 __le16 qos_ctrl; 398 __le16 qos_ctrl;
399} __attribute__ ((packed)); 399} __attribute__ ((packed));
400 400
401struct machdr30 { 401struct machdr30 {
402 __le16 frame_ctl; 402 __le16 frame_ctl;
403 u16 duration; // watch out for endians! 403 __le16 duration; // watch out for endians!
404 u8 addr1[MACADRR_BYTE_LEN]; 404 u8 addr1[MACADRR_BYTE_LEN];
405 u8 addr2[MACADRR_BYTE_LEN]; 405 u8 addr2[MACADRR_BYTE_LEN];
406 u8 addr3[MACADRR_BYTE_LEN]; 406 u8 addr3[MACADRR_BYTE_LEN];
407 u16 seq_ctrl; // more endians! 407 __le16 seq_ctrl; // more endians!
408 u8 addr4[MACADRR_BYTE_LEN]; 408 u8 addr4[MACADRR_BYTE_LEN];
409} __attribute__ ((packed)); 409} __attribute__ ((packed));
410 410
411struct machdr26 { 411struct machdr26 {
412 __le16 frame_ctl; 412 __le16 frame_ctl;
413 u16 duration; // watch out for endians! 413 __le16 duration; // watch out for endians!
414 u8 addr1[MACADRR_BYTE_LEN]; 414 u8 addr1[MACADRR_BYTE_LEN];
415 u8 addr2[MACADRR_BYTE_LEN]; 415 u8 addr2[MACADRR_BYTE_LEN];
416 u8 addr3[MACADRR_BYTE_LEN]; 416 u8 addr3[MACADRR_BYTE_LEN];
417 u16 seq_ctrl; // more endians! 417 __le16 seq_ctrl; // more endians!
418 __le16 qos_ctrl; 418 __le16 qos_ctrl;
419} __attribute__ ((packed)); 419} __attribute__ ((packed));
420 420
421struct machdr24 { 421struct machdr24 {
422 __le16 frame_ctl; 422 __le16 frame_ctl;
423 u16 duration; // watch out for endians! 423 __le16 duration; // watch out for endians!
424 u8 addr1[MACADRR_BYTE_LEN]; 424 u8 addr1[MACADRR_BYTE_LEN];
425 u8 addr2[MACADRR_BYTE_LEN]; 425 u8 addr2[MACADRR_BYTE_LEN];
426 u8 addr3[MACADRR_BYTE_LEN]; 426 u8 addr3[MACADRR_BYTE_LEN];
427 u16 seq_ctrl; // more endians! 427 __le16 seq_ctrl; // more endians!
428} __attribute__ ((packed)); 428} __attribute__ ((packed));
429 429
430// TX TFD with 32 byte MAC Header 430// TX TFD with 32 byte MAC Header
431struct tx_tfd_32 { 431struct tx_tfd_32 {
432 struct machdr32 mchdr; // 32 432 struct machdr32 mchdr; // 32
433 u32 uivplaceholder[2]; // 8 433 __le32 uivplaceholder[2]; // 8
434} __attribute__ ((packed)); 434} __attribute__ ((packed));
435 435
436// TX TFD with 30 byte MAC Header 436// TX TFD with 30 byte MAC Header
437struct tx_tfd_30 { 437struct tx_tfd_30 {
438 struct machdr30 mchdr; // 30 438 struct machdr30 mchdr; // 30
439 u8 reserved[2]; // 2 439 u8 reserved[2]; // 2
440 u32 uivplaceholder[2]; // 8 440 __le32 uivplaceholder[2]; // 8
441} __attribute__ ((packed)); 441} __attribute__ ((packed));
442 442
443// tx tfd with 26 byte mac header 443// tx tfd with 26 byte mac header
444struct tx_tfd_26 { 444struct tx_tfd_26 {
445 struct machdr26 mchdr; // 26 445 struct machdr26 mchdr; // 26
446 u8 reserved1[2]; // 2 446 u8 reserved1[2]; // 2
447 u32 uivplaceholder[2]; // 8 447 __le32 uivplaceholder[2]; // 8
448 u8 reserved2[4]; // 4 448 u8 reserved2[4]; // 4
449} __attribute__ ((packed)); 449} __attribute__ ((packed));
450 450
451// tx tfd with 24 byte mac header 451// tx tfd with 24 byte mac header
452struct tx_tfd_24 { 452struct tx_tfd_24 {
453 struct machdr24 mchdr; // 24 453 struct machdr24 mchdr; // 24
454 u32 uivplaceholder[2]; // 8 454 __le32 uivplaceholder[2]; // 8
455 u8 reserved[8]; // 8 455 u8 reserved[8]; // 8
456} __attribute__ ((packed)); 456} __attribute__ ((packed));
457 457
@@ -460,7 +460,7 @@ struct tx_tfd_24 {
460struct tfd_command { 460struct tfd_command {
461 u8 index; 461 u8 index;
462 u8 length; 462 u8 length;
463 u16 reserved; 463 __le16 reserved;
464 u8 payload[0]; 464 u8 payload[0];
465} __attribute__ ((packed)); 465} __attribute__ ((packed));
466 466
@@ -562,27 +562,27 @@ struct rate_histogram {
562struct ipw_cmd_stats { 562struct ipw_cmd_stats {
563 u8 cmd_id; 563 u8 cmd_id;
564 u8 seq_num; 564 u8 seq_num;
565 u16 good_sfd; 565 __le16 good_sfd;
566 u16 bad_plcp; 566 __le16 bad_plcp;
567 u16 wrong_bssid; 567 __le16 wrong_bssid;
568 u16 valid_mpdu; 568 __le16 valid_mpdu;
569 u16 bad_mac_header; 569 __le16 bad_mac_header;
570 u16 reserved_frame_types; 570 __le16 reserved_frame_types;
571 u16 rx_ina; 571 __le16 rx_ina;
572 u16 bad_crc32; 572 __le16 bad_crc32;
573 u16 invalid_cts; 573 __le16 invalid_cts;
574 u16 invalid_acks; 574 __le16 invalid_acks;
575 u16 long_distance_ina_fina; 575 __le16 long_distance_ina_fina;
576 u16 dsp_silence_unreachable; 576 __le16 dsp_silence_unreachable;
577 u16 accumulated_rssi; 577 __le16 accumulated_rssi;
578 u16 rx_ovfl_frame_tossed; 578 __le16 rx_ovfl_frame_tossed;
579 u16 rssi_silence_threshold; 579 __le16 rssi_silence_threshold;
580 u16 rx_ovfl_frame_supplied; 580 __le16 rx_ovfl_frame_supplied;
581 u16 last_rx_frame_signal; 581 __le16 last_rx_frame_signal;
582 u16 last_rx_frame_noise; 582 __le16 last_rx_frame_noise;
583 u16 rx_autodetec_no_ofdm; 583 __le16 rx_autodetec_no_ofdm;
584 u16 rx_autodetec_no_barker; 584 __le16 rx_autodetec_no_barker;
585 u16 reserved; 585 __le16 reserved;
586} __attribute__ ((packed)); 586} __attribute__ ((packed));
587 587
588struct notif_channel_result { 588struct notif_channel_result {
@@ -637,7 +637,7 @@ struct notif_association {
637struct notif_authenticate { 637struct notif_authenticate {
638 u8 state; 638 u8 state;
639 struct machdr24 addr; 639 struct machdr24 addr;
640 u16 status; 640 __le16 status;
641} __attribute__ ((packed)); 641} __attribute__ ((packed));
642 642
643struct notif_calibration { 643struct notif_calibration {
@@ -732,14 +732,14 @@ struct ipw_rx_queue {
732struct alive_command_responce { 732struct alive_command_responce {
733 u8 alive_command; 733 u8 alive_command;
734 u8 sequence_number; 734 u8 sequence_number;
735 u16 software_revision; 735 __le16 software_revision;
736 u8 device_identifier; 736 u8 device_identifier;
737 u8 reserved1[5]; 737 u8 reserved1[5];
738 u16 reserved2; 738 __le16 reserved2;
739 u16 reserved3; 739 __le16 reserved3;
740 u16 clock_settle_time; 740 __le16 clock_settle_time;
741 u16 powerup_settle_time; 741 __le16 powerup_settle_time;
742 u16 reserved4; 742 __le16 reserved4;
743 u8 time_stamp[5]; /* month, day, year, hours, minutes */ 743 u8 time_stamp[5]; /* month, day, year, hours, minutes */
744 u8 ucode_valid; 744 u8 ucode_valid;
745} __attribute__ ((packed)); 745} __attribute__ ((packed));
@@ -878,7 +878,11 @@ static inline void ipw_set_scan_type(struct ipw_scan_request_ext *scan,
878 878
879struct ipw_associate { 879struct ipw_associate {
880 u8 channel; 880 u8 channel;
881#ifdef __LITTLE_ENDIAN_BITFIELD
881 u8 auth_type:4, auth_key:4; 882 u8 auth_type:4, auth_key:4;
883#else
884 u8 auth_key:4, auth_type:4;
885#endif
882 u8 assoc_type; 886 u8 assoc_type;
883 u8 reserved; 887 u8 reserved;
884 __le16 policy_support; 888 __le16 policy_support;
@@ -918,12 +922,12 @@ struct ipw_frag_threshold {
918struct ipw_retry_limit { 922struct ipw_retry_limit {
919 u8 short_retry_limit; 923 u8 short_retry_limit;
920 u8 long_retry_limit; 924 u8 long_retry_limit;
921 u16 reserved; 925 __le16 reserved;
922} __attribute__ ((packed)); 926} __attribute__ ((packed));
923 927
924struct ipw_dino_config { 928struct ipw_dino_config {
925 u32 dino_config_addr; 929 __le32 dino_config_addr;
926 u16 dino_config_size; 930 __le16 dino_config_size;
927 u8 dino_response; 931 u8 dino_response;
928 u8 reserved; 932 u8 reserved;
929} __attribute__ ((packed)); 933} __attribute__ ((packed));
@@ -998,7 +1002,7 @@ struct ipw_sensitivity_calib {
998 * - \a status contains status; 1002 * - \a status contains status;
999 * - \a param filled with status parameters. 1003 * - \a param filled with status parameters.
1000 */ 1004 */
1001struct ipw_cmd { 1005struct ipw_cmd { /* XXX */
1002 u32 cmd; /**< Host command */ 1006 u32 cmd; /**< Host command */
1003 u32 status;/**< Status */ 1007 u32 status;/**< Status */
1004 u32 status_len; 1008 u32 status_len;
@@ -1092,7 +1096,7 @@ struct ipw_ibss_seq {
1092 struct list_head list; 1096 struct list_head list;
1093}; 1097};
1094 1098
1095struct ipw_error_elem { 1099struct ipw_error_elem { /* XXX */
1096 u32 desc; 1100 u32 desc;
1097 u32 time; 1101 u32 time;
1098 u32 blink1; 1102 u32 blink1;
@@ -1102,13 +1106,13 @@ struct ipw_error_elem {
1102 u32 data; 1106 u32 data;
1103}; 1107};
1104 1108
1105struct ipw_event { 1109struct ipw_event { /* XXX */
1106 u32 event; 1110 u32 event;
1107 u32 time; 1111 u32 time;
1108 u32 data; 1112 u32 data;
1109} __attribute__ ((packed)); 1113} __attribute__ ((packed));
1110 1114
1111struct ipw_fw_error { 1115struct ipw_fw_error { /* XXX */
1112 unsigned long jiffies; 1116 unsigned long jiffies;
1113 u32 status; 1117 u32 status;
1114 u32 config; 1118 u32 config;
@@ -1153,7 +1157,7 @@ struct ipw_prom_priv {
1153 */ 1157 */
1154struct ipw_rt_hdr { 1158struct ipw_rt_hdr {
1155 struct ieee80211_radiotap_header rt_hdr; 1159 struct ieee80211_radiotap_header rt_hdr;
1156 u64 rt_tsf; /* TSF */ 1160 u64 rt_tsf; /* TSF */ /* XXX */
1157 u8 rt_flags; /* radiotap packet flags */ 1161 u8 rt_flags; /* radiotap packet flags */
1158 u8 rt_rate; /* rate in 500kb/s */ 1162 u8 rt_rate; /* rate in 500kb/s */
1159 __le16 rt_channel; /* channel in mhz */ 1163 __le16 rt_channel; /* channel in mhz */
@@ -1940,8 +1944,8 @@ enum {
1940#define IPW_MEM_FIXED_OVERRIDE (IPW_SHARED_LOWER_BOUND + 0x41C) 1944#define IPW_MEM_FIXED_OVERRIDE (IPW_SHARED_LOWER_BOUND + 0x41C)
1941 1945
1942struct ipw_fixed_rate { 1946struct ipw_fixed_rate {
1943 u16 tx_rates; 1947 __le16 tx_rates;
1944 u16 reserved; 1948 __le16 reserved;
1945} __attribute__ ((packed)); 1949} __attribute__ ((packed));
1946 1950
1947#define IPW_INDIRECT_ADDR_MASK (~0x3ul) 1951#define IPW_INDIRECT_ADDR_MASK (~0x3ul)
@@ -1951,12 +1955,12 @@ struct host_cmd {
1951 u8 len; 1955 u8 len;
1952 u16 reserved; 1956 u16 reserved;
1953 u32 *param; 1957 u32 *param;
1954} __attribute__ ((packed)); 1958} __attribute__ ((packed)); /* XXX */
1955 1959
1956struct cmdlog_host_cmd { 1960struct cmdlog_host_cmd {
1957 u8 cmd; 1961 u8 cmd;
1958 u8 len; 1962 u8 len;
1959 u16 reserved; 1963 __le16 reserved;
1960 char param[124]; 1964 char param[124];
1961} __attribute__ ((packed)); 1965} __attribute__ ((packed));
1962 1966
diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig
index b54ff712e703..f844b738d34e 100644
--- a/drivers/net/wireless/iwlwifi/Kconfig
+++ b/drivers/net/wireless/iwlwifi/Kconfig
@@ -1,7 +1,22 @@
1config IWLCORE
2 tristate "Intel Wireless Wifi Core"
3 depends on PCI && MAC80211 && WLAN_80211 && EXPERIMENTAL
4
5config IWLWIFI_LEDS
6 bool
7 default n
8
9config IWLWIFI_RFKILL
10 boolean "IWLWIFI RF kill support"
11 depends on IWLCORE
12 select RFKILL
13 select RFKILL_INPUT
14
1config IWL4965 15config IWL4965
2 tristate "Intel Wireless WiFi 4965AGN" 16 tristate "Intel Wireless WiFi 4965AGN"
3 depends on PCI && MAC80211 && WLAN_80211 && EXPERIMENTAL 17 depends on PCI && MAC80211 && WLAN_80211 && EXPERIMENTAL
4 select FW_LOADER 18 select FW_LOADER
19 select IWLCORE
5 ---help--- 20 ---help---
6 Select to build the driver supporting the: 21 Select to build the driver supporting the:
7 22
@@ -24,21 +39,22 @@ config IWL4965
24 say M here and read <file:Documentation/kbuild/modules.txt>. The 39 say M here and read <file:Documentation/kbuild/modules.txt>. The
25 module will be called iwl4965.ko. 40 module will be called iwl4965.ko.
26 41
27config IWL4965_QOS
28 bool "Enable Wireless QoS in iwl4965 driver"
29 depends on IWL4965
30 ---help---
31 This option will enable wireless quality of service (QoS) for the
32 iwl4965 driver.
33
34config IWL4965_HT 42config IWL4965_HT
35 bool "Enable 802.11n HT features in iwl4965 driver" 43 bool "Enable 802.11n HT features in iwl4965 driver"
36 depends on EXPERIMENTAL 44 depends on EXPERIMENTAL
37 depends on IWL4965 && IWL4965_QOS 45 depends on IWL4965
38 ---help--- 46 ---help---
39 This option enables IEEE 802.11n High Throughput features 47 This option enables IEEE 802.11n High Throughput features
40 for the iwl4965 driver. 48 for the iwl4965 driver.
41 49
50config IWL4965_LEDS
51 bool "Enable LEDS features in iwl4965 driver"
52 depends on IWL4965 && MAC80211_LEDS && LEDS_CLASS
53 select IWLWIFI_LEDS
54 ---help---
55 This option enables LEDS for the iwlwifi drivers
56
57
42config IWL4965_SPECTRUM_MEASUREMENT 58config IWL4965_SPECTRUM_MEASUREMENT
43 bool "Enable Spectrum Measurement in iwl4965 driver" 59 bool "Enable Spectrum Measurement in iwl4965 driver"
44 depends on IWL4965 60 depends on IWL4965
@@ -52,7 +68,7 @@ config IWL4965_SENSITIVITY
52 This option will enable sensitivity calibration for the iwl4965 68 This option will enable sensitivity calibration for the iwl4965
53 driver. 69 driver.
54 70
55config IWL4965_DEBUG 71config IWLWIFI_DEBUG
56 bool "Enable full debugging output in iwl4965 driver" 72 bool "Enable full debugging output in iwl4965 driver"
57 depends on IWL4965 73 depends on IWL4965
58 ---help--- 74 ---help---
@@ -78,6 +94,12 @@ config IWL4965_DEBUG
78 as the debug information can assist others in helping you resolve 94 as the debug information can assist others in helping you resolve
79 any problems you may encounter. 95 any problems you may encounter.
80 96
97config IWLWIFI_DEBUGFS
98 bool "Iwlwifi debugfs support"
99 depends on IWLCORE && IWLWIFI_DEBUG && MAC80211_DEBUGFS
100 ---help---
101 Enable creation of debugfs files for the iwlwifi drivers.
102
81config IWL3945 103config IWL3945
82 tristate "Intel PRO/Wireless 3945ABG/BG Network Connection" 104 tristate "Intel PRO/Wireless 3945ABG/BG Network Connection"
83 depends on PCI && MAC80211 && WLAN_80211 && EXPERIMENTAL 105 depends on PCI && MAC80211 && WLAN_80211 && EXPERIMENTAL
@@ -104,19 +126,18 @@ config IWL3945
104 say M here and read <file:Documentation/kbuild/modules.txt>. The 126 say M here and read <file:Documentation/kbuild/modules.txt>. The
105 module will be called iwl3945.ko. 127 module will be called iwl3945.ko.
106 128
107config IWL3945_QOS
108 bool "Enable Wireless QoS in iwl3945 driver"
109 depends on IWL3945
110 ---help---
111 This option will enable wireless quality of service (QoS) for the
112 iwl3945 driver.
113
114config IWL3945_SPECTRUM_MEASUREMENT 129config IWL3945_SPECTRUM_MEASUREMENT
115 bool "Enable Spectrum Measurement in iwl3945 drivers" 130 bool "Enable Spectrum Measurement in iwl3945 drivers"
116 depends on IWL3945 131 depends on IWL3945
117 ---help--- 132 ---help---
118 This option will enable spectrum measurement for the iwl3945 driver. 133 This option will enable spectrum measurement for the iwl3945 driver.
119 134
135config IWL3945_LEDS
136 bool "Enable LEDS features in iwl3945 driver"
137 depends on IWL3945 && MAC80211_LEDS && LEDS_CLASS
138 ---help---
139 This option enables LEDS for the iwl3945 driver.
140
120config IWL3945_DEBUG 141config IWL3945_DEBUG
121 bool "Enable full debugging output in iwl3945 driver" 142 bool "Enable full debugging output in iwl3945 driver"
122 depends on IWL3945 143 depends on IWL3945
diff --git a/drivers/net/wireless/iwlwifi/Makefile b/drivers/net/wireless/iwlwifi/Makefile
index 3bbd38358d53..4f3e88b12e3a 100644
--- a/drivers/net/wireless/iwlwifi/Makefile
+++ b/drivers/net/wireless/iwlwifi/Makefile
@@ -1,5 +1,13 @@
+obj-$(CONFIG_IWLCORE) := iwlcore.o
+iwlcore-objs := iwl-core.o iwl-eeprom.o iwl-hcmd.o
+iwlcore-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-debugfs.o
+iwlcore-$(CONFIG_IWLWIFI_LEDS) += iwl-led.o
+iwlcore-$(CONFIG_IWLWIFI_RFKILL) += iwl-rfkill.o
+
 obj-$(CONFIG_IWL3945) += iwl3945.o
-iwl3945-objs = iwl3945-base.o iwl-3945.o iwl-3945-rs.o
+iwl3945-objs := iwl3945-base.o iwl-3945.o iwl-3945-rs.o
+iwl3945-$(CONFIG_IWL3945_LEDS) += iwl-3945-led.o

 obj-$(CONFIG_IWL4965) += iwl4965.o
-iwl4965-objs = iwl4965-base.o iwl-4965.o iwl-4965-rs.o
+iwl4965-objs := iwl4965-base.o iwl-4965.o iwl-4965-rs.o iwl-sta.o
+
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-commands.h b/drivers/net/wireless/iwlwifi/iwl-3945-commands.h
index 46bb2c7d11dd..817ece773643 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-commands.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-commands.h
@@ -5,7 +5,7 @@
5 * 5 *
6 * GPL LICENSE SUMMARY 6 * GPL LICENSE SUMMARY
7 * 7 *
8 * Copyright(c) 2005 - 2007 Intel Corporation. All rights reserved. 8 * Copyright(c) 2005 - 2008 Intel Corporation. All rights reserved.
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as 11 * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
30 * 30 *
31 * BSD LICENSE 31 * BSD LICENSE
32 * 32 *
33 * Copyright(c) 2005 - 2007 Intel Corporation. All rights reserved. 33 * Copyright(c) 2005 - 2008 Intel Corporation. All rights reserved.
34 * All rights reserved. 34 * All rights reserved.
35 * 35 *
36 * Redistribution and use in source and binary forms, with or without 36 * Redistribution and use in source and binary forms, with or without
@@ -515,14 +515,20 @@ struct iwl3945_qosparam_cmd {
515#define STA_CONTROL_MODIFY_MSK 0x01 515#define STA_CONTROL_MODIFY_MSK 0x01
516 516
517/* key flags __le16*/ 517/* key flags __le16*/
518#define STA_KEY_FLG_ENCRYPT_MSK __constant_cpu_to_le16(0x7) 518#define STA_KEY_FLG_ENCRYPT_MSK __constant_cpu_to_le16(0x0007)
519#define STA_KEY_FLG_NO_ENC __constant_cpu_to_le16(0x0) 519#define STA_KEY_FLG_NO_ENC __constant_cpu_to_le16(0x0000)
520#define STA_KEY_FLG_WEP __constant_cpu_to_le16(0x1) 520#define STA_KEY_FLG_WEP __constant_cpu_to_le16(0x0001)
521#define STA_KEY_FLG_CCMP __constant_cpu_to_le16(0x2) 521#define STA_KEY_FLG_CCMP __constant_cpu_to_le16(0x0002)
522#define STA_KEY_FLG_TKIP __constant_cpu_to_le16(0x3) 522#define STA_KEY_FLG_TKIP __constant_cpu_to_le16(0x0003)
523 523
524#define STA_KEY_FLG_KEYID_POS 8 524#define STA_KEY_FLG_KEYID_POS 8
525#define STA_KEY_FLG_INVALID __constant_cpu_to_le16(0x0800) 525#define STA_KEY_FLG_INVALID __constant_cpu_to_le16(0x0800)
526/* wep key is either from global key (0) or from station info array (1) */
527#define STA_KEY_FLG_WEP_KEY_MAP_MSK __constant_cpu_to_le16(0x0008)
528
529/* wep key in STA: 5-bytes (0) or 13-bytes (1) */
530#define STA_KEY_FLG_KEY_SIZE_MSK __constant_cpu_to_le16(0x1000)
531#define STA_KEY_MULTICAST_MSK __constant_cpu_to_le16(0x4000)
526 532
527/* Flags indicate whether to modify vs. don't change various station params */ 533/* Flags indicate whether to modify vs. don't change various station params */
528#define STA_MODIFY_KEY_MASK 0x01 534#define STA_MODIFY_KEY_MASK 0x01
@@ -546,7 +552,8 @@ struct iwl3945_keyinfo {
546 u8 tkip_rx_tsc_byte2; /* TSC[2] for key mix ph1 detection */ 552 u8 tkip_rx_tsc_byte2; /* TSC[2] for key mix ph1 detection */
547 u8 reserved1; 553 u8 reserved1;
548 __le16 tkip_rx_ttak[5]; /* 10-byte unicast TKIP TTAK */ 554 __le16 tkip_rx_ttak[5]; /* 10-byte unicast TKIP TTAK */
549 __le16 reserved2; 555 u8 key_offset;
556 u8 reserved2;
550 u8 key[16]; /* 16-byte unicast decryption key */ 557 u8 key[16]; /* 16-byte unicast decryption key */
551} __attribute__ ((packed)); 558} __attribute__ ((packed));
552 559
@@ -659,26 +666,26 @@ struct iwl3945_rx_frame_hdr {
659 u8 payload[0]; 666 u8 payload[0];
660} __attribute__ ((packed)); 667} __attribute__ ((packed));
661 668
662#define RX_RES_STATUS_NO_CRC32_ERROR __constant_cpu_to_le32(1 << 0) 669#define RX_RES_STATUS_NO_CRC32_ERROR __constant_cpu_to_le32(1 << 0)
663#define RX_RES_STATUS_NO_RXE_OVERFLOW __constant_cpu_to_le32(1 << 1) 670#define RX_RES_STATUS_NO_RXE_OVERFLOW __constant_cpu_to_le32(1 << 1)
664 671
665#define RX_RES_PHY_FLAGS_BAND_24_MSK __constant_cpu_to_le16(1 << 0) 672#define RX_RES_PHY_FLAGS_BAND_24_MSK __constant_cpu_to_le16(1 << 0)
666#define RX_RES_PHY_FLAGS_MOD_CCK_MSK __constant_cpu_to_le16(1 << 1) 673#define RX_RES_PHY_FLAGS_MOD_CCK_MSK __constant_cpu_to_le16(1 << 1)
667#define RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK __constant_cpu_to_le16(1 << 2) 674#define RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK __constant_cpu_to_le16(1 << 2)
668#define RX_RES_PHY_FLAGS_NARROW_BAND_MSK __constant_cpu_to_le16(1 << 3) 675#define RX_RES_PHY_FLAGS_NARROW_BAND_MSK __constant_cpu_to_le16(1 << 3)
669#define RX_RES_PHY_FLAGS_ANTENNA_MSK __constant_cpu_to_le16(0xf0) 676#define RX_RES_PHY_FLAGS_ANTENNA_MSK __constant_cpu_to_le16(0xf0)
670 677
671#define RX_RES_STATUS_SEC_TYPE_MSK (0x7 << 8) 678#define RX_RES_STATUS_SEC_TYPE_MSK (0x7 << 8)
672#define RX_RES_STATUS_SEC_TYPE_NONE (0x0 << 8) 679#define RX_RES_STATUS_SEC_TYPE_NONE (0x0 << 8)
673#define RX_RES_STATUS_SEC_TYPE_WEP (0x1 << 8) 680#define RX_RES_STATUS_SEC_TYPE_WEP (0x1 << 8)
674#define RX_RES_STATUS_SEC_TYPE_CCMP (0x2 << 8) 681#define RX_RES_STATUS_SEC_TYPE_CCMP (0x2 << 8)
675#define RX_RES_STATUS_SEC_TYPE_TKIP (0x3 << 8) 682#define RX_RES_STATUS_SEC_TYPE_TKIP (0x3 << 8)
676 683
677#define RX_RES_STATUS_DECRYPT_TYPE_MSK (0x3 << 11) 684#define RX_RES_STATUS_DECRYPT_TYPE_MSK (0x3 << 11)
678#define RX_RES_STATUS_NOT_DECRYPT (0x0 << 11) 685#define RX_RES_STATUS_NOT_DECRYPT (0x0 << 11)
679#define RX_RES_STATUS_DECRYPT_OK (0x3 << 11) 686#define RX_RES_STATUS_DECRYPT_OK (0x3 << 11)
680#define RX_RES_STATUS_BAD_ICV_MIC (0x1 << 11) 687#define RX_RES_STATUS_BAD_ICV_MIC (0x1 << 11)
681#define RX_RES_STATUS_BAD_KEY_TTAK (0x2 << 11) 688#define RX_RES_STATUS_BAD_KEY_TTAK (0x2 << 11)
682 689
683struct iwl3945_rx_frame_end { 690struct iwl3945_rx_frame_end {
684 __le32 status; 691 __le32 status;
@@ -700,45 +707,6 @@ struct iwl3945_rx_frame {
700 struct iwl3945_rx_frame_end end; 707 struct iwl3945_rx_frame_end end;
701} __attribute__ ((packed)); 708} __attribute__ ((packed));
702 709
703/* Fixed (non-configurable) rx data from phy */
704#define RX_PHY_FLAGS_ANTENNAE_OFFSET (4)
705#define RX_PHY_FLAGS_ANTENNAE_MASK (0x70)
706#define IWL_AGC_DB_MASK (0x3f80) /* MASK(7,13) */
707#define IWL_AGC_DB_POS (7)
708struct iwl4965_rx_non_cfg_phy {
709 __le16 ant_selection; /* ant A bit 4, ant B bit 5, ant C bit 6 */
710 __le16 agc_info; /* agc code 0:6, agc dB 7:13, reserved 14:15 */
711 u8 rssi_info[6]; /* we use even entries, 0/2/4 for A/B/C rssi */
712 u8 pad[0];
713} __attribute__ ((packed));
714
715/*
716 * REPLY_4965_RX = 0xc3 (response only, not a command)
717 * Used only for legacy (non 11n) frames.
718 */
719#define RX_RES_PHY_CNT 14
720struct iwl4965_rx_phy_res {
721 u8 non_cfg_phy_cnt; /* non configurable DSP phy data byte count */
722 u8 cfg_phy_cnt; /* configurable DSP phy data byte count */
723 u8 stat_id; /* configurable DSP phy data set ID */
724 u8 reserved1;
725 __le64 timestamp; /* TSF at on air rise */
726 __le32 beacon_time_stamp; /* beacon at on-air rise */
727 __le16 phy_flags; /* general phy flags: band, modulation, ... */
728 __le16 channel; /* channel number */
729 __le16 non_cfg_phy[RX_RES_PHY_CNT]; /* upto 14 phy entries */
730 __le32 reserved2;
731 __le32 rate_n_flags;
732 __le16 byte_count; /* frame's byte-count */
733 __le16 reserved3;
734} __attribute__ ((packed));
735
736struct iwl4965_rx_mpdu_res_start {
737 __le16 byte_count;
738 __le16 reserved;
739} __attribute__ ((packed));
740
741
742/****************************************************************************** 710/******************************************************************************
743 * (5) 711 * (5)
744 * Tx Commands & Responses: 712 * Tx Commands & Responses:
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-core.h b/drivers/net/wireless/iwlwifi/iwl-3945-core.h
new file mode 100644
index 000000000000..bc12f97ba0b1
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-core.h
@@ -0,0 +1,80 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2008 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Tomas Winkler <tomas.winkler@intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2008 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62
63#ifndef __iwl_3945_dev_h__
64#define __iwl_3945_dev_h__
65
66#define IWL_PCI_DEVICE(dev, subdev, cfg) \
67 .vendor = PCI_VENDOR_ID_INTEL, .device = (dev), \
68 .subvendor = PCI_ANY_ID, .subdevice = (subdev), \
69 .driver_data = (kernel_ulong_t)&(cfg)
70
71#define IWL_SKU_G 0x1
72#define IWL_SKU_A 0x2
73
74struct iwl_3945_cfg {
75 const char *name;
76 const char *fw_name;
77 unsigned int sku;
78};
79
80#endif /* __iwl_3945_dev_h__ */
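/*
 * Illustrative sketch, not part of the patch: how IWL_PCI_DEVICE() and
 * struct iwl_3945_cfg above would typically be wired into a PCI id table.
 * The device id, name and firmware file below are placeholders:
 */
static struct iwl_3945_cfg iwl3945_example_cfg = {
	.name = "Intel PRO/Wireless 3945 (example)",
	.fw_name = "iwlwifi-3945.ucode",	/* placeholder firmware name */
	.sku = IWL_SKU_A | IWL_SKU_G,
};

static struct pci_device_id iwl3945_example_ids[] = {
	{IWL_PCI_DEVICE(0x4222, PCI_ANY_ID, iwl3945_example_cfg)},
	{0}
};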
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-debug.h b/drivers/net/wireless/iwlwifi/iwl-3945-debug.h
index f853c6b9f76e..f1d002f7b790 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-debug.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-debug.h
@@ -1,6 +1,6 @@
 /******************************************************************************
  *
- * Copyright(c) 2003 - 2007 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2008 Intel Corporation. All rights reserved.
  *
  * Portions of this file are derived from the ipw3945 project.
  *
@@ -40,6 +40,15 @@ do { if (iwl3945_debug_level & (level)) \
 do { if ((iwl3945_debug_level & (level)) && net_ratelimit()) \
 printk(KERN_ERR DRV_NAME": %c %s " fmt, \
 in_interrupt() ? 'I' : 'U', __FUNCTION__ , ## args); } while (0)
+
+static inline void iwl3945_print_hex_dump(int level, void *p, u32 len)
+{
+	if (!(iwl3945_debug_level & level))
+		return;
+
+	print_hex_dump(KERN_DEBUG, "iwl data: ", DUMP_PREFIX_OFFSET, 16, 1,
+		       p, len, 1);
+}
 #else
 static inline void IWL_DEBUG(int level, const char *fmt, ...)
 {
@@ -47,7 +56,12 @@ static inline void IWL_DEBUG(int level, const char *fmt, ...)
47static inline void IWL_DEBUG_LIMIT(int level, const char *fmt, ...) 56static inline void IWL_DEBUG_LIMIT(int level, const char *fmt, ...)
48{ 57{
49} 58}
50#endif /* CONFIG_IWL3945_DEBUG */ 59static inline void iwl3945_print_hex_dump(int level, void *p, u32 len)
60{
61}
62#endif /* CONFIG_IWL3945_DEBUG */
63
64
51 65
52/* 66/*
53 * To use the debug system; 67 * To use the debug system;
@@ -143,6 +157,7 @@ static inline void IWL_DEBUG_LIMIT(int level, const char *fmt, ...)
143 IWL_DEBUG_LIMIT(IWL_DL_ASSOC | IWL_DL_INFO, f, ## a) 157 IWL_DEBUG_LIMIT(IWL_DL_ASSOC | IWL_DL_INFO, f, ## a)
144#define IWL_DEBUG_HT(f, a...) IWL_DEBUG(IWL_DL_HT, f, ## a) 158#define IWL_DEBUG_HT(f, a...) IWL_DEBUG(IWL_DL_HT, f, ## a)
145#define IWL_DEBUG_STATS(f, a...) IWL_DEBUG(IWL_DL_STATS, f, ## a) 159#define IWL_DEBUG_STATS(f, a...) IWL_DEBUG(IWL_DL_STATS, f, ## a)
160#define IWL_DEBUG_STATS_LIMIT(f, a...) IWL_DEBUG_LIMIT(IWL_DL_STATS, f, ## a)
146#define IWL_DEBUG_TX_REPLY(f, a...) IWL_DEBUG(IWL_DL_TX_REPLY, f, ## a) 161#define IWL_DEBUG_TX_REPLY(f, a...) IWL_DEBUG(IWL_DL_TX_REPLY, f, ## a)
147#define IWL_DEBUG_QOS(f, a...) IWL_DEBUG(IWL_DL_QOS, f, ## a) 162#define IWL_DEBUG_QOS(f, a...) IWL_DEBUG(IWL_DL_QOS, f, ## a)
148#define IWL_DEBUG_RADIO(f, a...) IWL_DEBUG(IWL_DL_RADIO, f, ## a) 163#define IWL_DEBUG_RADIO(f, a...) IWL_DEBUG(IWL_DL_RADIO, f, ## a)
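A rough usage sketch for the helpers added in this hunk; the stats pointer and len variable are hypothetical locals, and both calls compile to no-ops when CONFIG_IWL3945_DEBUG is not set.

/* Sketch only: rate-limited stats message plus a hex dump of the payload */
IWL_DEBUG_STATS_LIMIT("statistics notification, %d bytes\n", len);
iwl3945_print_hex_dump(IWL_DL_STATS, stats, len);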
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-hw.h b/drivers/net/wireless/iwlwifi/iwl-3945-hw.h
index 571815d7e8bf..ad612a8719f4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-hw.h
@@ -5,7 +5,7 @@
5 * 5 *
6 * GPL LICENSE SUMMARY 6 * GPL LICENSE SUMMARY
7 * 7 *
8 * Copyright(c) 2005 - 2007 Intel Corporation. All rights reserved. 8 * Copyright(c) 2005 - 2008 Intel Corporation. All rights reserved.
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as 11 * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
30 * 30 *
31 * BSD LICENSE 31 * BSD LICENSE
32 * 32 *
33 * Copyright(c) 2005 - 2007 Intel Corporation. All rights reserved. 33 * Copyright(c) 2005 - 2008 Intel Corporation. All rights reserved.
34 * All rights reserved. 34 * All rights reserved.
35 * 35 *
36 * Redistribution and use in source and binary forms, with or without 36 * Redistribution and use in source and binary forms, with or without
@@ -198,43 +198,27 @@ struct iwl3945_eeprom_temperature_corr {
198 */ 198 */
199struct iwl3945_eeprom { 199struct iwl3945_eeprom {
200 u8 reserved0[16]; 200 u8 reserved0[16];
201#define EEPROM_DEVICE_ID (2*0x08) /* 2 bytes */
202 u16 device_id; /* abs.ofs: 16 */ 201 u16 device_id; /* abs.ofs: 16 */
203 u8 reserved1[2]; 202 u8 reserved1[2];
204#define EEPROM_PMC (2*0x0A) /* 2 bytes */
205 u16 pmc; /* abs.ofs: 20 */ 203 u16 pmc; /* abs.ofs: 20 */
206 u8 reserved2[20]; 204 u8 reserved2[20];
207#define EEPROM_MAC_ADDRESS (2*0x15) /* 6 bytes */
208 u8 mac_address[6]; /* abs.ofs: 42 */ 205 u8 mac_address[6]; /* abs.ofs: 42 */
209 u8 reserved3[58]; 206 u8 reserved3[58];
210#define EEPROM_BOARD_REVISION (2*0x35) /* 2 bytes */
211 u16 board_revision; /* abs.ofs: 106 */ 207 u16 board_revision; /* abs.ofs: 106 */
212 u8 reserved4[11]; 208 u8 reserved4[11];
213#define EEPROM_BOARD_PBA_NUMBER (2*0x3B+1) /* 9 bytes */
214 u8 board_pba_number[9]; /* abs.ofs: 119 */ 209 u8 board_pba_number[9]; /* abs.ofs: 119 */
215 u8 reserved5[8]; 210 u8 reserved5[8];
216#define EEPROM_VERSION (2*0x44) /* 2 bytes */
217 u16 version; /* abs.ofs: 136 */ 211 u16 version; /* abs.ofs: 136 */
218#define EEPROM_SKU_CAP (2*0x45) /* 1 bytes */
219 u8 sku_cap; /* abs.ofs: 138 */ 212 u8 sku_cap; /* abs.ofs: 138 */
220#define EEPROM_LEDS_MODE (2*0x45+1) /* 1 bytes */
221 u8 leds_mode; /* abs.ofs: 139 */ 213 u8 leds_mode; /* abs.ofs: 139 */
222#define EEPROM_OEM_MODE (2*0x46) /* 2 bytes */
223 u16 oem_mode; 214 u16 oem_mode;
224#define EEPROM_WOWLAN_MODE (2*0x47) /* 2 bytes */
225 u16 wowlan_mode; /* abs.ofs: 142 */ 215 u16 wowlan_mode; /* abs.ofs: 142 */
226#define EEPROM_LEDS_TIME_INTERVAL (2*0x48) /* 2 bytes */
227 u16 leds_time_interval; /* abs.ofs: 144 */ 216 u16 leds_time_interval; /* abs.ofs: 144 */
228#define EEPROM_LEDS_OFF_TIME (2*0x49) /* 1 bytes */
229 u8 leds_off_time; /* abs.ofs: 146 */ 217 u8 leds_off_time; /* abs.ofs: 146 */
230#define EEPROM_LEDS_ON_TIME (2*0x49+1) /* 1 bytes */
231 u8 leds_on_time; /* abs.ofs: 147 */ 218 u8 leds_on_time; /* abs.ofs: 147 */
232#define EEPROM_ALMGOR_M_VERSION (2*0x4A) /* 1 bytes */
233 u8 almgor_m_version; /* abs.ofs: 148 */ 219 u8 almgor_m_version; /* abs.ofs: 148 */
234#define EEPROM_ANTENNA_SWITCH_TYPE (2*0x4A+1) /* 1 bytes */
235 u8 antenna_switch_type; /* abs.ofs: 149 */ 220 u8 antenna_switch_type; /* abs.ofs: 149 */
236 u8 reserved6[42]; 221 u8 reserved6[42];
237#define EEPROM_REGULATORY_SKU_ID (2*0x60) /* 4 bytes */
238 u8 sku_id[4]; /* abs.ofs: 192 */ 222 u8 sku_id[4]; /* abs.ofs: 192 */
239 223
240/* 224/*
@@ -249,9 +233,7 @@ struct iwl3945_eeprom {
249 * 233 *
250 * 2.4 GHz channels 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 234 * 2.4 GHz channels 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
251 */ 235 */
252#define EEPROM_REGULATORY_BAND_1 (2*0x62) /* 2 bytes */
253 u16 band_1_count; /* abs.ofs: 196 */ 236 u16 band_1_count; /* abs.ofs: 196 */
254#define EEPROM_REGULATORY_BAND_1_CHANNELS (2*0x63) /* 28 bytes */
255 struct iwl3945_eeprom_channel band_1_channels[14]; /* abs.ofs: 196 */ 237 struct iwl3945_eeprom_channel band_1_channels[14]; /* abs.ofs: 196 */
256 238
257/* 239/*
@@ -259,36 +241,28 @@ struct iwl3945_eeprom {
259 * 5.0 GHz channels 7, 8, 11, 12, 16 241 * 5.0 GHz channels 7, 8, 11, 12, 16
260 * (4915-5080MHz) (none of these is ever supported) 242 * (4915-5080MHz) (none of these is ever supported)
261 */ 243 */
262#define EEPROM_REGULATORY_BAND_2 (2*0x71) /* 2 bytes */
263 u16 band_2_count; /* abs.ofs: 226 */ 244 u16 band_2_count; /* abs.ofs: 226 */
264#define EEPROM_REGULATORY_BAND_2_CHANNELS (2*0x72) /* 26 bytes */
265 struct iwl3945_eeprom_channel band_2_channels[13]; /* abs.ofs: 228 */ 245 struct iwl3945_eeprom_channel band_2_channels[13]; /* abs.ofs: 228 */
266 246
267/* 247/*
268 * 5.2 GHz channels 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64 248 * 5.2 GHz channels 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
269 * (5170-5320MHz) 249 * (5170-5320MHz)
270 */ 250 */
271#define EEPROM_REGULATORY_BAND_3 (2*0x7F) /* 2 bytes */
272 u16 band_3_count; /* abs.ofs: 254 */ 251 u16 band_3_count; /* abs.ofs: 254 */
273#define EEPROM_REGULATORY_BAND_3_CHANNELS (2*0x80) /* 24 bytes */
274 struct iwl3945_eeprom_channel band_3_channels[12]; /* abs.ofs: 256 */ 252 struct iwl3945_eeprom_channel band_3_channels[12]; /* abs.ofs: 256 */
275 253
276/* 254/*
277 * 5.5 GHz channels 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140 255 * 5.5 GHz channels 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
278 * (5500-5700MHz) 256 * (5500-5700MHz)
279 */ 257 */
280#define EEPROM_REGULATORY_BAND_4 (2*0x8C) /* 2 bytes */
281 u16 band_4_count; /* abs.ofs: 280 */ 258 u16 band_4_count; /* abs.ofs: 280 */
282#define EEPROM_REGULATORY_BAND_4_CHANNELS (2*0x8D) /* 22 bytes */
283 struct iwl3945_eeprom_channel band_4_channels[11]; /* abs.ofs: 282 */ 259 struct iwl3945_eeprom_channel band_4_channels[11]; /* abs.ofs: 282 */
284 260
285/* 261/*
286 * 5.7 GHz channels 145, 149, 153, 157, 161, 165 262 * 5.7 GHz channels 145, 149, 153, 157, 161, 165
287 * (5725-5825MHz) 263 * (5725-5825MHz)
288 */ 264 */
289#define EEPROM_REGULATORY_BAND_5 (2*0x98) /* 2 bytes */
290 u16 band_5_count; /* abs.ofs: 304 */ 265 u16 band_5_count; /* abs.ofs: 304 */
291#define EEPROM_REGULATORY_BAND_5_CHANNELS (2*0x99) /* 12 bytes */
292 struct iwl3945_eeprom_channel band_5_channels[6]; /* abs.ofs: 306 */ 266 struct iwl3945_eeprom_channel band_5_channels[6]; /* abs.ofs: 306 */
293 267
294 u8 reserved9[194]; 268 u8 reserved9[194];
@@ -296,15 +270,9 @@ struct iwl3945_eeprom {
296/* 270/*
297 * 3945 Txpower calibration data. 271 * 3945 Txpower calibration data.
298 */ 272 */
299#define EEPROM_TXPOWER_CALIB_GROUP0 0x200
300#define EEPROM_TXPOWER_CALIB_GROUP1 0x240
301#define EEPROM_TXPOWER_CALIB_GROUP2 0x280
302#define EEPROM_TXPOWER_CALIB_GROUP3 0x2c0
303#define EEPROM_TXPOWER_CALIB_GROUP4 0x300
304#define IWL_NUM_TX_CALIB_GROUPS 5 273#define IWL_NUM_TX_CALIB_GROUPS 5
305 struct iwl3945_eeprom_txpower_group groups[IWL_NUM_TX_CALIB_GROUPS]; 274 struct iwl3945_eeprom_txpower_group groups[IWL_NUM_TX_CALIB_GROUPS];
306/* abs.ofs: 512 */ 275/* abs.ofs: 512 */
307#define EEPROM_CALIB_TEMPERATURE_CORRECT 0x340
308 struct iwl3945_eeprom_temperature_corr corrections; /* abs.ofs: 832 */ 276 struct iwl3945_eeprom_temperature_corr corrections; /* abs.ofs: 832 */
309 u8 reserved16[172]; /* fill out to full 1024 byte block */ 277 u8 reserved16[172]; /* fill out to full 1024 byte block */
310} __attribute__ ((packed)); 278} __attribute__ ((packed));
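Since the struct above mirrors the raw 1024-byte EEPROM image, fields can be read straight off a cached copy of that image; a minimal sketch, where eeprom_buf is a hypothetical pointer to the cached image.

/* Sketch only: 'eeprom_buf' is a hypothetical cached copy of the
 * 1024-byte EEPROM image; offsets follow the abs.ofs comments above. */
const struct iwl3945_eeprom *ee = (const struct iwl3945_eeprom *)eeprom_buf;
u8 addr[6];

memcpy(addr, ee->mac_address, sizeof(addr));	/* abs. offset 42 */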
@@ -321,181 +289,6 @@ struct iwl3945_eeprom {
321#define PCI_REG_WUM8 0x0E8 289#define PCI_REG_WUM8 0x0E8
322#define PCI_CFG_PMC_PME_FROM_D3COLD_SUPPORT (0x80000000) 290#define PCI_CFG_PMC_PME_FROM_D3COLD_SUPPORT (0x80000000)
323 291
324/*=== CSR (control and status registers) ===*/
325#define CSR_BASE (0x000)
326
327#define CSR_SW_VER (CSR_BASE+0x000)
328#define CSR_HW_IF_CONFIG_REG (CSR_BASE+0x000) /* hardware interface config */
329#define CSR_INT_COALESCING (CSR_BASE+0x004) /* accum ints, 32-usec units */
330#define CSR_INT (CSR_BASE+0x008) /* host interrupt status/ack */
331#define CSR_INT_MASK (CSR_BASE+0x00c) /* host interrupt enable */
332#define CSR_FH_INT_STATUS (CSR_BASE+0x010) /* busmaster int status/ack*/
333#define CSR_GPIO_IN (CSR_BASE+0x018) /* read external chip pins */
334#define CSR_RESET (CSR_BASE+0x020) /* busmaster enable, NMI, etc*/
335#define CSR_GP_CNTRL (CSR_BASE+0x024)
336
337/*
338 * Hardware revision info
339 * Bit fields:
340 * 31-8: Reserved
341 * 7-4: Type of device: 0x0 = 4965, 0xd = 3945
342 * 3-2: Revision step: 0 = A, 1 = B, 2 = C, 3 = D
343 * 1-0: "Dash" value, as in A-1, etc.
344 */
345#define CSR_HW_REV (CSR_BASE+0x028)
346
347/* EEPROM reads */
348#define CSR_EEPROM_REG (CSR_BASE+0x02c)
349#define CSR_EEPROM_GP (CSR_BASE+0x030)
350#define CSR_GP_UCODE (CSR_BASE+0x044)
351#define CSR_UCODE_DRV_GP1 (CSR_BASE+0x054)
352#define CSR_UCODE_DRV_GP1_SET (CSR_BASE+0x058)
353#define CSR_UCODE_DRV_GP1_CLR (CSR_BASE+0x05c)
354#define CSR_UCODE_DRV_GP2 (CSR_BASE+0x060)
355#define CSR_GIO_CHICKEN_BITS (CSR_BASE+0x100)
356
357/* Analog phase-lock-loop configuration (3945 only)
358 * Set bit 24. */
359#define CSR_ANA_PLL_CFG (CSR_BASE+0x20c)
360
361/* Bits for CSR_HW_IF_CONFIG_REG */
362#define CSR_HW_IF_CONFIG_REG_BIT_ALMAGOR_MB (0x00000100)
363#define CSR_HW_IF_CONFIG_REG_BIT_ALMAGOR_MM (0x00000200)
364#define CSR_HW_IF_CONFIG_REG_BIT_SKU_MRC (0x00000400)
365#define CSR_HW_IF_CONFIG_REG_BIT_BOARD_TYPE (0x00000800)
366#define CSR_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_A (0x00000000)
367#define CSR_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_B (0x00001000)
368#define CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM (0x00200000)
369
370/* interrupt flags in INTA, set by uCode or hardware (e.g. dma),
371 * acknowledged (reset) by host writing "1" to flagged bits. */
372#define CSR_INT_BIT_FH_RX (1 << 31) /* Rx DMA, cmd responses, FH_INT[17:16] */
373#define CSR_INT_BIT_HW_ERR (1 << 29) /* DMA hardware error FH_INT[31] */
374#define CSR_INT_BIT_DNLD (1 << 28) /* uCode Download */
375#define CSR_INT_BIT_FH_TX (1 << 27) /* Tx DMA FH_INT[1:0] */
376#define CSR_INT_BIT_SCD (1 << 26) /* TXQ pointer advanced */
377#define CSR_INT_BIT_SW_ERR (1 << 25) /* uCode error */
378#define CSR_INT_BIT_RF_KILL (1 << 7) /* HW RFKILL switch GP_CNTRL[27] toggled */
379#define CSR_INT_BIT_CT_KILL (1 << 6) /* Critical temp (chip too hot) rfkill */
380#define CSR_INT_BIT_SW_RX (1 << 3) /* Rx, command responses, 3945 */
381#define CSR_INT_BIT_WAKEUP (1 << 1) /* NIC controller waking up (pwr mgmt) */
382#define CSR_INT_BIT_ALIVE (1 << 0) /* uCode interrupts once it initializes */
383
384#define CSR_INI_SET_MASK (CSR_INT_BIT_FH_RX | \
385 CSR_INT_BIT_HW_ERR | \
386 CSR_INT_BIT_FH_TX | \
387 CSR_INT_BIT_SW_ERR | \
388 CSR_INT_BIT_RF_KILL | \
389 CSR_INT_BIT_SW_RX | \
390 CSR_INT_BIT_WAKEUP | \
391 CSR_INT_BIT_ALIVE)
392
393/* interrupt flags in FH (flow handler) (PCI busmaster DMA) */
394#define CSR_FH_INT_BIT_ERR (1 << 31) /* Error */
395#define CSR_FH_INT_BIT_HI_PRIOR (1 << 30) /* High priority Rx, bypass coalescing */
396#define CSR_FH_INT_BIT_RX_CHNL2 (1 << 18) /* Rx channel 2 (3945 only) */
397#define CSR_FH_INT_BIT_RX_CHNL1 (1 << 17) /* Rx channel 1 */
398#define CSR_FH_INT_BIT_RX_CHNL0 (1 << 16) /* Rx channel 0 */
399#define CSR_FH_INT_BIT_TX_CHNL6 (1 << 6) /* Tx channel 6 (3945 only) */
400#define CSR_FH_INT_BIT_TX_CHNL1 (1 << 1) /* Tx channel 1 */
401#define CSR_FH_INT_BIT_TX_CHNL0 (1 << 0) /* Tx channel 0 */
402
403#define CSR_FH_INT_RX_MASK (CSR_FH_INT_BIT_HI_PRIOR | \
404 CSR_FH_INT_BIT_RX_CHNL2 | \
405 CSR_FH_INT_BIT_RX_CHNL1 | \
406 CSR_FH_INT_BIT_RX_CHNL0)
407
408#define CSR_FH_INT_TX_MASK (CSR_FH_INT_BIT_TX_CHNL6 | \
409 CSR_FH_INT_BIT_TX_CHNL1 | \
410 CSR_FH_INT_BIT_TX_CHNL0)
411
412
413/* RESET */
414#define CSR_RESET_REG_FLAG_NEVO_RESET (0x00000001)
415#define CSR_RESET_REG_FLAG_FORCE_NMI (0x00000002)
416#define CSR_RESET_REG_FLAG_SW_RESET (0x00000080)
417#define CSR_RESET_REG_FLAG_MASTER_DISABLED (0x00000100)
418#define CSR_RESET_REG_FLAG_STOP_MASTER (0x00000200)
419
420/* GP (general purpose) CONTROL */
421#define CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY (0x00000001)
422#define CSR_GP_CNTRL_REG_FLAG_INIT_DONE (0x00000004)
423#define CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ (0x00000008)
424#define CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP (0x00000010)
425
426#define CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN (0x00000001)
427
428#define CSR_GP_CNTRL_REG_MSK_POWER_SAVE_TYPE (0x07000000)
429#define CSR_GP_CNTRL_REG_FLAG_MAC_POWER_SAVE (0x04000000)
430#define CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW (0x08000000)
431
432
433/* EEPROM REG */
434#define CSR_EEPROM_REG_READ_VALID_MSK (0x00000001)
435#define CSR_EEPROM_REG_BIT_CMD (0x00000002)
436
437/* EEPROM GP */
438#define CSR_EEPROM_GP_VALID_MSK (0x00000006)
439#define CSR_EEPROM_GP_BAD_SIGNATURE (0x00000000)
440#define CSR_EEPROM_GP_IF_OWNER_MSK (0x00000180)
441
442/* UCODE DRV GP */
443#define CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP (0x00000001)
444#define CSR_UCODE_SW_BIT_RFKILL (0x00000002)
445#define CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED (0x00000004)
446#define CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT (0x00000008)
447
448/* GPIO */
449#define CSR_GPIO_IN_BIT_AUX_POWER (0x00000200)
450#define CSR_GPIO_IN_VAL_VAUX_PWR_SRC (0x00000000)
451#define CSR_GPIO_IN_VAL_VMAIN_PWR_SRC CSR_GPIO_IN_BIT_AUX_POWER
452
453/* GI Chicken Bits */
454#define CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX (0x00800000)
455#define CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER (0x20000000)
456
457/* CSR_ANA_PLL_CFG */
458#define CSR_ANA_PLL_CFG_SH (0x00880300)
459
460/*=== HBUS (Host-side Bus) ===*/
461#define HBUS_BASE (0x400)
462
463/*
464 * Registers for accessing device's internal SRAM memory (e.g. SCD SRAM
465 * structures, error log, event log, verifying uCode load).
466 * First write to address register, then read from or write to data register
467 * to complete the job. Once the address register is set up, accesses to
468 * data registers auto-increment the address by one dword.
469 * Bit usage for address registers (read or write):
470 * 0-31: memory address within device
471 */
472#define HBUS_TARG_MEM_RADDR (HBUS_BASE+0x00c)
473#define HBUS_TARG_MEM_WADDR (HBUS_BASE+0x010)
474#define HBUS_TARG_MEM_WDAT (HBUS_BASE+0x018)
475#define HBUS_TARG_MEM_RDAT (HBUS_BASE+0x01c)
476
477/*
478 * Registers for accessing device's internal peripheral registers
479 * (e.g. SCD, BSM, etc.). First write to address register,
480 * then read from or write to data register to complete the job.
481 * Bit usage for address registers (read or write):
482 * 0-15: register address (offset) within device
483 * 24-25: (# bytes - 1) to read or write (e.g. 3 for dword)
484 */
485#define HBUS_TARG_PRPH_WADDR (HBUS_BASE+0x044)
486#define HBUS_TARG_PRPH_RADDR (HBUS_BASE+0x048)
487#define HBUS_TARG_PRPH_WDAT (HBUS_BASE+0x04c)
488#define HBUS_TARG_PRPH_RDAT (HBUS_BASE+0x050)
489
490/*
491 * Per-Tx-queue write pointer (index, really!) (3945 and 4965).
492 * Indicates index to next TFD that driver will fill (1 past latest filled).
493 * Bit usage:
494 * 0-7: queue write index
495 * 11-8: queue selector
496 */
497#define HBUS_TARG_WRPTR (HBUS_BASE+0x060)
498
499/* SCD (3945 Tx Frame Scheduler) */ 292/* SCD (3945 Tx Frame Scheduler) */
500#define SCD_BASE (CSR_BASE + 0x2E00) 293#define SCD_BASE (CSR_BASE + 0x2E00)
501 294
@@ -663,7 +456,7 @@ struct iwl3945_eeprom {
663/* Size of uCode instruction memory in bootstrap state machine */ 456/* Size of uCode instruction memory in bootstrap state machine */
664#define IWL_MAX_BSM_SIZE ALM_RTC_INST_SIZE 457#define IWL_MAX_BSM_SIZE ALM_RTC_INST_SIZE
665 458
666#define IWL_MAX_NUM_QUEUES 8 459#define IWL39_MAX_NUM_QUEUES 8
667 460
668static inline int iwl3945_hw_valid_rtc_data_addr(u32 addr) 461static inline int iwl3945_hw_valid_rtc_data_addr(u32 addr)
669{ 462{
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-io.h b/drivers/net/wireless/iwlwifi/iwl-3945-io.h
index 75e20d0a20d1..0b9475114618 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-io.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-io.h
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2003 - 2007 Intel Corporation. All rights reserved. 3 * Copyright(c) 2003 - 2008 Intel Corporation. All rights reserved.
4 * 4 *
5 * Portions of this file are derived from the ipw3945 project. 5 * Portions of this file are derived from the ipw3945 project.
6 * 6 *
@@ -59,28 +59,28 @@
59 * 59 *
60 */ 60 */
61 61
62#define _iwl3945_write32(iwl, ofs, val) writel((val), (iwl)->hw_base + (ofs)) 62#define _iwl3945_write32(priv, ofs, val) writel((val), (priv)->hw_base + (ofs))
63#ifdef CONFIG_IWL3945_DEBUG 63#ifdef CONFIG_IWL3945_DEBUG
64static inline void __iwl3945_write32(const char *f, u32 l, struct iwl3945_priv *iwl, 64static inline void __iwl3945_write32(const char *f, u32 l, struct iwl3945_priv *priv,
65 u32 ofs, u32 val) 65 u32 ofs, u32 val)
66{ 66{
67 IWL_DEBUG_IO("write32(0x%08X, 0x%08X) - %s %d\n", ofs, val, f, l); 67 IWL_DEBUG_IO("write32(0x%08X, 0x%08X) - %s %d\n", ofs, val, f, l);
68 _iwl3945_write32(iwl, ofs, val); 68 _iwl3945_write32(priv, ofs, val);
69} 69}
70#define iwl3945_write32(iwl, ofs, val) \ 70#define iwl3945_write32(priv, ofs, val) \
71 __iwl3945_write32(__FILE__, __LINE__, iwl, ofs, val) 71 __iwl3945_write32(__FILE__, __LINE__, priv, ofs, val)
72#else 72#else
73#define iwl3945_write32(iwl, ofs, val) _iwl3945_write32(iwl, ofs, val) 73#define iwl3945_write32(priv, ofs, val) _iwl3945_write32(priv, ofs, val)
74#endif 74#endif
75 75
76#define _iwl3945_read32(iwl, ofs) readl((iwl)->hw_base + (ofs)) 76#define _iwl3945_read32(priv, ofs) readl((priv)->hw_base + (ofs))
77#ifdef CONFIG_IWL3945_DEBUG 77#ifdef CONFIG_IWL3945_DEBUG
78static inline u32 __iwl3945_read32(char *f, u32 l, struct iwl3945_priv *iwl, u32 ofs) 78static inline u32 __iwl3945_read32(char *f, u32 l, struct iwl3945_priv *priv, u32 ofs)
79{ 79{
80 IWL_DEBUG_IO("read_direct32(0x%08X) - %s %d\n", ofs, f, l); 80 IWL_DEBUG_IO("read_direct32(0x%08X) - %s %d\n", ofs, f, l);
81 return _iwl3945_read32(iwl, ofs); 81 return _iwl3945_read32(priv, ofs);
82} 82}
83#define iwl3945_read32(iwl, ofs) __iwl3945_read32(__FILE__, __LINE__, iwl, ofs) 83#define iwl3945_read32(priv, ofs) __iwl3945_read32(__FILE__, __LINE__, priv, ofs)
84#else 84#else
85#define iwl3945_read32(p, o) _iwl3945_read32(p, o) 85#define iwl3945_read32(p, o) _iwl3945_read32(p, o)
86#endif 86#endif
@@ -105,18 +105,13 @@ static inline int __iwl3945_poll_bit(const char *f, u32 l,
105 u32 bits, u32 mask, int timeout) 105 u32 bits, u32 mask, int timeout)
106{ 106{
107 int ret = _iwl3945_poll_bit(priv, addr, bits, mask, timeout); 107 int ret = _iwl3945_poll_bit(priv, addr, bits, mask, timeout);
108 if (unlikely(ret == -ETIMEDOUT)) 108 IWL_DEBUG_IO("poll_bit(0x%08X, 0x%08X, 0x%08X) - %s- %s %d\n",
109 IWL_DEBUG_IO 109 addr, bits, mask,
110 ("poll_bit(0x%08X, 0x%08X, 0x%08X) - timedout - %s %d\n", 110 unlikely(ret == -ETIMEDOUT)?"timeout":"", f, l);
111 addr, bits, mask, f, l);
112 else
113 IWL_DEBUG_IO
114 ("poll_bit(0x%08X, 0x%08X, 0x%08X) = 0x%08X - %s %d\n",
115 addr, bits, mask, ret, f, l);
116 return ret; 111 return ret;
117} 112}
118#define iwl3945_poll_bit(iwl, addr, bits, mask, timeout) \ 113#define iwl3945_poll_bit(priv, addr, bits, mask, timeout) \
119 __iwl3945_poll_bit(__FILE__, __LINE__, iwl, addr, bits, mask, timeout) 114 __iwl3945_poll_bit(__FILE__, __LINE__, priv, addr, bits, mask, timeout)
120#else 115#else
121#define iwl3945_poll_bit(p, a, b, m, t) _iwl3945_poll_bit(p, a, b, m, t) 116#define iwl3945_poll_bit(p, a, b, m, t) _iwl3945_poll_bit(p, a, b, m, t)
122#endif 117#endif
@@ -321,8 +316,8 @@ static inline int __iwl3945_poll_direct_bit(const char *f, u32 l,
321 "- %s %d\n", addr, mask, ret, f, l); 316 "- %s %d\n", addr, mask, ret, f, l);
322 return ret; 317 return ret;
323} 318}
324#define iwl3945_poll_direct_bit(iwl, addr, mask, timeout) \ 319#define iwl3945_poll_direct_bit(priv, addr, mask, timeout) \
325 __iwl3945_poll_direct_bit(__FILE__, __LINE__, iwl, addr, mask, timeout) 320 __iwl3945_poll_direct_bit(__FILE__, __LINE__, priv, addr, mask, timeout)
326#else 321#else
327#define iwl3945_poll_direct_bit _iwl3945_poll_direct_bit 322#define iwl3945_poll_direct_bit _iwl3945_poll_direct_bit
 328#endif 323#endif
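Putting these accessors together with the HBUS_TARG_MEM_* registers described earlier, a simplified sketch of an indirect SRAM read follows; it omits the NIC-access handshake the real driver performs around such reads, and the function name and parameters are hypothetical.

/* Simplified sketch only: indirect SRAM read via the HBUS target-memory
 * registers; 'addr', 'buf' and 'words' are hypothetical parameters. */
static void example_read_targ_mem(struct iwl3945_priv *priv, u32 addr,
				  u32 *buf, int words)
{
	int i;

	/* set the auto-incrementing read address, then pull out dwords */
	iwl3945_write32(priv, HBUS_TARG_MEM_RADDR, addr);
	for (i = 0; i < words; i++)
		buf[i] = iwl3945_read32(priv, HBUS_TARG_MEM_RDAT);
}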
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-led.c b/drivers/net/wireless/iwlwifi/iwl-3945-led.c
new file mode 100644
index 000000000000..d200d08fb086
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-led.c
@@ -0,0 +1,433 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2008 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * James P. Ketrenos <ipw2100-admin@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27
28#include <linux/kernel.h>
29#include <linux/module.h>
30#include <linux/version.h>
31#include <linux/init.h>
32#include <linux/pci.h>
33#include <linux/dma-mapping.h>
34#include <linux/delay.h>
35#include <linux/skbuff.h>
36#include <linux/netdevice.h>
37#include <linux/wireless.h>
38#include <net/mac80211.h>
39#include <linux/etherdevice.h>
40#include <asm/unaligned.h>
41
42#include "iwl-3945.h"
43#include "iwl-helpers.h"
44
45#define IWL_1MB_RATE (128 * 1024)
46#define IWL_LED_THRESHOLD (16)
47#define IWL_MAX_BLINK_TBL (10)
48
49static const struct {
50 u16 brightness;
51 u8 on_time;
 52 u8 off_time;
53} blink_tbl[] =
54{
55 {300, 25, 25},
56 {200, 40, 40},
57 {100, 55, 55},
58 {70, 65, 65},
59 {50, 75, 75},
60 {20, 85, 85},
61 {15, 95, 95 },
62 {10, 110, 110},
63 {5, 130, 130},
64 {0, 167, 167}
65};
66
67static int iwl3945_led_cmd_callback(struct iwl3945_priv *priv,
68 struct iwl3945_cmd *cmd,
69 struct sk_buff *skb)
70{
71 return 1;
72}
73
74
75/* Send led command */
76static int iwl_send_led_cmd(struct iwl3945_priv *priv,
77 struct iwl3945_led_cmd *led_cmd)
78{
79 struct iwl3945_host_cmd cmd = {
80 .id = REPLY_LEDS_CMD,
81 .len = sizeof(struct iwl3945_led_cmd),
82 .data = led_cmd,
83 .meta.flags = CMD_ASYNC,
84 .meta.u.callback = iwl3945_led_cmd_callback
85 };
86
87 return iwl3945_send_cmd(priv, &cmd);
88}
89
90
91/* Set led on command */
92static int iwl3945_led_on(struct iwl3945_priv *priv, int led_id)
93{
94 struct iwl3945_led_cmd led_cmd = {
95 .id = led_id,
96 .on = IWL_LED_SOLID,
97 .off = 0,
98 .interval = IWL_DEF_LED_INTRVL
99 };
100 return iwl_send_led_cmd(priv, &led_cmd);
101}
102
 103/* Set led blink pattern command */
104static int iwl3945_led_pattern(struct iwl3945_priv *priv, int led_id,
105 enum led_brightness brightness)
106{
107 struct iwl3945_led_cmd led_cmd = {
108 .id = led_id,
109 .on = brightness,
110 .off = brightness,
111 .interval = IWL_DEF_LED_INTRVL
112 };
113 if (brightness == LED_FULL) {
114 led_cmd.on = IWL_LED_SOLID;
115 led_cmd.off = 0;
116 }
117 return iwl_send_led_cmd(priv, &led_cmd);
118}
119
 120/* Set led register on */
121static int iwl3945_led_on_reg(struct iwl3945_priv *priv, int led_id)
122{
123 IWL_DEBUG_LED("led on %d\n", led_id);
124 return iwl3945_led_on(priv, led_id);
125}
126
127/* Set led off command */
128static int iwl3945_led_off(struct iwl3945_priv *priv, int led_id)
129{
130 struct iwl3945_led_cmd led_cmd = {
131 .id = led_id,
132 .on = 0,
133 .off = 0,
134 .interval = IWL_DEF_LED_INTRVL
135 };
136 IWL_DEBUG_LED("led off %d\n", led_id);
137 return iwl_send_led_cmd(priv, &led_cmd);
138}
139
140/* Set led register off */
141static int iwl3945_led_off_reg(struct iwl3945_priv *priv, int led_id)
142{
143 iwl3945_led_off(priv, led_id);
144 return 0;
145}
146
147/* Set led blink command */
148static int iwl3945_led_not_solid(struct iwl3945_priv *priv, int led_id,
149 u8 brightness)
150{
151 struct iwl3945_led_cmd led_cmd = {
152 .id = led_id,
153 .on = brightness,
154 .off = brightness,
155 .interval = IWL_DEF_LED_INTRVL
156 };
157
158 return iwl_send_led_cmd(priv, &led_cmd);
159}
160
161
162/*
 163 * brightness callback function for the Tx/Rx LEDs
164 */
165static int iwl3945_led_associated(struct iwl3945_priv *priv, int led_id)
166{
167 if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
168 !test_bit(STATUS_READY, &priv->status))
169 return 0;
170
171
172 /* start counting Tx/Rx bytes */
173 if (!priv->last_blink_time && priv->allow_blinking)
174 priv->last_blink_time = jiffies;
175 return 0;
176}
177
178/*
 179 * brightness callback for the association and radio LEDs
180 */
181static void iwl3945_led_brightness_set(struct led_classdev *led_cdev,
182 enum led_brightness brightness)
183{
184 struct iwl3945_led *led = container_of(led_cdev,
185 struct iwl3945_led, led_dev);
186 struct iwl3945_priv *priv = led->priv;
187
188 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
189 return;
190
191 switch (brightness) {
192 case LED_FULL:
193 if (led->type == IWL_LED_TRG_ASSOC) {
194 priv->allow_blinking = 1;
195 IWL_DEBUG_LED("MAC is associated\n");
196 }
197 if (led->led_on)
198 led->led_on(priv, IWL_LED_LINK);
199 break;
200 case LED_OFF:
201 if (led->type == IWL_LED_TRG_ASSOC) {
202 priv->allow_blinking = 0;
203 IWL_DEBUG_LED("MAC is disassociated\n");
204 }
205 if (led->led_off)
206 led->led_off(priv, IWL_LED_LINK);
207 break;
208 default:
209 if (led->led_pattern)
210 led->led_pattern(priv, IWL_LED_LINK, brightness);
211 break;
212 }
213}
214
215
216
217/*
218 * Register led class with the system
219 */
220static int iwl3945_led_register_led(struct iwl3945_priv *priv,
221 struct iwl3945_led *led,
222 enum led_type type, u8 set_led,
223 const char *name, char *trigger)
224{
225 struct device *device = wiphy_dev(priv->hw->wiphy);
226 int ret;
227
228 led->led_dev.name = name;
229 led->led_dev.brightness_set = iwl3945_led_brightness_set;
230 led->led_dev.default_trigger = trigger;
231
232 ret = led_classdev_register(device, &led->led_dev);
233 if (ret) {
234 IWL_ERROR("Error: failed to register led handler.\n");
235 return ret;
236 }
237
238 led->priv = priv;
239 led->type = type;
240 led->registered = 1;
241
242 if (set_led && led->led_on)
243 led->led_on(priv, IWL_LED_LINK);
244 return 0;
245}
246
247
248/*
 249 * calculate blink rate according to the Tx/Rx activity of the last 2 seconds
250 */
251static inline u8 get_blink_rate(struct iwl3945_priv *priv)
252{
253 int index;
254 u8 blink_rate;
255
256 if (priv->rxtxpackets < IWL_LED_THRESHOLD)
257 index = 10;
258 else {
259 for (index = 0; index < IWL_MAX_BLINK_TBL; index++) {
260 if (priv->rxtxpackets > (blink_tbl[index].brightness *
261 IWL_1MB_RATE))
262 break;
263 }
264 }
 265 /* if no frames were transferred */
266 if ((index == IWL_MAX_BLINK_TBL) || !priv->allow_blinking)
267 blink_rate = IWL_LED_SOLID;
268 else
269 blink_rate = blink_tbl[index].on_time;
270
271 return blink_rate;
272}
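/*
 * Illustrative numbers (assumed, not measured): with IWL_1MB_RATE equal to
 * 128 KiB, blink_tbl[1].brightness = 200 corresponds to roughly 25 MB, so
 * about 30 MB of Tx+Rx traffic since the last update makes the loop above
 * stop at index 1 and the LED blink with on/off times of 40.  A count below
 * IWL_LED_THRESHOLD, or blinking disabled, selects IWL_LED_SOLID instead.
 */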
273
274static inline int is_rf_kill(struct iwl3945_priv *priv)
275{
276 return test_bit(STATUS_RF_KILL_HW, &priv->status) ||
277 test_bit(STATUS_RF_KILL_SW, &priv->status);
278}
279
280/*
 281 * this function is called from the handler. Since the LED command can be
 282 * issued very frequently, we postpone it so it is sent from the REPLY
 283 * handler, when we know the uCode is up
284 */
285void iwl3945_led_background(struct iwl3945_priv *priv)
286{
287 u8 blink_rate;
288
289 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
290 priv->last_blink_time = 0;
291 return;
292 }
293 if (is_rf_kill(priv)) {
294 priv->last_blink_time = 0;
295 return;
296 }
297
298 if (!priv->allow_blinking) {
299 priv->last_blink_time = 0;
300 if (priv->last_blink_rate != IWL_LED_SOLID) {
301 priv->last_blink_rate = IWL_LED_SOLID;
302 iwl3945_led_on(priv, IWL_LED_LINK);
303 }
304 return;
305 }
306 if (!priv->last_blink_time ||
307 !time_after(jiffies, priv->last_blink_time +
308 msecs_to_jiffies(1000)))
309 return;
310
311 blink_rate = get_blink_rate(priv);
312
 313 /* call only if the blink rate changed */
314 if (blink_rate != priv->last_blink_rate) {
315 if (blink_rate != IWL_LED_SOLID) {
316 priv->last_blink_time = jiffies +
317 msecs_to_jiffies(1000);
318 iwl3945_led_not_solid(priv, IWL_LED_LINK, blink_rate);
319 } else {
320 priv->last_blink_time = 0;
321 iwl3945_led_on(priv, IWL_LED_LINK);
322 }
323 }
324
325 priv->last_blink_rate = blink_rate;
326 priv->rxtxpackets = 0;
327}
328
329
 330/* Register all led handlers */
331int iwl3945_led_register(struct iwl3945_priv *priv)
332{
333 char *trigger;
334 char name[32];
335 int ret;
336
337 priv->last_blink_rate = 0;
338 priv->rxtxpackets = 0;
339 priv->last_blink_time = 0;
340 priv->allow_blinking = 0;
341
342 trigger = ieee80211_get_radio_led_name(priv->hw);
343 snprintf(name, sizeof(name), "iwl-%s:radio",
344 wiphy_name(priv->hw->wiphy));
345
346 priv->led[IWL_LED_TRG_RADIO].led_on = iwl3945_led_on_reg;
347 priv->led[IWL_LED_TRG_RADIO].led_off = iwl3945_led_off_reg;
348 priv->led[IWL_LED_TRG_RADIO].led_pattern = NULL;
349
350 ret = iwl3945_led_register_led(priv,
351 &priv->led[IWL_LED_TRG_RADIO],
352 IWL_LED_TRG_RADIO, 1,
353 name, trigger);
354 if (ret)
355 goto exit_fail;
356
357 trigger = ieee80211_get_assoc_led_name(priv->hw);
358 snprintf(name, sizeof(name), "iwl-%s:assoc",
359 wiphy_name(priv->hw->wiphy));
360
361 ret = iwl3945_led_register_led(priv,
362 &priv->led[IWL_LED_TRG_ASSOC],
363 IWL_LED_TRG_ASSOC, 0,
364 name, trigger);
365 /* for assoc always turn led on */
366 priv->led[IWL_LED_TRG_ASSOC].led_on = iwl3945_led_on_reg;
367 priv->led[IWL_LED_TRG_ASSOC].led_off = iwl3945_led_on_reg;
368 priv->led[IWL_LED_TRG_ASSOC].led_pattern = NULL;
369
370 if (ret)
371 goto exit_fail;
372
373 trigger = ieee80211_get_rx_led_name(priv->hw);
374 snprintf(name, sizeof(name), "iwl-%s:RX",
375 wiphy_name(priv->hw->wiphy));
376
377
378 ret = iwl3945_led_register_led(priv,
379 &priv->led[IWL_LED_TRG_RX],
380 IWL_LED_TRG_RX, 0,
381 name, trigger);
382
383 priv->led[IWL_LED_TRG_RX].led_on = iwl3945_led_associated;
384 priv->led[IWL_LED_TRG_RX].led_off = iwl3945_led_associated;
385 priv->led[IWL_LED_TRG_RX].led_pattern = iwl3945_led_pattern;
386
387 if (ret)
388 goto exit_fail;
389
390 trigger = ieee80211_get_tx_led_name(priv->hw);
391 snprintf(name, sizeof(name), "iwl-%s:TX",
392 wiphy_name(priv->hw->wiphy));
393 ret = iwl3945_led_register_led(priv,
394 &priv->led[IWL_LED_TRG_TX],
395 IWL_LED_TRG_TX, 0,
396 name, trigger);
397 priv->led[IWL_LED_TRG_TX].led_on = iwl3945_led_associated;
398 priv->led[IWL_LED_TRG_TX].led_off = iwl3945_led_associated;
399 priv->led[IWL_LED_TRG_TX].led_pattern = iwl3945_led_pattern;
400
401 if (ret)
402 goto exit_fail;
403
404 return 0;
405
406exit_fail:
407 iwl3945_led_unregister(priv);
408 return ret;
409}
410
411
412/* unregister led class */
413static void iwl3945_led_unregister_led(struct iwl3945_led *led, u8 set_led)
414{
415 if (!led->registered)
416 return;
417
418 led_classdev_unregister(&led->led_dev);
419
420 if (set_led)
421 led->led_dev.brightness_set(&led->led_dev, LED_OFF);
422 led->registered = 0;
423}
424
425/* Unregister all led handlers */
426void iwl3945_led_unregister(struct iwl3945_priv *priv)
427{
428 iwl3945_led_unregister_led(&priv->led[IWL_LED_TRG_ASSOC], 0);
429 iwl3945_led_unregister_led(&priv->led[IWL_LED_TRG_RX], 0);
430 iwl3945_led_unregister_led(&priv->led[IWL_LED_TRG_TX], 0);
431 iwl3945_led_unregister_led(&priv->led[IWL_LED_TRG_RADIO], 1);
432}
433
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-led.h b/drivers/net/wireless/iwlwifi/iwl-3945-led.h
new file mode 100644
index 000000000000..b1d2f6b8b259
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-led.h
@@ -0,0 +1,73 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2008 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * James P. Ketrenos <ipw2100-admin@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#ifndef IWL3945_LEDS_H
28#define IWL3945_LEDS_H
29
30struct iwl3945_priv;
31
32#ifdef CONFIG_IWL3945_LEDS
33#define IWL_LED_SOLID 11
34#define IWL_LED_NAME_LEN 31
35#define IWL_DEF_LED_INTRVL __constant_cpu_to_le32(1000)
36
37#define IWL_LED_ACTIVITY (0<<1)
38#define IWL_LED_LINK (1<<1)
39
40enum led_type {
41 IWL_LED_TRG_TX,
42 IWL_LED_TRG_RX,
43 IWL_LED_TRG_ASSOC,
44 IWL_LED_TRG_RADIO,
45 IWL_LED_TRG_MAX,
46};
47
48#include <linux/leds.h>
49
50struct iwl3945_led {
51 struct iwl3945_priv *priv;
52 struct led_classdev led_dev;
53
54 int (*led_on) (struct iwl3945_priv *priv, int led_id);
55 int (*led_off) (struct iwl3945_priv *priv, int led_id);
56 int (*led_pattern) (struct iwl3945_priv *priv, int led_id,
57 enum led_brightness brightness);
58
59 enum led_type type;
60 unsigned int registered;
61};
62
63extern int iwl3945_led_register(struct iwl3945_priv *priv);
64extern void iwl3945_led_unregister(struct iwl3945_priv *priv);
65extern void iwl3945_led_background(struct iwl3945_priv *priv);
66
67#else
68static inline int iwl3945_led_register(struct iwl3945_priv *priv) { return 0; }
69static inline void iwl3945_led_unregister(struct iwl3945_priv *priv) {}
70static inline void iwl3945_led_background(struct iwl3945_priv *priv) {}
71#endif /* CONFIG_IWL3945_LEDS */
72
73#endif /* IWL3945_LEDS_H */
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-rs.c b/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
index 80d31ae51e77..85c22641542d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2005 - 2007 Intel Corporation. All rights reserved. 3 * Copyright(c) 2005 - 2008 Intel Corporation. All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as 6 * under the terms of version 2 of the GNU General Public License as
@@ -37,7 +37,7 @@
37 37
38#include <linux/workqueue.h> 38#include <linux/workqueue.h>
39 39
40#include "../net/mac80211/ieee80211_rate.h" 40#include "../net/mac80211/rate.h"
41 41
42#include "iwl-3945.h" 42#include "iwl-3945.h"
43 43
@@ -100,14 +100,6 @@ static struct iwl3945_tpt_entry iwl3945_tpt_table_a[] = {
100 {-89, IWL_RATE_6M_INDEX} 100 {-89, IWL_RATE_6M_INDEX}
101}; 101};
102 102
103static struct iwl3945_tpt_entry iwl3945_tpt_table_b[] = {
104 {-86, IWL_RATE_11M_INDEX},
105 {-88, IWL_RATE_5M_INDEX},
106 {-90, IWL_RATE_2M_INDEX},
107 {-92, IWL_RATE_1M_INDEX}
108
109};
110
111static struct iwl3945_tpt_entry iwl3945_tpt_table_g[] = { 103static struct iwl3945_tpt_entry iwl3945_tpt_table_g[] = {
112 {-60, IWL_RATE_54M_INDEX}, 104 {-60, IWL_RATE_54M_INDEX},
113 {-64, IWL_RATE_48M_INDEX}, 105 {-64, IWL_RATE_48M_INDEX},
@@ -129,7 +121,7 @@ static struct iwl3945_tpt_entry iwl3945_tpt_table_g[] = {
129#define IWL_RATE_MIN_SUCCESS_TH 8 121#define IWL_RATE_MIN_SUCCESS_TH 8
130#define IWL_RATE_DECREASE_TH 1920 122#define IWL_RATE_DECREASE_TH 1920
131 123
132static u8 iwl3945_get_rate_index_by_rssi(s32 rssi, u8 mode) 124static u8 iwl3945_get_rate_index_by_rssi(s32 rssi, enum ieee80211_band band)
133{ 125{
134 u32 index = 0; 126 u32 index = 0;
135 u32 table_size = 0; 127 u32 table_size = 0;
@@ -138,21 +130,19 @@ static u8 iwl3945_get_rate_index_by_rssi(s32 rssi, u8 mode)
138 if ((rssi < IWL_MIN_RSSI_VAL) || (rssi > IWL_MAX_RSSI_VAL)) 130 if ((rssi < IWL_MIN_RSSI_VAL) || (rssi > IWL_MAX_RSSI_VAL))
139 rssi = IWL_MIN_RSSI_VAL; 131 rssi = IWL_MIN_RSSI_VAL;
140 132
141 switch (mode) { 133 switch (band) {
142 case MODE_IEEE80211G: 134 case IEEE80211_BAND_2GHZ:
143 tpt_table = iwl3945_tpt_table_g; 135 tpt_table = iwl3945_tpt_table_g;
144 table_size = ARRAY_SIZE(iwl3945_tpt_table_g); 136 table_size = ARRAY_SIZE(iwl3945_tpt_table_g);
145 break; 137 break;
146 138
147 case MODE_IEEE80211A: 139 case IEEE80211_BAND_5GHZ:
148 tpt_table = iwl3945_tpt_table_a; 140 tpt_table = iwl3945_tpt_table_a;
149 table_size = ARRAY_SIZE(iwl3945_tpt_table_a); 141 table_size = ARRAY_SIZE(iwl3945_tpt_table_a);
150 break; 142 break;
151 143
152 default: 144 default:
153 case MODE_IEEE80211B: 145 BUG();
154 tpt_table = iwl3945_tpt_table_b;
155 table_size = ARRAY_SIZE(iwl3945_tpt_table_b);
156 break; 146 break;
157 } 147 }
158 148
@@ -168,9 +158,9 @@ static void iwl3945_clear_window(struct iwl3945_rate_scale_data *window)
168{ 158{
169 window->data = 0; 159 window->data = 0;
170 window->success_counter = 0; 160 window->success_counter = 0;
171 window->success_ratio = IWL_INVALID_VALUE; 161 window->success_ratio = -1;
172 window->counter = 0; 162 window->counter = 0;
173 window->average_tpt = IWL_INVALID_VALUE; 163 window->average_tpt = IWL_INV_TPT;
174 window->stamp = 0; 164 window->stamp = 0;
175} 165}
176 166
@@ -340,17 +330,17 @@ static void rs_rate_init(void *priv_rate, void *priv_sta,
340 * after assoc.. */ 330 * after assoc.. */
341 331
342 for (i = IWL_RATE_COUNT - 1; i >= 0; i--) { 332 for (i = IWL_RATE_COUNT - 1; i >= 0; i--) {
343 if (sta->supp_rates & (1 << i)) { 333 if (sta->supp_rates[local->hw.conf.channel->band] & (1 << i)) {
344 sta->txrate = i; 334 sta->txrate_idx = i;
345 break; 335 break;
346 } 336 }
347 } 337 }
348 338
349 sta->last_txrate = sta->txrate; 339 sta->last_txrate_idx = sta->txrate_idx;
350 340
351 /* For MODE_IEEE80211A mode it start at IWL_FIRST_OFDM_RATE */ 341 /* For 5 GHz band it start at IWL_FIRST_OFDM_RATE */
352 if (local->hw.conf.phymode == MODE_IEEE80211A) 342 if (local->hw.conf.channel->band == IEEE80211_BAND_5GHZ)
353 sta->last_txrate += IWL_FIRST_OFDM_RATE; 343 sta->last_txrate_idx += IWL_FIRST_OFDM_RATE;
354 344
355 IWL_DEBUG_RATE("leave\n"); 345 IWL_DEBUG_RATE("leave\n");
356} 346}
@@ -429,17 +419,19 @@ static int rs_adjust_next_rate(struct iwl3945_priv *priv, int rate)
429{ 419{
430 int next_rate = iwl3945_get_prev_ieee_rate(rate); 420 int next_rate = iwl3945_get_prev_ieee_rate(rate);
431 421
432 switch (priv->phymode) { 422 switch (priv->band) {
433 case MODE_IEEE80211A: 423 case IEEE80211_BAND_5GHZ:
434 if (rate == IWL_RATE_12M_INDEX) 424 if (rate == IWL_RATE_12M_INDEX)
435 next_rate = IWL_RATE_9M_INDEX; 425 next_rate = IWL_RATE_9M_INDEX;
436 else if (rate == IWL_RATE_6M_INDEX) 426 else if (rate == IWL_RATE_6M_INDEX)
437 next_rate = IWL_RATE_6M_INDEX; 427 next_rate = IWL_RATE_6M_INDEX;
438 break; 428 break;
429/* XXX cannot be invoked in current mac80211 so not a regression
439 case MODE_IEEE80211B: 430 case MODE_IEEE80211B:
440 if (rate == IWL_RATE_11M_INDEX_TABLE) 431 if (rate == IWL_RATE_11M_INDEX_TABLE)
441 next_rate = IWL_RATE_5M_INDEX_TABLE; 432 next_rate = IWL_RATE_5M_INDEX_TABLE;
442 break; 433 break;
434 */
443 default: 435 default:
444 break; 436 break;
445 } 437 }
@@ -465,22 +457,25 @@ static void rs_tx_status(void *priv_rate,
465 struct iwl3945_priv *priv = (struct iwl3945_priv *)priv_rate; 457 struct iwl3945_priv *priv = (struct iwl3945_priv *)priv_rate;
466 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 458 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
467 struct iwl3945_rs_sta *rs_sta; 459 struct iwl3945_rs_sta *rs_sta;
460 struct ieee80211_supported_band *sband;
468 461
469 IWL_DEBUG_RATE("enter\n"); 462 IWL_DEBUG_RATE("enter\n");
470 463
471 retries = tx_resp->retry_count; 464 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
472 465
473 first_index = tx_resp->control.tx_rate; 466
467 retries = tx_resp->retry_count;
468 first_index = tx_resp->control.tx_rate->hw_value;
474 if ((first_index < 0) || (first_index >= IWL_RATE_COUNT)) { 469 if ((first_index < 0) || (first_index >= IWL_RATE_COUNT)) {
475 IWL_DEBUG_RATE("leave: Rate out of bounds: %0x for %d\n", 470 IWL_DEBUG_RATE("leave: Rate out of bounds: %d\n", first_index);
476 tx_resp->control.tx_rate, first_index);
477 return; 471 return;
478 } 472 }
479 473
474 rcu_read_lock();
475
480 sta = sta_info_get(local, hdr->addr1); 476 sta = sta_info_get(local, hdr->addr1);
481 if (!sta || !sta->rate_ctrl_priv) { 477 if (!sta || !sta->rate_ctrl_priv) {
482 if (sta) 478 rcu_read_unlock();
483 sta_info_put(sta);
484 IWL_DEBUG_RATE("leave: No STA priv data to update!\n"); 479 IWL_DEBUG_RATE("leave: No STA priv data to update!\n");
485 return; 480 return;
486 } 481 }
@@ -553,7 +548,7 @@ static void rs_tx_status(void *priv_rate,
553 548
554 spin_unlock_irqrestore(&rs_sta->lock, flags); 549 spin_unlock_irqrestore(&rs_sta->lock, flags);
555 550
556 sta_info_put(sta); 551 rcu_read_unlock();
557 552
558 IWL_DEBUG_RATE("leave\n"); 553 IWL_DEBUG_RATE("leave\n");
559 554
@@ -561,14 +556,14 @@ static void rs_tx_status(void *priv_rate,
561} 556}
562 557
563static u16 iwl3945_get_adjacent_rate(struct iwl3945_rs_sta *rs_sta, 558static u16 iwl3945_get_adjacent_rate(struct iwl3945_rs_sta *rs_sta,
564 u8 index, u16 rate_mask, int phymode) 559 u8 index, u16 rate_mask, enum ieee80211_band band)
565{ 560{
566 u8 high = IWL_RATE_INVALID; 561 u8 high = IWL_RATE_INVALID;
567 u8 low = IWL_RATE_INVALID; 562 u8 low = IWL_RATE_INVALID;
568 563
569 /* 802.11A walks to the next literal adjacent rate in 564 /* 802.11A walks to the next literal adjacent rate in
570 * the rate table */ 565 * the rate table */
571 if (unlikely(phymode == MODE_IEEE80211A)) { 566 if (unlikely(band == IEEE80211_BAND_5GHZ)) {
572 int i; 567 int i;
573 u32 mask; 568 u32 mask;
574 569
@@ -639,7 +634,8 @@ static u16 iwl3945_get_adjacent_rate(struct iwl3945_rs_sta *rs_sta,
639 * 634 *
640 */ 635 */
641static void rs_get_rate(void *priv_rate, struct net_device *dev, 636static void rs_get_rate(void *priv_rate, struct net_device *dev,
642 struct ieee80211_hw_mode *mode, struct sk_buff *skb, 637 struct ieee80211_supported_band *sband,
638 struct sk_buff *skb,
643 struct rate_selection *sel) 639 struct rate_selection *sel)
644{ 640{
645 u8 low = IWL_RATE_INVALID; 641 u8 low = IWL_RATE_INVALID;
@@ -648,9 +644,9 @@ static void rs_get_rate(void *priv_rate, struct net_device *dev,
648 int index; 644 int index;
649 struct iwl3945_rs_sta *rs_sta; 645 struct iwl3945_rs_sta *rs_sta;
650 struct iwl3945_rate_scale_data *window = NULL; 646 struct iwl3945_rate_scale_data *window = NULL;
651 int current_tpt = IWL_INVALID_VALUE; 647 int current_tpt = IWL_INV_TPT;
652 int low_tpt = IWL_INVALID_VALUE; 648 int low_tpt = IWL_INV_TPT;
653 int high_tpt = IWL_INVALID_VALUE; 649 int high_tpt = IWL_INV_TPT;
654 u32 fail_count; 650 u32 fail_count;
655 s8 scale_action = 0; 651 s8 scale_action = 0;
656 unsigned long flags; 652 unsigned long flags;
@@ -663,6 +659,8 @@ static void rs_get_rate(void *priv_rate, struct net_device *dev,
663 659
664 IWL_DEBUG_RATE("enter\n"); 660 IWL_DEBUG_RATE("enter\n");
665 661
662 rcu_read_lock();
663
666 sta = sta_info_get(local, hdr->addr1); 664 sta = sta_info_get(local, hdr->addr1);
667 665
668 /* Send management frames and broadcast/multicast data using lowest 666 /* Send management frames and broadcast/multicast data using lowest
@@ -672,16 +670,15 @@ static void rs_get_rate(void *priv_rate, struct net_device *dev,
672 is_multicast_ether_addr(hdr->addr1) || 670 is_multicast_ether_addr(hdr->addr1) ||
673 !sta || !sta->rate_ctrl_priv) { 671 !sta || !sta->rate_ctrl_priv) {
674 IWL_DEBUG_RATE("leave: No STA priv data to update!\n"); 672 IWL_DEBUG_RATE("leave: No STA priv data to update!\n");
675 sel->rate = rate_lowest(local, local->oper_hw_mode, sta); 673 sel->rate = rate_lowest(local, sband, sta);
676 if (sta) 674 rcu_read_unlock();
677 sta_info_put(sta);
678 return; 675 return;
679 } 676 }
680 677
681 rate_mask = sta->supp_rates; 678 rate_mask = sta->supp_rates[sband->band];
682 index = min(sta->last_txrate & 0xffff, IWL_RATE_COUNT - 1); 679 index = min(sta->last_txrate_idx & 0xffff, IWL_RATE_COUNT - 1);
683 680
684 if (priv->phymode == (u8) MODE_IEEE80211A) 681 if (sband->band == IEEE80211_BAND_5GHZ)
685 rate_mask = rate_mask << IWL_FIRST_OFDM_RATE; 682 rate_mask = rate_mask << IWL_FIRST_OFDM_RATE;
686 683
687 rs_sta = (void *)sta->rate_ctrl_priv; 684 rs_sta = (void *)sta->rate_ctrl_priv;
@@ -713,7 +710,7 @@ static void rs_get_rate(void *priv_rate, struct net_device *dev,
713 710
714 if (((fail_count <= IWL_RATE_MIN_FAILURE_TH) && 711 if (((fail_count <= IWL_RATE_MIN_FAILURE_TH) &&
715 (window->success_counter < IWL_RATE_MIN_SUCCESS_TH))) { 712 (window->success_counter < IWL_RATE_MIN_SUCCESS_TH))) {
716 window->average_tpt = IWL_INVALID_VALUE; 713 window->average_tpt = IWL_INV_TPT;
717 spin_unlock_irqrestore(&rs_sta->lock, flags); 714 spin_unlock_irqrestore(&rs_sta->lock, flags);
718 715
719 IWL_DEBUG_RATE("Invalid average_tpt on rate %d: " 716 IWL_DEBUG_RATE("Invalid average_tpt on rate %d: "
@@ -732,7 +729,7 @@ static void rs_get_rate(void *priv_rate, struct net_device *dev,
732 current_tpt = window->average_tpt; 729 current_tpt = window->average_tpt;
733 730
734 high_low = iwl3945_get_adjacent_rate(rs_sta, index, rate_mask, 731 high_low = iwl3945_get_adjacent_rate(rs_sta, index, rate_mask,
735 local->hw.conf.phymode); 732 sband->band);
736 low = high_low & 0xff; 733 low = high_low & 0xff;
737 high = (high_low >> 8) & 0xff; 734 high = (high_low >> 8) & 0xff;
738 735
@@ -749,19 +746,16 @@ static void rs_get_rate(void *priv_rate, struct net_device *dev,
749 if ((window->success_ratio < IWL_RATE_DECREASE_TH) || !current_tpt) { 746 if ((window->success_ratio < IWL_RATE_DECREASE_TH) || !current_tpt) {
750 IWL_DEBUG_RATE("decrease rate because of low success_ratio\n"); 747 IWL_DEBUG_RATE("decrease rate because of low success_ratio\n");
751 scale_action = -1; 748 scale_action = -1;
752 } else if ((low_tpt == IWL_INVALID_VALUE) && 749 } else if ((low_tpt == IWL_INV_TPT) && (high_tpt == IWL_INV_TPT))
753 (high_tpt == IWL_INVALID_VALUE))
754 scale_action = 1; 750 scale_action = 1;
755 else if ((low_tpt != IWL_INVALID_VALUE) && 751 else if ((low_tpt != IWL_INV_TPT) && (high_tpt != IWL_INV_TPT) &&
756 (high_tpt != IWL_INVALID_VALUE) 752 (low_tpt < current_tpt) && (high_tpt < current_tpt)) {
757 && (low_tpt < current_tpt)
758 && (high_tpt < current_tpt)) {
759 IWL_DEBUG_RATE("No action -- low [%d] & high [%d] < " 753 IWL_DEBUG_RATE("No action -- low [%d] & high [%d] < "
760 "current_tpt [%d]\n", 754 "current_tpt [%d]\n",
761 low_tpt, high_tpt, current_tpt); 755 low_tpt, high_tpt, current_tpt);
762 scale_action = 0; 756 scale_action = 0;
763 } else { 757 } else {
764 if (high_tpt != IWL_INVALID_VALUE) { 758 if (high_tpt != IWL_INV_TPT) {
765 if (high_tpt > current_tpt) 759 if (high_tpt > current_tpt)
766 scale_action = 1; 760 scale_action = 1;
767 else { 761 else {
@@ -769,7 +763,7 @@ static void rs_get_rate(void *priv_rate, struct net_device *dev,
769 ("decrease rate because of high tpt\n"); 763 ("decrease rate because of high tpt\n");
770 scale_action = -1; 764 scale_action = -1;
771 } 765 }
772 } else if (low_tpt != IWL_INVALID_VALUE) { 766 } else if (low_tpt != IWL_INV_TPT) {
773 if (low_tpt > current_tpt) { 767 if (low_tpt > current_tpt) {
774 IWL_DEBUG_RATE 768 IWL_DEBUG_RATE
775 ("decrease rate because of low tpt\n"); 769 ("decrease rate because of low tpt\n");
@@ -810,17 +804,17 @@ static void rs_get_rate(void *priv_rate, struct net_device *dev,
810 804
811 out: 805 out:
812 806
813 sta->last_txrate = index; 807 sta->last_txrate_idx = index;
814 if (priv->phymode == (u8) MODE_IEEE80211A) 808 if (sband->band == IEEE80211_BAND_5GHZ)
815 sta->txrate = sta->last_txrate - IWL_FIRST_OFDM_RATE; 809 sta->txrate_idx = sta->last_txrate_idx - IWL_FIRST_OFDM_RATE;
816 else 810 else
817 sta->txrate = sta->last_txrate; 811 sta->txrate_idx = sta->last_txrate_idx;
818 812
819 sta_info_put(sta); 813 rcu_read_unlock();
820 814
821 IWL_DEBUG_RATE("leave: %d\n", index); 815 IWL_DEBUG_RATE("leave: %d\n", index);
822 816
823 sel->rate = &priv->ieee_rates[index]; 817 sel->rate = &sband->bitrates[sta->txrate_idx];
824} 818}
825 819
826static struct rate_control_ops rs_ops = { 820static struct rate_control_ops rs_ops = {
@@ -848,13 +842,15 @@ int iwl3945_fill_rs_info(struct ieee80211_hw *hw, char *buf, u8 sta_id)
848 unsigned long now = jiffies; 842 unsigned long now = jiffies;
849 u32 max_time = 0; 843 u32 max_time = 0;
850 844
845 rcu_read_lock();
846
851 sta = sta_info_get(local, priv->stations[sta_id].sta.sta.addr); 847 sta = sta_info_get(local, priv->stations[sta_id].sta.sta.addr);
852 if (!sta || !sta->rate_ctrl_priv) { 848 if (!sta || !sta->rate_ctrl_priv) {
853 if (sta) { 849 if (sta)
854 sta_info_put(sta);
855 IWL_DEBUG_RATE("leave - no private rate data!\n"); 850 IWL_DEBUG_RATE("leave - no private rate data!\n");
856 } else 851 else
857 IWL_DEBUG_RATE("leave - no station!\n"); 852 IWL_DEBUG_RATE("leave - no station!\n");
853 rcu_read_unlock();
858 return sprintf(buf, "station %d not found\n", sta_id); 854 return sprintf(buf, "station %d not found\n", sta_id);
859 } 855 }
860 856
@@ -895,7 +891,7 @@ int iwl3945_fill_rs_info(struct ieee80211_hw *hw, char *buf, u8 sta_id)
895 i = j; 891 i = j;
896 } 892 }
897 spin_unlock_irqrestore(&rs_sta->lock, flags); 893 spin_unlock_irqrestore(&rs_sta->lock, flags);
898 sta_info_put(sta); 894 rcu_read_unlock();
899 895
900 /* Display the average rate of all samples taken. 896 /* Display the average rate of all samples taken.
901 * 897 *
@@ -932,11 +928,12 @@ void iwl3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id)
932 return; 928 return;
933 } 929 }
934 930
931 rcu_read_lock();
932
935 sta = sta_info_get(local, priv->stations[sta_id].sta.sta.addr); 933 sta = sta_info_get(local, priv->stations[sta_id].sta.sta.addr);
936 if (!sta || !sta->rate_ctrl_priv) { 934 if (!sta || !sta->rate_ctrl_priv) {
937 if (sta)
938 sta_info_put(sta);
939 IWL_DEBUG_RATE("leave - no private rate data!\n"); 935 IWL_DEBUG_RATE("leave - no private rate data!\n");
936 rcu_read_unlock();
940 return; 937 return;
941 } 938 }
942 939
@@ -945,8 +942,9 @@ void iwl3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id)
945 spin_lock_irqsave(&rs_sta->lock, flags); 942 spin_lock_irqsave(&rs_sta->lock, flags);
946 943
947 rs_sta->tgg = 0; 944 rs_sta->tgg = 0;
948 switch (priv->phymode) { 945 switch (priv->band) {
949 case MODE_IEEE80211G: 946 case IEEE80211_BAND_2GHZ:
947 /* TODO: this always does G, not a regression */
950 if (priv->active_rxon.flags & RXON_FLG_TGG_PROTECT_MSK) { 948 if (priv->active_rxon.flags & RXON_FLG_TGG_PROTECT_MSK) {
951 rs_sta->tgg = 1; 949 rs_sta->tgg = 1;
952 rs_sta->expected_tpt = iwl3945_expected_tpt_g_prot; 950 rs_sta->expected_tpt = iwl3945_expected_tpt_g_prot;
@@ -954,18 +952,15 @@ void iwl3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id)
954 rs_sta->expected_tpt = iwl3945_expected_tpt_g; 952 rs_sta->expected_tpt = iwl3945_expected_tpt_g;
955 break; 953 break;
956 954
957 case MODE_IEEE80211A: 955 case IEEE80211_BAND_5GHZ:
958 rs_sta->expected_tpt = iwl3945_expected_tpt_a; 956 rs_sta->expected_tpt = iwl3945_expected_tpt_a;
959 break; 957 break;
960 958 case IEEE80211_NUM_BANDS:
961 default: 959 BUG();
962 IWL_WARNING("Invalid phymode. Defaulting to 802.11b\n");
963 case MODE_IEEE80211B:
964 rs_sta->expected_tpt = iwl3945_expected_tpt_b;
965 break; 960 break;
966 } 961 }
967 962
968 sta_info_put(sta); 963 rcu_read_unlock();
969 spin_unlock_irqrestore(&rs_sta->lock, flags); 964 spin_unlock_irqrestore(&rs_sta->lock, flags);
970 965
971 rssi = priv->last_rx_rssi; 966 rssi = priv->last_rx_rssi;
@@ -974,20 +969,19 @@ void iwl3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id)
974 969
975 IWL_DEBUG(IWL_DL_INFO | IWL_DL_RATE, "Network RSSI: %d\n", rssi); 970 IWL_DEBUG(IWL_DL_INFO | IWL_DL_RATE, "Network RSSI: %d\n", rssi);
976 971
977 rs_sta->start_rate = 972 rs_sta->start_rate = iwl3945_get_rate_index_by_rssi(rssi, priv->band);
978 iwl3945_get_rate_index_by_rssi(rssi, priv->phymode);
979 973
980 IWL_DEBUG_RATE("leave: rssi %d assign rate index: " 974 IWL_DEBUG_RATE("leave: rssi %d assign rate index: "
981 "%d (plcp 0x%x)\n", rssi, rs_sta->start_rate, 975 "%d (plcp 0x%x)\n", rssi, rs_sta->start_rate,
982 iwl3945_rates[rs_sta->start_rate].plcp); 976 iwl3945_rates[rs_sta->start_rate].plcp);
983} 977}
984 978
985void iwl3945_rate_control_register(struct ieee80211_hw *hw) 979int iwl3945_rate_control_register(void)
986{ 980{
987 ieee80211_rate_control_register(&rs_ops); 981 return ieee80211_rate_control_register(&rs_ops);
988} 982}
989 983
990void iwl3945_rate_control_unregister(struct ieee80211_hw *hw) 984void iwl3945_rate_control_unregister(void)
991{ 985{
992 ieee80211_rate_control_unregister(&rs_ops); 986 ieee80211_rate_control_unregister(&rs_ops);
993} 987}
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-rs.h b/drivers/net/wireless/iwlwifi/iwl-3945-rs.h
index d5e9220f871d..f085d330bdcf 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-rs.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-rs.h
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2005 - 2007 Intel Corporation. All rights reserved. 3 * Copyright(c) 2005 - 2008 Intel Corporation. All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as 6 * under the terms of version 2 of the GNU General Public License as
@@ -36,8 +36,8 @@ struct iwl3945_rate_info {
36 u8 next_rs; /* next rate used in rs algo */ 36 u8 next_rs; /* next rate used in rs algo */
37 u8 prev_rs_tgg; /* previous rate used in TGG rs algo */ 37 u8 prev_rs_tgg; /* previous rate used in TGG rs algo */
38 u8 next_rs_tgg; /* next rate used in TGG rs algo */ 38 u8 next_rs_tgg; /* next rate used in TGG rs algo */
39 u8 table_rs_index; /* index in rate scale table cmd */ 39 u8 table_rs_index; /* index in rate scale table cmd */
40 u8 prev_table_rs; /* prev in rate table cmd */ 40 u8 prev_table_rs; /* prev in rate table cmd */
41}; 41};
42 42
43/* 43/*
@@ -159,7 +159,7 @@ enum {
159 159
160#define IWL_RATES_MASK ((1 << IWL_RATE_COUNT) - 1) 160#define IWL_RATES_MASK ((1 << IWL_RATE_COUNT) - 1)
161 161
162#define IWL_INVALID_VALUE -1 162#define IWL_INV_TPT -1
163 163
164#define IWL_MIN_RSSI_VAL -100 164#define IWL_MIN_RSSI_VAL -100
165#define IWL_MAX_RSSI_VAL 0 165#define IWL_MAX_RSSI_VAL 0
@@ -202,7 +202,7 @@ extern void iwl3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id);
202 * ieee80211_register_hw 202 * ieee80211_register_hw
203 * 203 *
204 */ 204 */
205extern void iwl3945_rate_control_register(struct ieee80211_hw *hw); 205extern int iwl3945_rate_control_register(void);
206 206
207/** 207/**
208 * iwl3945_rate_control_unregister - Unregister the rate control callbacks 208 * iwl3945_rate_control_unregister - Unregister the rate control callbacks
@@ -210,6 +210,6 @@ extern void iwl3945_rate_control_register(struct ieee80211_hw *hw);
210 * This should be called after calling ieee80211_unregister_hw, but before 210 * This should be called after calling ieee80211_unregister_hw, but before
211 * the driver is unloaded. 211 * the driver is unloaded.
212 */ 212 */
213extern void iwl3945_rate_control_unregister(struct ieee80211_hw *hw); 213extern void iwl3945_rate_control_unregister(void);
214 214
215#endif 215#endif
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
index 8d4d91d35fd2..598e4eef4f40 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2003 - 2007 Intel Corporation. All rights reserved. 3 * Copyright(c) 2003 - 2008 Intel Corporation. All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as 6 * under the terms of version 2 of the GNU General Public License as
@@ -39,6 +39,7 @@
39#include <asm/unaligned.h> 39#include <asm/unaligned.h>
40#include <net/mac80211.h> 40#include <net/mac80211.h>
41 41
42#include "iwl-3945-core.h"
42#include "iwl-3945.h" 43#include "iwl-3945.h"
43#include "iwl-helpers.h" 44#include "iwl-helpers.h"
44#include "iwl-3945-rs.h" 45#include "iwl-3945-rs.h"
@@ -183,6 +184,16 @@ void iwl3945_disable_events(struct iwl3945_priv *priv)
183 184
184} 185}
185 186
187static int iwl3945_hwrate_to_plcp_idx(u8 plcp)
188{
189 int idx;
190
191 for (idx = 0; idx < IWL_RATE_COUNT; idx++)
192 if (iwl3945_rates[idx].plcp == plcp)
193 return idx;
194 return -1;
195}
196
186/** 197/**
187 * iwl3945_get_antenna_flags - Get antenna flags for RXON command 198 * iwl3945_get_antenna_flags - Get antenna flags for RXON command
188 * @priv: eeprom and antenna fields are used to determine antenna flags 199 * @priv: eeprom and antenna fields are used to determine antenna flags
@@ -216,14 +227,126 @@ __le32 iwl3945_get_antenna_flags(const struct iwl3945_priv *priv)
216 return 0; /* "diversity" is default if error */ 227 return 0; /* "diversity" is default if error */
217} 228}
218 229
230#ifdef CONFIG_IWL3945_DEBUG
231#define TX_STATUS_ENTRY(x) case TX_STATUS_FAIL_ ## x: return #x
232
233static const char *iwl3945_get_tx_fail_reason(u32 status)
234{
235 switch (status & TX_STATUS_MSK) {
236 case TX_STATUS_SUCCESS:
237 return "SUCCESS";
238 TX_STATUS_ENTRY(SHORT_LIMIT);
239 TX_STATUS_ENTRY(LONG_LIMIT);
240 TX_STATUS_ENTRY(FIFO_UNDERRUN);
241 TX_STATUS_ENTRY(MGMNT_ABORT);
242 TX_STATUS_ENTRY(NEXT_FRAG);
243 TX_STATUS_ENTRY(LIFE_EXPIRE);
244 TX_STATUS_ENTRY(DEST_PS);
245 TX_STATUS_ENTRY(ABORTED);
246 TX_STATUS_ENTRY(BT_RETRY);
247 TX_STATUS_ENTRY(STA_INVALID);
248 TX_STATUS_ENTRY(FRAG_DROPPED);
249 TX_STATUS_ENTRY(TID_DISABLE);
250 TX_STATUS_ENTRY(FRAME_FLUSHED);
251 TX_STATUS_ENTRY(INSUFFICIENT_CF_POLL);
252 TX_STATUS_ENTRY(TX_LOCKED);
253 TX_STATUS_ENTRY(NO_BEACON_ON_RADAR);
254 }
255
256 return "UNKNOWN";
257}
258#else
259static inline const char *iwl3945_get_tx_fail_reason(u32 status)
260{
261 return "";
262}
263#endif
264
265
266/**
267 * iwl3945_tx_queue_reclaim - Reclaim Tx queue entries already Tx'd
268 *
269 * When FW advances 'R' index, all entries between old and new 'R' index
 270 * need to be reclaimed. As a result, some free space forms. If there is
271 * enough free space (> low mark), wake the stack that feeds us.
272 */
273static void iwl3945_tx_queue_reclaim(struct iwl3945_priv *priv,
274 int txq_id, int index)
275{
276 struct iwl3945_tx_queue *txq = &priv->txq[txq_id];
277 struct iwl3945_queue *q = &txq->q;
278 struct iwl3945_tx_info *tx_info;
279
280 BUG_ON(txq_id == IWL_CMD_QUEUE_NUM);
281
282 for (index = iwl_queue_inc_wrap(index, q->n_bd); q->read_ptr != index;
283 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
284
285 tx_info = &txq->txb[txq->q.read_ptr];
286 ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb[0],
287 &tx_info->status);
288 tx_info->skb[0] = NULL;
289 iwl3945_hw_txq_free_tfd(priv, txq);
290 }
291
292 if (iwl3945_queue_space(q) > q->low_mark && (txq_id >= 0) &&
293 (txq_id != IWL_CMD_QUEUE_NUM) &&
294 priv->mac80211_registered)
295 ieee80211_wake_queue(priv->hw, txq_id);
296}
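
The reclaim comment above describes a standard circular-buffer consumer: advance read_ptr (with wrap) until it passes the index the firmware reported as completed, freeing each slot on the way. Below is a minimal standalone sketch of that walk; toy_queue, inc_wrap and the printf stand-ins are invented for illustration and only approximate what iwl3945_queue and iwl_queue_inc_wrap do in the driver.

#include <stdio.h>

#define QUEUE_SIZE 8    /* the real hardware rings are power-of-two sized */

struct toy_queue {
        int read_ptr;   /* next entry the driver will reclaim */
        int write_ptr;  /* next entry the driver will fill */
};

/* Advance an index by one slot, wrapping at the end of the ring. */
static int inc_wrap(int index, int n_bd)
{
        return (index + 1) % n_bd;
}

/* Reclaim every entry from the old read_ptr up to and including 'index',
 * the last entry the firmware says it has transmitted. */
static void reclaim(struct toy_queue *q, int index)
{
        for (index = inc_wrap(index, QUEUE_SIZE); q->read_ptr != index;
             q->read_ptr = inc_wrap(q->read_ptr, QUEUE_SIZE))
                printf("freeing TFD at slot %d\n", q->read_ptr);
}

int main(void)
{
        struct toy_queue q = { .read_ptr = 6, .write_ptr = 3 };

        reclaim(&q, 1);                            /* firmware completed 6, 7, 0, 1 */
        printf("read_ptr now %d\n", q.read_ptr);   /* prints 2 */
        return 0;
}
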
297
298/**
299 * iwl3945_rx_reply_tx - Handle Tx response
300 */
301static void iwl3945_rx_reply_tx(struct iwl3945_priv *priv,
302 struct iwl3945_rx_mem_buffer *rxb)
303{
304 struct iwl3945_rx_packet *pkt = (void *)rxb->skb->data;
305 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
306 int txq_id = SEQ_TO_QUEUE(sequence);
307 int index = SEQ_TO_INDEX(sequence);
308 struct iwl3945_tx_queue *txq = &priv->txq[txq_id];
309 struct ieee80211_tx_status *tx_status;
310 struct iwl3945_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
311 u32 status = le32_to_cpu(tx_resp->status);
312 int rate_idx;
313
314 if ((index >= txq->q.n_bd) || (iwl3945_x2_queue_used(&txq->q, index) == 0)) {
315 IWL_ERROR("Read index for DMA queue txq_id (%d) index %d "
316 "is out of range [0-%d] %d %d\n", txq_id,
317 index, txq->q.n_bd, txq->q.write_ptr,
318 txq->q.read_ptr);
319 return;
320 }
321
322 tx_status = &(txq->txb[txq->q.read_ptr].status);
323
324 tx_status->retry_count = tx_resp->failure_frame;
325 /* tx_status->rts_retry_count = tx_resp->failure_rts; */
326 tx_status->flags = ((status & TX_STATUS_MSK) == TX_STATUS_SUCCESS) ?
327 IEEE80211_TX_STATUS_ACK : 0;
328
329 IWL_DEBUG_TX("Tx queue %d Status %s (0x%08x) plcp rate %d retries %d\n",
330 txq_id, iwl3945_get_tx_fail_reason(status), status,
331 tx_resp->rate, tx_resp->failure_frame);
332
333 rate_idx = iwl3945_hwrate_to_plcp_idx(tx_resp->rate);
334 tx_status->control.tx_rate = &priv->ieee_rates[rate_idx];
335 IWL_DEBUG_TX_REPLY("Tx queue reclaim %d\n", index);
336 iwl3945_tx_queue_reclaim(priv, txq_id, index);
337
338 if (iwl_check_bits(status, TX_ABORT_REQUIRED_MSK))
339 IWL_ERROR("TODO: Implement Tx ABORT REQUIRED!!!\n");
340}
341
342
343
219/***************************************************************************** 344/*****************************************************************************
220 * 345 *
221 * Intel PRO/Wireless 3945ABG/BG Network Connection 346 * Intel PRO/Wireless 3945ABG/BG Network Connection
222 * 347 *
223 * RX handler implementations 348 * RX handler implementations
224 * 349 *
225 * Used by iwl-base.c
226 *
227 *****************************************************************************/ 350 *****************************************************************************/
228 351
229void iwl3945_hw_rx_statistics(struct iwl3945_priv *priv, struct iwl3945_rx_mem_buffer *rxb) 352void iwl3945_hw_rx_statistics(struct iwl3945_priv *priv, struct iwl3945_rx_mem_buffer *rxb)
@@ -235,9 +358,161 @@ void iwl3945_hw_rx_statistics(struct iwl3945_priv *priv, struct iwl3945_rx_mem_b
235 358
236 memcpy(&priv->statistics, pkt->u.raw, sizeof(priv->statistics)); 359 memcpy(&priv->statistics, pkt->u.raw, sizeof(priv->statistics));
237 360
361 iwl3945_led_background(priv);
362
238 priv->last_statistics_time = jiffies; 363 priv->last_statistics_time = jiffies;
239} 364}
240 365
366/******************************************************************************
367 *
368 * Misc. internal state and helper functions
369 *
370 ******************************************************************************/
371#ifdef CONFIG_IWL3945_DEBUG
372
373/**
374 * iwl3945_report_frame - dump frame to syslog during debug sessions
375 *
376 * You may hack this function to show different aspects of received frames,
377 * including selective frame dumps.
 378 * The group100 parameter selects whether to show 1 out of 100 good frames.
379 */
380static void iwl3945_dbg_report_frame(struct iwl3945_priv *priv,
381 struct iwl3945_rx_packet *pkt,
382 struct ieee80211_hdr *header, int group100)
383{
384 u32 to_us;
385 u32 print_summary = 0;
386 u32 print_dump = 0; /* set to 1 to dump all frames' contents */
387 u32 hundred = 0;
388 u32 dataframe = 0;
389 u16 fc;
390 u16 seq_ctl;
391 u16 channel;
392 u16 phy_flags;
393 u16 length;
394 u16 status;
395 u16 bcn_tmr;
396 u32 tsf_low;
397 u64 tsf;
398 u8 rssi;
399 u8 agc;
400 u16 sig_avg;
401 u16 noise_diff;
402 struct iwl3945_rx_frame_stats *rx_stats = IWL_RX_STATS(pkt);
403 struct iwl3945_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt);
404 struct iwl3945_rx_frame_end *rx_end = IWL_RX_END(pkt);
405 u8 *data = IWL_RX_DATA(pkt);
406
407 /* MAC header */
408 fc = le16_to_cpu(header->frame_control);
409 seq_ctl = le16_to_cpu(header->seq_ctrl);
410
411 /* metadata */
412 channel = le16_to_cpu(rx_hdr->channel);
413 phy_flags = le16_to_cpu(rx_hdr->phy_flags);
414 length = le16_to_cpu(rx_hdr->len);
415
416 /* end-of-frame status and timestamp */
417 status = le32_to_cpu(rx_end->status);
418 bcn_tmr = le32_to_cpu(rx_end->beacon_timestamp);
419 tsf_low = le64_to_cpu(rx_end->timestamp) & 0x0ffffffff;
420 tsf = le64_to_cpu(rx_end->timestamp);
421
422 /* signal statistics */
423 rssi = rx_stats->rssi;
424 agc = rx_stats->agc;
425 sig_avg = le16_to_cpu(rx_stats->sig_avg);
426 noise_diff = le16_to_cpu(rx_stats->noise_diff);
427
428 to_us = !compare_ether_addr(header->addr1, priv->mac_addr);
429
430 /* if data frame is to us and all is good,
431 * (optionally) print summary for only 1 out of every 100 */
432 if (to_us && (fc & ~IEEE80211_FCTL_PROTECTED) ==
433 (IEEE80211_FCTL_FROMDS | IEEE80211_FTYPE_DATA)) {
434 dataframe = 1;
435 if (!group100)
436 print_summary = 1; /* print each frame */
437 else if (priv->framecnt_to_us < 100) {
438 priv->framecnt_to_us++;
439 print_summary = 0;
440 } else {
441 priv->framecnt_to_us = 0;
442 print_summary = 1;
443 hundred = 1;
444 }
445 } else {
446 /* print summary for all other frames */
447 print_summary = 1;
448 }
449
450 if (print_summary) {
451 char *title;
452 u32 rate;
453
454 if (hundred)
455 title = "100Frames";
456 else if (fc & IEEE80211_FCTL_RETRY)
457 title = "Retry";
458 else if (ieee80211_is_assoc_response(fc))
459 title = "AscRsp";
460 else if (ieee80211_is_reassoc_response(fc))
461 title = "RasRsp";
462 else if (ieee80211_is_probe_response(fc)) {
463 title = "PrbRsp";
464 print_dump = 1; /* dump frame contents */
465 } else if (ieee80211_is_beacon(fc)) {
466 title = "Beacon";
467 print_dump = 1; /* dump frame contents */
468 } else if (ieee80211_is_atim(fc))
469 title = "ATIM";
470 else if (ieee80211_is_auth(fc))
471 title = "Auth";
472 else if (ieee80211_is_deauth(fc))
473 title = "DeAuth";
474 else if (ieee80211_is_disassoc(fc))
475 title = "DisAssoc";
476 else
477 title = "Frame";
478
479 rate = iwl3945_hwrate_to_plcp_idx(rx_hdr->rate);
480 if (rate == -1)
481 rate = 0;
482 else
483 rate = iwl3945_rates[rate].ieee / 2;
484
485 /* print frame summary.
486 * MAC addresses show just the last byte (for brevity),
487 * but you can hack it to show more, if you'd like to. */
488 if (dataframe)
489 IWL_DEBUG_RX("%s: mhd=0x%04x, dst=0x%02x, "
490 "len=%u, rssi=%d, chnl=%d, rate=%u, \n",
491 title, fc, header->addr1[5],
492 length, rssi, channel, rate);
493 else {
494 /* src/dst addresses assume managed mode */
495 IWL_DEBUG_RX("%s: 0x%04x, dst=0x%02x, "
496 "src=0x%02x, rssi=%u, tim=%lu usec, "
497 "phy=0x%02x, chnl=%d\n",
498 title, fc, header->addr1[5],
499 header->addr3[5], rssi,
500 tsf_low - priv->scan_start_tsf,
501 phy_flags, channel);
502 }
503 }
504 if (print_dump)
505 iwl3945_print_hex_dump(IWL_DL_RX, data, length);
506}
507#else
508static inline void iwl3945_dbg_report_frame(struct iwl3945_priv *priv,
509 struct iwl3945_rx_packet *pkt,
510 struct ieee80211_hdr *header, int group100)
511{
512}
513#endif
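
The group100 logic above is a simple 1-in-N print throttle driven by a per-device counter (framecnt_to_us). A standalone sketch of the same throttling pattern, with invented names rather than the driver's fields:

#include <stdbool.h>
#include <stdio.h>

/* Print only one out of every 'group' calls; group == 0 means print always. */
static bool should_print(unsigned int *counter, unsigned int group)
{
        if (!group)
                return true;
        if (*counter < group - 1) {
                (*counter)++;
                return false;
        }
        *counter = 0;
        return true;
}

int main(void)
{
        unsigned int cnt = 0;

        for (int i = 0; i < 250; i++)
                if (should_print(&cnt, 100))
                        printf("frame summary at call %d\n", i);
        return 0;
}
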
514
515
241static void iwl3945_add_radiotap(struct iwl3945_priv *priv, 516static void iwl3945_add_radiotap(struct iwl3945_priv *priv,
242 struct sk_buff *skb, 517 struct sk_buff *skb,
243 struct iwl3945_rx_frame_hdr *rx_hdr, 518 struct iwl3945_rx_frame_hdr *rx_hdr,
@@ -247,9 +522,9 @@ static void iwl3945_add_radiotap(struct iwl3945_priv *priv,
247 * the information provided in the skb from the hardware */ 522 * the information provided in the skb from the hardware */
248 s8 signal = stats->ssi; 523 s8 signal = stats->ssi;
249 s8 noise = 0; 524 s8 noise = 0;
250 int rate = stats->rate; 525 int rate = stats->rate_idx;
251 u64 tsf = stats->mactime; 526 u64 tsf = stats->mactime;
252 __le16 phy_flags_hw = rx_hdr->phy_flags; 527 __le16 phy_flags_hw = rx_hdr->phy_flags, antenna;
253 528
254 struct iwl3945_rt_rx_hdr { 529 struct iwl3945_rt_rx_hdr {
255 struct ieee80211_radiotap_header rt_hdr; 530 struct ieee80211_radiotap_header rt_hdr;
@@ -315,15 +590,14 @@ static void iwl3945_add_radiotap(struct iwl3945_priv *priv,
315 IEEE80211_CHAN_2GHZ), 590 IEEE80211_CHAN_2GHZ),
316 &iwl3945_rt->rt_chbitmask); 591 &iwl3945_rt->rt_chbitmask);
317 592
318 rate = iwl3945_rate_index_from_plcp(rate);
319 if (rate == -1) 593 if (rate == -1)
320 iwl3945_rt->rt_rate = 0; 594 iwl3945_rt->rt_rate = 0;
321 else 595 else
322 iwl3945_rt->rt_rate = iwl3945_rates[rate].ieee; 596 iwl3945_rt->rt_rate = iwl3945_rates[rate].ieee;
323 597
324 /* antenna number */ 598 /* antenna number */
325 iwl3945_rt->rt_antenna = 599 antenna = phy_flags_hw & RX_RES_PHY_FLAGS_ANTENNA_MSK;
326 le16_to_cpu(phy_flags_hw & RX_RES_PHY_FLAGS_ANTENNA_MSK) >> 4; 600 iwl3945_rt->rt_antenna = le16_to_cpu(antenna) >> 4;
327 601
328 /* set the preamble flag if we have it */ 602 /* set the preamble flag if we have it */
329 if (phy_flags_hw & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK) 603 if (phy_flags_hw & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
@@ -368,6 +642,10 @@ static void iwl3945_handle_data_packet(struct iwl3945_priv *priv, int is_data,
368 if (priv->add_radiotap) 642 if (priv->add_radiotap)
369 iwl3945_add_radiotap(priv, rxb->skb, rx_hdr, stats); 643 iwl3945_add_radiotap(priv, rxb->skb, rx_hdr, stats);
370 644
645#ifdef CONFIG_IWL3945_LEDS
646 if (is_data)
647 priv->rxtxpackets += len;
648#endif
371 ieee80211_rx_irqsafe(priv->hw, rxb->skb, stats); 649 ieee80211_rx_irqsafe(priv->hw, rxb->skb, stats);
372 rxb->skb = NULL; 650 rxb->skb = NULL;
373} 651}
@@ -377,25 +655,28 @@ static void iwl3945_handle_data_packet(struct iwl3945_priv *priv, int is_data,
377static void iwl3945_rx_reply_rx(struct iwl3945_priv *priv, 655static void iwl3945_rx_reply_rx(struct iwl3945_priv *priv,
378 struct iwl3945_rx_mem_buffer *rxb) 656 struct iwl3945_rx_mem_buffer *rxb)
379{ 657{
658 struct ieee80211_hdr *header;
659 struct ieee80211_rx_status rx_status;
380 struct iwl3945_rx_packet *pkt = (void *)rxb->skb->data; 660 struct iwl3945_rx_packet *pkt = (void *)rxb->skb->data;
381 struct iwl3945_rx_frame_stats *rx_stats = IWL_RX_STATS(pkt); 661 struct iwl3945_rx_frame_stats *rx_stats = IWL_RX_STATS(pkt);
382 struct iwl3945_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt); 662 struct iwl3945_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt);
383 struct iwl3945_rx_frame_end *rx_end = IWL_RX_END(pkt); 663 struct iwl3945_rx_frame_end *rx_end = IWL_RX_END(pkt);
384 struct ieee80211_hdr *header; 664 int snr;
385 u16 rx_stats_sig_avg = le16_to_cpu(rx_stats->sig_avg); 665 u16 rx_stats_sig_avg = le16_to_cpu(rx_stats->sig_avg);
386 u16 rx_stats_noise_diff = le16_to_cpu(rx_stats->noise_diff); 666 u16 rx_stats_noise_diff = le16_to_cpu(rx_stats->noise_diff);
387 struct ieee80211_rx_status stats = {
388 .mactime = le64_to_cpu(rx_end->timestamp),
389 .freq = ieee80211chan2mhz(le16_to_cpu(rx_hdr->channel)),
390 .channel = le16_to_cpu(rx_hdr->channel),
391 .phymode = (rx_hdr->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
392 MODE_IEEE80211G : MODE_IEEE80211A,
393 .antenna = 0,
394 .rate = rx_hdr->rate,
395 .flag = 0,
396 };
397 u8 network_packet; 667 u8 network_packet;
398 int snr; 668
669 rx_status.antenna = 0;
670 rx_status.flag = 0;
671 rx_status.mactime = le64_to_cpu(rx_end->timestamp);
672 rx_status.freq =
673 ieee80211_frequency_to_channel(le16_to_cpu(rx_hdr->channel));
674 rx_status.band = (rx_hdr->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
675 IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
676
677 rx_status.rate_idx = iwl3945_hwrate_to_plcp_idx(rx_hdr->rate);
678 if (rx_status.band == IEEE80211_BAND_5GHZ)
679 rx_status.rate_idx -= IWL_FIRST_OFDM_RATE;
399 680
400 if ((unlikely(rx_stats->phy_count > 20))) { 681 if ((unlikely(rx_stats->phy_count > 20))) {
401 IWL_DEBUG_DROP 682 IWL_DEBUG_DROP
@@ -411,12 +692,12 @@ static void iwl3945_rx_reply_rx(struct iwl3945_priv *priv,
411 } 692 }
412 693
413 if (priv->iw_mode == IEEE80211_IF_TYPE_MNTR) { 694 if (priv->iw_mode == IEEE80211_IF_TYPE_MNTR) {
414 iwl3945_handle_data_packet(priv, 1, rxb, &stats); 695 iwl3945_handle_data_packet(priv, 1, rxb, &rx_status);
415 return; 696 return;
416 } 697 }
417 698
418 /* Convert 3945's rssi indicator to dBm */ 699 /* Convert 3945's rssi indicator to dBm */
419 stats.ssi = rx_stats->rssi - IWL_RSSI_OFFSET; 700 rx_status.ssi = rx_stats->rssi - IWL_RSSI_OFFSET;
420 701
421 /* Set default noise value to -127 */ 702 /* Set default noise value to -127 */
422 if (priv->last_rx_noise == 0) 703 if (priv->last_rx_noise == 0)
@@ -432,51 +713,47 @@ static void iwl3945_rx_reply_rx(struct iwl3945_priv *priv,
432 * signal-to-noise ratio (SNR) is (sig_avg / noise_diff). 713 * signal-to-noise ratio (SNR) is (sig_avg / noise_diff).
433 * Convert linear SNR to dB SNR, then subtract that from rssi dBm 714 * Convert linear SNR to dB SNR, then subtract that from rssi dBm
434 * to obtain noise level in dBm. 715 * to obtain noise level in dBm.
435 * Calculate stats.signal (quality indicator in %) based on SNR. */ 716 * Calculate rx_status.signal (quality indicator in %) based on SNR. */
436 if (rx_stats_noise_diff) { 717 if (rx_stats_noise_diff) {
437 snr = rx_stats_sig_avg / rx_stats_noise_diff; 718 snr = rx_stats_sig_avg / rx_stats_noise_diff;
438 stats.noise = stats.ssi - iwl3945_calc_db_from_ratio(snr); 719 rx_status.noise = rx_status.ssi -
439 stats.signal = iwl3945_calc_sig_qual(stats.ssi, stats.noise); 720 iwl3945_calc_db_from_ratio(snr);
721 rx_status.signal = iwl3945_calc_sig_qual(rx_status.ssi,
722 rx_status.noise);
440 723
441 /* If noise info not available, calculate signal quality indicator (%) 724 /* If noise info not available, calculate signal quality indicator (%)
442 * using just the dBm signal level. */ 725 * using just the dBm signal level. */
443 } else { 726 } else {
444 stats.noise = priv->last_rx_noise; 727 rx_status.noise = priv->last_rx_noise;
445 stats.signal = iwl3945_calc_sig_qual(stats.ssi, 0); 728 rx_status.signal = iwl3945_calc_sig_qual(rx_status.ssi, 0);
446 } 729 }
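
As a rough worked example of the noise derivation in the comment above: sig_avg = 80 and noise_diff = 5 give a linear SNR of 16, roughly 12 dB, so an RSSI of -60 dBm yields a noise estimate near -72 dBm. The sketch below redoes that arithmetic with math.h's log10 standing in for the driver's integer iwl3945_calc_db_from_ratio helper; the sample numbers are made up.

#include <math.h>
#include <stdio.h>

/* dB equivalent of a linear power ratio: 10 * log10(ratio). */
static int db_from_ratio(int ratio)
{
        return ratio > 0 ? (int)round(10.0 * log10((double)ratio)) : 0;
}

int main(void)
{
        int rssi_dbm   = -60;  /* signal strength reported for the frame */
        int sig_avg    = 80;   /* averaged signal energy (linear units) */
        int noise_diff = 5;    /* signal-minus-noise difference (linear units) */

        int snr_linear = sig_avg / noise_diff;       /* 16 */
        int snr_db     = db_from_ratio(snr_linear);  /* ~12 dB */
        int noise_dbm  = rssi_dbm - snr_db;          /* ~-72 dBm */

        printf("SNR %d dB, noise %d dBm\n", snr_db, noise_dbm);
        return 0;
}
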
447 730
448 731
449 IWL_DEBUG_STATS("Rssi %d noise %d qual %d sig_avg %d noise_diff %d\n", 732 IWL_DEBUG_STATS("Rssi %d noise %d qual %d sig_avg %d noise_diff %d\n",
450 stats.ssi, stats.noise, stats.signal, 733 rx_status.ssi, rx_status.noise, rx_status.signal,
451 rx_stats_sig_avg, rx_stats_noise_diff); 734 rx_stats_sig_avg, rx_stats_noise_diff);
452 735
453 stats.freq = ieee80211chan2mhz(stats.channel);
454
455 /* can be covered by iwl3945_report_frame() in most cases */
456/* IWL_DEBUG_RX("RX status: 0x%08X\n", rx_end->status); */
457
458 header = (struct ieee80211_hdr *)IWL_RX_DATA(pkt); 736 header = (struct ieee80211_hdr *)IWL_RX_DATA(pkt);
459 737
460 network_packet = iwl3945_is_network_packet(priv, header); 738 network_packet = iwl3945_is_network_packet(priv, header);
461 739
462#ifdef CONFIG_IWL3945_DEBUG 740 IWL_DEBUG_STATS_LIMIT("[%c] %d RSSI:%d Signal:%u, Noise:%u, Rate:%u\n",
463 if (iwl3945_debug_level & IWL_DL_STATS && net_ratelimit()) 741 network_packet ? '*' : ' ',
464 IWL_DEBUG_STATS 742 le16_to_cpu(rx_hdr->channel),
465 ("[%c] %d RSSI: %d Signal: %u, Noise: %u, Rate: %u\n", 743 rx_status.ssi, rx_status.ssi,
466 network_packet ? '*' : ' ', 744 rx_status.ssi, rx_status.rate_idx);
467 stats.channel, stats.ssi, stats.ssi,
468 stats.ssi, stats.rate);
469 745
746#ifdef CONFIG_IWL3945_DEBUG
470 if (iwl3945_debug_level & (IWL_DL_RX)) 747 if (iwl3945_debug_level & (IWL_DL_RX))
471 /* Set "1" to report good data frames in groups of 100 */ 748 /* Set "1" to report good data frames in groups of 100 */
472 iwl3945_report_frame(priv, pkt, header, 1); 749 iwl3945_dbg_report_frame(priv, pkt, header, 1);
473#endif 750#endif
474 751
475 if (network_packet) { 752 if (network_packet) {
476 priv->last_beacon_time = le32_to_cpu(rx_end->beacon_timestamp); 753 priv->last_beacon_time = le32_to_cpu(rx_end->beacon_timestamp);
477 priv->last_tsf = le64_to_cpu(rx_end->timestamp); 754 priv->last_tsf = le64_to_cpu(rx_end->timestamp);
478 priv->last_rx_rssi = stats.ssi; 755 priv->last_rx_rssi = rx_status.ssi;
479 priv->last_rx_noise = stats.noise; 756 priv->last_rx_noise = rx_status.noise;
480 } 757 }
481 758
482 switch (le16_to_cpu(header->frame_control) & IEEE80211_FCTL_FTYPE) { 759 switch (le16_to_cpu(header->frame_control) & IEEE80211_FCTL_FTYPE) {
@@ -563,7 +840,7 @@ static void iwl3945_rx_reply_rx(struct iwl3945_priv *priv,
563 } 840 }
564 } 841 }
565 842
566 iwl3945_handle_data_packet(priv, 0, rxb, &stats); 843 iwl3945_handle_data_packet(priv, 0, rxb, &rx_status);
567 break; 844 break;
568 845
569 case IEEE80211_FTYPE_CTL: 846 case IEEE80211_FTYPE_CTL:
@@ -580,7 +857,7 @@ static void iwl3945_rx_reply_rx(struct iwl3945_priv *priv,
580 print_mac(mac2, header->addr2), 857 print_mac(mac2, header->addr2),
581 print_mac(mac3, header->addr3)); 858 print_mac(mac3, header->addr3));
582 else 859 else
583 iwl3945_handle_data_packet(priv, 1, rxb, &stats); 860 iwl3945_handle_data_packet(priv, 1, rxb, &rx_status);
584 break; 861 break;
585 } 862 }
586 } 863 }
@@ -689,7 +966,7 @@ void iwl3945_hw_build_tx_cmd_rate(struct iwl3945_priv *priv,
689 struct ieee80211_hdr *hdr, int sta_id, int tx_id) 966 struct ieee80211_hdr *hdr, int sta_id, int tx_id)
690{ 967{
691 unsigned long flags; 968 unsigned long flags;
692 u16 rate_index = min(ctrl->tx_rate & 0xffff, IWL_RATE_COUNT - 1); 969 u16 rate_index = min(ctrl->tx_rate->hw_value & 0xffff, IWL_RATE_COUNT - 1);
693 u16 rate_mask; 970 u16 rate_mask;
694 int rate; 971 int rate;
695 u8 rts_retry_limit; 972 u8 rts_retry_limit;
@@ -709,7 +986,7 @@ void iwl3945_hw_build_tx_cmd_rate(struct iwl3945_priv *priv,
709 priv->stations[sta_id].current_rate.rate_n_flags = rate; 986 priv->stations[sta_id].current_rate.rate_n_flags = rate;
710 987
711 if ((priv->iw_mode == IEEE80211_IF_TYPE_IBSS) && 988 if ((priv->iw_mode == IEEE80211_IF_TYPE_IBSS) &&
712 (sta_id != IWL3945_BROADCAST_ID) && 989 (sta_id != priv->hw_setting.bcast_sta_id) &&
713 (sta_id != IWL_MULTICAST_ID)) 990 (sta_id != IWL_MULTICAST_ID))
714 priv->stations[IWL_STA_ID].current_rate.rate_n_flags = rate; 991 priv->stations[IWL_STA_ID].current_rate.rate_n_flags = rate;
715 992
@@ -996,19 +1273,19 @@ int iwl3945_hw_nic_init(struct iwl3945_priv *priv)
996 if (rev_id & PCI_CFG_REV_ID_BIT_RTP) 1273 if (rev_id & PCI_CFG_REV_ID_BIT_RTP)
997 IWL_DEBUG_INFO("RTP type \n"); 1274 IWL_DEBUG_INFO("RTP type \n");
998 else if (rev_id & PCI_CFG_REV_ID_BIT_BASIC_SKU) { 1275 else if (rev_id & PCI_CFG_REV_ID_BIT_BASIC_SKU) {
999 IWL_DEBUG_INFO("ALM-MB type\n"); 1276 IWL_DEBUG_INFO("3945 RADIO-MB type\n");
1000 iwl3945_set_bit(priv, CSR_HW_IF_CONFIG_REG, 1277 iwl3945_set_bit(priv, CSR_HW_IF_CONFIG_REG,
1001 CSR_HW_IF_CONFIG_REG_BIT_ALMAGOR_MB); 1278 CSR39_HW_IF_CONFIG_REG_BIT_3945_MB);
1002 } else { 1279 } else {
1003 IWL_DEBUG_INFO("ALM-MM type\n"); 1280 IWL_DEBUG_INFO("3945 RADIO-MM type\n");
1004 iwl3945_set_bit(priv, CSR_HW_IF_CONFIG_REG, 1281 iwl3945_set_bit(priv, CSR_HW_IF_CONFIG_REG,
1005 CSR_HW_IF_CONFIG_REG_BIT_ALMAGOR_MM); 1282 CSR39_HW_IF_CONFIG_REG_BIT_3945_MM);
1006 } 1283 }
1007 1284
1008 if (EEPROM_SKU_CAP_OP_MODE_MRC == priv->eeprom.sku_cap) { 1285 if (EEPROM_SKU_CAP_OP_MODE_MRC == priv->eeprom.sku_cap) {
1009 IWL_DEBUG_INFO("SKU OP mode is mrc\n"); 1286 IWL_DEBUG_INFO("SKU OP mode is mrc\n");
1010 iwl3945_set_bit(priv, CSR_HW_IF_CONFIG_REG, 1287 iwl3945_set_bit(priv, CSR_HW_IF_CONFIG_REG,
1011 CSR_HW_IF_CONFIG_REG_BIT_SKU_MRC); 1288 CSR39_HW_IF_CONFIG_REG_BIT_SKU_MRC);
1012 } else 1289 } else
1013 IWL_DEBUG_INFO("SKU OP mode is basic\n"); 1290 IWL_DEBUG_INFO("SKU OP mode is basic\n");
1014 1291
@@ -1016,24 +1293,24 @@ int iwl3945_hw_nic_init(struct iwl3945_priv *priv)
1016 IWL_DEBUG_INFO("3945ABG revision is 0x%X\n", 1293 IWL_DEBUG_INFO("3945ABG revision is 0x%X\n",
1017 priv->eeprom.board_revision); 1294 priv->eeprom.board_revision);
1018 iwl3945_set_bit(priv, CSR_HW_IF_CONFIG_REG, 1295 iwl3945_set_bit(priv, CSR_HW_IF_CONFIG_REG,
1019 CSR_HW_IF_CONFIG_REG_BIT_BOARD_TYPE); 1296 CSR39_HW_IF_CONFIG_REG_BIT_BOARD_TYPE);
1020 } else { 1297 } else {
1021 IWL_DEBUG_INFO("3945ABG revision is 0x%X\n", 1298 IWL_DEBUG_INFO("3945ABG revision is 0x%X\n",
1022 priv->eeprom.board_revision); 1299 priv->eeprom.board_revision);
1023 iwl3945_clear_bit(priv, CSR_HW_IF_CONFIG_REG, 1300 iwl3945_clear_bit(priv, CSR_HW_IF_CONFIG_REG,
1024 CSR_HW_IF_CONFIG_REG_BIT_BOARD_TYPE); 1301 CSR39_HW_IF_CONFIG_REG_BIT_BOARD_TYPE);
1025 } 1302 }
1026 1303
1027 if (priv->eeprom.almgor_m_version <= 1) { 1304 if (priv->eeprom.almgor_m_version <= 1) {
1028 iwl3945_set_bit(priv, CSR_HW_IF_CONFIG_REG, 1305 iwl3945_set_bit(priv, CSR_HW_IF_CONFIG_REG,
1029 CSR_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_A); 1306 CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_A);
1030 IWL_DEBUG_INFO("Card M type A version is 0x%X\n", 1307 IWL_DEBUG_INFO("Card M type A version is 0x%X\n",
1031 priv->eeprom.almgor_m_version); 1308 priv->eeprom.almgor_m_version);
1032 } else { 1309 } else {
1033 IWL_DEBUG_INFO("Card M type B version is 0x%X\n", 1310 IWL_DEBUG_INFO("Card M type B version is 0x%X\n",
1034 priv->eeprom.almgor_m_version); 1311 priv->eeprom.almgor_m_version);
1035 iwl3945_set_bit(priv, CSR_HW_IF_CONFIG_REG, 1312 iwl3945_set_bit(priv, CSR_HW_IF_CONFIG_REG,
1036 CSR_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_B); 1313 CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_B);
1037 } 1314 }
1038 spin_unlock_irqrestore(&priv->lock, flags); 1315 spin_unlock_irqrestore(&priv->lock, flags);
1039 1316
@@ -1552,14 +1829,14 @@ int iwl3945_hw_reg_send_txpower(struct iwl3945_priv *priv)
1552 .channel = priv->active_rxon.channel, 1829 .channel = priv->active_rxon.channel,
1553 }; 1830 };
1554 1831
1555 txpower.band = (priv->phymode == MODE_IEEE80211A) ? 0 : 1; 1832 txpower.band = (priv->band == IEEE80211_BAND_5GHZ) ? 0 : 1;
1556 ch_info = iwl3945_get_channel_info(priv, 1833 ch_info = iwl3945_get_channel_info(priv,
1557 priv->phymode, 1834 priv->band,
1558 le16_to_cpu(priv->active_rxon.channel)); 1835 le16_to_cpu(priv->active_rxon.channel));
1559 if (!ch_info) { 1836 if (!ch_info) {
1560 IWL_ERROR 1837 IWL_ERROR
1561 ("Failed to get channel info for channel %d [%d]\n", 1838 ("Failed to get channel info for channel %d [%d]\n",
1562 le16_to_cpu(priv->active_rxon.channel), priv->phymode); 1839 le16_to_cpu(priv->active_rxon.channel), priv->band);
1563 return -EINVAL; 1840 return -EINVAL;
1564 } 1841 }
1565 1842
@@ -2241,8 +2518,8 @@ int iwl3945_init_hw_rate_table(struct iwl3945_priv *priv)
2241 table[index].next_rate_index = iwl3945_rates[prev_index].table_rs_index; 2518 table[index].next_rate_index = iwl3945_rates[prev_index].table_rs_index;
2242 } 2519 }
2243 2520
2244 switch (priv->phymode) { 2521 switch (priv->band) {
2245 case MODE_IEEE80211A: 2522 case IEEE80211_BAND_5GHZ:
2246 IWL_DEBUG_RATE("Select A mode rate scale\n"); 2523 IWL_DEBUG_RATE("Select A mode rate scale\n");
2247 /* If one of the following CCK rates is used, 2524 /* If one of the following CCK rates is used,
2248 * have it fall back to the 6M OFDM rate */ 2525 * have it fall back to the 6M OFDM rate */
@@ -2257,8 +2534,8 @@ int iwl3945_init_hw_rate_table(struct iwl3945_priv *priv)
2257 iwl3945_rates[IWL_FIRST_OFDM_RATE].table_rs_index; 2534 iwl3945_rates[IWL_FIRST_OFDM_RATE].table_rs_index;
2258 break; 2535 break;
2259 2536
2260 case MODE_IEEE80211B: 2537 case IEEE80211_BAND_2GHZ:
2261 IWL_DEBUG_RATE("Select B mode rate scale\n"); 2538 IWL_DEBUG_RATE("Select B/G mode rate scale\n");
2262 /* If an OFDM rate is used, have it fall back to the 2539 /* If an OFDM rate is used, have it fall back to the
2263 * 1M CCK rates */ 2540 * 1M CCK rates */
2264 for (i = IWL_RATE_6M_INDEX_TABLE; i <= IWL_RATE_54M_INDEX_TABLE; i++) 2541 for (i = IWL_RATE_6M_INDEX_TABLE; i <= IWL_RATE_54M_INDEX_TABLE; i++)
@@ -2269,7 +2546,7 @@ int iwl3945_init_hw_rate_table(struct iwl3945_priv *priv)
2269 break; 2546 break;
2270 2547
2271 default: 2548 default:
2272 IWL_DEBUG_RATE("Select G mode rate scale\n"); 2549 WARN_ON(1);
2273 break; 2550 break;
2274 } 2551 }
2275 2552
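
The switch above only rewrites fallback links per band: on 5 GHz, where CCK is not allowed, CCK entries are pointed at the first OFDM rate, while on 2.4 GHz the OFDM entries keep a CCK fallback. A toy illustration of that kind of per-band fallback fixup over a small invented rate table (not the driver's iwl3945_rates or its table_rs_index chaining):

#include <stdio.h>

enum band { BAND_2GHZ, BAND_5GHZ };

/* A tiny rate table: the first two entries are CCK, the rest OFDM. */
static const int rate_kbps[] = { 1000, 2000, 6000, 12000, 24000 };
#define N_RATES    5
#define FIRST_OFDM 2

int main(void)
{
        int next_rate[N_RATES];
        enum band band = BAND_5GHZ;

        /* Default fallback: step down one entry. */
        for (int i = 0; i < N_RATES; i++)
                next_rate[i] = i ? i - 1 : 0;

        if (band == BAND_5GHZ) {
                /* CCK is not legal on 5 GHz: fall back to the first OFDM rate. */
                for (int i = 0; i < FIRST_OFDM; i++)
                        next_rate[i] = FIRST_OFDM;
                /* ...and the lowest OFDM rate has nowhere lower to go. */
                next_rate[FIRST_OFDM] = FIRST_OFDM;
        }

        for (int i = 0; i < N_RATES; i++)
                printf("%5d kbps falls back to %5d kbps\n",
                       rate_kbps[i], rate_kbps[next_rate[i]]);
        return 0;
}
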
@@ -2303,7 +2580,6 @@ int iwl3945_hw_set_hw_setting(struct iwl3945_priv *priv)
2303 return -ENOMEM; 2580 return -ENOMEM;
2304 } 2581 }
2305 2582
2306 priv->hw_setting.ac_queue_count = AC_NUM;
2307 priv->hw_setting.rx_buf_size = IWL_RX_BUF_SIZE; 2583 priv->hw_setting.rx_buf_size = IWL_RX_BUF_SIZE;
2308 priv->hw_setting.max_pkt_size = 2342; 2584 priv->hw_setting.max_pkt_size = 2342;
2309 priv->hw_setting.tx_cmd_len = sizeof(struct iwl3945_tx_cmd); 2585 priv->hw_setting.tx_cmd_len = sizeof(struct iwl3945_tx_cmd);
@@ -2311,6 +2587,8 @@ int iwl3945_hw_set_hw_setting(struct iwl3945_priv *priv)
2311 priv->hw_setting.max_rxq_log = RX_QUEUE_SIZE_LOG; 2587 priv->hw_setting.max_rxq_log = RX_QUEUE_SIZE_LOG;
2312 priv->hw_setting.max_stations = IWL3945_STATION_COUNT; 2588 priv->hw_setting.max_stations = IWL3945_STATION_COUNT;
2313 priv->hw_setting.bcast_sta_id = IWL3945_BROADCAST_ID; 2589 priv->hw_setting.bcast_sta_id = IWL3945_BROADCAST_ID;
2590
2591 priv->hw_setting.tx_ant_num = 2;
2314 return 0; 2592 return 0;
2315} 2593}
2316 2594
@@ -2323,7 +2601,7 @@ unsigned int iwl3945_hw_get_beacon_cmd(struct iwl3945_priv *priv,
2323 tx_beacon_cmd = (struct iwl3945_tx_beacon_cmd *)&frame->u; 2601 tx_beacon_cmd = (struct iwl3945_tx_beacon_cmd *)&frame->u;
2324 memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd)); 2602 memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd));
2325 2603
2326 tx_beacon_cmd->tx.sta_id = IWL3945_BROADCAST_ID; 2604 tx_beacon_cmd->tx.sta_id = priv->hw_setting.bcast_sta_id;
2327 tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; 2605 tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
2328 2606
2329 frame_size = iwl3945_fill_beacon_frame(priv, 2607 frame_size = iwl3945_fill_beacon_frame(priv,
@@ -2350,6 +2628,7 @@ unsigned int iwl3945_hw_get_beacon_cmd(struct iwl3945_priv *priv,
2350 2628
2351void iwl3945_hw_rx_handler_setup(struct iwl3945_priv *priv) 2629void iwl3945_hw_rx_handler_setup(struct iwl3945_priv *priv)
2352{ 2630{
2631 priv->rx_handlers[REPLY_TX] = iwl3945_rx_reply_tx;
2353 priv->rx_handlers[REPLY_3945_RX] = iwl3945_rx_reply_rx; 2632 priv->rx_handlers[REPLY_3945_RX] = iwl3945_rx_reply_rx;
2354} 2633}
2355 2634
@@ -2364,9 +2643,25 @@ void iwl3945_hw_cancel_deferred_work(struct iwl3945_priv *priv)
2364 cancel_delayed_work(&priv->thermal_periodic); 2643 cancel_delayed_work(&priv->thermal_periodic);
2365} 2644}
2366 2645
2646static struct iwl_3945_cfg iwl3945_bg_cfg = {
2647 .name = "3945BG",
2648 .fw_name = "iwlwifi-3945" IWL3945_UCODE_API ".ucode",
2649 .sku = IWL_SKU_G,
2650};
2651
2652static struct iwl_3945_cfg iwl3945_abg_cfg = {
2653 .name = "3945ABG",
2654 .fw_name = "iwlwifi-3945" IWL3945_UCODE_API ".ucode",
2655 .sku = IWL_SKU_A|IWL_SKU_G,
2656};
2657
2367struct pci_device_id iwl3945_hw_card_ids[] = { 2658struct pci_device_id iwl3945_hw_card_ids[] = {
2368 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4222)}, 2659 {IWL_PCI_DEVICE(0x4222, 0x1005, iwl3945_bg_cfg)},
2369 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4227)}, 2660 {IWL_PCI_DEVICE(0x4222, 0x1034, iwl3945_bg_cfg)},
2661 {IWL_PCI_DEVICE(0x4222, 0x1044, iwl3945_bg_cfg)},
2662 {IWL_PCI_DEVICE(0x4227, 0x1014, iwl3945_bg_cfg)},
2663 {IWL_PCI_DEVICE(0x4222, PCI_ANY_ID, iwl3945_abg_cfg)},
2664 {IWL_PCI_DEVICE(0x4227, PCI_ANY_ID, iwl3945_abg_cfg)},
2370 {0} 2665 {0}
2371}; 2666};
2372 2667
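
The new ID table matches a few specific subsystem IDs to the BG-only configuration and lets the PCI_ANY_ID entries catch everything else as ABG; the cfg pointer is presumably carried to the probe routine via the pci_device_id driver_data field. A generic, hypothetical sketch of that first-match-wins pattern (dev_cfg, id_entry and ANY_ID are invented, not iwlwifi's or the PCI core's types):

#include <stdio.h>

#define ANY_ID 0xffff

struct dev_cfg { const char *name; };

static const struct dev_cfg bg_cfg  = { "3945BG"  };
static const struct dev_cfg abg_cfg = { "3945ABG" };

struct id_entry {
        unsigned short device, subdevice;
        const struct dev_cfg *cfg;       /* stands in for driver_data */
};

/* Order matters: specific subdevice IDs first, ANY_ID catch-alls last. */
static const struct id_entry ids[] = {
        { 0x4222, 0x1005, &bg_cfg  },
        { 0x4222, 0x1034, &bg_cfg  },
        { 0x4222, ANY_ID, &abg_cfg },
        { 0x4227, ANY_ID, &abg_cfg },
        { 0 }
};

static const struct dev_cfg *match(unsigned short dev, unsigned short subdev)
{
        for (const struct id_entry *e = ids; e->device; e++)
                if (e->device == dev &&
                    (e->subdevice == ANY_ID || e->subdevice == subdev))
                        return e->cfg;
        return NULL;
}

int main(void)
{
        printf("%s\n", match(0x4222, 0x1005)->name);   /* 3945BG  */
        printf("%s\n", match(0x4222, 0x2000)->name);   /* 3945ABG */
        return 0;
}
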
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.h b/drivers/net/wireless/iwlwifi/iwl-3945.h
index 1da14f9bbe0f..45c1c5533bf0 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.h
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2003 - 2007 Intel Corporation. All rights reserved. 3 * Copyright(c) 2003 - 2008 Intel Corporation. All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as 6 * under the terms of version 2 of the GNU General Public License as
@@ -40,9 +40,17 @@
40extern struct pci_device_id iwl3945_hw_card_ids[]; 40extern struct pci_device_id iwl3945_hw_card_ids[];
41 41
42#define DRV_NAME "iwl3945" 42#define DRV_NAME "iwl3945"
43#include "iwl-3945-hw.h" 43#include "iwl-csr.h"
44#include "iwl-prph.h" 44#include "iwl-prph.h"
45#include "iwl-3945-hw.h"
45#include "iwl-3945-debug.h" 46#include "iwl-3945-debug.h"
47#include "iwl-3945-led.h"
48
 49/* Change the firmware file name ("-" plus an incrementing number) *only*
 50 * when the uCode interface or architecture changes in a way that is not
 51 * compatible with earlier drivers.
 52 * This number also appears in the << 8 position of the 1st dword of the uCode file */
53#define IWL3945_UCODE_API "-1"
46 54
47/* Default noise level to report when noise measurement is not available. 55/* Default noise level to report when noise measurement is not available.
48 * This may be because we're: 56 * This may be because we're:
@@ -109,6 +117,9 @@ struct iwl3945_queue {
109 * space less than this */ 117 * space less than this */
110} __attribute__ ((packed)); 118} __attribute__ ((packed));
111 119
120int iwl3945_queue_space(const struct iwl3945_queue *q);
121int iwl3945_x2_queue_used(const struct iwl3945_queue *q, int i);
122
112#define MAX_NUM_OF_TBS (20) 123#define MAX_NUM_OF_TBS (20)
113 124
114/* One for each TFD */ 125/* One for each TFD */
@@ -195,7 +206,7 @@ struct iwl3945_channel_info {
195 206
196 u8 group_index; /* 0-4, maps channel to group1/2/3/4/5 */ 207 u8 group_index; /* 0-4, maps channel to group1/2/3/4/5 */
197 u8 band_index; /* 0-4, maps channel to band1/2/3/4/5 */ 208 u8 band_index; /* 0-4, maps channel to band1/2/3/4/5 */
198 u8 phymode; /* MODE_IEEE80211{A,B,G} */ 209 enum ieee80211_band band;
199 210
200 /* Radio/DSP gain settings for each "normal" data Tx rate. 211 /* Radio/DSP gain settings for each "normal" data Tx rate.
201 * These include, in addition to RF and DSP gain, a few fields for 212 * These include, in addition to RF and DSP gain, a few fields for
@@ -269,8 +280,8 @@ struct iwl3945_frame {
269 280
270#define SEQ_TO_QUEUE(x) ((x >> 8) & 0xbf) 281#define SEQ_TO_QUEUE(x) ((x >> 8) & 0xbf)
271#define QUEUE_TO_SEQ(x) ((x & 0xbf) << 8) 282#define QUEUE_TO_SEQ(x) ((x & 0xbf) << 8)
272#define SEQ_TO_INDEX(x) (x & 0xff) 283#define SEQ_TO_INDEX(x) ((u8)(x & 0xff))
273#define INDEX_TO_SEQ(x) (x & 0xff) 284#define INDEX_TO_SEQ(x) ((u8)(x & 0xff))
274#define SEQ_HUGE_FRAME (0x4000) 285#define SEQ_HUGE_FRAME (0x4000)
275#define SEQ_RX_FRAME __constant_cpu_to_le16(0x8000) 286#define SEQ_RX_FRAME __constant_cpu_to_le16(0x8000)
276#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4) 287#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4)
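
The sequence word packs the originating Tx queue into bits 8..13 and the ring index into the low byte, which is how iwl3945_rx_reply_tx earlier in this patch recovers txq_id and index from pkt->hdr.sequence. A quick standalone round-trip of that packing, using plain unsigned ints instead of the driver's u8/__le16 types:

#include <stdio.h>

#define SEQ_TO_QUEUE(x)  (((x) >> 8) & 0xbf)
#define QUEUE_TO_SEQ(x)  (((x) & 0xbf) << 8)
#define SEQ_TO_INDEX(x)  ((x) & 0xff)
#define INDEX_TO_SEQ(x)  ((x) & 0xff)

int main(void)
{
        unsigned int txq_id = 3, index = 42;
        unsigned int sequence = QUEUE_TO_SEQ(txq_id) | INDEX_TO_SEQ(index);

        printf("sequence = 0x%04x -> queue %u, index %u\n",
               sequence, SEQ_TO_QUEUE(sequence), SEQ_TO_INDEX(sequence));
        /* prints: sequence = 0x032a -> queue 3, index 42 */
        return 0;
}
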
@@ -390,23 +401,24 @@ struct iwl3945_rx_queue {
390#define MIN_B_CHANNELS 1 401#define MIN_B_CHANNELS 1
391 402
392#define STATUS_HCMD_ACTIVE 0 /* host command in progress */ 403#define STATUS_HCMD_ACTIVE 0 /* host command in progress */
393#define STATUS_INT_ENABLED 1 404#define STATUS_HCMD_SYNC_ACTIVE 1 /* sync host command in progress */
394#define STATUS_RF_KILL_HW 2 405#define STATUS_INT_ENABLED 2
395#define STATUS_RF_KILL_SW 3 406#define STATUS_RF_KILL_HW 3
396#define STATUS_INIT 4 407#define STATUS_RF_KILL_SW 4
397#define STATUS_ALIVE 5 408#define STATUS_INIT 5
398#define STATUS_READY 6 409#define STATUS_ALIVE 6
399#define STATUS_TEMPERATURE 7 410#define STATUS_READY 7
400#define STATUS_GEO_CONFIGURED 8 411#define STATUS_TEMPERATURE 8
401#define STATUS_EXIT_PENDING 9 412#define STATUS_GEO_CONFIGURED 9
402#define STATUS_IN_SUSPEND 10 413#define STATUS_EXIT_PENDING 10
403#define STATUS_STATISTICS 11 414#define STATUS_IN_SUSPEND 11
404#define STATUS_SCANNING 12 415#define STATUS_STATISTICS 12
405#define STATUS_SCAN_ABORTING 13 416#define STATUS_SCANNING 13
406#define STATUS_SCAN_HW 14 417#define STATUS_SCAN_ABORTING 14
407#define STATUS_POWER_PMI 15 418#define STATUS_SCAN_HW 15
408#define STATUS_FW_ERROR 16 419#define STATUS_POWER_PMI 16
409#define STATUS_CONF_PENDING 17 420#define STATUS_FW_ERROR 17
421#define STATUS_CONF_PENDING 18
410 422
411#define MAX_TID_COUNT 9 423#define MAX_TID_COUNT 9
412 424
@@ -431,8 +443,6 @@ union iwl3945_ht_rate_supp {
431 }; 443 };
432}; 444};
433 445
434#ifdef CONFIG_IWL3945_QOS
435
436union iwl3945_qos_capabity { 446union iwl3945_qos_capabity {
437 struct { 447 struct {
438 u8 edca_count:4; /* bit 0-3 */ 448 u8 edca_count:4; /* bit 0-3 */
@@ -460,7 +470,6 @@ struct iwl3945_qos_info {
460 union iwl3945_qos_capabity qos_cap; 470 union iwl3945_qos_capabity qos_cap;
461 struct iwl3945_qosparam_cmd def_qos_parm; 471 struct iwl3945_qosparam_cmd def_qos_parm;
462}; 472};
463#endif /*CONFIG_IWL3945_QOS */
464 473
465#define STA_PS_STATUS_WAKE 0 474#define STA_PS_STATUS_WAKE 0
466#define STA_PS_STATUS_SLEEP 1 475#define STA_PS_STATUS_SLEEP 1
@@ -511,8 +520,8 @@ struct iwl3945_ibss_seq {
511/** 520/**
512 * struct iwl3945_driver_hw_info 521 * struct iwl3945_driver_hw_info
513 * @max_txq_num: Max # Tx queues supported 522 * @max_txq_num: Max # Tx queues supported
514 * @ac_queue_count: # Tx queues for EDCA Access Categories (AC)
515 * @tx_cmd_len: Size of Tx command (but not including frame itself) 523 * @tx_cmd_len: Size of Tx command (but not including frame itself)
524 * @tx_ant_num: Number of TX antennas
516 * @max_rxq_size: Max # Rx frames in Rx queue (must be power-of-2) 525 * @max_rxq_size: Max # Rx frames in Rx queue (must be power-of-2)
517 * @rx_buf_size: 526 * @rx_buf_size:
518 * @max_pkt_size: 527 * @max_pkt_size:
@@ -524,8 +533,8 @@ struct iwl3945_ibss_seq {
524 */ 533 */
525struct iwl3945_driver_hw_info { 534struct iwl3945_driver_hw_info {
526 u16 max_txq_num; 535 u16 max_txq_num;
527 u16 ac_queue_count;
528 u16 tx_cmd_len; 536 u16 tx_cmd_len;
537 u16 tx_ant_num;
529 u16 max_rxq_size; 538 u16 max_rxq_size;
530 u32 rx_buf_size; 539 u32 rx_buf_size;
531 u32 max_pkt_size; 540 u32 max_pkt_size;
@@ -561,16 +570,6 @@ extern int iwl3945_is_network_packet(struct iwl3945_priv *priv,
561 struct ieee80211_hdr *header); 570 struct ieee80211_hdr *header);
562extern int iwl3945_power_init_handle(struct iwl3945_priv *priv); 571extern int iwl3945_power_init_handle(struct iwl3945_priv *priv);
563extern int iwl3945_eeprom_init(struct iwl3945_priv *priv); 572extern int iwl3945_eeprom_init(struct iwl3945_priv *priv);
564#ifdef CONFIG_IWL3945_DEBUG
565extern void iwl3945_report_frame(struct iwl3945_priv *priv,
566 struct iwl3945_rx_packet *pkt,
567 struct ieee80211_hdr *header, int group100);
568#else
569static inline void iwl3945_report_frame(struct iwl3945_priv *priv,
570 struct iwl3945_rx_packet *pkt,
571 struct ieee80211_hdr *header,
572 int group100) {}
573#endif
574extern void iwl3945_handle_data_packet_monitor(struct iwl3945_priv *priv, 573extern void iwl3945_handle_data_packet_monitor(struct iwl3945_priv *priv,
575 struct iwl3945_rx_mem_buffer *rxb, 574 struct iwl3945_rx_mem_buffer *rxb,
576 void *data, short len, 575 void *data, short len,
@@ -688,25 +687,28 @@ enum {
688 687
689#endif 688#endif
690 689
690#define IWL_MAX_NUM_QUEUES IWL39_MAX_NUM_QUEUES
691
691struct iwl3945_priv { 692struct iwl3945_priv {
692 693
693 /* ieee device used by generic ieee processing code */ 694 /* ieee device used by generic ieee processing code */
694 struct ieee80211_hw *hw; 695 struct ieee80211_hw *hw;
695 struct ieee80211_channel *ieee_channels; 696 struct ieee80211_channel *ieee_channels;
696 struct ieee80211_rate *ieee_rates; 697 struct ieee80211_rate *ieee_rates;
698 struct iwl_3945_cfg *cfg; /* device configuration */
697 699
698 /* temporary frame storage list */ 700 /* temporary frame storage list */
699 struct list_head free_frames; 701 struct list_head free_frames;
700 int frames_count; 702 int frames_count;
701 703
702 u8 phymode; 704 enum ieee80211_band band;
703 int alloc_rxb_skb; 705 int alloc_rxb_skb;
704 bool add_radiotap; 706 bool add_radiotap;
705 707
706 void (*rx_handlers[REPLY_MAX])(struct iwl3945_priv *priv, 708 void (*rx_handlers[REPLY_MAX])(struct iwl3945_priv *priv,
707 struct iwl3945_rx_mem_buffer *rxb); 709 struct iwl3945_rx_mem_buffer *rxb);
708 710
709 const struct ieee80211_hw_mode *modes; 711 struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
710 712
711#ifdef CONFIG_IWL3945_SPECTRUM_MEASUREMENT 713#ifdef CONFIG_IWL3945_SPECTRUM_MEASUREMENT
712 /* spectrum measurement report caching */ 714 /* spectrum measurement report caching */
@@ -779,13 +781,15 @@ struct iwl3945_priv {
779 struct iwl3945_init_alive_resp card_alive_init; 781 struct iwl3945_init_alive_resp card_alive_init;
780 struct iwl3945_alive_resp card_alive; 782 struct iwl3945_alive_resp card_alive;
781 783
782#ifdef LED 784#ifdef CONFIG_IWL3945_LEDS
783 /* LED related variables */ 785 struct iwl3945_led led[IWL_LED_TRG_MAX];
784 struct iwl3945_activity_blink activity; 786 unsigned long last_blink_time;
785 unsigned long led_packets; 787 u8 last_blink_rate;
786 int led_state; 788 u8 allow_blinking;
789 unsigned int rxtxpackets;
787#endif 790#endif
788 791
792
789 u16 active_rate; 793 u16 active_rate;
790 u16 active_rate_basic; 794 u16 active_rate_basic;
791 795
@@ -803,7 +807,6 @@ struct iwl3945_priv {
803 struct iwl3945_tx_queue txq[IWL_MAX_NUM_QUEUES]; 807 struct iwl3945_tx_queue txq[IWL_MAX_NUM_QUEUES];
804 808
805 unsigned long status; 809 unsigned long status;
806 u32 config;
807 810
 808 int last_rx_rssi; /* From Rx packet statistics */ 811 int last_rx_rssi; /* From Rx packet statistics */
809 int last_rx_noise; /* From beacon statistics */ 812 int last_rx_noise; /* From beacon statistics */
@@ -830,10 +833,9 @@ struct iwl3945_priv {
830 struct iwl3945_station_entry stations[IWL_STATION_COUNT]; 833 struct iwl3945_station_entry stations[IWL_STATION_COUNT];
831 834
832 /* Indication if ieee80211_ops->open has been called */ 835 /* Indication if ieee80211_ops->open has been called */
833 int is_open; 836 u8 is_open;
834 837
835 u8 mac80211_registered; 838 u8 mac80211_registered;
836 int is_abg;
837 839
838 u32 notif_missed_beacons; 840 u32 notif_missed_beacons;
839 841
@@ -852,7 +854,7 @@ struct iwl3945_priv {
852 /* eeprom */ 854 /* eeprom */
853 struct iwl3945_eeprom eeprom; 855 struct iwl3945_eeprom eeprom;
854 856
855 int iw_mode; 857 enum ieee80211_if_types iw_mode;
856 858
857 struct sk_buff *ibss_beacon; 859 struct sk_buff *ibss_beacon;
858 860
@@ -869,9 +871,7 @@ struct iwl3945_priv {
869 u16 assoc_capability; 871 u16 assoc_capability;
870 u8 ps_mode; 872 u8 ps_mode;
871 873
872#ifdef CONFIG_IWL3945_QOS
873 struct iwl3945_qos_info qos_data; 874 struct iwl3945_qos_info qos_data;
874#endif /*CONFIG_IWL3945_QOS */
875 875
876 struct workqueue_struct *workqueue; 876 struct workqueue_struct *workqueue;
877 877
@@ -937,13 +937,12 @@ static inline int is_channel_radar(const struct iwl3945_channel_info *ch_info)
937 937
938static inline u8 is_channel_a_band(const struct iwl3945_channel_info *ch_info) 938static inline u8 is_channel_a_band(const struct iwl3945_channel_info *ch_info)
939{ 939{
940 return ch_info->phymode == MODE_IEEE80211A; 940 return ch_info->band == IEEE80211_BAND_5GHZ;
941} 941}
942 942
943static inline u8 is_channel_bg_band(const struct iwl3945_channel_info *ch_info) 943static inline u8 is_channel_bg_band(const struct iwl3945_channel_info *ch_info)
944{ 944{
945 return ((ch_info->phymode == MODE_IEEE80211B) || 945 return ch_info->band == IEEE80211_BAND_2GHZ;
946 (ch_info->phymode == MODE_IEEE80211G));
947} 946}
948 947
949static inline int is_channel_passive(const struct iwl3945_channel_info *ch) 948static inline int is_channel_passive(const struct iwl3945_channel_info *ch)
@@ -956,18 +955,8 @@ static inline int is_channel_ibss(const struct iwl3945_channel_info *ch)
956 return ((ch->flags & EEPROM_CHANNEL_IBSS)) ? 1 : 0; 955 return ((ch->flags & EEPROM_CHANNEL_IBSS)) ? 1 : 0;
957} 956}
958 957
959static inline int iwl3945_rate_index_from_plcp(int plcp)
960{
961 int i;
962
963 for (i = 0; i < IWL_RATE_COUNT; i++)
964 if (iwl3945_rates[i].plcp == plcp)
965 return i;
966 return -1;
967}
968
969extern const struct iwl3945_channel_info *iwl3945_get_channel_info( 958extern const struct iwl3945_channel_info *iwl3945_get_channel_info(
970 const struct iwl3945_priv *priv, int phymode, u16 channel); 959 const struct iwl3945_priv *priv, enum ieee80211_band band, u16 channel);
971 960
972/* Requires full declaration of iwl3945_priv before including */ 961/* Requires full declaration of iwl3945_priv before including */
973#include "iwl-3945-io.h" 962#include "iwl-3945-io.h"
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965-commands.h b/drivers/net/wireless/iwlwifi/iwl-4965-commands.h
index f3470c896d9a..3bcd107e2d71 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965-commands.h
+++ b/drivers/net/wireless/iwlwifi/iwl-4965-commands.h
@@ -5,7 +5,7 @@
5 * 5 *
6 * GPL LICENSE SUMMARY 6 * GPL LICENSE SUMMARY
7 * 7 *
8 * Copyright(c) 2005 - 2007 Intel Corporation. All rights reserved. 8 * Copyright(c) 2005 - 2008 Intel Corporation. All rights reserved.
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as 11 * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
30 * 30 *
31 * BSD LICENSE 31 * BSD LICENSE
32 * 32 *
33 * Copyright(c) 2005 - 2007 Intel Corporation. All rights reserved. 33 * Copyright(c) 2005 - 2008 Intel Corporation. All rights reserved.
34 * All rights reserved. 34 * All rights reserved.
35 * 35 *
36 * Redistribution and use in source and binary forms, with or without 36 * Redistribution and use in source and binary forms, with or without
@@ -84,6 +84,9 @@ enum {
84 REPLY_REMOVE_STA = 0x19, /* not used */ 84 REPLY_REMOVE_STA = 0x19, /* not used */
85 REPLY_REMOVE_ALL_STA = 0x1a, /* not used */ 85 REPLY_REMOVE_ALL_STA = 0x1a, /* not used */
86 86
87 /* Security */
88 REPLY_WEPKEY = 0x20,
89
87 /* RX, TX, LEDs */ 90 /* RX, TX, LEDs */
88 REPLY_TX = 0x1c, 91 REPLY_TX = 0x1c,
89 REPLY_RATE_SCALE = 0x47, /* 3945 only */ 92 REPLY_RATE_SCALE = 0x47, /* 3945 only */
@@ -139,7 +142,7 @@ enum {
139 REPLY_PHY_CALIBRATION_CMD = 0xb0, 142 REPLY_PHY_CALIBRATION_CMD = 0xb0,
140 REPLY_RX_PHY_CMD = 0xc0, 143 REPLY_RX_PHY_CMD = 0xc0,
141 REPLY_RX_MPDU_CMD = 0xc1, 144 REPLY_RX_MPDU_CMD = 0xc1,
142 REPLY_4965_RX = 0xc3, 145 REPLY_RX = 0xc3,
143 REPLY_COMPRESSED_BA = 0xc5, 146 REPLY_COMPRESSED_BA = 0xc5,
144 REPLY_MAX = 0xff 147 REPLY_MAX = 0xff
145}; 148};
@@ -151,16 +154,16 @@ enum {
151 * 154 *
152 *****************************************************************************/ 155 *****************************************************************************/
153 156
154/* iwl4965_cmd_header flags value */ 157/* iwl_cmd_header flags value */
155#define IWL_CMD_FAILED_MSK 0x40 158#define IWL_CMD_FAILED_MSK 0x40
156 159
157/** 160/**
158 * struct iwl4965_cmd_header 161 * struct iwl_cmd_header
159 * 162 *
160 * This header format appears in the beginning of each command sent from the 163 * This header format appears in the beginning of each command sent from the
161 * driver, and each response/notification received from uCode. 164 * driver, and each response/notification received from uCode.
162 */ 165 */
163struct iwl4965_cmd_header { 166struct iwl_cmd_header {
164 u8 cmd; /* Command ID: REPLY_RXON, etc. */ 167 u8 cmd; /* Command ID: REPLY_RXON, etc. */
165 u8 flags; /* IWL_CMD_* */ 168 u8 flags; /* IWL_CMD_* */
166 /* 169 /*
@@ -194,7 +197,7 @@ struct iwl4965_cmd_header {
194 * 4965 rate_n_flags bit fields 197 * 4965 rate_n_flags bit fields
195 * 198 *
196 * rate_n_flags format is used in following 4965 commands: 199 * rate_n_flags format is used in following 4965 commands:
197 * REPLY_4965_RX (response only) 200 * REPLY_RX (response only)
198 * REPLY_TX (both command and response) 201 * REPLY_TX (both command and response)
199 * REPLY_TX_LINK_QUALITY_CMD 202 * REPLY_TX_LINK_QUALITY_CMD
200 * 203 *
@@ -266,11 +269,10 @@ struct iwl4965_cmd_header {
266 * 10 B active, A inactive 269 * 10 B active, A inactive
267 * 11 Both active 270 * 11 Both active
268 */ 271 */
269#define RATE_MCS_ANT_A_POS 14 272#define RATE_MCS_ANT_POS 14
270#define RATE_MCS_ANT_B_POS 15 273#define RATE_MCS_ANT_A_MSK 0x04000
271#define RATE_MCS_ANT_A_MSK 0x4000 274#define RATE_MCS_ANT_B_MSK 0x08000
272#define RATE_MCS_ANT_B_MSK 0x8000 275#define RATE_MCS_ANT_AB_MSK 0x0C000
273#define RATE_MCS_ANT_AB_MSK 0xc000
274 276
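
A small standalone sketch of decoding those two antenna bits from a rate_n_flags word (the masks are copied from the new definitions above; the sample values are made up):

#include <stdio.h>

#define RATE_MCS_ANT_A_MSK  0x04000
#define RATE_MCS_ANT_B_MSK  0x08000
#define RATE_MCS_ANT_AB_MSK 0x0C000

static const char *ant_str(unsigned int rate_n_flags)
{
        switch (rate_n_flags & RATE_MCS_ANT_AB_MSK) {
        case RATE_MCS_ANT_A_MSK:  return "A only";
        case RATE_MCS_ANT_B_MSK:  return "B only";
        case RATE_MCS_ANT_AB_MSK: return "A and B";
        default:                  return "none";
        }
}

int main(void)
{
        printf("%s\n", ant_str(0x0400d));  /* A only, legacy rate in low bits */
        printf("%s\n", ant_str(0x0c00d));  /* A and B */
        return 0;
}
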
275 277
276/** 278/**
@@ -727,14 +729,21 @@ struct iwl4965_qosparam_cmd {
727#define STA_CONTROL_MODIFY_MSK 0x01 729#define STA_CONTROL_MODIFY_MSK 0x01
728 730
729/* key flags __le16*/ 731/* key flags __le16*/
730#define STA_KEY_FLG_ENCRYPT_MSK __constant_cpu_to_le16(0x7) 732#define STA_KEY_FLG_ENCRYPT_MSK __constant_cpu_to_le16(0x0007)
731#define STA_KEY_FLG_NO_ENC __constant_cpu_to_le16(0x0) 733#define STA_KEY_FLG_NO_ENC __constant_cpu_to_le16(0x0000)
732#define STA_KEY_FLG_WEP __constant_cpu_to_le16(0x1) 734#define STA_KEY_FLG_WEP __constant_cpu_to_le16(0x0001)
733#define STA_KEY_FLG_CCMP __constant_cpu_to_le16(0x2) 735#define STA_KEY_FLG_CCMP __constant_cpu_to_le16(0x0002)
734#define STA_KEY_FLG_TKIP __constant_cpu_to_le16(0x3) 736#define STA_KEY_FLG_TKIP __constant_cpu_to_le16(0x0003)
735 737
736#define STA_KEY_FLG_KEYID_POS 8 738#define STA_KEY_FLG_KEYID_POS 8
737#define STA_KEY_FLG_INVALID __constant_cpu_to_le16(0x0800) 739#define STA_KEY_FLG_INVALID __constant_cpu_to_le16(0x0800)
740/* wep key is either from global key (0) or from station info array (1) */
741#define STA_KEY_FLG_MAP_KEY_MSK __constant_cpu_to_le16(0x0008)
742
743/* wep key in STA: 5-bytes (0) or 13-bytes (1) */
744#define STA_KEY_FLG_KEY_SIZE_MSK __constant_cpu_to_le16(0x1000)
745#define STA_KEY_MULTICAST_MSK __constant_cpu_to_le16(0x4000)
746#define STA_KEY_MAX_NUM 8
738 747
739/* Flags indicate whether to modify vs. don't change various station params */ 748/* Flags indicate whether to modify vs. don't change various station params */
740#define STA_MODIFY_KEY_MASK 0x01 749#define STA_MODIFY_KEY_MASK 0x01
@@ -752,7 +761,8 @@ struct iwl4965_keyinfo {
752 u8 tkip_rx_tsc_byte2; /* TSC[2] for key mix ph1 detection */ 761 u8 tkip_rx_tsc_byte2; /* TSC[2] for key mix ph1 detection */
753 u8 reserved1; 762 u8 reserved1;
754 __le16 tkip_rx_ttak[5]; /* 10-byte unicast TKIP TTAK */ 763 __le16 tkip_rx_ttak[5]; /* 10-byte unicast TKIP TTAK */
755 __le16 reserved2; 764 u8 key_offset;
765 u8 reserved2;
756 u8 key[16]; /* 16-byte unicast decryption key */ 766 u8 key[16]; /* 16-byte unicast decryption key */
757} __attribute__ ((packed)); 767} __attribute__ ((packed));
758 768
@@ -842,6 +852,30 @@ struct iwl4965_add_sta_resp {
842 u8 status; /* ADD_STA_* */ 852 u8 status; /* ADD_STA_* */
843} __attribute__ ((packed)); 853} __attribute__ ((packed));
844 854
855/*
856 * REPLY_WEP_KEY = 0x20
857 */
858struct iwl_wep_key {
859 u8 key_index;
860 u8 key_offset;
861 u8 reserved1[2];
862 u8 key_size;
863 u8 reserved2[3];
864 u8 key[16];
865} __attribute__ ((packed));
866
867struct iwl_wep_cmd {
868 u8 num_keys;
869 u8 global_key_type;
870 u8 flags;
871 u8 reserved;
872 struct iwl_wep_key key[0];
873} __attribute__ ((packed));
874
875#define WEP_KEY_WEP_TYPE 1
876#define WEP_KEYS_MAX 4
877#define WEP_INVALID_OFFSET 0xff
878#define WEP_KEY_LEN_128 13
845 879
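
struct iwl_wep_cmd above ends in a zero-length key[] array, so the host command is sized as the fixed header plus one iwl_wep_key per key being programmed. A hedged sketch of building such a variable-length buffer with plain userspace allocation and invented key material (the real driver fills this from its key cache and submits it as a host command):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct wep_key {
        unsigned char key_index, key_offset, reserved1[2];
        unsigned char key_size, reserved2[3];
        unsigned char key[16];
};

struct wep_cmd {
        unsigned char num_keys, global_key_type, flags, reserved;
        struct wep_key key[];            /* C99 flexible array member */
};

int main(void)
{
        int nkeys = 2;
        size_t len = sizeof(struct wep_cmd) + nkeys * sizeof(struct wep_key);
        struct wep_cmd *cmd = calloc(1, len);

        if (!cmd)
                return 1;
        cmd->num_keys = nkeys;
        for (int i = 0; i < nkeys; i++) {
                cmd->key[i].key_index = i;
                cmd->key[i].key_size = 13;          /* 104-bit WEP */
                memset(cmd->key[i].key, 0xaa, 13);  /* placeholder key bytes */
        }
        printf("command length to send: %zu bytes\n", len);
        free(cmd);
        return 0;
}
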
846/****************************************************************************** 880/******************************************************************************
847 * (4) 881 * (4)
@@ -868,26 +902,35 @@ struct iwl4965_rx_frame_hdr {
868 u8 payload[0]; 902 u8 payload[0];
869} __attribute__ ((packed)); 903} __attribute__ ((packed));
870 904
871#define RX_RES_STATUS_NO_CRC32_ERROR __constant_cpu_to_le32(1 << 0) 905#define RX_RES_STATUS_NO_CRC32_ERROR __constant_cpu_to_le32(1 << 0)
872#define RX_RES_STATUS_NO_RXE_OVERFLOW __constant_cpu_to_le32(1 << 1) 906#define RX_RES_STATUS_NO_RXE_OVERFLOW __constant_cpu_to_le32(1 << 1)
907
908#define RX_RES_PHY_FLAGS_BAND_24_MSK __constant_cpu_to_le16(1 << 0)
909#define RX_RES_PHY_FLAGS_MOD_CCK_MSK __constant_cpu_to_le16(1 << 1)
910#define RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK __constant_cpu_to_le16(1 << 2)
911#define RX_RES_PHY_FLAGS_NARROW_BAND_MSK __constant_cpu_to_le16(1 << 3)
912#define RX_RES_PHY_FLAGS_ANTENNA_MSK __constant_cpu_to_le16(0xf0)
913
914#define RX_RES_STATUS_SEC_TYPE_MSK (0x7 << 8)
915#define RX_RES_STATUS_SEC_TYPE_NONE (0x0 << 8)
916#define RX_RES_STATUS_SEC_TYPE_WEP (0x1 << 8)
917#define RX_RES_STATUS_SEC_TYPE_CCMP (0x2 << 8)
918#define RX_RES_STATUS_SEC_TYPE_TKIP (0x3 << 8)
919#define RX_RES_STATUS_SEC_TYPE_ERR (0x7 << 8)
873 920
874#define RX_RES_PHY_FLAGS_BAND_24_MSK __constant_cpu_to_le16(1 << 0) 921#define RX_RES_STATUS_STATION_FOUND (1<<6)
875#define RX_RES_PHY_FLAGS_MOD_CCK_MSK __constant_cpu_to_le16(1 << 1) 922#define RX_RES_STATUS_NO_STATION_INFO_MISMATCH (1<<7)
876#define RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK __constant_cpu_to_le16(1 << 2)
877#define RX_RES_PHY_FLAGS_NARROW_BAND_MSK __constant_cpu_to_le16(1 << 3)
878#define RX_RES_PHY_FLAGS_ANTENNA_MSK __constant_cpu_to_le16(0xf0)
879 923
880#define RX_RES_STATUS_SEC_TYPE_MSK (0x7 << 8) 924#define RX_RES_STATUS_DECRYPT_TYPE_MSK (0x3 << 11)
881#define RX_RES_STATUS_SEC_TYPE_NONE (0x0 << 8) 925#define RX_RES_STATUS_NOT_DECRYPT (0x0 << 11)
882#define RX_RES_STATUS_SEC_TYPE_WEP (0x1 << 8) 926#define RX_RES_STATUS_DECRYPT_OK (0x3 << 11)
883#define RX_RES_STATUS_SEC_TYPE_CCMP (0x2 << 8) 927#define RX_RES_STATUS_BAD_ICV_MIC (0x1 << 11)
884#define RX_RES_STATUS_SEC_TYPE_TKIP (0x3 << 8) 928#define RX_RES_STATUS_BAD_KEY_TTAK (0x2 << 11)
885 929
886#define RX_RES_STATUS_DECRYPT_TYPE_MSK (0x3 << 11) 930#define RX_MPDU_RES_STATUS_ICV_OK (0x20)
887#define RX_RES_STATUS_NOT_DECRYPT (0x0 << 11) 931#define RX_MPDU_RES_STATUS_MIC_OK (0x40)
888#define RX_RES_STATUS_DECRYPT_OK (0x3 << 11) 932#define RX_MPDU_RES_STATUS_TTAK_OK (1 << 7)
889#define RX_RES_STATUS_BAD_ICV_MIC (0x1 << 11) 933#define RX_MPDU_RES_STATUS_DEC_DONE_MSK (0x800)
890#define RX_RES_STATUS_BAD_KEY_TTAK (0x2 << 11)
891 934
892struct iwl4965_rx_frame_end { 935struct iwl4965_rx_frame_end {
893 __le32 status; 936 __le32 status;
@@ -922,7 +965,7 @@ struct iwl4965_rx_non_cfg_phy {
922} __attribute__ ((packed)); 965} __attribute__ ((packed));
923 966
924/* 967/*
925 * REPLY_4965_RX = 0xc3 (response only, not a command) 968 * REPLY_RX = 0xc3 (response only, not a command)
926 * Used only for legacy (non 11n) frames. 969 * Used only for legacy (non 11n) frames.
927 */ 970 */
928#define RX_RES_PHY_CNT 14 971#define RX_RES_PHY_CNT 14
@@ -1038,6 +1081,10 @@ struct iwl4965_rx_mpdu_res_start {
1038 * MAC header) to DWORD boundary. */ 1081 * MAC header) to DWORD boundary. */
1039#define TX_CMD_FLG_MH_PAD_MSK __constant_cpu_to_le32(1 << 20) 1082#define TX_CMD_FLG_MH_PAD_MSK __constant_cpu_to_le32(1 << 20)
1040 1083
1084/* accelerate aggregation support
1085 * 0 - no CCMP encryption; 1 - CCMP encryption */
1086#define TX_CMD_FLG_AGG_CCMP_MSK __constant_cpu_to_le32(1 << 22)
1087
1041/* HCCA-AP - disable duration overwriting. */ 1088/* HCCA-AP - disable duration overwriting. */
1042#define TX_CMD_FLG_DUR_MSK __constant_cpu_to_le32(1 << 25) 1089#define TX_CMD_FLG_DUR_MSK __constant_cpu_to_le32(1 << 25)
1043 1090
@@ -1300,6 +1347,25 @@ struct iwl4965_tx_resp {
1300 __le32 status; /* TX status (for aggregation status of 1st frame) */ 1347 __le32 status; /* TX status (for aggregation status of 1st frame) */
1301} __attribute__ ((packed)); 1348} __attribute__ ((packed));
1302 1349
1350struct agg_tx_status {
1351 __le16 status;
1352 __le16 sequence;
1353} __attribute__ ((packed));
1354
1355struct iwl4965_tx_resp_agg {
1356 u8 frame_count; /* 1 no aggregation, >1 aggregation */
1357 u8 reserved1;
1358 u8 failure_rts;
1359 u8 failure_frame;
1360 __le32 rate_n_flags;
1361 __le16 wireless_media_time;
1362 __le16 reserved3;
1363 __le32 pa_power1;
1364 __le32 pa_power2;
1365 struct agg_tx_status status; /* TX status (for aggregation status */
1366 /* of 1st frame) */
1367} __attribute__ ((packed));
1368
1303/* 1369/*
1304 * REPLY_COMPRESSED_BA = 0xc5 (response only, not a command) 1370 * REPLY_COMPRESSED_BA = 0xc5 (response only, not a command)
1305 * 1371 *
@@ -1313,9 +1379,8 @@ struct iwl4965_compressed_ba_resp {
1313 /* Index of recipient (BA-sending) station in uCode's station table */ 1379 /* Index of recipient (BA-sending) station in uCode's station table */
1314 u8 sta_id; 1380 u8 sta_id;
1315 u8 tid; 1381 u8 tid;
1316 __le16 ba_seq_ctl; 1382 __le16 seq_ctl;
1317 __le32 ba_bitmap0; 1383 __le64 bitmap;
1318 __le32 ba_bitmap1;
1319 __le16 scd_flow; 1384 __le16 scd_flow;
1320 __le16 scd_ssn; 1385 __le16 scd_ssn;
1321} __attribute__ ((packed)); 1386} __attribute__ ((packed));
@@ -1348,11 +1413,11 @@ struct iwl4965_txpowertable_cmd {
1348 1413
1349 1414
1350/** 1415/**
1351 * struct iwl4965_link_qual_general_params 1416 * struct iwl_link_qual_general_params
1352 * 1417 *
1353 * Used in REPLY_TX_LINK_QUALITY_CMD 1418 * Used in REPLY_TX_LINK_QUALITY_CMD
1354 */ 1419 */
1355struct iwl4965_link_qual_general_params { 1420struct iwl_link_qual_general_params {
1356 u8 flags; 1421 u8 flags;
1357 1422
1358 /* No entries at or above this (driver chosen) index contain MIMO */ 1423 /* No entries at or above this (driver chosen) index contain MIMO */
@@ -1379,11 +1444,11 @@ struct iwl4965_link_qual_general_params {
1379} __attribute__ ((packed)); 1444} __attribute__ ((packed));
1380 1445
1381/** 1446/**
1382 * struct iwl4965_link_qual_agg_params 1447 * struct iwl_link_qual_agg_params
1383 * 1448 *
1384 * Used in REPLY_TX_LINK_QUALITY_CMD 1449 * Used in REPLY_TX_LINK_QUALITY_CMD
1385 */ 1450 */
1386struct iwl4965_link_qual_agg_params { 1451struct iwl_link_qual_agg_params {
1387 1452
1388 /* Maximum number of uSec in aggregation. 1453 /* Maximum number of uSec in aggregation.
1389 * Driver should set this to 4000 (4 milliseconds). */ 1454 * Driver should set this to 4000 (4 milliseconds). */
@@ -1593,14 +1658,14 @@ struct iwl4965_link_qual_agg_params {
1593 * legacy), and then repeat the search process. 1658 * legacy), and then repeat the search process.
1594 * 1659 *
1595 */ 1660 */
1596struct iwl4965_link_quality_cmd { 1661struct iwl_link_quality_cmd {
1597 1662
1598 /* Index of destination/recipient station in uCode's station table */ 1663 /* Index of destination/recipient station in uCode's station table */
1599 u8 sta_id; 1664 u8 sta_id;
1600 u8 reserved1; 1665 u8 reserved1;
1601 __le16 control; /* not used */ 1666 __le16 control; /* not used */
1602 struct iwl4965_link_qual_general_params general_params; 1667 struct iwl_link_qual_general_params general_params;
1603 struct iwl4965_link_qual_agg_params agg_params; 1668 struct iwl_link_qual_agg_params agg_params;
1604 1669
1605 /* 1670 /*
1606 * Rate info; when using rate-scaling, Tx command's initial_rate_index 1671 * Rate info; when using rate-scaling, Tx command's initial_rate_index
@@ -2625,7 +2690,7 @@ struct iwl4965_led_cmd {
2625 2690
2626struct iwl4965_rx_packet { 2691struct iwl4965_rx_packet {
2627 __le32 len; 2692 __le32 len;
2628 struct iwl4965_cmd_header hdr; 2693 struct iwl_cmd_header hdr;
2629 union { 2694 union {
2630 struct iwl4965_alive_resp alive_frame; 2695 struct iwl4965_alive_resp alive_frame;
2631 struct iwl4965_rx_frame rx_frame; 2696 struct iwl4965_rx_frame rx_frame;
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965-hw.h b/drivers/net/wireless/iwlwifi/iwl-4965-hw.h
index ffe1e9dfdec7..1a66b508a8ea 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-4965-hw.h
@@ -5,7 +5,7 @@
5 * 5 *
6 * GPL LICENSE SUMMARY 6 * GPL LICENSE SUMMARY
7 * 7 *
8 * Copyright(c) 2005 - 2007 Intel Corporation. All rights reserved. 8 * Copyright(c) 2005 - 2008 Intel Corporation. All rights reserved.
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as 11 * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
30 * 30 *
31 * BSD LICENSE 31 * BSD LICENSE
32 * 32 *
33 * Copyright(c) 2005 - 2007 Intel Corporation. All rights reserved. 33 * Copyright(c) 2005 - 2008 Intel Corporation. All rights reserved.
34 * All rights reserved. 34 * All rights reserved.
35 * 35 *
36 * Redistribution and use in source and binary forms, with or without 36 * Redistribution and use in source and binary forms, with or without
@@ -92,316 +92,6 @@
92/* RSSI to dBm */ 92/* RSSI to dBm */
93#define IWL_RSSI_OFFSET 44 93#define IWL_RSSI_OFFSET 44
94 94
95/*
96 * EEPROM related constants, enums, and structures.
97 */
98
99/*
100 * EEPROM access time values:
101 *
102 * Driver initiates EEPROM read by writing byte address << 1 to CSR_EEPROM_REG,
103 * then clearing (with subsequent read/modify/write) CSR_EEPROM_REG bit
104 * CSR_EEPROM_REG_BIT_CMD (0x2).
105 * Driver then polls CSR_EEPROM_REG for CSR_EEPROM_REG_READ_VALID_MSK (0x1).
106 * When polling, wait 10 uSec between polling loops, up to a maximum 5000 uSec.
107 * Driver reads 16-bit value from bits 31-16 of CSR_EEPROM_REG.
108 */
109#define IWL_EEPROM_ACCESS_TIMEOUT 5000 /* uSec */
110#define IWL_EEPROM_ACCESS_DELAY 10 /* uSec */
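Expressed as code, the access procedure described in the comment above amounts to the loop sketched below. This is an illustration only, written against the pre-patch _iwl4965_* accessors from iwl-4965-io.h and the CSR_EEPROM_* bits defined later in this file; the driver's real EEPROM init differs in details such as ownership semaphores and error reporting, and udelay() assumes <linux/delay.h>.

	/* Sketch: read one 16-bit EEPROM word at byte offset "addr". */
	static u16 iwl4965_eeprom_read16_sketch(struct iwl4965_priv *priv, u32 addr)
	{
		u32 r;
		int t;

		/* Initiate the read: byte address << 1 into CSR_EEPROM_REG */
		_iwl4965_write32(priv, CSR_EEPROM_REG, addr << 1);
		/* Clear the command bit (read/modify/write) */
		_iwl4965_clear_bit(priv, CSR_EEPROM_REG, CSR_EEPROM_REG_BIT_CMD);

		/* Poll for READ_VALID, 10 uSec per loop, 5000 uSec max */
		for (t = 0; t < IWL_EEPROM_ACCESS_TIMEOUT;
		     t += IWL_EEPROM_ACCESS_DELAY) {
			r = _iwl4965_read32(priv, CSR_EEPROM_REG);
			if (r & CSR_EEPROM_REG_READ_VALID_MSK)
				return (u16)(r >> 16); /* data in bits 31-16 */
			udelay(IWL_EEPROM_ACCESS_DELAY);
		}
		return 0xFFFF;	/* timed out */
	}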
111
112/*
113 * Regulatory channel usage flags in EEPROM struct iwl4965_eeprom_channel.flags.
114 *
115 * IBSS and/or AP operation is allowed *only* on those channels with
116 * (VALID && IBSS && ACTIVE && !RADAR). This restriction is in place because
117 * RADAR detection is not supported by the 4965 driver, but is a
118 * requirement for establishing a new network for legal operation on channels
119 * requiring RADAR detection or restricting ACTIVE scanning.
120 *
121 * NOTE: "WIDE" flag does not indicate anything about "FAT" 40 MHz channels.
122 * It only indicates that 20 MHz channel use is supported; FAT channel
123 * usage is indicated by a separate set of regulatory flags for each
124 * FAT channel pair.
125 *
126 * NOTE: Using a channel inappropriately will result in a uCode error!
127 */
128enum {
129 EEPROM_CHANNEL_VALID = (1 << 0), /* usable for this SKU/geo */
130 EEPROM_CHANNEL_IBSS = (1 << 1), /* usable as an IBSS channel */
131 /* Bit 2 Reserved */
132 EEPROM_CHANNEL_ACTIVE = (1 << 3), /* active scanning allowed */
133 EEPROM_CHANNEL_RADAR = (1 << 4), /* radar detection required */
134 EEPROM_CHANNEL_WIDE = (1 << 5), /* 20 MHz channel okay */
135 EEPROM_CHANNEL_NARROW = (1 << 6), /* 10 MHz channel (not used) */
136 EEPROM_CHANNEL_DFS = (1 << 7), /* dynamic freq selection candidate */
137};
138
139/* SKU Capabilities */
140#define EEPROM_SKU_CAP_SW_RF_KILL_ENABLE (1 << 0)
141#define EEPROM_SKU_CAP_HW_RF_KILL_ENABLE (1 << 1)
142
143/* *regulatory* channel data format in eeprom, one for each channel.
144 * There are separate entries for FAT (40 MHz) vs. normal (20 MHz) channels. */
145struct iwl4965_eeprom_channel {
146 u8 flags; /* EEPROM_CHANNEL_* flags copied from EEPROM */
147 s8 max_power_avg; /* max power (dBm) on this chnl, limit 31 */
148} __attribute__ ((packed));
149
150/* 4965 has two radio transmitters (and 3 radio receivers) */
151#define EEPROM_TX_POWER_TX_CHAINS (2)
152
153/* 4965 has room for up to 8 sets of txpower calibration data */
154#define EEPROM_TX_POWER_BANDS (8)
155
156/* 4965 factory calibration measures txpower gain settings for
157 * each of 3 target output levels */
158#define EEPROM_TX_POWER_MEASUREMENTS (3)
159
160/* 4965 driver does not work with txpower calibration version < 5.
161 * Look for this in calib_version member of struct iwl4965_eeprom. */
162#define EEPROM_TX_POWER_VERSION_NEW (5)
163
164
165/*
166 * 4965 factory calibration data for one txpower level, on one channel,
167 * measured on one of the 2 tx chains (radio transmitter and associated
168 * antenna). EEPROM contains:
169 *
170 * 1) Temperature (degrees Celsius) of device when measurement was made.
171 *
172 * 2) Gain table index used to achieve the target measurement power.
173 * This refers to the "well-known" gain tables (see iwl-4965-hw.h).
174 *
175 * 3) Actual measured output power, in half-dBm ("34" = 17 dBm).
176 *
177 * 4) RF power amplifier detector level measurement (not used).
178 */
179struct iwl4965_eeprom_calib_measure {
180 u8 temperature; /* Device temperature (Celsius) */
181 u8 gain_idx; /* Index into gain table */
182 u8 actual_pow; /* Measured RF output power, half-dBm */
183 s8 pa_det; /* Power amp detector level (not used) */
184} __attribute__ ((packed));
185
186
187/*
188 * 4965 measurement set for one channel. EEPROM contains:
189 *
190 * 1) Channel number measured
191 *
192 * 2) Measurements for each of 3 power levels for each of 2 radio transmitters
193 * (a.k.a. "tx chains") (6 measurements altogether)
194 */
195struct iwl4965_eeprom_calib_ch_info {
196 u8 ch_num;
197 struct iwl4965_eeprom_calib_measure measurements[EEPROM_TX_POWER_TX_CHAINS]
198 [EEPROM_TX_POWER_MEASUREMENTS];
199} __attribute__ ((packed));
200
201/*
202 * 4965 txpower subband info.
203 *
204 * For each frequency subband, EEPROM contains the following:
205 *
206 * 1) First and last channels within range of the subband. "0" values
207 * indicate that this sample set is not being used.
208 *
209 * 2) Sample measurement sets for 2 channels close to the range endpoints.
210 */
211struct iwl4965_eeprom_calib_subband_info {
212 u8 ch_from; /* channel number of lowest channel in subband */
213 u8 ch_to; /* channel number of highest channel in subband */
214 struct iwl4965_eeprom_calib_ch_info ch1;
215 struct iwl4965_eeprom_calib_ch_info ch2;
216} __attribute__ ((packed));
217
218
219/*
220 * 4965 txpower calibration info. EEPROM contains:
221 *
222 * 1) Factory-measured saturation power levels (maximum levels at which
223 * tx power amplifier can output a signal without too much distortion).
224 * There is one level for 2.4 GHz band and one for 5 GHz band. These
225 * values apply to all channels within each of the bands.
226 *
227 * 2) Factory-measured power supply voltage level. This is assumed to be
228 * constant (i.e. same value applies to all channels/bands) while the
229 * factory measurements are being made.
230 *
231 * 3) Up to 8 sets of factory-measured txpower calibration values.
232 * These are for different frequency ranges, since txpower gain
233 * characteristics of the analog radio circuitry vary with frequency.
234 *
235 * Not all sets need to be filled with data;
236 * struct iwl4965_eeprom_calib_subband_info contains range of channels
237 * (0 if unused) for each set of data.
238 */
239struct iwl4965_eeprom_calib_info {
240 u8 saturation_power24; /* half-dBm (e.g. "34" = 17 dBm) */
241 u8 saturation_power52; /* half-dBm */
242 s16 voltage; /* signed */
243 struct iwl4965_eeprom_calib_subband_info band_info[EEPROM_TX_POWER_BANDS];
244} __attribute__ ((packed));
245
246
247/*
248 * 4965 EEPROM map
249 */
250struct iwl4965_eeprom {
251 u8 reserved0[16];
252#define EEPROM_DEVICE_ID (2*0x08) /* 2 bytes */
253 u16 device_id; /* abs.ofs: 16 */
254 u8 reserved1[2];
255#define EEPROM_PMC (2*0x0A) /* 2 bytes */
256 u16 pmc; /* abs.ofs: 20 */
257 u8 reserved2[20];
258#define EEPROM_MAC_ADDRESS (2*0x15) /* 6 bytes */
259 u8 mac_address[6]; /* abs.ofs: 42 */
260 u8 reserved3[58];
261#define EEPROM_BOARD_REVISION (2*0x35) /* 2 bytes */
262 u16 board_revision; /* abs.ofs: 106 */
263 u8 reserved4[11];
264#define EEPROM_BOARD_PBA_NUMBER (2*0x3B+1) /* 9 bytes */
265 u8 board_pba_number[9]; /* abs.ofs: 119 */
266 u8 reserved5[8];
267#define EEPROM_VERSION (2*0x44) /* 2 bytes */
268 u16 version; /* abs.ofs: 136 */
269#define EEPROM_SKU_CAP (2*0x45) /* 1 bytes */
270 u8 sku_cap; /* abs.ofs: 138 */
271#define EEPROM_LEDS_MODE (2*0x45+1) /* 1 bytes */
272 u8 leds_mode; /* abs.ofs: 139 */
273#define EEPROM_OEM_MODE (2*0x46) /* 2 bytes */
274 u16 oem_mode;
275#define EEPROM_WOWLAN_MODE (2*0x47) /* 2 bytes */
276 u16 wowlan_mode; /* abs.ofs: 142 */
277#define EEPROM_LEDS_TIME_INTERVAL (2*0x48) /* 2 bytes */
278 u16 leds_time_interval; /* abs.ofs: 144 */
279#define EEPROM_LEDS_OFF_TIME (2*0x49) /* 1 bytes */
280 u8 leds_off_time; /* abs.ofs: 146 */
281#define EEPROM_LEDS_ON_TIME (2*0x49+1) /* 1 bytes */
282 u8 leds_on_time; /* abs.ofs: 147 */
283#define EEPROM_ALMGOR_M_VERSION (2*0x4A) /* 1 bytes */
284 u8 almgor_m_version; /* abs.ofs: 148 */
285#define EEPROM_ANTENNA_SWITCH_TYPE (2*0x4A+1) /* 1 bytes */
286 u8 antenna_switch_type; /* abs.ofs: 149 */
287 u8 reserved6[8];
288#define EEPROM_4965_BOARD_REVISION (2*0x4F) /* 2 bytes */
289 u16 board_revision_4965; /* abs.ofs: 158 */
290 u8 reserved7[13];
291#define EEPROM_4965_BOARD_PBA (2*0x56+1) /* 9 bytes */
292 u8 board_pba_number_4965[9]; /* abs.ofs: 173 */
293 u8 reserved8[10];
294#define EEPROM_REGULATORY_SKU_ID (2*0x60) /* 4 bytes */
295 u8 sku_id[4]; /* abs.ofs: 192 */
296
297/*
298 * Per-channel regulatory data.
299 *
300 * Each channel that *might* be supported by 3945 or 4965 has a fixed location
301 * in EEPROM containing EEPROM_CHANNEL_* usage flags (LSB) and max regulatory
302 * txpower (MSB).
303 *
304 * Entries immediately below are for 20 MHz channel width. FAT (40 MHz)
305 * channels (only for 4965, not supported by 3945) appear later in the EEPROM.
306 *
307 * 2.4 GHz channels 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
308 */
309#define EEPROM_REGULATORY_BAND_1 (2*0x62) /* 2 bytes */
310 u16 band_1_count; /* abs.ofs: 196 */
311#define EEPROM_REGULATORY_BAND_1_CHANNELS (2*0x63) /* 28 bytes */
312 struct iwl4965_eeprom_channel band_1_channels[14]; /* abs.ofs: 196 */
313
314/*
315 * 4.9 GHz channels 183, 184, 185, 187, 188, 189, 192, 196,
316 * 5.0 GHz channels 7, 8, 11, 12, 16
317 * (4915-5080MHz) (none of these is ever supported)
318 */
319#define EEPROM_REGULATORY_BAND_2 (2*0x71) /* 2 bytes */
320 u16 band_2_count; /* abs.ofs: 226 */
321#define EEPROM_REGULATORY_BAND_2_CHANNELS (2*0x72) /* 26 bytes */
322 struct iwl4965_eeprom_channel band_2_channels[13]; /* abs.ofs: 228 */
323
324/*
325 * 5.2 GHz channels 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
326 * (5170-5320MHz)
327 */
328#define EEPROM_REGULATORY_BAND_3 (2*0x7F) /* 2 bytes */
329 u16 band_3_count; /* abs.ofs: 254 */
330#define EEPROM_REGULATORY_BAND_3_CHANNELS (2*0x80) /* 24 bytes */
331 struct iwl4965_eeprom_channel band_3_channels[12]; /* abs.ofs: 256 */
332
333/*
334 * 5.5 GHz channels 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
335 * (5500-5700MHz)
336 */
337#define EEPROM_REGULATORY_BAND_4 (2*0x8C) /* 2 bytes */
338 u16 band_4_count; /* abs.ofs: 280 */
339#define EEPROM_REGULATORY_BAND_4_CHANNELS (2*0x8D) /* 22 bytes */
340 struct iwl4965_eeprom_channel band_4_channels[11]; /* abs.ofs: 282 */
341
342/*
343 * 5.7 GHz channels 145, 149, 153, 157, 161, 165
344 * (5725-5825MHz)
345 */
346#define EEPROM_REGULATORY_BAND_5 (2*0x98) /* 2 bytes */
347 u16 band_5_count; /* abs.ofs: 304 */
348#define EEPROM_REGULATORY_BAND_5_CHANNELS (2*0x99) /* 12 bytes */
349 struct iwl4965_eeprom_channel band_5_channels[6]; /* abs.ofs: 306 */
350
351 u8 reserved10[2];
352
353
354/*
355 * 2.4 GHz FAT channels 1 (5), 2 (6), 3 (7), 4 (8), 5 (9), 6 (10), 7 (11)
356 *
357 * The channel listed is the center of the lower 20 MHz half of the channel.
358 * The overall center frequency is actually 2 channels (10 MHz) above that,
359 * and the upper half of each FAT channel is centered 4 channels (20 MHz) away
360 * from the lower half; e.g. the upper half of FAT channel 1 is channel 5,
361 * and the overall FAT channel width centers on channel 3.
362 *
363 * NOTE: The RXON command uses 20 MHz channel numbers to specify the
364 * control channel to which to tune. RXON also specifies whether the
365 * control channel is the upper or lower half of a FAT channel.
366 *
367 * NOTE: 4965 does not support FAT channels on 2.4 GHz.
368 */
369#define EEPROM_REGULATORY_BAND_24_FAT_CHANNELS (2*0xA0) /* 14 bytes */
370 struct iwl4965_eeprom_channel band_24_channels[7]; /* abs.ofs: 320 */
371 u8 reserved11[2];
372
373/*
374 * 5.2 GHz FAT channels 36 (40), 44 (48), 52 (56), 60 (64),
375 * 100 (104), 108 (112), 116 (120), 124 (128), 132 (136), 149 (153), 157 (161)
376 */
377#define EEPROM_REGULATORY_BAND_52_FAT_CHANNELS (2*0xA8) /* 22 bytes */
378 struct iwl4965_eeprom_channel band_52_channels[11]; /* abs.ofs: 336 */
379 u8 reserved12[6];
380
381/*
382 * 4965 driver requires txpower calibration format version 5 or greater.
383 * Driver does not work with txpower calibration version < 5.
384 * This value is simply a 16-bit number, no major/minor versions here.
385 */
386#define EEPROM_CALIB_VERSION_OFFSET (2*0xB6) /* 2 bytes */
387 u16 calib_version; /* abs.ofs: 364 */
388 u8 reserved13[2];
389 u8 reserved14[96]; /* abs.ofs: 368 */
390
391/*
392 * 4965 Txpower calibration data.
393 */
394#define EEPROM_IWL_CALIB_TXPOWER_OFFSET (2*0xE8) /* 48 bytes */
395 struct iwl4965_eeprom_calib_info calib_info; /* abs.ofs: 464 */
396
397 u8 reserved16[140]; /* fill out to full 1024 byte block */
398
399
400} __attribute__ ((packed));
401
402#define IWL_EEPROM_IMAGE_SIZE 1024
403
404/* End of EEPROM */
405 95
406#include "iwl-4965-commands.h" 96#include "iwl-4965-commands.h"
407 97
@@ -410,182 +100,6 @@ struct iwl4965_eeprom {
410#define PCI_REG_WUM8 0x0E8 100#define PCI_REG_WUM8 0x0E8
411#define PCI_CFG_PMC_PME_FROM_D3COLD_SUPPORT (0x80000000) 101#define PCI_CFG_PMC_PME_FROM_D3COLD_SUPPORT (0x80000000)
412 102
413/*=== CSR (control and status registers) ===*/
414#define CSR_BASE (0x000)
415
416#define CSR_SW_VER (CSR_BASE+0x000)
417#define CSR_HW_IF_CONFIG_REG (CSR_BASE+0x000) /* hardware interface config */
418#define CSR_INT_COALESCING (CSR_BASE+0x004) /* accum ints, 32-usec units */
419#define CSR_INT (CSR_BASE+0x008) /* host interrupt status/ack */
420#define CSR_INT_MASK (CSR_BASE+0x00c) /* host interrupt enable */
421#define CSR_FH_INT_STATUS (CSR_BASE+0x010) /* busmaster int status/ack*/
422#define CSR_GPIO_IN (CSR_BASE+0x018) /* read external chip pins */
423#define CSR_RESET (CSR_BASE+0x020) /* busmaster enable, NMI, etc*/
424#define CSR_GP_CNTRL (CSR_BASE+0x024)
425
426/*
427 * Hardware revision info
428 * Bit fields:
429 * 31-8: Reserved
430 * 7-4: Type of device: 0x0 = 4965, 0xd = 3945
431 * 3-2: Revision step: 0 = A, 1 = B, 2 = C, 3 = D
432 * 1-0: "Dash" value, as in A-1, etc.
433 *
434 * NOTE: Revision step affects calculation of CCK txpower for 4965.
435 */
436#define CSR_HW_REV (CSR_BASE+0x028)
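Decoding the fields described in the comment above is a matter of shifts and masks; the lines below are illustrative only (variable names are made up, and iwl4965_read32() is the pre-patch io.h helper):

	u32 hw_rev   = iwl4965_read32(priv, CSR_HW_REV);
	u32 dev_type = (hw_rev >> 4) & 0xf;	/* 0x0 = 4965, 0xd = 3945 */
	u32 step     = (hw_rev >> 2) & 0x3;	/* 0 = A, 1 = B, 2 = C, 3 = D */
	u32 dash     = hw_rev & 0x3;		/* e.g. the "1" in A-1 */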
437
438/* EEPROM reads */
439#define CSR_EEPROM_REG (CSR_BASE+0x02c)
440#define CSR_EEPROM_GP (CSR_BASE+0x030)
441#define CSR_GP_UCODE (CSR_BASE+0x044)
442#define CSR_UCODE_DRV_GP1 (CSR_BASE+0x054)
443#define CSR_UCODE_DRV_GP1_SET (CSR_BASE+0x058)
444#define CSR_UCODE_DRV_GP1_CLR (CSR_BASE+0x05c)
445#define CSR_UCODE_DRV_GP2 (CSR_BASE+0x060)
446#define CSR_GIO_CHICKEN_BITS (CSR_BASE+0x100)
447
448/*
449 * Indicates hardware rev, to determine CCK backoff for txpower calculation.
450 * Bit fields:
451 * 3-2: 0 = A, 1 = B, 2 = C, 3 = D step
452 */
453#define CSR_HW_REV_WA_REG (CSR_BASE+0x22C)
454
455/* Hardware interface configuration bits */
456#define CSR_HW_IF_CONFIG_REG_BIT_KEDRON_R (0x00000010)
457#define CSR_HW_IF_CONFIG_REG_MSK_BOARD_VER (0x00000C00)
458#define CSR_HW_IF_CONFIG_REG_BIT_MAC_SI (0x00000100)
459#define CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI (0x00000200)
460#define CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM (0x00200000)
461
462/* interrupt flags in INTA, set by uCode or hardware (e.g. dma),
463 * acknowledged (reset) by host writing "1" to flagged bits. */
464#define CSR_INT_BIT_FH_RX (1 << 31) /* Rx DMA, cmd responses, FH_INT[17:16] */
465#define CSR_INT_BIT_HW_ERR (1 << 29) /* DMA hardware error FH_INT[31] */
466#define CSR_INT_BIT_DNLD (1 << 28) /* uCode Download */
467#define CSR_INT_BIT_FH_TX (1 << 27) /* Tx DMA FH_INT[1:0] */
468#define CSR_INT_BIT_SCD (1 << 26) /* TXQ pointer advanced */
469#define CSR_INT_BIT_SW_ERR (1 << 25) /* uCode error */
470#define CSR_INT_BIT_RF_KILL (1 << 7) /* HW RFKILL switch GP_CNTRL[27] toggled */
471#define CSR_INT_BIT_CT_KILL (1 << 6) /* Critical temp (chip too hot) rfkill */
472#define CSR_INT_BIT_SW_RX (1 << 3) /* Rx, command responses, 3945 */
473#define CSR_INT_BIT_WAKEUP (1 << 1) /* NIC controller waking up (pwr mgmt) */
474#define CSR_INT_BIT_ALIVE (1 << 0) /* uCode interrupts once it initializes */
475
476#define CSR_INI_SET_MASK (CSR_INT_BIT_FH_RX | \
477 CSR_INT_BIT_HW_ERR | \
478 CSR_INT_BIT_FH_TX | \
479 CSR_INT_BIT_SW_ERR | \
480 CSR_INT_BIT_RF_KILL | \
481 CSR_INT_BIT_SW_RX | \
482 CSR_INT_BIT_WAKEUP | \
483 CSR_INT_BIT_ALIVE)
484
485/* interrupt flags in FH (flow handler) (PCI busmaster DMA) */
486#define CSR_FH_INT_BIT_ERR (1 << 31) /* Error */
487#define CSR_FH_INT_BIT_HI_PRIOR (1 << 30) /* High priority Rx, bypass coalescing */
488#define CSR_FH_INT_BIT_RX_CHNL1 (1 << 17) /* Rx channel 1 */
489#define CSR_FH_INT_BIT_RX_CHNL0 (1 << 16) /* Rx channel 0 */
490#define CSR_FH_INT_BIT_TX_CHNL1 (1 << 1) /* Tx channel 1 */
491#define CSR_FH_INT_BIT_TX_CHNL0 (1 << 0) /* Tx channel 0 */
492
493#define CSR_FH_INT_RX_MASK (CSR_FH_INT_BIT_HI_PRIOR | \
494 CSR_FH_INT_BIT_RX_CHNL1 | \
495 CSR_FH_INT_BIT_RX_CHNL0)
496
497#define CSR_FH_INT_TX_MASK (CSR_FH_INT_BIT_TX_CHNL1 | \
498 CSR_FH_INT_BIT_TX_CHNL0)
499
500
501/* RESET */
502#define CSR_RESET_REG_FLAG_NEVO_RESET (0x00000001)
503#define CSR_RESET_REG_FLAG_FORCE_NMI (0x00000002)
504#define CSR_RESET_REG_FLAG_SW_RESET (0x00000080)
505#define CSR_RESET_REG_FLAG_MASTER_DISABLED (0x00000100)
506#define CSR_RESET_REG_FLAG_STOP_MASTER (0x00000200)
507
508/* GP (general purpose) CONTROL */
509#define CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY (0x00000001)
510#define CSR_GP_CNTRL_REG_FLAG_INIT_DONE (0x00000004)
511#define CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ (0x00000008)
512#define CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP (0x00000010)
513
514#define CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN (0x00000001)
515
516#define CSR_GP_CNTRL_REG_MSK_POWER_SAVE_TYPE (0x07000000)
517#define CSR_GP_CNTRL_REG_FLAG_MAC_POWER_SAVE (0x04000000)
518#define CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW (0x08000000)
519
520
521/* EEPROM REG */
522#define CSR_EEPROM_REG_READ_VALID_MSK (0x00000001)
523#define CSR_EEPROM_REG_BIT_CMD (0x00000002)
524
525/* EEPROM GP */
526#define CSR_EEPROM_GP_VALID_MSK (0x00000006)
527#define CSR_EEPROM_GP_BAD_SIGNATURE (0x00000000)
528#define CSR_EEPROM_GP_IF_OWNER_MSK (0x00000180)
529
530/* UCODE DRV GP */
531#define CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP (0x00000001)
532#define CSR_UCODE_SW_BIT_RFKILL (0x00000002)
533#define CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED (0x00000004)
534#define CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT (0x00000008)
535
536/* GPIO */
537#define CSR_GPIO_IN_BIT_AUX_POWER (0x00000200)
538#define CSR_GPIO_IN_VAL_VAUX_PWR_SRC (0x00000000)
539#define CSR_GPIO_IN_VAL_VMAIN_PWR_SRC CSR_GPIO_IN_BIT_AUX_POWER
540
541/* GI Chicken Bits */
542#define CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX (0x00800000)
543#define CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER (0x20000000)
544
545/*=== HBUS (Host-side Bus) ===*/
546#define HBUS_BASE (0x400)
547
548/*
549 * Registers for accessing device's internal SRAM memory (e.g. SCD SRAM
550 * structures, error log, event log, verifying uCode load).
551 * First write to address register, then read from or write to data register
552 * to complete the job. Once the address register is set up, accesses to
553 * data registers auto-increment the address by one dword.
554 * Bit usage for address registers (read or write):
555 * 0-31: memory address within device
556 */
557#define HBUS_TARG_MEM_RADDR (HBUS_BASE+0x00c)
558#define HBUS_TARG_MEM_WADDR (HBUS_BASE+0x010)
559#define HBUS_TARG_MEM_WDAT (HBUS_BASE+0x018)
560#define HBUS_TARG_MEM_RDAT (HBUS_BASE+0x01c)
561
562/*
563 * Registers for accessing device's internal peripheral registers
564 * (e.g. SCD, BSM, etc.). First write to address register,
565 * then read from or write to data register to complete the job.
566 * Bit usage for address registers (read or write):
567 * 0-15: register address (offset) within device
568 * 24-25: (# bytes - 1) to read or write (e.g. 3 for dword)
569 */
570#define HBUS_TARG_PRPH_WADDR (HBUS_BASE+0x044)
571#define HBUS_TARG_PRPH_RADDR (HBUS_BASE+0x048)
572#define HBUS_TARG_PRPH_WDAT (HBUS_BASE+0x04c)
573#define HBUS_TARG_PRPH_RDAT (HBUS_BASE+0x050)
574
575/*
576 * Per-Tx-queue write pointer (index, really!) (3945 and 4965).
577 * Driver sets this to indicate index to next TFD that driver will fill
578 * (1 past latest filled).
579 * Bit usage:
580 * 0-7: queue write index (0-255)
581 * 11-8: queue selector (0-15)
582 */
583#define HBUS_TARG_WRPTR (HBUS_BASE+0x060)
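Given the bit layout above, bumping a queue's write pointer boils down to a single register write, roughly as sketched here (illustrative only; the real update path also performs the NIC wake-up handshake first, and iwl4965_write_direct32() is the pre-patch io.h helper):

	/* Sketch: tell queue "txq_id" that entries up to "index" are filled. */
	iwl4965_write_direct32(priv, HBUS_TARG_WRPTR,
			       (index & 0xff) | (txq_id << 8));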
584
585#define HBUS_TARG_MBX_C (HBUS_BASE+0x030)
586
587#define HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED (0x00000004)
588
589#define TFD_QUEUE_SIZE_MAX (256) 103#define TFD_QUEUE_SIZE_MAX (256)
590 104
591#define IWL_NUM_SCAN_RATES (2) 105#define IWL_NUM_SCAN_RATES (2)
@@ -599,9 +113,6 @@ struct iwl4965_eeprom {
599#define TFD_TX_CMD_SLOTS 256 113#define TFD_TX_CMD_SLOTS 256
600#define TFD_CMD_SLOTS 32 114#define TFD_CMD_SLOTS 32
601 115
602#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl4965_cmd) - \
603 sizeof(struct iwl4965_cmd_meta))
604
605/* 116/*
606 * RX related structures and functions 117 * RX related structures and functions
607 */ 118 */
@@ -615,16 +126,18 @@ struct iwl4965_eeprom {
615/* Sizes and addresses for instruction and data memory (SRAM) in 126/* Sizes and addresses for instruction and data memory (SRAM) in
616 * 4965's embedded processor. Driver access is via HBUS_TARG_MEM_* regs. */ 127 * 4965's embedded processor. Driver access is via HBUS_TARG_MEM_* regs. */
617#define RTC_INST_LOWER_BOUND (0x000000) 128#define RTC_INST_LOWER_BOUND (0x000000)
618#define KDR_RTC_INST_UPPER_BOUND (0x018000) 129#define IWL49_RTC_INST_UPPER_BOUND (0x018000)
619 130
620#define RTC_DATA_LOWER_BOUND (0x800000) 131#define RTC_DATA_LOWER_BOUND (0x800000)
621#define KDR_RTC_DATA_UPPER_BOUND (0x80A000) 132#define IWL49_RTC_DATA_UPPER_BOUND (0x80A000)
622 133
623#define KDR_RTC_INST_SIZE (KDR_RTC_INST_UPPER_BOUND - RTC_INST_LOWER_BOUND) 134#define IWL49_RTC_INST_SIZE \
624#define KDR_RTC_DATA_SIZE (KDR_RTC_DATA_UPPER_BOUND - RTC_DATA_LOWER_BOUND) 135 (IWL49_RTC_INST_UPPER_BOUND - RTC_INST_LOWER_BOUND)
136#define IWL49_RTC_DATA_SIZE \
137 (IWL49_RTC_DATA_UPPER_BOUND - RTC_DATA_LOWER_BOUND)
625 138
626#define IWL_MAX_INST_SIZE KDR_RTC_INST_SIZE 139#define IWL_MAX_INST_SIZE IWL49_RTC_INST_SIZE
627#define IWL_MAX_DATA_SIZE KDR_RTC_DATA_SIZE 140#define IWL_MAX_DATA_SIZE IWL49_RTC_DATA_SIZE
628 141
629/* Size of uCode instruction memory in bootstrap state machine */ 142/* Size of uCode instruction memory in bootstrap state machine */
630#define IWL_MAX_BSM_SIZE BSM_SRAM_SIZE 143#define IWL_MAX_BSM_SIZE BSM_SRAM_SIZE
@@ -632,7 +145,7 @@ struct iwl4965_eeprom {
632static inline int iwl4965_hw_valid_rtc_data_addr(u32 addr) 145static inline int iwl4965_hw_valid_rtc_data_addr(u32 addr)
633{ 146{
634 return (addr >= RTC_DATA_LOWER_BOUND) && 147 return (addr >= RTC_DATA_LOWER_BOUND) &&
635 (addr < KDR_RTC_DATA_UPPER_BOUND); 148 (addr < IWL49_RTC_DATA_UPPER_BOUND);
636} 149}
637 150
638/********************* START TEMPERATURE *************************************/ 151/********************* START TEMPERATURE *************************************/
@@ -1872,10 +1385,10 @@ static inline __le32 iwl4965_hw_set_rate_n_flags(u8 rate, u16 flags)
1872 * up to 7 DMA channels (FIFOs). Each Tx queue is supported by a circular array 1385 * up to 7 DMA channels (FIFOs). Each Tx queue is supported by a circular array
1873 * in DRAM containing 256 Transmit Frame Descriptors (TFDs). 1386 * in DRAM containing 256 Transmit Frame Descriptors (TFDs).
1874 */ 1387 */
1875#define IWL4965_MAX_WIN_SIZE 64 1388#define IWL4965_MAX_WIN_SIZE 64
1876#define IWL4965_QUEUE_SIZE 256 1389#define IWL4965_QUEUE_SIZE 256
1877#define IWL4965_NUM_FIFOS 7 1390#define IWL4965_NUM_FIFOS 7
1878#define IWL_MAX_NUM_QUEUES 16 1391#define IWL4965_MAX_NUM_QUEUES 16
1879 1392
1880 1393
1881/** 1394/**
@@ -2040,30 +1553,30 @@ struct iwl4965_sched_queue_byte_cnt_tbl {
2040 */ 1553 */
2041struct iwl4965_shared { 1554struct iwl4965_shared {
2042 struct iwl4965_sched_queue_byte_cnt_tbl 1555 struct iwl4965_sched_queue_byte_cnt_tbl
2043 queues_byte_cnt_tbls[IWL_MAX_NUM_QUEUES]; 1556 queues_byte_cnt_tbls[IWL4965_MAX_NUM_QUEUES];
2044 __le32 val0; 1557 __le32 rb_closed;
2045 1558
2046 /* __le32 rb_closed_stts_rb_num:12; */ 1559 /* __le32 rb_closed_stts_rb_num:12; */
2047#define IWL_rb_closed_stts_rb_num_POS 0 1560#define IWL_rb_closed_stts_rb_num_POS 0
2048#define IWL_rb_closed_stts_rb_num_LEN 12 1561#define IWL_rb_closed_stts_rb_num_LEN 12
2049#define IWL_rb_closed_stts_rb_num_SYM val0 1562#define IWL_rb_closed_stts_rb_num_SYM rb_closed
2050 /* __le32 rsrv1:4; */ 1563 /* __le32 rsrv1:4; */
2051 /* __le32 rb_closed_stts_rx_frame_num:12; */ 1564 /* __le32 rb_closed_stts_rx_frame_num:12; */
2052#define IWL_rb_closed_stts_rx_frame_num_POS 16 1565#define IWL_rb_closed_stts_rx_frame_num_POS 16
2053#define IWL_rb_closed_stts_rx_frame_num_LEN 12 1566#define IWL_rb_closed_stts_rx_frame_num_LEN 12
2054#define IWL_rb_closed_stts_rx_frame_num_SYM val0 1567#define IWL_rb_closed_stts_rx_frame_num_SYM rb_closed
2055 /* __le32 rsrv2:4; */ 1568 /* __le32 rsrv2:4; */
2056 1569
2057 __le32 val1; 1570 __le32 frm_finished;
2058 /* __le32 frame_finished_stts_rb_num:12; */ 1571 /* __le32 frame_finished_stts_rb_num:12; */
2059#define IWL_frame_finished_stts_rb_num_POS 0 1572#define IWL_frame_finished_stts_rb_num_POS 0
2060#define IWL_frame_finished_stts_rb_num_LEN 12 1573#define IWL_frame_finished_stts_rb_num_LEN 12
2061#define IWL_frame_finished_stts_rb_num_SYM val1 1574#define IWL_frame_finished_stts_rb_num_SYM frm_finished
2062 /* __le32 rsrv3:4; */ 1575 /* __le32 rsrv3:4; */
2063 /* __le32 frame_finished_stts_rx_frame_num:12; */ 1576 /* __le32 frame_finished_stts_rx_frame_num:12; */
2064#define IWL_frame_finished_stts_rx_frame_num_POS 16 1577#define IWL_frame_finished_stts_rx_frame_num_POS 16
2065#define IWL_frame_finished_stts_rx_frame_num_LEN 12 1578#define IWL_frame_finished_stts_rx_frame_num_LEN 12
2066#define IWL_frame_finished_stts_rx_frame_num_SYM val1 1579#define IWL_frame_finished_stts_rx_frame_num_SYM frm_finished
2067 /* __le32 rsrv4:4; */ 1580 /* __le32 rsrv4:4; */
2068 1581
2069 __le32 padding1; /* so that allocation will be aligned to 16B */ 1582 __le32 padding1; /* so that allocation will be aligned to 16B */
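For reference, the _POS/_LEN/_SYM triples that this hunk repoints from val0/val1 to rb_closed/frm_finished are consumed by a bit-field helper macro elsewhere in the driver; expanded by hand, reading the closed receive-buffer number looks roughly like the sketch below (illustrative only, with "shared" standing for a pointer to struct iwl4965_shared):

	/* Sketch: extract the 12-bit rb_closed_stts_rb_num field. */
	u32 rb_num = (le32_to_cpu(shared->rb_closed) >>
		      IWL_rb_closed_stts_rb_num_POS) &
		     ((1 << IWL_rb_closed_stts_rb_num_LEN) - 1);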
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965-io.h b/drivers/net/wireless/iwlwifi/iwl-4965-io.h
deleted file mode 100644
index 34a0b57eea0c..000000000000
--- a/drivers/net/wireless/iwlwifi/iwl-4965-io.h
+++ /dev/null
@@ -1,431 +0,0 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2007 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program; if not, write to the Free Software Foundation, Inc.,
18 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
19 *
20 * The full GNU General Public License is included in this distribution in the
21 * file called LICENSE.
22 *
23 * Contact Information:
24 * James P. Ketrenos <ipw2100-admin@linux.intel.com>
25 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26 *
27 *****************************************************************************/
28
29#ifndef __iwl4965_io_h__
30#define __iwl4965_io_h__
31
32#include <linux/io.h>
33
34#include "iwl-4965-debug.h"
35
36/*
37 * IO, register, and NIC memory access functions
38 *
39 * NOTE on naming convention and macro usage for these
40 *
41 * A single _ prefix before an access function means that no state
42 * check or debug information is printed when that function is called.
43 *
44 * A double __ prefix before an access function means that state is checked
45 * and the current line number is printed in addition to any other debug output.
46 *
47 * The non-prefixed name is the #define that maps the caller into a
48 * #define that provides the caller's __LINE__ to the double prefix version.
49 *
50 * If you wish to call the function without any debug or state checking,
51 * you should use the single _ prefix version (as is used by dependent IO
52 * routines, for example _iwl4965_read_direct32 calls the non-check version of
53 * _iwl4965_read32.)
54 *
55 * These declarations are *extremely* useful in quickly isolating code deltas
56 * which result in misconfiguring of the hardware I/O. In combination with
57 * git-bisect and the IO debug level you can quickly determine the specific
58 * commit which breaks the IO sequence to the hardware.
59 *
60 */
61
62#define _iwl4965_write32(iwl, ofs, val) writel((val), (iwl)->hw_base + (ofs))
63#ifdef CONFIG_IWL4965_DEBUG
64static inline void __iwl4965_write32(const char *f, u32 l, struct iwl4965_priv *iwl,
65 u32 ofs, u32 val)
66{
67 IWL_DEBUG_IO("write32(0x%08X, 0x%08X) - %s %d\n", ofs, val, f, l);
68 _iwl4965_write32(iwl, ofs, val);
69}
70#define iwl4965_write32(iwl, ofs, val) \
71 __iwl4965_write32(__FILE__, __LINE__, iwl, ofs, val)
72#else
73#define iwl4965_write32(iwl, ofs, val) _iwl4965_write32(iwl, ofs, val)
74#endif
75
76#define _iwl4965_read32(iwl, ofs) readl((iwl)->hw_base + (ofs))
77#ifdef CONFIG_IWL4965_DEBUG
78static inline u32 __iwl4965_read32(char *f, u32 l, struct iwl4965_priv *iwl, u32 ofs)
79{
80 IWL_DEBUG_IO("read_direct32(0x%08X) - %s %d\n", ofs, f, l);
81 return _iwl4965_read32(iwl, ofs);
82}
83#define iwl4965_read32(iwl, ofs) __iwl4965_read32(__FILE__, __LINE__, iwl, ofs)
84#else
85#define iwl4965_read32(p, o) _iwl4965_read32(p, o)
86#endif
87
88static inline int _iwl4965_poll_bit(struct iwl4965_priv *priv, u32 addr,
89 u32 bits, u32 mask, int timeout)
90{
91 int i = 0;
92
93 do {
94 if ((_iwl4965_read32(priv, addr) & mask) == (bits & mask))
95 return i;
96 mdelay(10);
97 i += 10;
98 } while (i < timeout);
99
100 return -ETIMEDOUT;
101}
102#ifdef CONFIG_IWL4965_DEBUG
103static inline int __iwl4965_poll_bit(const char *f, u32 l,
104 struct iwl4965_priv *priv, u32 addr,
105 u32 bits, u32 mask, int timeout)
106{
107 int ret = _iwl4965_poll_bit(priv, addr, bits, mask, timeout);
108 if (unlikely(ret == -ETIMEDOUT))
109 IWL_DEBUG_IO
110 ("poll_bit(0x%08X, 0x%08X, 0x%08X) - timedout - %s %d\n",
111 addr, bits, mask, f, l);
112 else
113 IWL_DEBUG_IO
114 ("poll_bit(0x%08X, 0x%08X, 0x%08X) = 0x%08X - %s %d\n",
115 addr, bits, mask, ret, f, l);
116 return ret;
117}
118#define iwl4965_poll_bit(iwl, addr, bits, mask, timeout) \
119 __iwl4965_poll_bit(__FILE__, __LINE__, iwl, addr, bits, mask, timeout)
120#else
121#define iwl4965_poll_bit(p, a, b, m, t) _iwl4965_poll_bit(p, a, b, m, t)
122#endif
123
124static inline void _iwl4965_set_bit(struct iwl4965_priv *priv, u32 reg, u32 mask)
125{
126 _iwl4965_write32(priv, reg, _iwl4965_read32(priv, reg) | mask);
127}
128#ifdef CONFIG_IWL4965_DEBUG
129static inline void __iwl4965_set_bit(const char *f, u32 l,
130 struct iwl4965_priv *priv, u32 reg, u32 mask)
131{
132 u32 val = _iwl4965_read32(priv, reg) | mask;
133 IWL_DEBUG_IO("set_bit(0x%08X, 0x%08X) = 0x%08X\n", reg, mask, val);
134 _iwl4965_write32(priv, reg, val);
135}
136#define iwl4965_set_bit(p, r, m) __iwl4965_set_bit(__FILE__, __LINE__, p, r, m)
137#else
138#define iwl4965_set_bit(p, r, m) _iwl4965_set_bit(p, r, m)
139#endif
140
141static inline void _iwl4965_clear_bit(struct iwl4965_priv *priv, u32 reg, u32 mask)
142{
143 _iwl4965_write32(priv, reg, _iwl4965_read32(priv, reg) & ~mask);
144}
145#ifdef CONFIG_IWL4965_DEBUG
146static inline void __iwl4965_clear_bit(const char *f, u32 l,
147 struct iwl4965_priv *priv, u32 reg, u32 mask)
148{
149 u32 val = _iwl4965_read32(priv, reg) & ~mask;
150 IWL_DEBUG_IO("clear_bit(0x%08X, 0x%08X) = 0x%08X\n", reg, mask, val);
151 _iwl4965_write32(priv, reg, val);
152}
153#define iwl4965_clear_bit(p, r, m) __iwl4965_clear_bit(__FILE__, __LINE__, p, r, m)
154#else
155#define iwl4965_clear_bit(p, r, m) _iwl4965_clear_bit(p, r, m)
156#endif
157
158static inline int _iwl4965_grab_nic_access(struct iwl4965_priv *priv)
159{
160 int ret;
161 u32 gp_ctl;
162
163#ifdef CONFIG_IWL4965_DEBUG
164 if (atomic_read(&priv->restrict_refcnt))
165 return 0;
166#endif
167 if (test_bit(STATUS_RF_KILL_HW, &priv->status) ||
168 test_bit(STATUS_RF_KILL_SW, &priv->status)) {
169 IWL_WARNING("WARNING: Requesting MAC access during RFKILL "
170 "wakes up NIC\n");
171
172 /* 10 msec allows time for NIC to complete its data save */
173 gp_ctl = _iwl4965_read32(priv, CSR_GP_CNTRL);
174 if (gp_ctl & CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY) {
175 IWL_DEBUG_RF_KILL("Wait for complete power-down, "
176 "gpctl = 0x%08x\n", gp_ctl);
177 mdelay(10);
178 } else
179 IWL_DEBUG_RF_KILL("power-down complete, "
180 "gpctl = 0x%08x\n", gp_ctl);
181 }
182
183 /* this bit wakes up the NIC */
184 _iwl4965_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
185 ret = _iwl4965_poll_bit(priv, CSR_GP_CNTRL,
186 CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
187 (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
188 CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 50);
189 if (ret < 0) {
190 IWL_ERROR("MAC is in deep sleep!\n");
191 return -EIO;
192 }
193
194#ifdef CONFIG_IWL4965_DEBUG
195 atomic_inc(&priv->restrict_refcnt);
196#endif
197 return 0;
198}
199
200#ifdef CONFIG_IWL4965_DEBUG
201static inline int __iwl4965_grab_nic_access(const char *f, u32 l,
202 struct iwl4965_priv *priv)
203{
204 if (atomic_read(&priv->restrict_refcnt))
205 IWL_DEBUG_INFO("Grabbing access while already held at "
206 "line %d.\n", l);
207
208 IWL_DEBUG_IO("grabbing nic access - %s %d\n", f, l);
209 return _iwl4965_grab_nic_access(priv);
210}
211#define iwl4965_grab_nic_access(priv) \
212 __iwl4965_grab_nic_access(__FILE__, __LINE__, priv)
213#else
214#define iwl4965_grab_nic_access(priv) \
215 _iwl4965_grab_nic_access(priv)
216#endif
217
218static inline void _iwl4965_release_nic_access(struct iwl4965_priv *priv)
219{
220#ifdef CONFIG_IWL4965_DEBUG
221 if (atomic_dec_and_test(&priv->restrict_refcnt))
222#endif
223 _iwl4965_clear_bit(priv, CSR_GP_CNTRL,
224 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
225}
226#ifdef CONFIG_IWL4965_DEBUG
227static inline void __iwl4965_release_nic_access(const char *f, u32 l,
228 struct iwl4965_priv *priv)
229{
230 if (atomic_read(&priv->restrict_refcnt) <= 0)
231 IWL_ERROR("Release unheld nic access at line %d.\n", l);
232
233 IWL_DEBUG_IO("releasing nic access - %s %d\n", f, l);
234 _iwl4965_release_nic_access(priv);
235}
236#define iwl4965_release_nic_access(priv) \
237 __iwl4965_release_nic_access(__FILE__, __LINE__, priv)
238#else
239#define iwl4965_release_nic_access(priv) \
240 _iwl4965_release_nic_access(priv)
241#endif
242
243static inline u32 _iwl4965_read_direct32(struct iwl4965_priv *priv, u32 reg)
244{
245 return _iwl4965_read32(priv, reg);
246}
247#ifdef CONFIG_IWL4965_DEBUG
248static inline u32 __iwl4965_read_direct32(const char *f, u32 l,
249 struct iwl4965_priv *priv, u32 reg)
250{
251 u32 value = _iwl4965_read_direct32(priv, reg);
252 if (!atomic_read(&priv->restrict_refcnt))
253 IWL_ERROR("Nic access not held from %s %d\n", f, l);
254 IWL_DEBUG_IO("read_direct32(0x%4X) = 0x%08x - %s %d \n", reg, value,
255 f, l);
256 return value;
257}
258#define iwl4965_read_direct32(priv, reg) \
259 __iwl4965_read_direct32(__FILE__, __LINE__, priv, reg)
260#else
261#define iwl4965_read_direct32 _iwl4965_read_direct32
262#endif
263
264static inline void _iwl4965_write_direct32(struct iwl4965_priv *priv,
265 u32 reg, u32 value)
266{
267 _iwl4965_write32(priv, reg, value);
268}
269#ifdef CONFIG_IWL4965_DEBUG
270static void __iwl4965_write_direct32(u32 line,
271 struct iwl4965_priv *priv, u32 reg, u32 value)
272{
273 if (!atomic_read(&priv->restrict_refcnt))
274 IWL_ERROR("Nic access not held from line %d\n", line);
275 _iwl4965_write_direct32(priv, reg, value);
276}
277#define iwl4965_write_direct32(priv, reg, value) \
278 __iwl4965_write_direct32(__LINE__, priv, reg, value)
279#else
280#define iwl4965_write_direct32 _iwl4965_write_direct32
281#endif
282
283static inline void iwl4965_write_reg_buf(struct iwl4965_priv *priv,
284 u32 reg, u32 len, u32 *values)
285{
286 u32 count = sizeof(u32);
287
288 if ((priv != NULL) && (values != NULL)) {
289 for (; 0 < len; len -= count, reg += count, values++)
290 _iwl4965_write_direct32(priv, reg, *values);
291 }
292}
293
294static inline int _iwl4965_poll_direct_bit(struct iwl4965_priv *priv,
295 u32 addr, u32 mask, int timeout)
296{
297 int i = 0;
298
299 do {
300 if ((_iwl4965_read_direct32(priv, addr) & mask) == mask)
301 return i;
302 mdelay(10);
303 i += 10;
304 } while (i < timeout);
305
306 return -ETIMEDOUT;
307}
308
309#ifdef CONFIG_IWL4965_DEBUG
310static inline int __iwl4965_poll_direct_bit(const char *f, u32 l,
311 struct iwl4965_priv *priv,
312 u32 addr, u32 mask, int timeout)
313{
314 int ret = _iwl4965_poll_direct_bit(priv, addr, mask, timeout);
315
316 if (unlikely(ret == -ETIMEDOUT))
317 IWL_DEBUG_IO("poll_direct_bit(0x%08X, 0x%08X) - "
318 "timedout - %s %d\n", addr, mask, f, l);
319 else
320 IWL_DEBUG_IO("poll_direct_bit(0x%08X, 0x%08X) = 0x%08X "
321 "- %s %d\n", addr, mask, ret, f, l);
322 return ret;
323}
324#define iwl4965_poll_direct_bit(iwl, addr, mask, timeout) \
325 __iwl4965_poll_direct_bit(__FILE__, __LINE__, iwl, addr, mask, timeout)
326#else
327#define iwl4965_poll_direct_bit _iwl4965_poll_direct_bit
328#endif
329
330static inline u32 _iwl4965_read_prph(struct iwl4965_priv *priv, u32 reg)
331{
332 _iwl4965_write_direct32(priv, HBUS_TARG_PRPH_RADDR, reg | (3 << 24));
333 return _iwl4965_read_direct32(priv, HBUS_TARG_PRPH_RDAT);
334}
335#ifdef CONFIG_IWL4965_DEBUG
336static inline u32 __iwl4965_read_prph(u32 line, struct iwl4965_priv *priv, u32 reg)
337{
338 if (!atomic_read(&priv->restrict_refcnt))
339 IWL_ERROR("Nic access not held from line %d\n", line);
340 return _iwl4965_read_prph(priv, reg);
341}
342
343#define iwl4965_read_prph(priv, reg) \
344 __iwl4965_read_prph(__LINE__, priv, reg)
345#else
346#define iwl4965_read_prph _iwl4965_read_prph
347#endif
348
349static inline void _iwl4965_write_prph(struct iwl4965_priv *priv,
350 u32 addr, u32 val)
351{
352 _iwl4965_write_direct32(priv, HBUS_TARG_PRPH_WADDR,
353 ((addr & 0x0000FFFF) | (3 << 24)));
354 _iwl4965_write_direct32(priv, HBUS_TARG_PRPH_WDAT, val);
355}
356#ifdef CONFIG_IWL4965_DEBUG
357static inline void __iwl4965_write_prph(u32 line, struct iwl4965_priv *priv,
358 u32 addr, u32 val)
359{
360 if (!atomic_read(&priv->restrict_refcnt))
361 IWL_ERROR("Nic access from line %d\n", line);
362 _iwl4965_write_prph(priv, addr, val);
363}
364
365#define iwl4965_write_prph(priv, addr, val) \
366 __iwl4965_write_prph(__LINE__, priv, addr, val);
367#else
368#define iwl4965_write_prph _iwl4965_write_prph
369#endif
370
371#define _iwl4965_set_bits_prph(priv, reg, mask) \
372 _iwl4965_write_prph(priv, reg, (_iwl4965_read_prph(priv, reg) | mask))
373#ifdef CONFIG_IWL4965_DEBUG
374static inline void __iwl4965_set_bits_prph(u32 line, struct iwl4965_priv *priv,
375 u32 reg, u32 mask)
376{
377 if (!atomic_read(&priv->restrict_refcnt))
378 IWL_ERROR("Nic access not held from line %d\n", line);
379
380 _iwl4965_set_bits_prph(priv, reg, mask);
381}
382#define iwl4965_set_bits_prph(priv, reg, mask) \
383 __iwl4965_set_bits_prph(__LINE__, priv, reg, mask)
384#else
385#define iwl4965_set_bits_prph _iwl4965_set_bits_prph
386#endif
387
388#define _iwl4965_set_bits_mask_prph(priv, reg, bits, mask) \
389 _iwl4965_write_prph(priv, reg, ((_iwl4965_read_prph(priv, reg) & mask) | bits))
390
391#ifdef CONFIG_IWL4965_DEBUG
392static inline void __iwl4965_set_bits_mask_prph(u32 line,
393 struct iwl4965_priv *priv, u32 reg, u32 bits, u32 mask)
394{
395 if (!atomic_read(&priv->restrict_refcnt))
396 IWL_ERROR("Nic access not held from line %d\n", line);
397 _iwl4965_set_bits_mask_prph(priv, reg, bits, mask);
398}
399#define iwl4965_set_bits_mask_prph(priv, reg, bits, mask) \
400 __iwl4965_set_bits_mask_prph(__LINE__, priv, reg, bits, mask)
401#else
402#define iwl4965_set_bits_mask_prph _iwl4965_set_bits_mask_prph
403#endif
404
405static inline void iwl4965_clear_bits_prph(struct iwl4965_priv
406 *priv, u32 reg, u32 mask)
407{
408 u32 val = _iwl4965_read_prph(priv, reg);
409 _iwl4965_write_prph(priv, reg, (val & ~mask));
410}
411
412static inline u32 iwl4965_read_targ_mem(struct iwl4965_priv *priv, u32 addr)
413{
414 iwl4965_write_direct32(priv, HBUS_TARG_MEM_RADDR, addr);
415 return iwl4965_read_direct32(priv, HBUS_TARG_MEM_RDAT);
416}
417
418static inline void iwl4965_write_targ_mem(struct iwl4965_priv *priv, u32 addr, u32 val)
419{
420 iwl4965_write_direct32(priv, HBUS_TARG_MEM_WADDR, addr);
421 iwl4965_write_direct32(priv, HBUS_TARG_MEM_WDAT, val);
422}
423
424static inline void iwl4965_write_targ_mem_buf(struct iwl4965_priv *priv, u32 addr,
425 u32 len, u32 *values)
426{
427 iwl4965_write_direct32(priv, HBUS_TARG_MEM_WADDR, addr);
428 for (; 0 < len; len -= sizeof(u32), values++)
429 iwl4965_write_direct32(priv, HBUS_TARG_MEM_WDAT, *values);
430}
431#endif
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965-rs.c b/drivers/net/wireless/iwlwifi/iwl-4965-rs.c
index d06462264147..b608e1ca8b40 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-4965-rs.c
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2005 - 2007 Intel Corporation. All rights reserved. 3 * Copyright(c) 2005 - 2008 Intel Corporation. All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as 6 * under the terms of version 2 of the GNU General Public License as
@@ -36,9 +36,10 @@
36 36
37#include <linux/workqueue.h> 37#include <linux/workqueue.h>
38 38
39#include "../net/mac80211/ieee80211_rate.h" 39#include "../net/mac80211/rate.h"
40 40
41#include "iwl-4965.h" 41#include "iwl-4965.h"
42#include "iwl-core.h"
42#include "iwl-helpers.h" 43#include "iwl-helpers.h"
43 44
44#define RS_NAME "iwl-4965-rs" 45#define RS_NAME "iwl-4965-rs"
@@ -83,7 +84,7 @@ struct iwl4965_rate_scale_data {
83/** 84/**
84 * struct iwl4965_scale_tbl_info -- tx params and success history for all rates 85 * struct iwl4965_scale_tbl_info -- tx params and success history for all rates
85 * 86 *
86 * There are two of these in struct iwl_rate_scale_priv, 87 * There are two of these in struct iwl4965_lq_sta,
87 * one for "active", and one for "search". 88 * one for "active", and one for "search".
88 */ 89 */
89struct iwl4965_scale_tbl_info { 90struct iwl4965_scale_tbl_info {
@@ -98,8 +99,23 @@ struct iwl4965_scale_tbl_info {
98 struct iwl4965_rate_scale_data win[IWL_RATE_COUNT]; /* rate histories */ 99 struct iwl4965_rate_scale_data win[IWL_RATE_COUNT]; /* rate histories */
99}; 100};
100 101
102#ifdef CONFIG_IWL4965_HT
103
104struct iwl4965_traffic_load {
105 unsigned long time_stamp; /* age of the oldest statistics */
106 u32 packet_count[TID_QUEUE_MAX_SIZE]; /* packet count in this time
107 * slice */
108 u32 total; /* total num of packets during the
109 * last TID_MAX_TIME_DIFF */
110 u8 queue_count; /* number of queues that have
111 * been used since the last cleanup */
112 u8 head; /* start of the circular buffer */
113};
114
115#endif /* CONFIG_IWL4965_HT */
116
101/** 117/**
102 * struct iwl_rate_scale_priv -- driver's rate scaling private structure 118 * struct iwl4965_lq_sta -- driver's rate scaling private structure
103 * 119 *
104 * Pointer to this gets passed back and forth between driver and mac80211. 120 * Pointer to this gets passed back and forth between driver and mac80211.
105 */ 121 */
@@ -124,7 +140,7 @@ struct iwl4965_lq_sta {
124 u8 valid_antenna; 140 u8 valid_antenna;
125 u8 is_green; 141 u8 is_green;
126 u8 is_dup; 142 u8 is_dup;
127 u8 phymode; 143 enum ieee80211_band band;
128 u8 ibss_sta_added; 144 u8 ibss_sta_added;
129 145
130 /* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. */ 146 /* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. */
@@ -134,23 +150,30 @@ struct iwl4965_lq_sta {
134 u16 active_mimo_rate; 150 u16 active_mimo_rate;
135 u16 active_rate_basic; 151 u16 active_rate_basic;
136 152
137 struct iwl4965_link_quality_cmd lq; 153 struct iwl_link_quality_cmd lq;
138 struct iwl4965_scale_tbl_info lq_info[LQ_SIZE]; /* "active", "search" */ 154 struct iwl4965_scale_tbl_info lq_info[LQ_SIZE]; /* "active", "search" */
155#ifdef CONFIG_IWL4965_HT
156 struct iwl4965_traffic_load load[TID_MAX_LOAD_COUNT];
157 u8 tx_agg_tid_en;
158#endif
139#ifdef CONFIG_MAC80211_DEBUGFS 159#ifdef CONFIG_MAC80211_DEBUGFS
140 struct dentry *rs_sta_dbgfs_scale_table_file; 160 struct dentry *rs_sta_dbgfs_scale_table_file;
141 struct dentry *rs_sta_dbgfs_stats_table_file; 161 struct dentry *rs_sta_dbgfs_stats_table_file;
162#ifdef CONFIG_IWL4965_HT
163 struct dentry *rs_sta_dbgfs_tx_agg_tid_en_file;
164#endif
142 struct iwl4965_rate dbg_fixed; 165 struct iwl4965_rate dbg_fixed;
143 struct iwl4965_priv *drv; 166 struct iwl_priv *drv;
144#endif 167#endif
145}; 168};
146 169
147static void rs_rate_scale_perform(struct iwl4965_priv *priv, 170static void rs_rate_scale_perform(struct iwl_priv *priv,
148 struct net_device *dev, 171 struct net_device *dev,
149 struct ieee80211_hdr *hdr, 172 struct ieee80211_hdr *hdr,
150 struct sta_info *sta); 173 struct sta_info *sta);
151static void rs_fill_link_cmd(struct iwl4965_lq_sta *lq_sta, 174static void rs_fill_link_cmd(struct iwl4965_lq_sta *lq_sta,
152 struct iwl4965_rate *tx_mcs, 175 struct iwl4965_rate *tx_mcs,
153 struct iwl4965_link_quality_cmd *tbl); 176 struct iwl_link_quality_cmd *tbl);
154 177
155 178
156#ifdef CONFIG_MAC80211_DEBUGFS 179#ifdef CONFIG_MAC80211_DEBUGFS
@@ -207,68 +230,150 @@ static s32 expected_tpt_mimo40MHzSGI[IWL_RATE_COUNT] = {
207 0, 0, 0, 0, 131, 131, 191, 222, 242, 270, 284, 289, 293 230 0, 0, 0, 0, 131, 131, 191, 222, 242, 270, 284, 289, 293
208}; 231};
209 232
210static int iwl4965_lq_sync_callback(struct iwl4965_priv *priv, 233static inline u8 iwl4965_rate_get_rate(u32 rate_n_flags)
211 struct iwl4965_cmd *cmd, struct sk_buff *skb)
212{ 234{
213 /*We didn't cache the SKB; let the caller free it */ 235 return (u8)(rate_n_flags & 0xFF);
214 return 1;
215} 236}
216 237
217static inline u8 iwl4965_rate_get_rate(u32 rate_n_flags) 238static void rs_rate_scale_clear_window(struct iwl4965_rate_scale_data *window)
218{ 239{
219 return (u8)(rate_n_flags & 0xFF); 240 window->data = 0;
241 window->success_counter = 0;
242 window->success_ratio = IWL_INVALID_VALUE;
243 window->counter = 0;
244 window->average_tpt = IWL_INVALID_VALUE;
245 window->stamp = 0;
220} 246}
221 247
222static int rs_send_lq_cmd(struct iwl4965_priv *priv, 248#ifdef CONFIG_IWL4965_HT
223 struct iwl4965_link_quality_cmd *lq, u8 flags) 249/*
250 * removes the old data from the statistics. All data that is older than
251 * TID_MAX_TIME_DIFF will be deleted.
252 */
253static void rs_tl_rm_old_stats(struct iwl4965_traffic_load *tl, u32 curr_time)
224{ 254{
225#ifdef CONFIG_IWL4965_DEBUG 255 /* The oldest age we want to keep */
226 int i; 256 u32 oldest_time = curr_time - TID_MAX_TIME_DIFF;
227#endif 257
228 struct iwl4965_host_cmd cmd = { 258 while (tl->queue_count &&
229 .id = REPLY_TX_LINK_QUALITY_CMD, 259 (tl->time_stamp < oldest_time)) {
230 .len = sizeof(struct iwl4965_link_quality_cmd), 260 tl->total -= tl->packet_count[tl->head];
231 .meta.flags = flags, 261 tl->packet_count[tl->head] = 0;
232 .data = lq, 262 tl->time_stamp += TID_QUEUE_CELL_SPACING;
233 }; 263 tl->queue_count--;
234 264 tl->head++;
235 if ((lq->sta_id == 0xFF) && 265 if (tl->head >= TID_QUEUE_MAX_SIZE)
236 (priv->iw_mode == IEEE80211_IF_TYPE_IBSS)) 266 tl->head = 0;
237 return -EINVAL; 267 }
268}
269
270/*
271 * increment traffic load value for tid and also remove
272 * any old values if passed the certain time period
273 */
274static void rs_tl_add_packet(struct iwl4965_lq_sta *lq_data, u8 tid)
275{
276 u32 curr_time = jiffies_to_msecs(jiffies);
277 u32 time_diff;
278 s32 index;
279 struct iwl4965_traffic_load *tl = NULL;
238 280
239 if (lq->sta_id == 0xFF) 281 if (tid >= TID_MAX_LOAD_COUNT)
240 lq->sta_id = IWL_AP_ID; 282 return;
241 283
242 IWL_DEBUG_RATE("lq station id 0x%x\n", lq->sta_id); 284 tl = &lq_data->load[tid];
243 IWL_DEBUG_RATE("lq dta 0x%X 0x%X\n",
244 lq->general_params.single_stream_ant_msk,
245 lq->general_params.dual_stream_ant_msk);
246#ifdef CONFIG_IWL4965_DEBUG
247 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
248 IWL_DEBUG_RATE("lq index %d 0x%X\n",
249 i, lq->rs_table[i].rate_n_flags);
250#endif
251 285
252 if (flags & CMD_ASYNC) 286 curr_time -= curr_time % TID_ROUND_VALUE;
253 cmd.meta.u.callback = iwl4965_lq_sync_callback;
254 287
255 if (iwl4965_is_associated(priv) && priv->assoc_station_added && 288 /* Happens only for the first packet. Initialize the data */
256 priv->lq_mngr.lq_ready) 289 if (!(tl->queue_count)) {
257 return iwl4965_send_cmd(priv, &cmd); 290 tl->total = 1;
291 tl->time_stamp = curr_time;
292 tl->queue_count = 1;
293 tl->head = 0;
294 tl->packet_count[0] = 1;
295 return;
296 }
258 297
259 return 0; 298 time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time);
299 index = time_diff / TID_QUEUE_CELL_SPACING;
300
301 /* The history is too long: remove data that is older than */
302 /* TID_MAX_TIME_DIFF */
303 if (index >= TID_QUEUE_MAX_SIZE)
304 rs_tl_rm_old_stats(tl, curr_time);
305
306 index = (tl->head + index) % TID_QUEUE_MAX_SIZE;
307 tl->packet_count[index] = tl->packet_count[index] + 1;
308 tl->total = tl->total + 1;
309
310 if ((index + 1) > tl->queue_count)
311 tl->queue_count = index + 1;
260} 312}
261 313
262static void rs_rate_scale_clear_window(struct iwl4965_rate_scale_data *window) 314/*
315 get the traffic load value for tid
316*/
317static u32 rs_tl_get_load(struct iwl4965_lq_sta *lq_data, u8 tid)
263{ 318{
264 window->data = 0; 319 u32 curr_time = jiffies_to_msecs(jiffies);
265 window->success_counter = 0; 320 u32 time_diff;
266 window->success_ratio = IWL_INVALID_VALUE; 321 s32 index;
267 window->counter = 0; 322 struct iwl4965_traffic_load *tl = NULL;
268 window->average_tpt = IWL_INVALID_VALUE; 323
269 window->stamp = 0; 324 if (tid >= TID_MAX_LOAD_COUNT)
325 return 0;
326
327 tl = &(lq_data->load[tid]);
328
329 curr_time -= curr_time % TID_ROUND_VALUE;
330
331 if (!(tl->queue_count))
332 return 0;
333
334 time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time);
335 index = time_diff / TID_QUEUE_CELL_SPACING;
336
337 /* The history is too long: remove data that is older than */
338 /* TID_MAX_TIME_DIFF */
339 if (index >= TID_QUEUE_MAX_SIZE)
340 rs_tl_rm_old_stats(tl, curr_time);
341
342 return tl->total;
343}
344
345static void rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv,
346 struct iwl4965_lq_sta *lq_data, u8 tid,
347 struct sta_info *sta)
348{
349 unsigned long state;
350 DECLARE_MAC_BUF(mac);
351
352 spin_lock_bh(&sta->ampdu_mlme.ampdu_tx);
353 state = sta->ampdu_mlme.tid_state_tx[tid];
354 spin_unlock_bh(&sta->ampdu_mlme.ampdu_tx);
355
356 if (state == HT_AGG_STATE_IDLE &&
357 rs_tl_get_load(lq_data, tid) > IWL_AGG_LOAD_THRESHOLD) {
358 IWL_DEBUG_HT("Starting Tx agg: STA: %s tid: %d\n",
359 print_mac(mac, sta->addr), tid);
360 ieee80211_start_tx_ba_session(priv->hw, sta->addr, tid);
361 }
362}
363
364static void rs_tl_turn_on_agg(struct iwl_priv *priv, u8 tid,
365 struct iwl4965_lq_sta *lq_data,
366 struct sta_info *sta)
367{
368 if ((tid < TID_MAX_LOAD_COUNT))
369 rs_tl_turn_on_agg_for_tid(priv, lq_data, tid, sta);
370 else if (tid == IWL_AGG_ALL_TID)
371 for (tid = 0; tid < TID_MAX_LOAD_COUNT; tid++)
372 rs_tl_turn_on_agg_for_tid(priv, lq_data, tid, sta);
270} 373}
271 374
375#endif /* CONFIG_IWLWIFI_HT */
376
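rs_tl_rm_old_stats(), rs_tl_add_packet() and rs_tl_get_load() above keep a small circular array of per-TID packet counts, one cell per TID_QUEUE_CELL_SPACING milliseconds, so that the load seen over roughly the last second can later be compared against IWL_AGG_LOAD_THRESHOLD before a TX BA session is started. The following is only a minimal, self-contained sketch of that bucketing scheme, not the driver code: it uses a caller-supplied, non-wrapping millisecond clock in place of jiffies_to_msecs()/TIME_WRAP_AROUND(), and CELL_MS/NCELLS are illustrative stand-ins for the TID_QUEUE_* constants.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define CELL_MS  50                    /* stand-in for TID_QUEUE_CELL_SPACING */
#define NCELLS   20                    /* stand-in for TID_QUEUE_MAX_SIZE */
#define MAX_AGE  ((NCELLS - 1) * CELL_MS)

struct traffic_load {
	uint32_t time_stamp;           /* ms timestamp of the cell at 'head' */
	uint32_t total;                /* packets summed over all live cells */
	uint32_t packet_count[NCELLS];
	uint32_t queue_count;          /* number of cells currently in use */
	uint32_t head;                 /* index of the oldest cell */
};

/* Drop cells older than MAX_AGE, walking the ring forward from 'head'. */
static void tl_expire(struct traffic_load *tl, uint32_t now_ms)
{
	uint32_t oldest = now_ms - MAX_AGE;

	while (tl->queue_count && tl->time_stamp < oldest) {
		tl->total -= tl->packet_count[tl->head];
		tl->packet_count[tl->head] = 0;
		tl->time_stamp += CELL_MS;
		tl->queue_count--;
		if (++tl->head >= NCELLS)
			tl->head = 0;
	}
}

/* Account one transmitted packet observed at time now_ms. */
static void tl_add_packet(struct traffic_load *tl, uint32_t now_ms)
{
	uint32_t idx;

	if (tl->queue_count &&
	    (now_ms - tl->time_stamp) / CELL_MS >= NCELLS)
		tl_expire(tl, now_ms);

	if (!tl->queue_count) {        /* empty (or just emptied) window */
		memset(tl, 0, sizeof(*tl));
		tl->time_stamp = now_ms;
		tl->queue_count = 1;
		tl->packet_count[0] = 1;
		tl->total = 1;
		return;
	}

	idx = (now_ms - tl->time_stamp) / CELL_MS;
	tl->packet_count[(tl->head + idx) % NCELLS]++;
	tl->total++;
	if (idx + 1 > tl->queue_count)
		tl->queue_count = idx + 1;
}

int main(void)
{
	struct traffic_load tl = { 0 };
	uint32_t t;

	for (t = 0; t < 1500; t += 100)        /* one packet every 100 ms */
		tl_add_packet(&tl, t);

	tl_expire(&tl, 1500);
	printf("load over the last %u ms: %u packets\n",
	       (unsigned)MAX_AGE, (unsigned)tl.total);
	return 0;
}

The only persistent state besides the cell counts is the timestamp of the oldest cell, which is why the expiry helper can simply walk the ring forward in CELL_MS steps.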
272/** 377/**
273 * rs_collect_tx_data - Update the success/failure sliding window 378 * rs_collect_tx_data - Update the success/failure sliding window
274 * 379 *
@@ -277,7 +382,8 @@ static void rs_rate_scale_clear_window(struct iwl4965_rate_scale_data *window)
277 * packets. 382 * packets.
278 */ 383 */
279static int rs_collect_tx_data(struct iwl4965_rate_scale_data *windows, 384static int rs_collect_tx_data(struct iwl4965_rate_scale_data *windows,
280 int scale_index, s32 tpt, u32 status) 385 int scale_index, s32 tpt, int retries,
386 int successes)
281{ 387{
282 struct iwl4965_rate_scale_data *window = NULL; 388 struct iwl4965_rate_scale_data *window = NULL;
283 u64 mask; 389 u64 mask;
@@ -298,26 +404,33 @@ static int rs_collect_tx_data(struct iwl4965_rate_scale_data *windows,
298 * subtract "1" from the success counter (this is the main reason 404 * subtract "1" from the success counter (this is the main reason
299 * we keep these bitmaps!). 405 * we keep these bitmaps!).
300 */ 406 */
301 if (window->counter >= win_size) { 407 while (retries > 0) {
302 window->counter = win_size - 1; 408 if (window->counter >= win_size) {
303 mask = 1; 409 window->counter = win_size - 1;
304 mask = (mask << (win_size - 1)); 410 mask = 1;
305 if ((window->data & mask)) { 411 mask = (mask << (win_size - 1));
306 window->data &= ~mask; 412 if (window->data & mask) {
307 window->success_counter = window->success_counter - 1; 413 window->data &= ~mask;
414 window->success_counter =
415 window->success_counter - 1;
416 }
308 } 417 }
309 }
310 418
311 /* Increment frames-attempted counter */ 419 /* Increment frames-attempted counter */
312 window->counter = window->counter + 1; 420 window->counter++;
421
422 /* Shift bitmap by one frame (throw away oldest history),
423 * OR in "1", and increment "success" if this
424 * frame was successful. */
425 mask = window->data;
426 window->data = (mask << 1);
427 if (successes > 0) {
428 window->success_counter = window->success_counter + 1;
429 window->data |= 0x1;
430 successes--;
431 }
313 432
314 /* Shift bitmap by one frame (throw away oldest history), 433 retries--;
315 * OR in "1", and increment "success" if this frame was successful. */
316 mask = window->data;
317 window->data = (mask << 1);
318 if (status != 0) {
319 window->success_counter = window->success_counter + 1;
320 window->data |= 0x1;
321 } 434 }
322 435
323 /* Calculate current success ratio, avoid divide-by-0! */ 436 /* Calculate current success ratio, avoid divide-by-0! */
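The hunk above turns rs_collect_tx_data() into a loop so that a single call can account for several retries and successes at once (needed for the A-MPDU path added later in rs_tx_status()): for every attempted frame the history bitmap is shifted left, bit 0 is set while unconsumed successes remain, and the oldest bit is retired first so the counter never exceeds the window size. A compact standalone version of just that bitmap bookkeeping is sketched below; WIN_SIZE is an illustrative value rather than necessarily the driver's real window size, and the printed ratio is a plain percentage instead of the driver's scaled success_ratio.

#include <stdio.h>
#include <stdint.h>

#define WIN_SIZE 62                    /* illustrative window size, <= 64 */

struct tx_window {
	uint64_t data;                 /* bit i set: the frame i slots ago was ACKed */
	int counter;                   /* frames recorded, capped at WIN_SIZE */
	int success_counter;           /* how many recorded frames were ACKed */
};

/* Record 'retries' attempted frames, of which 'successes' were ACKed. */
static void window_collect(struct tx_window *w, int retries, int successes)
{
	const uint64_t oldest = 1ULL << (WIN_SIZE - 1);

	while (retries > 0) {
		/* Window full: retire the oldest bit before adding a new one. */
		if (w->counter >= WIN_SIZE) {
			w->counter = WIN_SIZE - 1;
			if (w->data & oldest) {
				w->data &= ~oldest;
				w->success_counter--;
			}
		}

		w->counter++;

		/* Shift history one slot, set bit 0 for a successful attempt. */
		w->data <<= 1;
		if (successes > 0) {
			w->data |= 1;
			w->success_counter++;
			successes--;
		}

		retries--;
	}
}

int main(void)
{
	struct tx_window w = { 0 };

	window_collect(&w, 4, 1);      /* 4 attempts, only 1 ACKed */
	window_collect(&w, 1, 1);      /* 1 attempt, ACKed */

	printf("attempts=%d acked=%d success=%d%%\n",
	       w.counter, w.success_counter,
	       w.counter ? 100 * w.success_counter / w.counter : 0);
	return 0;
}

Keeping both the bitmap and the success counter means the ratio can be updated in constant time per frame while still being able to subtract exactly the frame that falls out of the window.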
@@ -404,13 +517,14 @@ static void rs_mcs_from_tbl(struct iwl4965_rate *mcs_rate,
404 * fill "search" or "active" tx mode table. 517 * fill "search" or "active" tx mode table.
405 */ 518 */
406static int rs_get_tbl_info_from_mcs(const struct iwl4965_rate *mcs_rate, 519static int rs_get_tbl_info_from_mcs(const struct iwl4965_rate *mcs_rate,
407 int phymode, struct iwl4965_scale_tbl_info *tbl, 520 enum ieee80211_band band,
521 struct iwl4965_scale_tbl_info *tbl,
408 int *rate_idx) 522 int *rate_idx)
409{ 523{
410 int index; 524 int index;
411 u32 ant_msk; 525 u32 ant_msk;
412 526
413 index = iwl4965_rate_index_from_plcp(mcs_rate->rate_n_flags); 527 index = iwl4965_hwrate_to_plcp_idx(mcs_rate->rate_n_flags);
414 528
415 if (index == IWL_RATE_INVALID) { 529 if (index == IWL_RATE_INVALID) {
416 *rate_idx = -1; 530 *rate_idx = -1;
@@ -429,7 +543,7 @@ static int rs_get_tbl_info_from_mcs(const struct iwl4965_rate *mcs_rate,
429 tbl->lq_type = LQ_NONE; 543 tbl->lq_type = LQ_NONE;
430 else { 544 else {
431 545
432 if (phymode == MODE_IEEE80211A) 546 if (band == IEEE80211_BAND_5GHZ)
433 tbl->lq_type = LQ_A; 547 tbl->lq_type = LQ_A;
434 else 548 else
435 tbl->lq_type = LQ_G; 549 tbl->lq_type = LQ_G;
@@ -498,7 +612,7 @@ static inline void rs_toggle_antenna(struct iwl4965_rate *new_rate,
498 } 612 }
499} 613}
500 614
501static inline u8 rs_use_green(struct iwl4965_priv *priv, 615static inline u8 rs_use_green(struct iwl_priv *priv,
502 struct ieee80211_conf *conf) 616 struct ieee80211_conf *conf)
503{ 617{
504#ifdef CONFIG_IWL4965_HT 618#ifdef CONFIG_IWL4965_HT
@@ -607,7 +721,7 @@ static void rs_get_lower_rate(struct iwl4965_lq_sta *lq_sta,
607 if (!is_legacy(tbl->lq_type) && (!ht_possible || !scale_index)) { 721 if (!is_legacy(tbl->lq_type) && (!ht_possible || !scale_index)) {
608 switch_to_legacy = 1; 722 switch_to_legacy = 1;
609 scale_index = rs_ht_to_legacy[scale_index]; 723 scale_index = rs_ht_to_legacy[scale_index];
610 if (lq_sta->phymode == MODE_IEEE80211A) 724 if (lq_sta->band == IEEE80211_BAND_5GHZ)
611 tbl->lq_type = LQ_A; 725 tbl->lq_type = LQ_A;
612 else 726 else
613 tbl->lq_type = LQ_G; 727 tbl->lq_type = LQ_G;
@@ -625,7 +739,7 @@ static void rs_get_lower_rate(struct iwl4965_lq_sta *lq_sta,
625 /* Mask with station rate restriction */ 739 /* Mask with station rate restriction */
626 if (is_legacy(tbl->lq_type)) { 740 if (is_legacy(tbl->lq_type)) {
627 /* supp_rates has no CCK bits in A mode */ 741 /* supp_rates has no CCK bits in A mode */
628 if (lq_sta->phymode == (u8) MODE_IEEE80211A) 742 if (lq_sta->band == IEEE80211_BAND_5GHZ)
629 rate_mask = (u16)(rate_mask & 743 rate_mask = (u16)(rate_mask &
630 (lq_sta->supp_rates << IWL_FIRST_OFDM_RATE)); 744 (lq_sta->supp_rates << IWL_FIRST_OFDM_RATE));
631 else 745 else
@@ -658,11 +772,12 @@ static void rs_tx_status(void *priv_rate, struct net_device *dev,
658 u8 retries; 772 u8 retries;
659 int rs_index, index = 0; 773 int rs_index, index = 0;
660 struct iwl4965_lq_sta *lq_sta; 774 struct iwl4965_lq_sta *lq_sta;
661 struct iwl4965_link_quality_cmd *table; 775 struct iwl_link_quality_cmd *table;
662 struct sta_info *sta; 776 struct sta_info *sta;
663 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 777 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
664 struct iwl4965_priv *priv = (struct iwl4965_priv *)priv_rate; 778 struct iwl_priv *priv = (struct iwl_priv *)priv_rate;
665 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 779 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
780 struct ieee80211_hw *hw = local_to_hw(local);
666 struct iwl4965_rate_scale_data *window = NULL; 781 struct iwl4965_rate_scale_data *window = NULL;
667 struct iwl4965_rate_scale_data *search_win = NULL; 782 struct iwl4965_rate_scale_data *search_win = NULL;
668 struct iwl4965_rate tx_mcs; 783 struct iwl4965_rate tx_mcs;
@@ -677,28 +792,32 @@ static void rs_tx_status(void *priv_rate, struct net_device *dev,
677 if (!ieee80211_is_data(fc) || is_multicast_ether_addr(hdr->addr1)) 792 if (!ieee80211_is_data(fc) || is_multicast_ether_addr(hdr->addr1))
678 return; 793 return;
679 794
795 /* This packet was aggregated but doesn't carry rate scale info */
796 if ((tx_resp->control.flags & IEEE80211_TXCTL_AMPDU) &&
797 !(tx_resp->flags & IEEE80211_TX_STATUS_AMPDU))
798 return;
799
680 retries = tx_resp->retry_count; 800 retries = tx_resp->retry_count;
681 801
682 if (retries > 15) 802 if (retries > 15)
683 retries = 15; 803 retries = 15;
684 804
805 rcu_read_lock();
685 806
686 sta = sta_info_get(local, hdr->addr1); 807 sta = sta_info_get(local, hdr->addr1);
687 808
688 if (!sta || !sta->rate_ctrl_priv) { 809 if (!sta || !sta->rate_ctrl_priv)
689 if (sta) 810 goto out;
690 sta_info_put(sta); 811
691 return;
692 }
693 812
694 lq_sta = (struct iwl4965_lq_sta *)sta->rate_ctrl_priv; 813 lq_sta = (struct iwl4965_lq_sta *)sta->rate_ctrl_priv;
695 814
696 if (!priv->lq_mngr.lq_ready) 815 if (!priv->lq_mngr.lq_ready)
697 return; 816 goto out;
698 817
699 if ((priv->iw_mode == IEEE80211_IF_TYPE_IBSS) && 818 if ((priv->iw_mode == IEEE80211_IF_TYPE_IBSS) &&
700 !lq_sta->ibss_sta_added) 819 !lq_sta->ibss_sta_added)
701 return; 820 goto out;
702 821
703 table = &lq_sta->lq; 822 table = &lq_sta->lq;
704 active_index = lq_sta->active_tbl; 823 active_index = lq_sta->active_tbl;
@@ -719,17 +838,6 @@ static void rs_tx_status(void *priv_rate, struct net_device *dev,
719 search_win = (struct iwl4965_rate_scale_data *) 838 search_win = (struct iwl4965_rate_scale_data *)
720 &(search_tbl->win[0]); 839 &(search_tbl->win[0]);
721 840
722 tx_mcs.rate_n_flags = tx_resp->control.tx_rate;
723
724 rs_get_tbl_info_from_mcs(&tx_mcs, priv->phymode,
725 &tbl_type, &rs_index);
726 if ((rs_index < 0) || (rs_index >= IWL_RATE_COUNT)) {
727 IWL_DEBUG_RATE("bad rate index at: %d rate 0x%X\n",
728 rs_index, tx_mcs.rate_n_flags);
729 sta_info_put(sta);
730 return;
731 }
732
733 /* 841 /*
734 * Ignore this Tx frame response if its initial rate doesn't match 842 * Ignore this Tx frame response if its initial rate doesn't match
735 * that of latest Link Quality command. There may be stragglers 843 * that of latest Link Quality command. There may be stragglers
@@ -738,14 +846,29 @@ static void rs_tx_status(void *priv_rate, struct net_device *dev,
738 * to check "search" mode, or a prior "search" mode after we've moved 846 * to check "search" mode, or a prior "search" mode after we've moved
739 * to a new "search" mode (which might become the new "active" mode). 847 * to a new "search" mode (which might become the new "active" mode).
740 */ 848 */
741 if (retries && 849 tx_mcs.rate_n_flags = le32_to_cpu(table->rs_table[0].rate_n_flags);
742 (tx_mcs.rate_n_flags != 850 rs_get_tbl_info_from_mcs(&tx_mcs, priv->band, &tbl_type, &rs_index);
743 le32_to_cpu(table->rs_table[0].rate_n_flags))) { 851 if (priv->band == IEEE80211_BAND_5GHZ)
744 IWL_DEBUG_RATE("initial rate does not match 0x%x 0x%x\n", 852 rs_index -= IWL_FIRST_OFDM_RATE;
745 tx_mcs.rate_n_flags, 853
746 le32_to_cpu(table->rs_table[0].rate_n_flags)); 854 if ((tx_resp->control.tx_rate == NULL) ||
747 sta_info_put(sta); 855 (tbl_type.is_SGI ^
748 return; 856 !!(tx_resp->control.flags & IEEE80211_TXCTL_SHORT_GI)) ||
857 (tbl_type.is_fat ^
858 !!(tx_resp->control.flags & IEEE80211_TXCTL_40_MHZ_WIDTH)) ||
859 (tbl_type.is_dup ^
860 !!(tx_resp->control.flags & IEEE80211_TXCTL_DUP_DATA)) ||
861 (tbl_type.antenna_type ^
862 tx_resp->control.antenna_sel_tx) ||
863 (!!(tx_mcs.rate_n_flags & RATE_MCS_HT_MSK) ^
864 !!(tx_resp->control.flags & IEEE80211_TXCTL_OFDM_HT)) ||
865 (!!(tx_mcs.rate_n_flags & RATE_MCS_GF_MSK) ^
866 !!(tx_resp->control.flags & IEEE80211_TXCTL_GREEN_FIELD)) ||
867 (hw->wiphy->bands[priv->band]->bitrates[rs_index].bitrate !=
868 tx_resp->control.tx_rate->bitrate)) {
869 IWL_DEBUG_RATE("initial rate does not match 0x%x\n",
870 tx_mcs.rate_n_flags);
871 goto out;
749 } 872 }
750 873
751 /* Update frame history window with "failure" for each Tx retry. */ 874 /* Update frame history window with "failure" for each Tx retry. */
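The rewritten initial-rate sanity check in the hunk above compares each boolean field of the decoded table entry against a mac80211 control flag with the '(field ^ !!(flags & BIT))' idiom: the double negation collapses the masked bit to 0 or 1 first, so the XOR genuinely asks "do these two disagree?" instead of XOR-ing a 0/1 value with a raw bit mask. A tiny standalone illustration follows; FLAG_SHORT_GI is an arbitrary bit picked for the example, not the real IEEE80211_TXCTL_SHORT_GI value.

#include <stdio.h>

#define FLAG_SHORT_GI 0x40             /* arbitrary bit chosen for the example */

int main(void)
{
	unsigned int flags = FLAG_SHORT_GI;    /* frame was sent with short GI */
	unsigned char is_SGI = 1;              /* rate table entry also says short GI */

	/* Wrong: 1 ^ 0x40 is non-zero, so this falsely reports a mismatch. */
	printf("raw xor: %s\n",
	       (is_SGI ^ (flags & FLAG_SHORT_GI)) ? "mismatch" : "match");

	/* Right: collapse the masked bit to 0/1 before comparing. */
	printf("!! xor : %s\n",
	       (is_SGI ^ !!(flags & FLAG_SHORT_GI)) ? "mismatch" : "match");
	return 0;
}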
@@ -754,7 +877,7 @@ static void rs_tx_status(void *priv_rate, struct net_device *dev,
754 * Each tx attempt steps one entry deeper in the rate table. */ 877 * Each tx attempt steps one entry deeper in the rate table. */
755 tx_mcs.rate_n_flags = 878 tx_mcs.rate_n_flags =
756 le32_to_cpu(table->rs_table[index].rate_n_flags); 879 le32_to_cpu(table->rs_table[index].rate_n_flags);
757 rs_get_tbl_info_from_mcs(&tx_mcs, priv->phymode, 880 rs_get_tbl_info_from_mcs(&tx_mcs, priv->band,
758 &tbl_type, &rs_index); 881 &tbl_type, &rs_index);
759 882
760 /* If type matches "search" table, 883 /* If type matches "search" table,
@@ -766,7 +889,7 @@ static void rs_tx_status(void *priv_rate, struct net_device *dev,
766 tpt = search_tbl->expected_tpt[rs_index]; 889 tpt = search_tbl->expected_tpt[rs_index];
767 else 890 else
768 tpt = 0; 891 tpt = 0;
769 rs_collect_tx_data(search_win, rs_index, tpt, 0); 892 rs_collect_tx_data(search_win, rs_index, tpt, 1, 0);
770 893
771 /* Else if type matches "current/active" table, 894 /* Else if type matches "current/active" table,
772 * add failure to "current/active" history */ 895 * add failure to "current/active" history */
@@ -777,7 +900,7 @@ static void rs_tx_status(void *priv_rate, struct net_device *dev,
777 tpt = curr_tbl->expected_tpt[rs_index]; 900 tpt = curr_tbl->expected_tpt[rs_index];
778 else 901 else
779 tpt = 0; 902 tpt = 0;
780 rs_collect_tx_data(window, rs_index, tpt, 0); 903 rs_collect_tx_data(window, rs_index, tpt, 1, 0);
781 } 904 }
782 905
783 /* If not searching for a new mode, increment failed counter 906 /* If not searching for a new mode, increment failed counter
@@ -794,14 +917,8 @@ static void rs_tx_status(void *priv_rate, struct net_device *dev,
794 * if Tx was successful first try, use original rate, 917 * if Tx was successful first try, use original rate,
795 * else look up the rate that was, finally, successful. 918 * else look up the rate that was, finally, successful.
796 */ 919 */
797 if (!tx_resp->retry_count) 920 tx_mcs.rate_n_flags = le32_to_cpu(table->rs_table[index].rate_n_flags);
798 tx_mcs.rate_n_flags = tx_resp->control.tx_rate; 921 rs_get_tbl_info_from_mcs(&tx_mcs, priv->band, &tbl_type, &rs_index);
799 else
800 tx_mcs.rate_n_flags =
801 le32_to_cpu(table->rs_table[index].rate_n_flags);
802
803 rs_get_tbl_info_from_mcs(&tx_mcs, priv->phymode,
804 &tbl_type, &rs_index);
805 922
806 /* Update frame history window with "success" if Tx got ACKed ... */ 923 /* Update frame history window with "success" if Tx got ACKed ... */
807 if (tx_resp->flags & IEEE80211_TX_STATUS_ACK) 924 if (tx_resp->flags & IEEE80211_TX_STATUS_ACK)
@@ -818,9 +935,13 @@ static void rs_tx_status(void *priv_rate, struct net_device *dev,
818 tpt = search_tbl->expected_tpt[rs_index]; 935 tpt = search_tbl->expected_tpt[rs_index];
819 else 936 else
820 tpt = 0; 937 tpt = 0;
821 rs_collect_tx_data(search_win, 938 if (tx_resp->control.flags & IEEE80211_TXCTL_AMPDU)
822 rs_index, tpt, status); 939 rs_collect_tx_data(search_win, rs_index, tpt,
823 940 tx_resp->ampdu_ack_len,
941 tx_resp->ampdu_ack_map);
942 else
943 rs_collect_tx_data(search_win, rs_index, tpt,
944 1, status);
824 /* Else if type matches "current/active" table, 945 /* Else if type matches "current/active" table,
825 * add final tx status to "current/active" history */ 946 * add final tx status to "current/active" history */
826 } else if ((tbl_type.lq_type == curr_tbl->lq_type) && 947 } else if ((tbl_type.lq_type == curr_tbl->lq_type) &&
@@ -830,21 +951,34 @@ static void rs_tx_status(void *priv_rate, struct net_device *dev,
830 tpt = curr_tbl->expected_tpt[rs_index]; 951 tpt = curr_tbl->expected_tpt[rs_index];
831 else 952 else
832 tpt = 0; 953 tpt = 0;
833 rs_collect_tx_data(window, rs_index, tpt, status); 954 if (tx_resp->control.flags & IEEE80211_TXCTL_AMPDU)
955 rs_collect_tx_data(window, rs_index, tpt,
956 tx_resp->ampdu_ack_len,
957 tx_resp->ampdu_ack_map);
958 else
959 rs_collect_tx_data(window, rs_index, tpt,
960 1, status);
834 } 961 }
835 962
836 /* If not searching for new mode, increment success/failed counter 963 /* If not searching for new mode, increment success/failed counter
837 * ... these help determine when to start searching again */ 964 * ... these help determine when to start searching again */
838 if (lq_sta->stay_in_tbl) { 965 if (lq_sta->stay_in_tbl) {
839 if (status) 966 if (tx_resp->control.flags & IEEE80211_TXCTL_AMPDU) {
840 lq_sta->total_success++; 967 lq_sta->total_success += tx_resp->ampdu_ack_map;
841 else 968 lq_sta->total_failed +=
842 lq_sta->total_failed++; 969 (tx_resp->ampdu_ack_len - tx_resp->ampdu_ack_map);
970 } else {
971 if (status)
972 lq_sta->total_success++;
973 else
974 lq_sta->total_failed++;
975 }
843 } 976 }
844 977
845 /* See if there's a better rate or modulation mode to try. */ 978 /* See if there's a better rate or modulation mode to try. */
846 rs_rate_scale_perform(priv, dev, hdr, sta); 979 rs_rate_scale_perform(priv, dev, hdr, sta);
847 sta_info_put(sta); 980out:
981 rcu_read_unlock();
848 return; 982 return;
849} 983}
850 984
@@ -948,7 +1082,7 @@ static void rs_get_expected_tpt_table(struct iwl4965_lq_sta *lq_sta,
948 * to decrease to match "active" throughput. When moving from MIMO to SISO, 1082 * to decrease to match "active" throughput. When moving from MIMO to SISO,
949 * bit rate will typically need to increase, but not if performance was bad. 1083 * bit rate will typically need to increase, but not if performance was bad.
950 */ 1084 */
951static s32 rs_get_best_rate(struct iwl4965_priv *priv, 1085static s32 rs_get_best_rate(struct iwl_priv *priv,
952 struct iwl4965_lq_sta *lq_sta, 1086 struct iwl4965_lq_sta *lq_sta,
953 struct iwl4965_scale_tbl_info *tbl, /* "search" */ 1087 struct iwl4965_scale_tbl_info *tbl, /* "search" */
954 u16 rate_mask, s8 index, s8 rate) 1088 u16 rate_mask, s8 index, s8 rate)
@@ -1046,7 +1180,7 @@ static inline u8 rs_is_both_ant_supp(u8 valid_antenna)
1046/* 1180/*
1047 * Set up search table for MIMO 1181 * Set up search table for MIMO
1048 */ 1182 */
1049static int rs_switch_to_mimo(struct iwl4965_priv *priv, 1183static int rs_switch_to_mimo(struct iwl_priv *priv,
1050 struct iwl4965_lq_sta *lq_sta, 1184 struct iwl4965_lq_sta *lq_sta,
1051 struct ieee80211_conf *conf, 1185 struct ieee80211_conf *conf,
1052 struct sta_info *sta, 1186 struct sta_info *sta,
@@ -1105,13 +1239,13 @@ static int rs_switch_to_mimo(struct iwl4965_priv *priv,
1105 return 0; 1239 return 0;
1106#else 1240#else
1107 return -1; 1241 return -1;
1108#endif /*CONFIG_IWL4965_HT */ 1242#endif /*CONFIG_IWL4965_HT */
1109} 1243}
1110 1244
1111/* 1245/*
1112 * Set up search table for SISO 1246 * Set up search table for SISO
1113 */ 1247 */
1114static int rs_switch_to_siso(struct iwl4965_priv *priv, 1248static int rs_switch_to_siso(struct iwl_priv *priv,
1115 struct iwl4965_lq_sta *lq_sta, 1249 struct iwl4965_lq_sta *lq_sta,
1116 struct ieee80211_conf *conf, 1250 struct ieee80211_conf *conf,
1117 struct sta_info *sta, 1251 struct sta_info *sta,
@@ -1168,13 +1302,13 @@ static int rs_switch_to_siso(struct iwl4965_priv *priv,
1168#else 1302#else
1169 return -1; 1303 return -1;
1170 1304
1171#endif /*CONFIG_IWL4965_HT */ 1305#endif /*CONFIG_IWL4965_HT */
1172} 1306}
1173 1307
1174/* 1308/*
1175 * Try to switch to new modulation mode from legacy 1309 * Try to switch to new modulation mode from legacy
1176 */ 1310 */
1177static int rs_move_legacy_other(struct iwl4965_priv *priv, 1311static int rs_move_legacy_other(struct iwl_priv *priv,
1178 struct iwl4965_lq_sta *lq_sta, 1312 struct iwl4965_lq_sta *lq_sta,
1179 struct ieee80211_conf *conf, 1313 struct ieee80211_conf *conf,
1180 struct sta_info *sta, 1314 struct sta_info *sta,
@@ -1272,7 +1406,7 @@ static int rs_move_legacy_other(struct iwl4965_priv *priv,
1272/* 1406/*
1273 * Try to switch to new modulation mode from SISO 1407 * Try to switch to new modulation mode from SISO
1274 */ 1408 */
1275static int rs_move_siso_to_other(struct iwl4965_priv *priv, 1409static int rs_move_siso_to_other(struct iwl_priv *priv,
1276 struct iwl4965_lq_sta *lq_sta, 1410 struct iwl4965_lq_sta *lq_sta,
1277 struct ieee80211_conf *conf, 1411 struct ieee80211_conf *conf,
1278 struct sta_info *sta, 1412 struct sta_info *sta,
@@ -1325,6 +1459,7 @@ static int rs_move_siso_to_other(struct iwl4965_priv *priv,
1325 break; 1459 break;
1326 case IWL_SISO_SWITCH_GI: 1460 case IWL_SISO_SWITCH_GI:
1327 IWL_DEBUG_HT("LQ: SISO SWITCH TO GI\n"); 1461 IWL_DEBUG_HT("LQ: SISO SWITCH TO GI\n");
1462
1328 memcpy(search_tbl, tbl, sz); 1463 memcpy(search_tbl, tbl, sz);
1329 search_tbl->action = 0; 1464 search_tbl->action = 0;
1330 if (search_tbl->is_SGI) 1465 if (search_tbl->is_SGI)
@@ -1367,7 +1502,7 @@ static int rs_move_siso_to_other(struct iwl4965_priv *priv,
1367/* 1502/*
1368 * Try to switch to new modulation mode from MIMO 1503 * Try to switch to new modulation mode from MIMO
1369 */ 1504 */
1370static int rs_move_mimo_to_other(struct iwl4965_priv *priv, 1505static int rs_move_mimo_to_other(struct iwl_priv *priv,
1371 struct iwl4965_lq_sta *lq_sta, 1506 struct iwl4965_lq_sta *lq_sta,
1372 struct ieee80211_conf *conf, 1507 struct ieee80211_conf *conf,
1373 struct sta_info *sta, 1508 struct sta_info *sta,
@@ -1390,6 +1525,7 @@ static int rs_move_mimo_to_other(struct iwl4965_priv *priv,
1390 case IWL_MIMO_SWITCH_ANTENNA_B: 1525 case IWL_MIMO_SWITCH_ANTENNA_B:
1391 IWL_DEBUG_HT("LQ: MIMO SWITCH TO SISO\n"); 1526 IWL_DEBUG_HT("LQ: MIMO SWITCH TO SISO\n");
1392 1527
1528
1393 /* Set up new search table for SISO */ 1529 /* Set up new search table for SISO */
1394 memcpy(search_tbl, tbl, sz); 1530 memcpy(search_tbl, tbl, sz);
1395 search_tbl->lq_type = LQ_SISO; 1531 search_tbl->lq_type = LQ_SISO;
@@ -1546,7 +1682,7 @@ static void rs_stay_in_table(struct iwl4965_lq_sta *lq_sta)
1546/* 1682/*
1547 * Do rate scaling and search for new modulation mode. 1683 * Do rate scaling and search for new modulation mode.
1548 */ 1684 */
1549static void rs_rate_scale_perform(struct iwl4965_priv *priv, 1685static void rs_rate_scale_perform(struct iwl_priv *priv,
1550 struct net_device *dev, 1686 struct net_device *dev,
1551 struct ieee80211_hdr *hdr, 1687 struct ieee80211_hdr *hdr,
1552 struct sta_info *sta) 1688 struct sta_info *sta)
@@ -1574,6 +1710,10 @@ static void rs_rate_scale_perform(struct iwl4965_priv *priv,
1574 u8 active_tbl = 0; 1710 u8 active_tbl = 0;
1575 u8 done_search = 0; 1711 u8 done_search = 0;
1576 u16 high_low; 1712 u16 high_low;
1713#ifdef CONFIG_IWL4965_HT
1714 u8 tid = MAX_TID_COUNT;
1715 __le16 *qc;
1716#endif
1577 1717
1578 IWL_DEBUG_RATE("rate scale calculate new rate for skb\n"); 1718 IWL_DEBUG_RATE("rate scale calculate new rate for skb\n");
1579 1719
@@ -1594,6 +1734,13 @@ static void rs_rate_scale_perform(struct iwl4965_priv *priv,
1594 } 1734 }
1595 lq_sta = (struct iwl4965_lq_sta *)sta->rate_ctrl_priv; 1735 lq_sta = (struct iwl4965_lq_sta *)sta->rate_ctrl_priv;
1596 1736
1737#ifdef CONFIG_IWL4965_HT
1738 qc = ieee80211_get_qos_ctrl(hdr);
1739 if (qc) {
1740 tid = (u8)(le16_to_cpu(*qc) & 0xf);
1741 rs_tl_add_packet(lq_sta, tid);
1742 }
1743#endif
1597 /* 1744 /*
1598 * Select rate-scale / modulation-mode table to work with in 1745 * Select rate-scale / modulation-mode table to work with in
1599 * the rest of this function: "search" if searching for better 1746 * the rest of this function: "search" if searching for better
@@ -1608,7 +1755,7 @@ static void rs_rate_scale_perform(struct iwl4965_priv *priv,
1608 is_green = lq_sta->is_green; 1755 is_green = lq_sta->is_green;
1609 1756
1610 /* current tx rate */ 1757 /* current tx rate */
1611 index = sta->last_txrate; 1758 index = sta->last_txrate_idx;
1612 1759
1613 IWL_DEBUG_RATE("Rate scale index %d for type %d\n", index, 1760 IWL_DEBUG_RATE("Rate scale index %d for type %d\n", index,
1614 tbl->lq_type); 1761 tbl->lq_type);
@@ -1621,7 +1768,7 @@ static void rs_rate_scale_perform(struct iwl4965_priv *priv,
1621 1768
1622 /* mask with station rate restriction */ 1769 /* mask with station rate restriction */
1623 if (is_legacy(tbl->lq_type)) { 1770 if (is_legacy(tbl->lq_type)) {
1624 if (lq_sta->phymode == (u8) MODE_IEEE80211A) 1771 if (lq_sta->band == IEEE80211_BAND_5GHZ)
1625 /* supp_rates has no CCK bits in A mode */ 1772 /* supp_rates has no CCK bits in A mode */
1626 rate_scale_index_msk = (u16) (rate_mask & 1773 rate_scale_index_msk = (u16) (rate_mask &
1627 (lq_sta->supp_rates << IWL_FIRST_OFDM_RATE)); 1774 (lq_sta->supp_rates << IWL_FIRST_OFDM_RATE));
@@ -1685,7 +1832,7 @@ static void rs_rate_scale_perform(struct iwl4965_priv *priv,
1685 if (update_lq) { 1832 if (update_lq) {
1686 rs_mcs_from_tbl(&mcs_rate, tbl, index, is_green); 1833 rs_mcs_from_tbl(&mcs_rate, tbl, index, is_green);
1687 rs_fill_link_cmd(lq_sta, &mcs_rate, &lq_sta->lq); 1834 rs_fill_link_cmd(lq_sta, &mcs_rate, &lq_sta->lq);
1688 rs_send_lq_cmd(priv, &lq_sta->lq, CMD_ASYNC); 1835 iwl_send_lq_cmd(priv, &lq_sta->lq, CMD_ASYNC);
1689 } 1836 }
1690 goto out; 1837 goto out;
1691 1838
@@ -1727,7 +1874,7 @@ static void rs_rate_scale_perform(struct iwl4965_priv *priv,
1727 tbl = &(lq_sta->lq_info[active_tbl]); 1874 tbl = &(lq_sta->lq_info[active_tbl]);
1728 1875
1729 /* Revert to "active" rate and throughput info */ 1876 /* Revert to "active" rate and throughput info */
1730 index = iwl4965_rate_index_from_plcp( 1877 index = iwl4965_hwrate_to_plcp_idx(
1731 tbl->current_rate.rate_n_flags); 1878 tbl->current_rate.rate_n_flags);
1732 current_tpt = lq_sta->last_tpt; 1879 current_tpt = lq_sta->last_tpt;
1733 1880
@@ -1850,7 +1997,7 @@ static void rs_rate_scale_perform(struct iwl4965_priv *priv,
1850 if (update_lq) { 1997 if (update_lq) {
1851 rs_mcs_from_tbl(&mcs_rate, tbl, index, is_green); 1998 rs_mcs_from_tbl(&mcs_rate, tbl, index, is_green);
1852 rs_fill_link_cmd(lq_sta, &mcs_rate, &lq_sta->lq); 1999 rs_fill_link_cmd(lq_sta, &mcs_rate, &lq_sta->lq);
1853 rs_send_lq_cmd(priv, &lq_sta->lq, CMD_ASYNC); 2000 iwl_send_lq_cmd(priv, &lq_sta->lq, CMD_ASYNC);
1854 } 2001 }
1855 2002
1856 /* Should we stay with this modulation mode, or search for a new one? */ 2003 /* Should we stay with this modulation mode, or search for a new one? */
@@ -1883,14 +2030,14 @@ static void rs_rate_scale_perform(struct iwl4965_priv *priv,
1883 rs_rate_scale_clear_window(&(tbl->win[i])); 2030 rs_rate_scale_clear_window(&(tbl->win[i]));
1884 2031
1885 /* Use new "search" start rate */ 2032 /* Use new "search" start rate */
1886 index = iwl4965_rate_index_from_plcp( 2033 index = iwl4965_hwrate_to_plcp_idx(
1887 tbl->current_rate.rate_n_flags); 2034 tbl->current_rate.rate_n_flags);
1888 2035
1889 IWL_DEBUG_HT("Switch current mcs: %X index: %d\n", 2036 IWL_DEBUG_HT("Switch current mcs: %X index: %d\n",
1890 tbl->current_rate.rate_n_flags, index); 2037 tbl->current_rate.rate_n_flags, index);
1891 rs_fill_link_cmd(lq_sta, &tbl->current_rate, 2038 rs_fill_link_cmd(lq_sta, &tbl->current_rate,
1892 &lq_sta->lq); 2039 &lq_sta->lq);
1893 rs_send_lq_cmd(priv, &lq_sta->lq, CMD_ASYNC); 2040 iwl_send_lq_cmd(priv, &lq_sta->lq, CMD_ASYNC);
1894 } 2041 }
1895 2042
1896 /* If the "active" (non-search) mode was legacy, 2043 /* If the "active" (non-search) mode was legacy,
@@ -1914,15 +2061,14 @@ static void rs_rate_scale_perform(struct iwl4965_priv *priv,
1914 * mode for a while before next round of mode comparisons. */ 2061 * mode for a while before next round of mode comparisons. */
1915 if (lq_sta->enable_counter && 2062 if (lq_sta->enable_counter &&
1916 (lq_sta->action_counter >= IWL_ACTION_LIMIT)) { 2063 (lq_sta->action_counter >= IWL_ACTION_LIMIT)) {
1917#ifdef CONFIG_IWL4965_HT_AGG 2064#ifdef CONFIG_IWL4965_HT
1918 /* If appropriate, set up aggregation! */ 2065 if ((lq_sta->last_tpt > IWL_AGG_TPT_THREHOLD) &&
1919 if ((lq_sta->last_tpt > TID_AGG_TPT_THREHOLD) && 2066 (lq_sta->tx_agg_tid_en & (1 << tid)) &&
1920 (priv->lq_mngr.agg_ctrl.auto_agg)) { 2067 (tid != MAX_TID_COUNT)) {
1921 priv->lq_mngr.agg_ctrl.tid_retry = 2068 IWL_DEBUG_HT("try to aggregate tid %d\n", tid);
1922 TID_ALL_SPECIFIED; 2069 rs_tl_turn_on_agg(priv, tid, lq_sta, sta);
1923 schedule_work(&priv->agg_work);
1924 } 2070 }
1925#endif /*CONFIG_IWL4965_HT_AGG */ 2071#endif /*CONFIG_IWL4965_HT */
1926 lq_sta->action_counter = 0; 2072 lq_sta->action_counter = 0;
1927 rs_set_stay_in_table(0, lq_sta); 2073 rs_set_stay_in_table(0, lq_sta);
1928 } 2074 }
@@ -1942,21 +2088,21 @@ static void rs_rate_scale_perform(struct iwl4965_priv *priv,
1942out: 2088out:
1943 rs_mcs_from_tbl(&tbl->current_rate, tbl, index, is_green); 2089 rs_mcs_from_tbl(&tbl->current_rate, tbl, index, is_green);
1944 i = index; 2090 i = index;
1945 sta->last_txrate = i; 2091 sta->last_txrate_idx = i;
1946 2092
1947 /* sta->txrate is an index to A mode rates which start 2093 /* sta->txrate_idx is an index to A mode rates which start
1948 * at IWL_FIRST_OFDM_RATE 2094 * at IWL_FIRST_OFDM_RATE
1949 */ 2095 */
1950 if (lq_sta->phymode == (u8) MODE_IEEE80211A) 2096 if (lq_sta->band == IEEE80211_BAND_5GHZ)
1951 sta->txrate = i - IWL_FIRST_OFDM_RATE; 2097 sta->txrate_idx = i - IWL_FIRST_OFDM_RATE;
1952 else 2098 else
1953 sta->txrate = i; 2099 sta->txrate_idx = i;
1954 2100
1955 return; 2101 return;
1956} 2102}
1957 2103
1958 2104
1959static void rs_initialize_lq(struct iwl4965_priv *priv, 2105static void rs_initialize_lq(struct iwl_priv *priv,
1960 struct ieee80211_conf *conf, 2106 struct ieee80211_conf *conf,
1961 struct sta_info *sta) 2107 struct sta_info *sta)
1962{ 2108{
@@ -1972,7 +2118,7 @@ static void rs_initialize_lq(struct iwl4965_priv *priv,
1972 goto out; 2118 goto out;
1973 2119
1974 lq_sta = (struct iwl4965_lq_sta *)sta->rate_ctrl_priv; 2120 lq_sta = (struct iwl4965_lq_sta *)sta->rate_ctrl_priv;
1975 i = sta->last_txrate; 2121 i = sta->last_txrate_idx;
1976 2122
1977 if ((lq_sta->lq.sta_id == 0xff) && 2123 if ((lq_sta->lq.sta_id == 0xff) &&
1978 (priv->iw_mode == IEEE80211_IF_TYPE_IBSS)) 2124 (priv->iw_mode == IEEE80211_IF_TYPE_IBSS))
@@ -1996,7 +2142,7 @@ static void rs_initialize_lq(struct iwl4965_priv *priv,
1996 mcs_rate.rate_n_flags |= RATE_MCS_CCK_MSK; 2142 mcs_rate.rate_n_flags |= RATE_MCS_CCK_MSK;
1997 2143
1998 tbl->antenna_type = ANT_AUX; 2144 tbl->antenna_type = ANT_AUX;
1999 rs_get_tbl_info_from_mcs(&mcs_rate, priv->phymode, tbl, &rate_idx); 2145 rs_get_tbl_info_from_mcs(&mcs_rate, priv->band, tbl, &rate_idx);
2000 if (!rs_is_ant_connected(priv->valid_antenna, tbl->antenna_type)) 2146 if (!rs_is_ant_connected(priv->valid_antenna, tbl->antenna_type))
2001 rs_toggle_antenna(&mcs_rate, tbl); 2147 rs_toggle_antenna(&mcs_rate, tbl);
2002 2148
@@ -2004,13 +2150,14 @@ static void rs_initialize_lq(struct iwl4965_priv *priv,
2004 tbl->current_rate.rate_n_flags = mcs_rate.rate_n_flags; 2150 tbl->current_rate.rate_n_flags = mcs_rate.rate_n_flags;
2005 rs_get_expected_tpt_table(lq_sta, tbl); 2151 rs_get_expected_tpt_table(lq_sta, tbl);
2006 rs_fill_link_cmd(lq_sta, &mcs_rate, &lq_sta->lq); 2152 rs_fill_link_cmd(lq_sta, &mcs_rate, &lq_sta->lq);
2007 rs_send_lq_cmd(priv, &lq_sta->lq, CMD_ASYNC); 2153 iwl_send_lq_cmd(priv, &lq_sta->lq, CMD_ASYNC);
2008 out: 2154 out:
2009 return; 2155 return;
2010} 2156}
2011 2157
2012static void rs_get_rate(void *priv_rate, struct net_device *dev, 2158static void rs_get_rate(void *priv_rate, struct net_device *dev,
2013 struct ieee80211_hw_mode *mode, struct sk_buff *skb, 2159 struct ieee80211_supported_band *sband,
2160 struct sk_buff *skb,
2014 struct rate_selection *sel) 2161 struct rate_selection *sel)
2015{ 2162{
2016 2163
@@ -2020,11 +2167,13 @@ static void rs_get_rate(void *priv_rate, struct net_device *dev,
2020 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 2167 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
2021 struct sta_info *sta; 2168 struct sta_info *sta;
2022 u16 fc; 2169 u16 fc;
2023 struct iwl4965_priv *priv = (struct iwl4965_priv *)priv_rate; 2170 struct iwl_priv *priv = (struct iwl_priv *)priv_rate;
2024 struct iwl4965_lq_sta *lq_sta; 2171 struct iwl4965_lq_sta *lq_sta;
2025 2172
2026 IWL_DEBUG_RATE_LIMIT("rate scale calculate new rate for skb\n"); 2173 IWL_DEBUG_RATE_LIMIT("rate scale calculate new rate for skb\n");
2027 2174
2175 rcu_read_lock();
2176
2028 sta = sta_info_get(local, hdr->addr1); 2177 sta = sta_info_get(local, hdr->addr1);
2029 2178
2030 /* Send management frames and broadcast/multicast data using lowest 2179 /* Send management frames and broadcast/multicast data using lowest
@@ -2032,14 +2181,12 @@ static void rs_get_rate(void *priv_rate, struct net_device *dev,
2032 fc = le16_to_cpu(hdr->frame_control); 2181 fc = le16_to_cpu(hdr->frame_control);
2033 if (!ieee80211_is_data(fc) || is_multicast_ether_addr(hdr->addr1) || 2182 if (!ieee80211_is_data(fc) || is_multicast_ether_addr(hdr->addr1) ||
2034 !sta || !sta->rate_ctrl_priv) { 2183 !sta || !sta->rate_ctrl_priv) {
2035 sel->rate = rate_lowest(local, local->oper_hw_mode, sta); 2184 sel->rate = rate_lowest(local, sband, sta);
2036 if (sta) 2185 goto out;
2037 sta_info_put(sta);
2038 return;
2039 } 2186 }
2040 2187
2041 lq_sta = (struct iwl4965_lq_sta *)sta->rate_ctrl_priv; 2188 lq_sta = (struct iwl4965_lq_sta *)sta->rate_ctrl_priv;
2042 i = sta->last_txrate; 2189 i = sta->last_txrate_idx;
2043 2190
2044 if ((priv->iw_mode == IEEE80211_IF_TYPE_IBSS) && 2191 if ((priv->iw_mode == IEEE80211_IF_TYPE_IBSS) &&
2045 !lq_sta->ibss_sta_added) { 2192 !lq_sta->ibss_sta_added) {
@@ -2062,14 +2209,15 @@ static void rs_get_rate(void *priv_rate, struct net_device *dev,
2062 goto done; 2209 goto done;
2063 } 2210 }
2064 2211
2065 done: 2212done:
2066 if ((i < 0) || (i > IWL_RATE_COUNT)) { 2213 if ((i < 0) || (i > IWL_RATE_COUNT)) {
2067 sel->rate = rate_lowest(local, local->oper_hw_mode, sta); 2214 sel->rate = rate_lowest(local, sband, sta);
2068 return; 2215 goto out;
2069 } 2216 }
2070 sta_info_put(sta);
2071 2217
2072 sel->rate = &priv->ieee_rates[i]; 2218 sel->rate = &priv->ieee_rates[i];
2219out:
2220 rcu_read_unlock();
2073} 2221}
2074 2222
2075static void *rs_alloc_sta(void *priv, gfp_t gfp) 2223static void *rs_alloc_sta(void *priv, gfp_t gfp)
@@ -2099,13 +2247,15 @@ static void rs_rate_init(void *priv_rate, void *priv_sta,
2099{ 2247{
2100 int i, j; 2248 int i, j;
2101 struct ieee80211_conf *conf = &local->hw.conf; 2249 struct ieee80211_conf *conf = &local->hw.conf;
2102 struct ieee80211_hw_mode *mode = local->oper_hw_mode; 2250 struct ieee80211_supported_band *sband;
2103 struct iwl4965_priv *priv = (struct iwl4965_priv *)priv_rate; 2251 struct iwl_priv *priv = (struct iwl_priv *)priv_rate;
2104 struct iwl4965_lq_sta *lq_sta = priv_sta; 2252 struct iwl4965_lq_sta *lq_sta = priv_sta;
2105 2253
2254 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
2255
2106 lq_sta->flush_timer = 0; 2256 lq_sta->flush_timer = 0;
2107 lq_sta->supp_rates = sta->supp_rates; 2257 lq_sta->supp_rates = sta->supp_rates[sband->band];
2108 sta->txrate = 3; 2258 sta->txrate_idx = 3;
2109 for (j = 0; j < LQ_SIZE; j++) 2259 for (j = 0; j < LQ_SIZE; j++)
2110 for (i = 0; i < IWL_RATE_COUNT; i++) 2260 for (i = 0; i < IWL_RATE_COUNT; i++)
2111 rs_rate_scale_clear_window(&(lq_sta->lq_info[j].win[i])); 2261 rs_rate_scale_clear_window(&(lq_sta->lq_info[j].win[i]));
@@ -2140,15 +2290,15 @@ static void rs_rate_init(void *priv_rate, void *priv_sta,
2140 } 2290 }
2141 2291
2142 /* Find highest tx rate supported by hardware and destination station */ 2292 /* Find highest tx rate supported by hardware and destination station */
2143 for (i = 0; i < mode->num_rates; i++) { 2293 for (i = 0; i < sband->n_bitrates; i++)
2144 if ((sta->supp_rates & BIT(i)) && 2294 if (sta->supp_rates[sband->band] & BIT(i))
2145 (mode->rates[i].flags & IEEE80211_RATE_SUPPORTED)) 2295 sta->txrate_idx = i;
2146 sta->txrate = i; 2296
2147 } 2297 sta->last_txrate_idx = sta->txrate_idx;
2148 sta->last_txrate = sta->txrate; 2298 /* WTF is with this bogus comment? A doesn't have cck rates */
2149 /* For MODE_IEEE80211A, cck rates are at end of rate table */ 2299 /* For MODE_IEEE80211A, cck rates are at end of rate table */
2150 if (local->hw.conf.phymode == MODE_IEEE80211A) 2300 if (local->hw.conf.channel->band == IEEE80211_BAND_5GHZ)
2151 sta->last_txrate += IWL_FIRST_OFDM_RATE; 2301 sta->last_txrate_idx += IWL_FIRST_OFDM_RATE;
2152 2302
2153 lq_sta->is_dup = 0; 2303 lq_sta->is_dup = 0;
2154 lq_sta->valid_antenna = priv->valid_antenna; 2304 lq_sta->valid_antenna = priv->valid_antenna;
@@ -2157,7 +2307,7 @@ static void rs_rate_init(void *priv_rate, void *priv_sta,
2157 lq_sta->active_rate = priv->active_rate; 2307 lq_sta->active_rate = priv->active_rate;
2158 lq_sta->active_rate &= ~(0x1000); 2308 lq_sta->active_rate &= ~(0x1000);
2159 lq_sta->active_rate_basic = priv->active_rate_basic; 2309 lq_sta->active_rate_basic = priv->active_rate_basic;
2160 lq_sta->phymode = priv->phymode; 2310 lq_sta->band = priv->band;
2161#ifdef CONFIG_IWL4965_HT 2311#ifdef CONFIG_IWL4965_HT
2162 /* 2312 /*
2163 * active_siso_rate mask includes 9 MBits (bit 5), and CCK (bits 0-3), 2313 * active_siso_rate mask includes 9 MBits (bit 5), and CCK (bits 0-3),
@@ -2180,6 +2330,8 @@ static void rs_rate_init(void *priv_rate, void *priv_sta,
2180 IWL_DEBUG_HT("SISO RATE 0x%X MIMO RATE 0x%X\n", 2330 IWL_DEBUG_HT("SISO RATE 0x%X MIMO RATE 0x%X\n",
2181 lq_sta->active_siso_rate, 2331 lq_sta->active_siso_rate,
2182 lq_sta->active_mimo_rate); 2332 lq_sta->active_mimo_rate);
2333 /* as default allow aggregation for all tids */
2334 lq_sta->tx_agg_tid_en = IWL_AGG_ALL_TID;
2183#endif /*CONFIG_IWL4965_HT*/ 2335#endif /*CONFIG_IWL4965_HT*/
2184#ifdef CONFIG_MAC80211_DEBUGFS 2336#ifdef CONFIG_MAC80211_DEBUGFS
2185 lq_sta->drv = priv; 2337 lq_sta->drv = priv;
@@ -2193,7 +2345,7 @@ static void rs_rate_init(void *priv_rate, void *priv_sta,
2193 2345
2194static void rs_fill_link_cmd(struct iwl4965_lq_sta *lq_sta, 2346static void rs_fill_link_cmd(struct iwl4965_lq_sta *lq_sta,
2195 struct iwl4965_rate *tx_mcs, 2347 struct iwl4965_rate *tx_mcs,
2196 struct iwl4965_link_quality_cmd *lq_cmd) 2348 struct iwl_link_quality_cmd *lq_cmd)
2197{ 2349{
2198 int index = 0; 2350 int index = 0;
2199 int rate_idx; 2351 int rate_idx;
@@ -2207,7 +2359,7 @@ static void rs_fill_link_cmd(struct iwl4965_lq_sta *lq_sta,
2207 rs_dbgfs_set_mcs(lq_sta, tx_mcs, index); 2359 rs_dbgfs_set_mcs(lq_sta, tx_mcs, index);
2208 2360
2209 /* Interpret rate_n_flags */ 2361 /* Interpret rate_n_flags */
2210 rs_get_tbl_info_from_mcs(tx_mcs, lq_sta->phymode, 2362 rs_get_tbl_info_from_mcs(tx_mcs, lq_sta->band,
2211 &tbl_type, &rate_idx); 2363 &tbl_type, &rate_idx);
2212 2364
2213 /* How many times should we repeat the initial rate? */ 2365 /* How many times should we repeat the initial rate? */
@@ -2261,7 +2413,7 @@ static void rs_fill_link_cmd(struct iwl4965_lq_sta *lq_sta,
2261 index++; 2413 index++;
2262 } 2414 }
2263 2415
2264 rs_get_tbl_info_from_mcs(&new_rate, lq_sta->phymode, &tbl_type, 2416 rs_get_tbl_info_from_mcs(&new_rate, lq_sta->band, &tbl_type,
2265 &rate_idx); 2417 &rate_idx);
2266 2418
2267 /* Indicate to uCode which entries might be MIMO. 2419 /* Indicate to uCode which entries might be MIMO.
@@ -2318,17 +2470,11 @@ static void rs_free(void *priv_rate)
2318 2470
2319static void rs_clear(void *priv_rate) 2471static void rs_clear(void *priv_rate)
2320{ 2472{
2321 struct iwl4965_priv *priv = (struct iwl4965_priv *) priv_rate; 2473 struct iwl_priv *priv = (struct iwl_priv *) priv_rate;
2322 2474
2323 IWL_DEBUG_RATE("enter\n"); 2475 IWL_DEBUG_RATE("enter\n");
2324 2476
2325 priv->lq_mngr.lq_ready = 0; 2477 priv->lq_mngr.lq_ready = 0;
2326#ifdef CONFIG_IWL4965_HT
2327#ifdef CONFIG_IWL4965_HT_AGG
2328 if (priv->lq_mngr.agg_ctrl.granted_ba)
2329 iwl4965_turn_off_agg(priv, TID_ALL_SPECIFIED);
2330#endif /*CONFIG_IWL4965_HT_AGG */
2331#endif /* CONFIG_IWL4965_HT */
2332 2478
2333 IWL_DEBUG_RATE("leave\n"); 2479 IWL_DEBUG_RATE("leave\n");
2334} 2480}
@@ -2354,7 +2500,7 @@ static void rs_dbgfs_set_mcs(struct iwl4965_lq_sta *lq_sta,
2354{ 2500{
2355 u32 base_rate; 2501 u32 base_rate;
2356 2502
2357 if (lq_sta->phymode == (u8) MODE_IEEE80211A) 2503 if (lq_sta->band == IEEE80211_BAND_5GHZ)
2358 base_rate = 0x800D; 2504 base_rate = 0x800D;
2359 else 2505 else
2360 base_rate = 0x820A; 2506 base_rate = 0x820A;
@@ -2398,7 +2544,7 @@ static ssize_t rs_sta_dbgfs_scale_table_write(struct file *file,
2398 2544
2399 if (lq_sta->dbg_fixed.rate_n_flags) { 2545 if (lq_sta->dbg_fixed.rate_n_flags) {
2400 rs_fill_link_cmd(lq_sta, &lq_sta->dbg_fixed, &lq_sta->lq); 2546 rs_fill_link_cmd(lq_sta, &lq_sta->dbg_fixed, &lq_sta->lq);
2401 rs_send_lq_cmd(lq_sta->drv, &lq_sta->lq, CMD_ASYNC); 2547 iwl_send_lq_cmd(lq_sta->drv, &lq_sta->lq, CMD_ASYNC);
2402 } 2548 }
2403 2549
2404 return count; 2550 return count;
@@ -2495,6 +2641,12 @@ static void rs_add_debugfs(void *priv, void *priv_sta,
2495 lq_sta->rs_sta_dbgfs_stats_table_file = 2641 lq_sta->rs_sta_dbgfs_stats_table_file =
2496 debugfs_create_file("rate_stats_table", 0600, dir, 2642 debugfs_create_file("rate_stats_table", 0600, dir,
2497 lq_sta, &rs_sta_dbgfs_stats_table_ops); 2643 lq_sta, &rs_sta_dbgfs_stats_table_ops);
2644#ifdef CONFIG_IWL4965_HT
2645 lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file =
2646 debugfs_create_u8("tx_agg_tid_enable", 0600, dir,
2647 &lq_sta->tx_agg_tid_en);
2648#endif
2649
2498} 2650}
2499 2651
2500static void rs_remove_debugfs(void *priv, void *priv_sta) 2652static void rs_remove_debugfs(void *priv, void *priv_sta)
@@ -2502,6 +2654,9 @@ static void rs_remove_debugfs(void *priv, void *priv_sta)
2502 struct iwl4965_lq_sta *lq_sta = priv_sta; 2654 struct iwl4965_lq_sta *lq_sta = priv_sta;
2503 debugfs_remove(lq_sta->rs_sta_dbgfs_scale_table_file); 2655 debugfs_remove(lq_sta->rs_sta_dbgfs_scale_table_file);
2504 debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file); 2656 debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file);
2657#ifdef CONFIG_IWL4965_HT
2658 debugfs_remove(lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file);
2659#endif
2505} 2660}
2506#endif 2661#endif
2507 2662
@@ -2525,7 +2680,7 @@ static struct rate_control_ops rs_ops = {
2525int iwl4965_fill_rs_info(struct ieee80211_hw *hw, char *buf, u8 sta_id) 2680int iwl4965_fill_rs_info(struct ieee80211_hw *hw, char *buf, u8 sta_id)
2526{ 2681{
2527 struct ieee80211_local *local = hw_to_local(hw); 2682 struct ieee80211_local *local = hw_to_local(hw);
2528 struct iwl4965_priv *priv = hw->priv; 2683 struct iwl_priv *priv = hw->priv;
2529 struct iwl4965_lq_sta *lq_sta; 2684 struct iwl4965_lq_sta *lq_sta;
2530 struct sta_info *sta; 2685 struct sta_info *sta;
2531 int cnt = 0, i; 2686 int cnt = 0, i;
@@ -2534,13 +2689,15 @@ int iwl4965_fill_rs_info(struct ieee80211_hw *hw, char *buf, u8 sta_id)
2534 u32 max_time = 0; 2689 u32 max_time = 0;
2535 u8 lq_type, antenna; 2690 u8 lq_type, antenna;
2536 2691
2692 rcu_read_lock();
2693
2537 sta = sta_info_get(local, priv->stations[sta_id].sta.sta.addr); 2694 sta = sta_info_get(local, priv->stations[sta_id].sta.sta.addr);
2538 if (!sta || !sta->rate_ctrl_priv) { 2695 if (!sta || !sta->rate_ctrl_priv) {
2539 if (sta) { 2696 if (sta)
2540 sta_info_put(sta);
2541 IWL_DEBUG_RATE("leave - no private rate data!\n"); 2697 IWL_DEBUG_RATE("leave - no private rate data!\n");
2542 } else 2698 else
2543 IWL_DEBUG_RATE("leave - no station!\n"); 2699 IWL_DEBUG_RATE("leave - no station!\n");
2700 rcu_read_unlock();
2544 return sprintf(buf, "station %d not found\n", sta_id); 2701 return sprintf(buf, "station %d not found\n", sta_id);
2545 } 2702 }
2546 2703
@@ -2605,25 +2762,25 @@ int iwl4965_fill_rs_info(struct ieee80211_hw *hw, char *buf, u8 sta_id)
2605 2762
2606 cnt += sprintf(&buf[cnt], "\nrate scale type %d antenna %d " 2763 cnt += sprintf(&buf[cnt], "\nrate scale type %d antenna %d "
2607 "active_search %d rate index %d\n", lq_type, antenna, 2764 "active_search %d rate index %d\n", lq_type, antenna,
2608 lq_sta->search_better_tbl, sta->last_txrate); 2765 lq_sta->search_better_tbl, sta->last_txrate_idx);
2609 2766
2610 sta_info_put(sta); 2767 rcu_read_unlock();
2611 return cnt; 2768 return cnt;
2612} 2769}
2613 2770
2614void iwl4965_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id) 2771void iwl4965_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id)
2615{ 2772{
2616 struct iwl4965_priv *priv = hw->priv; 2773 struct iwl_priv *priv = hw->priv;
2617 2774
2618 priv->lq_mngr.lq_ready = 1; 2775 priv->lq_mngr.lq_ready = 1;
2619} 2776}
2620 2777
2621void iwl4965_rate_control_register(struct ieee80211_hw *hw) 2778int iwl4965_rate_control_register(void)
2622{ 2779{
2623 ieee80211_rate_control_register(&rs_ops); 2780 return ieee80211_rate_control_register(&rs_ops);
2624} 2781}
2625 2782
2626void iwl4965_rate_control_unregister(struct ieee80211_hw *hw) 2783void iwl4965_rate_control_unregister(void)
2627{ 2784{
2628 ieee80211_rate_control_unregister(&rs_ops); 2785 ieee80211_rate_control_unregister(&rs_ops);
2629} 2786}
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965-rs.h b/drivers/net/wireless/iwlwifi/iwl-4965-rs.h
index 55f707382787..866e378aa385 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965-rs.h
+++ b/drivers/net/wireless/iwlwifi/iwl-4965-rs.h
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2003 - 2007 Intel Corporation. All rights reserved. 3 * Copyright(c) 2003 - 2008 Intel Corporation. All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as 6 * under the terms of version 2 of the GNU General Public License as
@@ -212,6 +212,18 @@ enum {
212 212
213#define LQ_SIZE 2 /* 2 mode tables: "Active" and "Search" */ 213#define LQ_SIZE 2 /* 2 mode tables: "Active" and "Search" */
214 214
215/* load per tid defines for A-MPDU activation */
216#define IWL_AGG_TPT_THREHOLD 0
217#define IWL_AGG_LOAD_THRESHOLD 10
218#define IWL_AGG_ALL_TID 0xff
219#define TID_QUEUE_CELL_SPACING 50 /*mS */
220#define TID_QUEUE_MAX_SIZE 20
221#define TID_ROUND_VALUE 5 /* mS */
222#define TID_MAX_LOAD_COUNT 8
223
224#define TID_MAX_TIME_DIFF ((TID_QUEUE_MAX_SIZE - 1) * TID_QUEUE_CELL_SPACING)
225#define TIME_WRAP_AROUND(x, y) (((y) > (x)) ? (y) - (x) : (0-(x)) + (y))
226
215extern const struct iwl4965_rate_info iwl4965_rates[IWL_RATE_COUNT]; 227extern const struct iwl4965_rate_info iwl4965_rates[IWL_RATE_COUNT];
216 228
217enum iwl4965_table_type { 229enum iwl4965_table_type {
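The new per-TID load macros above size the history window used by rs_tl_add_packet(): TID_MAX_TIME_DIFF works out to (20 - 1) * 50 = 950 ms, and TIME_WRAP_AROUND() keeps the age computation usable if the 32-bit millisecond timestamp wraps between samples. A minimal standalone check of both, mirroring the definitions just added (the sample timestamps are made up):

#include <stdio.h>
#include <stdint.h>

#define TID_QUEUE_CELL_SPACING 50      /* ms */
#define TID_QUEUE_MAX_SIZE     20
#define TID_MAX_TIME_DIFF  ((TID_QUEUE_MAX_SIZE - 1) * TID_QUEUE_CELL_SPACING)
#define TIME_WRAP_AROUND(x, y) (((y) > (x)) ? (y) - (x) : (0-(x)) + (y))

int main(void)
{
	uint32_t before = 0xfffffff0u;     /* timestamp taken just before the wrap */
	uint32_t after  = 0x00000010u;     /* 32 ms later, counter has wrapped */

	printf("window depth: %d ms\n", TID_MAX_TIME_DIFF);
	printf("wrapped diff: %u ms\n", (unsigned)TIME_WRAP_AROUND(before, after));
	return 0;
}

Because the subtraction happens in unsigned arithmetic, the wrapped branch reduces to (2^32 - x) + y, i.e. the true elapsed time modulo 2^32.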
@@ -247,7 +259,7 @@ static inline u8 iwl4965_get_prev_ieee_rate(u8 rate_index)
247 return rate; 259 return rate;
248} 260}
249 261
250extern int iwl4965_rate_index_from_plcp(int plcp); 262extern int iwl4965_hwrate_to_plcp_idx(u32 rate_n_flags);
251 263
252/** 264/**
253 * iwl4965_fill_rs_info - Fill an output text buffer with the rate representation 265 * iwl4965_fill_rs_info - Fill an output text buffer with the rate representation
@@ -276,7 +288,7 @@ extern void iwl4965_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id);
276 * ieee80211_register_hw 288 * ieee80211_register_hw
277 * 289 *
278 */ 290 */
279extern void iwl4965_rate_control_register(struct ieee80211_hw *hw); 291extern int iwl4965_rate_control_register(void);
280 292
281/** 293/**
282 * iwl4965_rate_control_unregister - Unregister the rate control callbacks 294 * iwl4965_rate_control_unregister - Unregister the rate control callbacks
@@ -284,6 +296,6 @@ extern void iwl4965_rate_control_register(struct ieee80211_hw *hw);
284 * This should be called after calling ieee80211_unregister_hw, but before 296 * This should be called after calling ieee80211_unregister_hw, but before
285 * the driver is unloaded. 297 * the driver is unloaded.
286 */ 298 */
287extern void iwl4965_rate_control_unregister(struct ieee80211_hw *hw); 299extern void iwl4965_rate_control_unregister(void);
288 300
289#endif 301#endif
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
index 65767570be68..17f629fb96ff 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
+++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2003 - 2007 Intel Corporation. All rights reserved. 3 * Copyright(c) 2003 - 2008 Intel Corporation. All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as 6 * under the terms of version 2 of the GNU General Public License as
@@ -38,10 +38,21 @@
38#include <linux/etherdevice.h> 38#include <linux/etherdevice.h>
39#include <asm/unaligned.h> 39#include <asm/unaligned.h>
40 40
41#include "iwl-eeprom.h"
41#include "iwl-4965.h" 42#include "iwl-4965.h"
43#include "iwl-core.h"
44#include "iwl-io.h"
42#include "iwl-helpers.h" 45#include "iwl-helpers.h"
43 46
44static void iwl4965_hw_card_show_info(struct iwl4965_priv *priv); 47/* module parameters */
48static struct iwl_mod_params iwl4965_mod_params = {
49 .num_of_queues = IWL4965_MAX_NUM_QUEUES,
50 .enable_qos = 1,
51 .amsdu_size_8K = 1,
52 /* the rest are 0 by default */
53};
54
55static void iwl4965_hw_card_show_info(struct iwl_priv *priv);
45 56
46#define IWL_DECLARE_RATE_INFO(r, s, ip, in, rp, rn, pp, np) \ 57#define IWL_DECLARE_RATE_INFO(r, s, ip, in, rp, rn, pp, np) \
47 [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \ 58 [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \
@@ -79,13 +90,277 @@ const struct iwl4965_rate_info iwl4965_rates[IWL_RATE_COUNT] = {
79 IWL_DECLARE_RATE_INFO(60, 60, 48, INV, 48, INV, 48, INV),/* 60mbps */ 90 IWL_DECLARE_RATE_INFO(60, 60, 48, INV, 48, INV, 48, INV),/* 60mbps */
80}; 91};
81 92
93#ifdef CONFIG_IWL4965_HT
94
95static const u16 default_tid_to_tx_fifo[] = {
96 IWL_TX_FIFO_AC1,
97 IWL_TX_FIFO_AC0,
98 IWL_TX_FIFO_AC0,
99 IWL_TX_FIFO_AC1,
100 IWL_TX_FIFO_AC2,
101 IWL_TX_FIFO_AC2,
102 IWL_TX_FIFO_AC3,
103 IWL_TX_FIFO_AC3,
104 IWL_TX_FIFO_NONE,
105 IWL_TX_FIFO_NONE,
106 IWL_TX_FIFO_NONE,
107 IWL_TX_FIFO_NONE,
108 IWL_TX_FIFO_NONE,
109 IWL_TX_FIFO_NONE,
110 IWL_TX_FIFO_NONE,
111 IWL_TX_FIFO_NONE,
112 IWL_TX_FIFO_AC3
113};
114
115#endif /*CONFIG_IWL4965_HT */
116
117/* check contents of special bootstrap uCode SRAM */
118static int iwl4965_verify_bsm(struct iwl_priv *priv)
119{
120 __le32 *image = priv->ucode_boot.v_addr;
121 u32 len = priv->ucode_boot.len;
122 u32 reg;
123 u32 val;
124
125 IWL_DEBUG_INFO("Begin verify bsm\n");
126
127 /* verify BSM SRAM contents */
128 val = iwl_read_prph(priv, BSM_WR_DWCOUNT_REG);
129 for (reg = BSM_SRAM_LOWER_BOUND;
130 reg < BSM_SRAM_LOWER_BOUND + len;
131 reg += sizeof(u32), image++) {
132 val = iwl_read_prph(priv, reg);
133 if (val != le32_to_cpu(*image)) {
134 IWL_ERROR("BSM uCode verification failed at "
135 "addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n",
136 BSM_SRAM_LOWER_BOUND,
137 reg - BSM_SRAM_LOWER_BOUND, len,
138 val, le32_to_cpu(*image));
139 return -EIO;
140 }
141 }
142
143 IWL_DEBUG_INFO("BSM bootstrap uCode image OK\n");
144
145 return 0;
146}
147
148/**
149 * iwl4965_load_bsm - Load bootstrap instructions
150 *
151 * BSM operation:
152 *
153 * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program
154 * in special SRAM that does not power down during RFKILL. When powering back
155 * up after power-saving sleeps (or during initial uCode load), the BSM loads
156 * the bootstrap program into the on-board processor, and starts it.
157 *
158 * The bootstrap program loads (via DMA) instructions and data for a new
159 * program from host DRAM locations indicated by the host driver in the
160 * BSM_DRAM_* registers. Once the new program is loaded, it starts
161 * automatically.
162 *
163 * When initializing the NIC, the host driver points the BSM to the
164 * "initialize" uCode image. This uCode sets up some internal data, then
165 * notifies host via "initialize alive" that it is complete.
166 *
167 * The host then replaces the BSM_DRAM_* pointer values to point to the
168 * normal runtime uCode instructions and a backup uCode data cache buffer
169 * (filled initially with starting data values for the on-board processor),
170 * then triggers the "initialize" uCode to load and launch the runtime uCode,
171 * which begins normal operation.
172 *
173 * When doing a power-save shutdown, runtime uCode saves data SRAM into
174 * the backup data cache in DRAM before SRAM is powered down.
175 *
176 * When powering back up, the BSM loads the bootstrap program. This reloads
177 * the runtime uCode instructions and the backup data cache into SRAM,
178 * and re-launches the runtime uCode from where it left off.
179 */
180static int iwl4965_load_bsm(struct iwl_priv *priv)
181{
182 __le32 *image = priv->ucode_boot.v_addr;
183 u32 len = priv->ucode_boot.len;
184 dma_addr_t pinst;
185 dma_addr_t pdata;
186 u32 inst_len;
187 u32 data_len;
188 int i;
189 u32 done;
190 u32 reg_offset;
191 int ret;
192
193 IWL_DEBUG_INFO("Begin load bsm\n");
194
195 /* make sure bootstrap program is no larger than BSM's SRAM size */
196 if (len > IWL_MAX_BSM_SIZE)
197 return -EINVAL;
198
199 /* Tell bootstrap uCode where to find the "Initialize" uCode
200 * in host DRAM ... host DRAM physical address bits 35:4 for 4965.
201 * NOTE: iwl4965_initialize_alive_start() will replace these values,
202 * after the "initialize" uCode has run, to point to
203 * runtime/protocol instructions and backup data cache. */
204 pinst = priv->ucode_init.p_addr >> 4;
205 pdata = priv->ucode_init_data.p_addr >> 4;
206 inst_len = priv->ucode_init.len;
207 data_len = priv->ucode_init_data.len;
208
209 ret = iwl_grab_nic_access(priv);
210 if (ret)
211 return ret;
212
213 iwl_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
214 iwl_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
215 iwl_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, inst_len);
216 iwl_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, data_len);
217
218 /* Fill BSM memory with bootstrap instructions */
219 for (reg_offset = BSM_SRAM_LOWER_BOUND;
220 reg_offset < BSM_SRAM_LOWER_BOUND + len;
221 reg_offset += sizeof(u32), image++)
222 _iwl_write_prph(priv, reg_offset, le32_to_cpu(*image));
223
224 ret = iwl4965_verify_bsm(priv);
225 if (ret) {
226 iwl_release_nic_access(priv);
227 return ret;
228 }
229
230 /* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */
231 iwl_write_prph(priv, BSM_WR_MEM_SRC_REG, 0x0);
232 iwl_write_prph(priv, BSM_WR_MEM_DST_REG, RTC_INST_LOWER_BOUND);
233 iwl_write_prph(priv, BSM_WR_DWCOUNT_REG, len / sizeof(u32));
234
235 /* Load bootstrap code into instruction SRAM now,
236 * to prepare to load "initialize" uCode */
237 iwl_write_prph(priv, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START);
238
239 /* Wait for load of bootstrap uCode to finish */
240 for (i = 0; i < 100; i++) {
241 done = iwl_read_prph(priv, BSM_WR_CTRL_REG);
242 if (!(done & BSM_WR_CTRL_REG_BIT_START))
243 break;
244 udelay(10);
245 }
246 if (i < 100)
247 IWL_DEBUG_INFO("BSM write complete, poll %d iterations\n", i);
248 else {
249 IWL_ERROR("BSM write did not complete!\n");
250 return -EIO;
251 }
252
253 /* Enable future boot loads whenever power management unit triggers it
254 * (e.g. when powering back up after power-save shutdown) */
255 iwl_write_prph(priv, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START_EN);
256
257 iwl_release_nic_access(priv);
258
259 return 0;
260}
261
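iwl4965_load_bsm() above kicks the copy off by writing BSM_WR_CTRL_REG_BIT_START and then polls up to 100 times, 10 us apart, until the hardware clears the bit. A minimal standalone version of just that bounded-poll pattern is sketched below; the register-read callback, the START_BIT value and the fake hardware are stand-ins for illustration, not the real 4965 register interface.

#include <stdio.h>
#include <stdint.h>

#define START_BIT 0x80000000u          /* placeholder for BSM_WR_CTRL_REG_BIT_START */
#define MAX_POLLS 100

/* Poll read_reg(ctx) until START_BIT clears or MAX_POLLS attempts pass.
 * Returns the number of polls used, or -1 on timeout. */
static int wait_for_start_clear(uint32_t (*read_reg)(void *ctx), void *ctx)
{
	int i;

	for (i = 0; i < MAX_POLLS; i++) {
		if (!(read_reg(ctx) & START_BIT))
			return i;
		/* the driver udelay(10)s here; omitted in this sketch */
	}
	return -1;
}

/* Fake hardware: the start bit stays set for the first few reads. */
static uint32_t fake_read(void *ctx)
{
	int *busy_reads_left = ctx;

	return (*busy_reads_left)-- > 0 ? START_BIT : 0;
}

int main(void)
{
	int busy_reads = 3;
	int polls = wait_for_start_clear(fake_read, &busy_reads);

	if (polls < 0)
		printf("BSM write did not complete!\n");
	else
		printf("BSM write complete, poll %d iterations\n", polls);
	return 0;
}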
262static int iwl4965_init_drv(struct iwl_priv *priv)
263{
264 int ret;
265 int i;
266
267 priv->antenna = (enum iwl4965_antenna)priv->cfg->mod_params->antenna;
268 priv->retry_rate = 1;
269 priv->ibss_beacon = NULL;
270
271 spin_lock_init(&priv->lock);
272 spin_lock_init(&priv->power_data.lock);
273 spin_lock_init(&priv->sta_lock);
274 spin_lock_init(&priv->hcmd_lock);
275 spin_lock_init(&priv->lq_mngr.lock);
276
277 priv->shared_virt = pci_alloc_consistent(priv->pci_dev,
278 sizeof(struct iwl4965_shared),
279 &priv->shared_phys);
280
281 if (!priv->shared_virt) {
282 ret = -ENOMEM;
283 goto err;
284 }
285
286 memset(priv->shared_virt, 0, sizeof(struct iwl4965_shared));
287
288
289 for (i = 0; i < IWL_IBSS_MAC_HASH_SIZE; i++)
290 INIT_LIST_HEAD(&priv->ibss_mac_hash[i]);
291
292 INIT_LIST_HEAD(&priv->free_frames);
293
294 mutex_init(&priv->mutex);
295
296 /* Clear the driver's (not device's) station table */
297 iwlcore_clear_stations_table(priv);
298
299 priv->data_retry_limit = -1;
300 priv->ieee_channels = NULL;
301 priv->ieee_rates = NULL;
302 priv->band = IEEE80211_BAND_2GHZ;
303
304 priv->iw_mode = IEEE80211_IF_TYPE_STA;
305
306 priv->use_ant_b_for_management_frame = 1; /* start with ant B */
307 priv->valid_antenna = 0x7; /* assume all 3 connected */
308 priv->ps_mode = IWL_MIMO_PS_NONE;
309
310 /* Choose which receivers/antennas to use */
311 iwl4965_set_rxon_chain(priv);
312
313 iwlcore_reset_qos(priv);
314
315 priv->qos_data.qos_active = 0;
316 priv->qos_data.qos_cap.val = 0;
317
318 iwlcore_set_rxon_channel(priv, IEEE80211_BAND_2GHZ, 6);
319
320 priv->rates_mask = IWL_RATES_MASK;
321 /* If power management is turned on, default to AC mode */
322 priv->power_mode = IWL_POWER_AC;
323 priv->user_txpower_limit = IWL_DEFAULT_TX_POWER;
324
325 ret = iwl_init_channel_map(priv);
326 if (ret) {
327 IWL_ERROR("initializing regulatory failed: %d\n", ret);
328 goto err;
329 }
330
331 ret = iwl4965_init_geos(priv);
332 if (ret) {
333 IWL_ERROR("initializing geos failed: %d\n", ret);
334 goto err_free_channel_map;
335 }
336
337 ret = ieee80211_register_hw(priv->hw);
338 if (ret) {
339 IWL_ERROR("Failed to register network device (error %d)\n",
340 ret);
341 goto err_free_geos;
342 }
343
344 priv->hw->conf.beacon_int = 100;
345 priv->mac80211_registered = 1;
346
347 return 0;
348
349err_free_geos:
350 iwl4965_free_geos(priv);
351err_free_channel_map:
352 iwl_free_channel_map(priv);
353err:
354 return ret;
355}
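
iwl4965_init_drv() above uses the usual kernel unwind idiom: each later failure jumps to a label that releases only what was already set up, in reverse order (geos first, then the channel map). A stripped-down sketch of the idiom with hypothetical resources standing in for the real allocations:

#include <stdlib.h>

struct two_bufs { void *chan_map, *geos; };

static int init_two(struct two_bufs *p)
{
	p->chan_map = malloc(32);
	if (!p->chan_map)
		goto err;

	p->geos = malloc(32);
	if (!p->geos)
		goto err_free_chan_map;

	return 0;

err_free_chan_map:
	free(p->chan_map);	/* undo only what succeeded, newest first */
err:
	return -1;
}
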
356
 static int is_fat_channel(__le32 rxon_flags)
 {
 	return (rxon_flags & RXON_FLG_CHANNEL_MODE_PURE_40_MSK) ||
 		(rxon_flags & RXON_FLG_CHANNEL_MODE_MIXED_MSK);
 }
 
-static u8 is_single_stream(struct iwl4965_priv *priv)
+static u8 is_single_stream(struct iwl_priv *priv)
 {
 #ifdef CONFIG_IWL4965_HT
 	if (!priv->current_ht_config.is_ht ||
@@ -98,13 +373,71 @@ static u8 is_single_stream(struct iwl4965_priv *priv)
 	return 0;
 }
 
376int iwl4965_hwrate_to_plcp_idx(u32 rate_n_flags)
377{
378 int idx = 0;
379
380 /* 4965 HT rate format */
381 if (rate_n_flags & RATE_MCS_HT_MSK) {
382 idx = (rate_n_flags & 0xff);
383
384 if (idx >= IWL_RATE_MIMO_6M_PLCP)
385 idx = idx - IWL_RATE_MIMO_6M_PLCP;
386
387 idx += IWL_FIRST_OFDM_RATE;
388 /* skip 9M not supported in ht*/
389 if (idx >= IWL_RATE_9M_INDEX)
390 idx += 1;
391 if ((idx >= IWL_FIRST_OFDM_RATE) && (idx <= IWL_LAST_OFDM_RATE))
392 return idx;
393
394 /* 4965 legacy rate format, search for match in table */
395 } else {
396 for (idx = 0; idx < ARRAY_SIZE(iwl4965_rates); idx++)
397 if (iwl4965_rates[idx].plcp == (rate_n_flags & 0xFF))
398 return idx;
399 }
400
401 return -1;
402}
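
iwl4965_hwrate_to_plcp_idx() returns -1 when the hardware rate field matches nothing, and callers are expected to treat that as "no valid bitrate" (see iwl4965_hwrate_to_tx_control() below, which leaves tx_rate NULL in that case). A hedged sketch of the same lookup-with-sentinel shape, using a made-up table rather than the driver's iwl4965_rates[]:

#include <stddef.h>
#include <stdint.h>

struct rate { uint8_t plcp; int bitrate_kbps; };

/* Hypothetical table entries for illustration only. */
static const struct rate rates[] = {
	{ 0x0a, 1000 }, { 0x14, 2000 }, { 0x0d, 6000 },
};

static const struct rate *lookup_rate(uint8_t plcp)
{
	size_t i;

	for (i = 0; i < sizeof(rates) / sizeof(rates[0]); i++)
		if (rates[i].plcp == plcp)
			return &rates[i];
	return NULL;	/* mirrors the -1 "not found" return above */
}
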
403
404/**
405 * translate ucode response to mac80211 tx status control values
406 */
407void iwl4965_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
408 struct ieee80211_tx_control *control)
409{
410 int rate_index;
411
412 control->antenna_sel_tx =
413 ((rate_n_flags & RATE_MCS_ANT_AB_MSK) >> RATE_MCS_ANT_POS);
414 if (rate_n_flags & RATE_MCS_HT_MSK)
415 control->flags |= IEEE80211_TXCTL_OFDM_HT;
416 if (rate_n_flags & RATE_MCS_GF_MSK)
417 control->flags |= IEEE80211_TXCTL_GREEN_FIELD;
418 if (rate_n_flags & RATE_MCS_FAT_MSK)
419 control->flags |= IEEE80211_TXCTL_40_MHZ_WIDTH;
420 if (rate_n_flags & RATE_MCS_DUP_MSK)
421 control->flags |= IEEE80211_TXCTL_DUP_DATA;
422 if (rate_n_flags & RATE_MCS_SGI_MSK)
423 control->flags |= IEEE80211_TXCTL_SHORT_GI;
424 /* since iwl4965_hwrate_to_plcp_idx is band indifferent, we always use
425 * IEEE80211_BAND_2GHZ band as it contains all the rates */
426 rate_index = iwl4965_hwrate_to_plcp_idx(rate_n_flags);
427 if (rate_index == -1)
428 control->tx_rate = NULL;
429 else
430 control->tx_rate =
431 &priv->bands[IEEE80211_BAND_2GHZ].bitrates[rate_index];
432}
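
The flag handling in iwl4965_hwrate_to_tx_control() above is a chain of mask tests; the same translation can be viewed as a table of (hardware mask, mac80211 flag) pairs. A minimal sketch of that table-driven form, with hypothetical bit values in place of the RATE_MCS_* and IEEE80211_TXCTL_* constants:

#include <stdint.h>

struct flag_map { uint32_t hw_mask; uint32_t ctl_flag; };

/* Hypothetical bit assignments, for illustration only. */
static const struct flag_map maps[] = {
	{ 1u << 8,  1u << 0 },	/* HT         -> OFDM_HT      */
	{ 1u << 10, 1u << 1 },	/* greenfield -> GREEN_FIELD  */
	{ 1u << 11, 1u << 2 },	/* 40 MHz     -> 40_MHZ_WIDTH */
};

static uint32_t translate(uint32_t rate_n_flags)
{
	uint32_t out = 0;
	unsigned int i;

	for (i = 0; i < sizeof(maps) / sizeof(maps[0]); i++)
		if (rate_n_flags & maps[i].hw_mask)
			out |= maps[i].ctl_flag;
	return out;
}

Either form is equivalent; the table version just makes adding a new flag a one-line change.
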
433
 /*
  * Determine how many receiver/antenna chains to use.
  * More provides better reception via diversity. Fewer saves power.
  * MIMO (dual stream) requires at least 2, but works better with 3.
  * This does not determine *which* chains to use, just how many.
  */
-static int iwl4965_get_rx_chain_counter(struct iwl4965_priv *priv,
+static int iwl4965_get_rx_chain_counter(struct iwl_priv *priv,
 					u8 *idle_state, u8 *rx_state)
 {
 	u8 is_single = is_single_stream(priv);
@@ -133,32 +466,32 @@ static int iwl4965_get_rx_chain_counter(struct iwl4965_priv *priv,
 	return 0;
 }
 
-int iwl4965_hw_rxq_stop(struct iwl4965_priv *priv)
+int iwl4965_hw_rxq_stop(struct iwl_priv *priv)
 {
 	int rc;
 	unsigned long flags;
 
 	spin_lock_irqsave(&priv->lock, flags);
-	rc = iwl4965_grab_nic_access(priv);
+	rc = iwl_grab_nic_access(priv);
 	if (rc) {
 		spin_unlock_irqrestore(&priv->lock, flags);
 		return rc;
 	}
 
 	/* stop Rx DMA */
-	iwl4965_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
-	rc = iwl4965_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG,
+	iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
+	rc = iwl_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG,
 				  (1 << 24), 1000);
 	if (rc < 0)
 		IWL_ERROR("Can't stop Rx DMA.\n");
 
-	iwl4965_release_nic_access(priv);
+	iwl_release_nic_access(priv);
 	spin_unlock_irqrestore(&priv->lock, flags);
 
 	return 0;
 }
 
-u8 iwl4965_hw_find_station(struct iwl4965_priv *priv, const u8 *addr)
+u8 iwl4965_hw_find_station(struct iwl_priv *priv, const u8 *addr)
 {
 	int i;
 	int start = 0;
@@ -171,10 +504,10 @@ u8 iwl4965_hw_find_station(struct iwl4965_priv *priv, const u8 *addr)
 		start = IWL_STA_ID;
 
 	if (is_broadcast_ether_addr(addr))
-		return IWL4965_BROADCAST_ID;
+		return priv->hw_params.bcast_sta_id;
 
 	spin_lock_irqsave(&priv->sta_lock, flags);
-	for (i = start; i < priv->hw_setting.max_stations; i++)
+	for (i = start; i < priv->hw_params.max_stations; i++)
 		if ((priv->stations[i].used) &&
 		    (!compare_ether_addr
 		     (priv->stations[i].sta.sta.addr, addr))) {
@@ -190,13 +523,13 @@ u8 iwl4965_hw_find_station(struct iwl4965_priv *priv, const u8 *addr)
 	return ret;
 }
 
-static int iwl4965_nic_set_pwr_src(struct iwl4965_priv *priv, int pwr_max)
+static int iwl4965_nic_set_pwr_src(struct iwl_priv *priv, int pwr_max)
 {
 	int ret;
 	unsigned long flags;
 
 	spin_lock_irqsave(&priv->lock, flags);
-	ret = iwl4965_grab_nic_access(priv);
+	ret = iwl_grab_nic_access(priv);
 	if (ret) {
 		spin_unlock_irqrestore(&priv->lock, flags);
 		return ret;
@@ -209,92 +542,92 @@ static int iwl4965_nic_set_pwr_src(struct iwl4965_priv *priv, int pwr_max)
 					   &val);
 
 		if (val & PCI_CFG_PMC_PME_FROM_D3COLD_SUPPORT)
-			iwl4965_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
+			iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
 					APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
 					~APMG_PS_CTRL_MSK_PWR_SRC);
 	} else
-		iwl4965_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
+		iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
 				       APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
 				       ~APMG_PS_CTRL_MSK_PWR_SRC);
 
-	iwl4965_release_nic_access(priv);
+	iwl_release_nic_access(priv);
 	spin_unlock_irqrestore(&priv->lock, flags);
 
 	return ret;
 }
 
-static int iwl4965_rx_init(struct iwl4965_priv *priv, struct iwl4965_rx_queue *rxq)
+static int iwl4965_rx_init(struct iwl_priv *priv, struct iwl4965_rx_queue *rxq)
 {
-	int rc;
+	int ret;
 	unsigned long flags;
 	unsigned int rb_size;
 
 	spin_lock_irqsave(&priv->lock, flags);
-	rc = iwl4965_grab_nic_access(priv);
-	if (rc) {
+	ret = iwl_grab_nic_access(priv);
+	if (ret) {
 		spin_unlock_irqrestore(&priv->lock, flags);
-		return rc;
+		return ret;
 	}
 
-	if (iwl4965_param_amsdu_size_8K)
+	if (priv->cfg->mod_params->amsdu_size_8K)
 		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
 	else
 		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
 
 	/* Stop Rx DMA */
-	iwl4965_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
+	iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
 
 	/* Reset driver's Rx queue write index */
-	iwl4965_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
+	iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
 
 	/* Tell device where to find RBD circular buffer in DRAM */
-	iwl4965_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
+	iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
 			   rxq->dma_addr >> 8);
 
 	/* Tell device where in DRAM to update its Rx status */
-	iwl4965_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG,
-			     (priv->hw_setting.shared_phys +
-			      offsetof(struct iwl4965_shared, val0)) >> 4);
+	iwl_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG,
+			   (priv->shared_phys +
+			    offsetof(struct iwl4965_shared, rb_closed)) >> 4);
 
 	/* Enable Rx DMA, enable host interrupt, Rx buffer size 4k, 256 RBDs */
-	iwl4965_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG,
+	iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG,
 			   FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
 			   FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
 			   rb_size |
-			     /*0x10 << 4 | */
+			   /* 0x10 << 4 | */
 			   (RX_QUEUE_SIZE_LOG <<
 			    FH_RCSR_RX_CONFIG_RBDCB_SIZE_BITSHIFT));
 
 	/*
-	 * iwl4965_write32(priv,CSR_INT_COAL_REG,0);
+	 * iwl_write32(priv,CSR_INT_COAL_REG,0);
 	 */
 
-	iwl4965_release_nic_access(priv);
+	iwl_release_nic_access(priv);
 	spin_unlock_irqrestore(&priv->lock, flags);
 
 	return 0;
 }
 
 /* Tell 4965 where to find the "keep warm" buffer */
-static int iwl4965_kw_init(struct iwl4965_priv *priv)
+static int iwl4965_kw_init(struct iwl_priv *priv)
 {
 	unsigned long flags;
 	int rc;
 
 	spin_lock_irqsave(&priv->lock, flags);
-	rc = iwl4965_grab_nic_access(priv);
+	rc = iwl_grab_nic_access(priv);
 	if (rc)
 		goto out;
 
-	iwl4965_write_direct32(priv, IWL_FH_KW_MEM_ADDR_REG,
+	iwl_write_direct32(priv, IWL_FH_KW_MEM_ADDR_REG,
 			     priv->kw.dma_addr >> 4);
-	iwl4965_release_nic_access(priv);
+	iwl_release_nic_access(priv);
 out:
 	spin_unlock_irqrestore(&priv->lock, flags);
 	return rc;
 }
 
-static int iwl4965_kw_alloc(struct iwl4965_priv *priv)
+static int iwl4965_kw_alloc(struct iwl_priv *priv)
 {
 	struct pci_dev *dev = priv->pci_dev;
 	struct iwl4965_kw *kw = &priv->kw;
@@ -307,58 +640,10 @@ static int iwl4965_kw_alloc(struct iwl4965_priv *priv)
307 return 0; 640 return 0;
308} 641}
309 642
310#define CHECK_AND_PRINT(x) ((eeprom_ch->flags & EEPROM_CHANNEL_##x) \
311 ? # x " " : "")
312
313/**
314 * iwl4965_set_fat_chan_info - Copy fat channel info into driver's priv.
315 *
316 * Does not set up a command, or touch hardware.
317 */
318int iwl4965_set_fat_chan_info(struct iwl4965_priv *priv, int phymode, u16 channel,
319 const struct iwl4965_eeprom_channel *eeprom_ch,
320 u8 fat_extension_channel)
321{
322 struct iwl4965_channel_info *ch_info;
323
324 ch_info = (struct iwl4965_channel_info *)
325 iwl4965_get_channel_info(priv, phymode, channel);
326
327 if (!is_channel_valid(ch_info))
328 return -1;
329
330 IWL_DEBUG_INFO("FAT Ch. %d [%sGHz] %s%s%s%s%s%s(0x%02x"
331 " %ddBm): Ad-Hoc %ssupported\n",
332 ch_info->channel,
333 is_channel_a_band(ch_info) ?
334 "5.2" : "2.4",
335 CHECK_AND_PRINT(IBSS),
336 CHECK_AND_PRINT(ACTIVE),
337 CHECK_AND_PRINT(RADAR),
338 CHECK_AND_PRINT(WIDE),
339 CHECK_AND_PRINT(NARROW),
340 CHECK_AND_PRINT(DFS),
341 eeprom_ch->flags,
342 eeprom_ch->max_power_avg,
343 ((eeprom_ch->flags & EEPROM_CHANNEL_IBSS)
344 && !(eeprom_ch->flags & EEPROM_CHANNEL_RADAR)) ?
345 "" : "not ");
346
347 ch_info->fat_eeprom = *eeprom_ch;
348 ch_info->fat_max_power_avg = eeprom_ch->max_power_avg;
349 ch_info->fat_curr_txpow = eeprom_ch->max_power_avg;
350 ch_info->fat_min_power = 0;
351 ch_info->fat_scan_power = eeprom_ch->max_power_avg;
352 ch_info->fat_flags = eeprom_ch->flags;
353 ch_info->fat_extension_channel = fat_extension_channel;
354
355 return 0;
356}
357
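
The CHECK_AND_PRINT() macro deleted above relies on preprocessor token pasting (##) and stringizing (#) to print a flag's name only when that flag is set. A self-contained sketch of the same trick, using made-up flag names rather than the EEPROM_CHANNEL_* bits:

#include <stdio.h>

#define FLAG_IBSS	0x01
#define FLAG_RADAR	0x02
#define CHECK_AND_PRINT(flags, x) (((flags) & FLAG_##x) ? #x " " : "")

int main(void)
{
	unsigned int flags = FLAG_IBSS;

	/* Prints "channel flags: IBSS (0x01)" for this value. */
	printf("channel flags: %s%s(0x%02x)\n",
	       CHECK_AND_PRINT(flags, IBSS),
	       CHECK_AND_PRINT(flags, RADAR), flags);
	return 0;
}
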
358/** 643/**
359 * iwl4965_kw_free - Free the "keep warm" buffer 644 * iwl4965_kw_free - Free the "keep warm" buffer
360 */ 645 */
361static void iwl4965_kw_free(struct iwl4965_priv *priv) 646static void iwl4965_kw_free(struct iwl_priv *priv)
362{ 647{
363 struct pci_dev *dev = priv->pci_dev; 648 struct pci_dev *dev = priv->pci_dev;
364 struct iwl4965_kw *kw = &priv->kw; 649 struct iwl4965_kw *kw = &priv->kw;
@@ -376,7 +661,7 @@ static void iwl4965_kw_free(struct iwl4965_priv *priv)
376 * @param priv 661 * @param priv
377 * @return error code 662 * @return error code
378 */ 663 */
379static int iwl4965_txq_ctx_reset(struct iwl4965_priv *priv) 664static int iwl4965_txq_ctx_reset(struct iwl_priv *priv)
380{ 665{
381 int rc = 0; 666 int rc = 0;
382 int txq_id, slots_num; 667 int txq_id, slots_num;
@@ -396,7 +681,7 @@ static int iwl4965_txq_ctx_reset(struct iwl4965_priv *priv)
396 681
397 spin_lock_irqsave(&priv->lock, flags); 682 spin_lock_irqsave(&priv->lock, flags);
398 683
399 rc = iwl4965_grab_nic_access(priv); 684 rc = iwl_grab_nic_access(priv);
400 if (unlikely(rc)) { 685 if (unlikely(rc)) {
401 IWL_ERROR("TX reset failed"); 686 IWL_ERROR("TX reset failed");
402 spin_unlock_irqrestore(&priv->lock, flags); 687 spin_unlock_irqrestore(&priv->lock, flags);
@@ -404,8 +689,8 @@ static int iwl4965_txq_ctx_reset(struct iwl4965_priv *priv)
404 } 689 }
405 690
406 /* Turn off all Tx DMA channels */ 691 /* Turn off all Tx DMA channels */
407 iwl4965_write_prph(priv, KDR_SCD_TXFACT, 0); 692 iwl_write_prph(priv, IWL49_SCD_TXFACT, 0);
408 iwl4965_release_nic_access(priv); 693 iwl_release_nic_access(priv);
409 spin_unlock_irqrestore(&priv->lock, flags); 694 spin_unlock_irqrestore(&priv->lock, flags);
410 695
411 /* Tell 4965 where to find the keep-warm buffer */ 696 /* Tell 4965 where to find the keep-warm buffer */
@@ -417,7 +702,7 @@ static int iwl4965_txq_ctx_reset(struct iwl4965_priv *priv)
417 702
418 /* Alloc and init all (default 16) Tx queues, 703 /* Alloc and init all (default 16) Tx queues,
419 * including the command queue (#4) */ 704 * including the command queue (#4) */
420 for (txq_id = 0; txq_id < priv->hw_setting.max_txq_num; txq_id++) { 705 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
421 slots_num = (txq_id == IWL_CMD_QUEUE_NUM) ? 706 slots_num = (txq_id == IWL_CMD_QUEUE_NUM) ?
422 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS; 707 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
423 rc = iwl4965_tx_queue_init(priv, &priv->txq[txq_id], slots_num, 708 rc = iwl4965_tx_queue_init(priv, &priv->txq[txq_id], slots_num,
@@ -438,7 +723,7 @@ static int iwl4965_txq_ctx_reset(struct iwl4965_priv *priv)
438 return rc; 723 return rc;
439} 724}
440 725
441int iwl4965_hw_nic_init(struct iwl4965_priv *priv) 726int iwl4965_hw_nic_init(struct iwl_priv *priv)
442{ 727{
443 int rc; 728 int rc;
444 unsigned long flags; 729 unsigned long flags;
@@ -452,11 +737,11 @@ int iwl4965_hw_nic_init(struct iwl4965_priv *priv)
452 /* nic_init */ 737 /* nic_init */
453 spin_lock_irqsave(&priv->lock, flags); 738 spin_lock_irqsave(&priv->lock, flags);
454 739
455 iwl4965_set_bit(priv, CSR_GIO_CHICKEN_BITS, 740 iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS,
456 CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER); 741 CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
457 742
458 iwl4965_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); 743 iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
459 rc = iwl4965_poll_bit(priv, CSR_GP_CNTRL, 744 rc = iwl_poll_bit(priv, CSR_GP_CNTRL,
460 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 745 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
461 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000); 746 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
462 if (rc < 0) { 747 if (rc < 0) {
@@ -465,26 +750,25 @@ int iwl4965_hw_nic_init(struct iwl4965_priv *priv)
465 return rc; 750 return rc;
466 } 751 }
467 752
468 rc = iwl4965_grab_nic_access(priv); 753 rc = iwl_grab_nic_access(priv);
469 if (rc) { 754 if (rc) {
470 spin_unlock_irqrestore(&priv->lock, flags); 755 spin_unlock_irqrestore(&priv->lock, flags);
471 return rc; 756 return rc;
472 } 757 }
473 758
474 iwl4965_read_prph(priv, APMG_CLK_CTRL_REG); 759 iwl_read_prph(priv, APMG_CLK_CTRL_REG);
475 760
476 iwl4965_write_prph(priv, APMG_CLK_CTRL_REG, 761 iwl_write_prph(priv, APMG_CLK_CTRL_REG,
477 APMG_CLK_VAL_DMA_CLK_RQT | 762 APMG_CLK_VAL_DMA_CLK_RQT | APMG_CLK_VAL_BSM_CLK_RQT);
478 APMG_CLK_VAL_BSM_CLK_RQT); 763 iwl_read_prph(priv, APMG_CLK_CTRL_REG);
479 iwl4965_read_prph(priv, APMG_CLK_CTRL_REG);
480 764
481 udelay(20); 765 udelay(20);
482 766
483 iwl4965_set_bits_prph(priv, APMG_PCIDEV_STT_REG, 767 iwl_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
484 APMG_PCIDEV_STT_VAL_L1_ACT_DIS); 768 APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
485 769
486 iwl4965_release_nic_access(priv); 770 iwl_release_nic_access(priv);
487 iwl4965_write32(priv, CSR_INT_COALESCING, 512 / 32); 771 iwl_write32(priv, CSR_INT_COALESCING, 512 / 32);
488 spin_unlock_irqrestore(&priv->lock, flags); 772 spin_unlock_irqrestore(&priv->lock, flags);
489 773
490 /* Determine HW type */ 774 /* Determine HW type */
@@ -520,25 +804,24 @@ int iwl4965_hw_nic_init(struct iwl4965_priv *priv)
520 804
521 /* set CSR_HW_CONFIG_REG for uCode use */ 805 /* set CSR_HW_CONFIG_REG for uCode use */
522 806
523 iwl4965_set_bit(priv, CSR_SW_VER, CSR_HW_IF_CONFIG_REG_BIT_KEDRON_R | 807 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
524 CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI | 808 CSR49_HW_IF_CONFIG_REG_BIT_4965_R |
525 CSR_HW_IF_CONFIG_REG_BIT_MAC_SI); 809 CSR49_HW_IF_CONFIG_REG_BIT_RADIO_SI |
810 CSR49_HW_IF_CONFIG_REG_BIT_MAC_SI);
526 811
527 rc = iwl4965_grab_nic_access(priv); 812 rc = iwl_grab_nic_access(priv);
528 if (rc < 0) { 813 if (rc < 0) {
529 spin_unlock_irqrestore(&priv->lock, flags); 814 spin_unlock_irqrestore(&priv->lock, flags);
530 IWL_DEBUG_INFO("Failed to init the card\n"); 815 IWL_DEBUG_INFO("Failed to init the card\n");
531 return rc; 816 return rc;
532 } 817 }
533 818
534 iwl4965_read_prph(priv, APMG_PS_CTRL_REG); 819 iwl_read_prph(priv, APMG_PS_CTRL_REG);
535 iwl4965_set_bits_prph(priv, APMG_PS_CTRL_REG, 820 iwl_set_bits_prph(priv, APMG_PS_CTRL_REG, APMG_PS_CTRL_VAL_RESET_REQ);
536 APMG_PS_CTRL_VAL_RESET_REQ);
537 udelay(5); 821 udelay(5);
538 iwl4965_clear_bits_prph(priv, APMG_PS_CTRL_REG, 822 iwl_clear_bits_prph(priv, APMG_PS_CTRL_REG, APMG_PS_CTRL_VAL_RESET_REQ);
539 APMG_PS_CTRL_VAL_RESET_REQ);
540 823
541 iwl4965_release_nic_access(priv); 824 iwl_release_nic_access(priv);
542 spin_unlock_irqrestore(&priv->lock, flags); 825 spin_unlock_irqrestore(&priv->lock, flags);
543 826
544 iwl4965_hw_card_show_info(priv); 827 iwl4965_hw_card_show_info(priv);
@@ -582,7 +865,7 @@ int iwl4965_hw_nic_init(struct iwl4965_priv *priv)
582 return 0; 865 return 0;
583} 866}
584 867
585int iwl4965_hw_nic_stop_master(struct iwl4965_priv *priv) 868int iwl4965_hw_nic_stop_master(struct iwl_priv *priv)
586{ 869{
587 int rc = 0; 870 int rc = 0;
588 u32 reg_val; 871 u32 reg_val;
@@ -591,16 +874,16 @@ int iwl4965_hw_nic_stop_master(struct iwl4965_priv *priv)
591 spin_lock_irqsave(&priv->lock, flags); 874 spin_lock_irqsave(&priv->lock, flags);
592 875
593 /* set stop master bit */ 876 /* set stop master bit */
594 iwl4965_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER); 877 iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
595 878
596 reg_val = iwl4965_read32(priv, CSR_GP_CNTRL); 879 reg_val = iwl_read32(priv, CSR_GP_CNTRL);
597 880
598 if (CSR_GP_CNTRL_REG_FLAG_MAC_POWER_SAVE == 881 if (CSR_GP_CNTRL_REG_FLAG_MAC_POWER_SAVE ==
599 (reg_val & CSR_GP_CNTRL_REG_MSK_POWER_SAVE_TYPE)) 882 (reg_val & CSR_GP_CNTRL_REG_MSK_POWER_SAVE_TYPE))
600 IWL_DEBUG_INFO("Card in power save, master is already " 883 IWL_DEBUG_INFO("Card in power save, master is already "
601 "stopped\n"); 884 "stopped\n");
602 else { 885 else {
603 rc = iwl4965_poll_bit(priv, CSR_RESET, 886 rc = iwl_poll_bit(priv, CSR_RESET,
604 CSR_RESET_REG_FLAG_MASTER_DISABLED, 887 CSR_RESET_REG_FLAG_MASTER_DISABLED,
605 CSR_RESET_REG_FLAG_MASTER_DISABLED, 100); 888 CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
606 if (rc < 0) { 889 if (rc < 0) {
@@ -618,27 +901,26 @@ int iwl4965_hw_nic_stop_master(struct iwl4965_priv *priv)
618/** 901/**
619 * iwl4965_hw_txq_ctx_stop - Stop all Tx DMA channels, free Tx queue memory 902 * iwl4965_hw_txq_ctx_stop - Stop all Tx DMA channels, free Tx queue memory
620 */ 903 */
621void iwl4965_hw_txq_ctx_stop(struct iwl4965_priv *priv) 904void iwl4965_hw_txq_ctx_stop(struct iwl_priv *priv)
622{ 905{
623 906
624 int txq_id; 907 int txq_id;
625 unsigned long flags; 908 unsigned long flags;
626 909
627 /* Stop each Tx DMA channel, and wait for it to be idle */ 910 /* Stop each Tx DMA channel, and wait for it to be idle */
628 for (txq_id = 0; txq_id < priv->hw_setting.max_txq_num; txq_id++) { 911 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
629 spin_lock_irqsave(&priv->lock, flags); 912 spin_lock_irqsave(&priv->lock, flags);
630 if (iwl4965_grab_nic_access(priv)) { 913 if (iwl_grab_nic_access(priv)) {
631 spin_unlock_irqrestore(&priv->lock, flags); 914 spin_unlock_irqrestore(&priv->lock, flags);
632 continue; 915 continue;
633 } 916 }
634 917
635 iwl4965_write_direct32(priv, 918 iwl_write_direct32(priv,
636 IWL_FH_TCSR_CHNL_TX_CONFIG_REG(txq_id), 919 IWL_FH_TCSR_CHNL_TX_CONFIG_REG(txq_id), 0x0);
637 0x0); 920 iwl_poll_direct_bit(priv, IWL_FH_TSSR_TX_STATUS_REG,
638 iwl4965_poll_direct_bit(priv, IWL_FH_TSSR_TX_STATUS_REG, 921 IWL_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE
639 IWL_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE 922 (txq_id), 200);
640 (txq_id), 200); 923 iwl_release_nic_access(priv);
641 iwl4965_release_nic_access(priv);
642 spin_unlock_irqrestore(&priv->lock, flags); 924 spin_unlock_irqrestore(&priv->lock, flags);
643 } 925 }
644 926
@@ -646,7 +928,7 @@ void iwl4965_hw_txq_ctx_stop(struct iwl4965_priv *priv)
646 iwl4965_hw_txq_ctx_free(priv); 928 iwl4965_hw_txq_ctx_free(priv);
647} 929}
648 930
649int iwl4965_hw_nic_reset(struct iwl4965_priv *priv) 931int iwl4965_hw_nic_reset(struct iwl_priv *priv)
650{ 932{
651 int rc = 0; 933 int rc = 0;
652 unsigned long flags; 934 unsigned long flags;
@@ -655,29 +937,29 @@ int iwl4965_hw_nic_reset(struct iwl4965_priv *priv)
655 937
656 spin_lock_irqsave(&priv->lock, flags); 938 spin_lock_irqsave(&priv->lock, flags);
657 939
658 iwl4965_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET); 940 iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
659 941
660 udelay(10); 942 udelay(10);
661 943
662 iwl4965_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); 944 iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
663 rc = iwl4965_poll_bit(priv, CSR_RESET, 945 rc = iwl_poll_bit(priv, CSR_RESET,
664 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 946 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
665 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25); 947 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25);
666 948
667 udelay(10); 949 udelay(10);
668 950
669 rc = iwl4965_grab_nic_access(priv); 951 rc = iwl_grab_nic_access(priv);
670 if (!rc) { 952 if (!rc) {
671 iwl4965_write_prph(priv, APMG_CLK_EN_REG, 953 iwl_write_prph(priv, APMG_CLK_EN_REG,
672 APMG_CLK_VAL_DMA_CLK_RQT | 954 APMG_CLK_VAL_DMA_CLK_RQT |
673 APMG_CLK_VAL_BSM_CLK_RQT); 955 APMG_CLK_VAL_BSM_CLK_RQT);
674 956
675 udelay(10); 957 udelay(10);
676 958
677 iwl4965_set_bits_prph(priv, APMG_PCIDEV_STT_REG, 959 iwl_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
678 APMG_PCIDEV_STT_VAL_L1_ACT_DIS); 960 APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
679 961
680 iwl4965_release_nic_access(priv); 962 iwl_release_nic_access(priv);
681 } 963 }
682 964
683 clear_bit(STATUS_HCMD_ACTIVE, &priv->status); 965 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
@@ -694,56 +976,37 @@ int iwl4965_hw_nic_reset(struct iwl4965_priv *priv)
 /**
  * iwl4965_bg_statistics_periodic - Timer callback to queue statistics
  *
- * This callback is provided in order to queue the statistics_work
- * in work_queue context (v. softirq)
+ * This callback is provided in order to send a statistics request.
  *
  * This timer function is continually reset to execute within
  * REG_RECALIB_PERIOD seconds since the last STATISTICS_NOTIFICATION
  * was received. We need to ensure we receive the statistics in order
- * to update the temperature used for calibrating the TXPOWER. However,
- * we can't send the statistics command from softirq context (which
- * is the context which timers run at) so we have to queue off the
- * statistics_work to actually send the command to the hardware.
+ * to update the temperature used for calibrating the TXPOWER.
  */
 static void iwl4965_bg_statistics_periodic(unsigned long data)
 {
-	struct iwl4965_priv *priv = (struct iwl4965_priv *)data;
-
-	queue_work(priv->workqueue, &priv->statistics_work);
-}
-
-/**
- * iwl4965_bg_statistics_work - Send the statistics request to the hardware.
- *
- * This is queued by iwl4965_bg_statistics_periodic.
- */
-static void iwl4965_bg_statistics_work(struct work_struct *work)
-{
-	struct iwl4965_priv *priv = container_of(work, struct iwl4965_priv,
-						 statistics_work);
+	struct iwl_priv *priv = (struct iwl_priv *)data;
 
 	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
 		return;
 
-	mutex_lock(&priv->mutex);
-	iwl4965_send_statistics_request(priv);
-	mutex_unlock(&priv->mutex);
+	iwl_send_statistics_request(priv, CMD_ASYNC);
 }
732 995
733#define CT_LIMIT_CONST 259 996#define CT_LIMIT_CONST 259
734#define TM_CT_KILL_THRESHOLD 110 997#define TM_CT_KILL_THRESHOLD 110
735 998
736void iwl4965_rf_kill_ct_config(struct iwl4965_priv *priv) 999void iwl4965_rf_kill_ct_config(struct iwl_priv *priv)
737{ 1000{
738 struct iwl4965_ct_kill_config cmd; 1001 struct iwl4965_ct_kill_config cmd;
739 u32 R1, R2, R3; 1002 u32 R1, R2, R3;
740 u32 temp_th; 1003 u32 temp_th;
741 u32 crit_temperature; 1004 u32 crit_temperature;
742 unsigned long flags; 1005 unsigned long flags;
743 int rc = 0; 1006 int ret = 0;
744 1007
745 spin_lock_irqsave(&priv->lock, flags); 1008 spin_lock_irqsave(&priv->lock, flags);
746 iwl4965_write32(priv, CSR_UCODE_DRV_GP1_CLR, 1009 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
747 CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT); 1010 CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
748 spin_unlock_irqrestore(&priv->lock, flags); 1011 spin_unlock_irqrestore(&priv->lock, flags);
749 1012
@@ -761,9 +1024,9 @@ void iwl4965_rf_kill_ct_config(struct iwl4965_priv *priv)
761 1024
762 crit_temperature = ((temp_th * (R3-R1))/CT_LIMIT_CONST) + R2; 1025 crit_temperature = ((temp_th * (R3-R1))/CT_LIMIT_CONST) + R2;
763 cmd.critical_temperature_R = cpu_to_le32(crit_temperature); 1026 cmd.critical_temperature_R = cpu_to_le32(crit_temperature);
764 rc = iwl4965_send_cmd_pdu(priv, 1027 ret = iwl_send_cmd_pdu(priv, REPLY_CT_KILL_CONFIG_CMD,
765 REPLY_CT_KILL_CONFIG_CMD, sizeof(cmd), &cmd); 1028 sizeof(cmd), &cmd);
766 if (rc) 1029 if (ret)
767 IWL_ERROR("REPLY_CT_KILL_CONFIG_CMD failed\n"); 1030 IWL_ERROR("REPLY_CT_KILL_CONFIG_CMD failed\n");
768 else 1031 else
769 IWL_DEBUG_INFO("REPLY_CT_KILL_CONFIG_CMD succeeded\n"); 1032 IWL_DEBUG_INFO("REPLY_CT_KILL_CONFIG_CMD succeeded\n");
@@ -779,7 +1042,7 @@ void iwl4965_rf_kill_ct_config(struct iwl4965_priv *priv)
779 * enough to receive all of our own network traffic, but not so 1042 * enough to receive all of our own network traffic, but not so
780 * high that our DSP gets too busy trying to lock onto non-network 1043 * high that our DSP gets too busy trying to lock onto non-network
781 * activity/noise. */ 1044 * activity/noise. */
782static int iwl4965_sens_energy_cck(struct iwl4965_priv *priv, 1045static int iwl4965_sens_energy_cck(struct iwl_priv *priv,
783 u32 norm_fa, 1046 u32 norm_fa,
784 u32 rx_enable_time, 1047 u32 rx_enable_time,
785 struct statistics_general_data *rx_info) 1048 struct statistics_general_data *rx_info)
@@ -970,7 +1233,7 @@ static int iwl4965_sens_energy_cck(struct iwl4965_priv *priv,
970} 1233}
971 1234
972 1235
973static int iwl4965_sens_auto_corr_ofdm(struct iwl4965_priv *priv, 1236static int iwl4965_sens_auto_corr_ofdm(struct iwl_priv *priv,
974 u32 norm_fa, 1237 u32 norm_fa,
975 u32 rx_enable_time) 1238 u32 rx_enable_time)
976{ 1239{
@@ -1035,25 +1298,25 @@ static int iwl4965_sens_auto_corr_ofdm(struct iwl4965_priv *priv,
1035 return 0; 1298 return 0;
1036} 1299}
1037 1300
1038static int iwl4965_sensitivity_callback(struct iwl4965_priv *priv, 1301static int iwl4965_sensitivity_callback(struct iwl_priv *priv,
1039 struct iwl4965_cmd *cmd, struct sk_buff *skb) 1302 struct iwl_cmd *cmd, struct sk_buff *skb)
1040{ 1303{
1041 /* We didn't cache the SKB; let the caller free it */ 1304 /* We didn't cache the SKB; let the caller free it */
1042 return 1; 1305 return 1;
1043} 1306}
1044 1307
1045/* Prepare a SENSITIVITY_CMD, send to uCode if values have changed */ 1308/* Prepare a SENSITIVITY_CMD, send to uCode if values have changed */
1046static int iwl4965_sensitivity_write(struct iwl4965_priv *priv, u8 flags) 1309static int iwl4965_sensitivity_write(struct iwl_priv *priv, u8 flags)
1047{ 1310{
1048 int rc = 0;
1049 struct iwl4965_sensitivity_cmd cmd ; 1311 struct iwl4965_sensitivity_cmd cmd ;
1050 struct iwl4965_sensitivity_data *data = NULL; 1312 struct iwl4965_sensitivity_data *data = NULL;
1051 struct iwl4965_host_cmd cmd_out = { 1313 struct iwl_host_cmd cmd_out = {
1052 .id = SENSITIVITY_CMD, 1314 .id = SENSITIVITY_CMD,
1053 .len = sizeof(struct iwl4965_sensitivity_cmd), 1315 .len = sizeof(struct iwl4965_sensitivity_cmd),
1054 .meta.flags = flags, 1316 .meta.flags = flags,
1055 .data = &cmd, 1317 .data = &cmd,
1056 }; 1318 };
1319 int ret;
1057 1320
1058 data = &(priv->sensitivity_data); 1321 data = &(priv->sensitivity_data);
1059 1322
@@ -1111,20 +1374,18 @@ static int iwl4965_sensitivity_write(struct iwl4965_priv *priv, u8 flags)
1111 memcpy(&(priv->sensitivity_tbl[0]), &(cmd.table[0]), 1374 memcpy(&(priv->sensitivity_tbl[0]), &(cmd.table[0]),
1112 sizeof(u16)*HD_TABLE_SIZE); 1375 sizeof(u16)*HD_TABLE_SIZE);
1113 1376
1114 rc = iwl4965_send_cmd(priv, &cmd_out); 1377 ret = iwl_send_cmd(priv, &cmd_out);
1115 if (!rc) { 1378 if (ret)
1116 IWL_DEBUG_CALIB("SENSITIVITY_CMD succeeded\n"); 1379 IWL_ERROR("SENSITIVITY_CMD failed\n");
1117 return rc;
1118 }
1119 1380
1120 return 0; 1381 return ret;
1121} 1382}
1122 1383
1123void iwl4965_init_sensitivity(struct iwl4965_priv *priv, u8 flags, u8 force) 1384void iwl4965_init_sensitivity(struct iwl_priv *priv, u8 flags, u8 force)
1124{ 1385{
1125 int rc = 0;
1126 int i;
1127 struct iwl4965_sensitivity_data *data = NULL; 1386 struct iwl4965_sensitivity_data *data = NULL;
1387 int i;
1388 int ret = 0;
1128 1389
1129 IWL_DEBUG_CALIB("Start iwl4965_init_sensitivity\n"); 1390 IWL_DEBUG_CALIB("Start iwl4965_init_sensitivity\n");
1130 1391
@@ -1168,8 +1429,8 @@ void iwl4965_init_sensitivity(struct iwl4965_priv *priv, u8 flags, u8 force)
1168 memset(&(priv->sensitivity_tbl[0]), 0, 1429 memset(&(priv->sensitivity_tbl[0]), 0,
1169 sizeof(u16)*HD_TABLE_SIZE); 1430 sizeof(u16)*HD_TABLE_SIZE);
1170 1431
1171 rc |= iwl4965_sensitivity_write(priv, flags); 1432 ret |= iwl4965_sensitivity_write(priv, flags);
1172 IWL_DEBUG_CALIB("<<return 0x%X\n", rc); 1433 IWL_DEBUG_CALIB("<<return 0x%X\n", ret);
1173 1434
1174 return; 1435 return;
1175} 1436}
@@ -1178,13 +1439,12 @@ void iwl4965_init_sensitivity(struct iwl4965_priv *priv, u8 flags, u8 force)
1178/* Reset differential Rx gains in NIC to prepare for chain noise calibration. 1439/* Reset differential Rx gains in NIC to prepare for chain noise calibration.
1179 * Called after every association, but this runs only once! 1440 * Called after every association, but this runs only once!
1180 * ... once chain noise is calibrated the first time, it's good forever. */ 1441 * ... once chain noise is calibrated the first time, it's good forever. */
1181void iwl4965_chain_noise_reset(struct iwl4965_priv *priv) 1442void iwl4965_chain_noise_reset(struct iwl_priv *priv)
1182{ 1443{
1183 struct iwl4965_chain_noise_data *data = NULL; 1444 struct iwl4965_chain_noise_data *data = NULL;
1184 int rc = 0;
1185 1445
1186 data = &(priv->chain_noise_data); 1446 data = &(priv->chain_noise_data);
1187 if ((data->state == IWL_CHAIN_NOISE_ALIVE) && iwl4965_is_associated(priv)) { 1447 if ((data->state == IWL_CHAIN_NOISE_ALIVE) && iwl_is_associated(priv)) {
1188 struct iwl4965_calibration_cmd cmd; 1448 struct iwl4965_calibration_cmd cmd;
1189 1449
1190 memset(&cmd, 0, sizeof(cmd)); 1450 memset(&cmd, 0, sizeof(cmd));
@@ -1192,8 +1452,8 @@ void iwl4965_chain_noise_reset(struct iwl4965_priv *priv)
1192 cmd.diff_gain_a = 0; 1452 cmd.diff_gain_a = 0;
1193 cmd.diff_gain_b = 0; 1453 cmd.diff_gain_b = 0;
1194 cmd.diff_gain_c = 0; 1454 cmd.diff_gain_c = 0;
1195 rc = iwl4965_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD, 1455 iwl_send_cmd_pdu_async(priv, REPLY_PHY_CALIBRATION_CMD,
1196 sizeof(cmd), &cmd); 1456 sizeof(cmd), &cmd, NULL);
1197 msleep(4); 1457 msleep(4);
1198 data->state = IWL_CHAIN_NOISE_ACCUMULATE; 1458 data->state = IWL_CHAIN_NOISE_ACCUMULATE;
1199 IWL_DEBUG_CALIB("Run chain_noise_calibrate\n"); 1459 IWL_DEBUG_CALIB("Run chain_noise_calibrate\n");
@@ -1207,11 +1467,11 @@ void iwl4965_chain_noise_reset(struct iwl4965_priv *priv)
1207 * 1) Which antennas are connected. 1467 * 1) Which antennas are connected.
1208 * 2) Differential rx gain settings to balance the 3 receivers. 1468 * 2) Differential rx gain settings to balance the 3 receivers.
1209 */ 1469 */
1210static void iwl4965_noise_calibration(struct iwl4965_priv *priv, 1470static void iwl4965_noise_calibration(struct iwl_priv *priv,
1211 struct iwl4965_notif_statistics *stat_resp) 1471 struct iwl4965_notif_statistics *stat_resp)
1212{ 1472{
1213 struct iwl4965_chain_noise_data *data = NULL; 1473 struct iwl4965_chain_noise_data *data = NULL;
1214 int rc = 0; 1474 int ret = 0;
1215 1475
1216 u32 chain_noise_a; 1476 u32 chain_noise_a;
1217 u32 chain_noise_b; 1477 u32 chain_noise_b;
@@ -1417,9 +1677,9 @@ static void iwl4965_noise_calibration(struct iwl4965_priv *priv,
1417 cmd.diff_gain_a = data->delta_gain_code[0]; 1677 cmd.diff_gain_a = data->delta_gain_code[0];
1418 cmd.diff_gain_b = data->delta_gain_code[1]; 1678 cmd.diff_gain_b = data->delta_gain_code[1];
1419 cmd.diff_gain_c = data->delta_gain_code[2]; 1679 cmd.diff_gain_c = data->delta_gain_code[2];
1420 rc = iwl4965_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD, 1680 ret = iwl_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
1421 sizeof(cmd), &cmd); 1681 sizeof(cmd), &cmd);
1422 if (rc) 1682 if (ret)
1423 IWL_DEBUG_CALIB("fail sending cmd " 1683 IWL_DEBUG_CALIB("fail sending cmd "
1424 "REPLY_PHY_CALIBRATION_CMD \n"); 1684 "REPLY_PHY_CALIBRATION_CMD \n");
1425 1685
@@ -1440,10 +1700,9 @@ static void iwl4965_noise_calibration(struct iwl4965_priv *priv,
1440 return; 1700 return;
1441} 1701}
1442 1702
1443static void iwl4965_sensitivity_calibration(struct iwl4965_priv *priv, 1703static void iwl4965_sensitivity_calibration(struct iwl_priv *priv,
1444 struct iwl4965_notif_statistics *resp) 1704 struct iwl4965_notif_statistics *resp)
1445{ 1705{
1446 int rc = 0;
1447 u32 rx_enable_time; 1706 u32 rx_enable_time;
1448 u32 fa_cck; 1707 u32 fa_cck;
1449 u32 fa_ofdm; 1708 u32 fa_ofdm;
@@ -1456,10 +1715,11 @@ static void iwl4965_sensitivity_calibration(struct iwl4965_priv *priv,
1456 struct statistics_rx *statistics = &(resp->rx); 1715 struct statistics_rx *statistics = &(resp->rx);
1457 unsigned long flags; 1716 unsigned long flags;
1458 struct statistics_general_data statis; 1717 struct statistics_general_data statis;
1718 int ret;
1459 1719
1460 data = &(priv->sensitivity_data); 1720 data = &(priv->sensitivity_data);
1461 1721
1462 if (!iwl4965_is_associated(priv)) { 1722 if (!iwl_is_associated(priv)) {
1463 IWL_DEBUG_CALIB("<< - not associated\n"); 1723 IWL_DEBUG_CALIB("<< - not associated\n");
1464 return; 1724 return;
1465 } 1725 }
@@ -1540,14 +1800,14 @@ static void iwl4965_sensitivity_calibration(struct iwl4965_priv *priv,
1540 1800
1541 iwl4965_sens_auto_corr_ofdm(priv, norm_fa_ofdm, rx_enable_time); 1801 iwl4965_sens_auto_corr_ofdm(priv, norm_fa_ofdm, rx_enable_time);
1542 iwl4965_sens_energy_cck(priv, norm_fa_cck, rx_enable_time, &statis); 1802 iwl4965_sens_energy_cck(priv, norm_fa_cck, rx_enable_time, &statis);
1543 rc |= iwl4965_sensitivity_write(priv, CMD_ASYNC); 1803 ret = iwl4965_sensitivity_write(priv, CMD_ASYNC);
1544 1804
1545 return; 1805 return;
1546} 1806}
1547 1807
1548static void iwl4965_bg_sensitivity_work(struct work_struct *work) 1808static void iwl4965_bg_sensitivity_work(struct work_struct *work)
1549{ 1809{
1550 struct iwl4965_priv *priv = container_of(work, struct iwl4965_priv, 1810 struct iwl_priv *priv = container_of(work, struct iwl_priv,
1551 sensitivity_work); 1811 sensitivity_work);
1552 1812
1553 mutex_lock(&priv->mutex); 1813 mutex_lock(&priv->mutex);
@@ -1577,7 +1837,7 @@ static void iwl4965_bg_sensitivity_work(struct work_struct *work)
1577 1837
1578static void iwl4965_bg_txpower_work(struct work_struct *work) 1838static void iwl4965_bg_txpower_work(struct work_struct *work)
1579{ 1839{
1580 struct iwl4965_priv *priv = container_of(work, struct iwl4965_priv, 1840 struct iwl_priv *priv = container_of(work, struct iwl_priv,
1581 txpower_work); 1841 txpower_work);
1582 1842
1583 /* If a scan happened to start before we got here 1843 /* If a scan happened to start before we got here
@@ -1605,11 +1865,11 @@ static void iwl4965_bg_txpower_work(struct work_struct *work)
1605/* 1865/*
1606 * Acquire priv->lock before calling this function ! 1866 * Acquire priv->lock before calling this function !
1607 */ 1867 */
1608static void iwl4965_set_wr_ptrs(struct iwl4965_priv *priv, int txq_id, u32 index) 1868static void iwl4965_set_wr_ptrs(struct iwl_priv *priv, int txq_id, u32 index)
1609{ 1869{
1610 iwl4965_write_direct32(priv, HBUS_TARG_WRPTR, 1870 iwl_write_direct32(priv, HBUS_TARG_WRPTR,
1611 (index & 0xff) | (txq_id << 8)); 1871 (index & 0xff) | (txq_id << 8));
1612 iwl4965_write_prph(priv, KDR_SCD_QUEUE_RDPTR(txq_id), index); 1872 iwl_write_prph(priv, IWL49_SCD_QUEUE_RDPTR(txq_id), index);
1613} 1873}
1614 1874
1615/** 1875/**
@@ -1619,7 +1879,7 @@ static void iwl4965_set_wr_ptrs(struct iwl4965_priv *priv, int txq_id, u32 index
1619 * 1879 *
1620 * NOTE: Acquire priv->lock before calling this function ! 1880 * NOTE: Acquire priv->lock before calling this function !
1621 */ 1881 */
1622static void iwl4965_tx_queue_set_status(struct iwl4965_priv *priv, 1882static void iwl4965_tx_queue_set_status(struct iwl_priv *priv,
1623 struct iwl4965_tx_queue *txq, 1883 struct iwl4965_tx_queue *txq,
1624 int tx_fifo_id, int scd_retry) 1884 int tx_fifo_id, int scd_retry)
1625{ 1885{
@@ -1629,7 +1889,7 @@ static void iwl4965_tx_queue_set_status(struct iwl4965_priv *priv,
1629 int active = test_bit(txq_id, &priv->txq_ctx_active_msk)?1:0; 1889 int active = test_bit(txq_id, &priv->txq_ctx_active_msk)?1:0;
1630 1890
1631 /* Set up and activate */ 1891 /* Set up and activate */
1632 iwl4965_write_prph(priv, KDR_SCD_QUEUE_STATUS_BITS(txq_id), 1892 iwl_write_prph(priv, IWL49_SCD_QUEUE_STATUS_BITS(txq_id),
1633 (active << SCD_QUEUE_STTS_REG_POS_ACTIVE) | 1893 (active << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
1634 (tx_fifo_id << SCD_QUEUE_STTS_REG_POS_TXF) | 1894 (tx_fifo_id << SCD_QUEUE_STTS_REG_POS_TXF) |
1635 (scd_retry << SCD_QUEUE_STTS_REG_POS_WSL) | 1895 (scd_retry << SCD_QUEUE_STTS_REG_POS_WSL) |
@@ -1653,22 +1913,22 @@ static const u16 default_queue_to_tx_fifo[] = {
1653 IWL_TX_FIFO_HCCA_2 1913 IWL_TX_FIFO_HCCA_2
1654}; 1914};
1655 1915
1656static inline void iwl4965_txq_ctx_activate(struct iwl4965_priv *priv, int txq_id) 1916static inline void iwl4965_txq_ctx_activate(struct iwl_priv *priv, int txq_id)
1657{ 1917{
1658 set_bit(txq_id, &priv->txq_ctx_active_msk); 1918 set_bit(txq_id, &priv->txq_ctx_active_msk);
1659} 1919}
1660 1920
1661static inline void iwl4965_txq_ctx_deactivate(struct iwl4965_priv *priv, int txq_id) 1921static inline void iwl4965_txq_ctx_deactivate(struct iwl_priv *priv, int txq_id)
1662{ 1922{
1663 clear_bit(txq_id, &priv->txq_ctx_active_msk); 1923 clear_bit(txq_id, &priv->txq_ctx_active_msk);
1664} 1924}
1665 1925
1666int iwl4965_alive_notify(struct iwl4965_priv *priv) 1926int iwl4965_alive_notify(struct iwl_priv *priv)
1667{ 1927{
1668 u32 a; 1928 u32 a;
1669 int i = 0; 1929 int i = 0;
1670 unsigned long flags; 1930 unsigned long flags;
1671 int rc; 1931 int ret;
1672 1932
1673 spin_lock_irqsave(&priv->lock, flags); 1933 spin_lock_irqsave(&priv->lock, flags);
1674 1934
@@ -1681,46 +1941,46 @@ int iwl4965_alive_notify(struct iwl4965_priv *priv)
1681 priv->chain_noise_data.delta_gain_code[i] = 1941 priv->chain_noise_data.delta_gain_code[i] =
1682 CHAIN_NOISE_DELTA_GAIN_INIT_VAL; 1942 CHAIN_NOISE_DELTA_GAIN_INIT_VAL;
1683#endif /* CONFIG_IWL4965_SENSITIVITY*/ 1943#endif /* CONFIG_IWL4965_SENSITIVITY*/
1684 rc = iwl4965_grab_nic_access(priv); 1944 ret = iwl_grab_nic_access(priv);
1685 if (rc) { 1945 if (ret) {
1686 spin_unlock_irqrestore(&priv->lock, flags); 1946 spin_unlock_irqrestore(&priv->lock, flags);
1687 return rc; 1947 return ret;
1688 } 1948 }
1689 1949
1690 /* Clear 4965's internal Tx Scheduler data base */ 1950 /* Clear 4965's internal Tx Scheduler data base */
1691 priv->scd_base_addr = iwl4965_read_prph(priv, KDR_SCD_SRAM_BASE_ADDR); 1951 priv->scd_base_addr = iwl_read_prph(priv, IWL49_SCD_SRAM_BASE_ADDR);
1692 a = priv->scd_base_addr + SCD_CONTEXT_DATA_OFFSET; 1952 a = priv->scd_base_addr + SCD_CONTEXT_DATA_OFFSET;
1693 for (; a < priv->scd_base_addr + SCD_TX_STTS_BITMAP_OFFSET; a += 4) 1953 for (; a < priv->scd_base_addr + SCD_TX_STTS_BITMAP_OFFSET; a += 4)
1694 iwl4965_write_targ_mem(priv, a, 0); 1954 iwl_write_targ_mem(priv, a, 0);
1695 for (; a < priv->scd_base_addr + SCD_TRANSLATE_TBL_OFFSET; a += 4) 1955 for (; a < priv->scd_base_addr + SCD_TRANSLATE_TBL_OFFSET; a += 4)
1696 iwl4965_write_targ_mem(priv, a, 0); 1956 iwl_write_targ_mem(priv, a, 0);
1697 for (; a < sizeof(u16) * priv->hw_setting.max_txq_num; a += 4) 1957 for (; a < sizeof(u16) * priv->hw_params.max_txq_num; a += 4)
1698 iwl4965_write_targ_mem(priv, a, 0); 1958 iwl_write_targ_mem(priv, a, 0);
1699 1959
1700 /* Tel 4965 where to find Tx byte count tables */ 1960 /* Tel 4965 where to find Tx byte count tables */
1701 iwl4965_write_prph(priv, KDR_SCD_DRAM_BASE_ADDR, 1961 iwl_write_prph(priv, IWL49_SCD_DRAM_BASE_ADDR,
1702 (priv->hw_setting.shared_phys + 1962 (priv->shared_phys +
1703 offsetof(struct iwl4965_shared, queues_byte_cnt_tbls)) >> 10); 1963 offsetof(struct iwl4965_shared, queues_byte_cnt_tbls)) >> 10);
1704 1964
1705 /* Disable chain mode for all queues */ 1965 /* Disable chain mode for all queues */
1706 iwl4965_write_prph(priv, KDR_SCD_QUEUECHAIN_SEL, 0); 1966 iwl_write_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, 0);
1707 1967
1708 /* Initialize each Tx queue (including the command queue) */ 1968 /* Initialize each Tx queue (including the command queue) */
1709 for (i = 0; i < priv->hw_setting.max_txq_num; i++) { 1969 for (i = 0; i < priv->hw_params.max_txq_num; i++) {
1710 1970
1711 /* TFD circular buffer read/write indexes */ 1971 /* TFD circular buffer read/write indexes */
1712 iwl4965_write_prph(priv, KDR_SCD_QUEUE_RDPTR(i), 0); 1972 iwl_write_prph(priv, IWL49_SCD_QUEUE_RDPTR(i), 0);
1713 iwl4965_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8)); 1973 iwl_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8));
1714 1974
1715 /* Max Tx Window size for Scheduler-ACK mode */ 1975 /* Max Tx Window size for Scheduler-ACK mode */
1716 iwl4965_write_targ_mem(priv, priv->scd_base_addr + 1976 iwl_write_targ_mem(priv, priv->scd_base_addr +
1717 SCD_CONTEXT_QUEUE_OFFSET(i), 1977 SCD_CONTEXT_QUEUE_OFFSET(i),
1718 (SCD_WIN_SIZE << 1978 (SCD_WIN_SIZE <<
1719 SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) & 1979 SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
1720 SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK); 1980 SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);
1721 1981
1722 /* Frame limit */ 1982 /* Frame limit */
1723 iwl4965_write_targ_mem(priv, priv->scd_base_addr + 1983 iwl_write_targ_mem(priv, priv->scd_base_addr +
1724 SCD_CONTEXT_QUEUE_OFFSET(i) + 1984 SCD_CONTEXT_QUEUE_OFFSET(i) +
1725 sizeof(u32), 1985 sizeof(u32),
1726 (SCD_FRAME_LIMIT << 1986 (SCD_FRAME_LIMIT <<
@@ -1728,11 +1988,11 @@ int iwl4965_alive_notify(struct iwl4965_priv *priv)
1728 SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK); 1988 SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
1729 1989
1730 } 1990 }
1731 iwl4965_write_prph(priv, KDR_SCD_INTERRUPT_MASK, 1991 iwl_write_prph(priv, IWL49_SCD_INTERRUPT_MASK,
1732 (1 << priv->hw_setting.max_txq_num) - 1); 1992 (1 << priv->hw_params.max_txq_num) - 1);
1733 1993
1734 /* Activate all Tx DMA/FIFO channels */ 1994 /* Activate all Tx DMA/FIFO channels */
1735 iwl4965_write_prph(priv, KDR_SCD_TXFACT, 1995 iwl_write_prph(priv, IWL49_SCD_TXFACT,
1736 SCD_TXFACT_REG_TXFIFO_MASK(0, 7)); 1996 SCD_TXFACT_REG_TXFIFO_MASK(0, 7));
1737 1997
1738 iwl4965_set_wr_ptrs(priv, IWL_CMD_QUEUE_NUM, 0); 1998 iwl4965_set_wr_ptrs(priv, IWL_CMD_QUEUE_NUM, 0);
@@ -1744,42 +2004,47 @@ int iwl4965_alive_notify(struct iwl4965_priv *priv)
1744 iwl4965_tx_queue_set_status(priv, &priv->txq[i], ac, 0); 2004 iwl4965_tx_queue_set_status(priv, &priv->txq[i], ac, 0);
1745 } 2005 }
1746 2006
1747 iwl4965_release_nic_access(priv); 2007 iwl_release_nic_access(priv);
1748 spin_unlock_irqrestore(&priv->lock, flags); 2008 spin_unlock_irqrestore(&priv->lock, flags);
1749 2009
1750 return 0; 2010 /* Ask for statistics now, the uCode will send statistics notification
2011 * periodically after association */
2012 iwl_send_statistics_request(priv, CMD_ASYNC);
2013 return ret;
1751} 2014}
1752 2015
1753/** 2016/**
1754 * iwl4965_hw_set_hw_setting 2017 * iwl4965_hw_set_hw_params
1755 * 2018 *
1756 * Called when initializing driver 2019 * Called when initializing driver
1757 */ 2020 */
1758int iwl4965_hw_set_hw_setting(struct iwl4965_priv *priv) 2021int iwl4965_hw_set_hw_params(struct iwl_priv *priv)
1759{ 2022{
1760 /* Allocate area for Tx byte count tables and Rx queue status */
1761 priv->hw_setting.shared_virt =
1762 pci_alloc_consistent(priv->pci_dev,
1763 sizeof(struct iwl4965_shared),
1764 &priv->hw_setting.shared_phys);
1765
1766 if (!priv->hw_setting.shared_virt)
1767 return -1;
1768 2023
1769 memset(priv->hw_setting.shared_virt, 0, sizeof(struct iwl4965_shared)); 2024 if ((priv->cfg->mod_params->num_of_queues > IWL4965_MAX_NUM_QUEUES) ||
2025 (priv->cfg->mod_params->num_of_queues < IWL_MIN_NUM_QUEUES)) {
2026 IWL_ERROR("invalid queues_num, should be between %d and %d\n",
2027 IWL_MIN_NUM_QUEUES, IWL4965_MAX_NUM_QUEUES);
2028 return -EINVAL;
2029 }
1770 2030
1771 priv->hw_setting.max_txq_num = iwl4965_param_queues_num; 2031 priv->hw_params.max_txq_num = priv->cfg->mod_params->num_of_queues;
1772 priv->hw_setting.ac_queue_count = AC_NUM; 2032 priv->hw_params.tx_cmd_len = sizeof(struct iwl4965_tx_cmd);
1773 priv->hw_setting.tx_cmd_len = sizeof(struct iwl4965_tx_cmd); 2033 priv->hw_params.max_rxq_size = RX_QUEUE_SIZE;
1774 priv->hw_setting.max_rxq_size = RX_QUEUE_SIZE; 2034 priv->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
1775 priv->hw_setting.max_rxq_log = RX_QUEUE_SIZE_LOG; 2035 if (priv->cfg->mod_params->amsdu_size_8K)
1776 if (iwl4965_param_amsdu_size_8K) 2036 priv->hw_params.rx_buf_size = IWL_RX_BUF_SIZE_8K;
1777 priv->hw_setting.rx_buf_size = IWL_RX_BUF_SIZE_8K;
1778 else 2037 else
1779 priv->hw_setting.rx_buf_size = IWL_RX_BUF_SIZE_4K; 2038 priv->hw_params.rx_buf_size = IWL_RX_BUF_SIZE_4K;
1780 priv->hw_setting.max_pkt_size = priv->hw_setting.rx_buf_size - 256; 2039 priv->hw_params.max_pkt_size = priv->hw_params.rx_buf_size - 256;
1781 priv->hw_setting.max_stations = IWL4965_STATION_COUNT; 2040 priv->hw_params.max_stations = IWL4965_STATION_COUNT;
1782 priv->hw_setting.bcast_sta_id = IWL4965_BROADCAST_ID; 2041 priv->hw_params.bcast_sta_id = IWL4965_BROADCAST_ID;
2042
2043 priv->hw_params.tx_chains_num = 2;
2044 priv->hw_params.rx_chains_num = 2;
2045 priv->hw_params.valid_tx_ant = (IWL_ANTENNA_MAIN | IWL_ANTENNA_AUX);
2046 priv->hw_params.valid_rx_ant = (IWL_ANTENNA_MAIN | IWL_ANTENNA_AUX);
2047
1783 return 0; 2048 return 0;
1784} 2049}
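
The new iwl4965_hw_set_hw_params() above rejects an out-of-range num_of_queues module parameter with -EINVAL instead of silently using it. A tiny stand-alone sketch of that validate-then-commit pattern, with hypothetical bounds standing in for IWL_MIN_NUM_QUEUES and IWL4965_MAX_NUM_QUEUES:

#include <errno.h>

#define MIN_QUEUES 4	/* hypothetical lower bound */
#define MAX_QUEUES 16	/* hypothetical upper bound */

static int set_num_queues(int requested, int *out)
{
	if (requested < MIN_QUEUES || requested > MAX_QUEUES)
		return -EINVAL;	/* refuse rather than clamp silently */

	*out = requested;
	return 0;
}

Failing loudly here keeps a bad module option from propagating into queue allocation later in initialization.
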
1785 2050
@@ -1788,12 +2053,12 @@ int iwl4965_hw_set_hw_setting(struct iwl4965_priv *priv)
1788 * 2053 *
1789 * Destroy all TX DMA queues and structures 2054 * Destroy all TX DMA queues and structures
1790 */ 2055 */
1791void iwl4965_hw_txq_ctx_free(struct iwl4965_priv *priv) 2056void iwl4965_hw_txq_ctx_free(struct iwl_priv *priv)
1792{ 2057{
1793 int txq_id; 2058 int txq_id;
1794 2059
1795 /* Tx queues */ 2060 /* Tx queues */
1796 for (txq_id = 0; txq_id < priv->hw_setting.max_txq_num; txq_id++) 2061 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
1797 iwl4965_tx_queue_free(priv, &priv->txq[txq_id]); 2062 iwl4965_tx_queue_free(priv, &priv->txq[txq_id]);
1798 2063
1799 /* Keep-warm buffer */ 2064 /* Keep-warm buffer */
@@ -1806,7 +2071,7 @@ void iwl4965_hw_txq_ctx_free(struct iwl4965_priv *priv)
1806 * Does NOT advance any TFD circular buffer read/write indexes 2071 * Does NOT advance any TFD circular buffer read/write indexes
1807 * Does NOT free the TFD itself (which is within circular buffer) 2072 * Does NOT free the TFD itself (which is within circular buffer)
1808 */ 2073 */
1809int iwl4965_hw_txq_free_tfd(struct iwl4965_priv *priv, struct iwl4965_tx_queue *txq) 2074int iwl4965_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl4965_tx_queue *txq)
1810{ 2075{
1811 struct iwl4965_tfd_frame *bd_tmp = (struct iwl4965_tfd_frame *)&txq->bd[0]; 2076 struct iwl4965_tfd_frame *bd_tmp = (struct iwl4965_tfd_frame *)&txq->bd[0];
1812 struct iwl4965_tfd_frame *bd = &bd_tmp[txq->q.read_ptr]; 2077 struct iwl4965_tfd_frame *bd = &bd_tmp[txq->q.read_ptr];
@@ -1859,7 +2124,7 @@ int iwl4965_hw_txq_free_tfd(struct iwl4965_priv *priv, struct iwl4965_tx_queue *
1859 return 0; 2124 return 0;
1860} 2125}
1861 2126
1862int iwl4965_hw_reg_set_txpower(struct iwl4965_priv *priv, s8 power) 2127int iwl4965_hw_reg_set_txpower(struct iwl_priv *priv, s8 power)
1863{ 2128{
1864 IWL_ERROR("TODO: Implement iwl4965_hw_reg_set_txpower!\n"); 2129 IWL_ERROR("TODO: Implement iwl4965_hw_reg_set_txpower!\n");
1865 return -EINVAL; 2130 return -EINVAL;
@@ -1914,12 +2179,13 @@ static s32 iwl4965_get_voltage_compensation(s32 eeprom_voltage,
1914 return comp; 2179 return comp;
1915} 2180}
1916 2181
1917static const struct iwl4965_channel_info * 2182static const struct iwl_channel_info *
1918iwl4965_get_channel_txpower_info(struct iwl4965_priv *priv, u8 phymode, u16 channel) 2183iwl4965_get_channel_txpower_info(struct iwl_priv *priv,
2184 enum ieee80211_band band, u16 channel)
1919{ 2185{
1920 const struct iwl4965_channel_info *ch_info; 2186 const struct iwl_channel_info *ch_info;
1921 2187
1922 ch_info = iwl4965_get_channel_info(priv, phymode, channel); 2188 ch_info = iwl_get_channel_info(priv, band, channel);
1923 2189
1924 if (!is_channel_valid(ch_info)) 2190 if (!is_channel_valid(ch_info))
1925 return NULL; 2191 return NULL;
@@ -1953,7 +2219,7 @@ static s32 iwl4965_get_tx_atten_grp(u16 channel)
1953 return -1; 2219 return -1;
1954} 2220}
1955 2221
1956static u32 iwl4965_get_sub_band(const struct iwl4965_priv *priv, u32 channel) 2222static u32 iwl4965_get_sub_band(const struct iwl_priv *priv, u32 channel)
1957{ 2223{
1958 s32 b = -1; 2224 s32 b = -1;
1959 2225
@@ -1989,7 +2255,7 @@ static s32 iwl4965_interpolate_value(s32 x, s32 x1, s32 y1, s32 x2, s32 y2)
1989 * differences in channel frequencies, which is proportional to differences 2255 * differences in channel frequencies, which is proportional to differences
1990 * in channel number. 2256 * in channel number.
1991 */ 2257 */
1992static int iwl4965_interpolate_chan(struct iwl4965_priv *priv, u32 channel, 2258static int iwl4965_interpolate_chan(struct iwl_priv *priv, u32 channel,
1993 struct iwl4965_eeprom_calib_ch_info *chan_info) 2259 struct iwl4965_eeprom_calib_ch_info *chan_info)
1994{ 2260{
1995 s32 s = -1; 2261 s32 s = -1;
@@ -2322,7 +2588,7 @@ static const struct gain_entry gain_table[2][108] = {
2322 } 2588 }
2323}; 2589};
2324 2590
2325static int iwl4965_fill_txpower_tbl(struct iwl4965_priv *priv, u8 band, u16 channel, 2591static int iwl4965_fill_txpower_tbl(struct iwl_priv *priv, u8 band, u16 channel,
2326 u8 is_fat, u8 ctrl_chan_high, 2592 u8 is_fat, u8 ctrl_chan_high,
2327 struct iwl4965_tx_power_db *tx_power_tbl) 2593 struct iwl4965_tx_power_db *tx_power_tbl)
2328{ 2594{
@@ -2336,7 +2602,7 @@ static int iwl4965_fill_txpower_tbl(struct iwl4965_priv *priv, u8 band, u16 chan
2336 s32 txatten_grp = CALIB_CH_GROUP_MAX; 2602 s32 txatten_grp = CALIB_CH_GROUP_MAX;
2337 int i; 2603 int i;
2338 int c; 2604 int c;
2339 const struct iwl4965_channel_info *ch_info = NULL; 2605 const struct iwl_channel_info *ch_info = NULL;
2340 struct iwl4965_eeprom_calib_ch_info ch_eeprom_info; 2606 struct iwl4965_eeprom_calib_ch_info ch_eeprom_info;
2341 const struct iwl4965_eeprom_calib_measure *measurement; 2607 const struct iwl4965_eeprom_calib_measure *measurement;
2342 s16 voltage; 2608 s16 voltage;
@@ -2368,7 +2634,7 @@ static int iwl4965_fill_txpower_tbl(struct iwl4965_priv *priv, u8 band, u16 chan
2368 2634
2369 /* Get current (RXON) channel, band, width */ 2635 /* Get current (RXON) channel, band, width */
2370 ch_info = 2636 ch_info =
2371 iwl4965_get_channel_txpower_info(priv, priv->phymode, channel); 2637 iwl4965_get_channel_txpower_info(priv, priv->band, channel);
2372 2638
2373 IWL_DEBUG_TXPOWER("chan %d band %d is_fat %d\n", channel, band, 2639 IWL_DEBUG_TXPOWER("chan %d band %d is_fat %d\n", channel, band,
2374 is_fat); 2640 is_fat);
@@ -2579,10 +2845,10 @@ static int iwl4965_fill_txpower_tbl(struct iwl4965_priv *priv, u8 band, u16 chan
2579 * Uses the active RXON for channel, band, and characteristics (fat, high) 2845 * Uses the active RXON for channel, band, and characteristics (fat, high)
2580 * The power limit is taken from priv->user_txpower_limit. 2846 * The power limit is taken from priv->user_txpower_limit.
2581 */ 2847 */
2582int iwl4965_hw_reg_send_txpower(struct iwl4965_priv *priv) 2848int iwl4965_hw_reg_send_txpower(struct iwl_priv *priv)
2583{ 2849{
2584 struct iwl4965_txpowertable_cmd cmd = { 0 }; 2850 struct iwl4965_txpowertable_cmd cmd = { 0 };
2585 int rc = 0; 2851 int ret;
2586 u8 band = 0; 2852 u8 band = 0;
2587 u8 is_fat = 0; 2853 u8 is_fat = 0;
2588 u8 ctrl_chan_high = 0; 2854 u8 ctrl_chan_high = 0;
@@ -2595,8 +2861,7 @@ int iwl4965_hw_reg_send_txpower(struct iwl4965_priv *priv)
2595 return -EAGAIN; 2861 return -EAGAIN;
2596 } 2862 }
2597 2863
2598 band = ((priv->phymode == MODE_IEEE80211B) || 2864 band = priv->band == IEEE80211_BAND_2GHZ;
2599 (priv->phymode == MODE_IEEE80211G));
2600 2865
2601 is_fat = is_fat_channel(priv->active_rxon.flags); 2866 is_fat = is_fat_channel(priv->active_rxon.flags);
2602 2867
@@ -2607,29 +2872,70 @@ int iwl4965_hw_reg_send_txpower(struct iwl4965_priv *priv)
2607 cmd.band = band; 2872 cmd.band = band;
2608 cmd.channel = priv->active_rxon.channel; 2873 cmd.channel = priv->active_rxon.channel;
2609 2874
2610 rc = iwl4965_fill_txpower_tbl(priv, band, 2875 ret = iwl4965_fill_txpower_tbl(priv, band,
2611 le16_to_cpu(priv->active_rxon.channel), 2876 le16_to_cpu(priv->active_rxon.channel),
2612 is_fat, ctrl_chan_high, &cmd.tx_power); 2877 is_fat, ctrl_chan_high, &cmd.tx_power);
2613 if (rc) 2878 if (ret)
2614 return rc; 2879 goto out;
2615 2880
2616 rc = iwl4965_send_cmd_pdu(priv, REPLY_TX_PWR_TABLE_CMD, sizeof(cmd), &cmd); 2881 ret = iwl_send_cmd_pdu(priv, REPLY_TX_PWR_TABLE_CMD, sizeof(cmd), &cmd);
2617 return rc; 2882
2883out:
2884 return ret;
2618} 2885}
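
The hunk above folds the two MODE_IEEE80211B/G comparisons into a single test against the mac80211 band enum; the txpower command still carries a one-byte band flag that is 1 on 2.4 GHz and 0 on 5 GHz. A minimal user-space sketch of that mapping (the enum names here are illustrative stand-ins, not the kernel's):

#include <stdio.h>

/* Illustrative stand-ins for the mac80211 band enum. */
enum band { BAND_2GHZ, BAND_5GHZ };

/* The command's band flag: 1 for 2.4 GHz (B/G), 0 for 5 GHz (A). */
static unsigned char cmd_band_flag(enum band b)
{
	return b == BAND_2GHZ;
}

int main(void)
{
	printf("2.4 GHz -> %u, 5 GHz -> %u\n",
	       cmd_band_flag(BAND_2GHZ), cmd_band_flag(BAND_5GHZ));
	return 0;
}
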
2619 2886
2620int iwl4965_hw_channel_switch(struct iwl4965_priv *priv, u16 channel) 2887static int iwl4965_send_rxon_assoc(struct iwl_priv *priv)
2888{
2889 int ret = 0;
2890 struct iwl4965_rxon_assoc_cmd rxon_assoc;
2891 const struct iwl4965_rxon_cmd *rxon1 = &priv->staging_rxon;
2892 const struct iwl4965_rxon_cmd *rxon2 = &priv->active_rxon;
2893
2894 if ((rxon1->flags == rxon2->flags) &&
2895 (rxon1->filter_flags == rxon2->filter_flags) &&
2896 (rxon1->cck_basic_rates == rxon2->cck_basic_rates) &&
2897 (rxon1->ofdm_ht_single_stream_basic_rates ==
2898 rxon2->ofdm_ht_single_stream_basic_rates) &&
2899 (rxon1->ofdm_ht_dual_stream_basic_rates ==
2900 rxon2->ofdm_ht_dual_stream_basic_rates) &&
2901 (rxon1->rx_chain == rxon2->rx_chain) &&
2902 (rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) {
2903 IWL_DEBUG_INFO("Using current RXON_ASSOC. Not resending.\n");
2904 return 0;
2905 }
2906
2907 rxon_assoc.flags = priv->staging_rxon.flags;
2908 rxon_assoc.filter_flags = priv->staging_rxon.filter_flags;
2909 rxon_assoc.ofdm_basic_rates = priv->staging_rxon.ofdm_basic_rates;
2910 rxon_assoc.cck_basic_rates = priv->staging_rxon.cck_basic_rates;
2911 rxon_assoc.reserved = 0;
2912 rxon_assoc.ofdm_ht_single_stream_basic_rates =
2913 priv->staging_rxon.ofdm_ht_single_stream_basic_rates;
2914 rxon_assoc.ofdm_ht_dual_stream_basic_rates =
2915 priv->staging_rxon.ofdm_ht_dual_stream_basic_rates;
2916 rxon_assoc.rx_chain_select_flags = priv->staging_rxon.rx_chain;
2917
2918 ret = iwl_send_cmd_pdu_async(priv, REPLY_RXON_ASSOC,
2919 sizeof(rxon_assoc), &rxon_assoc, NULL);
2920 if (ret)
2921 return ret;
2922
2923 return ret;
2924}
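
The new iwl4965_send_rxon_assoc() above touches only the fields REPLY_RXON_ASSOC can change and skips the command entirely when the staging copy already matches the active one. A single-threaded user-space model of that "diff the two configs, only send on change" pattern (struct and field names are invented for the sketch):

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

/* Hypothetical subset of an RXON-like configuration. */
struct rxon_cfg {
	uint32_t flags;
	uint32_t filter_flags;
	uint8_t  cck_basic_rates;
	uint8_t  ofdm_basic_rates;
	uint16_t rx_chain;
};

/* Pretend "send to firmware": here we just log it. */
static int send_assoc_cmd(const struct rxon_cfg *c)
{
	printf("sending RXON_ASSOC: flags=0x%x rx_chain=0x%x\n",
	       c->flags, c->rx_chain);
	return 0;
}

/* Send only if an association-relevant field actually differs;
 * compare field by field rather than with memcmp() so struct
 * padding cannot cause spurious resends. */
static int update_assoc(const struct rxon_cfg *staging,
			const struct rxon_cfg *active)
{
	bool same = staging->flags == active->flags &&
		    staging->filter_flags == active->filter_flags &&
		    staging->cck_basic_rates == active->cck_basic_rates &&
		    staging->ofdm_basic_rates == active->ofdm_basic_rates &&
		    staging->rx_chain == active->rx_chain;

	if (same) {
		printf("RXON_ASSOC unchanged, not resending\n");
		return 0;
	}
	return send_assoc_cmd(staging);
}

int main(void)
{
	struct rxon_cfg active  = { 0x1, 0x0, 0x0f, 0xff, 0x7 };
	struct rxon_cfg staging = active;

	update_assoc(&staging, &active);   /* no-op */
	staging.rx_chain = 0x3;
	update_assoc(&staging, &active);   /* resends */
	return 0;
}
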
2925
2926
2927int iwl4965_hw_channel_switch(struct iwl_priv *priv, u16 channel)
2621{ 2928{
2622 int rc; 2929 int rc;
2623 u8 band = 0; 2930 u8 band = 0;
2624 u8 is_fat = 0; 2931 u8 is_fat = 0;
2625 u8 ctrl_chan_high = 0; 2932 u8 ctrl_chan_high = 0;
2626 struct iwl4965_channel_switch_cmd cmd = { 0 }; 2933 struct iwl4965_channel_switch_cmd cmd = { 0 };
2627 const struct iwl4965_channel_info *ch_info; 2934 const struct iwl_channel_info *ch_info;
2628 2935
2629 band = ((priv->phymode == MODE_IEEE80211B) || 2936 band = priv->band == IEEE80211_BAND_2GHZ;
2630 (priv->phymode == MODE_IEEE80211G));
2631 2937
2632 ch_info = iwl4965_get_channel_info(priv, priv->phymode, channel); 2938 ch_info = iwl_get_channel_info(priv, priv->band, channel);
2633 2939
2634 is_fat = is_fat_channel(priv->staging_rxon.flags); 2940 is_fat = is_fat_channel(priv->staging_rxon.flags);
2635 2941
@@ -2655,15 +2961,15 @@ int iwl4965_hw_channel_switch(struct iwl4965_priv *priv, u16 channel)
2655 return rc; 2961 return rc;
2656 } 2962 }
2657 2963
2658 rc = iwl4965_send_cmd_pdu(priv, REPLY_CHANNEL_SWITCH, sizeof(cmd), &cmd); 2964 rc = iwl_send_cmd_pdu(priv, REPLY_CHANNEL_SWITCH, sizeof(cmd), &cmd);
2659 return rc; 2965 return rc;
2660} 2966}
2661 2967
2662#define RTS_HCCA_RETRY_LIMIT 3 2968#define RTS_HCCA_RETRY_LIMIT 3
2663#define RTS_DFAULT_RETRY_LIMIT 60 2969#define RTS_DFAULT_RETRY_LIMIT 60
2664 2970
2665void iwl4965_hw_build_tx_cmd_rate(struct iwl4965_priv *priv, 2971void iwl4965_hw_build_tx_cmd_rate(struct iwl_priv *priv,
2666 struct iwl4965_cmd *cmd, 2972 struct iwl_cmd *cmd,
2667 struct ieee80211_tx_control *ctrl, 2973 struct ieee80211_tx_control *ctrl,
2668 struct ieee80211_hdr *hdr, int sta_id, 2974 struct ieee80211_hdr *hdr, int sta_id,
2669 int is_hcca) 2975 int is_hcca)
@@ -2674,7 +2980,7 @@ void iwl4965_hw_build_tx_cmd_rate(struct iwl4965_priv *priv,
2674 u16 fc = le16_to_cpu(hdr->frame_control); 2980 u16 fc = le16_to_cpu(hdr->frame_control);
2675 u8 rate_plcp; 2981 u8 rate_plcp;
2676 u16 rate_flags = 0; 2982 u16 rate_flags = 0;
2677 int rate_idx = min(ctrl->tx_rate & 0xffff, IWL_RATE_COUNT - 1); 2983 int rate_idx = min(ctrl->tx_rate->hw_value & 0xffff, IWL_RATE_COUNT - 1);
2678 2984
2679 rate_plcp = iwl4965_rates[rate_idx].plcp; 2985 rate_plcp = iwl4965_rates[rate_idx].plcp;
2680 2986
@@ -2729,19 +3035,18 @@ void iwl4965_hw_build_tx_cmd_rate(struct iwl4965_priv *priv,
2729 tx->rate_n_flags = iwl4965_hw_set_rate_n_flags(rate_plcp, rate_flags); 3035 tx->rate_n_flags = iwl4965_hw_set_rate_n_flags(rate_plcp, rate_flags);
2730} 3036}
2731 3037
2732int iwl4965_hw_get_rx_read(struct iwl4965_priv *priv) 3038int iwl4965_hw_get_rx_read(struct iwl_priv *priv)
2733{ 3039{
2734 struct iwl4965_shared *shared_data = priv->hw_setting.shared_virt; 3040 struct iwl4965_shared *s = priv->shared_virt;
2735 3041 return le32_to_cpu(s->rb_closed) & 0xFFF;
2736 return IWL_GET_BITS(*shared_data, rb_closed_stts_rb_num);
2737} 3042}
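
The rewrite above drops the IWL_GET_BITS() accessor and reads the device-written rb_closed word directly: convert from the little-endian layout the hardware DMAs into host memory, then keep the low 12 bits holding the closed-receive-buffer index (the width follows from the 0xFFF mask in the hunk). A user-space sketch of that read, with an invented buffer layout:

#include <stdio.h>
#include <stdint.h>

/* Assemble a host-order value from a little-endian 32-bit field,
 * the way le32_to_cpu() does regardless of host endianness. */
static uint32_t get_le32(const uint8_t *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

int main(void)
{
	/* Pretend this is the shared page the device DMAs into;
	 * the value 0x00000A42 stored little-endian. */
	uint8_t shared_rb_closed[4] = { 0x42, 0x0A, 0x00, 0x00 };

	uint32_t rb_closed = get_le32(shared_rb_closed);
	unsigned int index = rb_closed & 0xFFF;  /* low 12 bits = ring index */

	printf("rb_closed=0x%08x -> rx read index %u\n", rb_closed, index);
	return 0;
}
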
2738 3043
2739int iwl4965_hw_get_temperature(struct iwl4965_priv *priv) 3044int iwl4965_hw_get_temperature(struct iwl_priv *priv)
2740{ 3045{
2741 return priv->temperature; 3046 return priv->temperature;
2742} 3047}
2743 3048
2744unsigned int iwl4965_hw_get_beacon_cmd(struct iwl4965_priv *priv, 3049unsigned int iwl4965_hw_get_beacon_cmd(struct iwl_priv *priv,
2745 struct iwl4965_frame *frame, u8 rate) 3050 struct iwl4965_frame *frame, u8 rate)
2746{ 3051{
2747 struct iwl4965_tx_beacon_cmd *tx_beacon_cmd; 3052 struct iwl4965_tx_beacon_cmd *tx_beacon_cmd;
@@ -2750,7 +3055,7 @@ unsigned int iwl4965_hw_get_beacon_cmd(struct iwl4965_priv *priv,
2750 tx_beacon_cmd = &frame->u.beacon; 3055 tx_beacon_cmd = &frame->u.beacon;
2751 memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd)); 3056 memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd));
2752 3057
2753 tx_beacon_cmd->tx.sta_id = IWL4965_BROADCAST_ID; 3058 tx_beacon_cmd->tx.sta_id = priv->hw_params.bcast_sta_id;
2754 tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; 3059 tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
2755 3060
2756 frame_size = iwl4965_fill_beacon_frame(priv, 3061 frame_size = iwl4965_fill_beacon_frame(priv,
@@ -2780,35 +3085,35 @@ unsigned int iwl4965_hw_get_beacon_cmd(struct iwl4965_priv *priv,
2780 * 4965 supports up to 16 Tx queues in DRAM, mapped to up to 8 Tx DMA 3085 * 4965 supports up to 16 Tx queues in DRAM, mapped to up to 8 Tx DMA
2781 * channels supported in hardware. 3086 * channels supported in hardware.
2782 */ 3087 */
2783int iwl4965_hw_tx_queue_init(struct iwl4965_priv *priv, struct iwl4965_tx_queue *txq) 3088int iwl4965_hw_tx_queue_init(struct iwl_priv *priv, struct iwl4965_tx_queue *txq)
2784{ 3089{
2785 int rc; 3090 int rc;
2786 unsigned long flags; 3091 unsigned long flags;
2787 int txq_id = txq->q.id; 3092 int txq_id = txq->q.id;
2788 3093
2789 spin_lock_irqsave(&priv->lock, flags); 3094 spin_lock_irqsave(&priv->lock, flags);
2790 rc = iwl4965_grab_nic_access(priv); 3095 rc = iwl_grab_nic_access(priv);
2791 if (rc) { 3096 if (rc) {
2792 spin_unlock_irqrestore(&priv->lock, flags); 3097 spin_unlock_irqrestore(&priv->lock, flags);
2793 return rc; 3098 return rc;
2794 } 3099 }
2795 3100
2796 /* Circular buffer (TFD queue in DRAM) physical base address */ 3101 /* Circular buffer (TFD queue in DRAM) physical base address */
2797 iwl4965_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id), 3102 iwl_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id),
2798 txq->q.dma_addr >> 8); 3103 txq->q.dma_addr >> 8);
2799 3104
2800 /* Enable DMA channel, using same id as for TFD queue */ 3105 /* Enable DMA channel, using same id as for TFD queue */
2801 iwl4965_write_direct32( 3106 iwl_write_direct32(
2802 priv, IWL_FH_TCSR_CHNL_TX_CONFIG_REG(txq_id), 3107 priv, IWL_FH_TCSR_CHNL_TX_CONFIG_REG(txq_id),
2803 IWL_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE | 3108 IWL_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
2804 IWL_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL); 3109 IWL_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL);
2805 iwl4965_release_nic_access(priv); 3110 iwl_release_nic_access(priv);
2806 spin_unlock_irqrestore(&priv->lock, flags); 3111 spin_unlock_irqrestore(&priv->lock, flags);
2807 3112
2808 return 0; 3113 return 0;
2809} 3114}
2810 3115
2811int iwl4965_hw_txq_attach_buf_to_tfd(struct iwl4965_priv *priv, void *ptr, 3116int iwl4965_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv, void *ptr,
2812 dma_addr_t addr, u16 len) 3117 dma_addr_t addr, u16 len)
2813{ 3118{
2814 int index, is_odd; 3119 int index, is_odd;
@@ -2842,7 +3147,7 @@ int iwl4965_hw_txq_attach_buf_to_tfd(struct iwl4965_priv *priv, void *ptr,
2842 return 0; 3147 return 0;
2843} 3148}
2844 3149
2845static void iwl4965_hw_card_show_info(struct iwl4965_priv *priv) 3150static void iwl4965_hw_card_show_info(struct iwl_priv *priv)
2846{ 3151{
2847 u16 hw_version = priv->eeprom.board_revision_4965; 3152 u16 hw_version = priv->eeprom.board_revision_4965;
2848 3153
@@ -2858,17 +3163,15 @@ static void iwl4965_hw_card_show_info(struct iwl4965_priv *priv)
2858#define IWL_TX_DELIMITER_SIZE 4 3163#define IWL_TX_DELIMITER_SIZE 4
2859 3164
2860/** 3165/**
2861 * iwl4965_tx_queue_update_wr_ptr - Set up entry in Tx byte-count array 3166 * iwl4965_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
2862 */ 3167 */
2863int iwl4965_tx_queue_update_wr_ptr(struct iwl4965_priv *priv, 3168static void iwl4965_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
2864 struct iwl4965_tx_queue *txq, u16 byte_cnt) 3169 struct iwl4965_tx_queue *txq,
3170 u16 byte_cnt)
2865{ 3171{
2866 int len; 3172 int len;
2867 int txq_id = txq->q.id; 3173 int txq_id = txq->q.id;
2868 struct iwl4965_shared *shared_data = priv->hw_setting.shared_virt; 3174 struct iwl4965_shared *shared_data = priv->shared_virt;
2869
2870 if (txq->need_update == 0)
2871 return 0;
2872 3175
2873 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE; 3176 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
2874 3177
@@ -2881,8 +3184,6 @@ int iwl4965_tx_queue_update_wr_ptr(struct iwl4965_priv *priv,
2881 IWL_SET_BITS16(shared_data->queues_byte_cnt_tbls[txq_id]. 3184 IWL_SET_BITS16(shared_data->queues_byte_cnt_tbls[txq_id].
2882 tfd_offset[IWL4965_QUEUE_SIZE + txq->q.write_ptr], 3185 tfd_offset[IWL4965_QUEUE_SIZE + txq->q.write_ptr],
2883 byte_cnt, len); 3186 byte_cnt, len);
2884
2885 return 0;
2886} 3187}
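
The byte-count hunk above stores the frame length (plus CRC and delimiter overhead) not only at the queue's write pointer but also at IWL4965_QUEUE_SIZE + write_ptr. A plausible reading — the surrounding context is elided in this hunk — is that the table mirrors its first entries past the end of the ring so the scheduler can read a fixed-size window without wrap-around logic. A user-space model of such a mirrored ring table, with made-up sizes:

#include <stdio.h>
#include <stdint.h>

#define QUEUE_SIZE  256   /* ring entries (illustrative) */
#define WINDOW      64    /* how far a reader may look past the end */

static uint16_t byte_cnt_tbl[QUEUE_SIZE + WINDOW];

/* Record the byte count for the slot at write_ptr; mirror the first
 * WINDOW slots beyond the ring end so any WINDOW-long read is linear. */
static void update_byte_cnt(unsigned int write_ptr, uint16_t len)
{
	byte_cnt_tbl[write_ptr] = len;
	if (write_ptr < WINDOW)
		byte_cnt_tbl[QUEUE_SIZE + write_ptr] = len;
}

int main(void)
{
	update_byte_cnt(3, 1536);

	/* A reader starting near the end of the ring can scan a
	 * contiguous window and still see slot 3's value. */
	printf("slot 3 = %u, mirrored slot %u = %u\n",
	       byte_cnt_tbl[3], QUEUE_SIZE + 3, byte_cnt_tbl[QUEUE_SIZE + 3]);
	return 0;
}
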
2887 3188
2888/** 3189/**
@@ -2891,7 +3192,7 @@ int iwl4965_tx_queue_update_wr_ptr(struct iwl4965_priv *priv,
2891 * Selects how many and which Rx receivers/antennas/chains to use. 3192 * Selects how many and which Rx receivers/antennas/chains to use.
2892 * This should not be used for scan command ... it puts data in wrong place. 3193 * This should not be used for scan command ... it puts data in wrong place.
2893 */ 3194 */
2894void iwl4965_set_rxon_chain(struct iwl4965_priv *priv) 3195void iwl4965_set_rxon_chain(struct iwl_priv *priv)
2895{ 3196{
2896 u8 is_single = is_single_stream(priv); 3197 u8 is_single = is_single_stream(priv);
2897 u8 idle_state, rx_state; 3198 u8 idle_state, rx_state;
@@ -2922,378 +3223,6 @@ void iwl4965_set_rxon_chain(struct iwl4965_priv *priv)
2922 IWL_DEBUG_ASSOC("rx chain %X\n", priv->staging_rxon.rx_chain); 3223 IWL_DEBUG_ASSOC("rx chain %X\n", priv->staging_rxon.rx_chain);
2923} 3224}
2924 3225
2925#ifdef CONFIG_IWL4965_HT
2926#ifdef CONFIG_IWL4965_HT_AGG
2927/*
2928 get the traffic load value for tid
2929*/
2930static u32 iwl4965_tl_get_load(struct iwl4965_priv *priv, u8 tid)
2931{
2932 u32 load = 0;
2933 u32 current_time = jiffies_to_msecs(jiffies);
2934 u32 time_diff;
2935 s32 index;
2936 unsigned long flags;
2937 struct iwl4965_traffic_load *tid_ptr = NULL;
2938
2939 if (tid >= TID_MAX_LOAD_COUNT)
2940 return 0;
2941
2942 tid_ptr = &(priv->lq_mngr.agg_ctrl.traffic_load[tid]);
2943
2944 current_time -= current_time % TID_ROUND_VALUE;
2945
2946 spin_lock_irqsave(&priv->lq_mngr.lock, flags);
2947 if (!(tid_ptr->queue_count))
2948 goto out;
2949
2950 time_diff = TIME_WRAP_AROUND(tid_ptr->time_stamp, current_time);
2951 index = time_diff / TID_QUEUE_CELL_SPACING;
2952
2953 if (index >= TID_QUEUE_MAX_SIZE) {
2954 u32 oldest_time = current_time - TID_MAX_TIME_DIFF;
2955
2956 while (tid_ptr->queue_count &&
2957 (tid_ptr->time_stamp < oldest_time)) {
2958 tid_ptr->total -= tid_ptr->packet_count[tid_ptr->head];
2959 tid_ptr->packet_count[tid_ptr->head] = 0;
2960 tid_ptr->time_stamp += TID_QUEUE_CELL_SPACING;
2961 tid_ptr->queue_count--;
2962 tid_ptr->head++;
2963 if (tid_ptr->head >= TID_QUEUE_MAX_SIZE)
2964 tid_ptr->head = 0;
2965 }
2966 }
2967 load = tid_ptr->total;
2968
2969 out:
2970 spin_unlock_irqrestore(&priv->lq_mngr.lock, flags);
2971 return load;
2972}
2973
2974/*
2975 increment traffic load value for tid and also remove
2976 any old values once past a certain time period
2977*/
2978static void iwl4965_tl_add_packet(struct iwl4965_priv *priv, u8 tid)
2979{
2980 u32 current_time = jiffies_to_msecs(jiffies);
2981 u32 time_diff;
2982 s32 index;
2983 unsigned long flags;
2984 struct iwl4965_traffic_load *tid_ptr = NULL;
2985
2986 if (tid >= TID_MAX_LOAD_COUNT)
2987 return;
2988
2989 tid_ptr = &(priv->lq_mngr.agg_ctrl.traffic_load[tid]);
2990
2991 current_time -= current_time % TID_ROUND_VALUE;
2992
2993 spin_lock_irqsave(&priv->lq_mngr.lock, flags);
2994 if (!(tid_ptr->queue_count)) {
2995 tid_ptr->total = 1;
2996 tid_ptr->time_stamp = current_time;
2997 tid_ptr->queue_count = 1;
2998 tid_ptr->head = 0;
2999 tid_ptr->packet_count[0] = 1;
3000 goto out;
3001 }
3002
3003 time_diff = TIME_WRAP_AROUND(tid_ptr->time_stamp, current_time);
3004 index = time_diff / TID_QUEUE_CELL_SPACING;
3005
3006 if (index >= TID_QUEUE_MAX_SIZE) {
3007 u32 oldest_time = current_time - TID_MAX_TIME_DIFF;
3008
3009 while (tid_ptr->queue_count &&
3010 (tid_ptr->time_stamp < oldest_time)) {
3011 tid_ptr->total -= tid_ptr->packet_count[tid_ptr->head];
3012 tid_ptr->packet_count[tid_ptr->head] = 0;
3013 tid_ptr->time_stamp += TID_QUEUE_CELL_SPACING;
3014 tid_ptr->queue_count--;
3015 tid_ptr->head++;
3016 if (tid_ptr->head >= TID_QUEUE_MAX_SIZE)
3017 tid_ptr->head = 0;
3018 }
3019 }
3020
3021 index = (tid_ptr->head + index) % TID_QUEUE_MAX_SIZE;
3022 tid_ptr->packet_count[index] = tid_ptr->packet_count[index] + 1;
3023 tid_ptr->total = tid_ptr->total + 1;
3024
3025 if ((index + 1) > tid_ptr->queue_count)
3026 tid_ptr->queue_count = index + 1;
3027 out:
3028 spin_unlock_irqrestore(&priv->lq_mngr.lock, flags);
3029
3030}
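
iwl4965_tl_get_load()/iwl4965_tl_add_packet() above (removed by this commit) keep a per-TID packet count over a sliding time window: time is rounded to fixed-width cells, each cell gets a bucket in a small circular array, and buckets older than the window are evicted from the head before new counts are added. A single-threaded user-space model of the same bookkeeping — cell width and bucket count are arbitrary here, the driver's spinlock is omitted, and timestamps are assumed to be monotonically increasing:

#include <stdio.h>

#define CELL_MS    500              /* width of one time bucket */
#define NUM_CELLS  20               /* circular buffer of buckets */

struct tid_load {
	unsigned int  packets[NUM_CELLS];
	unsigned int  queue_count;    /* buckets currently in use */
	unsigned int  head;           /* index of the oldest bucket */
	unsigned long window_start;   /* timestamp of the oldest bucket (ms) */
	unsigned long total;          /* packets summed over live buckets */
};

/* Drop buckets that fell out of the window ending at 'now'. */
static void tid_load_expire(struct tid_load *tl, unsigned long now)
{
	unsigned long span = (unsigned long)CELL_MS * (NUM_CELLS - 1);
	unsigned long oldest = now > span ? now - span : 0;

	while (tl->queue_count && tl->window_start < oldest) {
		tl->total -= tl->packets[tl->head];
		tl->packets[tl->head] = 0;
		tl->head = (tl->head + 1) % NUM_CELLS;
		tl->window_start += CELL_MS;
		tl->queue_count--;
	}
}

static void tid_load_add_packet(struct tid_load *tl, unsigned long now_ms)
{
	unsigned long now = now_ms - now_ms % CELL_MS;  /* round to a cell */
	unsigned long off;

	if (!tl->queue_count) {
		tl->window_start = now;
		tl->head = 0;
		tl->queue_count = 1;
		tl->packets[0] = 1;
		tl->total = 1;
		return;
	}

	tid_load_expire(tl, now);
	if (!tl->queue_count) {           /* window emptied out: restart it */
		tl->window_start = now;
		tl->head = 0;
	}

	off = (now - tl->window_start) / CELL_MS;       /* 0 .. NUM_CELLS-1 */
	tl->packets[(tl->head + off) % NUM_CELLS]++;
	tl->total++;
	if (off + 1 > tl->queue_count)
		tl->queue_count = off + 1;
}

int main(void)
{
	struct tid_load tl = { { 0 }, 0, 0, 0, 0 };
	unsigned long t;

	for (t = 0; t < 20000; t += 700)  /* one packet every 700 ms */
		tid_load_add_packet(&tl, t);

	printf("packets seen in the last %u ms: %lu\n",
	       CELL_MS * NUM_CELLS, tl.total);
	return 0;
}
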
3031
3032#define MMAC_SCHED_MAX_NUMBER_OF_HT_BACK_FLOWS 7
3033enum HT_STATUS {
3034 BA_STATUS_FAILURE = 0,
3035 BA_STATUS_INITIATOR_DELBA,
3036 BA_STATUS_RECIPIENT_DELBA,
3037 BA_STATUS_RENEW_ADDBA_REQUEST,
3038 BA_STATUS_ACTIVE,
3039};
3040
3041/**
3042 * iwl4964_tl_ba_avail - Find out if an unused aggregation queue is available
3043 */
3044static u8 iwl4964_tl_ba_avail(struct iwl4965_priv *priv)
3045{
3046 int i;
3047 struct iwl4965_lq_mngr *lq;
3048 u8 count = 0;
3049 u16 msk;
3050
3051 lq = (struct iwl4965_lq_mngr *)&(priv->lq_mngr);
3052
3053 /* Find out how many agg queues are in use */
3054 for (i = 0; i < TID_MAX_LOAD_COUNT ; i++) {
3055 msk = 1 << i;
3056 if ((lq->agg_ctrl.granted_ba & msk) ||
3057 (lq->agg_ctrl.wait_for_agg_status & msk))
3058 count++;
3059 }
3060
3061 if (count < MMAC_SCHED_MAX_NUMBER_OF_HT_BACK_FLOWS)
3062 return 1;
3063
3064 return 0;
3065}
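
iwl4964_tl_ba_avail() above walks every TID and counts the ones that either hold a granted block-ack or are waiting for one, then compares the count against the hardware's aggregation flows. The loop is equivalent to a population count of the OR of the two masks; a quick user-space check of that equivalence, with illustrative constants:

#include <stdio.h>
#include <stdint.h>

#define MAX_TIDS      8   /* TIDs tracked (illustrative) */
#define MAX_BA_FLOWS  7   /* aggregation flows the scheduler offers */

/* Count TIDs that are granted or pending, the way the removed loop does. */
static unsigned int busy_flows_loop(uint16_t granted, uint16_t pending)
{
	unsigned int i, count = 0;

	for (i = 0; i < MAX_TIDS; i++) {
		uint16_t msk = 1u << i;
		if ((granted & msk) || (pending & msk))
			count++;
	}
	return count;
}

int main(void)
{
	uint16_t granted = 0x05;   /* TIDs 0 and 2 have agreements */
	uint16_t pending = 0x06;   /* TIDs 1 and 2 are waiting */

	unsigned int by_loop = busy_flows_loop(granted, pending);
	unsigned int by_popcount =
		(unsigned int)__builtin_popcount(granted | pending);

	printf("busy flows: loop=%u popcount=%u, free flow available: %s\n",
	       by_loop, by_popcount,
	       by_loop < MAX_BA_FLOWS ? "yes" : "no");
	return 0;
}
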
3066
3067static void iwl4965_ba_status(struct iwl4965_priv *priv,
3068 u8 tid, enum HT_STATUS status);
3069
3070static int iwl4965_perform_addba(struct iwl4965_priv *priv, u8 tid, u32 length,
3071 u32 ba_timeout)
3072{
3073 int rc;
3074
3075 rc = ieee80211_start_BA_session(priv->hw, priv->bssid, tid);
3076 if (rc)
3077 iwl4965_ba_status(priv, tid, BA_STATUS_FAILURE);
3078
3079 return rc;
3080}
3081
3082static int iwl4965_perform_delba(struct iwl4965_priv *priv, u8 tid)
3083{
3084 int rc;
3085
3086 rc = ieee80211_stop_BA_session(priv->hw, priv->bssid, tid);
3087 if (rc)
3088 iwl4965_ba_status(priv, tid, BA_STATUS_FAILURE);
3089
3090 return rc;
3091}
3092
3093static void iwl4965_turn_on_agg_for_tid(struct iwl4965_priv *priv,
3094 struct iwl4965_lq_mngr *lq,
3095 u8 auto_agg, u8 tid)
3096{
3097 u32 tid_msk = (1 << tid);
3098 unsigned long flags;
3099
3100 spin_lock_irqsave(&priv->lq_mngr.lock, flags);
3101/*
3102 if ((auto_agg) && (!lq->enable_counter)){
3103 lq->agg_ctrl.next_retry = 0;
3104 lq->agg_ctrl.tid_retry = 0;
3105 spin_unlock_irqrestore(&priv->lq_mngr.lock, flags);
3106 return;
3107 }
3108*/
3109 if (!(lq->agg_ctrl.granted_ba & tid_msk) &&
3110 (lq->agg_ctrl.requested_ba & tid_msk)) {
3111 u8 available_queues;
3112 u32 load;
3113
3114 spin_unlock_irqrestore(&priv->lq_mngr.lock, flags);
3115 available_queues = iwl4964_tl_ba_avail(priv);
3116 load = iwl4965_tl_get_load(priv, tid);
3117
3118 spin_lock_irqsave(&priv->lq_mngr.lock, flags);
3119 if (!available_queues) {
3120 if (auto_agg)
3121 lq->agg_ctrl.tid_retry |= tid_msk;
3122 else {
3123 lq->agg_ctrl.requested_ba &= ~tid_msk;
3124 lq->agg_ctrl.wait_for_agg_status &= ~tid_msk;
3125 }
3126 } else if ((auto_agg) &&
3127 ((load <= lq->agg_ctrl.tid_traffic_load_threshold) ||
3128 ((lq->agg_ctrl.wait_for_agg_status & tid_msk))))
3129 lq->agg_ctrl.tid_retry |= tid_msk;
3130 else {
3131 lq->agg_ctrl.wait_for_agg_status |= tid_msk;
3132 spin_unlock_irqrestore(&priv->lq_mngr.lock, flags);
3133 iwl4965_perform_addba(priv, tid, 0x40,
3134 lq->agg_ctrl.ba_timeout);
3135 spin_lock_irqsave(&priv->lq_mngr.lock, flags);
3136 }
3137 }
3138 spin_unlock_irqrestore(&priv->lq_mngr.lock, flags);
3139}
3140
3141static void iwl4965_turn_on_agg(struct iwl4965_priv *priv, u8 tid)
3142{
3143 struct iwl4965_lq_mngr *lq;
3144 unsigned long flags;
3145
3146 lq = (struct iwl4965_lq_mngr *)&(priv->lq_mngr);
3147
3148 if ((tid < TID_MAX_LOAD_COUNT))
3149 iwl4965_turn_on_agg_for_tid(priv, lq, lq->agg_ctrl.auto_agg,
3150 tid);
3151 else if (tid == TID_ALL_SPECIFIED) {
3152 if (lq->agg_ctrl.requested_ba) {
3153 for (tid = 0; tid < TID_MAX_LOAD_COUNT; tid++)
3154 iwl4965_turn_on_agg_for_tid(priv, lq,
3155 lq->agg_ctrl.auto_agg, tid);
3156 } else {
3157 spin_lock_irqsave(&priv->lq_mngr.lock, flags);
3158 lq->agg_ctrl.tid_retry = 0;
3159 lq->agg_ctrl.next_retry = 0;
3160 spin_unlock_irqrestore(&priv->lq_mngr.lock, flags);
3161 }
3162 }
3163
3164}
3165
3166void iwl4965_turn_off_agg(struct iwl4965_priv *priv, u8 tid)
3167{
3168 u32 tid_msk;
3169 struct iwl4965_lq_mngr *lq;
3170 unsigned long flags;
3171
3172 lq = (struct iwl4965_lq_mngr *)&(priv->lq_mngr);
3173
3174 if ((tid < TID_MAX_LOAD_COUNT)) {
3175 tid_msk = 1 << tid;
3176 spin_lock_irqsave(&priv->lq_mngr.lock, flags);
3177 lq->agg_ctrl.wait_for_agg_status |= tid_msk;
3178 lq->agg_ctrl.requested_ba &= ~tid_msk;
3179 spin_unlock_irqrestore(&priv->lq_mngr.lock, flags);
3180 iwl4965_perform_delba(priv, tid);
3181 } else if (tid == TID_ALL_SPECIFIED) {
3182 spin_lock_irqsave(&priv->lq_mngr.lock, flags);
3183 for (tid = 0; tid < TID_MAX_LOAD_COUNT; tid++) {
3184 tid_msk = 1 << tid;
3185 lq->agg_ctrl.wait_for_agg_status |= tid_msk;
3186 spin_unlock_irqrestore(&priv->lq_mngr.lock, flags);
3187 iwl4965_perform_delba(priv, tid);
3188 spin_lock_irqsave(&priv->lq_mngr.lock, flags);
3189 }
3190 lq->agg_ctrl.requested_ba = 0;
3191 spin_unlock_irqrestore(&priv->lq_mngr.lock, flags);
3192 }
3193}
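
The turn_on/turn_off paths above (also removed by this commit) track each TID's aggregation state with three bitmasks — requested_ba (we want a session), wait_for_agg_status (ADDBA/DELBA in flight) and granted_ba (session active) — updated under the lq_mngr lock and resolved by iwl4965_ba_status(). A lock-free user-space model of that small state machine, with made-up helper names and no auto-retry:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

struct agg_ctrl {
	uint16_t requested_ba;        /* TIDs we want aggregated */
	uint16_t wait_for_agg_status; /* TIDs with ADDBA/DELBA pending */
	uint16_t granted_ba;          /* TIDs with an active session */
};

static void agg_request(struct agg_ctrl *a, unsigned int tid)
{
	uint16_t msk = 1u << tid;

	a->requested_ba |= msk;
	a->wait_for_agg_status |= msk;     /* "ADDBA sent", in this model */
}

/* Mirror of the BA status handling: success grants the TID,
 * anything else drops it back to not-granted. */
static void agg_status(struct agg_ctrl *a, unsigned int tid, bool active)
{
	uint16_t msk = 1u << tid;

	if (active)
		a->granted_ba |= msk;
	else
		a->granted_ba &= ~msk;
	a->wait_for_agg_status &= ~msk;
	if (!active)
		a->requested_ba &= ~msk;   /* give up (no auto-retry here) */
}

static void agg_teardown(struct agg_ctrl *a, unsigned int tid)
{
	uint16_t msk = 1u << tid;

	a->wait_for_agg_status |= msk;     /* "DELBA sent" */
	a->requested_ba &= ~msk;
}

int main(void)
{
	struct agg_ctrl a = { 0, 0, 0 };

	agg_request(&a, 2);
	agg_status(&a, 2, true);           /* ADDBA succeeded */
	printf("granted=0x%x\n", a.granted_ba);

	agg_teardown(&a, 2);
	agg_status(&a, 2, false);          /* DELBA completed */
	printf("granted=0x%x requested=0x%x pending=0x%x\n",
	       a.granted_ba, a.requested_ba, a.wait_for_agg_status);
	return 0;
}
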
3194
3195/**
3196 * iwl4965_ba_status - Update driver's link quality mgr with tid's HT status
3197 */
3198static void iwl4965_ba_status(struct iwl4965_priv *priv,
3199 u8 tid, enum HT_STATUS status)
3200{
3201 struct iwl4965_lq_mngr *lq;
3202 u32 tid_msk = (1 << tid);
3203 unsigned long flags;
3204
3205 lq = (struct iwl4965_lq_mngr *)&(priv->lq_mngr);
3206
3207 if ((tid >= TID_MAX_LOAD_COUNT))
3208 goto out;
3209
3210 spin_lock_irqsave(&priv->lq_mngr.lock, flags);
3211 switch (status) {
3212 case BA_STATUS_ACTIVE:
3213 if (!(lq->agg_ctrl.granted_ba & tid_msk))
3214 lq->agg_ctrl.granted_ba |= tid_msk;
3215 break;
3216 default:
3217 if ((lq->agg_ctrl.granted_ba & tid_msk))
3218 lq->agg_ctrl.granted_ba &= ~tid_msk;
3219 break;
3220 }
3221
3222 lq->agg_ctrl.wait_for_agg_status &= ~tid_msk;
3223 if (status != BA_STATUS_ACTIVE) {
3224 if (lq->agg_ctrl.auto_agg) {
3225 lq->agg_ctrl.tid_retry |= tid_msk;
3226 lq->agg_ctrl.next_retry =
3227 jiffies + msecs_to_jiffies(500);
3228 } else
3229 lq->agg_ctrl.requested_ba &= ~tid_msk;
3230 }
3231 spin_unlock_irqrestore(&priv->lq_mngr.lock, flags);
3232 out:
3233 return;
3234}
3235
3236static void iwl4965_bg_agg_work(struct work_struct *work)
3237{
3238 struct iwl4965_priv *priv = container_of(work, struct iwl4965_priv,
3239 agg_work);
3240
3241 u32 tid;
3242 u32 retry_tid;
3243 u32 tid_msk;
3244 unsigned long flags;
3245 struct iwl4965_lq_mngr *lq = (struct iwl4965_lq_mngr *)&(priv->lq_mngr);
3246
3247 spin_lock_irqsave(&priv->lq_mngr.lock, flags);
3248 retry_tid = lq->agg_ctrl.tid_retry;
3249 lq->agg_ctrl.tid_retry = 0;
3250 spin_unlock_irqrestore(&priv->lq_mngr.lock, flags);
3251
3252 if (retry_tid == TID_ALL_SPECIFIED)
3253 iwl4965_turn_on_agg(priv, TID_ALL_SPECIFIED);
3254 else {
3255 for (tid = 0; tid < TID_MAX_LOAD_COUNT; tid++) {
3256 tid_msk = (1 << tid);
3257 if (retry_tid & tid_msk)
3258 iwl4965_turn_on_agg(priv, tid);
3259 }
3260 }
3261
3262 spin_lock_irqsave(&priv->lq_mngr.lock, flags);
3263 if (lq->agg_ctrl.tid_retry)
3264 lq->agg_ctrl.next_retry = jiffies + msecs_to_jiffies(500);
3265 spin_unlock_irqrestore(&priv->lq_mngr.lock, flags);
3266 return;
3267}
3268
3269/* TODO: move this functionality to rate scaling */
3270void iwl4965_tl_get_stats(struct iwl4965_priv *priv,
3271 struct ieee80211_hdr *hdr)
3272{
3273 __le16 *qc = ieee80211_get_qos_ctrl(hdr);
3274
3275 if (qc &&
3276 (priv->iw_mode != IEEE80211_IF_TYPE_IBSS)) {
3277 u8 tid = 0;
3278 tid = (u8) (le16_to_cpu(*qc) & 0xF);
3279 if (tid < TID_MAX_LOAD_COUNT)
3280 iwl4965_tl_add_packet(priv, tid);
3281 }
3282
3283 if (priv->lq_mngr.agg_ctrl.next_retry &&
3284 (time_after(priv->lq_mngr.agg_ctrl.next_retry, jiffies))) {
3285 unsigned long flags;
3286
3287 spin_lock_irqsave(&priv->lq_mngr.lock, flags);
3288 priv->lq_mngr.agg_ctrl.next_retry = 0;
3289 spin_unlock_irqrestore(&priv->lq_mngr.lock, flags);
3290 schedule_work(&priv->agg_work);
3291 }
3292}
3293
3294#endif /*CONFIG_IWL4965_HT_AGG */
3295#endif /* CONFIG_IWL4965_HT */
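
iwl4965_tl_get_stats() above feeds the load counters by pulling the TID out of the frame's QoS Control field: the field is little-endian on the air and the TID occupies its low four bits. A user-space sketch of that extraction:

#include <stdio.h>
#include <stdint.h>

/* Host-order value of a little-endian 16-bit field, as le16_to_cpu() gives. */
static uint16_t get_le16(const uint8_t *p)
{
	return (uint16_t)(p[0] | (p[1] << 8));
}

int main(void)
{
	/* QoS Control as it appears in the frame: TID 5, other bits clear. */
	uint8_t qos_ctrl[2] = { 0x05, 0x00 };

	uint8_t tid = get_le16(qos_ctrl) & 0x0F;   /* low nibble = TID */
	printf("TID = %u\n", tid);
	return 0;
}
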
3296
3297/** 3226/**
3298 * sign_extend - Sign extend a value using specified bit as sign-bit 3227 * sign_extend - Sign extend a value using specified bit as sign-bit
3299 * 3228 *
@@ -3316,7 +3245,7 @@ static s32 sign_extend(u32 oper, int index)
3316 * 3245 *
3317 * A return of <0 indicates bogus data in the statistics 3246 * A return of <0 indicates bogus data in the statistics
3318 */ 3247 */
3319int iwl4965_get_temperature(const struct iwl4965_priv *priv) 3248int iwl4965_get_temperature(const struct iwl_priv *priv)
3320{ 3249{
3321 s32 temperature; 3250 s32 temperature;
3322 s32 vt; 3251 s32 vt;
@@ -3384,7 +3313,7 @@ int iwl4965_get_temperature(const struct iwl4965_priv *priv)
3384 * Assumes caller will replace priv->last_temperature once calibration 3313 * Assumes caller will replace priv->last_temperature once calibration
3385 * executed. 3314 * executed.
3386 */ 3315 */
3387static int iwl4965_is_temp_calib_needed(struct iwl4965_priv *priv) 3316static int iwl4965_is_temp_calib_needed(struct iwl_priv *priv)
3388{ 3317{
3389 int temp_diff; 3318 int temp_diff;
3390 3319
@@ -3417,7 +3346,7 @@ static int iwl4965_is_temp_calib_needed(struct iwl4965_priv *priv)
3417/* Calculate noise level, based on measurements during network silence just 3346/* Calculate noise level, based on measurements during network silence just
3418 * before arriving beacon. This measurement can be done only if we know 3347 * before arriving beacon. This measurement can be done only if we know
3419 * exactly when to expect beacons, therefore only when we're associated. */ 3348 * exactly when to expect beacons, therefore only when we're associated. */
3420static void iwl4965_rx_calc_noise(struct iwl4965_priv *priv) 3349static void iwl4965_rx_calc_noise(struct iwl_priv *priv)
3421{ 3350{
3422 struct statistics_rx_non_phy *rx_info 3351 struct statistics_rx_non_phy *rx_info
3423 = &(priv->statistics.rx.general); 3352 = &(priv->statistics.rx.general);
@@ -3454,7 +3383,7 @@ static void iwl4965_rx_calc_noise(struct iwl4965_priv *priv)
3454 priv->last_rx_noise); 3383 priv->last_rx_noise);
3455} 3384}
3456 3385
3457void iwl4965_hw_rx_statistics(struct iwl4965_priv *priv, struct iwl4965_rx_mem_buffer *rxb) 3386void iwl4965_hw_rx_statistics(struct iwl_priv *priv, struct iwl4965_rx_mem_buffer *rxb)
3458{ 3387{
3459 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data; 3388 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
3460 int change; 3389 int change;
@@ -3488,6 +3417,8 @@ void iwl4965_hw_rx_statistics(struct iwl4965_priv *priv, struct iwl4965_rx_mem_b
3488#endif 3417#endif
3489 } 3418 }
3490 3419
3420 iwl_leds_background(priv);
3421
3491 /* If the hardware hasn't reported a change in 3422 /* If the hardware hasn't reported a change in
3492 * temperature then don't bother computing a 3423 * temperature then don't bother computing a
3493 * calibrated temperature value */ 3424 * calibrated temperature value */
@@ -3518,7 +3449,7 @@ void iwl4965_hw_rx_statistics(struct iwl4965_priv *priv, struct iwl4965_rx_mem_b
3518 queue_work(priv->workqueue, &priv->txpower_work); 3449 queue_work(priv->workqueue, &priv->txpower_work);
3519} 3450}
3520 3451
3521static void iwl4965_add_radiotap(struct iwl4965_priv *priv, 3452static void iwl4965_add_radiotap(struct iwl_priv *priv,
3522 struct sk_buff *skb, 3453 struct sk_buff *skb,
3523 struct iwl4965_rx_phy_res *rx_start, 3454 struct iwl4965_rx_phy_res *rx_start,
3524 struct ieee80211_rx_status *stats, 3455 struct ieee80211_rx_status *stats,
@@ -3526,8 +3457,9 @@ static void iwl4965_add_radiotap(struct iwl4965_priv *priv,
3526{ 3457{
3527 s8 signal = stats->ssi; 3458 s8 signal = stats->ssi;
3528 s8 noise = 0; 3459 s8 noise = 0;
3529 int rate = stats->rate; 3460 int rate = stats->rate_idx;
3530 u64 tsf = stats->mactime; 3461 u64 tsf = stats->mactime;
3462 __le16 antenna;
3531 __le16 phy_flags_hw = rx_start->phy_flags; 3463 __le16 phy_flags_hw = rx_start->phy_flags;
3532 struct iwl4965_rt_rx_hdr { 3464 struct iwl4965_rt_rx_hdr {
3533 struct ieee80211_radiotap_header rt_hdr; 3465 struct ieee80211_radiotap_header rt_hdr;
@@ -3594,7 +3526,6 @@ static void iwl4965_add_radiotap(struct iwl4965_priv *priv,
3594 IEEE80211_CHAN_2GHZ), 3526 IEEE80211_CHAN_2GHZ),
3595 &iwl4965_rt->rt_chbitmask); 3527 &iwl4965_rt->rt_chbitmask);
3596 3528
3597 rate = iwl4965_rate_index_from_plcp(rate);
3598 if (rate == -1) 3529 if (rate == -1)
3599 iwl4965_rt->rt_rate = 0; 3530 iwl4965_rt->rt_rate = 0;
3600 else 3531 else
@@ -3613,8 +3544,8 @@ static void iwl4965_add_radiotap(struct iwl4965_priv *priv,
3613 * new 802.11n radiotap field "RX chains" that is defined 3544 * new 802.11n radiotap field "RX chains" that is defined
3614 * as a bitmask. 3545 * as a bitmask.
3615 */ 3546 */
3616 iwl4965_rt->rt_antenna = 3547 antenna = phy_flags_hw & RX_RES_PHY_FLAGS_ANTENNA_MSK;
3617 le16_to_cpu(phy_flags_hw & RX_RES_PHY_FLAGS_ANTENNA_MSK) >> 4; 3548 iwl4965_rt->rt_antenna = le16_to_cpu(antenna) >> 4;
3618 3549
3619 /* set the preamble flag if appropriate */ 3550 /* set the preamble flag if appropriate */
3620 if (phy_flags_hw & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK) 3551 if (phy_flags_hw & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
@@ -3623,7 +3554,74 @@ static void iwl4965_add_radiotap(struct iwl4965_priv *priv,
3623 stats->flag |= RX_FLAG_RADIOTAP; 3554 stats->flag |= RX_FLAG_RADIOTAP;
3624} 3555}
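
The antenna hunk above splits one expression into a named __le16 temporary: the mask is applied while the value is still in wire (little-endian) order, and le16_to_cpu() runs once before the shift. Functionally the two forms match, since masking commutes with the byte swap as long as mask and value stay in the same representation. A quick user-space check of that, using a made-up 0x00F0 stand-in for the antenna mask (not the driver's real constant):

#include <stdio.h>
#include <stdint.h>

static uint16_t get_le16(const uint8_t *p)
{
	return (uint16_t)(p[0] | (p[1] << 8));
}

static void put_le16(uint8_t *p, uint16_t v)
{
	p[0] = v & 0xff;
	p[1] = v >> 8;
}

int main(void)
{
	uint8_t phy_flags_le[2], mask_le[2], masked_le[2];
	uint16_t host_flags = 0x4A73, host_mask = 0x00F0;
	unsigned int a, b, i;

	put_le16(phy_flags_le, host_flags);   /* wire-order phy_flags */
	put_le16(mask_le, host_mask);         /* wire-order mask */

	/* Mask in wire order first, convert once, then shift ... */
	for (i = 0; i < 2; i++)
		masked_le[i] = phy_flags_le[i] & mask_le[i];
	a = get_le16(masked_le) >> 4;

	/* ... which matches converting first and masking in host order. */
	b = (get_le16(phy_flags_le) & host_mask) >> 4;

	printf("antenna bits: %u == %u\n", a, b);
	return 0;
}
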
3625 3556
3626static void iwl4965_handle_data_packet(struct iwl4965_priv *priv, int is_data, 3557static void iwl_update_rx_stats(struct iwl_priv *priv, u16 fc, u16 len)
3558{
3559 /* 0 - mgmt, 1 - cnt, 2 - data */
3560 int idx = (fc & IEEE80211_FCTL_FTYPE) >> 2;
3561 priv->rx_stats[idx].cnt++;
3562 priv->rx_stats[idx].bytes += len;
3563}
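
iwl_update_rx_stats() above buckets every received frame by its 802.11 type: the type field sits in bits 2-3 of the frame control word (IEEE80211_FCTL_FTYPE is 0x000c), so (fc & 0x000c) >> 2 yields 0 for management, 1 for control (the "cnt" in the comment) and 2 for data, and indexes the counters directly with no branching. A user-space sketch:

#include <stdio.h>
#include <stdint.h>

#define FCTL_FTYPE 0x000c   /* frame control bits 2-3: the frame type */

struct type_stats {
	unsigned long cnt;
	unsigned long bytes;
};

/* 0 mgmt, 1 control, 2 data; give reserved type 3 a slot so a
 * malformed frame cannot index past the array in this sketch. */
static struct type_stats rx_stats[4];

static void update_rx_stats(uint16_t fc, uint16_t len)
{
	unsigned int idx = (fc & FCTL_FTYPE) >> 2;

	rx_stats[idx].cnt++;
	rx_stats[idx].bytes += len;
}

int main(void)
{
	update_rx_stats(0x0080, 230);   /* beacon: management */
	update_rx_stats(0x00D4,  10);   /* ACK: control */
	update_rx_stats(0x0008, 1500);  /* data frame */

	printf("mgmt=%lu ctl=%lu data=%lu frames\n",
	       rx_stats[0].cnt, rx_stats[1].cnt, rx_stats[2].cnt);
	return 0;
}
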
3564
3565static u32 iwl4965_translate_rx_status(u32 decrypt_in)
3566{
3567 u32 decrypt_out = 0;
3568
3569 if ((decrypt_in & RX_RES_STATUS_STATION_FOUND) ==
3570 RX_RES_STATUS_STATION_FOUND)
3571 decrypt_out |= (RX_RES_STATUS_STATION_FOUND |
3572 RX_RES_STATUS_NO_STATION_INFO_MISMATCH);
3573
3574 decrypt_out |= (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK);
3575
3576 /* packet was not encrypted */
3577 if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
3578 RX_RES_STATUS_SEC_TYPE_NONE)
3579 return decrypt_out;
3580
3581 /* packet was encrypted with unknown alg */
3582 if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
3583 RX_RES_STATUS_SEC_TYPE_ERR)
3584 return decrypt_out;
3585
3586 /* decryption was not done in HW */
3587 if ((decrypt_in & RX_MPDU_RES_STATUS_DEC_DONE_MSK) !=
3588 RX_MPDU_RES_STATUS_DEC_DONE_MSK)
3589 return decrypt_out;
3590
3591 switch (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) {
3592
3593 case RX_RES_STATUS_SEC_TYPE_CCMP:
3594 /* alg is CCM: check MIC only */
3595 if (!(decrypt_in & RX_MPDU_RES_STATUS_MIC_OK))
3596 /* Bad MIC */
3597 decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
3598 else
3599 decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
3600
3601 break;
3602
3603 case RX_RES_STATUS_SEC_TYPE_TKIP:
3604 if (!(decrypt_in & RX_MPDU_RES_STATUS_TTAK_OK)) {
3605 /* Bad TTAK */
3606 decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK;
3607 break;
3608 }
3609 /* fall through if TTAK OK */
3610 default:
3611 if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK))
3612 decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
3613 else
3614 decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
3615 break;
3616 };
3617
3618 IWL_DEBUG_RX("decrypt_in:0x%x decrypt_out = 0x%x\n",
3619 decrypt_in, decrypt_out);
3620
3621 return decrypt_out;
3622}
3623
3624static void iwl4965_handle_data_packet(struct iwl_priv *priv, int is_data,
3627 int include_phy, 3625 int include_phy,
3628 struct iwl4965_rx_mem_buffer *rxb, 3626 struct iwl4965_rx_mem_buffer *rxb,
3629 struct ieee80211_rx_status *stats) 3627 struct ieee80211_rx_status *stats)
@@ -3636,6 +3634,7 @@ static void iwl4965_handle_data_packet(struct iwl4965_priv *priv, int is_data,
3636 __le32 *rx_end; 3634 __le32 *rx_end;
3637 unsigned int skblen; 3635 unsigned int skblen;
3638 u32 ampdu_status; 3636 u32 ampdu_status;
3637 u32 ampdu_status_legacy;
3639 3638
3640 if (!include_phy && priv->last_phy_res[0]) 3639 if (!include_phy && priv->last_phy_res[0])
3641 rx_start = (struct iwl4965_rx_phy_res *)&priv->last_phy_res[1]; 3640 rx_start = (struct iwl4965_rx_phy_res *)&priv->last_phy_res[1];
@@ -3664,7 +3663,7 @@ static void iwl4965_handle_data_packet(struct iwl4965_priv *priv, int is_data,
3664 rx_start->byte_count = amsdu->byte_count; 3663 rx_start->byte_count = amsdu->byte_count;
3665 rx_end = (__le32 *) (((u8 *) hdr) + len); 3664 rx_end = (__le32 *) (((u8 *) hdr) + len);
3666 } 3665 }
3667 if (len > priv->hw_setting.max_pkt_size || len < 16) { 3666 if (len > priv->hw_params.max_pkt_size || len < 16) {
3668 IWL_WARNING("byte count out of range [16,4K] : %d\n", len); 3667 IWL_WARNING("byte count out of range [16,4K] : %d\n", len);
3669 return; 3668 return;
3670 } 3669 }
@@ -3672,6 +3671,12 @@ static void iwl4965_handle_data_packet(struct iwl4965_priv *priv, int is_data,
3672 ampdu_status = le32_to_cpu(*rx_end); 3671 ampdu_status = le32_to_cpu(*rx_end);
3673 skblen = ((u8 *) rx_end - (u8 *) & pkt->u.raw[0]) + sizeof(u32); 3672 skblen = ((u8 *) rx_end - (u8 *) & pkt->u.raw[0]) + sizeof(u32);
3674 3673
3674 if (!include_phy) {
3675 /* New status scheme, need to translate */
3676 ampdu_status_legacy = ampdu_status;
3677 ampdu_status = iwl4965_translate_rx_status(ampdu_status);
3678 }
3679
3675 /* start from MAC */ 3680 /* start from MAC */
3676 skb_reserve(rxb->skb, (void *)hdr - (void *)pkt); 3681 skb_reserve(rxb->skb, (void *)hdr - (void *)pkt);
3677 skb_put(rxb->skb, len); /* end where data ends */ 3682 skb_put(rxb->skb, len); /* end where data ends */
@@ -3686,19 +3691,16 @@ static void iwl4965_handle_data_packet(struct iwl4965_priv *priv, int is_data,
3686 stats->flag = 0; 3691 stats->flag = 0;
3687 hdr = (struct ieee80211_hdr *)rxb->skb->data; 3692 hdr = (struct ieee80211_hdr *)rxb->skb->data;
3688 3693
3689 if (iwl4965_param_hwcrypto) 3694 if (!priv->cfg->mod_params->sw_crypto)
3690 iwl4965_set_decrypted_flag(priv, rxb->skb, ampdu_status, stats); 3695 iwl4965_set_decrypted_flag(priv, rxb->skb, ampdu_status, stats);
3691 3696
3692 if (priv->add_radiotap) 3697 if (priv->add_radiotap)
3693 iwl4965_add_radiotap(priv, rxb->skb, rx_start, stats, ampdu_status); 3698 iwl4965_add_radiotap(priv, rxb->skb, rx_start, stats, ampdu_status);
3694 3699
3700 iwl_update_rx_stats(priv, le16_to_cpu(hdr->frame_control), len);
3695 ieee80211_rx_irqsafe(priv->hw, rxb->skb, stats); 3701 ieee80211_rx_irqsafe(priv->hw, rxb->skb, stats);
3696 priv->alloc_rxb_skb--; 3702 priv->alloc_rxb_skb--;
3697 rxb->skb = NULL; 3703 rxb->skb = NULL;
3698#ifdef LED
3699 priv->led_packets += len;
3700 iwl4965_setup_activity_timer(priv);
3701#endif
3702} 3704}
3703 3705
3704/* Calc max signal level (dBm) among 3 possible receivers */ 3706/* Calc max signal level (dBm) among 3 possible receivers */
@@ -3737,85 +3739,16 @@ static int iwl4965_calc_rssi(struct iwl4965_rx_phy_res *rx_resp)
3737 3739
3738#ifdef CONFIG_IWL4965_HT 3740#ifdef CONFIG_IWL4965_HT
3739 3741
3740/* Parsed Information Elements */ 3742void iwl4965_init_ht_hw_capab(struct iwl_priv *priv,
3741struct ieee802_11_elems { 3743 struct ieee80211_ht_info *ht_info,
3742 u8 *ds_params; 3744 enum ieee80211_band band)
3743 u8 ds_params_len;
3744 u8 *tim;
3745 u8 tim_len;
3746 u8 *ibss_params;
3747 u8 ibss_params_len;
3748 u8 *erp_info;
3749 u8 erp_info_len;
3750 u8 *ht_cap_param;
3751 u8 ht_cap_param_len;
3752 u8 *ht_extra_param;
3753 u8 ht_extra_param_len;
3754};
3755
3756static int parse_elems(u8 *start, size_t len, struct ieee802_11_elems *elems)
3757{
3758 size_t left = len;
3759 u8 *pos = start;
3760 int unknown = 0;
3761
3762 memset(elems, 0, sizeof(*elems));
3763
3764 while (left >= 2) {
3765 u8 id, elen;
3766
3767 id = *pos++;
3768 elen = *pos++;
3769 left -= 2;
3770
3771 if (elen > left)
3772 return -1;
3773
3774 switch (id) {
3775 case WLAN_EID_DS_PARAMS:
3776 elems->ds_params = pos;
3777 elems->ds_params_len = elen;
3778 break;
3779 case WLAN_EID_TIM:
3780 elems->tim = pos;
3781 elems->tim_len = elen;
3782 break;
3783 case WLAN_EID_IBSS_PARAMS:
3784 elems->ibss_params = pos;
3785 elems->ibss_params_len = elen;
3786 break;
3787 case WLAN_EID_ERP_INFO:
3788 elems->erp_info = pos;
3789 elems->erp_info_len = elen;
3790 break;
3791 case WLAN_EID_HT_CAPABILITY:
3792 elems->ht_cap_param = pos;
3793 elems->ht_cap_param_len = elen;
3794 break;
3795 case WLAN_EID_HT_EXTRA_INFO:
3796 elems->ht_extra_param = pos;
3797 elems->ht_extra_param_len = elen;
3798 break;
3799 default:
3800 unknown++;
3801 break;
3802 }
3803
3804 left -= elen;
3805 pos += elen;
3806 }
3807
3808 return 0;
3809}
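
parse_elems() above (deleted here in favour of mac80211's own parsing) walks the variable portion of a management frame as a flat TLV list — one id byte, one length byte, then the payload — with a running bounds check so a bogus length cannot run past the buffer. A minimal user-space walker in the same spirit:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Walk id/len/value elements in buf[0..len), printing what is found.
 * Returns -1 if an element claims more bytes than remain. */
static int walk_elems(const uint8_t *buf, size_t len)
{
	const uint8_t *pos = buf;
	size_t left = len;

	while (left >= 2) {
		uint8_t id = *pos++;
		uint8_t elen = *pos++;
		left -= 2;

		if (elen > left)
			return -1;              /* truncated element */

		printf("IE id=%u len=%u\n", id, elen);
		pos += elen;
		left -= elen;
	}
	return 0;
}

int main(void)
{
	/* SSID "ab" (id 0), then DS Parameter Set (id 3) with channel 6. */
	static const uint8_t ies[] = { 0, 2, 'a', 'b', 3, 1, 6 };

	return walk_elems(ies, sizeof(ies)) ? 1 : 0;
}
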
3810
3811void iwl4965_init_ht_hw_capab(struct ieee80211_ht_info *ht_info, int mode)
3812{ 3745{
3813 ht_info->cap = 0; 3746 ht_info->cap = 0;
3814 memset(ht_info->supp_mcs_set, 0, 16); 3747 memset(ht_info->supp_mcs_set, 0, 16);
3815 3748
3816 ht_info->ht_supported = 1; 3749 ht_info->ht_supported = 1;
3817 3750
3818 if (mode == MODE_IEEE80211A) { 3751 if (band == IEEE80211_BAND_5GHZ) {
3819 ht_info->cap |= (u16)IEEE80211_HT_CAP_SUP_WIDTH; 3752 ht_info->cap |= (u16)IEEE80211_HT_CAP_SUP_WIDTH;
3820 ht_info->cap |= (u16)IEEE80211_HT_CAP_SGI_40; 3753 ht_info->cap |= (u16)IEEE80211_HT_CAP_SGI_40;
3821 ht_info->supp_mcs_set[4] = 0x01; 3754 ht_info->supp_mcs_set[4] = 0x01;
@@ -3824,10 +3757,9 @@ void iwl4965_init_ht_hw_capab(struct ieee80211_ht_info *ht_info, int mode)
3824 ht_info->cap |= (u16)IEEE80211_HT_CAP_SGI_20; 3757 ht_info->cap |= (u16)IEEE80211_HT_CAP_SGI_20;
3825 ht_info->cap |= (u16)(IEEE80211_HT_CAP_MIMO_PS & 3758 ht_info->cap |= (u16)(IEEE80211_HT_CAP_MIMO_PS &
3826 (IWL_MIMO_PS_NONE << 2)); 3759 (IWL_MIMO_PS_NONE << 2));
3827 if (iwl4965_param_amsdu_size_8K) { 3760
3828 printk(KERN_DEBUG "iwl4965 in A-MSDU 8K support mode\n"); 3761 if (priv->cfg->mod_params->amsdu_size_8K)
3829 ht_info->cap |= (u16)IEEE80211_HT_CAP_MAX_AMSDU; 3762 ht_info->cap |= (u16)IEEE80211_HT_CAP_MAX_AMSDU;
3830 }
3831 3763
3832 ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF; 3764 ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF;
3833 ht_info->ampdu_density = CFG_HT_MPDU_DENSITY_DEF; 3765 ht_info->ampdu_density = CFG_HT_MPDU_DENSITY_DEF;
@@ -3837,7 +3769,7 @@ void iwl4965_init_ht_hw_capab(struct ieee80211_ht_info *ht_info, int mode)
3837} 3769}
3838#endif /* CONFIG_IWL4965_HT */ 3770#endif /* CONFIG_IWL4965_HT */
3839 3771
3840static void iwl4965_sta_modify_ps_wake(struct iwl4965_priv *priv, int sta_id) 3772static void iwl4965_sta_modify_ps_wake(struct iwl_priv *priv, int sta_id)
3841{ 3773{
3842 unsigned long flags; 3774 unsigned long flags;
3843 3775
@@ -3851,7 +3783,7 @@ static void iwl4965_sta_modify_ps_wake(struct iwl4965_priv *priv, int sta_id)
3851 iwl4965_send_add_station(priv, &priv->stations[sta_id].sta, CMD_ASYNC); 3783 iwl4965_send_add_station(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
3852} 3784}
3853 3785
3854static void iwl4965_update_ps_mode(struct iwl4965_priv *priv, u16 ps_bit, u8 *addr) 3786static void iwl4965_update_ps_mode(struct iwl_priv *priv, u16 ps_bit, u8 *addr)
3855{ 3787{
3856 /* FIXME: need locking over ps_status ??? */ 3788 /* FIXME: need locking over ps_status ??? */
3857 u8 sta_id = iwl4965_hw_find_station(priv, addr); 3789 u8 sta_id = iwl4965_hw_find_station(priv, addr);
@@ -3868,44 +3800,201 @@ static void iwl4965_update_ps_mode(struct iwl4965_priv *priv, u16 ps_bit, u8 *ad
3868 } 3800 }
3869 } 3801 }
3870} 3802}
3803#ifdef CONFIG_IWLWIFI_DEBUG
3871 3804
3872#define IWL_DELAY_NEXT_SCAN_AFTER_ASSOC (HZ*6) 3805/**
3806 * iwl4965_dbg_report_frame - dump frame to syslog during debug sessions
3807 *
3808 * You may hack this function to show different aspects of received frames,
3809 * including selective frame dumps.
3810 * group100 parameter selects whether to show 1 out of 100 good frames.
3811 *
3812 * TODO: This was originally written for 3945, need to audit for
3813 * proper operation with 4965.
3814 */
3815static void iwl4965_dbg_report_frame(struct iwl_priv *priv,
3816 struct iwl4965_rx_packet *pkt,
3817 struct ieee80211_hdr *header, int group100)
3818{
3819 u32 to_us;
3820 u32 print_summary = 0;
3821 u32 print_dump = 0; /* set to 1 to dump all frames' contents */
3822 u32 hundred = 0;
3823 u32 dataframe = 0;
3824 u16 fc;
3825 u16 seq_ctl;
3826 u16 channel;
3827 u16 phy_flags;
3828 int rate_sym;
3829 u16 length;
3830 u16 status;
3831 u16 bcn_tmr;
3832 u32 tsf_low;
3833 u64 tsf;
3834 u8 rssi;
3835 u8 agc;
3836 u16 sig_avg;
3837 u16 noise_diff;
3838 struct iwl4965_rx_frame_stats *rx_stats = IWL_RX_STATS(pkt);
3839 struct iwl4965_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt);
3840 struct iwl4965_rx_frame_end *rx_end = IWL_RX_END(pkt);
3841 u8 *data = IWL_RX_DATA(pkt);
3842
3843 if (likely(!(iwl_debug_level & IWL_DL_RX)))
3844 return;
3873 3845
3874/* Called for REPLY_4965_RX (legacy ABG frames), or 3846 /* MAC header */
3847 fc = le16_to_cpu(header->frame_control);
3848 seq_ctl = le16_to_cpu(header->seq_ctrl);
3849
3850 /* metadata */
3851 channel = le16_to_cpu(rx_hdr->channel);
3852 phy_flags = le16_to_cpu(rx_hdr->phy_flags);
3853 rate_sym = rx_hdr->rate;
3854 length = le16_to_cpu(rx_hdr->len);
3855
3856 /* end-of-frame status and timestamp */
3857 status = le32_to_cpu(rx_end->status);
3858 bcn_tmr = le32_to_cpu(rx_end->beacon_timestamp);
3859 tsf_low = le64_to_cpu(rx_end->timestamp) & 0x0ffffffff;
3860 tsf = le64_to_cpu(rx_end->timestamp);
3861
3862 /* signal statistics */
3863 rssi = rx_stats->rssi;
3864 agc = rx_stats->agc;
3865 sig_avg = le16_to_cpu(rx_stats->sig_avg);
3866 noise_diff = le16_to_cpu(rx_stats->noise_diff);
3867
3868 to_us = !compare_ether_addr(header->addr1, priv->mac_addr);
3869
3870 /* if data frame is to us and all is good,
3871 * (optionally) print summary for only 1 out of every 100 */
3872 if (to_us && (fc & ~IEEE80211_FCTL_PROTECTED) ==
3873 (IEEE80211_FCTL_FROMDS | IEEE80211_FTYPE_DATA)) {
3874 dataframe = 1;
3875 if (!group100)
3876 print_summary = 1; /* print each frame */
3877 else if (priv->framecnt_to_us < 100) {
3878 priv->framecnt_to_us++;
3879 print_summary = 0;
3880 } else {
3881 priv->framecnt_to_us = 0;
3882 print_summary = 1;
3883 hundred = 1;
3884 }
3885 } else {
3886 /* print summary for all other frames */
3887 print_summary = 1;
3888 }
3889
3890 if (print_summary) {
3891 char *title;
3892 int rate_idx;
3893 u32 bitrate;
3894
3895 if (hundred)
3896 title = "100Frames";
3897 else if (fc & IEEE80211_FCTL_RETRY)
3898 title = "Retry";
3899 else if (ieee80211_is_assoc_response(fc))
3900 title = "AscRsp";
3901 else if (ieee80211_is_reassoc_response(fc))
3902 title = "RasRsp";
3903 else if (ieee80211_is_probe_response(fc)) {
3904 title = "PrbRsp";
3905 print_dump = 1; /* dump frame contents */
3906 } else if (ieee80211_is_beacon(fc)) {
3907 title = "Beacon";
3908 print_dump = 1; /* dump frame contents */
3909 } else if (ieee80211_is_atim(fc))
3910 title = "ATIM";
3911 else if (ieee80211_is_auth(fc))
3912 title = "Auth";
3913 else if (ieee80211_is_deauth(fc))
3914 title = "DeAuth";
3915 else if (ieee80211_is_disassoc(fc))
3916 title = "DisAssoc";
3917 else
3918 title = "Frame";
3919
3920 rate_idx = iwl4965_hwrate_to_plcp_idx(rate_sym);
3921 if (unlikely(rate_idx == -1))
3922 bitrate = 0;
3923 else
3924 bitrate = iwl4965_rates[rate_idx].ieee / 2;
3925
3926 /* print frame summary.
3927 * MAC addresses show just the last byte (for brevity),
3928 * but you can hack it to show more, if you'd like to. */
3929 if (dataframe)
3930 IWL_DEBUG_RX("%s: mhd=0x%04x, dst=0x%02x, "
3931 "len=%u, rssi=%d, chnl=%d, rate=%u, \n",
3932 title, fc, header->addr1[5],
3933 length, rssi, channel, bitrate);
3934 else {
3935 /* src/dst addresses assume managed mode */
3936 IWL_DEBUG_RX("%s: 0x%04x, dst=0x%02x, "
3937 "src=0x%02x, rssi=%u, tim=%lu usec, "
3938 "phy=0x%02x, chnl=%d\n",
3939 title, fc, header->addr1[5],
3940 header->addr3[5], rssi,
3941 tsf_low - priv->scan_start_tsf,
3942 phy_flags, channel);
3943 }
3944 }
3945 if (print_dump)
3946 iwl_print_hex_dump(IWL_DL_RX, data, length);
3947}
3948#else
3949static inline void iwl4965_dbg_report_frame(struct iwl_priv *priv,
3950 struct iwl4965_rx_packet *pkt,
3951 struct ieee80211_hdr *header,
3952 int group100)
3953{
3954}
3955#endif
3956
3957
3958
3959/* Called for REPLY_RX (legacy ABG frames), or
3875 * REPLY_RX_MPDU_CMD (HT high-throughput N frames). */ 3960 * REPLY_RX_MPDU_CMD (HT high-throughput N frames). */
3876static void iwl4965_rx_reply_rx(struct iwl4965_priv *priv, 3961static void iwl4965_rx_reply_rx(struct iwl_priv *priv,
3877 struct iwl4965_rx_mem_buffer *rxb) 3962 struct iwl4965_rx_mem_buffer *rxb)
3878{ 3963{
3964 struct ieee80211_hdr *header;
3965 struct ieee80211_rx_status rx_status;
3879 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data; 3966 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
3880 /* Use phy data (Rx signal strength, etc.) contained within 3967 /* Use phy data (Rx signal strength, etc.) contained within
3881 * this rx packet for legacy frames, 3968 * this rx packet for legacy frames,
3882 * or phy data cached from REPLY_RX_PHY_CMD for HT frames. */ 3969 * or phy data cached from REPLY_RX_PHY_CMD for HT frames. */
3883 int include_phy = (pkt->hdr.cmd == REPLY_4965_RX); 3970 int include_phy = (pkt->hdr.cmd == REPLY_RX);
3884 struct iwl4965_rx_phy_res *rx_start = (include_phy) ? 3971 struct iwl4965_rx_phy_res *rx_start = (include_phy) ?
3885 (struct iwl4965_rx_phy_res *)&(pkt->u.raw[0]) : 3972 (struct iwl4965_rx_phy_res *)&(pkt->u.raw[0]) :
3886 (struct iwl4965_rx_phy_res *)&priv->last_phy_res[1]; 3973 (struct iwl4965_rx_phy_res *)&priv->last_phy_res[1];
3887 __le32 *rx_end; 3974 __le32 *rx_end;
3888 unsigned int len = 0; 3975 unsigned int len = 0;
3889 struct ieee80211_hdr *header;
3890 u16 fc; 3976 u16 fc;
3891 struct ieee80211_rx_status stats = {
3892 .mactime = le64_to_cpu(rx_start->timestamp),
3893 .channel = le16_to_cpu(rx_start->channel),
3894 .phymode =
3895 (rx_start->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
3896 MODE_IEEE80211G : MODE_IEEE80211A,
3897 .antenna = 0,
3898 .rate = iwl4965_hw_get_rate(rx_start->rate_n_flags),
3899 .flag = 0,
3900 };
3901 u8 network_packet; 3977 u8 network_packet;
3902 3978
3979 rx_status.mactime = le64_to_cpu(rx_start->timestamp);
3980 rx_status.freq =
3981 ieee80211_frequency_to_channel(le16_to_cpu(rx_start->channel));
3982 rx_status.band = (rx_start->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
3983 IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
3984 rx_status.rate_idx =
3985 iwl4965_hwrate_to_plcp_idx(le32_to_cpu(rx_start->rate_n_flags));
3986 if (rx_status.band == IEEE80211_BAND_5GHZ)
3987 rx_status.rate_idx -= IWL_FIRST_OFDM_RATE;
3988
3989 rx_status.antenna = 0;
3990 rx_status.flag = 0;
3991
3903 if ((unlikely(rx_start->cfg_phy_cnt > 20))) { 3992 if ((unlikely(rx_start->cfg_phy_cnt > 20))) {
3904 IWL_DEBUG_DROP 3993 IWL_DEBUG_DROP("dsp size out of range [0,20]: %d/n",
3905 ("dsp size out of range [0,20]: " 3994 rx_start->cfg_phy_cnt);
3906 "%d/n", rx_start->cfg_phy_cnt);
3907 return; 3995 return;
3908 } 3996 }
3997
3909 if (!include_phy) { 3998 if (!include_phy) {
3910 if (priv->last_phy_res[0]) 3999 if (priv->last_phy_res[0])
3911 rx_start = (struct iwl4965_rx_phy_res *) 4000 rx_start = (struct iwl4965_rx_phy_res *)
@@ -3924,7 +4013,7 @@ static void iwl4965_rx_reply_rx(struct iwl4965_priv *priv,
3924 + rx_start->cfg_phy_cnt); 4013 + rx_start->cfg_phy_cnt);
3925 4014
3926 len = le16_to_cpu(rx_start->byte_count); 4015 len = le16_to_cpu(rx_start->byte_count);
3927 rx_end = (__le32 *) (pkt->u.raw + rx_start->cfg_phy_cnt + 4016 rx_end = (__le32 *)(pkt->u.raw + rx_start->cfg_phy_cnt +
3928 sizeof(struct iwl4965_rx_phy_res) + len); 4017 sizeof(struct iwl4965_rx_phy_res) + len);
3929 } else { 4018 } else {
3930 struct iwl4965_rx_mpdu_res_start *amsdu = 4019 struct iwl4965_rx_mpdu_res_start *amsdu =
@@ -3946,43 +4035,38 @@ static void iwl4965_rx_reply_rx(struct iwl4965_priv *priv,
3946 4035
3947 priv->ucode_beacon_time = le32_to_cpu(rx_start->beacon_time_stamp); 4036 priv->ucode_beacon_time = le32_to_cpu(rx_start->beacon_time_stamp);
3948 4037
3949 stats.freq = ieee80211chan2mhz(stats.channel);
3950
3951 /* Find max signal strength (dBm) among 3 antenna/receiver chains */ 4038 /* Find max signal strength (dBm) among 3 antenna/receiver chains */
3952 stats.ssi = iwl4965_calc_rssi(rx_start); 4039 rx_status.ssi = iwl4965_calc_rssi(rx_start);
3953 4040
3954 /* Meaningful noise values are available only from beacon statistics, 4041 /* Meaningful noise values are available only from beacon statistics,
3955 * which are gathered only when associated, and indicate noise 4042 * which are gathered only when associated, and indicate noise
3956 * only for the associated network channel ... 4043 * only for the associated network channel ...
3957 * Ignore these noise values while scanning (other channels) */ 4044 * Ignore these noise values while scanning (other channels) */
3958 if (iwl4965_is_associated(priv) && 4045 if (iwl_is_associated(priv) &&
3959 !test_bit(STATUS_SCANNING, &priv->status)) { 4046 !test_bit(STATUS_SCANNING, &priv->status)) {
3960 stats.noise = priv->last_rx_noise; 4047 rx_status.noise = priv->last_rx_noise;
3961 stats.signal = iwl4965_calc_sig_qual(stats.ssi, stats.noise); 4048 rx_status.signal = iwl4965_calc_sig_qual(rx_status.ssi,
4049 rx_status.noise);
3962 } else { 4050 } else {
3963 stats.noise = IWL_NOISE_MEAS_NOT_AVAILABLE; 4051 rx_status.noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
3964 stats.signal = iwl4965_calc_sig_qual(stats.ssi, 0); 4052 rx_status.signal = iwl4965_calc_sig_qual(rx_status.ssi, 0);
3965 } 4053 }
3966 4054
3967 /* Reset beacon noise level if not associated. */ 4055 /* Reset beacon noise level if not associated. */
3968 if (!iwl4965_is_associated(priv)) 4056 if (!iwl_is_associated(priv))
3969 priv->last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE; 4057 priv->last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
3970 4058
3971#ifdef CONFIG_IWL4965_DEBUG 4059 /* Set "1" to report good data frames in groups of 100 */
3972 /* TODO: Parts of iwl4965_report_frame are broken for 4965 */ 4060 /* FIXME: need to optimze the call: */
3973 if (iwl4965_debug_level & (IWL_DL_RX)) 4061 iwl4965_dbg_report_frame(priv, pkt, header, 1);
3974 /* Set "1" to report good data frames in groups of 100 */
3975 iwl4965_report_frame(priv, pkt, header, 1);
3976 4062
3977 if (iwl4965_debug_level & (IWL_DL_RX | IWL_DL_STATS)) 4063 IWL_DEBUG_STATS_LIMIT("Rssi %d, noise %d, qual %d, TSF %llu\n",
3978 IWL_DEBUG_RX("Rssi %d, noise %d, qual %d, TSF %lu\n", 4064 rx_status.ssi, rx_status.noise, rx_status.signal,
3979 stats.ssi, stats.noise, stats.signal, 4065 (unsigned long long)rx_status.mactime);
3980 (long unsigned int)le64_to_cpu(rx_start->timestamp));
3981#endif
3982 4066
3983 network_packet = iwl4965_is_network_packet(priv, header); 4067 network_packet = iwl4965_is_network_packet(priv, header);
3984 if (network_packet) { 4068 if (network_packet) {
3985 priv->last_rx_rssi = stats.ssi; 4069 priv->last_rx_rssi = rx_status.ssi;
3986 priv->last_beacon_time = priv->ucode_beacon_time; 4070 priv->last_beacon_time = priv->ucode_beacon_time;
3987 priv->last_tsf = le64_to_cpu(rx_start->timestamp); 4071 priv->last_tsf = le64_to_cpu(rx_start->timestamp);
3988 } 4072 }
@@ -3990,102 +4074,10 @@ static void iwl4965_rx_reply_rx(struct iwl4965_priv *priv,
3990 fc = le16_to_cpu(header->frame_control); 4074 fc = le16_to_cpu(header->frame_control);
3991 switch (fc & IEEE80211_FCTL_FTYPE) { 4075 switch (fc & IEEE80211_FCTL_FTYPE) {
3992 case IEEE80211_FTYPE_MGMT: 4076 case IEEE80211_FTYPE_MGMT:
3993
3994 if (priv->iw_mode == IEEE80211_IF_TYPE_AP) 4077 if (priv->iw_mode == IEEE80211_IF_TYPE_AP)
3995 iwl4965_update_ps_mode(priv, fc & IEEE80211_FCTL_PM, 4078 iwl4965_update_ps_mode(priv, fc & IEEE80211_FCTL_PM,
3996 header->addr2); 4079 header->addr2);
3997 switch (fc & IEEE80211_FCTL_STYPE) { 4080 iwl4965_handle_data_packet(priv, 0, include_phy, rxb, &rx_status);
3998 case IEEE80211_STYPE_PROBE_RESP:
3999 case IEEE80211_STYPE_BEACON:
4000 if ((priv->iw_mode == IEEE80211_IF_TYPE_STA &&
4001 !compare_ether_addr(header->addr2, priv->bssid)) ||
4002 (priv->iw_mode == IEEE80211_IF_TYPE_IBSS &&
4003 !compare_ether_addr(header->addr3, priv->bssid))) {
4004 struct ieee80211_mgmt *mgmt =
4005 (struct ieee80211_mgmt *)header;
4006 u64 timestamp =
4007 le64_to_cpu(mgmt->u.beacon.timestamp);
4008
4009 priv->timestamp0 = timestamp & 0xFFFFFFFF;
4010 priv->timestamp1 =
4011 (timestamp >> 32) & 0xFFFFFFFF;
4012 priv->beacon_int = le16_to_cpu(
4013 mgmt->u.beacon.beacon_int);
4014 if (priv->call_post_assoc_from_beacon &&
4015 (priv->iw_mode == IEEE80211_IF_TYPE_STA)) {
4016 priv->call_post_assoc_from_beacon = 0;
4017 queue_work(priv->workqueue,
4018 &priv->post_associate.work);
4019 }
4020 }
4021 break;
4022
4023 case IEEE80211_STYPE_ACTION:
4024 break;
4025
4026 /*
4027 * TODO: Use the new callback function from
4028 * mac80211 instead of sniffing these packets.
4029 */
4030 case IEEE80211_STYPE_ASSOC_RESP:
4031 case IEEE80211_STYPE_REASSOC_RESP:
4032 if (network_packet) {
4033#ifdef CONFIG_IWL4965_HT
4034 u8 *pos = NULL;
4035 struct ieee802_11_elems elems;
4036#endif /*CONFIG_IWL4965_HT */
4037 struct ieee80211_mgmt *mgnt =
4038 (struct ieee80211_mgmt *)header;
4039
4040 /* We have just associated, give some
4041 * time for the 4-way handshake if
4042 * any. Don't start scan too early. */
4043 priv->next_scan_jiffies = jiffies +
4044 IWL_DELAY_NEXT_SCAN_AFTER_ASSOC;
4045
4046 priv->assoc_id = (~((1 << 15) | (1 << 14))
4047 & le16_to_cpu(mgnt->u.assoc_resp.aid));
4048 priv->assoc_capability =
4049 le16_to_cpu(
4050 mgnt->u.assoc_resp.capab_info);
4051#ifdef CONFIG_IWL4965_HT
4052 pos = mgnt->u.assoc_resp.variable;
4053 if (!parse_elems(pos,
4054 len - (pos - (u8 *) mgnt),
4055 &elems)) {
4056 if (elems.ht_extra_param &&
4057 elems.ht_cap_param)
4058 break;
4059 }
4060#endif /*CONFIG_IWL4965_HT */
4061 /* assoc_id is 0 no association */
4062 if (!priv->assoc_id)
4063 break;
4064 if (priv->beacon_int)
4065 queue_work(priv->workqueue,
4066 &priv->post_associate.work);
4067 else
4068 priv->call_post_assoc_from_beacon = 1;
4069 }
4070
4071 break;
4072
4073 case IEEE80211_STYPE_PROBE_REQ:
4074 if ((priv->iw_mode == IEEE80211_IF_TYPE_IBSS) &&
4075 !iwl4965_is_associated(priv)) {
4076 DECLARE_MAC_BUF(mac1);
4077 DECLARE_MAC_BUF(mac2);
4078 DECLARE_MAC_BUF(mac3);
4079
4080 IWL_DEBUG_DROP("Dropping (non network): "
4081 "%s, %s, %s\n",
4082 print_mac(mac1, header->addr1),
4083 print_mac(mac2, header->addr2),
4084 print_mac(mac3, header->addr3));
4085 return;
4086 }
4087 }
4088 iwl4965_handle_data_packet(priv, 0, include_phy, rxb, &stats);
4089 break; 4081 break;
4090 4082
4091 case IEEE80211_FTYPE_CTL: 4083 case IEEE80211_FTYPE_CTL:
@@ -4094,7 +4086,7 @@ static void iwl4965_rx_reply_rx(struct iwl4965_priv *priv,
4094 case IEEE80211_STYPE_BACK_REQ: 4086 case IEEE80211_STYPE_BACK_REQ:
4095 IWL_DEBUG_HT("IEEE80211_STYPE_BACK_REQ arrived\n"); 4087 IWL_DEBUG_HT("IEEE80211_STYPE_BACK_REQ arrived\n");
4096 iwl4965_handle_data_packet(priv, 0, include_phy, 4088 iwl4965_handle_data_packet(priv, 0, include_phy,
4097 rxb, &stats); 4089 rxb, &rx_status);
4098 break; 4090 break;
4099 default: 4091 default:
4100 break; 4092 break;
@@ -4124,7 +4116,7 @@ static void iwl4965_rx_reply_rx(struct iwl4965_priv *priv,
4124 print_mac(mac3, header->addr3)); 4116 print_mac(mac3, header->addr3));
4125 else 4117 else
4126 iwl4965_handle_data_packet(priv, 1, include_phy, rxb, 4118 iwl4965_handle_data_packet(priv, 1, include_phy, rxb,
4127 &stats); 4119 &rx_status);
4128 break; 4120 break;
4129 } 4121 }
4130 default: 4122 default:
@@ -4135,7 +4127,7 @@ static void iwl4965_rx_reply_rx(struct iwl4965_priv *priv,
4135 4127
4136/* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD). 4128/* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD).
4137 * This will be used later in iwl4965_rx_reply_rx() for REPLY_RX_MPDU_CMD. */ 4129 * This will be used later in iwl4965_rx_reply_rx() for REPLY_RX_MPDU_CMD. */
4138static void iwl4965_rx_reply_rx_phy(struct iwl4965_priv *priv, 4130static void iwl4965_rx_reply_rx_phy(struct iwl_priv *priv,
4139 struct iwl4965_rx_mem_buffer *rxb) 4131 struct iwl4965_rx_mem_buffer *rxb)
4140{ 4132{
4141 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data; 4133 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
@@ -4143,8 +4135,7 @@ static void iwl4965_rx_reply_rx_phy(struct iwl4965_priv *priv,
4143 memcpy(&priv->last_phy_res[1], &(pkt->u.raw[0]), 4135 memcpy(&priv->last_phy_res[1], &(pkt->u.raw[0]),
4144 sizeof(struct iwl4965_rx_phy_res)); 4136 sizeof(struct iwl4965_rx_phy_res));
4145} 4137}
4146 4138static void iwl4965_rx_missed_beacon_notif(struct iwl_priv *priv,
4147static void iwl4965_rx_missed_beacon_notif(struct iwl4965_priv *priv,
4148 struct iwl4965_rx_mem_buffer *rxb) 4139 struct iwl4965_rx_mem_buffer *rxb)
4149 4140
4150{ 4141{
@@ -4165,31 +4156,12 @@ static void iwl4965_rx_missed_beacon_notif(struct iwl4965_priv *priv,
4165 } 4156 }
4166#endif /*CONFIG_IWL4965_SENSITIVITY*/ 4157#endif /*CONFIG_IWL4965_SENSITIVITY*/
4167} 4158}
4168
4169#ifdef CONFIG_IWL4965_HT 4159#ifdef CONFIG_IWL4965_HT
4170#ifdef CONFIG_IWL4965_HT_AGG
4171
4172/**
4173 * iwl4965_set_tx_status - Update driver's record of one Tx frame's status
4174 *
4175 * This will get sent to mac80211.
4176 */
4177static void iwl4965_set_tx_status(struct iwl4965_priv *priv, int txq_id, int idx,
4178 u32 status, u32 retry_count, u32 rate)
4179{
4180 struct ieee80211_tx_status *tx_status =
4181 &(priv->txq[txq_id].txb[idx].status);
4182
4183 tx_status->flags = status ? IEEE80211_TX_STATUS_ACK : 0;
4184 tx_status->retry_count += retry_count;
4185 tx_status->control.tx_rate = rate;
4186}
4187
4188 4160
4189/** 4161/**
4190 * iwl4965_sta_modify_enable_tid_tx - Enable Tx for this TID in station table 4162 * iwl4965_sta_modify_enable_tid_tx - Enable Tx for this TID in station table
4191 */ 4163 */
4192static void iwl4965_sta_modify_enable_tid_tx(struct iwl4965_priv *priv, 4164static void iwl4965_sta_modify_enable_tid_tx(struct iwl_priv *priv,
4193 int sta_id, int tid) 4165 int sta_id, int tid)
4194{ 4166{
4195 unsigned long flags; 4167 unsigned long flags;
@@ -4204,24 +4176,24 @@ static void iwl4965_sta_modify_enable_tid_tx(struct iwl4965_priv *priv,
4204 iwl4965_send_add_station(priv, &priv->stations[sta_id].sta, CMD_ASYNC); 4176 iwl4965_send_add_station(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
4205} 4177}
4206 4178
4207
4208/** 4179/**
4209 * iwl4965_tx_status_reply_compressed_ba - Update tx status from block-ack 4180 * iwl4965_tx_status_reply_compressed_ba - Update tx status from block-ack
4210 * 4181 *
4211 * Go through block-ack's bitmap of ACK'd frames, update driver's record of 4182 * Go through block-ack's bitmap of ACK'd frames, update driver's record of
4212 * ACK vs. not. This gets sent to mac80211, then to rate scaling algo. 4183 * ACK vs. not. This gets sent to mac80211, then to rate scaling algo.
4213 */ 4184 */
4214static int iwl4965_tx_status_reply_compressed_ba(struct iwl4965_priv *priv, 4185static int iwl4965_tx_status_reply_compressed_ba(struct iwl_priv *priv,
4215 struct iwl4965_ht_agg *agg, 4186 struct iwl4965_ht_agg *agg,
4216 struct iwl4965_compressed_ba_resp* 4187 struct iwl4965_compressed_ba_resp*
4217 ba_resp) 4188 ba_resp)
4218 4189
4219{ 4190{
4220 int i, sh, ack; 4191 int i, sh, ack;
4221 u16 ba_seq_ctl = le16_to_cpu(ba_resp->ba_seq_ctl); 4192 u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl);
4222 u32 bitmap0, bitmap1; 4193 u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
4223 u32 resp_bitmap0 = le32_to_cpu(ba_resp->ba_bitmap0); 4194 u64 bitmap;
4224 u32 resp_bitmap1 = le32_to_cpu(ba_resp->ba_bitmap1); 4195 int successes = 0;
4196 struct ieee80211_tx_status *tx_status;
4225 4197
4226 if (unlikely(!agg->wait_for_ba)) { 4198 if (unlikely(!agg->wait_for_ba)) {
4227 IWL_ERROR("Received BA when not expected\n"); 4199 IWL_ERROR("Received BA when not expected\n");
@@ -4230,17 +4202,15 @@ static int iwl4965_tx_status_reply_compressed_ba(struct iwl4965_priv *priv,
4230 4202
4231 /* Mark that the expected block-ack response arrived */ 4203 /* Mark that the expected block-ack response arrived */
4232 agg->wait_for_ba = 0; 4204 agg->wait_for_ba = 0;
4233 IWL_DEBUG_TX_REPLY("BA %d %d\n", agg->start_idx, ba_resp->ba_seq_ctl); 4205 IWL_DEBUG_TX_REPLY("BA %d %d\n", agg->start_idx, ba_resp->seq_ctl);
4234 4206
4235 /* Calculate shift to align block-ack bits with our Tx window bits */ 4207 /* Calculate shift to align block-ack bits with our Tx window bits */
4236 sh = agg->start_idx - SEQ_TO_INDEX(ba_seq_ctl >> 4); 4208 sh = agg->start_idx - SEQ_TO_INDEX(seq_ctl>>4);
4237 if (sh < 0) /* tbw something is wrong with indices */ 4209 if (sh < 0) /* tbw something is wrong with indices */
4238 sh += 0x100; 4210 sh += 0x100;
4239 4211
4240 /* don't use 64-bit values for now */ 4212 /* don't use 64-bit values for now */
4241 bitmap0 = resp_bitmap0 >> sh; 4213 bitmap = le64_to_cpu(ba_resp->bitmap) >> sh;
4242 bitmap1 = resp_bitmap1 >> sh;
4243 bitmap0 |= (resp_bitmap1 & ((1 << sh) | ((1 << sh) - 1))) << (32 - sh);
4244 4214
4245 if (agg->frame_count > (64 - sh)) { 4215 if (agg->frame_count > (64 - sh)) {
4246 IWL_DEBUG_TX_REPLY("more frames than bitmap size"); 4216 IWL_DEBUG_TX_REPLY("more frames than bitmap size");
@@ -4249,23 +4219,113 @@ static int iwl4965_tx_status_reply_compressed_ba(struct iwl4965_priv *priv,
4249 4219
4250 /* check for success or failure according to the 4220 /* check for success or failure according to the
4251 * transmitted bitmap and block-ack bitmap */ 4221 * transmitted bitmap and block-ack bitmap */
4252 bitmap0 &= agg->bitmap0; 4222 bitmap &= agg->bitmap;
4253 bitmap1 &= agg->bitmap1;
4254 4223
4255 /* For each frame attempted in aggregation, 4224 /* For each frame attempted in aggregation,
4256 * update driver's record of tx frame's status. */ 4225 * update driver's record of tx frame's status. */
4257 for (i = 0; i < agg->frame_count ; i++) { 4226 for (i = 0; i < agg->frame_count ; i++) {
4258 int idx = (agg->start_idx + i) & 0xff; 4227 ack = bitmap & (1 << i);
4259 ack = bitmap0 & (1 << i); 4228 successes += !!ack;
4260 IWL_DEBUG_TX_REPLY("%s ON i=%d idx=%d raw=%d\n", 4229 IWL_DEBUG_TX_REPLY("%s ON i=%d idx=%d raw=%d\n",
4261 ack? "ACK":"NACK", i, idx, agg->start_idx + i); 4230 ack? "ACK":"NACK", i, (agg->start_idx + i) & 0xff,
4262 iwl4965_set_tx_status(priv, agg->txq_id, idx, ack, 0, 4231 agg->start_idx + i);
4263 agg->rate_n_flags); 4232 }
4233
4234 tx_status = &priv->txq[scd_flow].txb[agg->start_idx].status;
4235 tx_status->flags = IEEE80211_TX_STATUS_ACK;
4236 tx_status->flags |= IEEE80211_TX_STATUS_AMPDU;
4237 tx_status->ampdu_ack_map = successes;
4238 tx_status->ampdu_ack_len = agg->frame_count;
4239 iwl4965_hwrate_to_tx_control(priv, agg->rate_n_flags,
4240 &tx_status->control);
4241
4242 IWL_DEBUG_TX_REPLY("Bitmap %llx\n", (unsigned long long)bitmap);
4264 4243
4244 return 0;
4245}
4246
4247/**
4248 * iwl4965_tx_queue_stop_scheduler - Stop queue, but keep configuration
4249 */
4250static void iwl4965_tx_queue_stop_scheduler(struct iwl_priv *priv,
4251 u16 txq_id)
4252{
4253 /* Simply stop the queue, but don't change any configuration;
4254 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
4255 iwl_write_prph(priv,
4256 IWL49_SCD_QUEUE_STATUS_BITS(txq_id),
4257 (0 << SCD_QUEUE_STTS_REG_POS_ACTIVE)|
4258 (1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
4259}
4260
4261/**
4262 * txq_id must be greater than IWL_BACK_QUEUE_FIRST_ID
4263 * priv->lock must be held by the caller
4264 */
4265static int iwl4965_tx_queue_agg_disable(struct iwl_priv *priv, u16 txq_id,
4266 u16 ssn_idx, u8 tx_fifo)
4267{
4268 int ret = 0;
4269
4270 if (IWL_BACK_QUEUE_FIRST_ID > txq_id) {
4271 IWL_WARNING("queue number too small: %d, must be > %d\n",
4272 txq_id, IWL_BACK_QUEUE_FIRST_ID);
4273 return -EINVAL;
4265 } 4274 }
4266 4275
4267 IWL_DEBUG_TX_REPLY("Bitmap %x%x\n", bitmap0, bitmap1); 4276 ret = iwl_grab_nic_access(priv);
4277 if (ret)
4278 return ret;
4279
4280 iwl4965_tx_queue_stop_scheduler(priv, txq_id);
4268 4281
4282 iwl_clear_bits_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));
4283
4284 priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
4285 priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
4286 /* supposes that ssn_idx is valid (!= 0xFFF) */
4287 iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx);
4288
4289 iwl_clear_bits_prph(priv, IWL49_SCD_INTERRUPT_MASK, (1 << txq_id));
4290 iwl4965_txq_ctx_deactivate(priv, txq_id);
4291 iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 0);
4292
4293 iwl_release_nic_access(priv);
4294
4295 return 0;
4296}
4297
4298int iwl4965_check_empty_hw_queue(struct iwl_priv *priv, int sta_id,
4299 u8 tid, int txq_id)
4300{
4301 struct iwl4965_queue *q = &priv->txq[txq_id].q;
4302 u8 *addr = priv->stations[sta_id].sta.sta.addr;
4303 struct iwl4965_tid_data *tid_data = &priv->stations[sta_id].tid[tid];
4304
4305 switch (priv->stations[sta_id].tid[tid].agg.state) {
4306 case IWL_EMPTYING_HW_QUEUE_DELBA:
4307 /* We are reclaiming the last packet of the */
4308 /* aggregated HW queue */
4309 if (txq_id == tid_data->agg.txq_id &&
4310 q->read_ptr == q->write_ptr) {
4311 u16 ssn = SEQ_TO_SN(tid_data->seq_number);
4312 int tx_fifo = default_tid_to_tx_fifo[tid];
4313 IWL_DEBUG_HT("HW queue empty: continue DELBA flow\n");
4314 iwl4965_tx_queue_agg_disable(priv, txq_id,
4315 ssn, tx_fifo);
4316 tid_data->agg.state = IWL_AGG_OFF;
4317 ieee80211_stop_tx_ba_cb_irqsafe(priv->hw, addr, tid);
4318 }
4319 break;
4320 case IWL_EMPTYING_HW_QUEUE_ADDBA:
4321 /* We are reclaiming the last packet of the queue */
4322 if (tid_data->tfds_in_queue == 0) {
4323 IWL_DEBUG_HT("HW queue empty: continue ADDBA flow\n");
4324 tid_data->agg.state = IWL_AGG_ON;
4325 ieee80211_start_tx_ba_cb_irqsafe(priv->hw, addr, tid);
4326 }
4327 break;
4328 }
4269 return 0; 4329 return 0;
4270} 4330}
4271 4331
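The rewritten accounting in the hunk above collapses the old pair of 32-bit block-ack bitmaps into a single 64-bit value, shifts it into alignment with the driver's Tx window, and counts the acknowledged subframes before reporting one aggregate status to mac80211. A minimal standalone sketch of that alignment-and-count step, in plain C with invented names (count_ba_successes and the example values are not from the driver):

#include <stdint.h>
#include <stdio.h>

/* Sequence numbers advance by 16 per MPDU; the low byte indexes the Tx ring. */
#define SEQ_TO_INDEX(seq) ((uint8_t)((seq) & 0xff))

/*
 * Align the 64-bit block-ack bitmap (bit 0 corresponds to the sequence number
 * in seq_ctl) with the Tx window starting at start_idx, then count how many of
 * frame_count attempted subframes were acknowledged.  Returns -1 when the
 * window is larger than the shifted bitmap can describe.
 */
static int count_ba_successes(uint64_t ba_bitmap, uint16_t seq_ctl,
                              uint16_t start_idx, uint16_t frame_count,
                              uint64_t sent_bitmap)
{
    int sh = start_idx - SEQ_TO_INDEX(seq_ctl >> 4);
    int successes = 0;
    uint64_t bitmap;

    if (sh < 0)                          /* indices wrapped around the 256-entry ring */
        sh += 0x100;

    if (sh >= 64 || frame_count > 64 - sh)   /* more frames than the bitmap covers */
        return -1;

    /* Keep only the frames actually transmitted in this aggregate. */
    bitmap = (ba_bitmap >> sh) & sent_bitmap;

    for (int i = 0; i < frame_count; i++)
        if (bitmap & (1ULL << i))
            successes++;

    return successes;
}

int main(void)
{
    /* 5 subframes sent from ring index 10; the receiver ACKed all but the 3rd. */
    uint64_t ba = 0x1bULL << 2;          /* bitmap as reported, offset by 2 */
    uint16_t seq_ctl = (10 - 2) << 4;    /* BA starting sequence two slots behind */
    int ok = count_ba_successes(ba, seq_ctl, 10, 5, 0x1f);

    printf("%d of 5 subframes acknowledged\n", ok);
    return 0;
}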
@@ -4285,7 +4345,7 @@ static inline int iwl4965_queue_dec_wrap(int index, int n_bd)
4285 * Handles block-acknowledge notification from device, which reports success 4345 * Handles block-acknowledge notification from device, which reports success
4286 * of frames sent via aggregation. 4346 * of frames sent via aggregation.
4287 */ 4347 */
4288static void iwl4965_rx_reply_compressed_ba(struct iwl4965_priv *priv, 4348static void iwl4965_rx_reply_compressed_ba(struct iwl_priv *priv,
4289 struct iwl4965_rx_mem_buffer *rxb) 4349 struct iwl4965_rx_mem_buffer *rxb)
4290{ 4350{
4291 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data; 4351 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
@@ -4293,48 +4353,43 @@ static void iwl4965_rx_reply_compressed_ba(struct iwl4965_priv *priv,
4293 int index; 4353 int index;
4294 struct iwl4965_tx_queue *txq = NULL; 4354 struct iwl4965_tx_queue *txq = NULL;
4295 struct iwl4965_ht_agg *agg; 4355 struct iwl4965_ht_agg *agg;
4356 DECLARE_MAC_BUF(mac);
4296 4357
4297 /* "flow" corresponds to Tx queue */ 4358 /* "flow" corresponds to Tx queue */
4298 u16 ba_resp_scd_flow = le16_to_cpu(ba_resp->scd_flow); 4359 u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
4299 4360
4300 /* "ssn" is start of block-ack Tx window, corresponds to index 4361 /* "ssn" is start of block-ack Tx window, corresponds to index
4301 * (in Tx queue's circular buffer) of first TFD/frame in window */ 4362 * (in Tx queue's circular buffer) of first TFD/frame in window */
4302 u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn); 4363 u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);
4303 4364
4304 if (ba_resp_scd_flow >= ARRAY_SIZE(priv->txq)) { 4365 if (scd_flow >= priv->hw_params.max_txq_num) {
4305 IWL_ERROR("BUG_ON scd_flow is bigger than number of queues"); 4366 IWL_ERROR("BUG_ON scd_flow is bigger than number of queues");
4306 return; 4367 return;
4307 } 4368 }
4308 4369
4309 txq = &priv->txq[ba_resp_scd_flow]; 4370 txq = &priv->txq[scd_flow];
4310 agg = &priv->stations[ba_resp->sta_id].tid[ba_resp->tid].agg; 4371 agg = &priv->stations[ba_resp->sta_id].tid[ba_resp->tid].agg;
4311 4372
4312 /* Find index just before block-ack window */ 4373 /* Find index just before block-ack window */
4313 index = iwl4965_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd); 4374 index = iwl4965_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);
4314 4375
4315 /* TODO: Need to get this copy more safely - now good for debug */ 4376 /* TODO: Need to get this copy more safely - now good for debug */
4316/* 4377
4317 {
4318 DECLARE_MAC_BUF(mac);
4319 IWL_DEBUG_TX_REPLY("REPLY_COMPRESSED_BA [%d]Received from %s, " 4378 IWL_DEBUG_TX_REPLY("REPLY_COMPRESSED_BA [%d]Received from %s, "
4320 "sta_id = %d\n", 4379 "sta_id = %d\n",
4321 agg->wait_for_ba, 4380 agg->wait_for_ba,
4322 print_mac(mac, (u8*) &ba_resp->sta_addr_lo32), 4381 print_mac(mac, (u8*) &ba_resp->sta_addr_lo32),
4323 ba_resp->sta_id); 4382 ba_resp->sta_id);
4324 IWL_DEBUG_TX_REPLY("TID = %d, SeqCtl = %d, bitmap = 0x%X%X, scd_flow = " 4383 IWL_DEBUG_TX_REPLY("TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = "
4325 "%d, scd_ssn = %d\n", 4384 "%d, scd_ssn = %d\n",
4326 ba_resp->tid, 4385 ba_resp->tid,
4327 ba_resp->ba_seq_ctl, 4386 ba_resp->seq_ctl,
4328 ba_resp->ba_bitmap1, 4387 (unsigned long long)le64_to_cpu(ba_resp->bitmap),
4329 ba_resp->ba_bitmap0,
4330 ba_resp->scd_flow, 4388 ba_resp->scd_flow,
4331 ba_resp->scd_ssn); 4389 ba_resp->scd_ssn);
4332 IWL_DEBUG_TX_REPLY("DAT start_idx = %d, bitmap = 0x%X%X \n", 4390 IWL_DEBUG_TX_REPLY("DAT start_idx = %d, bitmap = 0x%llx \n",
4333 agg->start_idx, 4391 agg->start_idx,
4334 agg->bitmap1, 4392 (unsigned long long)agg->bitmap);
4335 agg->bitmap0);
4336 }
4337*/
4338 4393
4339 /* Update driver's record of ACK vs. not for each frame in window */ 4394 /* Update driver's record of ACK vs. not for each frame in window */
4340 iwl4965_tx_status_reply_compressed_ba(priv, agg, ba_resp); 4395 iwl4965_tx_status_reply_compressed_ba(priv, agg, ba_resp);
@@ -4342,29 +4397,23 @@ static void iwl4965_rx_reply_compressed_ba(struct iwl4965_priv *priv,
4342 /* Release all TFDs before the SSN, i.e. all TFDs in front of 4397 /* Release all TFDs before the SSN, i.e. all TFDs in front of
4343 * block-ack window (we assume that they've been successfully 4398 * block-ack window (we assume that they've been successfully
4344 * transmitted ... if not, it's too late anyway). */ 4399 * transmitted ... if not, it's too late anyway). */
4345 if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) 4400 if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
4346 iwl4965_tx_queue_reclaim(priv, ba_resp_scd_flow, index); 4401 int freed = iwl4965_tx_queue_reclaim(priv, scd_flow, index);
4347 4402 priv->stations[ba_resp->sta_id].
4348} 4403 tid[ba_resp->tid].tfds_in_queue -= freed;
4349 4404 if (iwl4965_queue_space(&txq->q) > txq->q.low_mark &&
4350 4405 priv->mac80211_registered &&
4351/** 4406 agg->state != IWL_EMPTYING_HW_QUEUE_DELBA)
4352 * iwl4965_tx_queue_stop_scheduler - Stop queue, but keep configuration 4407 ieee80211_wake_queue(priv->hw, scd_flow);
4353 */ 4408 iwl4965_check_empty_hw_queue(priv, ba_resp->sta_id,
4354static void iwl4965_tx_queue_stop_scheduler(struct iwl4965_priv *priv, u16 txq_id) 4409 ba_resp->tid, scd_flow);
4355{ 4410 }
4356 /* Simply stop the queue, but don't change any configuration;
4357 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
4358 iwl4965_write_prph(priv,
4359 KDR_SCD_QUEUE_STATUS_BITS(txq_id),
4360 (0 << SCD_QUEUE_STTS_REG_POS_ACTIVE)|
4361 (1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
4362} 4411}
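The compressed-BA handler above now reclaims every TFD in front of the block-ack window, subtracts the freed count from the TID's tfds_in_queue, and wakes the mac80211 queue once free space rises above the low-water mark. A standalone ring-buffer sketch of that reclaim step, assuming a 256-entry ring and invented names (tx_ring, ring_reclaim); it is not the driver code:

#include <stdio.h>

#define RING_SIZE 256   /* matches the 0xff index masking in the driver */

struct tx_ring {
    int read_ptr;       /* oldest entry not yet reclaimed */
    int write_ptr;      /* next free entry */
};

/* Entries still waiting for completion. */
static int ring_used(const struct tx_ring *q)
{
    return (q->write_ptr - q->read_ptr) & (RING_SIZE - 1);
}

/*
 * Release every entry up to (but not including) `index`, the slot just ahead
 * of the block-ack window, and return how many were freed.
 */
static int ring_reclaim(struct tx_ring *q, int index)
{
    int freed = 0;

    while (q->read_ptr != index) {
        q->read_ptr = (q->read_ptr + 1) & (RING_SIZE - 1);
        freed++;
    }
    return freed;
}

int main(void)
{
    struct tx_ring q = { .read_ptr = 250, .write_ptr = 10 };   /* wrapped ring */
    int low_mark = 8;

    /* BA reported SSN 4: everything before slot 4 is done. */
    int freed = ring_reclaim(&q, 4);

    printf("freed %d, %d still queued\n", freed, ring_used(&q));
    if (RING_SIZE - ring_used(&q) > low_mark)
        printf("space above low mark: wake the queue\n");
    return 0;
}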
4363 4412
4364/** 4413/**
4365 * iwl4965_tx_queue_set_q2ratid - Map unique receiver/tid combination to a queue 4414 * iwl4965_tx_queue_set_q2ratid - Map unique receiver/tid combination to a queue
4366 */ 4415 */
4367static int iwl4965_tx_queue_set_q2ratid(struct iwl4965_priv *priv, u16 ra_tid, 4416static int iwl4965_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid,
4368 u16 txq_id) 4417 u16 txq_id)
4369{ 4418{
4370 u32 tbl_dw_addr; 4419 u32 tbl_dw_addr;
@@ -4376,25 +4425,26 @@ static int iwl4965_tx_queue_set_q2ratid(struct iwl4965_priv *priv, u16 ra_tid,
4376 tbl_dw_addr = priv->scd_base_addr + 4425 tbl_dw_addr = priv->scd_base_addr +
4377 SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id); 4426 SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);
4378 4427
4379 tbl_dw = iwl4965_read_targ_mem(priv, tbl_dw_addr); 4428 tbl_dw = iwl_read_targ_mem(priv, tbl_dw_addr);
4380 4429
4381 if (txq_id & 0x1) 4430 if (txq_id & 0x1)
4382 tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF); 4431 tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
4383 else 4432 else
4384 tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000); 4433 tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);
4385 4434
4386 iwl4965_write_targ_mem(priv, tbl_dw_addr, tbl_dw); 4435 iwl_write_targ_mem(priv, tbl_dw_addr, tbl_dw);
4387 4436
4388 return 0; 4437 return 0;
4389} 4438}
4390 4439
4440
4391/** 4441/**
4392 * iwl4965_tx_queue_agg_enable - Set up & enable aggregation for selected queue 4442 * iwl4965_tx_queue_agg_enable - Set up & enable aggregation for selected queue
4393 * 4443 *
4394 * NOTE: txq_id must be greater than IWL_BACK_QUEUE_FIRST_ID, 4444 * NOTE: txq_id must be greater than IWL_BACK_QUEUE_FIRST_ID,
4395 * i.e. it must be one of the higher queues used for aggregation 4445 * i.e. it must be one of the higher queues used for aggregation
4396 */ 4446 */
4397static int iwl4965_tx_queue_agg_enable(struct iwl4965_priv *priv, int txq_id, 4447static int iwl4965_tx_queue_agg_enable(struct iwl_priv *priv, int txq_id,
4398 int tx_fifo, int sta_id, int tid, 4448 int tx_fifo, int sta_id, int tid,
4399 u16 ssn_idx) 4449 u16 ssn_idx)
4400{ 4450{
@@ -4412,7 +4462,7 @@ static int iwl4965_tx_queue_agg_enable(struct iwl4965_priv *priv, int txq_id,
4412 iwl4965_sta_modify_enable_tid_tx(priv, sta_id, tid); 4462 iwl4965_sta_modify_enable_tid_tx(priv, sta_id, tid);
4413 4463
4414 spin_lock_irqsave(&priv->lock, flags); 4464 spin_lock_irqsave(&priv->lock, flags);
4415 rc = iwl4965_grab_nic_access(priv); 4465 rc = iwl_grab_nic_access(priv);
4416 if (rc) { 4466 if (rc) {
4417 spin_unlock_irqrestore(&priv->lock, flags); 4467 spin_unlock_irqrestore(&priv->lock, flags);
4418 return rc; 4468 return rc;
@@ -4425,7 +4475,7 @@ static int iwl4965_tx_queue_agg_enable(struct iwl4965_priv *priv, int txq_id,
4425 iwl4965_tx_queue_set_q2ratid(priv, ra_tid, txq_id); 4475 iwl4965_tx_queue_set_q2ratid(priv, ra_tid, txq_id);
4426 4476
4427 /* Set this queue as a chain-building queue */ 4477 /* Set this queue as a chain-building queue */
4428 iwl4965_set_bits_prph(priv, KDR_SCD_QUEUECHAIN_SEL, (1 << txq_id)); 4478 iwl_set_bits_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));
4429 4479
4430 /* Place first TFD at index corresponding to start sequence number. 4480 /* Place first TFD at index corresponding to start sequence number.
4431 * Assumes that ssn_idx is valid (!= 0xFFF) */ 4481 * Assumes that ssn_idx is valid (!= 0xFFF) */
@@ -4434,69 +4484,27 @@ static int iwl4965_tx_queue_agg_enable(struct iwl4965_priv *priv, int txq_id,
4434 iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx); 4484 iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx);
4435 4485
4436 /* Set up Tx window size and frame limit for this queue */ 4486 /* Set up Tx window size and frame limit for this queue */
4437 iwl4965_write_targ_mem(priv, 4487 iwl_write_targ_mem(priv,
4438 priv->scd_base_addr + SCD_CONTEXT_QUEUE_OFFSET(txq_id), 4488 priv->scd_base_addr + SCD_CONTEXT_QUEUE_OFFSET(txq_id),
4439 (SCD_WIN_SIZE << SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) & 4489 (SCD_WIN_SIZE << SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
4440 SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK); 4490 SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);
4441 4491
4442 iwl4965_write_targ_mem(priv, priv->scd_base_addr + 4492 iwl_write_targ_mem(priv, priv->scd_base_addr +
4443 SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32), 4493 SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
4444 (SCD_FRAME_LIMIT << SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) 4494 (SCD_FRAME_LIMIT << SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS)
4445 & SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK); 4495 & SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
4446 4496
4447 iwl4965_set_bits_prph(priv, KDR_SCD_INTERRUPT_MASK, (1 << txq_id)); 4497 iwl_set_bits_prph(priv, IWL49_SCD_INTERRUPT_MASK, (1 << txq_id));
4448 4498
4449 /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */ 4499 /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
4450 iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 1); 4500 iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 1);
4451 4501
4452 iwl4965_release_nic_access(priv); 4502 iwl_release_nic_access(priv);
4453 spin_unlock_irqrestore(&priv->lock, flags);
4454
4455 return 0;
4456}
4457
4458/**
4459 * txq_id must be greater than IWL_BACK_QUEUE_FIRST_ID
4460 */
4461static int iwl4965_tx_queue_agg_disable(struct iwl4965_priv *priv, u16 txq_id,
4462 u16 ssn_idx, u8 tx_fifo)
4463{
4464 unsigned long flags;
4465 int rc;
4466
4467 if (IWL_BACK_QUEUE_FIRST_ID > txq_id) {
4468 IWL_WARNING("queue number too small: %d, must be > %d\n",
4469 txq_id, IWL_BACK_QUEUE_FIRST_ID);
4470 return -EINVAL;
4471 }
4472
4473 spin_lock_irqsave(&priv->lock, flags);
4474 rc = iwl4965_grab_nic_access(priv);
4475 if (rc) {
4476 spin_unlock_irqrestore(&priv->lock, flags);
4477 return rc;
4478 }
4479
4480 iwl4965_tx_queue_stop_scheduler(priv, txq_id);
4481
4482 iwl4965_clear_bits_prph(priv, KDR_SCD_QUEUECHAIN_SEL, (1 << txq_id));
4483
4484 priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
4485 priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
4486 /* supposes that ssn_idx is valid (!= 0xFFF) */
4487 iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx);
4488
4489 iwl4965_clear_bits_prph(priv, KDR_SCD_INTERRUPT_MASK, (1 << txq_id));
4490 iwl4965_txq_ctx_deactivate(priv, txq_id);
4491 iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 0);
4492
4493 iwl4965_release_nic_access(priv);
4494 spin_unlock_irqrestore(&priv->lock, flags); 4503 spin_unlock_irqrestore(&priv->lock, flags);
4495 4504
4496 return 0; 4505 return 0;
4497} 4506}
4498 4507
4499#endif/* CONFIG_IWL4965_HT_AGG */
4500#endif /* CONFIG_IWL4965_HT */ 4508#endif /* CONFIG_IWL4965_HT */
4501 4509
4502/** 4510/**
@@ -4513,10 +4521,10 @@ static int iwl4965_tx_queue_agg_disable(struct iwl4965_priv *priv, u16 txq_id,
4513 * calling this function (which runs REPLY_TX_LINK_QUALITY_CMD, 4521 * calling this function (which runs REPLY_TX_LINK_QUALITY_CMD,
4514 * which requires station table entry to exist). 4522 * which requires station table entry to exist).
4515 */ 4523 */
4516void iwl4965_add_station(struct iwl4965_priv *priv, const u8 *addr, int is_ap) 4524void iwl4965_add_station(struct iwl_priv *priv, const u8 *addr, int is_ap)
4517{ 4525{
4518 int i, r; 4526 int i, r;
4519 struct iwl4965_link_quality_cmd link_cmd = { 4527 struct iwl_link_quality_cmd link_cmd = {
4520 .reserved1 = 0, 4528 .reserved1 = 0,
4521 }; 4529 };
4522 u16 rate_flags; 4530 u16 rate_flags;
@@ -4525,7 +4533,7 @@ void iwl4965_add_station(struct iwl4965_priv *priv, const u8 *addr, int is_ap)
4525 * all the way down to 1M in IEEE order, and then spin on 1M */ 4533 * all the way down to 1M in IEEE order, and then spin on 1M */
4526 if (is_ap) 4534 if (is_ap)
4527 r = IWL_RATE_54M_INDEX; 4535 r = IWL_RATE_54M_INDEX;
4528 else if (priv->phymode == MODE_IEEE80211A) 4536 else if (priv->band == IEEE80211_BAND_5GHZ)
4529 r = IWL_RATE_6M_INDEX; 4537 r = IWL_RATE_6M_INDEX;
4530 else 4538 else
4531 r = IWL_RATE_1M_INDEX; 4539 r = IWL_RATE_1M_INDEX;
@@ -4550,24 +4558,25 @@ void iwl4965_add_station(struct iwl4965_priv *priv, const u8 *addr, int is_ap)
4550 link_cmd.agg_params.agg_time_limit = cpu_to_le16(4000); 4558 link_cmd.agg_params.agg_time_limit = cpu_to_le16(4000);
4551 4559
4552 /* Update the rate scaling for control frame Tx to AP */ 4560 /* Update the rate scaling for control frame Tx to AP */
4553 link_cmd.sta_id = is_ap ? IWL_AP_ID : IWL4965_BROADCAST_ID; 4561 link_cmd.sta_id = is_ap ? IWL_AP_ID : priv->hw_params.bcast_sta_id;
4554 4562
4555 iwl4965_send_cmd_pdu(priv, REPLY_TX_LINK_QUALITY_CMD, sizeof(link_cmd), 4563 iwl_send_cmd_pdu_async(priv, REPLY_TX_LINK_QUALITY_CMD,
4556 &link_cmd); 4564 sizeof(link_cmd), &link_cmd, NULL);
4557} 4565}
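The comment in the hunk above describes the link-quality table fill: start at the chosen rate, fall back one step per retry in IEEE order, then spin on the lowest rate. A standalone sketch of that fill loop, using an invented rate list and table length rather than the driver's actual rate definitions:

#include <stdio.h>

#define MAX_RETRY_NUM 16   /* length of the fallback table, assumed here */

static const char *rates[] = {
    "1M", "2M", "5.5M", "11M", "6M", "9M", "12M", "18M",
    "24M", "36M", "48M", "54M",
};

/*
 * Fill a retry table: start at start_idx, step down one rate per retry until
 * the lowest rate is reached, then keep repeating the lowest rate.
 */
static void fill_fallback(int start_idx, const char *table[MAX_RETRY_NUM])
{
    int r = start_idx;

    for (int i = 0; i < MAX_RETRY_NUM; i++) {
        table[i] = rates[r];
        if (r > 0)
            r--;            /* fall back one step */
    }
}

int main(void)
{
    const char *table[MAX_RETRY_NUM];

    fill_fallback(11, table);       /* start at 54M, as for an AP */
    for (int i = 0; i < MAX_RETRY_NUM; i++)
        printf("retry %2d -> %s\n", i, table[i]);
    return 0;
}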
4558 4566
4559#ifdef CONFIG_IWL4965_HT 4567#ifdef CONFIG_IWL4965_HT
4560 4568
4561static u8 iwl4965_is_channel_extension(struct iwl4965_priv *priv, int phymode, 4569static u8 iwl4965_is_channel_extension(struct iwl_priv *priv,
4562 u16 channel, u8 extension_chan_offset) 4570 enum ieee80211_band band,
4571 u16 channel, u8 extension_chan_offset)
4563{ 4572{
4564 const struct iwl4965_channel_info *ch_info; 4573 const struct iwl_channel_info *ch_info;
4565 4574
4566 ch_info = iwl4965_get_channel_info(priv, phymode, channel); 4575 ch_info = iwl_get_channel_info(priv, band, channel);
4567 if (!is_channel_valid(ch_info)) 4576 if (!is_channel_valid(ch_info))
4568 return 0; 4577 return 0;
4569 4578
4570 if (extension_chan_offset == IWL_EXT_CHANNEL_OFFSET_AUTO) 4579 if (extension_chan_offset == IWL_EXT_CHANNEL_OFFSET_NONE)
4571 return 0; 4580 return 0;
4572 4581
4573 if ((ch_info->fat_extension_channel == extension_chan_offset) || 4582 if ((ch_info->fat_extension_channel == extension_chan_offset) ||
@@ -4577,14 +4586,14 @@ static u8 iwl4965_is_channel_extension(struct iwl4965_priv *priv, int phymode,
4577 return 0; 4586 return 0;
4578} 4587}
4579 4588
4580static u8 iwl4965_is_fat_tx_allowed(struct iwl4965_priv *priv, 4589static u8 iwl4965_is_fat_tx_allowed(struct iwl_priv *priv,
4581 struct ieee80211_ht_info *sta_ht_inf) 4590 struct ieee80211_ht_info *sta_ht_inf)
4582{ 4591{
4583 struct iwl_ht_info *iwl_ht_conf = &priv->current_ht_config; 4592 struct iwl_ht_info *iwl_ht_conf = &priv->current_ht_config;
4584 4593
4585 if ((!iwl_ht_conf->is_ht) || 4594 if ((!iwl_ht_conf->is_ht) ||
4586 (iwl_ht_conf->supported_chan_width != IWL_CHANNEL_WIDTH_40MHZ) || 4595 (iwl_ht_conf->supported_chan_width != IWL_CHANNEL_WIDTH_40MHZ) ||
4587 (iwl_ht_conf->extension_chan_offset == IWL_EXT_CHANNEL_OFFSET_AUTO)) 4596 (iwl_ht_conf->extension_chan_offset == IWL_EXT_CHANNEL_OFFSET_NONE))
4588 return 0; 4597 return 0;
4589 4598
4590 if (sta_ht_inf) { 4599 if (sta_ht_inf) {
@@ -4593,12 +4602,12 @@ static u8 iwl4965_is_fat_tx_allowed(struct iwl4965_priv *priv,
4593 return 0; 4602 return 0;
4594 } 4603 }
4595 4604
4596 return (iwl4965_is_channel_extension(priv, priv->phymode, 4605 return (iwl4965_is_channel_extension(priv, priv->band,
4597 iwl_ht_conf->control_channel, 4606 iwl_ht_conf->control_channel,
4598 iwl_ht_conf->extension_chan_offset)); 4607 iwl_ht_conf->extension_chan_offset));
4599} 4608}
4600 4609
4601void iwl4965_set_rxon_ht(struct iwl4965_priv *priv, struct iwl_ht_info *ht_info) 4610void iwl4965_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_info *ht_info)
4602{ 4611{
4603 struct iwl4965_rxon_cmd *rxon = &priv->staging_rxon; 4612 struct iwl4965_rxon_cmd *rxon = &priv->staging_rxon;
4604 u32 val; 4613 u32 val;
@@ -4629,9 +4638,7 @@ void iwl4965_set_rxon_ht(struct iwl4965_priv *priv, struct iwl_ht_info *ht_info)
4629 case IWL_EXT_CHANNEL_OFFSET_BELOW: 4638 case IWL_EXT_CHANNEL_OFFSET_BELOW:
4630 rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK; 4639 rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
4631 break; 4640 break;
4632 case IWL_EXT_CHANNEL_OFFSET_AUTO: 4641 case IWL_EXT_CHANNEL_OFFSET_NONE:
4633 rxon->flags &= ~RXON_FLG_CHANNEL_MODE_MIXED_MSK;
4634 break;
4635 default: 4642 default:
4636 rxon->flags &= ~RXON_FLG_CHANNEL_MODE_MIXED_MSK; 4643 rxon->flags &= ~RXON_FLG_CHANNEL_MODE_MIXED_MSK;
4637 break; 4644 break;
@@ -4654,7 +4661,7 @@ void iwl4965_set_rxon_ht(struct iwl4965_priv *priv, struct iwl_ht_info *ht_info)
4654 return; 4661 return;
4655} 4662}
4656 4663
4657void iwl4965_set_ht_add_station(struct iwl4965_priv *priv, u8 index, 4664void iwl4965_set_ht_add_station(struct iwl_priv *priv, u8 index,
4658 struct ieee80211_ht_info *sta_ht_inf) 4665 struct ieee80211_ht_info *sta_ht_inf)
4659{ 4666{
4660 __le32 sta_flags; 4667 __le32 sta_flags;
@@ -4699,7 +4706,7 @@ void iwl4965_set_ht_add_station(struct iwl4965_priv *priv, u8 index,
4699 return; 4706 return;
4700} 4707}
4701 4708
4702static void iwl4965_sta_modify_add_ba_tid(struct iwl4965_priv *priv, 4709static void iwl4965_sta_modify_add_ba_tid(struct iwl_priv *priv,
4703 int sta_id, int tid, u16 ssn) 4710 int sta_id, int tid, u16 ssn)
4704{ 4711{
4705 unsigned long flags; 4712 unsigned long flags;
@@ -4715,7 +4722,7 @@ static void iwl4965_sta_modify_add_ba_tid(struct iwl4965_priv *priv,
4715 iwl4965_send_add_station(priv, &priv->stations[sta_id].sta, CMD_ASYNC); 4722 iwl4965_send_add_station(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
4716} 4723}
4717 4724
4718static void iwl4965_sta_modify_del_ba_tid(struct iwl4965_priv *priv, 4725static void iwl4965_sta_modify_del_ba_tid(struct iwl_priv *priv,
4719 int sta_id, int tid) 4726 int sta_id, int tid)
4720{ 4727{
4721 unsigned long flags; 4728 unsigned long flags;
@@ -4730,136 +4737,94 @@ static void iwl4965_sta_modify_del_ba_tid(struct iwl4965_priv *priv,
4730 iwl4965_send_add_station(priv, &priv->stations[sta_id].sta, CMD_ASYNC); 4737 iwl4965_send_add_station(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
4731} 4738}
4732 4739
4733int iwl4965_mac_ampdu_action(struct ieee80211_hw *hw,
4734 enum ieee80211_ampdu_mlme_action action,
4735 const u8 *addr, u16 tid, u16 ssn)
4736{
4737 struct iwl4965_priv *priv = hw->priv;
4738 int sta_id;
4739 DECLARE_MAC_BUF(mac);
4740
4741 IWL_DEBUG_HT("A-MPDU action on da=%s tid=%d ",
4742 print_mac(mac, addr), tid);
4743 sta_id = iwl4965_hw_find_station(priv, addr);
4744 switch (action) {
4745 case IEEE80211_AMPDU_RX_START:
4746 IWL_DEBUG_HT("start Rx\n");
4747 iwl4965_sta_modify_add_ba_tid(priv, sta_id, tid, ssn);
4748 break;
4749 case IEEE80211_AMPDU_RX_STOP:
4750 IWL_DEBUG_HT("stop Rx\n");
4751 iwl4965_sta_modify_del_ba_tid(priv, sta_id, tid);
4752 break;
4753 default:
4754 IWL_DEBUG_HT("unknown\n");
4755 return -EINVAL;
4756 break;
4757 }
4758 return 0;
4759}
4760
4761#ifdef CONFIG_IWL4965_HT_AGG
4762
4763static const u16 default_tid_to_tx_fifo[] = {
4764 IWL_TX_FIFO_AC1,
4765 IWL_TX_FIFO_AC0,
4766 IWL_TX_FIFO_AC0,
4767 IWL_TX_FIFO_AC1,
4768 IWL_TX_FIFO_AC2,
4769 IWL_TX_FIFO_AC2,
4770 IWL_TX_FIFO_AC3,
4771 IWL_TX_FIFO_AC3,
4772 IWL_TX_FIFO_NONE,
4773 IWL_TX_FIFO_NONE,
4774 IWL_TX_FIFO_NONE,
4775 IWL_TX_FIFO_NONE,
4776 IWL_TX_FIFO_NONE,
4777 IWL_TX_FIFO_NONE,
4778 IWL_TX_FIFO_NONE,
4779 IWL_TX_FIFO_NONE,
4780 IWL_TX_FIFO_AC3
4781};
4782
4783/* 4740/*
4784 * Find first available (lowest unused) Tx Queue, mark it "active". 4741 * Find first available (lowest unused) Tx Queue, mark it "active".
4785 * Called only when finding queue for aggregation. 4742 * Called only when finding queue for aggregation.
4786 * Should never return anything < 7, because they should already 4743 * Should never return anything < 7, because they should already
4787 * be in use as EDCA AC (0-3), Command (4), HCCA (5, 6). 4744 * be in use as EDCA AC (0-3), Command (4), HCCA (5, 6).
4788 */ 4745 */
4789static int iwl4965_txq_ctx_activate_free(struct iwl4965_priv *priv) 4746static int iwl4965_txq_ctx_activate_free(struct iwl_priv *priv)
4790{ 4747{
4791 int txq_id; 4748 int txq_id;
4792 4749
4793 for (txq_id = 0; txq_id < priv->hw_setting.max_txq_num; txq_id++) 4750 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
4794 if (!test_and_set_bit(txq_id, &priv->txq_ctx_active_msk)) 4751 if (!test_and_set_bit(txq_id, &priv->txq_ctx_active_msk))
4795 return txq_id; 4752 return txq_id;
4796 return -1; 4753 return -1;
4797} 4754}
4798 4755
4799int iwl4965_mac_ht_tx_agg_start(struct ieee80211_hw *hw, u8 *da, u16 tid, 4756static int iwl4965_mac_ht_tx_agg_start(struct ieee80211_hw *hw, const u8 *da,
4800 u16 *start_seq_num) 4757 u16 tid, u16 *start_seq_num)
4801{ 4758{
4802 4759 struct iwl_priv *priv = hw->priv;
4803 struct iwl4965_priv *priv = hw->priv;
4804 int sta_id; 4760 int sta_id;
4805 int tx_fifo; 4761 int tx_fifo;
4806 int txq_id; 4762 int txq_id;
4807 int ssn = -1; 4763 int ssn = -1;
4764 int ret = 0;
4808 unsigned long flags; 4765 unsigned long flags;
4809 struct iwl4965_tid_data *tid_data; 4766 struct iwl4965_tid_data *tid_data;
4810 DECLARE_MAC_BUF(mac); 4767 DECLARE_MAC_BUF(mac);
4811 4768
4812 /* Determine Tx DMA/FIFO channel for this Traffic ID */
4813 if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo))) 4769 if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo)))
4814 tx_fifo = default_tid_to_tx_fifo[tid]; 4770 tx_fifo = default_tid_to_tx_fifo[tid];
4815 else 4771 else
4816 return -EINVAL; 4772 return -EINVAL;
4817 4773
4818 IWL_WARNING("iwl-AGG iwl4965_mac_ht_tx_agg_start on da=%s" 4774 IWL_WARNING("%s on da = %s tid = %d\n",
4819 " tid=%d\n", print_mac(mac, da), tid); 4775 __func__, print_mac(mac, da), tid);
4820 4776
4821 /* Get index into station table */
4822 sta_id = iwl4965_hw_find_station(priv, da); 4777 sta_id = iwl4965_hw_find_station(priv, da);
4823 if (sta_id == IWL_INVALID_STATION) 4778 if (sta_id == IWL_INVALID_STATION)
4824 return -ENXIO; 4779 return -ENXIO;
4825 4780
4826 /* Find available Tx queue for aggregation */ 4781 if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) {
4782 IWL_ERROR("Start AGG when state is not IWL_AGG_OFF !\n");
4783 return -ENXIO;
4784 }
4785
4827 txq_id = iwl4965_txq_ctx_activate_free(priv); 4786 txq_id = iwl4965_txq_ctx_activate_free(priv);
4828 if (txq_id == -1) 4787 if (txq_id == -1)
4829 return -ENXIO; 4788 return -ENXIO;
4830 4789
4831 spin_lock_irqsave(&priv->sta_lock, flags); 4790 spin_lock_irqsave(&priv->sta_lock, flags);
4832 tid_data = &priv->stations[sta_id].tid[tid]; 4791 tid_data = &priv->stations[sta_id].tid[tid];
4833
4834 /* Get starting sequence number for 1st frame in block ack window.
4835 * We'll use least signif byte as 1st frame's index into Tx queue. */
4836 ssn = SEQ_TO_SN(tid_data->seq_number); 4792 ssn = SEQ_TO_SN(tid_data->seq_number);
4837 tid_data->agg.txq_id = txq_id; 4793 tid_data->agg.txq_id = txq_id;
4838 spin_unlock_irqrestore(&priv->sta_lock, flags); 4794 spin_unlock_irqrestore(&priv->sta_lock, flags);
4839 4795
4840 *start_seq_num = ssn; 4796 *start_seq_num = ssn;
4797 ret = iwl4965_tx_queue_agg_enable(priv, txq_id, tx_fifo,
4798 sta_id, tid, ssn);
4799 if (ret)
4800 return ret;
4841 4801
4842 /* Update driver's link quality manager */ 4802 ret = 0;
4843 iwl4965_ba_status(priv, tid, BA_STATUS_ACTIVE); 4803 if (tid_data->tfds_in_queue == 0) {
4844 4804 printk(KERN_ERR "HW queue is empty\n");
4845 /* Set up and enable aggregation for selected Tx queue and FIFO */ 4805 tid_data->agg.state = IWL_AGG_ON;
4846 return iwl4965_tx_queue_agg_enable(priv, txq_id, tx_fifo, 4806 ieee80211_start_tx_ba_cb_irqsafe(hw, da, tid);
4847 sta_id, tid, ssn); 4807 } else {
4808 IWL_DEBUG_HT("HW queue is NOT empty: %d packets in HW queue\n",
4809 tid_data->tfds_in_queue);
4810 tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
4811 }
4812 return ret;
4848} 4813}
4849 4814
4850 4815static int iwl4965_mac_ht_tx_agg_stop(struct ieee80211_hw *hw, const u8 *da,
4851int iwl4965_mac_ht_tx_agg_stop(struct ieee80211_hw *hw, u8 *da, u16 tid, 4816 u16 tid)
4852 int generator)
4853{ 4817{
4854 4818
4855 struct iwl4965_priv *priv = hw->priv; 4819 struct iwl_priv *priv = hw->priv;
4856 int tx_fifo_id, txq_id, sta_id, ssn = -1; 4820 int tx_fifo_id, txq_id, sta_id, ssn = -1;
4857 struct iwl4965_tid_data *tid_data; 4821 struct iwl4965_tid_data *tid_data;
4858 int rc; 4822 int ret, write_ptr, read_ptr;
4823 unsigned long flags;
4859 DECLARE_MAC_BUF(mac); 4824 DECLARE_MAC_BUF(mac);
4860 4825
4861 if (!da) { 4826 if (!da) {
4862 IWL_ERROR("%s: da = NULL\n", __func__); 4827 IWL_ERROR("da = NULL\n");
4863 return -EINVAL; 4828 return -EINVAL;
4864 } 4829 }
4865 4830
@@ -4873,31 +4838,82 @@ int iwl4965_mac_ht_tx_agg_stop(struct ieee80211_hw *hw, u8 *da, u16 tid,
4873 if (sta_id == IWL_INVALID_STATION) 4838 if (sta_id == IWL_INVALID_STATION)
4874 return -ENXIO; 4839 return -ENXIO;
4875 4840
4841 if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_ON)
4842 IWL_WARNING("Stopping AGG while state not IWL_AGG_ON\n");
4843
4876 tid_data = &priv->stations[sta_id].tid[tid]; 4844 tid_data = &priv->stations[sta_id].tid[tid];
4877 ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4; 4845 ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
4878 txq_id = tid_data->agg.txq_id; 4846 txq_id = tid_data->agg.txq_id;
4847 write_ptr = priv->txq[txq_id].q.write_ptr;
4848 read_ptr = priv->txq[txq_id].q.read_ptr;
4849
4850 /* The queue is not empty */
4851 if (write_ptr != read_ptr) {
4852 IWL_DEBUG_HT("Stopping a non empty AGG HW QUEUE\n");
4853 priv->stations[sta_id].tid[tid].agg.state =
4854 IWL_EMPTYING_HW_QUEUE_DELBA;
4855 return 0;
4856 }
4879 4857
4880 rc = iwl4965_tx_queue_agg_disable(priv, txq_id, ssn, tx_fifo_id); 4858 IWL_DEBUG_HT("HW queue empty\n");
4881 /* FIXME: need more safe way to handle error condition */ 4859 priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;
4882 if (rc) 4860
4883 return rc; 4861 spin_lock_irqsave(&priv->lock, flags);
4862 ret = iwl4965_tx_queue_agg_disable(priv, txq_id, ssn, tx_fifo_id);
4863 spin_unlock_irqrestore(&priv->lock, flags);
4864
4865 if (ret)
4866 return ret;
4867
4868 ieee80211_stop_tx_ba_cb_irqsafe(priv->hw, da, tid);
4884 4869
4885 iwl4965_ba_status(priv, tid, BA_STATUS_INITIATOR_DELBA);
4886 IWL_DEBUG_INFO("iwl4965_mac_ht_tx_agg_stop on da=%s tid=%d\n", 4870 IWL_DEBUG_INFO("iwl4965_mac_ht_tx_agg_stop on da=%s tid=%d\n",
4887 print_mac(mac, da), tid); 4871 print_mac(mac, da), tid);
4888 4872
4889 return 0; 4873 return 0;
4890} 4874}
4891 4875
4876int iwl4965_mac_ampdu_action(struct ieee80211_hw *hw,
4877 enum ieee80211_ampdu_mlme_action action,
4878 const u8 *addr, u16 tid, u16 *ssn)
4879{
4880 struct iwl_priv *priv = hw->priv;
4881 int sta_id;
4882 DECLARE_MAC_BUF(mac);
4883
4884 IWL_DEBUG_HT("A-MPDU action on da=%s tid=%d ",
4885 print_mac(mac, addr), tid);
4886 sta_id = iwl4965_hw_find_station(priv, addr);
4887 switch (action) {
4888 case IEEE80211_AMPDU_RX_START:
4889 IWL_DEBUG_HT("start Rx\n");
4890 iwl4965_sta_modify_add_ba_tid(priv, sta_id, tid, *ssn);
4891 break;
4892 case IEEE80211_AMPDU_RX_STOP:
4893 IWL_DEBUG_HT("stop Rx\n");
4894 iwl4965_sta_modify_del_ba_tid(priv, sta_id, tid);
4895 break;
4896 case IEEE80211_AMPDU_TX_START:
4897 IWL_DEBUG_HT("start Tx\n");
4898 return iwl4965_mac_ht_tx_agg_start(hw, addr, tid, ssn);
4899 case IEEE80211_AMPDU_TX_STOP:
4900 IWL_DEBUG_HT("stop Tx\n");
4901 return iwl4965_mac_ht_tx_agg_stop(hw, addr, tid);
4902 default:
4903 IWL_DEBUG_HT("unknown\n");
4904 return -EINVAL;
4905 break;
4906 }
4907 return 0;
4908}
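The ADDBA/DELBA rework above no longer tears a session down synchronously; each TID now carries a small state (off, on, and two emptying-HW-queue states), and the final transition happens only once the hardware queue drains, as in iwl4965_check_empty_hw_queue. A standalone sketch of that state machine with invented names (agg_start, agg_queue_drained); not the driver code:

#include <stdio.h>

enum agg_state {
    AGG_OFF,                    /* no aggregation session */
    AGG_EMPTYING_QUEUE_ADDBA,   /* ADDBA accepted, waiting for old frames */
    AGG_ON,                     /* aggregation running */
    AGG_EMPTYING_QUEUE_DELBA,   /* DELBA requested, waiting for last frames */
};

struct tid_session {
    enum agg_state state;
    int frames_in_hw_queue;
};

/* Start: switch on immediately only if the queue is already empty. */
static void agg_start(struct tid_session *s)
{
    s->state = s->frames_in_hw_queue ? AGG_EMPTYING_QUEUE_ADDBA : AGG_ON;
}

/* Stop: switch off immediately only if the queue is already empty. */
static void agg_stop(struct tid_session *s)
{
    s->state = s->frames_in_hw_queue ? AGG_EMPTYING_QUEUE_DELBA : AGG_OFF;
}

/* Called whenever a completion empties the hardware queue. */
static void agg_queue_drained(struct tid_session *s)
{
    if (s->state == AGG_EMPTYING_QUEUE_ADDBA)
        s->state = AGG_ON;      /* continue the deferred ADDBA flow */
    else if (s->state == AGG_EMPTYING_QUEUE_DELBA)
        s->state = AGG_OFF;     /* continue the deferred DELBA flow */
}

int main(void)
{
    struct tid_session s = { .state = AGG_OFF, .frames_in_hw_queue = 3 };

    agg_start(&s);              /* queue busy -> EMPTYING_QUEUE_ADDBA */
    s.frames_in_hw_queue = 0;
    agg_queue_drained(&s);      /* -> AGG_ON */
    agg_stop(&s);               /* queue already empty -> AGG_OFF right away */
    printf("final state = %d (AGG_OFF)\n", s.state);
    return 0;
}

Deferring the callback into ieee80211_start/stop_tx_ba_cb_irqsafe until the queue is empty is what lets the driver hand a clean, fully drained hardware queue back to mac80211 instead of racing with in-flight frames.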
4892 4909
4893#endif /* CONFIG_IWL4965_HT_AGG */
4894#endif /* CONFIG_IWL4965_HT */ 4910#endif /* CONFIG_IWL4965_HT */
4895 4911
4896/* Set up 4965-specific Rx frame reply handlers */ 4912/* Set up 4965-specific Rx frame reply handlers */
4897void iwl4965_hw_rx_handler_setup(struct iwl4965_priv *priv) 4913void iwl4965_hw_rx_handler_setup(struct iwl_priv *priv)
4898{ 4914{
4899 /* Legacy Rx frames */ 4915 /* Legacy Rx frames */
4900 priv->rx_handlers[REPLY_4965_RX] = iwl4965_rx_reply_rx; 4916 priv->rx_handlers[REPLY_RX] = iwl4965_rx_reply_rx;
4901 4917
4902 /* High-throughput (HT) Rx frames */ 4918 /* High-throughput (HT) Rx frames */
4903 priv->rx_handlers[REPLY_RX_PHY_CMD] = iwl4965_rx_reply_rx_phy; 4919 priv->rx_handlers[REPLY_RX_PHY_CMD] = iwl4965_rx_reply_rx_phy;
@@ -4907,71 +4923,85 @@ void iwl4965_hw_rx_handler_setup(struct iwl4965_priv *priv)
4907 iwl4965_rx_missed_beacon_notif; 4923 iwl4965_rx_missed_beacon_notif;
4908 4924
4909#ifdef CONFIG_IWL4965_HT 4925#ifdef CONFIG_IWL4965_HT
4910#ifdef CONFIG_IWL4965_HT_AGG
4911 priv->rx_handlers[REPLY_COMPRESSED_BA] = iwl4965_rx_reply_compressed_ba; 4926 priv->rx_handlers[REPLY_COMPRESSED_BA] = iwl4965_rx_reply_compressed_ba;
4912#endif /* CONFIG_IWL4965_HT_AGG */
4913#endif /* CONFIG_IWL4965_HT */ 4927#endif /* CONFIG_IWL4965_HT */
4914} 4928}
4915 4929
4916void iwl4965_hw_setup_deferred_work(struct iwl4965_priv *priv) 4930void iwl4965_hw_setup_deferred_work(struct iwl_priv *priv)
4917{ 4931{
4918 INIT_WORK(&priv->txpower_work, iwl4965_bg_txpower_work); 4932 INIT_WORK(&priv->txpower_work, iwl4965_bg_txpower_work);
4919 INIT_WORK(&priv->statistics_work, iwl4965_bg_statistics_work);
4920#ifdef CONFIG_IWL4965_SENSITIVITY 4933#ifdef CONFIG_IWL4965_SENSITIVITY
4921 INIT_WORK(&priv->sensitivity_work, iwl4965_bg_sensitivity_work); 4934 INIT_WORK(&priv->sensitivity_work, iwl4965_bg_sensitivity_work);
4922#endif 4935#endif
4923#ifdef CONFIG_IWL4965_HT
4924#ifdef CONFIG_IWL4965_HT_AGG
4925 INIT_WORK(&priv->agg_work, iwl4965_bg_agg_work);
4926#endif /* CONFIG_IWL4965_HT_AGG */
4927#endif /* CONFIG_IWL4965_HT */
4928 init_timer(&priv->statistics_periodic); 4936 init_timer(&priv->statistics_periodic);
4929 priv->statistics_periodic.data = (unsigned long)priv; 4937 priv->statistics_periodic.data = (unsigned long)priv;
4930 priv->statistics_periodic.function = iwl4965_bg_statistics_periodic; 4938 priv->statistics_periodic.function = iwl4965_bg_statistics_periodic;
4931} 4939}
4932 4940
4933void iwl4965_hw_cancel_deferred_work(struct iwl4965_priv *priv) 4941void iwl4965_hw_cancel_deferred_work(struct iwl_priv *priv)
4934{ 4942{
4935 del_timer_sync(&priv->statistics_periodic); 4943 del_timer_sync(&priv->statistics_periodic);
4936 4944
4937 cancel_delayed_work(&priv->init_alive_start); 4945 cancel_delayed_work(&priv->init_alive_start);
4938} 4946}
4939 4947
4940struct pci_device_id iwl4965_hw_card_ids[] = { 4948
4941 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4229)}, 4949static struct iwl_hcmd_ops iwl4965_hcmd = {
4942 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4230)}, 4950 .rxon_assoc = iwl4965_send_rxon_assoc,
4943 {0}
4944}; 4951};
4945 4952
4946/* 4953static struct iwl_hcmd_utils_ops iwl4965_hcmd_utils = {
4947 * The device's EEPROM semaphore prevents conflicts between driver and uCode 4954 .enqueue_hcmd = iwl4965_enqueue_hcmd,
4948 * when accessing the EEPROM; each access is a series of pulses to/from the 4955};
4949 * EEPROM chip, not a single event, so even reads could conflict if they
4950 * weren't arbitrated by the semaphore.
4951 */
4952int iwl4965_eeprom_acquire_semaphore(struct iwl4965_priv *priv)
4953{
4954 u16 count;
4955 int rc;
4956 4956
4957 for (count = 0; count < EEPROM_SEM_RETRY_LIMIT; count++) { 4957static struct iwl_lib_ops iwl4965_lib = {
4958 /* Request semaphore */ 4958 .init_drv = iwl4965_init_drv,
4959 iwl4965_set_bit(priv, CSR_HW_IF_CONFIG_REG, 4959 .set_hw_params = iwl4965_hw_set_hw_params,
4960 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM); 4960 .txq_update_byte_cnt_tbl = iwl4965_txq_update_byte_cnt_tbl,
4961 4961 .hw_nic_init = iwl4965_hw_nic_init,
4962 /* See if we got it */ 4962 .is_valid_rtc_data_addr = iwl4965_hw_valid_rtc_data_addr,
4963 rc = iwl4965_poll_bit(priv, CSR_HW_IF_CONFIG_REG, 4963 .alive_notify = iwl4965_alive_notify,
4964 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM, 4964 .load_ucode = iwl4965_load_bsm,
4965 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM, 4965 .eeprom_ops = {
4966 EEPROM_SEM_TIMEOUT); 4966 .verify_signature = iwlcore_eeprom_verify_signature,
4967 if (rc >= 0) { 4967 .acquire_semaphore = iwlcore_eeprom_acquire_semaphore,
4968 IWL_DEBUG_IO("Acquired semaphore after %d tries.\n", 4968 .release_semaphore = iwlcore_eeprom_release_semaphore,
4969 count+1); 4969 },
4970 return rc; 4970 .radio_kill_sw = iwl4965_radio_kill_sw,
4971 } 4971};
4972 }
4973 4972
4974 return rc; 4973static struct iwl_ops iwl4965_ops = {
4975} 4974 .lib = &iwl4965_lib,
4975 .hcmd = &iwl4965_hcmd,
4976 .utils = &iwl4965_hcmd_utils,
4977};
4978
4979struct iwl_cfg iwl4965_agn_cfg = {
4980 .name = "4965AGN",
4981 .fw_name = "iwlwifi-4965" IWL4965_UCODE_API ".ucode",
4982 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
4983 .ops = &iwl4965_ops,
4984 .mod_params = &iwl4965_mod_params,
4985};
4986
4987module_param_named(antenna, iwl4965_mod_params.antenna, int, 0444);
4988MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])");
4989module_param_named(disable, iwl4965_mod_params.disable, int, 0444);
4990MODULE_PARM_DESC(disable, "manually disable the radio (default 0 [radio on])");
4991module_param_named(swcrypto, iwl4965_mod_params.sw_crypto, int, 0444);
4992MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])\n");
4993module_param_named(debug, iwl4965_mod_params.debug, int, 0444);
4994MODULE_PARM_DESC(debug, "debug output mask");
4995module_param_named(
4996 disable_hw_scan, iwl4965_mod_params.disable_hw_scan, int, 0444);
4997MODULE_PARM_DESC(disable_hw_scan, "disable hardware scanning (default 0)");
4998
4999module_param_named(queues_num, iwl4965_mod_params.num_of_queues, int, 0444);
5000MODULE_PARM_DESC(queues_num, "number of hw queues.");
5001
5002/* QoS */
5003module_param_named(qos_enable, iwl4965_mod_params.enable_qos, int, 0444);
5004MODULE_PARM_DESC(qos_enable, "enable all QoS functionality");
5005module_param_named(amsdu_size_8K, iwl4965_mod_params.amsdu_size_8K, int, 0444);
5006MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size");
4976 5007
4977MODULE_DEVICE_TABLE(pci, iwl4965_hw_card_ids);
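The tail of this file shows the direction of the refactor: device-specific behaviour moves behind function-pointer tables (iwl4965_hcmd, iwl4965_lib, iwl4965_hcmd_utils) gathered into an iwl_cfg that the shared core dispatches through. A standalone sketch of that ops-table pattern with generic invented names; it is not the iwlwifi API:

#include <stdio.h>

/* Per-hardware operations the shared core calls through. */
struct chip_lib_ops {
    int  (*hw_init)(void);
    void (*radio_kill)(int on);
};

struct chip_cfg {
    const char *name;
    const char *fw_name;
    const struct chip_lib_ops *lib;
};

/* One hardware flavour provides its implementations. */
static int demo_hw_init(void)
{
    printf("demo: bringing NIC up\n");
    return 0;
}

static void demo_radio_kill(int on)
{
    printf("demo: software rfkill %s\n", on ? "asserted" : "cleared");
}

static const struct chip_lib_ops demo_lib = {
    .hw_init    = demo_hw_init,
    .radio_kill = demo_radio_kill,
};

static const struct chip_cfg demo_cfg = {
    .name    = "DEMO-AGN",
    .fw_name = "demo-1.ucode",
    .lib     = &demo_lib,
};

/* The shared core never names the hardware directly. */
static int core_probe(const struct chip_cfg *cfg)
{
    printf("probing %s (firmware %s)\n", cfg->name, cfg->fw_name);
    if (cfg->lib->hw_init())
        return -1;
    cfg->lib->radio_kill(0);
    return 0;
}

int main(void)
{
    return core_probe(&demo_cfg);
}

Keeping the tables const and per-device lets one shared core drive several hardware flavours without compile-time branching, which is the effect the eeprom_ops and hcmd hooks above aim for.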
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.h b/drivers/net/wireless/iwlwifi/iwl-4965.h
index 9cb82be0ff80..9ed13cb0a2a9 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965.h
+++ b/drivers/net/wireless/iwlwifi/iwl-4965.h
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2003 - 2007 Intel Corporation. All rights reserved. 3 * Copyright(c) 2003 - 2008 Intel Corporation. All rights reserved.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify it 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as 6 * under the terms of version 2 of the GNU General Public License as
@@ -36,13 +36,24 @@
36#include <linux/kernel.h> 36#include <linux/kernel.h>
37#include <net/ieee80211_radiotap.h> 37#include <net/ieee80211_radiotap.h>
38 38
39/* Hardware specific file defines the PCI IDs table for that hardware module */
40extern struct pci_device_id iwl4965_hw_card_ids[];
41
42#define DRV_NAME "iwl4965" 39#define DRV_NAME "iwl4965"
40#include "iwl-rfkill.h"
41#include "iwl-eeprom.h"
43#include "iwl-4965-hw.h" 42#include "iwl-4965-hw.h"
43#include "iwl-csr.h"
44#include "iwl-prph.h" 44#include "iwl-prph.h"
45#include "iwl-4965-debug.h" 45#include "iwl-debug.h"
46#include "iwl-led.h"
47
48/* configuration for the iwl4965 */
49extern struct iwl_cfg iwl4965_agn_cfg;
50
51/* Change firmware file name, using "-" and incrementing number,
52 * *only* when uCode interface or architecture changes so that it
53 * is not compatible with earlier drivers.
54 * This number will also appear in << 8 position of 1st dword of uCode file */
55#define IWL4965_UCODE_API "-1"
56
46 57
47/* Default noise level to report when noise measurement is not available. 58/* Default noise level to report when noise measurement is not available.
48 * This may be because we're: 59 * This may be because we're:
@@ -57,11 +68,6 @@ extern struct pci_device_id iwl4965_hw_card_ids[];
57 * averages within an s8's (used in some apps) range of negative values. */ 68 * averages within an s8's (used in some apps) range of negative values. */
58#define IWL_NOISE_MEAS_NOT_AVAILABLE (-127) 69#define IWL_NOISE_MEAS_NOT_AVAILABLE (-127)
59 70
60/* Module parameters accessible from iwl-*.c */
61extern int iwl4965_param_hwcrypto;
62extern int iwl4965_param_queues_num;
63extern int iwl4965_param_amsdu_size_8K;
64
65enum iwl4965_antenna { 71enum iwl4965_antenna {
66 IWL_ANTENNA_DIVERSITY, 72 IWL_ANTENNA_DIVERSITY,
67 IWL_ANTENNA_MAIN, 73 IWL_ANTENNA_MAIN,
@@ -133,7 +139,7 @@ struct iwl4965_tx_info {
133struct iwl4965_tx_queue { 139struct iwl4965_tx_queue {
134 struct iwl4965_queue q; 140 struct iwl4965_queue q;
135 struct iwl4965_tfd_frame *bd; 141 struct iwl4965_tfd_frame *bd;
136 struct iwl4965_cmd *cmd; 142 struct iwl_cmd *cmd;
137 dma_addr_t dma_addr_cmd; 143 dma_addr_t dma_addr_cmd;
138 struct iwl4965_tx_info *txb; 144 struct iwl4965_tx_info *txb;
139 int need_update; 145 int need_update;
@@ -190,7 +196,7 @@ enum {
190 */ 196 */
191#define IWL4965_MAX_RATE (33) 197#define IWL4965_MAX_RATE (33)
192 198
193struct iwl4965_channel_info { 199struct iwl_channel_info {
194 struct iwl4965_channel_tgd_info tgd; 200 struct iwl4965_channel_tgd_info tgd;
195 struct iwl4965_channel_tgh_info tgh; 201 struct iwl4965_channel_tgh_info tgh;
196 struct iwl4965_eeprom_channel eeprom; /* EEPROM regulatory limit */ 202 struct iwl4965_eeprom_channel eeprom; /* EEPROM regulatory limit */
@@ -206,7 +212,7 @@ struct iwl4965_channel_info {
206 212
207 u8 group_index; /* 0-4, maps channel to group1/2/3/4/5 */ 213 u8 group_index; /* 0-4, maps channel to group1/2/3/4/5 */
208 u8 band_index; /* 0-4, maps channel to band1/2/3/4/5 */ 214 u8 band_index; /* 0-4, maps channel to band1/2/3/4/5 */
209 u8 phymode; /* MODE_IEEE80211{A,B,G} */ 215 enum ieee80211_band band;
210 216
211 /* Radio/DSP gain settings for each "normal" data Tx rate. 217 /* Radio/DSP gain settings for each "normal" data Tx rate.
212 * These include, in addition to RF and DSP gain, a few fields for 218 * These include, in addition to RF and DSP gain, a few fields for
@@ -288,8 +294,8 @@ struct iwl4965_frame {
288 294
289#define SEQ_TO_QUEUE(x) ((x >> 8) & 0xbf) 295#define SEQ_TO_QUEUE(x) ((x >> 8) & 0xbf)
290#define QUEUE_TO_SEQ(x) ((x & 0xbf) << 8) 296#define QUEUE_TO_SEQ(x) ((x & 0xbf) << 8)
291#define SEQ_TO_INDEX(x) (x & 0xff) 297#define SEQ_TO_INDEX(x) ((u8)(x & 0xff))
292#define INDEX_TO_SEQ(x) (x & 0xff) 298#define INDEX_TO_SEQ(x) ((u8)(x & 0xff))
293#define SEQ_HUGE_FRAME (0x4000) 299#define SEQ_HUGE_FRAME (0x4000)
294#define SEQ_RX_FRAME __constant_cpu_to_le16(0x8000) 300#define SEQ_RX_FRAME __constant_cpu_to_le16(0x8000)
295#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4) 301#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4)
@@ -305,15 +311,15 @@ enum {
305 CMD_WANT_SKB = (1 << 2), 311 CMD_WANT_SKB = (1 << 2),
306}; 312};
307 313
308struct iwl4965_cmd; 314struct iwl_cmd;
309struct iwl4965_priv; 315struct iwl_priv;
310 316
311struct iwl4965_cmd_meta { 317struct iwl_cmd_meta {
312 struct iwl4965_cmd_meta *source; 318 struct iwl_cmd_meta *source;
313 union { 319 union {
314 struct sk_buff *skb; 320 struct sk_buff *skb;
315 int (*callback)(struct iwl4965_priv *priv, 321 int (*callback)(struct iwl_priv *priv,
316 struct iwl4965_cmd *cmd, struct sk_buff *skb); 322 struct iwl_cmd *cmd, struct sk_buff *skb);
317 } __attribute__ ((packed)) u; 323 } __attribute__ ((packed)) u;
318 324
319 /* The CMD_SIZE_HUGE flag bit indicates that the command 325 /* The CMD_SIZE_HUGE flag bit indicates that the command
@@ -323,15 +329,15 @@ struct iwl4965_cmd_meta {
323} __attribute__ ((packed)); 329} __attribute__ ((packed));
324 330
325/** 331/**
326 * struct iwl4965_cmd 332 * struct iwl_cmd
327 * 333 *
328 * For allocation of the command and tx queues, this establishes the overall 334 * For allocation of the command and tx queues, this establishes the overall
329 * size of the largest command we send to uCode, except for a scan command 335 * size of the largest command we send to uCode, except for a scan command
330 * (which is relatively huge; space is allocated separately). 336 * (which is relatively huge; space is allocated separately).
331 */ 337 */
332struct iwl4965_cmd { 338struct iwl_cmd {
333 struct iwl4965_cmd_meta meta; /* driver data */ 339 struct iwl_cmd_meta meta; /* driver data */
334 struct iwl4965_cmd_header hdr; /* uCode API */ 340 struct iwl_cmd_header hdr; /* uCode API */
335 union { 341 union {
336 struct iwl4965_addsta_cmd addsta; 342 struct iwl4965_addsta_cmd addsta;
337 struct iwl4965_led_cmd led; 343 struct iwl4965_led_cmd led;
@@ -351,15 +357,15 @@ struct iwl4965_cmd {
351 } __attribute__ ((packed)) cmd; 357 } __attribute__ ((packed)) cmd;
352} __attribute__ ((packed)); 358} __attribute__ ((packed));
353 359
354struct iwl4965_host_cmd { 360struct iwl_host_cmd {
355 u8 id; 361 u8 id;
356 u16 len; 362 u16 len;
357 struct iwl4965_cmd_meta meta; 363 struct iwl_cmd_meta meta;
358 const void *data; 364 const void *data;
359}; 365};
360 366
361#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl4965_cmd) - \ 367#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_cmd) - \
362 sizeof(struct iwl4965_cmd_meta)) 368 sizeof(struct iwl_cmd_meta))
363 369
364/* 370/*
365 * RX related structures and functions 371 * RX related structures and functions
@@ -408,32 +414,12 @@ struct iwl4965_rx_queue {
408#define MAX_B_CHANNELS 14 414#define MAX_B_CHANNELS 14
409#define MIN_B_CHANNELS 1 415#define MIN_B_CHANNELS 1
410 416
411#define STATUS_HCMD_ACTIVE 0 /* host command in progress */
412#define STATUS_INT_ENABLED 1
413#define STATUS_RF_KILL_HW 2
414#define STATUS_RF_KILL_SW 3
415#define STATUS_INIT 4
416#define STATUS_ALIVE 5
417#define STATUS_READY 6
418#define STATUS_TEMPERATURE 7
419#define STATUS_GEO_CONFIGURED 8
420#define STATUS_EXIT_PENDING 9
421#define STATUS_IN_SUSPEND 10
422#define STATUS_STATISTICS 11
423#define STATUS_SCANNING 12
424#define STATUS_SCAN_ABORTING 13
425#define STATUS_SCAN_HW 14
426#define STATUS_POWER_PMI 15
427#define STATUS_FW_ERROR 16
428#define STATUS_CONF_PENDING 17
429
430#define MAX_TID_COUNT 9 417#define MAX_TID_COUNT 9
431 418
432#define IWL_INVALID_RATE 0xFF 419#define IWL_INVALID_RATE 0xFF
433#define IWL_INVALID_VALUE -1 420#define IWL_INVALID_VALUE -1
434 421
435#ifdef CONFIG_IWL4965_HT 422#ifdef CONFIG_IWL4965_HT
436#ifdef CONFIG_IWL4965_HT_AGG
437/** 423/**
438 * struct iwl4965_ht_agg -- aggregation status while waiting for block-ack 424 * struct iwl4965_ht_agg -- aggregation status while waiting for block-ack
439 * @txq_id: Tx queue used for Tx attempt 425 * @txq_id: Tx queue used for Tx attempt
@@ -453,25 +439,30 @@ struct iwl4965_ht_agg {
453 u16 frame_count; 439 u16 frame_count;
454 u16 wait_for_ba; 440 u16 wait_for_ba;
455 u16 start_idx; 441 u16 start_idx;
456 u32 bitmap0; 442 u64 bitmap;
457 u32 bitmap1;
458 u32 rate_n_flags; 443 u32 rate_n_flags;
444#define IWL_AGG_OFF 0
445#define IWL_AGG_ON 1
446#define IWL_EMPTYING_HW_QUEUE_ADDBA 2
447#define IWL_EMPTYING_HW_QUEUE_DELBA 3
448 u8 state;
459}; 449};
460#endif /* CONFIG_IWL4965_HT_AGG */ 450
461#endif /* CONFIG_IWL4965_HT */ 451#endif /* CONFIG_IWL4965_HT */
462 452
463struct iwl4965_tid_data { 453struct iwl4965_tid_data {
464 u16 seq_number; 454 u16 seq_number;
455 u16 tfds_in_queue;
465#ifdef CONFIG_IWL4965_HT 456#ifdef CONFIG_IWL4965_HT
466#ifdef CONFIG_IWL4965_HT_AGG
467 struct iwl4965_ht_agg agg; 457 struct iwl4965_ht_agg agg;
468#endif /* CONFIG_IWL4965_HT_AGG */
469#endif /* CONFIG_IWL4965_HT */ 458#endif /* CONFIG_IWL4965_HT */
470}; 459};
471 460
472struct iwl4965_hw_key { 461struct iwl4965_hw_key {
473 enum ieee80211_key_alg alg; 462 enum ieee80211_key_alg alg;
474 int keylen; 463 int keylen;
464 u8 keyidx;
465 struct ieee80211_key_conf *conf;
475 u8 key[32]; 466 u8 key[32];
476}; 467};
477 468
@@ -508,8 +499,6 @@ struct iwl_ht_info {
508}; 499};
509#endif /*CONFIG_IWL4965_HT */ 500#endif /*CONFIG_IWL4965_HT */
510 501
511#ifdef CONFIG_IWL4965_QOS
512
513union iwl4965_qos_capabity { 502union iwl4965_qos_capabity {
514 struct { 503 struct {
515 u8 edca_count:4; /* bit 0-3 */ 504 u8 edca_count:4; /* bit 0-3 */
@@ -537,7 +526,6 @@ struct iwl4965_qos_info {
537 union iwl4965_qos_capabity qos_cap; 526 union iwl4965_qos_capabity qos_cap;
538 struct iwl4965_qosparam_cmd def_qos_parm; 527 struct iwl4965_qosparam_cmd def_qos_parm;
539}; 528};
540#endif /*CONFIG_IWL4965_QOS */
541 529
542#define STA_PS_STATUS_WAKE 0 530#define STA_PS_STATUS_WAKE 0
543#define STA_PS_STATUS_SLEEP 1 531#define STA_PS_STATUS_SLEEP 1
@@ -579,30 +567,29 @@ struct iwl4965_ibss_seq {
579}; 567};
580 568
581/** 569/**
582 * struct iwl4965_driver_hw_info 570 * struct iwl_hw_params
583 * @max_txq_num: Max # Tx queues supported 571 * @max_txq_num: Max # Tx queues supported
584 * @ac_queue_count: # Tx queues for EDCA Access Categories (AC)
585 * @tx_cmd_len: Size of Tx command (but not including frame itself) 572 * @tx_cmd_len: Size of Tx command (but not including frame itself)
573 * @tx_ant_num: Number of TX antennas
586 * @max_rxq_size: Max # Rx frames in Rx queue (must be power-of-2) 574 * @max_rxq_size: Max # Rx frames in Rx queue (must be power-of-2)
587 * @rx_buffer_size: 575 * @rx_buffer_size:
588 * @max_rxq_log: Log-base-2 of max_rxq_size 576 * @max_rxq_log: Log-base-2 of max_rxq_size
589 * @max_stations: 577 * @max_stations:
590 * @bcast_sta_id: 578 * @bcast_sta_id:
591 * @shared_virt: Pointer to driver/uCode shared Tx Byte Counts and Rx status
592 * @shared_phys: Physical Pointer to Tx Byte Counts and Rx status
593 */ 579 */
594struct iwl4965_driver_hw_info { 580struct iwl_hw_params {
595 u16 max_txq_num; 581 u16 max_txq_num;
596 u16 ac_queue_count;
597 u16 tx_cmd_len; 582 u16 tx_cmd_len;
583 u8 tx_chains_num;
584 u8 rx_chains_num;
585 u8 valid_tx_ant;
586 u8 valid_rx_ant;
598 u16 max_rxq_size; 587 u16 max_rxq_size;
588 u16 max_rxq_log;
599 u32 rx_buf_size; 589 u32 rx_buf_size;
600 u32 max_pkt_size; 590 u32 max_pkt_size;
601 u16 max_rxq_log;
602 u8 max_stations; 591 u8 max_stations;
603 u8 bcast_sta_id; 592 u8 bcast_sta_id;
604 void *shared_virt;
605 dma_addr_t shared_phys;
606}; 593};
607 594
608#define HT_SHORT_GI_20MHZ_ONLY (1 << 0) 595#define HT_SHORT_GI_20MHZ_ONLY (1 << 0)
@@ -626,62 +613,49 @@ struct iwl4965_driver_hw_info {
626 * 613 *
627 *****************************************************************************/ 614 *****************************************************************************/
628struct iwl4965_addsta_cmd; 615struct iwl4965_addsta_cmd;
629extern int iwl4965_send_add_station(struct iwl4965_priv *priv, 616extern int iwl4965_send_add_station(struct iwl_priv *priv,
630 struct iwl4965_addsta_cmd *sta, u8 flags); 617 struct iwl4965_addsta_cmd *sta, u8 flags);
631extern u8 iwl4965_add_station_flags(struct iwl4965_priv *priv, const u8 *addr, 618extern u8 iwl4965_add_station_flags(struct iwl_priv *priv, const u8 *addr,
632 int is_ap, u8 flags, void *ht_data); 619 int is_ap, u8 flags, void *ht_data);
633extern int iwl4965_is_network_packet(struct iwl4965_priv *priv, 620extern int iwl4965_is_network_packet(struct iwl_priv *priv,
634 struct ieee80211_hdr *header); 621 struct ieee80211_hdr *header);
635extern int iwl4965_power_init_handle(struct iwl4965_priv *priv); 622extern int iwl4965_power_init_handle(struct iwl_priv *priv);
636extern int iwl4965_eeprom_init(struct iwl4965_priv *priv); 623extern void iwl4965_handle_data_packet_monitor(struct iwl_priv *priv,
637#ifdef CONFIG_IWL4965_DEBUG
638extern void iwl4965_report_frame(struct iwl4965_priv *priv,
639 struct iwl4965_rx_packet *pkt,
640 struct ieee80211_hdr *header, int group100);
641#else
642static inline void iwl4965_report_frame(struct iwl4965_priv *priv,
643 struct iwl4965_rx_packet *pkt,
644 struct ieee80211_hdr *header,
645 int group100) {}
646#endif
647extern void iwl4965_handle_data_packet_monitor(struct iwl4965_priv *priv,
648 struct iwl4965_rx_mem_buffer *rxb, 624 struct iwl4965_rx_mem_buffer *rxb,
649 void *data, short len, 625 void *data, short len,
650 struct ieee80211_rx_status *stats, 626 struct ieee80211_rx_status *stats,
651 u16 phy_flags); 627 u16 phy_flags);
652extern int iwl4965_is_duplicate_packet(struct iwl4965_priv *priv, 628extern int iwl4965_is_duplicate_packet(struct iwl_priv *priv,
653 struct ieee80211_hdr *header); 629 struct ieee80211_hdr *header);
654extern int iwl4965_rx_queue_alloc(struct iwl4965_priv *priv); 630extern int iwl4965_rx_queue_alloc(struct iwl_priv *priv);
655extern void iwl4965_rx_queue_reset(struct iwl4965_priv *priv, 631extern void iwl4965_rx_queue_reset(struct iwl_priv *priv,
656 struct iwl4965_rx_queue *rxq); 632 struct iwl4965_rx_queue *rxq);
657extern int iwl4965_calc_db_from_ratio(int sig_ratio); 633extern int iwl4965_calc_db_from_ratio(int sig_ratio);
658extern int iwl4965_calc_sig_qual(int rssi_dbm, int noise_dbm); 634extern int iwl4965_calc_sig_qual(int rssi_dbm, int noise_dbm);
659extern int iwl4965_tx_queue_init(struct iwl4965_priv *priv, 635extern int iwl4965_tx_queue_init(struct iwl_priv *priv,
660 struct iwl4965_tx_queue *txq, int count, u32 id); 636 struct iwl4965_tx_queue *txq, int count, u32 id);
661extern void iwl4965_rx_replenish(void *data); 637extern void iwl4965_rx_replenish(void *data);
662extern void iwl4965_tx_queue_free(struct iwl4965_priv *priv, struct iwl4965_tx_queue *txq); 638extern void iwl4965_tx_queue_free(struct iwl_priv *priv, struct iwl4965_tx_queue *txq);
663extern int iwl4965_send_cmd_pdu(struct iwl4965_priv *priv, u8 id, u16 len, 639extern unsigned int iwl4965_fill_beacon_frame(struct iwl_priv *priv,
664 const void *data);
665extern int __must_check iwl4965_send_cmd(struct iwl4965_priv *priv,
666 struct iwl4965_host_cmd *cmd);
667extern unsigned int iwl4965_fill_beacon_frame(struct iwl4965_priv *priv,
668 struct ieee80211_hdr *hdr, 640 struct ieee80211_hdr *hdr,
669 const u8 *dest, int left); 641 const u8 *dest, int left);
670extern int iwl4965_rx_queue_update_write_ptr(struct iwl4965_priv *priv, 642extern int iwl4965_rx_queue_update_write_ptr(struct iwl_priv *priv,
671 struct iwl4965_rx_queue *q); 643 struct iwl4965_rx_queue *q);
672extern int iwl4965_send_statistics_request(struct iwl4965_priv *priv); 644extern void iwl4965_set_decrypted_flag(struct iwl_priv *priv, struct sk_buff *skb,
673extern void iwl4965_set_decrypted_flag(struct iwl4965_priv *priv, struct sk_buff *skb,
674 u32 decrypt_res, 645 u32 decrypt_res,
675 struct ieee80211_rx_status *stats); 646 struct ieee80211_rx_status *stats);
676extern __le16 *ieee80211_get_qos_ctrl(struct ieee80211_hdr *hdr); 647extern __le16 *ieee80211_get_qos_ctrl(struct ieee80211_hdr *hdr);
648int iwl4965_init_geos(struct iwl_priv *priv);
649void iwl4965_free_geos(struct iwl_priv *priv);
677 650
678extern const u8 iwl4965_broadcast_addr[ETH_ALEN]; 651extern const u8 iwl4965_broadcast_addr[ETH_ALEN];
652int iwl4965_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd);
679 653
680/* 654/*
681 * Currently used by iwl-3945-rs... look at restructuring so that it doesn't 655 * Currently used by iwl-3945-rs... look at restructuring so that it doesn't
682 * call this... todo... fix that. 656 * call this... todo... fix that.
683*/ 657*/
684extern u8 iwl4965_sync_station(struct iwl4965_priv *priv, int sta_id, 658extern u8 iwl4965_sync_station(struct iwl_priv *priv, int sta_id,
685 u16 tx_rate, u8 flags); 659 u16 tx_rate, u8 flags);
686 660
687/****************************************************************************** 661/******************************************************************************
@@ -700,36 +674,36 @@ extern u8 iwl4965_sync_station(struct iwl4965_priv *priv, int sta_id,
700 * iwl4965_mac_ <-- mac80211 callback 674 * iwl4965_mac_ <-- mac80211 callback
701 * 675 *
702 ****************************************************************************/ 676 ****************************************************************************/
703extern void iwl4965_hw_rx_handler_setup(struct iwl4965_priv *priv); 677extern void iwl4965_hw_rx_handler_setup(struct iwl_priv *priv);
704extern void iwl4965_hw_setup_deferred_work(struct iwl4965_priv *priv); 678extern void iwl4965_hw_setup_deferred_work(struct iwl_priv *priv);
705extern void iwl4965_hw_cancel_deferred_work(struct iwl4965_priv *priv); 679extern void iwl4965_hw_cancel_deferred_work(struct iwl_priv *priv);
706extern int iwl4965_hw_rxq_stop(struct iwl4965_priv *priv); 680extern int iwl4965_hw_rxq_stop(struct iwl_priv *priv);
707extern int iwl4965_hw_set_hw_setting(struct iwl4965_priv *priv); 681extern int iwl4965_hw_set_hw_params(struct iwl_priv *priv);
708extern int iwl4965_hw_nic_init(struct iwl4965_priv *priv); 682extern int iwl4965_hw_nic_init(struct iwl_priv *priv);
709extern int iwl4965_hw_nic_stop_master(struct iwl4965_priv *priv); 683extern int iwl4965_hw_nic_stop_master(struct iwl_priv *priv);
710extern void iwl4965_hw_txq_ctx_free(struct iwl4965_priv *priv); 684extern void iwl4965_hw_txq_ctx_free(struct iwl_priv *priv);
711extern void iwl4965_hw_txq_ctx_stop(struct iwl4965_priv *priv); 685extern void iwl4965_hw_txq_ctx_stop(struct iwl_priv *priv);
712extern int iwl4965_hw_nic_reset(struct iwl4965_priv *priv); 686extern int iwl4965_hw_nic_reset(struct iwl_priv *priv);
713extern int iwl4965_hw_txq_attach_buf_to_tfd(struct iwl4965_priv *priv, void *tfd, 687extern int iwl4965_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv, void *tfd,
714 dma_addr_t addr, u16 len); 688 dma_addr_t addr, u16 len);
715extern int iwl4965_hw_txq_free_tfd(struct iwl4965_priv *priv, struct iwl4965_tx_queue *txq); 689extern int iwl4965_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl4965_tx_queue *txq);
716extern int iwl4965_hw_get_temperature(struct iwl4965_priv *priv); 690extern int iwl4965_hw_get_temperature(struct iwl_priv *priv);
717extern int iwl4965_hw_tx_queue_init(struct iwl4965_priv *priv, 691extern int iwl4965_hw_tx_queue_init(struct iwl_priv *priv,
718 struct iwl4965_tx_queue *txq); 692 struct iwl4965_tx_queue *txq);
719extern unsigned int iwl4965_hw_get_beacon_cmd(struct iwl4965_priv *priv, 693extern unsigned int iwl4965_hw_get_beacon_cmd(struct iwl_priv *priv,
720 struct iwl4965_frame *frame, u8 rate); 694 struct iwl4965_frame *frame, u8 rate);
721extern int iwl4965_hw_get_rx_read(struct iwl4965_priv *priv); 695extern int iwl4965_hw_get_rx_read(struct iwl_priv *priv);
722extern void iwl4965_hw_build_tx_cmd_rate(struct iwl4965_priv *priv, 696extern void iwl4965_hw_build_tx_cmd_rate(struct iwl_priv *priv,
723 struct iwl4965_cmd *cmd, 697 struct iwl_cmd *cmd,
724 struct ieee80211_tx_control *ctrl, 698 struct ieee80211_tx_control *ctrl,
725 struct ieee80211_hdr *hdr, 699 struct ieee80211_hdr *hdr,
726 int sta_id, int tx_id); 700 int sta_id, int tx_id);
727extern int iwl4965_hw_reg_send_txpower(struct iwl4965_priv *priv); 701extern int iwl4965_hw_reg_send_txpower(struct iwl_priv *priv);
728extern int iwl4965_hw_reg_set_txpower(struct iwl4965_priv *priv, s8 power); 702extern int iwl4965_hw_reg_set_txpower(struct iwl_priv *priv, s8 power);
729extern void iwl4965_hw_rx_statistics(struct iwl4965_priv *priv, 703extern void iwl4965_hw_rx_statistics(struct iwl_priv *priv,
730 struct iwl4965_rx_mem_buffer *rxb); 704 struct iwl4965_rx_mem_buffer *rxb);
731extern void iwl4965_disable_events(struct iwl4965_priv *priv); 705extern void iwl4965_disable_events(struct iwl_priv *priv);
732extern int iwl4965_get_temperature(const struct iwl4965_priv *priv); 706extern int iwl4965_get_temperature(const struct iwl_priv *priv);
733 707
734/** 708/**
735 * iwl4965_hw_find_station - Find station id for a given BSSID 709 * iwl4965_hw_find_station - Find station id for a given BSSID
@@ -739,54 +713,51 @@ extern int iwl4965_get_temperature(const struct iwl4965_priv *priv);
739 * not yet been merged into a single common layer for managing the 713 * not yet been merged into a single common layer for managing the
740 * station tables. 714 * station tables.
741 */ 715 */
742extern u8 iwl4965_hw_find_station(struct iwl4965_priv *priv, const u8 *bssid); 716extern u8 iwl4965_hw_find_station(struct iwl_priv *priv, const u8 *bssid);
743 717
744extern int iwl4965_hw_channel_switch(struct iwl4965_priv *priv, u16 channel); 718extern int iwl4965_hw_channel_switch(struct iwl_priv *priv, u16 channel);
745extern int iwl4965_tx_queue_reclaim(struct iwl4965_priv *priv, int txq_id, int index); 719extern int iwl4965_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index);
746 720extern int iwl4965_queue_space(const struct iwl4965_queue *q);
747struct iwl4965_priv; 721struct iwl_priv;
748 722
723extern void iwl4965_radio_kill_sw(struct iwl_priv *priv, int disable_radio);
749/* 724/*
750 * Forward declare iwl-4965.c functions for iwl-base.c 725 * Forward declare iwl-4965.c functions for iwl-base.c
751 */ 726 */
752extern int iwl4965_eeprom_acquire_semaphore(struct iwl4965_priv *priv); 727extern int iwl4965_tx_queue_update_wr_ptr(struct iwl_priv *priv,
753
754extern int iwl4965_tx_queue_update_wr_ptr(struct iwl4965_priv *priv,
755 struct iwl4965_tx_queue *txq, 728 struct iwl4965_tx_queue *txq,
756 u16 byte_cnt); 729 u16 byte_cnt);
757extern void iwl4965_add_station(struct iwl4965_priv *priv, const u8 *addr, 730extern void iwl4965_add_station(struct iwl_priv *priv, const u8 *addr,
758 int is_ap); 731 int is_ap);
759extern void iwl4965_set_rxon_chain(struct iwl4965_priv *priv); 732extern void iwl4965_set_rxon_chain(struct iwl_priv *priv);
760extern int iwl4965_alive_notify(struct iwl4965_priv *priv); 733extern int iwl4965_alive_notify(struct iwl_priv *priv);
761extern void iwl4965_update_rate_scaling(struct iwl4965_priv *priv, u8 mode); 734extern void iwl4965_update_rate_scaling(struct iwl_priv *priv, u8 mode);
762extern void iwl4965_chain_noise_reset(struct iwl4965_priv *priv); 735extern void iwl4965_chain_noise_reset(struct iwl_priv *priv);
763extern void iwl4965_init_sensitivity(struct iwl4965_priv *priv, u8 flags, 736extern void iwl4965_init_sensitivity(struct iwl_priv *priv, u8 flags,
764 u8 force); 737 u8 force);
765extern int iwl4965_set_fat_chan_info(struct iwl4965_priv *priv, int phymode, 738extern void iwl4965_rf_kill_ct_config(struct iwl_priv *priv);
766 u16 channel, 739extern void iwl4965_hwrate_to_tx_control(struct iwl_priv *priv,
767 const struct iwl4965_eeprom_channel *eeprom_ch, 740 u32 rate_n_flags,
768 u8 fat_extension_channel); 741 struct ieee80211_tx_control *control);
769extern void iwl4965_rf_kill_ct_config(struct iwl4965_priv *priv);
770 742
771#ifdef CONFIG_IWL4965_HT 743#ifdef CONFIG_IWL4965_HT
772extern void iwl4965_init_ht_hw_capab(struct ieee80211_ht_info *ht_info, 744void iwl4965_init_ht_hw_capab(struct iwl_priv *priv,
773 int mode); 745 struct ieee80211_ht_info *ht_info,
774extern void iwl4965_set_rxon_ht(struct iwl4965_priv *priv, 746 enum ieee80211_band band);
775 struct iwl_ht_info *ht_info); 747void iwl4965_set_rxon_ht(struct iwl_priv *priv,
776extern void iwl4965_set_ht_add_station(struct iwl4965_priv *priv, u8 index, 748 struct iwl_ht_info *ht_info);
749void iwl4965_set_ht_add_station(struct iwl_priv *priv, u8 index,
777 struct ieee80211_ht_info *sta_ht_inf); 750 struct ieee80211_ht_info *sta_ht_inf);
778extern int iwl4965_mac_ampdu_action(struct ieee80211_hw *hw, 751int iwl4965_mac_ampdu_action(struct ieee80211_hw *hw,
779 enum ieee80211_ampdu_mlme_action action, 752 enum ieee80211_ampdu_mlme_action action,
780 const u8 *addr, u16 tid, u16 ssn); 753 const u8 *addr, u16 tid, u16 *ssn);
781#ifdef CONFIG_IWL4965_HT_AGG 754int iwl4965_check_empty_hw_queue(struct iwl_priv *priv, int sta_id,
782extern int iwl4965_mac_ht_tx_agg_start(struct ieee80211_hw *hw, u8 *da, 755 u8 tid, int txq_id);
783 u16 tid, u16 *start_seq_num); 756#else
784extern int iwl4965_mac_ht_tx_agg_stop(struct ieee80211_hw *hw, u8 *da, 757static inline void iwl4965_init_ht_hw_capab(struct iwl_priv *priv,
785 u16 tid, int generator); 758 struct ieee80211_ht_info *ht_info,
786extern void iwl4965_turn_off_agg(struct iwl4965_priv *priv, u8 tid); 759 enum ieee80211_band band) {}
787extern void iwl4965_tl_get_stats(struct iwl4965_priv *priv, 760
788 struct ieee80211_hdr *hdr);
789#endif /* CONFIG_IWL4965_HT_AGG */
790#endif /*CONFIG_IWL4965_HT */ 761#endif /*CONFIG_IWL4965_HT */
791/* Structures, enum, and defines specific to the 4965 */ 762/* Structures, enum, and defines specific to the 4965 */
792 763
@@ -798,18 +769,6 @@ struct iwl4965_kw {
798 size_t size; 769 size_t size;
799}; 770};
800 771
801#define TID_QUEUE_CELL_SPACING 50 /*mS */
802#define TID_QUEUE_MAX_SIZE 20
803#define TID_ROUND_VALUE 5 /* mS */
804#define TID_MAX_LOAD_COUNT 8
805
806#define TID_MAX_TIME_DIFF ((TID_QUEUE_MAX_SIZE - 1) * TID_QUEUE_CELL_SPACING)
807#define TIME_WRAP_AROUND(x, y) (((y) > (x)) ? (y) - (x) : (0-(x)) + (y))
808
809#define TID_ALL_ENABLED 0x7f
810#define TID_ALL_SPECIFIED 0xff
811#define TID_AGG_TPT_THREHOLD 0x0
812
813#define IWL_CHANNEL_WIDTH_20MHZ 0 772#define IWL_CHANNEL_WIDTH_20MHZ 0
814#define IWL_CHANNEL_WIDTH_40MHZ 1 773#define IWL_CHANNEL_WIDTH_40MHZ 1
815 774
@@ -823,48 +782,17 @@ struct iwl4965_kw {
823#define IWL_OPERATION_MODE_MIXED 2 782#define IWL_OPERATION_MODE_MIXED 2
824#define IWL_OPERATION_MODE_20MHZ 3 783#define IWL_OPERATION_MODE_20MHZ 3
825 784
826#define IWL_EXT_CHANNEL_OFFSET_AUTO 0 785#define IWL_EXT_CHANNEL_OFFSET_NONE 0
827#define IWL_EXT_CHANNEL_OFFSET_ABOVE 1 786#define IWL_EXT_CHANNEL_OFFSET_ABOVE 1
828#define IWL_EXT_CHANNEL_OFFSET_ 2 787#define IWL_EXT_CHANNEL_OFFSET_RESERVE1 2
829#define IWL_EXT_CHANNEL_OFFSET_BELOW 3 788#define IWL_EXT_CHANNEL_OFFSET_BELOW 3
830#define IWL_EXT_CHANNEL_OFFSET_MAX 4
831 789
832#define NRG_NUM_PREV_STAT_L 20 790#define NRG_NUM_PREV_STAT_L 20
833#define NUM_RX_CHAINS (3) 791#define NUM_RX_CHAINS (3)
834 792
835#define TX_POWER_IWL_ILLEGAL_VOLTAGE -10000 793#define TX_POWER_IWL_ILLEGAL_VOLTAGE -10000
836 794
837struct iwl4965_traffic_load {
838 unsigned long time_stamp;
839 u32 packet_count[TID_QUEUE_MAX_SIZE];
840 u8 queue_count;
841 u8 head;
842 u32 total;
843};
844
845#ifdef CONFIG_IWL4965_HT_AGG
846/**
847 * struct iwl4965_agg_control
848 * @requested_ba: bit map of tids requesting aggregation/block-ack
849 * @granted_ba: bit map of tids granted aggregation/block-ack
850 */
851struct iwl4965_agg_control {
852 unsigned long next_retry;
853 u32 wait_for_agg_status;
854 u32 tid_retry;
855 u32 requested_ba;
856 u32 granted_ba;
857 u8 auto_agg;
858 u32 tid_traffic_load_threshold;
859 u32 ba_timeout;
860 struct iwl4965_traffic_load traffic_load[TID_MAX_LOAD_COUNT];
861};
862#endif /*CONFIG_IWL4965_HT_AGG */
863
864struct iwl4965_lq_mngr { 795struct iwl4965_lq_mngr {
865#ifdef CONFIG_IWL4965_HT_AGG
866 struct iwl4965_agg_control agg_ctrl;
867#endif
868 spinlock_t lock; 796 spinlock_t lock;
869 s32 max_window_size; 797 s32 max_window_size;
870 s32 *expected_tpt; 798 s32 *expected_tpt;
@@ -877,7 +805,6 @@ struct iwl4965_lq_mngr {
877 u8 lq_ready; 805 u8 lq_ready;
878}; 806};
879 807
880
881/* Sensitivity and chain noise calibration */ 808/* Sensitivity and chain noise calibration */
882#define INTERFERENCE_DATA_AVAILABLE __constant_cpu_to_le32(1) 809#define INTERFERENCE_DATA_AVAILABLE __constant_cpu_to_le32(1)
883#define INITIALIZATION_VALUE 0xFFFF 810#define INITIALIZATION_VALUE 0xFFFF
@@ -1014,25 +941,28 @@ enum {
1014 941
1015#endif 942#endif
1016 943
1017struct iwl4965_priv { 944#define IWL_MAX_NUM_QUEUES 20 /* FIXME: do dynamic allocation */
945
946struct iwl_priv {
1018 947
1019 /* ieee device used by generic ieee processing code */ 948 /* ieee device used by generic ieee processing code */
1020 struct ieee80211_hw *hw; 949 struct ieee80211_hw *hw;
1021 struct ieee80211_channel *ieee_channels; 950 struct ieee80211_channel *ieee_channels;
1022 struct ieee80211_rate *ieee_rates; 951 struct ieee80211_rate *ieee_rates;
952 struct iwl_cfg *cfg;
1023 953
1024 /* temporary frame storage list */ 954 /* temporary frame storage list */
1025 struct list_head free_frames; 955 struct list_head free_frames;
1026 int frames_count; 956 int frames_count;
1027 957
1028 u8 phymode; 958 enum ieee80211_band band;
1029 int alloc_rxb_skb; 959 int alloc_rxb_skb;
1030 bool add_radiotap; 960 bool add_radiotap;
1031 961
1032 void (*rx_handlers[REPLY_MAX])(struct iwl4965_priv *priv, 962 void (*rx_handlers[REPLY_MAX])(struct iwl_priv *priv,
1033 struct iwl4965_rx_mem_buffer *rxb); 963 struct iwl4965_rx_mem_buffer *rxb);
1034 964
1035 const struct ieee80211_hw_mode *modes; 965 struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
1036 966
1037#ifdef CONFIG_IWL4965_SPECTRUM_MEASUREMENT 967#ifdef CONFIG_IWL4965_SPECTRUM_MEASUREMENT
1038 /* spectrum measurement report caching */ 968 /* spectrum measurement report caching */
@@ -1044,7 +974,7 @@ struct iwl4965_priv {
1044 974
1045 /* we allocate array of iwl4965_channel_info for NIC's valid channels. 975 /* we allocate array of iwl4965_channel_info for NIC's valid channels.
1046 * Access via channel # using indirect index array */ 976 * Access via channel # using indirect index array */
1047 struct iwl4965_channel_info *channel_info; /* channel info array */ 977 struct iwl_channel_info *channel_info; /* channel info array */
1048 u8 channel_count; /* # of channels */ 978 u8 channel_count; /* # of channels */
1049 979
1050 /* each calibration channel group in the EEPROM has a derived 980 /* each calibration channel group in the EEPROM has a derived
@@ -1104,18 +1034,21 @@ struct iwl4965_priv {
1104 * 4965's initialize alive response contains some calibration data. */ 1034 * 4965's initialize alive response contains some calibration data. */
1105 struct iwl4965_init_alive_resp card_alive_init; 1035 struct iwl4965_init_alive_resp card_alive_init;
1106 struct iwl4965_alive_resp card_alive; 1036 struct iwl4965_alive_resp card_alive;
1037#ifdef CONFIG_IWLWIFI_RFKILL
1038 struct iwl_rfkill_mngr rfkill_mngr;
1039#endif
1107 1040
1108#ifdef LED 1041#ifdef CONFIG_IWLWIFI_LEDS
1109 /* LED related variables */ 1042 struct iwl4965_led led[IWL_LED_TRG_MAX];
1110 struct iwl4965_activity_blink activity; 1043 unsigned long last_blink_time;
1111 unsigned long led_packets; 1044 u8 last_blink_rate;
1112 int led_state; 1045 u8 allow_blinking;
1046 u64 led_tpt;
1113#endif 1047#endif
1114 1048
1115 u16 active_rate; 1049 u16 active_rate;
1116 u16 active_rate_basic; 1050 u16 active_rate_basic;
1117 1051
1118 u8 call_post_assoc_from_beacon;
1119 u8 assoc_station_added; 1052 u8 assoc_station_added;
1120 u8 use_ant_b_for_management_frame; /* Tx antenna selection */ 1053 u8 use_ant_b_for_management_frame; /* Tx antenna selection */
1121 u8 valid_antenna; /* Bit mask of antennas actually connected */ 1054 u8 valid_antenna; /* Bit mask of antennas actually connected */
@@ -1150,11 +1083,16 @@ struct iwl4965_priv {
1150 u32 scd_base_addr; /* scheduler sram base address */ 1083 u32 scd_base_addr; /* scheduler sram base address */
1151 1084
1152 unsigned long status; 1085 unsigned long status;
1153 u32 config;
1154 1086
1155 int last_rx_rssi; /* From Rx packet statisitics */ 1087 int last_rx_rssi; /* From Rx packet statisitics */
1156 int last_rx_noise; /* From beacon statistics */ 1088 int last_rx_noise; /* From beacon statistics */
1157 1089
1090 /* counts mgmt, ctl, and data packets */
1091 struct traffic_stats {
1092 u32 cnt;
1093 u64 bytes;
1094 } tx_stats[3], rx_stats[3];
1095
1158 struct iwl4965_power_mgr power_data; 1096 struct iwl4965_power_mgr power_data;
1159 1097
1160 struct iwl4965_notif_statistics statistics; 1098 struct iwl4965_notif_statistics statistics;
@@ -1175,12 +1113,15 @@ struct iwl4965_priv {
1175 spinlock_t sta_lock; 1113 spinlock_t sta_lock;
1176 int num_stations; 1114 int num_stations;
1177 struct iwl4965_station_entry stations[IWL_STATION_COUNT]; 1115 struct iwl4965_station_entry stations[IWL_STATION_COUNT];
1116 struct iwl_wep_key wep_keys[WEP_KEYS_MAX];
1117 u8 default_wep_key;
1118 u8 key_mapping_key;
1119 unsigned long ucode_key_table;
1178 1120
1179 /* Indication if ieee80211_ops->open has been called */ 1121 /* Indication if ieee80211_ops->open has been called */
1180 int is_open; 1122 u8 is_open;
1181 1123
1182 u8 mac80211_registered; 1124 u8 mac80211_registered;
1183 int is_abg;
1184 1125
1185 u32 notif_missed_beacons; 1126 u32 notif_missed_beacons;
1186 1127
@@ -1199,26 +1140,28 @@ struct iwl4965_priv {
1199 /* eeprom */ 1140 /* eeprom */
1200 struct iwl4965_eeprom eeprom; 1141 struct iwl4965_eeprom eeprom;
1201 1142
1202 int iw_mode; 1143 enum ieee80211_if_types iw_mode;
1203 1144
1204 struct sk_buff *ibss_beacon; 1145 struct sk_buff *ibss_beacon;
1205 1146
1206 /* Last Rx'd beacon timestamp */ 1147 /* Last Rx'd beacon timestamp */
1207 u32 timestamp0; 1148 u64 timestamp;
1208 u32 timestamp1;
1209 u16 beacon_int; 1149 u16 beacon_int;
1210 struct iwl4965_driver_hw_info hw_setting;
1211 struct ieee80211_vif *vif; 1150 struct ieee80211_vif *vif;
1212 1151
1152 struct iwl_hw_params hw_params;
1153 /* driver/uCode shared Tx Byte Counts and Rx status */
1154 void *shared_virt;
1155 /* Physical Pointer to Tx Byte Counts and Rx status */
1156 dma_addr_t shared_phys;
1157
1213 /* Current association information needed to configure the 1158 /* Current association information needed to configure the
1214 * hardware */ 1159 * hardware */
1215 u16 assoc_id; 1160 u16 assoc_id;
1216 u16 assoc_capability; 1161 u16 assoc_capability;
1217 u8 ps_mode; 1162 u8 ps_mode;
1218 1163
1219#ifdef CONFIG_IWL4965_QOS
1220 struct iwl4965_qos_info qos_data; 1164 struct iwl4965_qos_info qos_data;
1221#endif /*CONFIG_IWL4965_QOS */
1222 1165
1223 struct workqueue_struct *workqueue; 1166 struct workqueue_struct *workqueue;
1224 1167
@@ -1253,71 +1196,68 @@ struct iwl4965_priv {
1253 u32 pm_state[16]; 1196 u32 pm_state[16];
1254#endif 1197#endif
1255 1198
1256#ifdef CONFIG_IWL4965_DEBUG 1199#ifdef CONFIG_IWLWIFI_DEBUG
1257 /* debugging info */ 1200 /* debugging info */
1258 u32 framecnt_to_us; 1201 u32 framecnt_to_us;
1259 atomic_t restrict_refcnt; 1202 atomic_t restrict_refcnt;
1260#endif 1203#ifdef CONFIG_IWLWIFI_DEBUGFS
1204 /* debugfs */
1205 struct iwl_debugfs *dbgfs;
1206#endif /* CONFIG_IWLWIFI_DEBUGFS */
1207#endif /* CONFIG_IWLWIFI_DEBUG */
1261 1208
1262 struct work_struct txpower_work; 1209 struct work_struct txpower_work;
1263#ifdef CONFIG_IWL4965_SENSITIVITY 1210#ifdef CONFIG_IWL4965_SENSITIVITY
1264 struct work_struct sensitivity_work; 1211 struct work_struct sensitivity_work;
1265#endif 1212#endif
1266 struct work_struct statistics_work;
1267 struct timer_list statistics_periodic; 1213 struct timer_list statistics_periodic;
1214}; /*iwl_priv */
1268 1215
1269#ifdef CONFIG_IWL4965_HT_AGG 1216static inline int iwl_is_associated(struct iwl_priv *priv)
1270 struct work_struct agg_work;
1271#endif
1272}; /*iwl4965_priv */
1273
1274static inline int iwl4965_is_associated(struct iwl4965_priv *priv)
1275{ 1217{
1276 return (priv->active_rxon.filter_flags & RXON_FILTER_ASSOC_MSK) ? 1 : 0; 1218 return (priv->active_rxon.filter_flags & RXON_FILTER_ASSOC_MSK) ? 1 : 0;
1277} 1219}
1278 1220
1279static inline int is_channel_valid(const struct iwl4965_channel_info *ch_info) 1221static inline int is_channel_valid(const struct iwl_channel_info *ch_info)
1280{ 1222{
1281 if (ch_info == NULL) 1223 if (ch_info == NULL)
1282 return 0; 1224 return 0;
1283 return (ch_info->flags & EEPROM_CHANNEL_VALID) ? 1 : 0; 1225 return (ch_info->flags & EEPROM_CHANNEL_VALID) ? 1 : 0;
1284} 1226}
1285 1227
1286static inline int is_channel_narrow(const struct iwl4965_channel_info *ch_info) 1228static inline int is_channel_narrow(const struct iwl_channel_info *ch_info)
1287{ 1229{
1288 return (ch_info->flags & EEPROM_CHANNEL_NARROW) ? 1 : 0; 1230 return (ch_info->flags & EEPROM_CHANNEL_NARROW) ? 1 : 0;
1289} 1231}
1290 1232
1291static inline int is_channel_radar(const struct iwl4965_channel_info *ch_info) 1233static inline int is_channel_radar(const struct iwl_channel_info *ch_info)
1292{ 1234{
1293 return (ch_info->flags & EEPROM_CHANNEL_RADAR) ? 1 : 0; 1235 return (ch_info->flags & EEPROM_CHANNEL_RADAR) ? 1 : 0;
1294} 1236}
1295 1237
1296static inline u8 is_channel_a_band(const struct iwl4965_channel_info *ch_info) 1238static inline u8 is_channel_a_band(const struct iwl_channel_info *ch_info)
1297{ 1239{
1298 return ch_info->phymode == MODE_IEEE80211A; 1240 return ch_info->band == IEEE80211_BAND_5GHZ;
1299} 1241}
1300 1242
1301static inline u8 is_channel_bg_band(const struct iwl4965_channel_info *ch_info) 1243static inline u8 is_channel_bg_band(const struct iwl_channel_info *ch_info)
1302{ 1244{
1303 return ((ch_info->phymode == MODE_IEEE80211B) || 1245 return ch_info->band == IEEE80211_BAND_2GHZ;
1304 (ch_info->phymode == MODE_IEEE80211G));
1305} 1246}
1306 1247
1307static inline int is_channel_passive(const struct iwl4965_channel_info *ch) 1248static inline int is_channel_passive(const struct iwl_channel_info *ch)
1308{ 1249{
1309 return (!(ch->flags & EEPROM_CHANNEL_ACTIVE)) ? 1 : 0; 1250 return (!(ch->flags & EEPROM_CHANNEL_ACTIVE)) ? 1 : 0;
1310} 1251}
1311 1252
1312static inline int is_channel_ibss(const struct iwl4965_channel_info *ch) 1253static inline int is_channel_ibss(const struct iwl_channel_info *ch)
1313{ 1254{
1314 return ((ch->flags & EEPROM_CHANNEL_IBSS)) ? 1 : 0; 1255 return ((ch->flags & EEPROM_CHANNEL_IBSS)) ? 1 : 0;
1315} 1256}
1316 1257
1317extern const struct iwl4965_channel_info *iwl4965_get_channel_info( 1258extern const struct iwl_channel_info *iwl_get_channel_info(
1318 const struct iwl4965_priv *priv, int phymode, u16 channel); 1259 const struct iwl_priv *priv, enum ieee80211_band band, u16 channel);
1319 1260
1320/* Requires full declaration of iwl4965_priv before including */ 1261/* Requires full declaration of iwl_priv before including */
1321#include "iwl-4965-io.h"
1322 1262
1323#endif /* __iwl4965_4965_h__ */ 1263#endif /* __iwl4965_4965_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
new file mode 100644
index 000000000000..2dfd982d7d1f
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -0,0 +1,292 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Tomas Winkler <tomas.winkler@intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28
29#include <linux/kernel.h>
30#include <linux/module.h>
31#include <linux/version.h>
32#include <net/mac80211.h>
33
34struct iwl_priv; /* FIXME: remove */
35#include "iwl-debug.h"
36#include "iwl-eeprom.h"
37#include "iwl-4965.h" /* FIXME: remove */
38#include "iwl-core.h"
39#include "iwl-rfkill.h"
40
41
42MODULE_DESCRIPTION("iwl core");
43MODULE_VERSION(IWLWIFI_VERSION);
44MODULE_AUTHOR(DRV_COPYRIGHT);
45MODULE_LICENSE("GPL");
46
47#ifdef CONFIG_IWLWIFI_DEBUG
48u32 iwl_debug_level;
49EXPORT_SYMBOL(iwl_debug_level);
50#endif
51
52/* This function both allocates and initializes hw and priv. */
53struct ieee80211_hw *iwl_alloc_all(struct iwl_cfg *cfg,
54 struct ieee80211_ops *hw_ops)
55{
56 struct iwl_priv *priv;
57
58 /* mac80211 allocates memory for this device instance, including
59 * space for this driver's private structure */
60 struct ieee80211_hw *hw =
61 ieee80211_alloc_hw(sizeof(struct iwl_priv), hw_ops);
62 if (hw == NULL) {
63 IWL_ERROR("Can not allocate network device\n");
64 goto out;
65 }
66
67 priv = hw->priv;
68 priv->hw = hw;
69
70out:
71 return hw;
72}
73EXPORT_SYMBOL(iwl_alloc_all);
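iwl_alloc_all() hands back the ieee80211_hw whose private area is the new struct iwl_priv, so a hardware driver calls it once at probe time and then fills in its own configuration. A hedged sketch of such a caller (my_cfg and my_mac80211_ops are placeholders, not names from this patch):

/* Sketch of a probe-time caller; the cfg and ops symbols are hypothetical. */
static int example_probe(struct iwl_cfg *my_cfg,
			 struct ieee80211_ops *my_mac80211_ops)
{
	struct ieee80211_hw *hw = iwl_alloc_all(my_cfg, my_mac80211_ops);
	struct iwl_priv *priv;

	if (!hw)
		return -ENOMEM;

	priv = hw->priv;
	priv->cfg = my_cfg;	/* iwl_priv gained a cfg pointer in this series */
	return 0;
}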
74
75/**
76 * iwlcore_clear_stations_table - Clear the driver's station table
77 *
78 * NOTE: This does not clear or otherwise alter the device's station table.
79 */
80void iwlcore_clear_stations_table(struct iwl_priv *priv)
81{
82 unsigned long flags;
83
84 spin_lock_irqsave(&priv->sta_lock, flags);
85
86 priv->num_stations = 0;
87 memset(priv->stations, 0, sizeof(priv->stations));
88
89 spin_unlock_irqrestore(&priv->sta_lock, flags);
90}
91EXPORT_SYMBOL(iwlcore_clear_stations_table);
92
93void iwlcore_reset_qos(struct iwl_priv *priv)
94{
95 u16 cw_min = 15;
96 u16 cw_max = 1023;
97 u8 aifs = 2;
98 u8 is_legacy = 0;
99 unsigned long flags;
100 int i;
101
102 spin_lock_irqsave(&priv->lock, flags);
103 priv->qos_data.qos_active = 0;
104
105 if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS) {
106 if (priv->qos_data.qos_enable)
107 priv->qos_data.qos_active = 1;
108 if (!(priv->active_rate & 0xfff0)) {
109 cw_min = 31;
110 is_legacy = 1;
111 }
112 } else if (priv->iw_mode == IEEE80211_IF_TYPE_AP) {
113 if (priv->qos_data.qos_enable)
114 priv->qos_data.qos_active = 1;
115 } else if (!(priv->staging_rxon.flags & RXON_FLG_SHORT_SLOT_MSK)) {
116 cw_min = 31;
117 is_legacy = 1;
118 }
119
120 if (priv->qos_data.qos_active)
121 aifs = 3;
122
123 priv->qos_data.def_qos_parm.ac[0].cw_min = cpu_to_le16(cw_min);
124 priv->qos_data.def_qos_parm.ac[0].cw_max = cpu_to_le16(cw_max);
125 priv->qos_data.def_qos_parm.ac[0].aifsn = aifs;
126 priv->qos_data.def_qos_parm.ac[0].edca_txop = 0;
127 priv->qos_data.def_qos_parm.ac[0].reserved1 = 0;
128
129 if (priv->qos_data.qos_active) {
130 i = 1;
131 priv->qos_data.def_qos_parm.ac[i].cw_min = cpu_to_le16(cw_min);
132 priv->qos_data.def_qos_parm.ac[i].cw_max = cpu_to_le16(cw_max);
133 priv->qos_data.def_qos_parm.ac[i].aifsn = 7;
134 priv->qos_data.def_qos_parm.ac[i].edca_txop = 0;
135 priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
136
137 i = 2;
138 priv->qos_data.def_qos_parm.ac[i].cw_min =
139 cpu_to_le16((cw_min + 1) / 2 - 1);
140 priv->qos_data.def_qos_parm.ac[i].cw_max =
141 cpu_to_le16(cw_max);
142 priv->qos_data.def_qos_parm.ac[i].aifsn = 2;
143 if (is_legacy)
144 priv->qos_data.def_qos_parm.ac[i].edca_txop =
145 cpu_to_le16(6016);
146 else
147 priv->qos_data.def_qos_parm.ac[i].edca_txop =
148 cpu_to_le16(3008);
149 priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
150
151 i = 3;
152 priv->qos_data.def_qos_parm.ac[i].cw_min =
153 cpu_to_le16((cw_min + 1) / 4 - 1);
154 priv->qos_data.def_qos_parm.ac[i].cw_max =
155 cpu_to_le16((cw_max + 1) / 2 - 1);
156 priv->qos_data.def_qos_parm.ac[i].aifsn = 2;
157 priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
158 if (is_legacy)
159 priv->qos_data.def_qos_parm.ac[i].edca_txop =
160 cpu_to_le16(3264);
161 else
162 priv->qos_data.def_qos_parm.ac[i].edca_txop =
163 cpu_to_le16(1504);
164 } else {
165 for (i = 1; i < 4; i++) {
166 priv->qos_data.def_qos_parm.ac[i].cw_min =
167 cpu_to_le16(cw_min);
168 priv->qos_data.def_qos_parm.ac[i].cw_max =
169 cpu_to_le16(cw_max);
170 priv->qos_data.def_qos_parm.ac[i].aifsn = aifs;
171 priv->qos_data.def_qos_parm.ac[i].edca_txop = 0;
172 priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
173 }
174 }
175 IWL_DEBUG_QOS("set QoS to default \n");
176
177 spin_unlock_irqrestore(&priv->lock, flags);
178}
179EXPORT_SYMBOL(iwlcore_reset_qos);
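For reference, plugging the defaults into the QoS-active, non-legacy branch above (cw_min = 15, cw_max = 1023): AC 0 keeps 15/1023 with AIFSN 3, AC 1 keeps 15/1023 with AIFSN 7, AC 2 gets cw_min = (15 + 1)/2 - 1 = 7, cw_max = 1023 and edca_txop = 3008, and AC 3 gets cw_min = (15 + 1)/4 - 1 = 3, cw_max = (1023 + 1)/2 - 1 = 511 and edca_txop = 1504.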
180
181/**
182 * iwlcore_set_rxon_channel - Set the band and channel values in the staging RXON
183 * @band: IEEE80211_BAND_5GHZ selects 5.2 GHz; all else selects 2.4 GHz
184 * @channel: Any channel valid for the requested band
185 *
186 * In addition to setting the staging RXON, priv->band is also set.
187 *
188 * NOTE: Does not commit to the hardware; it sets appropriate bit fields
189 * in the staging RXON flag structure based on the band
190 */
191int iwlcore_set_rxon_channel(struct iwl_priv *priv,
192 enum ieee80211_band band,
193 u16 channel)
194{
195 if (!iwl_get_channel_info(priv, band, channel)) {
196 IWL_DEBUG_INFO("Could not set channel to %d [%d]\n",
197 channel, band);
198 return -EINVAL;
199 }
200
201 if ((le16_to_cpu(priv->staging_rxon.channel) == channel) &&
202 (priv->band == band))
203 return 0;
204
205 priv->staging_rxon.channel = cpu_to_le16(channel);
206 if (band == IEEE80211_BAND_5GHZ)
207 priv->staging_rxon.flags &= ~RXON_FLG_BAND_24G_MSK;
208 else
209 priv->staging_rxon.flags |= RXON_FLG_BAND_24G_MSK;
210
211 priv->band = band;
212
213 IWL_DEBUG_INFO("Staging channel set to %d [%d]\n", channel, band);
214
215 return 0;
216}
217EXPORT_SYMBOL(iwlcore_set_rxon_channel);
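A minimal usage sketch (illustration only, not from the patch): staging 2.4 GHz channel 6. The call only updates the staging RXON; committing it to the device is a separate step not shown here.

static int example_switch_to_channel_6(struct iwl_priv *priv)
{
	int ret = iwlcore_set_rxon_channel(priv, IEEE80211_BAND_2GHZ, 6);

	if (ret)
		IWL_ERROR("channel 6 is not valid for this EEPROM\n");
	return ret;
}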
218
219static void iwlcore_init_hw(struct iwl_priv *priv)
220{
221 struct ieee80211_hw *hw = priv->hw;
222 hw->rate_control_algorithm = "iwl-4965-rs";
223
224 /* Tell mac80211 and its clients (e.g. Wireless Extensions)
225 * the range of signal quality values that we'll provide.
226 * Negative values for level/noise indicate that we'll provide dBm.
227 * For WE, at least, non-0 values here *enable* display of values
228 * in app (iwconfig). */
229 hw->max_rssi = -20; /* signal level, negative indicates dBm */
230 hw->max_noise = -20; /* noise level, negative indicates dBm */
231 hw->max_signal = 100; /* link quality indication (%) */
232
233 /* Tell mac80211 our Tx characteristics */
234 hw->flags = IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE;
235
236 /* Default value; 4 EDCA QOS priorities */
237 hw->queues = 4;
238#ifdef CONFIG_IWL4965_HT
239 /* Enhanced value; more queues, to support 11n aggregation */
240 hw->queues = 16;
241#endif /* CONFIG_IWL4965_HT */
242}
243
244int iwl_setup(struct iwl_priv *priv)
245{
246 int ret = 0;
247 iwlcore_init_hw(priv);
248 ret = priv->cfg->ops->lib->init_drv(priv);
249 return ret;
250}
251EXPORT_SYMBOL(iwl_setup);
252
253/* Low level driver call this function to update iwlcore with
254 * driver status.
255 */
256int iwlcore_low_level_notify(struct iwl_priv *priv,
257 enum iwlcore_card_notify notify)
258{
259 int ret;
260 switch (notify) {
261 case IWLCORE_INIT_EVT:
262 ret = iwl_rfkill_init(priv);
263 if (ret)
264 IWL_ERROR("Unable to initialize RFKILL system. "
265 "Ignoring error: %d\n", ret);
266 break;
267 case IWLCORE_START_EVT:
268 break;
269 case IWLCORE_STOP_EVT:
270 break;
271 case IWLCORE_REMOVE_EVT:
272 iwl_rfkill_unregister(priv);
273 break;
274 }
275
276 return 0;
277}
278EXPORT_SYMBOL(iwlcore_low_level_notify);
279
280int iwl_send_statistics_request(struct iwl_priv *priv, u8 flags)
281{
282 u32 stat_flags = 0;
283 struct iwl_host_cmd cmd = {
284 .id = REPLY_STATISTICS_CMD,
285 .meta.flags = flags,
286 .len = sizeof(stat_flags),
287 .data = (u8 *) &stat_flags,
288 };
289 return iwl_send_cmd(priv, &cmd);
290}
291EXPORT_SYMBOL(iwl_send_statistics_request);
292
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
new file mode 100644
index 000000000000..7193d97630dc
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-core.h
@@ -0,0 +1,246 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2008 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Tomas Winkler <tomas.winkler@intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2008 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62
63#ifndef __iwl_core_h__
64#define __iwl_core_h__
65
66/************************
67 * forward declarations *
68 ************************/
69struct iwl_host_cmd;
70struct iwl_cmd;
71
72
73#define IWLWIFI_VERSION "1.2.26k"
74#define DRV_COPYRIGHT "Copyright(c) 2003-2008 Intel Corporation"
75
76#define IWL_PCI_DEVICE(dev, subdev, cfg) \
77 .vendor = PCI_VENDOR_ID_INTEL, .device = (dev), \
78 .subvendor = PCI_ANY_ID, .subdevice = (subdev), \
79 .driver_data = (kernel_ulong_t)&(cfg)
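IWL_PCI_DEVICE() fills a pci_device_id entry and stashes the matching iwl_cfg in driver_data so the probe routine can recover it. A hedged example of a device table built with it (the device ID and cfg symbol below are placeholders, not taken from this patch):

/* Hypothetical PCI ID table; 0x4229 and example_agn_cfg stand in for
 * whatever IDs and configs the hardware driver actually defines. */
static struct pci_device_id example_hw_card_ids[] = {
	{IWL_PCI_DEVICE(0x4229, PCI_ANY_ID, example_agn_cfg)},
	{0}
};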
80
81#define IWL_SKU_G 0x1
82#define IWL_SKU_A 0x2
83#define IWL_SKU_N 0x8
84
85struct iwl_hcmd_ops {
86 int (*rxon_assoc)(struct iwl_priv *priv);
87};
88struct iwl_hcmd_utils_ops {
89 int (*enqueue_hcmd)(struct iwl_priv *priv, struct iwl_host_cmd *cmd);
90};
91
92struct iwl_lib_ops {
93 /* iwlwifi driver (priv) init */
94 int (*init_drv)(struct iwl_priv *priv);
 95	/* set hw dependent parameters */
96 int (*set_hw_params)(struct iwl_priv *priv);
97
98 void (*txq_update_byte_cnt_tbl)(struct iwl_priv *priv,
99 struct iwl4965_tx_queue *txq,
100 u16 byte_cnt);
101 /* nic init */
102 int (*hw_nic_init)(struct iwl_priv *priv);
103 /* alive notification */
104 int (*alive_notify)(struct iwl_priv *priv);
105 /* check validity of rtc data address */
106 int (*is_valid_rtc_data_addr)(u32 addr);
107 /* 1st ucode load */
108 int (*load_ucode)(struct iwl_priv *priv);
109 /* rfkill */
110 void (*radio_kill_sw)(struct iwl_priv *priv, int disable_radio);
111 /* eeprom operations (as defined in iwl-eeprom.h) */
112 struct iwl_eeprom_ops eeprom_ops;
113};
114
115struct iwl_ops {
116 const struct iwl_lib_ops *lib;
117 const struct iwl_hcmd_ops *hcmd;
118 const struct iwl_hcmd_utils_ops *utils;
119};
120
121struct iwl_mod_params {
122 int disable; /* def: 0 = enable radio */
123 int sw_crypto; /* def: 0 = using hardware encryption */
124 int debug; /* def: 0 = minimal debug log messages */
125 int disable_hw_scan; /* def: 0 = use h/w scan */
126 int num_of_queues; /* def: HW dependent */
127 int enable_qos; /* def: 1 = use quality of service */
128 int amsdu_size_8K; /* def: 1 = enable 8K amsdu size */
129 int antenna; /* def: 0 = both antennas (use diversity) */
130};
131
132struct iwl_cfg {
133 const char *name;
134 const char *fw_name;
135 unsigned int sku;
136 const struct iwl_ops *ops;
137 const struct iwl_mod_params *mod_params;
138};
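The ops/cfg split lets each hardware driver publish its callbacks through one static table that the core dereferences (for example priv->cfg->ops->lib->init_drv in iwl_setup() above). A sketch of the wiring, with every example_* symbol hypothetical; a real driver would also fill in the hcmd and utils tables:

/* Placeholder callbacks, just to make the wiring concrete. */
static int example_init_drv(struct iwl_priv *priv) { return 0; }
static int example_set_hw_params(struct iwl_priv *priv) { return 0; }

static const struct iwl_lib_ops example_lib_ops = {
	.init_drv	= example_init_drv,
	.set_hw_params	= example_set_hw_params,
};

static const struct iwl_ops example_ops = {
	.lib	= &example_lib_ops,
};

static struct iwl_cfg example_cfg = {
	.name		= "Example 4965AGN",
	.fw_name	= "iwlwifi-4965.ucode",
	.sku		= IWL_SKU_A | IWL_SKU_G | IWL_SKU_N,
	.ops		= &example_ops,
};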
139
140/***************************
141 * L i b *
142 ***************************/
143
144struct ieee80211_hw *iwl_alloc_all(struct iwl_cfg *cfg,
145 struct ieee80211_ops *hw_ops);
146
147void iwlcore_clear_stations_table(struct iwl_priv *priv);
148void iwlcore_reset_qos(struct iwl_priv *priv);
149int iwlcore_set_rxon_channel(struct iwl_priv *priv,
150 enum ieee80211_band band,
151 u16 channel);
152
153int iwl_setup(struct iwl_priv *priv);
154
155/*****************************************************
156 * S e n d i n g H o s t C o m m a n d s *
157 *****************************************************/
158
159const char *get_cmd_string(u8 cmd);
160int __must_check iwl_send_cmd_sync(struct iwl_priv *priv,
161 struct iwl_host_cmd *cmd);
162int iwl_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd);
163int __must_check iwl_send_cmd_pdu(struct iwl_priv *priv, u8 id,
164 u16 len, const void *data);
165int iwl_send_cmd_pdu_async(struct iwl_priv *priv, u8 id, u16 len,
166 const void *data,
167 int (*callback)(struct iwl_priv *priv,
168 struct iwl_cmd *cmd,
169 struct sk_buff *skb));
170/*************** DRIVER STATUS FUNCTIONS *****/
171
172#define STATUS_HCMD_ACTIVE 0 /* host command in progress */
173#define STATUS_HCMD_SYNC_ACTIVE 1 /* sync host command in progress */
174#define STATUS_INT_ENABLED 2
175#define STATUS_RF_KILL_HW 3
176#define STATUS_RF_KILL_SW 4
177#define STATUS_INIT 5
178#define STATUS_ALIVE 6
179#define STATUS_READY 7
180#define STATUS_TEMPERATURE 8
181#define STATUS_GEO_CONFIGURED 9
182#define STATUS_EXIT_PENDING 10
183#define STATUS_IN_SUSPEND 11
184#define STATUS_STATISTICS 12
185#define STATUS_SCANNING 13
186#define STATUS_SCAN_ABORTING 14
187#define STATUS_SCAN_HW 15
188#define STATUS_POWER_PMI 16
189#define STATUS_FW_ERROR 17
190#define STATUS_CONF_PENDING 18
191
192
193static inline int iwl_is_ready(struct iwl_priv *priv)
194{
195 /* The adapter is 'ready' if READY and GEO_CONFIGURED bits are
196 * set but EXIT_PENDING is not */
197 return test_bit(STATUS_READY, &priv->status) &&
198 test_bit(STATUS_GEO_CONFIGURED, &priv->status) &&
199 !test_bit(STATUS_EXIT_PENDING, &priv->status);
200}
201
202static inline int iwl_is_alive(struct iwl_priv *priv)
203{
204 return test_bit(STATUS_ALIVE, &priv->status);
205}
206
207static inline int iwl_is_init(struct iwl_priv *priv)
208{
209 return test_bit(STATUS_INIT, &priv->status);
210}
211
212static inline int iwl_is_rfkill(struct iwl_priv *priv)
213{
214 return test_bit(STATUS_RF_KILL_HW, &priv->status) ||
215 test_bit(STATUS_RF_KILL_SW, &priv->status);
216}
217
218static inline int iwl_is_ready_rf(struct iwl_priv *priv)
219{
220
221 if (iwl_is_rfkill(priv))
222 return 0;
223
224 return iwl_is_ready(priv);
225}
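These helpers are thin wrappers over the priv->status bit field, so driver paths typically gate work on them and flip the bits with the ordinary atomic bitops. A sketch (the surrounding function is illustrative, not from this patch):

/* Illustration: gate a hypothetical scan request on driver state. */
static int example_request_scan(struct iwl_priv *priv)
{
	if (!iwl_is_ready_rf(priv))	/* not ready, or RF-kill asserted */
		return -EIO;

	if (test_and_set_bit(STATUS_SCANNING, &priv->status))
		return -EAGAIN;		/* a scan is already in flight */

	/* ... queue the actual scan work here ... */
	return 0;
}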
226
227
228enum iwlcore_card_notify {
229 IWLCORE_INIT_EVT = 0,
230 IWLCORE_START_EVT = 1,
231 IWLCORE_STOP_EVT = 2,
232 IWLCORE_REMOVE_EVT = 3,
233};
234
235int iwlcore_low_level_notify(struct iwl_priv *priv,
236 enum iwlcore_card_notify notify);
237extern int iwl_send_statistics_request(struct iwl_priv *priv, u8 flags);
238int iwl_send_lq_cmd(struct iwl_priv *priv,
239 struct iwl_link_quality_cmd *lq, u8 flags);
240
241static inline int iwl_send_rxon_assoc(struct iwl_priv *priv)
242{
243 return priv->cfg->ops->hcmd->rxon_assoc(priv);
244}
245
246#endif /* __iwl_core_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h b/drivers/net/wireless/iwlwifi/iwl-csr.h
new file mode 100644
index 000000000000..12725796ea5f
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-csr.h
@@ -0,0 +1,265 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2005 - 2008 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * James P. Ketrenos <ipw2100-admin@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2008 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63/*=== CSR (control and status registers) ===*/
64#define CSR_BASE (0x000)
65
66#define CSR_HW_IF_CONFIG_REG (CSR_BASE+0x000) /* hardware interface config */
67#define CSR_INT_COALESCING (CSR_BASE+0x004) /* accum ints, 32-usec units */
68#define CSR_INT (CSR_BASE+0x008) /* host interrupt status/ack */
69#define CSR_INT_MASK (CSR_BASE+0x00c) /* host interrupt enable */
70#define CSR_FH_INT_STATUS (CSR_BASE+0x010) /* busmaster int status/ack*/
71#define CSR_GPIO_IN (CSR_BASE+0x018) /* read external chip pins */
72#define CSR_RESET (CSR_BASE+0x020) /* busmaster enable, NMI, etc*/
73#define CSR_GP_CNTRL (CSR_BASE+0x024)
74
75/*
76 * Hardware revision info
77 * Bit fields:
78 * 31-8: Reserved
79 * 7-4: Type of device: 0x0 = 4965, 0xd = 3945
80 * 3-2: Revision step: 0 = A, 1 = B, 2 = C, 3 = D
81 * 1-0: "Dash" value, as in A-1, etc.
82 *
83 * NOTE: Revision step affects calculation of CCK txpower for 4965.
84 */
85#define CSR_HW_REV (CSR_BASE+0x028)
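Given the bit layout documented above, small decode helpers can pull the fields apart. These are illustrative only and are not part of the header:

/* Hypothetical decode macros for the CSR_HW_REV layout described above. */
#define EXAMPLE_HW_REV_TYPE(rev)	(((rev) >> 4) & 0xF)	/* 0x0 = 4965, 0xd = 3945 */
#define EXAMPLE_HW_REV_STEP(rev)	(((rev) >> 2) & 0x3)	/* 0 = A ... 3 = D */
#define EXAMPLE_HW_REV_DASH(rev)	((rev) & 0x3)		/* the "-1" in A-1 */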
86
87/* EEPROM reads */
88#define CSR_EEPROM_REG (CSR_BASE+0x02c)
89#define CSR_EEPROM_GP (CSR_BASE+0x030)
90#define CSR_GP_UCODE (CSR_BASE+0x044)
91#define CSR_UCODE_DRV_GP1 (CSR_BASE+0x054)
92#define CSR_UCODE_DRV_GP1_SET (CSR_BASE+0x058)
93#define CSR_UCODE_DRV_GP1_CLR (CSR_BASE+0x05c)
94#define CSR_UCODE_DRV_GP2 (CSR_BASE+0x060)
95#define CSR_GIO_CHICKEN_BITS (CSR_BASE+0x100)
96#define CSR_LED_REG (CSR_BASE+0x094)
97
98/* Analog phase-lock-loop configuration (3945 only)
99 * Set bit 24. */
100#define CSR_ANA_PLL_CFG (CSR_BASE+0x20c)
101/*
102 * Indicates hardware rev, to determine CCK backoff for txpower calculation.
103 * Bit fields:
104 * 3-2: 0 = A, 1 = B, 2 = C, 3 = D step
105 */
106#define CSR_HW_REV_WA_REG (CSR_BASE+0x22C)
107
108/* Bits for CSR_HW_IF_CONFIG_REG */
109#define CSR49_HW_IF_CONFIG_REG_BIT_4965_R (0x00000010)
110#define CSR49_HW_IF_CONFIG_REG_MSK_BOARD_VER (0x00000C00)
111#define CSR49_HW_IF_CONFIG_REG_BIT_MAC_SI (0x00000100)
112#define CSR49_HW_IF_CONFIG_REG_BIT_RADIO_SI (0x00000200)
113
114#define CSR39_HW_IF_CONFIG_REG_BIT_3945_MB (0x00000100)
115#define CSR39_HW_IF_CONFIG_REG_BIT_3945_MM (0x00000200)
116#define CSR39_HW_IF_CONFIG_REG_BIT_SKU_MRC (0x00000400)
117#define CSR39_HW_IF_CONFIG_REG_BIT_BOARD_TYPE (0x00000800)
118#define CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_A (0x00000000)
119#define CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_B (0x00001000)
120
121#define CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM (0x00200000)
122
123/* interrupt flags in INTA, set by uCode or hardware (e.g. dma),
124 * acknowledged (reset) by host writing "1" to flagged bits. */
125#define CSR_INT_BIT_FH_RX (1 << 31) /* Rx DMA, cmd responses, FH_INT[17:16] */
126#define CSR_INT_BIT_HW_ERR (1 << 29) /* DMA hardware error FH_INT[31] */
127#define CSR_INT_BIT_DNLD (1 << 28) /* uCode Download */
128#define CSR_INT_BIT_FH_TX (1 << 27) /* Tx DMA FH_INT[1:0] */
129#define CSR_INT_BIT_SCD (1 << 26) /* TXQ pointer advanced */
130#define CSR_INT_BIT_SW_ERR (1 << 25) /* uCode error */
131#define CSR_INT_BIT_RF_KILL (1 << 7) /* HW RFKILL switch GP_CNTRL[27] toggled */
132#define CSR_INT_BIT_CT_KILL (1 << 6) /* Critical temp (chip too hot) rfkill */
133#define CSR_INT_BIT_SW_RX (1 << 3) /* Rx, command responses, 3945 */
134#define CSR_INT_BIT_WAKEUP (1 << 1) /* NIC controller waking up (pwr mgmt) */
135#define CSR_INT_BIT_ALIVE (1 << 0) /* uCode interrupts once it initializes */
136
137#define CSR_INI_SET_MASK (CSR_INT_BIT_FH_RX | \
138 CSR_INT_BIT_HW_ERR | \
139 CSR_INT_BIT_FH_TX | \
140 CSR_INT_BIT_SW_ERR | \
141 CSR_INT_BIT_RF_KILL | \
142 CSR_INT_BIT_SW_RX | \
143 CSR_INT_BIT_WAKEUP | \
144 CSR_INT_BIT_ALIVE)
145
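As the comment above notes, these interrupt bits are acknowledged by writing "1" back to the flagged positions. A simplified sketch of that pattern using raw MMIO accessors; real code goes through the driver's own I/O wrappers and interrupt masking, and 'base' here is just a placeholder for the mapped BAR:

/* Illustration: read the interrupt status, then write the same bits
 * back to CSR_INT to acknowledge them. */
static u32 example_ack_interrupts(void __iomem *base)
{
	u32 inta = readl(base + CSR_INT);

	writel(inta, base + CSR_INT);	/* ack every flagged bit */
	return inta;			/* caller dispatches the handlers */
}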
146/* interrupt flags in FH (flow handler) (PCI busmaster DMA) */
147#define CSR_FH_INT_BIT_ERR (1 << 31) /* Error */
148#define CSR_FH_INT_BIT_HI_PRIOR (1 << 30) /* High priority Rx, bypass coalescing */
149#define CSR39_FH_INT_BIT_RX_CHNL2 (1 << 18) /* Rx channel 2 (3945 only) */
150#define CSR_FH_INT_BIT_RX_CHNL1 (1 << 17) /* Rx channel 1 */
151#define CSR_FH_INT_BIT_RX_CHNL0 (1 << 16) /* Rx channel 0 */
152#define CSR39_FH_INT_BIT_TX_CHNL6 (1 << 6) /* Tx channel 6 (3945 only) */
153#define CSR_FH_INT_BIT_TX_CHNL1 (1 << 1) /* Tx channel 1 */
154#define CSR_FH_INT_BIT_TX_CHNL0 (1 << 0) /* Tx channel 0 */
155
156#define CSR39_FH_INT_RX_MASK (CSR_FH_INT_BIT_HI_PRIOR | \
157 CSR39_FH_INT_BIT_RX_CHNL2 | \
158 CSR_FH_INT_BIT_RX_CHNL1 | \
159 CSR_FH_INT_BIT_RX_CHNL0)
160
161
162#define CSR39_FH_INT_TX_MASK (CSR39_FH_INT_BIT_TX_CHNL6 | \
163 CSR_FH_INT_BIT_TX_CHNL1 | \
164 CSR_FH_INT_BIT_TX_CHNL0)
165
166#define CSR49_FH_INT_RX_MASK (CSR_FH_INT_BIT_HI_PRIOR | \
167 CSR_FH_INT_BIT_RX_CHNL1 | \
168 CSR_FH_INT_BIT_RX_CHNL0)
169
170#define CSR49_FH_INT_TX_MASK (CSR_FH_INT_BIT_TX_CHNL1 | \
171 CSR_FH_INT_BIT_TX_CHNL0)
172
173
174/* RESET */
175#define CSR_RESET_REG_FLAG_NEVO_RESET (0x00000001)
176#define CSR_RESET_REG_FLAG_FORCE_NMI (0x00000002)
177#define CSR_RESET_REG_FLAG_SW_RESET (0x00000080)
178#define CSR_RESET_REG_FLAG_MASTER_DISABLED (0x00000100)
179#define CSR_RESET_REG_FLAG_STOP_MASTER (0x00000200)
180
181/* GP (general purpose) CONTROL */
182#define CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY (0x00000001)
183#define CSR_GP_CNTRL_REG_FLAG_INIT_DONE (0x00000004)
184#define CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ (0x00000008)
185#define CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP (0x00000010)
186
187#define CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN (0x00000001)
188
189#define CSR_GP_CNTRL_REG_MSK_POWER_SAVE_TYPE (0x07000000)
190#define CSR_GP_CNTRL_REG_FLAG_MAC_POWER_SAVE (0x04000000)
191#define CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW (0x08000000)
192
193
194/* EEPROM REG */
195#define CSR_EEPROM_REG_READ_VALID_MSK (0x00000001)
196#define CSR_EEPROM_REG_BIT_CMD (0x00000002)
197
198/* EEPROM GP */
199#define CSR_EEPROM_GP_VALID_MSK (0x00000006)
200#define CSR_EEPROM_GP_BAD_SIGNATURE (0x00000000)
201#define CSR_EEPROM_GP_IF_OWNER_MSK (0x00000180)
202
203/* UCODE DRV GP */
204#define CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP (0x00000001)
205#define CSR_UCODE_SW_BIT_RFKILL (0x00000002)
206#define CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED (0x00000004)
207#define CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT (0x00000008)
208
209/* GPIO */
210#define CSR_GPIO_IN_BIT_AUX_POWER (0x00000200)
211#define CSR_GPIO_IN_VAL_VAUX_PWR_SRC (0x00000000)
212#define CSR_GPIO_IN_VAL_VMAIN_PWR_SRC CSR_GPIO_IN_BIT_AUX_POWER
213
214/* GI Chicken Bits */
215#define CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX (0x00800000)
216#define CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER (0x20000000)
217
218/* LED */
219#define CSR_LED_BSM_CTRL_MSK (0xFFFFFFDF)
220#define CSR_LED_REG_TRUN_ON (0x78)
221#define CSR_LED_REG_TRUN_OFF (0x38)
222
223/*=== HBUS (Host-side Bus) ===*/
224#define HBUS_BASE (0x400)
225/*
226 * Registers for accessing device's internal SRAM memory (e.g. SCD SRAM
227 * structures, error log, event log, verifying uCode load).
228 * First write to address register, then read from or write to data register
229 * to complete the job. Once the address register is set up, accesses to
230 * data registers auto-increment the address by one dword.
231 * Bit usage for address registers (read or write):
232 * 0-31: memory address within device
233 */
234#define HBUS_TARG_MEM_RADDR (HBUS_BASE+0x00c)
235#define HBUS_TARG_MEM_WADDR (HBUS_BASE+0x010)
236#define HBUS_TARG_MEM_WDAT (HBUS_BASE+0x018)
237#define HBUS_TARG_MEM_RDAT (HBUS_BASE+0x01c)
238
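Per the comment above, SRAM is read indirectly: write the start address once, then read the data register repeatedly while the address auto-increments one dword at a time. A hedged sketch with raw MMIO accessors ('base' is a placeholder for the mapped BAR; the real driver wraps this in its own register-access helpers and locking):

/* Illustration of the indirect SRAM read described above. */
static void example_read_sram(void __iomem *base, u32 addr,
			      u32 *buf, int words)
{
	int i;

	writel(addr, base + HBUS_TARG_MEM_RADDR);	/* set start address */
	for (i = 0; i < words; i++)
		buf[i] = readl(base + HBUS_TARG_MEM_RDAT); /* auto-increments */
}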
239/*
240 * Registers for accessing device's internal peripheral registers
241 * (e.g. SCD, BSM, etc.). First write to address register,
242 * then read from or write to data register to complete the job.
243 * Bit usage for address registers (read or write):
244 * 0-15: register address (offset) within device
245 * 24-25: (# bytes - 1) to read or write (e.g. 3 for dword)
246 */
247#define HBUS_TARG_PRPH_WADDR (HBUS_BASE+0x044)
248#define HBUS_TARG_PRPH_RADDR (HBUS_BASE+0x048)
249#define HBUS_TARG_PRPH_WDAT (HBUS_BASE+0x04c)
250#define HBUS_TARG_PRPH_RDAT (HBUS_BASE+0x050)
251
252/*
253 * Per-Tx-queue write pointer (index, really!) (3945 and 4965).
254 * Indicates index to next TFD that driver will fill (1 past latest filled).
255 * Bit usage:
256 * 0-7: queue write index
257 * 11-8: queue selector
258 */
259#define HBUS_TARG_WRPTR (HBUS_BASE+0x060)
260#define HBUS_TARG_MBX_C (HBUS_BASE+0x030)
261
262#define HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED (0x00000004)
263
264
265
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
index 36696bbf170c..c60724c21db8 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965-debug.h
+++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2003 - 2007 Intel Corporation. All rights reserved. 3 * Copyright(c) 2003 - 2008 Intel Corporation. All rights reserved.
4 * 4 *
5 * Portions of this file are derived from the ipw3945 project. 5 * Portions of this file are derived from the ipw3945 project.
6 * 6 *
@@ -26,20 +26,49 @@
  *
  *****************************************************************************/
 
-#ifndef __iwl4965_debug_h__
-#define __iwl4965_debug_h__
+#ifndef __iwl_debug_h__
+#define __iwl_debug_h__
 
-#ifdef CONFIG_IWL4965_DEBUG
-extern u32 iwl4965_debug_level;
+#ifdef CONFIG_IWLWIFI_DEBUG
+extern u32 iwl_debug_level;
 #define IWL_DEBUG(level, fmt, args...) \
-do { if (iwl4965_debug_level & (level)) \
+do { if (iwl_debug_level & (level)) \
   printk(KERN_ERR DRV_NAME": %c %s " fmt, \
 	 in_interrupt() ? 'I' : 'U', __FUNCTION__ , ## args); } while (0)
 
 #define IWL_DEBUG_LIMIT(level, fmt, args...) \
-do { if ((iwl4965_debug_level & (level)) && net_ratelimit()) \
+do { if ((iwl_debug_level & (level)) && net_ratelimit()) \
   printk(KERN_ERR DRV_NAME": %c %s " fmt, \
 	 in_interrupt() ? 'I' : 'U', __FUNCTION__ , ## args); } while (0)
+
+static inline void iwl_print_hex_dump(int level, void *p, u32 len)
+{
+	if (!(iwl_debug_level & level))
+		return;
+
+	print_hex_dump(KERN_DEBUG, "iwl data: ", DUMP_PREFIX_OFFSET, 16, 1,
+		       p, len, 1);
+}
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+struct iwl_debugfs {
+	const char *name;
+	struct dentry *dir_drv;
+	struct dentry *dir_data;
+	struct dir_data_files{
+		struct dentry *file_sram;
+		struct dentry *file_stations;
+		struct dentry *file_rx_statistics;
+		struct dentry *file_tx_statistics;
+	} dbgfs_data_files;
+	u32 sram_offset;
+	u32 sram_len;
+};
+
+int iwl_dbgfs_register(struct iwl_priv *priv, const char *name);
+void iwl_dbgfs_unregister(struct iwl_priv *priv);
+#endif
+
 #else
 static inline void IWL_DEBUG(int level, const char *fmt, ...)
 {
@@ -47,7 +76,22 @@ static inline void IWL_DEBUG(int level, const char *fmt, ...)
 static inline void IWL_DEBUG_LIMIT(int level, const char *fmt, ...)
 {
 }
-#endif /* CONFIG_IWL4965_DEBUG */
+static inline void iwl_print_hex_dump(int level, void *p, u32 len)
+{
+}
+#endif /* CONFIG_IWLWIFI_DEBUG */
+
+
+
+#ifndef CONFIG_IWLWIFI_DEBUGFS
+static inline int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
+{
+	return 0;
+}
+static inline void iwl_dbgfs_unregister(struct iwl_priv *priv)
+{
+}
+#endif /* CONFIG_IWLWIFI_DEBUGFS */
 
 /*
  * To use the debug system;
@@ -68,10 +112,10 @@ static inline void IWL_DEBUG_LIMIT(int level, const char *fmt, ...)
  *
  * % cat /proc/net/iwl/debug_level
  *
- * you simply need to add your entry to the iwl4965_debug_levels array.
+ * you simply need to add your entry to the iwl_debug_levels array.
  *
  * If you do not see debug_level in /proc/net/iwl then you do not have
- * CONFIG_IWL4965_DEBUG defined in your kernel configuration
+ * CONFIG_IWLWIFI_DEBUG defined in your kernel configuration
  *
  */
 
@@ -143,6 +187,7 @@ static inline void IWL_DEBUG_LIMIT(int level, const char *fmt, ...)
 		IWL_DEBUG_LIMIT(IWL_DL_ASSOC | IWL_DL_INFO, f, ## a)
 #define IWL_DEBUG_HT(f, a...) IWL_DEBUG(IWL_DL_HT, f, ## a)
 #define IWL_DEBUG_STATS(f, a...) IWL_DEBUG(IWL_DL_STATS, f, ## a)
+#define IWL_DEBUG_STATS_LIMIT(f, a...) IWL_DEBUG_LIMIT(IWL_DL_STATS, f, ## a)
 #define IWL_DEBUG_TX_REPLY(f, a...) IWL_DEBUG(IWL_DL_TX_REPLY, f, ## a)
 #define IWL_DEBUG_QOS(f, a...) IWL_DEBUG(IWL_DL_QOS, f, ## a)
 #define IWL_DEBUG_RADIO(f, a...) IWL_DEBUG(IWL_DL_RADIO, f, ## a)
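Beyond the renames, the hunks above add the hex-dump helper and the debugfs hooks; the way callers emit gated debug output is unchanged. A minimal usage sketch (the message text and the n_channels variable are made up for illustration):

	/* printed only when the IWL_DL_INFO bit is set in iwl_debug_level */
	IWL_DEBUG(IWL_DL_INFO, "scan completed, %d channels\n", n_channels);

	/* same, but rate-limited through net_ratelimit() on noisy paths */
	IWL_DEBUG_STATS_LIMIT("statistics notification dropped\n");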
diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
new file mode 100644
index 000000000000..0f16f2606f29
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
@@ -0,0 +1,335 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Tomas Winkler <tomas.winkler@intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28
29#include <linux/kernel.h>
30#include <linux/module.h>
31#include <linux/debugfs.h>
32
33#include <linux/ieee80211.h>
34#include <net/mac80211.h>
35
36
37#include "iwl-4965.h"
38#include "iwl-debug.h"
39#include "iwl-core.h"
40#include "iwl-io.h"
41
42
43/* create and remove of files */
44#define DEBUGFS_ADD_DIR(name, parent) do { \
45 dbgfs->dir_##name = debugfs_create_dir(#name, parent); \
46 if (!(dbgfs->dir_##name)) \
47 goto err; \
48} while (0)
49
50#define DEBUGFS_ADD_FILE(name, parent) do { \
51 dbgfs->dbgfs_##parent##_files.file_##name = \
52 debugfs_create_file(#name, 0644, dbgfs->dir_##parent, priv, \
53 &iwl_dbgfs_##name##_ops); \
54 if (!(dbgfs->dbgfs_##parent##_files.file_##name)) \
55 goto err; \
56} while (0)
57
58#define DEBUGFS_REMOVE(name) do { \
59 debugfs_remove(name); \
60 name = NULL; \
 61} while (0)
62
63/* file operation */
64#define DEBUGFS_READ_FUNC(name) \
65static ssize_t iwl_dbgfs_##name##_read(struct file *file, \
66 char __user *user_buf, \
67 size_t count, loff_t *ppos);
68
69#define DEBUGFS_WRITE_FUNC(name) \
70static ssize_t iwl_dbgfs_##name##_write(struct file *file, \
71 const char __user *user_buf, \
72 size_t count, loff_t *ppos);
73
74
75static int iwl_dbgfs_open_file_generic(struct inode *inode, struct file *file)
76{
77 file->private_data = inode->i_private;
78 return 0;
79}
80
81#define DEBUGFS_READ_FILE_OPS(name) \
82 DEBUGFS_READ_FUNC(name); \
83static const struct file_operations iwl_dbgfs_##name##_ops = { \
84 .read = iwl_dbgfs_##name##_read, \
85 .open = iwl_dbgfs_open_file_generic, \
86};
87
88#define DEBUGFS_READ_WRITE_FILE_OPS(name) \
89 DEBUGFS_READ_FUNC(name); \
90 DEBUGFS_WRITE_FUNC(name); \
91static const struct file_operations iwl_dbgfs_##name##_ops = { \
92 .write = iwl_dbgfs_##name##_write, \
93 .read = iwl_dbgfs_##name##_read, \
94 .open = iwl_dbgfs_open_file_generic, \
95};
96
97
98static ssize_t iwl_dbgfs_tx_statistics_read(struct file *file,
99 char __user *user_buf,
100 size_t count, loff_t *ppos) {
101
102 struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
103 char buf[256];
104 int pos = 0;
105 const size_t bufsz = sizeof(buf);
106
107 pos += scnprintf(buf + pos, bufsz - pos, "mgmt: %u\n",
108 priv->tx_stats[0].cnt);
109 pos += scnprintf(buf + pos, bufsz - pos, "ctrl: %u\n",
110 priv->tx_stats[1].cnt);
111 pos += scnprintf(buf + pos, bufsz - pos, "data: %u\n",
112 priv->tx_stats[2].cnt);
113
114 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
115}
116
117static ssize_t iwl_dbgfs_rx_statistics_read(struct file *file,
118 char __user *user_buf,
119 size_t count, loff_t *ppos) {
120
121 struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
122 char buf[256];
123 int pos = 0;
124 const size_t bufsz = sizeof(buf);
125
126 pos += scnprintf(buf + pos, bufsz - pos, "mgmt: %u\n",
127 priv->rx_stats[0].cnt);
128 pos += scnprintf(buf + pos, bufsz - pos, "ctrl: %u\n",
129 priv->rx_stats[1].cnt);
130 pos += scnprintf(buf + pos, bufsz - pos, "data: %u\n",
131 priv->rx_stats[2].cnt);
132
133 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
134}
135
136#define BYTE1_MASK 0x000000ff;
137#define BYTE2_MASK 0x0000ffff;
138#define BYTE3_MASK 0x00ffffff;
139static ssize_t iwl_dbgfs_sram_read(struct file *file,
140 char __user *user_buf,
141 size_t count, loff_t *ppos)
142{
143 u32 val;
144 char buf[1024];
145 ssize_t ret;
146 int i;
147 int pos = 0;
148 struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
149 const size_t bufsz = sizeof(buf);
150
151 printk(KERN_DEBUG "offset is: 0x%x\tlen is: 0x%x\n",
152 priv->dbgfs->sram_offset, priv->dbgfs->sram_len);
153
154 iwl_grab_nic_access(priv);
155 for (i = priv->dbgfs->sram_len; i > 0; i -= 4) {
156 val = iwl_read_targ_mem(priv, priv->dbgfs->sram_offset + \
157 priv->dbgfs->sram_len - i);
158 if (i < 4) {
159 switch (i) {
160 case 1:
161 val &= BYTE1_MASK;
162 break;
163 case 2:
164 val &= BYTE2_MASK;
165 break;
166 case 3:
167 val &= BYTE3_MASK;
168 break;
169 }
170 }
171 pos += scnprintf(buf + pos, bufsz - pos, "0x%08x ", val);
172 }
173 pos += scnprintf(buf + pos, bufsz - pos, "\n");
174 iwl_release_nic_access(priv);
175
176 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
177 return ret;
178}
179
180static ssize_t iwl_dbgfs_sram_write(struct file *file,
181 const char __user *user_buf,
182 size_t count, loff_t *ppos)
183{
184 struct iwl_priv *priv = file->private_data;
185 char buf[64];
186 int buf_size;
187 u32 offset, len;
188
189 memset(buf, 0, sizeof(buf));
190 buf_size = min(count, sizeof(buf) - 1);
191 if (copy_from_user(buf, user_buf, buf_size))
192 return -EFAULT;
193
194 if (sscanf(buf, "%x,%x", &offset, &len) == 2) {
195 priv->dbgfs->sram_offset = offset;
196 priv->dbgfs->sram_len = len;
197 } else {
198 priv->dbgfs->sram_offset = 0;
199 priv->dbgfs->sram_len = 0;
200 }
201
202 return count;
203}
204
205static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf,
206 size_t count, loff_t *ppos)
207{
208 struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
209 struct iwl4965_station_entry *station;
210 int max_sta = priv->hw_params.max_stations;
211 char *buf;
212 int i, j, pos = 0;
213 ssize_t ret;
214 /* Add 30 for initial string */
215 const size_t bufsz = 30 + sizeof(char) * 500 * (priv->num_stations);
216 DECLARE_MAC_BUF(mac);
217
218 buf = kmalloc(bufsz, GFP_KERNEL);
 219	if (!buf)
220 return -ENOMEM;
221
222 pos += scnprintf(buf + pos, bufsz - pos, "num of stations: %d\n\n",
223 priv->num_stations);
224
225 for (i = 0; i < max_sta; i++) {
226 station = &priv->stations[i];
227 if (station->used) {
228 pos += scnprintf(buf + pos, bufsz - pos,
229 "station %d:\ngeneral data:\n", i+1);
230 print_mac(mac, station->sta.sta.addr);
231 pos += scnprintf(buf + pos, bufsz - pos, "id: %u\n",
232 station->sta.sta.sta_id);
233 pos += scnprintf(buf + pos, bufsz - pos, "mode: %u\n",
234 station->sta.mode);
235 pos += scnprintf(buf + pos, bufsz - pos,
236 "flags: 0x%x\n",
237 station->sta.station_flags_msk);
238 pos += scnprintf(buf + pos, bufsz - pos,
239 "ps_status: %u\n", station->ps_status);
240 pos += scnprintf(buf + pos, bufsz - pos, "tid data:\n");
241 pos += scnprintf(buf + pos, bufsz - pos,
242 "seq_num\t\ttxq_id\t");
243 pos += scnprintf(buf + pos, bufsz - pos,
244 "frame_count\twait_for_ba\t");
245 pos += scnprintf(buf + pos, bufsz - pos,
246 "start_idx\tbitmap0\t");
247 pos += scnprintf(buf + pos, bufsz - pos,
248 "bitmap1\trate_n_flags\n");
249
250 for (j = 0; j < MAX_TID_COUNT; j++) {
251 pos += scnprintf(buf + pos, bufsz - pos,
252 "[%d]:\t\t%u\t", j,
253 station->tid[j].seq_number);
254 pos += scnprintf(buf + pos, bufsz - pos,
255 "%u\t\t%u\t\t%u\t\t",
256 station->tid[j].agg.txq_id,
257 station->tid[j].agg.frame_count,
258 station->tid[j].agg.wait_for_ba);
259 pos += scnprintf(buf + pos, bufsz - pos,
260 "%u\t%llu\t%u\n",
261 station->tid[j].agg.start_idx,
262 (unsigned long long)station->tid[j].agg.bitmap,
263 station->tid[j].agg.rate_n_flags);
264 }
265 pos += scnprintf(buf + pos, bufsz - pos, "\n");
266 }
267 }
268
269 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
270 kfree(buf);
271 return ret;
272}
273
274
275DEBUGFS_READ_WRITE_FILE_OPS(sram);
276DEBUGFS_READ_FILE_OPS(stations);
277DEBUGFS_READ_FILE_OPS(rx_statistics);
278DEBUGFS_READ_FILE_OPS(tx_statistics);
279
280/*
281 * Create the debugfs files and directories
282 *
283 */
284int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
285{
286 struct iwl_debugfs *dbgfs;
287
288 dbgfs = kzalloc(sizeof(struct iwl_debugfs), GFP_KERNEL);
289 if (!dbgfs) {
290 goto err;
291 }
292
293 priv->dbgfs = dbgfs;
294 dbgfs->name = name;
295 dbgfs->dir_drv = debugfs_create_dir(name, NULL);
296 if (!dbgfs->dir_drv || IS_ERR(dbgfs->dir_drv)){
297 goto err;
298 }
299
300 DEBUGFS_ADD_DIR(data, dbgfs->dir_drv);
301 DEBUGFS_ADD_FILE(sram, data);
302 DEBUGFS_ADD_FILE(stations, data);
303 DEBUGFS_ADD_FILE(rx_statistics, data);
304 DEBUGFS_ADD_FILE(tx_statistics, data);
305
306 return 0;
307
308err:
309 IWL_ERROR("Can't open the debugfs directory\n");
310 iwl_dbgfs_unregister(priv);
311 return -ENOENT;
312}
313EXPORT_SYMBOL(iwl_dbgfs_register);
314
315/**
316 * Remove the debugfs files and directories
317 *
318 */
319void iwl_dbgfs_unregister(struct iwl_priv *priv)
320{
321 if (!(priv->dbgfs))
322 return;
323
324 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_rx_statistics);
325 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_tx_statistics);
326 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_sram);
327 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_stations);
328 DEBUGFS_REMOVE(priv->dbgfs->dir_data);
329 DEBUGFS_REMOVE(priv->dbgfs->dir_drv);
330 kfree(priv->dbgfs);
331 priv->dbgfs = NULL;
332}
333EXPORT_SYMBOL(iwl_dbgfs_unregister);
334
335
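A sketch of how the two exported entry points above are meant to be wired into device setup and teardown; the call sites and error policy here are illustrative, not taken from this patch. When CONFIG_IWLWIFI_DEBUGFS is disabled, the inline stubs in iwl-debug.h turn both calls into no-ops.

	/* during device setup */
	if (iwl_dbgfs_register(priv, DRV_NAME))
		IWL_ERROR("failed to create debugfs files\n");

	/* during device teardown */
	iwl_dbgfs_unregister(priv);

Once registered, writing "offset,len" (both hex, as parsed by iwl_dbgfs_sram_write()) to the sram file selects the window of device SRAM that the next read dumps.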
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.c b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
new file mode 100644
index 000000000000..a07d5dcb7abc
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
@@ -0,0 +1,561 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2008 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Tomas Winkler <tomas.winkler@intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2008 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62
63
64#include <linux/kernel.h>
65#include <linux/module.h>
66#include <linux/version.h>
67#include <linux/init.h>
68
69#include <net/mac80211.h>
70
71#include "iwl-4965-commands.h"
72#include "iwl-4965.h"
73#include "iwl-core.h"
74#include "iwl-debug.h"
75#include "iwl-eeprom.h"
76#include "iwl-io.h"
77
78/************************** EEPROM BANDS ****************************
79 *
80 * The iwl_eeprom_band definitions below provide the mapping from the
81 * EEPROM contents to the specific channel number supported for each
82 * band.
83 *
84 * For example, iwl_priv->eeprom.band_3_channels[4] from the band_3
85 * definition below maps to physical channel 42 in the 5.2GHz spectrum.
86 * The specific geography and calibration information for that channel
87 * is contained in the eeprom map itself.
88 *
89 * During init, we copy the eeprom information and channel map
90 * information into priv->channel_info_24/52 and priv->channel_map_24/52
91 *
92 * channel_map_24/52 provides the index in the channel_info array for a
 93 * given channel. We need two separate maps because the channel numbers
 94 * overlap between the 2.4GHz and 5.2GHz spectrum, as seen in band_1 and
 95 * band_2
96 *
97 * A value of 0xff stored in the channel_map indicates that the channel
98 * is not supported by the hardware at all.
99 *
100 * A value of 0xfe in the channel_map indicates that the channel is not
101 * valid for Tx with the current hardware. This means that
102 * while the system can tune and receive on a given channel, it may not
103 * be able to associate or transmit any frames on that
104 * channel. There is no corresponding channel information for that
105 * entry.
106 *
107 *********************************************************************/
108
109/* 2.4 GHz */
110const u8 iwl_eeprom_band_1[14] = {
111 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
112};
113
114/* 5.2 GHz bands */
115static const u8 iwl_eeprom_band_2[] = { /* 4915-5080MHz */
116 183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16
117};
118
119static const u8 iwl_eeprom_band_3[] = { /* 5170-5320MHz */
120 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
121};
122
123static const u8 iwl_eeprom_band_4[] = { /* 5500-5700MHz */
124 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
125};
126
127static const u8 iwl_eeprom_band_5[] = { /* 5725-5825MHz */
128 145, 149, 153, 157, 161, 165
129};
130
131static const u8 iwl_eeprom_band_6[] = { /* 2.4 FAT channel */
132 1, 2, 3, 4, 5, 6, 7
133};
134
135static const u8 iwl_eeprom_band_7[] = { /* 5.2 FAT channel */
136 36, 44, 52, 60, 100, 108, 116, 124, 132, 149, 157
137};
138
139/******************************************************************************
140 *
141 * EEPROM related functions
142 *
143******************************************************************************/
144
145int iwlcore_eeprom_verify_signature(struct iwl_priv *priv)
146{
147 u32 gp = iwl_read32(priv, CSR_EEPROM_GP);
148 if ((gp & CSR_EEPROM_GP_VALID_MSK) == CSR_EEPROM_GP_BAD_SIGNATURE) {
149 IWL_ERROR("EEPROM not found, EEPROM_GP=0x%08x", gp);
150 return -ENOENT;
151 }
152 return 0;
153}
154EXPORT_SYMBOL(iwlcore_eeprom_verify_signature);
155
156/*
157 * The device's EEPROM semaphore prevents conflicts between driver and uCode
158 * when accessing the EEPROM; each access is a series of pulses to/from the
159 * EEPROM chip, not a single event, so even reads could conflict if they
160 * weren't arbitrated by the semaphore.
161 */
162int iwlcore_eeprom_acquire_semaphore(struct iwl_priv *priv)
163{
164 u16 count;
165 int ret;
166
167 for (count = 0; count < EEPROM_SEM_RETRY_LIMIT; count++) {
168 /* Request semaphore */
169 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
170 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
171
172 /* See if we got it */
173 ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
174 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
175 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
176 EEPROM_SEM_TIMEOUT);
177 if (ret >= 0) {
178 IWL_DEBUG_IO("Acquired semaphore after %d tries.\n",
179 count+1);
180 return ret;
181 }
182 }
183
184 return ret;
185}
186EXPORT_SYMBOL(iwlcore_eeprom_acquire_semaphore);
187
188void iwlcore_eeprom_release_semaphore(struct iwl_priv *priv)
189{
190 iwl_clear_bit(priv, CSR_HW_IF_CONFIG_REG,
191 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
192
193}
194EXPORT_SYMBOL(iwlcore_eeprom_release_semaphore);
195
196
197/**
198 * iwl_eeprom_init - read EEPROM contents
199 *
200 * Load the EEPROM contents from adapter into priv->eeprom
201 *
202 * NOTE: This routine uses the non-debug IO access functions.
203 */
204int iwl_eeprom_init(struct iwl_priv *priv)
205{
206 u16 *e = (u16 *)&priv->eeprom;
207 u32 gp = iwl_read32(priv, CSR_EEPROM_GP);
208 u32 r;
209 int sz = sizeof(priv->eeprom);
210 int ret;
211 int i;
212 u16 addr;
213
214 /* The EEPROM structure has several padding buffers within it
215 * and when adding new EEPROM maps is subject to programmer errors
216 * which may be very difficult to identify without explicitly
217 * checking the resulting size of the eeprom map. */
218 BUILD_BUG_ON(sizeof(priv->eeprom) != IWL_EEPROM_IMAGE_SIZE);
219
220 if ((gp & CSR_EEPROM_GP_VALID_MSK) == CSR_EEPROM_GP_BAD_SIGNATURE) {
221 IWL_ERROR("EEPROM not found, EEPROM_GP=0x%08x", gp);
222 return -ENOENT;
223 }
224
225 /* Make sure driver (instead of uCode) is allowed to read EEPROM */
226 ret = priv->cfg->ops->lib->eeprom_ops.acquire_semaphore(priv);
227 if (ret < 0) {
228 IWL_ERROR("Failed to acquire EEPROM semaphore.\n");
229 return -ENOENT;
230 }
231
232 /* eeprom is an array of 16bit values */
233 for (addr = 0; addr < sz; addr += sizeof(u16)) {
234 _iwl_write32(priv, CSR_EEPROM_REG, addr << 1);
235 _iwl_clear_bit(priv, CSR_EEPROM_REG, CSR_EEPROM_REG_BIT_CMD);
236
237 for (i = 0; i < IWL_EEPROM_ACCESS_TIMEOUT;
238 i += IWL_EEPROM_ACCESS_DELAY) {
239 r = _iwl_read_direct32(priv, CSR_EEPROM_REG);
240 if (r & CSR_EEPROM_REG_READ_VALID_MSK)
241 break;
242 udelay(IWL_EEPROM_ACCESS_DELAY);
243 }
244
245 if (!(r & CSR_EEPROM_REG_READ_VALID_MSK)) {
246 IWL_ERROR("Time out reading EEPROM[%d]", addr);
247 ret = -ETIMEDOUT;
248 goto done;
249 }
250 e[addr / 2] = le16_to_cpu((__force __le16)(r >> 16));
251 }
252 ret = 0;
253
254done:
255 priv->cfg->ops->lib->eeprom_ops.release_semaphore(priv);
256 return ret;
257}
258EXPORT_SYMBOL(iwl_eeprom_init);
259
260
261void iwl_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac)
262{
263 memcpy(mac, priv->eeprom.mac_address, 6);
264}
265EXPORT_SYMBOL(iwl_eeprom_get_mac);
266
267static void iwl_init_band_reference(const struct iwl_priv *priv,
268 int band,
269 int *eeprom_ch_count,
270 const struct iwl4965_eeprom_channel
271 **eeprom_ch_info,
272 const u8 **eeprom_ch_index)
273{
274 switch (band) {
275 case 1: /* 2.4GHz band */
276 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_1);
277 *eeprom_ch_info = priv->eeprom.band_1_channels;
278 *eeprom_ch_index = iwl_eeprom_band_1;
279 break;
280 case 2: /* 4.9GHz band */
281 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_2);
282 *eeprom_ch_info = priv->eeprom.band_2_channels;
283 *eeprom_ch_index = iwl_eeprom_band_2;
284 break;
285 case 3: /* 5.2GHz band */
286 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_3);
287 *eeprom_ch_info = priv->eeprom.band_3_channels;
288 *eeprom_ch_index = iwl_eeprom_band_3;
289 break;
290 case 4: /* 5.5GHz band */
291 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_4);
292 *eeprom_ch_info = priv->eeprom.band_4_channels;
293 *eeprom_ch_index = iwl_eeprom_band_4;
294 break;
295 case 5: /* 5.7GHz band */
296 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_5);
297 *eeprom_ch_info = priv->eeprom.band_5_channels;
298 *eeprom_ch_index = iwl_eeprom_band_5;
299 break;
300 case 6: /* 2.4GHz FAT channels */
301 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_6);
302 *eeprom_ch_info = priv->eeprom.band_24_channels;
303 *eeprom_ch_index = iwl_eeprom_band_6;
304 break;
305 case 7: /* 5 GHz FAT channels */
306 *eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_7);
307 *eeprom_ch_info = priv->eeprom.band_52_channels;
308 *eeprom_ch_index = iwl_eeprom_band_7;
309 break;
310 default:
311 BUG();
312 return;
313 }
314}
315
316#define CHECK_AND_PRINT(x) ((eeprom_ch->flags & EEPROM_CHANNEL_##x) \
317 ? # x " " : "")
318
319/**
320 * iwl4965_set_fat_chan_info - Copy fat channel info into driver's priv.
321 *
322 * Does not set up a command, or touch hardware.
323 */
324static int iwl4965_set_fat_chan_info(struct iwl_priv *priv,
325 enum ieee80211_band band, u16 channel,
326 const struct iwl4965_eeprom_channel *eeprom_ch,
327 u8 fat_extension_channel)
328{
329 struct iwl_channel_info *ch_info;
330
331 ch_info = (struct iwl_channel_info *)
332 iwl_get_channel_info(priv, band, channel);
333
334 if (!is_channel_valid(ch_info))
335 return -1;
336
337 IWL_DEBUG_INFO("FAT Ch. %d [%sGHz] %s%s%s%s%s%s(0x%02x"
338 " %ddBm): Ad-Hoc %ssupported\n",
339 ch_info->channel,
340 is_channel_a_band(ch_info) ?
341 "5.2" : "2.4",
342 CHECK_AND_PRINT(IBSS),
343 CHECK_AND_PRINT(ACTIVE),
344 CHECK_AND_PRINT(RADAR),
345 CHECK_AND_PRINT(WIDE),
346 CHECK_AND_PRINT(NARROW),
347 CHECK_AND_PRINT(DFS),
348 eeprom_ch->flags,
349 eeprom_ch->max_power_avg,
350 ((eeprom_ch->flags & EEPROM_CHANNEL_IBSS)
351 && !(eeprom_ch->flags & EEPROM_CHANNEL_RADAR)) ?
352 "" : "not ");
353
354 ch_info->fat_eeprom = *eeprom_ch;
355 ch_info->fat_max_power_avg = eeprom_ch->max_power_avg;
356 ch_info->fat_curr_txpow = eeprom_ch->max_power_avg;
357 ch_info->fat_min_power = 0;
358 ch_info->fat_scan_power = eeprom_ch->max_power_avg;
359 ch_info->fat_flags = eeprom_ch->flags;
360 ch_info->fat_extension_channel = fat_extension_channel;
361
362 return 0;
363}
364
365#define CHECK_AND_PRINT_I(x) ((eeprom_ch_info[ch].flags & EEPROM_CHANNEL_##x) \
366 ? # x " " : "")
367
368/**
369 * iwl_init_channel_map - Set up driver's info for all possible channels
370 */
371int iwl_init_channel_map(struct iwl_priv *priv)
372{
373 int eeprom_ch_count = 0;
374 const u8 *eeprom_ch_index = NULL;
375 const struct iwl4965_eeprom_channel *eeprom_ch_info = NULL;
376 int band, ch;
377 struct iwl_channel_info *ch_info;
378
379 if (priv->channel_count) {
380 IWL_DEBUG_INFO("Channel map already initialized.\n");
381 return 0;
382 }
383
384 if (priv->eeprom.version < 0x2f) {
385 IWL_WARNING("Unsupported EEPROM version: 0x%04X\n",
386 priv->eeprom.version);
387 return -EINVAL;
388 }
389
390 IWL_DEBUG_INFO("Initializing regulatory info from EEPROM\n");
391
392 priv->channel_count =
393 ARRAY_SIZE(iwl_eeprom_band_1) +
394 ARRAY_SIZE(iwl_eeprom_band_2) +
395 ARRAY_SIZE(iwl_eeprom_band_3) +
396 ARRAY_SIZE(iwl_eeprom_band_4) +
397 ARRAY_SIZE(iwl_eeprom_band_5);
398
399 IWL_DEBUG_INFO("Parsing data for %d channels.\n", priv->channel_count);
400
401 priv->channel_info = kzalloc(sizeof(struct iwl_channel_info) *
402 priv->channel_count, GFP_KERNEL);
403 if (!priv->channel_info) {
404 IWL_ERROR("Could not allocate channel_info\n");
405 priv->channel_count = 0;
406 return -ENOMEM;
407 }
408
409 ch_info = priv->channel_info;
410
411 /* Loop through the 5 EEPROM bands adding them in order to the
 412 * channel map we maintain (that contains more information than
 413 * just what is in the EEPROM) */
414 for (band = 1; band <= 5; band++) {
415
416 iwl_init_band_reference(priv, band, &eeprom_ch_count,
417 &eeprom_ch_info, &eeprom_ch_index);
418
419 /* Loop through each band adding each of the channels */
420 for (ch = 0; ch < eeprom_ch_count; ch++) {
421 ch_info->channel = eeprom_ch_index[ch];
422 ch_info->band = (band == 1) ? IEEE80211_BAND_2GHZ :
423 IEEE80211_BAND_5GHZ;
424
425 /* permanently store EEPROM's channel regulatory flags
426 * and max power in channel info database. */
427 ch_info->eeprom = eeprom_ch_info[ch];
428
429 /* Copy the run-time flags so they are there even on
430 * invalid channels */
431 ch_info->flags = eeprom_ch_info[ch].flags;
432
433 if (!(is_channel_valid(ch_info))) {
434 IWL_DEBUG_INFO("Ch. %d Flags %x [%sGHz] - "
435 "No traffic\n",
436 ch_info->channel,
437 ch_info->flags,
438 is_channel_a_band(ch_info) ?
439 "5.2" : "2.4");
440 ch_info++;
441 continue;
442 }
443
444 /* Initialize regulatory-based run-time data */
445 ch_info->max_power_avg = ch_info->curr_txpow =
446 eeprom_ch_info[ch].max_power_avg;
447 ch_info->scan_power = eeprom_ch_info[ch].max_power_avg;
448 ch_info->min_power = 0;
449
450 IWL_DEBUG_INFO("Ch. %d [%sGHz] %s%s%s%s%s%s%s(0x%02x"
451 " %ddBm): Ad-Hoc %ssupported\n",
452 ch_info->channel,
453 is_channel_a_band(ch_info) ?
454 "5.2" : "2.4",
455 CHECK_AND_PRINT_I(VALID),
456 CHECK_AND_PRINT_I(IBSS),
457 CHECK_AND_PRINT_I(ACTIVE),
458 CHECK_AND_PRINT_I(RADAR),
459 CHECK_AND_PRINT_I(WIDE),
460 CHECK_AND_PRINT_I(NARROW),
461 CHECK_AND_PRINT_I(DFS),
462 eeprom_ch_info[ch].flags,
463 eeprom_ch_info[ch].max_power_avg,
464 ((eeprom_ch_info[ch].
465 flags & EEPROM_CHANNEL_IBSS)
466 && !(eeprom_ch_info[ch].
467 flags & EEPROM_CHANNEL_RADAR))
468 ? "" : "not ");
469
470 /* Set the user_txpower_limit to the highest power
471 * supported by any channel */
472 if (eeprom_ch_info[ch].max_power_avg >
473 priv->user_txpower_limit)
474 priv->user_txpower_limit =
475 eeprom_ch_info[ch].max_power_avg;
476
477 ch_info++;
478 }
479 }
480
481 /* Two additional EEPROM bands for 2.4 and 5 GHz FAT channels */
482 for (band = 6; band <= 7; band++) {
483 enum ieee80211_band ieeeband;
484 u8 fat_extension_chan;
485
486 iwl_init_band_reference(priv, band, &eeprom_ch_count,
487 &eeprom_ch_info, &eeprom_ch_index);
488
489 /* EEPROM band 6 is 2.4, band 7 is 5 GHz */
490 ieeeband =
491 (band == 6) ? IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
492
493 /* Loop through each band adding each of the channels */
494 for (ch = 0; ch < eeprom_ch_count; ch++) {
495
496 if ((band == 6) &&
497 ((eeprom_ch_index[ch] == 5) ||
498 (eeprom_ch_index[ch] == 6) ||
499 (eeprom_ch_index[ch] == 7)))
500 fat_extension_chan = HT_IE_EXT_CHANNEL_MAX;
501 else
502 fat_extension_chan = HT_IE_EXT_CHANNEL_ABOVE;
503
504 /* Set up driver's info for lower half */
505 iwl4965_set_fat_chan_info(priv, ieeeband,
506 eeprom_ch_index[ch],
507 &(eeprom_ch_info[ch]),
508 fat_extension_chan);
509
510 /* Set up driver's info for upper half */
511 iwl4965_set_fat_chan_info(priv, ieeeband,
512 (eeprom_ch_index[ch] + 4),
513 &(eeprom_ch_info[ch]),
514 HT_IE_EXT_CHANNEL_BELOW);
515 }
516 }
517
518 return 0;
519}
520EXPORT_SYMBOL(iwl_init_channel_map);
521
522/*
 523 * iwl_free_channel_map - undo allocations in iwl_init_channel_map
524 */
525void iwl_free_channel_map(struct iwl_priv *priv)
526{
527 kfree(priv->channel_info);
528 priv->channel_count = 0;
529}
530EXPORT_SYMBOL(iwl_free_channel_map);
531
532/**
533 * iwl_get_channel_info - Find driver's private channel info
534 *
535 * Based on band and channel number.
536 */
537const struct iwl_channel_info *iwl_get_channel_info(
538 const struct iwl_priv *priv,
539 enum ieee80211_band band, u16 channel)
540{
541 int i;
542
543 switch (band) {
544 case IEEE80211_BAND_5GHZ:
545 for (i = 14; i < priv->channel_count; i++) {
546 if (priv->channel_info[i].channel == channel)
547 return &priv->channel_info[i];
548 }
549 break;
550 case IEEE80211_BAND_2GHZ:
551 if (channel >= 1 && channel <= 14)
552 return &priv->channel_info[channel - 1];
553 break;
554 default:
555 BUG();
556 }
557
558 return NULL;
559}
560EXPORT_SYMBOL(iwl_get_channel_info);
561
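The exported helpers above are intended to be called in a fixed order during device bring-up. A condensed sketch (the wrapper function and its error handling are illustrative; the real call sites live in the 4965 driver proper, not in this file):

	static int example_eeprom_setup(struct iwl_priv *priv)
	{
		u8 mac[6];
		int ret;

		ret = iwl_eeprom_init(priv);	/* copy the EEPROM image into priv->eeprom */
		if (ret)
			return ret;

		iwl_eeprom_get_mac(priv, mac);	/* MAC address, abs. offset 42 */

		return iwl_init_channel_map(priv);	/* build priv->channel_info[] */
	}

iwl_free_channel_map() undoes the channel allocation on teardown.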
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.h b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
new file mode 100644
index 000000000000..bd0a042ca77f
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
@@ -0,0 +1,375 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2008 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Tomas Winkler <tomas.winkler@intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2008 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62
63#ifndef __iwl_eeprom_h__
64#define __iwl_eeprom_h__
65
66struct iwl_priv;
67
68/*
69 * EEPROM access time values:
70 *
71 * Driver initiates EEPROM read by writing byte address << 1 to CSR_EEPROM_REG,
72 * then clearing (with subsequent read/modify/write) CSR_EEPROM_REG bit
73 * CSR_EEPROM_REG_BIT_CMD (0x2).
74 * Driver then polls CSR_EEPROM_REG for CSR_EEPROM_REG_READ_VALID_MSK (0x1).
75 * When polling, wait 10 uSec between polling loops, up to a maximum 5000 uSec.
76 * Driver reads 16-bit value from bits 31-16 of CSR_EEPROM_REG.
77 */
78#define IWL_EEPROM_ACCESS_TIMEOUT 5000 /* uSec */
79#define IWL_EEPROM_ACCESS_DELAY 10 /* uSec */
80
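/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * the access sequence described above condensed into a single 16-bit read.
 * The register helpers are the ones iwl-eeprom.c already uses; the loop in
 * iwl_eeprom_init() is the real implementation.
 */
static inline int example_eeprom_read16(struct iwl_priv *priv, u16 addr, u16 *val)
{
	u32 r = 0;
	int t;

	_iwl_write32(priv, CSR_EEPROM_REG, addr << 1);
	_iwl_clear_bit(priv, CSR_EEPROM_REG, CSR_EEPROM_REG_BIT_CMD);

	for (t = 0; t < IWL_EEPROM_ACCESS_TIMEOUT; t += IWL_EEPROM_ACCESS_DELAY) {
		r = _iwl_read_direct32(priv, CSR_EEPROM_REG);
		if (r & CSR_EEPROM_REG_READ_VALID_MSK) {
			*val = r >> 16;		/* data sits in bits 31-16 */
			return 0;
		}
		udelay(IWL_EEPROM_ACCESS_DELAY);
	}
	return -ETIMEDOUT;
}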
81#define IWL_EEPROM_SEM_TIMEOUT 10 /* milliseconds */
82#define IWL_EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */
83
84
85/*
86 * Regulatory channel usage flags in EEPROM struct iwl4965_eeprom_channel.flags.
87 *
88 * IBSS and/or AP operation is allowed *only* on those channels with
89 * (VALID && IBSS && ACTIVE && !RADAR). This restriction is in place because
90 * RADAR detection is not supported by the 4965 driver, but is a
91 * requirement for establishing a new network for legal operation on channels
92 * requiring RADAR detection or restricting ACTIVE scanning.
93 *
94 * NOTE: "WIDE" flag does not indicate anything about "FAT" 40 MHz channels.
95 * It only indicates that 20 MHz channel use is supported; FAT channel
96 * usage is indicated by a separate set of regulatory flags for each
97 * FAT channel pair.
98 *
99 * NOTE: Using a channel inappropriately will result in a uCode error!
100 */
101#define IWL_NUM_TX_CALIB_GROUPS 5
102enum {
103 EEPROM_CHANNEL_VALID = (1 << 0), /* usable for this SKU/geo */
104 EEPROM_CHANNEL_IBSS = (1 << 1), /* usable as an IBSS channel */
105 /* Bit 2 Reserved */
106 EEPROM_CHANNEL_ACTIVE = (1 << 3), /* active scanning allowed */
107 EEPROM_CHANNEL_RADAR = (1 << 4), /* radar detection required */
108 EEPROM_CHANNEL_WIDE = (1 << 5), /* 20 MHz channel okay */
109 EEPROM_CHANNEL_NARROW = (1 << 6), /* 10 MHz channel (not used) */
110 EEPROM_CHANNEL_DFS = (1 << 7), /* dynamic freq selection candidate */
111};
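/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * the "IBSS and/or AP allowed" rule stated in the comment above, written out
 * as a test on a channel's EEPROM_CHANNEL_* flags.
 */
static inline int example_channel_allows_ibss(u8 flags)
{
	return (flags & EEPROM_CHANNEL_VALID) &&
	       (flags & EEPROM_CHANNEL_IBSS) &&
	       (flags & EEPROM_CHANNEL_ACTIVE) &&
	       !(flags & EEPROM_CHANNEL_RADAR);
}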
112
113/* SKU Capabilities */
114#define EEPROM_SKU_CAP_SW_RF_KILL_ENABLE (1 << 0)
115#define EEPROM_SKU_CAP_HW_RF_KILL_ENABLE (1 << 1)
116
117/* *regulatory* channel data format in eeprom, one for each channel.
118 * There are separate entries for FAT (40 MHz) vs. normal (20 MHz) channels. */
119struct iwl4965_eeprom_channel {
120 u8 flags; /* EEPROM_CHANNEL_* flags copied from EEPROM */
121 s8 max_power_avg; /* max power (dBm) on this chnl, limit 31 */
122} __attribute__ ((packed));
123
124/* 4965 has two radio transmitters (and 3 radio receivers) */
125#define EEPROM_TX_POWER_TX_CHAINS (2)
126
127/* 4965 has room for up to 8 sets of txpower calibration data */
128#define EEPROM_TX_POWER_BANDS (8)
129
130/* 4965 factory calibration measures txpower gain settings for
131 * each of 3 target output levels */
132#define EEPROM_TX_POWER_MEASUREMENTS (3)
133
134#define EEPROM_4965_TX_POWER_VERSION (2)
135
136/* 4965 driver does not work with txpower calibration version < 5.
137 * Look for this in calib_version member of struct iwl4965_eeprom. */
138#define EEPROM_TX_POWER_VERSION_NEW (5)
139
140/* 2.4 GHz */
141extern const u8 iwl_eeprom_band_1[14];
142
143/*
144 * 4965 factory calibration data for one txpower level, on one channel,
145 * measured on one of the 2 tx chains (radio transmitter and associated
146 * antenna). EEPROM contains:
147 *
148 * 1) Temperature (degrees Celsius) of device when measurement was made.
149 *
150 * 2) Gain table index used to achieve the target measurement power.
151 * This refers to the "well-known" gain tables (see iwl-4965-hw.h).
152 *
153 * 3) Actual measured output power, in half-dBm ("34" = 17 dBm).
154 *
155 * 4) RF power amplifier detector level measurement (not used).
156 */
157struct iwl4965_eeprom_calib_measure {
158 u8 temperature; /* Device temperature (Celsius) */
159 u8 gain_idx; /* Index into gain table */
160 u8 actual_pow; /* Measured RF output power, half-dBm */
161 s8 pa_det; /* Power amp detector level (not used) */
162} __attribute__ ((packed));
163
164
165/*
166 * 4965 measurement set for one channel. EEPROM contains:
167 *
168 * 1) Channel number measured
169 *
170 * 2) Measurements for each of 3 power levels for each of 2 radio transmitters
171 * (a.k.a. "tx chains") (6 measurements altogether)
172 */
173struct iwl4965_eeprom_calib_ch_info {
174 u8 ch_num;
175 struct iwl4965_eeprom_calib_measure
176 measurements[EEPROM_TX_POWER_TX_CHAINS]
177 [EEPROM_TX_POWER_MEASUREMENTS];
178} __attribute__ ((packed));
179
180/*
181 * 4965 txpower subband info.
182 *
183 * For each frequency subband, EEPROM contains the following:
184 *
185 * 1) First and last channels within range of the subband. "0" values
186 * indicate that this sample set is not being used.
187 *
188 * 2) Sample measurement sets for 2 channels close to the range endpoints.
189 */
190struct iwl4965_eeprom_calib_subband_info {
191 u8 ch_from; /* channel number of lowest channel in subband */
192 u8 ch_to; /* channel number of highest channel in subband */
193 struct iwl4965_eeprom_calib_ch_info ch1;
194 struct iwl4965_eeprom_calib_ch_info ch2;
195} __attribute__ ((packed));
196
197
198/*
199 * 4965 txpower calibration info. EEPROM contains:
200 *
201 * 1) Factory-measured saturation power levels (maximum levels at which
202 * tx power amplifier can output a signal without too much distortion).
203 * There is one level for 2.4 GHz band and one for 5 GHz band. These
204 * values apply to all channels within each of the bands.
205 *
206 * 2) Factory-measured power supply voltage level. This is assumed to be
207 * constant (i.e. same value applies to all channels/bands) while the
208 * factory measurements are being made.
209 *
210 * 3) Up to 8 sets of factory-measured txpower calibration values.
211 * These are for different frequency ranges, since txpower gain
212 * characteristics of the analog radio circuitry vary with frequency.
213 *
214 * Not all sets need to be filled with data;
215 * struct iwl4965_eeprom_calib_subband_info contains range of channels
216 * (0 if unused) for each set of data.
217 */
218struct iwl4965_eeprom_calib_info {
219 u8 saturation_power24; /* half-dBm (e.g. "34" = 17 dBm) */
220 u8 saturation_power52; /* half-dBm */
221 s16 voltage; /* signed */
222 struct iwl4965_eeprom_calib_subband_info
223 band_info[EEPROM_TX_POWER_BANDS];
224} __attribute__ ((packed));
225
226
227
228/*
229 * 4965 EEPROM map
230 */
231struct iwl4965_eeprom {
232 u8 reserved0[16];
233 u16 device_id; /* abs.ofs: 16 */
234 u8 reserved1[2];
235 u16 pmc; /* abs.ofs: 20 */
236 u8 reserved2[20];
237 u8 mac_address[6]; /* abs.ofs: 42 */
238 u8 reserved3[58];
239 u16 board_revision; /* abs.ofs: 106 */
240 u8 reserved4[11];
241 u8 board_pba_number[9]; /* abs.ofs: 119 */
242 u8 reserved5[8];
243 u16 version; /* abs.ofs: 136 */
244 u8 sku_cap; /* abs.ofs: 138 */
245 u8 leds_mode; /* abs.ofs: 139 */
246 u16 oem_mode;
247 u16 wowlan_mode; /* abs.ofs: 142 */
248 u16 leds_time_interval; /* abs.ofs: 144 */
249 u8 leds_off_time; /* abs.ofs: 146 */
250 u8 leds_on_time; /* abs.ofs: 147 */
251 u8 almgor_m_version; /* abs.ofs: 148 */
252 u8 antenna_switch_type; /* abs.ofs: 149 */
253 u8 reserved6[8];
254 u16 board_revision_4965; /* abs.ofs: 158 */
255 u8 reserved7[13];
256 u8 board_pba_number_4965[9]; /* abs.ofs: 173 */
257 u8 reserved8[10];
258 u8 sku_id[4]; /* abs.ofs: 192 */
259
260/*
261 * Per-channel regulatory data.
262 *
263 * Each channel that *might* be supported by 3945 or 4965 has a fixed location
264 * in EEPROM containing EEPROM_CHANNEL_* usage flags (LSB) and max regulatory
265 * txpower (MSB).
266 *
267 * Entries immediately below are for 20 MHz channel width. FAT (40 MHz)
268 * channels (only for 4965, not supported by 3945) appear later in the EEPROM.
269 *
270 * 2.4 GHz channels 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
271 */
272 u16 band_1_count; /* abs.ofs: 196 */
273 struct iwl4965_eeprom_channel band_1_channels[14]; /* abs.ofs: 196 */
274
275/*
276 * 4.9 GHz channels 183, 184, 185, 187, 188, 189, 192, 196,
277 * 5.0 GHz channels 7, 8, 11, 12, 16
278 * (4915-5080MHz) (none of these is ever supported)
279 */
280 u16 band_2_count; /* abs.ofs: 226 */
281 struct iwl4965_eeprom_channel band_2_channels[13]; /* abs.ofs: 228 */
282
283/*
284 * 5.2 GHz channels 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
285 * (5170-5320MHz)
286 */
287 u16 band_3_count; /* abs.ofs: 254 */
288 struct iwl4965_eeprom_channel band_3_channels[12]; /* abs.ofs: 256 */
289
290/*
291 * 5.5 GHz channels 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
292 * (5500-5700MHz)
293 */
294 u16 band_4_count; /* abs.ofs: 280 */
295 struct iwl4965_eeprom_channel band_4_channels[11]; /* abs.ofs: 282 */
296
297/*
298 * 5.7 GHz channels 145, 149, 153, 157, 161, 165
299 * (5725-5825MHz)
300 */
301 u16 band_5_count; /* abs.ofs: 304 */
302 struct iwl4965_eeprom_channel band_5_channels[6]; /* abs.ofs: 306 */
303
304 u8 reserved10[2];
305
306
307/*
308 * 2.4 GHz FAT channels 1 (5), 2 (6), 3 (7), 4 (8), 5 (9), 6 (10), 7 (11)
309 *
310 * The channel listed is the center of the lower 20 MHz half of the channel.
311 * The overall center frequency is actually 2 channels (10 MHz) above that,
312 * and the upper half of each FAT channel is centered 4 channels (20 MHz) away
313 * from the lower half; e.g. the upper half of FAT channel 1 is channel 5,
314 * and the overall FAT channel width centers on channel 3.
315 *
316 * NOTE: The RXON command uses 20 MHz channel numbers to specify the
317 * control channel to which to tune. RXON also specifies whether the
318 * control channel is the upper or lower half of a FAT channel.
319 *
320 * NOTE: 4965 does not support FAT channels on 2.4 GHz.
321 */
322 struct iwl4965_eeprom_channel band_24_channels[7]; /* abs.ofs: 320 */
323 u8 reserved11[2];
324
325/*
326 * 5.2 GHz FAT channels 36 (40), 44 (48), 52 (56), 60 (64),
327 * 100 (104), 108 (112), 116 (120), 124 (128), 132 (136), 149 (153), 157 (161)
328 */
329 struct iwl4965_eeprom_channel band_52_channels[11]; /* abs.ofs: 336 */
330 u8 reserved12[6];
331
332/*
333 * 4965 driver requires txpower calibration format version 5 or greater.
334 * Driver does not work with txpower calibration version < 5.
335 * This value is simply a 16-bit number, no major/minor versions here.
336 */
337 u16 calib_version; /* abs.ofs: 364 */
338 u8 reserved13[2];
339 u8 reserved14[96]; /* abs.ofs: 368 */
340
341/*
342 * 4965 Txpower calibration data.
343 */
344 struct iwl4965_eeprom_calib_info calib_info; /* abs.ofs: 464 */
345
346 u8 reserved16[140]; /* fill out to full 1024 byte block */
347
348
349} __attribute__ ((packed));
350
351#define IWL_EEPROM_IMAGE_SIZE 1024
352
353/* End of EEPROM */
354
355struct iwl_eeprom_ops {
356 int (*verify_signature) (struct iwl_priv *priv);
357 int (*acquire_semaphore) (struct iwl_priv *priv);
358 void (*release_semaphore) (struct iwl_priv *priv);
359};
360
361
362void iwl_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac);
363int iwl_eeprom_init(struct iwl_priv *priv);
364
365int iwlcore_eeprom_verify_signature(struct iwl_priv *priv);
366int iwlcore_eeprom_acquire_semaphore(struct iwl_priv *priv);
367void iwlcore_eeprom_release_semaphore(struct iwl_priv *priv);
368
369int iwl_init_channel_map(struct iwl_priv *priv);
370void iwl_free_channel_map(struct iwl_priv *priv);
371const struct iwl_channel_info *iwl_get_channel_info(
372 const struct iwl_priv *priv,
373 enum ieee80211_band band, u16 channel);
374
375#endif /* __iwl_eeprom_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-hcmd.c b/drivers/net/wireless/iwlwifi/iwl-hcmd.c
new file mode 100644
index 000000000000..fdb27f1cdc08
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-hcmd.c
@@ -0,0 +1,278 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Tomas Winkler <tomas.winkler@intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28
29#include <linux/kernel.h>
30#include <linux/module.h>
31#include <linux/version.h>
32#include <net/mac80211.h>
33
34#include "iwl-4965.h" /* FIXME: remove */
35#include "iwl-debug.h"
36#include "iwl-eeprom.h"
37#include "iwl-core.h"
38
39
40#define IWL_CMD(x) case x : return #x
41
42const char *get_cmd_string(u8 cmd)
43{
44 switch (cmd) {
45 IWL_CMD(REPLY_ALIVE);
46 IWL_CMD(REPLY_ERROR);
47 IWL_CMD(REPLY_RXON);
48 IWL_CMD(REPLY_RXON_ASSOC);
49 IWL_CMD(REPLY_QOS_PARAM);
50 IWL_CMD(REPLY_RXON_TIMING);
51 IWL_CMD(REPLY_ADD_STA);
52 IWL_CMD(REPLY_REMOVE_STA);
53 IWL_CMD(REPLY_REMOVE_ALL_STA);
54 IWL_CMD(REPLY_WEPKEY);
55 IWL_CMD(REPLY_TX);
56 IWL_CMD(REPLY_RATE_SCALE);
57 IWL_CMD(REPLY_LEDS_CMD);
58 IWL_CMD(REPLY_TX_LINK_QUALITY_CMD);
59 IWL_CMD(RADAR_NOTIFICATION);
60 IWL_CMD(REPLY_QUIET_CMD);
61 IWL_CMD(REPLY_CHANNEL_SWITCH);
62 IWL_CMD(CHANNEL_SWITCH_NOTIFICATION);
63 IWL_CMD(REPLY_SPECTRUM_MEASUREMENT_CMD);
64 IWL_CMD(SPECTRUM_MEASURE_NOTIFICATION);
65 IWL_CMD(POWER_TABLE_CMD);
66 IWL_CMD(PM_SLEEP_NOTIFICATION);
67 IWL_CMD(PM_DEBUG_STATISTIC_NOTIFIC);
68 IWL_CMD(REPLY_SCAN_CMD);
69 IWL_CMD(REPLY_SCAN_ABORT_CMD);
70 IWL_CMD(SCAN_START_NOTIFICATION);
71 IWL_CMD(SCAN_RESULTS_NOTIFICATION);
72 IWL_CMD(SCAN_COMPLETE_NOTIFICATION);
73 IWL_CMD(BEACON_NOTIFICATION);
74 IWL_CMD(REPLY_TX_BEACON);
75 IWL_CMD(WHO_IS_AWAKE_NOTIFICATION);
76 IWL_CMD(QUIET_NOTIFICATION);
77 IWL_CMD(REPLY_TX_PWR_TABLE_CMD);
78 IWL_CMD(MEASURE_ABORT_NOTIFICATION);
79 IWL_CMD(REPLY_BT_CONFIG);
80 IWL_CMD(REPLY_STATISTICS_CMD);
81 IWL_CMD(STATISTICS_NOTIFICATION);
82 IWL_CMD(REPLY_CARD_STATE_CMD);
83 IWL_CMD(CARD_STATE_NOTIFICATION);
84 IWL_CMD(MISSED_BEACONS_NOTIFICATION);
85 IWL_CMD(REPLY_CT_KILL_CONFIG_CMD);
86 IWL_CMD(SENSITIVITY_CMD);
87 IWL_CMD(REPLY_PHY_CALIBRATION_CMD);
88 IWL_CMD(REPLY_RX_PHY_CMD);
89 IWL_CMD(REPLY_RX_MPDU_CMD);
90 IWL_CMD(REPLY_RX);
91 IWL_CMD(REPLY_COMPRESSED_BA);
92 default:
93 return "UNKNOWN";
94
95 }
96}
97EXPORT_SYMBOL(get_cmd_string);
98
99#define HOST_COMPLETE_TIMEOUT (HZ / 2)
100
101static int iwl_generic_cmd_callback(struct iwl_priv *priv,
102 struct iwl_cmd *cmd, struct sk_buff *skb)
103{
104 struct iwl4965_rx_packet *pkt = NULL;
105
106 if (!skb) {
107 IWL_ERROR("Error: Response NULL in %s.\n",
108 get_cmd_string(cmd->hdr.cmd));
109 return 1;
110 }
111
112 pkt = (struct iwl4965_rx_packet *)skb->data;
113 if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
114 IWL_ERROR("Bad return from %s (0x%08X)\n",
115 get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
116 return 1;
117 }
118
119 IWL_DEBUG_HC("back from %s (0x%08X)\n",
120 get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
121
122 /* Let iwl_tx_complete free the response skb */
123 return 1;
124}
125
126static int iwl_send_cmd_async(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
127{
128 int ret;
129
130 BUG_ON(!(cmd->meta.flags & CMD_ASYNC));
131
132 /* An asynchronous command can not expect an SKB to be set. */
133 BUG_ON(cmd->meta.flags & CMD_WANT_SKB);
134
135 /* Assign a generic callback if one is not provided */
136 if (!cmd->meta.u.callback)
137 cmd->meta.u.callback = iwl_generic_cmd_callback;
138
139 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
140 return -EBUSY;
141
142 ret = priv->cfg->ops->utils->enqueue_hcmd(priv, cmd);
143 if (ret < 0) {
144 IWL_ERROR("Error sending %s: enqueue_hcmd failed: %d\n",
145 get_cmd_string(cmd->id), ret);
146 return ret;
147 }
148 return 0;
149}
150
151int iwl_send_cmd_sync(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
152{
153 int cmd_idx;
154 int ret;
155
156 BUG_ON(cmd->meta.flags & CMD_ASYNC);
157
158 /* A synchronous command can not have a callback set. */
159 BUG_ON(cmd->meta.u.callback != NULL);
160
161 if (test_and_set_bit(STATUS_HCMD_SYNC_ACTIVE, &priv->status)) {
162 IWL_ERROR("Error sending %s: Already sending a host command\n",
163 get_cmd_string(cmd->id));
164 ret = -EBUSY;
165 goto out;
166 }
167
168 set_bit(STATUS_HCMD_ACTIVE, &priv->status);
169
170 if (cmd->meta.flags & CMD_WANT_SKB)
171 cmd->meta.source = &cmd->meta;
172
173 cmd_idx = priv->cfg->ops->utils->enqueue_hcmd(priv, cmd);
174 if (cmd_idx < 0) {
175 ret = cmd_idx;
176 IWL_ERROR("Error sending %s: enqueue_hcmd failed: %d\n",
177 get_cmd_string(cmd->id), ret);
178 goto out;
179 }
180
181 ret = wait_event_interruptible_timeout(priv->wait_command_queue,
182 !test_bit(STATUS_HCMD_ACTIVE, &priv->status),
183 HOST_COMPLETE_TIMEOUT);
184 if (!ret) {
185 if (test_bit(STATUS_HCMD_ACTIVE, &priv->status)) {
186 IWL_ERROR("Error sending %s: time out after %dms.\n",
187 get_cmd_string(cmd->id),
188 jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
189
190 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
191 ret = -ETIMEDOUT;
192 goto cancel;
193 }
194 }
195
196 if (test_bit(STATUS_RF_KILL_HW, &priv->status)) {
197 IWL_DEBUG_INFO("Command %s aborted: RF KILL Switch\n",
198 get_cmd_string(cmd->id));
199 ret = -ECANCELED;
200 goto fail;
201 }
202 if (test_bit(STATUS_FW_ERROR, &priv->status)) {
203 IWL_DEBUG_INFO("Command %s failed: FW Error\n",
204 get_cmd_string(cmd->id));
205 ret = -EIO;
206 goto fail;
207 }
208 if ((cmd->meta.flags & CMD_WANT_SKB) && !cmd->meta.u.skb) {
209 IWL_ERROR("Error: Response NULL in '%s'\n",
210 get_cmd_string(cmd->id));
211 ret = -EIO;
212 goto out;
213 }
214
215 ret = 0;
216 goto out;
217
218cancel:
219 if (cmd->meta.flags & CMD_WANT_SKB) {
220 struct iwl_cmd *qcmd;
221
222 /* Cancel the CMD_WANT_SKB flag for the cmd in the
223 * TX cmd queue. Otherwise in case the cmd comes
224 * in later, it will possibly set an invalid
225 * address (cmd->meta.source). */
226 qcmd = &priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_idx];
227 qcmd->meta.flags &= ~CMD_WANT_SKB;
228 }
229fail:
230 if (cmd->meta.u.skb) {
231 dev_kfree_skb_any(cmd->meta.u.skb);
232 cmd->meta.u.skb = NULL;
233 }
234out:
235 clear_bit(STATUS_HCMD_SYNC_ACTIVE, &priv->status);
236 return ret;
237}
238EXPORT_SYMBOL(iwl_send_cmd_sync);
239
240int iwl_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
241{
242 if (cmd->meta.flags & CMD_ASYNC)
243 return iwl_send_cmd_async(priv, cmd);
244
245 return iwl_send_cmd_sync(priv, cmd);
246}
247EXPORT_SYMBOL(iwl_send_cmd);
248
249int iwl_send_cmd_pdu(struct iwl_priv *priv, u8 id, u16 len, const void *data)
250{
251 struct iwl_host_cmd cmd = {
252 .id = id,
253 .len = len,
254 .data = data,
255 };
256
257 return iwl_send_cmd_sync(priv, &cmd);
258}
259EXPORT_SYMBOL(iwl_send_cmd_pdu);
260
261int iwl_send_cmd_pdu_async(struct iwl_priv *priv,
262 u8 id, u16 len, const void *data,
263 int (*callback)(struct iwl_priv *priv,
264 struct iwl_cmd *cmd,
265 struct sk_buff *skb))
266{
267 struct iwl_host_cmd cmd = {
268 .id = id,
269 .len = len,
270 .data = data,
271 };
272
273 cmd.meta.flags |= CMD_ASYNC;
274 cmd.meta.u.callback = callback;
275
276 return iwl_send_cmd_async(priv, &cmd);
277}
278EXPORT_SYMBOL(iwl_send_cmd_pdu_async);
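For most callers the convenience wrappers at the end are the whole interface: fill in a command payload, pick its REPLY_* id, and iwl_send_cmd_pdu() blocks until the uCode answers or HOST_COMPLETE_TIMEOUT (HZ/2) expires. A hedged sketch, where cmd_payload stands in for whichever command struct (from iwl-4965-commands.h) the caller has filled in:

	int ret;

	ret = iwl_send_cmd_pdu(priv, REPLY_BT_CONFIG, sizeof(cmd_payload),
			       &cmd_payload);
	if (ret)
		IWL_ERROR("REPLY_BT_CONFIG failed: %d\n", ret);

Asynchronous callers use iwl_send_cmd_pdu_async() instead, and get iwl_generic_cmd_callback() unless they supply their own callback.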
diff --git a/drivers/net/wireless/iwlwifi/iwl-helpers.h b/drivers/net/wireless/iwlwifi/iwl-helpers.h
index 8993cca81b40..a443472bea62 100644
--- a/drivers/net/wireless/iwlwifi/iwl-helpers.h
+++ b/drivers/net/wireless/iwlwifi/iwl-helpers.h
@@ -1,6 +1,6 @@
 /******************************************************************************
  *
- * Copyright(c) 2003 - 2007 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2008 Intel Corporation. All rights reserved.
  *
  * Portions of this file are derived from the ipw3945 project, as well
  * as portions of the ieee80211 subsystem header files.
@@ -254,6 +254,26 @@ static inline u8 iwl_get_dma_hi_address(dma_addr_t addr)
254 return sizeof(addr) > sizeof(u32) ? (addr >> 16) >> 16 : 0; 254 return sizeof(addr) > sizeof(u32) ? (addr >> 16) >> 16 : 0;
255} 255}
256 256
257/**
258 * iwl_queue_inc_wrap - increment queue index, wrap back to beginning
259 * @index -- current index
260 * @n_bd -- total number of entries in queue (must be power of 2)
261 */
262static inline int iwl_queue_inc_wrap(int index, int n_bd)
263{
264 return ++index & (n_bd - 1);
265}
266
267/**
268 * iwl_queue_dec_wrap - decrement queue index, wrap back to end
269 * @index -- current index
270 * @n_bd -- total number of entries in queue (must be power of 2)
271 */
272static inline int iwl_queue_dec_wrap(int index, int n_bd)
273{
274 return --index & (n_bd - 1);
275}
276
257/* TODO: Move fw_desc functions to iwl-pci.ko */ 277/* TODO: Move fw_desc functions to iwl-pci.ko */
258static inline void iwl_free_fw_desc(struct pci_dev *pci_dev, 278static inline void iwl_free_fw_desc(struct pci_dev *pci_dev,
259 struct fw_desc *desc) 279 struct fw_desc *desc)
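The two helpers added to iwl-helpers.h rely on n_bd being a power of two: masking with (n_bd - 1) acts like a modulo, and because -1 masks back to n_bd - 1, a decrement wraps to the end of the ring. A small standalone illustration of that trick (plain C, illustrative names):

/*
 * Standalone sketch of the power-of-two wrap trick used by
 * iwl_queue_inc_wrap()/iwl_queue_dec_wrap() above.
 */
#include <stdio.h>

static int inc_wrap(int index, int n_bd)	/* like iwl_queue_inc_wrap */
{
	return ++index & (n_bd - 1);
}

static int dec_wrap(int index, int n_bd)	/* like iwl_queue_dec_wrap */
{
	return --index & (n_bd - 1);
}

int main(void)
{
	printf("%d %d\n", inc_wrap(255, 256), dec_wrap(0, 256));	/* 0 255 */
	return 0;
}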
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.h b/drivers/net/wireless/iwlwifi/iwl-io.h
new file mode 100644
index 000000000000..5bc3df432d2d
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-io.h
@@ -0,0 +1,429 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2008 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but WITHOUT
12 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * more details.
15 *
16 * You should have received a copy of the GNU General Public License along with
17 * this program; if not, write to the Free Software Foundation, Inc.,
18 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
19 *
20 * The full GNU General Public License is included in this distribution in the
21 * file called LICENSE.
22 *
23 * Contact Information:
24 * James P. Ketrenos <ipw2100-admin@linux.intel.com>
25 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26 *
27 *****************************************************************************/
28
29#ifndef __iwl_io_h__
30#define __iwl_io_h__
31
32#include <linux/io.h>
33
34#include "iwl-debug.h"
35
36/*
37 * IO, register, and NIC memory access functions
38 *
39 * NOTE on naming convention and macro usage for these
40 *
41 * A single _ prefix before an access function means that no state
42 * check or debug information is printed when that function is called.
43 *
44 * A double __ prefix before an access function means that state is checked
45 * and the current line number and caller function name are printed in addition
46 * to any other debug output.
47 *
48 * The non-prefixed name is the #define that maps the caller into a
49 * #define that provides the caller's name and __LINE__ to the double
50 * prefix version.
51 *
52 * If you wish to call the function without any debug or state checking,
53 * you should use the single _ prefix version (as is used by dependent IO
54 * routines, for example _iwl_read_direct32 calls the non-check version of
55 * _iwl_read32.)
56 *
57 * These declarations are *extremely* useful in quickly isolating code deltas
58 * that result in misconfiguration of the hardware I/O. In combination with
59 * git-bisect and the IO debug level you can quickly determine the specific
60 * commit which breaks the IO sequence to the hardware.
61 *
62 */
63
64#define _iwl_write32(priv, ofs, val) writel((val), (priv)->hw_base + (ofs))
65#ifdef CONFIG_IWLWIFI_DEBUG
66static inline void __iwl_write32(const char *f, u32 l, struct iwl_priv *priv,
67 u32 ofs, u32 val)
68{
69 IWL_DEBUG_IO("write32(0x%08X, 0x%08X) - %s %d\n", ofs, val, f, l);
70 _iwl_write32(priv, ofs, val);
71}
72#define iwl_write32(priv, ofs, val) \
73 __iwl_write32(__FILE__, __LINE__, priv, ofs, val)
74#else
75#define iwl_write32(priv, ofs, val) _iwl_write32(priv, ofs, val)
76#endif
77
78#define _iwl_read32(priv, ofs) readl((priv)->hw_base + (ofs))
79#ifdef CONFIG_IWLWIFI_DEBUG
80static inline u32 __iwl_read32(char *f, u32 l, struct iwl_priv *priv, u32 ofs)
81{
82 IWL_DEBUG_IO("read_direct32(0x%08X) - %s %d\n", ofs, f, l);
83 return _iwl_read32(priv, ofs);
84}
85#define iwl_read32(priv, ofs) __iwl_read32(__FILE__, __LINE__, priv, ofs)
86#else
87#define iwl_read32(p, o) _iwl_read32(p, o)
88#endif
89
90static inline int _iwl_poll_bit(struct iwl_priv *priv, u32 addr,
91 u32 bits, u32 mask, int timeout)
92{
93 int i = 0;
94
95 do {
96 if ((_iwl_read32(priv, addr) & mask) == (bits & mask))
97 return i;
98 mdelay(10);
99 i += 10;
100 } while (i < timeout);
101
102 return -ETIMEDOUT;
103}
104#ifdef CONFIG_IWLWIFI_DEBUG
105static inline int __iwl_poll_bit(const char *f, u32 l,
106 struct iwl_priv *priv, u32 addr,
107 u32 bits, u32 mask, int timeout)
108{
109 int ret = _iwl_poll_bit(priv, addr, bits, mask, timeout);
110 IWL_DEBUG_IO("poll_bit(0x%08X, 0x%08X, 0x%08X) - %s- %s %d\n",
111 addr, bits, mask,
112 unlikely(ret == -ETIMEDOUT)?"timeout":"", f, l);
113 return ret;
114}
115#define iwl_poll_bit(priv, addr, bits, mask, timeout) \
116 __iwl_poll_bit(__FILE__, __LINE__, priv, addr, bits, mask, timeout)
117#else
118#define iwl_poll_bit(p, a, b, m, t) _iwl_poll_bit(p, a, b, m, t)
119#endif
120
121static inline void _iwl_set_bit(struct iwl_priv *priv, u32 reg, u32 mask)
122{
123 _iwl_write32(priv, reg, _iwl_read32(priv, reg) | mask);
124}
125#ifdef CONFIG_IWLWIFI_DEBUG
126static inline void __iwl_set_bit(const char *f, u32 l,
127 struct iwl_priv *priv, u32 reg, u32 mask)
128{
129 u32 val = _iwl_read32(priv, reg) | mask;
130 IWL_DEBUG_IO("set_bit(0x%08X, 0x%08X) = 0x%08X\n", reg, mask, val);
131 _iwl_write32(priv, reg, val);
132}
133#define iwl_set_bit(p, r, m) __iwl_set_bit(__FILE__, __LINE__, p, r, m)
134#else
135#define iwl_set_bit(p, r, m) _iwl_set_bit(p, r, m)
136#endif
137
138static inline void _iwl_clear_bit(struct iwl_priv *priv, u32 reg, u32 mask)
139{
140 _iwl_write32(priv, reg, _iwl_read32(priv, reg) & ~mask);
141}
142#ifdef CONFIG_IWLWIFI_DEBUG
143static inline void __iwl_clear_bit(const char *f, u32 l,
144 struct iwl_priv *priv, u32 reg, u32 mask)
145{
146 u32 val = _iwl_read32(priv, reg) & ~mask;
147 IWL_DEBUG_IO("clear_bit(0x%08X, 0x%08X) = 0x%08X\n", reg, mask, val);
148 _iwl_write32(priv, reg, val);
149}
150#define iwl_clear_bit(p, r, m) __iwl_clear_bit(__FILE__, __LINE__, p, r, m)
151#else
152#define iwl_clear_bit(p, r, m) _iwl_clear_bit(p, r, m)
153#endif
154
155static inline int _iwl_grab_nic_access(struct iwl_priv *priv)
156{
157 int ret;
158 u32 gp_ctl;
159
160#ifdef CONFIG_IWLWIFI_DEBUG
161 if (atomic_read(&priv->restrict_refcnt))
162 return 0;
163#endif
164 if (test_bit(STATUS_RF_KILL_HW, &priv->status) ||
165 test_bit(STATUS_RF_KILL_SW, &priv->status)) {
166 IWL_WARNING("WARNING: Requesting MAC access during RFKILL "
167 "wakes up NIC\n");
168
169 /* 10 msec allows time for NIC to complete its data save */
170 gp_ctl = _iwl_read32(priv, CSR_GP_CNTRL);
171 if (gp_ctl & CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY) {
172 IWL_DEBUG_RF_KILL("Wait for complete power-down, "
173 "gpctl = 0x%08x\n", gp_ctl);
174 mdelay(10);
175 } else
176 IWL_DEBUG_RF_KILL("power-down complete, "
177 "gpctl = 0x%08x\n", gp_ctl);
178 }
179
180 /* this bit wakes up the NIC */
181 _iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
182 ret = _iwl_poll_bit(priv, CSR_GP_CNTRL,
183 CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
184 (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
185 CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 50);
186 if (ret < 0) {
187 IWL_ERROR("MAC is in deep sleep!\n");
188 return -EIO;
189 }
190
191#ifdef CONFIG_IWLWIFI_DEBUG
192 atomic_inc(&priv->restrict_refcnt);
193#endif
194 return 0;
195}
196
197#ifdef CONFIG_IWLWIFI_DEBUG
198static inline int __iwl_grab_nic_access(const char *f, u32 l,
199 struct iwl_priv *priv)
200{
201 if (atomic_read(&priv->restrict_refcnt))
202 IWL_ERROR("Grabbing access while already held %s %d.\n", f, l);
203
204 IWL_DEBUG_IO("grabbing nic access - %s %d\n", f, l);
205 return _iwl_grab_nic_access(priv);
206}
207#define iwl_grab_nic_access(priv) \
208 __iwl_grab_nic_access(__FILE__, __LINE__, priv)
209#else
210#define iwl_grab_nic_access(priv) \
211 _iwl_grab_nic_access(priv)
212#endif
213
214static inline void _iwl_release_nic_access(struct iwl_priv *priv)
215{
216#ifdef CONFIG_IWLWIFI_DEBUG
217 if (atomic_dec_and_test(&priv->restrict_refcnt))
218#endif
219 _iwl_clear_bit(priv, CSR_GP_CNTRL,
220 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
221}
222#ifdef CONFIG_IWLWIFI_DEBUG
223static inline void __iwl_release_nic_access(const char *f, u32 l,
224 struct iwl_priv *priv)
225{
226 if (atomic_read(&priv->restrict_refcnt) <= 0)
227		IWL_ERROR("Releasing unheld nic access from %s %d.\n", f, l);
228
229 IWL_DEBUG_IO("releasing nic access - %s %d\n", f, l);
230 _iwl_release_nic_access(priv);
231}
232#define iwl_release_nic_access(priv) \
233 __iwl_release_nic_access(__FILE__, __LINE__, priv)
234#else
235#define iwl_release_nic_access(priv) \
236 _iwl_release_nic_access(priv)
237#endif
238
239static inline u32 _iwl_read_direct32(struct iwl_priv *priv, u32 reg)
240{
241 return _iwl_read32(priv, reg);
242}
243#ifdef CONFIG_IWLWIFI_DEBUG
244static inline u32 __iwl_read_direct32(const char *f, u32 l,
245 struct iwl_priv *priv, u32 reg)
246{
247 u32 value = _iwl_read_direct32(priv, reg);
248 if (!atomic_read(&priv->restrict_refcnt))
249 IWL_ERROR("Nic access not held from %s %d\n", f, l);
250 IWL_DEBUG_IO("read_direct32(0x%4X) = 0x%08x - %s %d \n", reg, value,
251 f, l);
252 return value;
253}
254#define iwl_read_direct32(priv, reg) \
255 __iwl_read_direct32(__FILE__, __LINE__, priv, reg)
256#else
257#define iwl_read_direct32 _iwl_read_direct32
258#endif
259
260static inline void _iwl_write_direct32(struct iwl_priv *priv,
261 u32 reg, u32 value)
262{
263 _iwl_write32(priv, reg, value);
264}
265#ifdef CONFIG_IWLWIFI_DEBUG
266static inline void __iwl_write_direct32(const char *f, u32 line,
267 struct iwl_priv *priv, u32 reg, u32 value)
268{
269 if (!atomic_read(&priv->restrict_refcnt))
270 IWL_ERROR("Nic access not held from %s line %d\n", f, line);
271 _iwl_write_direct32(priv, reg, value);
272}
273#define iwl_write_direct32(priv, reg, value) \
274 __iwl_write_direct32(__func__, __LINE__, priv, reg, value)
275#else
276#define iwl_write_direct32 _iwl_write_direct32
277#endif
278
279static inline void iwl_write_reg_buf(struct iwl_priv *priv,
280 u32 reg, u32 len, u32 *values)
281{
282 u32 count = sizeof(u32);
283
284 if ((priv != NULL) && (values != NULL)) {
285 for (; 0 < len; len -= count, reg += count, values++)
286 _iwl_write_direct32(priv, reg, *values);
287 }
288}
289
290static inline int _iwl_poll_direct_bit(struct iwl_priv *priv,
291 u32 addr, u32 mask, int timeout)
292{
293 int i = 0;
294
295 do {
296 if ((_iwl_read_direct32(priv, addr) & mask) == mask)
297 return i;
298 mdelay(10);
299 i += 10;
300 } while (i < timeout);
301
302 return -ETIMEDOUT;
303}
304
305#ifdef CONFIG_IWLWIFI_DEBUG
306static inline int __iwl_poll_direct_bit(const char *f, u32 l,
307 struct iwl_priv *priv,
308 u32 addr, u32 mask, int timeout)
309{
310 int ret = _iwl_poll_direct_bit(priv, addr, mask, timeout);
311
312 if (unlikely(ret == -ETIMEDOUT))
313 IWL_DEBUG_IO("poll_direct_bit(0x%08X, 0x%08X) - "
314 "timedout - %s %d\n", addr, mask, f, l);
315 else
316 IWL_DEBUG_IO("poll_direct_bit(0x%08X, 0x%08X) = 0x%08X "
317 "- %s %d\n", addr, mask, ret, f, l);
318 return ret;
319}
320#define iwl_poll_direct_bit(priv, addr, mask, timeout) \
321 __iwl_poll_direct_bit(__FILE__, __LINE__, priv, addr, mask, timeout)
322#else
323#define iwl_poll_direct_bit _iwl_poll_direct_bit
324#endif
325
326static inline u32 _iwl_read_prph(struct iwl_priv *priv, u32 reg)
327{
328 _iwl_write_direct32(priv, HBUS_TARG_PRPH_RADDR, reg | (3 << 24));
329 return _iwl_read_direct32(priv, HBUS_TARG_PRPH_RDAT);
330}
331#ifdef CONFIG_IWLWIFI_DEBUG
332static inline u32 __iwl_read_prph(const char *f, u32 line,
333 struct iwl_priv *priv, u32 reg)
334{
335 if (!atomic_read(&priv->restrict_refcnt))
336 IWL_ERROR("Nic access not held from %s line %d\n", f, line);
337 return _iwl_read_prph(priv, reg);
338}
339
340#define iwl_read_prph(priv, reg) \
341 __iwl_read_prph(__func__, __LINE__, priv, reg)
342#else
343#define iwl_read_prph _iwl_read_prph
344#endif
345
346static inline void _iwl_write_prph(struct iwl_priv *priv,
347 u32 addr, u32 val)
348{
349 _iwl_write_direct32(priv, HBUS_TARG_PRPH_WADDR,
350 ((addr & 0x0000FFFF) | (3 << 24)));
351 _iwl_write_direct32(priv, HBUS_TARG_PRPH_WDAT, val);
352}
353#ifdef CONFIG_IWLWIFI_DEBUG
354static inline void __iwl_write_prph(const char *f, u32 line,
355 struct iwl_priv *priv, u32 addr, u32 val)
356{
357 if (!atomic_read(&priv->restrict_refcnt))
358 IWL_ERROR("Nic access not held from %s line %d\n", f, line);
359 _iwl_write_prph(priv, addr, val);
360}
361
362#define iwl_write_prph(priv, addr, val) \
363	__iwl_write_prph(__func__, __LINE__, priv, addr, val)
364#else
365#define iwl_write_prph _iwl_write_prph
366#endif
367
368#define _iwl_set_bits_prph(priv, reg, mask) \
369 _iwl_write_prph(priv, reg, (_iwl_read_prph(priv, reg) | mask))
370#ifdef CONFIG_IWLWIFI_DEBUG
371static inline void __iwl_set_bits_prph(const char *f, u32 line,
372 struct iwl_priv *priv,
373 u32 reg, u32 mask)
374{
375 if (!atomic_read(&priv->restrict_refcnt))
376 IWL_ERROR("Nic access not held from %s line %d\n", f, line);
377
378 _iwl_set_bits_prph(priv, reg, mask);
379}
380#define iwl_set_bits_prph(priv, reg, mask) \
381 __iwl_set_bits_prph(__func__, __LINE__, priv, reg, mask)
382#else
383#define iwl_set_bits_prph _iwl_set_bits_prph
384#endif
385
386#define _iwl_set_bits_mask_prph(priv, reg, bits, mask) \
387 _iwl_write_prph(priv, reg, ((_iwl_read_prph(priv, reg) & mask) | bits))
388
389#ifdef CONFIG_IWLWIFI_DEBUG
390static inline void __iwl_set_bits_mask_prph(const char *f, u32 line,
391 struct iwl_priv *priv, u32 reg, u32 bits, u32 mask)
392{
393 if (!atomic_read(&priv->restrict_refcnt))
394 IWL_ERROR("Nic access not held from %s line %d\n", f, line);
395 _iwl_set_bits_mask_prph(priv, reg, bits, mask);
396}
397#define iwl_set_bits_mask_prph(priv, reg, bits, mask) \
398 __iwl_set_bits_mask_prph(__func__, __LINE__, priv, reg, bits, mask)
399#else
400#define iwl_set_bits_mask_prph _iwl_set_bits_mask_prph
401#endif
402
403static inline void iwl_clear_bits_prph(struct iwl_priv *priv,
404				       u32 reg, u32 mask)
405{
406 u32 val = _iwl_read_prph(priv, reg);
407 _iwl_write_prph(priv, reg, (val & ~mask));
408}
409
410static inline u32 iwl_read_targ_mem(struct iwl_priv *priv, u32 addr)
411{
412 iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR, addr);
413 return iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
414}
415
416static inline void iwl_write_targ_mem(struct iwl_priv *priv, u32 addr, u32 val)
417{
418 iwl_write_direct32(priv, HBUS_TARG_MEM_WADDR, addr);
419 iwl_write_direct32(priv, HBUS_TARG_MEM_WDAT, val);
420}
421
422static inline void iwl_write_targ_mem_buf(struct iwl_priv *priv, u32 addr,
423 u32 len, u32 *values)
424{
425 iwl_write_direct32(priv, HBUS_TARG_MEM_WADDR, addr);
426 for (; 0 < len; len -= sizeof(u32), values++)
427 iwl_write_direct32(priv, HBUS_TARG_MEM_WDAT, *values);
428}
429#endif
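The comment block at the top of iwl-io.h describes a three-level naming convention: a single-underscore helper performs the raw access, a double-underscore helper adds state checks plus caller information, and the plain name is a macro that selects one of the two depending on CONFIG_IWLWIFI_DEBUG. A generic sketch of that wrapper pattern, outside the driver and with made-up names (regs, read32):

/*
 * Generic sketch of the _/__ wrapper convention (illustrative names only).
 */
#include <stdio.h>

#define DEBUG_IO 1			/* stands in for CONFIG_IWLWIFI_DEBUG */

static unsigned int regs[16];		/* stand-in for the mapped hw_base */

static unsigned int _read32(unsigned int ofs)	/* raw, unchecked access */
{
	return regs[ofs / 4];
}

#if DEBUG_IO
static unsigned int __read32(const char *f, int l, unsigned int ofs)
{
	printf("read32(0x%08X) - %s %d\n", ofs, f, l);	/* debug trace */
	return _read32(ofs);
}
#define read32(ofs)	__read32(__FILE__, __LINE__, (ofs))
#else
#define read32(ofs)	_read32(ofs)
#endif

int main(void)
{
	regs[1] = 0xdeadbeef;
	printf("0x%08X\n", read32(4));	/* traced read of "register" 4 */
	return 0;
}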
diff --git a/drivers/net/wireless/iwlwifi/iwl-led.c b/drivers/net/wireless/iwlwifi/iwl-led.c
new file mode 100644
index 000000000000..03fdf5b434a1
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-led.c
@@ -0,0 +1,449 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2008 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * James P. Ketrenos <ipw2100-admin@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27
28#include <linux/kernel.h>
29#include <linux/module.h>
30#include <linux/version.h>
31#include <linux/init.h>
32#include <linux/pci.h>
33#include <linux/dma-mapping.h>
34#include <linux/delay.h>
35#include <linux/skbuff.h>
36#include <linux/netdevice.h>
37#include <linux/wireless.h>
38#include <net/mac80211.h>
39#include <linux/etherdevice.h>
40#include <asm/unaligned.h>
41
42#include "iwl-4965.h"
43#include "iwl-core.h"
44#include "iwl-io.h"
45#include "iwl-helpers.h"
46
47#define IWL_1MB_RATE (128 * 1024)
48#define IWL_LED_THRESHOLD (16)
49#define IWL_MAX_BLINK_TBL (10)
50
51static const struct {
52 u16 tpt;
53 u8 on_time;
54 u8 of_time;
55} blink_tbl[] =
56{
57 {300, 25, 25},
58 {200, 40, 40},
59 {100, 55, 55},
60 {70, 65, 65},
61 {50, 75, 75},
62 {20, 85, 85},
63 {15, 95, 95 },
64 {10, 110, 110},
65 {5, 130, 130},
66 {0, 167, 167}
67};
68
69static int iwl_led_cmd_callback(struct iwl_priv *priv,
70 struct iwl_cmd *cmd, struct sk_buff *skb)
71{
72 return 1;
73}
74
75
76/* Send led command */
77static int iwl_send_led_cmd(struct iwl_priv *priv,
78 struct iwl4965_led_cmd *led_cmd)
79{
80 struct iwl_host_cmd cmd = {
81 .id = REPLY_LEDS_CMD,
82 .len = sizeof(struct iwl4965_led_cmd),
83 .data = led_cmd,
84 .meta.flags = CMD_ASYNC,
85 .meta.u.callback = iwl_led_cmd_callback
86 };
87 u32 reg;
88
89 reg = iwl_read32(priv, CSR_LED_REG);
90 if (reg != (reg & CSR_LED_BSM_CTRL_MSK))
91 iwl_write32(priv, CSR_LED_REG, reg & CSR_LED_BSM_CTRL_MSK);
92
93 return iwl_send_cmd(priv, &cmd);
94}
95
96
97/* Set led on command */
98static int iwl4965_led_on(struct iwl_priv *priv, int led_id)
99{
100 struct iwl4965_led_cmd led_cmd = {
101 .id = led_id,
102 .on = IWL_LED_SOLID,
103 .off = 0,
104 .interval = IWL_DEF_LED_INTRVL
105 };
106 return iwl_send_led_cmd(priv, &led_cmd);
107}
108
109/* Set led blink (pattern) command */
110static int iwl4965_led_pattern(struct iwl_priv *priv, int led_id,
111 enum led_brightness brightness)
112{
113 struct iwl4965_led_cmd led_cmd = {
114 .id = led_id,
115 .on = brightness,
116 .off = brightness,
117 .interval = IWL_DEF_LED_INTRVL
118 };
119 if (brightness == LED_FULL) {
120 led_cmd.on = IWL_LED_SOLID;
121 led_cmd.off = 0;
122 }
123 return iwl_send_led_cmd(priv, &led_cmd);
124}
125
126/* Set led register on */
127static int iwl4965_led_on_reg(struct iwl_priv *priv, int led_id)
128{
129 IWL_DEBUG_LED("led on %d\n", led_id);
130 iwl_write32(priv, CSR_LED_REG, CSR_LED_REG_TRUN_ON);
131 return 0;
132}
133
134#if 0
135/* Set led off command */
136int iwl4965_led_off(struct iwl_priv *priv, int led_id)
137{
138 struct iwl4965_led_cmd led_cmd = {
139 .id = led_id,
140 .on = 0,
141 .off = 0,
142 .interval = IWL_DEF_LED_INTRVL
143 };
144 IWL_DEBUG_LED("led off %d\n", led_id);
145 return iwl_send_led_cmd(priv, &led_cmd);
146}
147#endif
148
149
150/* Set led register off */
151static int iwl4965_led_off_reg(struct iwl_priv *priv, int led_id)
152{
153 IWL_DEBUG_LED("radio off\n");
154 iwl_write32(priv, CSR_LED_REG, CSR_LED_REG_TRUN_OFF);
155 return 0;
156}
157
158/* Set led blink command */
159static int iwl4965_led_not_solid(struct iwl_priv *priv, int led_id,
160 u8 brightness)
161{
162 struct iwl4965_led_cmd led_cmd = {
163 .id = led_id,
164 .on = brightness,
165 .off = brightness,
166 .interval = IWL_DEF_LED_INTRVL
167 };
168
169 return iwl_send_led_cmd(priv, &led_cmd);
170}
171
172
173/*
174 * brightness callback function for the Tx/Rx LEDs
175 */
176static int iwl4965_led_associated(struct iwl_priv *priv, int led_id)
177{
178 if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
179 !test_bit(STATUS_READY, &priv->status))
180 return 0;
181
182
183 /* start counting Tx/Rx bytes */
184 if (!priv->last_blink_time && priv->allow_blinking)
185 priv->last_blink_time = jiffies;
186 return 0;
187}
188
189/*
190 * brightness callback for the association and radio LEDs
191 */
192static void iwl4965_led_brightness_set(struct led_classdev *led_cdev,
193 enum led_brightness brightness)
194{
195 struct iwl4965_led *led = container_of(led_cdev,
196 struct iwl4965_led, led_dev);
197 struct iwl_priv *priv = led->priv;
198
199 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
200 return;
201
202 switch (brightness) {
203 case LED_FULL:
204 if (led->type == IWL_LED_TRG_ASSOC)
205 priv->allow_blinking = 1;
206
207 if (led->led_on)
208 led->led_on(priv, IWL_LED_LINK);
209 break;
210 case LED_OFF:
211 if (led->type == IWL_LED_TRG_ASSOC)
212 priv->allow_blinking = 0;
213
214 if (led->led_off)
215 led->led_off(priv, IWL_LED_LINK);
216 break;
217 default:
218 if (led->led_pattern)
219 led->led_pattern(priv, IWL_LED_LINK, brightness);
220 break;
221 }
222}
223
224
225
226/*
227 * Register led class with the system
228 */
229static int iwl_leds_register_led(struct iwl_priv *priv,
230 struct iwl4965_led *led,
231 enum led_type type, u8 set_led,
232 const char *name, char *trigger)
233{
234 struct device *device = wiphy_dev(priv->hw->wiphy);
235 int ret;
236
237 led->led_dev.name = name;
238 led->led_dev.brightness_set = iwl4965_led_brightness_set;
239 led->led_dev.default_trigger = trigger;
240
241 led->priv = priv;
242 led->type = type;
243
244 ret = led_classdev_register(device, &led->led_dev);
245 if (ret) {
246 IWL_ERROR("Error: failed to register led handler.\n");
247 return ret;
248 }
249
250 led->registered = 1;
251
252 if (set_led && led->led_on)
253 led->led_on(priv, IWL_LED_LINK);
254
255 return 0;
256}
257
258
259/*
260 * calculate blink rate according to Tx/Rx activity over the last 2 seconds
261 */
262static inline u8 get_blink_rate(struct iwl_priv *priv)
263{
264 int i;
265 u8 blink_rate;
266 u64 current_tpt = priv->tx_stats[2].bytes + priv->rx_stats[2].bytes;
267 s64 tpt = current_tpt - priv->led_tpt;
268
269	if (tpt < 0) /* wraparound */
270 tpt = -tpt;
271
272 priv->led_tpt = current_tpt;
273
274 if (tpt < IWL_LED_THRESHOLD) {
275 i = IWL_MAX_BLINK_TBL;
276 } else {
277 for (i = 0; i < IWL_MAX_BLINK_TBL; i++)
278 if (tpt > (blink_tbl[i].tpt * IWL_1MB_RATE))
279 break;
280 }
281	/* if no frames were transferred */
282 if ((i == IWL_MAX_BLINK_TBL) || !priv->allow_blinking)
283 blink_rate = IWL_LED_SOLID;
284 else
285 blink_rate = blink_tbl[i].on_time;
286
287 return blink_rate;
288}
289
290static inline int is_rf_kill(struct iwl_priv *priv)
291{
292 return test_bit(STATUS_RF_KILL_HW, &priv->status) ||
293 test_bit(STATUS_RF_KILL_SW, &priv->status);
294}
295
296/*
297 * This function is called from the handler. Since sending the LED command
298 * can happen very frequently, we postpone it so that it is issued from the
299 * REPLY handler, once we know the uCode is up.
300 */
301void iwl_leds_background(struct iwl_priv *priv)
302{
303 u8 blink_rate;
304
305 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
306 priv->last_blink_time = 0;
307 return;
308 }
309 if (is_rf_kill(priv)) {
310 priv->last_blink_time = 0;
311 return;
312 }
313
314 if (!priv->allow_blinking) {
315 priv->last_blink_time = 0;
316 if (priv->last_blink_rate != IWL_LED_SOLID) {
317 priv->last_blink_rate = IWL_LED_SOLID;
318 iwl4965_led_on(priv, IWL_LED_LINK);
319 }
320 return;
321 }
322 if (!priv->last_blink_time ||
323 !time_after(jiffies, priv->last_blink_time +
324 msecs_to_jiffies(1000)))
325 return;
326
327 blink_rate = get_blink_rate(priv);
328
329	/* call only if the blink rate changed */
330 if (blink_rate != priv->last_blink_rate) {
331 if (blink_rate != IWL_LED_SOLID) {
332 priv->last_blink_time = jiffies +
333 msecs_to_jiffies(1000);
334 iwl4965_led_not_solid(priv, IWL_LED_LINK, blink_rate);
335 } else {
336 priv->last_blink_time = 0;
337 iwl4965_led_on(priv, IWL_LED_LINK);
338 }
339 }
340
341 priv->last_blink_rate = blink_rate;
342}
343EXPORT_SYMBOL(iwl_leds_background);
344
345/* Register all led handlers */
346int iwl_leds_register(struct iwl_priv *priv)
347{
348 char *trigger;
349 char name[32];
350 int ret;
351
352 priv->last_blink_rate = 0;
353 priv->led_tpt = 0;
354 priv->last_blink_time = 0;
355 priv->allow_blinking = 0;
356
357 trigger = ieee80211_get_radio_led_name(priv->hw);
358 snprintf(name, sizeof(name), "iwl-%s:radio",
359 wiphy_name(priv->hw->wiphy));
360
361 priv->led[IWL_LED_TRG_RADIO].led_on = iwl4965_led_on_reg;
362 priv->led[IWL_LED_TRG_RADIO].led_off = iwl4965_led_off_reg;
363 priv->led[IWL_LED_TRG_RADIO].led_pattern = NULL;
364
365 ret = iwl_leds_register_led(priv,
366 &priv->led[IWL_LED_TRG_RADIO],
367 IWL_LED_TRG_RADIO, 1,
368 name, trigger);
369 if (ret)
370 goto exit_fail;
371
372 trigger = ieee80211_get_assoc_led_name(priv->hw);
373 snprintf(name, sizeof(name), "iwl-%s:assoc",
374 wiphy_name(priv->hw->wiphy));
375
376 ret = iwl_leds_register_led(priv,
377 &priv->led[IWL_LED_TRG_ASSOC],
378 IWL_LED_TRG_ASSOC, 0,
379 name, trigger);
380 /* for assoc always turn led on */
381 priv->led[IWL_LED_TRG_ASSOC].led_on = iwl4965_led_on_reg;
382 priv->led[IWL_LED_TRG_ASSOC].led_off = iwl4965_led_on_reg;
383 priv->led[IWL_LED_TRG_ASSOC].led_pattern = NULL;
384
385 if (ret)
386 goto exit_fail;
387
388 trigger = ieee80211_get_rx_led_name(priv->hw);
389 snprintf(name, sizeof(name), "iwl-%s:RX",
390 wiphy_name(priv->hw->wiphy));
391
392
393 ret = iwl_leds_register_led(priv,
394 &priv->led[IWL_LED_TRG_RX],
395 IWL_LED_TRG_RX, 0,
396 name, trigger);
397
398 priv->led[IWL_LED_TRG_RX].led_on = iwl4965_led_associated;
399 priv->led[IWL_LED_TRG_RX].led_off = iwl4965_led_associated;
400 priv->led[IWL_LED_TRG_RX].led_pattern = iwl4965_led_pattern;
401
402 if (ret)
403 goto exit_fail;
404
405 trigger = ieee80211_get_tx_led_name(priv->hw);
406 snprintf(name, sizeof(name), "iwl-%s:TX",
407 wiphy_name(priv->hw->wiphy));
408 ret = iwl_leds_register_led(priv,
409 &priv->led[IWL_LED_TRG_TX],
410 IWL_LED_TRG_TX, 0,
411 name, trigger);
412 priv->led[IWL_LED_TRG_TX].led_on = iwl4965_led_associated;
413 priv->led[IWL_LED_TRG_TX].led_off = iwl4965_led_associated;
414 priv->led[IWL_LED_TRG_TX].led_pattern = iwl4965_led_pattern;
415
416 if (ret)
417 goto exit_fail;
418
419 return 0;
420
421exit_fail:
422 iwl_leds_unregister(priv);
423 return ret;
424}
425EXPORT_SYMBOL(iwl_leds_register);
426
427/* unregister led class */
428static void iwl_leds_unregister_led(struct iwl4965_led *led, u8 set_led)
429{
430 if (!led->registered)
431 return;
432
433 led_classdev_unregister(&led->led_dev);
434
435 if (set_led)
436 led->led_dev.brightness_set(&led->led_dev, LED_OFF);
437 led->registered = 0;
438}
439
440/* Unregister all led handlers */
441void iwl_leds_unregister(struct iwl_priv *priv)
442{
443 iwl_leds_unregister_led(&priv->led[IWL_LED_TRG_ASSOC], 0);
444 iwl_leds_unregister_led(&priv->led[IWL_LED_TRG_RX], 0);
445 iwl_leds_unregister_led(&priv->led[IWL_LED_TRG_TX], 0);
446 iwl_leds_unregister_led(&priv->led[IWL_LED_TRG_RADIO], 1);
447}
448EXPORT_SYMBOL(iwl_leds_unregister);
449
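get_blink_rate() above converts the byte count of the last sampling window into LED on/off times by walking blink_tbl from the highest throughput threshold down, falling back to a solid LED when traffic is below IWL_LED_THRESHOLD or blinking is not allowed. A compact sketch of just that table lookup; the table values are copied from blink_tbl, while pick_blink_rate and the simplified units are illustrative:

/*
 * Sketch of the throughput -> blink-rate lookup: walk a descending table
 * of thresholds and return the matching on-time, or a solid LED when
 * traffic over the window is negligible.
 */
#include <stdio.h>

#define ONE_MB		(128 * 1024)	/* IWL_1MB_RATE */
#define LED_SOLID	11		/* IWL_LED_SOLID */
#define THRESHOLD	16		/* IWL_LED_THRESHOLD */

static const struct {
	unsigned int tpt;		/* threshold, in ONE_MB units */
	unsigned char on_time;		/* blink on-time to use */
} tbl[] = {
	{300, 25}, {200, 40}, {100, 55}, {70, 65}, {50, 75},
	{20, 85}, {15, 95}, {10, 110}, {5, 130}, {0, 167},
};
#define TBL_SIZE ((int)(sizeof(tbl) / sizeof(tbl[0])))

static unsigned int pick_blink_rate(unsigned long long bytes_last_window)
{
	int i = TBL_SIZE;		/* default: below threshold -> solid */

	if (bytes_last_window >= THRESHOLD)
		for (i = 0; i < TBL_SIZE; i++)
			if (bytes_last_window >
			    (unsigned long long)tbl[i].tpt * ONE_MB)
				break;

	return (i == TBL_SIZE) ? LED_SOLID : tbl[i].on_time;
}

int main(void)
{
	/* negligible traffic -> solid (11); ~250 "MB" -> on-time 40 */
	printf("%u %u\n", pick_blink_rate(0), pick_blink_rate(250ULL * ONE_MB));
	return 0;
}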
diff --git a/drivers/net/wireless/iwlwifi/iwl-led.h b/drivers/net/wireless/iwlwifi/iwl-led.h
new file mode 100644
index 000000000000..5bb04128cd65
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-led.h
@@ -0,0 +1,82 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2008 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * James P. Ketrenos <ipw2100-admin@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#ifndef __iwl_leds_h__
28#define __iwl_leds_h__
29
30
31struct iwl_priv;
32
33#ifdef CONFIG_IWLWIFI_LEDS
34#include <linux/leds.h>
35
36#define IWL_LED_SOLID 11
37#define IWL_LED_NAME_LEN 31
38#define IWL_DEF_LED_INTRVL __constant_cpu_to_le32(1000)
39
40#define IWL_LED_ACTIVITY (0<<1)
41#define IWL_LED_LINK (1<<1)
42
43enum led_type {
44 IWL_LED_TRG_TX,
45 IWL_LED_TRG_RX,
46 IWL_LED_TRG_ASSOC,
47 IWL_LED_TRG_RADIO,
48 IWL_LED_TRG_MAX,
49};
50
51
52struct iwl4965_led {
53 struct iwl_priv *priv;
54 struct led_classdev led_dev;
55
56 int (*led_on) (struct iwl_priv *priv, int led_id);
57 int (*led_off) (struct iwl_priv *priv, int led_id);
58 int (*led_pattern) (struct iwl_priv *priv, int led_id,
59 enum led_brightness brightness);
60
61 enum led_type type;
62 unsigned int registered;
63};
64
65int iwl_leds_register(struct iwl_priv *priv);
66void iwl_leds_unregister(struct iwl_priv *priv);
67void iwl_leds_background(struct iwl_priv *priv);
68
69#else
70static inline int iwl_leds_register(struct iwl_priv *priv)
71{
72 return 0;
73}
74static inline void iwl_leds_unregister(struct iwl_priv *priv)
75{
76}
77static inline void iwl_leds_background(struct iwl_priv *priv)
78{
79}
80
81#endif /* CONFIG_IWLWIFI_LEDS */
82#endif /* __iwl_leds_h__ */
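iwl-led.h (like iwl-rfkill.h further down) uses the usual compile-out stub pattern: when the Kconfig option is disabled, the public entry points become empty static inline functions, so callers never need #ifdef guards. A self-contained sketch of the pattern; HAVE_FEATURE and feature_register are made-up names:

/*
 * Sketch of the compile-out stub pattern: with the option off, the entry
 * point collapses to a no-op static inline.
 */
#include <stdio.h>

#define HAVE_FEATURE 0			/* stands in for CONFIG_IWLWIFI_LEDS */

#if HAVE_FEATURE
static int feature_register(void)	/* stand-in for the real registration */
{
	printf("registering LED class devices\n");
	return 0;
}
#else
static inline int feature_register(void)	/* compiled-out no-op stub */
{
	return 0;
}
#endif

int main(void)
{
	printf("feature_register -> %d\n", feature_register());
	return 0;
}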
diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h
index 4ba121634877..c9cf8eef1a90 100644
--- a/drivers/net/wireless/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/iwlwifi/iwl-prph.h
@@ -5,7 +5,7 @@
5 * 5 *
6 * GPL LICENSE SUMMARY 6 * GPL LICENSE SUMMARY
7 * 7 *
8 * Copyright(c) 2005 - 2007 Intel Corporation. All rights reserved. 8 * Copyright(c) 2005 - 2008 Intel Corporation. All rights reserved.
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as 11 * it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
30 * 30 *
31 * BSD LICENSE 31 * BSD LICENSE
32 * 32 *
33 * Copyright(c) 2005 - 2007 Intel Corporation. All rights reserved. 33 * Copyright(c) 2005 - 2008 Intel Corporation. All rights reserved.
34 * All rights reserved. 34 * All rights reserved.
35 * 35 *
36 * Redistribution and use in source and binary forms, with or without 36 * Redistribution and use in source and binary forms, with or without
@@ -243,44 +243,48 @@
243 * 4965 Tx Scheduler registers. 243 * 4965 Tx Scheduler registers.
244 * Details are documented in iwl-4965-hw.h 244 * Details are documented in iwl-4965-hw.h
245 */ 245 */
246#define KDR_SCD_BASE (PRPH_BASE + 0xa02c00) 246#define IWL49_SCD_BASE (PRPH_BASE + 0xa02c00)
247 247
248#define KDR_SCD_SRAM_BASE_ADDR (KDR_SCD_BASE + 0x0) 248#define IWL49_SCD_SRAM_BASE_ADDR (IWL49_SCD_BASE + 0x0)
249#define KDR_SCD_EMPTY_BITS (KDR_SCD_BASE + 0x4) 249#define IWL49_SCD_EMPTY_BITS (IWL49_SCD_BASE + 0x4)
250#define KDR_SCD_DRAM_BASE_ADDR (KDR_SCD_BASE + 0x10) 250#define IWL49_SCD_DRAM_BASE_ADDR (IWL49_SCD_BASE + 0x10)
251#define KDR_SCD_AIT (KDR_SCD_BASE + 0x18) 251#define IWL49_SCD_AIT (IWL49_SCD_BASE + 0x18)
252#define KDR_SCD_TXFACT (KDR_SCD_BASE + 0x1c) 252#define IWL49_SCD_TXFACT (IWL49_SCD_BASE + 0x1c)
253#define KDR_SCD_QUEUE_WRPTR(x) (KDR_SCD_BASE + 0x24 + (x) * 4) 253#define IWL49_SCD_QUEUE_WRPTR(x) (IWL49_SCD_BASE + 0x24 + (x) * 4)
254#define KDR_SCD_QUEUE_RDPTR(x) (KDR_SCD_BASE + 0x64 + (x) * 4) 254#define IWL49_SCD_QUEUE_RDPTR(x) (IWL49_SCD_BASE + 0x64 + (x) * 4)
255#define KDR_SCD_SETQUEUENUM (KDR_SCD_BASE + 0xa4) 255#define IWL49_SCD_SETQUEUENUM (IWL49_SCD_BASE + 0xa4)
256#define KDR_SCD_SET_TXSTAT_TXED (KDR_SCD_BASE + 0xa8) 256#define IWL49_SCD_SET_TXSTAT_TXED (IWL49_SCD_BASE + 0xa8)
257#define KDR_SCD_SET_TXSTAT_DONE (KDR_SCD_BASE + 0xac) 257#define IWL49_SCD_SET_TXSTAT_DONE (IWL49_SCD_BASE + 0xac)
258#define KDR_SCD_SET_TXSTAT_NOT_SCHD (KDR_SCD_BASE + 0xb0) 258#define IWL49_SCD_SET_TXSTAT_NOT_SCHD (IWL49_SCD_BASE + 0xb0)
259#define KDR_SCD_DECREASE_CREDIT (KDR_SCD_BASE + 0xb4) 259#define IWL49_SCD_DECREASE_CREDIT (IWL49_SCD_BASE + 0xb4)
260#define KDR_SCD_DECREASE_SCREDIT (KDR_SCD_BASE + 0xb8) 260#define IWL49_SCD_DECREASE_SCREDIT (IWL49_SCD_BASE + 0xb8)
261#define KDR_SCD_LOAD_CREDIT (KDR_SCD_BASE + 0xbc) 261#define IWL49_SCD_LOAD_CREDIT (IWL49_SCD_BASE + 0xbc)
262#define KDR_SCD_LOAD_SCREDIT (KDR_SCD_BASE + 0xc0) 262#define IWL49_SCD_LOAD_SCREDIT (IWL49_SCD_BASE + 0xc0)
263#define KDR_SCD_BAR (KDR_SCD_BASE + 0xc4) 263#define IWL49_SCD_BAR (IWL49_SCD_BASE + 0xc4)
264#define KDR_SCD_BAR_DW0 (KDR_SCD_BASE + 0xc8) 264#define IWL49_SCD_BAR_DW0 (IWL49_SCD_BASE + 0xc8)
265#define KDR_SCD_BAR_DW1 (KDR_SCD_BASE + 0xcc) 265#define IWL49_SCD_BAR_DW1 (IWL49_SCD_BASE + 0xcc)
266#define KDR_SCD_QUEUECHAIN_SEL (KDR_SCD_BASE + 0xd0) 266#define IWL49_SCD_QUEUECHAIN_SEL (IWL49_SCD_BASE + 0xd0)
267#define KDR_SCD_QUERY_REQ (KDR_SCD_BASE + 0xd8) 267#define IWL49_SCD_QUERY_REQ (IWL49_SCD_BASE + 0xd8)
268#define KDR_SCD_QUERY_RES (KDR_SCD_BASE + 0xdc) 268#define IWL49_SCD_QUERY_RES (IWL49_SCD_BASE + 0xdc)
269#define KDR_SCD_PENDING_FRAMES (KDR_SCD_BASE + 0xe0) 269#define IWL49_SCD_PENDING_FRAMES (IWL49_SCD_BASE + 0xe0)
270#define KDR_SCD_INTERRUPT_MASK (KDR_SCD_BASE + 0xe4) 270#define IWL49_SCD_INTERRUPT_MASK (IWL49_SCD_BASE + 0xe4)
271#define KDR_SCD_INTERRUPT_THRESHOLD (KDR_SCD_BASE + 0xe8) 271#define IWL49_SCD_INTERRUPT_THRESHOLD (IWL49_SCD_BASE + 0xe8)
272#define KDR_SCD_QUERY_MIN_FRAME_SIZE (KDR_SCD_BASE + 0x100) 272#define IWL49_SCD_QUERY_MIN_FRAME_SIZE (IWL49_SCD_BASE + 0x100)
273#define KDR_SCD_QUEUE_STATUS_BITS(x) (KDR_SCD_BASE + 0x104 + (x) * 4) 273#define IWL49_SCD_QUEUE_STATUS_BITS(x) (IWL49_SCD_BASE + 0x104 + (x) * 4)
274 274
275/* SP SCD */ 275/* SP SCD */
276#define SHL_SCD_BASE (PRPH_BASE + 0xa02c00) 276#define IWL50_SCD_BASE (PRPH_BASE + 0xa02c00)
277 277
278#define SHL_SCD_AIT (SHL_SCD_BASE + 0x0c) 278#define IWL50_SCD_SRAM_BASE_ADDR (IWL50_SCD_BASE + 0x0)
279#define SHL_SCD_TXFACT (SHL_SCD_BASE + 0x10) 279#define IWL50_SCD_DRAM_BASE_ADDR (IWL50_SCD_BASE + 0x8)
280#define SHL_SCD_QUEUE_WRPTR(x) (SHL_SCD_BASE + 0x18 + (x) * 4) 280#define IWL50_SCD_AIT (IWL50_SCD_BASE + 0x0c)
281#define SHL_SCD_QUEUE_RDPTR(x) (SHL_SCD_BASE + 0x68 + (x) * 4) 281#define IWL50_SCD_TXFACT (IWL50_SCD_BASE + 0x10)
282#define SHL_SCD_QUEUECHAIN_SEL (SHL_SCD_BASE + 0xe8) 282#define IWL50_SCD_ACTIVE (IWL50_SCD_BASE + 0x14)
283#define SHL_SCD_AGGR_SEL (SHL_SCD_BASE + 0x248) 283#define IWL50_SCD_QUEUE_WRPTR(x) (IWL50_SCD_BASE + 0x18 + (x) * 4)
284#define SHL_SCD_INTERRUPT_MASK (SHL_SCD_BASE + 0x108) 284#define IWL50_SCD_QUEUE_RDPTR(x) (IWL50_SCD_BASE + 0x68 + (x) * 4)
285#define IWL50_SCD_QUEUECHAIN_SEL (IWL50_SCD_BASE + 0xe8)
286#define IWL50_SCD_AGGR_SEL (IWL50_SCD_BASE + 0x248)
287#define IWL50_SCD_INTERRUPT_MASK (IWL50_SCD_BASE + 0x108)
288#define IWL50_SCD_QUEUE_STATUS_BITS(x) (IWL50_SCD_BASE + 0x10c + (x) * 4)
285 289
286#endif /* __iwl_prph_h__ */ 290#endif /* __iwl_prph_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-rfkill.c b/drivers/net/wireless/iwlwifi/iwl-rfkill.c
new file mode 100644
index 000000000000..5980a5621cb8
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-rfkill.c
@@ -0,0 +1,173 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2008 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * James P. Ketrenos <ipw2100-admin@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28#include <linux/kernel.h>
29#include <linux/module.h>
30#include <linux/version.h>
31#include <linux/init.h>
32
33#include <net/mac80211.h>
34
35#include "iwl-eeprom.h"
36#include "iwl-4965.h"
37#include "iwl-core.h"
38#include "iwl-helpers.h"
39
40
41/* software rf-kill from user */
42static int iwl_rfkill_soft_rf_kill(void *data, enum rfkill_state state)
43{
44 struct iwl_priv *priv = data;
45 int err = 0;
46
47 if (!priv->rfkill_mngr.rfkill)
48 return 0;
49
50 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
51 return 0;
52
53	IWL_DEBUG_RF_KILL("we received soft RFKILL set to state %d\n", state);
54 mutex_lock(&priv->mutex);
55
56 switch (state) {
57 case RFKILL_STATE_ON:
58 priv->cfg->ops->lib->radio_kill_sw(priv, 0);
59		/* if HW rf-kill is set, don't allow the ON state */
60 if (iwl_is_rfkill(priv))
61 err = -EBUSY;
62 break;
63 case RFKILL_STATE_OFF:
64 priv->cfg->ops->lib->radio_kill_sw(priv, 1);
65 if (!iwl_is_rfkill(priv))
66 err = -EBUSY;
67 break;
68 }
69 mutex_unlock(&priv->mutex);
70
71 return err;
72}
73
74int iwl_rfkill_init(struct iwl_priv *priv)
75{
76 struct device *device = wiphy_dev(priv->hw->wiphy);
77 int ret = 0;
78
79 BUG_ON(device == NULL);
80
81 IWL_DEBUG_RF_KILL("Initializing RFKILL.\n");
82 priv->rfkill_mngr.rfkill = rfkill_allocate(device, RFKILL_TYPE_WLAN);
83 if (!priv->rfkill_mngr.rfkill) {
84 IWL_ERROR("Unable to allocate rfkill device.\n");
85 ret = -ENOMEM;
86 goto error;
87 }
88
89 priv->rfkill_mngr.rfkill->name = priv->cfg->name;
90 priv->rfkill_mngr.rfkill->data = priv;
91 priv->rfkill_mngr.rfkill->state = RFKILL_STATE_ON;
92 priv->rfkill_mngr.rfkill->toggle_radio = iwl_rfkill_soft_rf_kill;
93 priv->rfkill_mngr.rfkill->user_claim_unsupported = 1;
94
95 priv->rfkill_mngr.rfkill->dev.class->suspend = NULL;
96 priv->rfkill_mngr.rfkill->dev.class->resume = NULL;
97
98 priv->rfkill_mngr.input_dev = input_allocate_device();
99 if (!priv->rfkill_mngr.input_dev) {
100 IWL_ERROR("Unable to allocate rfkill input device.\n");
101 ret = -ENOMEM;
102 goto freed_rfkill;
103 }
104
105 priv->rfkill_mngr.input_dev->name = priv->cfg->name;
106 priv->rfkill_mngr.input_dev->phys = wiphy_name(priv->hw->wiphy);
107 priv->rfkill_mngr.input_dev->id.bustype = BUS_HOST;
108 priv->rfkill_mngr.input_dev->id.vendor = priv->pci_dev->vendor;
109 priv->rfkill_mngr.input_dev->dev.parent = device;
110 priv->rfkill_mngr.input_dev->evbit[0] = BIT(EV_KEY);
111 set_bit(KEY_WLAN, priv->rfkill_mngr.input_dev->keybit);
112
113 ret = rfkill_register(priv->rfkill_mngr.rfkill);
114 if (ret) {
115 IWL_ERROR("Unable to register rfkill: %d\n", ret);
116 goto free_input_dev;
117 }
118
119 ret = input_register_device(priv->rfkill_mngr.input_dev);
120 if (ret) {
121 IWL_ERROR("Unable to register rfkill input device: %d\n", ret);
122 goto unregister_rfkill;
123 }
124
125 IWL_DEBUG_RF_KILL("RFKILL initialization complete.\n");
126 return ret;
127
128unregister_rfkill:
129 rfkill_unregister(priv->rfkill_mngr.rfkill);
130 priv->rfkill_mngr.rfkill = NULL;
131
132free_input_dev:
133 input_free_device(priv->rfkill_mngr.input_dev);
134 priv->rfkill_mngr.input_dev = NULL;
135
136freed_rfkill:
137 if (priv->rfkill_mngr.rfkill != NULL)
138 rfkill_free(priv->rfkill_mngr.rfkill);
139 priv->rfkill_mngr.rfkill = NULL;
140
141error:
142	IWL_DEBUG_RF_KILL("RFKILL initialization failed.\n");
143 return ret;
144}
145EXPORT_SYMBOL(iwl_rfkill_init);
146
147void iwl_rfkill_unregister(struct iwl_priv *priv)
148{
149
150 if (priv->rfkill_mngr.input_dev)
151 input_unregister_device(priv->rfkill_mngr.input_dev);
152
153 if (priv->rfkill_mngr.rfkill)
154 rfkill_unregister(priv->rfkill_mngr.rfkill);
155
156 priv->rfkill_mngr.input_dev = NULL;
157 priv->rfkill_mngr.rfkill = NULL;
158}
159EXPORT_SYMBOL(iwl_rfkill_unregister);
160
161/* set rf-kill to the right state. */
162void iwl_rfkill_set_hw_state(struct iwl_priv *priv)
163{
164
165 if (!priv->rfkill_mngr.rfkill)
166 return;
167
168 if (!iwl_is_rfkill(priv))
169 priv->rfkill_mngr.rfkill->state = RFKILL_STATE_ON;
170 else
171 priv->rfkill_mngr.rfkill->state = RFKILL_STATE_OFF;
172}
173EXPORT_SYMBOL(iwl_rfkill_set_hw_state);
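iwl_rfkill_init() above uses the classic goto-based unwind ladder: each successfully acquired resource gets a matching cleanup label, and a failure jumps to the label that releases everything acquired so far, in reverse order. A reduced, driver-independent sketch of that error-handling shape; the thing type and its helpers are placeholders, not driver APIs:

/*
 * Sketch of a goto-based unwind ladder: every acquisition gets a cleanup
 * label, and a failure jumps to the label that releases what was acquired
 * so far, in reverse order.
 */
#include <stdio.h>
#include <stdlib.h>

struct thing {
	int registered;
};

static struct thing *alloc_thing(void)
{
	return calloc(1, sizeof(struct thing));
}

static int register_thing(struct thing *t)
{
	t->registered = 1;
	return 0;			/* pretend registration succeeded */
}

static void unregister_thing(struct thing *t)
{
	t->registered = 0;
}

static int init_two_things(struct thing **a, struct thing **b)
{
	int ret;

	*a = alloc_thing();
	if (!*a) {
		ret = -1;
		goto error;
	}
	*b = alloc_thing();
	if (!*b) {
		ret = -1;
		goto free_a;
	}
	ret = register_thing(*a);
	if (ret)
		goto free_b;
	ret = register_thing(*b);
	if (ret)
		goto unregister_a;
	return 0;			/* everything acquired */

unregister_a:
	unregister_thing(*a);
free_b:
	free(*b);
	*b = NULL;
free_a:
	free(*a);
	*a = NULL;
error:
	return ret;
}

int main(void)
{
	struct thing *a = NULL, *b = NULL;
	int ret = init_two_things(&a, &b);

	printf("init -> %d\n", ret);
	if (!ret) {			/* tear down in reverse order */
		unregister_thing(b);
		unregister_thing(a);
		free(b);
		free(a);
	}
	return 0;
}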
diff --git a/drivers/net/wireless/iwlwifi/iwl-rfkill.h b/drivers/net/wireless/iwlwifi/iwl-rfkill.h
new file mode 100644
index 000000000000..a7f04b855403
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-rfkill.h
@@ -0,0 +1,54 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * James P. Ketrenos <ipw2100-admin@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28#ifndef __iwl_rf_kill_h__
29#define __iwl_rf_kill_h__
30
31struct iwl_priv;
32
33#include <linux/rfkill.h>
34#include <linux/input.h>
35
36
37#ifdef CONFIG_IWLWIFI_RFKILL
38struct iwl_rfkill_mngr {
39 struct rfkill *rfkill;
40 struct input_dev *input_dev;
41};
42
43void iwl_rfkill_set_hw_state(struct iwl_priv *priv);
44void iwl_rfkill_unregister(struct iwl_priv *priv);
45int iwl_rfkill_init(struct iwl_priv *priv);
46#else
47static inline void iwl_rfkill_set_hw_state(struct iwl_priv *priv) {}
48static inline void iwl_rfkill_unregister(struct iwl_priv *priv) {}
49static inline int iwl_rfkill_init(struct iwl_priv *priv) { return 0; }
50#endif
51
52
53
54#endif /* __iwl_rf_kill_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-spectrum.h b/drivers/net/wireless/iwlwifi/iwl-spectrum.h
index b576ff24eb4f..a40a2174df98 100644
--- a/drivers/net/wireless/iwlwifi/iwl-spectrum.h
+++ b/drivers/net/wireless/iwlwifi/iwl-spectrum.h
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2003 - 2007 Intel Corporation. All rights reserved. 3 * Copyright(c) 2003 - 2008 Intel Corporation. All rights reserved.
4 * 4 *
5 * Portions of this file are derived from the ieee80211 subsystem header files. 5 * Portions of this file are derived from the ieee80211 subsystem header files.
6 * 6 *
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.c b/drivers/net/wireless/iwlwifi/iwl-sta.c
new file mode 100644
index 000000000000..e4fdfaa2b9b2
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-sta.c
@@ -0,0 +1,355 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2008 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * James P. Ketrenos <ipw2100-admin@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#include <net/mac80211.h>
31
32#include "iwl-eeprom.h"
33#include "iwl-4965.h"
34#include "iwl-core.h"
35#include "iwl-sta.h"
36#include "iwl-io.h"
37#include "iwl-helpers.h"
38#include "iwl-4965.h"
39#include "iwl-sta.h"
40
41int iwl_get_free_ucode_key_index(struct iwl_priv *priv)
42{
43 int i;
44
45 for (i = 0; i < STA_KEY_MAX_NUM; i++)
46 if (!test_and_set_bit(i, &priv->ucode_key_table))
47 return i;
48
49 return -1;
50}
51
52int iwl_send_static_wepkey_cmd(struct iwl_priv *priv, u8 send_if_empty)
53{
54 int i, not_empty = 0;
55 u8 buff[sizeof(struct iwl_wep_cmd) +
56 sizeof(struct iwl_wep_key) * WEP_KEYS_MAX];
57 struct iwl_wep_cmd *wep_cmd = (struct iwl_wep_cmd *)buff;
58 size_t cmd_size = sizeof(struct iwl_wep_cmd);
59 struct iwl_host_cmd cmd = {
60 .id = REPLY_WEPKEY,
61 .data = wep_cmd,
62 .meta.flags = CMD_ASYNC,
63 };
64
65 memset(wep_cmd, 0, cmd_size +
66 (sizeof(struct iwl_wep_key) * WEP_KEYS_MAX));
67
68 for (i = 0; i < WEP_KEYS_MAX ; i++) {
69 wep_cmd->key[i].key_index = i;
70 if (priv->wep_keys[i].key_size) {
71 wep_cmd->key[i].key_offset = i;
72 not_empty = 1;
73 } else {
74 wep_cmd->key[i].key_offset = WEP_INVALID_OFFSET;
75 }
76
77 wep_cmd->key[i].key_size = priv->wep_keys[i].key_size;
78 memcpy(&wep_cmd->key[i].key[3], priv->wep_keys[i].key,
79 priv->wep_keys[i].key_size);
80 }
81
82 wep_cmd->global_key_type = WEP_KEY_WEP_TYPE;
83 wep_cmd->num_keys = WEP_KEYS_MAX;
84
85 cmd_size += sizeof(struct iwl_wep_key) * WEP_KEYS_MAX;
86
87 cmd.len = cmd_size;
88
89 if (not_empty || send_if_empty)
90 return iwl_send_cmd(priv, &cmd);
91 else
92 return 0;
93}
94
95int iwl_remove_default_wep_key(struct iwl_priv *priv,
96 struct ieee80211_key_conf *keyconf)
97{
98 int ret;
99 unsigned long flags;
100
101 spin_lock_irqsave(&priv->sta_lock, flags);
102
103 if (!test_and_clear_bit(keyconf->keyidx, &priv->ucode_key_table))
104 IWL_ERROR("index %d not used in uCode key table.\n",
105 keyconf->keyidx);
106
107 priv->default_wep_key--;
108 memset(&priv->wep_keys[keyconf->keyidx], 0, sizeof(priv->wep_keys[0]));
109 ret = iwl_send_static_wepkey_cmd(priv, 1);
110 spin_unlock_irqrestore(&priv->sta_lock, flags);
111
112 return ret;
113}
114
115int iwl_set_default_wep_key(struct iwl_priv *priv,
116 struct ieee80211_key_conf *keyconf)
117{
118 int ret;
119 unsigned long flags;
120
121 keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV;
122 keyconf->hw_key_idx = keyconf->keyidx;
123 priv->stations[IWL_AP_ID].keyinfo.alg = ALG_WEP;
124
125 spin_lock_irqsave(&priv->sta_lock, flags);
126 priv->default_wep_key++;
127
128 if (test_and_set_bit(keyconf->keyidx, &priv->ucode_key_table))
129 IWL_ERROR("index %d already used in uCode key table.\n",
130 keyconf->keyidx);
131
132 priv->wep_keys[keyconf->keyidx].key_size = keyconf->keylen;
133 memcpy(&priv->wep_keys[keyconf->keyidx].key, &keyconf->key,
134 keyconf->keylen);
135
136 ret = iwl_send_static_wepkey_cmd(priv, 0);
137 spin_unlock_irqrestore(&priv->sta_lock, flags);
138
139 return ret;
140}
141
142static int iwl_set_wep_dynamic_key_info(struct iwl_priv *priv,
143 struct ieee80211_key_conf *keyconf,
144 u8 sta_id)
145{
146 unsigned long flags;
147 __le16 key_flags = 0;
148 int ret;
149
150 keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV;
151 keyconf->hw_key_idx = keyconf->keyidx;
152
153 key_flags |= (STA_KEY_FLG_WEP | STA_KEY_FLG_MAP_KEY_MSK);
154 key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
155 key_flags &= ~STA_KEY_FLG_INVALID;
156
157 if (keyconf->keylen == WEP_KEY_LEN_128)
158 key_flags |= STA_KEY_FLG_KEY_SIZE_MSK;
159
160 if (sta_id == priv->hw_params.bcast_sta_id)
161 key_flags |= STA_KEY_MULTICAST_MSK;
162
163 spin_lock_irqsave(&priv->sta_lock, flags);
164
165 priv->stations[sta_id].keyinfo.alg = keyconf->alg;
166 priv->stations[sta_id].keyinfo.keylen = keyconf->keylen;
167 priv->stations[sta_id].keyinfo.keyidx = keyconf->keyidx;
168
169 memcpy(priv->stations[sta_id].keyinfo.key,
170 keyconf->key, keyconf->keylen);
171
172 memcpy(&priv->stations[sta_id].sta.key.key[3],
173 keyconf->key, keyconf->keylen);
174
175 priv->stations[sta_id].sta.key.key_offset =
176 iwl_get_free_ucode_key_index(priv);
177 priv->stations[sta_id].sta.key.key_flags = key_flags;
178
179 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
180 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
181
182 ret = iwl4965_send_add_station(priv,
183 &priv->stations[sta_id].sta, CMD_ASYNC);
184
185 spin_unlock_irqrestore(&priv->sta_lock, flags);
186
187 return ret;
188}
189
190static int iwl_set_ccmp_dynamic_key_info(struct iwl_priv *priv,
191 struct ieee80211_key_conf *keyconf,
192 u8 sta_id)
193{
194 unsigned long flags;
195 __le16 key_flags = 0;
196
197 key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK);
198 key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
199 key_flags &= ~STA_KEY_FLG_INVALID;
200
201 if (sta_id == priv->hw_params.bcast_sta_id)
202 key_flags |= STA_KEY_MULTICAST_MSK;
203
204 keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
205 keyconf->hw_key_idx = keyconf->keyidx;
206
207 spin_lock_irqsave(&priv->sta_lock, flags);
208 priv->stations[sta_id].keyinfo.alg = keyconf->alg;
209 priv->stations[sta_id].keyinfo.keylen = keyconf->keylen;
210
211 memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key,
212 keyconf->keylen);
213
214 memcpy(priv->stations[sta_id].sta.key.key, keyconf->key,
215 keyconf->keylen);
216
217 priv->stations[sta_id].sta.key.key_offset =
218 iwl_get_free_ucode_key_index(priv);
219 priv->stations[sta_id].sta.key.key_flags = key_flags;
220 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
221 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
222
223 spin_unlock_irqrestore(&priv->sta_lock, flags);
224
225 IWL_DEBUG_INFO("hwcrypto: modify ucode station key info\n");
226 return iwl4965_send_add_station(priv,
227 &priv->stations[sta_id].sta, CMD_ASYNC);
228}
229
230static int iwl_set_tkip_dynamic_key_info(struct iwl_priv *priv,
231 struct ieee80211_key_conf *keyconf,
232 u8 sta_id)
233{
234 unsigned long flags;
235 int ret = 0;
236
237 keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
238 keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
239 keyconf->hw_key_idx = keyconf->keyidx;
240
241 spin_lock_irqsave(&priv->sta_lock, flags);
242
243 priv->stations[sta_id].keyinfo.alg = keyconf->alg;
244 priv->stations[sta_id].keyinfo.conf = keyconf;
245 priv->stations[sta_id].keyinfo.keylen = 16;
246 priv->stations[sta_id].sta.key.key_offset =
247 iwl_get_free_ucode_key_index(priv);
248
249	/* This copy is actually not needed: we get the key with each TX */
250 memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key, 16);
251
252 memcpy(priv->stations[sta_id].sta.key.key, keyconf->key, 16);
253
254 spin_unlock_irqrestore(&priv->sta_lock, flags);
255
256 return ret;
257}
258
259int iwl_remove_dynamic_key(struct iwl_priv *priv, u8 sta_id)
260{
261 unsigned long flags;
262
263 priv->key_mapping_key = 0;
264
265 spin_lock_irqsave(&priv->sta_lock, flags);
266 if (!test_and_clear_bit(priv->stations[sta_id].sta.key.key_offset,
267 &priv->ucode_key_table))
268 IWL_ERROR("index %d not used in uCode key table.\n",
269 priv->stations[sta_id].sta.key.key_offset);
270 memset(&priv->stations[sta_id].keyinfo, 0,
271 sizeof(struct iwl4965_hw_key));
272 memset(&priv->stations[sta_id].sta.key, 0,
273 sizeof(struct iwl4965_keyinfo));
274 priv->stations[sta_id].sta.key.key_flags = STA_KEY_FLG_NO_ENC;
275 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
276 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
277 spin_unlock_irqrestore(&priv->sta_lock, flags);
278
279 IWL_DEBUG_INFO("hwcrypto: clear ucode station key info\n");
280 return iwl4965_send_add_station(priv, &priv->stations[sta_id].sta, 0);
281}
282
283int iwl_set_dynamic_key(struct iwl_priv *priv,
284 struct ieee80211_key_conf *key, u8 sta_id)
285{
286 int ret;
287
288 priv->key_mapping_key = 1;
289
290 switch (key->alg) {
291 case ALG_CCMP:
292 ret = iwl_set_ccmp_dynamic_key_info(priv, key, sta_id);
293 break;
294 case ALG_TKIP:
295 ret = iwl_set_tkip_dynamic_key_info(priv, key, sta_id);
296 break;
297 case ALG_WEP:
298 ret = iwl_set_wep_dynamic_key_info(priv, key, sta_id);
299 break;
300 default:
301		IWL_ERROR("%s: unknown alg %d\n", __func__, key->alg);
302 ret = -EINVAL;
303 }
304
305 return ret;
306}
307
308#ifdef CONFIG_IWLWIFI_DEBUG
309static void iwl_dump_lq_cmd(struct iwl_priv *priv,
310 struct iwl_link_quality_cmd *lq)
311{
312 int i;
313 IWL_DEBUG_RATE("lq station id 0x%x\n", lq->sta_id);
314 IWL_DEBUG_RATE("lq dta 0x%X 0x%X\n",
315 lq->general_params.single_stream_ant_msk,
316 lq->general_params.dual_stream_ant_msk);
317
318 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
319 IWL_DEBUG_RATE("lq index %d 0x%X\n",
320 i, lq->rs_table[i].rate_n_flags);
321}
322#else
323static inline void iwl_dump_lq_cmd(struct iwl_priv *priv,
324 struct iwl_link_quality_cmd *lq)
325{
326}
327#endif
328
329int iwl_send_lq_cmd(struct iwl_priv *priv,
330 struct iwl_link_quality_cmd *lq, u8 flags)
331{
332 struct iwl_host_cmd cmd = {
333 .id = REPLY_TX_LINK_QUALITY_CMD,
334 .len = sizeof(struct iwl_link_quality_cmd),
335 .meta.flags = flags,
336 .data = lq,
337 };
338
339 if ((lq->sta_id == 0xFF) &&
340 (priv->iw_mode == IEEE80211_IF_TYPE_IBSS))
341 return -EINVAL;
342
343 if (lq->sta_id == 0xFF)
344 lq->sta_id = IWL_AP_ID;
345
346	iwl_dump_lq_cmd(priv, lq);
347
348 if (iwl_is_associated(priv) && priv->assoc_station_added &&
349 priv->lq_mngr.lq_ready)
350 return iwl_send_cmd(priv, &cmd);
351
352 return 0;
353}
354EXPORT_SYMBOL(iwl_send_lq_cmd);
355
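iwl_get_free_ucode_key_index() above treats priv->ucode_key_table as a bitmap of hardware key slots: allocation is "find the first clear bit and set it atomically", and iwl_remove_dynamic_key() frees a slot with test_and_clear_bit(). A plain-C sketch of that bookkeeping with simplified, non-atomic helpers and illustrative names:

/*
 * Sketch of bitmap-based key-slot bookkeeping; the driver uses
 * test_and_set_bit()/test_and_clear_bit() on priv->ucode_key_table,
 * these helpers are simplified, non-atomic stand-ins.
 */
#include <stdio.h>

#define KEY_MAX_NUM 8			/* stands in for STA_KEY_MAX_NUM */

static unsigned long key_table;		/* one bit per hardware key slot */

static int get_free_key_index(void)
{
	int i;

	for (i = 0; i < KEY_MAX_NUM; i++) {
		if (!(key_table & (1UL << i))) {
			key_table |= 1UL << i;	/* claim the slot */
			return i;
		}
	}
	return -1;			/* table full */
}

static void put_key_index(int i)
{
	if (i >= 0 && i < KEY_MAX_NUM)
		key_table &= ~(1UL << i);	/* release the slot */
}

int main(void)
{
	int a = get_free_key_index();
	int b = get_free_key_index();

	printf("%d %d\n", a, b);		/* 0 1 */
	put_key_index(a);
	printf("%d\n", get_free_key_index());	/* 0 again */
	return 0;
}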
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.h b/drivers/net/wireless/iwlwifi/iwl-sta.h
new file mode 100644
index 000000000000..44f272ecc827
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-sta.h
@@ -0,0 +1,49 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2008 Intel Corporation. All rights reserved.
4 *
5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of version 2 of the GNU General Public License as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
20 *
21 * The full GNU General Public License is included in this distribution in the
22 * file called LICENSE.
23 *
24 * Contact Information:
25 * James P. Ketrenos <ipw2100-admin@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29#ifndef __iwl_sta_h__
30#define __iwl_sta_h__
31
32#include <net/mac80211.h>
33
34#include "iwl-eeprom.h"
35#include "iwl-core.h"
36#include "iwl-4965.h"
37#include "iwl-io.h"
38#include "iwl-helpers.h"
39
40int iwl_get_free_ucode_key_index(struct iwl_priv *priv);
41int iwl_send_static_wepkey_cmd(struct iwl_priv *priv, u8 send_if_empty);
42int iwl_remove_default_wep_key(struct iwl_priv *priv,
43 struct ieee80211_key_conf *key);
44int iwl_set_default_wep_key(struct iwl_priv *priv,
45 struct ieee80211_key_conf *key);
46int iwl_remove_dynamic_key(struct iwl_priv *priv, u8 sta_id);
47int iwl_set_dynamic_key(struct iwl_priv *priv,
48 struct ieee80211_key_conf *key, u8 sta_id);
49#endif /* __iwl_sta_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
index cbaeaf186494..1a5678fe4224 100644
--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2003 - 2007 Intel Corporation. All rights reserved. 3 * Copyright(c) 2003 - 2008 Intel Corporation. All rights reserved.
4 * 4 *
5 * Portions of this file are derived from the ipw3945 project, as well 5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files. 6 * as portions of the ieee80211 subsystem header files.
@@ -46,6 +46,7 @@
46 46
47#include <asm/div64.h> 47#include <asm/div64.h>
48 48
49#include "iwl-3945-core.h"
49#include "iwl-3945.h" 50#include "iwl-3945.h"
50#include "iwl-helpers.h" 51#include "iwl-helpers.h"
51 52
@@ -69,7 +70,7 @@ static int iwl3945_param_disable; /* def: 0 = enable radio */
69static int iwl3945_param_antenna; /* def: 0 = both antennas (use diversity) */ 70static int iwl3945_param_antenna; /* def: 0 = both antennas (use diversity) */
70int iwl3945_param_hwcrypto; /* def: 0 = use software encryption */ 71int iwl3945_param_hwcrypto; /* def: 0 = use software encryption */
71static int iwl3945_param_qos_enable = 1; /* def: 1 = use quality of service */ 72static int iwl3945_param_qos_enable = 1; /* def: 1 = use quality of service */
72int iwl3945_param_queues_num = IWL_MAX_NUM_QUEUES; /* def: 8 Tx queues */ 73int iwl3945_param_queues_num = IWL39_MAX_NUM_QUEUES; /* def: 8 Tx queues */
73 74
74/* 75/*
75 * module name, copyright, version, etc. 76 * module name, copyright, version, etc.
@@ -91,15 +92,10 @@ int iwl3945_param_queues_num = IWL_MAX_NUM_QUEUES; /* def: 8 Tx queues */
91#define VS 92#define VS
92#endif 93#endif
93 94
94#define IWLWIFI_VERSION "1.2.23k" VD VS 95#define IWLWIFI_VERSION "1.2.26k" VD VS
95#define DRV_COPYRIGHT "Copyright(c) 2003-2007 Intel Corporation" 96#define DRV_COPYRIGHT "Copyright(c) 2003-2008 Intel Corporation"
96#define DRV_VERSION IWLWIFI_VERSION 97#define DRV_VERSION IWLWIFI_VERSION
97 98
98/* Change firmware file name, using "-" and incrementing number,
99 * *only* when uCode interface or architecture changes so that it
100 * is not compatible with earlier drivers.
101 * This number will also appear in << 8 position of 1st dword of uCode file */
102#define IWL3945_UCODE_API "-1"
103 99
104MODULE_DESCRIPTION(DRV_DESCRIPTION); 100MODULE_DESCRIPTION(DRV_DESCRIPTION);
105MODULE_VERSION(DRV_VERSION); 101MODULE_VERSION(DRV_VERSION);
@@ -116,16 +112,10 @@ static __le16 *ieee80211_get_qos_ctrl(struct ieee80211_hdr *hdr)
116 return NULL; 112 return NULL;
117} 113}
118 114
119static const struct ieee80211_hw_mode *iwl3945_get_hw_mode( 115static const struct ieee80211_supported_band *iwl3945_get_band(
120 struct iwl3945_priv *priv, int mode) 116 struct iwl3945_priv *priv, enum ieee80211_band band)
121{ 117{
122 int i; 118 return priv->hw->wiphy->bands[band];
123
124 for (i = 0; i < 3; i++)
125 if (priv->modes[i].mode == mode)
126 return &priv->modes[i];
127
128 return NULL;
129} 119}
130 120
131static int iwl3945_is_empty_essid(const char *essid, int essid_len) 121static int iwl3945_is_empty_essid(const char *essid, int essid_len)
@@ -168,17 +158,6 @@ static const char *iwl3945_escape_essid(const char *essid, u8 essid_len)
168 return escaped; 158 return escaped;
169} 159}
170 160
171static void iwl3945_print_hex_dump(int level, void *p, u32 len)
172{
173#ifdef CONFIG_IWL3945_DEBUG
174 if (!(iwl3945_debug_level & level))
175 return;
176
177 print_hex_dump(KERN_DEBUG, "iwl data: ", DUMP_PREFIX_OFFSET, 16, 1,
178 p, len, 1);
179#endif
180}
181
182/*************** DMA-QUEUE-GENERAL-FUNCTIONS ***** 161/*************** DMA-QUEUE-GENERAL-FUNCTIONS *****
183 * DMA services 162 * DMA services
184 * 163 *
@@ -204,7 +183,7 @@ static void iwl3945_print_hex_dump(int level, void *p, u32 len)
204 * (#0-3) for data tx via EDCA. An additional 2 HCCA queues are unused. 183 * (#0-3) for data tx via EDCA. An additional 2 HCCA queues are unused.
205 ***************************************************/ 184 ***************************************************/
206 185
207static int iwl3945_queue_space(const struct iwl3945_queue *q) 186int iwl3945_queue_space(const struct iwl3945_queue *q)
208{ 187{
209 int s = q->read_ptr - q->write_ptr; 188 int s = q->read_ptr - q->write_ptr;
210 189
@@ -220,33 +199,14 @@ static int iwl3945_queue_space(const struct iwl3945_queue *q)
220 return s; 199 return s;
221} 200}
222 201
223/** 202int iwl3945_x2_queue_used(const struct iwl3945_queue *q, int i)
224 * iwl3945_queue_inc_wrap - increment queue index, wrap back to beginning
225 * @index -- current index
226 * @n_bd -- total number of entries in queue (must be power of 2)
227 */
228static inline int iwl3945_queue_inc_wrap(int index, int n_bd)
229{
230 return ++index & (n_bd - 1);
231}
232
233/**
234 * iwl3945_queue_dec_wrap - increment queue index, wrap back to end
235 * @index -- current index
236 * @n_bd -- total number of entries in queue (must be power of 2)
237 */
238static inline int iwl3945_queue_dec_wrap(int index, int n_bd)
239{
240 return --index & (n_bd - 1);
241}
242
243static inline int x2_queue_used(const struct iwl3945_queue *q, int i)
244{ 203{
245 return q->write_ptr > q->read_ptr ? 204 return q->write_ptr > q->read_ptr ?
246 (i >= q->read_ptr && i < q->write_ptr) : 205 (i >= q->read_ptr && i < q->write_ptr) :
247 !(i < q->read_ptr && i >= q->write_ptr); 206 !(i < q->read_ptr && i >= q->write_ptr);
248} 207}
249 208
209
250static inline u8 get_cmd_index(struct iwl3945_queue *q, u32 index, int is_huge) 210static inline u8 get_cmd_index(struct iwl3945_queue *q, u32 index, int is_huge)
251{ 211{
252 /* This is for scan command, the big buffer at end of command array */ 212 /* This is for scan command, the big buffer at end of command array */
@@ -267,8 +227,8 @@ static int iwl3945_queue_init(struct iwl3945_priv *priv, struct iwl3945_queue *q
267 q->n_window = slots_num; 227 q->n_window = slots_num;
268 q->id = id; 228 q->id = id;
269 229
270 /* count must be power-of-two size, otherwise iwl3945_queue_inc_wrap 230 /* count must be power-of-two size, otherwise iwl_queue_inc_wrap
271 * and iwl3945_queue_dec_wrap are broken. */ 231 * and iwl_queue_dec_wrap are broken. */
272 BUG_ON(!is_power_of_2(count)); 232 BUG_ON(!is_power_of_2(count));
273 233
274 /* slots_num must be power-of-two size, otherwise 234 /* slots_num must be power-of-two size, otherwise
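
The queue code above relies on the ring size being a power of two so that the inc/dec wrap helpers (shown being removed from this file in favor of the shared iwl_queue_inc_wrap/iwl_queue_dec_wrap) can wrap with a mask instead of a modulo; with a non-power-of-two n_bd the mask would skip entries, which is what the BUG_ON(!is_power_of_2(count)) guards against. A small standalone check of that invariant, with names chosen for the example rather than taken from the driver:

    #include <assert.h>
    #include <stdio.h>

    /* Wrap helpers as used for TX queue indexes: valid only when n_bd is a power of two. */
    static int queue_inc_wrap(int index, int n_bd)
    {
            return ++index & (n_bd - 1);
    }

    static int queue_dec_wrap(int index, int n_bd)
    {
            return --index & (n_bd - 1);
    }

    int main(void)
    {
            const int n_bd = 8;                 /* must be a power of two */
            int idx = n_bd - 1;

            assert((n_bd & (n_bd - 1)) == 0);   /* the is_power_of_2() precondition */
            idx = queue_inc_wrap(idx, n_bd);    /* wraps 7 -> 0 */
            printf("after inc: %d\n", idx);
            idx = queue_dec_wrap(idx, n_bd);    /* wraps 0 -> 7 */
            printf("after dec: %d\n", idx);
            return 0;
    }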
@@ -368,7 +328,7 @@ int iwl3945_tx_queue_init(struct iwl3945_priv *priv,
368 txq->need_update = 0; 328 txq->need_update = 0;
369 329
370 /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise 330 /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
371 * iwl3945_queue_inc_wrap and iwl3945_queue_dec_wrap are broken. */ 331 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
372 BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1)); 332 BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
373 333
374 /* Initialize queue high/low-water, head/tail indexes */ 334 /* Initialize queue high/low-water, head/tail indexes */
@@ -399,7 +359,7 @@ void iwl3945_tx_queue_free(struct iwl3945_priv *priv, struct iwl3945_tx_queue *t
399 359
400 /* first, empty all BD's */ 360 /* first, empty all BD's */
401 for (; q->write_ptr != q->read_ptr; 361 for (; q->write_ptr != q->read_ptr;
402 q->read_ptr = iwl3945_queue_inc_wrap(q->read_ptr, q->n_bd)) 362 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd))
403 iwl3945_hw_txq_free_tfd(priv, txq); 363 iwl3945_hw_txq_free_tfd(priv, txq);
404 364
405 len = sizeof(struct iwl3945_cmd) * q->n_window; 365 len = sizeof(struct iwl3945_cmd) * q->n_window;
@@ -547,7 +507,7 @@ u8 iwl3945_add_station(struct iwl3945_priv *priv, const u8 *addr, int is_ap, u8
547 station->sta.sta.sta_id = index; 507 station->sta.sta.sta_id = index;
548 station->sta.station_flags = 0; 508 station->sta.station_flags = 0;
549 509
550 if (priv->phymode == MODE_IEEE80211A) 510 if (priv->band == IEEE80211_BAND_5GHZ)
551 rate = IWL_RATE_6M_PLCP; 511 rate = IWL_RATE_6M_PLCP;
552 else 512 else
553 rate = IWL_RATE_1M_PLCP; 513 rate = IWL_RATE_1M_PLCP;
@@ -738,7 +698,7 @@ static int iwl3945_enqueue_hcmd(struct iwl3945_priv *priv, struct iwl3945_host_c
738 txq->need_update = 1; 698 txq->need_update = 1;
739 699
740 /* Increment and update queue's write index */ 700 /* Increment and update queue's write index */
741 q->write_ptr = iwl3945_queue_inc_wrap(q->write_ptr, q->n_bd); 701 q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
742 ret = iwl3945_tx_queue_update_write_ptr(priv, txq); 702 ret = iwl3945_tx_queue_update_write_ptr(priv, txq);
743 703
744 spin_unlock_irqrestore(&priv->hcmd_lock, flags); 704 spin_unlock_irqrestore(&priv->hcmd_lock, flags);
@@ -773,17 +733,17 @@ static int iwl3945_send_cmd_sync(struct iwl3945_priv *priv, struct iwl3945_host_
773{ 733{
774 int cmd_idx; 734 int cmd_idx;
775 int ret; 735 int ret;
776 static atomic_t entry = ATOMIC_INIT(0); /* reentrance protection */
777 736
778 BUG_ON(cmd->meta.flags & CMD_ASYNC); 737 BUG_ON(cmd->meta.flags & CMD_ASYNC);
779 738
780 /* A synchronous command can not have a callback set. */ 739 /* A synchronous command can not have a callback set. */
781 BUG_ON(cmd->meta.u.callback != NULL); 740 BUG_ON(cmd->meta.u.callback != NULL);
782 741
783 if (atomic_xchg(&entry, 1)) { 742 if (test_and_set_bit(STATUS_HCMD_SYNC_ACTIVE, &priv->status)) {
784 IWL_ERROR("Error sending %s: Already sending a host command\n", 743 IWL_ERROR("Error sending %s: Already sending a host command\n",
785 get_cmd_string(cmd->id)); 744 get_cmd_string(cmd->id));
786 return -EBUSY; 745 ret = -EBUSY;
746 goto out;
787 } 747 }
788 748
789 set_bit(STATUS_HCMD_ACTIVE, &priv->status); 749 set_bit(STATUS_HCMD_ACTIVE, &priv->status);
@@ -853,7 +813,7 @@ fail:
853 cmd->meta.u.skb = NULL; 813 cmd->meta.u.skb = NULL;
854 } 814 }
855out: 815out:
856 atomic_set(&entry, 0); 816 clear_bit(STATUS_HCMD_SYNC_ACTIVE, &priv->status);
857 return ret; 817 return ret;
858} 818}
859 819
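
The hunk above replaces a function-static reentrancy guard (atomic_xchg on a static atomic_t) with a per-device STATUS_HCMD_SYNC_ACTIVE bit in priv->status, so two adapters no longer share a single lock-out flag and the error path can fall through to the common out: label. A rough user-space illustration of the same test-and-set pattern using C11 atomics; the struct and names are invented for the example and are not driver code:

    #include <stdatomic.h>
    #include <stdio.h>

    /* Per-device state: each device carries its own "sync command active" flag. */
    struct device_state {
            atomic_flag hcmd_sync_active;
    };

    /* Try to enter the synchronous-command path; refuse if one is already running. */
    static int send_cmd_sync(struct device_state *dev, const char *cmd)
    {
            if (atomic_flag_test_and_set(&dev->hcmd_sync_active)) {
                    printf("%s: already sending a host command, rejecting\n", cmd);
                    return -1;
            }

            printf("%s: sending\n", cmd);
            /* ... wait for the response here ... */

            atomic_flag_clear(&dev->hcmd_sync_active);  /* the clear_bit() in the out: path */
            return 0;
    }

    int main(void)
    {
            struct device_state dev = { .hcmd_sync_active = ATOMIC_FLAG_INIT };

            send_cmd_sync(&dev, "RXON");
            send_cmd_sync(&dev, "ADD_STA");  /* succeeds: the first call cleared the flag */
            return 0;
    }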
@@ -894,35 +854,37 @@ int iwl3945_send_statistics_request(struct iwl3945_priv *priv)
894 854
895/** 855/**
896 * iwl3945_set_rxon_channel - Set the phymode and channel values in staging RXON 856 * iwl3945_set_rxon_channel - Set the phymode and channel values in staging RXON
897 * @phymode: MODE_IEEE80211A sets to 5.2GHz; all else set to 2.4GHz 857 * @band: 2.4 or 5 GHz band
898 * @channel: Any channel valid for the requested phymode 858 * @channel: Any channel valid for the requested band
899 859
900 * In addition to setting the staging RXON, priv->phymode is also set. 860 * In addition to setting the staging RXON, priv->band is also set.
901 * 861 *
902 * NOTE: Does not commit to the hardware; it sets appropriate bit fields 862 * NOTE: Does not commit to the hardware; it sets appropriate bit fields
903 * in the staging RXON flag structure based on the phymode 863 * in the staging RXON flag structure based on the band
904 */ 864 */
905static int iwl3945_set_rxon_channel(struct iwl3945_priv *priv, u8 phymode, u16 channel) 865static int iwl3945_set_rxon_channel(struct iwl3945_priv *priv,
866 enum ieee80211_band band,
867 u16 channel)
906{ 868{
907 if (!iwl3945_get_channel_info(priv, phymode, channel)) { 869 if (!iwl3945_get_channel_info(priv, band, channel)) {
908 IWL_DEBUG_INFO("Could not set channel to %d [%d]\n", 870 IWL_DEBUG_INFO("Could not set channel to %d [%d]\n",
909 channel, phymode); 871 channel, band);
910 return -EINVAL; 872 return -EINVAL;
911 } 873 }
912 874
913 if ((le16_to_cpu(priv->staging_rxon.channel) == channel) && 875 if ((le16_to_cpu(priv->staging_rxon.channel) == channel) &&
914 (priv->phymode == phymode)) 876 (priv->band == band))
915 return 0; 877 return 0;
916 878
917 priv->staging_rxon.channel = cpu_to_le16(channel); 879 priv->staging_rxon.channel = cpu_to_le16(channel);
918 if (phymode == MODE_IEEE80211A) 880 if (band == IEEE80211_BAND_5GHZ)
919 priv->staging_rxon.flags &= ~RXON_FLG_BAND_24G_MSK; 881 priv->staging_rxon.flags &= ~RXON_FLG_BAND_24G_MSK;
920 else 882 else
921 priv->staging_rxon.flags |= RXON_FLG_BAND_24G_MSK; 883 priv->staging_rxon.flags |= RXON_FLG_BAND_24G_MSK;
922 884
923 priv->phymode = phymode; 885 priv->band = band;
924 886
925 IWL_DEBUG_INFO("Staging channel set to %d [%d]\n", channel, phymode); 887 IWL_DEBUG_INFO("Staging channel set to %d [%d]\n", channel, band);
926 888
927 return 0; 889 return 0;
928} 890}
@@ -1210,8 +1172,7 @@ static int iwl3945_commit_rxon(struct iwl3945_priv *priv)
1210 return -EIO; 1172 return -EIO;
1211 } 1173 }
1212 1174
1213 /* Init the hardware's rate fallback order based on the 1175 /* Init the hardware's rate fallback order based on the band */
1214 * phymode */
1215 rc = iwl3945_init_hw_rate_table(priv); 1176 rc = iwl3945_init_hw_rate_table(priv);
1216 if (rc) { 1177 if (rc) {
1217 IWL_ERROR("Error setting HW rate table: %02X\n", rc); 1178 IWL_ERROR("Error setting HW rate table: %02X\n", rc);
@@ -1635,151 +1596,6 @@ int iwl3945_eeprom_init(struct iwl3945_priv *priv)
1635 return 0; 1596 return 0;
1636} 1597}
1637 1598
1638/******************************************************************************
1639 *
1640 * Misc. internal state and helper functions
1641 *
1642 ******************************************************************************/
1643#ifdef CONFIG_IWL3945_DEBUG
1644
1645/**
1646 * iwl3945_report_frame - dump frame to syslog during debug sessions
1647 *
1648 * You may hack this function to show different aspects of received frames,
1649 * including selective frame dumps.
1650 * group100 parameter selects whether to show 1 out of 100 good frames.
1651 */
1652void iwl3945_report_frame(struct iwl3945_priv *priv,
1653 struct iwl3945_rx_packet *pkt,
1654 struct ieee80211_hdr *header, int group100)
1655{
1656 u32 to_us;
1657 u32 print_summary = 0;
1658 u32 print_dump = 0; /* set to 1 to dump all frames' contents */
1659 u32 hundred = 0;
1660 u32 dataframe = 0;
1661 u16 fc;
1662 u16 seq_ctl;
1663 u16 channel;
1664 u16 phy_flags;
1665 int rate_sym;
1666 u16 length;
1667 u16 status;
1668 u16 bcn_tmr;
1669 u32 tsf_low;
1670 u64 tsf;
1671 u8 rssi;
1672 u8 agc;
1673 u16 sig_avg;
1674 u16 noise_diff;
1675 struct iwl3945_rx_frame_stats *rx_stats = IWL_RX_STATS(pkt);
1676 struct iwl3945_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt);
1677 struct iwl3945_rx_frame_end *rx_end = IWL_RX_END(pkt);
1678 u8 *data = IWL_RX_DATA(pkt);
1679
1680 /* MAC header */
1681 fc = le16_to_cpu(header->frame_control);
1682 seq_ctl = le16_to_cpu(header->seq_ctrl);
1683
1684 /* metadata */
1685 channel = le16_to_cpu(rx_hdr->channel);
1686 phy_flags = le16_to_cpu(rx_hdr->phy_flags);
1687 rate_sym = rx_hdr->rate;
1688 length = le16_to_cpu(rx_hdr->len);
1689
1690 /* end-of-frame status and timestamp */
1691 status = le32_to_cpu(rx_end->status);
1692 bcn_tmr = le32_to_cpu(rx_end->beacon_timestamp);
1693 tsf_low = le64_to_cpu(rx_end->timestamp) & 0x0ffffffff;
1694 tsf = le64_to_cpu(rx_end->timestamp);
1695
1696 /* signal statistics */
1697 rssi = rx_stats->rssi;
1698 agc = rx_stats->agc;
1699 sig_avg = le16_to_cpu(rx_stats->sig_avg);
1700 noise_diff = le16_to_cpu(rx_stats->noise_diff);
1701
1702 to_us = !compare_ether_addr(header->addr1, priv->mac_addr);
1703
1704 /* if data frame is to us and all is good,
1705 * (optionally) print summary for only 1 out of every 100 */
1706 if (to_us && (fc & ~IEEE80211_FCTL_PROTECTED) ==
1707 (IEEE80211_FCTL_FROMDS | IEEE80211_FTYPE_DATA)) {
1708 dataframe = 1;
1709 if (!group100)
1710 print_summary = 1; /* print each frame */
1711 else if (priv->framecnt_to_us < 100) {
1712 priv->framecnt_to_us++;
1713 print_summary = 0;
1714 } else {
1715 priv->framecnt_to_us = 0;
1716 print_summary = 1;
1717 hundred = 1;
1718 }
1719 } else {
1720 /* print summary for all other frames */
1721 print_summary = 1;
1722 }
1723
1724 if (print_summary) {
1725 char *title;
1726 u32 rate;
1727
1728 if (hundred)
1729 title = "100Frames";
1730 else if (fc & IEEE80211_FCTL_RETRY)
1731 title = "Retry";
1732 else if (ieee80211_is_assoc_response(fc))
1733 title = "AscRsp";
1734 else if (ieee80211_is_reassoc_response(fc))
1735 title = "RasRsp";
1736 else if (ieee80211_is_probe_response(fc)) {
1737 title = "PrbRsp";
1738 print_dump = 1; /* dump frame contents */
1739 } else if (ieee80211_is_beacon(fc)) {
1740 title = "Beacon";
1741 print_dump = 1; /* dump frame contents */
1742 } else if (ieee80211_is_atim(fc))
1743 title = "ATIM";
1744 else if (ieee80211_is_auth(fc))
1745 title = "Auth";
1746 else if (ieee80211_is_deauth(fc))
1747 title = "DeAuth";
1748 else if (ieee80211_is_disassoc(fc))
1749 title = "DisAssoc";
1750 else
1751 title = "Frame";
1752
1753 rate = iwl3945_rate_index_from_plcp(rate_sym);
1754 if (rate == -1)
1755 rate = 0;
1756 else
1757 rate = iwl3945_rates[rate].ieee / 2;
1758
1759 /* print frame summary.
1760 * MAC addresses show just the last byte (for brevity),
1761 * but you can hack it to show more, if you'd like to. */
1762 if (dataframe)
1763 IWL_DEBUG_RX("%s: mhd=0x%04x, dst=0x%02x, "
1764 "len=%u, rssi=%d, chnl=%d, rate=%u, \n",
1765 title, fc, header->addr1[5],
1766 length, rssi, channel, rate);
1767 else {
1768 /* src/dst addresses assume managed mode */
1769 IWL_DEBUG_RX("%s: 0x%04x, dst=0x%02x, "
1770 "src=0x%02x, rssi=%u, tim=%lu usec, "
1771 "phy=0x%02x, chnl=%d\n",
1772 title, fc, header->addr1[5],
1773 header->addr3[5], rssi,
1774 tsf_low - priv->scan_start_tsf,
1775 phy_flags, channel);
1776 }
1777 }
1778 if (print_dump)
1779 iwl3945_print_hex_dump(IWL_DL_RX, data, length);
1780}
1781#endif
1782
1783static void iwl3945_unset_hw_setting(struct iwl3945_priv *priv) 1599static void iwl3945_unset_hw_setting(struct iwl3945_priv *priv)
1784{ 1600{
1785 if (priv->hw_setting.shared_virt) 1601 if (priv->hw_setting.shared_virt)
@@ -1915,7 +1731,6 @@ static u16 iwl3945_fill_probe_req(struct iwl3945_priv *priv,
1915/* 1731/*
1916 * QoS support 1732 * QoS support
1917*/ 1733*/
1918#ifdef CONFIG_IWL3945_QOS
1919static int iwl3945_send_qos_params_command(struct iwl3945_priv *priv, 1734static int iwl3945_send_qos_params_command(struct iwl3945_priv *priv,
1920 struct iwl3945_qosparam_cmd *qos) 1735 struct iwl3945_qosparam_cmd *qos)
1921{ 1736{
@@ -2044,7 +1859,6 @@ static void iwl3945_activate_qos(struct iwl3945_priv *priv, u8 force)
2044 } 1859 }
2045} 1860}
2046 1861
2047#endif /* CONFIG_IWL3945_QOS */
2048/* 1862/*
2049 * Power management (not Tx power!) functions 1863 * Power management (not Tx power!) functions
2050 */ 1864 */
@@ -2244,39 +2058,13 @@ int iwl3945_is_network_packet(struct iwl3945_priv *priv, struct ieee80211_hdr *h
2244 return !compare_ether_addr(header->addr2, priv->bssid); 2058 return !compare_ether_addr(header->addr2, priv->bssid);
2245 /* packets to our adapter go through */ 2059 /* packets to our adapter go through */
2246 return !compare_ether_addr(header->addr1, priv->mac_addr); 2060 return !compare_ether_addr(header->addr1, priv->mac_addr);
2061 default:
2062 return 1;
2247 } 2063 }
2248 2064
2249 return 1; 2065 return 1;
2250} 2066}
2251 2067
2252#define TX_STATUS_ENTRY(x) case TX_STATUS_FAIL_ ## x: return #x
2253
2254static const char *iwl3945_get_tx_fail_reason(u32 status)
2255{
2256 switch (status & TX_STATUS_MSK) {
2257 case TX_STATUS_SUCCESS:
2258 return "SUCCESS";
2259 TX_STATUS_ENTRY(SHORT_LIMIT);
2260 TX_STATUS_ENTRY(LONG_LIMIT);
2261 TX_STATUS_ENTRY(FIFO_UNDERRUN);
2262 TX_STATUS_ENTRY(MGMNT_ABORT);
2263 TX_STATUS_ENTRY(NEXT_FRAG);
2264 TX_STATUS_ENTRY(LIFE_EXPIRE);
2265 TX_STATUS_ENTRY(DEST_PS);
2266 TX_STATUS_ENTRY(ABORTED);
2267 TX_STATUS_ENTRY(BT_RETRY);
2268 TX_STATUS_ENTRY(STA_INVALID);
2269 TX_STATUS_ENTRY(FRAG_DROPPED);
2270 TX_STATUS_ENTRY(TID_DISABLE);
2271 TX_STATUS_ENTRY(FRAME_FLUSHED);
2272 TX_STATUS_ENTRY(INSUFFICIENT_CF_POLL);
2273 TX_STATUS_ENTRY(TX_LOCKED);
2274 TX_STATUS_ENTRY(NO_BEACON_ON_RADAR);
2275 }
2276
2277 return "UNKNOWN";
2278}
2279
2280/** 2068/**
2281 * iwl3945_scan_cancel - Cancel any currently executing HW scan 2069 * iwl3945_scan_cancel - Cancel any currently executing HW scan
2282 * 2070 *
@@ -2461,9 +2249,10 @@ static int iwl3945_set_rxon_hwcrypto(struct iwl3945_priv *priv, int hw_decrypt)
2461 return 0; 2249 return 0;
2462} 2250}
2463 2251
2464static void iwl3945_set_flags_for_phymode(struct iwl3945_priv *priv, u8 phymode) 2252static void iwl3945_set_flags_for_phymode(struct iwl3945_priv *priv,
2253 enum ieee80211_band band)
2465{ 2254{
2466 if (phymode == MODE_IEEE80211A) { 2255 if (band == IEEE80211_BAND_5GHZ) {
2467 priv->staging_rxon.flags &= 2256 priv->staging_rxon.flags &=
2468 ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK 2257 ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK
2469 | RXON_FLG_CCK_MSK); 2258 | RXON_FLG_CCK_MSK);
@@ -2515,6 +2304,9 @@ static void iwl3945_connection_init_rx_config(struct iwl3945_priv *priv)
2515 priv->staging_rxon.filter_flags = RXON_FILTER_PROMISC_MSK | 2304 priv->staging_rxon.filter_flags = RXON_FILTER_PROMISC_MSK |
2516 RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_ACCEPT_GRP_MSK; 2305 RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_ACCEPT_GRP_MSK;
2517 break; 2306 break;
2307 default:
2308 IWL_ERROR("Unsupported interface type %d\n", priv->iw_mode);
2309 break;
2518 } 2310 }
2519 2311
2520#if 0 2312#if 0
@@ -2526,7 +2318,7 @@ static void iwl3945_connection_init_rx_config(struct iwl3945_priv *priv)
2526 priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK; 2318 priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
2527#endif 2319#endif
2528 2320
2529 ch_info = iwl3945_get_channel_info(priv, priv->phymode, 2321 ch_info = iwl3945_get_channel_info(priv, priv->band,
2530 le16_to_cpu(priv->staging_rxon.channel)); 2322 le16_to_cpu(priv->staging_rxon.channel));
2531 2323
2532 if (!ch_info) 2324 if (!ch_info)
@@ -2542,11 +2334,11 @@ static void iwl3945_connection_init_rx_config(struct iwl3945_priv *priv)
2542 2334
2543 priv->staging_rxon.channel = cpu_to_le16(ch_info->channel); 2335 priv->staging_rxon.channel = cpu_to_le16(ch_info->channel);
2544 if (is_channel_a_band(ch_info)) 2336 if (is_channel_a_band(ch_info))
2545 priv->phymode = MODE_IEEE80211A; 2337 priv->band = IEEE80211_BAND_5GHZ;
2546 else 2338 else
2547 priv->phymode = MODE_IEEE80211G; 2339 priv->band = IEEE80211_BAND_2GHZ;
2548 2340
2549 iwl3945_set_flags_for_phymode(priv, priv->phymode); 2341 iwl3945_set_flags_for_phymode(priv, priv->band);
2550 2342
2551 priv->staging_rxon.ofdm_basic_rates = 2343 priv->staging_rxon.ofdm_basic_rates =
2552 (IWL_OFDM_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF; 2344 (IWL_OFDM_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
@@ -2560,7 +2352,7 @@ static int iwl3945_set_mode(struct iwl3945_priv *priv, int mode)
2560 const struct iwl3945_channel_info *ch_info; 2352 const struct iwl3945_channel_info *ch_info;
2561 2353
2562 ch_info = iwl3945_get_channel_info(priv, 2354 ch_info = iwl3945_get_channel_info(priv,
2563 priv->phymode, 2355 priv->band,
2564 le16_to_cpu(priv->staging_rxon.channel)); 2356 le16_to_cpu(priv->staging_rxon.channel));
2565 2357
2566 if (!ch_info || !is_channel_ibss(ch_info)) { 2358 if (!ch_info || !is_channel_ibss(ch_info)) {
@@ -2694,8 +2486,12 @@ static void iwl3945_build_tx_cmd_basic(struct iwl3945_priv *priv,
2694 cmd->cmd.tx.timeout.pm_frame_timeout = cpu_to_le16(3); 2486 cmd->cmd.tx.timeout.pm_frame_timeout = cpu_to_le16(3);
2695 else 2487 else
2696 cmd->cmd.tx.timeout.pm_frame_timeout = cpu_to_le16(2); 2488 cmd->cmd.tx.timeout.pm_frame_timeout = cpu_to_le16(2);
2697 } else 2489 } else {
2698 cmd->cmd.tx.timeout.pm_frame_timeout = 0; 2490 cmd->cmd.tx.timeout.pm_frame_timeout = 0;
2491#ifdef CONFIG_IWL3945_LEDS
2492 priv->rxtxpackets += le16_to_cpu(cmd->cmd.tx.len);
2493#endif
2494 }
2699 2495
2700 cmd->cmd.tx.driver_txop = 0; 2496 cmd->cmd.tx.driver_txop = 0;
2701 cmd->cmd.tx.tx_flags = tx_flags; 2497 cmd->cmd.tx.tx_flags = tx_flags;
@@ -2792,7 +2588,7 @@ static int iwl3945_tx_skb(struct iwl3945_priv *priv,
2792 goto drop_unlock; 2588 goto drop_unlock;
2793 } 2589 }
2794 2590
2795 if ((ctl->tx_rate & 0xFF) == IWL_INVALID_RATE) { 2591 if ((ctl->tx_rate->hw_value & 0xFF) == IWL_INVALID_RATE) {
2796 IWL_ERROR("ERROR: No TX rate available.\n"); 2592 IWL_ERROR("ERROR: No TX rate available.\n");
2797 goto drop_unlock; 2593 goto drop_unlock;
2798 } 2594 }
@@ -2963,7 +2759,7 @@ static int iwl3945_tx_skb(struct iwl3945_priv *priv,
2963 ieee80211_get_hdrlen(fc)); 2759 ieee80211_get_hdrlen(fc));
2964 2760
2965 /* Tell device the write index *just past* this latest filled TFD */ 2761 /* Tell device the write index *just past* this latest filled TFD */
2966 q->write_ptr = iwl3945_queue_inc_wrap(q->write_ptr, q->n_bd); 2762 q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
2967 rc = iwl3945_tx_queue_update_write_ptr(priv, txq); 2763 rc = iwl3945_tx_queue_update_write_ptr(priv, txq);
2968 spin_unlock_irqrestore(&priv->lock, flags); 2764 spin_unlock_irqrestore(&priv->lock, flags);
2969 2765
@@ -2992,12 +2788,12 @@ drop:
2992 2788
2993static void iwl3945_set_rate(struct iwl3945_priv *priv) 2789static void iwl3945_set_rate(struct iwl3945_priv *priv)
2994{ 2790{
2995 const struct ieee80211_hw_mode *hw = NULL; 2791 const struct ieee80211_supported_band *sband = NULL;
2996 struct ieee80211_rate *rate; 2792 struct ieee80211_rate *rate;
2997 int i; 2793 int i;
2998 2794
2999 hw = iwl3945_get_hw_mode(priv, priv->phymode); 2795 sband = iwl3945_get_band(priv, priv->band);
3000 if (!hw) { 2796 if (!sband) {
3001 IWL_ERROR("Failed to set rate: unable to get hw mode\n"); 2797 IWL_ERROR("Failed to set rate: unable to get hw mode\n");
3002 return; 2798 return;
3003 } 2799 }
@@ -3005,24 +2801,17 @@ static void iwl3945_set_rate(struct iwl3945_priv *priv)
3005 priv->active_rate = 0; 2801 priv->active_rate = 0;
3006 priv->active_rate_basic = 0; 2802 priv->active_rate_basic = 0;
3007 2803
3008 IWL_DEBUG_RATE("Setting rates for 802.11%c\n", 2804 IWL_DEBUG_RATE("Setting rates for %s GHz\n",
3009 hw->mode == MODE_IEEE80211A ? 2805 sband->band == IEEE80211_BAND_2GHZ ? "2.4" : "5");
3010 'a' : ((hw->mode == MODE_IEEE80211B) ? 'b' : 'g')); 2806
3011 2807 for (i = 0; i < sband->n_bitrates; i++) {
3012 for (i = 0; i < hw->num_rates; i++) { 2808 rate = &sband->bitrates[i];
3013 rate = &(hw->rates[i]); 2809 if ((rate->hw_value < IWL_RATE_COUNT) &&
3014 if ((rate->val < IWL_RATE_COUNT) && 2810 !(rate->flags & IEEE80211_CHAN_DISABLED)) {
3015 (rate->flags & IEEE80211_RATE_SUPPORTED)) { 2811 IWL_DEBUG_RATE("Adding rate index %d (plcp %d)\n",
3016 IWL_DEBUG_RATE("Adding rate index %d (plcp %d)%s\n", 2812 rate->hw_value, iwl3945_rates[rate->hw_value].plcp);
3017 rate->val, iwl3945_rates[rate->val].plcp, 2813 priv->active_rate |= (1 << rate->hw_value);
3018 (rate->flags & IEEE80211_RATE_BASIC) ? 2814 }
3019 "*" : "");
3020 priv->active_rate |= (1 << rate->val);
3021 if (rate->flags & IEEE80211_RATE_BASIC)
3022 priv->active_rate_basic |= (1 << rate->val);
3023 } else
3024 IWL_DEBUG_RATE("Not adding rate %d (plcp %d)\n",
3025 rate->val, iwl3945_rates[rate->val].plcp);
3026 } 2815 }
3027 2816
3028 IWL_DEBUG_RATE("Set active_rate = %0x, active_rate_basic = %0x\n", 2817 IWL_DEBUG_RATE("Set active_rate = %0x, active_rate_basic = %0x\n",
@@ -3330,127 +3119,6 @@ static int iwl3945_get_measurement(struct iwl3945_priv *priv,
3330} 3119}
3331#endif 3120#endif
3332 3121
3333static void iwl3945_txstatus_to_ieee(struct iwl3945_priv *priv,
3334 struct iwl3945_tx_info *tx_sta)
3335{
3336
3337 tx_sta->status.ack_signal = 0;
3338 tx_sta->status.excessive_retries = 0;
3339 tx_sta->status.queue_length = 0;
3340 tx_sta->status.queue_number = 0;
3341
3342 if (in_interrupt())
3343 ieee80211_tx_status_irqsafe(priv->hw,
3344 tx_sta->skb[0], &(tx_sta->status));
3345 else
3346 ieee80211_tx_status(priv->hw,
3347 tx_sta->skb[0], &(tx_sta->status));
3348
3349 tx_sta->skb[0] = NULL;
3350}
3351
3352/**
3353 * iwl3945_tx_queue_reclaim - Reclaim Tx queue entries already Tx'd
3354 *
3355 * When FW advances 'R' index, all entries between old and new 'R' index
3356 * need to be reclaimed. As result, some free space forms. If there is
3357 * enough free space (> low mark), wake the stack that feeds us.
3358 */
3359static int iwl3945_tx_queue_reclaim(struct iwl3945_priv *priv, int txq_id, int index)
3360{
3361 struct iwl3945_tx_queue *txq = &priv->txq[txq_id];
3362 struct iwl3945_queue *q = &txq->q;
3363 int nfreed = 0;
3364
3365 if ((index >= q->n_bd) || (x2_queue_used(q, index) == 0)) {
3366 IWL_ERROR("Read index for DMA queue txq id (%d), index %d, "
3367 "is out of range [0-%d] %d %d.\n", txq_id,
3368 index, q->n_bd, q->write_ptr, q->read_ptr);
3369 return 0;
3370 }
3371
3372 for (index = iwl3945_queue_inc_wrap(index, q->n_bd);
3373 q->read_ptr != index;
3374 q->read_ptr = iwl3945_queue_inc_wrap(q->read_ptr, q->n_bd)) {
3375 if (txq_id != IWL_CMD_QUEUE_NUM) {
3376 iwl3945_txstatus_to_ieee(priv,
3377 &(txq->txb[txq->q.read_ptr]));
3378 iwl3945_hw_txq_free_tfd(priv, txq);
3379 } else if (nfreed > 1) {
3380 IWL_ERROR("HCMD skipped: index (%d) %d %d\n", index,
3381 q->write_ptr, q->read_ptr);
3382 queue_work(priv->workqueue, &priv->restart);
3383 }
3384 nfreed++;
3385 }
3386
3387 if (iwl3945_queue_space(q) > q->low_mark && (txq_id >= 0) &&
3388 (txq_id != IWL_CMD_QUEUE_NUM) &&
3389 priv->mac80211_registered)
3390 ieee80211_wake_queue(priv->hw, txq_id);
3391
3392
3393 return nfreed;
3394}
3395
3396static int iwl3945_is_tx_success(u32 status)
3397{
3398 return (status & 0xFF) == 0x1;
3399}
3400
3401/******************************************************************************
3402 *
3403 * Generic RX handler implementations
3404 *
3405 ******************************************************************************/
3406/**
3407 * iwl3945_rx_reply_tx - Handle Tx response
3408 */
3409static void iwl3945_rx_reply_tx(struct iwl3945_priv *priv,
3410 struct iwl3945_rx_mem_buffer *rxb)
3411{
3412 struct iwl3945_rx_packet *pkt = (void *)rxb->skb->data;
3413 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
3414 int txq_id = SEQ_TO_QUEUE(sequence);
3415 int index = SEQ_TO_INDEX(sequence);
3416 struct iwl3945_tx_queue *txq = &priv->txq[txq_id];
3417 struct ieee80211_tx_status *tx_status;
3418 struct iwl3945_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
3419 u32 status = le32_to_cpu(tx_resp->status);
3420
3421 if ((index >= txq->q.n_bd) || (x2_queue_used(&txq->q, index) == 0)) {
3422 IWL_ERROR("Read index for DMA queue txq_id (%d) index %d "
3423 "is out of range [0-%d] %d %d\n", txq_id,
3424 index, txq->q.n_bd, txq->q.write_ptr,
3425 txq->q.read_ptr);
3426 return;
3427 }
3428
3429 tx_status = &(txq->txb[txq->q.read_ptr].status);
3430
3431 tx_status->retry_count = tx_resp->failure_frame;
3432 tx_status->queue_number = status;
3433 tx_status->queue_length = tx_resp->bt_kill_count;
3434 tx_status->queue_length |= tx_resp->failure_rts;
3435
3436 tx_status->flags =
3437 iwl3945_is_tx_success(status) ? IEEE80211_TX_STATUS_ACK : 0;
3438
3439 tx_status->control.tx_rate = iwl3945_rate_index_from_plcp(tx_resp->rate);
3440
3441 IWL_DEBUG_TX("Tx queue %d Status %s (0x%08x) plcp rate %d retries %d\n",
3442 txq_id, iwl3945_get_tx_fail_reason(status), status,
3443 tx_resp->rate, tx_resp->failure_frame);
3444
3445 IWL_DEBUG_TX_REPLY("Tx queue reclaim %d\n", index);
3446 if (index != -1)
3447 iwl3945_tx_queue_reclaim(priv, txq_id, index);
3448
3449 if (iwl_check_bits(status, TX_ABORT_REQUIRED_MSK))
3450 IWL_ERROR("TODO: Implement Tx ABORT REQUIRED!!!\n");
3451}
3452
3453
3454static void iwl3945_rx_reply_alive(struct iwl3945_priv *priv, 3122static void iwl3945_rx_reply_alive(struct iwl3945_priv *priv,
3455 struct iwl3945_rx_mem_buffer *rxb) 3123 struct iwl3945_rx_mem_buffer *rxb)
3456{ 3124{
@@ -3797,13 +3465,44 @@ static void iwl3945_setup_rx_handlers(struct iwl3945_priv *priv)
3797 priv->rx_handlers[SCAN_COMPLETE_NOTIFICATION] = 3465 priv->rx_handlers[SCAN_COMPLETE_NOTIFICATION] =
3798 iwl3945_rx_scan_complete_notif; 3466 iwl3945_rx_scan_complete_notif;
3799 priv->rx_handlers[CARD_STATE_NOTIFICATION] = iwl3945_rx_card_state_notif; 3467 priv->rx_handlers[CARD_STATE_NOTIFICATION] = iwl3945_rx_card_state_notif;
3800 priv->rx_handlers[REPLY_TX] = iwl3945_rx_reply_tx;
3801 3468
3802 /* Set up hardware specific Rx handlers */ 3469 /* Set up hardware specific Rx handlers */
3803 iwl3945_hw_rx_handler_setup(priv); 3470 iwl3945_hw_rx_handler_setup(priv);
3804} 3471}
3805 3472
3806/** 3473/**
3474 * iwl3945_cmd_queue_reclaim - Reclaim CMD queue entries
3475 * When FW advances 'R' index, all entries between old and new 'R' index
3476 * need to be reclaimed.
3477 */
3478static void iwl3945_cmd_queue_reclaim(struct iwl3945_priv *priv,
3479 int txq_id, int index)
3480{
3481 struct iwl3945_tx_queue *txq = &priv->txq[txq_id];
3482 struct iwl3945_queue *q = &txq->q;
3483 int nfreed = 0;
3484
3485 if ((index >= q->n_bd) || (iwl3945_x2_queue_used(q, index) == 0)) {
3486 IWL_ERROR("Read index for DMA queue txq id (%d), index %d, "
3487 "is out of range [0-%d] %d %d.\n", txq_id,
3488 index, q->n_bd, q->write_ptr, q->read_ptr);
3489 return;
3490 }
3491
3492 for (index = iwl_queue_inc_wrap(index, q->n_bd); q->read_ptr != index;
3493 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
3494 if (nfreed > 1) {
3495 IWL_ERROR("HCMD skipped: index (%d) %d %d\n", index,
3496 q->write_ptr, q->read_ptr);
3497 queue_work(priv->workqueue, &priv->restart);
3498 break;
3499 }
3500 nfreed++;
3501 }
3502}
3503
3504
3505/**
3807 * iwl3945_tx_cmd_complete - Pull unused buffers off the queue and reclaim them 3506 * iwl3945_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
3808 * @rxb: Rx buffer to reclaim 3507 * @rxb: Rx buffer to reclaim
3809 * 3508 *
@@ -3822,12 +3521,6 @@ static void iwl3945_tx_cmd_complete(struct iwl3945_priv *priv,
3822 int cmd_index; 3521 int cmd_index;
3823 struct iwl3945_cmd *cmd; 3522 struct iwl3945_cmd *cmd;
3824 3523
3825 /* If a Tx command is being handled and it isn't in the actual
3826 * command queue then there a command routing bug has been introduced
3827 * in the queue management code. */
3828 if (txq_id != IWL_CMD_QUEUE_NUM)
3829 IWL_ERROR("Error wrong command queue %d command id 0x%X\n",
3830 txq_id, pkt->hdr.cmd);
3831 BUG_ON(txq_id != IWL_CMD_QUEUE_NUM); 3524 BUG_ON(txq_id != IWL_CMD_QUEUE_NUM);
3832 3525
3833 cmd_index = get_cmd_index(&priv->txq[IWL_CMD_QUEUE_NUM].q, index, huge); 3526 cmd_index = get_cmd_index(&priv->txq[IWL_CMD_QUEUE_NUM].q, index, huge);
@@ -3841,7 +3534,7 @@ static void iwl3945_tx_cmd_complete(struct iwl3945_priv *priv,
3841 !cmd->meta.u.callback(priv, cmd, rxb->skb)) 3534 !cmd->meta.u.callback(priv, cmd, rxb->skb))
3842 rxb->skb = NULL; 3535 rxb->skb = NULL;
3843 3536
3844 iwl3945_tx_queue_reclaim(priv, txq_id, index); 3537 iwl3945_cmd_queue_reclaim(priv, txq_id, index);
3845 3538
3846 if (!(cmd->meta.flags & CMD_ASYNC)) { 3539 if (!(cmd->meta.flags & CMD_ASYNC)) {
3847 clear_bit(STATUS_HCMD_ACTIVE, &priv->status); 3540 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
@@ -4460,6 +4153,16 @@ static void iwl3945_enable_interrupts(struct iwl3945_priv *priv)
4460 iwl3945_write32(priv, CSR_INT_MASK, CSR_INI_SET_MASK); 4153 iwl3945_write32(priv, CSR_INT_MASK, CSR_INI_SET_MASK);
4461} 4154}
4462 4155
4156
4157/* call this function to flush any scheduled tasklet */
4158static inline void iwl_synchronize_irq(struct iwl3945_priv *priv)
4159{
4160	/* wait to make sure we flush any pending tasklet */
4161 synchronize_irq(priv->pci_dev->irq);
4162 tasklet_kill(&priv->irq_tasklet);
4163}
4164
4165
4463static inline void iwl3945_disable_interrupts(struct iwl3945_priv *priv) 4166static inline void iwl3945_disable_interrupts(struct iwl3945_priv *priv)
4464{ 4167{
4465 clear_bit(STATUS_INT_ENABLED, &priv->status); 4168 clear_bit(STATUS_INT_ENABLED, &priv->status);
@@ -4521,8 +4224,7 @@ static void iwl3945_dump_nic_error_log(struct iwl3945_priv *priv)
4521 4224
4522 if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) { 4225 if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
4523 IWL_ERROR("Start IWL Error Log Dump:\n"); 4226 IWL_ERROR("Start IWL Error Log Dump:\n");
4524 IWL_ERROR("Status: 0x%08lX, Config: %08X count: %d\n", 4227 IWL_ERROR("Status: 0x%08lX, count: %d\n", priv->status, count);
4525 priv->status, priv->config, count);
4526 } 4228 }
4527 4229
4528 IWL_ERROR("Desc Time asrtPC blink2 " 4230 IWL_ERROR("Desc Time asrtPC blink2 "
@@ -4742,9 +4444,9 @@ static void iwl3945_irq_tasklet(struct iwl3945_priv *priv)
4742 * atomic, make sure that inta covers all the interrupts that 4444 * atomic, make sure that inta covers all the interrupts that
4743 * we've discovered, even if FH interrupt came in just after 4445 * we've discovered, even if FH interrupt came in just after
4744 * reading CSR_INT. */ 4446 * reading CSR_INT. */
4745 if (inta_fh & CSR_FH_INT_RX_MASK) 4447 if (inta_fh & CSR39_FH_INT_RX_MASK)
4746 inta |= CSR_INT_BIT_FH_RX; 4448 inta |= CSR_INT_BIT_FH_RX;
4747 if (inta_fh & CSR_FH_INT_TX_MASK) 4449 if (inta_fh & CSR39_FH_INT_TX_MASK)
4748 inta |= CSR_INT_BIT_FH_TX; 4450 inta |= CSR_INT_BIT_FH_TX;
4749 4451
4750 /* Now service all interrupt bits discovered above. */ 4452 /* Now service all interrupt bits discovered above. */
@@ -4792,7 +4494,7 @@ static void iwl3945_irq_tasklet(struct iwl3945_priv *priv)
4792 /* Queue restart only if RF_KILL switch was set to "kill" 4494 /* Queue restart only if RF_KILL switch was set to "kill"
4793 * when we loaded driver, and is now set to "enable". 4495 * when we loaded driver, and is now set to "enable".
4794 * After we're Alive, RF_KILL gets handled by 4496 * After we're Alive, RF_KILL gets handled by
4795 * iwl_rx_card_state_notif() */ 4497 * iwl3945_rx_card_state_notif() */
4796 if (!hw_rf_kill && !test_bit(STATUS_ALIVE, &priv->status)) { 4498 if (!hw_rf_kill && !test_bit(STATUS_ALIVE, &priv->status)) {
4797 clear_bit(STATUS_RF_KILL_HW, &priv->status); 4499 clear_bit(STATUS_RF_KILL_HW, &priv->status);
4798 queue_work(priv->workqueue, &priv->restart); 4500 queue_work(priv->workqueue, &priv->restart);
@@ -4860,7 +4562,9 @@ static void iwl3945_irq_tasklet(struct iwl3945_priv *priv)
4860 } 4562 }
4861 4563
4862 /* Re-enable all interrupts */ 4564 /* Re-enable all interrupts */
4863 iwl3945_enable_interrupts(priv); 4565 /* only Re-enable if disabled by irq */
4566 if (test_bit(STATUS_INT_ENABLED, &priv->status))
4567 iwl3945_enable_interrupts(priv);
4864 4568
4865#ifdef CONFIG_IWL3945_DEBUG 4569#ifdef CONFIG_IWL3945_DEBUG
4866 if (iwl3945_debug_level & (IWL_DL_ISR)) { 4570 if (iwl3945_debug_level & (IWL_DL_ISR)) {
@@ -4924,7 +4628,9 @@ unplugged:
4924 4628
4925 none: 4629 none:
4926 /* re-enable interrupts here since we don't have anything to service. */ 4630 /* re-enable interrupts here since we don't have anything to service. */
4927 iwl3945_enable_interrupts(priv); 4631 /* only Re-enable if disabled by irq */
4632 if (test_bit(STATUS_INT_ENABLED, &priv->status))
4633 iwl3945_enable_interrupts(priv);
4928 spin_unlock(&priv->lock); 4634 spin_unlock(&priv->lock);
4929 return IRQ_NONE; 4635 return IRQ_NONE;
4930} 4636}
@@ -5026,24 +4732,24 @@ static void iwl3945_init_band_reference(const struct iwl3945_priv *priv, int ban
5026 * Based on band and channel number. 4732 * Based on band and channel number.
5027 */ 4733 */
5028const struct iwl3945_channel_info *iwl3945_get_channel_info(const struct iwl3945_priv *priv, 4734const struct iwl3945_channel_info *iwl3945_get_channel_info(const struct iwl3945_priv *priv,
5029 int phymode, u16 channel) 4735 enum ieee80211_band band, u16 channel)
5030{ 4736{
5031 int i; 4737 int i;
5032 4738
5033 switch (phymode) { 4739 switch (band) {
5034 case MODE_IEEE80211A: 4740 case IEEE80211_BAND_5GHZ:
5035 for (i = 14; i < priv->channel_count; i++) { 4741 for (i = 14; i < priv->channel_count; i++) {
5036 if (priv->channel_info[i].channel == channel) 4742 if (priv->channel_info[i].channel == channel)
5037 return &priv->channel_info[i]; 4743 return &priv->channel_info[i];
5038 } 4744 }
5039 break; 4745 break;
5040 4746
5041 case MODE_IEEE80211B: 4747 case IEEE80211_BAND_2GHZ:
5042 case MODE_IEEE80211G:
5043 if (channel >= 1 && channel <= 14) 4748 if (channel >= 1 && channel <= 14)
5044 return &priv->channel_info[channel - 1]; 4749 return &priv->channel_info[channel - 1];
5045 break; 4750 break;
5046 4751 case IEEE80211_NUM_BANDS:
4752 WARN_ON(1);
5047 } 4753 }
5048 4754
5049 return NULL; 4755 return NULL;
@@ -5106,8 +4812,8 @@ static int iwl3945_init_channel_map(struct iwl3945_priv *priv)
5106 /* Loop through each band adding each of the channels */ 4812 /* Loop through each band adding each of the channels */
5107 for (ch = 0; ch < eeprom_ch_count; ch++) { 4813 for (ch = 0; ch < eeprom_ch_count; ch++) {
5108 ch_info->channel = eeprom_ch_index[ch]; 4814 ch_info->channel = eeprom_ch_index[ch];
5109 ch_info->phymode = (band == 1) ? MODE_IEEE80211B : 4815 ch_info->band = (band == 1) ? IEEE80211_BAND_2GHZ :
5110 MODE_IEEE80211A; 4816 IEEE80211_BAND_5GHZ;
5111 4817
5112 /* permanently store EEPROM's channel regulatory flags 4818 /* permanently store EEPROM's channel regulatory flags
5113 * and max power in channel info database. */ 4819 * and max power in channel info database. */
@@ -5134,11 +4840,12 @@ static int iwl3945_init_channel_map(struct iwl3945_priv *priv)
5134 ch_info->scan_power = eeprom_ch_info[ch].max_power_avg; 4840 ch_info->scan_power = eeprom_ch_info[ch].max_power_avg;
5135 ch_info->min_power = 0; 4841 ch_info->min_power = 0;
5136 4842
5137 IWL_DEBUG_INFO("Ch. %d [%sGHz] %s%s%s%s%s%s(0x%02x" 4843 IWL_DEBUG_INFO("Ch. %d [%sGHz] %s%s%s%s%s%s%s(0x%02x"
5138 " %ddBm): Ad-Hoc %ssupported\n", 4844 " %ddBm): Ad-Hoc %ssupported\n",
5139 ch_info->channel, 4845 ch_info->channel,
5140 is_channel_a_band(ch_info) ? 4846 is_channel_a_band(ch_info) ?
5141 "5.2" : "2.4", 4847 "5.2" : "2.4",
4848 CHECK_AND_PRINT(VALID),
5142 CHECK_AND_PRINT(IBSS), 4849 CHECK_AND_PRINT(IBSS),
5143 CHECK_AND_PRINT(ACTIVE), 4850 CHECK_AND_PRINT(ACTIVE),
5144 CHECK_AND_PRINT(RADAR), 4851 CHECK_AND_PRINT(RADAR),
@@ -5203,18 +4910,20 @@ static void iwl3945_free_channel_map(struct iwl3945_priv *priv)
5203#define IWL_PASSIVE_DWELL_BASE (100) 4910#define IWL_PASSIVE_DWELL_BASE (100)
5204#define IWL_CHANNEL_TUNE_TIME 5 4911#define IWL_CHANNEL_TUNE_TIME 5
5205 4912
5206static inline u16 iwl3945_get_active_dwell_time(struct iwl3945_priv *priv, int phymode) 4913static inline u16 iwl3945_get_active_dwell_time(struct iwl3945_priv *priv,
4914 enum ieee80211_band band)
5207{ 4915{
5208 if (phymode == MODE_IEEE80211A) 4916 if (band == IEEE80211_BAND_5GHZ)
5209 return IWL_ACTIVE_DWELL_TIME_52; 4917 return IWL_ACTIVE_DWELL_TIME_52;
5210 else 4918 else
5211 return IWL_ACTIVE_DWELL_TIME_24; 4919 return IWL_ACTIVE_DWELL_TIME_24;
5212} 4920}
5213 4921
5214static u16 iwl3945_get_passive_dwell_time(struct iwl3945_priv *priv, int phymode) 4922static u16 iwl3945_get_passive_dwell_time(struct iwl3945_priv *priv,
4923 enum ieee80211_band band)
5215{ 4924{
5216 u16 active = iwl3945_get_active_dwell_time(priv, phymode); 4925 u16 active = iwl3945_get_active_dwell_time(priv, band);
5217 u16 passive = (phymode != MODE_IEEE80211A) ? 4926 u16 passive = (band == IEEE80211_BAND_2GHZ) ?
5218 IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_24 : 4927 IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_24 :
5219 IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_52; 4928 IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_52;
5220 4929
@@ -5234,28 +4943,32 @@ static u16 iwl3945_get_passive_dwell_time(struct iwl3945_priv *priv, int phymode
5234 return passive; 4943 return passive;
5235} 4944}
5236 4945
5237static int iwl3945_get_channels_for_scan(struct iwl3945_priv *priv, int phymode, 4946static int iwl3945_get_channels_for_scan(struct iwl3945_priv *priv,
4947 enum ieee80211_band band,
5238 u8 is_active, u8 direct_mask, 4948 u8 is_active, u8 direct_mask,
5239 struct iwl3945_scan_channel *scan_ch) 4949 struct iwl3945_scan_channel *scan_ch)
5240{ 4950{
5241 const struct ieee80211_channel *channels = NULL; 4951 const struct ieee80211_channel *channels = NULL;
5242 const struct ieee80211_hw_mode *hw_mode; 4952 const struct ieee80211_supported_band *sband;
5243 const struct iwl3945_channel_info *ch_info; 4953 const struct iwl3945_channel_info *ch_info;
5244 u16 passive_dwell = 0; 4954 u16 passive_dwell = 0;
5245 u16 active_dwell = 0; 4955 u16 active_dwell = 0;
5246 int added, i; 4956 int added, i;
5247 4957
5248 hw_mode = iwl3945_get_hw_mode(priv, phymode); 4958 sband = iwl3945_get_band(priv, band);
5249 if (!hw_mode) 4959 if (!sband)
5250 return 0; 4960 return 0;
5251 4961
5252 channels = hw_mode->channels; 4962 channels = sband->channels;
5253 4963
5254 active_dwell = iwl3945_get_active_dwell_time(priv, phymode); 4964 active_dwell = iwl3945_get_active_dwell_time(priv, band);
5255 passive_dwell = iwl3945_get_passive_dwell_time(priv, phymode); 4965 passive_dwell = iwl3945_get_passive_dwell_time(priv, band);
4966
4967 for (i = 0, added = 0; i < sband->n_channels; i++) {
4968 if (channels[i].flags & IEEE80211_CHAN_DISABLED)
4969 continue;
5256 4970
5257 for (i = 0, added = 0; i < hw_mode->num_channels; i++) { 4971 if (channels[i].hw_value ==
5258 if (channels[i].chan ==
5259 le16_to_cpu(priv->active_rxon.channel)) { 4972 le16_to_cpu(priv->active_rxon.channel)) {
5260 if (iwl3945_is_associated(priv)) { 4973 if (iwl3945_is_associated(priv)) {
5261 IWL_DEBUG_SCAN 4974 IWL_DEBUG_SCAN
@@ -5266,9 +4979,9 @@ static int iwl3945_get_channels_for_scan(struct iwl3945_priv *priv, int phymode,
5266 } else if (priv->only_active_channel) 4979 } else if (priv->only_active_channel)
5267 continue; 4980 continue;
5268 4981
5269 scan_ch->channel = channels[i].chan; 4982 scan_ch->channel = channels[i].hw_value;
5270 4983
5271 ch_info = iwl3945_get_channel_info(priv, phymode, scan_ch->channel); 4984 ch_info = iwl3945_get_channel_info(priv, band, scan_ch->channel);
5272 if (!is_channel_valid(ch_info)) { 4985 if (!is_channel_valid(ch_info)) {
5273 IWL_DEBUG_SCAN("Channel %d is INVALID for this SKU.\n", 4986 IWL_DEBUG_SCAN("Channel %d is INVALID for this SKU.\n",
5274 scan_ch->channel); 4987 scan_ch->channel);
@@ -5276,7 +4989,7 @@ static int iwl3945_get_channels_for_scan(struct iwl3945_priv *priv, int phymode,
5276 } 4989 }
5277 4990
5278 if (!is_active || is_channel_passive(ch_info) || 4991 if (!is_active || is_channel_passive(ch_info) ||
5279 !(channels[i].flag & IEEE80211_CHAN_W_ACTIVE_SCAN)) 4992 (channels[i].flags & IEEE80211_CHAN_PASSIVE_SCAN))
5280 scan_ch->type = 0; /* passive */ 4993 scan_ch->type = 0; /* passive */
5281 else 4994 else
5282 scan_ch->type = 1; /* active */ 4995 scan_ch->type = 1; /* active */
@@ -5295,7 +5008,7 @@ static int iwl3945_get_channels_for_scan(struct iwl3945_priv *priv, int phymode,
5295 /* scan_pwr_info->tpc.dsp_atten; */ 5008 /* scan_pwr_info->tpc.dsp_atten; */
5296 5009
5297 /*scan_pwr_info->tpc.tx_gain; */ 5010 /*scan_pwr_info->tpc.tx_gain; */
5298 if (phymode == MODE_IEEE80211A) 5011 if (band == IEEE80211_BAND_5GHZ)
5299 scan_ch->tpc.tx_gain = ((1 << 5) | (3 << 3)) | 3; 5012 scan_ch->tpc.tx_gain = ((1 << 5) | (3 << 3)) | 3;
5300 else { 5013 else {
5301 scan_ch->tpc.tx_gain = ((1 << 5) | (5 << 3)); 5014 scan_ch->tpc.tx_gain = ((1 << 5) | (5 << 3));
@@ -5319,41 +5032,23 @@ static int iwl3945_get_channels_for_scan(struct iwl3945_priv *priv, int phymode,
5319 return added; 5032 return added;
5320} 5033}
5321 5034
5322static void iwl3945_reset_channel_flag(struct iwl3945_priv *priv)
5323{
5324 int i, j;
5325 for (i = 0; i < 3; i++) {
5326 struct ieee80211_hw_mode *hw_mode = (void *)&priv->modes[i];
5327 for (j = 0; j < hw_mode->num_channels; j++)
5328 hw_mode->channels[j].flag = hw_mode->channels[j].val;
5329 }
5330}
5331
5332static void iwl3945_init_hw_rates(struct iwl3945_priv *priv, 5035static void iwl3945_init_hw_rates(struct iwl3945_priv *priv,
5333 struct ieee80211_rate *rates) 5036 struct ieee80211_rate *rates)
5334{ 5037{
5335 int i; 5038 int i;
5336 5039
5337 for (i = 0; i < IWL_RATE_COUNT; i++) { 5040 for (i = 0; i < IWL_RATE_COUNT; i++) {
5338 rates[i].rate = iwl3945_rates[i].ieee * 5; 5041 rates[i].bitrate = iwl3945_rates[i].ieee * 5;
5339 rates[i].val = i; /* Rate scaling will work on indexes */ 5042 rates[i].hw_value = i; /* Rate scaling will work on indexes */
5340 rates[i].val2 = i; 5043 rates[i].hw_value_short = i;
5341 rates[i].flags = IEEE80211_RATE_SUPPORTED; 5044 rates[i].flags = 0;
5342 /* Only OFDM have the bits-per-symbol set */ 5045 if ((i > IWL_LAST_OFDM_RATE) || (i < IWL_FIRST_OFDM_RATE)) {
5343 if ((i <= IWL_LAST_OFDM_RATE) && (i >= IWL_FIRST_OFDM_RATE))
5344 rates[i].flags |= IEEE80211_RATE_OFDM;
5345 else {
5346 /* 5046 /*
5347 * If CCK 1M then set rate flag to CCK else CCK_2 5047 * If CCK != 1M then set short preamble rate flag.
5348 * which is CCK | PREAMBLE2
5349 */ 5048 */
5350 rates[i].flags |= (iwl3945_rates[i].plcp == 10) ? 5049 rates[i].flags |= (iwl3945_rates[i].plcp == 10) ?
5351 IEEE80211_RATE_CCK : IEEE80211_RATE_CCK_2; 5050 0 : IEEE80211_RATE_SHORT_PREAMBLE;
5352 } 5051 }
5353
5354 /* Set up which ones are basic rates... */
5355 if (IWL_BASIC_RATES_MASK & (1 << i))
5356 rates[i].flags |= IEEE80211_RATE_BASIC;
5357 } 5052 }
5358} 5053}
5359 5054
@@ -5363,143 +5058,117 @@ static void iwl3945_init_hw_rates(struct iwl3945_priv *priv,
5363static int iwl3945_init_geos(struct iwl3945_priv *priv) 5058static int iwl3945_init_geos(struct iwl3945_priv *priv)
5364{ 5059{
5365 struct iwl3945_channel_info *ch; 5060 struct iwl3945_channel_info *ch;
5366 struct ieee80211_hw_mode *modes; 5061 struct ieee80211_supported_band *sband;
5367 struct ieee80211_channel *channels; 5062 struct ieee80211_channel *channels;
5368 struct ieee80211_channel *geo_ch; 5063 struct ieee80211_channel *geo_ch;
5369 struct ieee80211_rate *rates; 5064 struct ieee80211_rate *rates;
5370 int i = 0; 5065 int i = 0;
5371 enum {
5372 A = 0,
5373 B = 1,
5374 G = 2,
5375 };
5376 int mode_count = 3;
5377 5066
5378 if (priv->modes) { 5067 if (priv->bands[IEEE80211_BAND_2GHZ].n_bitrates ||
5068 priv->bands[IEEE80211_BAND_5GHZ].n_bitrates) {
5379 IWL_DEBUG_INFO("Geography modes already initialized.\n"); 5069 IWL_DEBUG_INFO("Geography modes already initialized.\n");
5380 set_bit(STATUS_GEO_CONFIGURED, &priv->status); 5070 set_bit(STATUS_GEO_CONFIGURED, &priv->status);
5381 return 0; 5071 return 0;
5382 } 5072 }
5383 5073
5384 modes = kzalloc(sizeof(struct ieee80211_hw_mode) * mode_count,
5385 GFP_KERNEL);
5386 if (!modes)
5387 return -ENOMEM;
5388
5389 channels = kzalloc(sizeof(struct ieee80211_channel) * 5074 channels = kzalloc(sizeof(struct ieee80211_channel) *
5390 priv->channel_count, GFP_KERNEL); 5075 priv->channel_count, GFP_KERNEL);
5391 if (!channels) { 5076 if (!channels)
5392 kfree(modes);
5393 return -ENOMEM; 5077 return -ENOMEM;
5394 }
5395 5078
5396 rates = kzalloc((sizeof(struct ieee80211_rate) * (IWL_MAX_RATES + 1)), 5079 rates = kzalloc((sizeof(struct ieee80211_rate) * (IWL_RATE_COUNT + 1)),
5397 GFP_KERNEL); 5080 GFP_KERNEL);
5398 if (!rates) { 5081 if (!rates) {
5399 kfree(modes);
5400 kfree(channels); 5082 kfree(channels);
5401 return -ENOMEM; 5083 return -ENOMEM;
5402 } 5084 }
5403 5085
5404 /* 0 = 802.11a
5405 * 1 = 802.11b
5406 * 2 = 802.11g
5407 */
5408
5409 /* 5.2GHz channels start after the 2.4GHz channels */ 5086 /* 5.2GHz channels start after the 2.4GHz channels */
5410 modes[A].mode = MODE_IEEE80211A; 5087 sband = &priv->bands[IEEE80211_BAND_5GHZ];
5411 modes[A].channels = &channels[ARRAY_SIZE(iwl3945_eeprom_band_1)]; 5088 sband->channels = &channels[ARRAY_SIZE(iwl3945_eeprom_band_1)];
5412 modes[A].rates = &rates[4]; 5089 /* just OFDM */
5413 modes[A].num_rates = 8; /* just OFDM */ 5090 sband->bitrates = &rates[IWL_FIRST_OFDM_RATE];
5414 modes[A].num_channels = 0; 5091 sband->n_bitrates = IWL_RATE_COUNT - IWL_FIRST_OFDM_RATE;
5415 5092
5416 modes[B].mode = MODE_IEEE80211B; 5093 sband = &priv->bands[IEEE80211_BAND_2GHZ];
5417 modes[B].channels = channels; 5094 sband->channels = channels;
5418 modes[B].rates = rates; 5095 /* OFDM & CCK */
5419 modes[B].num_rates = 4; /* just CCK */ 5096 sband->bitrates = rates;
5420 modes[B].num_channels = 0; 5097 sband->n_bitrates = IWL_RATE_COUNT;
5421
5422 modes[G].mode = MODE_IEEE80211G;
5423 modes[G].channels = channels;
5424 modes[G].rates = rates;
5425 modes[G].num_rates = 12; /* OFDM & CCK */
5426 modes[G].num_channels = 0;
5427 5098
5428 priv->ieee_channels = channels; 5099 priv->ieee_channels = channels;
5429 priv->ieee_rates = rates; 5100 priv->ieee_rates = rates;
5430 5101
5431 iwl3945_init_hw_rates(priv, rates); 5102 iwl3945_init_hw_rates(priv, rates);
5432 5103
5433 for (i = 0, geo_ch = channels; i < priv->channel_count; i++) { 5104 for (i = 0; i < priv->channel_count; i++) {
5434 ch = &priv->channel_info[i]; 5105 ch = &priv->channel_info[i];
5435 5106
5436 if (!is_channel_valid(ch)) { 5107 /* FIXME: might be removed if scan is OK*/
5437 IWL_DEBUG_INFO("Channel %d [%sGHz] is restricted -- " 5108 if (!is_channel_valid(ch))
5438 "skipping.\n",
5439 ch->channel, is_channel_a_band(ch) ?
5440 "5.2" : "2.4");
5441 continue; 5109 continue;
5442 }
5443 5110
5444 if (is_channel_a_band(ch)) 5111 if (is_channel_a_band(ch))
5445 geo_ch = &modes[A].channels[modes[A].num_channels++]; 5112 sband = &priv->bands[IEEE80211_BAND_5GHZ];
5446 else { 5113 else
5447 geo_ch = &modes[B].channels[modes[B].num_channels++]; 5114 sband = &priv->bands[IEEE80211_BAND_2GHZ];
5448 modes[G].num_channels++; 5115
5449 } 5116 geo_ch = &sband->channels[sband->n_channels++];
5450 5117
5451 geo_ch->freq = ieee80211chan2mhz(ch->channel); 5118 geo_ch->center_freq = ieee80211_channel_to_frequency(ch->channel);
5452 geo_ch->chan = ch->channel; 5119 geo_ch->max_power = ch->max_power_avg;
5453 geo_ch->power_level = ch->max_power_avg; 5120 geo_ch->max_antenna_gain = 0xff;
5454 geo_ch->antenna_max = 0xff; 5121 geo_ch->hw_value = ch->channel;
5455 5122
5456 if (is_channel_valid(ch)) { 5123 if (is_channel_valid(ch)) {
5457 geo_ch->flag = IEEE80211_CHAN_W_SCAN; 5124 if (!(ch->flags & EEPROM_CHANNEL_IBSS))
5458 if (ch->flags & EEPROM_CHANNEL_IBSS) 5125 geo_ch->flags |= IEEE80211_CHAN_NO_IBSS;
5459 geo_ch->flag |= IEEE80211_CHAN_W_IBSS;
5460 5126
5461 if (ch->flags & EEPROM_CHANNEL_ACTIVE) 5127 if (!(ch->flags & EEPROM_CHANNEL_ACTIVE))
5462 geo_ch->flag |= IEEE80211_CHAN_W_ACTIVE_SCAN; 5128 geo_ch->flags |= IEEE80211_CHAN_PASSIVE_SCAN;
5463 5129
5464 if (ch->flags & EEPROM_CHANNEL_RADAR) 5130 if (ch->flags & EEPROM_CHANNEL_RADAR)
5465 geo_ch->flag |= IEEE80211_CHAN_W_RADAR_DETECT; 5131 geo_ch->flags |= IEEE80211_CHAN_RADAR;
5466 5132
5467 if (ch->max_power_avg > priv->max_channel_txpower_limit) 5133 if (ch->max_power_avg > priv->max_channel_txpower_limit)
5468 priv->max_channel_txpower_limit = 5134 priv->max_channel_txpower_limit =
5469 ch->max_power_avg; 5135 ch->max_power_avg;
5136 } else {
5137 geo_ch->flags |= IEEE80211_CHAN_DISABLED;
5470 } 5138 }
5471 5139
5472 geo_ch->val = geo_ch->flag; 5140 /* Save flags for reg domain usage */
5141 geo_ch->orig_flags = geo_ch->flags;
5142
5143	IWL_DEBUG_INFO("Channel %d Freq=%d[%sGHz] %s flag=0x%X\n",
5144 ch->channel, geo_ch->center_freq,
5145 is_channel_a_band(ch) ? "5.2" : "2.4",
5146 geo_ch->flags & IEEE80211_CHAN_DISABLED ?
5147 "restricted" : "valid",
5148 geo_ch->flags);
5473 } 5149 }
5474 5150
5475 if ((modes[A].num_channels == 0) && priv->is_abg) { 5151 if ((priv->bands[IEEE80211_BAND_5GHZ].n_channels == 0) &&
5152 priv->cfg->sku & IWL_SKU_A) {
5476 printk(KERN_INFO DRV_NAME 5153 printk(KERN_INFO DRV_NAME
5477 ": Incorrectly detected BG card as ABG. Please send " 5154 ": Incorrectly detected BG card as ABG. Please send "
5478 "your PCI ID 0x%04X:0x%04X to maintainer.\n", 5155 "your PCI ID 0x%04X:0x%04X to maintainer.\n",
5479 priv->pci_dev->device, priv->pci_dev->subsystem_device); 5156 priv->pci_dev->device, priv->pci_dev->subsystem_device);
5480 priv->is_abg = 0; 5157 priv->cfg->sku &= ~IWL_SKU_A;
5481 } 5158 }
5482 5159
5483 printk(KERN_INFO DRV_NAME 5160 printk(KERN_INFO DRV_NAME
5484 ": Tunable channels: %d 802.11bg, %d 802.11a channels\n", 5161 ": Tunable channels: %d 802.11bg, %d 802.11a channels\n",
5485 modes[G].num_channels, modes[A].num_channels); 5162 priv->bands[IEEE80211_BAND_2GHZ].n_channels,
5486 5163 priv->bands[IEEE80211_BAND_5GHZ].n_channels);
5487 /*
5488 * NOTE: We register these in preference of order -- the
5489 * stack doesn't currently (as of 7.0.6 / Apr 24 '07) pick
5490 * a phymode based on rates or AP capabilities but seems to
5491 * configure it purely on if the channel being configured
5492 * is supported by a mode -- and the first match is taken
5493 */
5494 5164
5495 if (modes[G].num_channels) 5165 if (priv->bands[IEEE80211_BAND_2GHZ].n_channels)
5496 ieee80211_register_hwmode(priv->hw, &modes[G]); 5166 priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
5497 if (modes[B].num_channels) 5167 &priv->bands[IEEE80211_BAND_2GHZ];
5498 ieee80211_register_hwmode(priv->hw, &modes[B]); 5168 if (priv->bands[IEEE80211_BAND_5GHZ].n_channels)
5499 if (modes[A].num_channels) 5169 priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
5500 ieee80211_register_hwmode(priv->hw, &modes[A]); 5170 &priv->bands[IEEE80211_BAND_5GHZ];
5501 5171
5502 priv->modes = modes;
5503 set_bit(STATUS_GEO_CONFIGURED, &priv->status); 5172 set_bit(STATUS_GEO_CONFIGURED, &priv->status);
5504 5173
5505 return 0; 5174 return 0;
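
The hunk above carries the core of the mac80211 API migration in this patch: the old ieee80211_hw_mode array (A/B/G) and the ieee80211_register_hwmode() calls give way to one struct ieee80211_supported_band per band, hooked into wiphy->bands[] before ieee80211_register_hw(). A minimal sketch of that registration pattern, using only identifiers visible above (the function name is illustrative, not part of the driver):

static void example_register_bands(struct ieee80211_hw *hw,
                                   struct ieee80211_supported_band *bg,
                                   struct ieee80211_supported_band *a)
{
        /* a NULL wiphy->bands[] slot means "band not supported", so only
         * bands that ended up with channels are exported */
        if (bg->n_channels)
                hw->wiphy->bands[IEEE80211_BAND_2GHZ] = bg;
        if (a->n_channels)
                hw->wiphy->bands[IEEE80211_BAND_5GHZ] = a;
}

The channel and bitrate arrays each band points at remain owned by the driver (priv->ieee_channels and priv->ieee_rates above), which is why iwl3945_free_geos() below keeps freeing those two allocations but no longer a priv->modes array.
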
@@ -5510,7 +5179,6 @@ static int iwl3945_init_geos(struct iwl3945_priv *priv)
5510 */ 5179 */
5511static void iwl3945_free_geos(struct iwl3945_priv *priv) 5180static void iwl3945_free_geos(struct iwl3945_priv *priv)
5512{ 5181{
5513 kfree(priv->modes);
5514 kfree(priv->ieee_channels); 5182 kfree(priv->ieee_channels);
5515 kfree(priv->ieee_rates); 5183 kfree(priv->ieee_rates);
5516 clear_bit(STATUS_GEO_CONFIGURED, &priv->status); 5184 clear_bit(STATUS_GEO_CONFIGURED, &priv->status);
@@ -5837,7 +5505,7 @@ static int iwl3945_read_ucode(struct iwl3945_priv *priv)
5837 int ret = 0; 5505 int ret = 0;
5838 const struct firmware *ucode_raw; 5506 const struct firmware *ucode_raw;
5839 /* firmware file name contains uCode/driver compatibility version */ 5507 /* firmware file name contains uCode/driver compatibility version */
5840 const char *name = "iwlwifi-3945" IWL3945_UCODE_API ".ucode"; 5508 const char *name = priv->cfg->fw_name;
5841 u8 *src; 5509 u8 *src;
5842 size_t len; 5510 size_t len;
5843 u32 ver, inst_size, data_size, init_size, init_data_size, boot_size; 5511 u32 ver, inst_size, data_size, init_size, init_data_size, boot_size;
@@ -6209,6 +5877,8 @@ static void iwl3945_alive_start(struct iwl3945_priv *priv)
6209 5877
6210 iwl3945_reg_txpower_periodic(priv); 5878 iwl3945_reg_txpower_periodic(priv);
6211 5879
5880 iwl3945_led_register(priv);
5881
6212 IWL_DEBUG_INFO("ALIVE processing complete.\n"); 5882 IWL_DEBUG_INFO("ALIVE processing complete.\n");
6213 set_bit(STATUS_READY, &priv->status); 5883 set_bit(STATUS_READY, &priv->status);
6214 wake_up_interruptible(&priv->wait_command_queue); 5884 wake_up_interruptible(&priv->wait_command_queue);
@@ -6216,6 +5886,7 @@ static void iwl3945_alive_start(struct iwl3945_priv *priv)
6216 if (priv->error_recovering) 5886 if (priv->error_recovering)
6217 iwl3945_error_recovery(priv); 5887 iwl3945_error_recovery(priv);
6218 5888
5889 ieee80211_notify_mac(priv->hw, IEEE80211_NOTIFY_RE_ASSOC);
6219 return; 5890 return;
6220 5891
6221 restart: 5892 restart:
@@ -6237,6 +5908,7 @@ static void __iwl3945_down(struct iwl3945_priv *priv)
6237 if (!exit_pending) 5908 if (!exit_pending)
6238 set_bit(STATUS_EXIT_PENDING, &priv->status); 5909 set_bit(STATUS_EXIT_PENDING, &priv->status);
6239 5910
5911 iwl3945_led_unregister(priv);
6240 iwl3945_clear_stations_table(priv); 5912 iwl3945_clear_stations_table(priv);
6241 5913
6242 /* Unblock any waiting calls */ 5914 /* Unblock any waiting calls */
@@ -6251,7 +5923,10 @@ static void __iwl3945_down(struct iwl3945_priv *priv)
6251 iwl3945_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET); 5923 iwl3945_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
6252 5924
6253 /* tell the device to stop sending interrupts */ 5925 /* tell the device to stop sending interrupts */
5926 spin_lock_irqsave(&priv->lock, flags);
6254 iwl3945_disable_interrupts(priv); 5927 iwl3945_disable_interrupts(priv);
5928 spin_unlock_irqrestore(&priv->lock, flags);
5929 iwl_synchronize_irq(priv);
6255 5930
6256 if (priv->mac80211_registered) 5931 if (priv->mac80211_registered)
6257 ieee80211_stop_queues(priv->hw); 5932 ieee80211_stop_queues(priv->hw);
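
The two lines added here order the shutdown so that no interrupt handler can race with the teardown that follows. A condensed sketch of the same sequence (the helper name is illustrative; the assumption, based on how the call is used here and again in the remove path further down, is that iwl_synchronize_irq() waits for the PCI IRQ handler and the driver's interrupt tasklet to finish):

static void example_quiesce_irqs(struct iwl3945_priv *priv)
{
        unsigned long flags;

        /* mask device interrupts under priv->lock so no new work is queued */
        spin_lock_irqsave(&priv->lock, flags);
        iwl3945_disable_interrupts(priv);
        spin_unlock_irqrestore(&priv->lock, flags);

        /* then wait out anything already running on another CPU */
        iwl_synchronize_irq(priv);
}
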
@@ -6519,7 +6194,7 @@ static void iwl3945_bg_request_scan(struct work_struct *data)
6519 struct iwl3945_scan_cmd *scan; 6194 struct iwl3945_scan_cmd *scan;
6520 struct ieee80211_conf *conf = NULL; 6195 struct ieee80211_conf *conf = NULL;
6521 u8 direct_mask; 6196 u8 direct_mask;
6522 int phymode; 6197 enum ieee80211_band band;
6523 6198
6524 conf = ieee80211_get_hw_conf(priv->hw); 6199 conf = ieee80211_get_hw_conf(priv->hw);
6525 6200
@@ -6651,13 +6326,13 @@ static void iwl3945_bg_request_scan(struct work_struct *data)
6651 scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK; 6326 scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
6652 scan->tx_cmd.rate = IWL_RATE_1M_PLCP; 6327 scan->tx_cmd.rate = IWL_RATE_1M_PLCP;
6653 scan->good_CRC_th = 0; 6328 scan->good_CRC_th = 0;
6654 phymode = MODE_IEEE80211G; 6329 band = IEEE80211_BAND_2GHZ;
6655 break; 6330 break;
6656 6331
6657 case 1: 6332 case 1:
6658 scan->tx_cmd.rate = IWL_RATE_6M_PLCP; 6333 scan->tx_cmd.rate = IWL_RATE_6M_PLCP;
6659 scan->good_CRC_th = IWL_GOOD_CRC_TH; 6334 scan->good_CRC_th = IWL_GOOD_CRC_TH;
6660 phymode = MODE_IEEE80211A; 6335 band = IEEE80211_BAND_5GHZ;
6661 break; 6336 break;
6662 6337
6663 default: 6338 default:
@@ -6671,18 +6346,23 @@ static void iwl3945_bg_request_scan(struct work_struct *data)
6671 if (priv->iw_mode == IEEE80211_IF_TYPE_MNTR) 6346 if (priv->iw_mode == IEEE80211_IF_TYPE_MNTR)
6672 scan->filter_flags = RXON_FILTER_PROMISC_MSK; 6347 scan->filter_flags = RXON_FILTER_PROMISC_MSK;
6673 6348
6674 if (direct_mask) 6349 if (direct_mask) {
6675 IWL_DEBUG_SCAN 6350 IWL_DEBUG_SCAN
6676 ("Initiating direct scan for %s.\n", 6351 ("Initiating direct scan for %s.\n",
6677 iwl3945_escape_essid(priv->essid, priv->essid_len)); 6352 iwl3945_escape_essid(priv->essid, priv->essid_len));
6678 else 6353 scan->channel_count =
6354 iwl3945_get_channels_for_scan(
6355 priv, band, 1, /* active */
6356 direct_mask,
6357 (void *)&scan->data[le16_to_cpu(scan->tx_cmd.len)]);
6358 } else {
6679 IWL_DEBUG_SCAN("Initiating indirect scan.\n"); 6359 IWL_DEBUG_SCAN("Initiating indirect scan.\n");
6680 6360 scan->channel_count =
6681 scan->channel_count = 6361 iwl3945_get_channels_for_scan(
6682 iwl3945_get_channels_for_scan( 6362 priv, band, 0, /* passive */
6683 priv, phymode, 1, /* active */ 6363 direct_mask,
6684 direct_mask, 6364 (void *)&scan->data[le16_to_cpu(scan->tx_cmd.len)]);
6685 (void *)&scan->data[le16_to_cpu(scan->tx_cmd.len)]); 6365 }
6686 6366
6687 cmd.len += le16_to_cpu(scan->tx_cmd.len) + 6367 cmd.len += le16_to_cpu(scan->tx_cmd.len) +
6688 scan->channel_count * sizeof(struct iwl3945_scan_channel); 6368 scan->channel_count * sizeof(struct iwl3945_scan_channel);
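
With phymode gone, the scan request is now built per ieee80211_band, and both the direct and indirect branches append the channel list at the same place: inside scan->data, immediately after the variable-length probe-request/TX command. An illustrative helper (not in the driver) naming that address computation:

static void *example_scan_channel_list(struct iwl3945_scan_cmd *scan)
{
        /* tx_cmd.len is little-endian and variable, so the channel array
         * begins wherever the probe request ends */
        return &scan->data[le16_to_cpu(scan->tx_cmd.len)];
}

The host-command length then grows by that same tx_cmd.len plus channel_count * sizeof(struct iwl3945_scan_channel), exactly as the last two lines above compute.
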
@@ -6825,7 +6505,7 @@ static void iwl3945_bg_post_associate(struct work_struct *data)
6825 iwl3945_add_station(priv, iwl3945_broadcast_addr, 0, 0); 6505 iwl3945_add_station(priv, iwl3945_broadcast_addr, 0, 0);
6826 iwl3945_add_station(priv, priv->bssid, 0, 0); 6506 iwl3945_add_station(priv, priv->bssid, 0, 0);
6827 iwl3945_sync_sta(priv, IWL_STA_ID, 6507 iwl3945_sync_sta(priv, IWL_STA_ID,
6828 (priv->phymode == MODE_IEEE80211A)? 6508 (priv->band == IEEE80211_BAND_5GHZ) ?
6829 IWL_RATE_6M_PLCP : IWL_RATE_1M_PLCP, 6509 IWL_RATE_6M_PLCP : IWL_RATE_1M_PLCP,
6830 CMD_ASYNC); 6510 CMD_ASYNC);
6831 iwl3945_rate_scale_init(priv->hw, IWL_STA_ID); 6511 iwl3945_rate_scale_init(priv->hw, IWL_STA_ID);
@@ -6841,9 +6521,8 @@ static void iwl3945_bg_post_associate(struct work_struct *data)
6841 6521
6842 iwl3945_sequence_reset(priv); 6522 iwl3945_sequence_reset(priv);
6843 6523
6844#ifdef CONFIG_IWL3945_QOS
6845 iwl3945_activate_qos(priv, 0); 6524 iwl3945_activate_qos(priv, 0);
6846#endif /* CONFIG_IWL3945_QOS */ 6525
6847 /* we have just associated, don't start scan too early */ 6526 /* we have just associated, don't start scan too early */
6848 priv->next_scan_jiffies = jiffies + IWL_DELAY_NEXT_SCAN; 6527 priv->next_scan_jiffies = jiffies + IWL_DELAY_NEXT_SCAN;
6849 mutex_unlock(&priv->mutex); 6528 mutex_unlock(&priv->mutex);
@@ -7020,7 +6699,7 @@ static int iwl3945_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb,
7020 } 6699 }
7021 6700
7022 IWL_DEBUG_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len, 6701 IWL_DEBUG_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
7023 ctl->tx_rate); 6702 ctl->tx_rate->bitrate);
7024 6703
7025 if (iwl3945_tx_skb(priv, skb, ctl)) 6704 if (iwl3945_tx_skb(priv, skb, ctl))
7026 dev_kfree_skb_any(skb); 6705 dev_kfree_skb_any(skb);
@@ -7079,7 +6758,7 @@ static int iwl3945_mac_config(struct ieee80211_hw *hw, struct ieee80211_conf *co
7079 int ret = 0; 6758 int ret = 0;
7080 6759
7081 mutex_lock(&priv->mutex); 6760 mutex_lock(&priv->mutex);
7082 IWL_DEBUG_MAC80211("enter to channel %d\n", conf->channel); 6761 IWL_DEBUG_MAC80211("enter to channel %d\n", conf->channel->hw_value);
7083 6762
7084 priv->add_radiotap = !!(conf->flags & IEEE80211_CONF_RADIOTAP); 6763 priv->add_radiotap = !!(conf->flags & IEEE80211_CONF_RADIOTAP);
7085 6764
@@ -7099,19 +6778,20 @@ static int iwl3945_mac_config(struct ieee80211_hw *hw, struct ieee80211_conf *co
7099 6778
7100 spin_lock_irqsave(&priv->lock, flags); 6779 spin_lock_irqsave(&priv->lock, flags);
7101 6780
7102 ch_info = iwl3945_get_channel_info(priv, conf->phymode, conf->channel); 6781 ch_info = iwl3945_get_channel_info(priv, conf->channel->band,
6782 conf->channel->hw_value);
7103 if (!is_channel_valid(ch_info)) { 6783 if (!is_channel_valid(ch_info)) {
7104 IWL_DEBUG_SCAN("Channel %d [%d] is INVALID for this SKU.\n", 6784 IWL_DEBUG_SCAN("Channel %d [%d] is INVALID for this SKU.\n",
7105 conf->channel, conf->phymode); 6785 conf->channel->hw_value, conf->channel->band);
7106 IWL_DEBUG_MAC80211("leave - invalid channel\n"); 6786 IWL_DEBUG_MAC80211("leave - invalid channel\n");
7107 spin_unlock_irqrestore(&priv->lock, flags); 6787 spin_unlock_irqrestore(&priv->lock, flags);
7108 ret = -EINVAL; 6788 ret = -EINVAL;
7109 goto out; 6789 goto out;
7110 } 6790 }
7111 6791
7112 iwl3945_set_rxon_channel(priv, conf->phymode, conf->channel); 6792 iwl3945_set_rxon_channel(priv, conf->channel->band, conf->channel->hw_value);
7113 6793
7114 iwl3945_set_flags_for_phymode(priv, conf->phymode); 6794 iwl3945_set_flags_for_phymode(priv, conf->channel->band);
7115 6795
7116 /* The list of supported rates and rate mask can be different 6796 /* The list of supported rates and rate mask can be different
7117 * for each phymode; since the phymode may have changed, reset 6797 * for each phymode; since the phymode may have changed, reset
@@ -7225,6 +6905,12 @@ static int iwl3945_mac_config_interface(struct ieee80211_hw *hw,
7225 if (conf == NULL) 6905 if (conf == NULL)
7226 return -EIO; 6906 return -EIO;
7227 6907
6908 if (priv->vif != vif) {
6909 IWL_DEBUG_MAC80211("leave - priv->vif != vif\n");
6910 mutex_unlock(&priv->mutex);
6911 return 0;
6912 }
6913
7228 /* XXX: this MUST use conf->mac_addr */ 6914 /* XXX: this MUST use conf->mac_addr */
7229 6915
7230 if ((priv->iw_mode == IEEE80211_IF_TYPE_AP) && 6916 if ((priv->iw_mode == IEEE80211_IF_TYPE_AP) &&
@@ -7249,17 +6935,6 @@ static int iwl3945_mac_config_interface(struct ieee80211_hw *hw,
7249 if (unlikely(test_bit(STATUS_SCANNING, &priv->status)) && 6935 if (unlikely(test_bit(STATUS_SCANNING, &priv->status)) &&
7250 !(priv->hw->flags & IEEE80211_HW_NO_PROBE_FILTERING)) { 6936 !(priv->hw->flags & IEEE80211_HW_NO_PROBE_FILTERING)) {
7251 */ 6937 */
7252 if (unlikely(test_bit(STATUS_SCANNING, &priv->status))) {
7253 IWL_DEBUG_MAC80211("leave - scanning\n");
7254 mutex_unlock(&priv->mutex);
7255 return 0;
7256 }
7257
7258 if (priv->vif != vif) {
7259 IWL_DEBUG_MAC80211("leave - priv->vif != vif\n");
7260 mutex_unlock(&priv->mutex);
7261 return 0;
7262 }
7263 6938
7264 if (priv->iw_mode == IEEE80211_IF_TYPE_AP) { 6939 if (priv->iw_mode == IEEE80211_IF_TYPE_AP) {
7265 if (!conf->bssid) { 6940 if (!conf->bssid) {
@@ -7487,10 +7162,8 @@ static int iwl3945_mac_conf_tx(struct ieee80211_hw *hw, int queue,
7487 const struct ieee80211_tx_queue_params *params) 7162 const struct ieee80211_tx_queue_params *params)
7488{ 7163{
7489 struct iwl3945_priv *priv = hw->priv; 7164 struct iwl3945_priv *priv = hw->priv;
7490#ifdef CONFIG_IWL3945_QOS
7491 unsigned long flags; 7165 unsigned long flags;
7492 int q; 7166 int q;
7493#endif /* CONFIG_IWL3945_QOS */
7494 7167
7495 IWL_DEBUG_MAC80211("enter\n"); 7168 IWL_DEBUG_MAC80211("enter\n");
7496 7169
@@ -7504,7 +7177,6 @@ static int iwl3945_mac_conf_tx(struct ieee80211_hw *hw, int queue,
7504 return 0; 7177 return 0;
7505 } 7178 }
7506 7179
7507#ifdef CONFIG_IWL3945_QOS
7508 if (!priv->qos_data.qos_enable) { 7180 if (!priv->qos_data.qos_enable) {
7509 priv->qos_data.qos_active = 0; 7181 priv->qos_data.qos_active = 0;
7510 IWL_DEBUG_MAC80211("leave - qos not enabled\n"); 7182 IWL_DEBUG_MAC80211("leave - qos not enabled\n");
@@ -7518,7 +7190,7 @@ static int iwl3945_mac_conf_tx(struct ieee80211_hw *hw, int queue,
7518 priv->qos_data.def_qos_parm.ac[q].cw_max = cpu_to_le16(params->cw_max); 7190 priv->qos_data.def_qos_parm.ac[q].cw_max = cpu_to_le16(params->cw_max);
7519 priv->qos_data.def_qos_parm.ac[q].aifsn = params->aifs; 7191 priv->qos_data.def_qos_parm.ac[q].aifsn = params->aifs;
7520 priv->qos_data.def_qos_parm.ac[q].edca_txop = 7192 priv->qos_data.def_qos_parm.ac[q].edca_txop =
7521 cpu_to_le16((params->burst_time * 100)); 7193 cpu_to_le16((params->txop * 32));
7522 7194
7523 priv->qos_data.def_qos_parm.ac[q].reserved1 = 0; 7195 priv->qos_data.def_qos_parm.ac[q].reserved1 = 0;
7524 priv->qos_data.qos_active = 1; 7196 priv->qos_data.qos_active = 1;
@@ -7533,8 +7205,6 @@ static int iwl3945_mac_conf_tx(struct ieee80211_hw *hw, int queue,
7533 7205
7534 mutex_unlock(&priv->mutex); 7206 mutex_unlock(&priv->mutex);
7535 7207
7536#endif /*CONFIG_IWL3945_QOS */
7537
7538 IWL_DEBUG_MAC80211("leave\n"); 7208 IWL_DEBUG_MAC80211("leave\n");
7539 return 0; 7209 return 0;
7540} 7210}
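
The edca_txop change is a unit conversion: the txop value mac80211 hands over in ieee80211_tx_queue_params is counted in 32-microsecond slots (matching the 802.11 TXOP limit field), while the firmware field takes microseconds; the removed burst_time parameter was scaled by 100 for the same reason, apparently being expressed in 0.1 ms steps. A one-line illustration of the new arithmetic, mirroring the 16-bit cpu_to_le16() store above:

static inline u16 example_txop_to_usec(u16 txop)
{
        return txop * 32;       /* e.g. a TXOP limit of 94 slots -> 3008 us */
}
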
@@ -7599,9 +7269,8 @@ static void iwl3945_mac_reset_tsf(struct ieee80211_hw *hw)
7599 mutex_lock(&priv->mutex); 7269 mutex_lock(&priv->mutex);
7600 IWL_DEBUG_MAC80211("enter\n"); 7270 IWL_DEBUG_MAC80211("enter\n");
7601 7271
7602#ifdef CONFIG_IWL3945_QOS
7603 iwl3945_reset_qos(priv); 7272 iwl3945_reset_qos(priv);
7604#endif 7273
7605 cancel_delayed_work(&priv->post_associate); 7274 cancel_delayed_work(&priv->post_associate);
7606 7275
7607 spin_lock_irqsave(&priv->lock, flags); 7276 spin_lock_irqsave(&priv->lock, flags);
@@ -7689,9 +7358,7 @@ static int iwl3945_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *sk
7689 IWL_DEBUG_MAC80211("leave\n"); 7358 IWL_DEBUG_MAC80211("leave\n");
7690 spin_unlock_irqrestore(&priv->lock, flags); 7359 spin_unlock_irqrestore(&priv->lock, flags);
7691 7360
7692#ifdef CONFIG_IWL3945_QOS
7693 iwl3945_reset_qos(priv); 7361 iwl3945_reset_qos(priv);
7694#endif
7695 7362
7696 queue_work(priv->workqueue, &priv->post_associate.work); 7363 queue_work(priv->workqueue, &priv->post_associate.work);
7697 7364
@@ -7892,65 +7559,6 @@ static ssize_t store_filter_flags(struct device *d,
7892static DEVICE_ATTR(filter_flags, S_IWUSR | S_IRUGO, show_filter_flags, 7559static DEVICE_ATTR(filter_flags, S_IWUSR | S_IRUGO, show_filter_flags,
7893 store_filter_flags); 7560 store_filter_flags);
7894 7561
7895static ssize_t show_tune(struct device *d,
7896 struct device_attribute *attr, char *buf)
7897{
7898 struct iwl3945_priv *priv = (struct iwl3945_priv *)d->driver_data;
7899
7900 return sprintf(buf, "0x%04X\n",
7901 (priv->phymode << 8) |
7902 le16_to_cpu(priv->active_rxon.channel));
7903}
7904
7905static void iwl3945_set_flags_for_phymode(struct iwl3945_priv *priv, u8 phymode);
7906
7907static ssize_t store_tune(struct device *d,
7908 struct device_attribute *attr,
7909 const char *buf, size_t count)
7910{
7911 struct iwl3945_priv *priv = (struct iwl3945_priv *)d->driver_data;
7912 char *p = (char *)buf;
7913 u16 tune = simple_strtoul(p, &p, 0);
7914 u8 phymode = (tune >> 8) & 0xff;
7915 u16 channel = tune & 0xff;
7916
7917 IWL_DEBUG_INFO("Tune request to:%d channel:%d\n", phymode, channel);
7918
7919 mutex_lock(&priv->mutex);
7920 if ((le16_to_cpu(priv->staging_rxon.channel) != channel) ||
7921 (priv->phymode != phymode)) {
7922 const struct iwl3945_channel_info *ch_info;
7923
7924 ch_info = iwl3945_get_channel_info(priv, phymode, channel);
7925 if (!ch_info) {
7926 IWL_WARNING("Requested invalid phymode/channel "
7927 "combination: %d %d\n", phymode, channel);
7928 mutex_unlock(&priv->mutex);
7929 return -EINVAL;
7930 }
7931
7932 /* Cancel any currently running scans... */
7933 if (iwl3945_scan_cancel_timeout(priv, 100))
7934 IWL_WARNING("Could not cancel scan.\n");
7935 else {
7936 IWL_DEBUG_INFO("Committing phymode and "
7937 "rxon.channel = %d %d\n",
7938 phymode, channel);
7939
7940 iwl3945_set_rxon_channel(priv, phymode, channel);
7941 iwl3945_set_flags_for_phymode(priv, phymode);
7942
7943 iwl3945_set_rate(priv);
7944 iwl3945_commit_rxon(priv);
7945 }
7946 }
7947 mutex_unlock(&priv->mutex);
7948
7949 return count;
7950}
7951
7952static DEVICE_ATTR(tune, S_IWUSR | S_IRUGO, show_tune, store_tune);
7953
7954#ifdef CONFIG_IWL3945_SPECTRUM_MEASUREMENT 7562#ifdef CONFIG_IWL3945_SPECTRUM_MEASUREMENT
7955 7563
7956static ssize_t show_measurement(struct device *d, 7564static ssize_t show_measurement(struct device *d,
@@ -8024,31 +7632,6 @@ static DEVICE_ATTR(measurement, S_IRUSR | S_IWUSR,
8024 show_measurement, store_measurement); 7632 show_measurement, store_measurement);
8025#endif /* CONFIG_IWL3945_SPECTRUM_MEASUREMENT */ 7633#endif /* CONFIG_IWL3945_SPECTRUM_MEASUREMENT */
8026 7634
8027static ssize_t show_rate(struct device *d,
8028 struct device_attribute *attr, char *buf)
8029{
8030 struct iwl3945_priv *priv = dev_get_drvdata(d);
8031 unsigned long flags;
8032 int i;
8033
8034 spin_lock_irqsave(&priv->sta_lock, flags);
8035 if (priv->iw_mode == IEEE80211_IF_TYPE_STA)
8036 i = priv->stations[IWL_AP_ID].current_rate.s.rate;
8037 else
8038 i = priv->stations[IWL_STA_ID].current_rate.s.rate;
8039 spin_unlock_irqrestore(&priv->sta_lock, flags);
8040
8041 i = iwl3945_rate_index_from_plcp(i);
8042 if (i == -1)
8043 return sprintf(buf, "0\n");
8044
8045 return sprintf(buf, "%d%s\n",
8046 (iwl3945_rates[i].ieee >> 1),
8047 (iwl3945_rates[i].ieee & 0x1) ? ".5" : "");
8048}
8049
8050static DEVICE_ATTR(rate, S_IRUSR, show_rate, NULL);
8051
8052static ssize_t store_retry_rate(struct device *d, 7635static ssize_t store_retry_rate(struct device *d,
8053 struct device_attribute *attr, 7636 struct device_attribute *attr,
8054 const char *buf, size_t count) 7637 const char *buf, size_t count)
@@ -8165,73 +7748,8 @@ static DEVICE_ATTR(power_level, S_IWUSR | S_IRUSR, show_power_level,
8165static ssize_t show_channels(struct device *d, 7748static ssize_t show_channels(struct device *d,
8166 struct device_attribute *attr, char *buf) 7749 struct device_attribute *attr, char *buf)
8167{ 7750{
8168 struct iwl3945_priv *priv = dev_get_drvdata(d); 7751 /* all this shit doesn't belong into sysfs anyway */
8169 int len = 0, i; 7752 return 0;
8170 struct ieee80211_channel *channels = NULL;
8171 const struct ieee80211_hw_mode *hw_mode = NULL;
8172 int count = 0;
8173
8174 if (!iwl3945_is_ready(priv))
8175 return -EAGAIN;
8176
8177 hw_mode = iwl3945_get_hw_mode(priv, MODE_IEEE80211G);
8178 if (!hw_mode)
8179 hw_mode = iwl3945_get_hw_mode(priv, MODE_IEEE80211B);
8180 if (hw_mode) {
8181 channels = hw_mode->channels;
8182 count = hw_mode->num_channels;
8183 }
8184
8185 len +=
8186 sprintf(&buf[len],
8187 "Displaying %d channels in 2.4GHz band "
8188 "(802.11bg):\n", count);
8189
8190 for (i = 0; i < count; i++)
8191 len += sprintf(&buf[len], "%d: %ddBm: BSS%s%s, %s.\n",
8192 channels[i].chan,
8193 channels[i].power_level,
8194 channels[i].
8195 flag & IEEE80211_CHAN_W_RADAR_DETECT ?
8196 " (IEEE 802.11h required)" : "",
8197 (!(channels[i].flag & IEEE80211_CHAN_W_IBSS)
8198 || (channels[i].
8199 flag &
8200 IEEE80211_CHAN_W_RADAR_DETECT)) ? "" :
8201 ", IBSS",
8202 channels[i].
8203 flag & IEEE80211_CHAN_W_ACTIVE_SCAN ?
8204 "active/passive" : "passive only");
8205
8206 hw_mode = iwl3945_get_hw_mode(priv, MODE_IEEE80211A);
8207 if (hw_mode) {
8208 channels = hw_mode->channels;
8209 count = hw_mode->num_channels;
8210 } else {
8211 channels = NULL;
8212 count = 0;
8213 }
8214
8215 len += sprintf(&buf[len], "Displaying %d channels in 5.2GHz band "
8216 "(802.11a):\n", count);
8217
8218 for (i = 0; i < count; i++)
8219 len += sprintf(&buf[len], "%d: %ddBm: BSS%s%s, %s.\n",
8220 channels[i].chan,
8221 channels[i].power_level,
8222 channels[i].
8223 flag & IEEE80211_CHAN_W_RADAR_DETECT ?
8224 " (IEEE 802.11h required)" : "",
8225 (!(channels[i].flag & IEEE80211_CHAN_W_IBSS)
8226 || (channels[i].
8227 flag &
8228 IEEE80211_CHAN_W_RADAR_DETECT)) ? "" :
8229 ", IBSS",
8230 channels[i].
8231 flag & IEEE80211_CHAN_W_ACTIVE_SCAN ?
8232 "active/passive" : "passive only");
8233
8234 return len;
8235} 7753}
8236 7754
8237static DEVICE_ATTR(channels, S_IRUSR, show_channels, NULL); 7755static DEVICE_ATTR(channels, S_IRUSR, show_channels, NULL);
@@ -8404,14 +7922,12 @@ static struct attribute *iwl3945_sysfs_entries[] = {
8404 &dev_attr_measurement.attr, 7922 &dev_attr_measurement.attr,
8405#endif 7923#endif
8406 &dev_attr_power_level.attr, 7924 &dev_attr_power_level.attr,
8407 &dev_attr_rate.attr,
8408 &dev_attr_retry_rate.attr, 7925 &dev_attr_retry_rate.attr,
8409 &dev_attr_rf_kill.attr, 7926 &dev_attr_rf_kill.attr,
8410 &dev_attr_rs_window.attr, 7927 &dev_attr_rs_window.attr,
8411 &dev_attr_statistics.attr, 7928 &dev_attr_statistics.attr,
8412 &dev_attr_status.attr, 7929 &dev_attr_status.attr,
8413 &dev_attr_temperature.attr, 7930 &dev_attr_temperature.attr,
8414 &dev_attr_tune.attr,
8415 &dev_attr_tx_power.attr, 7931 &dev_attr_tx_power.attr,
8416 7932
8417 NULL 7933 NULL
@@ -8444,10 +7960,11 @@ static struct ieee80211_ops iwl3945_hw_ops = {
8444static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 7960static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
8445{ 7961{
8446 int err = 0; 7962 int err = 0;
8447 u32 pci_id;
8448 struct iwl3945_priv *priv; 7963 struct iwl3945_priv *priv;
8449 struct ieee80211_hw *hw; 7964 struct ieee80211_hw *hw;
7965 struct iwl_3945_cfg *cfg = (struct iwl_3945_cfg *)(ent->driver_data);
8450 int i; 7966 int i;
7967 unsigned long flags;
8451 DECLARE_MAC_BUF(mac); 7968 DECLARE_MAC_BUF(mac);
8452 7969
8453 /* Disabling hardware scan means that mac80211 will perform scans 7970 /* Disabling hardware scan means that mac80211 will perform scans
@@ -8457,10 +7974,10 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
8457 iwl3945_hw_ops.hw_scan = NULL; 7974 iwl3945_hw_ops.hw_scan = NULL;
8458 } 7975 }
8459 7976
8460 if ((iwl3945_param_queues_num > IWL_MAX_NUM_QUEUES) || 7977 if ((iwl3945_param_queues_num > IWL39_MAX_NUM_QUEUES) ||
8461 (iwl3945_param_queues_num < IWL_MIN_NUM_QUEUES)) { 7978 (iwl3945_param_queues_num < IWL_MIN_NUM_QUEUES)) {
8462 IWL_ERROR("invalid queues_num, should be between %d and %d\n", 7979 IWL_ERROR("invalid queues_num, should be between %d and %d\n",
8463 IWL_MIN_NUM_QUEUES, IWL_MAX_NUM_QUEUES); 7980 IWL_MIN_NUM_QUEUES, IWL39_MAX_NUM_QUEUES);
8464 err = -EINVAL; 7981 err = -EINVAL;
8465 goto out; 7982 goto out;
8466 } 7983 }
@@ -8482,6 +7999,7 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
8482 priv->hw = hw; 7999 priv->hw = hw;
8483 8000
8484 priv->pci_dev = pdev; 8001 priv->pci_dev = pdev;
8002 priv->cfg = cfg;
8485 8003
8486 /* Select antenna (may be helpful if only one antenna is connected) */ 8004 /* Select antenna (may be helpful if only one antenna is connected) */
8487 priv->antenna = (enum iwl3945_antenna)iwl3945_param_antenna; 8005 priv->antenna = (enum iwl3945_antenna)iwl3945_param_antenna;
@@ -8532,7 +8050,7 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
8532 priv->data_retry_limit = -1; 8050 priv->data_retry_limit = -1;
8533 priv->ieee_channels = NULL; 8051 priv->ieee_channels = NULL;
8534 priv->ieee_rates = NULL; 8052 priv->ieee_rates = NULL;
8535 priv->phymode = -1; 8053 priv->band = IEEE80211_BAND_2GHZ;
8536 8054
8537 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK); 8055 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
8538 if (!err) 8056 if (!err)
@@ -8571,32 +8089,8 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
8571 8089
8572 priv->iw_mode = IEEE80211_IF_TYPE_STA; 8090 priv->iw_mode = IEEE80211_IF_TYPE_STA;
8573 8091
8574 pci_id =
8575 (priv->pci_dev->device << 16) | priv->pci_dev->subsystem_device;
8576
8577 switch (pci_id) {
8578 case 0x42221005: /* 0x4222 0x8086 0x1005 is BG SKU */
8579 case 0x42221034: /* 0x4222 0x8086 0x1034 is BG SKU */
8580 case 0x42271014: /* 0x4227 0x8086 0x1014 is BG SKU */
8581 case 0x42221044: /* 0x4222 0x8086 0x1044 is BG SKU */
8582 priv->is_abg = 0;
8583 break;
8584
8585 /*
8586 * Rest are assumed ABG SKU -- if this is not the
8587 * case then the card will get the wrong 'Detected'
8588 * line in the kernel log however the code that
8589 * initializes the GEO table will detect no A-band
8590 * channels and remove the is_abg mask.
8591 */
8592 default:
8593 priv->is_abg = 1;
8594 break;
8595 }
8596
8597 printk(KERN_INFO DRV_NAME 8092 printk(KERN_INFO DRV_NAME
8598 ": Detected Intel PRO/Wireless 3945%sBG Network Connection\n", 8093 ": Detected Intel Wireless WiFi Link %s\n", priv->cfg->name);
8599 priv->is_abg ? "A" : "");
8600 8094
8601 /* Device-specific setup */ 8095 /* Device-specific setup */
8602 if (iwl3945_hw_set_hw_setting(priv)) { 8096 if (iwl3945_hw_set_hw_setting(priv)) {
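
The removed switch over PCI device/subsystem IDs is replaced by a per-device configuration carried in the PCI table's driver_data and stored as priv->cfg at probe time. Judging only by the members this patch uses (name, fw_name, sku), the structure looks roughly like the hypothetical sketch below; the real struct iwl_3945_cfg lives in the driver headers and may carry more fields:

struct example_3945_cfg {
        const char *name;       /* marketing name printed at probe time */
        const char *fw_name;    /* uCode image handed to request_firmware() */
        unsigned int sku;       /* capability bits, e.g. IWL_SKU_A for 5 GHz */
};

Probe code then reads these instead of decoding pci_dev->device and subsystem_device by hand, so supporting a new SKU means adding a table entry rather than another case label; the geo code earlier in this diff still clears IWL_SKU_A if the EEPROM turns out to have no 5 GHz channels.
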
@@ -8604,7 +8098,6 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
8604 goto out_iounmap; 8098 goto out_iounmap;
8605 } 8099 }
8606 8100
8607#ifdef CONFIG_IWL3945_QOS
8608 if (iwl3945_param_qos_enable) 8101 if (iwl3945_param_qos_enable)
8609 priv->qos_data.qos_enable = 1; 8102 priv->qos_data.qos_enable = 1;
8610 8103
@@ -8612,9 +8105,8 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
8612 8105
8613 priv->qos_data.qos_active = 0; 8106 priv->qos_data.qos_active = 0;
8614 priv->qos_data.qos_cap.val = 0; 8107 priv->qos_data.qos_cap.val = 0;
8615#endif /* CONFIG_IWL3945_QOS */
8616 8108
8617 iwl3945_set_rxon_channel(priv, MODE_IEEE80211G, 6); 8109 iwl3945_set_rxon_channel(priv, IEEE80211_BAND_2GHZ, 6);
8618 iwl3945_setup_deferred_work(priv); 8110 iwl3945_setup_deferred_work(priv);
8619 iwl3945_setup_rx_handlers(priv); 8111 iwl3945_setup_rx_handlers(priv);
8620 8112
@@ -8623,7 +8115,9 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
8623 priv->power_mode = IWL_POWER_AC; 8115 priv->power_mode = IWL_POWER_AC;
8624 priv->user_txpower_limit = IWL_DEFAULT_TX_POWER; 8116 priv->user_txpower_limit = IWL_DEFAULT_TX_POWER;
8625 8117
8118 spin_lock_irqsave(&priv->lock, flags);
8626 iwl3945_disable_interrupts(priv); 8119 iwl3945_disable_interrupts(priv);
8120 spin_unlock_irqrestore(&priv->lock, flags);
8627 8121
8628 err = sysfs_create_group(&pdev->dev.kobj, &iwl3945_attribute_group); 8122 err = sysfs_create_group(&pdev->dev.kobj, &iwl3945_attribute_group);
8629 if (err) { 8123 if (err) {
@@ -8665,9 +8159,7 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
8665 IWL_ERROR("initializing geos failed: %d\n", err); 8159 IWL_ERROR("initializing geos failed: %d\n", err);
8666 goto out_free_channel_map; 8160 goto out_free_channel_map;
8667 } 8161 }
8668 iwl3945_reset_channel_flag(priv);
8669 8162
8670 iwl3945_rate_control_register(priv->hw);
8671 err = ieee80211_register_hw(priv->hw); 8163 err = ieee80211_register_hw(priv->hw);
8672 if (err) { 8164 if (err) {
8673 IWL_ERROR("Failed to register network device (error %d)\n", err); 8165 IWL_ERROR("Failed to register network device (error %d)\n", err);
@@ -8711,6 +8203,7 @@ static void __devexit iwl3945_pci_remove(struct pci_dev *pdev)
8711 struct iwl3945_priv *priv = pci_get_drvdata(pdev); 8203 struct iwl3945_priv *priv = pci_get_drvdata(pdev);
8712 struct list_head *p, *q; 8204 struct list_head *p, *q;
8713 int i; 8205 int i;
8206 unsigned long flags;
8714 8207
8715 if (!priv) 8208 if (!priv)
8716 return; 8209 return;
@@ -8721,6 +8214,15 @@ static void __devexit iwl3945_pci_remove(struct pci_dev *pdev)
8721 8214
8722 iwl3945_down(priv); 8215 iwl3945_down(priv);
8723 8216
8217 /* make sure we flush any pending irq or
8218 * tasklet for the driver
8219 */
8220 spin_lock_irqsave(&priv->lock, flags);
8221 iwl3945_disable_interrupts(priv);
8222 spin_unlock_irqrestore(&priv->lock, flags);
8223
8224 iwl_synchronize_irq(priv);
8225
8724 /* Free MAC hash list for ADHOC */ 8226 /* Free MAC hash list for ADHOC */
8725 for (i = 0; i < IWL_IBSS_MAC_HASH_SIZE; i++) { 8227 for (i = 0; i < IWL_IBSS_MAC_HASH_SIZE; i++) {
8726 list_for_each_safe(p, q, &priv->ibss_mac_hash[i]) { 8228 list_for_each_safe(p, q, &priv->ibss_mac_hash[i]) {
@@ -8742,7 +8244,6 @@ static void __devexit iwl3945_pci_remove(struct pci_dev *pdev)
8742 8244
8743 if (priv->mac80211_registered) { 8245 if (priv->mac80211_registered) {
8744 ieee80211_unregister_hw(priv->hw); 8246 ieee80211_unregister_hw(priv->hw);
8745 iwl3945_rate_control_unregister(priv->hw);
8746 } 8247 }
8747 8248
8748 /*netif_stop_queue(dev); */ 8249 /*netif_stop_queue(dev); */
@@ -8823,21 +8324,35 @@ static int __init iwl3945_init(void)
8823 int ret; 8324 int ret;
8824 printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION ", " DRV_VERSION "\n"); 8325 printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION ", " DRV_VERSION "\n");
8825 printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n"); 8326 printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n");
8327
8328 ret = iwl3945_rate_control_register();
8329 if (ret) {
8330 IWL_ERROR("Unable to register rate control algorithm: %d\n", ret);
8331 return ret;
8332 }
8333
8826 ret = pci_register_driver(&iwl3945_driver); 8334 ret = pci_register_driver(&iwl3945_driver);
8827 if (ret) { 8335 if (ret) {
8828 IWL_ERROR("Unable to initialize PCI module\n"); 8336 IWL_ERROR("Unable to initialize PCI module\n");
8829 return ret; 8337 goto error_register;
8830 } 8338 }
8831#ifdef CONFIG_IWL3945_DEBUG 8339#ifdef CONFIG_IWL3945_DEBUG
8832 ret = driver_create_file(&iwl3945_driver.driver, &driver_attr_debug_level); 8340 ret = driver_create_file(&iwl3945_driver.driver, &driver_attr_debug_level);
8833 if (ret) { 8341 if (ret) {
8834 IWL_ERROR("Unable to create driver sysfs file\n"); 8342 IWL_ERROR("Unable to create driver sysfs file\n");
8835 pci_unregister_driver(&iwl3945_driver); 8343 goto error_debug;
8836 return ret;
8837 } 8344 }
8838#endif 8345#endif
8839 8346
8840 return ret; 8347 return ret;
8348
8349#ifdef CONFIG_IWL3945_DEBUG
8350error_debug:
8351 pci_unregister_driver(&iwl3945_driver);
8352#endif
8353error_register:
8354 iwl3945_rate_control_unregister();
8355 return ret;
8841} 8356}
8842 8357
8843static void __exit iwl3945_exit(void) 8358static void __exit iwl3945_exit(void)
@@ -8846,6 +8361,7 @@ static void __exit iwl3945_exit(void)
8846 driver_remove_file(&iwl3945_driver.driver, &driver_attr_debug_level); 8361 driver_remove_file(&iwl3945_driver.driver, &driver_attr_debug_level);
8847#endif 8362#endif
8848 pci_unregister_driver(&iwl3945_driver); 8363 pci_unregister_driver(&iwl3945_driver);
8364 iwl3945_rate_control_unregister();
8849} 8365}
8850 8366
8851module_param_named(antenna, iwl3945_param_antenna, int, 0444); 8367module_param_named(antenna, iwl3945_param_antenna, int, 0444);
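
The module init/exit hunks above turn rate-scaling registration into a module-scope step rather than a per-device one (the per-device iwl3945_rate_control_register/unregister calls disappear from probe and remove earlier in this diff). The resulting ordering, condensed with illustrative function names:

static int __init example_init(void)
{
        int ret;

        /* the rate-control algorithm must exist before any device binds */
        ret = iwl3945_rate_control_register();
        if (ret)
                return ret;

        ret = pci_register_driver(&iwl3945_driver);
        if (ret)
                iwl3945_rate_control_unregister();      /* unwind in reverse */

        return ret;
}

static void __exit example_exit(void)
{
        /* unbind every device first, then drop the algorithm they used */
        pci_unregister_driver(&iwl3945_driver);
        iwl3945_rate_control_unregister();
}
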
diff --git a/drivers/net/wireless/iwlwifi/iwl4965-base.c b/drivers/net/wireless/iwlwifi/iwl4965-base.c
index 60ec29eab85a..d7e2358a213a 100644
--- a/drivers/net/wireless/iwlwifi/iwl4965-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl4965-base.c
@@ -1,6 +1,6 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2003 - 2007 Intel Corporation. All rights reserved. 3 * Copyright(c) 2003 - 2008 Intel Corporation. All rights reserved.
4 * 4 *
5 * Portions of this file are derived from the ipw3945 project, as well 5 * Portions of this file are derived from the ipw3945 project, as well
6 * as portions of the ieee80211 subsystem header files. 6 * as portions of the ieee80211 subsystem header files.
@@ -45,14 +45,14 @@
45 45
46#include <asm/div64.h> 46#include <asm/div64.h>
47 47
48#include "iwl-eeprom.h"
48#include "iwl-4965.h" 49#include "iwl-4965.h"
50#include "iwl-core.h"
51#include "iwl-io.h"
49#include "iwl-helpers.h" 52#include "iwl-helpers.h"
53#include "iwl-sta.h"
50 54
51#ifdef CONFIG_IWL4965_DEBUG 55static int iwl4965_tx_queue_update_write_ptr(struct iwl_priv *priv,
52u32 iwl4965_debug_level;
53#endif
54
55static int iwl4965_tx_queue_update_write_ptr(struct iwl4965_priv *priv,
56 struct iwl4965_tx_queue *txq); 56 struct iwl4965_tx_queue *txq);
57 57
58/****************************************************************************** 58/******************************************************************************
@@ -61,16 +61,6 @@ static int iwl4965_tx_queue_update_write_ptr(struct iwl4965_priv *priv,
61 * 61 *
62 ******************************************************************************/ 62 ******************************************************************************/
63 63
64/* module parameters */
65static int iwl4965_param_disable_hw_scan; /* def: 0 = use 4965's h/w scan */
66static int iwl4965_param_debug; /* def: 0 = minimal debug log messages */
67static int iwl4965_param_disable; /* def: enable radio */
68static int iwl4965_param_antenna; /* def: 0 = both antennas (use diversity) */
69int iwl4965_param_hwcrypto; /* def: using software encryption */
70static int iwl4965_param_qos_enable = 1; /* def: 1 = use quality of service */
71int iwl4965_param_queues_num = IWL_MAX_NUM_QUEUES; /* def: 16 Tx queues */
72int iwl4965_param_amsdu_size_8K; /* def: enable 8K amsdu size */
73
74/* 64/*
75 * module name, copyright, version, etc. 65 * module name, copyright, version, etc.
76 * NOTE: DRV_NAME is defined in iwlwifi.h for use by iwl-debug.h and printk 66 * NOTE: DRV_NAME is defined in iwlwifi.h for use by iwl-debug.h and printk
@@ -78,7 +68,7 @@ int iwl4965_param_amsdu_size_8K; /* def: enable 8K amsdu size */
78 68
79#define DRV_DESCRIPTION "Intel(R) Wireless WiFi Link 4965AGN driver for Linux" 69#define DRV_DESCRIPTION "Intel(R) Wireless WiFi Link 4965AGN driver for Linux"
80 70
81#ifdef CONFIG_IWL4965_DEBUG 71#ifdef CONFIG_IWLWIFI_DEBUG
82#define VD "d" 72#define VD "d"
83#else 73#else
84#define VD 74#define VD
@@ -90,15 +80,8 @@ int iwl4965_param_amsdu_size_8K; /* def: enable 8K amsdu size */
90#define VS 80#define VS
91#endif 81#endif
92 82
93#define IWLWIFI_VERSION "1.2.23k" VD VS 83#define DRV_VERSION IWLWIFI_VERSION VD VS
94#define DRV_COPYRIGHT "Copyright(c) 2003-2007 Intel Corporation"
95#define DRV_VERSION IWLWIFI_VERSION
96 84
97/* Change firmware file name, using "-" and incrementing number,
98 * *only* when uCode interface or architecture changes so that it
99 * is not compatible with earlier drivers.
100 * This number will also appear in << 8 position of 1st dword of uCode file */
101#define IWL4965_UCODE_API "-1"
102 85
103MODULE_DESCRIPTION(DRV_DESCRIPTION); 86MODULE_DESCRIPTION(DRV_DESCRIPTION);
104MODULE_VERSION(DRV_VERSION); 87MODULE_VERSION(DRV_VERSION);
@@ -115,16 +98,10 @@ __le16 *ieee80211_get_qos_ctrl(struct ieee80211_hdr *hdr)
115 return NULL; 98 return NULL;
116} 99}
117 100
118static const struct ieee80211_hw_mode *iwl4965_get_hw_mode( 101static const struct ieee80211_supported_band *iwl4965_get_hw_mode(
119 struct iwl4965_priv *priv, int mode) 102 struct iwl_priv *priv, enum ieee80211_band band)
120{ 103{
121 int i; 104 return priv->hw->wiphy->bands[band];
122
123 for (i = 0; i < 3; i++)
124 if (priv->modes[i].mode == mode)
125 return &priv->modes[i];
126
127 return NULL;
128} 105}
129 106
130static int iwl4965_is_empty_essid(const char *essid, int essid_len) 107static int iwl4965_is_empty_essid(const char *essid, int essid_len)
@@ -167,17 +144,6 @@ static const char *iwl4965_escape_essid(const char *essid, u8 essid_len)
167 return escaped; 144 return escaped;
168} 145}
169 146
170static void iwl4965_print_hex_dump(int level, void *p, u32 len)
171{
172#ifdef CONFIG_IWL4965_DEBUG
173 if (!(iwl4965_debug_level & level))
174 return;
175
176 print_hex_dump(KERN_DEBUG, "iwl data: ", DUMP_PREFIX_OFFSET, 16, 1,
177 p, len, 1);
178#endif
179}
180
181/*************** DMA-QUEUE-GENERAL-FUNCTIONS ***** 147/*************** DMA-QUEUE-GENERAL-FUNCTIONS *****
182 * DMA services 148 * DMA services
183 * 149 *
@@ -205,7 +171,7 @@ static void iwl4965_print_hex_dump(int level, void *p, u32 len)
205 * See more detailed info in iwl-4965-hw.h. 171 * See more detailed info in iwl-4965-hw.h.
206 ***************************************************/ 172 ***************************************************/
207 173
208static int iwl4965_queue_space(const struct iwl4965_queue *q) 174int iwl4965_queue_space(const struct iwl4965_queue *q)
209{ 175{
210 int s = q->read_ptr - q->write_ptr; 176 int s = q->read_ptr - q->write_ptr;
211 177
@@ -221,25 +187,6 @@ static int iwl4965_queue_space(const struct iwl4965_queue *q)
221 return s; 187 return s;
222} 188}
223 189
224/**
225 * iwl4965_queue_inc_wrap - increment queue index, wrap back to beginning
226 * @index -- current index
227 * @n_bd -- total number of entries in queue (must be power of 2)
228 */
229static inline int iwl4965_queue_inc_wrap(int index, int n_bd)
230{
231 return ++index & (n_bd - 1);
232}
233
234/**
235 * iwl4965_queue_dec_wrap - decrement queue index, wrap back to end
236 * @index -- current index
237 * @n_bd -- total number of entries in queue (must be power of 2)
238 */
239static inline int iwl4965_queue_dec_wrap(int index, int n_bd)
240{
241 return --index & (n_bd - 1);
242}
243 190
244static inline int x2_queue_used(const struct iwl4965_queue *q, int i) 191static inline int x2_queue_used(const struct iwl4965_queue *q, int i)
245{ 192{
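
The deleted iwl4965_queue_inc_wrap()/iwl4965_queue_dec_wrap() helpers are the reason for the power-of-two BUG_ON checks that survive below; the comments rewritten to reference iwl_queue_inc_wrap and iwl_queue_dec_wrap suggest the same helpers simply moved into shared iwlwifi code. The trick itself, as a stand-alone illustration:

static inline int example_queue_inc_wrap(int index, int n_bd)
{
        /* when n_bd is a power of two, masking with (n_bd - 1) is the same
         * as "modulo n_bd", so the index wraps to 0 without a branch */
        return ++index & (n_bd - 1);    /* 255 -> 0 for n_bd == 256 */
}

static inline int example_queue_dec_wrap(int index, int n_bd)
{
        return --index & (n_bd - 1);    /* 0 -> n_bd - 1 */
}
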
@@ -261,15 +208,15 @@ static inline u8 get_cmd_index(struct iwl4965_queue *q, u32 index, int is_huge)
261/** 208/**
262 * iwl4965_queue_init - Initialize queue's high/low-water and read/write indexes 209 * iwl4965_queue_init - Initialize queue's high/low-water and read/write indexes
263 */ 210 */
264static int iwl4965_queue_init(struct iwl4965_priv *priv, struct iwl4965_queue *q, 211static int iwl4965_queue_init(struct iwl_priv *priv, struct iwl4965_queue *q,
265 int count, int slots_num, u32 id) 212 int count, int slots_num, u32 id)
266{ 213{
267 q->n_bd = count; 214 q->n_bd = count;
268 q->n_window = slots_num; 215 q->n_window = slots_num;
269 q->id = id; 216 q->id = id;
270 217
271 /* count must be power-of-two size, otherwise iwl4965_queue_inc_wrap 218 /* count must be power-of-two size, otherwise iwl_queue_inc_wrap
272 * and iwl4965_queue_dec_wrap are broken. */ 219 * and iwl_queue_dec_wrap are broken. */
273 BUG_ON(!is_power_of_2(count)); 220 BUG_ON(!is_power_of_2(count));
274 221
275 /* slots_num must be power-of-two size, otherwise 222 /* slots_num must be power-of-two size, otherwise
@@ -292,7 +239,7 @@ static int iwl4965_queue_init(struct iwl4965_priv *priv, struct iwl4965_queue *q
292/** 239/**
293 * iwl4965_tx_queue_alloc - Alloc driver data and TFD CB for one Tx/cmd queue 240 * iwl4965_tx_queue_alloc - Alloc driver data and TFD CB for one Tx/cmd queue
294 */ 241 */
295static int iwl4965_tx_queue_alloc(struct iwl4965_priv *priv, 242static int iwl4965_tx_queue_alloc(struct iwl_priv *priv,
296 struct iwl4965_tx_queue *txq, u32 id) 243 struct iwl4965_tx_queue *txq, u32 id)
297{ 244{
298 struct pci_dev *dev = priv->pci_dev; 245 struct pci_dev *dev = priv->pci_dev;
@@ -337,7 +284,7 @@ static int iwl4965_tx_queue_alloc(struct iwl4965_priv *priv,
337/** 284/**
338 * iwl4965_tx_queue_init - Allocate and initialize one tx/cmd queue 285 * iwl4965_tx_queue_init - Allocate and initialize one tx/cmd queue
339 */ 286 */
340int iwl4965_tx_queue_init(struct iwl4965_priv *priv, 287int iwl4965_tx_queue_init(struct iwl_priv *priv,
341 struct iwl4965_tx_queue *txq, int slots_num, u32 txq_id) 288 struct iwl4965_tx_queue *txq, int slots_num, u32 txq_id)
342{ 289{
343 struct pci_dev *dev = priv->pci_dev; 290 struct pci_dev *dev = priv->pci_dev;
@@ -352,7 +299,7 @@ int iwl4965_tx_queue_init(struct iwl4965_priv *priv,
352 * For normal Tx queues (all other queues), no super-size command 299 * For normal Tx queues (all other queues), no super-size command
353 * space is needed. 300 * space is needed.
354 */ 301 */
355 len = sizeof(struct iwl4965_cmd) * slots_num; 302 len = sizeof(struct iwl_cmd) * slots_num;
356 if (txq_id == IWL_CMD_QUEUE_NUM) 303 if (txq_id == IWL_CMD_QUEUE_NUM)
357 len += IWL_MAX_SCAN_SIZE; 304 len += IWL_MAX_SCAN_SIZE;
358 txq->cmd = pci_alloc_consistent(dev, len, &txq->dma_addr_cmd); 305 txq->cmd = pci_alloc_consistent(dev, len, &txq->dma_addr_cmd);
@@ -369,7 +316,7 @@ int iwl4965_tx_queue_init(struct iwl4965_priv *priv,
369 txq->need_update = 0; 316 txq->need_update = 0;
370 317
371 /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise 318 /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
372 * iwl4965_queue_inc_wrap and iwl4965_queue_dec_wrap are broken. */ 319 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
373 BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1)); 320 BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
374 321
375 /* Initialize queue's high/low-water marks, and head/tail indexes */ 322 /* Initialize queue's high/low-water marks, and head/tail indexes */
@@ -389,7 +336,7 @@ int iwl4965_tx_queue_init(struct iwl4965_priv *priv,
389 * Free all buffers. 336 * Free all buffers.
390 * 0-fill, but do not free "txq" descriptor structure. 337 * 0-fill, but do not free "txq" descriptor structure.
391 */ 338 */
392void iwl4965_tx_queue_free(struct iwl4965_priv *priv, struct iwl4965_tx_queue *txq) 339void iwl4965_tx_queue_free(struct iwl_priv *priv, struct iwl4965_tx_queue *txq)
393{ 340{
394 struct iwl4965_queue *q = &txq->q; 341 struct iwl4965_queue *q = &txq->q;
395 struct pci_dev *dev = priv->pci_dev; 342 struct pci_dev *dev = priv->pci_dev;
@@ -400,10 +347,10 @@ void iwl4965_tx_queue_free(struct iwl4965_priv *priv, struct iwl4965_tx_queue *t
400 347
401 /* first, empty all BD's */ 348 /* first, empty all BD's */
402 for (; q->write_ptr != q->read_ptr; 349 for (; q->write_ptr != q->read_ptr;
403 q->read_ptr = iwl4965_queue_inc_wrap(q->read_ptr, q->n_bd)) 350 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd))
404 iwl4965_hw_txq_free_tfd(priv, txq); 351 iwl4965_hw_txq_free_tfd(priv, txq);
405 352
406 len = sizeof(struct iwl4965_cmd) * q->n_window; 353 len = sizeof(struct iwl_cmd) * q->n_window;
407 if (q->id == IWL_CMD_QUEUE_NUM) 354 if (q->id == IWL_CMD_QUEUE_NUM)
408 len += IWL_MAX_SCAN_SIZE; 355 len += IWL_MAX_SCAN_SIZE;
409 356
@@ -440,7 +387,7 @@ const u8 iwl4965_broadcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
440 * 387 *
441 * NOTE: This does not remove station from device's station table. 388 * NOTE: This does not remove station from device's station table.
442 */ 389 */
443static u8 iwl4965_remove_station(struct iwl4965_priv *priv, const u8 *addr, int is_ap) 390static u8 iwl4965_remove_station(struct iwl_priv *priv, const u8 *addr, int is_ap)
444{ 391{
445 int index = IWL_INVALID_STATION; 392 int index = IWL_INVALID_STATION;
446 int i; 393 int i;
@@ -451,9 +398,9 @@ static u8 iwl4965_remove_station(struct iwl4965_priv *priv, const u8 *addr, int
451 if (is_ap) 398 if (is_ap)
452 index = IWL_AP_ID; 399 index = IWL_AP_ID;
453 else if (is_broadcast_ether_addr(addr)) 400 else if (is_broadcast_ether_addr(addr))
454 index = priv->hw_setting.bcast_sta_id; 401 index = priv->hw_params.bcast_sta_id;
455 else 402 else
456 for (i = IWL_STA_ID; i < priv->hw_setting.max_stations; i++) 403 for (i = IWL_STA_ID; i < priv->hw_params.max_stations; i++)
457 if (priv->stations[i].used && 404 if (priv->stations[i].used &&
458 !compare_ether_addr(priv->stations[i].sta.sta.addr, 405 !compare_ether_addr(priv->stations[i].sta.sta.addr,
459 addr)) { 406 addr)) {
@@ -478,26 +425,9 @@ out:
478#endif 425#endif
479 426
480/** 427/**
481 * iwl4965_clear_stations_table - Clear the driver's station table
482 *
483 * NOTE: This does not clear or otherwise alter the device's station table.
484 */
485static void iwl4965_clear_stations_table(struct iwl4965_priv *priv)
486{
487 unsigned long flags;
488
489 spin_lock_irqsave(&priv->sta_lock, flags);
490
491 priv->num_stations = 0;
492 memset(priv->stations, 0, sizeof(priv->stations));
493
494 spin_unlock_irqrestore(&priv->sta_lock, flags);
495}
496
497/**
498 * iwl4965_add_station_flags - Add station to tables in driver and device 428 * iwl4965_add_station_flags - Add station to tables in driver and device
499 */ 429 */
500u8 iwl4965_add_station_flags(struct iwl4965_priv *priv, const u8 *addr, 430u8 iwl4965_add_station_flags(struct iwl_priv *priv, const u8 *addr,
501 int is_ap, u8 flags, void *ht_data) 431 int is_ap, u8 flags, void *ht_data)
502{ 432{
503 int i; 433 int i;
@@ -510,9 +440,9 @@ u8 iwl4965_add_station_flags(struct iwl4965_priv *priv, const u8 *addr,
510 if (is_ap) 440 if (is_ap)
511 index = IWL_AP_ID; 441 index = IWL_AP_ID;
512 else if (is_broadcast_ether_addr(addr)) 442 else if (is_broadcast_ether_addr(addr))
513 index = priv->hw_setting.bcast_sta_id; 443 index = priv->hw_params.bcast_sta_id;
514 else 444 else
515 for (i = IWL_STA_ID; i < priv->hw_setting.max_stations; i++) { 445 for (i = IWL_STA_ID; i < priv->hw_params.max_stations; i++) {
516 if (!compare_ether_addr(priv->stations[i].sta.sta.addr, 446 if (!compare_ether_addr(priv->stations[i].sta.sta.addr,
517 addr)) { 447 addr)) {
518 index = i; 448 index = i;
@@ -553,7 +483,7 @@ u8 iwl4965_add_station_flags(struct iwl4965_priv *priv, const u8 *addr,
553 483
554#ifdef CONFIG_IWL4965_HT 484#ifdef CONFIG_IWL4965_HT
555 /* BCAST station and IBSS stations do not work in HT mode */ 485 /* BCAST station and IBSS stations do not work in HT mode */
556 if (index != priv->hw_setting.bcast_sta_id && 486 if (index != priv->hw_params.bcast_sta_id &&
557 priv->iw_mode != IEEE80211_IF_TYPE_IBSS) 487 priv->iw_mode != IEEE80211_IF_TYPE_IBSS)
558 iwl4965_set_ht_add_station(priv, index, 488 iwl4965_set_ht_add_station(priv, index,
559 (struct ieee80211_ht_info *) ht_data); 489 (struct ieee80211_ht_info *) ht_data);
@@ -567,103 +497,10 @@ u8 iwl4965_add_station_flags(struct iwl4965_priv *priv, const u8 *addr,
567 497
568} 498}
569 499
570/*************** DRIVER STATUS FUNCTIONS *****/
571
572static inline int iwl4965_is_ready(struct iwl4965_priv *priv)
573{
574 /* The adapter is 'ready' if READY and GEO_CONFIGURED bits are
575 * set but EXIT_PENDING is not */
576 return test_bit(STATUS_READY, &priv->status) &&
577 test_bit(STATUS_GEO_CONFIGURED, &priv->status) &&
578 !test_bit(STATUS_EXIT_PENDING, &priv->status);
579}
580
581static inline int iwl4965_is_alive(struct iwl4965_priv *priv)
582{
583 return test_bit(STATUS_ALIVE, &priv->status);
584}
585
586static inline int iwl4965_is_init(struct iwl4965_priv *priv)
587{
588 return test_bit(STATUS_INIT, &priv->status);
589}
590
591static inline int iwl4965_is_rfkill(struct iwl4965_priv *priv)
592{
593 return test_bit(STATUS_RF_KILL_HW, &priv->status) ||
594 test_bit(STATUS_RF_KILL_SW, &priv->status);
595}
596
597static inline int iwl4965_is_ready_rf(struct iwl4965_priv *priv)
598{
599
600 if (iwl4965_is_rfkill(priv))
601 return 0;
602 500
603 return iwl4965_is_ready(priv);
604}
605 501
606/*************** HOST COMMAND QUEUE FUNCTIONS *****/ 502/*************** HOST COMMAND QUEUE FUNCTIONS *****/
607 503
608#define IWL_CMD(x) case x : return #x
609
610static const char *get_cmd_string(u8 cmd)
611{
612 switch (cmd) {
613 IWL_CMD(REPLY_ALIVE);
614 IWL_CMD(REPLY_ERROR);
615 IWL_CMD(REPLY_RXON);
616 IWL_CMD(REPLY_RXON_ASSOC);
617 IWL_CMD(REPLY_QOS_PARAM);
618 IWL_CMD(REPLY_RXON_TIMING);
619 IWL_CMD(REPLY_ADD_STA);
620 IWL_CMD(REPLY_REMOVE_STA);
621 IWL_CMD(REPLY_REMOVE_ALL_STA);
622 IWL_CMD(REPLY_TX);
623 IWL_CMD(REPLY_RATE_SCALE);
624 IWL_CMD(REPLY_LEDS_CMD);
625 IWL_CMD(REPLY_TX_LINK_QUALITY_CMD);
626 IWL_CMD(RADAR_NOTIFICATION);
627 IWL_CMD(REPLY_QUIET_CMD);
628 IWL_CMD(REPLY_CHANNEL_SWITCH);
629 IWL_CMD(CHANNEL_SWITCH_NOTIFICATION);
630 IWL_CMD(REPLY_SPECTRUM_MEASUREMENT_CMD);
631 IWL_CMD(SPECTRUM_MEASURE_NOTIFICATION);
632 IWL_CMD(POWER_TABLE_CMD);
633 IWL_CMD(PM_SLEEP_NOTIFICATION);
634 IWL_CMD(PM_DEBUG_STATISTIC_NOTIFIC);
635 IWL_CMD(REPLY_SCAN_CMD);
636 IWL_CMD(REPLY_SCAN_ABORT_CMD);
637 IWL_CMD(SCAN_START_NOTIFICATION);
638 IWL_CMD(SCAN_RESULTS_NOTIFICATION);
639 IWL_CMD(SCAN_COMPLETE_NOTIFICATION);
640 IWL_CMD(BEACON_NOTIFICATION);
641 IWL_CMD(REPLY_TX_BEACON);
642 IWL_CMD(WHO_IS_AWAKE_NOTIFICATION);
643 IWL_CMD(QUIET_NOTIFICATION);
644 IWL_CMD(REPLY_TX_PWR_TABLE_CMD);
645 IWL_CMD(MEASURE_ABORT_NOTIFICATION);
646 IWL_CMD(REPLY_BT_CONFIG);
647 IWL_CMD(REPLY_STATISTICS_CMD);
648 IWL_CMD(STATISTICS_NOTIFICATION);
649 IWL_CMD(REPLY_CARD_STATE_CMD);
650 IWL_CMD(CARD_STATE_NOTIFICATION);
651 IWL_CMD(MISSED_BEACONS_NOTIFICATION);
652 IWL_CMD(REPLY_CT_KILL_CONFIG_CMD);
653 IWL_CMD(SENSITIVITY_CMD);
654 IWL_CMD(REPLY_PHY_CALIBRATION_CMD);
655 IWL_CMD(REPLY_RX_PHY_CMD);
656 IWL_CMD(REPLY_RX_MPDU_CMD);
657 IWL_CMD(REPLY_4965_RX);
658 IWL_CMD(REPLY_COMPRESSED_BA);
659 default:
660 return "UNKNOWN";
661
662 }
663}
664
665#define HOST_COMPLETE_TIMEOUT (HZ / 2)
666
667/** 504/**
668 * iwl4965_enqueue_hcmd - enqueue a uCode command 505 * iwl4965_enqueue_hcmd - enqueue a uCode command
669 * @priv: device private data point 506 * @priv: device private data point
@@ -673,13 +510,13 @@ static const char *get_cmd_string(u8 cmd)
673 * failed. On success, it turns the index (> 0) of command in the 510 * failed. On success, it turns the index (> 0) of command in the
674 * command queue. 511 * command queue.
675 */ 512 */
676static int iwl4965_enqueue_hcmd(struct iwl4965_priv *priv, struct iwl4965_host_cmd *cmd) 513int iwl4965_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
677{ 514{
678 struct iwl4965_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM]; 515 struct iwl4965_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
679 struct iwl4965_queue *q = &txq->q; 516 struct iwl4965_queue *q = &txq->q;
680 struct iwl4965_tfd_frame *tfd; 517 struct iwl4965_tfd_frame *tfd;
681 u32 *control_flags; 518 u32 *control_flags;
682 struct iwl4965_cmd *out_cmd; 519 struct iwl_cmd *out_cmd;
683 u32 idx; 520 u32 idx;
684 u16 fix_size = (u16)(cmd->len + sizeof(out_cmd->hdr)); 521 u16 fix_size = (u16)(cmd->len + sizeof(out_cmd->hdr));
685 dma_addr_t phys_addr; 522 dma_addr_t phys_addr;
@@ -692,7 +529,7 @@ static int iwl4965_enqueue_hcmd(struct iwl4965_priv *priv, struct iwl4965_host_c
692 BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) && 529 BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
693 !(cmd->meta.flags & CMD_SIZE_HUGE)); 530 !(cmd->meta.flags & CMD_SIZE_HUGE));
694 531
695 if (iwl4965_is_rfkill(priv)) { 532 if (iwl_is_rfkill(priv)) {
696 IWL_DEBUG_INFO("Not sending command - RF KILL"); 533 IWL_DEBUG_INFO("Not sending command - RF KILL");
697 return -EIO; 534 return -EIO;
698 } 535 }
@@ -726,7 +563,7 @@ static int iwl4965_enqueue_hcmd(struct iwl4965_priv *priv, struct iwl4965_host_c
726 out_cmd->hdr.sequence |= cpu_to_le16(SEQ_HUGE_FRAME); 563 out_cmd->hdr.sequence |= cpu_to_le16(SEQ_HUGE_FRAME);
727 564
728 phys_addr = txq->dma_addr_cmd + sizeof(txq->cmd[0]) * idx + 565 phys_addr = txq->dma_addr_cmd + sizeof(txq->cmd[0]) * idx +
729 offsetof(struct iwl4965_cmd, hdr); 566 offsetof(struct iwl_cmd, hdr);
730 iwl4965_hw_txq_attach_buf_to_tfd(priv, tfd, phys_addr, fix_size); 567 iwl4965_hw_txq_attach_buf_to_tfd(priv, tfd, phys_addr, fix_size);
731 568
732 IWL_DEBUG_HC("Sending command %s (#%x), seq: 0x%04X, " 569 IWL_DEBUG_HC("Sending command %s (#%x), seq: 0x%04X, "
@@ -738,161 +575,25 @@ static int iwl4965_enqueue_hcmd(struct iwl4965_priv *priv, struct iwl4965_host_c
738 txq->need_update = 1; 575 txq->need_update = 1;
739 576
740 /* Set up entry in queue's byte count circular buffer */ 577 /* Set up entry in queue's byte count circular buffer */
741 ret = iwl4965_tx_queue_update_wr_ptr(priv, txq, 0); 578 priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, 0);
742 579
743 /* Increment and update queue's write index */ 580 /* Increment and update queue's write index */
744 q->write_ptr = iwl4965_queue_inc_wrap(q->write_ptr, q->n_bd); 581 q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
745 iwl4965_tx_queue_update_write_ptr(priv, txq); 582 ret = iwl4965_tx_queue_update_write_ptr(priv, txq);
746 583
747 spin_unlock_irqrestore(&priv->hcmd_lock, flags); 584 spin_unlock_irqrestore(&priv->hcmd_lock, flags);
748 return ret ? ret : idx; 585 return ret ? ret : idx;
749} 586}
750 587
751static int iwl4965_send_cmd_async(struct iwl4965_priv *priv, struct iwl4965_host_cmd *cmd) 588static void iwl4965_set_rxon_hwcrypto(struct iwl_priv *priv, int hw_decrypt)
752{ 589{
753 int ret; 590 struct iwl4965_rxon_cmd *rxon = &priv->staging_rxon;
754
755 BUG_ON(!(cmd->meta.flags & CMD_ASYNC));
756
757 /* An asynchronous command can not expect an SKB to be set. */
758 BUG_ON(cmd->meta.flags & CMD_WANT_SKB);
759
760 /* An asynchronous command MUST have a callback. */
761 BUG_ON(!cmd->meta.u.callback);
762
763 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
764 return -EBUSY;
765
766 ret = iwl4965_enqueue_hcmd(priv, cmd);
767 if (ret < 0) {
768 IWL_ERROR("Error sending %s: iwl4965_enqueue_hcmd failed: %d\n",
769 get_cmd_string(cmd->id), ret);
770 return ret;
771 }
772 return 0;
773}
774
775static int iwl4965_send_cmd_sync(struct iwl4965_priv *priv, struct iwl4965_host_cmd *cmd)
776{
777 int cmd_idx;
778 int ret;
779 static atomic_t entry = ATOMIC_INIT(0); /* reentrance protection */
780
781 BUG_ON(cmd->meta.flags & CMD_ASYNC);
782
783 /* A synchronous command can not have a callback set. */
784 BUG_ON(cmd->meta.u.callback != NULL);
785
786 if (atomic_xchg(&entry, 1)) {
787 IWL_ERROR("Error sending %s: Already sending a host command\n",
788 get_cmd_string(cmd->id));
789 return -EBUSY;
790 }
791
792 set_bit(STATUS_HCMD_ACTIVE, &priv->status);
793
794 if (cmd->meta.flags & CMD_WANT_SKB)
795 cmd->meta.source = &cmd->meta;
796
797 cmd_idx = iwl4965_enqueue_hcmd(priv, cmd);
798 if (cmd_idx < 0) {
799 ret = cmd_idx;
800 IWL_ERROR("Error sending %s: iwl4965_enqueue_hcmd failed: %d\n",
801 get_cmd_string(cmd->id), ret);
802 goto out;
803 }
804
805 ret = wait_event_interruptible_timeout(priv->wait_command_queue,
806 !test_bit(STATUS_HCMD_ACTIVE, &priv->status),
807 HOST_COMPLETE_TIMEOUT);
808 if (!ret) {
809 if (test_bit(STATUS_HCMD_ACTIVE, &priv->status)) {
810 IWL_ERROR("Error sending %s: time out after %dms.\n",
811 get_cmd_string(cmd->id),
812 jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
813
814 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
815 ret = -ETIMEDOUT;
816 goto cancel;
817 }
818 }
819
820 if (test_bit(STATUS_RF_KILL_HW, &priv->status)) {
821 IWL_DEBUG_INFO("Command %s aborted: RF KILL Switch\n",
822 get_cmd_string(cmd->id));
823 ret = -ECANCELED;
824 goto fail;
825 }
826 if (test_bit(STATUS_FW_ERROR, &priv->status)) {
827 IWL_DEBUG_INFO("Command %s failed: FW Error\n",
828 get_cmd_string(cmd->id));
829 ret = -EIO;
830 goto fail;
831 }
832 if ((cmd->meta.flags & CMD_WANT_SKB) && !cmd->meta.u.skb) {
833 IWL_ERROR("Error: Response NULL in '%s'\n",
834 get_cmd_string(cmd->id));
835 ret = -EIO;
836 goto out;
837 }
838
839 ret = 0;
840 goto out;
841
842cancel:
843 if (cmd->meta.flags & CMD_WANT_SKB) {
844 struct iwl4965_cmd *qcmd;
845
846 /* Cancel the CMD_WANT_SKB flag for the cmd in the
847 * TX cmd queue. Otherwise in case the cmd comes
848 * in later, it will possibly set an invalid
849 * address (cmd->meta.source). */
850 qcmd = &priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_idx];
851 qcmd->meta.flags &= ~CMD_WANT_SKB;
852 }
853fail:
854 if (cmd->meta.u.skb) {
855 dev_kfree_skb_any(cmd->meta.u.skb);
856 cmd->meta.u.skb = NULL;
857 }
858out:
859 atomic_set(&entry, 0);
860 return ret;
861}
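
iwl4965_send_cmd_sync() above protects itself against concurrent callers with atomic_xchg() on a static flag and then sleeps on priv->wait_command_queue until the command completes or HOST_COMPLETE_TIMEOUT expires. A rough userspace analogue of just the re-entrancy guard, using C11 atomics instead of the kernel's atomic_t (the error value and function name are illustrative):

#include <stdatomic.h>
#include <stdio.h>

#define EBUSY_SKETCH 16	/* stand-in for -EBUSY */

static atomic_int entry;	/* 0 = idle, 1 = a command is in flight */

static int send_cmd_sync_sketch(int cmd_id)
{
	/* atomically claim the slot; bail out if another caller holds it */
	if (atomic_exchange(&entry, 1))
		return -EBUSY_SKETCH;

	printf("sending command %d synchronously\n", cmd_id);
	/* ... enqueue the command, then wait for completion or timeout ... */

	atomic_store(&entry, 0);	/* release the guard */
	return 0;
}

int main(void)
{
	printf("first call: %d\n", send_cmd_sync_sketch(42));
	return 0;
}
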
862
863int iwl4965_send_cmd(struct iwl4965_priv *priv, struct iwl4965_host_cmd *cmd)
864{
865 if (cmd->meta.flags & CMD_ASYNC)
866 return iwl4965_send_cmd_async(priv, cmd);
867
868 return iwl4965_send_cmd_sync(priv, cmd);
869}
870
871int iwl4965_send_cmd_pdu(struct iwl4965_priv *priv, u8 id, u16 len, const void *data)
872{
873 struct iwl4965_host_cmd cmd = {
874 .id = id,
875 .len = len,
876 .data = data,
877 };
878
879 return iwl4965_send_cmd_sync(priv, &cmd);
880}
881
882static int __must_check iwl4965_send_cmd_u32(struct iwl4965_priv *priv, u8 id, u32 val)
883{
884 struct iwl4965_host_cmd cmd = {
885 .id = id,
886 .len = sizeof(val),
887 .data = &val,
888 };
889 591
890 return iwl4965_send_cmd_sync(priv, &cmd); 592 if (hw_decrypt)
891} 593 rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK;
594 else
595 rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK;
892 596
893int iwl4965_send_statistics_request(struct iwl4965_priv *priv)
894{
895 return iwl4965_send_cmd_u32(priv, REPLY_STATISTICS_CMD, 0);
896} 597}
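
iwl4965_send_cmd_pdu() and iwl4965_send_cmd_u32() are thin wrappers that build an on-stack host-command descriptor around a caller-supplied payload and hand it to the synchronous sender. A minimal sketch of that descriptor-building pattern; the struct, sender and command id below are illustrative stand-ins, not the driver's types:

#include <stdint.h>
#include <stdio.h>

/* illustrative stand-in for the driver's host-command descriptor */
struct host_cmd_sketch {
	uint8_t id;
	uint16_t len;
	const void *data;
};

/* a real sender would enqueue the descriptor and wait; this just reports it */
static int send_cmd_sketch(const struct host_cmd_sketch *cmd)
{
	printf("cmd id=0x%02x len=%u\n", (unsigned)cmd->id, (unsigned)cmd->len);
	return 0;
}

/* wrap a bare u32 payload the way the _u32 helper above does */
static int send_cmd_u32_sketch(uint8_t id, uint32_t val)
{
	struct host_cmd_sketch cmd = {
		.id = id,
		.len = sizeof(val),
		.data = &val,
	};
	return send_cmd_sketch(&cmd);
}

int main(void)
{
	return send_cmd_u32_sketch(0x42 /* hypothetical command id */, 0);
}
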
897 598
898/** 599/**
@@ -901,7 +602,7 @@ int iwl4965_send_statistics_request(struct iwl4965_priv *priv)
901 * there is only one AP station with id= IWL_AP_ID 602 * there is only one AP station with id= IWL_AP_ID
902 * NOTE: mutex must be held before calling this function 603 */
903 */ 604 */
904static int iwl4965_rxon_add_station(struct iwl4965_priv *priv, 605static int iwl4965_rxon_add_station(struct iwl_priv *priv,
905 const u8 *addr, int is_ap) 606 const u8 *addr, int is_ap)
906{ 607{
907 u8 sta_id; 608 u8 sta_id;
@@ -928,42 +629,6 @@ static int iwl4965_rxon_add_station(struct iwl4965_priv *priv,
928} 629}
929 630
930/** 631/**
931 * iwl4965_set_rxon_channel - Set the phymode and channel values in staging RXON
932 * @phymode: MODE_IEEE80211A sets to 5.2GHz; all else set to 2.4GHz
933 * @channel: Any channel valid for the requested phymode
934
935 * In addition to setting the staging RXON, priv->phymode is also set.
936 *
937 * NOTE: Does not commit to the hardware; it sets appropriate bit fields
938 * in the staging RXON flag structure based on the phymode
939 */
940static int iwl4965_set_rxon_channel(struct iwl4965_priv *priv, u8 phymode,
941 u16 channel)
942{
943 if (!iwl4965_get_channel_info(priv, phymode, channel)) {
944 IWL_DEBUG_INFO("Could not set channel to %d [%d]\n",
945 channel, phymode);
946 return -EINVAL;
947 }
948
949 if ((le16_to_cpu(priv->staging_rxon.channel) == channel) &&
950 (priv->phymode == phymode))
951 return 0;
952
953 priv->staging_rxon.channel = cpu_to_le16(channel);
954 if (phymode == MODE_IEEE80211A)
955 priv->staging_rxon.flags &= ~RXON_FLG_BAND_24G_MSK;
956 else
957 priv->staging_rxon.flags |= RXON_FLG_BAND_24G_MSK;
958
959 priv->phymode = phymode;
960
961 IWL_DEBUG_INFO("Staging channel set to %d [%d]\n", channel, phymode);
962
963 return 0;
964}
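
The removed iwl4965_set_rxon_channel() shows how the staging RXON tracks the band: the channel is accepted only if the channel-info table knows it, and the 2.4 GHz flag is set or cleared purely from the phymode. A compact sketch of that flag handling (the flag value, helper name and validity check are illustrative):

#include <stdbool.h>
#include <stdio.h>

#define FLG_BAND_24G	0x1u	/* stand-in for RXON_FLG_BAND_24G_MSK */

struct staging_sketch {
	unsigned int channel;
	unsigned int flags;
};

/* accept a channel and record the band flag; is_5ghz plays the phymode role */
static int set_rxon_channel_sketch(struct staging_sketch *s,
				   bool is_5ghz, unsigned int channel)
{
	if (channel == 0)
		return -1;	/* the driver consults its channel-info table here */

	s->channel = channel;
	if (is_5ghz)
		s->flags &= ~FLG_BAND_24G;
	else
		s->flags |= FLG_BAND_24G;
	return 0;
}

int main(void)
{
	struct staging_sketch s = { 0, 0 };

	set_rxon_channel_sketch(&s, false, 6);	/* 2.4 GHz channel 6 */
	printf("channel=%u flags=0x%x\n", s.channel, s.flags);
	return 0;
}
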
965
966/**
967 * iwl4965_check_rxon_cmd - validate RXON structure is valid 632 * iwl4965_check_rxon_cmd - validate RXON structure is valid
968 * 633 *
969 * NOTE: This is really only useful during development and can eventually 634 * NOTE: This is really only useful during development and can eventually
@@ -1044,7 +709,7 @@ static int iwl4965_check_rxon_cmd(struct iwl4965_rxon_cmd *rxon)
1044 * or is clearing the RXON_FILTER_ASSOC_MSK, then return 1 to indicate that 709 * or is clearing the RXON_FILTER_ASSOC_MSK, then return 1 to indicate that
1045 * a new tune (full RXON command, rather than RXON_ASSOC cmd) is required. 710 * a new tune (full RXON command, rather than RXON_ASSOC cmd) is required.
1046 */ 711 */
1047static int iwl4965_full_rxon_required(struct iwl4965_priv *priv) 712static int iwl4965_full_rxon_required(struct iwl_priv *priv)
1048{ 713{
1049 714
1050 /* These items are only settable from the full RXON command */ 715 /* These items are only settable from the full RXON command */
@@ -1084,60 +749,6 @@ static int iwl4965_full_rxon_required(struct iwl4965_priv *priv)
1084 return 0; 749 return 0;
1085} 750}
1086 751
1087static int iwl4965_send_rxon_assoc(struct iwl4965_priv *priv)
1088{
1089 int rc = 0;
1090 struct iwl4965_rx_packet *res = NULL;
1091 struct iwl4965_rxon_assoc_cmd rxon_assoc;
1092 struct iwl4965_host_cmd cmd = {
1093 .id = REPLY_RXON_ASSOC,
1094 .len = sizeof(rxon_assoc),
1095 .meta.flags = CMD_WANT_SKB,
1096 .data = &rxon_assoc,
1097 };
1098 const struct iwl4965_rxon_cmd *rxon1 = &priv->staging_rxon;
1099 const struct iwl4965_rxon_cmd *rxon2 = &priv->active_rxon;
1100
1101 if ((rxon1->flags == rxon2->flags) &&
1102 (rxon1->filter_flags == rxon2->filter_flags) &&
1103 (rxon1->cck_basic_rates == rxon2->cck_basic_rates) &&
1104 (rxon1->ofdm_ht_single_stream_basic_rates ==
1105 rxon2->ofdm_ht_single_stream_basic_rates) &&
1106 (rxon1->ofdm_ht_dual_stream_basic_rates ==
1107 rxon2->ofdm_ht_dual_stream_basic_rates) &&
1108 (rxon1->rx_chain == rxon2->rx_chain) &&
1109 (rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) {
1110 IWL_DEBUG_INFO("Using current RXON_ASSOC. Not resending.\n");
1111 return 0;
1112 }
1113
1114 rxon_assoc.flags = priv->staging_rxon.flags;
1115 rxon_assoc.filter_flags = priv->staging_rxon.filter_flags;
1116 rxon_assoc.ofdm_basic_rates = priv->staging_rxon.ofdm_basic_rates;
1117 rxon_assoc.cck_basic_rates = priv->staging_rxon.cck_basic_rates;
1118 rxon_assoc.reserved = 0;
1119 rxon_assoc.ofdm_ht_single_stream_basic_rates =
1120 priv->staging_rxon.ofdm_ht_single_stream_basic_rates;
1121 rxon_assoc.ofdm_ht_dual_stream_basic_rates =
1122 priv->staging_rxon.ofdm_ht_dual_stream_basic_rates;
1123 rxon_assoc.rx_chain_select_flags = priv->staging_rxon.rx_chain;
1124
1125 rc = iwl4965_send_cmd_sync(priv, &cmd);
1126 if (rc)
1127 return rc;
1128
1129 res = (struct iwl4965_rx_packet *)cmd.meta.u.skb->data;
1130 if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
1131 IWL_ERROR("Bad return from REPLY_RXON_ASSOC command\n");
1132 rc = -EIO;
1133 }
1134
1135 priv->alloc_rxb_skb--;
1136 dev_kfree_skb_any(cmd.meta.u.skb);
1137
1138 return rc;
1139}
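
The removed iwl4965_send_rxon_assoc() compares the handful of fields that RXON_ASSOC can change against the active configuration and skips the command entirely when nothing differs. That "diff the relevant fields before resending" idea can be sketched in isolation as below; the struct is a pared-down illustration, not the real RXON command:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* only the kind of fields RXON_ASSOC may change, pared down for illustration */
struct rxon_sketch {
	uint32_t flags;
	uint32_t filter_flags;
	uint8_t  ofdm_basic_rates;
	uint8_t  cck_basic_rates;
	uint16_t rx_chain;
};

static bool rxon_assoc_differs(const struct rxon_sketch *staging,
			       const struct rxon_sketch *active)
{
	return staging->flags            != active->flags            ||
	       staging->filter_flags     != active->filter_flags     ||
	       staging->ofdm_basic_rates != active->ofdm_basic_rates ||
	       staging->cck_basic_rates  != active->cck_basic_rates  ||
	       staging->rx_chain         != active->rx_chain;
}

int main(void)
{
	struct rxon_sketch active = { .flags = 0x1, .rx_chain = 0x7 };
	struct rxon_sketch staging = active;

	if (!rxon_assoc_differs(&staging, &active))
		printf("Using current RXON_ASSOC. Not resending.\n");

	staging.filter_flags |= 0x8;	/* something changed */
	if (rxon_assoc_differs(&staging, &active))
		printf("sending REPLY_RXON_ASSOC\n");
	return 0;
}
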
1140
1141/** 752/**
1142 * iwl4965_commit_rxon - commit staging_rxon to hardware 753 * iwl4965_commit_rxon - commit staging_rxon to hardware
1143 * 754 *
@@ -1146,14 +757,14 @@ static int iwl4965_send_rxon_assoc(struct iwl4965_priv *priv)
1146 * function correctly transitions out of the RXON_ASSOC_MSK state if 757 * function correctly transitions out of the RXON_ASSOC_MSK state if
1147 * a HW tune is required based on the RXON structure changes. 758 * a HW tune is required based on the RXON structure changes.
1148 */ 759 */
1149static int iwl4965_commit_rxon(struct iwl4965_priv *priv) 760static int iwl4965_commit_rxon(struct iwl_priv *priv)
1150{ 761{
1151 /* cast away the const for active_rxon in this function */ 762 /* cast away the const for active_rxon in this function */
1152 struct iwl4965_rxon_cmd *active_rxon = (void *)&priv->active_rxon; 763 struct iwl4965_rxon_cmd *active_rxon = (void *)&priv->active_rxon;
1153 DECLARE_MAC_BUF(mac); 764 DECLARE_MAC_BUF(mac);
1154 int rc = 0; 765 int rc = 0;
1155 766
1156 if (!iwl4965_is_alive(priv)) 767 if (!iwl_is_alive(priv))
1157 return -1; 768 return -1;
1158 769
1159 /* always get timestamp with Rx frame */ 770 /* always get timestamp with Rx frame */
@@ -1169,7 +780,7 @@ static int iwl4965_commit_rxon(struct iwl4965_priv *priv)
1169 * iwl4965_rxon_assoc_cmd which is used to reconfigure filter 780 * iwl4965_rxon_assoc_cmd which is used to reconfigure filter
1170 * and other flags for the current radio configuration. */ 781 * and other flags for the current radio configuration. */
1171 if (!iwl4965_full_rxon_required(priv)) { 782 if (!iwl4965_full_rxon_required(priv)) {
1172 rc = iwl4965_send_rxon_assoc(priv); 783 rc = iwl_send_rxon_assoc(priv);
1173 if (rc) { 784 if (rc) {
1174 IWL_ERROR("Error setting RXON_ASSOC " 785 IWL_ERROR("Error setting RXON_ASSOC "
1175 "configuration (%d).\n", rc); 786 "configuration (%d).\n", rc);
@@ -1196,12 +807,12 @@ static int iwl4965_commit_rxon(struct iwl4965_priv *priv)
1196 * an RXON_ASSOC and the new config wants the associated mask enabled, 807 * an RXON_ASSOC and the new config wants the associated mask enabled,
1197 * we must clear the associated from the active configuration 808 * we must clear the associated from the active configuration
1198 * before we apply the new config */ 809 * before we apply the new config */
1199 if (iwl4965_is_associated(priv) && 810 if (iwl_is_associated(priv) &&
1200 (priv->staging_rxon.filter_flags & RXON_FILTER_ASSOC_MSK)) { 811 (priv->staging_rxon.filter_flags & RXON_FILTER_ASSOC_MSK)) {
1201 IWL_DEBUG_INFO("Toggling associated bit on current RXON\n"); 812 IWL_DEBUG_INFO("Toggling associated bit on current RXON\n");
1202 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK; 813 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
1203 814
1204 rc = iwl4965_send_cmd_pdu(priv, REPLY_RXON, 815 rc = iwl_send_cmd_pdu(priv, REPLY_RXON,
1205 sizeof(struct iwl4965_rxon_cmd), 816 sizeof(struct iwl4965_rxon_cmd),
1206 &priv->active_rxon); 817 &priv->active_rxon);
1207 818
@@ -1224,15 +835,16 @@ static int iwl4965_commit_rxon(struct iwl4965_priv *priv)
1224 le16_to_cpu(priv->staging_rxon.channel), 835 le16_to_cpu(priv->staging_rxon.channel),
1225 print_mac(mac, priv->staging_rxon.bssid_addr)); 836 print_mac(mac, priv->staging_rxon.bssid_addr));
1226 837
838 iwl4965_set_rxon_hwcrypto(priv, !priv->cfg->mod_params->sw_crypto);
1227 /* Apply the new configuration */ 839 /* Apply the new configuration */
1228 rc = iwl4965_send_cmd_pdu(priv, REPLY_RXON, 840 rc = iwl_send_cmd_pdu(priv, REPLY_RXON,
1229 sizeof(struct iwl4965_rxon_cmd), &priv->staging_rxon); 841 sizeof(struct iwl4965_rxon_cmd), &priv->staging_rxon);
1230 if (rc) { 842 if (rc) {
1231 IWL_ERROR("Error setting new configuration (%d).\n", rc); 843 IWL_ERROR("Error setting new configuration (%d).\n", rc);
1232 return rc; 844 return rc;
1233 } 845 }
1234 846
1235 iwl4965_clear_stations_table(priv); 847 iwlcore_clear_stations_table(priv);
1236 848
1237#ifdef CONFIG_IWL4965_SENSITIVITY 849#ifdef CONFIG_IWL4965_SENSITIVITY
1238 if (!priv->error_recovering) 850 if (!priv->error_recovering)
@@ -1261,7 +873,7 @@ static int iwl4965_commit_rxon(struct iwl4965_priv *priv)
1261 873
1262 /* If we have set the ASSOC_MSK and we are in BSS mode then 874 /* If we have set the ASSOC_MSK and we are in BSS mode then
1263 * add the IWL_AP_ID to the station rate table */ 875 * add the IWL_AP_ID to the station rate table */
1264 if (iwl4965_is_associated(priv) && 876 if (iwl_is_associated(priv) &&
1265 (priv->iw_mode == IEEE80211_IF_TYPE_STA)) { 877 (priv->iw_mode == IEEE80211_IF_TYPE_STA)) {
1266 if (iwl4965_rxon_add_station(priv, priv->active_rxon.bssid_addr, 1) 878 if (iwl4965_rxon_add_station(priv, priv->active_rxon.bssid_addr, 1)
1267 == IWL_INVALID_STATION) { 879 == IWL_INVALID_STATION) {
@@ -1269,12 +881,15 @@ static int iwl4965_commit_rxon(struct iwl4965_priv *priv)
1269 return -EIO; 881 return -EIO;
1270 } 882 }
1271 priv->assoc_station_added = 1; 883 priv->assoc_station_added = 1;
884 if (priv->default_wep_key &&
885 iwl_send_static_wepkey_cmd(priv, 0))
886 IWL_ERROR("Could not send WEP static key.\n");
1272 } 887 }
1273 888
1274 return 0; 889 return 0;
1275} 890}
1276 891
1277static int iwl4965_send_bt_config(struct iwl4965_priv *priv) 892static int iwl4965_send_bt_config(struct iwl_priv *priv)
1278{ 893{
1279 struct iwl4965_bt_cmd bt_cmd = { 894 struct iwl4965_bt_cmd bt_cmd = {
1280 .flags = 3, 895 .flags = 3,
@@ -1284,15 +899,15 @@ static int iwl4965_send_bt_config(struct iwl4965_priv *priv)
1284 .kill_cts_mask = 0, 899 .kill_cts_mask = 0,
1285 }; 900 };
1286 901
1287 return iwl4965_send_cmd_pdu(priv, REPLY_BT_CONFIG, 902 return iwl_send_cmd_pdu(priv, REPLY_BT_CONFIG,
1288 sizeof(struct iwl4965_bt_cmd), &bt_cmd); 903 sizeof(struct iwl4965_bt_cmd), &bt_cmd);
1289} 904}
1290 905
1291static int iwl4965_send_scan_abort(struct iwl4965_priv *priv) 906static int iwl4965_send_scan_abort(struct iwl_priv *priv)
1292{ 907{
1293 int rc = 0; 908 int rc = 0;
1294 struct iwl4965_rx_packet *res; 909 struct iwl4965_rx_packet *res;
1295 struct iwl4965_host_cmd cmd = { 910 struct iwl_host_cmd cmd = {
1296 .id = REPLY_SCAN_ABORT_CMD, 911 .id = REPLY_SCAN_ABORT_CMD,
1297 .meta.flags = CMD_WANT_SKB, 912 .meta.flags = CMD_WANT_SKB,
1298 }; 913 };
@@ -1305,7 +920,7 @@ static int iwl4965_send_scan_abort(struct iwl4965_priv *priv)
1305 return 0; 920 return 0;
1306 } 921 }
1307 922
1308 rc = iwl4965_send_cmd_sync(priv, &cmd); 923 rc = iwl_send_cmd_sync(priv, &cmd);
1309 if (rc) { 924 if (rc) {
1310 clear_bit(STATUS_SCAN_ABORTING, &priv->status); 925 clear_bit(STATUS_SCAN_ABORTING, &priv->status);
1311 return rc; 926 return rc;
@@ -1329,8 +944,8 @@ static int iwl4965_send_scan_abort(struct iwl4965_priv *priv)
1329 return rc; 944 return rc;
1330} 945}
1331 946
1332static int iwl4965_card_state_sync_callback(struct iwl4965_priv *priv, 947static int iwl4965_card_state_sync_callback(struct iwl_priv *priv,
1333 struct iwl4965_cmd *cmd, 948 struct iwl_cmd *cmd,
1334 struct sk_buff *skb) 949 struct sk_buff *skb)
1335{ 950{
1336 return 1; 951 return 1;
@@ -1346,9 +961,9 @@ static int iwl4965_card_state_sync_callback(struct iwl4965_priv *priv,
1346 * When in the 'halt' state, the card is shut down and must be fully 961 * When in the 'halt' state, the card is shut down and must be fully
1347 * restarted to come back on. 962 * restarted to come back on.
1348 */ 963 */
1349static int iwl4965_send_card_state(struct iwl4965_priv *priv, u32 flags, u8 meta_flag) 964static int iwl4965_send_card_state(struct iwl_priv *priv, u32 flags, u8 meta_flag)
1350{ 965{
1351 struct iwl4965_host_cmd cmd = { 966 struct iwl_host_cmd cmd = {
1352 .id = REPLY_CARD_STATE_CMD, 967 .id = REPLY_CARD_STATE_CMD,
1353 .len = sizeof(u32), 968 .len = sizeof(u32),
1354 .data = &flags, 969 .data = &flags,
@@ -1358,11 +973,11 @@ static int iwl4965_send_card_state(struct iwl4965_priv *priv, u32 flags, u8 meta
1358 if (meta_flag & CMD_ASYNC) 973 if (meta_flag & CMD_ASYNC)
1359 cmd.meta.u.callback = iwl4965_card_state_sync_callback; 974 cmd.meta.u.callback = iwl4965_card_state_sync_callback;
1360 975
1361 return iwl4965_send_cmd(priv, &cmd); 976 return iwl_send_cmd(priv, &cmd);
1362} 977}
1363 978
1364static int iwl4965_add_sta_sync_callback(struct iwl4965_priv *priv, 979static int iwl4965_add_sta_sync_callback(struct iwl_priv *priv,
1365 struct iwl4965_cmd *cmd, struct sk_buff *skb) 980 struct iwl_cmd *cmd, struct sk_buff *skb)
1366{ 981{
1367 struct iwl4965_rx_packet *res = NULL; 982 struct iwl4965_rx_packet *res = NULL;
1368 983
@@ -1389,12 +1004,12 @@ static int iwl4965_add_sta_sync_callback(struct iwl4965_priv *priv,
1389 return 1; 1004 return 1;
1390} 1005}
1391 1006
1392int iwl4965_send_add_station(struct iwl4965_priv *priv, 1007int iwl4965_send_add_station(struct iwl_priv *priv,
1393 struct iwl4965_addsta_cmd *sta, u8 flags) 1008 struct iwl4965_addsta_cmd *sta, u8 flags)
1394{ 1009{
1395 struct iwl4965_rx_packet *res = NULL; 1010 struct iwl4965_rx_packet *res = NULL;
1396 int rc = 0; 1011 int rc = 0;
1397 struct iwl4965_host_cmd cmd = { 1012 struct iwl_host_cmd cmd = {
1398 .id = REPLY_ADD_STA, 1013 .id = REPLY_ADD_STA,
1399 .len = sizeof(struct iwl4965_addsta_cmd), 1014 .len = sizeof(struct iwl4965_addsta_cmd),
1400 .meta.flags = flags, 1015 .meta.flags = flags,
@@ -1406,7 +1021,7 @@ int iwl4965_send_add_station(struct iwl4965_priv *priv,
1406 else 1021 else
1407 cmd.meta.flags |= CMD_WANT_SKB; 1022 cmd.meta.flags |= CMD_WANT_SKB;
1408 1023
1409 rc = iwl4965_send_cmd(priv, &cmd); 1024 rc = iwl_send_cmd(priv, &cmd);
1410 1025
1411 if (rc || (flags & CMD_ASYNC)) 1026 if (rc || (flags & CMD_ASYNC))
1412 return rc; 1027 return rc;
@@ -1436,62 +1051,7 @@ int iwl4965_send_add_station(struct iwl4965_priv *priv,
1436 return rc; 1051 return rc;
1437} 1052}
1438 1053
1439static int iwl4965_update_sta_key_info(struct iwl4965_priv *priv, 1054static void iwl4965_clear_free_frames(struct iwl_priv *priv)
1440 struct ieee80211_key_conf *keyconf,
1441 u8 sta_id)
1442{
1443 unsigned long flags;
1444 __le16 key_flags = 0;
1445
1446 switch (keyconf->alg) {
1447 case ALG_CCMP:
1448 key_flags |= STA_KEY_FLG_CCMP;
1449 key_flags |= cpu_to_le16(
1450 keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
1451 key_flags &= ~STA_KEY_FLG_INVALID;
1452 break;
1453 case ALG_TKIP:
1454 case ALG_WEP:
1455 default:
1456 return -EINVAL;
1457 }
1458 spin_lock_irqsave(&priv->sta_lock, flags);
1459 priv->stations[sta_id].keyinfo.alg = keyconf->alg;
1460 priv->stations[sta_id].keyinfo.keylen = keyconf->keylen;
1461 memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key,
1462 keyconf->keylen);
1463
1464 memcpy(priv->stations[sta_id].sta.key.key, keyconf->key,
1465 keyconf->keylen);
1466 priv->stations[sta_id].sta.key.key_flags = key_flags;
1467 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
1468 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
1469
1470 spin_unlock_irqrestore(&priv->sta_lock, flags);
1471
1472 IWL_DEBUG_INFO("hwcrypto: modify ucode station key info\n");
1473 iwl4965_send_add_station(priv, &priv->stations[sta_id].sta, 0);
1474 return 0;
1475}
1476
1477static int iwl4965_clear_sta_key_info(struct iwl4965_priv *priv, u8 sta_id)
1478{
1479 unsigned long flags;
1480
1481 spin_lock_irqsave(&priv->sta_lock, flags);
1482 memset(&priv->stations[sta_id].keyinfo, 0, sizeof(struct iwl4965_hw_key));
1483 memset(&priv->stations[sta_id].sta.key, 0, sizeof(struct iwl4965_keyinfo));
1484 priv->stations[sta_id].sta.key.key_flags = STA_KEY_FLG_NO_ENC;
1485 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
1486 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
1487 spin_unlock_irqrestore(&priv->sta_lock, flags);
1488
1489 IWL_DEBUG_INFO("hwcrypto: clear ucode station key info\n");
1490 iwl4965_send_add_station(priv, &priv->stations[sta_id].sta, 0);
1491 return 0;
1492}
1493
1494static void iwl4965_clear_free_frames(struct iwl4965_priv *priv)
1495{ 1055{
1496 struct list_head *element; 1056 struct list_head *element;
1497 1057
@@ -1512,7 +1072,7 @@ static void iwl4965_clear_free_frames(struct iwl4965_priv *priv)
1512 } 1072 }
1513} 1073}
1514 1074
1515static struct iwl4965_frame *iwl4965_get_free_frame(struct iwl4965_priv *priv) 1075static struct iwl4965_frame *iwl4965_get_free_frame(struct iwl_priv *priv)
1516{ 1076{
1517 struct iwl4965_frame *frame; 1077 struct iwl4965_frame *frame;
1518 struct list_head *element; 1078 struct list_head *element;
@@ -1532,18 +1092,18 @@ static struct iwl4965_frame *iwl4965_get_free_frame(struct iwl4965_priv *priv)
1532 return list_entry(element, struct iwl4965_frame, list); 1092 return list_entry(element, struct iwl4965_frame, list);
1533} 1093}
1534 1094
1535static void iwl4965_free_frame(struct iwl4965_priv *priv, struct iwl4965_frame *frame) 1095static void iwl4965_free_frame(struct iwl_priv *priv, struct iwl4965_frame *frame)
1536{ 1096{
1537 memset(frame, 0, sizeof(*frame)); 1097 memset(frame, 0, sizeof(*frame));
1538 list_add(&frame->list, &priv->free_frames); 1098 list_add(&frame->list, &priv->free_frames);
1539} 1099}
1540 1100
1541unsigned int iwl4965_fill_beacon_frame(struct iwl4965_priv *priv, 1101unsigned int iwl4965_fill_beacon_frame(struct iwl_priv *priv,
1542 struct ieee80211_hdr *hdr, 1102 struct ieee80211_hdr *hdr,
1543 const u8 *dest, int left) 1103 const u8 *dest, int left)
1544{ 1104{
1545 1105
1546 if (!iwl4965_is_associated(priv) || !priv->ibss_beacon || 1106 if (!iwl_is_associated(priv) || !priv->ibss_beacon ||
1547 ((priv->iw_mode != IEEE80211_IF_TYPE_IBSS) && 1107 ((priv->iw_mode != IEEE80211_IF_TYPE_IBSS) &&
1548 (priv->iw_mode != IEEE80211_IF_TYPE_AP))) 1108 (priv->iw_mode != IEEE80211_IF_TYPE_AP)))
1549 return 0; 1109 return 0;
@@ -1556,34 +1116,6 @@ unsigned int iwl4965_fill_beacon_frame(struct iwl4965_priv *priv,
1556 return priv->ibss_beacon->len; 1116 return priv->ibss_beacon->len;
1557} 1117}
1558 1118
1559int iwl4965_rate_index_from_plcp(int plcp)
1560{
1561 int i = 0;
1562
1563 /* 4965 HT rate format */
1564 if (plcp & RATE_MCS_HT_MSK) {
1565 i = (plcp & 0xff);
1566
1567 if (i >= IWL_RATE_MIMO_6M_PLCP)
1568 i = i - IWL_RATE_MIMO_6M_PLCP;
1569
1570 i += IWL_FIRST_OFDM_RATE;
1571 /* skip 9M not supported in ht*/
1572 if (i >= IWL_RATE_9M_INDEX)
1573 i += 1;
1574 if ((i >= IWL_FIRST_OFDM_RATE) &&
1575 (i <= IWL_LAST_OFDM_RATE))
1576 return i;
1577
1578 /* 4965 legacy rate format, search for match in table */
1579 } else {
1580 for (i = 0; i < ARRAY_SIZE(iwl4965_rates); i++)
1581 if (iwl4965_rates[i].plcp == (plcp &0xFF))
1582 return i;
1583 }
1584 return -1;
1585}
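
The removed iwl4965_rate_index_from_plcp() maps a PLCP rate symbol back to a driver rate index: HT rates are decoded arithmetically from the MCS field (skipping 9 Mbps, which HT does not carry), while legacy rates are found by scanning the rate table. A standalone sketch of just the legacy lookup half; the table below is a short illustrative subset, not the full iwl4965_rates[] array:

#include <stdint.h>
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* a few legacy PLCP codes with rates in 500 kbps units; illustrative subset */
static const struct { uint8_t plcp; uint8_t ieee; } rates_sketch[] = {
	{ 10,  2 },	/* 1 Mbps CCK */
	{ 20,  4 },	/* 2 Mbps CCK */
	{ 13, 12 },	/* 6 Mbps OFDM */
	{  5, 24 },	/* 12 Mbps OFDM */
};

/* return the table index for a legacy PLCP symbol, or -1 if unknown */
static int rate_index_from_plcp_sketch(int plcp)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(rates_sketch); i++)
		if (rates_sketch[i].plcp == (plcp & 0xFF))
			return i;
	return -1;
}

int main(void)
{
	int idx = rate_index_from_plcp_sketch(13);

	if (idx >= 0)
		printf("PLCP 13 -> index %d (%d Mbps)\n",
		       idx, rates_sketch[idx].ieee / 2);
	return 0;
}
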
1586
1587static u8 iwl4965_rate_get_lowest_plcp(int rate_mask) 1119static u8 iwl4965_rate_get_lowest_plcp(int rate_mask)
1588{ 1120{
1589 u8 i; 1121 u8 i;
@@ -1597,7 +1129,7 @@ static u8 iwl4965_rate_get_lowest_plcp(int rate_mask)
1597 return IWL_RATE_INVALID; 1129 return IWL_RATE_INVALID;
1598} 1130}
1599 1131
1600static int iwl4965_send_beacon_cmd(struct iwl4965_priv *priv) 1132static int iwl4965_send_beacon_cmd(struct iwl_priv *priv)
1601{ 1133{
1602 struct iwl4965_frame *frame; 1134 struct iwl4965_frame *frame;
1603 unsigned int frame_size; 1135 unsigned int frame_size;
@@ -1625,7 +1157,7 @@ static int iwl4965_send_beacon_cmd(struct iwl4965_priv *priv)
1625 1157
1626 frame_size = iwl4965_hw_get_beacon_cmd(priv, frame, rate); 1158 frame_size = iwl4965_hw_get_beacon_cmd(priv, frame, rate);
1627 1159
1628 rc = iwl4965_send_cmd_pdu(priv, REPLY_TX_BEACON, frame_size, 1160 rc = iwl_send_cmd_pdu(priv, REPLY_TX_BEACON, frame_size,
1629 &frame->u.cmd[0]); 1161 &frame->u.cmd[0]);
1630 1162
1631 iwl4965_free_frame(priv, frame); 1163 iwl4965_free_frame(priv, frame);
@@ -1635,238 +1167,17 @@ static int iwl4965_send_beacon_cmd(struct iwl4965_priv *priv)
1635 1167
1636/****************************************************************************** 1168/******************************************************************************
1637 * 1169 *
1638 * EEPROM related functions
1639 *
1640 ******************************************************************************/
1641
1642static void get_eeprom_mac(struct iwl4965_priv *priv, u8 *mac)
1643{
1644 memcpy(mac, priv->eeprom.mac_address, 6);
1645}
1646
1647static inline void iwl4965_eeprom_release_semaphore(struct iwl4965_priv *priv)
1648{
1649 iwl4965_clear_bit(priv, CSR_HW_IF_CONFIG_REG,
1650 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
1651}
1652
1653/**
1654 * iwl4965_eeprom_init - read EEPROM contents
1655 *
1656 * Load the EEPROM contents from adapter into priv->eeprom
1657 *
1658 * NOTE: This routine uses the non-debug IO access functions.
1659 */
1660int iwl4965_eeprom_init(struct iwl4965_priv *priv)
1661{
1662 u16 *e = (u16 *)&priv->eeprom;
1663 u32 gp = iwl4965_read32(priv, CSR_EEPROM_GP);
1664 u32 r;
1665 int sz = sizeof(priv->eeprom);
1666 int rc;
1667 int i;
1668 u16 addr;
1669
1670 /* The EEPROM structure has several padding buffers within it
1671 * and when adding new EEPROM maps is subject to programmer errors
1672 * which may be very difficult to identify without explicitly
1673 * checking the resulting size of the eeprom map. */
1674 BUILD_BUG_ON(sizeof(priv->eeprom) != IWL_EEPROM_IMAGE_SIZE);
1675
1676 if ((gp & CSR_EEPROM_GP_VALID_MSK) == CSR_EEPROM_GP_BAD_SIGNATURE) {
1677 IWL_ERROR("EEPROM not found, EEPROM_GP=0x%08x", gp);
1678 return -ENOENT;
1679 }
1680
1681 /* Make sure driver (instead of uCode) is allowed to read EEPROM */
1682 rc = iwl4965_eeprom_acquire_semaphore(priv);
1683 if (rc < 0) {
1684 IWL_ERROR("Failed to acquire EEPROM semaphore.\n");
1685 return -ENOENT;
1686 }
1687
1688 /* eeprom is an array of 16bit values */
1689 for (addr = 0; addr < sz; addr += sizeof(u16)) {
1690 _iwl4965_write32(priv, CSR_EEPROM_REG, addr << 1);
1691 _iwl4965_clear_bit(priv, CSR_EEPROM_REG, CSR_EEPROM_REG_BIT_CMD);
1692
1693 for (i = 0; i < IWL_EEPROM_ACCESS_TIMEOUT;
1694 i += IWL_EEPROM_ACCESS_DELAY) {
1695 r = _iwl4965_read_direct32(priv, CSR_EEPROM_REG);
1696 if (r & CSR_EEPROM_REG_READ_VALID_MSK)
1697 break;
1698 udelay(IWL_EEPROM_ACCESS_DELAY);
1699 }
1700
1701 if (!(r & CSR_EEPROM_REG_READ_VALID_MSK)) {
1702 IWL_ERROR("Time out reading EEPROM[%d]", addr);
1703 rc = -ETIMEDOUT;
1704 goto done;
1705 }
1706 e[addr / 2] = le16_to_cpu((__force __le16)(r >> 16));
1707 }
1708 rc = 0;
1709
1710done:
1711 iwl4965_eeprom_release_semaphore(priv);
1712 return rc;
1713}
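
The removed iwl4965_eeprom_init() reads the adapter EEPROM one 16-bit word at a time, polling a "read valid" bit with a bounded delay before giving up with -ETIMEDOUT, then pulling the data from the upper half of the register. The poll-with-timeout loop is a generic pattern; here is a self-contained sketch against a fake register (the register, bit name and delay constants are stand-ins, not the CSR_EEPROM_* definitions):

#include <stdint.h>
#include <stdio.h>

#define READ_VALID_BIT	0x00000001u	/* stand-in for the "data ready" flag */
#define ACCESS_TIMEOUT	500		/* total poll budget */
#define ACCESS_DELAY	10		/* budget consumed per poll */

/* fake register: reports "ready" only after a few polls */
static uint32_t fake_eeprom_reg(void)
{
	static int polls;
	return (++polls >= 3) ? (READ_VALID_BIT | (0xBEEFu << 16)) : 0;
}

/* poll until the valid bit appears or the budget runs out */
static int read_eeprom_word(uint16_t *out)
{
	uint32_t r = 0;
	int i;

	for (i = 0; i < ACCESS_TIMEOUT; i += ACCESS_DELAY) {
		r = fake_eeprom_reg();
		if (r & READ_VALID_BIT)
			break;
		/* the driver udelay()s here; the sketch just retries */
	}
	if (!(r & READ_VALID_BIT))
		return -1;	/* the driver returns -ETIMEDOUT */

	*out = (uint16_t)(r >> 16);	/* data lives in the high half-word */
	return 0;
}

int main(void)
{
	uint16_t word;

	if (read_eeprom_word(&word) == 0)
		printf("EEPROM word = 0x%04x\n", (unsigned)word);
	return 0;
}
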
1714
1715/******************************************************************************
1716 *
1717 * Misc. internal state and helper functions 1170 * Misc. internal state and helper functions
1718 * 1171 *
1719 ******************************************************************************/ 1172 ******************************************************************************/
1720#ifdef CONFIG_IWL4965_DEBUG
1721
1722/**
1723 * iwl4965_report_frame - dump frame to syslog during debug sessions
1724 *
1725 * You may hack this function to show different aspects of received frames,
1726 * including selective frame dumps.
1727 * group100 parameter selects whether to show 1 out of 100 good frames.
1728 *
1729 * TODO: This was originally written for 3945, need to audit for
1730 * proper operation with 4965.
1731 */
1732void iwl4965_report_frame(struct iwl4965_priv *priv,
1733 struct iwl4965_rx_packet *pkt,
1734 struct ieee80211_hdr *header, int group100)
1735{
1736 u32 to_us;
1737 u32 print_summary = 0;
1738 u32 print_dump = 0; /* set to 1 to dump all frames' contents */
1739 u32 hundred = 0;
1740 u32 dataframe = 0;
1741 u16 fc;
1742 u16 seq_ctl;
1743 u16 channel;
1744 u16 phy_flags;
1745 int rate_sym;
1746 u16 length;
1747 u16 status;
1748 u16 bcn_tmr;
1749 u32 tsf_low;
1750 u64 tsf;
1751 u8 rssi;
1752 u8 agc;
1753 u16 sig_avg;
1754 u16 noise_diff;
1755 struct iwl4965_rx_frame_stats *rx_stats = IWL_RX_STATS(pkt);
1756 struct iwl4965_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt);
1757 struct iwl4965_rx_frame_end *rx_end = IWL_RX_END(pkt);
1758 u8 *data = IWL_RX_DATA(pkt);
1759
1760 /* MAC header */
1761 fc = le16_to_cpu(header->frame_control);
1762 seq_ctl = le16_to_cpu(header->seq_ctrl);
1763
1764 /* metadata */
1765 channel = le16_to_cpu(rx_hdr->channel);
1766 phy_flags = le16_to_cpu(rx_hdr->phy_flags);
1767 rate_sym = rx_hdr->rate;
1768 length = le16_to_cpu(rx_hdr->len);
1769
1770 /* end-of-frame status and timestamp */
1771 status = le32_to_cpu(rx_end->status);
1772 bcn_tmr = le32_to_cpu(rx_end->beacon_timestamp);
1773 tsf_low = le64_to_cpu(rx_end->timestamp) & 0x0ffffffff;
1774 tsf = le64_to_cpu(rx_end->timestamp);
1775
1776 /* signal statistics */
1777 rssi = rx_stats->rssi;
1778 agc = rx_stats->agc;
1779 sig_avg = le16_to_cpu(rx_stats->sig_avg);
1780 noise_diff = le16_to_cpu(rx_stats->noise_diff);
1781
1782 to_us = !compare_ether_addr(header->addr1, priv->mac_addr);
1783
1784 /* if data frame is to us and all is good,
1785 * (optionally) print summary for only 1 out of every 100 */
1786 if (to_us && (fc & ~IEEE80211_FCTL_PROTECTED) ==
1787 (IEEE80211_FCTL_FROMDS | IEEE80211_FTYPE_DATA)) {
1788 dataframe = 1;
1789 if (!group100)
1790 print_summary = 1; /* print each frame */
1791 else if (priv->framecnt_to_us < 100) {
1792 priv->framecnt_to_us++;
1793 print_summary = 0;
1794 } else {
1795 priv->framecnt_to_us = 0;
1796 print_summary = 1;
1797 hundred = 1;
1798 }
1799 } else {
1800 /* print summary for all other frames */
1801 print_summary = 1;
1802 }
1803
1804 if (print_summary) {
1805 char *title;
1806 u32 rate;
1807
1808 if (hundred)
1809 title = "100Frames";
1810 else if (fc & IEEE80211_FCTL_RETRY)
1811 title = "Retry";
1812 else if (ieee80211_is_assoc_response(fc))
1813 title = "AscRsp";
1814 else if (ieee80211_is_reassoc_response(fc))
1815 title = "RasRsp";
1816 else if (ieee80211_is_probe_response(fc)) {
1817 title = "PrbRsp";
1818 print_dump = 1; /* dump frame contents */
1819 } else if (ieee80211_is_beacon(fc)) {
1820 title = "Beacon";
1821 print_dump = 1; /* dump frame contents */
1822 } else if (ieee80211_is_atim(fc))
1823 title = "ATIM";
1824 else if (ieee80211_is_auth(fc))
1825 title = "Auth";
1826 else if (ieee80211_is_deauth(fc))
1827 title = "DeAuth";
1828 else if (ieee80211_is_disassoc(fc))
1829 title = "DisAssoc";
1830 else
1831 title = "Frame";
1832 1173
1833 rate = iwl4965_rate_index_from_plcp(rate_sym); 1174static void iwl4965_unset_hw_params(struct iwl_priv *priv)
1834 if (rate == -1)
1835 rate = 0;
1836 else
1837 rate = iwl4965_rates[rate].ieee / 2;
1838
1839 /* print frame summary.
1840 * MAC addresses show just the last byte (for brevity),
1841 * but you can hack it to show more, if you'd like to. */
1842 if (dataframe)
1843 IWL_DEBUG_RX("%s: mhd=0x%04x, dst=0x%02x, "
1844 "len=%u, rssi=%d, chnl=%d, rate=%u, \n",
1845 title, fc, header->addr1[5],
1846 length, rssi, channel, rate);
1847 else {
1848 /* src/dst addresses assume managed mode */
1849 IWL_DEBUG_RX("%s: 0x%04x, dst=0x%02x, "
1850 "src=0x%02x, rssi=%u, tim=%lu usec, "
1851 "phy=0x%02x, chnl=%d\n",
1852 title, fc, header->addr1[5],
1853 header->addr3[5], rssi,
1854 tsf_low - priv->scan_start_tsf,
1855 phy_flags, channel);
1856 }
1857 }
1858 if (print_dump)
1859 iwl4965_print_hex_dump(IWL_DL_RX, data, length);
1860}
1861#endif
1862
1863static void iwl4965_unset_hw_setting(struct iwl4965_priv *priv)
1864{ 1175{
1865 if (priv->hw_setting.shared_virt) 1176 if (priv->shared_virt)
1866 pci_free_consistent(priv->pci_dev, 1177 pci_free_consistent(priv->pci_dev,
1867 sizeof(struct iwl4965_shared), 1178 sizeof(struct iwl4965_shared),
1868 priv->hw_setting.shared_virt, 1179 priv->shared_virt,
1869 priv->hw_setting.shared_phys); 1180 priv->shared_phys);
1870} 1181}
1871 1182
1872/** 1183/**
@@ -1898,24 +1209,20 @@ static u16 iwl4965_supported_rate_to_ie(u8 *ie, u16 supported_rate,
1898 return ret_rates; 1209 return ret_rates;
1899} 1210}
1900 1211
1901#ifdef CONFIG_IWL4965_HT
1902void static iwl4965_set_ht_capab(struct ieee80211_hw *hw,
1903 struct ieee80211_ht_cap *ht_cap,
1904 u8 use_current_config);
1905#endif
1906
1907/** 1212/**
1908 * iwl4965_fill_probe_req - fill in all required fields and IE for probe request 1213 * iwl4965_fill_probe_req - fill in all required fields and IE for probe request
1909 */ 1214 */
1910static u16 iwl4965_fill_probe_req(struct iwl4965_priv *priv, 1215static u16 iwl4965_fill_probe_req(struct iwl_priv *priv,
1911 struct ieee80211_mgmt *frame, 1216 enum ieee80211_band band,
1912 int left, int is_direct) 1217 struct ieee80211_mgmt *frame,
1218 int left, int is_direct)
1913{ 1219{
1914 int len = 0; 1220 int len = 0;
1915 u8 *pos = NULL; 1221 u8 *pos = NULL;
1916 u16 active_rates, ret_rates, cck_rates, active_rate_basic; 1222 u16 active_rates, ret_rates, cck_rates, active_rate_basic;
1917#ifdef CONFIG_IWL4965_HT 1223#ifdef CONFIG_IWL4965_HT
1918 struct ieee80211_hw_mode *mode; 1224 const struct ieee80211_supported_band *sband =
1225 iwl4965_get_hw_mode(priv, band);
1919#endif /* CONFIG_IWL4965_HT */ 1226#endif /* CONFIG_IWL4965_HT */
1920 1227
1921 /* Make sure there is enough space for the probe request, 1228 /* Make sure there is enough space for the probe request,
@@ -2000,13 +1307,18 @@ static u16 iwl4965_fill_probe_req(struct iwl4965_priv *priv,
2000 len += 2 + *pos; 1307 len += 2 + *pos;
2001 1308
2002#ifdef CONFIG_IWL4965_HT 1309#ifdef CONFIG_IWL4965_HT
2003 mode = priv->hw->conf.mode; 1310 if (sband && sband->ht_info.ht_supported) {
2004 if (mode->ht_info.ht_supported) { 1311 struct ieee80211_ht_cap *ht_cap;
2005 pos += (*pos) + 1; 1312 pos += (*pos) + 1;
2006 *pos++ = WLAN_EID_HT_CAPABILITY; 1313 *pos++ = WLAN_EID_HT_CAPABILITY;
2007 *pos++ = sizeof(struct ieee80211_ht_cap); 1314 *pos++ = sizeof(struct ieee80211_ht_cap);
2008 iwl4965_set_ht_capab(priv->hw, 1315 ht_cap = (struct ieee80211_ht_cap *)pos;
2009 (struct ieee80211_ht_cap *)pos, 0); 1316 ht_cap->cap_info = cpu_to_le16(sband->ht_info.cap);
1317 memcpy(ht_cap->supp_mcs_set, sband->ht_info.supp_mcs_set, 16);
1318 ht_cap->ampdu_params_info = (sband->ht_info.ampdu_factor &
1319 IEEE80211_HT_CAP_AMPDU_FACTOR) |
1320 ((sband->ht_info.ampdu_density << 2) &
1321 IEEE80211_HT_CAP_AMPDU_DENSITY);
2010 len += 2 + sizeof(struct ieee80211_ht_cap); 1322 len += 2 + sizeof(struct ieee80211_ht_cap);
2011 } 1323 }
2012#endif /*CONFIG_IWL4965_HT */ 1324#endif /*CONFIG_IWL4965_HT */
@@ -2018,103 +1330,15 @@ static u16 iwl4965_fill_probe_req(struct iwl4965_priv *priv,
2018/* 1330/*
2019 * QoS support 1331 * QoS support
2020*/ 1332*/
2021#ifdef CONFIG_IWL4965_QOS 1333static int iwl4965_send_qos_params_command(struct iwl_priv *priv,
2022static int iwl4965_send_qos_params_command(struct iwl4965_priv *priv,
2023 struct iwl4965_qosparam_cmd *qos) 1334 struct iwl4965_qosparam_cmd *qos)
2024{ 1335{
2025 1336
2026 return iwl4965_send_cmd_pdu(priv, REPLY_QOS_PARAM, 1337 return iwl_send_cmd_pdu(priv, REPLY_QOS_PARAM,
2027 sizeof(struct iwl4965_qosparam_cmd), qos); 1338 sizeof(struct iwl4965_qosparam_cmd), qos);
2028} 1339}
2029 1340
2030static void iwl4965_reset_qos(struct iwl4965_priv *priv) 1341static void iwl4965_activate_qos(struct iwl_priv *priv, u8 force)
2031{
2032 u16 cw_min = 15;
2033 u16 cw_max = 1023;
2034 u8 aifs = 2;
2035 u8 is_legacy = 0;
2036 unsigned long flags;
2037 int i;
2038
2039 spin_lock_irqsave(&priv->lock, flags);
2040 priv->qos_data.qos_active = 0;
2041
2042 if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS) {
2043 if (priv->qos_data.qos_enable)
2044 priv->qos_data.qos_active = 1;
2045 if (!(priv->active_rate & 0xfff0)) {
2046 cw_min = 31;
2047 is_legacy = 1;
2048 }
2049 } else if (priv->iw_mode == IEEE80211_IF_TYPE_AP) {
2050 if (priv->qos_data.qos_enable)
2051 priv->qos_data.qos_active = 1;
2052 } else if (!(priv->staging_rxon.flags & RXON_FLG_SHORT_SLOT_MSK)) {
2053 cw_min = 31;
2054 is_legacy = 1;
2055 }
2056
2057 if (priv->qos_data.qos_active)
2058 aifs = 3;
2059
2060 priv->qos_data.def_qos_parm.ac[0].cw_min = cpu_to_le16(cw_min);
2061 priv->qos_data.def_qos_parm.ac[0].cw_max = cpu_to_le16(cw_max);
2062 priv->qos_data.def_qos_parm.ac[0].aifsn = aifs;
2063 priv->qos_data.def_qos_parm.ac[0].edca_txop = 0;
2064 priv->qos_data.def_qos_parm.ac[0].reserved1 = 0;
2065
2066 if (priv->qos_data.qos_active) {
2067 i = 1;
2068 priv->qos_data.def_qos_parm.ac[i].cw_min = cpu_to_le16(cw_min);
2069 priv->qos_data.def_qos_parm.ac[i].cw_max = cpu_to_le16(cw_max);
2070 priv->qos_data.def_qos_parm.ac[i].aifsn = 7;
2071 priv->qos_data.def_qos_parm.ac[i].edca_txop = 0;
2072 priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
2073
2074 i = 2;
2075 priv->qos_data.def_qos_parm.ac[i].cw_min =
2076 cpu_to_le16((cw_min + 1) / 2 - 1);
2077 priv->qos_data.def_qos_parm.ac[i].cw_max =
2078 cpu_to_le16(cw_max);
2079 priv->qos_data.def_qos_parm.ac[i].aifsn = 2;
2080 if (is_legacy)
2081 priv->qos_data.def_qos_parm.ac[i].edca_txop =
2082 cpu_to_le16(6016);
2083 else
2084 priv->qos_data.def_qos_parm.ac[i].edca_txop =
2085 cpu_to_le16(3008);
2086 priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
2087
2088 i = 3;
2089 priv->qos_data.def_qos_parm.ac[i].cw_min =
2090 cpu_to_le16((cw_min + 1) / 4 - 1);
2091 priv->qos_data.def_qos_parm.ac[i].cw_max =
2092 cpu_to_le16((cw_max + 1) / 2 - 1);
2093 priv->qos_data.def_qos_parm.ac[i].aifsn = 2;
2094 priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
2095 if (is_legacy)
2096 priv->qos_data.def_qos_parm.ac[i].edca_txop =
2097 cpu_to_le16(3264);
2098 else
2099 priv->qos_data.def_qos_parm.ac[i].edca_txop =
2100 cpu_to_le16(1504);
2101 } else {
2102 for (i = 1; i < 4; i++) {
2103 priv->qos_data.def_qos_parm.ac[i].cw_min =
2104 cpu_to_le16(cw_min);
2105 priv->qos_data.def_qos_parm.ac[i].cw_max =
2106 cpu_to_le16(cw_max);
2107 priv->qos_data.def_qos_parm.ac[i].aifsn = aifs;
2108 priv->qos_data.def_qos_parm.ac[i].edca_txop = 0;
2109 priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
2110 }
2111 }
2112 IWL_DEBUG_QOS("set QoS to default \n");
2113
2114 spin_unlock_irqrestore(&priv->lock, flags);
2115}
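
The removed iwl4965_reset_qos() derives default EDCA parameters per access category from a base contention window: the two higher-priority queues get cw_min of roughly (cw_min+1)/2-1 and (cw_min+1)/4-1 plus a TXOP whose length depends on whether only legacy rates are active. The arithmetic in isolation, as a sketch mirroring the qos-active branch above (the struct, AC labels and the legacy flag are illustrative):

#include <stdio.h>

struct edca_sketch {
	unsigned int cw_min, cw_max;
	unsigned int aifsn;
	unsigned int txop;	/* in usec, 0 = no TXOP limit */
};

/* fill four ACs from a base contention window, as the removed code did */
static void fill_default_edca(struct edca_sketch ac[4], int legacy_only)
{
	unsigned int cw_min = legacy_only ? 31 : 15;
	unsigned int cw_max = 1023;

	ac[0] = (struct edca_sketch){ cw_min, cw_max, 3, 0 };            /* BE */
	ac[1] = (struct edca_sketch){ cw_min, cw_max, 7, 0 };            /* BK */
	ac[2] = (struct edca_sketch){ (cw_min + 1) / 2 - 1, cw_max, 2,
				      legacy_only ? 6016 : 3008 };       /* VI */
	ac[3] = (struct edca_sketch){ (cw_min + 1) / 4 - 1,
				      (cw_max + 1) / 2 - 1, 2,
				      legacy_only ? 3264 : 1504 };       /* VO */
}

int main(void)
{
	struct edca_sketch ac[4];
	int i;

	fill_default_edca(ac, 0);
	for (i = 0; i < 4; i++)
		printf("AC%d: cw_min=%u cw_max=%u aifsn=%u txop=%u\n",
		       i, ac[i].cw_min, ac[i].cw_max, ac[i].aifsn, ac[i].txop);
	return 0;
}
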
2116
2117static void iwl4965_activate_qos(struct iwl4965_priv *priv, u8 force)
2118{ 1342{
2119 unsigned long flags; 1343 unsigned long flags;
2120 1344
@@ -2142,7 +1366,7 @@ static void iwl4965_activate_qos(struct iwl4965_priv *priv, u8 force)
2142 1366
2143 spin_unlock_irqrestore(&priv->lock, flags); 1367 spin_unlock_irqrestore(&priv->lock, flags);
2144 1368
2145 if (force || iwl4965_is_associated(priv)) { 1369 if (force || iwl_is_associated(priv)) {
2146 IWL_DEBUG_QOS("send QoS cmd with Qos active=%d FLAGS=0x%X\n", 1370 IWL_DEBUG_QOS("send QoS cmd with Qos active=%d FLAGS=0x%X\n",
2147 priv->qos_data.qos_active, 1371 priv->qos_data.qos_active,
2148 priv->qos_data.def_qos_parm.qos_flags); 1372 priv->qos_data.def_qos_parm.qos_flags);
@@ -2152,7 +1376,6 @@ static void iwl4965_activate_qos(struct iwl4965_priv *priv, u8 force)
2152 } 1376 }
2153} 1377}
2154 1378
2155#endif /* CONFIG_IWL4965_QOS */
2156/* 1379/*
2157 * Power management (not Tx power!) functions 1380 * Power management (not Tx power!) functions
2158 */ 1381 */
@@ -2193,7 +1416,7 @@ static struct iwl4965_power_vec_entry range_1[IWL_POWER_AC] = {
2193 SLP_VEC(4, 7, 10, 10, 0xFF)}, 0} 1416 SLP_VEC(4, 7, 10, 10, 0xFF)}, 0}
2194}; 1417};
2195 1418
2196int iwl4965_power_init_handle(struct iwl4965_priv *priv) 1419int iwl4965_power_init_handle(struct iwl_priv *priv)
2197{ 1420{
2198 int rc = 0, i; 1421 int rc = 0, i;
2199 struct iwl4965_power_mgr *pow_data; 1422 struct iwl4965_power_mgr *pow_data;
@@ -2232,7 +1455,7 @@ int iwl4965_power_init_handle(struct iwl4965_priv *priv)
2232 return rc; 1455 return rc;
2233} 1456}
2234 1457
2235static int iwl4965_update_power_cmd(struct iwl4965_priv *priv, 1458static int iwl4965_update_power_cmd(struct iwl_priv *priv,
2236 struct iwl4965_powertable_cmd *cmd, u32 mode) 1459 struct iwl4965_powertable_cmd *cmd, u32 mode)
2237{ 1460{
2238 int rc = 0, i; 1461 int rc = 0, i;
@@ -2296,7 +1519,7 @@ static int iwl4965_update_power_cmd(struct iwl4965_priv *priv,
2296 return rc; 1519 return rc;
2297} 1520}
2298 1521
2299static int iwl4965_send_power_mode(struct iwl4965_priv *priv, u32 mode) 1522static int iwl4965_send_power_mode(struct iwl_priv *priv, u32 mode)
2300{ 1523{
2301 u32 uninitialized_var(final_mode); 1524 u32 uninitialized_var(final_mode);
2302 int rc; 1525 int rc;
@@ -2321,7 +1544,7 @@ static int iwl4965_send_power_mode(struct iwl4965_priv *priv, u32 mode)
2321 1544
2322 iwl4965_update_power_cmd(priv, &cmd, final_mode); 1545 iwl4965_update_power_cmd(priv, &cmd, final_mode);
2323 1546
2324 rc = iwl4965_send_cmd_pdu(priv, POWER_TABLE_CMD, sizeof(cmd), &cmd); 1547 rc = iwl_send_cmd_pdu(priv, POWER_TABLE_CMD, sizeof(cmd), &cmd);
2325 1548
2326 if (final_mode == IWL_POWER_MODE_CAM) 1549 if (final_mode == IWL_POWER_MODE_CAM)
2327 clear_bit(STATUS_POWER_PMI, &priv->status); 1550 clear_bit(STATUS_POWER_PMI, &priv->status);
@@ -2331,7 +1554,7 @@ static int iwl4965_send_power_mode(struct iwl4965_priv *priv, u32 mode)
2331 return rc; 1554 return rc;
2332} 1555}
2333 1556
2334int iwl4965_is_network_packet(struct iwl4965_priv *priv, struct ieee80211_hdr *header) 1557int iwl4965_is_network_packet(struct iwl_priv *priv, struct ieee80211_hdr *header)
2335{ 1558{
2336 /* Filter incoming packets to determine if they are targeted toward 1559 /* Filter incoming packets to determine if they are targeted toward
2337 * this network, discarding packets coming from ourselves */ 1560 * this network, discarding packets coming from ourselves */
@@ -2354,6 +1577,8 @@ int iwl4965_is_network_packet(struct iwl4965_priv *priv, struct ieee80211_hdr *h
2354 return !compare_ether_addr(header->addr2, priv->bssid); 1577 return !compare_ether_addr(header->addr2, priv->bssid);
2355 /* packets to our adapter go through */ 1578 /* packets to our adapter go through */
2356 return !compare_ether_addr(header->addr1, priv->mac_addr); 1579 return !compare_ether_addr(header->addr1, priv->mac_addr);
1580 default:
1581 break;
2357 } 1582 }
2358 1583
2359 return 1; 1584 return 1;
@@ -2392,7 +1617,7 @@ static const char *iwl4965_get_tx_fail_reason(u32 status)
2392 * 1617 *
2393 * NOTE: priv->mutex is not required before calling this function 1618 * NOTE: priv->mutex is not required before calling this function
2394 */ 1619 */
2395static int iwl4965_scan_cancel(struct iwl4965_priv *priv) 1620static int iwl4965_scan_cancel(struct iwl_priv *priv)
2396{ 1621{
2397 if (!test_bit(STATUS_SCAN_HW, &priv->status)) { 1622 if (!test_bit(STATUS_SCAN_HW, &priv->status)) {
2398 clear_bit(STATUS_SCANNING, &priv->status); 1623 clear_bit(STATUS_SCANNING, &priv->status);
@@ -2420,7 +1645,7 @@ static int iwl4965_scan_cancel(struct iwl4965_priv *priv)
2420 * 1645 *
2421 * NOTE: priv->mutex must be held before calling this function 1646 * NOTE: priv->mutex must be held before calling this function
2422 */ 1647 */
2423static int iwl4965_scan_cancel_timeout(struct iwl4965_priv *priv, unsigned long ms) 1648static int iwl4965_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms)
2424{ 1649{
2425 unsigned long now = jiffies; 1650 unsigned long now = jiffies;
2426 int ret; 1651 int ret;
@@ -2439,7 +1664,7 @@ static int iwl4965_scan_cancel_timeout(struct iwl4965_priv *priv, unsigned long
2439 return ret; 1664 return ret;
2440} 1665}
2441 1666
2442static void iwl4965_sequence_reset(struct iwl4965_priv *priv) 1667static void iwl4965_sequence_reset(struct iwl_priv *priv)
2443{ 1668{
2444 /* Reset ieee stats */ 1669 /* Reset ieee stats */
2445 1670
@@ -2469,7 +1694,7 @@ static __le16 iwl4965_adjust_beacon_interval(u16 beacon_val)
2469 return cpu_to_le16(new_val); 1694 return cpu_to_le16(new_val);
2470} 1695}
2471 1696
2472static void iwl4965_setup_rxon_timing(struct iwl4965_priv *priv) 1697static void iwl4965_setup_rxon_timing(struct iwl_priv *priv)
2473{ 1698{
2474 u64 interval_tm_unit; 1699 u64 interval_tm_unit;
2475 u64 tsf, result; 1700 u64 tsf, result;
@@ -2480,13 +1705,13 @@ static void iwl4965_setup_rxon_timing(struct iwl4965_priv *priv)
2480 conf = ieee80211_get_hw_conf(priv->hw); 1705 conf = ieee80211_get_hw_conf(priv->hw);
2481 1706
2482 spin_lock_irqsave(&priv->lock, flags); 1707 spin_lock_irqsave(&priv->lock, flags);
2483 priv->rxon_timing.timestamp.dw[1] = cpu_to_le32(priv->timestamp1); 1708 priv->rxon_timing.timestamp.dw[1] = cpu_to_le32(priv->timestamp >> 32);
2484 priv->rxon_timing.timestamp.dw[0] = cpu_to_le32(priv->timestamp0); 1709 priv->rxon_timing.timestamp.dw[0] =
1710 cpu_to_le32(priv->timestamp & 0xFFFFFFFF);
2485 1711
2486 priv->rxon_timing.listen_interval = INTEL_CONN_LISTEN_INTERVAL; 1712 priv->rxon_timing.listen_interval = INTEL_CONN_LISTEN_INTERVAL;
2487 1713
2488 tsf = priv->timestamp1; 1714 tsf = priv->timestamp;
2489 tsf = ((tsf << 32) | priv->timestamp0);
2490 1715
2491 beacon_int = priv->beacon_int; 1716 beacon_int = priv->beacon_int;
2492 spin_unlock_irqrestore(&priv->lock, flags); 1717 spin_unlock_irqrestore(&priv->lock, flags);
@@ -2525,14 +1750,14 @@ static void iwl4965_setup_rxon_timing(struct iwl4965_priv *priv)
2525 le16_to_cpu(priv->rxon_timing.atim_window)); 1750 le16_to_cpu(priv->rxon_timing.atim_window));
2526} 1751}
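
The rework above replaces the old timestamp0/timestamp1 pair with a single 64-bit priv->timestamp, which then has to be split back into the two 32-bit words the RXON timing command expects (the driver additionally byte-swaps them with cpu_to_le32(), which this sketch omits). A small sketch of the split; the struct and field names are illustrative:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* stand-in for the two 32-bit words of the RXON timing timestamp */
struct timestamp_words {
	uint32_t dw[2];	/* dw[0] = low half, dw[1] = high half */
};

static struct timestamp_words split_tsf(uint64_t tsf)
{
	struct timestamp_words w;

	w.dw[1] = (uint32_t)(tsf >> 32);		/* high 32 bits */
	w.dw[0] = (uint32_t)(tsf & 0xFFFFFFFF);		/* low 32 bits */
	return w;
}

int main(void)
{
	uint64_t tsf = 0x0123456789ABCDEFull;
	struct timestamp_words w = split_tsf(tsf);

	printf("tsf=0x%016" PRIx64 " -> dw[1]=0x%08x dw[0]=0x%08x\n",
	       tsf, w.dw[1], w.dw[0]);
	return 0;
}
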
2527 1752
2528static int iwl4965_scan_initiate(struct iwl4965_priv *priv) 1753static int iwl4965_scan_initiate(struct iwl_priv *priv)
2529{ 1754{
2530 if (priv->iw_mode == IEEE80211_IF_TYPE_AP) { 1755 if (priv->iw_mode == IEEE80211_IF_TYPE_AP) {
2531 IWL_ERROR("APs don't scan.\n"); 1756 IWL_ERROR("APs don't scan.\n");
2532 return 0; 1757 return 0;
2533 } 1758 }
2534 1759
2535 if (!iwl4965_is_ready_rf(priv)) { 1760 if (!iwl_is_ready_rf(priv)) {
2536 IWL_DEBUG_SCAN("Aborting scan due to not ready.\n"); 1761 IWL_DEBUG_SCAN("Aborting scan due to not ready.\n");
2537 return -EIO; 1762 return -EIO;
2538 } 1763 }
@@ -2559,27 +1784,17 @@ static int iwl4965_scan_initiate(struct iwl4965_priv *priv)
2559 return 0; 1784 return 0;
2560} 1785}
2561 1786
2562static int iwl4965_set_rxon_hwcrypto(struct iwl4965_priv *priv, int hw_decrypt)
2563{
2564 struct iwl4965_rxon_cmd *rxon = &priv->staging_rxon;
2565 1787
2566 if (hw_decrypt) 1788static void iwl4965_set_flags_for_phymode(struct iwl_priv *priv,
2567 rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK; 1789 enum ieee80211_band band)
2568 else
2569 rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK;
2570
2571 return 0;
2572}
2573
2574static void iwl4965_set_flags_for_phymode(struct iwl4965_priv *priv, u8 phymode)
2575{ 1790{
2576 if (phymode == MODE_IEEE80211A) { 1791 if (band == IEEE80211_BAND_5GHZ) {
2577 priv->staging_rxon.flags &= 1792 priv->staging_rxon.flags &=
2578 ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK 1793 ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK
2579 | RXON_FLG_CCK_MSK); 1794 | RXON_FLG_CCK_MSK);
2580 priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK; 1795 priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK;
2581 } else { 1796 } else {
2582 /* Copied from iwl4965_bg_post_associate() */ 1797 /* Copied from iwl4965_post_associate() */
2583 if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_SLOT_TIME) 1798 if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_SLOT_TIME)
2584 priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK; 1799 priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK;
2585 else 1800 else
@@ -2597,9 +1812,9 @@ static void iwl4965_set_flags_for_phymode(struct iwl4965_priv *priv, u8 phymode)
2597/* 1812/*
2598 * initialize rxon structure with default values from eeprom 1813 * initialize rxon structure with default values from eeprom
2599 */ 1814 */
2600static void iwl4965_connection_init_rx_config(struct iwl4965_priv *priv) 1815static void iwl4965_connection_init_rx_config(struct iwl_priv *priv)
2601{ 1816{
2602 const struct iwl4965_channel_info *ch_info; 1817 const struct iwl_channel_info *ch_info;
2603 1818
2604 memset(&priv->staging_rxon, 0, sizeof(priv->staging_rxon)); 1819 memset(&priv->staging_rxon, 0, sizeof(priv->staging_rxon));
2605 1820
@@ -2625,6 +1840,9 @@ static void iwl4965_connection_init_rx_config(struct iwl4965_priv *priv)
2625 priv->staging_rxon.filter_flags = RXON_FILTER_PROMISC_MSK | 1840 priv->staging_rxon.filter_flags = RXON_FILTER_PROMISC_MSK |
2626 RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_ACCEPT_GRP_MSK; 1841 RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_ACCEPT_GRP_MSK;
2627 break; 1842 break;
1843 default:
1844 IWL_ERROR("Unsupported interface type %d\n", priv->iw_mode);
1845 break;
2628 } 1846 }
2629 1847
2630#if 0 1848#if 0
@@ -2636,7 +1854,7 @@ static void iwl4965_connection_init_rx_config(struct iwl4965_priv *priv)
2636 priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK; 1854 priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
2637#endif 1855#endif
2638 1856
2639 ch_info = iwl4965_get_channel_info(priv, priv->phymode, 1857 ch_info = iwl_get_channel_info(priv, priv->band,
2640 le16_to_cpu(priv->staging_rxon.channel)); 1858 le16_to_cpu(priv->staging_rxon.channel));
2641 1859
2642 if (!ch_info) 1860 if (!ch_info)
@@ -2651,12 +1869,9 @@ static void iwl4965_connection_init_rx_config(struct iwl4965_priv *priv)
2651 ch_info = &priv->channel_info[0]; 1869 ch_info = &priv->channel_info[0];
2652 1870
2653 priv->staging_rxon.channel = cpu_to_le16(ch_info->channel); 1871 priv->staging_rxon.channel = cpu_to_le16(ch_info->channel);
2654 if (is_channel_a_band(ch_info)) 1872 priv->band = ch_info->band;
2655 priv->phymode = MODE_IEEE80211A;
2656 else
2657 priv->phymode = MODE_IEEE80211G;
2658 1873
2659 iwl4965_set_flags_for_phymode(priv, priv->phymode); 1874 iwl4965_set_flags_for_phymode(priv, priv->band);
2660 1875
2661 priv->staging_rxon.ofdm_basic_rates = 1876 priv->staging_rxon.ofdm_basic_rates =
2662 (IWL_OFDM_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF; 1877 (IWL_OFDM_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
@@ -2672,13 +1887,13 @@ static void iwl4965_connection_init_rx_config(struct iwl4965_priv *priv)
2672 iwl4965_set_rxon_chain(priv); 1887 iwl4965_set_rxon_chain(priv);
2673} 1888}
2674 1889
2675static int iwl4965_set_mode(struct iwl4965_priv *priv, int mode) 1890static int iwl4965_set_mode(struct iwl_priv *priv, int mode)
2676{ 1891{
2677 if (mode == IEEE80211_IF_TYPE_IBSS) { 1892 if (mode == IEEE80211_IF_TYPE_IBSS) {
2678 const struct iwl4965_channel_info *ch_info; 1893 const struct iwl_channel_info *ch_info;
2679 1894
2680 ch_info = iwl4965_get_channel_info(priv, 1895 ch_info = iwl_get_channel_info(priv,
2681 priv->phymode, 1896 priv->band,
2682 le16_to_cpu(priv->staging_rxon.channel)); 1897 le16_to_cpu(priv->staging_rxon.channel));
2683 1898
2684 if (!ch_info || !is_channel_ibss(ch_info)) { 1899 if (!ch_info || !is_channel_ibss(ch_info)) {
@@ -2693,10 +1908,10 @@ static int iwl4965_set_mode(struct iwl4965_priv *priv, int mode)
2693 iwl4965_connection_init_rx_config(priv); 1908 iwl4965_connection_init_rx_config(priv);
2694 memcpy(priv->staging_rxon.node_addr, priv->mac_addr, ETH_ALEN); 1909 memcpy(priv->staging_rxon.node_addr, priv->mac_addr, ETH_ALEN);
2695 1910
2696 iwl4965_clear_stations_table(priv); 1911 iwlcore_clear_stations_table(priv);
2697 1912
2698 /* don't commit rxon if rf-kill is on */ 1913 /* don't commit rxon if rf-kill is on */
2699 if (!iwl4965_is_ready_rf(priv)) 1914 if (!iwl_is_ready_rf(priv))
2700 return -EAGAIN; 1915 return -EAGAIN;
2701 1916
2702 cancel_delayed_work(&priv->scan_check); 1917 cancel_delayed_work(&priv->scan_check);
@@ -2711,44 +1926,58 @@ static int iwl4965_set_mode(struct iwl4965_priv *priv, int mode)
2711 return 0; 1926 return 0;
2712} 1927}
2713 1928
2714static void iwl4965_build_tx_cmd_hwcrypto(struct iwl4965_priv *priv, 1929static void iwl4965_build_tx_cmd_hwcrypto(struct iwl_priv *priv,
2715 struct ieee80211_tx_control *ctl, 1930 struct ieee80211_tx_control *ctl,
2716 struct iwl4965_cmd *cmd, 1931 struct iwl_cmd *cmd,
2717 struct sk_buff *skb_frag, 1932 struct sk_buff *skb_frag,
2718 int last_frag) 1933 int sta_id)
2719{ 1934{
2720 struct iwl4965_hw_key *keyinfo = &priv->stations[ctl->key_idx].keyinfo; 1935 struct iwl4965_hw_key *keyinfo = &priv->stations[sta_id].keyinfo;
1936 struct iwl_wep_key *wepkey;
1937 int keyidx = 0;
1938
1939 BUG_ON(ctl->key_idx > 3);
2721 1940
2722 switch (keyinfo->alg) { 1941 switch (keyinfo->alg) {
2723 case ALG_CCMP: 1942 case ALG_CCMP:
2724 cmd->cmd.tx.sec_ctl = TX_CMD_SEC_CCM; 1943 cmd->cmd.tx.sec_ctl = TX_CMD_SEC_CCM;
2725 memcpy(cmd->cmd.tx.key, keyinfo->key, keyinfo->keylen); 1944 memcpy(cmd->cmd.tx.key, keyinfo->key, keyinfo->keylen);
1945 if (ctl->flags & IEEE80211_TXCTL_AMPDU)
1946 cmd->cmd.tx.tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
2726 IWL_DEBUG_TX("tx_cmd with aes hwcrypto\n"); 1947 IWL_DEBUG_TX("tx_cmd with aes hwcrypto\n");
2727 break; 1948 break;
2728 1949
2729 case ALG_TKIP: 1950 case ALG_TKIP:
2730#if 0
2731 cmd->cmd.tx.sec_ctl = TX_CMD_SEC_TKIP; 1951 cmd->cmd.tx.sec_ctl = TX_CMD_SEC_TKIP;
2732 1952 ieee80211_get_tkip_key(keyinfo->conf, skb_frag,
2733 if (last_frag) 1953 IEEE80211_TKIP_P2_KEY, cmd->cmd.tx.key);
2734 memcpy(cmd->cmd.tx.tkip_mic.byte, skb_frag->tail - 8, 1954 IWL_DEBUG_TX("tx_cmd with tkip hwcrypto\n");
2735 8);
2736 else
2737 memset(cmd->cmd.tx.tkip_mic.byte, 0, 8);
2738#endif
2739 break; 1955 break;
2740 1956
2741 case ALG_WEP: 1957 case ALG_WEP:
2742 cmd->cmd.tx.sec_ctl = TX_CMD_SEC_WEP | 1958 wepkey = &priv->wep_keys[ctl->key_idx];
2743 (ctl->key_idx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT; 1959 cmd->cmd.tx.sec_ctl = 0;
2744 1960 if (priv->default_wep_key) {
2745 if (keyinfo->keylen == 13) 1961 /* the WEP key was sent as static */
2746 cmd->cmd.tx.sec_ctl |= TX_CMD_SEC_KEY128; 1962 keyidx = ctl->key_idx;
1963 memcpy(&cmd->cmd.tx.key[3], wepkey->key,
1964 wepkey->key_size);
1965 if (wepkey->key_size == WEP_KEY_LEN_128)
1966 cmd->cmd.tx.sec_ctl |= TX_CMD_SEC_KEY128;
1967 } else {
1968 /* the WEP key was sent as dynamic */
1969 keyidx = keyinfo->keyidx;
1970 memcpy(&cmd->cmd.tx.key[3], keyinfo->key,
1971 keyinfo->keylen);
1972 if (keyinfo->keylen == WEP_KEY_LEN_128)
1973 cmd->cmd.tx.sec_ctl |= TX_CMD_SEC_KEY128;
1974 }
2747 1975
2748 memcpy(&cmd->cmd.tx.key[3], keyinfo->key, keyinfo->keylen); 1976 cmd->cmd.tx.sec_ctl |= (TX_CMD_SEC_WEP |
1977 (keyidx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT);
2749 1978
2750 IWL_DEBUG_TX("Configuring packet for WEP encryption " 1979 IWL_DEBUG_TX("Configuring packet for WEP encryption "
2751 "with key %d\n", ctl->key_idx); 1980 "with key %d\n", keyidx);
2752 break; 1981 break;
2753 1982
2754 default: 1983 default:
@@ -2760,8 +1989,8 @@ static void iwl4965_build_tx_cmd_hwcrypto(struct iwl4965_priv *priv,
2760/* 1989/*
2761 * handle build REPLY_TX command notification. 1990 * handle build REPLY_TX command notification.
2762 */ 1991 */
2763static void iwl4965_build_tx_cmd_basic(struct iwl4965_priv *priv, 1992static void iwl4965_build_tx_cmd_basic(struct iwl_priv *priv,
2764 struct iwl4965_cmd *cmd, 1993 struct iwl_cmd *cmd,
2765 struct ieee80211_tx_control *ctrl, 1994 struct ieee80211_tx_control *ctrl,
2766 struct ieee80211_hdr *hdr, 1995 struct ieee80211_hdr *hdr,
2767 int is_unicast, u8 std_id) 1996 int is_unicast, u8 std_id)
@@ -2816,20 +2045,27 @@ static void iwl4965_build_tx_cmd_basic(struct iwl4965_priv *priv,
2816 cmd->cmd.tx.timeout.pm_frame_timeout = cpu_to_le16(3); 2045 cmd->cmd.tx.timeout.pm_frame_timeout = cpu_to_le16(3);
2817 else 2046 else
2818 cmd->cmd.tx.timeout.pm_frame_timeout = cpu_to_le16(2); 2047 cmd->cmd.tx.timeout.pm_frame_timeout = cpu_to_le16(2);
2819 } else 2048 } else {
2820 cmd->cmd.tx.timeout.pm_frame_timeout = 0; 2049 cmd->cmd.tx.timeout.pm_frame_timeout = 0;
2050 }
2821 2051
2822 cmd->cmd.tx.driver_txop = 0; 2052 cmd->cmd.tx.driver_txop = 0;
2823 cmd->cmd.tx.tx_flags = tx_flags; 2053 cmd->cmd.tx.tx_flags = tx_flags;
2824 cmd->cmd.tx.next_frame_len = 0; 2054 cmd->cmd.tx.next_frame_len = 0;
2825} 2055}
2826 2056static void iwl_update_tx_stats(struct iwl_priv *priv, u16 fc, u16 len)
2057{
2058 /* 0 - mgmt, 1 - cnt, 2 - data */
2059 int idx = (fc & IEEE80211_FCTL_FTYPE) >> 2;
2060 priv->tx_stats[idx].cnt++;
2061 priv->tx_stats[idx].bytes += len;
2062}
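
The new iwl_update_tx_stats() above turns the frame-control FTYPE field into a small array index by shifting it right by two, so management, control and data frames land in slots 0, 1 and 2. A sketch of that indexing; the FTYPE constants follow the 802.11 encoding and the stats array is an illustrative stand-in:

#include <stdint.h>
#include <stdio.h>

#define FCTL_FTYPE	0x000c	/* frame-control type mask, as in 802.11 */
#define FTYPE_MGMT	0x0000
#define FTYPE_CTL	0x0004
#define FTYPE_DATA	0x0008

static struct { unsigned long cnt, bytes; } tx_stats[3];	/* 0 mgmt, 1 ctl, 2 data */

static void update_tx_stats_sketch(uint16_t fc, uint16_t len)
{
	int idx = (fc & FCTL_FTYPE) >> 2;	/* 0x0 -> 0, 0x4 -> 1, 0x8 -> 2 */

	tx_stats[idx].cnt++;
	tx_stats[idx].bytes += len;
}

int main(void)
{
	update_tx_stats_sketch(FTYPE_DATA, 1500);
	update_tx_stats_sketch(FTYPE_MGMT, 28);

	printf("mgmt=%lu ctl=%lu data=%lu frames\n",
	       tx_stats[0].cnt, tx_stats[1].cnt, tx_stats[2].cnt);
	return 0;
}
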
2827/** 2063/**
2828 * iwl4965_get_sta_id - Find station's index within station table 2064 * iwl4965_get_sta_id - Find station's index within station table
2829 * 2065 *
2830 * If new IBSS station, create new entry in station table 2066 * If new IBSS station, create new entry in station table
2831 */ 2067 */
2832static int iwl4965_get_sta_id(struct iwl4965_priv *priv, 2068static int iwl4965_get_sta_id(struct iwl_priv *priv,
2833 struct ieee80211_hdr *hdr) 2069 struct ieee80211_hdr *hdr)
2834{ 2070{
2835 int sta_id; 2071 int sta_id;
@@ -2839,7 +2075,7 @@ static int iwl4965_get_sta_id(struct iwl4965_priv *priv,
2839 /* If this frame is broadcast or management, use broadcast station id */ 2075 /* If this frame is broadcast or management, use broadcast station id */
2840 if (((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA) || 2076 if (((fc & IEEE80211_FCTL_FTYPE) != IEEE80211_FTYPE_DATA) ||
2841 is_multicast_ether_addr(hdr->addr1)) 2077 is_multicast_ether_addr(hdr->addr1))
2842 return priv->hw_setting.bcast_sta_id; 2078 return priv->hw_params.bcast_sta_id;
2843 2079
2844 switch (priv->iw_mode) { 2080 switch (priv->iw_mode) {
2845 2081
@@ -2853,7 +2089,7 @@ static int iwl4965_get_sta_id(struct iwl4965_priv *priv,
2853 sta_id = iwl4965_hw_find_station(priv, hdr->addr1); 2089 sta_id = iwl4965_hw_find_station(priv, hdr->addr1);
2854 if (sta_id != IWL_INVALID_STATION) 2090 if (sta_id != IWL_INVALID_STATION)
2855 return sta_id; 2091 return sta_id;
2856 return priv->hw_setting.bcast_sta_id; 2092 return priv->hw_params.bcast_sta_id;
2857 2093
2858 /* If this frame is going out to an IBSS network, find the station, 2094 /* If this frame is going out to an IBSS network, find the station,
2859 * or create a new station table entry */ 2095 * or create a new station table entry */
@@ -2872,19 +2108,19 @@ static int iwl4965_get_sta_id(struct iwl4965_priv *priv,
2872 IWL_DEBUG_DROP("Station %s not in station map. " 2108 IWL_DEBUG_DROP("Station %s not in station map. "
2873 "Defaulting to broadcast...\n", 2109 "Defaulting to broadcast...\n",
2874 print_mac(mac, hdr->addr1)); 2110 print_mac(mac, hdr->addr1));
2875 iwl4965_print_hex_dump(IWL_DL_DROP, (u8 *) hdr, sizeof(*hdr)); 2111 iwl_print_hex_dump(IWL_DL_DROP, (u8 *) hdr, sizeof(*hdr));
2876 return priv->hw_setting.bcast_sta_id; 2112 return priv->hw_params.bcast_sta_id;
2877 2113
2878 default: 2114 default:
2879 IWL_WARNING("Unknown mode of operation: %d", priv->iw_mode); 2115 IWL_WARNING("Unknown mode of operation: %d", priv->iw_mode);
2880 return priv->hw_setting.bcast_sta_id; 2116 return priv->hw_params.bcast_sta_id;
2881 } 2117 }
2882} 2118}
2883 2119
2884/* 2120/*
2885 * start REPLY_TX command process 2121 * start REPLY_TX command process
2886 */ 2122 */
2887static int iwl4965_tx_skb(struct iwl4965_priv *priv, 2123static int iwl4965_tx_skb(struct iwl_priv *priv,
2888 struct sk_buff *skb, struct ieee80211_tx_control *ctl) 2124 struct sk_buff *skb, struct ieee80211_tx_control *ctl)
2889{ 2125{
2890 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 2126 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
@@ -2896,7 +2132,7 @@ static int iwl4965_tx_skb(struct iwl4965_priv *priv,
2896 dma_addr_t phys_addr; 2132 dma_addr_t phys_addr;
2897 dma_addr_t txcmd_phys; 2133 dma_addr_t txcmd_phys;
2898 dma_addr_t scratch_phys; 2134 dma_addr_t scratch_phys;
2899 struct iwl4965_cmd *out_cmd = NULL; 2135 struct iwl_cmd *out_cmd = NULL;
2900 u16 len, idx, len_org; 2136 u16 len, idx, len_org;
2901 u8 id, hdr_len, unicast; 2137 u8 id, hdr_len, unicast;
2902 u8 sta_id; 2138 u8 sta_id;
@@ -2908,7 +2144,7 @@ static int iwl4965_tx_skb(struct iwl4965_priv *priv,
2908 int rc; 2144 int rc;
2909 2145
2910 spin_lock_irqsave(&priv->lock, flags); 2146 spin_lock_irqsave(&priv->lock, flags);
2911 if (iwl4965_is_rfkill(priv)) { 2147 if (iwl_is_rfkill(priv)) {
2912 IWL_DEBUG_DROP("Dropping - RF KILL\n"); 2148 IWL_DEBUG_DROP("Dropping - RF KILL\n");
2913 goto drop_unlock; 2149 goto drop_unlock;
2914 } 2150 }
@@ -2918,7 +2154,7 @@ static int iwl4965_tx_skb(struct iwl4965_priv *priv,
2918 goto drop_unlock; 2154 goto drop_unlock;
2919 } 2155 }
2920 2156
2921 if ((ctl->tx_rate & 0xFF) == IWL_INVALID_RATE) { 2157 if ((ctl->tx_rate->hw_value & 0xFF) == IWL_INVALID_RATE) {
2922 IWL_ERROR("ERROR: No TX rate available.\n"); 2158 IWL_ERROR("ERROR: No TX rate available.\n");
2923 goto drop_unlock; 2159 goto drop_unlock;
2924 } 2160 }
@@ -2928,7 +2164,7 @@ static int iwl4965_tx_skb(struct iwl4965_priv *priv,
2928 2164
2929 fc = le16_to_cpu(hdr->frame_control); 2165 fc = le16_to_cpu(hdr->frame_control);
2930 2166
2931#ifdef CONFIG_IWL4965_DEBUG 2167#ifdef CONFIG_IWLWIFI_DEBUG
2932 if (ieee80211_is_auth(fc)) 2168 if (ieee80211_is_auth(fc))
2933 IWL_DEBUG_TX("Sending AUTH frame\n"); 2169 IWL_DEBUG_TX("Sending AUTH frame\n");
2934 else if (ieee80211_is_assoc_request(fc)) 2170 else if (ieee80211_is_assoc_request(fc))
@@ -2939,10 +2175,10 @@ static int iwl4965_tx_skb(struct iwl4965_priv *priv,
2939 2175
2940 /* drop all data frame if we are not associated */ 2176 /* drop all data frame if we are not associated */
2941 if (((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA) && 2177 if (((fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA) &&
2942 (!iwl4965_is_associated(priv) || 2178 (!iwl_is_associated(priv) ||
2943 ((priv->iw_mode == IEEE80211_IF_TYPE_STA) && !priv->assoc_id) || 2179 ((priv->iw_mode == IEEE80211_IF_TYPE_STA) && !priv->assoc_id) ||
2944 !priv->assoc_station_added)) { 2180 !priv->assoc_station_added)) {
2945 IWL_DEBUG_DROP("Dropping - !iwl4965_is_associated\n"); 2181 IWL_DEBUG_DROP("Dropping - !iwl_is_associated\n");
2946 goto drop_unlock; 2182 goto drop_unlock;
2947 } 2183 }
2948 2184
@@ -2972,11 +2208,10 @@ static int iwl4965_tx_skb(struct iwl4965_priv *priv,
2972 __constant_cpu_to_le16(IEEE80211_SCTL_FRAG)); 2208 __constant_cpu_to_le16(IEEE80211_SCTL_FRAG));
2973 seq_number += 0x10; 2209 seq_number += 0x10;
2974#ifdef CONFIG_IWL4965_HT 2210#ifdef CONFIG_IWL4965_HT
2975#ifdef CONFIG_IWL4965_HT_AGG
2976 /* aggregation is on for this <sta,tid> */ 2211 /* aggregation is on for this <sta,tid> */
2977 if (ctl->flags & IEEE80211_TXCTL_HT_MPDU_AGG) 2212 if (ctl->flags & IEEE80211_TXCTL_AMPDU)
2978 txq_id = priv->stations[sta_id].tid[tid].agg.txq_id; 2213 txq_id = priv->stations[sta_id].tid[tid].agg.txq_id;
2979#endif /* CONFIG_IWL4965_HT_AGG */ 2214 priv->stations[sta_id].tid[tid].tfds_in_queue++;
2980#endif /* CONFIG_IWL4965_HT */ 2215#endif /* CONFIG_IWL4965_HT */
2981 } 2216 }
2982 2217
@@ -3025,8 +2260,8 @@ static int iwl4965_tx_skb(struct iwl4965_priv *priv,
3025 * of the MAC header (device reads on dword boundaries). 2260 * of the MAC header (device reads on dword boundaries).
3026 * We'll tell device about this padding later. 2261 * We'll tell device about this padding later.
3027 */ 2262 */
3028 len = priv->hw_setting.tx_cmd_len + 2263 len = priv->hw_params.tx_cmd_len +
3029 sizeof(struct iwl4965_cmd_header) + hdr_len; 2264 sizeof(struct iwl_cmd_header) + hdr_len;
3030 2265
3031 len_org = len; 2266 len_org = len;
3032 len = (len + 3) & ~3; 2267 len = (len + 3) & ~3;
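
The alignment trick kept by the hunk above rounds the Tx-command-plus-MAC-header length up to a dword boundary with (len + 3) & ~3 while stashing the original value in len_org, so the driver can later tell the device how much padding it inserted. The arithmetic in isolation (plain C, not driver code):

    #include <assert.h>

    /* Round len up to the next multiple of 4 and report the pad bytes
     * the caller will have to account for. */
    static unsigned int dword_align(unsigned int len, unsigned int *pad)
    {
        unsigned int aligned = (len + 3) & ~3u;

        *pad = aligned - len;
        return aligned;
    }

    int main(void)
    {
        unsigned int pad;

        assert(dword_align(61, &pad) == 64 && pad == 3);
        assert(dword_align(64, &pad) == 64 && pad == 0);
        return 0;
    }
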
@@ -3038,15 +2273,15 @@ static int iwl4965_tx_skb(struct iwl4965_priv *priv,
3038 2273
3039 /* Physical address of this Tx command's header (not MAC header!), 2274 /* Physical address of this Tx command's header (not MAC header!),
3040 * within command buffer array. */ 2275 * within command buffer array. */
3041 txcmd_phys = txq->dma_addr_cmd + sizeof(struct iwl4965_cmd) * idx + 2276 txcmd_phys = txq->dma_addr_cmd + sizeof(struct iwl_cmd) * idx +
3042 offsetof(struct iwl4965_cmd, hdr); 2277 offsetof(struct iwl_cmd, hdr);
3043 2278
3044 /* Add buffer containing Tx command and MAC(!) header to TFD's 2279 /* Add buffer containing Tx command and MAC(!) header to TFD's
3045 * first entry */ 2280 * first entry */
3046 iwl4965_hw_txq_attach_buf_to_tfd(priv, tfd, txcmd_phys, len); 2281 iwl4965_hw_txq_attach_buf_to_tfd(priv, tfd, txcmd_phys, len);
3047 2282
3048 if (!(ctl->flags & IEEE80211_TXCTL_DO_NOT_ENCRYPT)) 2283 if (!(ctl->flags & IEEE80211_TXCTL_DO_NOT_ENCRYPT))
3049 iwl4965_build_tx_cmd_hwcrypto(priv, ctl, out_cmd, skb, 0); 2284 iwl4965_build_tx_cmd_hwcrypto(priv, ctl, out_cmd, skb, sta_id);
3050 2285
3051 /* Set up TFD's 2nd entry to point directly to remainder of skb, 2286 /* Set up TFD's 2nd entry to point directly to remainder of skb,
3052 * if any (802.11 null frames have no payload). */ 2287 * if any (802.11 null frames have no payload). */
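
txcmd_phys above is the usual "array base + index * element size + offsetof(member)" computation: the command slots sit in one contiguous DMA block, so the bus address of entry idx's hdr field follows directly from the block's base address. A self-contained sketch of the same pattern with a made-up slot layout (not the real struct iwl_cmd):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical command slot in a contiguous DMA array. */
    struct cmd {
        uint32_t meta;
        uint8_t  hdr[64];
        uint8_t  payload[192];
    };

    /* Bus address of cmd[idx].hdr, given the bus address of cmd[0]. */
    static uint64_t cmd_hdr_phys(uint64_t base_phys, unsigned int idx)
    {
        return base_phys + (uint64_t)sizeof(struct cmd) * idx +
               offsetof(struct cmd, hdr);
    }

    int main(void)
    {
        /* 0x10000000 + 3 * 260 + 4 = 0x10000310 with this layout */
        printf("entry 3 hdr at 0x%llx\n",
               (unsigned long long)cmd_hdr_phys(0x10000000ull, 3));
        return 0;
    }
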
@@ -3071,19 +2306,13 @@ static int iwl4965_tx_skb(struct iwl4965_priv *priv,
3071 /* set is_hcca to 0; it probably will never be implemented */ 2306 /* set is_hcca to 0; it probably will never be implemented */
3072 iwl4965_hw_build_tx_cmd_rate(priv, out_cmd, ctl, hdr, sta_id, 0); 2307 iwl4965_hw_build_tx_cmd_rate(priv, out_cmd, ctl, hdr, sta_id, 0);
3073 2308
3074 scratch_phys = txcmd_phys + sizeof(struct iwl4965_cmd_header) + 2309 iwl_update_tx_stats(priv, fc, len);
2310
2311 scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
3075 offsetof(struct iwl4965_tx_cmd, scratch); 2312 offsetof(struct iwl4965_tx_cmd, scratch);
3076 out_cmd->cmd.tx.dram_lsb_ptr = cpu_to_le32(scratch_phys); 2313 out_cmd->cmd.tx.dram_lsb_ptr = cpu_to_le32(scratch_phys);
3077 out_cmd->cmd.tx.dram_msb_ptr = iwl_get_dma_hi_address(scratch_phys); 2314 out_cmd->cmd.tx.dram_msb_ptr = iwl_get_dma_hi_address(scratch_phys);
3078 2315
3079#ifdef CONFIG_IWL4965_HT_AGG
3080#ifdef CONFIG_IWL4965_HT
3081 /* TODO: move this functionality to rate scaling */
3082 iwl4965_tl_get_stats(priv, hdr);
3083#endif /* CONFIG_IWL4965_HT_AGG */
3084#endif /*CONFIG_IWL4965_HT */
3085
3086
3087 if (!ieee80211_get_morefrag(hdr)) { 2316 if (!ieee80211_get_morefrag(hdr)) {
3088 txq->need_update = 1; 2317 txq->need_update = 1;
3089 if (qc) { 2318 if (qc) {
@@ -3095,17 +2324,17 @@ static int iwl4965_tx_skb(struct iwl4965_priv *priv,
3095 txq->need_update = 0; 2324 txq->need_update = 0;
3096 } 2325 }
3097 2326
3098 iwl4965_print_hex_dump(IWL_DL_TX, out_cmd->cmd.payload, 2327 iwl_print_hex_dump(IWL_DL_TX, out_cmd->cmd.payload,
3099 sizeof(out_cmd->cmd.tx)); 2328 sizeof(out_cmd->cmd.tx));
3100 2329
3101 iwl4965_print_hex_dump(IWL_DL_TX, (u8 *)out_cmd->cmd.tx.hdr, 2330 iwl_print_hex_dump(IWL_DL_TX, (u8 *)out_cmd->cmd.tx.hdr,
3102 ieee80211_get_hdrlen(fc)); 2331 ieee80211_get_hdrlen(fc));
3103 2332
3104 /* Set up entry for this TFD in Tx byte-count array */ 2333 /* Set up entry for this TFD in Tx byte-count array */
3105 iwl4965_tx_queue_update_wr_ptr(priv, txq, len); 2334 priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, len);
3106 2335
3107 /* Tell device the write index *just past* this latest filled TFD */ 2336 /* Tell device the write index *just past* this latest filled TFD */
3108 q->write_ptr = iwl4965_queue_inc_wrap(q->write_ptr, q->n_bd); 2337 q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
3109 rc = iwl4965_tx_queue_update_write_ptr(priv, txq); 2338 rc = iwl4965_tx_queue_update_write_ptr(priv, txq);
3110 spin_unlock_irqrestore(&priv->lock, flags); 2339 spin_unlock_irqrestore(&priv->lock, flags);
3111 2340
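
The write pointer above now advances through iwl_queue_inc_wrap(), i.e. plain "increment modulo the number of buffer descriptors"; the retry path further down uses the matching decrement. A minimal sketch of that circular-index arithmetic, assuming n_bd is simply the ring size (the real helpers may carry extra checks):

    #include <assert.h>

    static int queue_inc_wrap(int index, int n_bd)
    {
        return (index + 1 == n_bd) ? 0 : index + 1;
    }

    static int queue_dec_wrap(int index, int n_bd)
    {
        return (index == 0) ? n_bd - 1 : index - 1;
    }

    int main(void)
    {
        assert(queue_inc_wrap(254, 256) == 255);
        assert(queue_inc_wrap(255, 256) == 0);   /* wraps to the start */
        assert(queue_dec_wrap(0, 256) == 255);   /* wraps to the end   */
        return 0;
    }
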
@@ -3132,13 +2361,13 @@ drop:
3132 return -1; 2361 return -1;
3133} 2362}
3134 2363
3135static void iwl4965_set_rate(struct iwl4965_priv *priv) 2364static void iwl4965_set_rate(struct iwl_priv *priv)
3136{ 2365{
3137 const struct ieee80211_hw_mode *hw = NULL; 2366 const struct ieee80211_supported_band *hw = NULL;
3138 struct ieee80211_rate *rate; 2367 struct ieee80211_rate *rate;
3139 int i; 2368 int i;
3140 2369
3141 hw = iwl4965_get_hw_mode(priv, priv->phymode); 2370 hw = iwl4965_get_hw_mode(priv, priv->band);
3142 if (!hw) { 2371 if (!hw) {
3143 IWL_ERROR("Failed to set rate: unable to get hw mode\n"); 2372 IWL_ERROR("Failed to set rate: unable to get hw mode\n");
3144 return; 2373 return;
@@ -3147,24 +2376,10 @@ static void iwl4965_set_rate(struct iwl4965_priv *priv)
3147 priv->active_rate = 0; 2376 priv->active_rate = 0;
3148 priv->active_rate_basic = 0; 2377 priv->active_rate_basic = 0;
3149 2378
3150 IWL_DEBUG_RATE("Setting rates for 802.11%c\n", 2379 for (i = 0; i < hw->n_bitrates; i++) {
3151 hw->mode == MODE_IEEE80211A ? 2380 rate = &(hw->bitrates[i]);
3152 'a' : ((hw->mode == MODE_IEEE80211B) ? 'b' : 'g')); 2381 if (rate->hw_value < IWL_RATE_COUNT)
3153 2382 priv->active_rate |= (1 << rate->hw_value);
3154 for (i = 0; i < hw->num_rates; i++) {
3155 rate = &(hw->rates[i]);
3156 if ((rate->val < IWL_RATE_COUNT) &&
3157 (rate->flags & IEEE80211_RATE_SUPPORTED)) {
3158 IWL_DEBUG_RATE("Adding rate index %d (plcp %d)%s\n",
3159 rate->val, iwl4965_rates[rate->val].plcp,
3160 (rate->flags & IEEE80211_RATE_BASIC) ?
3161 "*" : "");
3162 priv->active_rate |= (1 << rate->val);
3163 if (rate->flags & IEEE80211_RATE_BASIC)
3164 priv->active_rate_basic |= (1 << rate->val);
3165 } else
3166 IWL_DEBUG_RATE("Not adding rate %d (plcp %d)\n",
3167 rate->val, iwl4965_rates[rate->val].plcp);
3168 } 2383 }
3169 2384
3170 IWL_DEBUG_RATE("Set active_rate = %0x, active_rate_basic = %0x\n", 2385 IWL_DEBUG_RATE("Set active_rate = %0x, active_rate_basic = %0x\n",
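
The rewritten rate loop above drops the per-mode tables, the per-rate debug prints and the IEEE80211_RATE_BASIC handling; it simply folds every bitrate's hw_value from the mac80211 ieee80211_supported_band into a bitmask, and the basic-rate masks are filled in from the fixed CCK/OFDM masks further down. A small sketch of that bitmask construction with a hypothetical rate array (RATE_COUNT stands in for IWL_RATE_COUNT):

    #include <stdio.h>

    #define RATE_COUNT 13   /* assumed size of the driver's rate table */

    struct rate { int hw_value; };

    static unsigned int build_active_rate_mask(const struct rate *rates, int n)
    {
        unsigned int mask = 0;
        int i;

        for (i = 0; i < n; i++)
            if (rates[i].hw_value < RATE_COUNT)
                mask |= 1u << rates[i].hw_value;
        return mask;
    }

    int main(void)
    {
        const struct rate rates[] = { { 0 }, { 1 }, { 4 }, { 12 } };

        /* bits 0, 1, 4 and 12 -> 0x1013 */
        printf("active_rate = 0x%x\n", build_active_rate_mask(rates, 4));
        return 0;
    }
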
@@ -3193,7 +2408,7 @@ static void iwl4965_set_rate(struct iwl4965_priv *priv)
3193 (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF; 2408 (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
3194} 2409}
3195 2410
3196static void iwl4965_radio_kill_sw(struct iwl4965_priv *priv, int disable_radio) 2411void iwl4965_radio_kill_sw(struct iwl_priv *priv, int disable_radio)
3197{ 2412{
3198 unsigned long flags; 2413 unsigned long flags;
3199 2414
@@ -3208,17 +2423,26 @@ static void iwl4965_radio_kill_sw(struct iwl4965_priv *priv, int disable_radio)
3208 /* FIXME: This is a workaround for AP */ 2423 /* FIXME: This is a workaround for AP */
3209 if (priv->iw_mode != IEEE80211_IF_TYPE_AP) { 2424 if (priv->iw_mode != IEEE80211_IF_TYPE_AP) {
3210 spin_lock_irqsave(&priv->lock, flags); 2425 spin_lock_irqsave(&priv->lock, flags);
3211 iwl4965_write32(priv, CSR_UCODE_DRV_GP1_SET, 2426 iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
3212 CSR_UCODE_SW_BIT_RFKILL); 2427 CSR_UCODE_SW_BIT_RFKILL);
3213 spin_unlock_irqrestore(&priv->lock, flags); 2428 spin_unlock_irqrestore(&priv->lock, flags);
3214 iwl4965_send_card_state(priv, CARD_STATE_CMD_DISABLE, 0); 2429 /* call the host command only if no hw rf-kill set */
2430 if (!test_bit(STATUS_RF_KILL_HW, &priv->status) &&
2431 iwl_is_ready(priv))
2432 iwl4965_send_card_state(priv,
2433 CARD_STATE_CMD_DISABLE,
2434 0);
3215 set_bit(STATUS_RF_KILL_SW, &priv->status); 2435 set_bit(STATUS_RF_KILL_SW, &priv->status);
2436
 2437 /* make sure mac80211 stops sending Tx frames */
2438 if (priv->mac80211_registered)
2439 ieee80211_stop_queues(priv->hw);
3216 } 2440 }
3217 return; 2441 return;
3218 } 2442 }
3219 2443
3220 spin_lock_irqsave(&priv->lock, flags); 2444 spin_lock_irqsave(&priv->lock, flags);
3221 iwl4965_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); 2445 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
3222 2446
3223 clear_bit(STATUS_RF_KILL_SW, &priv->status); 2447 clear_bit(STATUS_RF_KILL_SW, &priv->status);
3224 spin_unlock_irqrestore(&priv->lock, flags); 2448 spin_unlock_irqrestore(&priv->lock, flags);
@@ -3227,9 +2451,9 @@ static void iwl4965_radio_kill_sw(struct iwl4965_priv *priv, int disable_radio)
3227 msleep(10); 2451 msleep(10);
3228 2452
3229 spin_lock_irqsave(&priv->lock, flags); 2453 spin_lock_irqsave(&priv->lock, flags);
3230 iwl4965_read32(priv, CSR_UCODE_DRV_GP1); 2454 iwl_read32(priv, CSR_UCODE_DRV_GP1);
3231 if (!iwl4965_grab_nic_access(priv)) 2455 if (!iwl_grab_nic_access(priv))
3232 iwl4965_release_nic_access(priv); 2456 iwl_release_nic_access(priv);
3233 spin_unlock_irqrestore(&priv->lock, flags); 2457 spin_unlock_irqrestore(&priv->lock, flags);
3234 2458
3235 if (test_bit(STATUS_RF_KILL_HW, &priv->status)) { 2459 if (test_bit(STATUS_RF_KILL_HW, &priv->status)) {
@@ -3242,7 +2466,7 @@ static void iwl4965_radio_kill_sw(struct iwl4965_priv *priv, int disable_radio)
3242 return; 2466 return;
3243} 2467}
3244 2468
3245void iwl4965_set_decrypted_flag(struct iwl4965_priv *priv, struct sk_buff *skb, 2469void iwl4965_set_decrypted_flag(struct iwl_priv *priv, struct sk_buff *skb,
3246 u32 decrypt_res, struct ieee80211_rx_status *stats) 2470 u32 decrypt_res, struct ieee80211_rx_status *stats)
3247{ 2471{
3248 u16 fc = 2472 u16 fc =
@@ -3257,6 +2481,12 @@ void iwl4965_set_decrypted_flag(struct iwl4965_priv *priv, struct sk_buff *skb,
3257 IWL_DEBUG_RX("decrypt_res:0x%x\n", decrypt_res); 2481 IWL_DEBUG_RX("decrypt_res:0x%x\n", decrypt_res);
3258 switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) { 2482 switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) {
3259 case RX_RES_STATUS_SEC_TYPE_TKIP: 2483 case RX_RES_STATUS_SEC_TYPE_TKIP:
 2484 /* The uCode got a bad phase-1 key and passes the packet up;
 2485 * decryption will be done in SW. */
2486 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
2487 RX_RES_STATUS_BAD_KEY_TTAK)
2488 break;
2489
3260 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) == 2490 if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
3261 RX_RES_STATUS_BAD_ICV_MIC) 2491 RX_RES_STATUS_BAD_ICV_MIC)
3262 stats->flag |= RX_FLAG_MMIC_ERROR; 2492 stats->flag |= RX_FLAG_MMIC_ERROR;
@@ -3277,7 +2507,7 @@ void iwl4965_set_decrypted_flag(struct iwl4965_priv *priv, struct sk_buff *skb,
3277 2507
3278#define IWL_PACKET_RETRY_TIME HZ 2508#define IWL_PACKET_RETRY_TIME HZ
3279 2509
3280int iwl4965_is_duplicate_packet(struct iwl4965_priv *priv, struct ieee80211_hdr *header) 2510int iwl4965_is_duplicate_packet(struct iwl_priv *priv, struct ieee80211_hdr *header)
3281{ 2511{
3282 u16 sc = le16_to_cpu(header->seq_ctrl); 2512 u16 sc = le16_to_cpu(header->seq_ctrl);
3283 u16 seq = (sc & IEEE80211_SCTL_SEQ) >> 4; 2513 u16 seq = (sc & IEEE80211_SCTL_SEQ) >> 4;
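
iwl4965_is_duplicate_packet() keys its retry cache on the 802.11 sequence-control field: the upper twelve bits carry the sequence number and the lower four the fragment number, which is exactly what the (sc & IEEE80211_SCTL_SEQ) >> 4 in the hunk above extracts. The decode on its own, with the masks spelled out as in the 802.11 spec rather than taken from mac80211:

    #include <stdio.h>
    #include <stdint.h>

    #define SCTL_FRAG 0x000F  /* fragment number: bits 0..3  */
    #define SCTL_SEQ  0xFFF0  /* sequence number: bits 4..15 */

    static void decode_seq_ctrl(uint16_t sc, uint16_t *seq, uint16_t *frag)
    {
        *seq  = (sc & SCTL_SEQ) >> 4;
        *frag = sc & SCTL_FRAG;
    }

    int main(void)
    {
        uint16_t seq, frag;

        decode_seq_ctrl(0x1234, &seq, &frag);
        printf("seq=%u frag=%u\n", seq, frag);   /* seq=291 frag=4 */
        return 0;
    }
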
@@ -3394,13 +2624,13 @@ static __le32 iwl4965_add_beacon_time(u32 base, u32 addon, u32 beacon_interval)
3394 return cpu_to_le32(res); 2624 return cpu_to_le32(res);
3395} 2625}
3396 2626
3397static int iwl4965_get_measurement(struct iwl4965_priv *priv, 2627static int iwl4965_get_measurement(struct iwl_priv *priv,
3398 struct ieee80211_measurement_params *params, 2628 struct ieee80211_measurement_params *params,
3399 u8 type) 2629 u8 type)
3400{ 2630{
3401 struct iwl4965_spectrum_cmd spectrum; 2631 struct iwl4965_spectrum_cmd spectrum;
3402 struct iwl4965_rx_packet *res; 2632 struct iwl4965_rx_packet *res;
3403 struct iwl4965_host_cmd cmd = { 2633 struct iwl_host_cmd cmd = {
3404 .id = REPLY_SPECTRUM_MEASUREMENT_CMD, 2634 .id = REPLY_SPECTRUM_MEASUREMENT_CMD,
3405 .data = (void *)&spectrum, 2635 .data = (void *)&spectrum,
3406 .meta.flags = CMD_WANT_SKB, 2636 .meta.flags = CMD_WANT_SKB,
@@ -3410,7 +2640,7 @@ static int iwl4965_get_measurement(struct iwl4965_priv *priv,
3410 int spectrum_resp_status; 2640 int spectrum_resp_status;
3411 int duration = le16_to_cpu(params->duration); 2641 int duration = le16_to_cpu(params->duration);
3412 2642
3413 if (iwl4965_is_associated(priv)) 2643 if (iwl_is_associated(priv))
3414 add_time = 2644 add_time =
3415 iwl4965_usecs_to_beacons( 2645 iwl4965_usecs_to_beacons(
3416 le64_to_cpu(params->start_time) - priv->last_tsf, 2646 le64_to_cpu(params->start_time) - priv->last_tsf,
@@ -3425,7 +2655,7 @@ static int iwl4965_get_measurement(struct iwl4965_priv *priv,
3425 cmd.len = sizeof(spectrum); 2655 cmd.len = sizeof(spectrum);
3426 spectrum.len = cpu_to_le16(cmd.len - sizeof(spectrum.len)); 2656 spectrum.len = cpu_to_le16(cmd.len - sizeof(spectrum.len));
3427 2657
3428 if (iwl4965_is_associated(priv)) 2658 if (iwl_is_associated(priv))
3429 spectrum.start_time = 2659 spectrum.start_time =
3430 iwl4965_add_beacon_time(priv->last_beacon_time, 2660 iwl4965_add_beacon_time(priv->last_beacon_time,
3431 add_time, 2661 add_time,
@@ -3440,7 +2670,7 @@ static int iwl4965_get_measurement(struct iwl4965_priv *priv,
3440 spectrum.flags |= RXON_FLG_BAND_24G_MSK | 2670 spectrum.flags |= RXON_FLG_BAND_24G_MSK |
3441 RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_TGG_PROTECT_MSK; 2671 RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_TGG_PROTECT_MSK;
3442 2672
3443 rc = iwl4965_send_cmd_sync(priv, &cmd); 2673 rc = iwl_send_cmd_sync(priv, &cmd);
3444 if (rc) 2674 if (rc)
3445 return rc; 2675 return rc;
3446 2676
@@ -3474,7 +2704,7 @@ static int iwl4965_get_measurement(struct iwl4965_priv *priv,
3474} 2704}
3475#endif 2705#endif
3476 2706
3477static void iwl4965_txstatus_to_ieee(struct iwl4965_priv *priv, 2707static void iwl4965_txstatus_to_ieee(struct iwl_priv *priv,
3478 struct iwl4965_tx_info *tx_sta) 2708 struct iwl4965_tx_info *tx_sta)
3479{ 2709{
3480 2710
@@ -3500,7 +2730,7 @@ static void iwl4965_txstatus_to_ieee(struct iwl4965_priv *priv,
3500 * need to be reclaimed. As result, some free space forms. If there is 2730 * need to be reclaimed. As result, some free space forms. If there is
3501 * enough free space (> low mark), wake the stack that feeds us. 2731 * enough free space (> low mark), wake the stack that feeds us.
3502 */ 2732 */
3503int iwl4965_tx_queue_reclaim(struct iwl4965_priv *priv, int txq_id, int index) 2733int iwl4965_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
3504{ 2734{
3505 struct iwl4965_tx_queue *txq = &priv->txq[txq_id]; 2735 struct iwl4965_tx_queue *txq = &priv->txq[txq_id];
3506 struct iwl4965_queue *q = &txq->q; 2736 struct iwl4965_queue *q = &txq->q;
@@ -3513,9 +2743,9 @@ int iwl4965_tx_queue_reclaim(struct iwl4965_priv *priv, int txq_id, int index)
3513 return 0; 2743 return 0;
3514 } 2744 }
3515 2745
3516 for (index = iwl4965_queue_inc_wrap(index, q->n_bd); 2746 for (index = iwl_queue_inc_wrap(index, q->n_bd);
3517 q->read_ptr != index; 2747 q->read_ptr != index;
3518 q->read_ptr = iwl4965_queue_inc_wrap(q->read_ptr, q->n_bd)) { 2748 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
3519 if (txq_id != IWL_CMD_QUEUE_NUM) { 2749 if (txq_id != IWL_CMD_QUEUE_NUM) {
3520 iwl4965_txstatus_to_ieee(priv, 2750 iwl4965_txstatus_to_ieee(priv,
3521 &(txq->txb[txq->q.read_ptr])); 2751 &(txq->txb[txq->q.read_ptr]));
@@ -3528,10 +2758,10 @@ int iwl4965_tx_queue_reclaim(struct iwl4965_priv *priv, int txq_id, int index)
3528 nfreed++; 2758 nfreed++;
3529 } 2759 }
3530 2760
3531 if (iwl4965_queue_space(q) > q->low_mark && (txq_id >= 0) && 2761/* if (iwl4965_queue_space(q) > q->low_mark && (txq_id >= 0) &&
3532 (txq_id != IWL_CMD_QUEUE_NUM) && 2762 (txq_id != IWL_CMD_QUEUE_NUM) &&
3533 priv->mac80211_registered) 2763 priv->mac80211_registered)
3534 ieee80211_wake_queue(priv->hw, txq_id); 2764 ieee80211_wake_queue(priv->hw, txq_id); */
3535 2765
3536 2766
3537 return nfreed; 2767 return nfreed;
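
The reclaim loop above walks the ring from read_ptr up to and including the completed index, freeing one TFD per step; note that the wake-queue check that used to sit at the end of the function is commented out here, since the reply-Tx handlers below now wake the queue themselves after adjusting tfds_in_queue and the aggregation state. A simplified sketch of the index walk only (no descriptor freeing, no low-mark logic):

    #include <stdio.h>

    /* Free every completed slot from *read_ptr up to and including
     * last_done, advancing the ring read pointer as we go. */
    static int reclaim(int *read_ptr, int last_done, int n_bd)
    {
        int freed = 0;
        int stop = (last_done + 1) % n_bd;   /* first slot not yet completed */

        while (*read_ptr != stop) {
            /* the driver frees txq->txb[*read_ptr] and its TFD here */
            *read_ptr = (*read_ptr + 1) % n_bd;
            freed++;
        }
        return freed;
    }

    int main(void)
    {
        int read_ptr = 250;

        /* wraps across the end of a 256-entry ring: frees 10, ends at 4 */
        printf("freed %d, read_ptr now %d\n",
               reclaim(&read_ptr, 3, 256), read_ptr);
        return 0;
    }
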
@@ -3550,9 +2780,8 @@ static int iwl4965_is_tx_success(u32 status)
3550 * 2780 *
3551 ******************************************************************************/ 2781 ******************************************************************************/
3552#ifdef CONFIG_IWL4965_HT 2782#ifdef CONFIG_IWL4965_HT
3553#ifdef CONFIG_IWL4965_HT_AGG
3554 2783
3555static inline int iwl4965_get_ra_sta_id(struct iwl4965_priv *priv, 2784static inline int iwl4965_get_ra_sta_id(struct iwl_priv *priv,
3556 struct ieee80211_hdr *hdr) 2785 struct ieee80211_hdr *hdr)
3557{ 2786{
3558 if (priv->iw_mode == IEEE80211_IF_TYPE_STA) 2787 if (priv->iw_mode == IEEE80211_IF_TYPE_STA)
@@ -3564,7 +2793,7 @@ static inline int iwl4965_get_ra_sta_id(struct iwl4965_priv *priv,
3564} 2793}
3565 2794
3566static struct ieee80211_hdr *iwl4965_tx_queue_get_hdr( 2795static struct ieee80211_hdr *iwl4965_tx_queue_get_hdr(
3567 struct iwl4965_priv *priv, int txq_id, int idx) 2796 struct iwl_priv *priv, int txq_id, int idx)
3568{ 2797{
3569 if (priv->txq[txq_id].txb[idx].skb[0]) 2798 if (priv->txq[txq_id].txb[idx].skb[0])
3570 return (struct ieee80211_hdr *)priv->txq[txq_id]. 2799 return (struct ieee80211_hdr *)priv->txq[txq_id].
@@ -3583,13 +2812,13 @@ static inline u32 iwl4965_get_scd_ssn(struct iwl4965_tx_resp *tx_resp)
3583/** 2812/**
3584 * iwl4965_tx_status_reply_tx - Handle Tx rspnse for frames in aggregation queue 2813 * iwl4965_tx_status_reply_tx - Handle Tx rspnse for frames in aggregation queue
3585 */ 2814 */
3586static int iwl4965_tx_status_reply_tx(struct iwl4965_priv *priv, 2815static int iwl4965_tx_status_reply_tx(struct iwl_priv *priv,
3587 struct iwl4965_ht_agg *agg, 2816 struct iwl4965_ht_agg *agg,
3588 struct iwl4965_tx_resp *tx_resp, 2817 struct iwl4965_tx_resp_agg *tx_resp,
3589 u16 start_idx) 2818 u16 start_idx)
3590{ 2819{
3591 u32 status; 2820 u16 status;
3592 __le32 *frame_status = &tx_resp->status; 2821 struct agg_tx_status *frame_status = &tx_resp->status;
3593 struct ieee80211_tx_status *tx_status = NULL; 2822 struct ieee80211_tx_status *tx_status = NULL;
3594 struct ieee80211_hdr *hdr = NULL; 2823 struct ieee80211_hdr *hdr = NULL;
3595 int i, sh; 2824 int i, sh;
@@ -3602,30 +2831,30 @@ static int iwl4965_tx_status_reply_tx(struct iwl4965_priv *priv,
3602 agg->frame_count = tx_resp->frame_count; 2831 agg->frame_count = tx_resp->frame_count;
3603 agg->start_idx = start_idx; 2832 agg->start_idx = start_idx;
3604 agg->rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags); 2833 agg->rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
3605 agg->bitmap0 = agg->bitmap1 = 0; 2834 agg->bitmap = 0;
3606 2835
3607 /* # frames attempted by Tx command */ 2836 /* # frames attempted by Tx command */
3608 if (agg->frame_count == 1) { 2837 if (agg->frame_count == 1) {
3609 /* Only one frame was attempted; no block-ack will arrive */ 2838 /* Only one frame was attempted; no block-ack will arrive */
3610 struct iwl4965_tx_queue *txq ; 2839 status = le16_to_cpu(frame_status[0].status);
3611 status = le32_to_cpu(frame_status[0]); 2840 seq = le16_to_cpu(frame_status[0].sequence);
2841 idx = SEQ_TO_INDEX(seq);
2842 txq_id = SEQ_TO_QUEUE(seq);
3612 2843
3613 txq_id = agg->txq_id;
3614 txq = &priv->txq[txq_id];
3615 /* FIXME: code repetition */ 2844 /* FIXME: code repetition */
3616 IWL_DEBUG_TX_REPLY("FrameCnt = %d, StartIdx=%d \n", 2845 IWL_DEBUG_TX_REPLY("FrameCnt = %d, StartIdx=%d idx=%d\n",
3617 agg->frame_count, agg->start_idx); 2846 agg->frame_count, agg->start_idx, idx);
3618 2847
3619 tx_status = &(priv->txq[txq_id].txb[txq->q.read_ptr].status); 2848 tx_status = &(priv->txq[txq_id].txb[idx].status);
3620 tx_status->retry_count = tx_resp->failure_frame; 2849 tx_status->retry_count = tx_resp->failure_frame;
3621 tx_status->queue_number = status & 0xff; 2850 tx_status->queue_number = status & 0xff;
3622 tx_status->queue_length = tx_resp->bt_kill_count; 2851 tx_status->queue_length = tx_resp->failure_rts;
3623 tx_status->queue_length |= tx_resp->failure_rts; 2852 tx_status->control.flags &= ~IEEE80211_TXCTL_AMPDU;
3624
3625 tx_status->flags = iwl4965_is_tx_success(status)? 2853 tx_status->flags = iwl4965_is_tx_success(status)?
3626 IEEE80211_TX_STATUS_ACK : 0; 2854 IEEE80211_TX_STATUS_ACK : 0;
3627 tx_status->control.tx_rate = 2855 iwl4965_hwrate_to_tx_control(priv,
3628 iwl4965_hw_get_rate_n_flags(tx_resp->rate_n_flags); 2856 le32_to_cpu(tx_resp->rate_n_flags),
2857 &tx_status->control);
3629 /* FIXME: code repetition end */ 2858 /* FIXME: code repetition end */
3630 2859
3631 IWL_DEBUG_TX_REPLY("1 Frame 0x%x failure :%d\n", 2860 IWL_DEBUG_TX_REPLY("1 Frame 0x%x failure :%d\n",
@@ -3642,8 +2871,8 @@ static int iwl4965_tx_status_reply_tx(struct iwl4965_priv *priv,
3642 /* Construct bit-map of pending frames within Tx window */ 2871 /* Construct bit-map of pending frames within Tx window */
3643 for (i = 0; i < agg->frame_count; i++) { 2872 for (i = 0; i < agg->frame_count; i++) {
3644 u16 sc; 2873 u16 sc;
3645 status = le32_to_cpu(frame_status[i]); 2874 status = le16_to_cpu(frame_status[i].status);
3646 seq = status >> 16; 2875 seq = le16_to_cpu(frame_status[i].sequence);
3647 idx = SEQ_TO_INDEX(seq); 2876 idx = SEQ_TO_INDEX(seq);
3648 txq_id = SEQ_TO_QUEUE(seq); 2877 txq_id = SEQ_TO_QUEUE(seq);
3649 2878
@@ -3687,13 +2916,12 @@ static int iwl4965_tx_status_reply_tx(struct iwl4965_priv *priv,
3687 start, (u32)(bitmap & 0xFFFFFFFF)); 2916 start, (u32)(bitmap & 0xFFFFFFFF));
3688 } 2917 }
3689 2918
3690 agg->bitmap0 = bitmap & 0xFFFFFFFF; 2919 agg->bitmap = bitmap;
3691 agg->bitmap1 = bitmap >> 32;
3692 agg->start_idx = start; 2920 agg->start_idx = start;
3693 agg->rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags); 2921 agg->rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
3694 IWL_DEBUG_TX_REPLY("Frames %d start_idx=%d bitmap=0x%x\n", 2922 IWL_DEBUG_TX_REPLY("Frames %d start_idx=%d bitmap=0x%llx\n",
3695 agg->frame_count, agg->start_idx, 2923 agg->frame_count, agg->start_idx,
3696 agg->bitmap0); 2924 (unsigned long long)agg->bitmap);
3697 2925
3698 if (bitmap) 2926 if (bitmap)
3699 agg->wait_for_ba = 1; 2927 agg->wait_for_ba = 1;
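
The aggregation bookkeeping above collapses the old bitmap0/bitmap1 pair into a single 64-bit agg->bitmap, with bit (idx - start) marking a frame that is still waiting for its block-ack (hence the %llx in the debug print). A toy illustration of keeping such a window bitmap in one u64, ignoring sequence wrap-around:

    #include <stdio.h>
    #include <stdint.h>

    /* Mark frame 'idx' as pending in a BA window starting at 'start'.
     * Simplified: assumes idx >= start and a window of at most 64 frames. */
    static void mark_pending(uint64_t *bitmap, int start, int idx)
    {
        *bitmap |= 1ull << (idx - start);
    }

    int main(void)
    {
        uint64_t bitmap = 0;

        mark_pending(&bitmap, 100, 100);
        mark_pending(&bitmap, 100, 103);
        mark_pending(&bitmap, 100, 135);
        printf("bitmap=0x%llx\n", (unsigned long long)bitmap); /* 0x800000009 */
        return 0;
    }
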
@@ -3701,12 +2929,11 @@ static int iwl4965_tx_status_reply_tx(struct iwl4965_priv *priv,
3701 return 0; 2929 return 0;
3702} 2930}
3703#endif 2931#endif
3704#endif
3705 2932
3706/** 2933/**
3707 * iwl4965_rx_reply_tx - Handle standard (non-aggregation) Tx response 2934 * iwl4965_rx_reply_tx - Handle standard (non-aggregation) Tx response
3708 */ 2935 */
3709static void iwl4965_rx_reply_tx(struct iwl4965_priv *priv, 2936static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
3710 struct iwl4965_rx_mem_buffer *rxb) 2937 struct iwl4965_rx_mem_buffer *rxb)
3711{ 2938{
3712 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data; 2939 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
@@ -3718,9 +2945,9 @@ static void iwl4965_rx_reply_tx(struct iwl4965_priv *priv,
3718 struct iwl4965_tx_resp *tx_resp = (void *)&pkt->u.raw[0]; 2945 struct iwl4965_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
3719 u32 status = le32_to_cpu(tx_resp->status); 2946 u32 status = le32_to_cpu(tx_resp->status);
3720#ifdef CONFIG_IWL4965_HT 2947#ifdef CONFIG_IWL4965_HT
3721#ifdef CONFIG_IWL4965_HT_AGG 2948 int tid = MAX_TID_COUNT, sta_id = IWL_INVALID_STATION;
3722 int tid, sta_id; 2949 struct ieee80211_hdr *hdr;
3723#endif 2950 __le16 *qc;
3724#endif 2951#endif
3725 2952
3726 if ((index >= txq->q.n_bd) || (x2_queue_used(&txq->q, index) == 0)) { 2953 if ((index >= txq->q.n_bd) || (x2_queue_used(&txq->q, index) == 0)) {
@@ -3732,44 +2959,51 @@ static void iwl4965_rx_reply_tx(struct iwl4965_priv *priv,
3732 } 2959 }
3733 2960
3734#ifdef CONFIG_IWL4965_HT 2961#ifdef CONFIG_IWL4965_HT
3735#ifdef CONFIG_IWL4965_HT_AGG 2962 hdr = iwl4965_tx_queue_get_hdr(priv, txq_id, index);
2963 qc = ieee80211_get_qos_ctrl(hdr);
2964
2965 if (qc)
2966 tid = le16_to_cpu(*qc) & 0xf;
2967
2968 sta_id = iwl4965_get_ra_sta_id(priv, hdr);
2969 if (txq->sched_retry && unlikely(sta_id == IWL_INVALID_STATION)) {
2970 IWL_ERROR("Station not known\n");
2971 return;
2972 }
2973
3736 if (txq->sched_retry) { 2974 if (txq->sched_retry) {
3737 const u32 scd_ssn = iwl4965_get_scd_ssn(tx_resp); 2975 const u32 scd_ssn = iwl4965_get_scd_ssn(tx_resp);
3738 struct ieee80211_hdr *hdr =
3739 iwl4965_tx_queue_get_hdr(priv, txq_id, index);
3740 struct iwl4965_ht_agg *agg = NULL; 2976 struct iwl4965_ht_agg *agg = NULL;
3741 __le16 *qc = ieee80211_get_qos_ctrl(hdr);
3742 2977
3743 if (qc == NULL) { 2978 if (!qc)
3744 IWL_ERROR("BUG_ON qc is null!!!!\n");
3745 return; 2979 return;
3746 }
3747
3748 tid = le16_to_cpu(*qc) & 0xf;
3749
3750 sta_id = iwl4965_get_ra_sta_id(priv, hdr);
3751 if (unlikely(sta_id == IWL_INVALID_STATION)) {
3752 IWL_ERROR("Station not known for\n");
3753 return;
3754 }
3755 2980
3756 agg = &priv->stations[sta_id].tid[tid].agg; 2981 agg = &priv->stations[sta_id].tid[tid].agg;
3757 2982
3758 iwl4965_tx_status_reply_tx(priv, agg, tx_resp, index); 2983 iwl4965_tx_status_reply_tx(priv, agg,
2984 (struct iwl4965_tx_resp_agg *)tx_resp, index);
3759 2985
3760 if ((tx_resp->frame_count == 1) && 2986 if ((tx_resp->frame_count == 1) &&
3761 !iwl4965_is_tx_success(status)) { 2987 !iwl4965_is_tx_success(status)) {
3762 /* TODO: send BAR */ 2988 /* TODO: send BAR */
3763 } 2989 }
3764 2990
3765 if ((txq->q.read_ptr != (scd_ssn & 0xff))) { 2991 if (txq->q.read_ptr != (scd_ssn & 0xff)) {
3766 index = iwl4965_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd); 2992 int freed;
2993 index = iwl_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd);
3767 IWL_DEBUG_TX_REPLY("Retry scheduler reclaim scd_ssn " 2994 IWL_DEBUG_TX_REPLY("Retry scheduler reclaim scd_ssn "
3768 "%d index %d\n", scd_ssn , index); 2995 "%d index %d\n", scd_ssn , index);
3769 iwl4965_tx_queue_reclaim(priv, txq_id, index); 2996 freed = iwl4965_tx_queue_reclaim(priv, txq_id, index);
2997 priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
2998
2999 if (iwl4965_queue_space(&txq->q) > txq->q.low_mark &&
3000 txq_id >= 0 && priv->mac80211_registered &&
3001 agg->state != IWL_EMPTYING_HW_QUEUE_DELBA)
3002 ieee80211_wake_queue(priv->hw, txq_id);
3003
3004 iwl4965_check_empty_hw_queue(priv, sta_id, tid, txq_id);
3770 } 3005 }
3771 } else { 3006 } else {
3772#endif /* CONFIG_IWL4965_HT_AGG */
3773#endif /* CONFIG_IWL4965_HT */ 3007#endif /* CONFIG_IWL4965_HT */
3774 tx_status = &(txq->txb[txq->q.read_ptr].status); 3008 tx_status = &(txq->txb[txq->q.read_ptr].status);
3775 3009
@@ -3777,12 +3011,10 @@ static void iwl4965_rx_reply_tx(struct iwl4965_priv *priv,
3777 tx_status->queue_number = status; 3011 tx_status->queue_number = status;
3778 tx_status->queue_length = tx_resp->bt_kill_count; 3012 tx_status->queue_length = tx_resp->bt_kill_count;
3779 tx_status->queue_length |= tx_resp->failure_rts; 3013 tx_status->queue_length |= tx_resp->failure_rts;
3780
3781 tx_status->flags = 3014 tx_status->flags =
3782 iwl4965_is_tx_success(status) ? IEEE80211_TX_STATUS_ACK : 0; 3015 iwl4965_is_tx_success(status) ? IEEE80211_TX_STATUS_ACK : 0;
3783 3016 iwl4965_hwrate_to_tx_control(priv, le32_to_cpu(tx_resp->rate_n_flags),
3784 tx_status->control.tx_rate = 3017 &tx_status->control);
3785 iwl4965_hw_get_rate_n_flags(tx_resp->rate_n_flags);
3786 3018
3787 IWL_DEBUG_TX("Tx queue %d Status %s (0x%08x) rate_n_flags 0x%x " 3019 IWL_DEBUG_TX("Tx queue %d Status %s (0x%08x) rate_n_flags 0x%x "
3788 "retries %d\n", txq_id, iwl4965_get_tx_fail_reason(status), 3020 "retries %d\n", txq_id, iwl4965_get_tx_fail_reason(status),
@@ -3790,12 +3022,21 @@ static void iwl4965_rx_reply_tx(struct iwl4965_priv *priv,
3790 tx_resp->failure_frame); 3022 tx_resp->failure_frame);
3791 3023
3792 IWL_DEBUG_TX_REPLY("Tx queue reclaim %d\n", index); 3024 IWL_DEBUG_TX_REPLY("Tx queue reclaim %d\n", index);
3793 if (index != -1) 3025 if (index != -1) {
3794 iwl4965_tx_queue_reclaim(priv, txq_id, index); 3026 int freed = iwl4965_tx_queue_reclaim(priv, txq_id, index);
3027#ifdef CONFIG_IWL4965_HT
3028 if (tid != MAX_TID_COUNT)
3029 priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
3030 if (iwl4965_queue_space(&txq->q) > txq->q.low_mark &&
3031 (txq_id >= 0) &&
3032 priv->mac80211_registered)
3033 ieee80211_wake_queue(priv->hw, txq_id);
3034 if (tid != MAX_TID_COUNT)
3035 iwl4965_check_empty_hw_queue(priv, sta_id, tid, txq_id);
3036#endif
3037 }
3795#ifdef CONFIG_IWL4965_HT 3038#ifdef CONFIG_IWL4965_HT
3796#ifdef CONFIG_IWL4965_HT_AGG
3797 } 3039 }
3798#endif /* CONFIG_IWL4965_HT_AGG */
3799#endif /* CONFIG_IWL4965_HT */ 3040#endif /* CONFIG_IWL4965_HT */
3800 3041
3801 if (iwl_check_bits(status, TX_ABORT_REQUIRED_MSK)) 3042 if (iwl_check_bits(status, TX_ABORT_REQUIRED_MSK))
@@ -3803,7 +3044,7 @@ static void iwl4965_rx_reply_tx(struct iwl4965_priv *priv,
3803} 3044}
3804 3045
3805 3046
3806static void iwl4965_rx_reply_alive(struct iwl4965_priv *priv, 3047static void iwl4965_rx_reply_alive(struct iwl_priv *priv,
3807 struct iwl4965_rx_mem_buffer *rxb) 3048 struct iwl4965_rx_mem_buffer *rxb)
3808{ 3049{
3809 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data; 3050 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
@@ -3839,7 +3080,7 @@ static void iwl4965_rx_reply_alive(struct iwl4965_priv *priv,
3839 IWL_WARNING("uCode did not respond OK.\n"); 3080 IWL_WARNING("uCode did not respond OK.\n");
3840} 3081}
3841 3082
3842static void iwl4965_rx_reply_add_sta(struct iwl4965_priv *priv, 3083static void iwl4965_rx_reply_add_sta(struct iwl_priv *priv,
3843 struct iwl4965_rx_mem_buffer *rxb) 3084 struct iwl4965_rx_mem_buffer *rxb)
3844{ 3085{
3845 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data; 3086 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
@@ -3848,7 +3089,7 @@ static void iwl4965_rx_reply_add_sta(struct iwl4965_priv *priv,
3848 return; 3089 return;
3849} 3090}
3850 3091
3851static void iwl4965_rx_reply_error(struct iwl4965_priv *priv, 3092static void iwl4965_rx_reply_error(struct iwl_priv *priv,
3852 struct iwl4965_rx_mem_buffer *rxb) 3093 struct iwl4965_rx_mem_buffer *rxb)
3853{ 3094{
3854 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data; 3095 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
@@ -3864,7 +3105,7 @@ static void iwl4965_rx_reply_error(struct iwl4965_priv *priv,
3864 3105
3865#define TX_STATUS_ENTRY(x) case TX_STATUS_FAIL_ ## x: return #x 3106#define TX_STATUS_ENTRY(x) case TX_STATUS_FAIL_ ## x: return #x
3866 3107
3867static void iwl4965_rx_csa(struct iwl4965_priv *priv, struct iwl4965_rx_mem_buffer *rxb) 3108static void iwl4965_rx_csa(struct iwl_priv *priv, struct iwl4965_rx_mem_buffer *rxb)
3868{ 3109{
3869 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data; 3110 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
3870 struct iwl4965_rxon_cmd *rxon = (void *)&priv->active_rxon; 3111 struct iwl4965_rxon_cmd *rxon = (void *)&priv->active_rxon;
@@ -3875,7 +3116,7 @@ static void iwl4965_rx_csa(struct iwl4965_priv *priv, struct iwl4965_rx_mem_buff
3875 priv->staging_rxon.channel = csa->channel; 3116 priv->staging_rxon.channel = csa->channel;
3876} 3117}
3877 3118
3878static void iwl4965_rx_spectrum_measure_notif(struct iwl4965_priv *priv, 3119static void iwl4965_rx_spectrum_measure_notif(struct iwl_priv *priv,
3879 struct iwl4965_rx_mem_buffer *rxb) 3120 struct iwl4965_rx_mem_buffer *rxb)
3880{ 3121{
3881#ifdef CONFIG_IWL4965_SPECTRUM_MEASUREMENT 3122#ifdef CONFIG_IWL4965_SPECTRUM_MEASUREMENT
@@ -3893,10 +3134,10 @@ static void iwl4965_rx_spectrum_measure_notif(struct iwl4965_priv *priv,
3893#endif 3134#endif
3894} 3135}
3895 3136
3896static void iwl4965_rx_pm_sleep_notif(struct iwl4965_priv *priv, 3137static void iwl4965_rx_pm_sleep_notif(struct iwl_priv *priv,
3897 struct iwl4965_rx_mem_buffer *rxb) 3138 struct iwl4965_rx_mem_buffer *rxb)
3898{ 3139{
3899#ifdef CONFIG_IWL4965_DEBUG 3140#ifdef CONFIG_IWLWIFI_DEBUG
3900 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data; 3141 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
3901 struct iwl4965_sleep_notification *sleep = &(pkt->u.sleep_notif); 3142 struct iwl4965_sleep_notification *sleep = &(pkt->u.sleep_notif);
3902 IWL_DEBUG_RX("sleep mode: %d, src: %d\n", 3143 IWL_DEBUG_RX("sleep mode: %d, src: %d\n",
@@ -3904,20 +3145,20 @@ static void iwl4965_rx_pm_sleep_notif(struct iwl4965_priv *priv,
3904#endif 3145#endif
3905} 3146}
3906 3147
3907static void iwl4965_rx_pm_debug_statistics_notif(struct iwl4965_priv *priv, 3148static void iwl4965_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
3908 struct iwl4965_rx_mem_buffer *rxb) 3149 struct iwl4965_rx_mem_buffer *rxb)
3909{ 3150{
3910 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data; 3151 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
3911 IWL_DEBUG_RADIO("Dumping %d bytes of unhandled " 3152 IWL_DEBUG_RADIO("Dumping %d bytes of unhandled "
3912 "notification for %s:\n", 3153 "notification for %s:\n",
3913 le32_to_cpu(pkt->len), get_cmd_string(pkt->hdr.cmd)); 3154 le32_to_cpu(pkt->len), get_cmd_string(pkt->hdr.cmd));
3914 iwl4965_print_hex_dump(IWL_DL_RADIO, pkt->u.raw, le32_to_cpu(pkt->len)); 3155 iwl_print_hex_dump(IWL_DL_RADIO, pkt->u.raw, le32_to_cpu(pkt->len));
3915} 3156}
3916 3157
3917static void iwl4965_bg_beacon_update(struct work_struct *work) 3158static void iwl4965_bg_beacon_update(struct work_struct *work)
3918{ 3159{
3919 struct iwl4965_priv *priv = 3160 struct iwl_priv *priv =
3920 container_of(work, struct iwl4965_priv, beacon_update); 3161 container_of(work, struct iwl_priv, beacon_update);
3921 struct sk_buff *beacon; 3162 struct sk_buff *beacon;
3922 3163
3923 /* Pull updated AP beacon from mac80211. will fail if not in AP mode */ 3164 /* Pull updated AP beacon from mac80211. will fail if not in AP mode */
@@ -3939,10 +3180,10 @@ static void iwl4965_bg_beacon_update(struct work_struct *work)
3939 iwl4965_send_beacon_cmd(priv); 3180 iwl4965_send_beacon_cmd(priv);
3940} 3181}
3941 3182
3942static void iwl4965_rx_beacon_notif(struct iwl4965_priv *priv, 3183static void iwl4965_rx_beacon_notif(struct iwl_priv *priv,
3943 struct iwl4965_rx_mem_buffer *rxb) 3184 struct iwl4965_rx_mem_buffer *rxb)
3944{ 3185{
3945#ifdef CONFIG_IWL4965_DEBUG 3186#ifdef CONFIG_IWLWIFI_DEBUG
3946 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data; 3187 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
3947 struct iwl4965_beacon_notif *beacon = &(pkt->u.beacon_status); 3188 struct iwl4965_beacon_notif *beacon = &(pkt->u.beacon_status);
3948 u8 rate = iwl4965_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags); 3189 u8 rate = iwl4965_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);
@@ -3962,10 +3203,10 @@ static void iwl4965_rx_beacon_notif(struct iwl4965_priv *priv,
3962} 3203}
3963 3204
3964/* Service response to REPLY_SCAN_CMD (0x80) */ 3205/* Service response to REPLY_SCAN_CMD (0x80) */
3965static void iwl4965_rx_reply_scan(struct iwl4965_priv *priv, 3206static void iwl4965_rx_reply_scan(struct iwl_priv *priv,
3966 struct iwl4965_rx_mem_buffer *rxb) 3207 struct iwl4965_rx_mem_buffer *rxb)
3967{ 3208{
3968#ifdef CONFIG_IWL4965_DEBUG 3209#ifdef CONFIG_IWLWIFI_DEBUG
3969 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data; 3210 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
3970 struct iwl4965_scanreq_notification *notif = 3211 struct iwl4965_scanreq_notification *notif =
3971 (struct iwl4965_scanreq_notification *)pkt->u.raw; 3212 (struct iwl4965_scanreq_notification *)pkt->u.raw;
@@ -3975,7 +3216,7 @@ static void iwl4965_rx_reply_scan(struct iwl4965_priv *priv,
3975} 3216}
3976 3217
3977/* Service SCAN_START_NOTIFICATION (0x82) */ 3218/* Service SCAN_START_NOTIFICATION (0x82) */
3978static void iwl4965_rx_scan_start_notif(struct iwl4965_priv *priv, 3219static void iwl4965_rx_scan_start_notif(struct iwl_priv *priv,
3979 struct iwl4965_rx_mem_buffer *rxb) 3220 struct iwl4965_rx_mem_buffer *rxb)
3980{ 3221{
3981 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data; 3222 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
@@ -3992,7 +3233,7 @@ static void iwl4965_rx_scan_start_notif(struct iwl4965_priv *priv,
3992} 3233}
3993 3234
3994/* Service SCAN_RESULTS_NOTIFICATION (0x83) */ 3235/* Service SCAN_RESULTS_NOTIFICATION (0x83) */
3995static void iwl4965_rx_scan_results_notif(struct iwl4965_priv *priv, 3236static void iwl4965_rx_scan_results_notif(struct iwl_priv *priv,
3996 struct iwl4965_rx_mem_buffer *rxb) 3237 struct iwl4965_rx_mem_buffer *rxb)
3997{ 3238{
3998 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data; 3239 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
@@ -4017,7 +3258,7 @@ static void iwl4965_rx_scan_results_notif(struct iwl4965_priv *priv,
4017} 3258}
4018 3259
4019/* Service SCAN_COMPLETE_NOTIFICATION (0x84) */ 3260/* Service SCAN_COMPLETE_NOTIFICATION (0x84) */
4020static void iwl4965_rx_scan_complete_notif(struct iwl4965_priv *priv, 3261static void iwl4965_rx_scan_complete_notif(struct iwl_priv *priv,
4021 struct iwl4965_rx_mem_buffer *rxb) 3262 struct iwl4965_rx_mem_buffer *rxb)
4022{ 3263{
4023 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data; 3264 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
@@ -4075,7 +3316,7 @@ reschedule:
4075 3316
4076/* Handle notification from uCode that card's power state is changing 3317/* Handle notification from uCode that card's power state is changing
4077 * due to software, hardware, or critical temperature RFKILL */ 3318 * due to software, hardware, or critical temperature RFKILL */
4078static void iwl4965_rx_card_state_notif(struct iwl4965_priv *priv, 3319static void iwl4965_rx_card_state_notif(struct iwl_priv *priv,
4079 struct iwl4965_rx_mem_buffer *rxb) 3320 struct iwl4965_rx_mem_buffer *rxb)
4080{ 3321{
4081 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data; 3322 struct iwl4965_rx_packet *pkt = (void *)rxb->skb->data;
@@ -4089,35 +3330,35 @@ static void iwl4965_rx_card_state_notif(struct iwl4965_priv *priv,
4089 if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED | 3330 if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED |
4090 RF_CARD_DISABLED)) { 3331 RF_CARD_DISABLED)) {
4091 3332
4092 iwl4965_write32(priv, CSR_UCODE_DRV_GP1_SET, 3333 iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
4093 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); 3334 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
4094 3335
4095 if (!iwl4965_grab_nic_access(priv)) { 3336 if (!iwl_grab_nic_access(priv)) {
4096 iwl4965_write_direct32( 3337 iwl_write_direct32(
4097 priv, HBUS_TARG_MBX_C, 3338 priv, HBUS_TARG_MBX_C,
4098 HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED); 3339 HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
4099 3340
4100 iwl4965_release_nic_access(priv); 3341 iwl_release_nic_access(priv);
4101 } 3342 }
4102 3343
4103 if (!(flags & RXON_CARD_DISABLED)) { 3344 if (!(flags & RXON_CARD_DISABLED)) {
4104 iwl4965_write32(priv, CSR_UCODE_DRV_GP1_CLR, 3345 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
4105 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); 3346 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
4106 if (!iwl4965_grab_nic_access(priv)) { 3347 if (!iwl_grab_nic_access(priv)) {
4107 iwl4965_write_direct32( 3348 iwl_write_direct32(
4108 priv, HBUS_TARG_MBX_C, 3349 priv, HBUS_TARG_MBX_C,
4109 HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED); 3350 HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
4110 3351
4111 iwl4965_release_nic_access(priv); 3352 iwl_release_nic_access(priv);
4112 } 3353 }
4113 } 3354 }
4114 3355
4115 if (flags & RF_CARD_DISABLED) { 3356 if (flags & RF_CARD_DISABLED) {
4116 iwl4965_write32(priv, CSR_UCODE_DRV_GP1_SET, 3357 iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
4117 CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT); 3358 CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
4118 iwl4965_read32(priv, CSR_UCODE_DRV_GP1); 3359 iwl_read32(priv, CSR_UCODE_DRV_GP1);
4119 if (!iwl4965_grab_nic_access(priv)) 3360 if (!iwl_grab_nic_access(priv))
4120 iwl4965_release_nic_access(priv); 3361 iwl_release_nic_access(priv);
4121 } 3362 }
4122 } 3363 }
4123 3364
@@ -4153,7 +3394,7 @@ static void iwl4965_rx_card_state_notif(struct iwl4965_priv *priv,
4153 * This function chains into the hardware specific files for them to setup 3394 * This function chains into the hardware specific files for them to setup
4154 * any hardware specific handlers as well. 3395 * any hardware specific handlers as well.
4155 */ 3396 */
4156static void iwl4965_setup_rx_handlers(struct iwl4965_priv *priv) 3397static void iwl4965_setup_rx_handlers(struct iwl_priv *priv)
4157{ 3398{
4158 priv->rx_handlers[REPLY_ALIVE] = iwl4965_rx_reply_alive; 3399 priv->rx_handlers[REPLY_ALIVE] = iwl4965_rx_reply_alive;
4159 priv->rx_handlers[REPLY_ADD_STA] = iwl4965_rx_reply_add_sta; 3400 priv->rx_handlers[REPLY_ADD_STA] = iwl4965_rx_reply_add_sta;
@@ -4195,7 +3436,7 @@ static void iwl4965_setup_rx_handlers(struct iwl4965_priv *priv)
4195 * will be executed. The attached skb (if present) will only be freed 3436 * will be executed. The attached skb (if present) will only be freed
4196 * if the callback returns 1 3437 * if the callback returns 1
4197 */ 3438 */
4198static void iwl4965_tx_cmd_complete(struct iwl4965_priv *priv, 3439static void iwl4965_tx_cmd_complete(struct iwl_priv *priv,
4199 struct iwl4965_rx_mem_buffer *rxb) 3440 struct iwl4965_rx_mem_buffer *rxb)
4200{ 3441{
4201 struct iwl4965_rx_packet *pkt = (struct iwl4965_rx_packet *)rxb->skb->data; 3442 struct iwl4965_rx_packet *pkt = (struct iwl4965_rx_packet *)rxb->skb->data;
@@ -4204,7 +3445,7 @@ static void iwl4965_tx_cmd_complete(struct iwl4965_priv *priv,
4204 int index = SEQ_TO_INDEX(sequence); 3445 int index = SEQ_TO_INDEX(sequence);
4205 int huge = sequence & SEQ_HUGE_FRAME; 3446 int huge = sequence & SEQ_HUGE_FRAME;
4206 int cmd_index; 3447 int cmd_index;
4207 struct iwl4965_cmd *cmd; 3448 struct iwl_cmd *cmd;
4208 3449
4209 /* If a Tx command is being handled and it isn't in the actual 3450 /* If a Tx command is being handled and it isn't in the actual
4210 * command queue then there a command routing bug has been introduced 3451 * command queue then there a command routing bug has been introduced
@@ -4318,7 +3559,7 @@ static int iwl4965_rx_queue_space(const struct iwl4965_rx_queue *q)
4318/** 3559/**
4319 * iwl4965_rx_queue_update_write_ptr - Update the write pointer for the RX queue 3560 * iwl4965_rx_queue_update_write_ptr - Update the write pointer for the RX queue
4320 */ 3561 */
4321int iwl4965_rx_queue_update_write_ptr(struct iwl4965_priv *priv, struct iwl4965_rx_queue *q) 3562int iwl4965_rx_queue_update_write_ptr(struct iwl_priv *priv, struct iwl4965_rx_queue *q)
4322{ 3563{
4323 u32 reg = 0; 3564 u32 reg = 0;
4324 int rc = 0; 3565 int rc = 0;
@@ -4331,27 +3572,27 @@ int iwl4965_rx_queue_update_write_ptr(struct iwl4965_priv *priv, struct iwl4965_
4331 3572
4332 /* If power-saving is in use, make sure device is awake */ 3573 /* If power-saving is in use, make sure device is awake */
4333 if (test_bit(STATUS_POWER_PMI, &priv->status)) { 3574 if (test_bit(STATUS_POWER_PMI, &priv->status)) {
4334 reg = iwl4965_read32(priv, CSR_UCODE_DRV_GP1); 3575 reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);
4335 3576
4336 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) { 3577 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
4337 iwl4965_set_bit(priv, CSR_GP_CNTRL, 3578 iwl_set_bit(priv, CSR_GP_CNTRL,
4338 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 3579 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
4339 goto exit_unlock; 3580 goto exit_unlock;
4340 } 3581 }
4341 3582
4342 rc = iwl4965_grab_nic_access(priv); 3583 rc = iwl_grab_nic_access(priv);
4343 if (rc) 3584 if (rc)
4344 goto exit_unlock; 3585 goto exit_unlock;
4345 3586
4346 /* Device expects a multiple of 8 */ 3587 /* Device expects a multiple of 8 */
4347 iwl4965_write_direct32(priv, FH_RSCSR_CHNL0_WPTR, 3588 iwl_write_direct32(priv, FH_RSCSR_CHNL0_WPTR,
4348 q->write & ~0x7); 3589 q->write & ~0x7);
4349 iwl4965_release_nic_access(priv); 3590 iwl_release_nic_access(priv);
4350 3591
4351 /* Else device is assumed to be awake */ 3592 /* Else device is assumed to be awake */
4352 } else 3593 } else
4353 /* Device expects a multiple of 8 */ 3594 /* Device expects a multiple of 8 */
4354 iwl4965_write32(priv, FH_RSCSR_CHNL0_WPTR, q->write & ~0x7); 3595 iwl_write32(priv, FH_RSCSR_CHNL0_WPTR, q->write & ~0x7);
4355 3596
4356 3597
4357 q->need_update = 0; 3598 q->need_update = 0;
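
Both branches above mask the RX write index with ~0x7 before handing it to the device, matching the "Device expects a multiple of 8" comments: the hardware only accepts the pointer in steps of eight receive buffers, so the software index is rounded down. The masking by itself:

    #include <assert.h>

    /* Round the software RX write index down to a multiple of 8,
     * which is the granularity the device accepts. */
    static unsigned int rx_wptr_for_device(unsigned int write)
    {
        return write & ~0x7u;
    }

    int main(void)
    {
        assert(rx_wptr_for_device(13) == 8);
        assert(rx_wptr_for_device(16) == 16);
        return 0;
    }
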
@@ -4364,7 +3605,7 @@ int iwl4965_rx_queue_update_write_ptr(struct iwl4965_priv *priv, struct iwl4965_
4364/** 3605/**
4365 * iwl4965_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr 3606 * iwl4965_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
4366 */ 3607 */
4367static inline __le32 iwl4965_dma_addr2rbd_ptr(struct iwl4965_priv *priv, 3608static inline __le32 iwl4965_dma_addr2rbd_ptr(struct iwl_priv *priv,
4368 dma_addr_t dma_addr) 3609 dma_addr_t dma_addr)
4369{ 3610{
4370 return cpu_to_le32((u32)(dma_addr >> 8)); 3611 return cpu_to_le32((u32)(dma_addr >> 8));
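
iwl4965_dma_addr2rbd_ptr() stores the receive buffer's DMA address right-shifted by eight bits, i.e. in 256-byte units, which is the form the uCode's read-buffer descriptors expect; the shift is lossless only when the buffer address has its low eight bits clear. The conversion in isolation:

    #include <assert.h>
    #include <stdint.h>

    /* RBD form of a DMA address: the address in 256-byte units. */
    static uint32_t dma_addr_to_rbd(uint64_t dma_addr)
    {
        return (uint32_t)(dma_addr >> 8);
    }

    int main(void)
    {
        uint64_t addr = 0x12345600ull;           /* low 8 bits clear */

        assert(dma_addr_to_rbd(addr) == 0x123456);
        assert(((uint64_t)dma_addr_to_rbd(addr) << 8) == addr);
        return 0;
    }
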
@@ -4382,7 +3623,7 @@ static inline __le32 iwl4965_dma_addr2rbd_ptr(struct iwl4965_priv *priv,
4382 * also updates the memory address in the firmware to reference the new 3623 * also updates the memory address in the firmware to reference the new
4383 * target buffer. 3624 * target buffer.
4384 */ 3625 */
4385static int iwl4965_rx_queue_restock(struct iwl4965_priv *priv) 3626static int iwl4965_rx_queue_restock(struct iwl_priv *priv)
4386{ 3627{
4387 struct iwl4965_rx_queue *rxq = &priv->rxq; 3628 struct iwl4965_rx_queue *rxq = &priv->rxq;
4388 struct list_head *element; 3629 struct list_head *element;
@@ -4434,7 +3675,7 @@ static int iwl4965_rx_queue_restock(struct iwl4965_priv *priv)
4434 * Also restock the Rx queue via iwl4965_rx_queue_restock. 3675 * Also restock the Rx queue via iwl4965_rx_queue_restock.
4435 * This is called as a scheduled work item (except for during initialization) 3676 * This is called as a scheduled work item (except for during initialization)
4436 */ 3677 */
4437static void iwl4965_rx_allocate(struct iwl4965_priv *priv) 3678static void iwl4965_rx_allocate(struct iwl_priv *priv)
4438{ 3679{
4439 struct iwl4965_rx_queue *rxq = &priv->rxq; 3680 struct iwl4965_rx_queue *rxq = &priv->rxq;
4440 struct list_head *element; 3681 struct list_head *element;
@@ -4447,7 +3688,7 @@ static void iwl4965_rx_allocate(struct iwl4965_priv *priv)
4447 3688
4448 /* Alloc a new receive buffer */ 3689 /* Alloc a new receive buffer */
4449 rxb->skb = 3690 rxb->skb =
4450 alloc_skb(priv->hw_setting.rx_buf_size, 3691 alloc_skb(priv->hw_params.rx_buf_size,
4451 __GFP_NOWARN | GFP_ATOMIC); 3692 __GFP_NOWARN | GFP_ATOMIC);
4452 if (!rxb->skb) { 3693 if (!rxb->skb) {
4453 if (net_ratelimit()) 3694 if (net_ratelimit())
@@ -4464,7 +3705,7 @@ static void iwl4965_rx_allocate(struct iwl4965_priv *priv)
4464 /* Get physical address of RB/SKB */ 3705 /* Get physical address of RB/SKB */
4465 rxb->dma_addr = 3706 rxb->dma_addr =
4466 pci_map_single(priv->pci_dev, rxb->skb->data, 3707 pci_map_single(priv->pci_dev, rxb->skb->data,
4467 priv->hw_setting.rx_buf_size, PCI_DMA_FROMDEVICE); 3708 priv->hw_params.rx_buf_size, PCI_DMA_FROMDEVICE);
4468 list_add_tail(&rxb->list, &rxq->rx_free); 3709 list_add_tail(&rxb->list, &rxq->rx_free);
4469 rxq->free_count++; 3710 rxq->free_count++;
4470 } 3711 }
@@ -4476,7 +3717,7 @@ static void iwl4965_rx_allocate(struct iwl4965_priv *priv)
4476*/ 3717*/
4477static void __iwl4965_rx_replenish(void *data) 3718static void __iwl4965_rx_replenish(void *data)
4478{ 3719{
4479 struct iwl4965_priv *priv = data; 3720 struct iwl_priv *priv = data;
4480 3721
4481 iwl4965_rx_allocate(priv); 3722 iwl4965_rx_allocate(priv);
4482 iwl4965_rx_queue_restock(priv); 3723 iwl4965_rx_queue_restock(priv);
@@ -4485,7 +3726,7 @@ static void __iwl4965_rx_replenish(void *data)
4485 3726
4486void iwl4965_rx_replenish(void *data) 3727void iwl4965_rx_replenish(void *data)
4487{ 3728{
4488 struct iwl4965_priv *priv = data; 3729 struct iwl_priv *priv = data;
4489 unsigned long flags; 3730 unsigned long flags;
4490 3731
4491 iwl4965_rx_allocate(priv); 3732 iwl4965_rx_allocate(priv);
@@ -4500,14 +3741,14 @@ void iwl4965_rx_replenish(void *data)
4500 * This free routine walks the list of POOL entries and if SKB is set to 3741 * This free routine walks the list of POOL entries and if SKB is set to
4501 * non NULL it is unmapped and freed 3742 * non NULL it is unmapped and freed
4502 */ 3743 */
4503static void iwl4965_rx_queue_free(struct iwl4965_priv *priv, struct iwl4965_rx_queue *rxq) 3744static void iwl4965_rx_queue_free(struct iwl_priv *priv, struct iwl4965_rx_queue *rxq)
4504{ 3745{
4505 int i; 3746 int i;
4506 for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) { 3747 for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
4507 if (rxq->pool[i].skb != NULL) { 3748 if (rxq->pool[i].skb != NULL) {
4508 pci_unmap_single(priv->pci_dev, 3749 pci_unmap_single(priv->pci_dev,
4509 rxq->pool[i].dma_addr, 3750 rxq->pool[i].dma_addr,
4510 priv->hw_setting.rx_buf_size, 3751 priv->hw_params.rx_buf_size,
4511 PCI_DMA_FROMDEVICE); 3752 PCI_DMA_FROMDEVICE);
4512 dev_kfree_skb(rxq->pool[i].skb); 3753 dev_kfree_skb(rxq->pool[i].skb);
4513 } 3754 }
@@ -4518,7 +3759,7 @@ static void iwl4965_rx_queue_free(struct iwl4965_priv *priv, struct iwl4965_rx_q
4518 rxq->bd = NULL; 3759 rxq->bd = NULL;
4519} 3760}
4520 3761
4521int iwl4965_rx_queue_alloc(struct iwl4965_priv *priv) 3762int iwl4965_rx_queue_alloc(struct iwl_priv *priv)
4522{ 3763{
4523 struct iwl4965_rx_queue *rxq = &priv->rxq; 3764 struct iwl4965_rx_queue *rxq = &priv->rxq;
4524 struct pci_dev *dev = priv->pci_dev; 3765 struct pci_dev *dev = priv->pci_dev;
@@ -4545,7 +3786,7 @@ int iwl4965_rx_queue_alloc(struct iwl4965_priv *priv)
4545 return 0; 3786 return 0;
4546} 3787}
4547 3788
4548void iwl4965_rx_queue_reset(struct iwl4965_priv *priv, struct iwl4965_rx_queue *rxq) 3789void iwl4965_rx_queue_reset(struct iwl_priv *priv, struct iwl4965_rx_queue *rxq)
4549{ 3790{
4550 unsigned long flags; 3791 unsigned long flags;
4551 int i; 3792 int i;
@@ -4559,7 +3800,7 @@ void iwl4965_rx_queue_reset(struct iwl4965_priv *priv, struct iwl4965_rx_queue *
4559 if (rxq->pool[i].skb != NULL) { 3800 if (rxq->pool[i].skb != NULL) {
4560 pci_unmap_single(priv->pci_dev, 3801 pci_unmap_single(priv->pci_dev,
4561 rxq->pool[i].dma_addr, 3802 rxq->pool[i].dma_addr,
4562 priv->hw_setting.rx_buf_size, 3803 priv->hw_params.rx_buf_size,
4563 PCI_DMA_FROMDEVICE); 3804 PCI_DMA_FROMDEVICE);
4564 priv->alloc_rxb_skb--; 3805 priv->alloc_rxb_skb--;
4565 dev_kfree_skb(rxq->pool[i].skb); 3806 dev_kfree_skb(rxq->pool[i].skb);
@@ -4660,7 +3901,7 @@ int iwl4965_calc_sig_qual(int rssi_dbm, int noise_dbm)
4660 * the appropriate handlers, including command responses, 3901 * the appropriate handlers, including command responses,
4661 * frame-received notifications, and other notifications. 3902 * frame-received notifications, and other notifications.
4662 */ 3903 */
4663static void iwl4965_rx_handle(struct iwl4965_priv *priv) 3904static void iwl4965_rx_handle(struct iwl_priv *priv)
4664{ 3905{
4665 struct iwl4965_rx_mem_buffer *rxb; 3906 struct iwl4965_rx_mem_buffer *rxb;
4666 struct iwl4965_rx_packet *pkt; 3907 struct iwl4965_rx_packet *pkt;
@@ -4694,7 +3935,7 @@ static void iwl4965_rx_handle(struct iwl4965_priv *priv)
4694 rxq->queue[i] = NULL; 3935 rxq->queue[i] = NULL;
4695 3936
4696 pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->dma_addr, 3937 pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->dma_addr,
4697 priv->hw_setting.rx_buf_size, 3938 priv->hw_params.rx_buf_size,
4698 PCI_DMA_FROMDEVICE); 3939 PCI_DMA_FROMDEVICE);
4699 pkt = (struct iwl4965_rx_packet *)rxb->skb->data; 3940 pkt = (struct iwl4965_rx_packet *)rxb->skb->data;
4700 3941
@@ -4706,7 +3947,7 @@ static void iwl4965_rx_handle(struct iwl4965_priv *priv)
4706 * but apparently a few don't get set; catch them here. */ 3947 * but apparently a few don't get set; catch them here. */
4707 reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) && 3948 reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
4708 (pkt->hdr.cmd != REPLY_RX_PHY_CMD) && 3949 (pkt->hdr.cmd != REPLY_RX_PHY_CMD) &&
4709 (pkt->hdr.cmd != REPLY_4965_RX) && 3950 (pkt->hdr.cmd != REPLY_RX) &&
4710 (pkt->hdr.cmd != REPLY_COMPRESSED_BA) && 3951 (pkt->hdr.cmd != REPLY_COMPRESSED_BA) &&
4711 (pkt->hdr.cmd != STATISTICS_NOTIFICATION) && 3952 (pkt->hdr.cmd != STATISTICS_NOTIFICATION) &&
4712 (pkt->hdr.cmd != REPLY_TX); 3953 (pkt->hdr.cmd != REPLY_TX);
@@ -4729,7 +3970,7 @@ static void iwl4965_rx_handle(struct iwl4965_priv *priv)
4729 3970
4730 if (reclaim) { 3971 if (reclaim) {
4731 /* Invoke any callbacks, transfer the skb to caller, and 3972 /* Invoke any callbacks, transfer the skb to caller, and
4732 * fire off the (possibly) blocking iwl4965_send_cmd() 3973 * fire off the (possibly) blocking iwl_send_cmd()
4733 * as we reclaim the driver command queue */ 3974 * as we reclaim the driver command queue */
4734 if (rxb && rxb->skb) 3975 if (rxb && rxb->skb)
4735 iwl4965_tx_cmd_complete(priv, rxb); 3976 iwl4965_tx_cmd_complete(priv, rxb);
@@ -4747,7 +3988,7 @@ static void iwl4965_rx_handle(struct iwl4965_priv *priv)
4747 } 3988 }
4748 3989
4749 pci_unmap_single(priv->pci_dev, rxb->dma_addr, 3990 pci_unmap_single(priv->pci_dev, rxb->dma_addr,
4750 priv->hw_setting.rx_buf_size, 3991 priv->hw_params.rx_buf_size,
4751 PCI_DMA_FROMDEVICE); 3992 PCI_DMA_FROMDEVICE);
4752 spin_lock_irqsave(&rxq->lock, flags); 3993 spin_lock_irqsave(&rxq->lock, flags);
4753 list_add_tail(&rxb->list, &priv->rxq.rx_used); 3994 list_add_tail(&rxb->list, &priv->rxq.rx_used);
@@ -4773,7 +4014,7 @@ static void iwl4965_rx_handle(struct iwl4965_priv *priv)
4773/** 4014/**
4774 * iwl4965_tx_queue_update_write_ptr - Send new write index to hardware 4015 * iwl4965_tx_queue_update_write_ptr - Send new write index to hardware
4775 */ 4016 */
4776static int iwl4965_tx_queue_update_write_ptr(struct iwl4965_priv *priv, 4017static int iwl4965_tx_queue_update_write_ptr(struct iwl_priv *priv,
4777 struct iwl4965_tx_queue *txq) 4018 struct iwl4965_tx_queue *txq)
4778{ 4019{
4779 u32 reg = 0; 4020 u32 reg = 0;
@@ -4788,27 +4029,27 @@ static int iwl4965_tx_queue_update_write_ptr(struct iwl4965_priv *priv,
4788 /* wake up nic if it's powered down ... 4029 /* wake up nic if it's powered down ...
4789 * uCode will wake up, and interrupt us again, so next 4030 * uCode will wake up, and interrupt us again, so next
4790 * time we'll skip this part. */ 4031 * time we'll skip this part. */
4791 reg = iwl4965_read32(priv, CSR_UCODE_DRV_GP1); 4032 reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);
4792 4033
4793 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) { 4034 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
4794 IWL_DEBUG_INFO("Requesting wakeup, GP1 = 0x%x\n", reg); 4035 IWL_DEBUG_INFO("Requesting wakeup, GP1 = 0x%x\n", reg);
4795 iwl4965_set_bit(priv, CSR_GP_CNTRL, 4036 iwl_set_bit(priv, CSR_GP_CNTRL,
4796 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 4037 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
4797 return rc; 4038 return rc;
4798 } 4039 }
4799 4040
4800 /* restore this queue's parameters in nic hardware. */ 4041 /* restore this queue's parameters in nic hardware. */
4801 rc = iwl4965_grab_nic_access(priv); 4042 rc = iwl_grab_nic_access(priv);
4802 if (rc) 4043 if (rc)
4803 return rc; 4044 return rc;
4804 iwl4965_write_direct32(priv, HBUS_TARG_WRPTR, 4045 iwl_write_direct32(priv, HBUS_TARG_WRPTR,
4805 txq->q.write_ptr | (txq_id << 8)); 4046 txq->q.write_ptr | (txq_id << 8));
4806 iwl4965_release_nic_access(priv); 4047 iwl_release_nic_access(priv);
4807 4048
4808 /* else not in power-save mode, uCode will never sleep when we're 4049 /* else not in power-save mode, uCode will never sleep when we're
4809 * trying to tx (during RFKILL, we're not trying to tx). */ 4050 * trying to tx (during RFKILL, we're not trying to tx). */
4810 } else 4051 } else
4811 iwl4965_write32(priv, HBUS_TARG_WRPTR, 4052 iwl_write32(priv, HBUS_TARG_WRPTR,
4812 txq->q.write_ptr | (txq_id << 8)); 4053 txq->q.write_ptr | (txq_id << 8));
4813 4054
4814 txq->need_update = 0; 4055 txq->need_update = 0;
@@ -4816,13 +4057,13 @@ static int iwl4965_tx_queue_update_write_ptr(struct iwl4965_priv *priv,
4816 return rc; 4057 return rc;
4817} 4058}
4818 4059
4819#ifdef CONFIG_IWL4965_DEBUG 4060#ifdef CONFIG_IWLWIFI_DEBUG
4820static void iwl4965_print_rx_config_cmd(struct iwl4965_rxon_cmd *rxon) 4061static void iwl4965_print_rx_config_cmd(struct iwl4965_rxon_cmd *rxon)
4821{ 4062{
4822 DECLARE_MAC_BUF(mac); 4063 DECLARE_MAC_BUF(mac);
4823 4064
4824 IWL_DEBUG_RADIO("RX CONFIG:\n"); 4065 IWL_DEBUG_RADIO("RX CONFIG:\n");
4825 iwl4965_print_hex_dump(IWL_DL_RADIO, (u8 *) rxon, sizeof(*rxon)); 4066 iwl_print_hex_dump(IWL_DL_RADIO, (u8 *) rxon, sizeof(*rxon));
4826 IWL_DEBUG_RADIO("u16 channel: 0x%x\n", le16_to_cpu(rxon->channel)); 4067 IWL_DEBUG_RADIO("u16 channel: 0x%x\n", le16_to_cpu(rxon->channel));
4827 IWL_DEBUG_RADIO("u32 flags: 0x%08X\n", le32_to_cpu(rxon->flags)); 4068 IWL_DEBUG_RADIO("u32 flags: 0x%08X\n", le32_to_cpu(rxon->flags));
4828 IWL_DEBUG_RADIO("u32 filter_flags: 0x%08x\n", 4069 IWL_DEBUG_RADIO("u32 filter_flags: 0x%08x\n",
@@ -4839,24 +4080,32 @@ static void iwl4965_print_rx_config_cmd(struct iwl4965_rxon_cmd *rxon)
4839} 4080}
4840#endif 4081#endif
4841 4082
4842static void iwl4965_enable_interrupts(struct iwl4965_priv *priv) 4083static void iwl4965_enable_interrupts(struct iwl_priv *priv)
4843{ 4084{
4844 IWL_DEBUG_ISR("Enabling interrupts\n"); 4085 IWL_DEBUG_ISR("Enabling interrupts\n");
4845 set_bit(STATUS_INT_ENABLED, &priv->status); 4086 set_bit(STATUS_INT_ENABLED, &priv->status);
4846 iwl4965_write32(priv, CSR_INT_MASK, CSR_INI_SET_MASK); 4087 iwl_write32(priv, CSR_INT_MASK, CSR_INI_SET_MASK);
4088}
4089
4090/* call this function to flush any scheduled tasklet */
4091static inline void iwl_synchronize_irq(struct iwl_priv *priv)
4092{
 4093		/* wait to make sure we flush pending tasklet */
4094 synchronize_irq(priv->pci_dev->irq);
4095 tasklet_kill(&priv->irq_tasklet);
4847} 4096}
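
The new iwl_synchronize_irq() helper captures a teardown ordering rule: wait for any handler that is already running, then make sure the deferred work it may have queued has run or been cancelled. A userspace sketch of the same ordering using pthreads, purely as an analogy for the kernel primitives, not an implementation of them:

/*
 * Teardown-ordering analogy: join the "handler" thread first (like
 * synchronize_irq()), then stop and drain the deferred worker (like
 * tasklet_kill()), so no scheduled work is lost or left running.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool work_scheduled;
static bool stop;

static void *irq_handler(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	work_scheduled = true;		/* stands in for tasklet_schedule() */
	pthread_mutex_unlock(&lock);
	return NULL;
}

static void *deferred_work(void *arg)
{
	(void)arg;
	for (;;) {
		pthread_mutex_lock(&lock);
		bool run = work_scheduled, done = stop;
		work_scheduled = false;
		pthread_mutex_unlock(&lock);
		if (run)
			puts("deferred work ran");
		if (done && !run)	/* exit only once nothing is pending */
			return NULL;
		usleep(1000);
	}
}

int main(void)
{
	pthread_t irq, tasklet;

	pthread_create(&tasklet, NULL, deferred_work, NULL);
	pthread_create(&irq, NULL, irq_handler, NULL);

	pthread_join(irq, NULL);	/* like synchronize_irq(): handler done */
	pthread_mutex_lock(&lock);
	stop = true;			/* like tasklet_kill(): flush and stop */
	pthread_mutex_unlock(&lock);
	pthread_join(tasklet, NULL);
	return 0;
}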
4848 4097
4849static inline void iwl4965_disable_interrupts(struct iwl4965_priv *priv) 4098static inline void iwl4965_disable_interrupts(struct iwl_priv *priv)
4850{ 4099{
4851 clear_bit(STATUS_INT_ENABLED, &priv->status); 4100 clear_bit(STATUS_INT_ENABLED, &priv->status);
4852 4101
4853 /* disable interrupts from uCode/NIC to host */ 4102 /* disable interrupts from uCode/NIC to host */
4854 iwl4965_write32(priv, CSR_INT_MASK, 0x00000000); 4103 iwl_write32(priv, CSR_INT_MASK, 0x00000000);
4855 4104
4856 /* acknowledge/clear/reset any interrupts still pending 4105 /* acknowledge/clear/reset any interrupts still pending
4857 * from uCode or flow handler (Rx/Tx DMA) */ 4106 * from uCode or flow handler (Rx/Tx DMA) */
4858 iwl4965_write32(priv, CSR_INT, 0xffffffff); 4107 iwl_write32(priv, CSR_INT, 0xffffffff);
4859 iwl4965_write32(priv, CSR_FH_INT_STATUS, 0xffffffff); 4108 iwl_write32(priv, CSR_FH_INT_STATUS, 0xffffffff);
4860 IWL_DEBUG_ISR("Disabled interrupts\n"); 4109 IWL_DEBUG_ISR("Disabled interrupts\n");
4861} 4110}
4862 4111
@@ -4883,7 +4132,7 @@ static const char *desc_lookup(int i)
4883#define ERROR_START_OFFSET (1 * sizeof(u32)) 4132#define ERROR_START_OFFSET (1 * sizeof(u32))
4884#define ERROR_ELEM_SIZE (7 * sizeof(u32)) 4133#define ERROR_ELEM_SIZE (7 * sizeof(u32))
4885 4134
4886static void iwl4965_dump_nic_error_log(struct iwl4965_priv *priv) 4135static void iwl4965_dump_nic_error_log(struct iwl_priv *priv)
4887{ 4136{
4888 u32 data2, line; 4137 u32 data2, line;
4889 u32 desc, time, count, base, data1; 4138 u32 desc, time, count, base, data1;
@@ -4892,34 +4141,33 @@ static void iwl4965_dump_nic_error_log(struct iwl4965_priv *priv)
4892 4141
4893 base = le32_to_cpu(priv->card_alive.error_event_table_ptr); 4142 base = le32_to_cpu(priv->card_alive.error_event_table_ptr);
4894 4143
4895 if (!iwl4965_hw_valid_rtc_data_addr(base)) { 4144 if (!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
4896 IWL_ERROR("Not valid error log pointer 0x%08X\n", base); 4145 IWL_ERROR("Not valid error log pointer 0x%08X\n", base);
4897 return; 4146 return;
4898 } 4147 }
4899 4148
4900 rc = iwl4965_grab_nic_access(priv); 4149 rc = iwl_grab_nic_access(priv);
4901 if (rc) { 4150 if (rc) {
4902 IWL_WARNING("Can not read from adapter at this time.\n"); 4151 IWL_WARNING("Can not read from adapter at this time.\n");
4903 return; 4152 return;
4904 } 4153 }
4905 4154
4906 count = iwl4965_read_targ_mem(priv, base); 4155 count = iwl_read_targ_mem(priv, base);
4907 4156
4908 if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) { 4157 if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
4909 IWL_ERROR("Start IWL Error Log Dump:\n"); 4158 IWL_ERROR("Start IWL Error Log Dump:\n");
4910 IWL_ERROR("Status: 0x%08lX, Config: %08X count: %d\n", 4159 IWL_ERROR("Status: 0x%08lX, count: %d\n", priv->status, count);
4911 priv->status, priv->config, count);
4912 } 4160 }
4913 4161
4914 desc = iwl4965_read_targ_mem(priv, base + 1 * sizeof(u32)); 4162 desc = iwl_read_targ_mem(priv, base + 1 * sizeof(u32));
4915 blink1 = iwl4965_read_targ_mem(priv, base + 3 * sizeof(u32)); 4163 blink1 = iwl_read_targ_mem(priv, base + 3 * sizeof(u32));
4916 blink2 = iwl4965_read_targ_mem(priv, base + 4 * sizeof(u32)); 4164 blink2 = iwl_read_targ_mem(priv, base + 4 * sizeof(u32));
4917 ilink1 = iwl4965_read_targ_mem(priv, base + 5 * sizeof(u32)); 4165 ilink1 = iwl_read_targ_mem(priv, base + 5 * sizeof(u32));
4918 ilink2 = iwl4965_read_targ_mem(priv, base + 6 * sizeof(u32)); 4166 ilink2 = iwl_read_targ_mem(priv, base + 6 * sizeof(u32));
4919 data1 = iwl4965_read_targ_mem(priv, base + 7 * sizeof(u32)); 4167 data1 = iwl_read_targ_mem(priv, base + 7 * sizeof(u32));
4920 data2 = iwl4965_read_targ_mem(priv, base + 8 * sizeof(u32)); 4168 data2 = iwl_read_targ_mem(priv, base + 8 * sizeof(u32));
4921 line = iwl4965_read_targ_mem(priv, base + 9 * sizeof(u32)); 4169 line = iwl_read_targ_mem(priv, base + 9 * sizeof(u32));
4922 time = iwl4965_read_targ_mem(priv, base + 11 * sizeof(u32)); 4170 time = iwl_read_targ_mem(priv, base + 11 * sizeof(u32));
4923 4171
4924 IWL_ERROR("Desc Time " 4172 IWL_ERROR("Desc Time "
4925 "data1 data2 line\n"); 4173 "data1 data2 line\n");
@@ -4929,7 +4177,7 @@ static void iwl4965_dump_nic_error_log(struct iwl4965_priv *priv)
4929 IWL_ERROR("0x%05X 0x%05X 0x%05X 0x%05X\n", blink1, blink2, 4177 IWL_ERROR("0x%05X 0x%05X 0x%05X 0x%05X\n", blink1, blink2,
4930 ilink1, ilink2); 4178 ilink1, ilink2);
4931 4179
4932 iwl4965_release_nic_access(priv); 4180 iwl_release_nic_access(priv);
4933} 4181}
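
The reads above imply a fixed word layout for the error-event table in device SRAM. A standalone sketch that pulls out the same offsets from a plain array (word indexes here, where the driver uses byte offsets of base + N * sizeof(u32)):

/*
 * Sketch of the error-log layout implied by the reads above.  The SRAM
 * is faked with a local array; field meanings are taken only from the
 * variable names used in the dump routine.
 */
#include <stdint.h>
#include <stdio.h>

struct err_log {
	uint32_t desc, blink1, blink2, ilink1, ilink2;
	uint32_t data1, data2, line, time;
};

static uint32_t read_targ_mem(const uint32_t *sram, uint32_t word)
{
	return sram[word];		/* stands in for iwl_read_targ_mem() */
}

static struct err_log parse_err_log(const uint32_t *sram, uint32_t *count)
{
	struct err_log e;

	*count   = read_targ_mem(sram, 0);	/* number of entries */
	e.desc   = read_targ_mem(sram, 1);
	e.blink1 = read_targ_mem(sram, 3);
	e.blink2 = read_targ_mem(sram, 4);
	e.ilink1 = read_targ_mem(sram, 5);
	e.ilink2 = read_targ_mem(sram, 6);
	e.data1  = read_targ_mem(sram, 7);
	e.data2  = read_targ_mem(sram, 8);
	e.line   = read_targ_mem(sram, 9);
	e.time   = read_targ_mem(sram, 11);
	return e;
}

int main(void)
{
	uint32_t fake_sram[12] = { 1, 0x5, 0, 0xa, 0xb, 0xc, 0xd,
				   0x1111, 0x2222, 42, 0, 0x99 };
	uint32_t count;
	struct err_log e = parse_err_log(fake_sram, &count);

	printf("count=%u desc=0x%x line=%u time=0x%x\n",
	       count, e.desc, e.line, e.time);
	return 0;
}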
4934 4182
4935#define EVENT_START_OFFSET (4 * sizeof(u32)) 4183#define EVENT_START_OFFSET (4 * sizeof(u32))
@@ -4937,9 +4185,9 @@ static void iwl4965_dump_nic_error_log(struct iwl4965_priv *priv)
4937/** 4185/**
4938 * iwl4965_print_event_log - Dump error event log to syslog 4186 * iwl4965_print_event_log - Dump error event log to syslog
4939 * 4187 *
4940 * NOTE: Must be called with iwl4965_grab_nic_access() already obtained! 4188 * NOTE: Must be called with iwl_grab_nic_access() already obtained!
4941 */ 4189 */
4942static void iwl4965_print_event_log(struct iwl4965_priv *priv, u32 start_idx, 4190static void iwl4965_print_event_log(struct iwl_priv *priv, u32 start_idx,
4943 u32 num_events, u32 mode) 4191 u32 num_events, u32 mode)
4944{ 4192{
4945 u32 i; 4193 u32 i;
@@ -4963,21 +4211,21 @@ static void iwl4965_print_event_log(struct iwl4965_priv *priv, u32 start_idx,
4963 /* "time" is actually "data" for mode 0 (no timestamp). 4211 /* "time" is actually "data" for mode 0 (no timestamp).
4964 * place event id # at far right for easier visual parsing. */ 4212 * place event id # at far right for easier visual parsing. */
4965 for (i = 0; i < num_events; i++) { 4213 for (i = 0; i < num_events; i++) {
4966 ev = iwl4965_read_targ_mem(priv, ptr); 4214 ev = iwl_read_targ_mem(priv, ptr);
4967 ptr += sizeof(u32); 4215 ptr += sizeof(u32);
4968 time = iwl4965_read_targ_mem(priv, ptr); 4216 time = iwl_read_targ_mem(priv, ptr);
4969 ptr += sizeof(u32); 4217 ptr += sizeof(u32);
4970 if (mode == 0) 4218 if (mode == 0)
4971 IWL_ERROR("0x%08x\t%04u\n", time, ev); /* data, ev */ 4219 IWL_ERROR("0x%08x\t%04u\n", time, ev); /* data, ev */
4972 else { 4220 else {
4973 data = iwl4965_read_targ_mem(priv, ptr); 4221 data = iwl_read_targ_mem(priv, ptr);
4974 ptr += sizeof(u32); 4222 ptr += sizeof(u32);
4975 IWL_ERROR("%010u\t0x%08x\t%04u\n", time, data, ev); 4223 IWL_ERROR("%010u\t0x%08x\t%04u\n", time, data, ev);
4976 } 4224 }
4977 } 4225 }
4978} 4226}
4979 4227
4980static void iwl4965_dump_nic_event_log(struct iwl4965_priv *priv) 4228static void iwl4965_dump_nic_event_log(struct iwl_priv *priv)
4981{ 4229{
4982 int rc; 4230 int rc;
4983 u32 base; /* SRAM byte address of event log header */ 4231 u32 base; /* SRAM byte address of event log header */
@@ -4988,29 +4236,29 @@ static void iwl4965_dump_nic_event_log(struct iwl4965_priv *priv)
4988 u32 size; /* # entries that we'll print */ 4236 u32 size; /* # entries that we'll print */
4989 4237
4990 base = le32_to_cpu(priv->card_alive.log_event_table_ptr); 4238 base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
4991 if (!iwl4965_hw_valid_rtc_data_addr(base)) { 4239 if (!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
4992 IWL_ERROR("Invalid event log pointer 0x%08X\n", base); 4240 IWL_ERROR("Invalid event log pointer 0x%08X\n", base);
4993 return; 4241 return;
4994 } 4242 }
4995 4243
4996 rc = iwl4965_grab_nic_access(priv); 4244 rc = iwl_grab_nic_access(priv);
4997 if (rc) { 4245 if (rc) {
4998 IWL_WARNING("Can not read from adapter at this time.\n"); 4246 IWL_WARNING("Can not read from adapter at this time.\n");
4999 return; 4247 return;
5000 } 4248 }
5001 4249
5002 /* event log header */ 4250 /* event log header */
5003 capacity = iwl4965_read_targ_mem(priv, base); 4251 capacity = iwl_read_targ_mem(priv, base);
5004 mode = iwl4965_read_targ_mem(priv, base + (1 * sizeof(u32))); 4252 mode = iwl_read_targ_mem(priv, base + (1 * sizeof(u32)));
5005 num_wraps = iwl4965_read_targ_mem(priv, base + (2 * sizeof(u32))); 4253 num_wraps = iwl_read_targ_mem(priv, base + (2 * sizeof(u32)));
5006 next_entry = iwl4965_read_targ_mem(priv, base + (3 * sizeof(u32))); 4254 next_entry = iwl_read_targ_mem(priv, base + (3 * sizeof(u32)));
5007 4255
5008 size = num_wraps ? capacity : next_entry; 4256 size = num_wraps ? capacity : next_entry;
5009 4257
5010 /* bail out if nothing in log */ 4258 /* bail out if nothing in log */
5011 if (size == 0) { 4259 if (size == 0) {
5012 IWL_ERROR("Start IWL Event Log Dump: nothing in log\n"); 4260 IWL_ERROR("Start IWL Event Log Dump: nothing in log\n");
5013 iwl4965_release_nic_access(priv); 4261 iwl_release_nic_access(priv);
5014 return; 4262 return;
5015 } 4263 }
5016 4264
@@ -5026,13 +4274,13 @@ static void iwl4965_dump_nic_event_log(struct iwl4965_priv *priv)
5026 /* (then/else) start at top of log */ 4274 /* (then/else) start at top of log */
5027 iwl4965_print_event_log(priv, 0, next_entry, mode); 4275 iwl4965_print_event_log(priv, 0, next_entry, mode);
5028 4276
5029 iwl4965_release_nic_access(priv); 4277 iwl_release_nic_access(priv);
5030} 4278}
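
The event log is a ring: the header gives capacity, timestamp mode, wrap count and the next write slot, and size = num_wraps ? capacity : next_entry picks how much to print. A minimal sketch of dumping such a ring in chronological order, with entries simplified to one word each:

/*
 * Ring-dump sketch: when the log has wrapped, the oldest entry sits at
 * next_entry; otherwise the log runs from slot 0 up to next_entry.
 */
#include <stdint.h>
#include <stdio.h>

static void dump_ring(const uint32_t *log, uint32_t capacity,
		      uint32_t num_wraps, uint32_t next_entry)
{
	uint32_t size = num_wraps ? capacity : next_entry;
	uint32_t start = num_wraps ? next_entry : 0;
	uint32_t i;

	if (size == 0) {
		puts("nothing in log");
		return;
	}
	for (i = 0; i < size; i++)
		printf("event[%u] = 0x%08x\n", i,
		       log[(start + i) % capacity]);
}

int main(void)
{
	/* wrapped once: slot 2 holds the oldest entry, slot 1 the newest */
	uint32_t log[4] = { 0xa3, 0xa4, 0xa1, 0xa2 };

	dump_ring(log, 4, 1, 2);
	return 0;
}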
5031 4279
5032/** 4280/**
5033 * iwl4965_irq_handle_error - called for HW or SW error interrupt from card 4281 * iwl4965_irq_handle_error - called for HW or SW error interrupt from card
5034 */ 4282 */
5035static void iwl4965_irq_handle_error(struct iwl4965_priv *priv) 4283static void iwl4965_irq_handle_error(struct iwl_priv *priv)
5036{ 4284{
5037 /* Set the FW error flag -- cleared on iwl4965_down */ 4285 /* Set the FW error flag -- cleared on iwl4965_down */
5038 set_bit(STATUS_FW_ERROR, &priv->status); 4286 set_bit(STATUS_FW_ERROR, &priv->status);
@@ -5040,8 +4288,8 @@ static void iwl4965_irq_handle_error(struct iwl4965_priv *priv)
5040 /* Cancel currently queued command. */ 4288 /* Cancel currently queued command. */
5041 clear_bit(STATUS_HCMD_ACTIVE, &priv->status); 4289 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
5042 4290
5043#ifdef CONFIG_IWL4965_DEBUG 4291#ifdef CONFIG_IWLWIFI_DEBUG
5044 if (iwl4965_debug_level & IWL_DL_FW_ERRORS) { 4292 if (iwl_debug_level & IWL_DL_FW_ERRORS) {
5045 iwl4965_dump_nic_error_log(priv); 4293 iwl4965_dump_nic_error_log(priv);
5046 iwl4965_dump_nic_event_log(priv); 4294 iwl4965_dump_nic_event_log(priv);
5047 iwl4965_print_rx_config_cmd(&priv->staging_rxon); 4295 iwl4965_print_rx_config_cmd(&priv->staging_rxon);
@@ -5058,7 +4306,7 @@ static void iwl4965_irq_handle_error(struct iwl4965_priv *priv)
5058 IWL_DEBUG(IWL_DL_INFO | IWL_DL_FW_ERRORS, 4306 IWL_DEBUG(IWL_DL_INFO | IWL_DL_FW_ERRORS,
5059 "Restarting adapter due to uCode error.\n"); 4307 "Restarting adapter due to uCode error.\n");
5060 4308
5061 if (iwl4965_is_associated(priv)) { 4309 if (iwl_is_associated(priv)) {
5062 memcpy(&priv->recovery_rxon, &priv->active_rxon, 4310 memcpy(&priv->recovery_rxon, &priv->active_rxon,
5063 sizeof(priv->recovery_rxon)); 4311 sizeof(priv->recovery_rxon));
5064 priv->error_recovering = 1; 4312 priv->error_recovering = 1;
@@ -5067,7 +4315,7 @@ static void iwl4965_irq_handle_error(struct iwl4965_priv *priv)
5067 } 4315 }
5068} 4316}
5069 4317
5070static void iwl4965_error_recovery(struct iwl4965_priv *priv) 4318static void iwl4965_error_recovery(struct iwl_priv *priv)
5071{ 4319{
5072 unsigned long flags; 4320 unsigned long flags;
5073 4321
@@ -5084,12 +4332,12 @@ static void iwl4965_error_recovery(struct iwl4965_priv *priv)
5084 spin_unlock_irqrestore(&priv->lock, flags); 4332 spin_unlock_irqrestore(&priv->lock, flags);
5085} 4333}
5086 4334
5087static void iwl4965_irq_tasklet(struct iwl4965_priv *priv) 4335static void iwl4965_irq_tasklet(struct iwl_priv *priv)
5088{ 4336{
5089 u32 inta, handled = 0; 4337 u32 inta, handled = 0;
5090 u32 inta_fh; 4338 u32 inta_fh;
5091 unsigned long flags; 4339 unsigned long flags;
5092#ifdef CONFIG_IWL4965_DEBUG 4340#ifdef CONFIG_IWLWIFI_DEBUG
5093 u32 inta_mask; 4341 u32 inta_mask;
5094#endif 4342#endif
5095 4343
@@ -5098,19 +4346,19 @@ static void iwl4965_irq_tasklet(struct iwl4965_priv *priv)
5098 /* Ack/clear/reset pending uCode interrupts. 4346 /* Ack/clear/reset pending uCode interrupts.
5099 * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS, 4347 * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
5100 * and will clear only when CSR_FH_INT_STATUS gets cleared. */ 4348 * and will clear only when CSR_FH_INT_STATUS gets cleared. */
5101 inta = iwl4965_read32(priv, CSR_INT); 4349 inta = iwl_read32(priv, CSR_INT);
5102 iwl4965_write32(priv, CSR_INT, inta); 4350 iwl_write32(priv, CSR_INT, inta);
5103 4351
5104 /* Ack/clear/reset pending flow-handler (DMA) interrupts. 4352 /* Ack/clear/reset pending flow-handler (DMA) interrupts.
5105 * Any new interrupts that happen after this, either while we're 4353 * Any new interrupts that happen after this, either while we're
5106 * in this tasklet, or later, will show up in next ISR/tasklet. */ 4354 * in this tasklet, or later, will show up in next ISR/tasklet. */
5107 inta_fh = iwl4965_read32(priv, CSR_FH_INT_STATUS); 4355 inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
5108 iwl4965_write32(priv, CSR_FH_INT_STATUS, inta_fh); 4356 iwl_write32(priv, CSR_FH_INT_STATUS, inta_fh);
5109 4357
5110#ifdef CONFIG_IWL4965_DEBUG 4358#ifdef CONFIG_IWLWIFI_DEBUG
5111 if (iwl4965_debug_level & IWL_DL_ISR) { 4359 if (iwl_debug_level & IWL_DL_ISR) {
5112 /* just for debug */ 4360 /* just for debug */
5113 inta_mask = iwl4965_read32(priv, CSR_INT_MASK); 4361 inta_mask = iwl_read32(priv, CSR_INT_MASK);
5114 IWL_DEBUG_ISR("inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", 4362 IWL_DEBUG_ISR("inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
5115 inta, inta_mask, inta_fh); 4363 inta, inta_mask, inta_fh);
5116 } 4364 }
@@ -5120,9 +4368,9 @@ static void iwl4965_irq_tasklet(struct iwl4965_priv *priv)
5120 * atomic, make sure that inta covers all the interrupts that 4368 * atomic, make sure that inta covers all the interrupts that
5121 * we've discovered, even if FH interrupt came in just after 4369 * we've discovered, even if FH interrupt came in just after
5122 * reading CSR_INT. */ 4370 * reading CSR_INT. */
5123 if (inta_fh & CSR_FH_INT_RX_MASK) 4371 if (inta_fh & CSR49_FH_INT_RX_MASK)
5124 inta |= CSR_INT_BIT_FH_RX; 4372 inta |= CSR_INT_BIT_FH_RX;
5125 if (inta_fh & CSR_FH_INT_TX_MASK) 4373 if (inta_fh & CSR49_FH_INT_TX_MASK)
5126 inta |= CSR_INT_BIT_FH_TX; 4374 inta |= CSR_INT_BIT_FH_TX;
5127 4375
5128 /* Now service all interrupt bits discovered above. */ 4376 /* Now service all interrupt bits discovered above. */
@@ -5141,8 +4389,8 @@ static void iwl4965_irq_tasklet(struct iwl4965_priv *priv)
5141 return; 4389 return;
5142 } 4390 }
5143 4391
5144#ifdef CONFIG_IWL4965_DEBUG 4392#ifdef CONFIG_IWLWIFI_DEBUG
5145 if (iwl4965_debug_level & (IWL_DL_ISR)) { 4393 if (iwl_debug_level & (IWL_DL_ISR)) {
5146 /* NIC fires this, but we don't use it, redundant with WAKEUP */ 4394 /* NIC fires this, but we don't use it, redundant with WAKEUP */
5147 if (inta & CSR_INT_BIT_SCD) 4395 if (inta & CSR_INT_BIT_SCD)
5148 IWL_DEBUG_ISR("Scheduler finished to transmit " 4396 IWL_DEBUG_ISR("Scheduler finished to transmit "
@@ -5159,7 +4407,7 @@ static void iwl4965_irq_tasklet(struct iwl4965_priv *priv)
5159 /* HW RF KILL switch toggled */ 4407 /* HW RF KILL switch toggled */
5160 if (inta & CSR_INT_BIT_RF_KILL) { 4408 if (inta & CSR_INT_BIT_RF_KILL) {
5161 int hw_rf_kill = 0; 4409 int hw_rf_kill = 0;
5162 if (!(iwl4965_read32(priv, CSR_GP_CNTRL) & 4410 if (!(iwl_read32(priv, CSR_GP_CNTRL) &
5163 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)) 4411 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
5164 hw_rf_kill = 1; 4412 hw_rf_kill = 1;
5165 4413
@@ -5170,7 +4418,7 @@ static void iwl4965_irq_tasklet(struct iwl4965_priv *priv)
5170 /* Queue restart only if RF_KILL switch was set to "kill" 4418 /* Queue restart only if RF_KILL switch was set to "kill"
5171 * when we loaded driver, and is now set to "enable". 4419 * when we loaded driver, and is now set to "enable".
5172 * After we're Alive, RF_KILL gets handled by 4420 * After we're Alive, RF_KILL gets handled by
5173 * iwl_rx_card_state_notif() */ 4421 * iwl4965_rx_card_state_notif() */
5174 if (!hw_rf_kill && !test_bit(STATUS_ALIVE, &priv->status)) { 4422 if (!hw_rf_kill && !test_bit(STATUS_ALIVE, &priv->status)) {
5175 clear_bit(STATUS_RF_KILL_HW, &priv->status); 4423 clear_bit(STATUS_RF_KILL_HW, &priv->status);
5176 queue_work(priv->workqueue, &priv->restart); 4424 queue_work(priv->workqueue, &priv->restart);
@@ -5230,13 +4478,15 @@ static void iwl4965_irq_tasklet(struct iwl4965_priv *priv)
5230 } 4478 }
5231 4479
5232 /* Re-enable all interrupts */ 4480 /* Re-enable all interrupts */
 5233		iwl4965_enable_interrupts(priv);			 4481		/* only re-enable if disabled by irq */
5234 4482 if (test_bit(STATUS_INT_ENABLED, &priv->status))
5235#ifdef CONFIG_IWL4965_DEBUG 4483 iwl4965_enable_interrupts(priv);
5236 if (iwl4965_debug_level & (IWL_DL_ISR)) { 4484
5237 inta = iwl4965_read32(priv, CSR_INT); 4485#ifdef CONFIG_IWLWIFI_DEBUG
5238 inta_mask = iwl4965_read32(priv, CSR_INT_MASK); 4486 if (iwl_debug_level & (IWL_DL_ISR)) {
5239 inta_fh = iwl4965_read32(priv, CSR_FH_INT_STATUS); 4487 inta = iwl_read32(priv, CSR_INT);
4488 inta_mask = iwl_read32(priv, CSR_INT_MASK);
4489 inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
5240 IWL_DEBUG_ISR("End inta 0x%08x, enabled 0x%08x, fh 0x%08x, " 4490 IWL_DEBUG_ISR("End inta 0x%08x, enabled 0x%08x, fh 0x%08x, "
5241 "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags); 4491 "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags);
5242 } 4492 }
@@ -5246,7 +4496,7 @@ static void iwl4965_irq_tasklet(struct iwl4965_priv *priv)
5246 4496
5247static irqreturn_t iwl4965_isr(int irq, void *data) 4497static irqreturn_t iwl4965_isr(int irq, void *data)
5248{ 4498{
5249 struct iwl4965_priv *priv = data; 4499 struct iwl_priv *priv = data;
5250 u32 inta, inta_mask; 4500 u32 inta, inta_mask;
5251 u32 inta_fh; 4501 u32 inta_fh;
5252 if (!priv) 4502 if (!priv)
@@ -5258,12 +4508,12 @@ static irqreturn_t iwl4965_isr(int irq, void *data)
5258 * back-to-back ISRs and sporadic interrupts from our NIC. 4508 * back-to-back ISRs and sporadic interrupts from our NIC.
5259 * If we have something to service, the tasklet will re-enable ints. 4509 * If we have something to service, the tasklet will re-enable ints.
5260 * If we *don't* have something, we'll re-enable before leaving here. */ 4510 * If we *don't* have something, we'll re-enable before leaving here. */
5261 inta_mask = iwl4965_read32(priv, CSR_INT_MASK); /* just for debug */ 4511 inta_mask = iwl_read32(priv, CSR_INT_MASK); /* just for debug */
5262 iwl4965_write32(priv, CSR_INT_MASK, 0x00000000); 4512 iwl_write32(priv, CSR_INT_MASK, 0x00000000);
5263 4513
5264 /* Discover which interrupts are active/pending */ 4514 /* Discover which interrupts are active/pending */
5265 inta = iwl4965_read32(priv, CSR_INT); 4515 inta = iwl_read32(priv, CSR_INT);
5266 inta_fh = iwl4965_read32(priv, CSR_FH_INT_STATUS); 4516 inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
5267 4517
5268 /* Ignore interrupt if there's nothing in NIC to service. 4518 /* Ignore interrupt if there's nothing in NIC to service.
5269 * This may be due to IRQ shared with another device, 4519 * This may be due to IRQ shared with another device,
@@ -5295,313 +4545,13 @@ static irqreturn_t iwl4965_isr(int irq, void *data)
5295 4545
5296 none: 4546 none:
5297 /* re-enable interrupts here since we don't have anything to service. */ 4547 /* re-enable interrupts here since we don't have anything to service. */
 5298		iwl4965_enable_interrupts(priv);			 4548		/* only re-enable if disabled by irq */
4549 if (test_bit(STATUS_INT_ENABLED, &priv->status))
4550 iwl4965_enable_interrupts(priv);
5299 spin_unlock(&priv->lock); 4551 spin_unlock(&priv->lock);
5300 return IRQ_NONE; 4552 return IRQ_NONE;
5301} 4553}
5302 4554
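
The ISR and tasklet above fold the flow-handler (DMA) status bits into the main interrupt word so everything already acknowledged gets serviced, and return IRQ_NONE when a shared interrupt line fires with nothing pending for this device. A sketch of that bookkeeping with placeholder bit masks, not the real CSR layout:

/*
 * Interrupt-bookkeeping sketch: FH status bits are mapped onto the main
 * interrupt word, and a shared-IRQ call with nothing pending is ignored.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EX_FH_RX_MASK	0x00430000u	/* placeholder FH Rx bits */
#define EX_FH_TX_MASK	0x00000003u	/* placeholder FH Tx bits */
#define EX_INT_FH_RX	0x80000000u	/* placeholder CSR_INT bits */
#define EX_INT_FH_TX	0x08000000u

static uint32_t fold_fh_bits(uint32_t inta, uint32_t inta_fh)
{
	if (inta_fh & EX_FH_RX_MASK)
		inta |= EX_INT_FH_RX;
	if (inta_fh & EX_FH_TX_MASK)
		inta |= EX_INT_FH_TX;
	return inta;
}

static bool irq_is_ours(uint32_t inta, uint32_t inta_fh)
{
	/* shared line, nothing pending for this device: answer IRQ_NONE */
	return inta || inta_fh;
}

int main(void)
{
	uint32_t inta = 0, inta_fh = EX_FH_RX_MASK;

	printf("ours=%d folded=0x%08x\n",
	       irq_is_ours(inta, inta_fh), fold_fh_bits(inta, inta_fh));
	return 0;
}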
5303/************************** EEPROM BANDS ****************************
5304 *
5305 * The iwl4965_eeprom_band definitions below provide the mapping from the
5306 * EEPROM contents to the specific channel number supported for each
5307 * band.
5308 *
5309 * For example, iwl4965_priv->eeprom.band_3_channels[4] from the band_3
5310 * definition below maps to physical channel 42 in the 5.2GHz spectrum.
5311 * The specific geography and calibration information for that channel
5312 * is contained in the eeprom map itself.
5313 *
5314 * During init, we copy the eeprom information and channel map
5315 * information into priv->channel_info_24/52 and priv->channel_map_24/52
5316 *
5317 * channel_map_24/52 provides the index in the channel_info array for a
5318 * given channel. We have to have two separate maps as there is channel
5319 * overlap with the 2.4GHz and 5.2GHz spectrum as seen in band_1 and
5320 * band_2
5321 *
5322 * A value of 0xff stored in the channel_map indicates that the channel
5323 * is not supported by the hardware at all.
5324 *
5325 * A value of 0xfe in the channel_map indicates that the channel is not
5326 * valid for Tx with the current hardware. This means that
5327 * while the system can tune and receive on a given channel, it may not
5328 * be able to associate or transmit any frames on that
5329 * channel. There is no corresponding channel information for that
5330 * entry.
5331 *
5332 *********************************************************************/
5333
5334/* 2.4 GHz */
5335static const u8 iwl4965_eeprom_band_1[14] = {
5336 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
5337};
5338
5339/* 5.2 GHz bands */
5340static const u8 iwl4965_eeprom_band_2[] = { /* 4915-5080MHz */
5341 183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16
5342};
5343
5344static const u8 iwl4965_eeprom_band_3[] = { /* 5170-5320MHz */
5345 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
5346};
5347
5348static const u8 iwl4965_eeprom_band_4[] = { /* 5500-5700MHz */
5349 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
5350};
5351
5352static const u8 iwl4965_eeprom_band_5[] = { /* 5725-5825MHz */
5353 145, 149, 153, 157, 161, 165
5354};
5355
5356static u8 iwl4965_eeprom_band_6[] = { /* 2.4 FAT channel */
5357 1, 2, 3, 4, 5, 6, 7
5358};
5359
5360static u8 iwl4965_eeprom_band_7[] = { /* 5.2 FAT channel */
5361 36, 44, 52, 60, 100, 108, 116, 124, 132, 149, 157
5362};
5363
5364static void iwl4965_init_band_reference(const struct iwl4965_priv *priv,
5365 int band,
5366 int *eeprom_ch_count,
5367 const struct iwl4965_eeprom_channel
5368 **eeprom_ch_info,
5369 const u8 **eeprom_ch_index)
5370{
5371 switch (band) {
5372 case 1: /* 2.4GHz band */
5373 *eeprom_ch_count = ARRAY_SIZE(iwl4965_eeprom_band_1);
5374 *eeprom_ch_info = priv->eeprom.band_1_channels;
5375 *eeprom_ch_index = iwl4965_eeprom_band_1;
5376 break;
5377 case 2: /* 4.9GHz band */
5378 *eeprom_ch_count = ARRAY_SIZE(iwl4965_eeprom_band_2);
5379 *eeprom_ch_info = priv->eeprom.band_2_channels;
5380 *eeprom_ch_index = iwl4965_eeprom_band_2;
5381 break;
5382 case 3: /* 5.2GHz band */
5383 *eeprom_ch_count = ARRAY_SIZE(iwl4965_eeprom_band_3);
5384 *eeprom_ch_info = priv->eeprom.band_3_channels;
5385 *eeprom_ch_index = iwl4965_eeprom_band_3;
5386 break;
5387 case 4: /* 5.5GHz band */
5388 *eeprom_ch_count = ARRAY_SIZE(iwl4965_eeprom_band_4);
5389 *eeprom_ch_info = priv->eeprom.band_4_channels;
5390 *eeprom_ch_index = iwl4965_eeprom_band_4;
5391 break;
5392 case 5: /* 5.7GHz band */
5393 *eeprom_ch_count = ARRAY_SIZE(iwl4965_eeprom_band_5);
5394 *eeprom_ch_info = priv->eeprom.band_5_channels;
5395 *eeprom_ch_index = iwl4965_eeprom_band_5;
5396 break;
5397 case 6: /* 2.4GHz FAT channels */
5398 *eeprom_ch_count = ARRAY_SIZE(iwl4965_eeprom_band_6);
5399 *eeprom_ch_info = priv->eeprom.band_24_channels;
5400 *eeprom_ch_index = iwl4965_eeprom_band_6;
5401 break;
5402 case 7: /* 5 GHz FAT channels */
5403 *eeprom_ch_count = ARRAY_SIZE(iwl4965_eeprom_band_7);
5404 *eeprom_ch_info = priv->eeprom.band_52_channels;
5405 *eeprom_ch_index = iwl4965_eeprom_band_7;
5406 break;
5407 default:
5408 BUG();
5409 return;
5410 }
5411}
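
Each EEPROM band above is just an ordered list of channel numbers, and the driver's flat channel map is their concatenation; the comment block notes, for instance, that band_3_channels[4] is physical channel 42. A standalone sketch of that lookup using the 2.4 GHz and 5170-5320 MHz tables copied from above:

/*
 * Band-lookup sketch: find which EEPROM band list a channel belongs to
 * and at which index.  Only bands 1 and 3 are reproduced here.
 */
#include <stddef.h>
#include <stdio.h>

static const unsigned char band_1[] = {		/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
};
static const unsigned char band_3[] = {		/* 5170-5320 MHz */
	34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
};

struct band {
	const unsigned char *ch;
	size_t count;
	const char *name;
};

static const struct band bands[] = {
	{ band_1, sizeof(band_1), "2.4 GHz" },
	{ band_3, sizeof(band_3), "5170-5320 MHz" },
};

static const struct band *find_band(unsigned channel, size_t *idx)
{
	size_t b, i;

	for (b = 0; b < sizeof(bands) / sizeof(bands[0]); b++)
		for (i = 0; i < bands[b].count; i++)
			if (bands[b].ch[i] == channel) {
				*idx = i;
				return &bands[b];
			}
	return NULL;
}

int main(void)
{
	size_t idx;
	const struct band *b = find_band(42, &idx);

	if (b)	/* channel 42 is band_3[4], as the comment block says */
		printf("channel 42: %s, index %zu\n", b->name, idx);
	return 0;
}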
5412
5413/**
5414 * iwl4965_get_channel_info - Find driver's private channel info
5415 *
5416 * Based on band and channel number.
5417 */
5418const struct iwl4965_channel_info *iwl4965_get_channel_info(const struct iwl4965_priv *priv,
5419 int phymode, u16 channel)
5420{
5421 int i;
5422
5423 switch (phymode) {
5424 case MODE_IEEE80211A:
5425 for (i = 14; i < priv->channel_count; i++) {
5426 if (priv->channel_info[i].channel == channel)
5427 return &priv->channel_info[i];
5428 }
5429 break;
5430
5431 case MODE_IEEE80211B:
5432 case MODE_IEEE80211G:
5433 if (channel >= 1 && channel <= 14)
5434 return &priv->channel_info[channel - 1];
5435 break;
5436
5437 }
5438
5439 return NULL;
5440}
5441
5442#define CHECK_AND_PRINT(x) ((eeprom_ch_info[ch].flags & EEPROM_CHANNEL_##x) \
5443 ? # x " " : "")
5444
5445/**
5446 * iwl4965_init_channel_map - Set up driver's info for all possible channels
5447 */
5448static int iwl4965_init_channel_map(struct iwl4965_priv *priv)
5449{
5450 int eeprom_ch_count = 0;
5451 const u8 *eeprom_ch_index = NULL;
5452 const struct iwl4965_eeprom_channel *eeprom_ch_info = NULL;
5453 int band, ch;
5454 struct iwl4965_channel_info *ch_info;
5455
5456 if (priv->channel_count) {
5457 IWL_DEBUG_INFO("Channel map already initialized.\n");
5458 return 0;
5459 }
5460
5461 if (priv->eeprom.version < 0x2f) {
5462 IWL_WARNING("Unsupported EEPROM version: 0x%04X\n",
5463 priv->eeprom.version);
5464 return -EINVAL;
5465 }
5466
5467 IWL_DEBUG_INFO("Initializing regulatory info from EEPROM\n");
5468
5469 priv->channel_count =
5470 ARRAY_SIZE(iwl4965_eeprom_band_1) +
5471 ARRAY_SIZE(iwl4965_eeprom_band_2) +
5472 ARRAY_SIZE(iwl4965_eeprom_band_3) +
5473 ARRAY_SIZE(iwl4965_eeprom_band_4) +
5474 ARRAY_SIZE(iwl4965_eeprom_band_5);
5475
5476 IWL_DEBUG_INFO("Parsing data for %d channels.\n", priv->channel_count);
5477
5478 priv->channel_info = kzalloc(sizeof(struct iwl4965_channel_info) *
5479 priv->channel_count, GFP_KERNEL);
5480 if (!priv->channel_info) {
5481 IWL_ERROR("Could not allocate channel_info\n");
5482 priv->channel_count = 0;
5483 return -ENOMEM;
5484 }
5485
5486 ch_info = priv->channel_info;
5487
5488 /* Loop through the 5 EEPROM bands adding them in order to the
 5489	 * channel map we maintain (which contains more information than
 5490	 * what is in the EEPROM alone) */
5491 for (band = 1; band <= 5; band++) {
5492
5493 iwl4965_init_band_reference(priv, band, &eeprom_ch_count,
5494 &eeprom_ch_info, &eeprom_ch_index);
5495
5496 /* Loop through each band adding each of the channels */
5497 for (ch = 0; ch < eeprom_ch_count; ch++) {
5498 ch_info->channel = eeprom_ch_index[ch];
5499 ch_info->phymode = (band == 1) ? MODE_IEEE80211B :
5500 MODE_IEEE80211A;
5501
5502 /* permanently store EEPROM's channel regulatory flags
5503 * and max power in channel info database. */
5504 ch_info->eeprom = eeprom_ch_info[ch];
5505
5506 /* Copy the run-time flags so they are there even on
5507 * invalid channels */
5508 ch_info->flags = eeprom_ch_info[ch].flags;
5509
5510 if (!(is_channel_valid(ch_info))) {
5511 IWL_DEBUG_INFO("Ch. %d Flags %x [%sGHz] - "
5512 "No traffic\n",
5513 ch_info->channel,
5514 ch_info->flags,
5515 is_channel_a_band(ch_info) ?
5516 "5.2" : "2.4");
5517 ch_info++;
5518 continue;
5519 }
5520
5521 /* Initialize regulatory-based run-time data */
5522 ch_info->max_power_avg = ch_info->curr_txpow =
5523 eeprom_ch_info[ch].max_power_avg;
5524 ch_info->scan_power = eeprom_ch_info[ch].max_power_avg;
5525 ch_info->min_power = 0;
5526
5527 IWL_DEBUG_INFO("Ch. %d [%sGHz] %s%s%s%s%s%s(0x%02x"
5528 " %ddBm): Ad-Hoc %ssupported\n",
5529 ch_info->channel,
5530 is_channel_a_band(ch_info) ?
5531 "5.2" : "2.4",
5532 CHECK_AND_PRINT(IBSS),
5533 CHECK_AND_PRINT(ACTIVE),
5534 CHECK_AND_PRINT(RADAR),
5535 CHECK_AND_PRINT(WIDE),
5536 CHECK_AND_PRINT(NARROW),
5537 CHECK_AND_PRINT(DFS),
5538 eeprom_ch_info[ch].flags,
5539 eeprom_ch_info[ch].max_power_avg,
5540 ((eeprom_ch_info[ch].
5541 flags & EEPROM_CHANNEL_IBSS)
5542 && !(eeprom_ch_info[ch].
5543 flags & EEPROM_CHANNEL_RADAR))
5544 ? "" : "not ");
5545
5546 /* Set the user_txpower_limit to the highest power
5547 * supported by any channel */
5548 if (eeprom_ch_info[ch].max_power_avg >
5549 priv->user_txpower_limit)
5550 priv->user_txpower_limit =
5551 eeprom_ch_info[ch].max_power_avg;
5552
5553 ch_info++;
5554 }
5555 }
5556
5557 /* Two additional EEPROM bands for 2.4 and 5 GHz FAT channels */
5558 for (band = 6; band <= 7; band++) {
5559 int phymode;
5560 u8 fat_extension_chan;
5561
5562 iwl4965_init_band_reference(priv, band, &eeprom_ch_count,
5563 &eeprom_ch_info, &eeprom_ch_index);
5564
5565 /* EEPROM band 6 is 2.4, band 7 is 5 GHz */
5566 phymode = (band == 6) ? MODE_IEEE80211B : MODE_IEEE80211A;
5567
5568 /* Loop through each band adding each of the channels */
5569 for (ch = 0; ch < eeprom_ch_count; ch++) {
5570
5571 if ((band == 6) &&
5572 ((eeprom_ch_index[ch] == 5) ||
5573 (eeprom_ch_index[ch] == 6) ||
5574 (eeprom_ch_index[ch] == 7)))
5575 fat_extension_chan = HT_IE_EXT_CHANNEL_MAX;
5576 else
5577 fat_extension_chan = HT_IE_EXT_CHANNEL_ABOVE;
5578
5579 /* Set up driver's info for lower half */
5580 iwl4965_set_fat_chan_info(priv, phymode,
5581 eeprom_ch_index[ch],
5582 &(eeprom_ch_info[ch]),
5583 fat_extension_chan);
5584
5585 /* Set up driver's info for upper half */
5586 iwl4965_set_fat_chan_info(priv, phymode,
5587 (eeprom_ch_index[ch] + 4),
5588 &(eeprom_ch_info[ch]),
5589 HT_IE_EXT_CHANNEL_BELOW);
5590 }
5591 }
5592
5593 return 0;
5594}
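
The FAT-channel loop above records each 40 MHz pair twice: the EEPROM entry names the lower control channel (extension above, or either side for 2.4 GHz channels 5-7), and channel + 4 is recorded with its extension below. A small sketch of that pairing:

/*
 * FAT (40 MHz) pairing sketch: the lower control channel and the channel
 * four numbers higher describe the two halves of the same wide channel.
 */
#include <stdio.h>

enum ext_chan { EXT_ABOVE, EXT_BELOW, EXT_MAX };

static void record_fat_pair(int band_2ghz, unsigned lower_chan)
{
	enum ext_chan lower_ext;

	if (band_2ghz && lower_chan >= 5 && lower_chan <= 7)
		lower_ext = EXT_MAX;	/* either extension side allowed */
	else
		lower_ext = EXT_ABOVE;

	printf("ctrl %u: ext=%d, ctrl %u: ext=%d\n",
	       lower_chan, lower_ext, lower_chan + 4, EXT_BELOW);
}

int main(void)
{
	record_fat_pair(1, 1);	/* 2.4 GHz: channels 1 and 5 pair up */
	record_fat_pair(0, 36);	/* 5 GHz:   channels 36 and 40 pair up */
	return 0;
}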
5595
5596/*
5597 * iwl4965_free_channel_map - undo allocations in iwl4965_init_channel_map
5598 */
5599static void iwl4965_free_channel_map(struct iwl4965_priv *priv)
5600{
5601 kfree(priv->channel_info);
5602 priv->channel_count = 0;
5603}
5604
5605/* For active scan, listen ACTIVE_DWELL_TIME (msec) on each channel after 4555/* For active scan, listen ACTIVE_DWELL_TIME (msec) on each channel after
5606 * sending probe req. This should be set long enough to hear probe responses 4556 * sending probe req. This should be set long enough to hear probe responses
5607 * from more than one AP. */ 4557 * from more than one AP. */
@@ -5625,22 +4575,24 @@ static void iwl4965_free_channel_map(struct iwl4965_priv *priv)
5625#define IWL_PASSIVE_DWELL_BASE (100) 4575#define IWL_PASSIVE_DWELL_BASE (100)
5626#define IWL_CHANNEL_TUNE_TIME 5 4576#define IWL_CHANNEL_TUNE_TIME 5
5627 4577
5628static inline u16 iwl4965_get_active_dwell_time(struct iwl4965_priv *priv, int phymode) 4578static inline u16 iwl4965_get_active_dwell_time(struct iwl_priv *priv,
4579 enum ieee80211_band band)
5629{ 4580{
5630 if (phymode == MODE_IEEE80211A) 4581 if (band == IEEE80211_BAND_5GHZ)
5631 return IWL_ACTIVE_DWELL_TIME_52; 4582 return IWL_ACTIVE_DWELL_TIME_52;
5632 else 4583 else
5633 return IWL_ACTIVE_DWELL_TIME_24; 4584 return IWL_ACTIVE_DWELL_TIME_24;
5634} 4585}
5635 4586
5636static u16 iwl4965_get_passive_dwell_time(struct iwl4965_priv *priv, int phymode) 4587static u16 iwl4965_get_passive_dwell_time(struct iwl_priv *priv,
4588 enum ieee80211_band band)
5637{ 4589{
5638 u16 active = iwl4965_get_active_dwell_time(priv, phymode); 4590 u16 active = iwl4965_get_active_dwell_time(priv, band);
5639 u16 passive = (phymode != MODE_IEEE80211A) ? 4591 u16 passive = (band != IEEE80211_BAND_5GHZ) ?
5640 IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_24 : 4592 IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_24 :
5641 IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_52; 4593 IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_52;
5642 4594
5643 if (iwl4965_is_associated(priv)) { 4595 if (iwl_is_associated(priv)) {
5644 /* If we're associated, we clamp the maximum passive 4596 /* If we're associated, we clamp the maximum passive
5645 * dwell time to be 98% of the beacon interval (minus 4597 * dwell time to be 98% of the beacon interval (minus
5646 * 2 * channel tune time) */ 4598 * 2 * channel tune time) */
@@ -5656,30 +4608,34 @@ static u16 iwl4965_get_passive_dwell_time(struct iwl4965_priv *priv, int phymode
5656 return passive; 4608 return passive;
5657} 4609}
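
A worked sketch of the dwell-time calculation above; IWL_PASSIVE_DWELL_BASE (100) and IWL_CHANNEL_TUNE_TIME (5) come from the definitions above, while the per-band constants are placeholders and the exact clamp expression is an assumption based on the "98% of the beacon interval" comment:

/*
 * Dwell-time sketch: passive dwell is a base plus a per-band add-on,
 * clamped while associated so scanning does not outlast the beacon gap.
 */
#include <stdio.h>

#define PASSIVE_DWELL_BASE	100	/* msec, from the code above */
#define CHANNEL_TUNE_TIME	5	/* msec, from the code above */
#define PASSIVE_DWELL_24	20	/* placeholder per-band value */
#define PASSIVE_DWELL_52	10	/* placeholder per-band value */

static unsigned passive_dwell(int band_5ghz, unsigned beacon_int_or_0)
{
	unsigned passive = PASSIVE_DWELL_BASE +
		(band_5ghz ? PASSIVE_DWELL_52 : PASSIVE_DWELL_24);

	if (beacon_int_or_0) {		/* associated: stay near the AP */
		unsigned max = (beacon_int_or_0 * 98) / 100 -
			       2 * CHANNEL_TUNE_TIME;
		if (passive > max)
			passive = max;
	}
	return passive;
}

int main(void)
{
	printf("idle 2.4 GHz scan: %u ms\n", passive_dwell(0, 0));
	printf("associated (beacon interval 100): %u ms\n",
	       passive_dwell(1, 100));
	return 0;
}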
5658 4610
5659static int iwl4965_get_channels_for_scan(struct iwl4965_priv *priv, int phymode, 4611static int iwl4965_get_channels_for_scan(struct iwl_priv *priv,
4612 enum ieee80211_band band,
5660 u8 is_active, u8 direct_mask, 4613 u8 is_active, u8 direct_mask,
5661 struct iwl4965_scan_channel *scan_ch) 4614 struct iwl4965_scan_channel *scan_ch)
5662{ 4615{
5663 const struct ieee80211_channel *channels = NULL; 4616 const struct ieee80211_channel *channels = NULL;
5664 const struct ieee80211_hw_mode *hw_mode; 4617 const struct ieee80211_supported_band *sband;
5665 const struct iwl4965_channel_info *ch_info; 4618 const struct iwl_channel_info *ch_info;
5666 u16 passive_dwell = 0; 4619 u16 passive_dwell = 0;
5667 u16 active_dwell = 0; 4620 u16 active_dwell = 0;
5668 int added, i; 4621 int added, i;
5669 4622
5670 hw_mode = iwl4965_get_hw_mode(priv, phymode); 4623 sband = iwl4965_get_hw_mode(priv, band);
5671 if (!hw_mode) 4624 if (!sband)
5672 return 0; 4625 return 0;
5673 4626
5674 channels = hw_mode->channels; 4627 channels = sband->channels;
4628
4629 active_dwell = iwl4965_get_active_dwell_time(priv, band);
4630 passive_dwell = iwl4965_get_passive_dwell_time(priv, band);
5675 4631
5676 active_dwell = iwl4965_get_active_dwell_time(priv, phymode); 4632 for (i = 0, added = 0; i < sband->n_channels; i++) {
5677 passive_dwell = iwl4965_get_passive_dwell_time(priv, phymode); 4633 if (channels[i].flags & IEEE80211_CHAN_DISABLED)
4634 continue;
5678 4635
5679 for (i = 0, added = 0; i < hw_mode->num_channels; i++) { 4636 if (ieee80211_frequency_to_channel(channels[i].center_freq) ==
5680 if (channels[i].chan ==
5681 le16_to_cpu(priv->active_rxon.channel)) { 4637 le16_to_cpu(priv->active_rxon.channel)) {
5682 if (iwl4965_is_associated(priv)) { 4638 if (iwl_is_associated(priv)) {
5683 IWL_DEBUG_SCAN 4639 IWL_DEBUG_SCAN
5684 ("Skipping current channel %d\n", 4640 ("Skipping current channel %d\n",
5685 le16_to_cpu(priv->active_rxon.channel)); 4641 le16_to_cpu(priv->active_rxon.channel));
@@ -5688,9 +4644,9 @@ static int iwl4965_get_channels_for_scan(struct iwl4965_priv *priv, int phymode,
5688 } else if (priv->only_active_channel) 4644 } else if (priv->only_active_channel)
5689 continue; 4645 continue;
5690 4646
5691 scan_ch->channel = channels[i].chan; 4647 scan_ch->channel = ieee80211_frequency_to_channel(channels[i].center_freq);
5692 4648
5693 ch_info = iwl4965_get_channel_info(priv, phymode, 4649 ch_info = iwl_get_channel_info(priv, band,
5694 scan_ch->channel); 4650 scan_ch->channel);
5695 if (!is_channel_valid(ch_info)) { 4651 if (!is_channel_valid(ch_info)) {
5696 IWL_DEBUG_SCAN("Channel %d is INVALID for this SKU.\n", 4652 IWL_DEBUG_SCAN("Channel %d is INVALID for this SKU.\n",
@@ -5699,7 +4655,7 @@ static int iwl4965_get_channels_for_scan(struct iwl4965_priv *priv, int phymode,
5699 } 4655 }
5700 4656
5701 if (!is_active || is_channel_passive(ch_info) || 4657 if (!is_active || is_channel_passive(ch_info) ||
5702 !(channels[i].flag & IEEE80211_CHAN_W_ACTIVE_SCAN)) 4658 (channels[i].flags & IEEE80211_CHAN_PASSIVE_SCAN))
5703 scan_ch->type = 0; /* passive */ 4659 scan_ch->type = 0; /* passive */
5704 else 4660 else
5705 scan_ch->type = 1; /* active */ 4661 scan_ch->type = 1; /* active */
@@ -5718,7 +4674,7 @@ static int iwl4965_get_channels_for_scan(struct iwl4965_priv *priv, int phymode,
5718 /* scan_pwr_info->tpc.dsp_atten; */ 4674 /* scan_pwr_info->tpc.dsp_atten; */
5719 4675
5720 /*scan_pwr_info->tpc.tx_gain; */ 4676 /*scan_pwr_info->tpc.tx_gain; */
5721 if (phymode == MODE_IEEE80211A) 4677 if (band == IEEE80211_BAND_5GHZ)
5722 scan_ch->tpc.tx_gain = ((1 << 5) | (3 << 3)) | 3; 4678 scan_ch->tpc.tx_gain = ((1 << 5) | (3 << 3)) | 3;
5723 else { 4679 else {
5724 scan_ch->tpc.tx_gain = ((1 << 5) | (5 << 3)); 4680 scan_ch->tpc.tx_gain = ((1 << 5) | (5 << 3));
@@ -5742,194 +4698,148 @@ static int iwl4965_get_channels_for_scan(struct iwl4965_priv *priv, int phymode,
5742 return added; 4698 return added;
5743} 4699}
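
The scan-channel loop above skips disabled channels and the channel the interface is currently associated on, and marks each remaining channel for active or passive scanning. A compact sketch of that classification:

/*
 * Scan-channel classification sketch: -1 means skip, 0 passive, 1 active.
 */
#include <stdbool.h>
#include <stdio.h>

struct scan_chan_in {
	unsigned channel;
	bool disabled;
	bool passive_only;
};

static int classify(const struct scan_chan_in *c, bool scan_is_active,
		    unsigned associated_channel)
{
	if (c->disabled)
		return -1;
	if (associated_channel && c->channel == associated_channel)
		return -1;
	if (!scan_is_active || c->passive_only)
		return 0;
	return 1;
}

int main(void)
{
	struct scan_chan_in ch[] = {
		{ 1,  false, false },
		{ 6,  false, false },	/* currently associated here */
		{ 52, false, true  },	/* passive-only (e.g. radar) channel */
	};
	unsigned i;

	for (i = 0; i < sizeof(ch) / sizeof(ch[0]); i++)
		printf("chan %u -> %d\n", ch[i].channel,
		       classify(&ch[i], true, 6));
	return 0;
}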
5744 4700
5745static void iwl4965_reset_channel_flag(struct iwl4965_priv *priv) 4701static void iwl4965_init_hw_rates(struct iwl_priv *priv,
5746{
5747 int i, j;
5748 for (i = 0; i < 3; i++) {
5749 struct ieee80211_hw_mode *hw_mode = (void *)&priv->modes[i];
5750 for (j = 0; j < hw_mode->num_channels; j++)
5751 hw_mode->channels[j].flag = hw_mode->channels[j].val;
5752 }
5753}
5754
5755static void iwl4965_init_hw_rates(struct iwl4965_priv *priv,
5756 struct ieee80211_rate *rates) 4702 struct ieee80211_rate *rates)
5757{ 4703{
5758 int i; 4704 int i;
5759 4705
5760 for (i = 0; i < IWL_RATE_COUNT; i++) { 4706 for (i = 0; i < IWL_RATE_COUNT; i++) {
5761 rates[i].rate = iwl4965_rates[i].ieee * 5; 4707 rates[i].bitrate = iwl4965_rates[i].ieee * 5;
5762 rates[i].val = i; /* Rate scaling will work on indexes */ 4708 rates[i].hw_value = i; /* Rate scaling will work on indexes */
5763 rates[i].val2 = i; 4709 rates[i].hw_value_short = i;
5764 rates[i].flags = IEEE80211_RATE_SUPPORTED; 4710 rates[i].flags = 0;
5765 /* Only OFDM have the bits-per-symbol set */ 4711 if ((i > IWL_LAST_OFDM_RATE) || (i < IWL_FIRST_OFDM_RATE)) {
5766 if ((i <= IWL_LAST_OFDM_RATE) && (i >= IWL_FIRST_OFDM_RATE))
5767 rates[i].flags |= IEEE80211_RATE_OFDM;
5768 else {
5769 /* 4712 /*
5770 * If CCK 1M then set rate flag to CCK else CCK_2 4713 * If CCK != 1M then set short preamble rate flag.
5771 * which is CCK | PREAMBLE2
5772 */ 4714 */
5773 rates[i].flags |= (iwl4965_rates[i].plcp == 10) ? 4715 rates[i].flags |=
5774 IEEE80211_RATE_CCK : IEEE80211_RATE_CCK_2; 4716 (iwl4965_rates[i].plcp == IWL_RATE_1M_PLCP) ?
4717 0 : IEEE80211_RATE_SHORT_PREAMBLE;
5775 } 4718 }
5776
5777 /* Set up which ones are basic rates... */
5778 if (IWL_BASIC_RATES_MASK & (1 << i))
5779 rates[i].flags |= IEEE80211_RATE_BASIC;
5780 } 4719 }
5781} 4720}
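
The conversion above maps the driver's IEEE-encoded rates (units of 500 kb/s) to mac80211 bitrates in units of 100 kb/s, hence the multiply by 5, and flags every CCK rate except 1M as short-preamble capable. A standalone sketch with an illustrative rate list, not the driver's table:

/*
 * Rate-table conversion sketch: IEEE encoding * 5 gives the 100 kb/s
 * bitrate; the table index doubles as the hardware rate value.
 */
#include <stdint.h>
#include <stdio.h>

#define EX_RATE_SHORT_PREAMBLE	0x1

struct ex_rate {
	uint16_t bitrate;	/* in 100 kb/s */
	uint8_t  hw_value;	/* index into the rate table */
	uint8_t  flags;
};

int main(void)
{
	/* IEEE-encoded rates: 1, 2, 5.5, 11 Mb/s (CCK) and 6 Mb/s (OFDM) */
	const uint8_t ieee[] = { 2, 4, 11, 22, 12 };
	const int first_ofdm = 4;	/* index of 6 Mb/s in this list */
	struct ex_rate rates[5];
	int i;

	for (i = 0; i < 5; i++) {
		rates[i].bitrate  = ieee[i] * 5;
		rates[i].hw_value = i;
		rates[i].flags    = 0;
		/* CCK rates except 1M may use a short preamble */
		if (i < first_ofdm && i != 0)
			rates[i].flags |= EX_RATE_SHORT_PREAMBLE;
		printf("rate %d: %u.%u Mb/s flags=0x%x\n", i,
		       rates[i].bitrate / 10, rates[i].bitrate % 10,
		       rates[i].flags);
	}
	return 0;
}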
5782 4721
5783/** 4722/**
5784 * iwl4965_init_geos - Initialize mac80211's geo/channel info based from eeprom 4723 * iwl4965_init_geos - Initialize mac80211's geo/channel info based from eeprom
5785 */ 4724 */
5786static int iwl4965_init_geos(struct iwl4965_priv *priv) 4725int iwl4965_init_geos(struct iwl_priv *priv)
5787{ 4726{
5788 struct iwl4965_channel_info *ch; 4727 struct iwl_channel_info *ch;
5789 struct ieee80211_hw_mode *modes; 4728 struct ieee80211_supported_band *sband;
5790 struct ieee80211_channel *channels; 4729 struct ieee80211_channel *channels;
5791 struct ieee80211_channel *geo_ch; 4730 struct ieee80211_channel *geo_ch;
5792 struct ieee80211_rate *rates; 4731 struct ieee80211_rate *rates;
5793 int i = 0; 4732 int i = 0;
5794 enum {
5795 A = 0,
5796 B = 1,
5797 G = 2,
5798 };
5799 int mode_count = 3;
5800 4733
5801 if (priv->modes) { 4734 if (priv->bands[IEEE80211_BAND_2GHZ].n_bitrates ||
4735 priv->bands[IEEE80211_BAND_5GHZ].n_bitrates) {
5802 IWL_DEBUG_INFO("Geography modes already initialized.\n"); 4736 IWL_DEBUG_INFO("Geography modes already initialized.\n");
5803 set_bit(STATUS_GEO_CONFIGURED, &priv->status); 4737 set_bit(STATUS_GEO_CONFIGURED, &priv->status);
5804 return 0; 4738 return 0;
5805 } 4739 }
5806 4740
5807 modes = kzalloc(sizeof(struct ieee80211_hw_mode) * mode_count,
5808 GFP_KERNEL);
5809 if (!modes)
5810 return -ENOMEM;
5811
5812 channels = kzalloc(sizeof(struct ieee80211_channel) * 4741 channels = kzalloc(sizeof(struct ieee80211_channel) *
5813 priv->channel_count, GFP_KERNEL); 4742 priv->channel_count, GFP_KERNEL);
5814 if (!channels) { 4743 if (!channels)
5815 kfree(modes);
5816 return -ENOMEM; 4744 return -ENOMEM;
5817 }
5818 4745
5819 rates = kzalloc((sizeof(struct ieee80211_rate) * (IWL_MAX_RATES + 1)), 4746 rates = kzalloc((sizeof(struct ieee80211_rate) * (IWL_RATE_COUNT + 1)),
5820 GFP_KERNEL); 4747 GFP_KERNEL);
5821 if (!rates) { 4748 if (!rates) {
5822 kfree(modes);
5823 kfree(channels); 4749 kfree(channels);
5824 return -ENOMEM; 4750 return -ENOMEM;
5825 } 4751 }
5826 4752
5827 /* 0 = 802.11a
5828 * 1 = 802.11b
5829 * 2 = 802.11g
5830 */
5831
5832 /* 5.2GHz channels start after the 2.4GHz channels */ 4753 /* 5.2GHz channels start after the 2.4GHz channels */
5833 modes[A].mode = MODE_IEEE80211A; 4754 sband = &priv->bands[IEEE80211_BAND_5GHZ];
5834 modes[A].channels = &channels[ARRAY_SIZE(iwl4965_eeprom_band_1)]; 4755 sband->channels = &channels[ARRAY_SIZE(iwl_eeprom_band_1)];
5835 modes[A].rates = rates; 4756 /* just OFDM */
5836 modes[A].num_rates = 8; /* just OFDM */ 4757 sband->bitrates = &rates[IWL_FIRST_OFDM_RATE];
5837 modes[A].rates = &rates[4]; 4758 sband->n_bitrates = IWL_RATE_COUNT - IWL_FIRST_OFDM_RATE;
5838 modes[A].num_channels = 0;
5839#ifdef CONFIG_IWL4965_HT
5840 iwl4965_init_ht_hw_capab(&modes[A].ht_info, MODE_IEEE80211A);
5841#endif
5842 4759
5843 modes[B].mode = MODE_IEEE80211B; 4760 iwl4965_init_ht_hw_capab(priv, &sband->ht_info, IEEE80211_BAND_5GHZ);
5844 modes[B].channels = channels; 4761
5845 modes[B].rates = rates; 4762 sband = &priv->bands[IEEE80211_BAND_2GHZ];
5846 modes[B].num_rates = 4; /* just CCK */ 4763 sband->channels = channels;
5847 modes[B].num_channels = 0; 4764 /* OFDM & CCK */
5848 4765 sband->bitrates = rates;
5849 modes[G].mode = MODE_IEEE80211G; 4766 sband->n_bitrates = IWL_RATE_COUNT;
5850 modes[G].channels = channels; 4767
5851 modes[G].rates = rates; 4768 iwl4965_init_ht_hw_capab(priv, &sband->ht_info, IEEE80211_BAND_2GHZ);
5852 modes[G].num_rates = 12; /* OFDM & CCK */
5853 modes[G].num_channels = 0;
5854#ifdef CONFIG_IWL4965_HT
5855 iwl4965_init_ht_hw_capab(&modes[G].ht_info, MODE_IEEE80211G);
5856#endif
5857 4769
5858 priv->ieee_channels = channels; 4770 priv->ieee_channels = channels;
5859 priv->ieee_rates = rates; 4771 priv->ieee_rates = rates;
5860 4772
5861 iwl4965_init_hw_rates(priv, rates); 4773 iwl4965_init_hw_rates(priv, rates);
5862 4774
5863 for (i = 0, geo_ch = channels; i < priv->channel_count; i++) { 4775 for (i = 0; i < priv->channel_count; i++) {
5864 ch = &priv->channel_info[i]; 4776 ch = &priv->channel_info[i];
5865 4777
5866 if (!is_channel_valid(ch)) { 4778 /* FIXME: might be removed if scan is OK */
5867 IWL_DEBUG_INFO("Channel %d [%sGHz] is restricted -- " 4779 if (!is_channel_valid(ch))
5868 "skipping.\n",
5869 ch->channel, is_channel_a_band(ch) ?
5870 "5.2" : "2.4");
5871 continue; 4780 continue;
5872 }
5873 4781
5874 if (is_channel_a_band(ch)) { 4782 if (is_channel_a_band(ch))
5875 geo_ch = &modes[A].channels[modes[A].num_channels++]; 4783 sband = &priv->bands[IEEE80211_BAND_5GHZ];
5876 } else { 4784 else
5877 geo_ch = &modes[B].channels[modes[B].num_channels++]; 4785 sband = &priv->bands[IEEE80211_BAND_2GHZ];
5878 modes[G].num_channels++;
5879 }
5880 4786
5881 geo_ch->freq = ieee80211chan2mhz(ch->channel); 4787 geo_ch = &sband->channels[sband->n_channels++];
5882 geo_ch->chan = ch->channel; 4788
5883 geo_ch->power_level = ch->max_power_avg; 4789 geo_ch->center_freq = ieee80211_channel_to_frequency(ch->channel);
5884 geo_ch->antenna_max = 0xff; 4790 geo_ch->max_power = ch->max_power_avg;
4791 geo_ch->max_antenna_gain = 0xff;
4792 geo_ch->hw_value = ch->channel;
5885 4793
5886 if (is_channel_valid(ch)) { 4794 if (is_channel_valid(ch)) {
5887 geo_ch->flag = IEEE80211_CHAN_W_SCAN; 4795 if (!(ch->flags & EEPROM_CHANNEL_IBSS))
5888 if (ch->flags & EEPROM_CHANNEL_IBSS) 4796 geo_ch->flags |= IEEE80211_CHAN_NO_IBSS;
5889 geo_ch->flag |= IEEE80211_CHAN_W_IBSS;
5890 4797
5891 if (ch->flags & EEPROM_CHANNEL_ACTIVE) 4798 if (!(ch->flags & EEPROM_CHANNEL_ACTIVE))
5892 geo_ch->flag |= IEEE80211_CHAN_W_ACTIVE_SCAN; 4799 geo_ch->flags |= IEEE80211_CHAN_PASSIVE_SCAN;
5893 4800
5894 if (ch->flags & EEPROM_CHANNEL_RADAR) 4801 if (ch->flags & EEPROM_CHANNEL_RADAR)
5895 geo_ch->flag |= IEEE80211_CHAN_W_RADAR_DETECT; 4802 geo_ch->flags |= IEEE80211_CHAN_RADAR;
5896 4803
5897 if (ch->max_power_avg > priv->max_channel_txpower_limit) 4804 if (ch->max_power_avg > priv->max_channel_txpower_limit)
5898 priv->max_channel_txpower_limit = 4805 priv->max_channel_txpower_limit =
5899 ch->max_power_avg; 4806 ch->max_power_avg;
4807 } else {
4808 geo_ch->flags |= IEEE80211_CHAN_DISABLED;
5900 } 4809 }
5901 4810
5902 geo_ch->val = geo_ch->flag; 4811 /* Save flags for reg domain usage */
4812 geo_ch->orig_flags = geo_ch->flags;
4813
4814 IWL_DEBUG_INFO("Channel %d Freq=%d[%sGHz] %s flag=0%X\n",
4815 ch->channel, geo_ch->center_freq,
4816 is_channel_a_band(ch) ? "5.2" : "2.4",
4817 geo_ch->flags & IEEE80211_CHAN_DISABLED ?
4818 "restricted" : "valid",
4819 geo_ch->flags);
5903 } 4820 }
5904 4821
5905 if ((modes[A].num_channels == 0) && priv->is_abg) { 4822 if ((priv->bands[IEEE80211_BAND_5GHZ].n_channels == 0) &&
4823 priv->cfg->sku & IWL_SKU_A) {
5906 printk(KERN_INFO DRV_NAME 4824 printk(KERN_INFO DRV_NAME
5907 ": Incorrectly detected BG card as ABG. Please send " 4825 ": Incorrectly detected BG card as ABG. Please send "
5908 "your PCI ID 0x%04X:0x%04X to maintainer.\n", 4826 "your PCI ID 0x%04X:0x%04X to maintainer.\n",
5909 priv->pci_dev->device, priv->pci_dev->subsystem_device); 4827 priv->pci_dev->device, priv->pci_dev->subsystem_device);
5910 priv->is_abg = 0; 4828 priv->cfg->sku &= ~IWL_SKU_A;
5911 } 4829 }
5912 4830
5913 printk(KERN_INFO DRV_NAME 4831 printk(KERN_INFO DRV_NAME
5914 ": Tunable channels: %d 802.11bg, %d 802.11a channels\n", 4832 ": Tunable channels: %d 802.11bg, %d 802.11a channels\n",
5915 modes[G].num_channels, modes[A].num_channels); 4833 priv->bands[IEEE80211_BAND_2GHZ].n_channels,
4834 priv->bands[IEEE80211_BAND_5GHZ].n_channels);
5916 4835
5917 /* 4836 if (priv->bands[IEEE80211_BAND_2GHZ].n_channels)
5918 * NOTE: We register these in preference of order -- the 4837 priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
5919 * stack doesn't currently (as of 7.0.6 / Apr 24 '07) pick 4838 &priv->bands[IEEE80211_BAND_2GHZ];
5920 * a phymode based on rates or AP capabilities but seems to 4839 if (priv->bands[IEEE80211_BAND_5GHZ].n_channels)
5921 * configure it purely on if the channel being configured 4840 priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
5922 * is supported by a mode -- and the first match is taken 4841 &priv->bands[IEEE80211_BAND_5GHZ];
5923 */
5924 4842
5925 if (modes[G].num_channels)
5926 ieee80211_register_hwmode(priv->hw, &modes[G]);
5927 if (modes[B].num_channels)
5928 ieee80211_register_hwmode(priv->hw, &modes[B]);
5929 if (modes[A].num_channels)
5930 ieee80211_register_hwmode(priv->hw, &modes[A]);
5931
5932 priv->modes = modes;
5933 set_bit(STATUS_GEO_CONFIGURED, &priv->status); 4843 set_bit(STATUS_GEO_CONFIGURED, &priv->status);
5934 4844
5935 return 0; 4845 return 0;
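
The geo setup above relies on ieee80211_channel_to_frequency() to turn channel numbers into centre frequencies: 2.4 GHz channels sit at 2407 + 5n MHz (channel 14 at 2484) and 5 GHz channels at 5000 + 5n MHz. A quick sketch of that mapping:

/*
 * Channel-number to centre-frequency sketch for the bands used above.
 */
#include <stdio.h>

static int chan_to_freq(int chan)
{
	if (chan == 14)
		return 2484;
	if (chan < 14)
		return 2407 + chan * 5;
	return 5000 + chan * 5;
}

int main(void)
{
	int ch[] = { 1, 6, 14, 36, 149 };
	unsigned i;

	for (i = 0; i < sizeof(ch) / sizeof(ch[0]); i++)
		printf("channel %3d -> %d MHz\n", ch[i], chan_to_freq(ch[i]));
	return 0;
}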
@@ -5938,9 +4848,8 @@ static int iwl4965_init_geos(struct iwl4965_priv *priv)
5938/* 4848/*
5939 * iwl4965_free_geos - undo allocations in iwl4965_init_geos 4849 * iwl4965_free_geos - undo allocations in iwl4965_init_geos
5940 */ 4850 */
5941static void iwl4965_free_geos(struct iwl4965_priv *priv) 4851void iwl4965_free_geos(struct iwl_priv *priv)
5942{ 4852{
5943 kfree(priv->modes);
5944 kfree(priv->ieee_channels); 4853 kfree(priv->ieee_channels);
5945 kfree(priv->ieee_rates); 4854 kfree(priv->ieee_rates);
5946 clear_bit(STATUS_GEO_CONFIGURED, &priv->status); 4855 clear_bit(STATUS_GEO_CONFIGURED, &priv->status);
@@ -5952,7 +4861,7 @@ static void iwl4965_free_geos(struct iwl4965_priv *priv)
5952 * 4861 *
5953 ******************************************************************************/ 4862 ******************************************************************************/
5954 4863
5955static void iwl4965_dealloc_ucode_pci(struct iwl4965_priv *priv) 4864static void iwl4965_dealloc_ucode_pci(struct iwl_priv *priv)
5956{ 4865{
5957 iwl_free_fw_desc(priv->pci_dev, &priv->ucode_code); 4866 iwl_free_fw_desc(priv->pci_dev, &priv->ucode_code);
5958 iwl_free_fw_desc(priv->pci_dev, &priv->ucode_data); 4867 iwl_free_fw_desc(priv->pci_dev, &priv->ucode_data);
@@ -5966,7 +4875,7 @@ static void iwl4965_dealloc_ucode_pci(struct iwl4965_priv *priv)
5966 * iwl4965_verify_inst_full - verify runtime uCode image in card vs. host, 4875 * iwl4965_verify_inst_full - verify runtime uCode image in card vs. host,
5967 * looking at all data. 4876 * looking at all data.
5968 */ 4877 */
5969static int iwl4965_verify_inst_full(struct iwl4965_priv *priv, __le32 *image, 4878static int iwl4965_verify_inst_full(struct iwl_priv *priv, __le32 *image,
5970 u32 len) 4879 u32 len)
5971{ 4880{
5972 u32 val; 4881 u32 val;
@@ -5976,18 +4885,18 @@ static int iwl4965_verify_inst_full(struct iwl4965_priv *priv, __le32 *image,
5976 4885
5977 IWL_DEBUG_INFO("ucode inst image size is %u\n", len); 4886 IWL_DEBUG_INFO("ucode inst image size is %u\n", len);
5978 4887
5979 rc = iwl4965_grab_nic_access(priv); 4888 rc = iwl_grab_nic_access(priv);
5980 if (rc) 4889 if (rc)
5981 return rc; 4890 return rc;
5982 4891
5983 iwl4965_write_direct32(priv, HBUS_TARG_MEM_RADDR, RTC_INST_LOWER_BOUND); 4892 iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR, RTC_INST_LOWER_BOUND);
5984 4893
5985 errcnt = 0; 4894 errcnt = 0;
5986 for (; len > 0; len -= sizeof(u32), image++) { 4895 for (; len > 0; len -= sizeof(u32), image++) {
5987 /* read data comes through single port, auto-incr addr */ 4896 /* read data comes through single port, auto-incr addr */
5988 /* NOTE: Use the debugless read so we don't flood kernel log 4897 /* NOTE: Use the debugless read so we don't flood kernel log
5989 * if IWL_DL_IO is set */ 4898 * if IWL_DL_IO is set */
5990 val = _iwl4965_read_direct32(priv, HBUS_TARG_MEM_RDAT); 4899 val = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
5991 if (val != le32_to_cpu(*image)) { 4900 if (val != le32_to_cpu(*image)) {
5992 IWL_ERROR("uCode INST section is invalid at " 4901 IWL_ERROR("uCode INST section is invalid at "
5993 "offset 0x%x, is 0x%x, s/b 0x%x\n", 4902 "offset 0x%x, is 0x%x, s/b 0x%x\n",
@@ -5999,7 +4908,7 @@ static int iwl4965_verify_inst_full(struct iwl4965_priv *priv, __le32 *image,
5999 } 4908 }
6000 } 4909 }
6001 4910
6002 iwl4965_release_nic_access(priv); 4911 iwl_release_nic_access(priv);
6003 4912
6004 if (!errcnt) 4913 if (!errcnt)
6005 IWL_DEBUG_INFO 4914 IWL_DEBUG_INFO
@@ -6014,7 +4923,7 @@ static int iwl4965_verify_inst_full(struct iwl4965_priv *priv, __le32 *image,
6014 * using sample data 100 bytes apart. If these sample points are good, 4923 * using sample data 100 bytes apart. If these sample points are good,
6015 * it's a pretty good bet that everything between them is good, too. 4924 * it's a pretty good bet that everything between them is good, too.
6016 */ 4925 */
6017static int iwl4965_verify_inst_sparse(struct iwl4965_priv *priv, __le32 *image, u32 len) 4926static int iwl4965_verify_inst_sparse(struct iwl_priv *priv, __le32 *image, u32 len)
6018{ 4927{
6019 u32 val; 4928 u32 val;
6020 int rc = 0; 4929 int rc = 0;
@@ -6023,7 +4932,7 @@ static int iwl4965_verify_inst_sparse(struct iwl4965_priv *priv, __le32 *image,
6023 4932
6024 IWL_DEBUG_INFO("ucode inst image size is %u\n", len); 4933 IWL_DEBUG_INFO("ucode inst image size is %u\n", len);
6025 4934
6026 rc = iwl4965_grab_nic_access(priv); 4935 rc = iwl_grab_nic_access(priv);
6027 if (rc) 4936 if (rc)
6028 return rc; 4937 return rc;
6029 4938
@@ -6031,9 +4940,9 @@ static int iwl4965_verify_inst_sparse(struct iwl4965_priv *priv, __le32 *image,
6031 /* read data comes through single port, auto-incr addr */ 4940 /* read data comes through single port, auto-incr addr */
6032 /* NOTE: Use the debugless read so we don't flood kernel log 4941 /* NOTE: Use the debugless read so we don't flood kernel log
6033 * if IWL_DL_IO is set */ 4942 * if IWL_DL_IO is set */
6034 iwl4965_write_direct32(priv, HBUS_TARG_MEM_RADDR, 4943 iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR,
6035 i + RTC_INST_LOWER_BOUND); 4944 i + RTC_INST_LOWER_BOUND);
6036 val = _iwl4965_read_direct32(priv, HBUS_TARG_MEM_RDAT); 4945 val = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
6037 if (val != le32_to_cpu(*image)) { 4946 if (val != le32_to_cpu(*image)) {
6038#if 0 /* Enable this if you want to see details */ 4947#if 0 /* Enable this if you want to see details */
6039 IWL_ERROR("uCode INST section is invalid at " 4948 IWL_ERROR("uCode INST section is invalid at "
@@ -6047,7 +4956,7 @@ static int iwl4965_verify_inst_sparse(struct iwl4965_priv *priv, __le32 *image,
6047 } 4956 }
6048 } 4957 }
6049 4958
6050 iwl4965_release_nic_access(priv); 4959 iwl_release_nic_access(priv);
6051 4960
6052 return rc; 4961 return rc;
6053} 4962}
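
The comment above describes a spot-check: rather than comparing every word of the uCode image against SRAM, the driver samples one 32-bit word every 100 bytes. A minimal userspace sketch of that idea follows; it is not the driver code, and the function and constant names are invented for the example.

/*
 * Illustrative sketch of sparse verification: compare one 32-bit word
 * every 100 bytes instead of the whole image.  Names are made up.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SAMPLE_STRIDE 100   /* bytes between sampled words */

static int sparse_compare(const uint32_t *sram, const uint32_t *image,
                          size_t len_bytes)
{
    size_t i;
    int errcnt = 0;

    for (i = 0; i + sizeof(uint32_t) <= len_bytes; i += SAMPLE_STRIDE) {
        size_t word = i / sizeof(uint32_t);
        if (sram[word] != image[word])
            errcnt++;
    }
    return errcnt;
}

int main(void)
{
    uint32_t image[256], sram[256];
    size_t n;

    for (n = 0; n < 256; n++)
        image[n] = (uint32_t)n * 0x01010101u;
    memcpy(sram, image, sizeof(sram));
    sram[50] ^= 0xdeadbeef;          /* corrupt one sampled word */

    printf("mismatched samples: %d\n",
           sparse_compare(sram, image, sizeof(image)));
    return 0;
}

The trade-off is speed against a small chance of missing a localized corruption, which is why the full word-by-word variant shown earlier is still kept alongside it.
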
@@ -6057,7 +4966,7 @@ static int iwl4965_verify_inst_sparse(struct iwl4965_priv *priv, __le32 *image,
6057 * iwl4965_verify_ucode - determine which instruction image is in SRAM, 4966 * iwl4965_verify_ucode - determine which instruction image is in SRAM,
6058 * and verify its contents 4967 * and verify its contents
6059 */ 4968 */
6060static int iwl4965_verify_ucode(struct iwl4965_priv *priv) 4969static int iwl4965_verify_ucode(struct iwl_priv *priv)
6061{ 4970{
6062 __le32 *image; 4971 __le32 *image;
6063 u32 len; 4972 u32 len;
@@ -6102,160 +5011,10 @@ static int iwl4965_verify_ucode(struct iwl4965_priv *priv)
6102 return rc; 5011 return rc;
6103} 5012}
6104 5013
6105 5014static void iwl4965_nic_start(struct iwl_priv *priv)
6106/* check contents of special bootstrap uCode SRAM */
6107static int iwl4965_verify_bsm(struct iwl4965_priv *priv)
6108{
6109 __le32 *image = priv->ucode_boot.v_addr;
6110 u32 len = priv->ucode_boot.len;
6111 u32 reg;
6112 u32 val;
6113
6114 IWL_DEBUG_INFO("Begin verify bsm\n");
6115
6116 /* verify BSM SRAM contents */
6117 val = iwl4965_read_prph(priv, BSM_WR_DWCOUNT_REG);
6118 for (reg = BSM_SRAM_LOWER_BOUND;
6119 reg < BSM_SRAM_LOWER_BOUND + len;
6120 reg += sizeof(u32), image ++) {
6121 val = iwl4965_read_prph(priv, reg);
6122 if (val != le32_to_cpu(*image)) {
6123 IWL_ERROR("BSM uCode verification failed at "
6124 "addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n",
6125 BSM_SRAM_LOWER_BOUND,
6126 reg - BSM_SRAM_LOWER_BOUND, len,
6127 val, le32_to_cpu(*image));
6128 return -EIO;
6129 }
6130 }
6131
6132 IWL_DEBUG_INFO("BSM bootstrap uCode image OK\n");
6133
6134 return 0;
6135}
6136
6137/**
6138 * iwl4965_load_bsm - Load bootstrap instructions
6139 *
6140 * BSM operation:
6141 *
6142 * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program
6143 * in special SRAM that does not power down during RFKILL. When powering back
6144 * up after power-saving sleeps (or during initial uCode load), the BSM loads
6145 * the bootstrap program into the on-board processor, and starts it.
6146 *
6147 * The bootstrap program loads (via DMA) instructions and data for a new
6148 * program from host DRAM locations indicated by the host driver in the
6149 * BSM_DRAM_* registers. Once the new program is loaded, it starts
6150 * automatically.
6151 *
6152 * When initializing the NIC, the host driver points the BSM to the
6153 * "initialize" uCode image. This uCode sets up some internal data, then
6154 * notifies host via "initialize alive" that it is complete.
6155 *
6156 * The host then replaces the BSM_DRAM_* pointer values to point to the
6157 * normal runtime uCode instructions and a backup uCode data cache buffer
6158 * (filled initially with starting data values for the on-board processor),
6159 * then triggers the "initialize" uCode to load and launch the runtime uCode,
6160 * which begins normal operation.
6161 *
6162 * When doing a power-save shutdown, runtime uCode saves data SRAM into
6163 * the backup data cache in DRAM before SRAM is powered down.
6164 *
6165 * When powering back up, the BSM loads the bootstrap program. This reloads
6166 * the runtime uCode instructions and the backup data cache into SRAM,
6167 * and re-launches the runtime uCode from where it left off.
6168 */
6169static int iwl4965_load_bsm(struct iwl4965_priv *priv)
6170{
6171 __le32 *image = priv->ucode_boot.v_addr;
6172 u32 len = priv->ucode_boot.len;
6173 dma_addr_t pinst;
6174 dma_addr_t pdata;
6175 u32 inst_len;
6176 u32 data_len;
6177 int rc;
6178 int i;
6179 u32 done;
6180 u32 reg_offset;
6181
6182 IWL_DEBUG_INFO("Begin load bsm\n");
6183
6184 /* make sure bootstrap program is no larger than BSM's SRAM size */
6185 if (len > IWL_MAX_BSM_SIZE)
6186 return -EINVAL;
6187
6188 /* Tell bootstrap uCode where to find the "Initialize" uCode
6189 * in host DRAM ... host DRAM physical address bits 35:4 for 4965.
6190 * NOTE: iwl4965_initialize_alive_start() will replace these values,
6191 * after the "initialize" uCode has run, to point to
6192 * runtime/protocol instructions and backup data cache. */
6193 pinst = priv->ucode_init.p_addr >> 4;
6194 pdata = priv->ucode_init_data.p_addr >> 4;
6195 inst_len = priv->ucode_init.len;
6196 data_len = priv->ucode_init_data.len;
6197
6198 rc = iwl4965_grab_nic_access(priv);
6199 if (rc)
6200 return rc;
6201
6202 iwl4965_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
6203 iwl4965_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
6204 iwl4965_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, inst_len);
6205 iwl4965_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, data_len);
6206
6207 /* Fill BSM memory with bootstrap instructions */
6208 for (reg_offset = BSM_SRAM_LOWER_BOUND;
6209 reg_offset < BSM_SRAM_LOWER_BOUND + len;
6210 reg_offset += sizeof(u32), image++)
6211 _iwl4965_write_prph(priv, reg_offset,
6212 le32_to_cpu(*image));
6213
6214 rc = iwl4965_verify_bsm(priv);
6215 if (rc) {
6216 iwl4965_release_nic_access(priv);
6217 return rc;
6218 }
6219
6220 /* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */
6221 iwl4965_write_prph(priv, BSM_WR_MEM_SRC_REG, 0x0);
6222 iwl4965_write_prph(priv, BSM_WR_MEM_DST_REG,
6223 RTC_INST_LOWER_BOUND);
6224 iwl4965_write_prph(priv, BSM_WR_DWCOUNT_REG, len / sizeof(u32));
6225
6226 /* Load bootstrap code into instruction SRAM now,
6227 * to prepare to load "initialize" uCode */
6228 iwl4965_write_prph(priv, BSM_WR_CTRL_REG,
6229 BSM_WR_CTRL_REG_BIT_START);
6230
6231 /* Wait for load of bootstrap uCode to finish */
6232 for (i = 0; i < 100; i++) {
6233 done = iwl4965_read_prph(priv, BSM_WR_CTRL_REG);
6234 if (!(done & BSM_WR_CTRL_REG_BIT_START))
6235 break;
6236 udelay(10);
6237 }
6238 if (i < 100)
6239 IWL_DEBUG_INFO("BSM write complete, poll %d iterations\n", i);
6240 else {
6241 IWL_ERROR("BSM write did not complete!\n");
6242 return -EIO;
6243 }
6244
6245 /* Enable future boot loads whenever power management unit triggers it
6246 * (e.g. when powering back up after power-save shutdown) */
6247 iwl4965_write_prph(priv, BSM_WR_CTRL_REG,
6248 BSM_WR_CTRL_REG_BIT_START_EN);
6249
6250 iwl4965_release_nic_access(priv);
6251
6252 return 0;
6253}
6254
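
The block comment above walks through the bootstrap flow: point the BSM at an image in host memory, copy the bootstrap program into BSM SRAM, verify it, trigger the load, poll for completion, and finally arm automatic reload for power-save wakeups. Below is a self-contained userspace model of that sequence; the "device" is a plain struct, and every register, flag, and field name is invented for illustration and does not match the real iwlwifi register map.

/*
 * Host-side model of the bootstrap-load sequence documented above.
 * All names are invented; the hardware is faked so the example runs.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BSM_SRAM_WORDS   64
#define CTRL_START       (1u << 31)
#define CTRL_START_EN    (1u << 30)

struct fake_nic {
    uint32_t dram_inst_ptr;      /* where to fetch the next image from */
    uint32_t dram_data_ptr;
    uint32_t ctrl;               /* start / auto-start-enable bits */
    uint32_t sram[BSM_SRAM_WORDS];
    int busy_polls;              /* how long the fake load "takes" */
};

/* Fake register read: the device clears the START bit once "done". */
static uint32_t nic_read_ctrl(struct fake_nic *nic)
{
    if (nic->busy_polls > 0)
        nic->busy_polls--;
    return nic->busy_polls ? (nic->ctrl | CTRL_START)
                           : (nic->ctrl & ~CTRL_START);
}

static int load_bootstrap(struct fake_nic *nic, const uint32_t *image,
                          size_t words)
{
    size_t i;

    if (words > BSM_SRAM_WORDS)
        return -1;                       /* image too big for BSM SRAM */

    /* 1. Tell the BSM where the next image lives in host memory. */
    nic->dram_inst_ptr = 0x1000;         /* pretend DMA addresses */
    nic->dram_data_ptr = 0x2000;

    /* 2. Copy the bootstrap program into BSM SRAM and verify it. */
    memcpy(nic->sram, image, words * sizeof(uint32_t));
    if (memcmp(nic->sram, image, words * sizeof(uint32_t)) != 0)
        return -1;

    /* 3. Kick the load and poll (bounded) until the device is done. */
    nic->ctrl = CTRL_START;
    nic->busy_polls = 5;
    for (i = 0; i < 100; i++)
        if (!(nic_read_ctrl(nic) & CTRL_START))
            break;
    if (i == 100)
        return -1;                       /* load never completed */

    /* 4. Arm automatic reload for future power-save wakeups. */
    nic->ctrl = CTRL_START_EN;
    printf("bootstrap loaded after %zu polls\n", i);
    return 0;
}

int main(void)
{
    struct fake_nic nic = { 0 };
    uint32_t image[16] = { 0xdeadbeef };

    return load_bootstrap(&nic, image, 16) ? 1 : 0;
}

The bounded poll mirrors the driver's pattern of giving up after a fixed number of iterations instead of spinning forever.
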
6255static void iwl4965_nic_start(struct iwl4965_priv *priv)
6256{ 5015{
6257 /* Remove all resets to allow NIC to operate */ 5016 /* Remove all resets to allow NIC to operate */
6258 iwl4965_write32(priv, CSR_RESET, 0); 5017 iwl_write32(priv, CSR_RESET, 0);
6259} 5018}
6260 5019
6261 5020
@@ -6264,12 +5023,12 @@ static void iwl4965_nic_start(struct iwl4965_priv *priv)
6264 * 5023 *
6265 * Copy into buffers for card to fetch via bus-mastering 5024 * Copy into buffers for card to fetch via bus-mastering
6266 */ 5025 */
6267static int iwl4965_read_ucode(struct iwl4965_priv *priv) 5026static int iwl4965_read_ucode(struct iwl_priv *priv)
6268{ 5027{
6269 struct iwl4965_ucode *ucode; 5028 struct iwl4965_ucode *ucode;
6270 int ret; 5029 int ret;
6271 const struct firmware *ucode_raw; 5030 const struct firmware *ucode_raw;
6272 const char *name = "iwlwifi-4965" IWL4965_UCODE_API ".ucode"; 5031 const char *name = priv->cfg->fw_name;
6273 u8 *src; 5032 u8 *src;
6274 size_t len; 5033 size_t len;
6275 u32 ver, inst_size, data_size, init_size, init_data_size, boot_size; 5034 u32 ver, inst_size, data_size, init_size, init_data_size, boot_size;
@@ -6465,7 +5224,7 @@ static int iwl4965_read_ucode(struct iwl4965_priv *priv)
6465 * We need to replace them to load runtime uCode inst and data, 5224 * We need to replace them to load runtime uCode inst and data,
6466 * and to save runtime data when powering down. 5225 * and to save runtime data when powering down.
6467 */ 5226 */
6468static int iwl4965_set_ucode_ptrs(struct iwl4965_priv *priv) 5227static int iwl4965_set_ucode_ptrs(struct iwl_priv *priv)
6469{ 5228{
6470 dma_addr_t pinst; 5229 dma_addr_t pinst;
6471 dma_addr_t pdata; 5230 dma_addr_t pdata;
@@ -6477,24 +5236,24 @@ static int iwl4965_set_ucode_ptrs(struct iwl4965_priv *priv)
6477 pdata = priv->ucode_data_backup.p_addr >> 4; 5236 pdata = priv->ucode_data_backup.p_addr >> 4;
6478 5237
6479 spin_lock_irqsave(&priv->lock, flags); 5238 spin_lock_irqsave(&priv->lock, flags);
6480 rc = iwl4965_grab_nic_access(priv); 5239 rc = iwl_grab_nic_access(priv);
6481 if (rc) { 5240 if (rc) {
6482 spin_unlock_irqrestore(&priv->lock, flags); 5241 spin_unlock_irqrestore(&priv->lock, flags);
6483 return rc; 5242 return rc;
6484 } 5243 }
6485 5244
6486 /* Tell bootstrap uCode where to find image to load */ 5245 /* Tell bootstrap uCode where to find image to load */
6487 iwl4965_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst); 5246 iwl_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
6488 iwl4965_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata); 5247 iwl_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
6489 iwl4965_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, 5248 iwl_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG,
6490 priv->ucode_data.len); 5249 priv->ucode_data.len);
6491 5250
6492 /* Inst bytecount must be last to set up, bit 31 signals uCode 5251 /* Inst bytecount must be last to set up, bit 31 signals uCode
6493 * that all new ptr/size info is in place */ 5252 * that all new ptr/size info is in place */
6494 iwl4965_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, 5253 iwl_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG,
6495 priv->ucode_code.len | BSM_DRAM_INST_LOAD); 5254 priv->ucode_code.len | BSM_DRAM_INST_LOAD);
6496 5255
6497 iwl4965_release_nic_access(priv); 5256 iwl_release_nic_access(priv);
6498 5257
6499 spin_unlock_irqrestore(&priv->lock, flags); 5258 spin_unlock_irqrestore(&priv->lock, flags);
6500 5259
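
The comments above spell out an ordering rule: the pointer and size registers are written first, and the instruction byte count, with a load flag ORed in (bit 31, per the comment), is written last so the consumer only acts once everything else is in place. Here is a small standalone sketch of that publish-then-signal pattern; the struct, field, and flag names are invented and the example runs entirely in userspace.

/*
 * Sketch of the hand-off pattern: fill every field first, then make the
 * flagged byte count the very last store.  Names are invented.
 */
#include <stdint.h>
#include <stdio.h>

#define INST_LOAD_FLAG  (1u << 31)

struct ucode_desc {
    uint32_t inst_ptr;
    uint32_t data_ptr;
    uint32_t data_bytecount;
    uint32_t inst_bytecount;     /* written last; bit 31 = "go" */
};

static void publish(struct ucode_desc *d, uint32_t pinst, uint32_t pdata,
                    uint32_t inst_len, uint32_t data_len)
{
    d->inst_ptr = pinst;
    d->data_ptr = pdata;
    d->data_bytecount = data_len;
    /* Final store: length plus the load flag tells the consumer to go. */
    d->inst_bytecount = inst_len | INST_LOAD_FLAG;
}

static void consume(const struct ucode_desc *d)
{
    if (!(d->inst_bytecount & INST_LOAD_FLAG)) {
        printf("descriptor not ready\n");
        return;
    }
    printf("load %u inst bytes from 0x%x, %u data bytes from 0x%x\n",
           (unsigned)(d->inst_bytecount & ~INST_LOAD_FLAG),
           (unsigned)d->inst_ptr,
           (unsigned)d->data_bytecount,
           (unsigned)d->data_ptr);
}

int main(void)
{
    struct ucode_desc d = { 0 };

    consume(&d);                         /* not ready yet */
    publish(&d, 0x1000, 0x2000, 4096, 1024);
    consume(&d);
    return 0;
}

On real hardware the same idea usually also needs a write barrier before the final store; the sketch leaves that out since it is a single-threaded model.
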
@@ -6514,7 +5273,7 @@ static int iwl4965_set_ucode_ptrs(struct iwl4965_priv *priv)
6514 * 5273 *
6515 * Tell "initialize" uCode to go ahead and load the runtime uCode. 5274 * Tell "initialize" uCode to go ahead and load the runtime uCode.
6516*/ 5275*/
6517static void iwl4965_init_alive_start(struct iwl4965_priv *priv) 5276static void iwl4965_init_alive_start(struct iwl_priv *priv)
6518{ 5277{
6519 /* Check alive response for "valid" sign from uCode */ 5278 /* Check alive response for "valid" sign from uCode */
6520 if (priv->card_alive_init.is_valid != UCODE_VALID_OK) { 5279 if (priv->card_alive_init.is_valid != UCODE_VALID_OK) {
@@ -6559,9 +5318,9 @@ static void iwl4965_init_alive_start(struct iwl4965_priv *priv)
6559 * from protocol/runtime uCode (initialization uCode's 5318 * from protocol/runtime uCode (initialization uCode's
6560 * Alive gets handled by iwl4965_init_alive_start()). 5319 * Alive gets handled by iwl4965_init_alive_start()).
6561 */ 5320 */
6562static void iwl4965_alive_start(struct iwl4965_priv *priv) 5321static void iwl4965_alive_start(struct iwl_priv *priv)
6563{ 5322{
6564 int rc = 0; 5323 int ret = 0;
6565 5324
6566 IWL_DEBUG_INFO("Runtime Alive received.\n"); 5325 IWL_DEBUG_INFO("Runtime Alive received.\n");
6567 5326
@@ -6582,12 +5341,12 @@ static void iwl4965_alive_start(struct iwl4965_priv *priv)
6582 goto restart; 5341 goto restart;
6583 } 5342 }
6584 5343
6585 iwl4965_clear_stations_table(priv); 5344 iwlcore_clear_stations_table(priv);
6586 5345
6587 rc = iwl4965_alive_notify(priv); 5346 ret = priv->cfg->ops->lib->alive_notify(priv);
6588 if (rc) { 5347 if (ret) {
6589 IWL_WARNING("Could not complete ALIVE transition [ntf]: %d\n", 5348 IWL_WARNING("Could not complete ALIVE transition [ntf]: %d\n",
6590 rc); 5349 ret);
6591 goto restart; 5350 goto restart;
6592 } 5351 }
6593 5352
@@ -6597,7 +5356,7 @@ static void iwl4965_alive_start(struct iwl4965_priv *priv)
6597 /* Clear out the uCode error bit if it is set */ 5356 /* Clear out the uCode error bit if it is set */
6598 clear_bit(STATUS_FW_ERROR, &priv->status); 5357 clear_bit(STATUS_FW_ERROR, &priv->status);
6599 5358
6600 if (iwl4965_is_rfkill(priv)) 5359 if (iwl_is_rfkill(priv))
6601 return; 5360 return;
6602 5361
6603 ieee80211_start_queues(priv->hw); 5362 ieee80211_start_queues(priv->hw);
@@ -6607,7 +5366,7 @@ static void iwl4965_alive_start(struct iwl4965_priv *priv)
6607 5366
6608 iwl4965_send_power_mode(priv, IWL_POWER_LEVEL(priv->power_mode)); 5367 iwl4965_send_power_mode(priv, IWL_POWER_LEVEL(priv->power_mode));
6609 5368
6610 if (iwl4965_is_associated(priv)) { 5369 if (iwl_is_associated(priv)) {
6611 struct iwl4965_rxon_cmd *active_rxon = 5370 struct iwl4965_rxon_cmd *active_rxon =
6612 (struct iwl4965_rxon_cmd *)(&priv->active_rxon); 5371 (struct iwl4965_rxon_cmd *)(&priv->active_rxon);
6613 5372
@@ -6631,6 +5390,8 @@ static void iwl4965_alive_start(struct iwl4965_priv *priv)
6631 5390
6632 iwl4965_rf_kill_ct_config(priv); 5391 iwl4965_rf_kill_ct_config(priv);
6633 5392
5393 iwl_leds_register(priv);
5394
6634 IWL_DEBUG_INFO("ALIVE processing complete.\n"); 5395 IWL_DEBUG_INFO("ALIVE processing complete.\n");
6635 set_bit(STATUS_READY, &priv->status); 5396 set_bit(STATUS_READY, &priv->status);
6636 wake_up_interruptible(&priv->wait_command_queue); 5397 wake_up_interruptible(&priv->wait_command_queue);
@@ -6638,15 +5399,17 @@ static void iwl4965_alive_start(struct iwl4965_priv *priv)
6638 if (priv->error_recovering) 5399 if (priv->error_recovering)
6639 iwl4965_error_recovery(priv); 5400 iwl4965_error_recovery(priv);
6640 5401
5402 iwlcore_low_level_notify(priv, IWLCORE_START_EVT);
5403 ieee80211_notify_mac(priv->hw, IEEE80211_NOTIFY_RE_ASSOC);
6641 return; 5404 return;
6642 5405
6643 restart: 5406 restart:
6644 queue_work(priv->workqueue, &priv->restart); 5407 queue_work(priv->workqueue, &priv->restart);
6645} 5408}
6646 5409
6647static void iwl4965_cancel_deferred_work(struct iwl4965_priv *priv); 5410static void iwl4965_cancel_deferred_work(struct iwl_priv *priv);
6648 5411
6649static void __iwl4965_down(struct iwl4965_priv *priv) 5412static void __iwl4965_down(struct iwl_priv *priv)
6650{ 5413{
6651 unsigned long flags; 5414 unsigned long flags;
6652 int exit_pending = test_bit(STATUS_EXIT_PENDING, &priv->status); 5415 int exit_pending = test_bit(STATUS_EXIT_PENDING, &priv->status);
@@ -6659,7 +5422,11 @@ static void __iwl4965_down(struct iwl4965_priv *priv)
6659 if (!exit_pending) 5422 if (!exit_pending)
6660 set_bit(STATUS_EXIT_PENDING, &priv->status); 5423 set_bit(STATUS_EXIT_PENDING, &priv->status);
6661 5424
6662 iwl4965_clear_stations_table(priv); 5425 iwl_leds_unregister(priv);
5426
5427 iwlcore_low_level_notify(priv, IWLCORE_STOP_EVT);
5428
5429 iwlcore_clear_stations_table(priv);
6663 5430
6664 /* Unblock any waiting calls */ 5431 /* Unblock any waiting calls */
6665 wake_up_interruptible_all(&priv->wait_command_queue); 5432 wake_up_interruptible_all(&priv->wait_command_queue);
@@ -6670,17 +5437,20 @@ static void __iwl4965_down(struct iwl4965_priv *priv)
6670 clear_bit(STATUS_EXIT_PENDING, &priv->status); 5437 clear_bit(STATUS_EXIT_PENDING, &priv->status);
6671 5438
6672 /* stop and reset the on-board processor */ 5439 /* stop and reset the on-board processor */
6673 iwl4965_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET); 5440 iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
6674 5441
6675 /* tell the device to stop sending interrupts */ 5442 /* tell the device to stop sending interrupts */
5443 spin_lock_irqsave(&priv->lock, flags);
6676 iwl4965_disable_interrupts(priv); 5444 iwl4965_disable_interrupts(priv);
5445 spin_unlock_irqrestore(&priv->lock, flags);
5446 iwl_synchronize_irq(priv);
6677 5447
6678 if (priv->mac80211_registered) 5448 if (priv->mac80211_registered)
6679 ieee80211_stop_queues(priv->hw); 5449 ieee80211_stop_queues(priv->hw);
6680 5450
6681 /* If we have not previously called iwl4965_init() then 5451 /* If we have not previously called iwl4965_init() then
6682 * clear all bits but the RF Kill and SUSPEND bits and return */ 5452 * clear all bits but the RF Kill and SUSPEND bits and return */
6683 if (!iwl4965_is_init(priv)) { 5453 if (!iwl_is_init(priv)) {
6684 priv->status = test_bit(STATUS_RF_KILL_HW, &priv->status) << 5454 priv->status = test_bit(STATUS_RF_KILL_HW, &priv->status) <<
6685 STATUS_RF_KILL_HW | 5455 STATUS_RF_KILL_HW |
6686 test_bit(STATUS_RF_KILL_SW, &priv->status) << 5456 test_bit(STATUS_RF_KILL_SW, &priv->status) <<
@@ -6706,7 +5476,7 @@ static void __iwl4965_down(struct iwl4965_priv *priv)
6706 STATUS_FW_ERROR; 5476 STATUS_FW_ERROR;
6707 5477
6708 spin_lock_irqsave(&priv->lock, flags); 5478 spin_lock_irqsave(&priv->lock, flags);
6709 iwl4965_clear_bit(priv, CSR_GP_CNTRL, 5479 iwl_clear_bit(priv, CSR_GP_CNTRL,
6710 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 5480 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
6711 spin_unlock_irqrestore(&priv->lock, flags); 5481 spin_unlock_irqrestore(&priv->lock, flags);
6712 5482
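
A few lines above, the shutdown path rebuilds priv->status so that only the RF-kill and suspend related bits survive the reset. The short standalone example below, with invented bit positions, shows that rebuilding the word bit by bit is equivalent to masking it with the set of bits to keep.

/*
 * Keep only selected status bits across a reset.  Bit positions and the
 * helper name are invented for the example.
 */
#include <stdint.h>
#include <stdio.h>

#define ST_RF_KILL_HW  0
#define ST_RF_KILL_SW  1
#define ST_IN_SUSPEND  2
#define ST_READY       3     /* example of a bit that must NOT survive */

static int get_bit(int nr, uint32_t word)
{
    return (word >> nr) & 1;
}

int main(void)
{
    uint32_t status = (1u << ST_RF_KILL_SW) | (1u << ST_READY);

    /* Rebuild from the bits we want to keep... */
    uint32_t rebuilt = get_bit(ST_RF_KILL_HW, status) << ST_RF_KILL_HW |
                       get_bit(ST_RF_KILL_SW, status) << ST_RF_KILL_SW |
                       get_bit(ST_IN_SUSPEND, status) << ST_IN_SUSPEND;

    /* ...which is the same as masking with those bits. */
    uint32_t masked = status & ((1u << ST_RF_KILL_HW) |
                                (1u << ST_RF_KILL_SW) |
                                (1u << ST_IN_SUSPEND));

    printf("rebuilt=0x%x masked=0x%x\n",
           (unsigned)rebuilt, (unsigned)masked);
    return 0;
}
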
@@ -6714,17 +5484,17 @@ static void __iwl4965_down(struct iwl4965_priv *priv)
6714 iwl4965_hw_rxq_stop(priv); 5484 iwl4965_hw_rxq_stop(priv);
6715 5485
6716 spin_lock_irqsave(&priv->lock, flags); 5486 spin_lock_irqsave(&priv->lock, flags);
6717 if (!iwl4965_grab_nic_access(priv)) { 5487 if (!iwl_grab_nic_access(priv)) {
6718 iwl4965_write_prph(priv, APMG_CLK_DIS_REG, 5488 iwl_write_prph(priv, APMG_CLK_DIS_REG,
6719 APMG_CLK_VAL_DMA_CLK_RQT); 5489 APMG_CLK_VAL_DMA_CLK_RQT);
6720 iwl4965_release_nic_access(priv); 5490 iwl_release_nic_access(priv);
6721 } 5491 }
6722 spin_unlock_irqrestore(&priv->lock, flags); 5492 spin_unlock_irqrestore(&priv->lock, flags);
6723 5493
6724 udelay(5); 5494 udelay(5);
6725 5495
6726 iwl4965_hw_nic_stop_master(priv); 5496 iwl4965_hw_nic_stop_master(priv);
6727 iwl4965_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET); 5497 iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
6728 iwl4965_hw_nic_reset(priv); 5498 iwl4965_hw_nic_reset(priv);
6729 5499
6730 exit: 5500 exit:
@@ -6738,7 +5508,7 @@ static void __iwl4965_down(struct iwl4965_priv *priv)
6738 iwl4965_clear_free_frames(priv); 5508 iwl4965_clear_free_frames(priv);
6739} 5509}
6740 5510
6741static void iwl4965_down(struct iwl4965_priv *priv) 5511static void iwl4965_down(struct iwl_priv *priv)
6742{ 5512{
6743 mutex_lock(&priv->mutex); 5513 mutex_lock(&priv->mutex);
6744 __iwl4965_down(priv); 5514 __iwl4965_down(priv);
@@ -6749,9 +5519,10 @@ static void iwl4965_down(struct iwl4965_priv *priv)
6749 5519
6750#define MAX_HW_RESTARTS 5 5520#define MAX_HW_RESTARTS 5
6751 5521
6752static int __iwl4965_up(struct iwl4965_priv *priv) 5522static int __iwl4965_up(struct iwl_priv *priv)
6753{ 5523{
6754 int rc, i; 5524 int i;
5525 int ret;
6755 5526
6756 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) { 5527 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
6757 IWL_WARNING("Exit pending; will not bring the NIC up\n"); 5528 IWL_WARNING("Exit pending; will not bring the NIC up\n");
@@ -6761,6 +5532,7 @@ static int __iwl4965_up(struct iwl4965_priv *priv)
6761 if (test_bit(STATUS_RF_KILL_SW, &priv->status)) { 5532 if (test_bit(STATUS_RF_KILL_SW, &priv->status)) {
6762 IWL_WARNING("Radio disabled by SW RF kill (module " 5533 IWL_WARNING("Radio disabled by SW RF kill (module "
6763 "parameter)\n"); 5534 "parameter)\n");
5535 iwl_rfkill_set_hw_state(priv);
6764 return -ENODEV; 5536 return -ENODEV;
6765 } 5537 }
6766 5538
@@ -6770,37 +5542,39 @@ static int __iwl4965_up(struct iwl4965_priv *priv)
6770 } 5542 }
6771 5543
6772 /* If platform's RF_KILL switch is NOT set to KILL */ 5544 /* If platform's RF_KILL switch is NOT set to KILL */
6773 if (iwl4965_read32(priv, CSR_GP_CNTRL) & 5545 if (iwl_read32(priv, CSR_GP_CNTRL) &
6774 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) 5546 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
6775 clear_bit(STATUS_RF_KILL_HW, &priv->status); 5547 clear_bit(STATUS_RF_KILL_HW, &priv->status);
6776 else { 5548 else {
6777 set_bit(STATUS_RF_KILL_HW, &priv->status); 5549 set_bit(STATUS_RF_KILL_HW, &priv->status);
6778 if (!test_bit(STATUS_IN_SUSPEND, &priv->status)) { 5550 if (!test_bit(STATUS_IN_SUSPEND, &priv->status)) {
5551 iwl_rfkill_set_hw_state(priv);
6779 IWL_WARNING("Radio disabled by HW RF Kill switch\n"); 5552 IWL_WARNING("Radio disabled by HW RF Kill switch\n");
6780 return -ENODEV; 5553 return -ENODEV;
6781 } 5554 }
6782 } 5555 }
6783 5556
6784 iwl4965_write32(priv, CSR_INT, 0xFFFFFFFF); 5557 iwl_rfkill_set_hw_state(priv);
5558 iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
6785 5559
6786 rc = iwl4965_hw_nic_init(priv); 5560 ret = priv->cfg->ops->lib->hw_nic_init(priv);
6787 if (rc) { 5561 if (ret) {
6788 IWL_ERROR("Unable to int nic\n"); 5562 IWL_ERROR("Unable to init nic\n");
6789 return rc; 5563 return ret;
6790 } 5564 }
6791 5565
6792 /* make sure rfkill handshake bits are cleared */ 5566 /* make sure rfkill handshake bits are cleared */
6793 iwl4965_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); 5567 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
6794 iwl4965_write32(priv, CSR_UCODE_DRV_GP1_CLR, 5568 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
6795 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED); 5569 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
6796 5570
6797 /* clear (again), then enable host interrupts */ 5571 /* clear (again), then enable host interrupts */
6798 iwl4965_write32(priv, CSR_INT, 0xFFFFFFFF); 5572 iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
6799 iwl4965_enable_interrupts(priv); 5573 iwl4965_enable_interrupts(priv);
6800 5574
6801 /* really make sure rfkill handshake bits are cleared */ 5575 /* really make sure rfkill handshake bits are cleared */
6802 iwl4965_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); 5576 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
6803 iwl4965_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL); 5577 iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
6804 5578
6805 /* Copy original ucode data image from disk into backup cache. 5579 /* Copy original ucode data image from disk into backup cache.
6806 * This will be used to initialize the on-board processor's 5580 * This will be used to initialize the on-board processor's
@@ -6814,15 +5588,15 @@ static int __iwl4965_up(struct iwl4965_priv *priv)
6814 5588
6815 for (i = 0; i < MAX_HW_RESTARTS; i++) { 5589 for (i = 0; i < MAX_HW_RESTARTS; i++) {
6816 5590
6817 iwl4965_clear_stations_table(priv); 5591 iwlcore_clear_stations_table(priv);
6818 5592
6819 /* load bootstrap state machine, 5593 /* load bootstrap state machine,
6820 * load bootstrap program into processor's memory, 5594 * load bootstrap program into processor's memory,
6821 * prepare to load the "initialize" uCode */ 5595 * prepare to load the "initialize" uCode */
6822 rc = iwl4965_load_bsm(priv); 5596 ret = priv->cfg->ops->lib->load_ucode(priv);
6823 5597
6824 if (rc) { 5598 if (ret) {
6825 IWL_ERROR("Unable to set up bootstrap uCode: %d\n", rc); 5599 IWL_ERROR("Unable to set up bootstrap uCode: %d\n", ret);
6826 continue; 5600 continue;
6827 } 5601 }
6828 5602
@@ -6852,8 +5626,8 @@ static int __iwl4965_up(struct iwl4965_priv *priv)
6852 5626
6853static void iwl4965_bg_init_alive_start(struct work_struct *data) 5627static void iwl4965_bg_init_alive_start(struct work_struct *data)
6854{ 5628{
6855 struct iwl4965_priv *priv = 5629 struct iwl_priv *priv =
6856 container_of(data, struct iwl4965_priv, init_alive_start.work); 5630 container_of(data, struct iwl_priv, init_alive_start.work);
6857 5631
6858 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 5632 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
6859 return; 5633 return;
@@ -6865,8 +5639,8 @@ static void iwl4965_bg_init_alive_start(struct work_struct *data)
6865 5639
6866static void iwl4965_bg_alive_start(struct work_struct *data) 5640static void iwl4965_bg_alive_start(struct work_struct *data)
6867{ 5641{
6868 struct iwl4965_priv *priv = 5642 struct iwl_priv *priv =
6869 container_of(data, struct iwl4965_priv, alive_start.work); 5643 container_of(data, struct iwl_priv, alive_start.work);
6870 5644
6871 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 5645 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
6872 return; 5646 return;
@@ -6878,7 +5652,7 @@ static void iwl4965_bg_alive_start(struct work_struct *data)
6878 5652
6879static void iwl4965_bg_rf_kill(struct work_struct *work) 5653static void iwl4965_bg_rf_kill(struct work_struct *work)
6880{ 5654{
6881 struct iwl4965_priv *priv = container_of(work, struct iwl4965_priv, rf_kill); 5655 struct iwl_priv *priv = container_of(work, struct iwl_priv, rf_kill);
6882 5656
6883 wake_up_interruptible(&priv->wait_command_queue); 5657 wake_up_interruptible(&priv->wait_command_queue);
6884 5658
@@ -6887,13 +5661,16 @@ static void iwl4965_bg_rf_kill(struct work_struct *work)
6887 5661
6888 mutex_lock(&priv->mutex); 5662 mutex_lock(&priv->mutex);
6889 5663
6890 if (!iwl4965_is_rfkill(priv)) { 5664 if (!iwl_is_rfkill(priv)) {
6891 IWL_DEBUG(IWL_DL_INFO | IWL_DL_RF_KILL, 5665 IWL_DEBUG(IWL_DL_INFO | IWL_DL_RF_KILL,
6892 "HW and/or SW RF Kill no longer active, restarting " 5666 "HW and/or SW RF Kill no longer active, restarting "
6893 "device\n"); 5667 "device\n");
6894 if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) 5668 if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
6895 queue_work(priv->workqueue, &priv->restart); 5669 queue_work(priv->workqueue, &priv->restart);
6896 } else { 5670 } else {
5671 /* make sure mac80211 stop sending Tx frame */
5672 if (priv->mac80211_registered)
5673 ieee80211_stop_queues(priv->hw);
6897 5674
6898 if (!test_bit(STATUS_RF_KILL_HW, &priv->status)) 5675 if (!test_bit(STATUS_RF_KILL_HW, &priv->status))
6899 IWL_DEBUG_RF_KILL("Can not turn radio back on - " 5676 IWL_DEBUG_RF_KILL("Can not turn radio back on - "
@@ -6903,6 +5680,8 @@ static void iwl4965_bg_rf_kill(struct work_struct *work)
6903 "Kill switch must be turned off for " 5680 "Kill switch must be turned off for "
6904 "wireless networking to work.\n"); 5681 "wireless networking to work.\n");
6905 } 5682 }
5683 iwl_rfkill_set_hw_state(priv);
5684
6906 mutex_unlock(&priv->mutex); 5685 mutex_unlock(&priv->mutex);
6907} 5686}
6908 5687
@@ -6910,8 +5689,8 @@ static void iwl4965_bg_rf_kill(struct work_struct *work)
6910 5689
6911static void iwl4965_bg_scan_check(struct work_struct *data) 5690static void iwl4965_bg_scan_check(struct work_struct *data)
6912{ 5691{
6913 struct iwl4965_priv *priv = 5692 struct iwl_priv *priv =
6914 container_of(data, struct iwl4965_priv, scan_check.work); 5693 container_of(data, struct iwl_priv, scan_check.work);
6915 5694
6916 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 5695 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
6917 return; 5696 return;
@@ -6931,24 +5710,25 @@ static void iwl4965_bg_scan_check(struct work_struct *data)
6931 5710
6932static void iwl4965_bg_request_scan(struct work_struct *data) 5711static void iwl4965_bg_request_scan(struct work_struct *data)
6933{ 5712{
6934 struct iwl4965_priv *priv = 5713 struct iwl_priv *priv =
6935 container_of(data, struct iwl4965_priv, request_scan); 5714 container_of(data, struct iwl_priv, request_scan);
6936 struct iwl4965_host_cmd cmd = { 5715 struct iwl_host_cmd cmd = {
6937 .id = REPLY_SCAN_CMD, 5716 .id = REPLY_SCAN_CMD,
6938 .len = sizeof(struct iwl4965_scan_cmd), 5717 .len = sizeof(struct iwl4965_scan_cmd),
6939 .meta.flags = CMD_SIZE_HUGE, 5718 .meta.flags = CMD_SIZE_HUGE,
6940 }; 5719 };
6941 int rc = 0;
6942 struct iwl4965_scan_cmd *scan; 5720 struct iwl4965_scan_cmd *scan;
6943 struct ieee80211_conf *conf = NULL; 5721 struct ieee80211_conf *conf = NULL;
5722 u16 cmd_len;
5723 enum ieee80211_band band;
6944 u8 direct_mask; 5724 u8 direct_mask;
6945 int phymode; 5725 int ret = 0;
6946 5726
6947 conf = ieee80211_get_hw_conf(priv->hw); 5727 conf = ieee80211_get_hw_conf(priv->hw);
6948 5728
6949 mutex_lock(&priv->mutex); 5729 mutex_lock(&priv->mutex);
6950 5730
6951 if (!iwl4965_is_ready(priv)) { 5731 if (!iwl_is_ready(priv)) {
6952 IWL_WARNING("request scan called when driver not ready.\n"); 5732 IWL_WARNING("request scan called when driver not ready.\n");
6953 goto done; 5733 goto done;
6954 } 5734 }
@@ -6963,7 +5743,7 @@ static void iwl4965_bg_request_scan(struct work_struct *data)
6963 if (test_bit(STATUS_SCAN_HW, &priv->status)) { 5743 if (test_bit(STATUS_SCAN_HW, &priv->status)) {
6964 IWL_DEBUG_INFO("Multiple concurrent scan requests in parallel. " 5744 IWL_DEBUG_INFO("Multiple concurrent scan requests in parallel. "
6965 "Ignoring second request.\n"); 5745 "Ignoring second request.\n");
6966 rc = -EIO; 5746 ret = -EIO;
6967 goto done; 5747 goto done;
6968 } 5748 }
6969 5749
@@ -6977,7 +5757,7 @@ static void iwl4965_bg_request_scan(struct work_struct *data)
6977 goto done; 5757 goto done;
6978 } 5758 }
6979 5759
6980 if (iwl4965_is_rfkill(priv)) { 5760 if (iwl_is_rfkill(priv)) {
6981 IWL_DEBUG_HC("Aborting scan due to RF Kill activation\n"); 5761 IWL_DEBUG_HC("Aborting scan due to RF Kill activation\n");
6982 goto done; 5762 goto done;
6983 } 5763 }
@@ -6996,7 +5776,7 @@ static void iwl4965_bg_request_scan(struct work_struct *data)
6996 priv->scan = kmalloc(sizeof(struct iwl4965_scan_cmd) + 5776 priv->scan = kmalloc(sizeof(struct iwl4965_scan_cmd) +
6997 IWL_MAX_SCAN_SIZE, GFP_KERNEL); 5777 IWL_MAX_SCAN_SIZE, GFP_KERNEL);
6998 if (!priv->scan) { 5778 if (!priv->scan) {
6999 rc = -ENOMEM; 5779 ret = -ENOMEM;
7000 goto done; 5780 goto done;
7001 } 5781 }
7002 } 5782 }
@@ -7006,7 +5786,7 @@ static void iwl4965_bg_request_scan(struct work_struct *data)
7006 scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH; 5786 scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
7007 scan->quiet_time = IWL_ACTIVE_QUIET_TIME; 5787 scan->quiet_time = IWL_ACTIVE_QUIET_TIME;
7008 5788
7009 if (iwl4965_is_associated(priv)) { 5789 if (iwl_is_associated(priv)) {
7010 u16 interval = 0; 5790 u16 interval = 0;
7011 u32 extra; 5791 u32 extra;
7012 u32 suspend_time = 100; 5792 u32 suspend_time = 100;
@@ -7043,26 +5823,19 @@ static void iwl4965_bg_request_scan(struct work_struct *data)
7043 memcpy(scan->direct_scan[0].ssid, 5823 memcpy(scan->direct_scan[0].ssid,
7044 priv->direct_ssid, priv->direct_ssid_len); 5824 priv->direct_ssid, priv->direct_ssid_len);
7045 direct_mask = 1; 5825 direct_mask = 1;
7046 } else if (!iwl4965_is_associated(priv) && priv->essid_len) { 5826 } else if (!iwl_is_associated(priv) && priv->essid_len) {
7047 scan->direct_scan[0].id = WLAN_EID_SSID; 5827 scan->direct_scan[0].id = WLAN_EID_SSID;
7048 scan->direct_scan[0].len = priv->essid_len; 5828 scan->direct_scan[0].len = priv->essid_len;
7049 memcpy(scan->direct_scan[0].ssid, priv->essid, priv->essid_len); 5829 memcpy(scan->direct_scan[0].ssid, priv->essid, priv->essid_len);
7050 direct_mask = 1; 5830 direct_mask = 1;
7051 } else 5831 } else {
7052 direct_mask = 0; 5832 direct_mask = 0;
5833 }
7053 5834
7054 /* We don't build a direct scan probe request; the uCode will do
7055 * that based on the direct_mask added to each channel entry */
7056 scan->tx_cmd.len = cpu_to_le16(
7057 iwl4965_fill_probe_req(priv, (struct ieee80211_mgmt *)scan->data,
7058 IWL_MAX_SCAN_SIZE - sizeof(*scan), 0));
7059 scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK; 5835 scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
7060 scan->tx_cmd.sta_id = priv->hw_setting.bcast_sta_id; 5836 scan->tx_cmd.sta_id = priv->hw_params.bcast_sta_id;
7061 scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; 5837 scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
7062 5838
7063 /* flags + rate selection */
7064
7065 scan->tx_cmd.tx_flags |= cpu_to_le32(0x200);
7066 5839
7067 switch (priv->scan_bands) { 5840 switch (priv->scan_bands) {
7068 case 2: 5841 case 2:
@@ -7072,7 +5845,7 @@ static void iwl4965_bg_request_scan(struct work_struct *data)
7072 RATE_MCS_ANT_B_MSK|RATE_MCS_CCK_MSK); 5845 RATE_MCS_ANT_B_MSK|RATE_MCS_CCK_MSK);
7073 5846
7074 scan->good_CRC_th = 0; 5847 scan->good_CRC_th = 0;
7075 phymode = MODE_IEEE80211G; 5848 band = IEEE80211_BAND_2GHZ;
7076 break; 5849 break;
7077 5850
7078 case 1: 5851 case 1:
@@ -7080,7 +5853,7 @@ static void iwl4965_bg_request_scan(struct work_struct *data)
7080 iwl4965_hw_set_rate_n_flags(IWL_RATE_6M_PLCP, 5853 iwl4965_hw_set_rate_n_flags(IWL_RATE_6M_PLCP,
7081 RATE_MCS_ANT_B_MSK); 5854 RATE_MCS_ANT_B_MSK);
7082 scan->good_CRC_th = IWL_GOOD_CRC_TH; 5855 scan->good_CRC_th = IWL_GOOD_CRC_TH;
7083 phymode = MODE_IEEE80211A; 5856 band = IEEE80211_BAND_5GHZ;
7084 break; 5857 break;
7085 5858
7086 default: 5859 default:
@@ -7088,6 +5861,13 @@ static void iwl4965_bg_request_scan(struct work_struct *data)
7088 goto done; 5861 goto done;
7089 } 5862 }
7090 5863
5864 /* We don't build a direct scan probe request; the uCode will do
5865 * that based on the direct_mask added to each channel entry */
5866 cmd_len = iwl4965_fill_probe_req(priv, band,
5867 (struct ieee80211_mgmt *)scan->data,
5868 IWL_MAX_SCAN_SIZE - sizeof(*scan), 0);
5869
5870 scan->tx_cmd.len = cpu_to_le16(cmd_len);
7091 /* select Rx chains */ 5871 /* select Rx chains */
7092 5872
7093 /* Force use of chains B and C (0x6) for scan Rx. 5873 /* Force use of chains B and C (0x6) for scan Rx.
@@ -7101,18 +5881,23 @@ static void iwl4965_bg_request_scan(struct work_struct *data)
7101 if (priv->iw_mode == IEEE80211_IF_TYPE_MNTR) 5881 if (priv->iw_mode == IEEE80211_IF_TYPE_MNTR)
7102 scan->filter_flags = RXON_FILTER_PROMISC_MSK; 5882 scan->filter_flags = RXON_FILTER_PROMISC_MSK;
7103 5883
7104 if (direct_mask) 5884 if (direct_mask) {
7105 IWL_DEBUG_SCAN 5885 IWL_DEBUG_SCAN
7106 ("Initiating direct scan for %s.\n", 5886 ("Initiating direct scan for %s.\n",
7107 iwl4965_escape_essid(priv->essid, priv->essid_len)); 5887 iwl4965_escape_essid(priv->essid, priv->essid_len));
7108 else 5888 scan->channel_count =
5889 iwl4965_get_channels_for_scan(
5890 priv, band, 1, /* active */
5891 direct_mask,
5892 (void *)&scan->data[le16_to_cpu(scan->tx_cmd.len)]);
5893 } else {
7109 IWL_DEBUG_SCAN("Initiating indirect scan.\n"); 5894 IWL_DEBUG_SCAN("Initiating indirect scan.\n");
7110 5895 scan->channel_count =
7111 scan->channel_count = 5896 iwl4965_get_channels_for_scan(
7112 iwl4965_get_channels_for_scan( 5897 priv, band, 0, /* passive */
7113 priv, phymode, 1, /* active */ 5898 direct_mask,
7114 direct_mask, 5899 (void *)&scan->data[le16_to_cpu(scan->tx_cmd.len)]);
7115 (void *)&scan->data[le16_to_cpu(scan->tx_cmd.len)]); 5900 }
7116 5901
7117 cmd.len += le16_to_cpu(scan->tx_cmd.len) + 5902 cmd.len += le16_to_cpu(scan->tx_cmd.len) +
7118 scan->channel_count * sizeof(struct iwl4965_scan_channel); 5903 scan->channel_count * sizeof(struct iwl4965_scan_channel);
@@ -7120,8 +5905,8 @@ static void iwl4965_bg_request_scan(struct work_struct *data)
7120 scan->len = cpu_to_le16(cmd.len); 5905 scan->len = cpu_to_le16(cmd.len);
7121 5906
7122 set_bit(STATUS_SCAN_HW, &priv->status); 5907 set_bit(STATUS_SCAN_HW, &priv->status);
7123 rc = iwl4965_send_cmd_sync(priv, &cmd); 5908 ret = iwl_send_cmd_sync(priv, &cmd);
7124 if (rc) 5909 if (ret)
7125 goto done; 5910 goto done;
7126 5911
7127 queue_delayed_work(priv->workqueue, &priv->scan_check, 5912 queue_delayed_work(priv->workqueue, &priv->scan_check,
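
The scan command assembled in this hunk is variable length: a fixed header, then the probe-request frame (tx_cmd.len bytes), then one entry per channel, and the total is the sum of the three pieces. A simplified sketch of that layout and the length arithmetic follows; the structures and field names are stand-ins, not the real command format.

/*
 * Simplified model of a variable-length scan command:
 * header + probe request + per-channel entries.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct scan_channel_entry {
    uint8_t  channel;
    uint8_t  active;
    uint16_t dwell_ms;
};

struct scan_cmd_hdr {
    uint16_t len;            /* total command length, filled in last */
    uint16_t probe_len;      /* bytes of probe request after the header */
    uint8_t  channel_count;
    uint8_t  pad[3];
};

static size_t scan_cmd_total_len(uint16_t probe_len, uint8_t channel_count)
{
    return sizeof(struct scan_cmd_hdr)
           + probe_len
           + (size_t)channel_count * sizeof(struct scan_channel_entry);
}

int main(void)
{
    uint16_t probe_len = 68;     /* pretend probe request size */
    uint8_t channels = 14;       /* e.g. a 2.4 GHz sweep */

    printf("scan command: %zu bytes\n",
           scan_cmd_total_len(probe_len, channels));
    return 0;
}
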
@@ -7138,7 +5923,7 @@ static void iwl4965_bg_request_scan(struct work_struct *data)
7138 5923
7139static void iwl4965_bg_up(struct work_struct *data) 5924static void iwl4965_bg_up(struct work_struct *data)
7140{ 5925{
7141 struct iwl4965_priv *priv = container_of(data, struct iwl4965_priv, up); 5926 struct iwl_priv *priv = container_of(data, struct iwl_priv, up);
7142 5927
7143 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 5928 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
7144 return; 5929 return;
@@ -7150,7 +5935,7 @@ static void iwl4965_bg_up(struct work_struct *data)
7150 5935
7151static void iwl4965_bg_restart(struct work_struct *data) 5936static void iwl4965_bg_restart(struct work_struct *data)
7152{ 5937{
7153 struct iwl4965_priv *priv = container_of(data, struct iwl4965_priv, restart); 5938 struct iwl_priv *priv = container_of(data, struct iwl_priv, restart);
7154 5939
7155 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 5940 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
7156 return; 5941 return;
@@ -7161,8 +5946,8 @@ static void iwl4965_bg_restart(struct work_struct *data)
7161 5946
7162static void iwl4965_bg_rx_replenish(struct work_struct *data) 5947static void iwl4965_bg_rx_replenish(struct work_struct *data)
7163{ 5948{
7164 struct iwl4965_priv *priv = 5949 struct iwl_priv *priv =
7165 container_of(data, struct iwl4965_priv, rx_replenish); 5950 container_of(data, struct iwl_priv, rx_replenish);
7166 5951
7167 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 5952 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
7168 return; 5953 return;
@@ -7174,13 +5959,10 @@ static void iwl4965_bg_rx_replenish(struct work_struct *data)
7174 5959
7175#define IWL_DELAY_NEXT_SCAN (HZ*2) 5960#define IWL_DELAY_NEXT_SCAN (HZ*2)
7176 5961
7177static void iwl4965_bg_post_associate(struct work_struct *data) 5962static void iwl4965_post_associate(struct iwl_priv *priv)
7178{ 5963{
7179 struct iwl4965_priv *priv = container_of(data, struct iwl4965_priv,
7180 post_associate.work);
7181
7182 int rc = 0;
7183 struct ieee80211_conf *conf = NULL; 5964 struct ieee80211_conf *conf = NULL;
5965 int ret = 0;
7184 DECLARE_MAC_BUF(mac); 5966 DECLARE_MAC_BUF(mac);
7185 5967
7186 if (priv->iw_mode == IEEE80211_IF_TYPE_AP) { 5968 if (priv->iw_mode == IEEE80211_IF_TYPE_AP) {
@@ -7196,12 +5978,10 @@ static void iwl4965_bg_post_associate(struct work_struct *data)
7196 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 5978 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
7197 return; 5979 return;
7198 5980
7199 mutex_lock(&priv->mutex);
7200 5981
7201 if (!priv->vif || !priv->is_open) { 5982 if (!priv->vif || !priv->is_open)
7202 mutex_unlock(&priv->mutex);
7203 return; 5983 return;
7204 } 5984
7205 iwl4965_scan_cancel_timeout(priv, 200); 5985 iwl4965_scan_cancel_timeout(priv, 200);
7206 5986
7207 conf = ieee80211_get_hw_conf(priv->hw); 5987 conf = ieee80211_get_hw_conf(priv->hw);
@@ -7211,9 +5991,9 @@ static void iwl4965_bg_post_associate(struct work_struct *data)
7211 5991
7212 memset(&priv->rxon_timing, 0, sizeof(struct iwl4965_rxon_time_cmd)); 5992 memset(&priv->rxon_timing, 0, sizeof(struct iwl4965_rxon_time_cmd));
7213 iwl4965_setup_rxon_timing(priv); 5993 iwl4965_setup_rxon_timing(priv);
7214 rc = iwl4965_send_cmd_pdu(priv, REPLY_RXON_TIMING, 5994 ret = iwl_send_cmd_pdu(priv, REPLY_RXON_TIMING,
7215 sizeof(priv->rxon_timing), &priv->rxon_timing); 5995 sizeof(priv->rxon_timing), &priv->rxon_timing);
7216 if (rc) 5996 if (ret)
7217 IWL_WARNING("REPLY_RXON_TIMING failed - " 5997 IWL_WARNING("REPLY_RXON_TIMING failed - "
7218 "Attempting to continue.\n"); 5998 "Attempting to continue.\n");
7219 5999
@@ -7255,7 +6035,7 @@ static void iwl4965_bg_post_associate(struct work_struct *data)
7255 case IEEE80211_IF_TYPE_IBSS: 6035 case IEEE80211_IF_TYPE_IBSS:
7256 6036
7257 /* clear out the station table */ 6037 /* clear out the station table */
7258 iwl4965_clear_stations_table(priv); 6038 iwlcore_clear_stations_table(priv);
7259 6039
7260 iwl4965_rxon_add_station(priv, iwl4965_broadcast_addr, 0); 6040 iwl4965_rxon_add_station(priv, iwl4965_broadcast_addr, 0);
7261 iwl4965_rxon_add_station(priv, priv->bssid, 0); 6041 iwl4965_rxon_add_station(priv, priv->bssid, 0);
@@ -7281,19 +6061,29 @@ static void iwl4965_bg_post_associate(struct work_struct *data)
7281 if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS) 6061 if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS)
7282 priv->assoc_station_added = 1; 6062 priv->assoc_station_added = 1;
7283 6063
7284#ifdef CONFIG_IWL4965_QOS
7285 iwl4965_activate_qos(priv, 0); 6064 iwl4965_activate_qos(priv, 0);
7286#endif /* CONFIG_IWL4965_QOS */ 6065
7287 /* we have just associated, don't start scan too early */ 6066 /* we have just associated, don't start scan too early */
7288 priv->next_scan_jiffies = jiffies + IWL_DELAY_NEXT_SCAN; 6067 priv->next_scan_jiffies = jiffies + IWL_DELAY_NEXT_SCAN;
6068}
6069
6070
6071static void iwl4965_bg_post_associate(struct work_struct *data)
6072{
6073 struct iwl_priv *priv = container_of(data, struct iwl_priv,
6074 post_associate.work);
6075
6076 mutex_lock(&priv->mutex);
6077 iwl4965_post_associate(priv);
7289 mutex_unlock(&priv->mutex); 6078 mutex_unlock(&priv->mutex);
6079
7290} 6080}
7291 6081
7292static void iwl4965_bg_abort_scan(struct work_struct *work) 6082static void iwl4965_bg_abort_scan(struct work_struct *work)
7293{ 6083{
7294 struct iwl4965_priv *priv = container_of(work, struct iwl4965_priv, abort_scan); 6084 struct iwl_priv *priv = container_of(work, struct iwl_priv, abort_scan);
7295 6085
7296 if (!iwl4965_is_ready(priv)) 6086 if (!iwl_is_ready(priv))
7297 return; 6087 return;
7298 6088
7299 mutex_lock(&priv->mutex); 6089 mutex_lock(&priv->mutex);
@@ -7308,8 +6098,8 @@ static int iwl4965_mac_config(struct ieee80211_hw *hw, struct ieee80211_conf *co
7308 6098
7309static void iwl4965_bg_scan_completed(struct work_struct *work) 6099static void iwl4965_bg_scan_completed(struct work_struct *work)
7310{ 6100{
7311 struct iwl4965_priv *priv = 6101 struct iwl_priv *priv =
7312 container_of(work, struct iwl4965_priv, scan_completed); 6102 container_of(work, struct iwl_priv, scan_completed);
7313 6103
7314 IWL_DEBUG(IWL_DL_INFO | IWL_DL_SCAN, "SCAN complete scan\n"); 6104 IWL_DEBUG(IWL_DL_INFO | IWL_DL_SCAN, "SCAN complete scan\n");
7315 6105
@@ -7338,7 +6128,7 @@ static void iwl4965_bg_scan_completed(struct work_struct *work)
7338 6128
7339static int iwl4965_mac_start(struct ieee80211_hw *hw) 6129static int iwl4965_mac_start(struct ieee80211_hw *hw)
7340{ 6130{
7341 struct iwl4965_priv *priv = hw->priv; 6131 struct iwl_priv *priv = hw->priv;
7342 int ret; 6132 int ret;
7343 6133
7344 IWL_DEBUG_MAC80211("enter\n"); 6134 IWL_DEBUG_MAC80211("enter\n");
@@ -7415,7 +6205,7 @@ out_disable_msi:
7415 6205
7416static void iwl4965_mac_stop(struct ieee80211_hw *hw) 6206static void iwl4965_mac_stop(struct ieee80211_hw *hw)
7417{ 6207{
7418 struct iwl4965_priv *priv = hw->priv; 6208 struct iwl_priv *priv = hw->priv;
7419 6209
7420 IWL_DEBUG_MAC80211("enter\n"); 6210 IWL_DEBUG_MAC80211("enter\n");
7421 6211
@@ -7426,7 +6216,7 @@ static void iwl4965_mac_stop(struct ieee80211_hw *hw)
7426 6216
7427 priv->is_open = 0; 6217 priv->is_open = 0;
7428 6218
7429 if (iwl4965_is_ready_rf(priv)) { 6219 if (iwl_is_ready_rf(priv)) {
7430 /* stop mac, cancel any scan request and clear 6220 /* stop mac, cancel any scan request and clear
7431 * RXON_FILTER_ASSOC_MSK BIT 6221 * RXON_FILTER_ASSOC_MSK BIT
7432 */ 6222 */
@@ -7450,7 +6240,7 @@ static void iwl4965_mac_stop(struct ieee80211_hw *hw)
7450static int iwl4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb, 6240static int iwl4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb,
7451 struct ieee80211_tx_control *ctl) 6241 struct ieee80211_tx_control *ctl)
7452{ 6242{
7453 struct iwl4965_priv *priv = hw->priv; 6243 struct iwl_priv *priv = hw->priv;
7454 6244
7455 IWL_DEBUG_MAC80211("enter\n"); 6245 IWL_DEBUG_MAC80211("enter\n");
7456 6246
@@ -7460,7 +6250,7 @@ static int iwl4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb,
7460 } 6250 }
7461 6251
7462 IWL_DEBUG_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len, 6252 IWL_DEBUG_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
7463 ctl->tx_rate); 6253 ctl->tx_rate->bitrate);
7464 6254
7465 if (iwl4965_tx_skb(priv, skb, ctl)) 6255 if (iwl4965_tx_skb(priv, skb, ctl))
7466 dev_kfree_skb_any(skb); 6256 dev_kfree_skb_any(skb);
@@ -7472,7 +6262,7 @@ static int iwl4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb,
7472static int iwl4965_mac_add_interface(struct ieee80211_hw *hw, 6262static int iwl4965_mac_add_interface(struct ieee80211_hw *hw,
7473 struct ieee80211_if_init_conf *conf) 6263 struct ieee80211_if_init_conf *conf)
7474{ 6264{
7475 struct iwl4965_priv *priv = hw->priv; 6265 struct iwl_priv *priv = hw->priv;
7476 unsigned long flags; 6266 unsigned long flags;
7477 DECLARE_MAC_BUF(mac); 6267 DECLARE_MAC_BUF(mac);
7478 6268
@@ -7495,7 +6285,7 @@ static int iwl4965_mac_add_interface(struct ieee80211_hw *hw,
7495 memcpy(priv->mac_addr, conf->mac_addr, ETH_ALEN); 6285 memcpy(priv->mac_addr, conf->mac_addr, ETH_ALEN);
7496 } 6286 }
7497 6287
7498 if (iwl4965_is_ready(priv)) 6288 if (iwl_is_ready(priv))
7499 iwl4965_set_mode(priv, conf->type); 6289 iwl4965_set_mode(priv, conf->type);
7500 6290
7501 mutex_unlock(&priv->mutex); 6291 mutex_unlock(&priv->mutex);
@@ -7513,23 +6303,23 @@ static int iwl4965_mac_add_interface(struct ieee80211_hw *hw,
7513 */ 6303 */
7514static int iwl4965_mac_config(struct ieee80211_hw *hw, struct ieee80211_conf *conf) 6304static int iwl4965_mac_config(struct ieee80211_hw *hw, struct ieee80211_conf *conf)
7515{ 6305{
7516 struct iwl4965_priv *priv = hw->priv; 6306 struct iwl_priv *priv = hw->priv;
7517 const struct iwl4965_channel_info *ch_info; 6307 const struct iwl_channel_info *ch_info;
7518 unsigned long flags; 6308 unsigned long flags;
7519 int ret = 0; 6309 int ret = 0;
7520 6310
7521 mutex_lock(&priv->mutex); 6311 mutex_lock(&priv->mutex);
7522 IWL_DEBUG_MAC80211("enter to channel %d\n", conf->channel); 6312 IWL_DEBUG_MAC80211("enter to channel %d\n", conf->channel->hw_value);
7523 6313
7524 priv->add_radiotap = !!(conf->flags & IEEE80211_CONF_RADIOTAP); 6314 priv->add_radiotap = !!(conf->flags & IEEE80211_CONF_RADIOTAP);
7525 6315
7526 if (!iwl4965_is_ready(priv)) { 6316 if (!iwl_is_ready(priv)) {
7527 IWL_DEBUG_MAC80211("leave - not ready\n"); 6317 IWL_DEBUG_MAC80211("leave - not ready\n");
7528 ret = -EIO; 6318 ret = -EIO;
7529 goto out; 6319 goto out;
7530 } 6320 }
7531 6321
7532 if (unlikely(!iwl4965_param_disable_hw_scan && 6322 if (unlikely(!priv->cfg->mod_params->disable_hw_scan &&
7533 test_bit(STATUS_SCANNING, &priv->status))) { 6323 test_bit(STATUS_SCANNING, &priv->status))) {
7534 IWL_DEBUG_MAC80211("leave - scanning\n"); 6324 IWL_DEBUG_MAC80211("leave - scanning\n");
7535 set_bit(STATUS_CONF_PENDING, &priv->status); 6325 set_bit(STATUS_CONF_PENDING, &priv->status);
@@ -7539,10 +6329,9 @@ static int iwl4965_mac_config(struct ieee80211_hw *hw, struct ieee80211_conf *co
7539 6329
7540 spin_lock_irqsave(&priv->lock, flags); 6330 spin_lock_irqsave(&priv->lock, flags);
7541 6331
7542 ch_info = iwl4965_get_channel_info(priv, conf->phymode, conf->channel); 6332 ch_info = iwl_get_channel_info(priv, conf->channel->band,
6333 ieee80211_frequency_to_channel(conf->channel->center_freq));
7543 if (!is_channel_valid(ch_info)) { 6334 if (!is_channel_valid(ch_info)) {
7544 IWL_DEBUG_SCAN("Channel %d [%d] is INVALID for this SKU.\n",
7545 conf->channel, conf->phymode);
7546 IWL_DEBUG_MAC80211("leave - invalid channel\n"); 6335 IWL_DEBUG_MAC80211("leave - invalid channel\n");
7547 spin_unlock_irqrestore(&priv->lock, flags); 6336 spin_unlock_irqrestore(&priv->lock, flags);
7548 ret = -EINVAL; 6337 ret = -EINVAL;
@@ -7550,10 +6339,10 @@ static int iwl4965_mac_config(struct ieee80211_hw *hw, struct ieee80211_conf *co
7550 } 6339 }
7551 6340
7552#ifdef CONFIG_IWL4965_HT 6341#ifdef CONFIG_IWL4965_HT
7553 /* if we are switching fron ht to 2.4 clear flags 6342 /* if we are switching from ht to 2.4 clear flags
7554 * from any ht related info since 2.4 does not 6343 * from any ht related info since 2.4 does not
7555 * support ht */ 6344 * support ht */
7556 if ((le16_to_cpu(priv->staging_rxon.channel) != conf->channel) 6345 if ((le16_to_cpu(priv->staging_rxon.channel) != conf->channel->hw_value)
7557#ifdef IEEE80211_CONF_CHANNEL_SWITCH 6346#ifdef IEEE80211_CONF_CHANNEL_SWITCH
7558 && !(conf->flags & IEEE80211_CONF_CHANNEL_SWITCH) 6347 && !(conf->flags & IEEE80211_CONF_CHANNEL_SWITCH)
7559#endif 6348#endif
@@ -7561,12 +6350,13 @@ static int iwl4965_mac_config(struct ieee80211_hw *hw, struct ieee80211_conf *co
7561 priv->staging_rxon.flags = 0; 6350 priv->staging_rxon.flags = 0;
7562#endif /* CONFIG_IWL4965_HT */ 6351#endif /* CONFIG_IWL4965_HT */
7563 6352
7564 iwl4965_set_rxon_channel(priv, conf->phymode, conf->channel); 6353 iwlcore_set_rxon_channel(priv, conf->channel->band,
6354 ieee80211_frequency_to_channel(conf->channel->center_freq));
7565 6355
7566 iwl4965_set_flags_for_phymode(priv, conf->phymode); 6356 iwl4965_set_flags_for_phymode(priv, conf->channel->band);
7567 6357
7568 /* The list of supported rates and rate mask can be different 6358 /* The list of supported rates and rate mask can be different
7569 * for each phymode; since the phymode may have changed, reset 6359 * for each band; since the band may have changed, reset
7570 * the rate mask to what mac80211 lists */ 6360 * the rate mask to what mac80211 lists */
7571 iwl4965_set_rate(priv); 6361 iwl4965_set_rate(priv);
7572 6362
@@ -7579,14 +6369,15 @@ static int iwl4965_mac_config(struct ieee80211_hw *hw, struct ieee80211_conf *co
7579 } 6369 }
7580#endif 6370#endif
7581 6371
7582 iwl4965_radio_kill_sw(priv, !conf->radio_enabled); 6372 if (priv->cfg->ops->lib->radio_kill_sw)
6373 priv->cfg->ops->lib->radio_kill_sw(priv, !conf->radio_enabled);
7583 6374
7584 if (!conf->radio_enabled) { 6375 if (!conf->radio_enabled) {
7585 IWL_DEBUG_MAC80211("leave - radio disabled\n"); 6376 IWL_DEBUG_MAC80211("leave - radio disabled\n");
7586 goto out; 6377 goto out;
7587 } 6378 }
7588 6379
7589 if (iwl4965_is_rfkill(priv)) { 6380 if (iwl_is_rfkill(priv)) {
7590 IWL_DEBUG_MAC80211("leave - RF kill\n"); 6381 IWL_DEBUG_MAC80211("leave - RF kill\n");
7591 ret = -EIO; 6382 ret = -EIO;
7592 goto out; 6383 goto out;
@@ -7608,9 +6399,9 @@ out:
7608 return ret; 6399 return ret;
7609} 6400}
7610 6401
7611static void iwl4965_config_ap(struct iwl4965_priv *priv) 6402static void iwl4965_config_ap(struct iwl_priv *priv)
7612{ 6403{
7613 int rc = 0; 6404 int ret = 0;
7614 6405
7615 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 6406 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
7616 return; 6407 return;
@@ -7625,9 +6416,9 @@ static void iwl4965_config_ap(struct iwl4965_priv *priv)
7625 /* RXON Timing */ 6416 /* RXON Timing */
7626 memset(&priv->rxon_timing, 0, sizeof(struct iwl4965_rxon_time_cmd)); 6417 memset(&priv->rxon_timing, 0, sizeof(struct iwl4965_rxon_time_cmd));
7627 iwl4965_setup_rxon_timing(priv); 6418 iwl4965_setup_rxon_timing(priv);
7628 rc = iwl4965_send_cmd_pdu(priv, REPLY_RXON_TIMING, 6419 ret = iwl_send_cmd_pdu(priv, REPLY_RXON_TIMING,
7629 sizeof(priv->rxon_timing), &priv->rxon_timing); 6420 sizeof(priv->rxon_timing), &priv->rxon_timing);
7630 if (rc) 6421 if (ret)
7631 IWL_WARNING("REPLY_RXON_TIMING failed - " 6422 IWL_WARNING("REPLY_RXON_TIMING failed - "
7632 "Attempting to continue.\n"); 6423 "Attempting to continue.\n");
7633 6424
@@ -7658,9 +6449,7 @@ static void iwl4965_config_ap(struct iwl4965_priv *priv)
7658 /* restore RXON assoc */ 6449 /* restore RXON assoc */
7659 priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK; 6450 priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK;
7660 iwl4965_commit_rxon(priv); 6451 iwl4965_commit_rxon(priv);
7661#ifdef CONFIG_IWL4965_QOS
7662 iwl4965_activate_qos(priv, 1); 6452 iwl4965_activate_qos(priv, 1);
7663#endif
7664 iwl4965_rxon_add_station(priv, iwl4965_broadcast_addr, 0); 6453 iwl4965_rxon_add_station(priv, iwl4965_broadcast_addr, 0);
7665 } 6454 }
7666 iwl4965_send_beacon_cmd(priv); 6455 iwl4965_send_beacon_cmd(priv);
@@ -7674,7 +6463,7 @@ static int iwl4965_mac_config_interface(struct ieee80211_hw *hw,
7674 struct ieee80211_vif *vif, 6463 struct ieee80211_vif *vif,
7675 struct ieee80211_if_conf *conf) 6464 struct ieee80211_if_conf *conf)
7676{ 6465{
7677 struct iwl4965_priv *priv = hw->priv; 6466 struct iwl_priv *priv = hw->priv;
7678 DECLARE_MAC_BUF(mac); 6467 DECLARE_MAC_BUF(mac);
7679 unsigned long flags; 6468 unsigned long flags;
7680 int rc; 6469 int rc;
@@ -7682,6 +6471,12 @@ static int iwl4965_mac_config_interface(struct ieee80211_hw *hw,
7682 if (conf == NULL) 6471 if (conf == NULL)
7683 return -EIO; 6472 return -EIO;
7684 6473
6474 if (priv->vif != vif) {
6475 IWL_DEBUG_MAC80211("leave - priv->vif != vif\n");
6476 mutex_unlock(&priv->mutex);
6477 return 0;
6478 }
6479
7685 if ((priv->iw_mode == IEEE80211_IF_TYPE_AP) && 6480 if ((priv->iw_mode == IEEE80211_IF_TYPE_AP) &&
7686 (!conf->beacon || !conf->ssid_len)) { 6481 (!conf->beacon || !conf->ssid_len)) {
7687 IWL_DEBUG_MAC80211 6482 IWL_DEBUG_MAC80211
@@ -7689,7 +6484,7 @@ static int iwl4965_mac_config_interface(struct ieee80211_hw *hw,
7689 return 0; 6484 return 0;
7690 } 6485 }
7691 6486
7692 if (!iwl4965_is_alive(priv)) 6487 if (!iwl_is_alive(priv))
7693 return -EAGAIN; 6488 return -EAGAIN;
7694 6489
7695 mutex_lock(&priv->mutex); 6490 mutex_lock(&priv->mutex);
@@ -7704,17 +6499,6 @@ static int iwl4965_mac_config_interface(struct ieee80211_hw *hw,
7704 if (unlikely(test_bit(STATUS_SCANNING, &priv->status)) && 6499 if (unlikely(test_bit(STATUS_SCANNING, &priv->status)) &&
7705 !(priv->hw->flags & IEEE80211_HW_NO_PROBE_FILTERING)) { 6500 !(priv->hw->flags & IEEE80211_HW_NO_PROBE_FILTERING)) {
7706 */ 6501 */
7707 if (unlikely(test_bit(STATUS_SCANNING, &priv->status))) {
7708 IWL_DEBUG_MAC80211("leave - scanning\n");
7709 mutex_unlock(&priv->mutex);
7710 return 0;
7711 }
7712
7713 if (priv->vif != vif) {
7714 IWL_DEBUG_MAC80211("leave - priv->vif != vif\n");
7715 mutex_unlock(&priv->mutex);
7716 return 0;
7717 }
7718 6502
7719 if (priv->iw_mode == IEEE80211_IF_TYPE_AP) { 6503 if (priv->iw_mode == IEEE80211_IF_TYPE_AP) {
7720 if (!conf->bssid) { 6504 if (!conf->bssid) {
@@ -7729,7 +6513,7 @@ static int iwl4965_mac_config_interface(struct ieee80211_hw *hw,
7729 priv->ibss_beacon = conf->beacon; 6513 priv->ibss_beacon = conf->beacon;
7730 } 6514 }
7731 6515
7732 if (iwl4965_is_rfkill(priv)) 6516 if (iwl_is_rfkill(priv))
7733 goto done; 6517 goto done;
7734 6518
7735 if (conf->bssid && !is_zero_ether_addr(conf->bssid) && 6519 if (conf->bssid && !is_zero_ether_addr(conf->bssid) &&
@@ -7797,13 +6581,13 @@ static void iwl4965_configure_filter(struct ieee80211_hw *hw,
7797static void iwl4965_mac_remove_interface(struct ieee80211_hw *hw, 6581static void iwl4965_mac_remove_interface(struct ieee80211_hw *hw,
7798 struct ieee80211_if_init_conf *conf) 6582 struct ieee80211_if_init_conf *conf)
7799{ 6583{
7800 struct iwl4965_priv *priv = hw->priv; 6584 struct iwl_priv *priv = hw->priv;
7801 6585
7802 IWL_DEBUG_MAC80211("enter\n"); 6586 IWL_DEBUG_MAC80211("enter\n");
7803 6587
7804 mutex_lock(&priv->mutex); 6588 mutex_lock(&priv->mutex);
7805 6589
7806 if (iwl4965_is_ready_rf(priv)) { 6590 if (iwl_is_ready_rf(priv)) {
7807 iwl4965_scan_cancel_timeout(priv, 100); 6591 iwl4965_scan_cancel_timeout(priv, 100);
7808 cancel_delayed_work(&priv->post_associate); 6592 cancel_delayed_work(&priv->post_associate);
7809 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK; 6593 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
@@ -7821,14 +6605,77 @@ static void iwl4965_mac_remove_interface(struct ieee80211_hw *hw,
7821 6605
7822} 6606}
7823 6607
6608
6609#ifdef CONFIG_IWL4965_HT
6610static void iwl4965_ht_conf(struct iwl_priv *priv,
6611 struct ieee80211_bss_conf *bss_conf)
6612{
6613 struct ieee80211_ht_info *ht_conf = bss_conf->ht_conf;
6614 struct ieee80211_ht_bss_info *ht_bss_conf = bss_conf->ht_bss_conf;
6615 struct iwl_ht_info *iwl_conf = &priv->current_ht_config;
6616
6617 IWL_DEBUG_MAC80211("enter: \n");
6618
6619 iwl_conf->is_ht = bss_conf->assoc_ht;
6620
6621 if (!iwl_conf->is_ht)
6622 return;
6623
6624 priv->ps_mode = (u8)((ht_conf->cap & IEEE80211_HT_CAP_MIMO_PS) >> 2);
6625
6626 if (ht_conf->cap & IEEE80211_HT_CAP_SGI_20)
6627 iwl_conf->sgf |= 0x1;
6628 if (ht_conf->cap & IEEE80211_HT_CAP_SGI_40)
6629 iwl_conf->sgf |= 0x2;
6630
6631 iwl_conf->is_green_field = !!(ht_conf->cap & IEEE80211_HT_CAP_GRN_FLD);
6632 iwl_conf->max_amsdu_size =
6633 !!(ht_conf->cap & IEEE80211_HT_CAP_MAX_AMSDU);
6634
6635 iwl_conf->supported_chan_width =
6636 !!(ht_conf->cap & IEEE80211_HT_CAP_SUP_WIDTH);
6637 iwl_conf->extension_chan_offset =
6638 ht_bss_conf->bss_cap & IEEE80211_HT_IE_CHA_SEC_OFFSET;
6639 /* If no above or below channel supplied disable FAT channel */
6640 if (iwl_conf->extension_chan_offset != IWL_EXT_CHANNEL_OFFSET_ABOVE &&
6641 iwl_conf->extension_chan_offset != IWL_EXT_CHANNEL_OFFSET_BELOW)
6642 iwl_conf->supported_chan_width = 0;
6643
6644 iwl_conf->tx_mimo_ps_mode =
6645 (u8)((ht_conf->cap & IEEE80211_HT_CAP_MIMO_PS) >> 2);
6646 memcpy(iwl_conf->supp_mcs_set, ht_conf->supp_mcs_set, 16);
6647
6648 iwl_conf->control_channel = ht_bss_conf->primary_channel;
6649 iwl_conf->tx_chan_width =
6650 !!(ht_bss_conf->bss_cap & IEEE80211_HT_IE_CHA_WIDTH);
6651 iwl_conf->ht_protection =
6652 ht_bss_conf->bss_op_mode & IEEE80211_HT_IE_HT_PROTECTION;
6653 iwl_conf->non_GF_STA_present =
6654 !!(ht_bss_conf->bss_op_mode & IEEE80211_HT_IE_NON_GF_STA_PRSNT);
6655
6656 IWL_DEBUG_MAC80211("control channel %d\n", iwl_conf->control_channel);
6657 IWL_DEBUG_MAC80211("leave\n");
6658}
6659#else
6660static inline void iwl4965_ht_conf(struct iwl_priv *priv,
6661 struct ieee80211_bss_conf *bss_conf)
6662{
6663}
6664#endif
6665
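The iwl4965_ht_conf() helper added above is plain bit-mask extraction: it folds the mac80211 HT capability word and HT BSS info into the driver's iwl_ht_info, and drops back to non-FAT (20 MHz) operation when the extension-channel offset is neither "above" nor "below". As a reader's aid, a minimal standalone C model of the capability decoding follows; the mask values and the struct are illustrative stand-ins, not the definitions from the ieee80211 headers or from the driver.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustrative mask values; the real ones live in the ieee80211 headers. */
#define HT_CAP_MIMO_PS   0x000C   /* bits 2-3: MIMO (SM) power-save mode */
#define HT_CAP_GRN_FLD   0x0010   /* greenfield preamble supported */
#define HT_CAP_SGI_20    0x0020   /* short GI for 20 MHz */
#define HT_CAP_SGI_40    0x0040   /* short GI for 40 MHz */

struct ht_model {
	uint8_t mimo_ps;        /* extracted power-save mode, as ps_mode is */
	uint8_t sgf;            /* bit 0: SGI-20, bit 1: SGI-40 */
	uint8_t is_green_field;
};

/* Mirror the bit twiddling done in iwl4965_ht_conf(), nothing more. */
static void parse_ht_cap(uint16_t cap, struct ht_model *m)
{
	memset(m, 0, sizeof(*m));
	m->mimo_ps = (uint8_t)((cap & HT_CAP_MIMO_PS) >> 2);
	if (cap & HT_CAP_SGI_20)
		m->sgf |= 0x1;
	if (cap & HT_CAP_SGI_40)
		m->sgf |= 0x2;
	m->is_green_field = !!(cap & HT_CAP_GRN_FLD);
}

int main(void)
{
	struct ht_model m;

	parse_ht_cap(0x0060 | 0x0004, &m);  /* SGI-20 | SGI-40 | dynamic SM PS */
	printf("mimo_ps=%u sgf=0x%x gf=%u\n",
	       (unsigned)m.mimo_ps, (unsigned)m.sgf, (unsigned)m.is_green_field);
	return 0;
}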
6666#define IWL_DELAY_NEXT_SCAN_AFTER_ASSOC (HZ*6)
7824static void iwl4965_bss_info_changed(struct ieee80211_hw *hw, 6667static void iwl4965_bss_info_changed(struct ieee80211_hw *hw,
7825 struct ieee80211_vif *vif, 6668 struct ieee80211_vif *vif,
7826 struct ieee80211_bss_conf *bss_conf, 6669 struct ieee80211_bss_conf *bss_conf,
7827 u32 changes) 6670 u32 changes)
7828{ 6671{
7829 struct iwl4965_priv *priv = hw->priv; 6672 struct iwl_priv *priv = hw->priv;
6673
6674 IWL_DEBUG_MAC80211("changes = 0x%X\n", changes);
7830 6675
7831 if (changes & BSS_CHANGED_ERP_PREAMBLE) { 6676 if (changes & BSS_CHANGED_ERP_PREAMBLE) {
6677 IWL_DEBUG_MAC80211("ERP_PREAMBLE %d\n",
6678 bss_conf->use_short_preamble);
7832 if (bss_conf->use_short_preamble) 6679 if (bss_conf->use_short_preamble)
7833 priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK; 6680 priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
7834 else 6681 else
@@ -7836,35 +6683,58 @@ static void iwl4965_bss_info_changed(struct ieee80211_hw *hw,
7836 } 6683 }
7837 6684
7838 if (changes & BSS_CHANGED_ERP_CTS_PROT) { 6685 if (changes & BSS_CHANGED_ERP_CTS_PROT) {
7839 if (bss_conf->use_cts_prot && (priv->phymode != MODE_IEEE80211A)) 6686 IWL_DEBUG_MAC80211("ERP_CTS %d\n", bss_conf->use_cts_prot);
6687 if (bss_conf->use_cts_prot && (priv->band != IEEE80211_BAND_5GHZ))
7840 priv->staging_rxon.flags |= RXON_FLG_TGG_PROTECT_MSK; 6688 priv->staging_rxon.flags |= RXON_FLG_TGG_PROTECT_MSK;
7841 else 6689 else
7842 priv->staging_rxon.flags &= ~RXON_FLG_TGG_PROTECT_MSK; 6690 priv->staging_rxon.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
7843 } 6691 }
7844 6692
6693 if (changes & BSS_CHANGED_HT) {
6694 IWL_DEBUG_MAC80211("HT %d\n", bss_conf->assoc_ht);
6695 iwl4965_ht_conf(priv, bss_conf);
6696 iwl4965_set_rxon_chain(priv);
6697 }
6698
7845 if (changes & BSS_CHANGED_ASSOC) { 6699 if (changes & BSS_CHANGED_ASSOC) {
7846 /* 6700 IWL_DEBUG_MAC80211("ASSOC %d\n", bss_conf->assoc);
7847 * TODO: 6701 /* This should never happen as this function should
7848 * do stuff instead of sniffing assoc resp 6702 * never be called from interrupt context. */
7849 */ 6703 if (WARN_ON_ONCE(in_interrupt()))
6704 return;
6705 if (bss_conf->assoc) {
6706 priv->assoc_id = bss_conf->aid;
6707 priv->beacon_int = bss_conf->beacon_int;
6708 priv->timestamp = bss_conf->timestamp;
6709 priv->assoc_capability = bss_conf->assoc_capability;
6710 priv->next_scan_jiffies = jiffies +
6711 IWL_DELAY_NEXT_SCAN_AFTER_ASSOC;
6712 mutex_lock(&priv->mutex);
6713 iwl4965_post_associate(priv);
6714 mutex_unlock(&priv->mutex);
6715 } else {
6716 priv->assoc_id = 0;
6717 IWL_DEBUG_MAC80211("DISASSOC %d\n", bss_conf->assoc);
6718 }
6719 } else if (changes && iwl_is_associated(priv) && priv->assoc_id) {
6720 IWL_DEBUG_MAC80211("Associated Changes %d\n", changes);
6721 iwl_send_rxon_assoc(priv);
7850 } 6722 }
7851 6723
7852 if (iwl4965_is_associated(priv))
7853 iwl4965_send_rxon_assoc(priv);
7854} 6724}
7855 6725
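One detail worth noting in the reworked BSS_CHANGED_ASSOC branch above: on association the driver now records jiffies + IWL_DELAY_NEXT_SCAN_AFTER_ASSOC (HZ*6, i.e. six seconds) in next_scan_jiffies, presumably so that a scan does not immediately disturb a fresh association. A standalone sketch of that kind of throttle follows; the names are hypothetical and it uses a plain >= comparison instead of the kernel's wrap-safe time_after() helpers.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define HZ 100                      /* pretend tick rate for the sketch */
static uint64_t jiffies;            /* stand-in for the kernel's jiffies */

struct assoc_state {
	uint64_t next_scan_jiffies; /* earliest time a new scan is allowed */
};

static void on_associated(struct assoc_state *s)
{
	s->next_scan_jiffies = jiffies + 6 * HZ;  /* IWL_DELAY_NEXT_SCAN_AFTER_ASSOC */
}

static bool scan_allowed(const struct assoc_state *s)
{
	return jiffies >= s->next_scan_jiffies;
}

int main(void)
{
	struct assoc_state s = { 0 };

	jiffies = 1000;
	on_associated(&s);
	printf("right after assoc: %d\n", scan_allowed(&s));  /* 0 */

	jiffies += 6 * HZ;
	printf("six seconds later: %d\n", scan_allowed(&s));  /* 1 */
	return 0;
}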
7856static int iwl4965_mac_hw_scan(struct ieee80211_hw *hw, u8 *ssid, size_t len) 6726static int iwl4965_mac_hw_scan(struct ieee80211_hw *hw, u8 *ssid, size_t len)
7857{ 6727{
7858 int rc = 0; 6728 int rc = 0;
7859 unsigned long flags; 6729 unsigned long flags;
7860 struct iwl4965_priv *priv = hw->priv; 6730 struct iwl_priv *priv = hw->priv;
7861 6731
7862 IWL_DEBUG_MAC80211("enter\n"); 6732 IWL_DEBUG_MAC80211("enter\n");
7863 6733
7864 mutex_lock(&priv->mutex); 6734 mutex_lock(&priv->mutex);
7865 spin_lock_irqsave(&priv->lock, flags); 6735 spin_lock_irqsave(&priv->lock, flags);
7866 6736
7867 if (!iwl4965_is_ready_rf(priv)) { 6737 if (!iwl_is_ready_rf(priv)) {
7868 rc = -EIO; 6738 rc = -EIO;
7869 IWL_DEBUG_MAC80211("leave - not ready or exit pending\n"); 6739 IWL_DEBUG_MAC80211("leave - not ready or exit pending\n");
7870 goto out_unlock; 6740 goto out_unlock;
@@ -7910,18 +6780,67 @@ out_unlock:
7910 return rc; 6780 return rc;
7911} 6781}
7912 6782
6783static void iwl4965_mac_update_tkip_key(struct ieee80211_hw *hw,
6784 struct ieee80211_key_conf *keyconf, const u8 *addr,
6785 u32 iv32, u16 *phase1key)
6786{
6787 struct iwl_priv *priv = hw->priv;
6788 u8 sta_id = IWL_INVALID_STATION;
6789 unsigned long flags;
6790 __le16 key_flags = 0;
6791 int i;
6792 DECLARE_MAC_BUF(mac);
6793
6794 IWL_DEBUG_MAC80211("enter\n");
6795
6796 sta_id = iwl4965_hw_find_station(priv, addr);
6797 if (sta_id == IWL_INVALID_STATION) {
6798 IWL_DEBUG_MAC80211("leave - %s not in station map.\n",
6799 print_mac(mac, addr));
6800 return;
6801 }
6802
6803 iwl4965_scan_cancel_timeout(priv, 100);
6804
6805 key_flags |= (STA_KEY_FLG_TKIP | STA_KEY_FLG_MAP_KEY_MSK);
6806 key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
6807 key_flags &= ~STA_KEY_FLG_INVALID;
6808
6809 if (sta_id == priv->hw_params.bcast_sta_id)
6810 key_flags |= STA_KEY_MULTICAST_MSK;
6811
6812 spin_lock_irqsave(&priv->sta_lock, flags);
6813
6814 priv->stations[sta_id].sta.key.key_flags = key_flags;
6815 priv->stations[sta_id].sta.key.tkip_rx_tsc_byte2 = (u8) iv32;
6816
6817 for (i = 0; i < 5; i++)
6818 priv->stations[sta_id].sta.key.tkip_rx_ttak[i] =
6819 cpu_to_le16(phase1key[i]);
6820
6821 priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
6822 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
6823
6824 iwl4965_send_add_station(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
6825
6826 spin_unlock_irqrestore(&priv->sta_lock, flags);
6827
6828 IWL_DEBUG_MAC80211("leave\n");
6829}
6830
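The new update_tkip_key callback above refreshes only the TKIP phase-1 ("TTAK") receive key for one station: it stores the low byte of iv32, converts the five 16-bit phase-1 words to little-endian, and pushes them with an asynchronous ADD_STA command carrying STA_MODIFY_KEY_MASK, instead of re-installing the whole key. The packing is the only data massaging involved; a self-contained sketch of it follows, using generic helpers and a made-up wire struct rather than the driver's own types.

#include <stdint.h>
#include <stdio.h>

/* Write a 16-bit word in little-endian byte order, as cpu_to_le16() would. */
static void put_le16(uint8_t *dst, uint16_t v)
{
	dst[0] = (uint8_t)(v & 0xff);
	dst[1] = (uint8_t)(v >> 8);
}

/* Hypothetical wire image of the per-station TKIP receive state:
 * the low byte of iv32 plus five little-endian phase-1 words. */
struct tkip_rx_state {
	uint8_t tsc_byte2;
	uint8_t ttak[10];
};

static void pack_phase1(struct tkip_rx_state *s, uint32_t iv32,
			const uint16_t phase1key[5])
{
	int i;

	s->tsc_byte2 = (uint8_t)iv32;
	for (i = 0; i < 5; i++)
		put_le16(&s->ttak[2 * i], phase1key[i]);
}

int main(void)
{
	const uint16_t p1k[5] = { 0x1111, 0x2222, 0x3333, 0x4444, 0x5555 };
	struct tkip_rx_state s;
	int i;

	pack_phase1(&s, 0xa1b2c3d4, p1k);
	printf("tsc_byte2=%02x ttak=", (unsigned)s.tsc_byte2);
	for (i = 0; i < 10; i++)
		printf("%02x", (unsigned)s.ttak[i]);
	printf("\n");
	return 0;
}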
7913static int iwl4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, 6831static int iwl4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
7914 const u8 *local_addr, const u8 *addr, 6832 const u8 *local_addr, const u8 *addr,
7915 struct ieee80211_key_conf *key) 6833 struct ieee80211_key_conf *key)
7916{ 6834{
7917 struct iwl4965_priv *priv = hw->priv; 6835 struct iwl_priv *priv = hw->priv;
7918 DECLARE_MAC_BUF(mac); 6836 DECLARE_MAC_BUF(mac);
7919 int rc = 0; 6837 int ret = 0;
7920 u8 sta_id; 6838 u8 sta_id = IWL_INVALID_STATION;
6839 u8 is_default_wep_key = 0;
7921 6840
7922 IWL_DEBUG_MAC80211("enter\n"); 6841 IWL_DEBUG_MAC80211("enter\n");
7923 6842
7924 if (!iwl4965_param_hwcrypto) { 6843 if (priv->cfg->mod_params->sw_crypto) {
7925 IWL_DEBUG_MAC80211("leave - hwcrypto disabled\n"); 6844 IWL_DEBUG_MAC80211("leave - hwcrypto disabled\n");
7926 return -EOPNOTSUPP; 6845 return -EOPNOTSUPP;
7927 } 6846 }
@@ -7935,53 +6854,61 @@ static int iwl4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
7935 IWL_DEBUG_MAC80211("leave - %s not in station map.\n", 6854 IWL_DEBUG_MAC80211("leave - %s not in station map.\n",
7936 print_mac(mac, addr)); 6855 print_mac(mac, addr));
7937 return -EINVAL; 6856 return -EINVAL;
6857
7938 } 6858 }
7939 6859
7940 mutex_lock(&priv->mutex); 6860 mutex_lock(&priv->mutex);
7941
7942 iwl4965_scan_cancel_timeout(priv, 100); 6861 iwl4965_scan_cancel_timeout(priv, 100);
6862 mutex_unlock(&priv->mutex);
6863
6864 /* If we are getting WEP group key and we didn't receive any key mapping
6865 * so far, we are in legacy wep mode (group key only), otherwise we are
6866 * in 1X mode.
6867 * In legacy wep mode, we use another host command to the uCode */
6868 if (key->alg == ALG_WEP && sta_id == priv->hw_params.bcast_sta_id &&
6869 priv->iw_mode != IEEE80211_IF_TYPE_AP) {
6870 if (cmd == SET_KEY)
6871 is_default_wep_key = !priv->key_mapping_key;
6872 else
6873 is_default_wep_key = priv->default_wep_key;
6874 }
7943 6875
7944 switch (cmd) { 6876 switch (cmd) {
7945 case SET_KEY: 6877 case SET_KEY:
7946 rc = iwl4965_update_sta_key_info(priv, key, sta_id); 6878 if (is_default_wep_key)
7947 if (!rc) { 6879 ret = iwl_set_default_wep_key(priv, key);
7948 iwl4965_set_rxon_hwcrypto(priv, 1); 6880 else
7949 iwl4965_commit_rxon(priv); 6881 ret = iwl_set_dynamic_key(priv, key, sta_id);
7950 key->hw_key_idx = sta_id; 6882
7951 IWL_DEBUG_MAC80211("set_key success, using hwcrypto\n"); 6883 IWL_DEBUG_MAC80211("enable hwcrypto key\n");
7952 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
7953 }
7954 break; 6884 break;
7955 case DISABLE_KEY: 6885 case DISABLE_KEY:
7956 rc = iwl4965_clear_sta_key_info(priv, sta_id); 6886 if (is_default_wep_key)
7957 if (!rc) { 6887 ret = iwl_remove_default_wep_key(priv, key);
7958 iwl4965_set_rxon_hwcrypto(priv, 0); 6888 else
7959 iwl4965_commit_rxon(priv); 6889 ret = iwl_remove_dynamic_key(priv, sta_id);
7960 IWL_DEBUG_MAC80211("disable hwcrypto key\n"); 6890
7961 } 6891 IWL_DEBUG_MAC80211("disable hwcrypto key\n");
7962 break; 6892 break;
7963 default: 6893 default:
7964 rc = -EINVAL; 6894 ret = -EINVAL;
7965 } 6895 }
7966 6896
7967 IWL_DEBUG_MAC80211("leave\n"); 6897 IWL_DEBUG_MAC80211("leave\n");
7968 mutex_unlock(&priv->mutex);
7969 6898
7970 return rc; 6899 return ret;
7971} 6900}
7972 6901
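The rewritten set_key path above separates a legacy "default" WEP key from a key-mapping key: a WEP key installed for the broadcast station on a non-AP interface, while no mapping key has been seen yet, is sent through the default-WEP uCode command; everything else goes through the dynamic-key path. The decision itself is a small predicate; a standalone model that mirrors the condition in the hunk is:

#include <stdbool.h>
#include <stdio.h>

enum key_alg { ALG_WEP, ALG_TKIP, ALG_CCMP };
enum key_cmd { SET_KEY, DISABLE_KEY };
enum if_type { IF_TYPE_STA, IF_TYPE_IBSS, IF_TYPE_AP };

struct key_ctx {
	bool key_mapping_key;   /* a pairwise (mapping) key has been installed */
	bool default_wep_key;   /* a default WEP key is currently installed */
};

/* Mirrors the test added in iwl4965_mac_set_key(). */
static bool is_default_wep_key(const struct key_ctx *c, enum key_alg alg,
			       enum key_cmd cmd, bool bcast_sta,
			       enum if_type mode)
{
	if (alg != ALG_WEP || !bcast_sta || mode == IF_TYPE_AP)
		return false;
	return (cmd == SET_KEY) ? !c->key_mapping_key : c->default_wep_key;
}

int main(void)
{
	struct key_ctx c = { .key_mapping_key = false, .default_wep_key = false };

	/* Legacy WEP: the group key arrives before any pairwise key. */
	printf("%d\n", is_default_wep_key(&c, ALG_WEP, SET_KEY, true, IF_TYPE_STA));  /* 1 */

	/* 802.1X: a mapping key already exists, so the dynamic path is used. */
	c.key_mapping_key = true;
	printf("%d\n", is_default_wep_key(&c, ALG_WEP, SET_KEY, true, IF_TYPE_STA));  /* 0 */
	return 0;
}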
7973static int iwl4965_mac_conf_tx(struct ieee80211_hw *hw, int queue, 6902static int iwl4965_mac_conf_tx(struct ieee80211_hw *hw, int queue,
7974 const struct ieee80211_tx_queue_params *params) 6903 const struct ieee80211_tx_queue_params *params)
7975{ 6904{
7976 struct iwl4965_priv *priv = hw->priv; 6905 struct iwl_priv *priv = hw->priv;
7977#ifdef CONFIG_IWL4965_QOS
7978 unsigned long flags; 6906 unsigned long flags;
7979 int q; 6907 int q;
7980#endif /* CONFIG_IWL4965_QOS */
7981 6908
7982 IWL_DEBUG_MAC80211("enter\n"); 6909 IWL_DEBUG_MAC80211("enter\n");
7983 6910
7984 if (!iwl4965_is_ready_rf(priv)) { 6911 if (!iwl_is_ready_rf(priv)) {
7985 IWL_DEBUG_MAC80211("leave - RF not ready\n"); 6912 IWL_DEBUG_MAC80211("leave - RF not ready\n");
7986 return -EIO; 6913 return -EIO;
7987 } 6914 }
@@ -7991,7 +6918,6 @@ static int iwl4965_mac_conf_tx(struct ieee80211_hw *hw, int queue,
7991 return 0; 6918 return 0;
7992 } 6919 }
7993 6920
7994#ifdef CONFIG_IWL4965_QOS
7995 if (!priv->qos_data.qos_enable) { 6921 if (!priv->qos_data.qos_enable) {
7996 priv->qos_data.qos_active = 0; 6922 priv->qos_data.qos_active = 0;
7997 IWL_DEBUG_MAC80211("leave - qos not enabled\n"); 6923 IWL_DEBUG_MAC80211("leave - qos not enabled\n");
@@ -8005,7 +6931,7 @@ static int iwl4965_mac_conf_tx(struct ieee80211_hw *hw, int queue,
8005 priv->qos_data.def_qos_parm.ac[q].cw_max = cpu_to_le16(params->cw_max); 6931 priv->qos_data.def_qos_parm.ac[q].cw_max = cpu_to_le16(params->cw_max);
8006 priv->qos_data.def_qos_parm.ac[q].aifsn = params->aifs; 6932 priv->qos_data.def_qos_parm.ac[q].aifsn = params->aifs;
8007 priv->qos_data.def_qos_parm.ac[q].edca_txop = 6933 priv->qos_data.def_qos_parm.ac[q].edca_txop =
8008 cpu_to_le16((params->burst_time * 100)); 6934 cpu_to_le16((params->txop * 32));
8009 6935
8010 priv->qos_data.def_qos_parm.ac[q].reserved1 = 0; 6936 priv->qos_data.def_qos_parm.ac[q].reserved1 = 0;
8011 priv->qos_data.qos_active = 1; 6937 priv->qos_data.qos_active = 1;
@@ -8015,13 +6941,11 @@ static int iwl4965_mac_conf_tx(struct ieee80211_hw *hw, int queue,
8015 mutex_lock(&priv->mutex); 6941 mutex_lock(&priv->mutex);
8016 if (priv->iw_mode == IEEE80211_IF_TYPE_AP) 6942 if (priv->iw_mode == IEEE80211_IF_TYPE_AP)
8017 iwl4965_activate_qos(priv, 1); 6943 iwl4965_activate_qos(priv, 1);
8018 else if (priv->assoc_id && iwl4965_is_associated(priv)) 6944 else if (priv->assoc_id && iwl_is_associated(priv))
8019 iwl4965_activate_qos(priv, 0); 6945 iwl4965_activate_qos(priv, 0);
8020 6946
8021 mutex_unlock(&priv->mutex); 6947 mutex_unlock(&priv->mutex);
8022 6948
8023#endif /*CONFIG_IWL4965_QOS */
8024
8025 IWL_DEBUG_MAC80211("leave\n"); 6949 IWL_DEBUG_MAC80211("leave\n");
8026 return 0; 6950 return 0;
8027} 6951}
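The conf_tx hunk above also changes the EDCA TXOP programming from burst_time * 100 to params->txop * 32: mac80211's tx_queue_params.txop is given in units of 32 microseconds, so both scalings end up writing microseconds into edca_txop. A one-line helper makes the unit change explicit; the figure in main() is only an example value.

#include <stdint.h>
#include <stdio.h>

/* mac80211 expresses the TXOP limit in 32-microsecond units. */
static uint16_t txop_to_usec(uint16_t txop_32us)
{
	return (uint16_t)(txop_32us * 32);
}

int main(void)
{
	/* e.g. a TXOP limit of 94 units is 3008 us, a typical video-AC value */
	printf("%u us\n", (unsigned)txop_to_usec(94));
	return 0;
}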
@@ -8029,7 +6953,7 @@ static int iwl4965_mac_conf_tx(struct ieee80211_hw *hw, int queue,
8029static int iwl4965_mac_get_tx_stats(struct ieee80211_hw *hw, 6953static int iwl4965_mac_get_tx_stats(struct ieee80211_hw *hw,
8030 struct ieee80211_tx_queue_stats *stats) 6954 struct ieee80211_tx_queue_stats *stats)
8031{ 6955{
8032 struct iwl4965_priv *priv = hw->priv; 6956 struct iwl_priv *priv = hw->priv;
8033 int i, avail; 6957 int i, avail;
8034 struct iwl4965_tx_queue *txq; 6958 struct iwl4965_tx_queue *txq;
8035 struct iwl4965_queue *q; 6959 struct iwl4965_queue *q;
@@ -8037,7 +6961,7 @@ static int iwl4965_mac_get_tx_stats(struct ieee80211_hw *hw,
8037 6961
8038 IWL_DEBUG_MAC80211("enter\n"); 6962 IWL_DEBUG_MAC80211("enter\n");
8039 6963
8040 if (!iwl4965_is_ready_rf(priv)) { 6964 if (!iwl_is_ready_rf(priv)) {
8041 IWL_DEBUG_MAC80211("leave - RF not ready\n"); 6965 IWL_DEBUG_MAC80211("leave - RF not ready\n");
8042 return -EIO; 6966 return -EIO;
8043 } 6967 }
@@ -8080,7 +7004,7 @@ static u64 iwl4965_mac_get_tsf(struct ieee80211_hw *hw)
8080 7004
8081static void iwl4965_mac_reset_tsf(struct ieee80211_hw *hw) 7005static void iwl4965_mac_reset_tsf(struct ieee80211_hw *hw)
8082{ 7006{
8083 struct iwl4965_priv *priv = hw->priv; 7007 struct iwl_priv *priv = hw->priv;
8084 unsigned long flags; 7008 unsigned long flags;
8085 7009
8086 mutex_lock(&priv->mutex); 7010 mutex_lock(&priv->mutex);
@@ -8091,30 +7015,15 @@ static void iwl4965_mac_reset_tsf(struct ieee80211_hw *hw)
8091 spin_lock_irqsave(&priv->lock, flags); 7015 spin_lock_irqsave(&priv->lock, flags);
8092 memset(&priv->current_ht_config, 0, sizeof(struct iwl_ht_info)); 7016 memset(&priv->current_ht_config, 0, sizeof(struct iwl_ht_info));
8093 spin_unlock_irqrestore(&priv->lock, flags); 7017 spin_unlock_irqrestore(&priv->lock, flags);
8094#ifdef CONFIG_IWL4965_HT_AGG
8095/* if (priv->lq_mngr.agg_ctrl.granted_ba)
8096 iwl4965_turn_off_agg(priv, TID_ALL_SPECIFIED);*/
8097
8098 memset(&(priv->lq_mngr.agg_ctrl), 0, sizeof(struct iwl4965_agg_control));
8099 priv->lq_mngr.agg_ctrl.tid_traffic_load_threshold = 10;
8100 priv->lq_mngr.agg_ctrl.ba_timeout = 5000;
8101 priv->lq_mngr.agg_ctrl.auto_agg = 1;
8102
8103 if (priv->lq_mngr.agg_ctrl.auto_agg)
8104 priv->lq_mngr.agg_ctrl.requested_ba = TID_ALL_ENABLED;
8105#endif /*CONFIG_IWL4965_HT_AGG */
8106#endif /* CONFIG_IWL4965_HT */ 7018#endif /* CONFIG_IWL4965_HT */
8107 7019
8108#ifdef CONFIG_IWL4965_QOS 7020 iwlcore_reset_qos(priv);
8109 iwl4965_reset_qos(priv);
8110#endif
8111 7021
8112 cancel_delayed_work(&priv->post_associate); 7022 cancel_delayed_work(&priv->post_associate);
8113 7023
8114 spin_lock_irqsave(&priv->lock, flags); 7024 spin_lock_irqsave(&priv->lock, flags);
8115 priv->assoc_id = 0; 7025 priv->assoc_id = 0;
8116 priv->assoc_capability = 0; 7026 priv->assoc_capability = 0;
8117 priv->call_post_assoc_from_beacon = 0;
8118 priv->assoc_station_added = 0; 7027 priv->assoc_station_added = 0;
8119 7028
8120 /* new association get rid of ibss beacon skb */ 7029 /* new association get rid of ibss beacon skb */
@@ -8124,14 +7033,13 @@ static void iwl4965_mac_reset_tsf(struct ieee80211_hw *hw)
8124 priv->ibss_beacon = NULL; 7033 priv->ibss_beacon = NULL;
8125 7034
8126 priv->beacon_int = priv->hw->conf.beacon_int; 7035 priv->beacon_int = priv->hw->conf.beacon_int;
8127 priv->timestamp1 = 0; 7036 priv->timestamp = 0;
8128 priv->timestamp0 = 0;
8129 if ((priv->iw_mode == IEEE80211_IF_TYPE_STA)) 7037 if ((priv->iw_mode == IEEE80211_IF_TYPE_STA))
8130 priv->beacon_int = 0; 7038 priv->beacon_int = 0;
8131 7039
8132 spin_unlock_irqrestore(&priv->lock, flags); 7040 spin_unlock_irqrestore(&priv->lock, flags);
8133 7041
8134 if (!iwl4965_is_ready_rf(priv)) { 7042 if (!iwl_is_ready_rf(priv)) {
8135 IWL_DEBUG_MAC80211("leave - not ready\n"); 7043 IWL_DEBUG_MAC80211("leave - not ready\n");
8136 mutex_unlock(&priv->mutex); 7044 mutex_unlock(&priv->mutex);
8137 return; 7045 return;
@@ -8166,13 +7074,13 @@ static void iwl4965_mac_reset_tsf(struct ieee80211_hw *hw)
8166static int iwl4965_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb, 7074static int iwl4965_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb,
8167 struct ieee80211_tx_control *control) 7075 struct ieee80211_tx_control *control)
8168{ 7076{
8169 struct iwl4965_priv *priv = hw->priv; 7077 struct iwl_priv *priv = hw->priv;
8170 unsigned long flags; 7078 unsigned long flags;
8171 7079
8172 mutex_lock(&priv->mutex); 7080 mutex_lock(&priv->mutex);
8173 IWL_DEBUG_MAC80211("enter\n"); 7081 IWL_DEBUG_MAC80211("enter\n");
8174 7082
8175 if (!iwl4965_is_ready_rf(priv)) { 7083 if (!iwl_is_ready_rf(priv)) {
8176 IWL_DEBUG_MAC80211("leave - RF not ready\n"); 7084 IWL_DEBUG_MAC80211("leave - RF not ready\n");
8177 mutex_unlock(&priv->mutex); 7085 mutex_unlock(&priv->mutex);
8178 return -EIO; 7086 return -EIO;
@@ -8196,9 +7104,7 @@ static int iwl4965_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *sk
8196 IWL_DEBUG_MAC80211("leave\n"); 7104 IWL_DEBUG_MAC80211("leave\n");
8197 spin_unlock_irqrestore(&priv->lock, flags); 7105 spin_unlock_irqrestore(&priv->lock, flags);
8198 7106
8199#ifdef CONFIG_IWL4965_QOS 7107 iwlcore_reset_qos(priv);
8200 iwl4965_reset_qos(priv);
8201#endif
8202 7108
8203 queue_work(priv->workqueue, &priv->post_associate.work); 7109 queue_work(priv->workqueue, &priv->post_associate.work);
8204 7110
@@ -8207,111 +7113,13 @@ static int iwl4965_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *sk
8207 return 0; 7113 return 0;
8208} 7114}
8209 7115
8210#ifdef CONFIG_IWL4965_HT
8211
8212static void iwl4965_ht_info_fill(struct ieee80211_conf *conf,
8213 struct iwl4965_priv *priv)
8214{
8215 struct iwl_ht_info *iwl_conf = &priv->current_ht_config;
8216 struct ieee80211_ht_info *ht_conf = &conf->ht_conf;
8217 struct ieee80211_ht_bss_info *ht_bss_conf = &conf->ht_bss_conf;
8218
8219 IWL_DEBUG_MAC80211("enter: \n");
8220
8221 if (!(conf->flags & IEEE80211_CONF_SUPPORT_HT_MODE)) {
8222 iwl_conf->is_ht = 0;
8223 return;
8224 }
8225
8226 iwl_conf->is_ht = 1;
8227 priv->ps_mode = (u8)((ht_conf->cap & IEEE80211_HT_CAP_MIMO_PS) >> 2);
8228
8229 if (ht_conf->cap & IEEE80211_HT_CAP_SGI_20)
8230 iwl_conf->sgf |= 0x1;
8231 if (ht_conf->cap & IEEE80211_HT_CAP_SGI_40)
8232 iwl_conf->sgf |= 0x2;
8233
8234 iwl_conf->is_green_field = !!(ht_conf->cap & IEEE80211_HT_CAP_GRN_FLD);
8235 iwl_conf->max_amsdu_size =
8236 !!(ht_conf->cap & IEEE80211_HT_CAP_MAX_AMSDU);
8237 iwl_conf->supported_chan_width =
8238 !!(ht_conf->cap & IEEE80211_HT_CAP_SUP_WIDTH);
8239 iwl_conf->tx_mimo_ps_mode =
8240 (u8)((ht_conf->cap & IEEE80211_HT_CAP_MIMO_PS) >> 2);
8241 memcpy(iwl_conf->supp_mcs_set, ht_conf->supp_mcs_set, 16);
8242
8243 iwl_conf->control_channel = ht_bss_conf->primary_channel;
8244 iwl_conf->extension_chan_offset =
8245 ht_bss_conf->bss_cap & IEEE80211_HT_IE_CHA_SEC_OFFSET;
8246 iwl_conf->tx_chan_width =
8247 !!(ht_bss_conf->bss_cap & IEEE80211_HT_IE_CHA_WIDTH);
8248 iwl_conf->ht_protection =
8249 ht_bss_conf->bss_op_mode & IEEE80211_HT_IE_HT_PROTECTION;
8250 iwl_conf->non_GF_STA_present =
8251 !!(ht_bss_conf->bss_op_mode & IEEE80211_HT_IE_NON_GF_STA_PRSNT);
8252
8253 IWL_DEBUG_MAC80211("control channel %d\n",
8254 iwl_conf->control_channel);
8255 IWL_DEBUG_MAC80211("leave\n");
8256}
8257
8258static int iwl4965_mac_conf_ht(struct ieee80211_hw *hw,
8259 struct ieee80211_conf *conf)
8260{
8261 struct iwl4965_priv *priv = hw->priv;
8262
8263 IWL_DEBUG_MAC80211("enter: \n");
8264
8265 iwl4965_ht_info_fill(conf, priv);
8266 iwl4965_set_rxon_chain(priv);
8267
8268 if (priv && priv->assoc_id &&
8269 (priv->iw_mode == IEEE80211_IF_TYPE_STA)) {
8270 unsigned long flags;
8271
8272 spin_lock_irqsave(&priv->lock, flags);
8273 if (priv->beacon_int)
8274 queue_work(priv->workqueue, &priv->post_associate.work);
8275 else
8276 priv->call_post_assoc_from_beacon = 1;
8277 spin_unlock_irqrestore(&priv->lock, flags);
8278 }
8279
8280 IWL_DEBUG_MAC80211("leave:\n");
8281 return 0;
8282}
8283
8284static void iwl4965_set_ht_capab(struct ieee80211_hw *hw,
8285 struct ieee80211_ht_cap *ht_cap,
8286 u8 use_current_config)
8287{
8288 struct ieee80211_conf *conf = &hw->conf;
8289 struct ieee80211_hw_mode *mode = conf->mode;
8290
8291 if (use_current_config) {
8292 ht_cap->cap_info = cpu_to_le16(conf->ht_conf.cap);
8293 memcpy(ht_cap->supp_mcs_set,
8294 conf->ht_conf.supp_mcs_set, 16);
8295 } else {
8296 ht_cap->cap_info = cpu_to_le16(mode->ht_info.cap);
8297 memcpy(ht_cap->supp_mcs_set,
8298 mode->ht_info.supp_mcs_set, 16);
8299 }
8300 ht_cap->ampdu_params_info =
8301 (mode->ht_info.ampdu_factor & IEEE80211_HT_CAP_AMPDU_FACTOR) |
8302 ((mode->ht_info.ampdu_density << 2) &
8303 IEEE80211_HT_CAP_AMPDU_DENSITY);
8304}
8305
8306#endif /*CONFIG_IWL4965_HT*/
8307
8308/***************************************************************************** 7116/*****************************************************************************
8309 * 7117 *
8310 * sysfs attributes 7118 * sysfs attributes
8311 * 7119 *
8312 *****************************************************************************/ 7120 *****************************************************************************/
8313 7121
8314#ifdef CONFIG_IWL4965_DEBUG 7122#ifdef CONFIG_IWLWIFI_DEBUG
8315 7123
8316/* 7124/*
8317 * The following adds a new attribute to the sysfs representation 7125 * The following adds a new attribute to the sysfs representation
@@ -8323,7 +7131,7 @@ static void iwl4965_set_ht_capab(struct ieee80211_hw *hw,
8323 7131
8324static ssize_t show_debug_level(struct device_driver *d, char *buf) 7132static ssize_t show_debug_level(struct device_driver *d, char *buf)
8325{ 7133{
8326 return sprintf(buf, "0x%08X\n", iwl4965_debug_level); 7134 return sprintf(buf, "0x%08X\n", iwl_debug_level);
8327} 7135}
8328static ssize_t store_debug_level(struct device_driver *d, 7136static ssize_t store_debug_level(struct device_driver *d,
8329 const char *buf, size_t count) 7137 const char *buf, size_t count)
@@ -8336,7 +7144,7 @@ static ssize_t store_debug_level(struct device_driver *d,
8336 printk(KERN_INFO DRV_NAME 7144 printk(KERN_INFO DRV_NAME
8337 ": %s is not in hex or decimal form.\n", buf); 7145 ": %s is not in hex or decimal form.\n", buf);
8338 else 7146 else
8339 iwl4965_debug_level = val; 7147 iwl_debug_level = val;
8340 7148
8341 return strnlen(buf, count); 7149 return strnlen(buf, count);
8342} 7150}
@@ -8344,45 +7152,15 @@ static ssize_t store_debug_level(struct device_driver *d,
8344static DRIVER_ATTR(debug_level, S_IWUSR | S_IRUGO, 7152static DRIVER_ATTR(debug_level, S_IWUSR | S_IRUGO,
8345 show_debug_level, store_debug_level); 7153 show_debug_level, store_debug_level);
8346 7154
8347#endif /* CONFIG_IWL4965_DEBUG */ 7155#endif /* CONFIG_IWLWIFI_DEBUG */
8348
8349static ssize_t show_rf_kill(struct device *d,
8350 struct device_attribute *attr, char *buf)
8351{
8352 /*
8353 * 0 - RF kill not enabled
8354 * 1 - SW based RF kill active (sysfs)
8355 * 2 - HW based RF kill active
8356 * 3 - Both HW and SW based RF kill active
8357 */
8358 struct iwl4965_priv *priv = (struct iwl4965_priv *)d->driver_data;
8359 int val = (test_bit(STATUS_RF_KILL_SW, &priv->status) ? 0x1 : 0x0) |
8360 (test_bit(STATUS_RF_KILL_HW, &priv->status) ? 0x2 : 0x0);
8361
8362 return sprintf(buf, "%i\n", val);
8363}
8364 7156
8365static ssize_t store_rf_kill(struct device *d,
8366 struct device_attribute *attr,
8367 const char *buf, size_t count)
8368{
8369 struct iwl4965_priv *priv = (struct iwl4965_priv *)d->driver_data;
8370
8371 mutex_lock(&priv->mutex);
8372 iwl4965_radio_kill_sw(priv, buf[0] == '1');
8373 mutex_unlock(&priv->mutex);
8374
8375 return count;
8376}
8377
8378static DEVICE_ATTR(rf_kill, S_IWUSR | S_IRUGO, show_rf_kill, store_rf_kill);
8379 7157
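The rf_kill sysfs attribute removed above reported the radio-kill state in two bits, exactly as its comment spelled out: 0 = RF kill not enabled, 1 = SW kill active, 2 = HW kill active, 3 = both. For reference, the value is simply two status bits OR-ed together; a trivial standalone version of the encoding:

#include <stdbool.h>
#include <stdio.h>

/* 0: off, 1: SW kill, 2: HW kill, 3: both (per the removed comment). */
static int rf_kill_state(bool sw_kill, bool hw_kill)
{
	return (sw_kill ? 0x1 : 0x0) | (hw_kill ? 0x2 : 0x0);
}

int main(void)
{
	printf("%d %d %d %d\n",
	       rf_kill_state(false, false), rf_kill_state(true, false),
	       rf_kill_state(false, true), rf_kill_state(true, true));
	return 0;
}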
8380static ssize_t show_temperature(struct device *d, 7158static ssize_t show_temperature(struct device *d,
8381 struct device_attribute *attr, char *buf) 7159 struct device_attribute *attr, char *buf)
8382{ 7160{
8383 struct iwl4965_priv *priv = (struct iwl4965_priv *)d->driver_data; 7161 struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
8384 7162
8385 if (!iwl4965_is_alive(priv)) 7163 if (!iwl_is_alive(priv))
8386 return -EAGAIN; 7164 return -EAGAIN;
8387 7165
8388 return sprintf(buf, "%d\n", iwl4965_hw_get_temperature(priv)); 7166 return sprintf(buf, "%d\n", iwl4965_hw_get_temperature(priv));
@@ -8394,7 +7172,7 @@ static ssize_t show_rs_window(struct device *d,
8394 struct device_attribute *attr, 7172 struct device_attribute *attr,
8395 char *buf) 7173 char *buf)
8396{ 7174{
8397 struct iwl4965_priv *priv = d->driver_data; 7175 struct iwl_priv *priv = d->driver_data;
8398 return iwl4965_fill_rs_info(priv->hw, buf, IWL_AP_ID); 7176 return iwl4965_fill_rs_info(priv->hw, buf, IWL_AP_ID);
8399} 7177}
8400static DEVICE_ATTR(rs_window, S_IRUGO, show_rs_window, NULL); 7178static DEVICE_ATTR(rs_window, S_IRUGO, show_rs_window, NULL);
@@ -8402,7 +7180,7 @@ static DEVICE_ATTR(rs_window, S_IRUGO, show_rs_window, NULL);
8402static ssize_t show_tx_power(struct device *d, 7180static ssize_t show_tx_power(struct device *d,
8403 struct device_attribute *attr, char *buf) 7181 struct device_attribute *attr, char *buf)
8404{ 7182{
8405 struct iwl4965_priv *priv = (struct iwl4965_priv *)d->driver_data; 7183 struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
8406 return sprintf(buf, "%d\n", priv->user_txpower_limit); 7184 return sprintf(buf, "%d\n", priv->user_txpower_limit);
8407} 7185}
8408 7186
@@ -8410,7 +7188,7 @@ static ssize_t store_tx_power(struct device *d,
8410 struct device_attribute *attr, 7188 struct device_attribute *attr,
8411 const char *buf, size_t count) 7189 const char *buf, size_t count)
8412{ 7190{
8413 struct iwl4965_priv *priv = (struct iwl4965_priv *)d->driver_data; 7191 struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
8414 char *p = (char *)buf; 7192 char *p = (char *)buf;
8415 u32 val; 7193 u32 val;
8416 7194
@@ -8429,7 +7207,7 @@ static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, show_tx_power, store_tx_power);
8429static ssize_t show_flags(struct device *d, 7207static ssize_t show_flags(struct device *d,
8430 struct device_attribute *attr, char *buf) 7208 struct device_attribute *attr, char *buf)
8431{ 7209{
8432 struct iwl4965_priv *priv = (struct iwl4965_priv *)d->driver_data; 7210 struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
8433 7211
8434 return sprintf(buf, "0x%04X\n", priv->active_rxon.flags); 7212 return sprintf(buf, "0x%04X\n", priv->active_rxon.flags);
8435} 7213}
@@ -8438,7 +7216,7 @@ static ssize_t store_flags(struct device *d,
8438 struct device_attribute *attr, 7216 struct device_attribute *attr,
8439 const char *buf, size_t count) 7217 const char *buf, size_t count)
8440{ 7218{
8441 struct iwl4965_priv *priv = (struct iwl4965_priv *)d->driver_data; 7219 struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
8442 u32 flags = simple_strtoul(buf, NULL, 0); 7220 u32 flags = simple_strtoul(buf, NULL, 0);
8443 7221
8444 mutex_lock(&priv->mutex); 7222 mutex_lock(&priv->mutex);
@@ -8463,7 +7241,7 @@ static DEVICE_ATTR(flags, S_IWUSR | S_IRUGO, show_flags, store_flags);
8463static ssize_t show_filter_flags(struct device *d, 7241static ssize_t show_filter_flags(struct device *d,
8464 struct device_attribute *attr, char *buf) 7242 struct device_attribute *attr, char *buf)
8465{ 7243{
8466 struct iwl4965_priv *priv = (struct iwl4965_priv *)d->driver_data; 7244 struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
8467 7245
8468 return sprintf(buf, "0x%04X\n", 7246 return sprintf(buf, "0x%04X\n",
8469 le32_to_cpu(priv->active_rxon.filter_flags)); 7247 le32_to_cpu(priv->active_rxon.filter_flags));
@@ -8473,7 +7251,7 @@ static ssize_t store_filter_flags(struct device *d,
8473 struct device_attribute *attr, 7251 struct device_attribute *attr,
8474 const char *buf, size_t count) 7252 const char *buf, size_t count)
8475{ 7253{
8476 struct iwl4965_priv *priv = (struct iwl4965_priv *)d->driver_data; 7254 struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
8477 u32 filter_flags = simple_strtoul(buf, NULL, 0); 7255 u32 filter_flags = simple_strtoul(buf, NULL, 0);
8478 7256
8479 mutex_lock(&priv->mutex); 7257 mutex_lock(&priv->mutex);
@@ -8497,71 +7275,12 @@ static ssize_t store_filter_flags(struct device *d,
8497static DEVICE_ATTR(filter_flags, S_IWUSR | S_IRUGO, show_filter_flags, 7275static DEVICE_ATTR(filter_flags, S_IWUSR | S_IRUGO, show_filter_flags,
8498 store_filter_flags); 7276 store_filter_flags);
8499 7277
8500static ssize_t show_tune(struct device *d,
8501 struct device_attribute *attr, char *buf)
8502{
8503 struct iwl4965_priv *priv = (struct iwl4965_priv *)d->driver_data;
8504
8505 return sprintf(buf, "0x%04X\n",
8506 (priv->phymode << 8) |
8507 le16_to_cpu(priv->active_rxon.channel));
8508}
8509
8510static void iwl4965_set_flags_for_phymode(struct iwl4965_priv *priv, u8 phymode);
8511
8512static ssize_t store_tune(struct device *d,
8513 struct device_attribute *attr,
8514 const char *buf, size_t count)
8515{
8516 struct iwl4965_priv *priv = (struct iwl4965_priv *)d->driver_data;
8517 char *p = (char *)buf;
8518 u16 tune = simple_strtoul(p, &p, 0);
8519 u8 phymode = (tune >> 8) & 0xff;
8520 u16 channel = tune & 0xff;
8521
8522 IWL_DEBUG_INFO("Tune request to:%d channel:%d\n", phymode, channel);
8523
8524 mutex_lock(&priv->mutex);
8525 if ((le16_to_cpu(priv->staging_rxon.channel) != channel) ||
8526 (priv->phymode != phymode)) {
8527 const struct iwl4965_channel_info *ch_info;
8528
8529 ch_info = iwl4965_get_channel_info(priv, phymode, channel);
8530 if (!ch_info) {
8531 IWL_WARNING("Requested invalid phymode/channel "
8532 "combination: %d %d\n", phymode, channel);
8533 mutex_unlock(&priv->mutex);
8534 return -EINVAL;
8535 }
8536
8537 /* Cancel any currently running scans... */
8538 if (iwl4965_scan_cancel_timeout(priv, 100))
8539 IWL_WARNING("Could not cancel scan.\n");
8540 else {
8541 IWL_DEBUG_INFO("Committing phymode and "
8542 "rxon.channel = %d %d\n",
8543 phymode, channel);
8544
8545 iwl4965_set_rxon_channel(priv, phymode, channel);
8546 iwl4965_set_flags_for_phymode(priv, phymode);
8547
8548 iwl4965_set_rate(priv);
8549 iwl4965_commit_rxon(priv);
8550 }
8551 }
8552 mutex_unlock(&priv->mutex);
8553
8554 return count;
8555}
8556
8557static DEVICE_ATTR(tune, S_IWUSR | S_IRUGO, show_tune, store_tune);
8558
8559#ifdef CONFIG_IWL4965_SPECTRUM_MEASUREMENT 7278#ifdef CONFIG_IWL4965_SPECTRUM_MEASUREMENT
8560 7279
8561static ssize_t show_measurement(struct device *d, 7280static ssize_t show_measurement(struct device *d,
8562 struct device_attribute *attr, char *buf) 7281 struct device_attribute *attr, char *buf)
8563{ 7282{
8564 struct iwl4965_priv *priv = dev_get_drvdata(d); 7283 struct iwl_priv *priv = dev_get_drvdata(d);
8565 struct iwl4965_spectrum_notification measure_report; 7284 struct iwl4965_spectrum_notification measure_report;
8566 u32 size = sizeof(measure_report), len = 0, ofs = 0; 7285 u32 size = sizeof(measure_report), len = 0, ofs = 0;
8567 u8 *data = (u8 *) & measure_report; 7286 u8 *data = (u8 *) & measure_report;
@@ -8594,7 +7313,7 @@ static ssize_t store_measurement(struct device *d,
8594 struct device_attribute *attr, 7313 struct device_attribute *attr,
8595 const char *buf, size_t count) 7314 const char *buf, size_t count)
8596{ 7315{
8597 struct iwl4965_priv *priv = dev_get_drvdata(d); 7316 struct iwl_priv *priv = dev_get_drvdata(d);
8598 struct ieee80211_measurement_params params = { 7317 struct ieee80211_measurement_params params = {
8599 .channel = le16_to_cpu(priv->active_rxon.channel), 7318 .channel = le16_to_cpu(priv->active_rxon.channel),
8600 .start_time = cpu_to_le64(priv->last_tsf), 7319 .start_time = cpu_to_le64(priv->last_tsf),
@@ -8633,7 +7352,7 @@ static ssize_t store_retry_rate(struct device *d,
8633 struct device_attribute *attr, 7352 struct device_attribute *attr,
8634 const char *buf, size_t count) 7353 const char *buf, size_t count)
8635{ 7354{
8636 struct iwl4965_priv *priv = dev_get_drvdata(d); 7355 struct iwl_priv *priv = dev_get_drvdata(d);
8637 7356
8638 priv->retry_rate = simple_strtoul(buf, NULL, 0); 7357 priv->retry_rate = simple_strtoul(buf, NULL, 0);
8639 if (priv->retry_rate <= 0) 7358 if (priv->retry_rate <= 0)
@@ -8645,7 +7364,7 @@ static ssize_t store_retry_rate(struct device *d,
8645static ssize_t show_retry_rate(struct device *d, 7364static ssize_t show_retry_rate(struct device *d,
8646 struct device_attribute *attr, char *buf) 7365 struct device_attribute *attr, char *buf)
8647{ 7366{
8648 struct iwl4965_priv *priv = dev_get_drvdata(d); 7367 struct iwl_priv *priv = dev_get_drvdata(d);
8649 return sprintf(buf, "%d", priv->retry_rate); 7368 return sprintf(buf, "%d", priv->retry_rate);
8650} 7369}
8651 7370
@@ -8656,14 +7375,14 @@ static ssize_t store_power_level(struct device *d,
8656 struct device_attribute *attr, 7375 struct device_attribute *attr,
8657 const char *buf, size_t count) 7376 const char *buf, size_t count)
8658{ 7377{
8659 struct iwl4965_priv *priv = dev_get_drvdata(d); 7378 struct iwl_priv *priv = dev_get_drvdata(d);
8660 int rc; 7379 int rc;
8661 int mode; 7380 int mode;
8662 7381
8663 mode = simple_strtoul(buf, NULL, 0); 7382 mode = simple_strtoul(buf, NULL, 0);
8664 mutex_lock(&priv->mutex); 7383 mutex_lock(&priv->mutex);
8665 7384
8666 if (!iwl4965_is_ready(priv)) { 7385 if (!iwl_is_ready(priv)) {
8667 rc = -EAGAIN; 7386 rc = -EAGAIN;
8668 goto out; 7387 goto out;
8669 } 7388 }
@@ -8710,7 +7429,7 @@ static const s32 period_duration[] = {
8710static ssize_t show_power_level(struct device *d, 7429static ssize_t show_power_level(struct device *d,
8711 struct device_attribute *attr, char *buf) 7430 struct device_attribute *attr, char *buf)
8712{ 7431{
8713 struct iwl4965_priv *priv = dev_get_drvdata(d); 7432 struct iwl_priv *priv = dev_get_drvdata(d);
8714 int level = IWL_POWER_LEVEL(priv->power_mode); 7433 int level = IWL_POWER_LEVEL(priv->power_mode);
8715 char *p = buf; 7434 char *p = buf;
8716 7435
@@ -8745,73 +7464,8 @@ static DEVICE_ATTR(power_level, S_IWUSR | S_IRUSR, show_power_level,
8745static ssize_t show_channels(struct device *d, 7464static ssize_t show_channels(struct device *d,
8746 struct device_attribute *attr, char *buf) 7465 struct device_attribute *attr, char *buf)
8747{ 7466{
8748 struct iwl4965_priv *priv = dev_get_drvdata(d); 7467 /* all this shit doesn't belong into sysfs anyway */
8749 int len = 0, i; 7468 return 0;
8750 struct ieee80211_channel *channels = NULL;
8751 const struct ieee80211_hw_mode *hw_mode = NULL;
8752 int count = 0;
8753
8754 if (!iwl4965_is_ready(priv))
8755 return -EAGAIN;
8756
8757 hw_mode = iwl4965_get_hw_mode(priv, MODE_IEEE80211G);
8758 if (!hw_mode)
8759 hw_mode = iwl4965_get_hw_mode(priv, MODE_IEEE80211B);
8760 if (hw_mode) {
8761 channels = hw_mode->channels;
8762 count = hw_mode->num_channels;
8763 }
8764
8765 len +=
8766 sprintf(&buf[len],
8767 "Displaying %d channels in 2.4GHz band "
8768 "(802.11bg):\n", count);
8769
8770 for (i = 0; i < count; i++)
8771 len += sprintf(&buf[len], "%d: %ddBm: BSS%s%s, %s.\n",
8772 channels[i].chan,
8773 channels[i].power_level,
8774 channels[i].
8775 flag & IEEE80211_CHAN_W_RADAR_DETECT ?
8776 " (IEEE 802.11h required)" : "",
8777 (!(channels[i].flag & IEEE80211_CHAN_W_IBSS)
8778 || (channels[i].
8779 flag &
8780 IEEE80211_CHAN_W_RADAR_DETECT)) ? "" :
8781 ", IBSS",
8782 channels[i].
8783 flag & IEEE80211_CHAN_W_ACTIVE_SCAN ?
8784 "active/passive" : "passive only");
8785
8786 hw_mode = iwl4965_get_hw_mode(priv, MODE_IEEE80211A);
8787 if (hw_mode) {
8788 channels = hw_mode->channels;
8789 count = hw_mode->num_channels;
8790 } else {
8791 channels = NULL;
8792 count = 0;
8793 }
8794
8795 len += sprintf(&buf[len], "Displaying %d channels in 5.2GHz band "
8796 "(802.11a):\n", count);
8797
8798 for (i = 0; i < count; i++)
8799 len += sprintf(&buf[len], "%d: %ddBm: BSS%s%s, %s.\n",
8800 channels[i].chan,
8801 channels[i].power_level,
8802 channels[i].
8803 flag & IEEE80211_CHAN_W_RADAR_DETECT ?
8804 " (IEEE 802.11h required)" : "",
8805 (!(channels[i].flag & IEEE80211_CHAN_W_IBSS)
8806 || (channels[i].
8807 flag &
8808 IEEE80211_CHAN_W_RADAR_DETECT)) ? "" :
8809 ", IBSS",
8810 channels[i].
8811 flag & IEEE80211_CHAN_W_ACTIVE_SCAN ?
8812 "active/passive" : "passive only");
8813
8814 return len;
8815} 7469}
8816 7470
8817static DEVICE_ATTR(channels, S_IRUSR, show_channels, NULL); 7471static DEVICE_ATTR(channels, S_IRUSR, show_channels, NULL);
@@ -8819,17 +7473,17 @@ static DEVICE_ATTR(channels, S_IRUSR, show_channels, NULL);
8819static ssize_t show_statistics(struct device *d, 7473static ssize_t show_statistics(struct device *d,
8820 struct device_attribute *attr, char *buf) 7474 struct device_attribute *attr, char *buf)
8821{ 7475{
8822 struct iwl4965_priv *priv = dev_get_drvdata(d); 7476 struct iwl_priv *priv = dev_get_drvdata(d);
8823 u32 size = sizeof(struct iwl4965_notif_statistics); 7477 u32 size = sizeof(struct iwl4965_notif_statistics);
8824 u32 len = 0, ofs = 0; 7478 u32 len = 0, ofs = 0;
8825 u8 *data = (u8 *) & priv->statistics; 7479 u8 *data = (u8 *) & priv->statistics;
8826 int rc = 0; 7480 int rc = 0;
8827 7481
8828 if (!iwl4965_is_alive(priv)) 7482 if (!iwl_is_alive(priv))
8829 return -EAGAIN; 7483 return -EAGAIN;
8830 7484
8831 mutex_lock(&priv->mutex); 7485 mutex_lock(&priv->mutex);
8832 rc = iwl4965_send_statistics_request(priv); 7486 rc = iwl_send_statistics_request(priv, 0);
8833 mutex_unlock(&priv->mutex); 7487 mutex_unlock(&priv->mutex);
8834 7488
8835 if (rc) { 7489 if (rc) {
@@ -8857,9 +7511,9 @@ static DEVICE_ATTR(statistics, S_IRUGO, show_statistics, NULL);
8857static ssize_t show_antenna(struct device *d, 7511static ssize_t show_antenna(struct device *d,
8858 struct device_attribute *attr, char *buf) 7512 struct device_attribute *attr, char *buf)
8859{ 7513{
8860 struct iwl4965_priv *priv = dev_get_drvdata(d); 7514 struct iwl_priv *priv = dev_get_drvdata(d);
8861 7515
8862 if (!iwl4965_is_alive(priv)) 7516 if (!iwl_is_alive(priv))
8863 return -EAGAIN; 7517 return -EAGAIN;
8864 7518
8865 return sprintf(buf, "%d\n", priv->antenna); 7519 return sprintf(buf, "%d\n", priv->antenna);
@@ -8870,7 +7524,7 @@ static ssize_t store_antenna(struct device *d,
8870 const char *buf, size_t count) 7524 const char *buf, size_t count)
8871{ 7525{
8872 int ant; 7526 int ant;
8873 struct iwl4965_priv *priv = dev_get_drvdata(d); 7527 struct iwl_priv *priv = dev_get_drvdata(d);
8874 7528
8875 if (count == 0) 7529 if (count == 0)
8876 return 0; 7530 return 0;
@@ -8895,8 +7549,8 @@ static DEVICE_ATTR(antenna, S_IWUSR | S_IRUGO, show_antenna, store_antenna);
8895static ssize_t show_status(struct device *d, 7549static ssize_t show_status(struct device *d,
8896 struct device_attribute *attr, char *buf) 7550 struct device_attribute *attr, char *buf)
8897{ 7551{
8898 struct iwl4965_priv *priv = (struct iwl4965_priv *)d->driver_data; 7552 struct iwl_priv *priv = (struct iwl_priv *)d->driver_data;
8899 if (!iwl4965_is_alive(priv)) 7553 if (!iwl_is_alive(priv))
8900 return -EAGAIN; 7554 return -EAGAIN;
8901 return sprintf(buf, "0x%08x\n", (int)priv->status); 7555 return sprintf(buf, "0x%08x\n", (int)priv->status);
8902} 7556}
@@ -8910,7 +7564,7 @@ static ssize_t dump_error_log(struct device *d,
8910 char *p = (char *)buf; 7564 char *p = (char *)buf;
8911 7565
8912 if (p[0] == '1') 7566 if (p[0] == '1')
8913 iwl4965_dump_nic_error_log((struct iwl4965_priv *)d->driver_data); 7567 iwl4965_dump_nic_error_log((struct iwl_priv *)d->driver_data);
8914 7568
8915 return strnlen(buf, count); 7569 return strnlen(buf, count);
8916} 7570}
@@ -8924,7 +7578,7 @@ static ssize_t dump_event_log(struct device *d,
8924 char *p = (char *)buf; 7578 char *p = (char *)buf;
8925 7579
8926 if (p[0] == '1') 7580 if (p[0] == '1')
8927 iwl4965_dump_nic_event_log((struct iwl4965_priv *)d->driver_data); 7581 iwl4965_dump_nic_event_log((struct iwl_priv *)d->driver_data);
8928 7582
8929 return strnlen(buf, count); 7583 return strnlen(buf, count);
8930} 7584}
@@ -8937,7 +7591,7 @@ static DEVICE_ATTR(dump_events, S_IWUSR, NULL, dump_event_log);
8937 * 7591 *
8938 *****************************************************************************/ 7592 *****************************************************************************/
8939 7593
8940static void iwl4965_setup_deferred_work(struct iwl4965_priv *priv) 7594static void iwl4965_setup_deferred_work(struct iwl_priv *priv)
8941{ 7595{
8942 priv->workqueue = create_workqueue(DRV_NAME); 7596 priv->workqueue = create_workqueue(DRV_NAME);
8943 7597
@@ -8962,7 +7616,7 @@ static void iwl4965_setup_deferred_work(struct iwl4965_priv *priv)
8962 iwl4965_irq_tasklet, (unsigned long)priv); 7616 iwl4965_irq_tasklet, (unsigned long)priv);
8963} 7617}
8964 7618
8965static void iwl4965_cancel_deferred_work(struct iwl4965_priv *priv) 7619static void iwl4965_cancel_deferred_work(struct iwl_priv *priv)
8966{ 7620{
8967 iwl4965_hw_cancel_deferred_work(priv); 7621 iwl4965_hw_cancel_deferred_work(priv);
8968 7622
@@ -8985,12 +7639,10 @@ static struct attribute *iwl4965_sysfs_entries[] = {
8985#endif 7639#endif
8986 &dev_attr_power_level.attr, 7640 &dev_attr_power_level.attr,
8987 &dev_attr_retry_rate.attr, 7641 &dev_attr_retry_rate.attr,
8988 &dev_attr_rf_kill.attr,
8989 &dev_attr_rs_window.attr, 7642 &dev_attr_rs_window.attr,
8990 &dev_attr_statistics.attr, 7643 &dev_attr_statistics.attr,
8991 &dev_attr_status.attr, 7644 &dev_attr_status.attr,
8992 &dev_attr_temperature.attr, 7645 &dev_attr_temperature.attr,
8993 &dev_attr_tune.attr,
8994 &dev_attr_tx_power.attr, 7646 &dev_attr_tx_power.attr,
8995 7647
8996 NULL 7648 NULL
@@ -9011,6 +7663,7 @@ static struct ieee80211_ops iwl4965_hw_ops = {
9011 .config_interface = iwl4965_mac_config_interface, 7663 .config_interface = iwl4965_mac_config_interface,
9012 .configure_filter = iwl4965_configure_filter, 7664 .configure_filter = iwl4965_configure_filter,
9013 .set_key = iwl4965_mac_set_key, 7665 .set_key = iwl4965_mac_set_key,
7666 .update_tkip_key = iwl4965_mac_update_tkip_key,
9014 .get_stats = iwl4965_mac_get_stats, 7667 .get_stats = iwl4965_mac_get_stats,
9015 .get_tx_stats = iwl4965_mac_get_tx_stats, 7668 .get_tx_stats = iwl4965_mac_get_tx_stats,
9016 .conf_tx = iwl4965_mac_conf_tx, 7669 .conf_tx = iwl4965_mac_conf_tx,
@@ -9019,12 +7672,7 @@ static struct ieee80211_ops iwl4965_hw_ops = {
9019 .beacon_update = iwl4965_mac_beacon_update, 7672 .beacon_update = iwl4965_mac_beacon_update,
9020 .bss_info_changed = iwl4965_bss_info_changed, 7673 .bss_info_changed = iwl4965_bss_info_changed,
9021#ifdef CONFIG_IWL4965_HT 7674#ifdef CONFIG_IWL4965_HT
9022 .conf_ht = iwl4965_mac_conf_ht,
9023 .ampdu_action = iwl4965_mac_ampdu_action, 7675 .ampdu_action = iwl4965_mac_ampdu_action,
9024#ifdef CONFIG_IWL4965_HT_AGG
9025 .ht_tx_agg_start = iwl4965_mac_ht_tx_agg_start,
9026 .ht_tx_agg_stop = iwl4965_mac_ht_tx_agg_stop,
9027#endif /* CONFIG_IWL4965_HT_AGG */
9028#endif /* CONFIG_IWL4965_HT */ 7676#endif /* CONFIG_IWL4965_HT */
9029 .hw_scan = iwl4965_mac_hw_scan 7677 .hw_scan = iwl4965_mac_hw_scan
9030}; 7678};
@@ -9032,85 +7680,45 @@ static struct ieee80211_ops iwl4965_hw_ops = {
9032static int iwl4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 7680static int iwl4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
9033{ 7681{
9034 int err = 0; 7682 int err = 0;
9035 struct iwl4965_priv *priv; 7683 struct iwl_priv *priv;
9036 struct ieee80211_hw *hw; 7684 struct ieee80211_hw *hw;
9037 int i; 7685 struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
7686 unsigned long flags;
9038 DECLARE_MAC_BUF(mac); 7687 DECLARE_MAC_BUF(mac);
9039 7688
7689 /************************
7690 * 1. Allocating HW data
7691 ************************/
7692
9040 /* Disabling hardware scan means that mac80211 will perform scans 7693 /* Disabling hardware scan means that mac80211 will perform scans
9041 * "the hard way", rather than using device's scan. */ 7694 * "the hard way", rather than using device's scan. */
9042 if (iwl4965_param_disable_hw_scan) { 7695 if (cfg->mod_params->disable_hw_scan) {
9043 IWL_DEBUG_INFO("Disabling hw_scan\n"); 7696 IWL_DEBUG_INFO("Disabling hw_scan\n");
9044 iwl4965_hw_ops.hw_scan = NULL; 7697 iwl4965_hw_ops.hw_scan = NULL;
9045 } 7698 }
9046 7699
9047 if ((iwl4965_param_queues_num > IWL_MAX_NUM_QUEUES) || 7700 hw = iwl_alloc_all(cfg, &iwl4965_hw_ops);
9048 (iwl4965_param_queues_num < IWL_MIN_NUM_QUEUES)) { 7701 if (!hw) {
9049 IWL_ERROR("invalid queues_num, should be between %d and %d\n",
9050 IWL_MIN_NUM_QUEUES, IWL_MAX_NUM_QUEUES);
9051 err = -EINVAL;
9052 goto out;
9053 }
9054
9055 /* mac80211 allocates memory for this device instance, including
9056 * space for this driver's private structure */
9057 hw = ieee80211_alloc_hw(sizeof(struct iwl4965_priv), &iwl4965_hw_ops);
9058 if (hw == NULL) {
9059 IWL_ERROR("Can not allocate network device\n");
9060 err = -ENOMEM; 7702 err = -ENOMEM;
9061 goto out; 7703 goto out;
9062 } 7704 }
9063 SET_IEEE80211_DEV(hw, &pdev->dev); 7705 priv = hw->priv;
7706 /* At this point both hw and priv are allocated. */
9064 7707
9065 hw->rate_control_algorithm = "iwl-4965-rs"; 7708 SET_IEEE80211_DEV(hw, &pdev->dev);
9066 7709
9067 IWL_DEBUG_INFO("*** LOAD DRIVER ***\n"); 7710 IWL_DEBUG_INFO("*** LOAD DRIVER ***\n");
9068 priv = hw->priv; 7711 priv->cfg = cfg;
9069 priv->hw = hw;
9070
9071 priv->pci_dev = pdev; 7712 priv->pci_dev = pdev;
9072 priv->antenna = (enum iwl4965_antenna)iwl4965_param_antenna; 7713
9073#ifdef CONFIG_IWL4965_DEBUG 7714#ifdef CONFIG_IWLWIFI_DEBUG
9074 iwl4965_debug_level = iwl4965_param_debug; 7715 iwl_debug_level = priv->cfg->mod_params->debug;
9075 atomic_set(&priv->restrict_refcnt, 0); 7716 atomic_set(&priv->restrict_refcnt, 0);
9076#endif 7717#endif
9077 priv->retry_rate = 1;
9078
9079 priv->ibss_beacon = NULL;
9080
9081 /* Tell mac80211 and its clients (e.g. Wireless Extensions)
9082 * the range of signal quality values that we'll provide.
9083 * Negative values for level/noise indicate that we'll provide dBm.
9084 * For WE, at least, non-0 values here *enable* display of values
9085 * in app (iwconfig). */
9086 hw->max_rssi = -20; /* signal level, negative indicates dBm */
9087 hw->max_noise = -20; /* noise level, negative indicates dBm */
9088 hw->max_signal = 100; /* link quality indication (%) */
9089 7718
9090 /* Tell mac80211 our Tx characteristics */ 7719 /**************************
9091 hw->flags = IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE; 7720 * 2. Initializing PCI bus
9092 7721 **************************/
9093 /* Default value; 4 EDCA QOS priorities */
9094 hw->queues = 4;
9095#ifdef CONFIG_IWL4965_HT
9096#ifdef CONFIG_IWL4965_HT_AGG
9097 /* Enhanced value; more queues, to support 11n aggregation */
9098 hw->queues = 16;
9099#endif /* CONFIG_IWL4965_HT_AGG */
9100#endif /* CONFIG_IWL4965_HT */
9101
9102 spin_lock_init(&priv->lock);
9103 spin_lock_init(&priv->power_data.lock);
9104 spin_lock_init(&priv->sta_lock);
9105 spin_lock_init(&priv->hcmd_lock);
9106 spin_lock_init(&priv->lq_mngr.lock);
9107
9108 for (i = 0; i < IWL_IBSS_MAC_HASH_SIZE; i++)
9109 INIT_LIST_HEAD(&priv->ibss_mac_hash[i]);
9110
9111 INIT_LIST_HEAD(&priv->free_frames);
9112
9113 mutex_init(&priv->mutex);
9114 if (pci_enable_device(pdev)) { 7722 if (pci_enable_device(pdev)) {
9115 err = -ENODEV; 7723 err = -ENODEV;
9116 goto out_ieee80211_free_hw; 7724 goto out_ieee80211_free_hw;
@@ -9118,31 +7726,28 @@ static int iwl4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
9118 7726
9119 pci_set_master(pdev); 7727 pci_set_master(pdev);
9120 7728
9121 /* Clear the driver's (not device's) station table */
9122 iwl4965_clear_stations_table(priv);
9123
9124 priv->data_retry_limit = -1;
9125 priv->ieee_channels = NULL;
9126 priv->ieee_rates = NULL;
9127 priv->phymode = -1;
9128
9129 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK); 7729 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
9130 if (!err) 7730 if (!err)
9131 err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); 7731 err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
9132 if (err) { 7732 if (err) {
9133 printk(KERN_WARNING DRV_NAME ": No suitable DMA available.\n"); 7733 printk(KERN_WARNING DRV_NAME
9134 goto out_pci_disable_device; 7734 ": No suitable DMA available.\n");
7735 goto out_pci_disable_device;
9135 } 7736 }
9136 7737
9137 pci_set_drvdata(pdev, priv);
9138 err = pci_request_regions(pdev, DRV_NAME); 7738 err = pci_request_regions(pdev, DRV_NAME);
9139 if (err) 7739 if (err)
9140 goto out_pci_disable_device; 7740 goto out_pci_disable_device;
9141 7741
7742 pci_set_drvdata(pdev, priv);
7743
9142 /* We disable the RETRY_TIMEOUT register (0x41) to keep 7744 /* We disable the RETRY_TIMEOUT register (0x41) to keep
9143 * PCI Tx retries from interfering with C3 CPU state */ 7745 * PCI Tx retries from interfering with C3 CPU state */
9144 pci_write_config_byte(pdev, 0x41, 0x00); 7746 pci_write_config_byte(pdev, 0x41, 0x00);
9145 7747
7748 /***********************
7749 * 3. Read REV register
7750 ***********************/
9146 priv->hw_base = pci_iomap(pdev, 0, 0); 7751 priv->hw_base = pci_iomap(pdev, 0, 0);
9147 if (!priv->hw_base) { 7752 if (!priv->hw_base) {
9148 err = -ENODEV; 7753 err = -ENODEV;
@@ -9150,132 +7755,112 @@ static int iwl4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
9150 } 7755 }
9151 7756
9152 IWL_DEBUG_INFO("pci_resource_len = 0x%08llx\n", 7757 IWL_DEBUG_INFO("pci_resource_len = 0x%08llx\n",
9153 (unsigned long long) pci_resource_len(pdev, 0)); 7758 (unsigned long long) pci_resource_len(pdev, 0));
9154 IWL_DEBUG_INFO("pci_resource_base = %p\n", priv->hw_base); 7759 IWL_DEBUG_INFO("pci_resource_base = %p\n", priv->hw_base);
9155 7760
9156 /* Initialize module parameter values here */ 7761 printk(KERN_INFO DRV_NAME
7762 ": Detected Intel Wireless WiFi Link %s\n", priv->cfg->name);
9157 7763
9158 /* Disable radio (SW RF KILL) via parameter when loading driver */ 7764 /*****************
9159 if (iwl4965_param_disable) { 7765 * 4. Read EEPROM
9160 set_bit(STATUS_RF_KILL_SW, &priv->status); 7766 *****************/
9161 IWL_DEBUG_INFO("Radio disabled.\n"); 7767 /* nic init */
7768 iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS,
7769 CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
7770
7771 iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
7772 err = iwl_poll_bit(priv, CSR_GP_CNTRL,
7773 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
7774 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
7775 if (err < 0) {
7776 IWL_DEBUG_INFO("Failed to init the card\n");
7777 goto out_iounmap;
9162 } 7778 }
7779 /* Read the EEPROM */
7780 err = iwl_eeprom_init(priv);
7781 if (err) {
7782 IWL_ERROR("Unable to init EEPROM\n");
7783 goto out_iounmap;
7784 }
7785 /* MAC Address location in EEPROM same for 3945/4965 */
7786 iwl_eeprom_get_mac(priv, priv->mac_addr);
7787 IWL_DEBUG_INFO("MAC address: %s\n", print_mac(mac, priv->mac_addr));
7788 SET_IEEE80211_PERM_ADDR(priv->hw, priv->mac_addr);
9163 7789
9164 priv->iw_mode = IEEE80211_IF_TYPE_STA; 7790 /************************
9165 7791 * 5. Setup HW constants
9166 priv->ps_mode = 0; 7792 ************************/
9167 priv->use_ant_b_for_management_frame = 1; /* start with ant B */
9168 priv->valid_antenna = 0x7; /* assume all 3 connected */
9169 priv->ps_mode = IWL_MIMO_PS_NONE;
9170
9171 /* Choose which receivers/antennas to use */
9172 iwl4965_set_rxon_chain(priv);
9173
9174 printk(KERN_INFO DRV_NAME
9175 ": Detected Intel Wireless WiFi Link 4965AGN\n");
9176
9177 /* Device-specific setup */ 7793 /* Device-specific setup */
9178 if (iwl4965_hw_set_hw_setting(priv)) { 7794 if (priv->cfg->ops->lib->set_hw_params(priv)) {
9179 IWL_ERROR("failed to set hw settings\n"); 7795 IWL_ERROR("failed to set hw parameters\n");
9180 goto out_iounmap; 7796 goto out_iounmap;
9181 } 7797 }
9182 7798
9183#ifdef CONFIG_IWL4965_QOS 7799 /*******************
9184 if (iwl4965_param_qos_enable) 7800 * 6. Setup hw/priv
9185 priv->qos_data.qos_enable = 1; 7801 *******************/
9186 7802
9187 iwl4965_reset_qos(priv); 7803 err = iwl_setup(priv);
7804 if (err)
7805 goto out_unset_hw_params;
7806 /* At this point both hw and priv are initialized. */
9188 7807
9189 priv->qos_data.qos_active = 0; 7808 /**********************************
9190 priv->qos_data.qos_cap.val = 0; 7809 * 7. Initialize module parameters
9191#endif /* CONFIG_IWL4965_QOS */ 7810 **********************************/
9192 7811
9193 iwl4965_set_rxon_channel(priv, MODE_IEEE80211G, 6); 7812 /* Disable radio (SW RF KILL) via parameter when loading driver */
9194 iwl4965_setup_deferred_work(priv); 7813 if (priv->cfg->mod_params->disable) {
9195 iwl4965_setup_rx_handlers(priv); 7814 set_bit(STATUS_RF_KILL_SW, &priv->status);
7815 IWL_DEBUG_INFO("Radio disabled.\n");
7816 }
9196 7817
9197 priv->rates_mask = IWL_RATES_MASK; 7818 if (priv->cfg->mod_params->enable_qos)
9198 /* If power management is turned on, default to AC mode */ 7819 priv->qos_data.qos_enable = 1;
9199 priv->power_mode = IWL_POWER_AC;
9200 priv->user_txpower_limit = IWL_DEFAULT_TX_POWER;
9201 7820
7821 /********************
7822 * 8. Setup services
7823 ********************/
7824 spin_lock_irqsave(&priv->lock, flags);
9202 iwl4965_disable_interrupts(priv); 7825 iwl4965_disable_interrupts(priv);
7826 spin_unlock_irqrestore(&priv->lock, flags);
9203 7827
9204 err = sysfs_create_group(&pdev->dev.kobj, &iwl4965_attribute_group); 7828 err = sysfs_create_group(&pdev->dev.kobj, &iwl4965_attribute_group);
9205 if (err) { 7829 if (err) {
9206 IWL_ERROR("failed to create sysfs device attributes\n"); 7830 IWL_ERROR("failed to create sysfs device attributes\n");
9207 goto out_release_irq; 7831 goto out_unset_hw_params;
9208 }
9209
9210 /* nic init */
9211 iwl4965_set_bit(priv, CSR_GIO_CHICKEN_BITS,
9212 CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
9213
9214 iwl4965_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
9215 err = iwl4965_poll_bit(priv, CSR_GP_CNTRL,
9216 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
9217 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
9218 if (err < 0) {
9219 IWL_DEBUG_INFO("Failed to init the card\n");
9220 goto out_remove_sysfs;
9221 }
9222 /* Read the EEPROM */
9223 err = iwl4965_eeprom_init(priv);
9224 if (err) {
9225 IWL_ERROR("Unable to init EEPROM\n");
9226 goto out_remove_sysfs;
9227 } 7832 }
9228 /* MAC Address location in EEPROM same for 3945/4965 */
9229 get_eeprom_mac(priv, priv->mac_addr);
9230 IWL_DEBUG_INFO("MAC address: %s\n", print_mac(mac, priv->mac_addr));
9231 SET_IEEE80211_PERM_ADDR(priv->hw, priv->mac_addr);
9232 7833
9233 err = iwl4965_init_channel_map(priv); 7834 err = iwl_dbgfs_register(priv, DRV_NAME);
9234 if (err) { 7835 if (err) {
9235 IWL_ERROR("initializing regulatory failed: %d\n", err); 7836 IWL_ERROR("failed to create debugfs files\n");
9236 goto out_remove_sysfs; 7837 goto out_remove_sysfs;
9237 } 7838 }
9238 7839
9239 err = iwl4965_init_geos(priv); 7840 iwl4965_setup_deferred_work(priv);
9240 if (err) { 7841 iwl4965_setup_rx_handlers(priv);
9241 IWL_ERROR("initializing geos failed: %d\n", err);
9242 goto out_free_channel_map;
9243 }
9244 iwl4965_reset_channel_flag(priv);
9245
9246 iwl4965_rate_control_register(priv->hw);
9247 err = ieee80211_register_hw(priv->hw);
9248 if (err) {
9249 IWL_ERROR("Failed to register network device (error %d)\n", err);
9250 goto out_free_geos;
9251 }
9252 7842
9253 priv->hw->conf.beacon_int = 100; 7843 /********************
9254 priv->mac80211_registered = 1; 7844 * 9. Conclude
7845 ********************/
9255 pci_save_state(pdev); 7846 pci_save_state(pdev);
9256 pci_disable_device(pdev); 7847 pci_disable_device(pdev);
9257 7848
7849 /* notify iwlcore to init */
7850 iwlcore_low_level_notify(priv, IWLCORE_INIT_EVT);
9258 return 0; 7851 return 0;
9259 7852
9260 out_free_geos:
9261 iwl4965_free_geos(priv);
9262 out_free_channel_map:
9263 iwl4965_free_channel_map(priv);
9264 out_remove_sysfs: 7853 out_remove_sysfs:
9265 sysfs_remove_group(&pdev->dev.kobj, &iwl4965_attribute_group); 7854 sysfs_remove_group(&pdev->dev.kobj, &iwl4965_attribute_group);
9266 7855 out_unset_hw_params:
9267 out_release_irq: 7856 iwl4965_unset_hw_params(priv);
9268 destroy_workqueue(priv->workqueue);
9269 priv->workqueue = NULL;
9270 iwl4965_unset_hw_setting(priv);
9271
9272 out_iounmap: 7857 out_iounmap:
9273 pci_iounmap(pdev, priv->hw_base); 7858 pci_iounmap(pdev, priv->hw_base);
9274 out_pci_release_regions: 7859 out_pci_release_regions:
9275 pci_release_regions(pdev); 7860 pci_release_regions(pdev);
7861 pci_set_drvdata(pdev, NULL);
9276 out_pci_disable_device: 7862 out_pci_disable_device:
9277 pci_disable_device(pdev); 7863 pci_disable_device(pdev);
9278 pci_set_drvdata(pdev, NULL);
9279 out_ieee80211_free_hw: 7864 out_ieee80211_free_hw:
9280 ieee80211_free_hw(priv->hw); 7865 ieee80211_free_hw(priv->hw);
9281 out: 7866 out:
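The restructured probe routine above is laid out as numbered stages (1. allocate HW data through 9. conclude), and its error labels, out_remove_sysfs down to out_ieee80211_free_hw, unwind those stages in reverse order of setup. That goto-based unwind idiom is easy to break when stages are reordered; a stripped-down illustration of the pattern, with made-up stage names rather than the driver's, is:

#include <stdio.h>

/* Hypothetical setup/teardown pairs standing in for pci/sysfs/etc. stages. */
static int setup_a(void) { puts("setup a"); return 0; }
static int setup_b(void) { puts("setup b"); return 0; }
static int setup_c(void) { puts("setup c"); return -1; }  /* pretend this fails */
static void undo_b(void) { puts("undo b"); }
static void undo_a(void) { puts("undo a"); }

static int probe(void)
{
	int err;

	err = setup_a();
	if (err)
		goto out;
	err = setup_b();
	if (err)
		goto out_undo_a;
	err = setup_c();
	if (err)
		goto out_undo_b;
	return 0;

out_undo_b:     /* each label undoes everything set up before the failure */
	undo_b();
out_undo_a:
	undo_a();
out:
	return err;
}

int main(void)
{
	return probe() ? 1 : 0;
}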
@@ -9284,19 +7869,34 @@ static int iwl4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
9284 7869
9285static void __devexit iwl4965_pci_remove(struct pci_dev *pdev) 7870static void __devexit iwl4965_pci_remove(struct pci_dev *pdev)
9286{ 7871{
9287 struct iwl4965_priv *priv = pci_get_drvdata(pdev); 7872 struct iwl_priv *priv = pci_get_drvdata(pdev);
9288 struct list_head *p, *q; 7873 struct list_head *p, *q;
9289 int i; 7874 int i;
7875 unsigned long flags;
9290 7876
9291 if (!priv) 7877 if (!priv)
9292 return; 7878 return;
9293 7879
9294 IWL_DEBUG_INFO("*** UNLOAD DRIVER ***\n"); 7880 IWL_DEBUG_INFO("*** UNLOAD DRIVER ***\n");
9295 7881
7882 if (priv->mac80211_registered) {
7883 ieee80211_unregister_hw(priv->hw);
7884 priv->mac80211_registered = 0;
7885 }
7886
9296 set_bit(STATUS_EXIT_PENDING, &priv->status); 7887 set_bit(STATUS_EXIT_PENDING, &priv->status);
9297 7888
9298 iwl4965_down(priv); 7889 iwl4965_down(priv);
9299 7890
7891 /* make sure we flush any pending irq or
7892 * tasklet for the driver
7893 */
7894 spin_lock_irqsave(&priv->lock, flags);
7895 iwl4965_disable_interrupts(priv);
7896 spin_unlock_irqrestore(&priv->lock, flags);
7897
7898 iwl_synchronize_irq(priv);
7899
9300 /* Free MAC hash list for ADHOC */ 7900 /* Free MAC hash list for ADHOC */
9301 for (i = 0; i < IWL_IBSS_MAC_HASH_SIZE; i++) { 7901 for (i = 0; i < IWL_IBSS_MAC_HASH_SIZE; i++) {
9302 list_for_each_safe(p, q, &priv->ibss_mac_hash[i]) { 7902 list_for_each_safe(p, q, &priv->ibss_mac_hash[i]) {
@@ -9305,6 +7905,8 @@ static void __devexit iwl4965_pci_remove(struct pci_dev *pdev)
9305 } 7905 }
9306 } 7906 }
9307 7907
7908 iwlcore_low_level_notify(priv, IWLCORE_REMOVE_EVT);
7909 iwl_dbgfs_unregister(priv);
9308 sysfs_remove_group(&pdev->dev.kobj, &iwl4965_attribute_group); 7910 sysfs_remove_group(&pdev->dev.kobj, &iwl4965_attribute_group);
9309 7911
9310 iwl4965_dealloc_ucode_pci(priv); 7912 iwl4965_dealloc_ucode_pci(priv);
@@ -9313,13 +7915,9 @@ static void __devexit iwl4965_pci_remove(struct pci_dev *pdev)
9313 iwl4965_rx_queue_free(priv, &priv->rxq); 7915 iwl4965_rx_queue_free(priv, &priv->rxq);
9314 iwl4965_hw_txq_ctx_free(priv); 7916 iwl4965_hw_txq_ctx_free(priv);
9315 7917
9316 iwl4965_unset_hw_setting(priv); 7918 iwl4965_unset_hw_params(priv);
9317 iwl4965_clear_stations_table(priv); 7919 iwlcore_clear_stations_table(priv);
9318 7920
9319 if (priv->mac80211_registered) {
9320 ieee80211_unregister_hw(priv->hw);
9321 iwl4965_rate_control_unregister(priv->hw);
9322 }
9323 7921
9324 /*netif_stop_queue(dev); */ 7922 /*netif_stop_queue(dev); */
9325 flush_workqueue(priv->workqueue); 7923 flush_workqueue(priv->workqueue);
@@ -9335,7 +7933,7 @@ static void __devexit iwl4965_pci_remove(struct pci_dev *pdev)
9335 pci_disable_device(pdev); 7933 pci_disable_device(pdev);
9336 pci_set_drvdata(pdev, NULL); 7934 pci_set_drvdata(pdev, NULL);
9337 7935
9338 iwl4965_free_channel_map(priv); 7936 iwl_free_channel_map(priv);
9339 iwl4965_free_geos(priv); 7937 iwl4965_free_geos(priv);
9340 7938
9341 if (priv->ibss_beacon) 7939 if (priv->ibss_beacon)
@@ -9348,7 +7946,7 @@ static void __devexit iwl4965_pci_remove(struct pci_dev *pdev)
9348 7946
9349static int iwl4965_pci_suspend(struct pci_dev *pdev, pm_message_t state) 7947static int iwl4965_pci_suspend(struct pci_dev *pdev, pm_message_t state)
9350{ 7948{
9351 struct iwl4965_priv *priv = pci_get_drvdata(pdev); 7949 struct iwl_priv *priv = pci_get_drvdata(pdev);
9352 7950
9353 if (priv->is_open) { 7951 if (priv->is_open) {
9354 set_bit(STATUS_IN_SUSPEND, &priv->status); 7952 set_bit(STATUS_IN_SUSPEND, &priv->status);
@@ -9363,7 +7961,7 @@ static int iwl4965_pci_suspend(struct pci_dev *pdev, pm_message_t state)
9363 7961
9364static int iwl4965_pci_resume(struct pci_dev *pdev) 7962static int iwl4965_pci_resume(struct pci_dev *pdev)
9365{ 7963{
9366 struct iwl4965_priv *priv = pci_get_drvdata(pdev); 7964 struct iwl_priv *priv = pci_get_drvdata(pdev);
9367 7965
9368 pci_set_power_state(pdev, PCI_D0); 7966 pci_set_power_state(pdev, PCI_D0);
9369 7967
@@ -9382,9 +7980,17 @@ static int iwl4965_pci_resume(struct pci_dev *pdev)
9382 * 7980 *
9383 *****************************************************************************/ 7981 *****************************************************************************/
9384 7982
9385static struct pci_driver iwl4965_driver = { 7983/* Hardware specific file defines the PCI IDs table for that hardware module */
7984static struct pci_device_id iwl_hw_card_ids[] = {
7985 {IWL_PCI_DEVICE(0x4229, PCI_ANY_ID, iwl4965_agn_cfg)},
7986 {IWL_PCI_DEVICE(0x4230, PCI_ANY_ID, iwl4965_agn_cfg)},
7987 {0}
7988};
7989MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids);
7990
7991static struct pci_driver iwl_driver = {
9386 .name = DRV_NAME, 7992 .name = DRV_NAME,
9387 .id_table = iwl4965_hw_card_ids, 7993 .id_table = iwl_hw_card_ids,
9388 .probe = iwl4965_pci_probe, 7994 .probe = iwl4965_pci_probe,
9389 .remove = __devexit_p(iwl4965_pci_remove), 7995 .remove = __devexit_p(iwl4965_pci_remove),
9390#ifdef CONFIG_PM 7996#ifdef CONFIG_PM
@@ -9399,51 +8005,45 @@ static int __init iwl4965_init(void)
9399 int ret; 8005 int ret;
9400 printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION ", " DRV_VERSION "\n"); 8006 printk(KERN_INFO DRV_NAME ": " DRV_DESCRIPTION ", " DRV_VERSION "\n");
9401 printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n"); 8007 printk(KERN_INFO DRV_NAME ": " DRV_COPYRIGHT "\n");
9402 ret = pci_register_driver(&iwl4965_driver); 8008
8009 ret = iwl4965_rate_control_register();
9403 if (ret) { 8010 if (ret) {
9404 IWL_ERROR("Unable to initialize PCI module\n"); 8011 IWL_ERROR("Unable to register rate control algorithm: %d\n", ret);
9405 return ret; 8012 return ret;
9406 } 8013 }
9407#ifdef CONFIG_IWL4965_DEBUG 8014
9408 ret = driver_create_file(&iwl4965_driver.driver, &driver_attr_debug_level); 8015 ret = pci_register_driver(&iwl_driver);
8016 if (ret) {
8017 IWL_ERROR("Unable to initialize PCI module\n");
8018 goto error_register;
8019 }
8020#ifdef CONFIG_IWLWIFI_DEBUG
8021 ret = driver_create_file(&iwl_driver.driver, &driver_attr_debug_level);
9409 if (ret) { 8022 if (ret) {
9410 IWL_ERROR("Unable to create driver sysfs file\n"); 8023 IWL_ERROR("Unable to create driver sysfs file\n");
9411 pci_unregister_driver(&iwl4965_driver); 8024 goto error_debug;
9412 return ret;
9413 } 8025 }
9414#endif 8026#endif
9415 8027
9416 return ret; 8028 return ret;
8029
8030#ifdef CONFIG_IWLWIFI_DEBUG
8031error_debug:
8032 pci_unregister_driver(&iwl_driver);
8033#endif
8034error_register:
8035 iwl4965_rate_control_unregister();
8036 return ret;
9417} 8037}
9418 8038
9419static void __exit iwl4965_exit(void) 8039static void __exit iwl4965_exit(void)
9420{ 8040{
9421#ifdef CONFIG_IWL4965_DEBUG 8041#ifdef CONFIG_IWLWIFI_DEBUG
9422 driver_remove_file(&iwl4965_driver.driver, &driver_attr_debug_level); 8042 driver_remove_file(&iwl_driver.driver, &driver_attr_debug_level);
9423#endif 8043#endif
9424 pci_unregister_driver(&iwl4965_driver); 8044 pci_unregister_driver(&iwl_driver);
9425} 8045 iwl4965_rate_control_unregister();
9426 8046}
9427module_param_named(antenna, iwl4965_param_antenna, int, 0444);
9428MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])");
9429module_param_named(disable, iwl4965_param_disable, int, 0444);
9430MODULE_PARM_DESC(disable, "manually disable the radio (default 0 [radio on])");
9431module_param_named(hwcrypto, iwl4965_param_hwcrypto, int, 0444);
9432MODULE_PARM_DESC(hwcrypto,
9433 "using hardware crypto engine (default 0 [software])\n");
9434module_param_named(debug, iwl4965_param_debug, int, 0444);
9435MODULE_PARM_DESC(debug, "debug output mask");
9436module_param_named(disable_hw_scan, iwl4965_param_disable_hw_scan, int, 0444);
9437MODULE_PARM_DESC(disable_hw_scan, "disable hardware scanning (default 0)");
9438
9439module_param_named(queues_num, iwl4965_param_queues_num, int, 0444);
9440MODULE_PARM_DESC(queues_num, "number of hw queues.");
9441
9442/* QoS */
9443module_param_named(qos_enable, iwl4965_param_qos_enable, int, 0444);
9444MODULE_PARM_DESC(qos_enable, "enable all QoS functionality");
9445module_param_named(amsdu_size_8K, iwl4965_param_amsdu_size_8K, int, 0444);
9446MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size");
9447 8047
9448module_exit(iwl4965_exit); 8048module_exit(iwl4965_exit);
9449module_init(iwl4965_init); 8049module_init(iwl4965_init);
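
The reworked iwl4965_init() above registers the rate-control algorithm before the PCI driver and unwinds the two registrations in reverse order when a later step fails. Below is a minimal, self-contained sketch of that register-then-unwind pattern; register_a()/register_b() are hypothetical stand-ins, not iwlwifi symbols, and the snippet is an illustration rather than driver code.

/* Illustration only: the "register in order, unwind in reverse" pattern
 * used by the reworked module init. The registration helpers are
 * hypothetical stand-ins, not iwlwifi functions.
 */
#include <stdio.h>

static int register_a(void)    { puts("register A (rate control)"); return 0; }
static void unregister_a(void) { puts("unregister A"); }
static int register_b(void)    { puts("register B (PCI driver)"); return -1; /* simulate failure */ }

static int example_init(void)
{
	int ret;

	ret = register_a();
	if (ret)
		return ret;

	ret = register_b();
	if (ret)
		goto err_unregister_a;	/* B failed, so undo A before bailing out */

	return 0;

err_unregister_a:
	unregister_a();
	return ret;
}

int main(void)
{
	return example_init() ? 1 : 0;
}
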
diff --git a/drivers/net/wireless/libertas/11d.c b/drivers/net/wireless/libertas/11d.c
index 5e10ce0d351c..4bc46a60ae2f 100644
--- a/drivers/net/wireless/libertas/11d.c
+++ b/drivers/net/wireless/libertas/11d.c
@@ -79,7 +79,7 @@ static u8 *lbs_code_2_region(u8 code)
79 * @param nrchan number of channels 79 * @param nrchan number of channels
80 * @return the nrchan-th chan number 80 * @return the nrchan-th chan number
81*/ 81*/
82static u8 lbs_get_chan_11d(u8 band, u8 firstchan, u8 nrchan, u8 *chan) 82static u8 lbs_get_chan_11d(u8 firstchan, u8 nrchan, u8 *chan)
83/*find the nrchan-th chan after the firstchan*/ 83/*find the nrchan-th chan after the firstchan*/
84{ 84{
85 u8 i; 85 u8 i;
@@ -134,7 +134,7 @@ static u8 lbs_channel_known_11d(u8 chan,
134 return 0; 134 return 0;
135} 135}
136 136
137u32 lbs_chan_2_freq(u8 chan, u8 band) 137u32 lbs_chan_2_freq(u8 chan)
138{ 138{
139 struct chan_freq_power *cf; 139 struct chan_freq_power *cf;
140 u16 i; 140 u16 i;
@@ -264,7 +264,7 @@ static void lbs_generate_parsed_region_chan_11d(struct region_channel *region_ch
264 * @param chan chan 264 * @param chan chan
265 * @return TRUE;FALSE 265 * @return TRUE;FALSE
266*/ 266*/
267static u8 lbs_region_chan_supported_11d(u8 region, u8 band, u8 chan) 267static u8 lbs_region_chan_supported_11d(u8 region, u8 chan)
268{ 268{
269 struct chan_freq_power *cfp; 269 struct chan_freq_power *cfp;
270 int cfp_no; 270 int cfp_no;
@@ -273,7 +273,7 @@ static u8 lbs_region_chan_supported_11d(u8 region, u8 band, u8 chan)
273 273
274 lbs_deb_enter(LBS_DEB_11D); 274 lbs_deb_enter(LBS_DEB_11D);
275 275
276 cfp = lbs_get_region_cfp_table(region, band, &cfp_no); 276 cfp = lbs_get_region_cfp_table(region, &cfp_no);
277 if (cfp == NULL) 277 if (cfp == NULL)
278 return 0; 278 return 0;
279 279
@@ -367,7 +367,7 @@ static int parse_domain_info_11d(struct ieeetypes_countryinfofullset*
367 for (i = 0; idx < MAX_NO_OF_CHAN && i < nrchan; i++) { 367 for (i = 0; idx < MAX_NO_OF_CHAN && i < nrchan; i++) {
368 /*step4: channel is supported? */ 368 /*step4: channel is supported? */
369 369
370 if (!lbs_get_chan_11d(band, firstchan, i, &curchan)) { 370 if (!lbs_get_chan_11d(firstchan, i, &curchan)) {
371 /* Chan is not found in UN table */ 371 /* Chan is not found in UN table */
372 lbs_deb_11d("chan is not supported: %d \n", i); 372 lbs_deb_11d("chan is not supported: %d \n", i);
373 break; 373 break;
@@ -375,8 +375,7 @@ static int parse_domain_info_11d(struct ieeetypes_countryinfofullset*
375 375
376 lastchan = curchan; 376 lastchan = curchan;
377 377
378 if (lbs_region_chan_supported_11d 378 if (lbs_region_chan_supported_11d(region, curchan)) {
379 (region, band, curchan)) {
380 /*step5: Check if curchan is supported by mrvl in region */ 379 /*step5: Check if curchan is supported by mrvl in region */
381 parsed_region_chan->chanpwr[idx].chan = curchan; 380 parsed_region_chan->chanpwr[idx].chan = curchan;
382 parsed_region_chan->chanpwr[idx].pwr = 381 parsed_region_chan->chanpwr[idx].pwr =
@@ -554,8 +553,7 @@ done:
554 * @param resp pointer to command response buffer 553 * @param resp pointer to command response buffer
555 * @return 0; -1 554 * @return 0; -1
556 */ 555 */
557int lbs_ret_802_11d_domain_info(struct lbs_private *priv, 556int lbs_ret_802_11d_domain_info(struct cmd_ds_command *resp)
558 struct cmd_ds_command *resp)
559{ 557{
560 struct cmd_ds_802_11d_domain_info *domaininfo = &resp->params.domaininforesp; 558 struct cmd_ds_802_11d_domain_info *domaininfo = &resp->params.domaininforesp;
561 struct mrvlietypes_domainparamset *domain = &domaininfo->domain; 559 struct mrvlietypes_domainparamset *domain = &domaininfo->domain;
diff --git a/drivers/net/wireless/libertas/11d.h b/drivers/net/wireless/libertas/11d.h
index 811eea2cfba3..4f4f47f0f878 100644
--- a/drivers/net/wireless/libertas/11d.h
+++ b/drivers/net/wireless/libertas/11d.h
@@ -83,7 +83,7 @@ struct lbs_private;
83u8 lbs_get_scan_type_11d(u8 chan, 83u8 lbs_get_scan_type_11d(u8 chan,
84 struct parsed_region_chan_11d *parsed_region_chan); 84 struct parsed_region_chan_11d *parsed_region_chan);
85 85
86u32 lbs_chan_2_freq(u8 chan, u8 band); 86u32 lbs_chan_2_freq(u8 chan);
87 87
88void lbs_init_11d(struct lbs_private *priv); 88void lbs_init_11d(struct lbs_private *priv);
89 89
@@ -93,8 +93,7 @@ int lbs_cmd_802_11d_domain_info(struct lbs_private *priv,
93 struct cmd_ds_command *cmd, u16 cmdno, 93 struct cmd_ds_command *cmd, u16 cmdno,
94 u16 cmdOption); 94 u16 cmdOption);
95 95
96int lbs_ret_802_11d_domain_info(struct lbs_private *priv, 96int lbs_ret_802_11d_domain_info(struct cmd_ds_command *resp);
97 struct cmd_ds_command *resp);
98 97
99struct bss_descriptor; 98struct bss_descriptor;
100int lbs_parse_dnld_countryinfo_11d(struct lbs_private *priv, 99int lbs_parse_dnld_countryinfo_11d(struct lbs_private *priv,
diff --git a/drivers/net/wireless/libertas/Makefile b/drivers/net/wireless/libertas/Makefile
index 0e2787691f96..f0724e31adfd 100644
--- a/drivers/net/wireless/libertas/Makefile
+++ b/drivers/net/wireless/libertas/Makefile
@@ -1,7 +1,7 @@
1libertas-objs := main.o wext.o \ 1libertas-objs := main.o wext.o \
2 rx.o tx.o cmd.o \ 2 rx.o tx.o cmd.o \
3 cmdresp.o scan.o \ 3 cmdresp.o scan.o \
4 join.o 11d.o \ 4 11d.o \
5 debugfs.o \ 5 debugfs.o \
6 ethtool.o assoc.o 6 ethtool.o assoc.o
7 7
diff --git a/drivers/net/wireless/libertas/assoc.c b/drivers/net/wireless/libertas/assoc.c
index 6a24ed6067e0..c9c3640ce9fb 100644
--- a/drivers/net/wireless/libertas/assoc.c
+++ b/drivers/net/wireless/libertas/assoc.c
@@ -1,14 +1,11 @@
1/* Copyright (C) 2006, Red Hat, Inc. */ 1/* Copyright (C) 2006, Red Hat, Inc. */
2 2
3#include <linux/bitops.h>
4#include <net/ieee80211.h>
5#include <linux/etherdevice.h> 3#include <linux/etherdevice.h>
6 4
7#include "assoc.h" 5#include "assoc.h"
8#include "join.h"
9#include "decl.h" 6#include "decl.h"
10#include "hostcmd.h"
11#include "host.h" 7#include "host.h"
8#include "scan.h"
12#include "cmd.h" 9#include "cmd.h"
13 10
14 11
@@ -17,6 +14,428 @@ static const u8 bssid_any[ETH_ALEN] __attribute__ ((aligned (2))) =
17static const u8 bssid_off[ETH_ALEN] __attribute__ ((aligned (2))) = 14static const u8 bssid_off[ETH_ALEN] __attribute__ ((aligned (2))) =
18 { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; 15 { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
19 16
 17/* The firmware needs certain bits masked out of the beacon-derived capability
18 * field when associating/joining to BSSs.
19 */
20#define CAPINFO_MASK (~(0xda00))
21
22
23
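
As a quick worked example of the mask just defined: ~(0xda00) clears bits 9, 11, 12, 14 and 15 of the 16-bit capability field and keeps everything else, so 0xffff & CAPINFO_MASK equals 0x25ff. The standalone snippet below (illustration only, not driver code) just demonstrates that arithmetic.

#include <stdio.h>
#include <stdint.h>

#define CAPINFO_MASK (~(0xda00))	/* same definition as above */

int main(void)
{
	uint16_t capability = 0xffff;			/* every bit set in the beacon */
	uint16_t masked = capability & CAPINFO_MASK;	/* what would go to firmware */

	/* prints: capability 0xffff -> masked 0x25ff */
	printf("capability 0x%04x -> masked 0x%04x\n", capability, masked);
	return 0;
}
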
24/**
25 * @brief Associate to a specific BSS discovered in a scan
26 *
27 * @param priv A pointer to struct lbs_private structure
28 * @param pbssdesc Pointer to the BSS descriptor to associate with.
29 *
30 * @return 0-success, otherwise fail
31 */
32static int lbs_associate(struct lbs_private *priv,
33 struct assoc_request *assoc_req)
34{
35 int ret;
36
37 lbs_deb_enter(LBS_DEB_ASSOC);
38
39 ret = lbs_prepare_and_send_command(priv, CMD_802_11_AUTHENTICATE,
40 0, CMD_OPTION_WAITFORRSP,
41 0, assoc_req->bss.bssid);
42
43 if (ret)
44 goto done;
45
46 /* set preamble to firmware */
47 if ((priv->capability & WLAN_CAPABILITY_SHORT_PREAMBLE) &&
48 (assoc_req->bss.capability & WLAN_CAPABILITY_SHORT_PREAMBLE))
49 priv->preamble = CMD_TYPE_SHORT_PREAMBLE;
50 else
51 priv->preamble = CMD_TYPE_LONG_PREAMBLE;
52
53 lbs_set_radio_control(priv);
54
55 ret = lbs_prepare_and_send_command(priv, CMD_802_11_ASSOCIATE,
56 0, CMD_OPTION_WAITFORRSP, 0, assoc_req);
57
58done:
59 lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret);
60 return ret;
61}
62
63/**
64 * @brief Join an adhoc network found in a previous scan
65 *
66 * @param priv A pointer to struct lbs_private structure
67 * @param pbssdesc Pointer to a BSS descriptor found in a previous scan
68 * to attempt to join
69 *
70 * @return 0--success, -1--fail
71 */
72static int lbs_join_adhoc_network(struct lbs_private *priv,
73 struct assoc_request *assoc_req)
74{
75 struct bss_descriptor *bss = &assoc_req->bss;
76 int ret = 0;
77
78 lbs_deb_join("current SSID '%s', ssid length %u\n",
79 escape_essid(priv->curbssparams.ssid,
80 priv->curbssparams.ssid_len),
81 priv->curbssparams.ssid_len);
82 lbs_deb_join("requested ssid '%s', ssid length %u\n",
83 escape_essid(bss->ssid, bss->ssid_len),
84 bss->ssid_len);
85
86 /* check if the requested SSID is already joined */
87 if (priv->curbssparams.ssid_len &&
88 !lbs_ssid_cmp(priv->curbssparams.ssid,
89 priv->curbssparams.ssid_len,
90 bss->ssid, bss->ssid_len) &&
91 (priv->mode == IW_MODE_ADHOC) &&
92 (priv->connect_status == LBS_CONNECTED)) {
93 union iwreq_data wrqu;
94
95 lbs_deb_join("ADHOC_J_CMD: New ad-hoc SSID is the same as "
96 "current, not attempting to re-join");
97
98 /* Send the re-association event though, because the association
99 * request really was successful, even if just a null-op.
100 */
101 memset(&wrqu, 0, sizeof(wrqu));
102 memcpy(wrqu.ap_addr.sa_data, priv->curbssparams.bssid,
103 ETH_ALEN);
104 wrqu.ap_addr.sa_family = ARPHRD_ETHER;
105 wireless_send_event(priv->dev, SIOCGIWAP, &wrqu, NULL);
106 goto out;
107 }
108
109 /* Use shortpreamble only when both creator and card supports
110 short preamble */
111 if (!(bss->capability & WLAN_CAPABILITY_SHORT_PREAMBLE) ||
112 !(priv->capability & WLAN_CAPABILITY_SHORT_PREAMBLE)) {
113 lbs_deb_join("AdhocJoin: Long preamble\n");
114 priv->preamble = CMD_TYPE_LONG_PREAMBLE;
115 } else {
116 lbs_deb_join("AdhocJoin: Short preamble\n");
117 priv->preamble = CMD_TYPE_SHORT_PREAMBLE;
118 }
119
120 lbs_set_radio_control(priv);
121
122 lbs_deb_join("AdhocJoin: channel = %d\n", assoc_req->channel);
123 lbs_deb_join("AdhocJoin: band = %c\n", assoc_req->band);
124
125 priv->adhoccreate = 0;
126
127 ret = lbs_prepare_and_send_command(priv, CMD_802_11_AD_HOC_JOIN,
128 0, CMD_OPTION_WAITFORRSP,
129 OID_802_11_SSID, assoc_req);
130
131out:
132 return ret;
133}
134
135/**
136 * @brief Start an Adhoc Network
137 *
138 * @param priv A pointer to struct lbs_private structure
139 * @param adhocssid The ssid of the Adhoc Network
140 * @return 0--success, -1--fail
141 */
142static int lbs_start_adhoc_network(struct lbs_private *priv,
143 struct assoc_request *assoc_req)
144{
145 int ret = 0;
146
147 priv->adhoccreate = 1;
148
149 if (priv->capability & WLAN_CAPABILITY_SHORT_PREAMBLE) {
150 lbs_deb_join("AdhocStart: Short preamble\n");
151 priv->preamble = CMD_TYPE_SHORT_PREAMBLE;
152 } else {
153 lbs_deb_join("AdhocStart: Long preamble\n");
154 priv->preamble = CMD_TYPE_LONG_PREAMBLE;
155 }
156
157 lbs_set_radio_control(priv);
158
159 lbs_deb_join("AdhocStart: channel = %d\n", assoc_req->channel);
160 lbs_deb_join("AdhocStart: band = %d\n", assoc_req->band);
161
162 ret = lbs_prepare_and_send_command(priv, CMD_802_11_AD_HOC_START,
163 0, CMD_OPTION_WAITFORRSP, 0, assoc_req);
164
165 return ret;
166}
167
168int lbs_stop_adhoc_network(struct lbs_private *priv)
169{
170 return lbs_prepare_and_send_command(priv, CMD_802_11_AD_HOC_STOP,
171 0, CMD_OPTION_WAITFORRSP, 0, NULL);
172}
173
174static inline int match_bss_no_security(struct lbs_802_11_security *secinfo,
175 struct bss_descriptor *match_bss)
176{
177 if (!secinfo->wep_enabled && !secinfo->WPAenabled
178 && !secinfo->WPA2enabled
179 && match_bss->wpa_ie[0] != MFIE_TYPE_GENERIC
180 && match_bss->rsn_ie[0] != MFIE_TYPE_RSN
181 && !(match_bss->capability & WLAN_CAPABILITY_PRIVACY))
182 return 1;
183 else
184 return 0;
185}
186
187static inline int match_bss_static_wep(struct lbs_802_11_security *secinfo,
188 struct bss_descriptor *match_bss)
189{
190 if (secinfo->wep_enabled && !secinfo->WPAenabled
191 && !secinfo->WPA2enabled
192 && (match_bss->capability & WLAN_CAPABILITY_PRIVACY))
193 return 1;
194 else
195 return 0;
196}
197
198static inline int match_bss_wpa(struct lbs_802_11_security *secinfo,
199 struct bss_descriptor *match_bss)
200{
201 if (!secinfo->wep_enabled && secinfo->WPAenabled
202 && (match_bss->wpa_ie[0] == MFIE_TYPE_GENERIC)
203 /* privacy bit may NOT be set in some APs like LinkSys WRT54G
204 && (match_bss->capability & WLAN_CAPABILITY_PRIVACY) */
205 )
206 return 1;
207 else
208 return 0;
209}
210
211static inline int match_bss_wpa2(struct lbs_802_11_security *secinfo,
212 struct bss_descriptor *match_bss)
213{
214 if (!secinfo->wep_enabled && secinfo->WPA2enabled &&
215 (match_bss->rsn_ie[0] == MFIE_TYPE_RSN)
216 /* privacy bit may NOT be set in some APs like LinkSys WRT54G
217 (match_bss->capability & WLAN_CAPABILITY_PRIVACY) */
218 )
219 return 1;
220 else
221 return 0;
222}
223
224static inline int match_bss_dynamic_wep(struct lbs_802_11_security *secinfo,
225 struct bss_descriptor *match_bss)
226{
227 if (!secinfo->wep_enabled && !secinfo->WPAenabled
228 && !secinfo->WPA2enabled
229 && (match_bss->wpa_ie[0] != MFIE_TYPE_GENERIC)
230 && (match_bss->rsn_ie[0] != MFIE_TYPE_RSN)
231 && (match_bss->capability & WLAN_CAPABILITY_PRIVACY))
232 return 1;
233 else
234 return 0;
235}
236
237/**
 238 * @brief Check if a scanned network is compatible with the driver settings
239 *
240 * WEP WPA WPA2 ad-hoc encrypt Network
241 * enabled enabled enabled AES mode privacy WPA WPA2 Compatible
242 * 0 0 0 0 NONE 0 0 0 yes No security
243 * 1 0 0 0 NONE 1 0 0 yes Static WEP
244 * 0 1 0 0 x 1x 1 x yes WPA
245 * 0 0 1 0 x 1x x 1 yes WPA2
246 * 0 0 0 1 NONE 1 0 0 yes Ad-hoc AES
247 * 0 0 0 0 !=NONE 1 0 0 yes Dynamic WEP
248 *
249 *
250 * @param priv A pointer to struct lbs_private
 251 * @param bss BSS descriptor from the scan table to check against the current settings
 252 * @param mode Network mode: Infrastructure or IBSS
 253 *
 254 * @return 1 if the BSS is compatible with the current security settings, 0 otherwise
255 */
256static int is_network_compatible(struct lbs_private *priv,
257 struct bss_descriptor *bss, uint8_t mode)
258{
259 int matched = 0;
260
261 lbs_deb_enter(LBS_DEB_SCAN);
262
263 if (bss->mode != mode)
264 goto done;
265
266 matched = match_bss_no_security(&priv->secinfo, bss);
267 if (matched)
268 goto done;
269 matched = match_bss_static_wep(&priv->secinfo, bss);
270 if (matched)
271 goto done;
272 matched = match_bss_wpa(&priv->secinfo, bss);
273 if (matched) {
274 lbs_deb_scan("is_network_compatible() WPA: wpa_ie 0x%x "
275 "wpa2_ie 0x%x WEP %s WPA %s WPA2 %s "
276 "privacy 0x%x\n", bss->wpa_ie[0], bss->rsn_ie[0],
277 priv->secinfo.wep_enabled ? "e" : "d",
278 priv->secinfo.WPAenabled ? "e" : "d",
279 priv->secinfo.WPA2enabled ? "e" : "d",
280 (bss->capability & WLAN_CAPABILITY_PRIVACY));
281 goto done;
282 }
283 matched = match_bss_wpa2(&priv->secinfo, bss);
284 if (matched) {
285 lbs_deb_scan("is_network_compatible() WPA2: wpa_ie 0x%x "
286 "wpa2_ie 0x%x WEP %s WPA %s WPA2 %s "
287 "privacy 0x%x\n", bss->wpa_ie[0], bss->rsn_ie[0],
288 priv->secinfo.wep_enabled ? "e" : "d",
289 priv->secinfo.WPAenabled ? "e" : "d",
290 priv->secinfo.WPA2enabled ? "e" : "d",
291 (bss->capability & WLAN_CAPABILITY_PRIVACY));
292 goto done;
293 }
294 matched = match_bss_dynamic_wep(&priv->secinfo, bss);
295 if (matched) {
296 lbs_deb_scan("is_network_compatible() dynamic WEP: "
297 "wpa_ie 0x%x wpa2_ie 0x%x privacy 0x%x\n",
298 bss->wpa_ie[0], bss->rsn_ie[0],
299 (bss->capability & WLAN_CAPABILITY_PRIVACY));
300 goto done;
301 }
302
303 /* bss security settings don't match those configured on card */
304 lbs_deb_scan("is_network_compatible() FAILED: wpa_ie 0x%x "
305 "wpa2_ie 0x%x WEP %s WPA %s WPA2 %s privacy 0x%x\n",
306 bss->wpa_ie[0], bss->rsn_ie[0],
307 priv->secinfo.wep_enabled ? "e" : "d",
308 priv->secinfo.WPAenabled ? "e" : "d",
309 priv->secinfo.WPA2enabled ? "e" : "d",
310 (bss->capability & WLAN_CAPABILITY_PRIVACY));
311
312done:
313 lbs_deb_leave_args(LBS_DEB_SCAN, "matched: %d", matched);
314 return matched;
315}
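
A reduced, standalone sketch of the precedence applied above (first matcher wins: no security, static WEP, WPA, WPA2, then dynamic WEP). The structs below are simplified stand-ins for struct lbs_802_11_security and struct bss_descriptor, not the real driver types.

#include <stdio.h>

struct sec { int wep_enabled, WPAenabled, WPA2enabled; };
struct bss { int privacy, has_wpa_ie, has_rsn_ie; };

static const char *classify(const struct sec *s, const struct bss *b)
{
	if (!s->wep_enabled && !s->WPAenabled && !s->WPA2enabled &&
	    !b->has_wpa_ie && !b->has_rsn_ie && !b->privacy)
		return "no security";
	if (s->wep_enabled && !s->WPAenabled && !s->WPA2enabled && b->privacy)
		return "static WEP";
	if (!s->wep_enabled && s->WPAenabled && b->has_wpa_ie)
		return "WPA";
	if (!s->wep_enabled && s->WPA2enabled && b->has_rsn_ie)
		return "WPA2";
	if (!s->wep_enabled && !s->WPAenabled && !s->WPA2enabled &&
	    !b->has_wpa_ie && !b->has_rsn_ie && b->privacy)
		return "dynamic WEP";
	return "not compatible";
}

int main(void)
{
	struct sec wpa_only = { 0, 1, 0 };	/* WPA enabled, WEP/WPA2 off */
	struct bss wpa_ap   = { 1, 1, 0 };	/* privacy bit + WPA IE in the beacon */

	printf("%s\n", classify(&wpa_only, &wpa_ap));	/* prints "WPA" */
	return 0;
}
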
316
317/**
318 * @brief This function finds a specific compatible BSSID in the scan list
319 *
320 * Used in association code
321 *
322 * @param priv A pointer to struct lbs_private
323 * @param bssid BSSID to find in the scan list
324 * @param mode Network mode: Infrastructure or IBSS
325 *
 326 * @return pointer to the matching BSS descriptor, or NULL if none is found
327 */
328static struct bss_descriptor *lbs_find_bssid_in_list(struct lbs_private *priv,
329 uint8_t *bssid, uint8_t mode)
330{
331 struct bss_descriptor *iter_bss;
332 struct bss_descriptor *found_bss = NULL;
333
334 lbs_deb_enter(LBS_DEB_SCAN);
335
336 if (!bssid)
337 goto out;
338
339 lbs_deb_hex(LBS_DEB_SCAN, "looking for", bssid, ETH_ALEN);
340
341 /* Look through the scan table for a compatible match. The loop will
342 * continue past a matched bssid that is not compatible in case there
343 * is an AP with multiple SSIDs assigned to the same BSSID
344 */
345 mutex_lock(&priv->lock);
346 list_for_each_entry(iter_bss, &priv->network_list, list) {
347 if (compare_ether_addr(iter_bss->bssid, bssid))
348 continue; /* bssid doesn't match */
349 switch (mode) {
350 case IW_MODE_INFRA:
351 case IW_MODE_ADHOC:
352 if (!is_network_compatible(priv, iter_bss, mode))
353 break;
354 found_bss = iter_bss;
355 break;
356 default:
357 found_bss = iter_bss;
358 break;
359 }
360 }
361 mutex_unlock(&priv->lock);
362
363out:
364 lbs_deb_leave_args(LBS_DEB_SCAN, "found_bss %p", found_bss);
365 return found_bss;
366}
367
368/**
 369 * @brief This function finds an SSID in the scan list.
370 *
371 * Used in association code
372 *
373 * @param priv A pointer to struct lbs_private
374 * @param ssid SSID to find in the list
375 * @param bssid BSSID to qualify the SSID selection (if provided)
376 * @param mode Network mode: Infrastructure or IBSS
377 *
 378 * @return pointer to the best matching BSS descriptor, or NULL if none is found
379 */
380static struct bss_descriptor *lbs_find_ssid_in_list(struct lbs_private *priv,
381 uint8_t *ssid, uint8_t ssid_len,
382 uint8_t *bssid, uint8_t mode,
383 int channel)
384{
385 u32 bestrssi = 0;
386 struct bss_descriptor *iter_bss = NULL;
387 struct bss_descriptor *found_bss = NULL;
388 struct bss_descriptor *tmp_oldest = NULL;
389
390 lbs_deb_enter(LBS_DEB_SCAN);
391
392 mutex_lock(&priv->lock);
393
394 list_for_each_entry(iter_bss, &priv->network_list, list) {
395 if (!tmp_oldest ||
396 (iter_bss->last_scanned < tmp_oldest->last_scanned))
397 tmp_oldest = iter_bss;
398
399 if (lbs_ssid_cmp(iter_bss->ssid, iter_bss->ssid_len,
400 ssid, ssid_len) != 0)
401 continue; /* ssid doesn't match */
402 if (bssid && compare_ether_addr(iter_bss->bssid, bssid) != 0)
403 continue; /* bssid doesn't match */
404 if ((channel > 0) && (iter_bss->channel != channel))
405 continue; /* channel doesn't match */
406
407 switch (mode) {
408 case IW_MODE_INFRA:
409 case IW_MODE_ADHOC:
410 if (!is_network_compatible(priv, iter_bss, mode))
411 break;
412
413 if (bssid) {
414 /* Found requested BSSID */
415 found_bss = iter_bss;
416 goto out;
417 }
418
419 if (SCAN_RSSI(iter_bss->rssi) > bestrssi) {
420 bestrssi = SCAN_RSSI(iter_bss->rssi);
421 found_bss = iter_bss;
422 }
423 break;
424 case IW_MODE_AUTO:
425 default:
426 if (SCAN_RSSI(iter_bss->rssi) > bestrssi) {
427 bestrssi = SCAN_RSSI(iter_bss->rssi);
428 found_bss = iter_bss;
429 }
430 break;
431 }
432 }
433
434out:
435 mutex_unlock(&priv->lock);
436 lbs_deb_leave_args(LBS_DEB_SCAN, "found_bss %p", found_bss);
437 return found_bss;
438}
20 439
21static int assoc_helper_essid(struct lbs_private *priv, 440static int assoc_helper_essid(struct lbs_private *priv,
22 struct assoc_request * assoc_req) 441 struct assoc_request * assoc_req)
@@ -38,7 +457,7 @@ static int assoc_helper_essid(struct lbs_private *priv,
38 escape_essid(assoc_req->ssid, assoc_req->ssid_len)); 457 escape_essid(assoc_req->ssid, assoc_req->ssid_len));
39 if (assoc_req->mode == IW_MODE_INFRA) { 458 if (assoc_req->mode == IW_MODE_INFRA) {
40 lbs_send_specific_ssid_scan(priv, assoc_req->ssid, 459 lbs_send_specific_ssid_scan(priv, assoc_req->ssid,
41 assoc_req->ssid_len, 0); 460 assoc_req->ssid_len);
42 461
43 bss = lbs_find_ssid_in_list(priv, assoc_req->ssid, 462 bss = lbs_find_ssid_in_list(priv, assoc_req->ssid,
44 assoc_req->ssid_len, NULL, IW_MODE_INFRA, channel); 463 assoc_req->ssid_len, NULL, IW_MODE_INFRA, channel);
@@ -53,7 +472,7 @@ static int assoc_helper_essid(struct lbs_private *priv,
 53 * scan data will cause us to join a non-existent adhoc network 472 */
54 */ 473 */
55 lbs_send_specific_ssid_scan(priv, assoc_req->ssid, 474 lbs_send_specific_ssid_scan(priv, assoc_req->ssid,
56 assoc_req->ssid_len, 1); 475 assoc_req->ssid_len);
57 476
58 /* Search for the requested SSID in the scan table */ 477 /* Search for the requested SSID in the scan table */
59 bss = lbs_find_ssid_in_list(priv, assoc_req->ssid, 478 bss = lbs_find_ssid_in_list(priv, assoc_req->ssid,
@@ -164,34 +583,6 @@ done:
164 return ret; 583 return ret;
165} 584}
166 585
167
168int lbs_update_channel(struct lbs_private *priv)
169{
170 int ret;
171
172 /* the channel in f/w could be out of sync; get the current channel */
173 lbs_deb_enter(LBS_DEB_ASSOC);
174
175 ret = lbs_get_channel(priv);
176 if (ret > 0) {
177 priv->curbssparams.channel = ret;
178 ret = 0;
179 }
180 lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret);
181 return ret;
182}
183
184void lbs_sync_channel(struct work_struct *work)
185{
186 struct lbs_private *priv = container_of(work, struct lbs_private,
187 sync_channel);
188
189 lbs_deb_enter(LBS_DEB_ASSOC);
190 if (lbs_update_channel(priv))
191 lbs_pr_info("Channel synchronization failed.");
192 lbs_deb_leave(LBS_DEB_ASSOC);
193}
194
195static int assoc_helper_channel(struct lbs_private *priv, 586static int assoc_helper_channel(struct lbs_private *priv,
196 struct assoc_request * assoc_req) 587 struct assoc_request * assoc_req)
197{ 588{
@@ -279,13 +670,11 @@ static int assoc_helper_wep_keys(struct lbs_private *priv,
279 670
280 /* enable/disable the MAC's WEP packet filter */ 671 /* enable/disable the MAC's WEP packet filter */
281 if (assoc_req->secinfo.wep_enabled) 672 if (assoc_req->secinfo.wep_enabled)
282 priv->currentpacketfilter |= CMD_ACT_MAC_WEP_ENABLE; 673 priv->mac_control |= CMD_ACT_MAC_WEP_ENABLE;
283 else 674 else
284 priv->currentpacketfilter &= ~CMD_ACT_MAC_WEP_ENABLE; 675 priv->mac_control &= ~CMD_ACT_MAC_WEP_ENABLE;
285 676
286 ret = lbs_set_mac_packet_filter(priv); 677 lbs_set_mac_control(priv);
287 if (ret)
288 goto out;
289 678
290 mutex_lock(&priv->lock); 679 mutex_lock(&priv->lock);
291 680
@@ -315,9 +704,7 @@ static int assoc_helper_secinfo(struct lbs_private *priv,
315 memcpy(&priv->secinfo, &assoc_req->secinfo, 704 memcpy(&priv->secinfo, &assoc_req->secinfo,
316 sizeof(struct lbs_802_11_security)); 705 sizeof(struct lbs_802_11_security));
317 706
318 ret = lbs_set_mac_packet_filter(priv); 707 lbs_set_mac_control(priv);
319 if (ret)
320 goto out;
321 708
322 /* If RSN is already enabled, don't try to enable it again, since 709 /* If RSN is already enabled, don't try to enable it again, since
323 * ENABLE_RSN resets internal state machines and will clobber the 710 * ENABLE_RSN resets internal state machines and will clobber the
@@ -360,11 +747,7 @@ static int assoc_helper_wpa_keys(struct lbs_private *priv,
360 747
361 if (test_bit(ASSOC_FLAG_WPA_UCAST_KEY, &assoc_req->flags)) { 748 if (test_bit(ASSOC_FLAG_WPA_UCAST_KEY, &assoc_req->flags)) {
362 clear_bit(ASSOC_FLAG_WPA_MCAST_KEY, &assoc_req->flags); 749 clear_bit(ASSOC_FLAG_WPA_MCAST_KEY, &assoc_req->flags);
363 ret = lbs_prepare_and_send_command(priv, 750 ret = lbs_cmd_802_11_key_material(priv, CMD_ACT_SET, assoc_req);
364 CMD_802_11_KEY_MATERIAL,
365 CMD_ACT_SET,
366 CMD_OPTION_WAITFORRSP,
367 0, assoc_req);
368 assoc_req->flags = flags; 751 assoc_req->flags = flags;
369 } 752 }
370 753
@@ -374,11 +757,7 @@ static int assoc_helper_wpa_keys(struct lbs_private *priv,
374 if (test_bit(ASSOC_FLAG_WPA_MCAST_KEY, &assoc_req->flags)) { 757 if (test_bit(ASSOC_FLAG_WPA_MCAST_KEY, &assoc_req->flags)) {
375 clear_bit(ASSOC_FLAG_WPA_UCAST_KEY, &assoc_req->flags); 758 clear_bit(ASSOC_FLAG_WPA_UCAST_KEY, &assoc_req->flags);
376 759
377 ret = lbs_prepare_and_send_command(priv, 760 ret = lbs_cmd_802_11_key_material(priv, CMD_ACT_SET, assoc_req);
378 CMD_802_11_KEY_MATERIAL,
379 CMD_ACT_SET,
380 CMD_OPTION_WAITFORRSP,
381 0, assoc_req);
382 assoc_req->flags = flags; 761 assoc_req->flags = flags;
383 } 762 }
384 763
@@ -413,11 +792,10 @@ static int should_deauth_infrastructure(struct lbs_private *priv,
413{ 792{
414 int ret = 0; 793 int ret = 0;
415 794
416 lbs_deb_enter(LBS_DEB_ASSOC);
417
418 if (priv->connect_status != LBS_CONNECTED) 795 if (priv->connect_status != LBS_CONNECTED)
419 return 0; 796 return 0;
420 797
798 lbs_deb_enter(LBS_DEB_ASSOC);
421 if (test_bit(ASSOC_FLAG_SSID, &assoc_req->flags)) { 799 if (test_bit(ASSOC_FLAG_SSID, &assoc_req->flags)) {
422 lbs_deb_assoc("Deauthenticating due to new SSID\n"); 800 lbs_deb_assoc("Deauthenticating due to new SSID\n");
423 ret = 1; 801 ret = 1;
@@ -456,7 +834,7 @@ static int should_deauth_infrastructure(struct lbs_private *priv,
456 834
457out: 835out:
458 lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret); 836 lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret);
459 return 0; 837 return ret;
460} 838}
461 839
462 840
@@ -489,6 +867,91 @@ static int should_stop_adhoc(struct lbs_private *priv,
489} 867}
490 868
491 869
870/**
871 * @brief This function finds the best SSID in the Scan List
872 *
873 * Search the scan table for the best SSID that also matches the current
874 * adapter network preference (infrastructure or adhoc)
875 *
876 * @param priv A pointer to struct lbs_private
877 *
 878 * @return pointer to the best matching BSS descriptor, or NULL if none is found
879 */
880static struct bss_descriptor *lbs_find_best_ssid_in_list(
881 struct lbs_private *priv, uint8_t mode)
882{
883 uint8_t bestrssi = 0;
884 struct bss_descriptor *iter_bss;
885 struct bss_descriptor *best_bss = NULL;
886
887 lbs_deb_enter(LBS_DEB_SCAN);
888
889 mutex_lock(&priv->lock);
890
891 list_for_each_entry(iter_bss, &priv->network_list, list) {
892 switch (mode) {
893 case IW_MODE_INFRA:
894 case IW_MODE_ADHOC:
895 if (!is_network_compatible(priv, iter_bss, mode))
896 break;
897 if (SCAN_RSSI(iter_bss->rssi) <= bestrssi)
898 break;
899 bestrssi = SCAN_RSSI(iter_bss->rssi);
900 best_bss = iter_bss;
901 break;
902 case IW_MODE_AUTO:
903 default:
904 if (SCAN_RSSI(iter_bss->rssi) <= bestrssi)
905 break;
906 bestrssi = SCAN_RSSI(iter_bss->rssi);
907 best_bss = iter_bss;
908 break;
909 }
910 }
911
912 mutex_unlock(&priv->lock);
913 lbs_deb_leave_args(LBS_DEB_SCAN, "best_bss %p", best_bss);
914 return best_bss;
915}
916
917/**
918 * @brief Find the best AP
919 *
920 * Used from association worker.
921 *
922 * @param priv A pointer to struct lbs_private structure
923 * @param pSSID A pointer to AP's ssid
924 *
925 * @return 0--success, otherwise--fail
926 */
927static int lbs_find_best_network_ssid(struct lbs_private *priv,
928 uint8_t *out_ssid, uint8_t *out_ssid_len, uint8_t preferred_mode,
929 uint8_t *out_mode)
930{
931 int ret = -1;
932 struct bss_descriptor *found;
933
934 lbs_deb_enter(LBS_DEB_SCAN);
935
936 priv->scan_ssid_len = 0;
937 lbs_scan_networks(priv, 1);
938 if (priv->surpriseremoved)
939 goto out;
940
941 found = lbs_find_best_ssid_in_list(priv, preferred_mode);
942 if (found && (found->ssid_len > 0)) {
943 memcpy(out_ssid, &found->ssid, IW_ESSID_MAX_SIZE);
944 *out_ssid_len = found->ssid_len;
945 *out_mode = found->mode;
946 ret = 0;
947 }
948
949out:
950 lbs_deb_leave_args(LBS_DEB_SCAN, "ret %d", ret);
951 return ret;
952}
953
954
492void lbs_association_worker(struct work_struct *work) 955void lbs_association_worker(struct work_struct *work)
493{ 956{
494 struct lbs_private *priv = container_of(work, struct lbs_private, 957 struct lbs_private *priv = container_of(work, struct lbs_private,
@@ -643,17 +1106,11 @@ void lbs_association_worker(struct work_struct *work)
643 } 1106 }
644 1107
645 if (success) { 1108 if (success) {
646 lbs_deb_assoc("ASSOC: associated to '%s', %s\n", 1109 lbs_deb_assoc("associated to %s\n",
647 escape_essid(priv->curbssparams.ssid,
648 priv->curbssparams.ssid_len),
649 print_mac(mac, priv->curbssparams.bssid)); 1110 print_mac(mac, priv->curbssparams.bssid));
650 lbs_prepare_and_send_command(priv, 1111 lbs_prepare_and_send_command(priv,
651 CMD_802_11_RSSI, 1112 CMD_802_11_RSSI,
652 0, CMD_OPTION_WAITFORRSP, 0, NULL); 1113 0, CMD_OPTION_WAITFORRSP, 0, NULL);
653
654 lbs_prepare_and_send_command(priv,
655 CMD_802_11_GET_LOG,
656 0, CMD_OPTION_WAITFORRSP, 0, NULL);
657 } else { 1114 } else {
658 ret = -1; 1115 ret = -1;
659 } 1116 }
@@ -752,3 +1209,705 @@ struct assoc_request *lbs_get_association_request(struct lbs_private *priv)
752 lbs_deb_leave(LBS_DEB_ASSOC); 1209 lbs_deb_leave(LBS_DEB_ASSOC);
753 return assoc_req; 1210 return assoc_req;
754} 1211}
1212
1213
1214/**
 1215 * @brief This function finds the rates common to the given rate set and the card rates.
 1216 *
 1217 * It writes the common rates back into the rates buffer as output if found.
 1218 *
 1219 * NOTE: Setting the MSB on the basic rates must be handled by the caller,
 1220 * either before or after calling this function.
 1221 *
 1222 * @param priv A pointer to struct lbs_private structure
 1223 * @param rates the buffer which holds the input rates and receives the common rates
 1224 * @param rates_size the size of the rates buffer; updated to the number of common rates on return
1225 *
1226 * @return 0 or -1
1227 */
1228static int get_common_rates(struct lbs_private *priv,
1229 u8 *rates,
1230 u16 *rates_size)
1231{
1232 u8 *card_rates = lbs_bg_rates;
1233 size_t num_card_rates = sizeof(lbs_bg_rates);
1234 int ret = 0, i, j;
1235 u8 tmp[30];
1236 size_t tmp_size = 0;
1237
1238 /* For each rate in card_rates that exists in rate1, copy to tmp */
1239 for (i = 0; card_rates[i] && (i < num_card_rates); i++) {
1240 for (j = 0; rates[j] && (j < *rates_size); j++) {
1241 if (rates[j] == card_rates[i])
1242 tmp[tmp_size++] = card_rates[i];
1243 }
1244 }
1245
1246 lbs_deb_hex(LBS_DEB_JOIN, "AP rates ", rates, *rates_size);
1247 lbs_deb_hex(LBS_DEB_JOIN, "card rates ", card_rates, num_card_rates);
1248 lbs_deb_hex(LBS_DEB_JOIN, "common rates", tmp, tmp_size);
1249 lbs_deb_join("TX data rate 0x%02x\n", priv->cur_rate);
1250
1251 if (!priv->auto_rate) {
1252 for (i = 0; i < tmp_size; i++) {
1253 if (tmp[i] == priv->cur_rate)
1254 goto done;
1255 }
1256 lbs_pr_alert("Previously set fixed data rate %#x isn't "
1257 "compatible with the network.\n", priv->cur_rate);
1258 ret = -1;
1259 goto done;
1260 }
1261 ret = 0;
1262
1263done:
1264 memset(rates, 0, *rates_size);
1265 *rates_size = min_t(int, tmp_size, *rates_size);
1266 memcpy(rates, tmp, *rates_size);
1267 return ret;
1268}
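
A simplified, standalone illustration of the intersection performed by get_common_rates() above; it is not the driver function (it drops the fixed-rate check and the kernel types) but shows the same idea: keep only the AP-advertised rates that the card also supports, preserving card order.

#include <stdio.h>
#include <string.h>

static size_t common_rates(const unsigned char *card, size_t ncard,
			   unsigned char *ap, size_t nap)
{
	unsigned char tmp[30];
	size_t n = 0, i, j;

	/* keep each card rate that the AP also advertises, in card order */
	for (i = 0; i < ncard && card[i]; i++)
		for (j = 0; j < nap && ap[j]; j++)
			if (ap[j] == card[i] && n < sizeof(tmp))
				tmp[n++] = card[i];

	memset(ap, 0, nap);
	memcpy(ap, tmp, n);
	return n;
}

int main(void)
{
	unsigned char card[] = { 0x02, 0x04, 0x0b, 0x16, 0x0c, 0x12, 0x18, 0x24 };
	unsigned char ap[30] = { 0x02, 0x04, 0x0b, 0x16, 0x30, 0x48 };
	size_t i, n = common_rates(card, sizeof(card), ap, sizeof(ap));

	for (i = 0; i < n; i++)
		printf("0x%02x ", ap[i]);	/* prints 0x02 0x04 0x0b 0x16 */
	printf("\n");
	return 0;
}
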
1269
1270
1271/**
1272 * @brief Sets the MSB on basic rates as the firmware requires
1273 *
1274 * Scan through an array and set the MSB for basic data rates.
1275 *
1276 * @param rates buffer of data rates
1277 * @param len size of buffer
1278 */
1279static void lbs_set_basic_rate_flags(u8 *rates, size_t len)
1280{
1281 int i;
1282
1283 for (i = 0; i < len; i++) {
1284 if (rates[i] == 0x02 || rates[i] == 0x04 ||
1285 rates[i] == 0x0b || rates[i] == 0x16)
1286 rates[i] |= 0x80;
1287 }
1288}
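
For context on the magic values above: 802.11 advertises supported rates in units of 500 kbps, so 0x02, 0x04, 0x0b and 0x16 are the 1, 2, 5.5 and 11 Mbps 802.11b rates, and setting bit 7 (0x80) marks a rate as basic in the Supported Rates element. The standalone snippet below (illustration only, not driver code) prints that encoding.

#include <stdio.h>

int main(void)
{
	unsigned char rates[] = { 0x02, 0x04, 0x0b, 0x16, 0x0c, 0x18 };
	size_t i;

	for (i = 0; i < sizeof(rates); i++) {
		unsigned char r = rates[i];
		int basic = (r == 0x02 || r == 0x04 || r == 0x0b || r == 0x16);

		/* e.g. "rate 0x82 = 1.0 Mbps (basic, MSB set)" */
		printf("rate 0x%02x = %.1f Mbps%s\n",
		       basic ? (r | 0x80) : r,
		       r * 0.5,			/* units of 500 kbps */
		       basic ? " (basic, MSB set)" : "");
	}
	return 0;
}
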
1289
1290/**
1291 * @brief Send Deauthentication Request
1292 *
1293 * @param priv A pointer to struct lbs_private structure
1294 * @return 0--success, -1--fail
1295 */
1296int lbs_send_deauthentication(struct lbs_private *priv)
1297{
1298 return lbs_prepare_and_send_command(priv, CMD_802_11_DEAUTHENTICATE,
1299 0, CMD_OPTION_WAITFORRSP, 0, NULL);
1300}
1301
1302/**
1303 * @brief This function prepares command of authenticate.
1304 *
1305 * @param priv A pointer to struct lbs_private structure
1306 * @param cmd A pointer to cmd_ds_command structure
1307 * @param pdata_buf Void cast of pointer to a BSSID to authenticate with
1308 *
1309 * @return 0 or -1
1310 */
1311int lbs_cmd_80211_authenticate(struct lbs_private *priv,
1312 struct cmd_ds_command *cmd,
1313 void *pdata_buf)
1314{
1315 struct cmd_ds_802_11_authenticate *pauthenticate = &cmd->params.auth;
1316 int ret = -1;
1317 u8 *bssid = pdata_buf;
1318 DECLARE_MAC_BUF(mac);
1319
1320 lbs_deb_enter(LBS_DEB_JOIN);
1321
1322 cmd->command = cpu_to_le16(CMD_802_11_AUTHENTICATE);
1323 cmd->size = cpu_to_le16(sizeof(struct cmd_ds_802_11_authenticate)
1324 + S_DS_GEN);
1325
1326 /* translate auth mode to 802.11 defined wire value */
1327 switch (priv->secinfo.auth_mode) {
1328 case IW_AUTH_ALG_OPEN_SYSTEM:
1329 pauthenticate->authtype = 0x00;
1330 break;
1331 case IW_AUTH_ALG_SHARED_KEY:
1332 pauthenticate->authtype = 0x01;
1333 break;
1334 case IW_AUTH_ALG_LEAP:
1335 pauthenticate->authtype = 0x80;
1336 break;
1337 default:
1338 lbs_deb_join("AUTH_CMD: invalid auth alg 0x%X\n",
1339 priv->secinfo.auth_mode);
1340 goto out;
1341 }
1342
1343 memcpy(pauthenticate->macaddr, bssid, ETH_ALEN);
1344
1345 lbs_deb_join("AUTH_CMD: BSSID %s, auth 0x%x\n",
1346 print_mac(mac, bssid), pauthenticate->authtype);
1347 ret = 0;
1348
1349out:
1350 lbs_deb_leave_args(LBS_DEB_JOIN, "ret %d", ret);
1351 return ret;
1352}
1353
1354int lbs_cmd_80211_deauthenticate(struct lbs_private *priv,
1355 struct cmd_ds_command *cmd)
1356{
1357 struct cmd_ds_802_11_deauthenticate *dauth = &cmd->params.deauth;
1358
1359 lbs_deb_enter(LBS_DEB_JOIN);
1360
1361 cmd->command = cpu_to_le16(CMD_802_11_DEAUTHENTICATE);
1362 cmd->size = cpu_to_le16(sizeof(struct cmd_ds_802_11_deauthenticate) +
1363 S_DS_GEN);
1364
1365 /* set AP MAC address */
1366 memmove(dauth->macaddr, priv->curbssparams.bssid, ETH_ALEN);
1367
1368 /* Reason code 3 = Station is leaving */
1369#define REASON_CODE_STA_LEAVING 3
1370 dauth->reasoncode = cpu_to_le16(REASON_CODE_STA_LEAVING);
1371
1372 lbs_deb_leave(LBS_DEB_JOIN);
1373 return 0;
1374}
1375
1376int lbs_cmd_80211_associate(struct lbs_private *priv,
1377 struct cmd_ds_command *cmd, void *pdata_buf)
1378{
1379 struct cmd_ds_802_11_associate *passo = &cmd->params.associate;
1380 int ret = 0;
1381 struct assoc_request *assoc_req = pdata_buf;
1382 struct bss_descriptor *bss = &assoc_req->bss;
1383 u8 *pos;
1384 u16 tmpcap, tmplen;
1385 struct mrvlietypes_ssidparamset *ssid;
1386 struct mrvlietypes_phyparamset *phy;
1387 struct mrvlietypes_ssparamset *ss;
1388 struct mrvlietypes_ratesparamset *rates;
1389 struct mrvlietypes_rsnparamset *rsn;
1390
1391 lbs_deb_enter(LBS_DEB_ASSOC);
1392
1393 pos = (u8 *) passo;
1394
1395 if (!priv) {
1396 ret = -1;
1397 goto done;
1398 }
1399
1400 cmd->command = cpu_to_le16(CMD_802_11_ASSOCIATE);
1401
1402 memcpy(passo->peerstaaddr, bss->bssid, sizeof(passo->peerstaaddr));
1403 pos += sizeof(passo->peerstaaddr);
1404
1405 /* set the listen interval */
1406 passo->listeninterval = cpu_to_le16(MRVDRV_DEFAULT_LISTEN_INTERVAL);
1407
1408 pos += sizeof(passo->capability);
1409 pos += sizeof(passo->listeninterval);
1410 pos += sizeof(passo->bcnperiod);
1411 pos += sizeof(passo->dtimperiod);
1412
1413 ssid = (struct mrvlietypes_ssidparamset *) pos;
1414 ssid->header.type = cpu_to_le16(TLV_TYPE_SSID);
1415 tmplen = bss->ssid_len;
1416 ssid->header.len = cpu_to_le16(tmplen);
1417 memcpy(ssid->ssid, bss->ssid, tmplen);
1418 pos += sizeof(ssid->header) + tmplen;
1419
1420 phy = (struct mrvlietypes_phyparamset *) pos;
1421 phy->header.type = cpu_to_le16(TLV_TYPE_PHY_DS);
1422 tmplen = sizeof(phy->fh_ds.dsparamset);
1423 phy->header.len = cpu_to_le16(tmplen);
1424 memcpy(&phy->fh_ds.dsparamset,
1425 &bss->phyparamset.dsparamset.currentchan,
1426 tmplen);
1427 pos += sizeof(phy->header) + tmplen;
1428
1429 ss = (struct mrvlietypes_ssparamset *) pos;
1430 ss->header.type = cpu_to_le16(TLV_TYPE_CF);
1431 tmplen = sizeof(ss->cf_ibss.cfparamset);
1432 ss->header.len = cpu_to_le16(tmplen);
1433 pos += sizeof(ss->header) + tmplen;
1434
1435 rates = (struct mrvlietypes_ratesparamset *) pos;
1436 rates->header.type = cpu_to_le16(TLV_TYPE_RATES);
1437 memcpy(&rates->rates, &bss->rates, MAX_RATES);
1438 tmplen = MAX_RATES;
1439 if (get_common_rates(priv, rates->rates, &tmplen)) {
1440 ret = -1;
1441 goto done;
1442 }
1443 pos += sizeof(rates->header) + tmplen;
1444 rates->header.len = cpu_to_le16(tmplen);
1445 lbs_deb_assoc("ASSOC_CMD: num rates %u\n", tmplen);
1446
1447 /* Copy the infra. association rates into Current BSS state structure */
1448 memset(&priv->curbssparams.rates, 0, sizeof(priv->curbssparams.rates));
1449 memcpy(&priv->curbssparams.rates, &rates->rates, tmplen);
1450
1451 /* Set MSB on basic rates as the firmware requires, but _after_
1452 * copying to current bss rates.
1453 */
1454 lbs_set_basic_rate_flags(rates->rates, tmplen);
1455
1456 if (assoc_req->secinfo.WPAenabled || assoc_req->secinfo.WPA2enabled) {
1457 rsn = (struct mrvlietypes_rsnparamset *) pos;
1458 /* WPA_IE or WPA2_IE */
1459 rsn->header.type = cpu_to_le16((u16) assoc_req->wpa_ie[0]);
1460 tmplen = (u16) assoc_req->wpa_ie[1];
1461 rsn->header.len = cpu_to_le16(tmplen);
1462 memcpy(rsn->rsnie, &assoc_req->wpa_ie[2], tmplen);
1463 lbs_deb_hex(LBS_DEB_JOIN, "ASSOC_CMD: RSN IE", (u8 *) rsn,
1464 sizeof(rsn->header) + tmplen);
1465 pos += sizeof(rsn->header) + tmplen;
1466 }
1467
1468 /* update curbssparams */
1469 priv->curbssparams.channel = bss->phyparamset.dsparamset.currentchan;
1470
1471 if (lbs_parse_dnld_countryinfo_11d(priv, bss)) {
1472 ret = -1;
1473 goto done;
1474 }
1475
1476 cmd->size = cpu_to_le16((u16) (pos - (u8 *) passo) + S_DS_GEN);
1477
1478 /* set the capability info */
1479 tmpcap = (bss->capability & CAPINFO_MASK);
1480 if (bss->mode == IW_MODE_INFRA)
1481 tmpcap |= WLAN_CAPABILITY_ESS;
1482 passo->capability = cpu_to_le16(tmpcap);
1483 lbs_deb_assoc("ASSOC_CMD: capability 0x%04x\n", tmpcap);
1484
1485done:
1486 lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret);
1487 return ret;
1488}
1489
1490int lbs_cmd_80211_ad_hoc_start(struct lbs_private *priv,
1491 struct cmd_ds_command *cmd, void *pdata_buf)
1492{
1493 struct cmd_ds_802_11_ad_hoc_start *adhs = &cmd->params.ads;
1494 int ret = 0;
1495 int cmdappendsize = 0;
1496 struct assoc_request *assoc_req = pdata_buf;
1497 u16 tmpcap = 0;
1498 size_t ratesize = 0;
1499
1500 lbs_deb_enter(LBS_DEB_JOIN);
1501
1502 if (!priv) {
1503 ret = -1;
1504 goto done;
1505 }
1506
1507 cmd->command = cpu_to_le16(CMD_802_11_AD_HOC_START);
1508
1509 /*
1510 * Fill in the parameters for 2 data structures:
1511 * 1. cmd_ds_802_11_ad_hoc_start command
1512 * 2. priv->scantable[i]
1513 *
1514 * Driver will fill up SSID, bsstype,IBSS param, Physical Param,
1515 * probe delay, and cap info.
1516 *
1517 * Firmware will fill up beacon period, DTIM, Basic rates
1518 * and operational rates.
1519 */
1520
1521 memset(adhs->ssid, 0, IW_ESSID_MAX_SIZE);
1522 memcpy(adhs->ssid, assoc_req->ssid, assoc_req->ssid_len);
1523
1524 lbs_deb_join("ADHOC_S_CMD: SSID '%s', ssid length %u\n",
1525 escape_essid(assoc_req->ssid, assoc_req->ssid_len),
1526 assoc_req->ssid_len);
1527
1528 /* set the BSS type */
1529 adhs->bsstype = CMD_BSS_TYPE_IBSS;
1530 priv->mode = IW_MODE_ADHOC;
1531 if (priv->beacon_period == 0)
1532 priv->beacon_period = MRVDRV_BEACON_INTERVAL;
1533 adhs->beaconperiod = cpu_to_le16(priv->beacon_period);
1534
1535 /* set Physical param set */
1536#define DS_PARA_IE_ID 3
1537#define DS_PARA_IE_LEN 1
1538
1539 adhs->phyparamset.dsparamset.elementid = DS_PARA_IE_ID;
1540 adhs->phyparamset.dsparamset.len = DS_PARA_IE_LEN;
1541
1542 WARN_ON(!assoc_req->channel);
1543
1544 lbs_deb_join("ADHOC_S_CMD: Creating ADHOC on channel %d\n",
1545 assoc_req->channel);
1546
1547 adhs->phyparamset.dsparamset.currentchan = assoc_req->channel;
1548
1549 /* set IBSS param set */
1550#define IBSS_PARA_IE_ID 6
1551#define IBSS_PARA_IE_LEN 2
1552
1553 adhs->ssparamset.ibssparamset.elementid = IBSS_PARA_IE_ID;
1554 adhs->ssparamset.ibssparamset.len = IBSS_PARA_IE_LEN;
1555 adhs->ssparamset.ibssparamset.atimwindow = 0;
1556
1557 /* set capability info */
1558 tmpcap = WLAN_CAPABILITY_IBSS;
1559 if (assoc_req->secinfo.wep_enabled) {
1560 lbs_deb_join("ADHOC_S_CMD: WEP enabled, "
1561 "setting privacy on\n");
1562 tmpcap |= WLAN_CAPABILITY_PRIVACY;
1563 } else {
1564 lbs_deb_join("ADHOC_S_CMD: WEP disabled, "
1565 "setting privacy off\n");
1566 }
1567 adhs->capability = cpu_to_le16(tmpcap);
1568
1569 /* probedelay */
1570 adhs->probedelay = cpu_to_le16(CMD_SCAN_PROBE_DELAY_TIME);
1571
1572 memset(adhs->rates, 0, sizeof(adhs->rates));
1573 ratesize = min(sizeof(adhs->rates), sizeof(lbs_bg_rates));
1574 memcpy(adhs->rates, lbs_bg_rates, ratesize);
1575
1576 /* Copy the ad-hoc creating rates into Current BSS state structure */
1577 memset(&priv->curbssparams.rates, 0, sizeof(priv->curbssparams.rates));
1578 memcpy(&priv->curbssparams.rates, &adhs->rates, ratesize);
1579
1580 /* Set MSB on basic rates as the firmware requires, but _after_
1581 * copying to current bss rates.
1582 */
1583 lbs_set_basic_rate_flags(adhs->rates, ratesize);
1584
1585 lbs_deb_join("ADHOC_S_CMD: rates=%02x %02x %02x %02x \n",
1586 adhs->rates[0], adhs->rates[1], adhs->rates[2], adhs->rates[3]);
1587
1588 lbs_deb_join("ADHOC_S_CMD: AD HOC Start command is ready\n");
1589
1590 if (lbs_create_dnld_countryinfo_11d(priv)) {
1591 lbs_deb_join("ADHOC_S_CMD: dnld_countryinfo_11d failed\n");
1592 ret = -1;
1593 goto done;
1594 }
1595
1596 cmd->size = cpu_to_le16(sizeof(struct cmd_ds_802_11_ad_hoc_start) +
1597 S_DS_GEN + cmdappendsize);
1598
1599 ret = 0;
1600done:
1601 lbs_deb_leave_args(LBS_DEB_JOIN, "ret %d", ret);
1602 return ret;
1603}
1604
1605int lbs_cmd_80211_ad_hoc_stop(struct cmd_ds_command *cmd)
1606{
1607 cmd->command = cpu_to_le16(CMD_802_11_AD_HOC_STOP);
1608 cmd->size = cpu_to_le16(S_DS_GEN);
1609
1610 return 0;
1611}
1612
1613int lbs_cmd_80211_ad_hoc_join(struct lbs_private *priv,
1614 struct cmd_ds_command *cmd, void *pdata_buf)
1615{
1616 struct cmd_ds_802_11_ad_hoc_join *join_cmd = &cmd->params.adj;
1617 struct assoc_request *assoc_req = pdata_buf;
1618 struct bss_descriptor *bss = &assoc_req->bss;
1619 int cmdappendsize = 0;
1620 int ret = 0;
1621 u16 ratesize = 0;
1622 DECLARE_MAC_BUF(mac);
1623
1624 lbs_deb_enter(LBS_DEB_JOIN);
1625
1626 cmd->command = cpu_to_le16(CMD_802_11_AD_HOC_JOIN);
1627
1628 join_cmd->bss.type = CMD_BSS_TYPE_IBSS;
1629 join_cmd->bss.beaconperiod = cpu_to_le16(bss->beaconperiod);
1630
1631 memcpy(&join_cmd->bss.bssid, &bss->bssid, ETH_ALEN);
1632 memcpy(&join_cmd->bss.ssid, &bss->ssid, bss->ssid_len);
1633
1634 memcpy(&join_cmd->bss.phyparamset, &bss->phyparamset,
1635 sizeof(union ieeetypes_phyparamset));
1636
1637 memcpy(&join_cmd->bss.ssparamset, &bss->ssparamset,
1638 sizeof(union IEEEtypes_ssparamset));
1639
1640 join_cmd->bss.capability = cpu_to_le16(bss->capability & CAPINFO_MASK);
1641 lbs_deb_join("ADHOC_J_CMD: tmpcap=%4X CAPINFO_MASK=%4X\n",
1642 bss->capability, CAPINFO_MASK);
1643
1644 /* information on BSSID descriptor passed to FW */
1645 lbs_deb_join(
1646 "ADHOC_J_CMD: BSSID = %s, SSID = '%s'\n",
1647 print_mac(mac, join_cmd->bss.bssid),
1648 join_cmd->bss.ssid);
1649
1650 /* failtimeout */
1651 join_cmd->failtimeout = cpu_to_le16(MRVDRV_ASSOCIATION_TIME_OUT);
1652
1653 /* probedelay */
1654 join_cmd->probedelay = cpu_to_le16(CMD_SCAN_PROBE_DELAY_TIME);
1655
1656 priv->curbssparams.channel = bss->channel;
1657
1658 /* Copy Data rates from the rates recorded in scan response */
1659 memset(join_cmd->bss.rates, 0, sizeof(join_cmd->bss.rates));
1660 ratesize = min_t(u16, sizeof(join_cmd->bss.rates), MAX_RATES);
1661 memcpy(join_cmd->bss.rates, bss->rates, ratesize);
1662 if (get_common_rates(priv, join_cmd->bss.rates, &ratesize)) {
1663 lbs_deb_join("ADHOC_J_CMD: get_common_rates returns error.\n");
1664 ret = -1;
1665 goto done;
1666 }
1667
1668 /* Copy the ad-hoc creating rates into Current BSS state structure */
1669 memset(&priv->curbssparams.rates, 0, sizeof(priv->curbssparams.rates));
1670 memcpy(&priv->curbssparams.rates, join_cmd->bss.rates, ratesize);
1671
1672 /* Set MSB on basic rates as the firmware requires, but _after_
1673 * copying to current bss rates.
1674 */
1675 lbs_set_basic_rate_flags(join_cmd->bss.rates, ratesize);
1676
1677 join_cmd->bss.ssparamset.ibssparamset.atimwindow =
1678 cpu_to_le16(bss->atimwindow);
1679
1680 if (assoc_req->secinfo.wep_enabled) {
1681 u16 tmp = le16_to_cpu(join_cmd->bss.capability);
1682 tmp |= WLAN_CAPABILITY_PRIVACY;
1683 join_cmd->bss.capability = cpu_to_le16(tmp);
1684 }
1685
1686 if (priv->psmode == LBS802_11POWERMODEMAX_PSP) {
1687 /* wake up first */
1688 __le32 Localpsmode;
1689
1690 Localpsmode = cpu_to_le32(LBS802_11POWERMODECAM);
1691 ret = lbs_prepare_and_send_command(priv,
1692 CMD_802_11_PS_MODE,
1693 CMD_ACT_SET,
1694 0, 0, &Localpsmode);
1695
1696 if (ret) {
1697 ret = -1;
1698 goto done;
1699 }
1700 }
1701
1702 if (lbs_parse_dnld_countryinfo_11d(priv, bss)) {
1703 ret = -1;
1704 goto done;
1705 }
1706
1707 cmd->size = cpu_to_le16(sizeof(struct cmd_ds_802_11_ad_hoc_join) +
1708 S_DS_GEN + cmdappendsize);
1709
1710done:
1711 lbs_deb_leave_args(LBS_DEB_JOIN, "ret %d", ret);
1712 return ret;
1713}
1714
1715int lbs_ret_80211_associate(struct lbs_private *priv,
1716 struct cmd_ds_command *resp)
1717{
1718 int ret = 0;
1719 union iwreq_data wrqu;
1720 struct ieeetypes_assocrsp *passocrsp;
1721 struct bss_descriptor *bss;
1722 u16 status_code;
1723
1724 lbs_deb_enter(LBS_DEB_ASSOC);
1725
1726 if (!priv->in_progress_assoc_req) {
1727 lbs_deb_assoc("ASSOC_RESP: no in-progress assoc request\n");
1728 ret = -1;
1729 goto done;
1730 }
1731 bss = &priv->in_progress_assoc_req->bss;
1732
1733 passocrsp = (struct ieeetypes_assocrsp *) &resp->params;
1734
1735 /*
1736 * Older FW versions map the IEEE 802.11 Status Code in the association
1737 * response to the following values returned in passocrsp->statuscode:
1738 *
1739 * IEEE Status Code Marvell Status Code
1740 * 0 -> 0x0000 ASSOC_RESULT_SUCCESS
1741 * 13 -> 0x0004 ASSOC_RESULT_AUTH_REFUSED
1742 * 14 -> 0x0004 ASSOC_RESULT_AUTH_REFUSED
1743 * 15 -> 0x0004 ASSOC_RESULT_AUTH_REFUSED
1744 * 16 -> 0x0004 ASSOC_RESULT_AUTH_REFUSED
1745 * others -> 0x0003 ASSOC_RESULT_REFUSED
1746 *
1747 * Other response codes:
1748 * 0x0001 -> ASSOC_RESULT_INVALID_PARAMETERS (unused)
1749 * 0x0002 -> ASSOC_RESULT_TIMEOUT (internal timer expired waiting for
1750 * association response from the AP)
1751 */
1752
1753 status_code = le16_to_cpu(passocrsp->statuscode);
1754 switch (status_code) {
1755 case 0x00:
1756 break;
1757 case 0x01:
1758 lbs_deb_assoc("ASSOC_RESP: invalid parameters\n");
1759 break;
1760 case 0x02:
1761 lbs_deb_assoc("ASSOC_RESP: internal timer "
1762 "expired while waiting for the AP\n");
1763 break;
1764 case 0x03:
1765 lbs_deb_assoc("ASSOC_RESP: association "
1766 "refused by AP\n");
1767 break;
1768 case 0x04:
1769 lbs_deb_assoc("ASSOC_RESP: authentication "
1770 "refused by AP\n");
1771 break;
1772 default:
1773 lbs_deb_assoc("ASSOC_RESP: failure reason 0x%02x "
1774 " unknown\n", status_code);
1775 break;
1776 }
1777
1778 if (status_code) {
1779 lbs_mac_event_disconnected(priv);
1780 ret = -1;
1781 goto done;
1782 }
1783
1784 lbs_deb_hex(LBS_DEB_ASSOC, "ASSOC_RESP", (void *)&resp->params,
1785 le16_to_cpu(resp->size) - S_DS_GEN);
1786
1787 /* Send a Media Connected event, according to the Spec */
1788 priv->connect_status = LBS_CONNECTED;
1789
1790 /* Update current SSID and BSSID */
1791 memcpy(&priv->curbssparams.ssid, &bss->ssid, IW_ESSID_MAX_SIZE);
1792 priv->curbssparams.ssid_len = bss->ssid_len;
1793 memcpy(priv->curbssparams.bssid, bss->bssid, ETH_ALEN);
1794
1795 priv->SNR[TYPE_RXPD][TYPE_AVG] = 0;
1796 priv->NF[TYPE_RXPD][TYPE_AVG] = 0;
1797
1798 memset(priv->rawSNR, 0x00, sizeof(priv->rawSNR));
1799 memset(priv->rawNF, 0x00, sizeof(priv->rawNF));
1800 priv->nextSNRNF = 0;
1801 priv->numSNRNF = 0;
1802
1803 netif_carrier_on(priv->dev);
1804 if (!priv->tx_pending_len)
1805 netif_wake_queue(priv->dev);
1806
1807 memcpy(wrqu.ap_addr.sa_data, priv->curbssparams.bssid, ETH_ALEN);
1808 wrqu.ap_addr.sa_family = ARPHRD_ETHER;
1809 wireless_send_event(priv->dev, SIOCGIWAP, &wrqu, NULL);
1810
1811done:
1812 lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret);
1813 return ret;
1814}
1815
1816int lbs_ret_80211_disassociate(struct lbs_private *priv)
1817{
1818 lbs_deb_enter(LBS_DEB_JOIN);
1819
1820 lbs_mac_event_disconnected(priv);
1821
1822 lbs_deb_leave(LBS_DEB_JOIN);
1823 return 0;
1824}
1825
1826int lbs_ret_80211_ad_hoc_start(struct lbs_private *priv,
1827 struct cmd_ds_command *resp)
1828{
1829 int ret = 0;
1830 u16 command = le16_to_cpu(resp->command);
1831 u16 result = le16_to_cpu(resp->result);
1832 struct cmd_ds_802_11_ad_hoc_result *padhocresult;
1833 union iwreq_data wrqu;
1834 struct bss_descriptor *bss;
1835 DECLARE_MAC_BUF(mac);
1836
1837 lbs_deb_enter(LBS_DEB_JOIN);
1838
1839 padhocresult = &resp->params.result;
1840
1841 lbs_deb_join("ADHOC_RESP: size = %d\n", le16_to_cpu(resp->size));
1842 lbs_deb_join("ADHOC_RESP: command = %x\n", command);
1843 lbs_deb_join("ADHOC_RESP: result = %x\n", result);
1844
1845 if (!priv->in_progress_assoc_req) {
1846 lbs_deb_join("ADHOC_RESP: no in-progress association "
1847 "request\n");
1848 ret = -1;
1849 goto done;
1850 }
1851 bss = &priv->in_progress_assoc_req->bss;
1852
1853 /*
1854 * Join result code 0 --> SUCCESS
1855 */
1856 if (result) {
1857 lbs_deb_join("ADHOC_RESP: failed\n");
1858 if (priv->connect_status == LBS_CONNECTED)
1859 lbs_mac_event_disconnected(priv);
1860 ret = -1;
1861 goto done;
1862 }
1863
1864 /*
1865 * Now the join cmd should be successful
1866 * If BSSID has changed use SSID to compare instead of BSSID
1867 */
1868 lbs_deb_join("ADHOC_RESP: associated to '%s'\n",
1869 escape_essid(bss->ssid, bss->ssid_len));
1870
1871 /* Send a Media Connected event, according to the Spec */
1872 priv->connect_status = LBS_CONNECTED;
1873
1874 if (command == CMD_RET(CMD_802_11_AD_HOC_START)) {
1875 /* Update the created network descriptor with the new BSSID */
1876 memcpy(bss->bssid, padhocresult->bssid, ETH_ALEN);
1877 }
1878
1879 /* Set the BSSID from the joined/started descriptor */
1880 memcpy(&priv->curbssparams.bssid, bss->bssid, ETH_ALEN);
1881
1882 /* Set the new SSID to current SSID */
1883 memcpy(&priv->curbssparams.ssid, &bss->ssid, IW_ESSID_MAX_SIZE);
1884 priv->curbssparams.ssid_len = bss->ssid_len;
1885
1886 netif_carrier_on(priv->dev);
1887 if (!priv->tx_pending_len)
1888 netif_wake_queue(priv->dev);
1889
1890 memset(&wrqu, 0, sizeof(wrqu));
1891 memcpy(wrqu.ap_addr.sa_data, priv->curbssparams.bssid, ETH_ALEN);
1892 wrqu.ap_addr.sa_family = ARPHRD_ETHER;
1893 wireless_send_event(priv->dev, SIOCGIWAP, &wrqu, NULL);
1894
1895 lbs_deb_join("ADHOC_RESP: - Joined/Started Ad Hoc\n");
1896 lbs_deb_join("ADHOC_RESP: channel = %d\n", priv->curbssparams.channel);
1897 lbs_deb_join("ADHOC_RESP: BSSID = %s\n",
1898 print_mac(mac, padhocresult->bssid));
1899
1900done:
1901 lbs_deb_leave_args(LBS_DEB_JOIN, "ret %d", ret);
1902 return ret;
1903}
1904
1905int lbs_ret_80211_ad_hoc_stop(struct lbs_private *priv)
1906{
1907 lbs_deb_enter(LBS_DEB_JOIN);
1908
1909 lbs_mac_event_disconnected(priv);
1910
1911 lbs_deb_leave(LBS_DEB_JOIN);
1912 return 0;
1913}
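Both response handlers above follow the same shape: translate the firmware's 16-bit result into a driver-level outcome, update priv->curbssparams, and report the new BSSID to userspace through a SIOCGIWAP wireless event. The status-code translation in lbs_ret_80211_associate() is an open-coded switch; purely as an illustration, a table-driven equivalent in stand-alone C (hypothetical names, not part of libertas) might look like this:

#include <stdio.h>

/* Illustrative only: a table-driven version of the status-code mapping
 * documented in the comment block above, not the driver's actual code. */
static const char *assoc_status_str(unsigned int code)
{
        static const char *const names[] = {
                [0x00] = "success",
                [0x01] = "invalid parameters",
                [0x02] = "internal timer expired while waiting for the AP",
                [0x03] = "association refused by AP",
                [0x04] = "authentication refused by AP",
        };

        if (code < sizeof(names) / sizeof(names[0]) && names[code])
                return names[code];
        return "unknown failure reason";
}

int main(void)
{
        printf("status 0x02 -> %s\n", assoc_status_str(0x02));
        return 0;
}

A table keeps the whole mapping in one place; the switch form used by the driver additionally lets each case emit its own debug message.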
diff --git a/drivers/net/wireless/libertas/assoc.h b/drivers/net/wireless/libertas/assoc.h
index 08372bbf3761..c516fbe518fd 100644
--- a/drivers/net/wireless/libertas/assoc.h
+++ b/drivers/net/wireless/libertas/assoc.h
@@ -7,6 +7,33 @@
7 7
8void lbs_association_worker(struct work_struct *work); 8void lbs_association_worker(struct work_struct *work);
9struct assoc_request *lbs_get_association_request(struct lbs_private *priv); 9struct assoc_request *lbs_get_association_request(struct lbs_private *priv);
10void lbs_sync_channel(struct work_struct *work); 10
11struct cmd_ds_command;
12int lbs_cmd_80211_authenticate(struct lbs_private *priv,
13 struct cmd_ds_command *cmd,
14 void *pdata_buf);
15int lbs_cmd_80211_ad_hoc_join(struct lbs_private *priv,
16 struct cmd_ds_command *cmd,
17 void *pdata_buf);
18int lbs_cmd_80211_ad_hoc_stop(struct cmd_ds_command *cmd);
19int lbs_cmd_80211_ad_hoc_start(struct lbs_private *priv,
20 struct cmd_ds_command *cmd,
21 void *pdata_buf);
22int lbs_cmd_80211_deauthenticate(struct lbs_private *priv,
23 struct cmd_ds_command *cmd);
24int lbs_cmd_80211_associate(struct lbs_private *priv,
25 struct cmd_ds_command *cmd,
26 void *pdata_buf);
27
28int lbs_ret_80211_ad_hoc_start(struct lbs_private *priv,
29 struct cmd_ds_command *resp);
30int lbs_ret_80211_ad_hoc_stop(struct lbs_private *priv);
31int lbs_ret_80211_disassociate(struct lbs_private *priv);
32int lbs_ret_80211_associate(struct lbs_private *priv,
33 struct cmd_ds_command *resp);
34
35int lbs_stop_adhoc_network(struct lbs_private *priv);
36
37int lbs_send_deauthentication(struct lbs_private *priv);
11 38
12#endif /* _LBS_ASSOC_H */ 39#endif /* _LBS_ASSOC_H */
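One detail worth noting in the assoc.h hunk above is the bare "struct cmd_ds_command;" line: a forward declaration is enough for prototypes that only pass the structure by pointer, so the header does not need to include the full command definitions. A minimal stand-alone example of the same idiom (names hypothetical):

#include <stdio.h>

/* Forward declaration: sufficient wherever only a pointer to the type is
 * used, mirroring the "struct cmd_ds_command;" line added to assoc.h. */
struct thing;

static int thing_value(struct thing *t);

/* The full definition can live elsewhere; here it simply comes later. */
struct thing {
        int value;
};

static int thing_value(struct thing *t)
{
        return t->value;
}

int main(void)
{
        struct thing t = { .value = 42 };

        printf("%d\n", thing_value(&t));
        return 0;
}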
diff --git a/drivers/net/wireless/libertas/cmd.c b/drivers/net/wireless/libertas/cmd.c
index b3c1acbcc655..6328b9593877 100644
--- a/drivers/net/wireless/libertas/cmd.c
+++ b/drivers/net/wireless/libertas/cmd.c
@@ -4,19 +4,57 @@
4 */ 4 */
5 5
6#include <net/iw_handler.h> 6#include <net/iw_handler.h>
7#include <linux/kfifo.h>
7#include "host.h" 8#include "host.h"
8#include "hostcmd.h" 9#include "hostcmd.h"
9#include "decl.h" 10#include "decl.h"
10#include "defs.h" 11#include "defs.h"
11#include "dev.h" 12#include "dev.h"
12#include "join.h" 13#include "assoc.h"
13#include "wext.h" 14#include "wext.h"
14#include "cmd.h" 15#include "cmd.h"
15 16
16static struct cmd_ctrl_node *lbs_get_cmd_ctrl_node(struct lbs_private *priv); 17static struct cmd_ctrl_node *lbs_get_cmd_ctrl_node(struct lbs_private *priv);
17static void lbs_set_cmd_ctrl_node(struct lbs_private *priv, 18
18 struct cmd_ctrl_node *ptempnode, 19
19 void *pdata_buf); 20/**
21 * @brief Simple callback that copies response back into command
22 *
23 * @param priv A pointer to struct lbs_private structure
24 * @param extra A pointer to the original command structure for which
25 * 'resp' is a response
26 * @param resp A pointer to the command response
27 *
28 * @return 0 on success, error on failure
29 */
30int lbs_cmd_copyback(struct lbs_private *priv, unsigned long extra,
31 struct cmd_header *resp)
32{
33 struct cmd_header *buf = (void *)extra;
34 uint16_t copy_len;
35
36 copy_len = min(le16_to_cpu(buf->size), le16_to_cpu(resp->size));
37 memcpy(buf, resp, copy_len);
38 return 0;
39}
40EXPORT_SYMBOL_GPL(lbs_cmd_copyback);
41
42/**
43 * @brief Simple callback that ignores the result. Use this if
44 * you just want to send a command to the hardware, but don't
45 * care for the result.
46 *
47 * @param priv ignored
48 * @param extra ignored
49 * @param resp ignored
50 *
51 * @return 0 for success
52 */
53static int lbs_cmd_async_callback(struct lbs_private *priv, unsigned long extra,
54 struct cmd_header *resp)
55{
56 return 0;
57}
20 58
21 59
22/** 60/**
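lbs_cmd_copyback(), now moved to the top of cmd.c, copies the firmware's reply back over the caller's command buffer, bounded by the smaller of the two buffers' declared sizes, so neither side can be overrun. Its core is just a bounded copy; a stand-alone sketch (generic names, without the little-endian size fields the driver reads):

#include <stdio.h>
#include <string.h>

/* Bounded copy-back in the spirit of lbs_cmd_copyback(): never copy more
 * than the smaller of what the destination can hold and what the source
 * actually provides. */
static size_t copyback(void *dst, size_t dst_len, const void *src, size_t src_len)
{
        size_t n = dst_len < src_len ? dst_len : src_len;

        memcpy(dst, src, n);
        return n;
}

int main(void)
{
        const char reply[] = "firmware reply";
        char buf[4];

        printf("copied %zu bytes\n",
               copyback(buf, sizeof(buf), reply, sizeof(reply)));
        return 0;
}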
@@ -143,8 +181,7 @@ int lbs_host_sleep_cfg(struct lbs_private *priv, uint32_t criteria)
143} 181}
144EXPORT_SYMBOL_GPL(lbs_host_sleep_cfg); 182EXPORT_SYMBOL_GPL(lbs_host_sleep_cfg);
145 183
146static int lbs_cmd_802_11_ps_mode(struct lbs_private *priv, 184static int lbs_cmd_802_11_ps_mode(struct cmd_ds_command *cmd,
147 struct cmd_ds_command *cmd,
148 u16 cmd_action) 185 u16 cmd_action)
149{ 186{
150 struct cmd_ds_802_11_ps_mode *psm = &cmd->params.psmode; 187 struct cmd_ds_802_11_ps_mode *psm = &cmd->params.psmode;
@@ -259,6 +296,7 @@ int lbs_cmd_802_11_set_wep(struct lbs_private *priv, uint16_t cmd_action,
259 296
260 lbs_deb_enter(LBS_DEB_CMD); 297 lbs_deb_enter(LBS_DEB_CMD);
261 298
299 memset(&cmd, 0, sizeof(cmd));
262 cmd.hdr.command = cpu_to_le16(CMD_802_11_SET_WEP); 300 cmd.hdr.command = cpu_to_le16(CMD_802_11_SET_WEP);
263 cmd.hdr.size = cpu_to_le16(sizeof(cmd)); 301 cmd.hdr.size = cpu_to_le16(sizeof(cmd));
264 302
@@ -322,7 +360,9 @@ int lbs_cmd_802_11_enable_rsn(struct lbs_private *priv, uint16_t cmd_action,
322 cmd.hdr.size = cpu_to_le16(sizeof(cmd)); 360 cmd.hdr.size = cpu_to_le16(sizeof(cmd));
323 cmd.action = cpu_to_le16(cmd_action); 361 cmd.action = cpu_to_le16(cmd_action);
324 362
325 if (cmd_action == CMD_ACT_SET) { 363 if (cmd_action == CMD_ACT_GET)
364 cmd.enable = 0;
365 else {
326 if (*enable) 366 if (*enable)
327 cmd.enable = cpu_to_le16(CMD_ENABLE_RSN); 367 cmd.enable = cpu_to_le16(CMD_ENABLE_RSN);
328 else 368 else
@@ -338,81 +378,108 @@ int lbs_cmd_802_11_enable_rsn(struct lbs_private *priv, uint16_t cmd_action,
338 return ret; 378 return ret;
339} 379}
340 380
341static void set_one_wpa_key(struct MrvlIEtype_keyParamSet * pkeyparamset, 381static void set_one_wpa_key(struct MrvlIEtype_keyParamSet *keyparam,
342 struct enc_key * pkey) 382 struct enc_key *key)
343{ 383{
344 lbs_deb_enter(LBS_DEB_CMD); 384 lbs_deb_enter(LBS_DEB_CMD);
345 385
346 if (pkey->flags & KEY_INFO_WPA_ENABLED) { 386 if (key->flags & KEY_INFO_WPA_ENABLED)
347 pkeyparamset->keyinfo |= cpu_to_le16(KEY_INFO_WPA_ENABLED); 387 keyparam->keyinfo |= cpu_to_le16(KEY_INFO_WPA_ENABLED);
348 } 388 if (key->flags & KEY_INFO_WPA_UNICAST)
349 if (pkey->flags & KEY_INFO_WPA_UNICAST) { 389 keyparam->keyinfo |= cpu_to_le16(KEY_INFO_WPA_UNICAST);
350 pkeyparamset->keyinfo |= cpu_to_le16(KEY_INFO_WPA_UNICAST); 390 if (key->flags & KEY_INFO_WPA_MCAST)
351 } 391 keyparam->keyinfo |= cpu_to_le16(KEY_INFO_WPA_MCAST);
352 if (pkey->flags & KEY_INFO_WPA_MCAST) {
353 pkeyparamset->keyinfo |= cpu_to_le16(KEY_INFO_WPA_MCAST);
354 }
355 392
356 pkeyparamset->type = cpu_to_le16(TLV_TYPE_KEY_MATERIAL); 393 keyparam->type = cpu_to_le16(TLV_TYPE_KEY_MATERIAL);
357 pkeyparamset->keytypeid = cpu_to_le16(pkey->type); 394 keyparam->keytypeid = cpu_to_le16(key->type);
358 pkeyparamset->keylen = cpu_to_le16(pkey->len); 395 keyparam->keylen = cpu_to_le16(key->len);
359 memcpy(pkeyparamset->key, pkey->key, pkey->len); 396 memcpy(keyparam->key, key->key, key->len);
360 pkeyparamset->length = cpu_to_le16( sizeof(pkeyparamset->keytypeid) 397
361 + sizeof(pkeyparamset->keyinfo) 398 /* Length field doesn't include the {type,length} header */
362 + sizeof(pkeyparamset->keylen) 399 keyparam->length = cpu_to_le16(sizeof(*keyparam) - 4);
363 + sizeof(pkeyparamset->key));
364 lbs_deb_leave(LBS_DEB_CMD); 400 lbs_deb_leave(LBS_DEB_CMD);
365} 401}
366 402
367static int lbs_cmd_802_11_key_material(struct lbs_private *priv, 403int lbs_cmd_802_11_key_material(struct lbs_private *priv, uint16_t cmd_action,
368 struct cmd_ds_command *cmd, 404 struct assoc_request *assoc)
369 u16 cmd_action,
370 u32 cmd_oid, void *pdata_buf)
371{ 405{
372 struct cmd_ds_802_11_key_material *pkeymaterial = 406 struct cmd_ds_802_11_key_material cmd;
373 &cmd->params.keymaterial;
374 struct assoc_request * assoc_req = pdata_buf;
375 int ret = 0; 407 int ret = 0;
376 int index = 0; 408 int index = 0;
377 409
378 lbs_deb_enter(LBS_DEB_CMD); 410 lbs_deb_enter(LBS_DEB_CMD);
379 411
380 cmd->command = cpu_to_le16(CMD_802_11_KEY_MATERIAL); 412 cmd.action = cpu_to_le16(cmd_action);
381 pkeymaterial->action = cpu_to_le16(cmd_action); 413 cmd.hdr.size = cpu_to_le16(sizeof(cmd));
382 414
383 if (cmd_action == CMD_ACT_GET) { 415 if (cmd_action == CMD_ACT_GET) {
384 cmd->size = cpu_to_le16(S_DS_GEN + sizeof (pkeymaterial->action)); 416 cmd.hdr.size = cpu_to_le16(S_DS_GEN + 2);
385 ret = 0; 417 } else {
386 goto done; 418 memset(cmd.keyParamSet, 0, sizeof(cmd.keyParamSet));
387 }
388 419
389 memset(&pkeymaterial->keyParamSet, 0, sizeof(pkeymaterial->keyParamSet)); 420 if (test_bit(ASSOC_FLAG_WPA_UCAST_KEY, &assoc->flags)) {
421 set_one_wpa_key(&cmd.keyParamSet[index],
422 &assoc->wpa_unicast_key);
423 index++;
424 }
390 425
391 if (test_bit(ASSOC_FLAG_WPA_UCAST_KEY, &assoc_req->flags)) { 426 if (test_bit(ASSOC_FLAG_WPA_MCAST_KEY, &assoc->flags)) {
392 set_one_wpa_key(&pkeymaterial->keyParamSet[index], 427 set_one_wpa_key(&cmd.keyParamSet[index],
393 &assoc_req->wpa_unicast_key); 428 &assoc->wpa_mcast_key);
394 index++; 429 index++;
395 } 430 }
396 431
397 if (test_bit(ASSOC_FLAG_WPA_MCAST_KEY, &assoc_req->flags)) { 432 /* The common header and as many keys as we included */
398 set_one_wpa_key(&pkeymaterial->keyParamSet[index], 433 cmd.hdr.size = cpu_to_le16(offsetof(typeof(cmd),
399 &assoc_req->wpa_mcast_key); 434 keyParamSet[index]));
400 index++;
401 } 435 }
436 ret = lbs_cmd_with_response(priv, CMD_802_11_KEY_MATERIAL, &cmd);
437 /* Copy the returned key to driver private data */
438 if (!ret && cmd_action == CMD_ACT_GET) {
439 void *buf_ptr = cmd.keyParamSet;
440 void *resp_end = &(&cmd)[1];
441
442 while (buf_ptr < resp_end) {
443 struct MrvlIEtype_keyParamSet *keyparam = buf_ptr;
444 struct enc_key *key;
445 uint16_t param_set_len = le16_to_cpu(keyparam->length);
446 uint16_t key_len = le16_to_cpu(keyparam->keylen);
447 uint16_t key_flags = le16_to_cpu(keyparam->keyinfo);
448 uint16_t key_type = le16_to_cpu(keyparam->keytypeid);
449 void *end;
450
451 end = (void *)keyparam + sizeof(keyparam->type)
452 + sizeof(keyparam->length) + param_set_len;
453
454 /* Make sure we don't access past the end of the IEs */
455 if (end > resp_end)
456 break;
457
458 if (key_flags & KEY_INFO_WPA_UNICAST)
459 key = &priv->wpa_unicast_key;
460 else if (key_flags & KEY_INFO_WPA_MCAST)
461 key = &priv->wpa_mcast_key;
462 else
463 break;
402 464
403 cmd->size = cpu_to_le16( S_DS_GEN 465 /* Copy returned key into driver */
404 + sizeof (pkeymaterial->action) 466 memset(key, 0, sizeof(struct enc_key));
405 + (index * sizeof(struct MrvlIEtype_keyParamSet))); 467 if (key_len > sizeof(key->key))
468 break;
469 key->type = key_type;
470 key->flags = key_flags;
471 key->len = key_len;
472 memcpy(key->key, keyparam->key, key->len);
406 473
407 ret = 0; 474 buf_ptr = end + 1;
475 }
476 }
408 477
409done:
410 lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret); 478 lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
411 return ret; 479 return ret;
412} 480}
413 481
414static int lbs_cmd_802_11_reset(struct lbs_private *priv, 482static int lbs_cmd_802_11_reset(struct cmd_ds_command *cmd, int cmd_action)
415 struct cmd_ds_command *cmd, int cmd_action)
416{ 483{
417 struct cmd_ds_802_11_reset *reset = &cmd->params.reset; 484 struct cmd_ds_802_11_reset *reset = &cmd->params.reset;
418 485
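In the hunk above, the GET path of the reworked lbs_cmd_802_11_key_material() walks the firmware reply as a series of {type, length, value} key-parameter records and stops as soon as a record would run past the end of the response. The same bounds-checked TLV walk in self-contained form (generic header layout, no endian conversion; this is not the Marvell MrvlIEtype_keyParamSet structure):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct tlv_hdr {
        uint16_t type;
        uint16_t len;           /* number of value bytes that follow */
};

static void walk_tlvs(const uint8_t *buf, size_t buflen)
{
        size_t pos = 0;

        while (pos + sizeof(struct tlv_hdr) <= buflen) {
                struct tlv_hdr hdr;

                memcpy(&hdr, buf + pos, sizeof(hdr));

                /* never read past the end of the response buffer */
                if (pos + sizeof(hdr) + hdr.len > buflen)
                        break;

                printf("type 0x%04x, %u value bytes\n",
                       (unsigned int)hdr.type, (unsigned int)hdr.len);
                pos += sizeof(hdr) + hdr.len;
        }
}

int main(void)
{
        /* one record: type 0x0001 with 4 value bytes (little-endian host assumed) */
        const uint8_t buf[] = { 0x01, 0x00, 0x04, 0x00, 0xaa, 0xbb, 0xcc, 0xdd };

        walk_tlvs(buf, sizeof(buf));
        return 0;
}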
@@ -426,30 +493,6 @@ static int lbs_cmd_802_11_reset(struct lbs_private *priv,
426 return 0; 493 return 0;
427} 494}
428 495
429static int lbs_cmd_802_11_get_log(struct lbs_private *priv,
430 struct cmd_ds_command *cmd)
431{
432 lbs_deb_enter(LBS_DEB_CMD);
433 cmd->command = cpu_to_le16(CMD_802_11_GET_LOG);
434 cmd->size =
435 cpu_to_le16(sizeof(struct cmd_ds_802_11_get_log) + S_DS_GEN);
436
437 lbs_deb_leave(LBS_DEB_CMD);
438 return 0;
439}
440
441static int lbs_cmd_802_11_get_stat(struct lbs_private *priv,
442 struct cmd_ds_command *cmd)
443{
444 lbs_deb_enter(LBS_DEB_CMD);
445 cmd->command = cpu_to_le16(CMD_802_11_GET_STAT);
446 cmd->size =
447 cpu_to_le16(sizeof(struct cmd_ds_802_11_get_stat) + S_DS_GEN);
448
449 lbs_deb_leave(LBS_DEB_CMD);
450 return 0;
451}
452
453static int lbs_cmd_802_11_snmp_mib(struct lbs_private *priv, 496static int lbs_cmd_802_11_snmp_mib(struct lbs_private *priv,
454 struct cmd_ds_command *cmd, 497 struct cmd_ds_command *cmd,
455 int cmd_action, 498 int cmd_action,
@@ -570,8 +613,7 @@ static int lbs_cmd_802_11_snmp_mib(struct lbs_private *priv,
570 return 0; 613 return 0;
571} 614}
572 615
573static int lbs_cmd_802_11_rf_tx_power(struct lbs_private *priv, 616static int lbs_cmd_802_11_rf_tx_power(struct cmd_ds_command *cmd,
574 struct cmd_ds_command *cmd,
575 u16 cmd_action, void *pdata_buf) 617 u16 cmd_action, void *pdata_buf)
576{ 618{
577 619
@@ -614,8 +656,7 @@ static int lbs_cmd_802_11_rf_tx_power(struct lbs_private *priv,
614 return 0; 656 return 0;
615} 657}
616 658
617static int lbs_cmd_802_11_monitor_mode(struct lbs_private *priv, 659static int lbs_cmd_802_11_monitor_mode(struct cmd_ds_command *cmd,
618 struct cmd_ds_command *cmd,
619 u16 cmd_action, void *pdata_buf) 660 u16 cmd_action, void *pdata_buf)
620{ 661{
621 struct cmd_ds_802_11_monitor_mode *monitor = &cmd->params.monitor; 662 struct cmd_ds_802_11_monitor_mode *monitor = &cmd->params.monitor;
@@ -773,6 +814,7 @@ int lbs_get_channel(struct lbs_private *priv)
773 814
774 lbs_deb_enter(LBS_DEB_CMD); 815 lbs_deb_enter(LBS_DEB_CMD);
775 816
817 memset(&cmd, 0, sizeof(cmd));
776 cmd.hdr.size = cpu_to_le16(sizeof(cmd)); 818 cmd.hdr.size = cpu_to_le16(sizeof(cmd));
777 cmd.action = cpu_to_le16(CMD_OPT_802_11_RF_CHANNEL_GET); 819 cmd.action = cpu_to_le16(CMD_OPT_802_11_RF_CHANNEL_GET);
778 820
@@ -788,6 +830,22 @@ out:
788 return ret; 830 return ret;
789} 831}
790 832
833int lbs_update_channel(struct lbs_private *priv)
834{
835 int ret;
836
837 /* the channel in f/w could be out of sync; get the current channel */
838 lbs_deb_enter(LBS_DEB_ASSOC);
839
840 ret = lbs_get_channel(priv);
841 if (ret > 0) {
842 priv->curbssparams.channel = ret;
843 ret = 0;
844 }
845 lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret);
846 return ret;
847}
848
791/** 849/**
792 * @brief Set the radio channel 850 * @brief Set the radio channel
793 * 851 *
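The new lbs_update_channel() helper in the hunk above adapts one return convention to another: lbs_get_channel() hands back the channel number itself on success (a positive value), and the wrapper caches it in priv->curbssparams.channel while reporting plain 0 to its caller. A trivial stand-alone illustration of that wrapping (get_channel() here is a stand-in, not the real firmware query):

#include <stdio.h>

static int cached_channel;

/* stand-in for the firmware query: returns the channel (>0) on success */
static int get_channel(void)
{
        return 11;
}

/* cache the channel and normalize the return value to 0 on success, as
 * lbs_update_channel() does with priv->curbssparams.channel */
static int update_channel(void)
{
        int ret = get_channel();

        if (ret > 0) {
                cached_channel = ret;
                ret = 0;
        }
        return ret;
}

int main(void)
{
        int err = update_channel();

        printf("err %d, channel %d\n", err, cached_channel);
        return 0;
}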
@@ -804,6 +862,7 @@ int lbs_set_channel(struct lbs_private *priv, u8 channel)
804 862
805 lbs_deb_enter(LBS_DEB_CMD); 863 lbs_deb_enter(LBS_DEB_CMD);
806 864
865 memset(&cmd, 0, sizeof(cmd));
807 cmd.hdr.size = cpu_to_le16(sizeof(cmd)); 866 cmd.hdr.size = cpu_to_le16(sizeof(cmd));
808 cmd.action = cpu_to_le16(CMD_OPT_802_11_RF_CHANNEL_SET); 867 cmd.action = cpu_to_le16(CMD_OPT_802_11_RF_CHANNEL_SET);
809 cmd.channel = cpu_to_le16(channel); 868 cmd.channel = cpu_to_le16(channel);
@@ -842,8 +901,7 @@ static int lbs_cmd_802_11_rssi(struct lbs_private *priv,
842 return 0; 901 return 0;
843} 902}
844 903
845static int lbs_cmd_reg_access(struct lbs_private *priv, 904static int lbs_cmd_reg_access(struct cmd_ds_command *cmdptr,
846 struct cmd_ds_command *cmdptr,
847 u8 cmd_action, void *pdata_buf) 905 u8 cmd_action, void *pdata_buf)
848{ 906{
849 struct lbs_offset_value *offval; 907 struct lbs_offset_value *offval;
@@ -917,53 +975,7 @@ static int lbs_cmd_reg_access(struct lbs_private *priv,
917 return 0; 975 return 0;
918} 976}
919 977
920static int lbs_cmd_802_11_mac_address(struct lbs_private *priv, 978static int lbs_cmd_bt_access(struct cmd_ds_command *cmd,
921 struct cmd_ds_command *cmd,
922 u16 cmd_action)
923{
924
925 lbs_deb_enter(LBS_DEB_CMD);
926 cmd->command = cpu_to_le16(CMD_802_11_MAC_ADDRESS);
927 cmd->size = cpu_to_le16(sizeof(struct cmd_ds_802_11_mac_address) +
928 S_DS_GEN);
929 cmd->result = 0;
930
931 cmd->params.macadd.action = cpu_to_le16(cmd_action);
932
933 if (cmd_action == CMD_ACT_SET) {
934 memcpy(cmd->params.macadd.macadd,
935 priv->current_addr, ETH_ALEN);
936 lbs_deb_hex(LBS_DEB_CMD, "SET_CMD: MAC addr", priv->current_addr, 6);
937 }
938
939 lbs_deb_leave(LBS_DEB_CMD);
940 return 0;
941}
942
943static int lbs_cmd_802_11_eeprom_access(struct lbs_private *priv,
944 struct cmd_ds_command *cmd,
945 int cmd_action, void *pdata_buf)
946{
947 struct lbs_ioctl_regrdwr *ea = pdata_buf;
948
949 lbs_deb_enter(LBS_DEB_CMD);
950
951 cmd->command = cpu_to_le16(CMD_802_11_EEPROM_ACCESS);
952 cmd->size = cpu_to_le16(sizeof(struct cmd_ds_802_11_eeprom_access) +
953 S_DS_GEN);
954 cmd->result = 0;
955
956 cmd->params.rdeeprom.action = cpu_to_le16(ea->action);
957 cmd->params.rdeeprom.offset = cpu_to_le16(ea->offset);
958 cmd->params.rdeeprom.bytecount = cpu_to_le16(ea->NOB);
959 cmd->params.rdeeprom.value = 0;
960
961 lbs_deb_leave(LBS_DEB_CMD);
962 return 0;
963}
964
965static int lbs_cmd_bt_access(struct lbs_private *priv,
966 struct cmd_ds_command *cmd,
967 u16 cmd_action, void *pdata_buf) 979 u16 cmd_action, void *pdata_buf)
968{ 980{
969 struct cmd_ds_bt_access *bt_access = &cmd->params.bt; 981 struct cmd_ds_bt_access *bt_access = &cmd->params.bt;
@@ -1000,8 +1012,7 @@ static int lbs_cmd_bt_access(struct lbs_private *priv,
1000 return 0; 1012 return 0;
1001} 1013}
1002 1014
1003static int lbs_cmd_fwt_access(struct lbs_private *priv, 1015static int lbs_cmd_fwt_access(struct cmd_ds_command *cmd,
1004 struct cmd_ds_command *cmd,
1005 u16 cmd_action, void *pdata_buf) 1016 u16 cmd_action, void *pdata_buf)
1006{ 1017{
1007 struct cmd_ds_fwt_access *fwt_access = &cmd->params.fwt; 1018 struct cmd_ds_fwt_access *fwt_access = &cmd->params.fwt;
@@ -1153,9 +1164,9 @@ static void lbs_submit_command(struct lbs_private *priv,
1153 command == CMD_802_11_AUTHENTICATE) 1164 command == CMD_802_11_AUTHENTICATE)
1154 timeo = 10 * HZ; 1165 timeo = 10 * HZ;
1155 1166
1156 lbs_deb_host("DNLD_CMD: command 0x%04x, seq %d, size %d, jiffies %lu\n", 1167 lbs_deb_cmd("DNLD_CMD: command 0x%04x, seq %d, size %d\n",
1157 command, le16_to_cpu(cmd->seqnum), cmdsize, jiffies); 1168 command, le16_to_cpu(cmd->seqnum), cmdsize);
1158 lbs_deb_hex(LBS_DEB_HOST, "DNLD_CMD", (void *) cmdnode->cmdbuf, cmdsize); 1169 lbs_deb_hex(LBS_DEB_CMD, "DNLD_CMD", (void *) cmdnode->cmdbuf, cmdsize);
1159 1170
1160 ret = priv->hw_host_to_card(priv, MVMS_CMD, (u8 *) cmd, cmdsize); 1171 ret = priv->hw_host_to_card(priv, MVMS_CMD, (u8 *) cmd, cmdsize);
1161 1172
@@ -1164,9 +1175,7 @@ static void lbs_submit_command(struct lbs_private *priv,
1164 /* Let the timer kick in and retry, and potentially reset 1175 /* Let the timer kick in and retry, and potentially reset
1165 the whole thing if the condition persists */ 1176 the whole thing if the condition persists */
1166 timeo = HZ; 1177 timeo = HZ;
1167 } else 1178 }
1168 lbs_deb_cmd("DNLD_CMD: sent command 0x%04x, jiffies %lu\n",
1169 command, jiffies);
1170 1179
1171 /* Setup the timer after transmit command */ 1180 /* Setup the timer after transmit command */
1172 mod_timer(&priv->command_timer, jiffies + timeo); 1181 mod_timer(&priv->command_timer, jiffies + timeo);
@@ -1174,24 +1183,6 @@ static void lbs_submit_command(struct lbs_private *priv,
1174 lbs_deb_leave(LBS_DEB_HOST); 1183 lbs_deb_leave(LBS_DEB_HOST);
1175} 1184}
1176 1185
1177static int lbs_cmd_mac_control(struct lbs_private *priv,
1178 struct cmd_ds_command *cmd)
1179{
1180 struct cmd_ds_mac_control *mac = &cmd->params.macctrl;
1181
1182 lbs_deb_enter(LBS_DEB_CMD);
1183
1184 cmd->command = cpu_to_le16(CMD_MAC_CONTROL);
1185 cmd->size = cpu_to_le16(sizeof(struct cmd_ds_mac_control) + S_DS_GEN);
1186 mac->action = cpu_to_le16(priv->currentpacketfilter);
1187
1188 lbs_deb_cmd("MAC_CONTROL: action 0x%x, size %d\n",
1189 le16_to_cpu(mac->action), le16_to_cpu(cmd->size));
1190
1191 lbs_deb_leave(LBS_DEB_CMD);
1192 return 0;
1193}
1194
1195/** 1186/**
1196 * This function inserts command node to cmdfreeq 1187 * This function inserts command node to cmdfreeq
1197 * after cleans it. Requires priv->driver_lock held. 1188 * after cleans it. Requires priv->driver_lock held.
@@ -1234,7 +1225,7 @@ void lbs_complete_command(struct lbs_private *priv, struct cmd_ctrl_node *cmd,
1234 cmd->cmdwaitqwoken = 1; 1225 cmd->cmdwaitqwoken = 1;
1235 wake_up_interruptible(&cmd->cmdwait_q); 1226 wake_up_interruptible(&cmd->cmdwait_q);
1236 1227
1237 if (!cmd->callback) 1228 if (!cmd->callback || cmd->callback == lbs_cmd_async_callback)
1238 __lbs_cleanup_and_insert_cmd(priv, cmd); 1229 __lbs_cleanup_and_insert_cmd(priv, cmd);
1239 priv->cur_cmd = NULL; 1230 priv->cur_cmd = NULL;
1240} 1231}
@@ -1278,18 +1269,20 @@ int lbs_set_radio_control(struct lbs_private *priv)
1278 return ret; 1269 return ret;
1279} 1270}
1280 1271
1281int lbs_set_mac_packet_filter(struct lbs_private *priv) 1272void lbs_set_mac_control(struct lbs_private *priv)
1282{ 1273{
1283 int ret = 0; 1274 struct cmd_ds_mac_control cmd;
1284 1275
1285 lbs_deb_enter(LBS_DEB_CMD); 1276 lbs_deb_enter(LBS_DEB_CMD);
1286 1277
1287 /* Send MAC control command to station */ 1278 cmd.hdr.size = cpu_to_le16(sizeof(cmd));
1288 ret = lbs_prepare_and_send_command(priv, 1279 cmd.action = cpu_to_le16(priv->mac_control);
1289 CMD_MAC_CONTROL, 0, 0, 0, NULL); 1280 cmd.reserved = 0;
1290 1281
1291 lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret); 1282 lbs_cmd_async(priv, CMD_MAC_CONTROL,
1292 return ret; 1283 &cmd.hdr, sizeof(cmd));
1284
1285 lbs_deb_leave(LBS_DEB_CMD);
1293} 1286}
1294 1287
1295/** 1288/**
@@ -1338,7 +1331,8 @@ int lbs_prepare_and_send_command(struct lbs_private *priv,
1338 goto done; 1331 goto done;
1339 } 1332 }
1340 1333
1341 lbs_set_cmd_ctrl_node(priv, cmdnode, pdata_buf); 1334 cmdnode->callback = NULL;
1335 cmdnode->callback_arg = (unsigned long)pdata_buf;
1342 1336
1343 cmdptr = (struct cmd_ds_command *)cmdnode->cmdbuf; 1337 cmdptr = (struct cmd_ds_command *)cmdnode->cmdbuf;
1344 1338
@@ -1353,15 +1347,7 @@ int lbs_prepare_and_send_command(struct lbs_private *priv,
1353 1347
1354 switch (cmd_no) { 1348 switch (cmd_no) {
1355 case CMD_802_11_PS_MODE: 1349 case CMD_802_11_PS_MODE:
1356 ret = lbs_cmd_802_11_ps_mode(priv, cmdptr, cmd_action); 1350 ret = lbs_cmd_802_11_ps_mode(cmdptr, cmd_action);
1357 break;
1358
1359 case CMD_802_11_SCAN:
1360 ret = lbs_cmd_80211_scan(priv, cmdptr, pdata_buf);
1361 break;
1362
1363 case CMD_MAC_CONTROL:
1364 ret = lbs_cmd_mac_control(priv, cmdptr);
1365 break; 1351 break;
1366 1352
1367 case CMD_802_11_ASSOCIATE: 1353 case CMD_802_11_ASSOCIATE:
@@ -1376,25 +1362,15 @@ int lbs_prepare_and_send_command(struct lbs_private *priv,
1376 case CMD_802_11_AD_HOC_START: 1362 case CMD_802_11_AD_HOC_START:
1377 ret = lbs_cmd_80211_ad_hoc_start(priv, cmdptr, pdata_buf); 1363 ret = lbs_cmd_80211_ad_hoc_start(priv, cmdptr, pdata_buf);
1378 break; 1364 break;
1379 case CMD_CODE_DNLD:
1380 break;
1381 1365
1382 case CMD_802_11_RESET: 1366 case CMD_802_11_RESET:
1383 ret = lbs_cmd_802_11_reset(priv, cmdptr, cmd_action); 1367 ret = lbs_cmd_802_11_reset(cmdptr, cmd_action);
1384 break;
1385
1386 case CMD_802_11_GET_LOG:
1387 ret = lbs_cmd_802_11_get_log(priv, cmdptr);
1388 break; 1368 break;
1389 1369
1390 case CMD_802_11_AUTHENTICATE: 1370 case CMD_802_11_AUTHENTICATE:
1391 ret = lbs_cmd_80211_authenticate(priv, cmdptr, pdata_buf); 1371 ret = lbs_cmd_80211_authenticate(priv, cmdptr, pdata_buf);
1392 break; 1372 break;
1393 1373
1394 case CMD_802_11_GET_STAT:
1395 ret = lbs_cmd_802_11_get_stat(priv, cmdptr);
1396 break;
1397
1398 case CMD_802_11_SNMP_MIB: 1374 case CMD_802_11_SNMP_MIB:
1399 ret = lbs_cmd_802_11_snmp_mib(priv, cmdptr, 1375 ret = lbs_cmd_802_11_snmp_mib(priv, cmdptr,
1400 cmd_action, cmd_oid, pdata_buf); 1376 cmd_action, cmd_oid, pdata_buf);
@@ -1403,12 +1379,12 @@ int lbs_prepare_and_send_command(struct lbs_private *priv,
1403 case CMD_MAC_REG_ACCESS: 1379 case CMD_MAC_REG_ACCESS:
1404 case CMD_BBP_REG_ACCESS: 1380 case CMD_BBP_REG_ACCESS:
1405 case CMD_RF_REG_ACCESS: 1381 case CMD_RF_REG_ACCESS:
1406 ret = lbs_cmd_reg_access(priv, cmdptr, cmd_action, pdata_buf); 1382 ret = lbs_cmd_reg_access(cmdptr, cmd_action, pdata_buf);
1407 break; 1383 break;
1408 1384
1409 case CMD_802_11_RF_TX_POWER: 1385 case CMD_802_11_RF_TX_POWER:
1410 ret = lbs_cmd_802_11_rf_tx_power(priv, cmdptr, 1386 ret = lbs_cmd_802_11_rf_tx_power(cmdptr,
1411 cmd_action, pdata_buf); 1387 cmd_action, pdata_buf);
1412 break; 1388 break;
1413 1389
1414 case CMD_802_11_RATE_ADAPT_RATESET: 1390 case CMD_802_11_RATE_ADAPT_RATESET:
@@ -1421,7 +1397,7 @@ int lbs_prepare_and_send_command(struct lbs_private *priv,
1421 break; 1397 break;
1422 1398
1423 case CMD_802_11_MONITOR_MODE: 1399 case CMD_802_11_MONITOR_MODE:
1424 ret = lbs_cmd_802_11_monitor_mode(priv, cmdptr, 1400 ret = lbs_cmd_802_11_monitor_mode(cmdptr,
1425 cmd_action, pdata_buf); 1401 cmd_action, pdata_buf);
1426 break; 1402 break;
1427 1403
@@ -1434,26 +1410,7 @@ int lbs_prepare_and_send_command(struct lbs_private *priv,
1434 break; 1410 break;
1435 1411
1436 case CMD_802_11_AD_HOC_STOP: 1412 case CMD_802_11_AD_HOC_STOP:
1437 ret = lbs_cmd_80211_ad_hoc_stop(priv, cmdptr); 1413 ret = lbs_cmd_80211_ad_hoc_stop(cmdptr);
1438 break;
1439
1440 case CMD_802_11_KEY_MATERIAL:
1441 ret = lbs_cmd_802_11_key_material(priv, cmdptr, cmd_action,
1442 cmd_oid, pdata_buf);
1443 break;
1444
1445 case CMD_802_11_PAIRWISE_TSC:
1446 break;
1447 case CMD_802_11_GROUP_TSC:
1448 break;
1449
1450 case CMD_802_11_MAC_ADDRESS:
1451 ret = lbs_cmd_802_11_mac_address(priv, cmdptr, cmd_action);
1452 break;
1453
1454 case CMD_802_11_EEPROM_ACCESS:
1455 ret = lbs_cmd_802_11_eeprom_access(priv, cmdptr,
1456 cmd_action, pdata_buf);
1457 break; 1414 break;
1458 1415
1459 case CMD_802_11_SET_AFC: 1416 case CMD_802_11_SET_AFC:
@@ -1509,22 +1466,12 @@ int lbs_prepare_and_send_command(struct lbs_private *priv,
1509 break; 1466 break;
1510 } 1467 }
1511 1468
1512 case CMD_802_11_PWR_CFG:
1513 cmdptr->command = cpu_to_le16(CMD_802_11_PWR_CFG);
1514 cmdptr->size =
1515 cpu_to_le16(sizeof(struct cmd_ds_802_11_pwr_cfg) +
1516 S_DS_GEN);
1517 memmove(&cmdptr->params.pwrcfg, pdata_buf,
1518 sizeof(struct cmd_ds_802_11_pwr_cfg));
1519
1520 ret = 0;
1521 break;
1522 case CMD_BT_ACCESS: 1469 case CMD_BT_ACCESS:
1523 ret = lbs_cmd_bt_access(priv, cmdptr, cmd_action, pdata_buf); 1470 ret = lbs_cmd_bt_access(cmdptr, cmd_action, pdata_buf);
1524 break; 1471 break;
1525 1472
1526 case CMD_FWT_ACCESS: 1473 case CMD_FWT_ACCESS:
1527 ret = lbs_cmd_fwt_access(priv, cmdptr, cmd_action, pdata_buf); 1474 ret = lbs_cmd_fwt_access(cmdptr, cmd_action, pdata_buf);
1528 break; 1475 break;
1529 1476
1530 case CMD_GET_TSF: 1477 case CMD_GET_TSF:
@@ -1697,36 +1644,6 @@ static struct cmd_ctrl_node *lbs_get_cmd_ctrl_node(struct lbs_private *priv)
1697} 1644}
1698 1645
1699/** 1646/**
1700 * @brief This function cleans command node.
1701 *
1702 * @param ptempnode A pointer to cmdCtrlNode structure
1703 * @return n/a
1704 */
1705
1706/**
1707 * @brief This function initializes the command node.
1708 *
1709 * @param priv A pointer to struct lbs_private structure
1710 * @param ptempnode A pointer to cmd_ctrl_node structure
1711 * @param pdata_buf A pointer to information buffer
1712 * @return 0 or -1
1713 */
1714static void lbs_set_cmd_ctrl_node(struct lbs_private *priv,
1715 struct cmd_ctrl_node *ptempnode,
1716 void *pdata_buf)
1717{
1718 lbs_deb_enter(LBS_DEB_HOST);
1719
1720 if (!ptempnode)
1721 return;
1722
1723 ptempnode->callback = NULL;
1724 ptempnode->callback_arg = (unsigned long)pdata_buf;
1725
1726 lbs_deb_leave(LBS_DEB_HOST);
1727}
1728
1729/**
1730 * @brief This function executes next command in command 1647 * @brief This function executes next command in command
1731 * pending queue. It will put firmware back to PS mode 1648
1732 * if applicable. 1649 * if applicable.
@@ -1741,9 +1658,9 @@ int lbs_execute_next_command(struct lbs_private *priv)
1741 unsigned long flags; 1658 unsigned long flags;
1742 int ret = 0; 1659 int ret = 0;
1743 1660
1744 // Debug group is LBS_DEB_THREAD and not LBS_DEB_HOST, because the 1661 /* Debug group is LBS_DEB_THREAD and not LBS_DEB_HOST, because the
1745 // only caller to us is lbs_thread() and we get called even when a 1662 * only caller to us is lbs_thread() and we get called even when a
1746 // data packet is received 1663 * data packet is received */
1747 lbs_deb_enter(LBS_DEB_THREAD); 1664 lbs_deb_enter(LBS_DEB_THREAD);
1748 1665
1749 spin_lock_irqsave(&priv->driver_lock, flags); 1666 spin_lock_irqsave(&priv->driver_lock, flags);
@@ -1907,44 +1824,32 @@ void lbs_send_iwevcustom_event(struct lbs_private *priv, s8 *str)
1907 lbs_deb_leave(LBS_DEB_WEXT); 1824 lbs_deb_leave(LBS_DEB_WEXT);
1908} 1825}
1909 1826
1910static int sendconfirmsleep(struct lbs_private *priv, u8 *cmdptr, u16 size) 1827static void lbs_send_confirmsleep(struct lbs_private *priv)
1911{ 1828{
1912 unsigned long flags; 1829 unsigned long flags;
1913 int ret = 0; 1830 int ret;
1914 1831
1915 lbs_deb_enter(LBS_DEB_HOST); 1832 lbs_deb_enter(LBS_DEB_HOST);
1833 lbs_deb_hex(LBS_DEB_HOST, "sleep confirm", (u8 *) &confirm_sleep,
1834 sizeof(confirm_sleep));
1916 1835
1917 lbs_deb_host("SEND_SLEEPC_CMD: before download, cmd size %d\n", 1836 ret = priv->hw_host_to_card(priv, MVMS_CMD, (u8 *) &confirm_sleep,
1918 size); 1837 sizeof(confirm_sleep));
1919 1838 if (ret) {
1920 lbs_deb_hex(LBS_DEB_HOST, "sleep confirm command", cmdptr, size); 1839 lbs_pr_alert("confirm_sleep failed\n");
1921 1840 goto out;
1922 ret = priv->hw_host_to_card(priv, MVMS_CMD, cmdptr, size); 1841 }
1923 1842
1924 spin_lock_irqsave(&priv->driver_lock, flags); 1843 spin_lock_irqsave(&priv->driver_lock, flags);
1925 if (priv->intcounter || priv->currenttxskb)
1926 lbs_deb_host("SEND_SLEEPC_CMD: intcounter %d, currenttxskb %p\n",
1927 priv->intcounter, priv->currenttxskb);
1928 spin_unlock_irqrestore(&priv->driver_lock, flags);
1929 1844
1930 if (ret) { 1845 /* If nothing to do, go back to sleep (?) */
1931 lbs_pr_alert( 1846 if (!__kfifo_len(priv->event_fifo) && !priv->resp_len[priv->resp_idx])
1932 "SEND_SLEEPC_CMD: Host to Card failed for Confirm Sleep\n"); 1847 priv->psstate = PS_STATE_SLEEP;
1933 } else {
1934 spin_lock_irqsave(&priv->driver_lock, flags);
1935 if (!priv->intcounter) {
1936 priv->psstate = PS_STATE_SLEEP;
1937 } else {
1938 lbs_deb_host("SEND_SLEEPC_CMD: after sent, intcounter %d\n",
1939 priv->intcounter);
1940 }
1941 spin_unlock_irqrestore(&priv->driver_lock, flags);
1942 1848
1943 lbs_deb_host("SEND_SLEEPC_CMD: sent confirm sleep\n"); 1849 spin_unlock_irqrestore(&priv->driver_lock, flags);
1944 }
1945 1850
1946 lbs_deb_leave_args(LBS_DEB_HOST, "ret %d", ret); 1851out:
1947 return ret; 1852 lbs_deb_leave(LBS_DEB_HOST);
1948} 1853}
1949 1854
1950void lbs_ps_sleep(struct lbs_private *priv, int wait_option) 1855void lbs_ps_sleep(struct lbs_private *priv, int wait_option)
@@ -1992,10 +1897,10 @@ void lbs_ps_wakeup(struct lbs_private *priv, int wait_option)
1992 * @param psmode Power Saving mode 1897 * @param psmode Power Saving mode
1993 * @return n/a 1898 * @return n/a
1994 */ 1899 */
1995void lbs_ps_confirm_sleep(struct lbs_private *priv, u16 psmode) 1900void lbs_ps_confirm_sleep(struct lbs_private *priv)
1996{ 1901{
1997 unsigned long flags =0; 1902 unsigned long flags =0;
1998 u8 allowed = 1; 1903 int allowed = 1;
1999 1904
2000 lbs_deb_enter(LBS_DEB_HOST); 1905 lbs_deb_enter(LBS_DEB_HOST);
2001 1906
@@ -2005,20 +1910,22 @@ void lbs_ps_confirm_sleep(struct lbs_private *priv, u16 psmode)
2005 } 1910 }
2006 1911
2007 spin_lock_irqsave(&priv->driver_lock, flags); 1912 spin_lock_irqsave(&priv->driver_lock, flags);
1913 /* In-progress command? */
2008 if (priv->cur_cmd) { 1914 if (priv->cur_cmd) {
2009 allowed = 0; 1915 allowed = 0;
2010 lbs_deb_host("cur_cmd was set\n"); 1916 lbs_deb_host("cur_cmd was set\n");
2011 } 1917 }
2012 if (priv->intcounter > 0) { 1918
1919 /* Pending events or command responses? */
1920 if (__kfifo_len(priv->event_fifo) || priv->resp_len[priv->resp_idx]) {
2013 allowed = 0; 1921 allowed = 0;
2014 lbs_deb_host("intcounter %d\n", priv->intcounter); 1922 lbs_deb_host("pending events or command responses\n");
2015 } 1923 }
2016 spin_unlock_irqrestore(&priv->driver_lock, flags); 1924 spin_unlock_irqrestore(&priv->driver_lock, flags);
2017 1925
2018 if (allowed) { 1926 if (allowed) {
2019 lbs_deb_host("sending lbs_ps_confirm_sleep\n"); 1927 lbs_deb_host("sending lbs_ps_confirm_sleep\n");
2020 sendconfirmsleep(priv, (u8 *) & priv->lbs_ps_confirm_sleep, 1928 lbs_send_confirmsleep(priv);
2021 sizeof(struct PS_CMD_ConfirmSleep));
2022 } else { 1929 } else {
2023 lbs_deb_host("sleep confirm has been delayed\n"); 1930 lbs_deb_host("sleep confirm has been delayed\n");
2024 } 1931 }
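The rewritten lbs_ps_confirm_sleep() above replaces the old intcounter test with two explicit checks: a command still in flight, or anything waiting in the event fifo or response buffers, defers the sleep confirmation until the driver thread has drained that work. Reduced to just the decision (locking omitted; the field names are placeholders for the real priv members):

#include <stdbool.h>
#include <stdio.h>

struct state {
        bool cmd_in_flight;     /* priv->cur_cmd != NULL in the driver */
        int pending_events;     /* __kfifo_len(priv->event_fifo) */
        int pending_resp_len;   /* priv->resp_len[priv->resp_idx] */
};

static bool sleep_confirm_allowed(const struct state *s)
{
        if (s->cmd_in_flight)
                return false;
        if (s->pending_events || s->pending_resp_len)
                return false;
        return true;
}

int main(void)
{
        struct state s = { .cmd_in_flight = false, .pending_events = 2 };

        printf("allowed: %d\n", sleep_confirm_allowed(&s));
        return 0;
}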
@@ -2027,39 +1934,10 @@ void lbs_ps_confirm_sleep(struct lbs_private *priv, u16 psmode)
2027} 1934}
2028 1935
2029 1936
2030/** 1937static struct cmd_ctrl_node *__lbs_cmd_async(struct lbs_private *priv,
2031 * @brief Simple callback that copies response back into command 1938 uint16_t command, struct cmd_header *in_cmd, int in_cmd_size,
2032 * 1939 int (*callback)(struct lbs_private *, unsigned long, struct cmd_header *),
2033 * @param priv A pointer to struct lbs_private structure 1940 unsigned long callback_arg)
2034 * @param extra A pointer to the original command structure for which
2035 * 'resp' is a response
2036 * @param resp A pointer to the command response
2037 *
2038 * @return 0 on success, error on failure
2039 */
2040int lbs_cmd_copyback(struct lbs_private *priv, unsigned long extra,
2041 struct cmd_header *resp)
2042{
2043 struct cmd_header *buf = (void *)extra;
2044 uint16_t copy_len;
2045
2046 lbs_deb_enter(LBS_DEB_CMD);
2047
2048 copy_len = min(le16_to_cpu(buf->size), le16_to_cpu(resp->size));
2049 lbs_deb_cmd("Copying back %u bytes; command response was %u bytes, "
2050 "copy back buffer was %u bytes\n", copy_len,
2051 le16_to_cpu(resp->size), le16_to_cpu(buf->size));
2052 memcpy(buf, resp, copy_len);
2053
2054 lbs_deb_leave(LBS_DEB_CMD);
2055 return 0;
2056}
2057EXPORT_SYMBOL_GPL(lbs_cmd_copyback);
2058
2059struct cmd_ctrl_node *__lbs_cmd_async(struct lbs_private *priv, uint16_t command,
2060 struct cmd_header *in_cmd, int in_cmd_size,
2061 int (*callback)(struct lbs_private *, unsigned long, struct cmd_header *),
2062 unsigned long callback_arg)
2063{ 1941{
2064 struct cmd_ctrl_node *cmdnode; 1942 struct cmd_ctrl_node *cmdnode;
2065 1943
@@ -2096,9 +1974,6 @@ struct cmd_ctrl_node *__lbs_cmd_async(struct lbs_private *priv, uint16_t command
2096 1974
2097 lbs_deb_host("PREP_CMD: command 0x%04x\n", command); 1975 lbs_deb_host("PREP_CMD: command 0x%04x\n", command);
2098 1976
2099 /* here was the big old switch() statement, which is now obsolete,
2100 * because the caller of lbs_cmd() sets up all of *cmd for us. */
2101
2102 cmdnode->cmdwaitqwoken = 0; 1977 cmdnode->cmdwaitqwoken = 0;
2103 lbs_queue_cmd(priv, cmdnode); 1978 lbs_queue_cmd(priv, cmdnode);
2104 wake_up_interruptible(&priv->waitq); 1979 wake_up_interruptible(&priv->waitq);
@@ -2108,6 +1983,15 @@ struct cmd_ctrl_node *__lbs_cmd_async(struct lbs_private *priv, uint16_t command
2108 return cmdnode; 1983 return cmdnode;
2109} 1984}
2110 1985
1986void lbs_cmd_async(struct lbs_private *priv, uint16_t command,
1987 struct cmd_header *in_cmd, int in_cmd_size)
1988{
1989 lbs_deb_enter(LBS_DEB_CMD);
1990 __lbs_cmd_async(priv, command, in_cmd, in_cmd_size,
1991 lbs_cmd_async_callback, 0);
1992 lbs_deb_leave(LBS_DEB_CMD);
1993}
1994
2111int __lbs_cmd(struct lbs_private *priv, uint16_t command, 1995int __lbs_cmd(struct lbs_private *priv, uint16_t command,
2112 struct cmd_header *in_cmd, int in_cmd_size, 1996 struct cmd_header *in_cmd, int in_cmd_size,
2113 int (*callback)(struct lbs_private *, unsigned long, struct cmd_header *), 1997 int (*callback)(struct lbs_private *, unsigned long, struct cmd_header *),
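__lbs_cmd_async() becomes static here, and the new lbs_cmd_async() wrapper installs a do-nothing callback; together with the earlier lbs_complete_command() change, that sentinel tells the completion path to recycle the command node itself, since no caller is waiting to look at the result. A generic sketch of the same fire-and-forget-via-sentinel idea (simplified ownership model, not the driver's queueing code):

#include <stdio.h>
#include <stdlib.h>

struct request;
typedef int (*req_cb)(struct request *req);

struct request {
        int id;
        req_cb callback;        /* NULL or the sentinel => auto-free below */
};

/* sentinel: the submitter does not care about the result */
static int async_noop_callback(struct request *req)
{
        (void)req;
        return 0;
}

static struct request *submit(int id, req_cb cb)
{
        struct request *req = malloc(sizeof(*req));

        if (!req)
                return NULL;
        req->id = id;
        req->callback = cb ? cb : async_noop_callback;
        return req;
}

static void complete_request(struct request *req)
{
        if (req->callback)
                req->callback(req);

        /* recycle automatically unless a waiting caller owns the node */
        if (!req->callback || req->callback == async_noop_callback)
                free(req);
}

int main(void)
{
        struct request *req = submit(1, NULL);  /* fire and forget */

        if (req)
                complete_request(req);          /* frees req here */
        return 0;
}

A NULL callback is treated the same way, matching the "!cmd->callback || cmd->callback == lbs_cmd_async_callback" test added to lbs_complete_command() in this patch.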
diff --git a/drivers/net/wireless/libertas/cmd.h b/drivers/net/wireless/libertas/cmd.h
index b9ab85cc7913..3dfc2d43c224 100644
--- a/drivers/net/wireless/libertas/cmd.h
+++ b/drivers/net/wireless/libertas/cmd.h
@@ -18,12 +18,9 @@
18#define lbs_cmd_with_response(priv, cmdnr, cmd) \ 18#define lbs_cmd_with_response(priv, cmdnr, cmd) \
19 lbs_cmd(priv, cmdnr, cmd, lbs_cmd_copyback, (unsigned long) (cmd)) 19 lbs_cmd(priv, cmdnr, cmd, lbs_cmd_copyback, (unsigned long) (cmd))
20 20
21/* __lbs_cmd() will free the cmdnode and return success/failure. 21void lbs_cmd_async(struct lbs_private *priv, uint16_t command,
22 __lbs_cmd_async() requires that the callback free the cmdnode */ 22 struct cmd_header *in_cmd, int in_cmd_size);
23struct cmd_ctrl_node *__lbs_cmd_async(struct lbs_private *priv, uint16_t command, 23
24 struct cmd_header *in_cmd, int in_cmd_size,
25 int (*callback)(struct lbs_private *, unsigned long, struct cmd_header *),
26 unsigned long callback_arg);
27int __lbs_cmd(struct lbs_private *priv, uint16_t command, 24int __lbs_cmd(struct lbs_private *priv, uint16_t command,
28 struct cmd_header *in_cmd, int in_cmd_size, 25 struct cmd_header *in_cmd, int in_cmd_size,
29 int (*callback)(struct lbs_private *, unsigned long, struct cmd_header *), 26 int (*callback)(struct lbs_private *, unsigned long, struct cmd_header *),
@@ -57,5 +54,7 @@ int lbs_cmd_802_11_set_wep(struct lbs_private *priv, uint16_t cmd_action,
57 struct assoc_request *assoc); 54 struct assoc_request *assoc);
58int lbs_cmd_802_11_enable_rsn(struct lbs_private *priv, uint16_t cmd_action, 55int lbs_cmd_802_11_enable_rsn(struct lbs_private *priv, uint16_t cmd_action,
59 uint16_t *enable); 56 uint16_t *enable);
57int lbs_cmd_802_11_key_material(struct lbs_private *priv, uint16_t cmd_action,
58 struct assoc_request *assoc);
60 59
61#endif /* _LBS_CMD_H */ 60#endif /* _LBS_CMD_H */
diff --git a/drivers/net/wireless/libertas/cmdresp.c b/drivers/net/wireless/libertas/cmdresp.c
index f0ef7081bdeb..5abecb7673e6 100644
--- a/drivers/net/wireless/libertas/cmdresp.c
+++ b/drivers/net/wireless/libertas/cmdresp.c
@@ -12,7 +12,7 @@
12#include "decl.h" 12#include "decl.h"
13#include "defs.h" 13#include "defs.h"
14#include "dev.h" 14#include "dev.h"
15#include "join.h" 15#include "assoc.h"
16#include "wext.h" 16#include "wext.h"
17 17
18/** 18/**
@@ -74,7 +74,7 @@ void lbs_mac_event_disconnected(struct lbs_private *priv)
74 lbs_deb_cmd("disconnected, so exit PS mode\n"); 74 lbs_deb_cmd("disconnected, so exit PS mode\n");
75 lbs_ps_wakeup(priv, 0); 75 lbs_ps_wakeup(priv, 0);
76 } 76 }
77 lbs_deb_leave(LBS_DEB_CMD); 77 lbs_deb_leave(LBS_DEB_ASSOC);
78} 78}
79 79
80/** 80/**
@@ -146,22 +146,6 @@ static int lbs_ret_reg_access(struct lbs_private *priv,
146 return ret; 146 return ret;
147} 147}
148 148
149static int lbs_ret_802_11_stat(struct lbs_private *priv,
150 struct cmd_ds_command *resp)
151{
152 lbs_deb_enter(LBS_DEB_CMD);
153/* currently priv->wlan802_11Stat is unused
154
155 struct cmd_ds_802_11_get_stat *p11Stat = &resp->params.gstat;
156
157 // TODO Convert it to Big endian before copy
158 memcpy(&priv->wlan802_11Stat,
159 p11Stat, sizeof(struct cmd_ds_802_11_get_stat));
160*/
161 lbs_deb_leave(LBS_DEB_CMD);
162 return 0;
163}
164
165static int lbs_ret_802_11_snmp_mib(struct lbs_private *priv, 149static int lbs_ret_802_11_snmp_mib(struct lbs_private *priv,
166 struct cmd_ds_command *resp) 150 struct cmd_ds_command *resp)
167{ 151{
@@ -204,74 +188,6 @@ static int lbs_ret_802_11_snmp_mib(struct lbs_private *priv,
204 return 0; 188 return 0;
205} 189}
206 190
207static int lbs_ret_802_11_key_material(struct lbs_private *priv,
208 struct cmd_ds_command *resp)
209{
210 struct cmd_ds_802_11_key_material *pkeymaterial =
211 &resp->params.keymaterial;
212 u16 action = le16_to_cpu(pkeymaterial->action);
213
214 lbs_deb_enter(LBS_DEB_CMD);
215
216 /* Copy the returned key to driver private data */
217 if (action == CMD_ACT_GET) {
218 u8 * buf_ptr = (u8 *) &pkeymaterial->keyParamSet;
219 u8 * resp_end = (u8 *) (resp + le16_to_cpu(resp->size));
220
221 while (buf_ptr < resp_end) {
222 struct MrvlIEtype_keyParamSet * pkeyparamset =
223 (struct MrvlIEtype_keyParamSet *) buf_ptr;
224 struct enc_key * pkey;
225 u16 param_set_len = le16_to_cpu(pkeyparamset->length);
226 u16 key_len = le16_to_cpu(pkeyparamset->keylen);
227 u16 key_flags = le16_to_cpu(pkeyparamset->keyinfo);
228 u16 key_type = le16_to_cpu(pkeyparamset->keytypeid);
229 u8 * end;
230
231 end = (u8 *) pkeyparamset + sizeof (pkeyparamset->type)
232 + sizeof (pkeyparamset->length)
233 + param_set_len;
234 /* Make sure we don't access past the end of the IEs */
235 if (end > resp_end)
236 break;
237
238 if (key_flags & KEY_INFO_WPA_UNICAST)
239 pkey = &priv->wpa_unicast_key;
240 else if (key_flags & KEY_INFO_WPA_MCAST)
241 pkey = &priv->wpa_mcast_key;
242 else
243 break;
244
245 /* Copy returned key into driver */
246 memset(pkey, 0, sizeof(struct enc_key));
247 if (key_len > sizeof(pkey->key))
248 break;
249 pkey->type = key_type;
250 pkey->flags = key_flags;
251 pkey->len = key_len;
252 memcpy(pkey->key, pkeyparamset->key, pkey->len);
253
254 buf_ptr = end + 1;
255 }
256 }
257
258 lbs_deb_enter(LBS_DEB_CMD);
259 return 0;
260}
261
262static int lbs_ret_802_11_mac_address(struct lbs_private *priv,
263 struct cmd_ds_command *resp)
264{
265 struct cmd_ds_802_11_mac_address *macadd = &resp->params.macadd;
266
267 lbs_deb_enter(LBS_DEB_CMD);
268
269 memcpy(priv->current_addr, macadd->macadd, ETH_ALEN);
270
271 lbs_deb_enter(LBS_DEB_CMD);
272 return 0;
273}
274
275static int lbs_ret_802_11_rf_tx_power(struct lbs_private *priv, 191static int lbs_ret_802_11_rf_tx_power(struct lbs_private *priv,
276 struct cmd_ds_command *resp) 192 struct cmd_ds_command *resp)
277{ 193{
@@ -333,45 +249,6 @@ static int lbs_ret_802_11_rssi(struct lbs_private *priv,
333 return 0; 249 return 0;
334} 250}
335 251
336static int lbs_ret_802_11_eeprom_access(struct lbs_private *priv,
337 struct cmd_ds_command *resp)
338{
339 struct lbs_ioctl_regrdwr *pbuf;
340 pbuf = (struct lbs_ioctl_regrdwr *) priv->prdeeprom;
341
342 lbs_deb_enter_args(LBS_DEB_CMD, "len %d",
343 le16_to_cpu(resp->params.rdeeprom.bytecount));
344 if (pbuf->NOB < le16_to_cpu(resp->params.rdeeprom.bytecount)) {
345 pbuf->NOB = 0;
346 lbs_deb_cmd("EEPROM read length too big\n");
347 return -1;
348 }
349 pbuf->NOB = le16_to_cpu(resp->params.rdeeprom.bytecount);
350 if (pbuf->NOB > 0) {
351
352 memcpy(&pbuf->value, (u8 *) & resp->params.rdeeprom.value,
353 le16_to_cpu(resp->params.rdeeprom.bytecount));
354 lbs_deb_hex(LBS_DEB_CMD, "EEPROM", (char *)&pbuf->value,
355 le16_to_cpu(resp->params.rdeeprom.bytecount));
356 }
357 lbs_deb_leave(LBS_DEB_CMD);
358 return 0;
359}
360
361static int lbs_ret_get_log(struct lbs_private *priv,
362 struct cmd_ds_command *resp)
363{
364 struct cmd_ds_802_11_get_log *logmessage = &resp->params.glog;
365
366 lbs_deb_enter(LBS_DEB_CMD);
367
368 /* Stored little-endian */
369 memcpy(&priv->logmsg, logmessage, sizeof(struct cmd_ds_802_11_get_log));
370
371 lbs_deb_leave(LBS_DEB_CMD);
372 return 0;
373}
374
375static int lbs_ret_802_11_bcn_ctrl(struct lbs_private * priv, 252static int lbs_ret_802_11_bcn_ctrl(struct lbs_private * priv,
376 struct cmd_ds_command *resp) 253 struct cmd_ds_command *resp)
377{ 254{
@@ -390,7 +267,6 @@ static int lbs_ret_802_11_bcn_ctrl(struct lbs_private * priv,
390} 267}
391 268
392static inline int handle_cmd_response(struct lbs_private *priv, 269static inline int handle_cmd_response(struct lbs_private *priv,
393 unsigned long dummy,
394 struct cmd_header *cmd_response) 270 struct cmd_header *cmd_response)
395{ 271{
396 struct cmd_ds_command *resp = (struct cmd_ds_command *) cmd_response; 272 struct cmd_ds_command *resp = (struct cmd_ds_command *) cmd_response;
@@ -407,14 +283,6 @@ static inline int handle_cmd_response(struct lbs_private *priv,
407 ret = lbs_ret_reg_access(priv, respcmd, resp); 283 ret = lbs_ret_reg_access(priv, respcmd, resp);
408 break; 284 break;
409 285
410 case CMD_RET(CMD_802_11_SCAN):
411 ret = lbs_ret_80211_scan(priv, resp);
412 break;
413
414 case CMD_RET(CMD_802_11_GET_LOG):
415 ret = lbs_ret_get_log(priv, resp);
416 break;
417
418 case CMD_RET_802_11_ASSOCIATE: 286 case CMD_RET_802_11_ASSOCIATE:
419 case CMD_RET(CMD_802_11_ASSOCIATE): 287 case CMD_RET(CMD_802_11_ASSOCIATE):
420 case CMD_RET(CMD_802_11_REASSOCIATE): 288 case CMD_RET(CMD_802_11_REASSOCIATE):
@@ -423,7 +291,7 @@ static inline int handle_cmd_response(struct lbs_private *priv,
423 291
424 case CMD_RET(CMD_802_11_DISASSOCIATE): 292 case CMD_RET(CMD_802_11_DISASSOCIATE):
425 case CMD_RET(CMD_802_11_DEAUTHENTICATE): 293 case CMD_RET(CMD_802_11_DEAUTHENTICATE):
426 ret = lbs_ret_80211_disassociate(priv, resp); 294 ret = lbs_ret_80211_disassociate(priv);
427 break; 295 break;
428 296
429 case CMD_RET(CMD_802_11_AD_HOC_START): 297 case CMD_RET(CMD_802_11_AD_HOC_START):
@@ -431,10 +299,6 @@ static inline int handle_cmd_response(struct lbs_private *priv,
431 ret = lbs_ret_80211_ad_hoc_start(priv, resp); 299 ret = lbs_ret_80211_ad_hoc_start(priv, resp);
432 break; 300 break;
433 301
434 case CMD_RET(CMD_802_11_GET_STAT):
435 ret = lbs_ret_802_11_stat(priv, resp);
436 break;
437
438 case CMD_RET(CMD_802_11_SNMP_MIB): 302 case CMD_RET(CMD_802_11_SNMP_MIB):
439 ret = lbs_ret_802_11_snmp_mib(priv, resp); 303 ret = lbs_ret_802_11_snmp_mib(priv, resp);
440 break; 304 break;
@@ -453,7 +317,6 @@ static inline int handle_cmd_response(struct lbs_private *priv,
453 break; 317 break;
454 318
455 case CMD_RET(CMD_MAC_MULTICAST_ADR): 319 case CMD_RET(CMD_MAC_MULTICAST_ADR):
456 case CMD_RET(CMD_MAC_CONTROL):
457 case CMD_RET(CMD_802_11_RESET): 320 case CMD_RET(CMD_802_11_RESET):
458 case CMD_RET(CMD_802_11_AUTHENTICATE): 321 case CMD_RET(CMD_802_11_AUTHENTICATE):
459 case CMD_RET(CMD_802_11_BEACON_STOP): 322 case CMD_RET(CMD_802_11_BEACON_STOP):
@@ -467,24 +330,12 @@ static inline int handle_cmd_response(struct lbs_private *priv,
467 ret = lbs_ret_802_11_rssi(priv, resp); 330 ret = lbs_ret_802_11_rssi(priv, resp);
468 break; 331 break;
469 332
470 case CMD_RET(CMD_802_11_MAC_ADDRESS):
471 ret = lbs_ret_802_11_mac_address(priv, resp);
472 break;
473
474 case CMD_RET(CMD_802_11_AD_HOC_STOP): 333 case CMD_RET(CMD_802_11_AD_HOC_STOP):
475 ret = lbs_ret_80211_ad_hoc_stop(priv, resp); 334 ret = lbs_ret_80211_ad_hoc_stop(priv);
476 break;
477
478 case CMD_RET(CMD_802_11_KEY_MATERIAL):
479 ret = lbs_ret_802_11_key_material(priv, resp);
480 break;
481
482 case CMD_RET(CMD_802_11_EEPROM_ACCESS):
483 ret = lbs_ret_802_11_eeprom_access(priv, resp);
484 break; 335 break;
485 336
486 case CMD_RET(CMD_802_11D_DOMAIN_INFO): 337 case CMD_RET(CMD_802_11D_DOMAIN_INFO):
487 ret = lbs_ret_802_11d_domain_info(priv, resp); 338 ret = lbs_ret_802_11d_domain_info(resp);
488 break; 339 break;
489 340
490 case CMD_RET(CMD_802_11_TPC_CFG): 341 case CMD_RET(CMD_802_11_TPC_CFG):
@@ -500,14 +351,6 @@ static inline int handle_cmd_response(struct lbs_private *priv,
500 spin_unlock_irqrestore(&priv->driver_lock, flags); 351 spin_unlock_irqrestore(&priv->driver_lock, flags);
501 break; 352 break;
502 353
503 case CMD_RET(CMD_802_11_PWR_CFG):
504 spin_lock_irqsave(&priv->driver_lock, flags);
505 memmove((void *)priv->cur_cmd->callback_arg, &resp->params.pwrcfg,
506 sizeof(struct cmd_ds_802_11_pwr_cfg));
507 spin_unlock_irqrestore(&priv->driver_lock, flags);
508
509 break;
510
511 case CMD_RET(CMD_GET_TSF): 354 case CMD_RET(CMD_GET_TSF):
512 spin_lock_irqsave(&priv->driver_lock, flags); 355 spin_lock_irqsave(&priv->driver_lock, flags);
513 memcpy((void *)priv->cur_cmd->callback_arg, 356 memcpy((void *)priv->cur_cmd->callback_arg,
@@ -541,7 +384,7 @@ static inline int handle_cmd_response(struct lbs_private *priv,
541 return ret; 384 return ret;
542} 385}
543 386
544int lbs_process_rx_command(struct lbs_private *priv) 387int lbs_process_command_response(struct lbs_private *priv, u8 *data, u32 len)
545{ 388{
546 uint16_t respcmd, curcmd; 389 uint16_t respcmd, curcmd;
547 struct cmd_header *resp; 390 struct cmd_header *resp;
@@ -561,14 +404,14 @@ int lbs_process_rx_command(struct lbs_private *priv)
561 goto done; 404 goto done;
562 } 405 }
563 406
564 resp = (void *)priv->upld_buf; 407 resp = (void *)data;
565 curcmd = le16_to_cpu(priv->cur_cmd->cmdbuf->command); 408 curcmd = le16_to_cpu(priv->cur_cmd->cmdbuf->command);
566 respcmd = le16_to_cpu(resp->command); 409 respcmd = le16_to_cpu(resp->command);
567 result = le16_to_cpu(resp->result); 410 result = le16_to_cpu(resp->result);
568 411
569 lbs_deb_host("CMD_RESP: response 0x%04x, seq %d, size %d, jiffies %lu\n", 412 lbs_deb_cmd("CMD_RESP: response 0x%04x, seq %d, size %d\n",
570 respcmd, le16_to_cpu(resp->seqnum), priv->upld_len, jiffies); 413 respcmd, le16_to_cpu(resp->seqnum), len);
571 lbs_deb_hex(LBS_DEB_HOST, "CMD_RESP", (void *) resp, priv->upld_len); 414 lbs_deb_hex(LBS_DEB_CMD, "CMD_RESP", (void *) resp, len);
572 415
573 if (resp->seqnum != priv->cur_cmd->cmdbuf->seqnum) { 416 if (resp->seqnum != priv->cur_cmd->cmdbuf->seqnum) {
574 lbs_pr_info("Received CMD_RESP with invalid sequence %d (expected %d)\n", 417 lbs_pr_info("Received CMD_RESP with invalid sequence %d (expected %d)\n",
@@ -687,7 +530,7 @@ int lbs_process_rx_command(struct lbs_private *priv)
687 ret = priv->cur_cmd->callback(priv, priv->cur_cmd->callback_arg, 530 ret = priv->cur_cmd->callback(priv, priv->cur_cmd->callback_arg,
688 resp); 531 resp);
689 } else 532 } else
690 ret = handle_cmd_response(priv, 0, resp); 533 ret = handle_cmd_response(priv, resp);
691 534
692 spin_lock_irqsave(&priv->driver_lock, flags); 535 spin_lock_irqsave(&priv->driver_lock, flags);
693 536
@@ -705,21 +548,20 @@ done:
705 548
706static int lbs_send_confirmwake(struct lbs_private *priv) 549static int lbs_send_confirmwake(struct lbs_private *priv)
707{ 550{
708 struct cmd_header *cmd = &priv->lbs_ps_confirm_wake; 551 struct cmd_header cmd;
709 int ret = 0; 552 int ret = 0;
710 553
711 lbs_deb_enter(LBS_DEB_HOST); 554 lbs_deb_enter(LBS_DEB_HOST);
712 555
713 cmd->command = cpu_to_le16(CMD_802_11_WAKEUP_CONFIRM); 556 cmd.command = cpu_to_le16(CMD_802_11_WAKEUP_CONFIRM);
714 cmd->size = cpu_to_le16(sizeof(*cmd)); 557 cmd.size = cpu_to_le16(sizeof(cmd));
715 cmd->seqnum = cpu_to_le16(++priv->seqnum); 558 cmd.seqnum = cpu_to_le16(++priv->seqnum);
716 cmd->result = 0; 559 cmd.result = 0;
717
718 lbs_deb_host("SEND_WAKEC_CMD: before download\n");
719 560
720 lbs_deb_hex(LBS_DEB_HOST, "wake confirm command", (void *)cmd, sizeof(*cmd)); 561 lbs_deb_hex(LBS_DEB_HOST, "wake confirm", (u8 *) &cmd,
562 sizeof(cmd));
721 563
722 ret = priv->hw_host_to_card(priv, MVMS_CMD, (void *)cmd, sizeof(*cmd)); 564 ret = priv->hw_host_to_card(priv, MVMS_CMD, (u8 *) &cmd, sizeof(cmd));
723 if (ret) 565 if (ret)
724 lbs_pr_alert("SEND_WAKEC_CMD: Host to Card failed for Confirm Wake\n"); 566 lbs_pr_alert("SEND_WAKEC_CMD: Host to Card failed for Confirm Wake\n");
725 567
@@ -727,22 +569,15 @@ static int lbs_send_confirmwake(struct lbs_private *priv)
727 return ret; 569 return ret;
728} 570}
729 571
730int lbs_process_event(struct lbs_private *priv) 572int lbs_process_event(struct lbs_private *priv, u32 event)
731{ 573{
732 int ret = 0; 574 int ret = 0;
733 u32 eventcause;
734 575
735 lbs_deb_enter(LBS_DEB_CMD); 576 lbs_deb_enter(LBS_DEB_CMD);
736 577
737 spin_lock_irq(&priv->driver_lock); 578 switch (event) {
738 eventcause = priv->eventcause >> SBI_EVENT_CAUSE_SHIFT;
739 spin_unlock_irq(&priv->driver_lock);
740
741 lbs_deb_cmd("event cause %d\n", eventcause);
742
743 switch (eventcause) {
744 case MACREG_INT_CODE_LINK_SENSED: 579 case MACREG_INT_CODE_LINK_SENSED:
745 lbs_deb_cmd("EVENT: MACREG_INT_CODE_LINK_SENSED\n"); 580 lbs_deb_cmd("EVENT: link sensed\n");
746 break; 581 break;
747 582
748 case MACREG_INT_CODE_DEAUTHENTICATED: 583 case MACREG_INT_CODE_DEAUTHENTICATED:
@@ -761,7 +596,7 @@ int lbs_process_event(struct lbs_private *priv)
761 break; 596 break;
762 597
763 case MACREG_INT_CODE_PS_SLEEP: 598 case MACREG_INT_CODE_PS_SLEEP:
764 lbs_deb_cmd("EVENT: sleep\n"); 599 lbs_deb_cmd("EVENT: ps sleep\n");
765 600
766 /* handle unexpected PS SLEEP event */ 601 /* handle unexpected PS SLEEP event */
767 if (priv->psstate == PS_STATE_FULL_POWER) { 602 if (priv->psstate == PS_STATE_FULL_POWER) {
@@ -771,17 +606,17 @@ int lbs_process_event(struct lbs_private *priv)
771 } 606 }
772 priv->psstate = PS_STATE_PRE_SLEEP; 607 priv->psstate = PS_STATE_PRE_SLEEP;
773 608
774 lbs_ps_confirm_sleep(priv, (u16) priv->psmode); 609 lbs_ps_confirm_sleep(priv);
775 610
776 break; 611 break;
777 612
778 case MACREG_INT_CODE_HOST_AWAKE: 613 case MACREG_INT_CODE_HOST_AWAKE:
779 lbs_deb_cmd("EVENT: HOST_AWAKE\n"); 614 lbs_deb_cmd("EVENT: host awake\n");
780 lbs_send_confirmwake(priv); 615 lbs_send_confirmwake(priv);
781 break; 616 break;
782 617
783 case MACREG_INT_CODE_PS_AWAKE: 618 case MACREG_INT_CODE_PS_AWAKE:
784 lbs_deb_cmd("EVENT: awake\n"); 619 lbs_deb_cmd("EVENT: ps awake\n");
785 /* handle unexpected PS AWAKE event */ 620 /* handle unexpected PS AWAKE event */
786 if (priv->psstate == PS_STATE_FULL_POWER) { 621 if (priv->psstate == PS_STATE_FULL_POWER) {
787 lbs_deb_cmd( 622 lbs_deb_cmd(
@@ -812,14 +647,16 @@ int lbs_process_event(struct lbs_private *priv)
812 lbs_deb_cmd("EVENT: MULTICAST MIC ERROR\n"); 647 lbs_deb_cmd("EVENT: MULTICAST MIC ERROR\n");
813 handle_mic_failureevent(priv, MACREG_INT_CODE_MIC_ERR_MULTICAST); 648 handle_mic_failureevent(priv, MACREG_INT_CODE_MIC_ERR_MULTICAST);
814 break; 649 break;
650
815 case MACREG_INT_CODE_MIB_CHANGED: 651 case MACREG_INT_CODE_MIB_CHANGED:
652 lbs_deb_cmd("EVENT: MIB CHANGED\n");
653 break;
816 case MACREG_INT_CODE_INIT_DONE: 654 case MACREG_INT_CODE_INIT_DONE:
655 lbs_deb_cmd("EVENT: INIT DONE\n");
817 break; 656 break;
818
819 case MACREG_INT_CODE_ADHOC_BCN_LOST: 657 case MACREG_INT_CODE_ADHOC_BCN_LOST:
820 lbs_deb_cmd("EVENT: ADHOC beacon lost\n"); 658 lbs_deb_cmd("EVENT: ADHOC beacon lost\n");
821 break; 659 break;
822
823 case MACREG_INT_CODE_RSSI_LOW: 660 case MACREG_INT_CODE_RSSI_LOW:
824 lbs_pr_alert("EVENT: rssi low\n"); 661 lbs_pr_alert("EVENT: rssi low\n");
825 break; 662 break;
@@ -854,14 +691,10 @@ int lbs_process_event(struct lbs_private *priv)
854 break; 691 break;
855 692
856 default: 693 default:
857 lbs_pr_alert("EVENT: unknown event id %d\n", eventcause); 694 lbs_pr_alert("EVENT: unknown event id %d\n", event);
858 break; 695 break;
859 } 696 }
860 697
861 spin_lock_irq(&priv->driver_lock);
862 priv->eventcause = 0;
863 spin_unlock_irq(&priv->driver_lock);
864
865 lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret); 698 lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
866 return ret; 699 return ret;
867} 700}
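lbs_process_command_response() now takes the response buffer and its length as arguments instead of reading priv->upld_buf, matches the reply against the outstanding command by sequence number, and then dispatches either to the per-command callback or to the generic handle_cmd_response() switch. A stripped-down illustration of that match-and-dispatch step (generic structures, no locking; not the driver's code):

#include <stdint.h>
#include <stdio.h>

struct cmd_hdr {
        uint16_t command;
        uint16_t seqnum;
        uint16_t result;
};

struct pending_cmd {
        struct cmd_hdr hdr;
        int (*callback)(const struct cmd_hdr *resp);
};

static int default_handler(const struct cmd_hdr *resp)
{
        printf("generic handler: command 0x%04x, result %u\n",
               (unsigned int)resp->command, (unsigned int)resp->result);
        return 0;
}

static int process_response(struct pending_cmd *cur, const struct cmd_hdr *resp)
{
        /* a stale or unexpected reply is dropped, as in the driver */
        if (!cur || resp->seqnum != cur->hdr.seqnum)
                return -1;

        if (cur->callback)
                return cur->callback(resp);
        return default_handler(resp);
}

int main(void)
{
        struct pending_cmd cur = { .hdr = { .command = 0x0028, .seqnum = 7 } };
        struct cmd_hdr resp = { .command = 0x8028, .seqnum = 7, .result = 0 };

        return process_response(&cur, &resp);
}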
diff --git a/drivers/net/wireless/libertas/debugfs.c b/drivers/net/wireless/libertas/debugfs.c
index fd67b770dd78..ad2fabca9116 100644
--- a/drivers/net/wireless/libertas/debugfs.c
+++ b/drivers/net/wireless/libertas/debugfs.c
@@ -19,7 +19,7 @@ static char *szStates[] = {
19}; 19};
20 20
21#ifdef PROC_DEBUG 21#ifdef PROC_DEBUG
22static void lbs_debug_init(struct lbs_private *priv, struct net_device *dev); 22static void lbs_debug_init(struct lbs_private *priv);
23#endif 23#endif
24 24
25static int open_file_generic(struct inode *inode, struct file *file) 25static int open_file_generic(struct inode *inode, struct file *file)
@@ -78,7 +78,7 @@ static ssize_t lbs_getscantable(struct file *file, char __user *userbuf,
78 u16 spectrum_mgmt = (iter_bss->capability & WLAN_CAPABILITY_SPECTRUM_MGMT); 78 u16 spectrum_mgmt = (iter_bss->capability & WLAN_CAPABILITY_SPECTRUM_MGMT);
79 79
80 pos += snprintf(buf+pos, len-pos, 80 pos += snprintf(buf+pos, len-pos,
81 "%02u| %03d | %04ld | %s |", 81 "%02u| %03d | %04d | %s |",
82 numscansdone, iter_bss->channel, iter_bss->rssi, 82 numscansdone, iter_bss->channel, iter_bss->rssi,
83 print_mac(mac, iter_bss->bssid)); 83 print_mac(mac, iter_bss->bssid));
84 pos += snprintf(buf+pos, len-pos, " %04x-", iter_bss->capability); 84 pos += snprintf(buf+pos, len-pos, " %04x-", iter_bss->capability);
@@ -164,173 +164,6 @@ out_unlock:
164 return ret; 164 return ret;
165} 165}
166 166
167static ssize_t lbs_extscan(struct file *file, const char __user *userbuf,
168 size_t count, loff_t *ppos)
169{
170 struct lbs_private *priv = file->private_data;
171 ssize_t res, buf_size;
172 union iwreq_data wrqu;
173 unsigned long addr = get_zeroed_page(GFP_KERNEL);
174 char *buf = (char *)addr;
175
176 buf_size = min(count, len - 1);
177 if (copy_from_user(buf, userbuf, buf_size)) {
178 res = -EFAULT;
179 goto out_unlock;
180 }
181
182 lbs_send_specific_ssid_scan(priv, buf, strlen(buf)-1, 0);
183
184 memset(&wrqu, 0, sizeof(union iwreq_data));
185 wireless_send_event(priv->dev, SIOCGIWSCAN, &wrqu, NULL);
186
187out_unlock:
188 free_page(addr);
189 return count;
190}
191
192static void lbs_parse_bssid(char *buf, size_t count,
193 struct lbs_ioctl_user_scan_cfg *scan_cfg)
194{
195 char *hold;
196 unsigned int mac[ETH_ALEN];
197
198 hold = strstr(buf, "bssid=");
199 if (!hold)
200 return;
201 hold += 6;
202 sscanf(hold, "%02x:%02x:%02x:%02x:%02x:%02x",
203 mac, mac+1, mac+2, mac+3, mac+4, mac+5);
204 memcpy(scan_cfg->bssid, mac, ETH_ALEN);
205}
206
207static void lbs_parse_ssid(char *buf, size_t count,
208 struct lbs_ioctl_user_scan_cfg *scan_cfg)
209{
210 char *hold, *end;
211 ssize_t size;
212
213 hold = strstr(buf, "ssid=");
214 if (!hold)
215 return;
216 hold += 5;
217 end = strchr(hold, ' ');
218 if (!end)
219 end = buf + count - 1;
220
221 size = min((size_t)IW_ESSID_MAX_SIZE, (size_t) (end - hold));
222 strncpy(scan_cfg->ssid, hold, size);
223
224 return;
225}
226
227static int lbs_parse_clear(char *buf, size_t count, const char *tag)
228{
229 char *hold;
230 int val;
231
232 hold = strstr(buf, tag);
233 if (!hold)
234 return 0;
235 hold += strlen(tag);
236 sscanf(hold, "%d", &val);
237
238 if (val != 0)
239 val = 1;
240
241 return val;
242}
243
244static int lbs_parse_dur(char *buf, size_t count,
245 struct lbs_ioctl_user_scan_cfg *scan_cfg)
246{
247 char *hold;
248 int val;
249
250 hold = strstr(buf, "dur=");
251 if (!hold)
252 return 0;
253 hold += 4;
254 sscanf(hold, "%d", &val);
255
256 return val;
257}
258
259static void lbs_parse_type(char *buf, size_t count,
260 struct lbs_ioctl_user_scan_cfg *scan_cfg)
261{
262 char *hold;
263 int val;
264
265 hold = strstr(buf, "type=");
266 if (!hold)
267 return;
268 hold += 5;
269 sscanf(hold, "%d", &val);
270
271 /* type=1,2 or 3 */
272 if (val < 1 || val > 3)
273 return;
274
275 scan_cfg->bsstype = val;
276
277 return;
278}
279
280static ssize_t lbs_setuserscan(struct file *file,
281 const char __user *userbuf,
282 size_t count, loff_t *ppos)
283{
284 struct lbs_private *priv = file->private_data;
285 ssize_t res, buf_size;
286 struct lbs_ioctl_user_scan_cfg *scan_cfg;
287 union iwreq_data wrqu;
288 int dur;
289 char *buf = (char *)get_zeroed_page(GFP_KERNEL);
290
291 if (!buf)
292 return -ENOMEM;
293
294 buf_size = min(count, len - 1);
295 if (copy_from_user(buf, userbuf, buf_size)) {
296 res = -EFAULT;
297 goto out_buf;
298 }
299
300 scan_cfg = kzalloc(sizeof(struct lbs_ioctl_user_scan_cfg), GFP_KERNEL);
301 if (!scan_cfg) {
302 res = -ENOMEM;
303 goto out_buf;
304 }
305 res = count;
306
307 scan_cfg->bsstype = LBS_SCAN_BSS_TYPE_ANY;
308
309 dur = lbs_parse_dur(buf, count, scan_cfg);
310 lbs_parse_bssid(buf, count, scan_cfg);
311 scan_cfg->clear_bssid = lbs_parse_clear(buf, count, "clear_bssid=");
312 lbs_parse_ssid(buf, count, scan_cfg);
313 scan_cfg->clear_ssid = lbs_parse_clear(buf, count, "clear_ssid=");
314 lbs_parse_type(buf, count, scan_cfg);
315
316 lbs_scan_networks(priv, scan_cfg, 1);
317 wait_event_interruptible(priv->cmd_pending,
318 priv->surpriseremoved || !priv->last_scanned_channel);
319
320 if (priv->surpriseremoved)
321 goto out_scan_cfg;
322
323 memset(&wrqu, 0x00, sizeof(union iwreq_data));
324 wireless_send_event(priv->dev, SIOCGIWSCAN, &wrqu, NULL);
325
326 out_scan_cfg:
327 kfree(scan_cfg);
328 out_buf:
329 free_page((unsigned long)buf);
330 return res;
331}
332
333
334/* 167/*
 335 * When calling CMD_802_11_SUBSCRIBE_EVENT with CMD_ACT_GET, we might 168
336 * get a bunch of vendor-specific TLVs (a.k.a. IEs) back from the 169 * get a bunch of vendor-specific TLVs (a.k.a. IEs) back from the
@@ -857,8 +690,6 @@ static struct lbs_debugfs_files debugfs_files[] = {
857 write_file_dummy), }, 690 write_file_dummy), },
858 { "sleepparams", 0644, FOPS(lbs_sleepparams_read, 691 { "sleepparams", 0644, FOPS(lbs_sleepparams_read,
859 lbs_sleepparams_write), }, 692 lbs_sleepparams_write), },
860 { "extscan", 0600, FOPS(NULL, lbs_extscan), },
861 { "setuserscan", 0600, FOPS(NULL, lbs_setuserscan), },
862}; 693};
863 694
864static struct lbs_debugfs_files debugfs_events_files[] = { 695static struct lbs_debugfs_files debugfs_events_files[] = {
@@ -947,7 +778,7 @@ void lbs_debugfs_init_one(struct lbs_private *priv, struct net_device *dev)
947 } 778 }
948 779
949#ifdef PROC_DEBUG 780#ifdef PROC_DEBUG
950 lbs_debug_init(priv, dev); 781 lbs_debug_init(priv);
951#endif 782#endif
952exit: 783exit:
953 return; 784 return;
@@ -993,7 +824,6 @@ struct debug_data {
993/* To debug any member of struct lbs_private, simply add one line here. 824/* To debug any member of struct lbs_private, simply add one line here.
994 */ 825 */
995static struct debug_data items[] = { 826static struct debug_data items[] = {
996 {"intcounter", item_size(intcounter), item_addr(intcounter)},
997 {"psmode", item_size(psmode), item_addr(psmode)}, 827 {"psmode", item_size(psmode), item_addr(psmode)},
998 {"psstate", item_size(psstate), item_addr(psstate)}, 828 {"psstate", item_size(psstate), item_addr(psstate)},
999}; 829};
@@ -1121,7 +951,7 @@ static struct file_operations lbs_debug_fops = {
1121 * @param dev pointer net_device 951 * @param dev pointer net_device
1122 * @return N/A 952 * @return N/A
1123 */ 953 */
1124static void lbs_debug_init(struct lbs_private *priv, struct net_device *dev) 954static void lbs_debug_init(struct lbs_private *priv)
1125{ 955{
1126 int i; 956 int i;
1127 957
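
The removed debugfs helpers above parsed "key=value" tokens out of a user-supplied buffer with strstr() and sscanf(). A small self-contained sketch of that parsing pattern (the buffer contents and helper name are illustrative, not the driver's code):

#include <stdio.h>
#include <string.h>

/* Parse an optional "bssid=xx:xx:xx:xx:xx:xx" token out of a user-supplied
 * buffer, in the strstr()+sscanf() style of the removed debugfs helpers. */
static int parse_bssid(const char *buf, unsigned char out[6])
{
	unsigned int mac[6];
	const char *p = strstr(buf, "bssid=");
	int i;

	if (!p)
		return -1;
	if (sscanf(p + 6, "%02x:%02x:%02x:%02x:%02x:%02x",
		   &mac[0], &mac[1], &mac[2], &mac[3], &mac[4], &mac[5]) != 6)
		return -1;
	for (i = 0; i < 6; i++)
		out[i] = (unsigned char)mac[i];
	return 0;
}

int main(void)
{
	unsigned char bssid[6];

	if (!parse_bssid("ssid=test bssid=00:16:cb:01:02:03 dur=100", bssid))
		printf("bssid %02x:%02x:%02x:%02x:%02x:%02x\n",
		       bssid[0], bssid[1], bssid[2], bssid[3], bssid[4], bssid[5]);
	return 0;
}
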
diff --git a/drivers/net/wireless/libertas/decl.h b/drivers/net/wireless/libertas/decl.h
index 4e22341b4f3d..b652fa301e19 100644
--- a/drivers/net/wireless/libertas/decl.h
+++ b/drivers/net/wireless/libertas/decl.h
@@ -17,9 +17,9 @@ struct net_device;
17struct cmd_ctrl_node; 17struct cmd_ctrl_node;
18struct cmd_ds_command; 18struct cmd_ds_command;
19 19
20int lbs_set_mac_packet_filter(struct lbs_private *priv); 20void lbs_set_mac_control(struct lbs_private *priv);
21 21
22void lbs_send_tx_feedback(struct lbs_private *priv); 22void lbs_send_tx_feedback(struct lbs_private *priv, u32 try_count);
23 23
24int lbs_free_cmd_buffer(struct lbs_private *priv); 24int lbs_free_cmd_buffer(struct lbs_private *priv);
25 25
@@ -30,17 +30,16 @@ int lbs_prepare_and_send_command(struct lbs_private *priv,
30 30
31int lbs_allocate_cmd_buffer(struct lbs_private *priv); 31int lbs_allocate_cmd_buffer(struct lbs_private *priv);
32int lbs_execute_next_command(struct lbs_private *priv); 32int lbs_execute_next_command(struct lbs_private *priv);
33int lbs_process_event(struct lbs_private *priv); 33int lbs_process_event(struct lbs_private *priv, u32 event);
34void lbs_interrupt(struct lbs_private *priv); 34void lbs_queue_event(struct lbs_private *priv, u32 event);
35void lbs_notify_command_response(struct lbs_private *priv, u8 resp_idx);
36
35int lbs_set_radio_control(struct lbs_private *priv); 37int lbs_set_radio_control(struct lbs_private *priv);
36u32 lbs_fw_index_to_data_rate(u8 index); 38u32 lbs_fw_index_to_data_rate(u8 index);
37u8 lbs_data_rate_to_fw_index(u32 rate); 39u8 lbs_data_rate_to_fw_index(u32 rate);
38void lbs_get_fwversion(struct lbs_private *priv,
39 char *fwversion,
40 int maxlen);
41 40
42/** The proc fs interface */ 41/** The proc fs interface */
43int lbs_process_rx_command(struct lbs_private *priv); 42int lbs_process_command_response(struct lbs_private *priv, u8 *data, u32 len);
44void lbs_complete_command(struct lbs_private *priv, struct cmd_ctrl_node *cmd, 43void lbs_complete_command(struct lbs_private *priv, struct cmd_ctrl_node *cmd,
45 int result); 44 int result);
46int lbs_hard_start_xmit(struct sk_buff *skb, struct net_device *dev); 45int lbs_hard_start_xmit(struct sk_buff *skb, struct net_device *dev);
@@ -49,7 +48,7 @@ int lbs_set_regiontable(struct lbs_private *priv, u8 region, u8 band);
49int lbs_process_rxed_packet(struct lbs_private *priv, struct sk_buff *); 48int lbs_process_rxed_packet(struct lbs_private *priv, struct sk_buff *);
50 49
51void lbs_ps_sleep(struct lbs_private *priv, int wait_option); 50void lbs_ps_sleep(struct lbs_private *priv, int wait_option);
52void lbs_ps_confirm_sleep(struct lbs_private *priv, u16 psmode); 51void lbs_ps_confirm_sleep(struct lbs_private *priv);
53void lbs_ps_wakeup(struct lbs_private *priv, int wait_option); 52void lbs_ps_wakeup(struct lbs_private *priv, int wait_option);
54 53
55struct chan_freq_power *lbs_find_cfp_by_band_and_channel( 54struct chan_freq_power *lbs_find_cfp_by_band_and_channel(
@@ -63,7 +62,6 @@ void lbs_send_iwevcustom_event(struct lbs_private *priv, s8 *str);
63 62
64/* main.c */ 63/* main.c */
65struct chan_freq_power *lbs_get_region_cfp_table(u8 region, 64struct chan_freq_power *lbs_get_region_cfp_table(u8 region,
66 u8 band,
67 int *cfp_no); 65 int *cfp_no);
68struct lbs_private *lbs_add_card(void *card, struct device *dmdev); 66struct lbs_private *lbs_add_card(void *card, struct device *dmdev);
69int lbs_remove_card(struct lbs_private *priv); 67int lbs_remove_card(struct lbs_private *priv);
@@ -72,4 +70,9 @@ int lbs_stop_card(struct lbs_private *priv);
72void lbs_host_to_card_done(struct lbs_private *priv); 70void lbs_host_to_card_done(struct lbs_private *priv);
73 71
74int lbs_update_channel(struct lbs_private *priv); 72int lbs_update_channel(struct lbs_private *priv);
73
74#ifndef CONFIG_IEEE80211
75const char *escape_essid(const char *essid, u8 essid_len);
76#endif
77
75#endif 78#endif
diff --git a/drivers/net/wireless/libertas/defs.h b/drivers/net/wireless/libertas/defs.h
index 3053cc2160bc..d39520111062 100644
--- a/drivers/net/wireless/libertas/defs.h
+++ b/drivers/net/wireless/libertas/defs.h
@@ -53,14 +53,14 @@ do { if ((lbs_debug & (grp)) == (grp)) \
53#endif 53#endif
54 54
55#define lbs_deb_enter(grp) \ 55#define lbs_deb_enter(grp) \
56 LBS_DEB_LL(grp | LBS_DEB_ENTER, " enter", "%s():%d\n", __FUNCTION__, __LINE__); 56 LBS_DEB_LL(grp | LBS_DEB_ENTER, " enter", "%s()\n", __func__);
57#define lbs_deb_enter_args(grp, fmt, args...) \ 57#define lbs_deb_enter_args(grp, fmt, args...) \
58 LBS_DEB_LL(grp | LBS_DEB_ENTER, " enter", "%s(" fmt "):%d\n", __FUNCTION__, ## args, __LINE__); 58 LBS_DEB_LL(grp | LBS_DEB_ENTER, " enter", "%s(" fmt ")\n", __func__, ## args);
59#define lbs_deb_leave(grp) \ 59#define lbs_deb_leave(grp) \
60 LBS_DEB_LL(grp | LBS_DEB_LEAVE, " leave", "%s():%d\n", __FUNCTION__, __LINE__); 60 LBS_DEB_LL(grp | LBS_DEB_LEAVE, " leave", "%s()\n", __func__);
61#define lbs_deb_leave_args(grp, fmt, args...) \ 61#define lbs_deb_leave_args(grp, fmt, args...) \
62 LBS_DEB_LL(grp | LBS_DEB_LEAVE, " leave", "%s():%d, " fmt "\n", \ 62 LBS_DEB_LL(grp | LBS_DEB_LEAVE, " leave", "%s(), " fmt "\n", \
63 __FUNCTION__, __LINE__, ##args); 63 __func__, ##args);
64#define lbs_deb_main(fmt, args...) LBS_DEB_LL(LBS_DEB_MAIN, " main", fmt, ##args) 64#define lbs_deb_main(fmt, args...) LBS_DEB_LL(LBS_DEB_MAIN, " main", fmt, ##args)
65#define lbs_deb_net(fmt, args...) LBS_DEB_LL(LBS_DEB_NET, " net", fmt, ##args) 65#define lbs_deb_net(fmt, args...) LBS_DEB_LL(LBS_DEB_NET, " net", fmt, ##args)
66#define lbs_deb_mesh(fmt, args...) LBS_DEB_LL(LBS_DEB_MESH, " mesh", fmt, ##args) 66#define lbs_deb_mesh(fmt, args...) LBS_DEB_LL(LBS_DEB_MESH, " mesh", fmt, ##args)
@@ -177,8 +177,6 @@ static inline void lbs_deb_hex(unsigned int grp, const char *prompt, u8 *buf, in
177#define MRVDRV_CMD_UPLD_RDY 0x0008 177#define MRVDRV_CMD_UPLD_RDY 0x0008
178#define MRVDRV_CARDEVENT 0x0010 178#define MRVDRV_CARDEVENT 0x0010
179 179
180#define SBI_EVENT_CAUSE_SHIFT 3
181
182/** TxPD status */ 180/** TxPD status */
183 181
184/* Station firmware use TxPD status field to report final Tx transmit 182/* Station firmware use TxPD status field to report final Tx transmit
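
The defs.h hunk switches the trace macros from __FUNCTION__/__LINE__ to the standard __func__. A hedged standalone sketch of that macro style (printf stands in for the driver's logging; the ##args comma-swallowing is a GNU extension, as in the original):

#include <stdio.h>

/* Enter/leave tracing macros in the style the hunk above switches to:
 * __func__ (standard C99) instead of the GCC-specific __FUNCTION__,
 * and no __LINE__ in the message. */
#define deb_enter_args(fmt, args...) \
	printf(" enter %s(" fmt ")\n", __func__, ##args)
#define deb_leave_args(fmt, args...) \
	printf(" leave %s(), " fmt "\n", __func__, ##args)

static int demo(int x)
{
	deb_enter_args("x %d", x);
	deb_leave_args("ret %d", x * 2);
	return x * 2;
}

int main(void)
{
	demo(21);
	return 0;
}
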
diff --git a/drivers/net/wireless/libertas/dev.h b/drivers/net/wireless/libertas/dev.h
index 5a69f2b60865..0d9edb9b11f5 100644
--- a/drivers/net/wireless/libertas/dev.h
+++ b/drivers/net/wireless/libertas/dev.h
@@ -10,9 +10,10 @@
10#include <linux/wireless.h> 10#include <linux/wireless.h>
11#include <linux/ethtool.h> 11#include <linux/ethtool.h>
12#include <linux/debugfs.h> 12#include <linux/debugfs.h>
13#include <net/ieee80211.h>
13 14
14#include "defs.h" 15#include "defs.h"
15#include "scan.h" 16#include "hostcmd.h"
16 17
17extern struct ethtool_ops lbs_ethtool_ops; 18extern struct ethtool_ops lbs_ethtool_ops;
18 19
@@ -128,10 +129,6 @@ struct lbs_private {
128 u32 bbp_offset; 129 u32 bbp_offset;
129 u32 rf_offset; 130 u32 rf_offset;
130 131
131 /** Upload length */
132 u32 upld_len;
133 /* Upload buffer */
134 u8 upld_buf[LBS_UPLD_SIZE];
135 /* Download sent: 132 /* Download sent:
136 bit0 1/0=data_sent/data_tx_done, 133 bit0 1/0=data_sent/data_tx_done,
137 bit1 1/0=cmd_sent/cmd_tx_done, 134 bit1 1/0=cmd_sent/cmd_tx_done,
@@ -143,27 +140,27 @@ struct lbs_private {
143 wait_queue_head_t waitq; 140 wait_queue_head_t waitq;
144 struct workqueue_struct *work_thread; 141 struct workqueue_struct *work_thread;
145 142
143 /** Scanning */
146 struct delayed_work scan_work; 144 struct delayed_work scan_work;
147 struct delayed_work assoc_work; 145 struct delayed_work assoc_work;
148 struct work_struct sync_channel; 146 struct work_struct sync_channel;
147 /* remember which channel was scanned last, != 0 if currently scanning */
148 int scan_channel;
149 u8 scan_ssid[IW_ESSID_MAX_SIZE + 1];
150 u8 scan_ssid_len;
149 151
150 /** Hardware access */ 152 /** Hardware access */
151 int (*hw_host_to_card) (struct lbs_private *priv, u8 type, u8 *payload, u16 nb); 153 int (*hw_host_to_card) (struct lbs_private *priv, u8 type, u8 *payload, u16 nb);
152 int (*hw_get_int_status) (struct lbs_private *priv, u8 *);
153 int (*hw_read_event_cause) (struct lbs_private *);
154 154
155 /* Wake On LAN */ 155 /* Wake On LAN */
156 uint32_t wol_criteria; 156 uint32_t wol_criteria;
157 uint8_t wol_gpio; 157 uint8_t wol_gpio;
158 uint8_t wol_gap; 158 uint8_t wol_gap;
159 159
160 /* was struct lbs_adapter from here... */
161
162 /** Wlan adapter data structure*/ 160 /** Wlan adapter data structure*/
163 /** STATUS variables */ 161 /** STATUS variables */
164 u32 fwrelease; 162 u32 fwrelease;
165 u32 fwcapinfo; 163 u32 fwcapinfo;
166 /* protected with big lock */
167 164
168 struct mutex lock; 165 struct mutex lock;
169 166
@@ -175,7 +172,6 @@ struct lbs_private {
175 172
176 /** command-related variables */ 173 /** command-related variables */
177 u16 seqnum; 174 u16 seqnum;
178 /* protected by big lock */
179 175
180 struct cmd_ctrl_node *cmd_array; 176 struct cmd_ctrl_node *cmd_array;
181 /** Current command */ 177 /** Current command */
@@ -188,12 +184,17 @@ struct lbs_private {
188 struct list_head cmdpendingq; 184 struct list_head cmdpendingq;
189 185
190 wait_queue_head_t cmd_pending; 186 wait_queue_head_t cmd_pending;
191 /* command related variables protected by priv->driver_lock */
192 187
193 /** Async and Sync Event variables */ 188 /* Command responses sent from the hardware to the driver */
194 u32 intcounter; 189 u8 resp_idx;
195 u32 eventcause; 190 u8 resp_buf[2][LBS_UPLD_SIZE];
196 u8 nodename[16]; /* nickname */ 191 u32 resp_len[2];
192
193 /* Events sent from hardware to driver */
194 struct kfifo *event_fifo;
195
196 /* nickname */
197 u8 nodename[16];
197 198
198 /** spin locks */ 199 /** spin locks */
199 spinlock_t driver_lock; 200 spinlock_t driver_lock;
@@ -203,8 +204,6 @@ struct lbs_private {
203 int nr_retries; 204 int nr_retries;
204 int cmd_timed_out; 205 int cmd_timed_out;
205 206
206 u8 hisregcpy;
207
208 /** current ssid/bssid related parameters*/ 207 /** current ssid/bssid related parameters*/
209 struct current_bss_params curbssparams; 208 struct current_bss_params curbssparams;
210 209
@@ -247,7 +246,7 @@ struct lbs_private {
247 struct sk_buff *currenttxskb; 246 struct sk_buff *currenttxskb;
248 247
249 /** NIC Operation characteristics */ 248 /** NIC Operation characteristics */
250 u16 currentpacketfilter; 249 u16 mac_control;
251 u32 connect_status; 250 u32 connect_status;
252 u32 mesh_connect_status; 251 u32 mesh_connect_status;
253 u16 regioncode; 252 u16 regioncode;
@@ -262,9 +261,6 @@ struct lbs_private {
262 char ps_supported; 261 char ps_supported;
263 u8 needtowakeup; 262 u8 needtowakeup;
264 263
265 struct PS_CMD_ConfirmSleep lbs_ps_confirm_sleep;
266 struct cmd_header lbs_ps_confirm_wake;
267
268 struct assoc_request * pending_assoc_req; 264 struct assoc_request * pending_assoc_req;
269 struct assoc_request * in_progress_assoc_req; 265 struct assoc_request * in_progress_assoc_req;
270 266
@@ -315,16 +311,52 @@ struct lbs_private {
315 u32 enable11d; 311 u32 enable11d;
316 312
317 /** MISCELLANEOUS */ 313 /** MISCELLANEOUS */
318 u8 *prdeeprom;
319 struct lbs_offset_value offsetvalue; 314 struct lbs_offset_value offsetvalue;
320 315
321 struct cmd_ds_802_11_get_log logmsg;
322
323 u32 monitormode; 316 u32 monitormode;
324 int last_scanned_channel;
325 u8 fw_ready; 317 u8 fw_ready;
326}; 318};
327 319
320extern struct cmd_confirm_sleep confirm_sleep;
321
322/**
323 * @brief Structure used to store information for each beacon/probe response
324 */
325struct bss_descriptor {
326 u8 bssid[ETH_ALEN];
327
328 u8 ssid[IW_ESSID_MAX_SIZE + 1];
329 u8 ssid_len;
330
331 u16 capability;
332 u32 rssi;
333 u32 channel;
334 u16 beaconperiod;
335 u32 atimwindow;
336
337 /* IW_MODE_AUTO, IW_MODE_ADHOC, IW_MODE_INFRA */
338 u8 mode;
339
340 /* zero-terminated array of supported data rates */
341 u8 rates[MAX_RATES + 1];
342
343 unsigned long last_scanned;
344
345 union ieeetypes_phyparamset phyparamset;
346 union IEEEtypes_ssparamset ssparamset;
347
348 struct ieeetypes_countryinfofullset countryinfo;
349
350 u8 wpa_ie[MAX_WPA_IE_LEN];
351 size_t wpa_ie_len;
352 u8 rsn_ie[MAX_WPA_IE_LEN];
353 size_t rsn_ie_len;
354
355 u8 mesh;
356
357 struct list_head list;
358};
359
328/** Association request 360/** Association request
329 * 361 *
 330 * Encapsulates all the options that describe a specific association request 362
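
The new resp_idx/resp_buf[2]/resp_len[2] fields in lbs_private implement a two-slot command-response handoff: the interface code fills whichever slot is not resp_idx and then calls lbs_notify_command_response(). The consumer side and the notify step are not shown in this diff, so the sketch below is only an illustration of the pattern under stated assumptions, with a pthread mutex standing in for driver_lock and the producer itself flipping resp_idx as the "notify":

#include <pthread.h>
#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define UPLD_SIZE 2312

/* Two response slots: the producer (interrupt/URB context in the driver)
 * always fills the slot that is NOT resp_idx; the consumer picks up the
 * filled slot and marks it free again. */
struct resp_slots {
	pthread_mutex_t lock;
	uint8_t resp_idx;
	uint8_t resp_buf[2][UPLD_SIZE];
	uint32_t resp_len[2];
};

static void producer_store(struct resp_slots *s, const void *data, uint32_t len)
{
	pthread_mutex_lock(&s->lock);
	uint8_t i = (s->resp_idx == 0) ? 1 : 0;	/* the free slot */
	s->resp_len[i] = len;
	memcpy(s->resp_buf[i], data, len);
	s->resp_idx = i;			/* assumed "notify" step */
	pthread_mutex_unlock(&s->lock);
}

static void consumer_process(struct resp_slots *s)
{
	pthread_mutex_lock(&s->lock);
	uint8_t i = s->resp_idx;
	uint32_t len = s->resp_len[i];
	s->resp_len[i] = 0;			/* mark the slot free again */
	pthread_mutex_unlock(&s->lock);

	if (len)
		printf("processing %u byte response from slot %u\n", len, i);
}

int main(void)
{
	struct resp_slots s = { .lock = PTHREAD_MUTEX_INITIALIZER };

	producer_store(&s, "\x01\x02\x03\x04", 4);
	consumer_process(&s);
	return 0;
}
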
diff --git a/drivers/net/wireless/libertas/ethtool.c b/drivers/net/wireless/libertas/ethtool.c
index 21e6f988ea81..dcfdb404678b 100644
--- a/drivers/net/wireless/libertas/ethtool.c
+++ b/drivers/net/wireless/libertas/ethtool.c
@@ -6,7 +6,6 @@
6#include "decl.h" 6#include "decl.h"
7#include "defs.h" 7#include "defs.h"
8#include "dev.h" 8#include "dev.h"
9#include "join.h"
10#include "wext.h" 9#include "wext.h"
11#include "cmd.h" 10#include "cmd.h"
12 11
@@ -25,13 +24,14 @@ static void lbs_ethtool_get_drvinfo(struct net_device *dev,
25 struct ethtool_drvinfo *info) 24 struct ethtool_drvinfo *info)
26{ 25{
27 struct lbs_private *priv = (struct lbs_private *) dev->priv; 26 struct lbs_private *priv = (struct lbs_private *) dev->priv;
28 char fwver[32];
29
30 lbs_get_fwversion(priv, fwver, sizeof(fwver) - 1);
31 27
28 snprintf(info->fw_version, 32, "%u.%u.%u.p%u",
29 priv->fwrelease >> 24 & 0xff,
30 priv->fwrelease >> 16 & 0xff,
31 priv->fwrelease >> 8 & 0xff,
32 priv->fwrelease & 0xff);
32 strcpy(info->driver, "libertas"); 33 strcpy(info->driver, "libertas");
33 strcpy(info->version, lbs_driver_version); 34 strcpy(info->version, lbs_driver_version);
34 strcpy(info->fw_version, fwver);
35} 35}
36 36
37/* All 8388 parts have 16KiB EEPROM size at the time of writing. 37/* All 8388 parts have 16KiB EEPROM size at the time of writing.
@@ -48,61 +48,28 @@ static int lbs_ethtool_get_eeprom(struct net_device *dev,
48 struct ethtool_eeprom *eeprom, u8 * bytes) 48 struct ethtool_eeprom *eeprom, u8 * bytes)
49{ 49{
50 struct lbs_private *priv = (struct lbs_private *) dev->priv; 50 struct lbs_private *priv = (struct lbs_private *) dev->priv;
51 struct lbs_ioctl_regrdwr regctrl; 51 struct cmd_ds_802_11_eeprom_access cmd;
52 char *ptr;
53 int ret; 52 int ret;
54 53
55 regctrl.action = 0; 54 lbs_deb_enter(LBS_DEB_ETHTOOL);
56 regctrl.offset = eeprom->offset;
57 regctrl.NOB = eeprom->len;
58
59 if (eeprom->offset + eeprom->len > LBS_EEPROM_LEN)
60 return -EINVAL;
61
62// mutex_lock(&priv->mutex);
63
64 priv->prdeeprom = kmalloc(eeprom->len+sizeof(regctrl), GFP_KERNEL);
65 if (!priv->prdeeprom)
66 return -ENOMEM;
67 memcpy(priv->prdeeprom, &regctrl, sizeof(regctrl));
68
69 /* +14 is for action, offset, and NOB in
70 * response */
71 lbs_deb_ethtool("action:%d offset: %x NOB: %02x\n",
72 regctrl.action, regctrl.offset, regctrl.NOB);
73 55
74 ret = lbs_prepare_and_send_command(priv, 56 if (eeprom->offset + eeprom->len > LBS_EEPROM_LEN ||
75 CMD_802_11_EEPROM_ACCESS, 57 eeprom->len > LBS_EEPROM_READ_LEN) {
76 regctrl.action, 58 ret = -EINVAL;
77 CMD_OPTION_WAITFORRSP, 0, 59 goto out;
78 &regctrl);
79
80 if (ret) {
81 if (priv->prdeeprom)
82 kfree(priv->prdeeprom);
83 goto done;
84 } 60 }
85 61
86 mdelay(10); 62 cmd.hdr.size = cpu_to_le16(sizeof(struct cmd_ds_802_11_eeprom_access) -
87 63 LBS_EEPROM_READ_LEN + eeprom->len);
88 ptr = (char *)priv->prdeeprom; 64 cmd.action = cpu_to_le16(CMD_ACT_GET);
89 65 cmd.offset = cpu_to_le16(eeprom->offset);
90 /* skip the command header, but include the "value" u32 variable */ 66 cmd.len = cpu_to_le16(eeprom->len);
91 ptr = ptr + sizeof(struct lbs_ioctl_regrdwr) - 4; 67 ret = lbs_cmd_with_response(priv, CMD_802_11_EEPROM_ACCESS, &cmd);
92 68 if (!ret)
93 /* 69 memcpy(bytes, cmd.value, eeprom->len);
94 * Return the result back to the user 70
95 */ 71out:
96 memcpy(bytes, ptr, eeprom->len); 72 lbs_deb_leave_args(LBS_DEB_ETHTOOL, "ret %d", ret);
97
98 if (priv->prdeeprom)
99 kfree(priv->prdeeprom);
100// mutex_unlock(&priv->mutex);
101
102 ret = 0;
103
104done:
105 lbs_deb_enter_args(LBS_DEB_ETHTOOL, "ret %d", ret);
106 return ret; 73 return ret;
107} 74}
108 75
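
In the new lbs_ethtool_get_eeprom(), the command carries a fixed maximum-sized value buffer and the header size is trimmed to sizeof(cmd) - LBS_EEPROM_READ_LEN + eeprom->len so only the requested bytes go on the wire. A simplified sketch of that sizing arithmetic (the struct here collapses cmd_header to a single size field and is not the real wire format; the real 16-bit fields are little-endian):

#include <stdio.h>
#include <stdint.h>

#define EEPROM_READ_MAX 20	/* mirrors LBS_EEPROM_READ_LEN in the hunk */

/* Request with a fixed, maximum-sized value buffer; the size reported in
 * the header is trimmed to the bytes actually requested. */
struct eeprom_access {
	uint16_t hdr_size;
	uint16_t action;
	uint16_t offset;
	uint16_t len;
	uint8_t value[EEPROM_READ_MAX];
};

int main(void)
{
	struct eeprom_access cmd;
	uint16_t want = 6;	/* read 6 bytes */

	cmd.hdr_size = (uint16_t)(sizeof(cmd) - EEPROM_READ_MAX + want);
	cmd.len = want;
	printf("struct size %zu, size field %u\n",
	       sizeof(cmd), (unsigned)cmd.hdr_size);
	return 0;
}
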
diff --git a/drivers/net/wireless/libertas/host.h b/drivers/net/wireless/libertas/host.h
index 1aa04076b1ac..3915c3144fad 100644
--- a/drivers/net/wireless/libertas/host.h
+++ b/drivers/net/wireless/libertas/host.h
@@ -33,7 +33,6 @@
33#define CMD_RET_802_11_ASSOCIATE 0x8012 33#define CMD_RET_802_11_ASSOCIATE 0x8012
34 34
35/* Command codes */ 35/* Command codes */
36#define CMD_CODE_DNLD 0x0002
37#define CMD_GET_HW_SPEC 0x0003 36#define CMD_GET_HW_SPEC 0x0003
38#define CMD_EEPROM_UPDATE 0x0004 37#define CMD_EEPROM_UPDATE 0x0004
39#define CMD_802_11_RESET 0x0005 38#define CMD_802_11_RESET 0x0005
@@ -68,8 +67,6 @@
68#define CMD_802_11_AD_HOC_JOIN 0x002c 67#define CMD_802_11_AD_HOC_JOIN 0x002c
69#define CMD_802_11_QUERY_TKIP_REPLY_CNTRS 0x002e 68#define CMD_802_11_QUERY_TKIP_REPLY_CNTRS 0x002e
70#define CMD_802_11_ENABLE_RSN 0x002f 69#define CMD_802_11_ENABLE_RSN 0x002f
71#define CMD_802_11_PAIRWISE_TSC 0x0036
72#define CMD_802_11_GROUP_TSC 0x0037
73#define CMD_802_11_SET_AFC 0x003c 70#define CMD_802_11_SET_AFC 0x003c
74#define CMD_802_11_GET_AFC 0x003d 71#define CMD_802_11_GET_AFC 0x003d
75#define CMD_802_11_AD_HOC_STOP 0x0040 72#define CMD_802_11_AD_HOC_STOP 0x0040
@@ -87,7 +84,6 @@
87#define CMD_802_11_INACTIVITY_TIMEOUT 0x0067 84#define CMD_802_11_INACTIVITY_TIMEOUT 0x0067
88#define CMD_802_11_SLEEP_PERIOD 0x0068 85#define CMD_802_11_SLEEP_PERIOD 0x0068
89#define CMD_802_11_TPC_CFG 0x0072 86#define CMD_802_11_TPC_CFG 0x0072
90#define CMD_802_11_PWR_CFG 0x0073
91#define CMD_802_11_FW_WAKE_METHOD 0x0074 87#define CMD_802_11_FW_WAKE_METHOD 0x0074
92#define CMD_802_11_SUBSCRIBE_EVENT 0x0075 88#define CMD_802_11_SUBSCRIBE_EVENT 0x0075
93#define CMD_802_11_RATE_ADAPT_RATESET 0x0076 89#define CMD_802_11_RATE_ADAPT_RATESET 0x0076
diff --git a/drivers/net/wireless/libertas/hostcmd.h b/drivers/net/wireless/libertas/hostcmd.h
index d35b015b6657..f29bc5bbda3e 100644
--- a/drivers/net/wireless/libertas/hostcmd.h
+++ b/drivers/net/wireless/libertas/hostcmd.h
@@ -174,9 +174,11 @@ struct cmd_ds_802_11_subscribe_event {
174 * Define data structure for CMD_802_11_SCAN 174 * Define data structure for CMD_802_11_SCAN
175 */ 175 */
176struct cmd_ds_802_11_scan { 176struct cmd_ds_802_11_scan {
177 u8 bsstype; 177 struct cmd_header hdr;
178 u8 bssid[ETH_ALEN]; 178
179 u8 tlvbuffer[1]; 179 uint8_t bsstype;
180 uint8_t bssid[ETH_ALEN];
181 uint8_t tlvbuffer[0];
180#if 0 182#if 0
181 mrvlietypes_ssidparamset_t ssidParamSet; 183 mrvlietypes_ssidparamset_t ssidParamSet;
182 mrvlietypes_chanlistparamset_t ChanListParamSet; 184 mrvlietypes_chanlistparamset_t ChanListParamSet;
@@ -185,12 +187,16 @@ struct cmd_ds_802_11_scan {
185}; 187};
186 188
187struct cmd_ds_802_11_scan_rsp { 189struct cmd_ds_802_11_scan_rsp {
190 struct cmd_header hdr;
191
188 __le16 bssdescriptsize; 192 __le16 bssdescriptsize;
189 u8 nr_sets; 193 uint8_t nr_sets;
190 u8 bssdesc_and_tlvbuffer[1]; 194 uint8_t bssdesc_and_tlvbuffer[0];
191}; 195};
192 196
193struct cmd_ds_802_11_get_log { 197struct cmd_ds_802_11_get_log {
198 struct cmd_header hdr;
199
194 __le32 mcasttxframe; 200 __le32 mcasttxframe;
195 __le32 failed; 201 __le32 failed;
196 __le32 retry; 202 __le32 retry;
@@ -207,8 +213,9 @@ struct cmd_ds_802_11_get_log {
207}; 213};
208 214
209struct cmd_ds_mac_control { 215struct cmd_ds_mac_control {
216 struct cmd_header hdr;
210 __le16 action; 217 __le16 action;
211 __le16 reserved; 218 u16 reserved;
212}; 219};
213 220
214struct cmd_ds_mac_multicast_adr { 221struct cmd_ds_mac_multicast_adr {
@@ -420,6 +427,8 @@ struct cmd_ds_802_11_rssi_rsp {
420}; 427};
421 428
422struct cmd_ds_802_11_mac_address { 429struct cmd_ds_802_11_mac_address {
430 struct cmd_header hdr;
431
423 __le16 action; 432 __le16 action;
424 u8 macadd[ETH_ALEN]; 433 u8 macadd[ETH_ALEN];
425}; 434};
@@ -471,14 +480,11 @@ struct cmd_ds_802_11_ps_mode {
471 __le16 locallisteninterval; 480 __le16 locallisteninterval;
472}; 481};
473 482
474struct PS_CMD_ConfirmSleep { 483struct cmd_confirm_sleep {
475 __le16 command; 484 struct cmd_header hdr;
476 __le16 size;
477 __le16 seqnum;
478 __le16 result;
479 485
480 __le16 action; 486 __le16 action;
481 __le16 reserved1; 487 __le16 nullpktinterval;
482 __le16 multipledtim; 488 __le16 multipledtim;
483 __le16 reserved; 489 __le16 reserved;
484 __le16 locallisteninterval; 490 __le16 locallisteninterval;
@@ -572,17 +578,20 @@ struct cmd_ds_host_sleep {
572} __attribute__ ((packed)); 578} __attribute__ ((packed));
573 579
574struct cmd_ds_802_11_key_material { 580struct cmd_ds_802_11_key_material {
581 struct cmd_header hdr;
582
575 __le16 action; 583 __le16 action;
576 struct MrvlIEtype_keyParamSet keyParamSet[2]; 584 struct MrvlIEtype_keyParamSet keyParamSet[2];
577} __attribute__ ((packed)); 585} __attribute__ ((packed));
578 586
579struct cmd_ds_802_11_eeprom_access { 587struct cmd_ds_802_11_eeprom_access {
588 struct cmd_header hdr;
580 __le16 action; 589 __le16 action;
581
582 /* multiple 4 */
583 __le16 offset; 590 __le16 offset;
584 __le16 bytecount; 591 __le16 len;
585 u8 value; 592 /* firmware says it returns a maximum of 20 bytes */
593#define LBS_EEPROM_READ_LEN 20
594 u8 value[LBS_EEPROM_READ_LEN];
586} __attribute__ ((packed)); 595} __attribute__ ((packed));
587 596
588struct cmd_ds_802_11_tpc_cfg { 597struct cmd_ds_802_11_tpc_cfg {
@@ -600,14 +609,6 @@ struct cmd_ds_802_11_led_ctrl {
600 u8 data[256]; 609 u8 data[256];
601} __attribute__ ((packed)); 610} __attribute__ ((packed));
602 611
603struct cmd_ds_802_11_pwr_cfg {
604 __le16 action;
605 u8 enable;
606 s8 PA_P0;
607 s8 PA_P1;
608 s8 PA_P2;
609} __attribute__ ((packed));
610
611struct cmd_ds_802_11_afc { 612struct cmd_ds_802_11_afc {
612 __le16 afc_auto; 613 __le16 afc_auto;
613 union { 614 union {
@@ -689,15 +690,11 @@ struct cmd_ds_command {
689 /* command Body */ 690 /* command Body */
690 union { 691 union {
691 struct cmd_ds_802_11_ps_mode psmode; 692 struct cmd_ds_802_11_ps_mode psmode;
692 struct cmd_ds_802_11_scan scan;
693 struct cmd_ds_802_11_scan_rsp scanresp;
694 struct cmd_ds_mac_control macctrl;
695 struct cmd_ds_802_11_associate associate; 693 struct cmd_ds_802_11_associate associate;
696 struct cmd_ds_802_11_deauthenticate deauth; 694 struct cmd_ds_802_11_deauthenticate deauth;
697 struct cmd_ds_802_11_ad_hoc_start ads; 695 struct cmd_ds_802_11_ad_hoc_start ads;
698 struct cmd_ds_802_11_reset reset; 696 struct cmd_ds_802_11_reset reset;
699 struct cmd_ds_802_11_ad_hoc_result result; 697 struct cmd_ds_802_11_ad_hoc_result result;
700 struct cmd_ds_802_11_get_log glog;
701 struct cmd_ds_802_11_authenticate auth; 698 struct cmd_ds_802_11_authenticate auth;
702 struct cmd_ds_802_11_get_stat gstat; 699 struct cmd_ds_802_11_get_stat gstat;
703 struct cmd_ds_802_3_get_stat gstat_8023; 700 struct cmd_ds_802_3_get_stat gstat_8023;
@@ -711,18 +708,14 @@ struct cmd_ds_command {
711 struct cmd_ds_802_11_rssi rssi; 708 struct cmd_ds_802_11_rssi rssi;
712 struct cmd_ds_802_11_rssi_rsp rssirsp; 709 struct cmd_ds_802_11_rssi_rsp rssirsp;
713 struct cmd_ds_802_11_disassociate dassociate; 710 struct cmd_ds_802_11_disassociate dassociate;
714 struct cmd_ds_802_11_mac_address macadd;
715 struct cmd_ds_802_11_key_material keymaterial;
716 struct cmd_ds_mac_reg_access macreg; 711 struct cmd_ds_mac_reg_access macreg;
717 struct cmd_ds_bbp_reg_access bbpreg; 712 struct cmd_ds_bbp_reg_access bbpreg;
718 struct cmd_ds_rf_reg_access rfreg; 713 struct cmd_ds_rf_reg_access rfreg;
719 struct cmd_ds_802_11_eeprom_access rdeeprom;
720 714
721 struct cmd_ds_802_11d_domain_info domaininfo; 715 struct cmd_ds_802_11d_domain_info domaininfo;
722 struct cmd_ds_802_11d_domain_info domaininforesp; 716 struct cmd_ds_802_11d_domain_info domaininforesp;
723 717
724 struct cmd_ds_802_11_tpc_cfg tpccfg; 718 struct cmd_ds_802_11_tpc_cfg tpccfg;
725 struct cmd_ds_802_11_pwr_cfg pwrcfg;
726 struct cmd_ds_802_11_afc afc; 719 struct cmd_ds_802_11_afc afc;
727 struct cmd_ds_802_11_led_ctrl ledgpio; 720 struct cmd_ds_802_11_led_ctrl ledgpio;
728 721
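
Several command structures above gain a leading struct cmd_header and switch their trailing buffers from a one-byte array to a zero-length array, so sizeof() no longer counts a phantom payload byte. A short sketch of allocating such a variable-tail structure (a simplified stand-in for cmd_ds_802_11_scan, not the actual command layout):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

/* A command with a variable-length TLV tail, using a zero-length trailing
 * array as in the reworked cmd_ds_802_11_scan (a GNU C extension; plain
 * C99 would spell it "uint8_t tlvbuffer[]").  With [0] instead of [1],
 * the payload size arithmetic is exact. */
struct scan_cmd {
	uint8_t bsstype;
	uint8_t bssid[6];
	uint8_t tlvbuffer[0];
};

int main(void)
{
	size_t tlv_len = 16;
	struct scan_cmd *cmd = malloc(sizeof(*cmd) + tlv_len);

	if (!cmd)
		return 1;
	memset(cmd, 0, sizeof(*cmd) + tlv_len);
	cmd->bsstype = 3;
	memset(cmd->tlvbuffer, 0xAA, tlv_len);
	printf("header %zu bytes + %zu bytes of TLVs\n", sizeof(*cmd), tlv_len);
	free(cmd);
	return 0;
}
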
diff --git a/drivers/net/wireless/libertas/if_cs.c b/drivers/net/wireless/libertas/if_cs.c
index 038c66a98f15..54280e292ea5 100644
--- a/drivers/net/wireless/libertas/if_cs.c
+++ b/drivers/net/wireless/libertas/if_cs.c
@@ -83,14 +83,14 @@ static inline unsigned int if_cs_read8(struct if_cs_card *card, uint reg)
83{ 83{
84 unsigned int val = ioread8(card->iobase + reg); 84 unsigned int val = ioread8(card->iobase + reg);
85 if (debug_output) 85 if (debug_output)
86 printk(KERN_INFO "##inb %08x<%02x\n", reg, val); 86 printk(KERN_INFO "inb %08x<%02x\n", reg, val);
87 return val; 87 return val;
88} 88}
89static inline unsigned int if_cs_read16(struct if_cs_card *card, uint reg) 89static inline unsigned int if_cs_read16(struct if_cs_card *card, uint reg)
90{ 90{
91 unsigned int val = ioread16(card->iobase + reg); 91 unsigned int val = ioread16(card->iobase + reg);
92 if (debug_output) 92 if (debug_output)
93 printk(KERN_INFO "##inw %08x<%04x\n", reg, val); 93 printk(KERN_INFO "inw %08x<%04x\n", reg, val);
94 return val; 94 return val;
95} 95}
96static inline void if_cs_read16_rep( 96static inline void if_cs_read16_rep(
@@ -100,7 +100,7 @@ static inline void if_cs_read16_rep(
100 unsigned long count) 100 unsigned long count)
101{ 101{
102 if (debug_output) 102 if (debug_output)
103 printk(KERN_INFO "##insw %08x<(0x%lx words)\n", 103 printk(KERN_INFO "insw %08x<(0x%lx words)\n",
104 reg, count); 104 reg, count);
105 ioread16_rep(card->iobase + reg, buf, count); 105 ioread16_rep(card->iobase + reg, buf, count);
106} 106}
@@ -108,14 +108,14 @@ static inline void if_cs_read16_rep(
108static inline void if_cs_write8(struct if_cs_card *card, uint reg, u8 val) 108static inline void if_cs_write8(struct if_cs_card *card, uint reg, u8 val)
109{ 109{
110 if (debug_output) 110 if (debug_output)
111 printk(KERN_INFO "##outb %08x>%02x\n", reg, val); 111 printk(KERN_INFO "outb %08x>%02x\n", reg, val);
112 iowrite8(val, card->iobase + reg); 112 iowrite8(val, card->iobase + reg);
113} 113}
114 114
115static inline void if_cs_write16(struct if_cs_card *card, uint reg, u16 val) 115static inline void if_cs_write16(struct if_cs_card *card, uint reg, u16 val)
116{ 116{
117 if (debug_output) 117 if (debug_output)
118 printk(KERN_INFO "##outw %08x>%04x\n", reg, val); 118 printk(KERN_INFO "outw %08x>%04x\n", reg, val);
119 iowrite16(val, card->iobase + reg); 119 iowrite16(val, card->iobase + reg);
120} 120}
121 121
@@ -126,7 +126,7 @@ static inline void if_cs_write16_rep(
126 unsigned long count) 126 unsigned long count)
127{ 127{
128 if (debug_output) 128 if (debug_output)
129 printk(KERN_INFO "##outsw %08x>(0x%lx words)\n", 129 printk(KERN_INFO "outsw %08x>(0x%lx words)\n",
130 reg, count); 130 reg, count);
131 iowrite16_rep(card->iobase + reg, buf, count); 131 iowrite16_rep(card->iobase + reg, buf, count);
132} 132}
@@ -199,17 +199,6 @@ static int if_cs_poll_while_fw_download(struct if_cs_card *card, uint addr, u8 r
199#define IF_CS_C_S_CARDEVENT 0x0010 199#define IF_CS_C_S_CARDEVENT 0x0010
200#define IF_CS_C_S_MASK 0x001f 200#define IF_CS_C_S_MASK 0x001f
201#define IF_CS_C_S_STATUS_MASK 0x7f00 201#define IF_CS_C_S_STATUS_MASK 0x7f00
202/* The following definitions should be the same as the MRVDRV_ ones */
203
204#if MRVDRV_CMD_DNLD_RDY != IF_CS_C_S_CMD_DNLD_RDY
205#error MRVDRV_CMD_DNLD_RDY and IF_CS_C_S_CMD_DNLD_RDY not in sync
206#endif
207#if MRVDRV_CMD_UPLD_RDY != IF_CS_C_S_CMD_UPLD_RDY
208#error MRVDRV_CMD_UPLD_RDY and IF_CS_C_S_CMD_UPLD_RDY not in sync
209#endif
210#if MRVDRV_CARDEVENT != IF_CS_C_S_CARDEVENT
211#error MRVDRV_CARDEVENT and IF_CS_C_S_CARDEVENT not in sync
212#endif
213 202
214#define IF_CS_C_INT_CAUSE 0x00000022 203#define IF_CS_C_INT_CAUSE 0x00000022
215#define IF_CS_C_IC_MASK 0x001f 204#define IF_CS_C_IC_MASK 0x001f
@@ -226,55 +215,6 @@ static int if_cs_poll_while_fw_download(struct if_cs_card *card, uint addr, u8 r
226 215
227 216
228/********************************************************************/ 217/********************************************************************/
229/* Interrupts */
230/********************************************************************/
231
232static inline void if_cs_enable_ints(struct if_cs_card *card)
233{
234 lbs_deb_enter(LBS_DEB_CS);
235 if_cs_write16(card, IF_CS_H_INT_MASK, 0);
236}
237
238static inline void if_cs_disable_ints(struct if_cs_card *card)
239{
240 lbs_deb_enter(LBS_DEB_CS);
241 if_cs_write16(card, IF_CS_H_INT_MASK, IF_CS_H_IM_MASK);
242}
243
244static irqreturn_t if_cs_interrupt(int irq, void *data)
245{
246 struct if_cs_card *card = data;
247 u16 int_cause;
248
249 lbs_deb_enter(LBS_DEB_CS);
250
251 int_cause = if_cs_read16(card, IF_CS_C_INT_CAUSE);
252 if (int_cause == 0x0) {
253 /* Not for us */
254 return IRQ_NONE;
255
256 } else if (int_cause == 0xffff) {
257 /* Read in junk, the card has probably been removed */
258 card->priv->surpriseremoved = 1;
259 return IRQ_HANDLED;
260 } else {
261 if (int_cause & IF_CS_H_IC_TX_OVER)
262 lbs_host_to_card_done(card->priv);
263
264 /* clear interrupt */
265 if_cs_write16(card, IF_CS_C_INT_CAUSE, int_cause & IF_CS_C_IC_MASK);
266 }
267 spin_lock(&card->priv->driver_lock);
268 lbs_interrupt(card->priv);
269 spin_unlock(&card->priv->driver_lock);
270
271 return IRQ_HANDLED;
272}
273
274
275
276
277/********************************************************************/
278/* I/O */ 218/* I/O */
279/********************************************************************/ 219/********************************************************************/
280 220
@@ -351,6 +291,7 @@ static void if_cs_send_data(struct lbs_private *priv, u8 *buf, u16 nb)
351 */ 291 */
352static int if_cs_receive_cmdres(struct lbs_private *priv, u8 *data, u32 *len) 292static int if_cs_receive_cmdres(struct lbs_private *priv, u8 *data, u32 *len)
353{ 293{
294 unsigned long flags;
354 int ret = -1; 295 int ret = -1;
355 u16 val; 296 u16 val;
356 297
@@ -378,6 +319,12 @@ static int if_cs_receive_cmdres(struct lbs_private *priv, u8 *data, u32 *len)
378 * bytes */ 319 * bytes */
379 *len -= 8; 320 *len -= 8;
380 ret = 0; 321 ret = 0;
322
323 /* Clear this flag again */
324 spin_lock_irqsave(&priv->driver_lock, flags);
325 priv->dnld_sent = DNLD_RES_RECEIVED;
326 spin_unlock_irqrestore(&priv->driver_lock, flags);
327
381out: 328out:
382 lbs_deb_leave_args(LBS_DEB_CS, "ret %d, len %d", ret, *len); 329 lbs_deb_leave_args(LBS_DEB_CS, "ret %d, len %d", ret, *len);
383 return ret; 330 return ret;
@@ -396,11 +343,9 @@ static struct sk_buff *if_cs_receive_data(struct lbs_private *priv)
396 if (len == 0 || len > MRVDRV_ETH_RX_PACKET_BUFFER_SIZE) { 343 if (len == 0 || len > MRVDRV_ETH_RX_PACKET_BUFFER_SIZE) {
397 lbs_pr_err("card data buffer has invalid # of bytes (%d)\n", len); 344 lbs_pr_err("card data buffer has invalid # of bytes (%d)\n", len);
398 priv->stats.rx_dropped++; 345 priv->stats.rx_dropped++;
399 printk(KERN_INFO "##HS %s:%d TODO\n", __FUNCTION__, __LINE__);
400 goto dat_err; 346 goto dat_err;
401 } 347 }
402 348
403 //TODO: skb = dev_alloc_skb(len+ETH_FRAME_LEN+MRVDRV_SNAP_HEADER_LEN+EXTRA_LEN);
404 skb = dev_alloc_skb(MRVDRV_ETH_RX_PACKET_BUFFER_SIZE + 2); 349 skb = dev_alloc_skb(MRVDRV_ETH_RX_PACKET_BUFFER_SIZE + 2);
405 if (!skb) 350 if (!skb)
406 goto out; 351 goto out;
@@ -425,6 +370,96 @@ out:
425 370
426 371
427/********************************************************************/ 372/********************************************************************/
373/* Interrupts */
374/********************************************************************/
375
376static inline void if_cs_enable_ints(struct if_cs_card *card)
377{
378 lbs_deb_enter(LBS_DEB_CS);
379 if_cs_write16(card, IF_CS_H_INT_MASK, 0);
380}
381
382static inline void if_cs_disable_ints(struct if_cs_card *card)
383{
384 lbs_deb_enter(LBS_DEB_CS);
385 if_cs_write16(card, IF_CS_H_INT_MASK, IF_CS_H_IM_MASK);
386}
387
388
389static irqreturn_t if_cs_interrupt(int irq, void *data)
390{
391 struct if_cs_card *card = data;
392 struct lbs_private *priv = card->priv;
393 u16 cause;
394
395 lbs_deb_enter(LBS_DEB_CS);
396
397 cause = if_cs_read16(card, IF_CS_C_INT_CAUSE);
398 if_cs_write16(card, IF_CS_C_INT_CAUSE, cause & IF_CS_C_IC_MASK);
399
400 lbs_deb_cs("cause 0x%04x\n", cause);
401 if (cause == 0) {
402 /* Not for us */
403 return IRQ_NONE;
404 }
405
406 if (cause == 0xffff) {
407 /* Read in junk, the card has probably been removed */
408 card->priv->surpriseremoved = 1;
409 return IRQ_HANDLED;
410 }
411
412 /* TODO: I'm not sure what the best ordering is */
413
414 cause = if_cs_read16(card, IF_CS_C_STATUS) & IF_CS_C_S_MASK;
415
416 if (cause & IF_CS_C_S_RX_UPLD_RDY) {
417 struct sk_buff *skb;
418 lbs_deb_cs("rx packet\n");
419 skb = if_cs_receive_data(priv);
420 if (skb)
421 lbs_process_rxed_packet(priv, skb);
422 }
423
424 if (cause & IF_CS_H_IC_TX_OVER) {
425 lbs_deb_cs("tx over\n");
426 lbs_host_to_card_done(priv);
427 }
428
429 if (cause & IF_CS_C_S_CMD_UPLD_RDY) {
430 unsigned long flags;
431 u8 i;
432
433 lbs_deb_cs("cmd upload ready\n");
434 spin_lock_irqsave(&priv->driver_lock, flags);
435 i = (priv->resp_idx == 0) ? 1 : 0;
436 spin_unlock_irqrestore(&priv->driver_lock, flags);
437
438 BUG_ON(priv->resp_len[i]);
439 if_cs_receive_cmdres(priv, priv->resp_buf[i],
440 &priv->resp_len[i]);
441
442 spin_lock_irqsave(&priv->driver_lock, flags);
443 lbs_notify_command_response(priv, i);
444 spin_unlock_irqrestore(&priv->driver_lock, flags);
445 }
446
447 if (cause & IF_CS_H_IC_HOST_EVENT) {
448 u16 event = if_cs_read16(priv->card, IF_CS_C_STATUS)
449 & IF_CS_C_S_STATUS_MASK;
450 if_cs_write16(priv->card, IF_CS_H_INT_CAUSE,
451 IF_CS_H_IC_HOST_EVENT);
452 lbs_deb_cs("eventcause 0x%04x\n", event);
453 lbs_queue_event(priv, event >> 8 & 0xff);
454 }
455
456 return IRQ_HANDLED;
457}
458
459
460
461
462/********************************************************************/
428/* Firmware */ 463/* Firmware */
429/********************************************************************/ 464/********************************************************************/
430 465
@@ -476,8 +511,6 @@ static int if_cs_prog_helper(struct if_cs_card *card)
476 511
477 if (remain < count) 512 if (remain < count)
478 count = remain; 513 count = remain;
479 /* printk(KERN_INFO "//HS %d loading %d of %d bytes\n",
480 __LINE__, sent, fw->size); */
481 514
482 /* "write the number of bytes to be sent to the I/O Command 515 /* "write the number of bytes to be sent to the I/O Command
483 * write length register" */ 516 * write length register" */
@@ -544,18 +577,12 @@ static int if_cs_prog_real(struct if_cs_card *card)
544 577
545 ret = if_cs_poll_while_fw_download(card, IF_CS_C_SQ_READ_LOW, IF_CS_C_SQ_HELPER_OK); 578 ret = if_cs_poll_while_fw_download(card, IF_CS_C_SQ_READ_LOW, IF_CS_C_SQ_HELPER_OK);
546 if (ret < 0) { 579 if (ret < 0) {
547 int i;
548 lbs_pr_err("helper firmware doesn't answer\n"); 580 lbs_pr_err("helper firmware doesn't answer\n");
549 for (i = 0; i < 0x50; i += 2)
550 printk(KERN_INFO "## HS %02x: %04x\n",
551 i, if_cs_read16(card, i));
552 goto err_release; 581 goto err_release;
553 } 582 }
554 583
555 for (sent = 0; sent < fw->size; sent += len) { 584 for (sent = 0; sent < fw->size; sent += len) {
556 len = if_cs_read16(card, IF_CS_C_SQ_READ_LOW); 585 len = if_cs_read16(card, IF_CS_C_SQ_READ_LOW);
557 /* printk(KERN_INFO "//HS %d loading %d of %d bytes\n",
558 __LINE__, sent, fw->size); */
559 if (len & 1) { 586 if (len & 1) {
560 retry++; 587 retry++;
561 lbs_pr_info("odd, need to retry this firmware block\n"); 588 lbs_pr_info("odd, need to retry this firmware block\n");
@@ -642,64 +669,6 @@ static int if_cs_host_to_card(struct lbs_private *priv,
642} 669}
643 670
644 671
645static int if_cs_get_int_status(struct lbs_private *priv, u8 *ireg)
646{
647 struct if_cs_card *card = (struct if_cs_card *)priv->card;
648 int ret = 0;
649 u16 int_cause;
650 *ireg = 0;
651
652 lbs_deb_enter(LBS_DEB_CS);
653
654 if (priv->surpriseremoved)
655 goto out;
656
657 int_cause = if_cs_read16(card, IF_CS_C_INT_CAUSE) & IF_CS_C_IC_MASK;
658 if_cs_write16(card, IF_CS_C_INT_CAUSE, int_cause);
659
660 *ireg = if_cs_read16(card, IF_CS_C_STATUS) & IF_CS_C_S_MASK;
661
662 if (!*ireg)
663 goto sbi_get_int_status_exit;
664
665sbi_get_int_status_exit:
666
667 /* is there a data packet for us? */
668 if (*ireg & IF_CS_C_S_RX_UPLD_RDY) {
669 struct sk_buff *skb = if_cs_receive_data(priv);
670 lbs_process_rxed_packet(priv, skb);
671 *ireg &= ~IF_CS_C_S_RX_UPLD_RDY;
672 }
673
674 if (*ireg & IF_CS_C_S_TX_DNLD_RDY) {
675 priv->dnld_sent = DNLD_RES_RECEIVED;
676 }
677
678 /* Card has a command result for us */
679 if (*ireg & IF_CS_C_S_CMD_UPLD_RDY) {
680 ret = if_cs_receive_cmdres(priv, priv->upld_buf, &priv->upld_len);
681 if (ret < 0)
682 lbs_pr_err("could not receive cmd from card\n");
683 }
684
685out:
686 lbs_deb_leave_args(LBS_DEB_CS, "ret %d, ireg 0x%x, hisregcpy 0x%x", ret, *ireg, priv->hisregcpy);
687 return ret;
688}
689
690
691static int if_cs_read_event_cause(struct lbs_private *priv)
692{
693 lbs_deb_enter(LBS_DEB_CS);
694
695 priv->eventcause = (if_cs_read16(priv->card, IF_CS_C_STATUS) & IF_CS_C_S_STATUS_MASK) >> 5;
696 if_cs_write16(priv->card, IF_CS_H_INT_CAUSE, IF_CS_H_IC_HOST_EVENT);
697
698 return 0;
699}
700
701
702
703/********************************************************************/ 672/********************************************************************/
704/* Card Services */ 673/* Card Services */
705/********************************************************************/ 674/********************************************************************/
@@ -852,13 +821,10 @@ static int if_cs_probe(struct pcmcia_device *p_dev)
852 goto out2; 821 goto out2;
853 } 822 }
854 823
855 /* Store pointers to our call-back functions */ 824 /* Finish setting up fields in lbs_private */
856 card->priv = priv; 825 card->priv = priv;
857 priv->card = card; 826 priv->card = card;
858 priv->hw_host_to_card = if_cs_host_to_card; 827 priv->hw_host_to_card = if_cs_host_to_card;
859 priv->hw_get_int_status = if_cs_get_int_status;
860 priv->hw_read_event_cause = if_cs_read_event_cause;
861
862 priv->fw_ready = 1; 828 priv->fw_ready = 1;
863 829
864 /* Now actually get the IRQ */ 830 /* Now actually get the IRQ */
@@ -880,6 +846,9 @@ static int if_cs_probe(struct pcmcia_device *p_dev)
880 goto out3; 846 goto out3;
881 } 847 }
882 848
849 /* The firmware for the CF card supports powersave */
850 priv->ps_supported = 1;
851
883 ret = 0; 852 ret = 0;
884 goto out; 853 goto out;
885 854
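
The relocated if_cs_interrupt() now acknowledges the interrupt cause up front and then tests each status bit independently, so one interrupt can service RX data, a command response and a TX-done in a single pass. A minimal sketch of that dispatch shape (the bit values are placeholders, not the real IF_CS_C_S_* masks):

#include <stdio.h>
#include <stdint.h>

/* Placeholder status bits, standing in for the IF_CS_C_S_* masks. */
#define S_TX_OVER       0x0001
#define S_RX_UPLD_RDY   0x0002
#define S_CMD_UPLD_RDY  0x0004

/* Test each bit independently instead of treating the status as a
 * single exclusive cause. */
static void handle_irq(uint16_t status)
{
	if (status & S_RX_UPLD_RDY)
		printf("rx packet ready\n");
	if (status & S_TX_OVER)
		printf("tx over, host_to_card done\n");
	if (status & S_CMD_UPLD_RDY)
		printf("command response ready\n");
}

int main(void)
{
	handle_irq(S_RX_UPLD_RDY | S_CMD_UPLD_RDY);
	return 0;
}
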
diff --git a/drivers/net/wireless/libertas/if_sdio.c b/drivers/net/wireless/libertas/if_sdio.c
index eed73204bcc9..51f664bbee9d 100644
--- a/drivers/net/wireless/libertas/if_sdio.c
+++ b/drivers/net/wireless/libertas/if_sdio.c
@@ -91,8 +91,6 @@ struct if_sdio_card {
91 const char *firmware; 91 const char *firmware;
92 92
93 u8 buffer[65536]; 93 u8 buffer[65536];
94 u8 int_cause;
95 u32 event;
96 94
97 spinlock_t lock; 95 spinlock_t lock;
98 struct if_sdio_packet *packets; 96 struct if_sdio_packet *packets;
@@ -129,13 +127,13 @@ static u16 if_sdio_read_scratch(struct if_sdio_card *card, int *err)
129static int if_sdio_handle_cmd(struct if_sdio_card *card, 127static int if_sdio_handle_cmd(struct if_sdio_card *card,
130 u8 *buffer, unsigned size) 128 u8 *buffer, unsigned size)
131{ 129{
130 struct lbs_private *priv = card->priv;
132 int ret; 131 int ret;
133 unsigned long flags; 132 unsigned long flags;
133 u8 i;
134 134
135 lbs_deb_enter(LBS_DEB_SDIO); 135 lbs_deb_enter(LBS_DEB_SDIO);
136 136
137 spin_lock_irqsave(&card->priv->driver_lock, flags);
138
139 if (size > LBS_CMD_BUFFER_SIZE) { 137 if (size > LBS_CMD_BUFFER_SIZE) {
140 lbs_deb_sdio("response packet too large (%d bytes)\n", 138 lbs_deb_sdio("response packet too large (%d bytes)\n",
141 (int)size); 139 (int)size);
@@ -143,20 +141,20 @@ static int if_sdio_handle_cmd(struct if_sdio_card *card,
143 goto out; 141 goto out;
144 } 142 }
145 143
146 memcpy(card->priv->upld_buf, buffer, size); 144 spin_lock_irqsave(&priv->driver_lock, flags);
147 card->priv->upld_len = size;
148 145
149 card->int_cause |= MRVDRV_CMD_UPLD_RDY; 146 i = (priv->resp_idx == 0) ? 1 : 0;
147 BUG_ON(priv->resp_len[i]);
148 priv->resp_len[i] = size;
149 memcpy(priv->resp_buf[i], buffer, size);
150 lbs_notify_command_response(priv, i);
150 151
151 lbs_interrupt(card->priv); 152 spin_unlock_irqrestore(&card->priv->driver_lock, flags);
152 153
153 ret = 0; 154 ret = 0;
154 155
155out: 156out:
156 spin_unlock_irqrestore(&card->priv->driver_lock, flags);
157
158 lbs_deb_leave_args(LBS_DEB_SDIO, "ret %d", ret); 157 lbs_deb_leave_args(LBS_DEB_SDIO, "ret %d", ret);
159
160 return ret; 158 return ret;
161} 159}
162 160
@@ -202,7 +200,6 @@ static int if_sdio_handle_event(struct if_sdio_card *card,
202 u8 *buffer, unsigned size) 200 u8 *buffer, unsigned size)
203{ 201{
204 int ret; 202 int ret;
205 unsigned long flags;
206 u32 event; 203 u32 event;
207 204
208 lbs_deb_enter(LBS_DEB_SDIO); 205 lbs_deb_enter(LBS_DEB_SDIO);
@@ -222,18 +219,9 @@ static int if_sdio_handle_event(struct if_sdio_card *card,
222 event |= buffer[2] << 16; 219 event |= buffer[2] << 16;
223 event |= buffer[1] << 8; 220 event |= buffer[1] << 8;
224 event |= buffer[0] << 0; 221 event |= buffer[0] << 0;
225 event <<= SBI_EVENT_CAUSE_SHIFT;
226 } 222 }
227 223
228 spin_lock_irqsave(&card->priv->driver_lock, flags); 224 lbs_queue_event(card->priv, event & 0xFF);
229
230 card->event = event;
231 card->int_cause |= MRVDRV_CARDEVENT;
232
233 lbs_interrupt(card->priv);
234
235 spin_unlock_irqrestore(&card->priv->driver_lock, flags);
236
237 ret = 0; 225 ret = 0;
238 226
239out: 227out:
@@ -770,37 +758,6 @@ out:
770 return ret; 758 return ret;
771} 759}
772 760
773static int if_sdio_get_int_status(struct lbs_private *priv, u8 *ireg)
774{
775 struct if_sdio_card *card;
776
777 lbs_deb_enter(LBS_DEB_SDIO);
778
779 card = priv->card;
780
781 *ireg = card->int_cause;
782 card->int_cause = 0;
783
784 lbs_deb_leave(LBS_DEB_SDIO);
785
786 return 0;
787}
788
789static int if_sdio_read_event_cause(struct lbs_private *priv)
790{
791 struct if_sdio_card *card;
792
793 lbs_deb_enter(LBS_DEB_SDIO);
794
795 card = priv->card;
796
797 priv->eventcause = card->event;
798
799 lbs_deb_leave(LBS_DEB_SDIO);
800
801 return 0;
802}
803
804/*******************************************************************/ 761/*******************************************************************/
805/* SDIO callbacks */ 762/* SDIO callbacks */
806/*******************************************************************/ 763/*******************************************************************/
@@ -953,8 +910,6 @@ static int if_sdio_probe(struct sdio_func *func,
953 910
954 priv->card = card; 911 priv->card = card;
955 priv->hw_host_to_card = if_sdio_host_to_card; 912 priv->hw_host_to_card = if_sdio_host_to_card;
956 priv->hw_get_int_status = if_sdio_get_int_status;
957 priv->hw_read_event_cause = if_sdio_read_event_cause;
958 913
959 priv->fw_ready = 1; 914 priv->fw_ready = 1;
960 915
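
if_sdio_handle_event() now assembles the little-endian event word from the payload bytes and queues only the low 8 bits, dropping the old SBI_EVENT_CAUSE_SHIFT scaling. A tiny standalone sketch in the style of that byte assembly (the payload value is invented for the demo):

#include <stdio.h>
#include <stdint.h>

/* Assemble a 32-bit little-endian word from four payload bytes, then keep
 * only the low byte as the event code. */
static uint32_t event_from_bytes(const uint8_t *buf)
{
	uint32_t event = 0;

	event |= (uint32_t)buf[3] << 24;
	event |= (uint32_t)buf[2] << 16;
	event |= (uint32_t)buf[1] << 8;
	event |= (uint32_t)buf[0] << 0;
	return event;
}

int main(void)
{
	const uint8_t payload[4] = { 0x2b, 0x00, 0x00, 0x00 };
	uint32_t event = event_from_bytes(payload);

	printf("event word 0x%08x, queued code 0x%02x\n",
	       (unsigned)event, (unsigned)(event & 0xff));
	return 0;
}
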
diff --git a/drivers/net/wireless/libertas/if_usb.c b/drivers/net/wireless/libertas/if_usb.c
index 75aed9d07367..8032df72aaab 100644
--- a/drivers/net/wireless/libertas/if_usb.c
+++ b/drivers/net/wireless/libertas/if_usb.c
@@ -38,8 +38,6 @@ static void if_usb_receive_fwload(struct urb *urb);
38static int if_usb_prog_firmware(struct if_usb_card *cardp); 38static int if_usb_prog_firmware(struct if_usb_card *cardp);
39static int if_usb_host_to_card(struct lbs_private *priv, uint8_t type, 39static int if_usb_host_to_card(struct lbs_private *priv, uint8_t type,
40 uint8_t *payload, uint16_t nb); 40 uint8_t *payload, uint16_t nb);
41static int if_usb_get_int_status(struct lbs_private *priv, uint8_t *);
42static int if_usb_read_event_cause(struct lbs_private *);
43static int usb_tx_block(struct if_usb_card *cardp, uint8_t *payload, 41static int usb_tx_block(struct if_usb_card *cardp, uint8_t *payload,
44 uint16_t nb); 42 uint16_t nb);
45static void if_usb_free(struct if_usb_card *cardp); 43static void if_usb_free(struct if_usb_card *cardp);
@@ -233,8 +231,6 @@ static int if_usb_probe(struct usb_interface *intf,
233 cardp->priv->fw_ready = 1; 231 cardp->priv->fw_ready = 1;
234 232
235 priv->hw_host_to_card = if_usb_host_to_card; 233 priv->hw_host_to_card = if_usb_host_to_card;
236 priv->hw_get_int_status = if_usb_get_int_status;
237 priv->hw_read_event_cause = if_usb_read_event_cause;
238 cardp->boot2_version = udev->descriptor.bcdDevice; 234 cardp->boot2_version = udev->descriptor.bcdDevice;
239 235
240 if_usb_submit_rx_urb(cardp); 236 if_usb_submit_rx_urb(cardp);
@@ -582,7 +578,6 @@ static inline void process_cmdtypedata(int recvlength, struct sk_buff *skb,
582 skb_pull(skb, MESSAGE_HEADER_LEN); 578 skb_pull(skb, MESSAGE_HEADER_LEN);
583 579
584 lbs_process_rxed_packet(priv, skb); 580 lbs_process_rxed_packet(priv, skb);
585 priv->upld_len = (recvlength - MESSAGE_HEADER_LEN);
586} 581}
587 582
588static inline void process_cmdrequest(int recvlength, uint8_t *recvbuff, 583static inline void process_cmdrequest(int recvlength, uint8_t *recvbuff,
@@ -590,6 +585,8 @@ static inline void process_cmdrequest(int recvlength, uint8_t *recvbuff,
590 struct if_usb_card *cardp, 585 struct if_usb_card *cardp,
591 struct lbs_private *priv) 586 struct lbs_private *priv)
592{ 587{
588 u8 i;
589
593 if (recvlength > LBS_CMD_BUFFER_SIZE) { 590 if (recvlength > LBS_CMD_BUFFER_SIZE) {
594 lbs_deb_usbd(&cardp->udev->dev, 591 lbs_deb_usbd(&cardp->udev->dev,
595 "The receive buffer is too large\n"); 592 "The receive buffer is too large\n");
@@ -601,12 +598,15 @@ static inline void process_cmdrequest(int recvlength, uint8_t *recvbuff,
601 BUG(); 598 BUG();
602 599
603 spin_lock(&priv->driver_lock); 600 spin_lock(&priv->driver_lock);
604 cardp->usb_int_cause |= MRVDRV_CMD_UPLD_RDY;
605 priv->upld_len = (recvlength - MESSAGE_HEADER_LEN);
606 memcpy(priv->upld_buf, recvbuff + MESSAGE_HEADER_LEN, priv->upld_len);
607 601
602 i = (priv->resp_idx == 0) ? 1 : 0;
603 BUG_ON(priv->resp_len[i]);
604 priv->resp_len[i] = (recvlength - MESSAGE_HEADER_LEN);
605 memcpy(priv->resp_buf[i], recvbuff + MESSAGE_HEADER_LEN,
606 priv->resp_len[i]);
608 kfree_skb(skb); 607 kfree_skb(skb);
609 lbs_interrupt(priv); 608 lbs_notify_command_response(priv, i);
609
610 spin_unlock(&priv->driver_lock); 610 spin_unlock(&priv->driver_lock);
611 611
612 lbs_deb_usbd(&cardp->udev->dev, 612 lbs_deb_usbd(&cardp->udev->dev,
@@ -629,6 +629,7 @@ static void if_usb_receive(struct urb *urb)
629 uint8_t *recvbuff = NULL; 629 uint8_t *recvbuff = NULL;
630 uint32_t recvtype = 0; 630 uint32_t recvtype = 0;
631 __le32 *pkt = (__le32 *)(skb->data + IPFIELD_ALIGN_OFFSET); 631 __le32 *pkt = (__le32 *)(skb->data + IPFIELD_ALIGN_OFFSET);
632 uint32_t event;
632 633
633 lbs_deb_enter(LBS_DEB_USB); 634 lbs_deb_enter(LBS_DEB_USB);
634 635
@@ -660,26 +661,20 @@ static void if_usb_receive(struct urb *urb)
660 break; 661 break;
661 662
662 case CMD_TYPE_INDICATION: 663 case CMD_TYPE_INDICATION:
663 /* Event cause handling */ 664 /* Event handling */
664 spin_lock(&priv->driver_lock); 665 event = le32_to_cpu(pkt[1]);
666 lbs_deb_usbd(&cardp->udev->dev, "**EVENT** 0x%X\n", event);
667 kfree_skb(skb);
665 668
666 cardp->usb_event_cause = le32_to_cpu(pkt[1]); 669 /* Icky undocumented magic special case */
670 if (event & 0xffff0000) {
671 u32 trycount = (event & 0xffff0000) >> 16;
667 672
668 lbs_deb_usbd(&cardp->udev->dev,"**EVENT** 0x%X\n", 673 lbs_send_tx_feedback(priv, trycount);
669 cardp->usb_event_cause); 674 } else
675 lbs_queue_event(priv, event & 0xFF);
676 break;
670 677
671 /* Icky undocumented magic special case */
672 if (cardp->usb_event_cause & 0xffff0000) {
673 lbs_send_tx_feedback(priv);
674 spin_unlock(&priv->driver_lock);
675 break;
676 }
677 cardp->usb_event_cause <<= 3;
678 cardp->usb_int_cause |= MRVDRV_CARDEVENT;
679 kfree_skb(skb);
680 lbs_interrupt(priv);
681 spin_unlock(&priv->driver_lock);
682 goto rx_exit;
683 default: 678 default:
684 lbs_deb_usbd(&cardp->udev->dev, "Unknown command type 0x%X\n", 679 lbs_deb_usbd(&cardp->udev->dev, "Unknown command type 0x%X\n",
685 recvtype); 680 recvtype);
@@ -722,30 +717,6 @@ static int if_usb_host_to_card(struct lbs_private *priv, uint8_t type,
722 return usb_tx_block(cardp, cardp->ep_out_buf, nb + MESSAGE_HEADER_LEN); 717 return usb_tx_block(cardp, cardp->ep_out_buf, nb + MESSAGE_HEADER_LEN);
723} 718}
724 719
725/* called with priv->driver_lock held */
726static int if_usb_get_int_status(struct lbs_private *priv, uint8_t *ireg)
727{
728 struct if_usb_card *cardp = priv->card;
729
730 *ireg = cardp->usb_int_cause;
731 cardp->usb_int_cause = 0;
732
733 lbs_deb_usbd(&cardp->udev->dev, "Int cause is 0x%X\n", *ireg);
734
735 return 0;
736}
737
738static int if_usb_read_event_cause(struct lbs_private *priv)
739{
740 struct if_usb_card *cardp = priv->card;
741
742 priv->eventcause = cardp->usb_event_cause;
743 /* Re-submit rx urb here to avoid event lost issue */
744 if_usb_submit_rx_urb(cardp);
745
746 return 0;
747}
748
749/** 720/**
750 * @brief This function issues Boot command to the Boot2 code 721 * @brief This function issues Boot command to the Boot2 code
751 * @param ivalue 1:Boot from FW by USB-Download 722 * @param ivalue 1:Boot from FW by USB-Download
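
The USB indication path now splits the 32-bit event word itself: a non-zero upper half carries a TX retry count for lbs_send_tx_feedback(), otherwise the low byte is queued as an ordinary event. A short sketch of that split (printf stands in for the driver calls):

#include <stdio.h>
#include <stdint.h>

/* Split the indication word the way the reworked if_usb_receive() does. */
static void handle_indication(uint32_t event)
{
	if (event & 0xffff0000) {
		uint32_t trycount = (event & 0xffff0000) >> 16;

		printf("tx feedback, try count %u\n", (unsigned)trycount);
	} else {
		printf("queue event 0x%02x\n", (unsigned)(event & 0xff));
	}
}

int main(void)
{
	handle_indication(0x00030000);	/* 3 transmit attempts */
	handle_indication(0x0000002b);	/* plain event code */
	return 0;
}
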
diff --git a/drivers/net/wireless/libertas/if_usb.h b/drivers/net/wireless/libertas/if_usb.h
index e4829a391eb9..5771a83a43f0 100644
--- a/drivers/net/wireless/libertas/if_usb.h
+++ b/drivers/net/wireless/libertas/if_usb.h
@@ -46,8 +46,6 @@ struct if_usb_card {
46 struct lbs_private *priv; 46 struct lbs_private *priv;
47 47
48 struct sk_buff *rx_skb; 48 struct sk_buff *rx_skb;
49 uint32_t usb_event_cause;
50 uint8_t usb_int_cause;
51 49
52 uint8_t ep_in; 50 uint8_t ep_in;
53 uint8_t ep_out; 51 uint8_t ep_out;
diff --git a/drivers/net/wireless/libertas/join.c b/drivers/net/wireless/libertas/join.c
deleted file mode 100644
index 2d4508048b68..000000000000
--- a/drivers/net/wireless/libertas/join.c
+++ /dev/null
@@ -1,895 +0,0 @@
1/**
2 * Functions implementing wlan infrastructure and adhoc join routines,
  3 * IOCTL handlers as well as command preparation and response routines
4 * for sending adhoc start, adhoc join, and association commands
5 * to the firmware.
6 */
7#include <linux/netdevice.h>
8#include <linux/if_arp.h>
9#include <linux/wireless.h>
10#include <linux/etherdevice.h>
11
12#include <net/iw_handler.h>
13
14#include "host.h"
15#include "decl.h"
16#include "join.h"
17#include "dev.h"
18#include "assoc.h"
19
 20/* The firmware needs certain bits masked out of the beacon-derived capability
21 * field when associating/joining to BSSs.
22 */
23#define CAPINFO_MASK (~(0xda00))
24
25/**
26 * @brief This function finds common rates between rate1 and card rates.
27 *
28 * It will fill common rates in rate1 as output if found.
29 *
30 * NOTE: Setting the MSB of the basic rates need to be taken
31 * care, either before or after calling this function
32 *
33 * @param priv A pointer to struct lbs_private structure
34 * @param rate1 the buffer which keeps input and output
35 * @param rate1_size the size of rate1 buffer; new size of buffer on return
36 *
37 * @return 0 or -1
38 */
39static int get_common_rates(struct lbs_private *priv,
40 u8 *rates,
41 u16 *rates_size)
42{
43 u8 *card_rates = lbs_bg_rates;
44 size_t num_card_rates = sizeof(lbs_bg_rates);
45 int ret = 0, i, j;
46 u8 tmp[30];
47 size_t tmp_size = 0;
48
49 /* For each rate in card_rates that exists in rate1, copy to tmp */
50 for (i = 0; card_rates[i] && (i < num_card_rates); i++) {
51 for (j = 0; rates[j] && (j < *rates_size); j++) {
52 if (rates[j] == card_rates[i])
53 tmp[tmp_size++] = card_rates[i];
54 }
55 }
56
57 lbs_deb_hex(LBS_DEB_JOIN, "AP rates ", rates, *rates_size);
58 lbs_deb_hex(LBS_DEB_JOIN, "card rates ", card_rates, num_card_rates);
59 lbs_deb_hex(LBS_DEB_JOIN, "common rates", tmp, tmp_size);
60 lbs_deb_join("TX data rate 0x%02x\n", priv->cur_rate);
61
62 if (!priv->auto_rate) {
63 for (i = 0; i < tmp_size; i++) {
64 if (tmp[i] == priv->cur_rate)
65 goto done;
66 }
67 lbs_pr_alert("Previously set fixed data rate %#x isn't "
68 "compatible with the network.\n", priv->cur_rate);
69 ret = -1;
70 goto done;
71 }
72 ret = 0;
73
74done:
75 memset(rates, 0, *rates_size);
76 *rates_size = min_t(int, tmp_size, *rates_size);
77 memcpy(rates, tmp, *rates_size);
78 return ret;
79}
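An illustrative, standalone trace of the rate-intersection step performed by get_common_rates() above, with made-up rate arrays (rates are encoded in units of 500 kbit/s, the same encoding lbs_bg_rates uses; not part of the patch):

/* Keep only the AP's rates that the card also supports (userspace demo,
 * hypothetical data). */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* 0x02=1M, 0x04=2M, 0x0b=5.5M, 0x16=11M, 0x24=18M, 0x30=24M ... */
        uint8_t card_rates[] = { 0x02, 0x04, 0x0b, 0x16, 0x24, 0x30, 0x48, 0x60 };
        uint8_t ap_rates[]   = { 0x02, 0x04, 0x0b, 0x16, 0x30 };
        uint8_t common[16];
        size_t n = 0;

        for (size_t i = 0; i < sizeof(card_rates); i++)
                for (size_t j = 0; j < sizeof(ap_rates); j++)
                        if (ap_rates[j] == card_rates[i])
                                common[n++] = card_rates[i];

        for (size_t i = 0; i < n; i++)
                printf("0x%02x ", common[i]);   /* 0x02 0x04 0x0b 0x16 0x30 */
        printf("\n");
        return 0;
}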
80
81
82/**
83 * @brief Sets the MSB on basic rates as the firmware requires
84 *
85 * Scan through an array and set the MSB for basic data rates.
86 *
87 * @param rates buffer of data rates
88 * @param len size of buffer
89 */
90static void lbs_set_basic_rate_flags(u8 *rates, size_t len)
91{
92 int i;
93
94 for (i = 0; i < len; i++) {
95 if (rates[i] == 0x02 || rates[i] == 0x04 ||
96 rates[i] == 0x0b || rates[i] == 0x16)
97 rates[i] |= 0x80;
98 }
99}
100
101/**
102 * @brief Unsets the MSB on basic rates
103 *
104 * Scan through an array and unset the MSB for basic data rates.
105 *
106 * @param rates buffer of data rates
107 * @param len size of buffer
108 */
109void lbs_unset_basic_rate_flags(u8 *rates, size_t len)
110{
111 int i;
112
113 for (i = 0; i < len; i++)
114 rates[i] &= 0x7f;
115}
116
117
118/**
119 * @brief Associate to a specific BSS discovered in a scan
120 *
121 * @param priv A pointer to struct lbs_private structure
122 * @param pbssdesc Pointer to the BSS descriptor to associate with.
123 *
124 * @return 0-success, otherwise fail
125 */
126int lbs_associate(struct lbs_private *priv, struct assoc_request *assoc_req)
127{
128 int ret;
129
130 lbs_deb_enter(LBS_DEB_ASSOC);
131
132 ret = lbs_prepare_and_send_command(priv, CMD_802_11_AUTHENTICATE,
133 0, CMD_OPTION_WAITFORRSP,
134 0, assoc_req->bss.bssid);
135
136 if (ret)
137 goto done;
138
139 /* set preamble to firmware */
140 if ( (priv->capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
141 && (assoc_req->bss.capability & WLAN_CAPABILITY_SHORT_PREAMBLE))
142 priv->preamble = CMD_TYPE_SHORT_PREAMBLE;
143 else
144 priv->preamble = CMD_TYPE_LONG_PREAMBLE;
145
146 lbs_set_radio_control(priv);
147
148 ret = lbs_prepare_and_send_command(priv, CMD_802_11_ASSOCIATE,
149 0, CMD_OPTION_WAITFORRSP, 0, assoc_req);
150
151done:
152 lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret);
153 return ret;
154}
155
156/**
157 * @brief Start an Adhoc Network
158 *
159 * @param priv A pointer to struct lbs_private structure
160 * @param adhocssid The ssid of the Adhoc Network
161 * @return 0--success, -1--fail
162 */
163int lbs_start_adhoc_network(struct lbs_private *priv,
164 struct assoc_request *assoc_req)
165{
166 int ret = 0;
167
168 priv->adhoccreate = 1;
169
170 if (priv->capability & WLAN_CAPABILITY_SHORT_PREAMBLE) {
171 lbs_deb_join("AdhocStart: Short preamble\n");
172 priv->preamble = CMD_TYPE_SHORT_PREAMBLE;
173 } else {
174 lbs_deb_join("AdhocStart: Long preamble\n");
175 priv->preamble = CMD_TYPE_LONG_PREAMBLE;
176 }
177
178 lbs_set_radio_control(priv);
179
180 lbs_deb_join("AdhocStart: channel = %d\n", assoc_req->channel);
181 lbs_deb_join("AdhocStart: band = %d\n", assoc_req->band);
182
183 ret = lbs_prepare_and_send_command(priv, CMD_802_11_AD_HOC_START,
184 0, CMD_OPTION_WAITFORRSP, 0, assoc_req);
185
186 return ret;
187}
188
189/**
190 * @brief Join an adhoc network found in a previous scan
191 *
192 * @param priv A pointer to struct lbs_private structure
193 * @param pbssdesc Pointer to a BSS descriptor found in a previous scan
194 * to attempt to join
195 *
196 * @return 0--success, -1--fail
197 */
198int lbs_join_adhoc_network(struct lbs_private *priv,
199 struct assoc_request *assoc_req)
200{
201 struct bss_descriptor * bss = &assoc_req->bss;
202 int ret = 0;
203
204 lbs_deb_join("%s: Current SSID '%s', ssid length %u\n",
205 __func__,
206 escape_essid(priv->curbssparams.ssid,
207 priv->curbssparams.ssid_len),
208 priv->curbssparams.ssid_len);
209 lbs_deb_join("%s: requested ssid '%s', ssid length %u\n",
210 __func__, escape_essid(bss->ssid, bss->ssid_len),
211 bss->ssid_len);
212
213 /* check if the requested SSID is already joined */
214 if ( priv->curbssparams.ssid_len
215 && !lbs_ssid_cmp(priv->curbssparams.ssid,
216 priv->curbssparams.ssid_len,
217 bss->ssid, bss->ssid_len)
218 && (priv->mode == IW_MODE_ADHOC)
219 && (priv->connect_status == LBS_CONNECTED)) {
220 union iwreq_data wrqu;
221
222 lbs_deb_join("ADHOC_J_CMD: New ad-hoc SSID is the same as "
223 "current, not attempting to re-join");
224
225 /* Send the re-association event though, because the association
226 * request really was successful, even if just a null-op.
227 */
228 memset(&wrqu, 0, sizeof(wrqu));
229 memcpy(wrqu.ap_addr.sa_data, priv->curbssparams.bssid,
230 ETH_ALEN);
231 wrqu.ap_addr.sa_family = ARPHRD_ETHER;
232 wireless_send_event(priv->dev, SIOCGIWAP, &wrqu, NULL);
233 goto out;
234 }
235
236 /* Use shortpreamble only when both creator and card supports
237 short preamble */
238 if ( !(bss->capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
239 || !(priv->capability & WLAN_CAPABILITY_SHORT_PREAMBLE)) {
240 lbs_deb_join("AdhocJoin: Long preamble\n");
241 priv->preamble = CMD_TYPE_LONG_PREAMBLE;
242 } else {
243 lbs_deb_join("AdhocJoin: Short preamble\n");
244 priv->preamble = CMD_TYPE_SHORT_PREAMBLE;
245 }
246
247 lbs_set_radio_control(priv);
248
249 lbs_deb_join("AdhocJoin: channel = %d\n", assoc_req->channel);
250 lbs_deb_join("AdhocJoin: band = %c\n", assoc_req->band);
251
252 priv->adhoccreate = 0;
253
254 ret = lbs_prepare_and_send_command(priv, CMD_802_11_AD_HOC_JOIN,
255 0, CMD_OPTION_WAITFORRSP,
256 OID_802_11_SSID, assoc_req);
257
258out:
259 return ret;
260}
261
262int lbs_stop_adhoc_network(struct lbs_private *priv)
263{
264 return lbs_prepare_and_send_command(priv, CMD_802_11_AD_HOC_STOP,
265 0, CMD_OPTION_WAITFORRSP, 0, NULL);
266}
267
268/**
269 * @brief Send Deauthentication Request
270 *
271 * @param priv A pointer to struct lbs_private structure
272 * @return 0--success, -1--fail
273 */
274int lbs_send_deauthentication(struct lbs_private *priv)
275{
276 return lbs_prepare_and_send_command(priv, CMD_802_11_DEAUTHENTICATE,
277 0, CMD_OPTION_WAITFORRSP, 0, NULL);
278}
279
280/**
281 * @brief This function prepares command of authenticate.
282 *
283 * @param priv A pointer to struct lbs_private structure
284 * @param cmd A pointer to cmd_ds_command structure
285 * @param pdata_buf Void cast of pointer to a BSSID to authenticate with
286 *
287 * @return 0 or -1
288 */
289int lbs_cmd_80211_authenticate(struct lbs_private *priv,
290 struct cmd_ds_command *cmd,
291 void *pdata_buf)
292{
293 struct cmd_ds_802_11_authenticate *pauthenticate = &cmd->params.auth;
294 int ret = -1;
295 u8 *bssid = pdata_buf;
296 DECLARE_MAC_BUF(mac);
297
298 lbs_deb_enter(LBS_DEB_JOIN);
299
300 cmd->command = cpu_to_le16(CMD_802_11_AUTHENTICATE);
301 cmd->size = cpu_to_le16(sizeof(struct cmd_ds_802_11_authenticate)
302 + S_DS_GEN);
303
304 /* translate auth mode to 802.11 defined wire value */
305 switch (priv->secinfo.auth_mode) {
306 case IW_AUTH_ALG_OPEN_SYSTEM:
307 pauthenticate->authtype = 0x00;
308 break;
309 case IW_AUTH_ALG_SHARED_KEY:
310 pauthenticate->authtype = 0x01;
311 break;
312 case IW_AUTH_ALG_LEAP:
313 pauthenticate->authtype = 0x80;
314 break;
315 default:
316 lbs_deb_join("AUTH_CMD: invalid auth alg 0x%X\n",
317 priv->secinfo.auth_mode);
318 goto out;
319 }
320
321 memcpy(pauthenticate->macaddr, bssid, ETH_ALEN);
322
323 lbs_deb_join("AUTH_CMD: BSSID %s, auth 0x%x\n",
324 print_mac(mac, bssid), pauthenticate->authtype);
325 ret = 0;
326
327out:
328 lbs_deb_leave_args(LBS_DEB_JOIN, "ret %d", ret);
329 return ret;
330}
331
332int lbs_cmd_80211_deauthenticate(struct lbs_private *priv,
333 struct cmd_ds_command *cmd)
334{
335 struct cmd_ds_802_11_deauthenticate *dauth = &cmd->params.deauth;
336
337 lbs_deb_enter(LBS_DEB_JOIN);
338
339 cmd->command = cpu_to_le16(CMD_802_11_DEAUTHENTICATE);
340 cmd->size = cpu_to_le16(sizeof(struct cmd_ds_802_11_deauthenticate) +
341 S_DS_GEN);
342
343 /* set AP MAC address */
344 memmove(dauth->macaddr, priv->curbssparams.bssid, ETH_ALEN);
345
346 /* Reason code 3 = Station is leaving */
347#define REASON_CODE_STA_LEAVING 3
348 dauth->reasoncode = cpu_to_le16(REASON_CODE_STA_LEAVING);
349
350 lbs_deb_leave(LBS_DEB_JOIN);
351 return 0;
352}
353
354int lbs_cmd_80211_associate(struct lbs_private *priv,
355 struct cmd_ds_command *cmd, void *pdata_buf)
356{
357 struct cmd_ds_802_11_associate *passo = &cmd->params.associate;
358 int ret = 0;
359 struct assoc_request * assoc_req = pdata_buf;
360 struct bss_descriptor * bss = &assoc_req->bss;
361 u8 *pos;
362 u16 tmpcap, tmplen;
363 struct mrvlietypes_ssidparamset *ssid;
364 struct mrvlietypes_phyparamset *phy;
365 struct mrvlietypes_ssparamset *ss;
366 struct mrvlietypes_ratesparamset *rates;
367 struct mrvlietypes_rsnparamset *rsn;
368
369 lbs_deb_enter(LBS_DEB_ASSOC);
370
371 pos = (u8 *) passo;
372
373 if (!priv) {
374 ret = -1;
375 goto done;
376 }
377
378 cmd->command = cpu_to_le16(CMD_802_11_ASSOCIATE);
379
380 memcpy(passo->peerstaaddr, bss->bssid, sizeof(passo->peerstaaddr));
381 pos += sizeof(passo->peerstaaddr);
382
383 /* set the listen interval */
384 passo->listeninterval = cpu_to_le16(MRVDRV_DEFAULT_LISTEN_INTERVAL);
385
386 pos += sizeof(passo->capability);
387 pos += sizeof(passo->listeninterval);
388 pos += sizeof(passo->bcnperiod);
389 pos += sizeof(passo->dtimperiod);
390
391 ssid = (struct mrvlietypes_ssidparamset *) pos;
392 ssid->header.type = cpu_to_le16(TLV_TYPE_SSID);
393 tmplen = bss->ssid_len;
394 ssid->header.len = cpu_to_le16(tmplen);
395 memcpy(ssid->ssid, bss->ssid, tmplen);
396 pos += sizeof(ssid->header) + tmplen;
397
398 phy = (struct mrvlietypes_phyparamset *) pos;
399 phy->header.type = cpu_to_le16(TLV_TYPE_PHY_DS);
400 tmplen = sizeof(phy->fh_ds.dsparamset);
401 phy->header.len = cpu_to_le16(tmplen);
402 memcpy(&phy->fh_ds.dsparamset,
403 &bss->phyparamset.dsparamset.currentchan,
404 tmplen);
405 pos += sizeof(phy->header) + tmplen;
406
407 ss = (struct mrvlietypes_ssparamset *) pos;
408 ss->header.type = cpu_to_le16(TLV_TYPE_CF);
409 tmplen = sizeof(ss->cf_ibss.cfparamset);
410 ss->header.len = cpu_to_le16(tmplen);
411 pos += sizeof(ss->header) + tmplen;
412
413 rates = (struct mrvlietypes_ratesparamset *) pos;
414 rates->header.type = cpu_to_le16(TLV_TYPE_RATES);
415 memcpy(&rates->rates, &bss->rates, MAX_RATES);
416 tmplen = MAX_RATES;
417 if (get_common_rates(priv, rates->rates, &tmplen)) {
418 ret = -1;
419 goto done;
420 }
421 pos += sizeof(rates->header) + tmplen;
422 rates->header.len = cpu_to_le16(tmplen);
423 lbs_deb_assoc("ASSOC_CMD: num rates %u\n", tmplen);
424
425 /* Copy the infra. association rates into Current BSS state structure */
426 memset(&priv->curbssparams.rates, 0, sizeof(priv->curbssparams.rates));
427 memcpy(&priv->curbssparams.rates, &rates->rates, tmplen);
428
429 /* Set MSB on basic rates as the firmware requires, but _after_
430 * copying to current bss rates.
431 */
432 lbs_set_basic_rate_flags(rates->rates, tmplen);
433
434 if (assoc_req->secinfo.WPAenabled || assoc_req->secinfo.WPA2enabled) {
435 rsn = (struct mrvlietypes_rsnparamset *) pos;
436 /* WPA_IE or WPA2_IE */
437 rsn->header.type = cpu_to_le16((u16) assoc_req->wpa_ie[0]);
438 tmplen = (u16) assoc_req->wpa_ie[1];
439 rsn->header.len = cpu_to_le16(tmplen);
440 memcpy(rsn->rsnie, &assoc_req->wpa_ie[2], tmplen);
441 lbs_deb_hex(LBS_DEB_JOIN, "ASSOC_CMD: RSN IE", (u8 *) rsn,
442 sizeof(rsn->header) + tmplen);
443 pos += sizeof(rsn->header) + tmplen;
444 }
445
446 /* update curbssparams */
447 priv->curbssparams.channel = bss->phyparamset.dsparamset.currentchan;
448
449 if (lbs_parse_dnld_countryinfo_11d(priv, bss)) {
450 ret = -1;
451 goto done;
452 }
453
454 cmd->size = cpu_to_le16((u16) (pos - (u8 *) passo) + S_DS_GEN);
455
456 /* set the capability info */
457 tmpcap = (bss->capability & CAPINFO_MASK);
458 if (bss->mode == IW_MODE_INFRA)
459 tmpcap |= WLAN_CAPABILITY_ESS;
460 passo->capability = cpu_to_le16(tmpcap);
461 lbs_deb_assoc("ASSOC_CMD: capability 0x%04x\n", tmpcap);
462
463done:
464 lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret);
465 return ret;
466}
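lbs_cmd_80211_associate() above builds the association request as a series of Marvell type-length-value (TLV) parameter sets appended after the fixed fields, advancing a cursor past each header plus payload. A standalone sketch of that packing pattern (hypothetical type codes and values, host byte order instead of cpu_to_le16 for brevity; not the driver's structures):

/* Pack (type, length, payload) blocks into a buffer and return the cursor
 * past the last one, mirroring how 'pos' advances above. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct tlv_header {
        uint16_t type;
        uint16_t len;
} __attribute__((packed));

static uint8_t *pack_tlv(uint8_t *pos, uint16_t type,
                         const void *payload, uint16_t len)
{
        struct tlv_header hdr = { .type = type, .len = len };

        memcpy(pos, &hdr, sizeof(hdr));
        memcpy(pos + sizeof(hdr), payload, len);
        return pos + sizeof(hdr) + len;         /* next free byte */
}

int main(void)
{
        uint8_t buf[64];
        uint8_t *pos = buf;
        const char ssid[] = "example-ssid";     /* hypothetical */
        uint8_t channel = 6;

        pos = pack_tlv(pos, 0x0000, ssid, sizeof(ssid) - 1);    /* SSID-style TLV */
        pos = pack_tlv(pos, 0x0003, &channel, 1);               /* DS/channel-style TLV */

        printf("packed %zu bytes of TLVs\n", (size_t)(pos - buf));
        return 0;
}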
467
468int lbs_cmd_80211_ad_hoc_start(struct lbs_private *priv,
469 struct cmd_ds_command *cmd, void *pdata_buf)
470{
471 struct cmd_ds_802_11_ad_hoc_start *adhs = &cmd->params.ads;
472 int ret = 0;
473 int cmdappendsize = 0;
474 struct assoc_request * assoc_req = pdata_buf;
475 u16 tmpcap = 0;
476 size_t ratesize = 0;
477
478 lbs_deb_enter(LBS_DEB_JOIN);
479
480 if (!priv) {
481 ret = -1;
482 goto done;
483 }
484
485 cmd->command = cpu_to_le16(CMD_802_11_AD_HOC_START);
486
487 /*
488 * Fill in the parameters for 2 data structures:
489 * 1. cmd_ds_802_11_ad_hoc_start command
490 * 2. priv->scantable[i]
491 *
492 * Driver will fill up SSID, bsstype,IBSS param, Physical Param,
493 * probe delay, and cap info.
494 *
495 * Firmware will fill up beacon period, DTIM, Basic rates
496 * and operational rates.
497 */
498
499 memset(adhs->ssid, 0, IW_ESSID_MAX_SIZE);
500 memcpy(adhs->ssid, assoc_req->ssid, assoc_req->ssid_len);
501
502 lbs_deb_join("ADHOC_S_CMD: SSID '%s', ssid length %u\n",
503 escape_essid(assoc_req->ssid, assoc_req->ssid_len),
504 assoc_req->ssid_len);
505
506 /* set the BSS type */
507 adhs->bsstype = CMD_BSS_TYPE_IBSS;
508 priv->mode = IW_MODE_ADHOC;
509 if (priv->beacon_period == 0)
510 priv->beacon_period = MRVDRV_BEACON_INTERVAL;
511 adhs->beaconperiod = cpu_to_le16(priv->beacon_period);
512
513 /* set Physical param set */
514#define DS_PARA_IE_ID 3
515#define DS_PARA_IE_LEN 1
516
517 adhs->phyparamset.dsparamset.elementid = DS_PARA_IE_ID;
518 adhs->phyparamset.dsparamset.len = DS_PARA_IE_LEN;
519
520 WARN_ON(!assoc_req->channel);
521
522 lbs_deb_join("ADHOC_S_CMD: Creating ADHOC on channel %d\n",
523 assoc_req->channel);
524
525 adhs->phyparamset.dsparamset.currentchan = assoc_req->channel;
526
527 /* set IBSS param set */
528#define IBSS_PARA_IE_ID 6
529#define IBSS_PARA_IE_LEN 2
530
531 adhs->ssparamset.ibssparamset.elementid = IBSS_PARA_IE_ID;
532 adhs->ssparamset.ibssparamset.len = IBSS_PARA_IE_LEN;
533 adhs->ssparamset.ibssparamset.atimwindow = 0;
534
535 /* set capability info */
536 tmpcap = WLAN_CAPABILITY_IBSS;
537 if (assoc_req->secinfo.wep_enabled) {
538 lbs_deb_join("ADHOC_S_CMD: WEP enabled, setting privacy on\n");
539 tmpcap |= WLAN_CAPABILITY_PRIVACY;
540 } else {
541 lbs_deb_join("ADHOC_S_CMD: WEP disabled, setting privacy off\n");
542 }
543 adhs->capability = cpu_to_le16(tmpcap);
544
545 /* probedelay */
546 adhs->probedelay = cpu_to_le16(CMD_SCAN_PROBE_DELAY_TIME);
547
548 memset(adhs->rates, 0, sizeof(adhs->rates));
549 ratesize = min(sizeof(adhs->rates), sizeof(lbs_bg_rates));
550 memcpy(adhs->rates, lbs_bg_rates, ratesize);
551
552 /* Copy the ad-hoc creating rates into Current BSS state structure */
553 memset(&priv->curbssparams.rates, 0, sizeof(priv->curbssparams.rates));
554 memcpy(&priv->curbssparams.rates, &adhs->rates, ratesize);
555
556 /* Set MSB on basic rates as the firmware requires, but _after_
557 * copying to current bss rates.
558 */
559 lbs_set_basic_rate_flags(adhs->rates, ratesize);
560
561 lbs_deb_join("ADHOC_S_CMD: rates=%02x %02x %02x %02x \n",
562 adhs->rates[0], adhs->rates[1], adhs->rates[2], adhs->rates[3]);
563
564 lbs_deb_join("ADHOC_S_CMD: AD HOC Start command is ready\n");
565
566 if (lbs_create_dnld_countryinfo_11d(priv)) {
567 lbs_deb_join("ADHOC_S_CMD: dnld_countryinfo_11d failed\n");
568 ret = -1;
569 goto done;
570 }
571
572 cmd->size = cpu_to_le16(sizeof(struct cmd_ds_802_11_ad_hoc_start) +
573 S_DS_GEN + cmdappendsize);
574
575 ret = 0;
576done:
577 lbs_deb_leave_args(LBS_DEB_JOIN, "ret %d", ret);
578 return ret;
579}
580
581int lbs_cmd_80211_ad_hoc_stop(struct lbs_private *priv,
582 struct cmd_ds_command *cmd)
583{
584 cmd->command = cpu_to_le16(CMD_802_11_AD_HOC_STOP);
585 cmd->size = cpu_to_le16(S_DS_GEN);
586
587 return 0;
588}
589
590int lbs_cmd_80211_ad_hoc_join(struct lbs_private *priv,
591 struct cmd_ds_command *cmd, void *pdata_buf)
592{
593 struct cmd_ds_802_11_ad_hoc_join *join_cmd = &cmd->params.adj;
594 struct assoc_request * assoc_req = pdata_buf;
595 struct bss_descriptor *bss = &assoc_req->bss;
596 int cmdappendsize = 0;
597 int ret = 0;
598 u16 ratesize = 0;
599 DECLARE_MAC_BUF(mac);
600
601 lbs_deb_enter(LBS_DEB_JOIN);
602
603 cmd->command = cpu_to_le16(CMD_802_11_AD_HOC_JOIN);
604
605 join_cmd->bss.type = CMD_BSS_TYPE_IBSS;
606 join_cmd->bss.beaconperiod = cpu_to_le16(bss->beaconperiod);
607
608 memcpy(&join_cmd->bss.bssid, &bss->bssid, ETH_ALEN);
609 memcpy(&join_cmd->bss.ssid, &bss->ssid, bss->ssid_len);
610
611 memcpy(&join_cmd->bss.phyparamset, &bss->phyparamset,
612 sizeof(union ieeetypes_phyparamset));
613
614 memcpy(&join_cmd->bss.ssparamset, &bss->ssparamset,
615 sizeof(union IEEEtypes_ssparamset));
616
617 join_cmd->bss.capability = cpu_to_le16(bss->capability & CAPINFO_MASK);
618 lbs_deb_join("ADHOC_J_CMD: tmpcap=%4X CAPINFO_MASK=%4X\n",
619 bss->capability, CAPINFO_MASK);
620
621 /* information on BSSID descriptor passed to FW */
622 lbs_deb_join(
623 "ADHOC_J_CMD: BSSID = %s, SSID = '%s'\n",
624 print_mac(mac, join_cmd->bss.bssid),
625 join_cmd->bss.ssid);
626
627 /* failtimeout */
628 join_cmd->failtimeout = cpu_to_le16(MRVDRV_ASSOCIATION_TIME_OUT);
629
630 /* probedelay */
631 join_cmd->probedelay = cpu_to_le16(CMD_SCAN_PROBE_DELAY_TIME);
632
633 priv->curbssparams.channel = bss->channel;
634
635 /* Copy Data rates from the rates recorded in scan response */
636 memset(join_cmd->bss.rates, 0, sizeof(join_cmd->bss.rates));
637 ratesize = min_t(u16, sizeof(join_cmd->bss.rates), MAX_RATES);
638 memcpy(join_cmd->bss.rates, bss->rates, ratesize);
639 if (get_common_rates(priv, join_cmd->bss.rates, &ratesize)) {
640 lbs_deb_join("ADHOC_J_CMD: get_common_rates returns error.\n");
641 ret = -1;
642 goto done;
643 }
644
645 /* Copy the ad-hoc creating rates into Current BSS state structure */
646 memset(&priv->curbssparams.rates, 0, sizeof(priv->curbssparams.rates));
647 memcpy(&priv->curbssparams.rates, join_cmd->bss.rates, ratesize);
648
649 /* Set MSB on basic rates as the firmware requires, but _after_
650 * copying to current bss rates.
651 */
652 lbs_set_basic_rate_flags(join_cmd->bss.rates, ratesize);
653
654 join_cmd->bss.ssparamset.ibssparamset.atimwindow =
655 cpu_to_le16(bss->atimwindow);
656
657 if (assoc_req->secinfo.wep_enabled) {
658 u16 tmp = le16_to_cpu(join_cmd->bss.capability);
659 tmp |= WLAN_CAPABILITY_PRIVACY;
660 join_cmd->bss.capability = cpu_to_le16(tmp);
661 }
662
663 if (priv->psmode == LBS802_11POWERMODEMAX_PSP) {
664 /* wake up first */
665 __le32 Localpsmode;
666
667 Localpsmode = cpu_to_le32(LBS802_11POWERMODECAM);
668 ret = lbs_prepare_and_send_command(priv,
669 CMD_802_11_PS_MODE,
670 CMD_ACT_SET,
671 0, 0, &Localpsmode);
672
673 if (ret) {
674 ret = -1;
675 goto done;
676 }
677 }
678
679 if (lbs_parse_dnld_countryinfo_11d(priv, bss)) {
680 ret = -1;
681 goto done;
682 }
683
684 cmd->size = cpu_to_le16(sizeof(struct cmd_ds_802_11_ad_hoc_join) +
685 S_DS_GEN + cmdappendsize);
686
687done:
688 lbs_deb_leave_args(LBS_DEB_JOIN, "ret %d", ret);
689 return ret;
690}
691
692int lbs_ret_80211_associate(struct lbs_private *priv,
693 struct cmd_ds_command *resp)
694{
695 int ret = 0;
696 union iwreq_data wrqu;
697 struct ieeetypes_assocrsp *passocrsp;
698 struct bss_descriptor * bss;
699 u16 status_code;
700
701 lbs_deb_enter(LBS_DEB_ASSOC);
702
703 if (!priv->in_progress_assoc_req) {
704 lbs_deb_assoc("ASSOC_RESP: no in-progress assoc request\n");
705 ret = -1;
706 goto done;
707 }
708 bss = &priv->in_progress_assoc_req->bss;
709
710 passocrsp = (struct ieeetypes_assocrsp *) & resp->params;
711
712 /*
713 * Older FW versions map the IEEE 802.11 Status Code in the association
714 * response to the following values returned in passocrsp->statuscode:
715 *
716 * IEEE Status Code Marvell Status Code
717 * 0 -> 0x0000 ASSOC_RESULT_SUCCESS
718 * 13 -> 0x0004 ASSOC_RESULT_AUTH_REFUSED
719 * 14 -> 0x0004 ASSOC_RESULT_AUTH_REFUSED
720 * 15 -> 0x0004 ASSOC_RESULT_AUTH_REFUSED
721 * 16 -> 0x0004 ASSOC_RESULT_AUTH_REFUSED
722 * others -> 0x0003 ASSOC_RESULT_REFUSED
723 *
724 * Other response codes:
725 * 0x0001 -> ASSOC_RESULT_INVALID_PARAMETERS (unused)
726 * 0x0002 -> ASSOC_RESULT_TIMEOUT (internal timer expired waiting for
727 * association response from the AP)
728 */
729
730 status_code = le16_to_cpu(passocrsp->statuscode);
731 switch (status_code) {
732 case 0x00:
733 break;
734 case 0x01:
735 lbs_deb_assoc("ASSOC_RESP: invalid parameters\n");
736 break;
737 case 0x02:
738 lbs_deb_assoc("ASSOC_RESP: internal timer "
739 "expired while waiting for the AP\n");
740 break;
741 case 0x03:
742 lbs_deb_assoc("ASSOC_RESP: association "
743 "refused by AP\n");
744 break;
745 case 0x04:
746 lbs_deb_assoc("ASSOC_RESP: authentication "
747 "refused by AP\n");
748 break;
749 default:
750 lbs_deb_assoc("ASSOC_RESP: failure reason 0x%02x "
751 " unknown\n", status_code);
752 break;
753 }
754
755 if (status_code) {
756 lbs_mac_event_disconnected(priv);
757 ret = -1;
758 goto done;
759 }
760
761 lbs_deb_hex(LBS_DEB_ASSOC, "ASSOC_RESP", (void *)&resp->params,
762 le16_to_cpu(resp->size) - S_DS_GEN);
763
764 /* Send a Media Connected event, according to the Spec */
765 priv->connect_status = LBS_CONNECTED;
766
767 /* Update current SSID and BSSID */
768 memcpy(&priv->curbssparams.ssid, &bss->ssid, IW_ESSID_MAX_SIZE);
769 priv->curbssparams.ssid_len = bss->ssid_len;
770 memcpy(priv->curbssparams.bssid, bss->bssid, ETH_ALEN);
771
772 lbs_deb_assoc("ASSOC_RESP: currentpacketfilter is 0x%x\n",
773 priv->currentpacketfilter);
774
775 priv->SNR[TYPE_RXPD][TYPE_AVG] = 0;
776 priv->NF[TYPE_RXPD][TYPE_AVG] = 0;
777
778 memset(priv->rawSNR, 0x00, sizeof(priv->rawSNR));
779 memset(priv->rawNF, 0x00, sizeof(priv->rawNF));
780 priv->nextSNRNF = 0;
781 priv->numSNRNF = 0;
782
783 netif_carrier_on(priv->dev);
784 if (!priv->tx_pending_len)
785 netif_wake_queue(priv->dev);
786
787 memcpy(wrqu.ap_addr.sa_data, priv->curbssparams.bssid, ETH_ALEN);
788 wrqu.ap_addr.sa_family = ARPHRD_ETHER;
789 wireless_send_event(priv->dev, SIOCGIWAP, &wrqu, NULL);
790
791done:
792 lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret);
793 return ret;
794}
795
796int lbs_ret_80211_disassociate(struct lbs_private *priv,
797 struct cmd_ds_command *resp)
798{
799 lbs_deb_enter(LBS_DEB_JOIN);
800
801 lbs_mac_event_disconnected(priv);
802
803 lbs_deb_leave(LBS_DEB_JOIN);
804 return 0;
805}
806
807int lbs_ret_80211_ad_hoc_start(struct lbs_private *priv,
808 struct cmd_ds_command *resp)
809{
810 int ret = 0;
811 u16 command = le16_to_cpu(resp->command);
812 u16 result = le16_to_cpu(resp->result);
813 struct cmd_ds_802_11_ad_hoc_result *padhocresult;
814 union iwreq_data wrqu;
815 struct bss_descriptor *bss;
816 DECLARE_MAC_BUF(mac);
817
818 lbs_deb_enter(LBS_DEB_JOIN);
819
820 padhocresult = &resp->params.result;
821
822 lbs_deb_join("ADHOC_RESP: size = %d\n", le16_to_cpu(resp->size));
823 lbs_deb_join("ADHOC_RESP: command = %x\n", command);
824 lbs_deb_join("ADHOC_RESP: result = %x\n", result);
825
826 if (!priv->in_progress_assoc_req) {
827 lbs_deb_join("ADHOC_RESP: no in-progress association request\n");
828 ret = -1;
829 goto done;
830 }
831 bss = &priv->in_progress_assoc_req->bss;
832
833 /*
834 * Join result code 0 --> SUCCESS
835 */
836 if (result) {
837 lbs_deb_join("ADHOC_RESP: failed\n");
838 if (priv->connect_status == LBS_CONNECTED) {
839 lbs_mac_event_disconnected(priv);
840 }
841 ret = -1;
842 goto done;
843 }
844
845 /*
846 * Now the join cmd should be successful
847 * If BSSID has changed use SSID to compare instead of BSSID
848 */
849 lbs_deb_join("ADHOC_RESP: associated to '%s'\n",
850 escape_essid(bss->ssid, bss->ssid_len));
851
852 /* Send a Media Connected event, according to the Spec */
853 priv->connect_status = LBS_CONNECTED;
854
855 if (command == CMD_RET(CMD_802_11_AD_HOC_START)) {
856 /* Update the created network descriptor with the new BSSID */
857 memcpy(bss->bssid, padhocresult->bssid, ETH_ALEN);
858 }
859
860 /* Set the BSSID from the joined/started descriptor */
861 memcpy(&priv->curbssparams.bssid, bss->bssid, ETH_ALEN);
862
863 /* Set the new SSID to current SSID */
864 memcpy(&priv->curbssparams.ssid, &bss->ssid, IW_ESSID_MAX_SIZE);
865 priv->curbssparams.ssid_len = bss->ssid_len;
866
867 netif_carrier_on(priv->dev);
868 if (!priv->tx_pending_len)
869 netif_wake_queue(priv->dev);
870
871 memset(&wrqu, 0, sizeof(wrqu));
872 memcpy(wrqu.ap_addr.sa_data, priv->curbssparams.bssid, ETH_ALEN);
873 wrqu.ap_addr.sa_family = ARPHRD_ETHER;
874 wireless_send_event(priv->dev, SIOCGIWAP, &wrqu, NULL);
875
876 lbs_deb_join("ADHOC_RESP: - Joined/Started Ad Hoc\n");
877 lbs_deb_join("ADHOC_RESP: channel = %d\n", priv->curbssparams.channel);
878 lbs_deb_join("ADHOC_RESP: BSSID = %s\n",
879 print_mac(mac, padhocresult->bssid));
880
881done:
882 lbs_deb_leave_args(LBS_DEB_JOIN, "ret %d", ret);
883 return ret;
884}
885
886int lbs_ret_80211_ad_hoc_stop(struct lbs_private *priv,
887 struct cmd_ds_command *resp)
888{
889 lbs_deb_enter(LBS_DEB_JOIN);
890
891 lbs_mac_event_disconnected(priv);
892
893 lbs_deb_leave(LBS_DEB_JOIN);
894 return 0;
895}
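join.c above also carried the basic-rate flag helpers (lbs_set_basic_rate_flags()/lbs_unset_basic_rate_flags()); the unsetter reappears in scan.c later in this patch. A standalone sketch of that MSB round trip with hypothetical data (illustrative only):

/* The firmware wants bit 7 set on the 802.11b basic rates (1/2/5.5/11
 * Mbit/s) and cleared again before the rates are reported elsewhere. */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

static void set_basic_rate_flags(uint8_t *rates, size_t len)
{
        for (size_t i = 0; i < len; i++)
                if (rates[i] == 0x02 || rates[i] == 0x04 ||
                    rates[i] == 0x0b || rates[i] == 0x16)
                        rates[i] |= 0x80;
}

static void unset_basic_rate_flags(uint8_t *rates, size_t len)
{
        for (size_t i = 0; i < len; i++)
                rates[i] &= 0x7f;
}

int main(void)
{
        uint8_t rates[] = { 0x02, 0x04, 0x0b, 0x16, 0x30 };     /* hypothetical */

        set_basic_rate_flags(rates, sizeof(rates));
        for (size_t i = 0; i < sizeof(rates); i++)
                printf("0x%02x ", rates[i]);    /* 0x82 0x84 0x8b 0x96 0x30 */
        printf("\n");

        unset_basic_rate_flags(rates, sizeof(rates));
        return 0;
}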
diff --git a/drivers/net/wireless/libertas/join.h b/drivers/net/wireless/libertas/join.h
deleted file mode 100644
index c617d071f781..000000000000
--- a/drivers/net/wireless/libertas/join.h
+++ /dev/null
@@ -1,53 +0,0 @@
1/**
2 * Interface for the wlan infrastructure and adhoc join routines
3 *
4 * Driver interface functions and type declarations for the join module
5 * implemented in join.c. Process all start/join requests for
6 * both adhoc and infrastructure networks
7 */
8#ifndef _LBS_JOIN_H
9#define _LBS_JOIN_H
10
11#include "defs.h"
12#include "dev.h"
13
14struct cmd_ds_command;
15int lbs_cmd_80211_authenticate(struct lbs_private *priv,
16 struct cmd_ds_command *cmd,
17 void *pdata_buf);
18int lbs_cmd_80211_ad_hoc_join(struct lbs_private *priv,
19 struct cmd_ds_command *cmd,
20 void *pdata_buf);
21int lbs_cmd_80211_ad_hoc_stop(struct lbs_private *priv,
22 struct cmd_ds_command *cmd);
23int lbs_cmd_80211_ad_hoc_start(struct lbs_private *priv,
24 struct cmd_ds_command *cmd,
25 void *pdata_buf);
26int lbs_cmd_80211_deauthenticate(struct lbs_private *priv,
27 struct cmd_ds_command *cmd);
28int lbs_cmd_80211_associate(struct lbs_private *priv,
29 struct cmd_ds_command *cmd,
30 void *pdata_buf);
31
32int lbs_ret_80211_ad_hoc_start(struct lbs_private *priv,
33 struct cmd_ds_command *resp);
34int lbs_ret_80211_ad_hoc_stop(struct lbs_private *priv,
35 struct cmd_ds_command *resp);
36int lbs_ret_80211_disassociate(struct lbs_private *priv,
37 struct cmd_ds_command *resp);
38int lbs_ret_80211_associate(struct lbs_private *priv,
39 struct cmd_ds_command *resp);
40
41int lbs_start_adhoc_network(struct lbs_private *priv,
42 struct assoc_request * assoc_req);
43int lbs_join_adhoc_network(struct lbs_private *priv,
44 struct assoc_request * assoc_req);
45int lbs_stop_adhoc_network(struct lbs_private *priv);
46
47int lbs_send_deauthentication(struct lbs_private *priv);
48
49int lbs_associate(struct lbs_private *priv, struct assoc_request *assoc_req);
50
51void lbs_unset_basic_rate_flags(u8 *rates, size_t len);
52
53#endif
diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c
index 4d4e2f3b66ac..406f54d40956 100644
--- a/drivers/net/wireless/libertas/main.c
+++ b/drivers/net/wireless/libertas/main.c
@@ -10,6 +10,7 @@
 #include <linux/netdevice.h>
 #include <linux/if_arp.h>
 #include <linux/kthread.h>
+#include <linux/kfifo.h>
 
 #include <net/iw_handler.h>
 #include <net/ieee80211.h>
@@ -19,8 +20,8 @@
 #include "dev.h"
 #include "wext.h"
 #include "debugfs.h"
+#include "scan.h"
 #include "assoc.h"
-#include "join.h"
 #include "cmd.h"
 
 #define DRIVER_RELEASE_VERSION "323.p0"
@@ -37,6 +38,11 @@ EXPORT_SYMBOL_GPL(lbs_debug);
 module_param_named(libertas_debug, lbs_debug, int, 0644);
 
 
+/* This global structure is used to send the confirm_sleep command as
+ * fast as possible down to the firmware. */
+struct cmd_confirm_sleep confirm_sleep;
+
+
 #define LBS_TX_PWR_DEFAULT        20    /*100mW */
 #define LBS_TX_PWR_US_DEFAULT     20    /*100mW */
 #define LBS_TX_PWR_JP_DEFAULT     16    /*50mW */
@@ -277,10 +283,10 @@ static ssize_t lbs_rtap_set(struct device *dev,
 	struct lbs_private *priv = to_net_dev(dev)->priv;
 
 	sscanf(buf, "%x", &monitor_mode);
-	if (monitor_mode != LBS_MONITOR_OFF) {
-		if(priv->monitormode == monitor_mode)
+	if (monitor_mode) {
+		if (priv->monitormode == monitor_mode)
 			return strlen(buf);
-		if (priv->monitormode == LBS_MONITOR_OFF) {
+		if (!priv->monitormode) {
 			if (priv->infra_open || priv->mesh_open)
 				return -EBUSY;
 			if (priv->mode == IW_MODE_INFRA)
@@ -293,9 +299,9 @@
 	}
 
 	else {
-		if (priv->monitormode == LBS_MONITOR_OFF)
+		if (!priv->monitormode)
 			return strlen(buf);
-		priv->monitormode = LBS_MONITOR_OFF;
+		priv->monitormode = 0;
 		lbs_remove_rtap(priv);
 
 		if (priv->currenttxskb) {
@@ -392,7 +398,7 @@ static int lbs_dev_open(struct net_device *dev)
 
 	spin_lock_irq(&priv->driver_lock);
 
-	if (priv->monitormode != LBS_MONITOR_OFF) {
+	if (priv->monitormode) {
 		ret = -EBUSY;
 		goto out;
 	}
@@ -475,10 +481,9 @@ static void lbs_tx_timeout(struct net_device *dev)
 
 	dev->trans_start = jiffies;
 
-	if (priv->currenttxskb) {
-		priv->eventcause = 0x01000000;
-		lbs_send_tx_feedback(priv);
-	}
+	if (priv->currenttxskb)
+		lbs_send_tx_feedback(priv, 0);
+
 	/* XX: Shouldn't we also call into the hw-specific driver
 	   to kick it somehow? */
 	lbs_host_to_card_done(priv);
@@ -531,34 +536,27 @@ static int lbs_set_mac_address(struct net_device *dev, void *addr)
 	int ret = 0;
 	struct lbs_private *priv = (struct lbs_private *) dev->priv;
 	struct sockaddr *phwaddr = addr;
+	struct cmd_ds_802_11_mac_address cmd;
 
 	lbs_deb_enter(LBS_DEB_NET);
 
 	/* In case it was called from the mesh device */
-	dev = priv->dev ;
-
-	memset(priv->current_addr, 0, ETH_ALEN);
-
-	/* dev->dev_addr is 8 bytes */
-	lbs_deb_hex(LBS_DEB_NET, "dev->dev_addr", dev->dev_addr, ETH_ALEN);
-
-	lbs_deb_hex(LBS_DEB_NET, "addr", phwaddr->sa_data, ETH_ALEN);
-	memcpy(priv->current_addr, phwaddr->sa_data, ETH_ALEN);
+	dev = priv->dev;
 
-	ret = lbs_prepare_and_send_command(priv, CMD_802_11_MAC_ADDRESS,
-				    CMD_ACT_SET,
-				    CMD_OPTION_WAITFORRSP, 0, NULL);
+	cmd.hdr.size = cpu_to_le16(sizeof(cmd));
+	cmd.action = cpu_to_le16(CMD_ACT_SET);
+	memcpy(cmd.macadd, phwaddr->sa_data, ETH_ALEN);
 
+	ret = lbs_cmd_with_response(priv, CMD_802_11_MAC_ADDRESS, &cmd);
 	if (ret) {
 		lbs_deb_net("set MAC address failed\n");
-		ret = -1;
 		goto done;
 	}
 
-	lbs_deb_hex(LBS_DEB_NET, "priv->macaddr", priv->current_addr, ETH_ALEN);
-	memcpy(dev->dev_addr, priv->current_addr, ETH_ALEN);
+	memcpy(priv->current_addr, phwaddr->sa_data, ETH_ALEN);
+	memcpy(dev->dev_addr, phwaddr->sa_data, ETH_ALEN);
 	if (priv->mesh_dev)
-		memcpy(priv->mesh_dev->dev_addr, priv->current_addr, ETH_ALEN);
+		memcpy(priv->mesh_dev->dev_addr, phwaddr->sa_data, ETH_ALEN);
 
 done:
 	lbs_deb_leave_args(LBS_DEB_NET, "ret %d", ret);
@@ -581,45 +579,45 @@ static int lbs_copy_multicast_address(struct lbs_private *priv,
 static void lbs_set_multicast_list(struct net_device *dev)
 {
 	struct lbs_private *priv = dev->priv;
-	int oldpacketfilter;
+	int old_mac_control;
 	DECLARE_MAC_BUF(mac);
 
 	lbs_deb_enter(LBS_DEB_NET);
 
-	oldpacketfilter = priv->currentpacketfilter;
+	old_mac_control = priv->mac_control;
 
 	if (dev->flags & IFF_PROMISC) {
 		lbs_deb_net("enable promiscuous mode\n");
-		priv->currentpacketfilter |=
+		priv->mac_control |=
 			CMD_ACT_MAC_PROMISCUOUS_ENABLE;
-		priv->currentpacketfilter &=
+		priv->mac_control &=
 			~(CMD_ACT_MAC_ALL_MULTICAST_ENABLE |
 			  CMD_ACT_MAC_MULTICAST_ENABLE);
 	} else {
 		/* Multicast */
-		priv->currentpacketfilter &=
+		priv->mac_control &=
 			~CMD_ACT_MAC_PROMISCUOUS_ENABLE;
 
 		if (dev->flags & IFF_ALLMULTI || dev->mc_count >
 		    MRVDRV_MAX_MULTICAST_LIST_SIZE) {
 			lbs_deb_net( "enabling all multicast\n");
-			priv->currentpacketfilter |=
+			priv->mac_control |=
 				CMD_ACT_MAC_ALL_MULTICAST_ENABLE;
-			priv->currentpacketfilter &=
+			priv->mac_control &=
 				~CMD_ACT_MAC_MULTICAST_ENABLE;
 		} else {
-			priv->currentpacketfilter &=
+			priv->mac_control &=
 				~CMD_ACT_MAC_ALL_MULTICAST_ENABLE;
 
 			if (!dev->mc_count) {
 				lbs_deb_net("no multicast addresses, "
 					    "disabling multicast\n");
-				priv->currentpacketfilter &=
+				priv->mac_control &=
 					~CMD_ACT_MAC_MULTICAST_ENABLE;
 			} else {
 				int i;
 
-				priv->currentpacketfilter |=
+				priv->mac_control |=
 					CMD_ACT_MAC_MULTICAST_ENABLE;
 
 				priv->nr_of_multicastmacaddr =
@@ -642,9 +640,8 @@ static void lbs_set_multicast_list(struct net_device *dev)
 		}
 	}
 
-	if (priv->currentpacketfilter != oldpacketfilter) {
-		lbs_set_mac_packet_filter(priv);
-	}
+	if (priv->mac_control != old_mac_control)
+		lbs_set_mac_control(priv);
 
 	lbs_deb_leave(LBS_DEB_NET);
 }
@@ -662,7 +659,6 @@ static int lbs_thread(void *data)
 	struct net_device *dev = data;
 	struct lbs_private *priv = dev->priv;
 	wait_queue_t wait;
-	u8 ireg = 0;
 
 	lbs_deb_enter(LBS_DEB_THREAD);
 
@@ -670,9 +666,10 @@
 
 	for (;;) {
 		int shouldsleep;
+		u8 resp_idx;
 
-		lbs_deb_thread( "main-thread 111: intcounter=%d currenttxskb=%p dnld_sent=%d\n",
-			priv->intcounter, priv->currenttxskb, priv->dnld_sent);
+		lbs_deb_thread("1: currenttxskb %p, dnld_sent %d\n",
+				priv->currenttxskb, priv->dnld_sent);
 
 		add_wait_queue(&priv->waitq, &wait);
 		set_current_state(TASK_INTERRUPTIBLE);
@@ -684,8 +681,6 @@ static int lbs_thread(void *data)
 			shouldsleep = 1;	/* We need to wait until we're _told_ to die */
 		else if (priv->psstate == PS_STATE_SLEEP)
 			shouldsleep = 1;	/* Sleep mode. Nothing we can do till it wakes */
-		else if (priv->intcounter)
-			shouldsleep = 0;	/* Interrupt pending. Deal with it now */
 		else if (priv->cmd_timed_out)
 			shouldsleep = 0;	/* Command timed out. Recover */
 		else if (!priv->fw_ready)
@@ -698,29 +693,34 @@ static int lbs_thread(void *data)
 			shouldsleep = 1;	/* Can't send a command; one already running */
 		else if (!list_empty(&priv->cmdpendingq))
 			shouldsleep = 0;	/* We have a command to send */
+		else if (__kfifo_len(priv->event_fifo))
+			shouldsleep = 0;	/* We have an event to process */
+		else if (priv->resp_len[priv->resp_idx])
+			shouldsleep = 0;	/* We have a command response */
 		else
 			shouldsleep = 1;	/* No command */
 
 		if (shouldsleep) {
-			lbs_deb_thread("main-thread sleeping... Conn=%d IntC=%d PS_mode=%d PS_State=%d\n",
-				priv->connect_status, priv->intcounter,
-				priv->psmode, priv->psstate);
+			lbs_deb_thread("sleeping, connect_status %d, "
+				"ps_mode %d, ps_state %d\n",
+				priv->connect_status,
+				priv->psmode, priv->psstate);
 			spin_unlock_irq(&priv->driver_lock);
 			schedule();
 		} else
 			spin_unlock_irq(&priv->driver_lock);
 
-		lbs_deb_thread("main-thread 222 (waking up): intcounter=%d currenttxskb=%p dnld_sent=%d\n",
-			priv->intcounter, priv->currenttxskb, priv->dnld_sent);
+		lbs_deb_thread("2: currenttxskb %p, dnld_send %d\n",
+			priv->currenttxskb, priv->dnld_sent);
 
 		set_current_state(TASK_RUNNING);
 		remove_wait_queue(&priv->waitq, &wait);
 
-		lbs_deb_thread("main-thread 333: intcounter=%d currenttxskb=%p dnld_sent=%d\n",
-			priv->intcounter, priv->currenttxskb, priv->dnld_sent);
+		lbs_deb_thread("3: currenttxskb %p, dnld_sent %d\n",
+			priv->currenttxskb, priv->dnld_sent);
 
 		if (kthread_should_stop()) {
-			lbs_deb_thread("main-thread: break from main thread\n");
+			lbs_deb_thread("break from main thread\n");
 			break;
 		}
 
@@ -729,35 +729,23 @@ static int lbs_thread(void *data)
 			continue;
 		}
 
-		spin_lock_irq(&priv->driver_lock);
-
-		if (priv->intcounter) {
-			u8 int_status;
-
-			priv->intcounter = 0;
-			int_status = priv->hw_get_int_status(priv, &ireg);
-
-			if (int_status) {
-				lbs_deb_thread("main-thread: reading HOST_INT_STATUS_REG failed\n");
-				spin_unlock_irq(&priv->driver_lock);
-				continue;
-			}
-			priv->hisregcpy |= ireg;
-		}
-
-		lbs_deb_thread("main-thread 444: intcounter=%d currenttxskb=%p dnld_sent=%d\n",
-			priv->intcounter, priv->currenttxskb, priv->dnld_sent);
-
-		/* command response? */
-		if (priv->hisregcpy & MRVDRV_CMD_UPLD_RDY) {
-			lbs_deb_thread("main-thread: cmd response ready\n");
-
-			priv->hisregcpy &= ~MRVDRV_CMD_UPLD_RDY;
-			spin_unlock_irq(&priv->driver_lock);
-			lbs_process_rx_command(priv);
-			spin_lock_irq(&priv->driver_lock);
-		}
+		lbs_deb_thread("4: currenttxskb %p, dnld_sent %d\n",
+			priv->currenttxskb, priv->dnld_sent);
+
+		spin_lock_irq(&priv->driver_lock);
+		/* Process any pending command response */
+		resp_idx = priv->resp_idx;
+		if (priv->resp_len[resp_idx]) {
+			spin_unlock_irq(&priv->driver_lock);
+			lbs_process_command_response(priv,
+				priv->resp_buf[resp_idx],
+				priv->resp_len[resp_idx]);
+			spin_lock_irq(&priv->driver_lock);
+			priv->resp_len[resp_idx] = 0;
+		}
+		spin_unlock_irq(&priv->driver_lock);
 
+		/* command timeout stuff */
 		if (priv->cmd_timed_out && priv->cur_cmd) {
 			struct cmd_ctrl_node *cmdnode = priv->cur_cmd;
 
@@ -778,21 +766,18 @@ static int lbs_thread(void *data)
 		}
 		priv->cmd_timed_out = 0;
 
-		/* Any Card Event */
-		if (priv->hisregcpy & MRVDRV_CARDEVENT) {
-			lbs_deb_thread("main-thread: Card Event Activity\n");
-
-			priv->hisregcpy &= ~MRVDRV_CARDEVENT;
-
-			if (priv->hw_read_event_cause(priv)) {
-				lbs_pr_alert("main-thread: hw_read_event_cause failed\n");
-				spin_unlock_irq(&priv->driver_lock);
-				continue;
-			}
-			spin_unlock_irq(&priv->driver_lock);
-			lbs_process_event(priv);
-		} else
+		/* Process hardware events, e.g. card removed, link lost */
+		spin_lock_irq(&priv->driver_lock);
+		while (__kfifo_len(priv->event_fifo)) {
+			u32 event;
+
+			__kfifo_get(priv->event_fifo, (unsigned char *) &event,
+				sizeof(event));
 			spin_unlock_irq(&priv->driver_lock);
+			lbs_process_event(priv, event);
+			spin_lock_irq(&priv->driver_lock);
+		}
+		spin_unlock_irq(&priv->driver_lock);
 
 		if (!priv->fw_ready)
 			continue;
@@ -801,10 +786,12 @@ static int lbs_thread(void *data)
 		if (priv->psstate == PS_STATE_PRE_SLEEP &&
 		    !priv->dnld_sent && !priv->cur_cmd) {
 			if (priv->connect_status == LBS_CONNECTED) {
-				lbs_deb_thread("main_thread: PRE_SLEEP--intcounter=%d currenttxskb=%p dnld_sent=%d cur_cmd=%p, confirm now\n",
-					priv->intcounter, priv->currenttxskb, priv->dnld_sent, priv->cur_cmd);
+				lbs_deb_thread("pre-sleep, currenttxskb %p, "
+					"dnld_sent %d, cur_cmd %p\n",
+					priv->currenttxskb, priv->dnld_sent,
+					priv->cur_cmd);
 
-				lbs_ps_confirm_sleep(priv, (u16) priv->psmode);
+				lbs_ps_confirm_sleep(priv);
 			} else {
 				/* workaround for firmware sending
 				 * deauth/linkloss event immediately
@@ -812,7 +799,8 @@ static int lbs_thread(void *data)
 				 * after firmware fixes it
 				 */
 				priv->psstate = PS_STATE_AWAKE;
-				lbs_pr_alert("main-thread: ignore PS_SleepConfirm in non-connected state\n");
+				lbs_pr_alert("ignore PS_SleepConfirm in "
+					"non-connected state\n");
 			}
 		}
 
@@ -945,7 +933,7 @@ static int lbs_setup_firmware(struct lbs_private *priv)
 		goto done;
 	}
 
-	lbs_set_mac_packet_filter(priv);
+	lbs_set_mac_control(priv);
 
 	ret = lbs_get_data_rate(priv);
 	if (ret < 0) {
@@ -985,6 +973,18 @@ out:
 	lbs_deb_leave(LBS_DEB_CMD);
 }
 
+static void lbs_sync_channel_worker(struct work_struct *work)
+{
+	struct lbs_private *priv = container_of(work, struct lbs_private,
+		sync_channel);
+
+	lbs_deb_enter(LBS_DEB_MAIN);
+	if (lbs_update_channel(priv))
+		lbs_pr_info("Channel synchronization failed.");
+	lbs_deb_leave(LBS_DEB_MAIN);
+}
+
+
 static int lbs_init_adapter(struct lbs_private *priv)
 {
 	size_t bufsize;
@@ -1009,14 +1009,6 @@ static int lbs_init_adapter(struct lbs_private *priv)
 			     &priv->network_free_list);
 	}
 
-	priv->lbs_ps_confirm_sleep.seqnum = cpu_to_le16(++priv->seqnum);
-	priv->lbs_ps_confirm_sleep.command =
-		cpu_to_le16(CMD_802_11_PS_MODE);
-	priv->lbs_ps_confirm_sleep.size =
-		cpu_to_le16(sizeof(struct PS_CMD_ConfirmSleep));
-	priv->lbs_ps_confirm_sleep.action =
-		cpu_to_le16(CMD_SUBCMD_SLEEP_CONFIRMED);
-
 	memset(priv->current_addr, 0xff, ETH_ALEN);
 
 	priv->connect_status = LBS_DISCONNECTED;
@@ -1024,7 +1016,7 @@ static int lbs_init_adapter(struct lbs_private *priv)
 	priv->secinfo.auth_mode = IW_AUTH_ALG_OPEN_SYSTEM;
 	priv->mode = IW_MODE_INFRA;
 	priv->curbssparams.channel = DEFAULT_AD_HOC_CHANNEL;
-	priv->currentpacketfilter = CMD_ACT_MAC_RX_ON | CMD_ACT_MAC_TX_ON;
+	priv->mac_control = CMD_ACT_MAC_RX_ON | CMD_ACT_MAC_TX_ON;
 	priv->radioon = RADIO_ON;
 	priv->auto_rate = 1;
 	priv->capability = WLAN_CAPABILITY_SHORT_PREAMBLE;
@@ -1045,7 +1037,18 @@ static int lbs_init_adapter(struct lbs_private *priv)
 	/* Allocate the command buffers */
 	if (lbs_allocate_cmd_buffer(priv)) {
 		lbs_pr_err("Out of memory allocating command buffers\n");
-		ret = -1;
+		ret = -ENOMEM;
+		goto out;
+	}
+	priv->resp_idx = 0;
+	priv->resp_len[0] = priv->resp_len[1] = 0;
+
+	/* Create the event FIFO */
+	priv->event_fifo = kfifo_alloc(sizeof(u32) * 16, GFP_KERNEL, NULL);
+	if (IS_ERR(priv->event_fifo)) {
+		lbs_pr_err("Out of memory allocating event FIFO buffer\n");
+		ret = -ENOMEM;
+		goto out;
 	}
 
 out:
@@ -1059,6 +1062,8 @@ static void lbs_free_adapter(struct lbs_private *priv)
 	lbs_deb_enter(LBS_DEB_MAIN);
 
 	lbs_free_cmd_buffer(priv);
+	if (priv->event_fifo)
+		kfifo_free(priv->event_fifo);
 	del_timer(&priv->command_timer);
 	kfree(priv->networks);
 	priv->networks = NULL;
@@ -1128,7 +1133,7 @@ struct lbs_private *lbs_add_card(void *card, struct device *dmdev)
 	priv->work_thread = create_singlethread_workqueue("lbs_worker");
 	INIT_DELAYED_WORK(&priv->assoc_work, lbs_association_worker);
 	INIT_DELAYED_WORK(&priv->scan_work, lbs_scan_worker);
-	INIT_WORK(&priv->sync_channel, lbs_sync_channel);
+	INIT_WORK(&priv->sync_channel, lbs_sync_channel_worker);
 
 	sprintf(priv->mesh_ssid, "mesh");
 	priv->mesh_ssid_len = 4;
@@ -1380,7 +1385,7 @@
  *  @param cfp_no   A pointer to CFP number
  *  @return 	   A pointer to CFP
  */
-struct chan_freq_power *lbs_get_region_cfp_table(u8 region, u8 band, int *cfp_no)
+struct chan_freq_power *lbs_get_region_cfp_table(u8 region, int *cfp_no)
 {
 	int i, end;
 
@@ -1414,7 +1419,7 @@ int lbs_set_regiontable(struct lbs_private *priv, u8 region, u8 band)
 
 	memset(priv->region_channel, 0, sizeof(priv->region_channel));
 
-	cfp = lbs_get_region_cfp_table(region, band, &cfp_no);
+	cfp = lbs_get_region_cfp_table(region, &cfp_no);
 	if (cfp != NULL) {
 		priv->region_channel[i].nrcfp = cfp_no;
 		priv->region_channel[i].CFP = cfp;
@@ -1433,31 +1438,49 @@ out:
 	return ret;
 }
 
-/**
- *  @brief This function handles the interrupt. it will change PS
- *  state if applicable. it will wake up main_thread to handle
- *  the interrupt event as well.
- *
- *  @param dev     A pointer to net_device structure
- *  @return 	   n/a
- */
-void lbs_interrupt(struct lbs_private *priv)
+void lbs_queue_event(struct lbs_private *priv, u32 event)
+{
+	unsigned long flags;
+
+	lbs_deb_enter(LBS_DEB_THREAD);
+	spin_lock_irqsave(&priv->driver_lock, flags);
+
+	if (priv->psstate == PS_STATE_SLEEP)
+		priv->psstate = PS_STATE_AWAKE;
+
+	__kfifo_put(priv->event_fifo, (unsigned char *) &event, sizeof(u32));
+
+	wake_up_interruptible(&priv->waitq);
+
+	spin_unlock_irqrestore(&priv->driver_lock, flags);
+	lbs_deb_leave(LBS_DEB_THREAD);
+}
+EXPORT_SYMBOL_GPL(lbs_queue_event);
+
+void lbs_notify_command_response(struct lbs_private *priv, u8 resp_idx)
 {
 	lbs_deb_enter(LBS_DEB_THREAD);
 
-	lbs_deb_thread("lbs_interrupt: intcounter=%d\n", priv->intcounter);
-	priv->intcounter++;
 	if (priv->psstate == PS_STATE_SLEEP)
 		priv->psstate = PS_STATE_AWAKE;
+
+	/* Swap buffers by flipping the response index */
+	BUG_ON(resp_idx > 1);
+	priv->resp_idx = resp_idx;
+
 	wake_up_interruptible(&priv->waitq);
 
 	lbs_deb_leave(LBS_DEB_THREAD);
 }
-EXPORT_SYMBOL_GPL(lbs_interrupt);
+EXPORT_SYMBOL_GPL(lbs_notify_command_response);
 
 static int __init lbs_init_module(void)
 {
 	lbs_deb_enter(LBS_DEB_MAIN);
+	memset(&confirm_sleep, 0, sizeof(confirm_sleep));
+	confirm_sleep.hdr.command = cpu_to_le16(CMD_802_11_PS_MODE);
+	confirm_sleep.hdr.size = cpu_to_le16(sizeof(confirm_sleep));
+	confirm_sleep.action = cpu_to_le16(CMD_SUBCMD_SLEEP_CONFIRMED);
 	lbs_debugfs_init();
 	lbs_deb_leave(LBS_DEB_MAIN);
 	return 0;
@@ -1554,6 +1577,32 @@ out:
1554 return ret; 1577 return ret;
1555} 1578}
1556 1579
1580#ifndef CONFIG_IEEE80211
1581const char *escape_essid(const char *essid, u8 essid_len)
1582{
1583 static char escaped[IW_ESSID_MAX_SIZE * 2 + 1];
1584 const char *s = essid;
1585 char *d = escaped;
1586
1587 if (ieee80211_is_empty_essid(essid, essid_len)) {
1588 memcpy(escaped, "<hidden>", sizeof("<hidden>"));
1589 return escaped;
1590 }
1591
1592 essid_len = min(essid_len, (u8) IW_ESSID_MAX_SIZE);
1593 while (essid_len--) {
1594 if (*s == '\0') {
1595 *d++ = '\\';
1596 *d++ = '0';
1597 s++;
1598 } else {
1599 *d++ = *s++;
1600 }
1601 }
1602 *d = '\0';
1603 return escaped;
1604}
1605#endif
1557 1606
1558module_init(lbs_init_module); 1607module_init(lbs_init_module);
1559module_exit(lbs_exit_module); 1608module_exit(lbs_exit_module);
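The main.c changes above introduce a double-buffered command-response handoff: the interface driver fills one of two response buffers, flips priv->resp_idx via lbs_notify_command_response(), and the main thread consumes whichever buffer resp_idx points at, clearing its length when done. A simplified userspace sketch of that handoff (no locking or wait queue shown; illustrative only, not the driver's code):

/* Producer fills a buffer and flips the index; consumer drains the buffer
 * the index points at and marks it free again. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define RESP_BUF_SIZE 2312      /* assumed upload buffer size for the sketch */

static uint8_t resp_buf[2][RESP_BUF_SIZE];
static int resp_len[2];
static int resp_idx;

/* Analogue of lbs_notify_command_response(): called by the bus driver. */
static void notify_command_response(const uint8_t *data, int len, int idx)
{
        memcpy(resp_buf[idx], data, len);
        resp_len[idx] = len;
        resp_idx = idx;         /* flip which buffer the thread will read */
        /* real driver: wake_up_interruptible(&priv->waitq) */
}

/* Analogue of the lbs_thread() consumer side. */
static void process_pending_response(void)
{
        int idx = resp_idx;

        if (resp_len[idx]) {
                printf("processing %d-byte response from buffer %d\n",
                       resp_len[idx], idx);
                resp_len[idx] = 0;      /* mark the buffer free again */
        }
}

int main(void)
{
        uint8_t fake_resp[4] = { 0x03, 0x00, 0x04, 0x00 };      /* hypothetical */

        notify_command_response(fake_resp, sizeof(fake_resp), 0);
        process_pending_response();
        return 0;
}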
diff --git a/drivers/net/wireless/libertas/rx.c b/drivers/net/wireless/libertas/rx.c
index 149557a478ac..05af7316f698 100644
--- a/drivers/net/wireless/libertas/rx.c
+++ b/drivers/net/wireless/libertas/rx.c
@@ -145,17 +145,17 @@ int lbs_process_rxed_packet(struct lbs_private *priv, struct sk_buff *skb)
 	struct net_device *dev = priv->dev;
 	struct rxpackethdr *p_rx_pkt;
 	struct rxpd *p_rx_pd;
-
 	int hdrchop;
 	struct ethhdr *p_ethhdr;
-
 	const u8 rfc1042_eth_hdr[] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
 
 	lbs_deb_enter(LBS_DEB_RX);
 
+	BUG_ON(!skb);
+
 	skb->ip_summed = CHECKSUM_NONE;
 
-	if (priv->monitormode != LBS_MONITOR_OFF)
+	if (priv->monitormode)
 		return process_rxed_802_11_packet(priv, skb);
 
 	p_rx_pkt = (struct rxpackethdr *) skb->data;
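The rx path above matches the incoming payload against the RFC 1042 LLC/SNAP header (AA AA 03 00 00 00) before rebuilding a plain Ethernet header. A small standalone illustration of that check (illustrative only):

/* If the payload starts with the RFC 1042 SNAP header, the real protocol
 * type follows and the frame can be rebuilt as ordinary Ethernet. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static const uint8_t rfc1042_eth_hdr[] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };

int main(void)
{
        /* Hypothetical payload: SNAP header followed by EtherType 0x0800 (IPv4) */
        uint8_t payload[] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00, 0x08, 0x00 };

        if (memcmp(payload, rfc1042_eth_hdr, sizeof(rfc1042_eth_hdr)) == 0) {
                uint16_t ethertype = (payload[6] << 8) | payload[7];

                printf("RFC 1042 frame, EtherType 0x%04x\n", ethertype);
        } else {
                printf("not an RFC 1042 encapsulated frame\n");
        }
        return 0;
}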
diff --git a/drivers/net/wireless/libertas/scan.c b/drivers/net/wireless/libertas/scan.c
index 69f94c92b32d..e72c97a0d6c1 100644
--- a/drivers/net/wireless/libertas/scan.c
+++ b/drivers/net/wireless/libertas/scan.c
@@ -4,22 +4,14 @@
4 * IOCTL handlers as well as command preperation and response routines 4 * IOCTL handlers as well as command preperation and response routines
5 * for sending scan commands to the firmware. 5 * for sending scan commands to the firmware.
6 */ 6 */
7#include <linux/ctype.h>
8#include <linux/if.h>
9#include <linux/netdevice.h>
10#include <linux/wireless.h>
11#include <linux/etherdevice.h> 7#include <linux/etherdevice.h>
12
13#include <net/ieee80211.h>
14#include <net/iw_handler.h>
15
16#include <asm/unaligned.h> 8#include <asm/unaligned.h>
17 9
18#include "host.h" 10#include "host.h"
19#include "decl.h" 11#include "decl.h"
20#include "dev.h" 12#include "dev.h"
21#include "scan.h" 13#include "scan.h"
22#include "join.h" 14#include "cmd.h"
23 15
24//! Approximate amount of data needed to pass a scan result back to iwlist 16//! Approximate amount of data needed to pass a scan result back to iwlist
25#define MAX_SCAN_CELL_SIZE (IW_EV_ADDR_LEN \ 17#define MAX_SCAN_CELL_SIZE (IW_EV_ADDR_LEN \
@@ -39,10 +31,9 @@
39//! Memory needed to store a max number/size SSID TLV for a firmware scan 31//! Memory needed to store a max number/size SSID TLV for a firmware scan
40#define SSID_TLV_MAX_SIZE (1 * sizeof(struct mrvlietypes_ssidparamset)) 32#define SSID_TLV_MAX_SIZE (1 * sizeof(struct mrvlietypes_ssidparamset))
41 33
42//! Maximum memory needed for a lbs_scan_cmd_config with all TLVs at max 34//! Maximum memory needed for a cmd_ds_802_11_scan with all TLVs at max
43#define MAX_SCAN_CFG_ALLOC (sizeof(struct lbs_scan_cmd_config) \ 35#define MAX_SCAN_CFG_ALLOC (sizeof(struct cmd_ds_802_11_scan) \
44 + CHAN_TLV_MAX_SIZE \ 36 + CHAN_TLV_MAX_SIZE + SSID_TLV_MAX_SIZE)
45 + SSID_TLV_MAX_SIZE)
46 37
47//! The maximum number of channels the firmware can scan per command 38//! The maximum number of channels the firmware can scan per command
48#define MRVDRV_MAX_CHANNELS_PER_SCAN 14 39#define MRVDRV_MAX_CHANNELS_PER_SCAN 14
@@ -61,11 +52,8 @@
61//! Scan time specified in the channel TLV for each channel for active scans 52//! Scan time specified in the channel TLV for each channel for active scans
62#define MRVDRV_ACTIVE_SCAN_CHAN_TIME 100 53#define MRVDRV_ACTIVE_SCAN_CHAN_TIME 100
63 54
64static const u8 zeromac[ETH_ALEN] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; 55static int lbs_ret_80211_scan(struct lbs_private *priv, unsigned long dummy,
65static const u8 bcastmac[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; 56 struct cmd_header *resp);
66
67
68
69 57
70/*********************************************************************/ 58/*********************************************************************/
71/* */ 59/* */
@@ -73,7 +61,24 @@ static const u8 bcastmac[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
73/* */ 61/* */
74/*********************************************************************/ 62/*********************************************************************/
75 63
76static inline void clear_bss_descriptor (struct bss_descriptor * bss) 64/**
65 * @brief Unsets the MSB on basic rates
66 *
67 * Scan through an array and unset the MSB for basic data rates.
68 *
69 * @param rates buffer of data rates
70 * @param len size of buffer
71 */
72static void lbs_unset_basic_rate_flags(u8 *rates, size_t len)
73{
74 int i;
75
76 for (i = 0; i < len; i++)
77 rates[i] &= 0x7f;
78}
79
80
81static inline void clear_bss_descriptor(struct bss_descriptor *bss)
77{ 82{
78 /* Don't blow away ->list, just BSS data */ 83 /* Don't blow away ->list, just BSS data */
79 memset(bss, 0, offsetof(struct bss_descriptor, list)); 84 memset(bss, 0, offsetof(struct bss_descriptor, list));
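Note: lbs_unset_basic_rate_flags() simply masks off bit 7 of each rate byte, which 802.11 uses to mark basic rates. A standalone sketch of the same masking, with a short demo using the usual B/G rate encoding:

/* Standalone sketch of the helper added above. Rates are in 500 kbit/s units. */
#include <stdint.h>
#include <stdio.h>

static void unset_basic_rate_flags(uint8_t *rates, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++)
		rates[i] &= 0x7f;	/* clear the basic-rate bit */
}

int main(void)
{
	/* 1/2/5.5/11 Mbit/s marked basic, 18/24 Mbit/s plain */
	uint8_t rates[] = { 0x82, 0x84, 0x8b, 0x96, 0x24, 0x30 };

	unset_basic_rate_flags(rates, sizeof(rates));
	for (size_t i = 0; i < sizeof(rates); i++)
		printf("%.1f Mbit/s\n", rates[i] * 0.5);
	return 0;
}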
@@ -87,7 +92,8 @@ static inline void clear_bss_descriptor (struct bss_descriptor * bss)
87 * 92 *
88 * @return 0: ssid is same, otherwise is different 93 * @return 0: ssid is same, otherwise is different
89 */ 94 */
90int lbs_ssid_cmp(u8 *ssid1, u8 ssid1_len, u8 *ssid2, u8 ssid2_len) 95int lbs_ssid_cmp(uint8_t *ssid1, uint8_t ssid1_len, uint8_t *ssid2,
96 uint8_t ssid2_len)
91{ 97{
92 if (ssid1_len != ssid2_len) 98 if (ssid1_len != ssid2_len)
93 return -1; 99 return -1;
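Note: the retyped lbs_ssid_cmp() keeps its memcmp-like contract: lengths are compared first, and 0 means the SSIDs match. A self-contained illustration of that contract:

/* Sketch of the comparison semantics shown above. */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

static int ssid_cmp(const uint8_t *s1, uint8_t l1, const uint8_t *s2, uint8_t l2)
{
	if (l1 != l2)
		return -1;		/* different lengths: different SSIDs */
	return memcmp(s1, s2, l1);	/* 0 means "same SSID" */
}

int main(void)
{
	const uint8_t a[] = "MNTEST", b[] = "MNTEST";

	printf("same: %d\n", ssid_cmp(a, 6, b, 6) == 0);
	return 0;
}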
@@ -95,76 +101,6 @@ int lbs_ssid_cmp(u8 *ssid1, u8 ssid1_len, u8 *ssid2, u8 ssid2_len)
95 return memcmp(ssid1, ssid2, ssid1_len); 101 return memcmp(ssid1, ssid2, ssid1_len);
96} 102}
97 103
98static inline int match_bss_no_security(struct lbs_802_11_security *secinfo,
99 struct bss_descriptor * match_bss)
100{
101 if ( !secinfo->wep_enabled
102 && !secinfo->WPAenabled
103 && !secinfo->WPA2enabled
104 && match_bss->wpa_ie[0] != MFIE_TYPE_GENERIC
105 && match_bss->rsn_ie[0] != MFIE_TYPE_RSN
106 && !(match_bss->capability & WLAN_CAPABILITY_PRIVACY)) {
107 return 1;
108 }
109 return 0;
110}
111
112static inline int match_bss_static_wep(struct lbs_802_11_security *secinfo,
113 struct bss_descriptor * match_bss)
114{
115 if ( secinfo->wep_enabled
116 && !secinfo->WPAenabled
117 && !secinfo->WPA2enabled
118 && (match_bss->capability & WLAN_CAPABILITY_PRIVACY)) {
119 return 1;
120 }
121 return 0;
122}
123
124static inline int match_bss_wpa(struct lbs_802_11_security *secinfo,
125 struct bss_descriptor * match_bss)
126{
127 if ( !secinfo->wep_enabled
128 && secinfo->WPAenabled
129 && (match_bss->wpa_ie[0] == MFIE_TYPE_GENERIC)
130 /* privacy bit may NOT be set in some APs like LinkSys WRT54G
131 && (match_bss->capability & WLAN_CAPABILITY_PRIVACY)) {
132 */
133 ) {
134 return 1;
135 }
136 return 0;
137}
138
139static inline int match_bss_wpa2(struct lbs_802_11_security *secinfo,
140 struct bss_descriptor * match_bss)
141{
142 if ( !secinfo->wep_enabled
143 && secinfo->WPA2enabled
144 && (match_bss->rsn_ie[0] == MFIE_TYPE_RSN)
145 /* privacy bit may NOT be set in some APs like LinkSys WRT54G
146 && (match_bss->capability & WLAN_CAPABILITY_PRIVACY)) {
147 */
148 ) {
149 return 1;
150 }
151 return 0;
152}
153
154static inline int match_bss_dynamic_wep(struct lbs_802_11_security *secinfo,
155 struct bss_descriptor * match_bss)
156{
157 if ( !secinfo->wep_enabled
158 && !secinfo->WPAenabled
159 && !secinfo->WPA2enabled
160 && (match_bss->wpa_ie[0] != MFIE_TYPE_GENERIC)
161 && (match_bss->rsn_ie[0] != MFIE_TYPE_RSN)
162 && (match_bss->capability & WLAN_CAPABILITY_PRIVACY)) {
163 return 1;
164 }
165 return 0;
166}
167
168static inline int is_same_network(struct bss_descriptor *src, 104static inline int is_same_network(struct bss_descriptor *src,
169 struct bss_descriptor *dst) 105 struct bss_descriptor *dst)
170{ 106{
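Note: the match_bss_*() helpers removed above encoded the driver's security compatibility rules. As a reference for one of them, here is a simplified userspace rendering of the "no security" case; the structures are plain stand-ins, not the driver's types.

/* Userspace sketch of the removed "no security" check. */
#include <stdbool.h>
#include <stdio.h>

struct sec { bool wep, wpa, wpa2; };
struct bss { bool has_wpa_ie, has_rsn_ie, privacy; };

/* Compatible only when nothing is configured locally and the BSS
 * advertises neither a WPA IE, an RSN IE, nor the privacy bit. */
static bool match_no_security(const struct sec *s, const struct bss *b)
{
	return !s->wep && !s->wpa && !s->wpa2 &&
	       !b->has_wpa_ie && !b->has_rsn_ie && !b->privacy;
}

int main(void)
{
	struct sec s = { false, false, false };
	struct bss open_ap = { false, false, false };

	printf("open AP compatible: %d\n", match_no_security(&s, &open_ap));
	return 0;
}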
@@ -177,83 +113,6 @@ static inline int is_same_network(struct bss_descriptor *src,
177 !memcmp(src->ssid, dst->ssid, src->ssid_len)); 113 !memcmp(src->ssid, dst->ssid, src->ssid_len));
178} 114}
179 115
180/**
181 * @brief Check if a scanned network compatible with the driver settings
182 *
183 * WEP WPA WPA2 ad-hoc encrypt Network
184 * enabled enabled enabled AES mode privacy WPA WPA2 Compatible
185 * 0 0 0 0 NONE 0 0 0 yes No security
186 * 1 0 0 0 NONE 1 0 0 yes Static WEP
187 * 0 1 0 0 x 1x 1 x yes WPA
188 * 0 0 1 0 x 1x x 1 yes WPA2
189 * 0 0 0 1 NONE 1 0 0 yes Ad-hoc AES
190 * 0 0 0 0 !=NONE 1 0 0 yes Dynamic WEP
191 *
192 *
193 * @param priv A pointer to struct lbs_private
194 * @param index Index in scantable to check against current driver settings
195 * @param mode Network mode: Infrastructure or IBSS
196 *
197 * @return Index in scantable, or error code if negative
198 */
199static int is_network_compatible(struct lbs_private *priv,
200 struct bss_descriptor * bss, u8 mode)
201{
202 int matched = 0;
203
204 lbs_deb_enter(LBS_DEB_SCAN);
205
206 if (bss->mode != mode)
207 goto done;
208
209 if ((matched = match_bss_no_security(&priv->secinfo, bss))) {
210 goto done;
211 } else if ((matched = match_bss_static_wep(&priv->secinfo, bss))) {
212 goto done;
213 } else if ((matched = match_bss_wpa(&priv->secinfo, bss))) {
214 lbs_deb_scan(
215 "is_network_compatible() WPA: wpa_ie 0x%x "
216 "wpa2_ie 0x%x WEP %s WPA %s WPA2 %s "
217 "privacy 0x%x\n", bss->wpa_ie[0], bss->rsn_ie[0],
218 priv->secinfo.wep_enabled ? "e" : "d",
219 priv->secinfo.WPAenabled ? "e" : "d",
220 priv->secinfo.WPA2enabled ? "e" : "d",
221 (bss->capability & WLAN_CAPABILITY_PRIVACY));
222 goto done;
223 } else if ((matched = match_bss_wpa2(&priv->secinfo, bss))) {
224 lbs_deb_scan(
225 "is_network_compatible() WPA2: wpa_ie 0x%x "
226 "wpa2_ie 0x%x WEP %s WPA %s WPA2 %s "
227 "privacy 0x%x\n", bss->wpa_ie[0], bss->rsn_ie[0],
228 priv->secinfo.wep_enabled ? "e" : "d",
229 priv->secinfo.WPAenabled ? "e" : "d",
230 priv->secinfo.WPA2enabled ? "e" : "d",
231 (bss->capability & WLAN_CAPABILITY_PRIVACY));
232 goto done;
233 } else if ((matched = match_bss_dynamic_wep(&priv->secinfo, bss))) {
234 lbs_deb_scan(
235 "is_network_compatible() dynamic WEP: "
236 "wpa_ie 0x%x wpa2_ie 0x%x privacy 0x%x\n",
237 bss->wpa_ie[0], bss->rsn_ie[0],
238 (bss->capability & WLAN_CAPABILITY_PRIVACY));
239 goto done;
240 }
241
242 /* bss security settings don't match those configured on card */
243 lbs_deb_scan(
244 "is_network_compatible() FAILED: wpa_ie 0x%x "
245 "wpa2_ie 0x%x WEP %s WPA %s WPA2 %s privacy 0x%x\n",
246 bss->wpa_ie[0], bss->rsn_ie[0],
247 priv->secinfo.wep_enabled ? "e" : "d",
248 priv->secinfo.WPAenabled ? "e" : "d",
249 priv->secinfo.WPA2enabled ? "e" : "d",
250 (bss->capability & WLAN_CAPABILITY_PRIVACY));
251
252done:
253 lbs_deb_leave_args(LBS_DEB_SCAN, "matched: %d", matched);
254 return matched;
255}
256
257 116
258 117
259 118
@@ -263,17 +122,6 @@ done:
263/* */ 122/* */
264/*********************************************************************/ 123/*********************************************************************/
265 124
266void lbs_scan_worker(struct work_struct *work)
267{
268 struct lbs_private *priv =
269 container_of(work, struct lbs_private, scan_work.work);
270
271 lbs_deb_enter(LBS_DEB_SCAN);
272 lbs_scan_networks(priv, NULL, 0);
273 lbs_deb_leave(LBS_DEB_SCAN);
274}
275
276
277/** 125/**
278 * @brief Create a channel list for the driver to scan based on region info 126 * @brief Create a channel list for the driver to scan based on region info
279 * 127 *
@@ -285,25 +133,18 @@ void lbs_scan_worker(struct work_struct *work)
285 * 133 *
286 * @param priv A pointer to struct lbs_private structure 134 * @param priv A pointer to struct lbs_private structure
287 * @param scanchanlist Output parameter: resulting channel list to scan 135 * @param scanchanlist Output parameter: resulting channel list to scan
288 * @param filteredscan Flag indicating whether or not a BSSID or SSID filter
289 * is being sent in the command to firmware. Used to
290 * increase the number of channels sent in a scan
291 * command and to disable the firmware channel scan
292 * filter.
293 * 136 *
294 * @return void 137 * @return void
295 */ 138 */
296static int lbs_scan_create_channel_list(struct lbs_private *priv, 139static int lbs_scan_create_channel_list(struct lbs_private *priv,
297 struct chanscanparamset * scanchanlist, 140 struct chanscanparamset *scanchanlist)
298 u8 filteredscan)
299{ 141{
300
301 struct region_channel *scanregion; 142 struct region_channel *scanregion;
302 struct chan_freq_power *cfp; 143 struct chan_freq_power *cfp;
303 int rgnidx; 144 int rgnidx;
304 int chanidx; 145 int chanidx;
305 int nextchan; 146 int nextchan;
306 u8 scantype; 147 uint8_t scantype;
307 148
308 chanidx = 0; 149 chanidx = 0;
309 150
@@ -314,9 +155,8 @@ static int lbs_scan_create_channel_list(struct lbs_private *priv,
314 scantype = CMD_SCAN_TYPE_ACTIVE; 155 scantype = CMD_SCAN_TYPE_ACTIVE;
315 156
316 for (rgnidx = 0; rgnidx < ARRAY_SIZE(priv->region_channel); rgnidx++) { 157 for (rgnidx = 0; rgnidx < ARRAY_SIZE(priv->region_channel); rgnidx++) {
317 if (priv->enable11d && 158 if (priv->enable11d && (priv->connect_status != LBS_CONNECTED)
318 (priv->connect_status != LBS_CONNECTED) && 159 && (priv->mesh_connect_status != LBS_CONNECTED)) {
319 (priv->mesh_connect_status != LBS_CONNECTED)) {
320 /* Scan all the supported chan for the first scan */ 160 /* Scan all the supported chan for the first scan */
321 if (!priv->universal_channel[rgnidx].valid) 161 if (!priv->universal_channel[rgnidx].valid)
322 continue; 162 continue;
@@ -331,51 +171,32 @@ static int lbs_scan_create_channel_list(struct lbs_private *priv,
331 scanregion = &priv->region_channel[rgnidx]; 171 scanregion = &priv->region_channel[rgnidx];
332 } 172 }
333 173
334 for (nextchan = 0; 174 for (nextchan = 0; nextchan < scanregion->nrcfp; nextchan++, chanidx++) {
335 nextchan < scanregion->nrcfp; nextchan++, chanidx++) { 175 struct chanscanparamset *chan = &scanchanlist[chanidx];
336 176
337 cfp = scanregion->CFP + nextchan; 177 cfp = scanregion->CFP + nextchan;
338 178
339 if (priv->enable11d) { 179 if (priv->enable11d)
340 scantype = 180 scantype = lbs_get_scan_type_11d(cfp->channel,
341 lbs_get_scan_type_11d(cfp->channel, 181 &priv->parsed_region_chan);
342 &priv->
343 parsed_region_chan);
344 }
345 182
346 switch (scanregion->band) { 183 if (scanregion->band == BAND_B || scanregion->band == BAND_G)
347 case BAND_B: 184 chan->radiotype = CMD_SCAN_RADIO_TYPE_BG;
348 case BAND_G:
349 default:
350 scanchanlist[chanidx].radiotype =
351 CMD_SCAN_RADIO_TYPE_BG;
352 break;
353 }
354 185
355 if (scantype == CMD_SCAN_TYPE_PASSIVE) { 186 if (scantype == CMD_SCAN_TYPE_PASSIVE) {
356 scanchanlist[chanidx].maxscantime = 187 chan->maxscantime = cpu_to_le16(MRVDRV_PASSIVE_SCAN_CHAN_TIME);
357 cpu_to_le16(MRVDRV_PASSIVE_SCAN_CHAN_TIME); 188 chan->chanscanmode.passivescan = 1;
358 scanchanlist[chanidx].chanscanmode.passivescan =
359 1;
360 } else { 189 } else {
361 scanchanlist[chanidx].maxscantime = 190 chan->maxscantime = cpu_to_le16(MRVDRV_ACTIVE_SCAN_CHAN_TIME);
362 cpu_to_le16(MRVDRV_ACTIVE_SCAN_CHAN_TIME); 191 chan->chanscanmode.passivescan = 0;
363 scanchanlist[chanidx].chanscanmode.passivescan =
364 0;
365 } 192 }
366 193
367 scanchanlist[chanidx].channumber = cfp->channel; 194 chan->channumber = cfp->channel;
368
369 if (filteredscan) {
370 scanchanlist[chanidx].chanscanmode.
371 disablechanfilt = 1;
372 }
373 } 195 }
374 } 196 }
375 return chanidx; 197 return chanidx;
376} 198}
377 199
378
379/* 200/*
380 * Add SSID TLV of the form: 201 * Add SSID TLV of the form:
381 * 202 *
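Note: the reworked loop fills one chanscanparamset entry per channel: a B/G radio type, an active or passive dwell time, and the channel number. A compact userspace model of the per-channel fill (the passive dwell value is a placeholder; only the active value of 100 ms appears in this hunk):

/* Simplified model of the channel-list fill; struct layout is a stand-in. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ACTIVE_SCAN_CHAN_TIME  100   /* ms, as in the diff */
#define PASSIVE_SCAN_CHAN_TIME 100   /* ms, placeholder value */

struct chan_param {
	uint8_t  channel;
	bool     passive;
	uint16_t dwell_ms;
};

static void fill_chan(struct chan_param *c, uint8_t channel, bool passive)
{
	c->channel  = channel;
	c->passive  = passive;
	c->dwell_ms = passive ? PASSIVE_SCAN_CHAN_TIME : ACTIVE_SCAN_CHAN_TIME;
}

int main(void)
{
	struct chan_param list[14];

	for (uint8_t ch = 1; ch <= 14; ch++)
		fill_chan(&list[ch - 1], ch, false);
	printf("channel %u: %u ms %s\n", (unsigned)list[0].channel,
	       (unsigned)list[0].dwell_ms, list[0].passive ? "passive" : "active");
	return 0;
}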
@@ -383,18 +204,16 @@ static int lbs_scan_create_channel_list(struct lbs_private *priv,
383 * length 06 00 204 * length 06 00
384 * ssid 4d 4e 54 45 53 54 205 * ssid 4d 4e 54 45 53 54
385 */ 206 */
386static int lbs_scan_add_ssid_tlv(u8 *tlv, 207static int lbs_scan_add_ssid_tlv(struct lbs_private *priv, u8 *tlv)
387 const struct lbs_ioctl_user_scan_cfg *user_cfg)
388{ 208{
389 struct mrvlietypes_ssidparamset *ssid_tlv = 209 struct mrvlietypes_ssidparamset *ssid_tlv = (void *)tlv;
390 (struct mrvlietypes_ssidparamset *)tlv; 210
391 ssid_tlv->header.type = cpu_to_le16(TLV_TYPE_SSID); 211 ssid_tlv->header.type = cpu_to_le16(TLV_TYPE_SSID);
392 ssid_tlv->header.len = cpu_to_le16(user_cfg->ssid_len); 212 ssid_tlv->header.len = cpu_to_le16(priv->scan_ssid_len);
393 memcpy(ssid_tlv->ssid, user_cfg->ssid, user_cfg->ssid_len); 213 memcpy(ssid_tlv->ssid, priv->scan_ssid, priv->scan_ssid_len);
394 return sizeof(ssid_tlv->header) + user_cfg->ssid_len; 214 return sizeof(ssid_tlv->header) + priv->scan_ssid_len;
395} 215}
396 216
397
398/* 217/*
399 * Add CHANLIST TLV of the form 218 * Add CHANLIST TLV of the form
400 * 219 *
@@ -420,13 +239,12 @@ static int lbs_scan_add_ssid_tlv(u8 *tlv,
420 * channel 13 00 0d 00 00 00 64 00 239 * channel 13 00 0d 00 00 00 64 00
421 * 240 *
422 */ 241 */
423static int lbs_scan_add_chanlist_tlv(u8 *tlv, 242static int lbs_scan_add_chanlist_tlv(uint8_t *tlv,
424 struct chanscanparamset *chan_list, 243 struct chanscanparamset *chan_list,
425 int chan_count) 244 int chan_count)
426{ 245{
427 size_t size = sizeof(struct chanscanparamset) * chan_count; 246 size_t size = sizeof(struct chanscanparamset) *chan_count;
428 struct mrvlietypes_chanlistparamset *chan_tlv = 247 struct mrvlietypes_chanlistparamset *chan_tlv = (void *)tlv;
429 (struct mrvlietypes_chanlistparamset *) tlv;
430 248
431 chan_tlv->header.type = cpu_to_le16(TLV_TYPE_CHANLIST); 249 chan_tlv->header.type = cpu_to_le16(TLV_TYPE_CHANLIST);
432 memcpy(chan_tlv->chanscanparam, chan_list, size); 250 memcpy(chan_tlv->chanscanparam, chan_list, size);
@@ -434,7 +252,6 @@ static int lbs_scan_add_chanlist_tlv(u8 *tlv,
434 return sizeof(chan_tlv->header) + size; 252 return sizeof(chan_tlv->header) + size;
435} 253}
436 254
437
438/* 255/*
439 * Add RATES TLV of the form 256 * Add RATES TLV of the form
440 * 257 *
@@ -445,11 +262,10 @@ static int lbs_scan_add_chanlist_tlv(u8 *tlv,
445 * The rates are in lbs_bg_rates[], but for the 802.11b 262 * The rates are in lbs_bg_rates[], but for the 802.11b
446 * rates the high bit isn't set. 263 * rates the high bit isn't set.
447 */ 264 */
448static int lbs_scan_add_rates_tlv(u8 *tlv) 265static int lbs_scan_add_rates_tlv(uint8_t *tlv)
449{ 266{
450 int i; 267 int i;
451 struct mrvlietypes_ratesparamset *rate_tlv = 268 struct mrvlietypes_ratesparamset *rate_tlv = (void *)tlv;
452 (struct mrvlietypes_ratesparamset *) tlv;
453 269
454 rate_tlv->header.type = cpu_to_le16(TLV_TYPE_RATES); 270 rate_tlv->header.type = cpu_to_le16(TLV_TYPE_RATES);
455 tlv += sizeof(rate_tlv->header); 271 tlv += sizeof(rate_tlv->header);
@@ -470,82 +286,74 @@ static int lbs_scan_add_rates_tlv(u8 *tlv)
470 return sizeof(rate_tlv->header) + i; 286 return sizeof(rate_tlv->header) + i;
471} 287}
472 288
473
474/* 289/*
475 * Generate the CMD_802_11_SCAN command with the proper tlv 290 * Generate the CMD_802_11_SCAN command with the proper tlv
476 * for a bunch of channels. 291 * for a bunch of channels.
477 */ 292 */
478static int lbs_do_scan(struct lbs_private *priv, 293static int lbs_do_scan(struct lbs_private *priv, uint8_t bsstype,
479 u8 bsstype, 294 struct chanscanparamset *chan_list, int chan_count)
480 struct chanscanparamset *chan_list,
481 int chan_count,
482 const struct lbs_ioctl_user_scan_cfg *user_cfg)
483{ 295{
484 int ret = -ENOMEM; 296 int ret = -ENOMEM;
485 struct lbs_scan_cmd_config *scan_cmd; 297 struct cmd_ds_802_11_scan *scan_cmd;
486 u8 *tlv; /* pointer into our current, growing TLV storage area */ 298 uint8_t *tlv; /* pointer into our current, growing TLV storage area */
487 299
488 lbs_deb_enter_args(LBS_DEB_SCAN, "bsstype %d, chanlist[].chan %d, " 300 lbs_deb_enter_args(LBS_DEB_SCAN, "bsstype %d, chanlist[].chan %d, chan_count %d",
489 "chan_count %d", 301 bsstype, chan_list[0].channumber, chan_count);
490 bsstype, chan_list[0].channumber, chan_count);
491 302
492 /* create the fixed part for scan command */ 303 /* create the fixed part for scan command */
493 scan_cmd = kzalloc(MAX_SCAN_CFG_ALLOC, GFP_KERNEL); 304 scan_cmd = kzalloc(MAX_SCAN_CFG_ALLOC, GFP_KERNEL);
494 if (scan_cmd == NULL) 305 if (scan_cmd == NULL)
495 goto out; 306 goto out;
307
496 tlv = scan_cmd->tlvbuffer; 308 tlv = scan_cmd->tlvbuffer;
497 if (user_cfg) 309 /* TODO: do we need to scan for a specific BSSID?
498 memcpy(scan_cmd->bssid, user_cfg->bssid, ETH_ALEN); 310 memcpy(scan_cmd->bssid, priv->scan_bssid, ETH_ALEN); */
499 scan_cmd->bsstype = bsstype; 311 scan_cmd->bsstype = bsstype;
500 312
501 /* add TLVs */ 313 /* add TLVs */
502 if (user_cfg && user_cfg->ssid_len) 314 if (priv->scan_ssid_len)
503 tlv += lbs_scan_add_ssid_tlv(tlv, user_cfg); 315 tlv += lbs_scan_add_ssid_tlv(priv, tlv);
504 if (chan_list && chan_count) 316 if (chan_list && chan_count)
505 tlv += lbs_scan_add_chanlist_tlv(tlv, chan_list, chan_count); 317 tlv += lbs_scan_add_chanlist_tlv(tlv, chan_list, chan_count);
506 tlv += lbs_scan_add_rates_tlv(tlv); 318 tlv += lbs_scan_add_rates_tlv(tlv);
507 319
508 /* This is the final data we are about to send */ 320 /* This is the final data we are about to send */
509 scan_cmd->tlvbufferlen = tlv - scan_cmd->tlvbuffer; 321 scan_cmd->hdr.size = cpu_to_le16(tlv - (uint8_t *)scan_cmd);
510 lbs_deb_hex(LBS_DEB_SCAN, "SCAN_CMD", (void *)scan_cmd, 1+6); 322 lbs_deb_hex(LBS_DEB_SCAN, "SCAN_CMD", (void *)scan_cmd,
323 sizeof(*scan_cmd));
511 lbs_deb_hex(LBS_DEB_SCAN, "SCAN_TLV", scan_cmd->tlvbuffer, 324 lbs_deb_hex(LBS_DEB_SCAN, "SCAN_TLV", scan_cmd->tlvbuffer,
512 scan_cmd->tlvbufferlen); 325 tlv - scan_cmd->tlvbuffer);
326
327 ret = __lbs_cmd(priv, CMD_802_11_SCAN, &scan_cmd->hdr,
328 le16_to_cpu(scan_cmd->hdr.size),
329 lbs_ret_80211_scan, 0);
513 330
514 ret = lbs_prepare_and_send_command(priv, CMD_802_11_SCAN, 0,
515 CMD_OPTION_WAITFORRSP, 0, scan_cmd);
516out: 331out:
517 kfree(scan_cmd); 332 kfree(scan_cmd);
518 lbs_deb_leave_args(LBS_DEB_SCAN, "ret %d", ret); 333 lbs_deb_leave_args(LBS_DEB_SCAN, "ret %d", ret);
519 return ret; 334 return ret;
520} 335}
521 336
522
523/** 337/**
524 * @brief Internal function used to start a scan based on an input config 338 * @brief Internal function used to start a scan based on an input config
525 * 339 *
526 * Also used from debugfs
527 *
528 * Use the input user scan configuration information when provided in 340 * Use the input user scan configuration information when provided in
529 * order to send the appropriate scan commands to firmware to populate or 341 * order to send the appropriate scan commands to firmware to populate or
530 * update the internal driver scan table 342 * update the internal driver scan table
531 * 343 *
532 * @param priv A pointer to struct lbs_private structure 344 * @param priv A pointer to struct lbs_private structure
533 * @param puserscanin Pointer to the input configuration for the requested 345 * @param full_scan Do a full-scan (blocking)
534 * scan.
535 * 346 *
536 * @return 0 or < 0 if error 347 * @return 0 or < 0 if error
537 */ 348 */
538int lbs_scan_networks(struct lbs_private *priv, 349int lbs_scan_networks(struct lbs_private *priv, int full_scan)
539 const struct lbs_ioctl_user_scan_cfg *user_cfg,
540 int full_scan)
541{ 350{
542 int ret = -ENOMEM; 351 int ret = -ENOMEM;
543 struct chanscanparamset *chan_list; 352 struct chanscanparamset *chan_list;
544 struct chanscanparamset *curr_chans; 353 struct chanscanparamset *curr_chans;
545 int chan_count; 354 int chan_count;
546 u8 bsstype = CMD_BSS_TYPE_ANY; 355 uint8_t bsstype = CMD_BSS_TYPE_ANY;
547 int numchannels = MRVDRV_CHANNELS_PER_SCAN_CMD; 356 int numchannels = MRVDRV_CHANNELS_PER_SCAN_CMD;
548 int filteredscan = 0;
549 union iwreq_data wrqu; 357 union iwreq_data wrqu;
550#ifdef CONFIG_LIBERTAS_DEBUG 358#ifdef CONFIG_LIBERTAS_DEBUG
551 struct bss_descriptor *iter; 359 struct bss_descriptor *iter;
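Note: the rewritten lbs_do_scan() allocates a worst-case buffer, lets each lbs_scan_add_*_tlv() helper append its TLV and advance the pointer, then derives the command size from pointer arithmetic before handing the buffer to __lbs_cmd(). The pattern, reduced to a standalone sketch with a simplified one-byte TLV header:

/* Sketch of the append-and-measure pattern; TLV format is simplified. */
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>

#define MAX_CMD_ALLOC 256   /* stand-in for MAX_SCAN_CFG_ALLOC */

static size_t append_tlv(uint8_t *tlv, uint8_t type, const void *data, uint8_t len)
{
	tlv[0] = type;
	tlv[1] = len;
	memcpy(tlv + 2, data, len);
	return 2 + len;
}

int main(void)
{
	uint8_t *cmd = calloc(1, MAX_CMD_ALLOC);
	uint8_t *tlv;
	uint8_t chan = 6;

	if (!cmd)
		return 1;
	tlv = cmd + 4;				/* pretend 4 fixed bytes */
	tlv += append_tlv(tlv, 0x01, "MNTEST", 6);	/* SSID TLV */
	tlv += append_tlv(tlv, 0x02, &chan, 1);		/* channel TLV */

	printf("command size: %td bytes\n", tlv - cmd);
	free(cmd);
	return 0;
}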
@@ -553,8 +361,7 @@ int lbs_scan_networks(struct lbs_private *priv,
553 DECLARE_MAC_BUF(mac); 361 DECLARE_MAC_BUF(mac);
554#endif 362#endif
555 363
556 lbs_deb_enter_args(LBS_DEB_SCAN, "full_scan %d", 364 lbs_deb_enter_args(LBS_DEB_SCAN, "full_scan %d", full_scan);
557 full_scan);
558 365
559 /* Cancel any partial outstanding partial scans if this scan 366 /* Cancel any partial outstanding partial scans if this scan
560 * is a full scan. 367 * is a full scan.
@@ -562,30 +369,27 @@ int lbs_scan_networks(struct lbs_private *priv,
562 if (full_scan && delayed_work_pending(&priv->scan_work)) 369 if (full_scan && delayed_work_pending(&priv->scan_work))
563 cancel_delayed_work(&priv->scan_work); 370 cancel_delayed_work(&priv->scan_work);
564 371
565 /* Determine same scan parameters */ 372 /* User-specified bsstype or channel list
373 TODO: this can be implemented if some user-space application
374 need the feature. Formerly, it was accessible from debugfs,
375 but then nowhere used.
566 if (user_cfg) { 376 if (user_cfg) {
567 if (user_cfg->bsstype) 377 if (user_cfg->bsstype)
568 bsstype = user_cfg->bsstype; 378 bsstype = user_cfg->bsstype;
569 if (compare_ether_addr(user_cfg->bssid, &zeromac[0]) != 0) { 379 } */
570 numchannels = MRVDRV_MAX_CHANNELS_PER_SCAN; 380
571 filteredscan = 1; 381 lbs_deb_scan("numchannels %d, bsstype %d\n", numchannels, bsstype);
572 }
573 }
574 lbs_deb_scan("numchannels %d, bsstype %d, "
575 "filteredscan %d\n",
576 numchannels, bsstype, filteredscan);
577 382
578 /* Create list of channels to scan */ 383 /* Create list of channels to scan */
579 chan_list = kzalloc(sizeof(struct chanscanparamset) * 384 chan_list = kzalloc(sizeof(struct chanscanparamset) *
580 LBS_IOCTL_USER_SCAN_CHAN_MAX, GFP_KERNEL); 385 LBS_IOCTL_USER_SCAN_CHAN_MAX, GFP_KERNEL);
581 if (!chan_list) { 386 if (!chan_list) {
582 lbs_pr_alert("SCAN: chan_list empty\n"); 387 lbs_pr_alert("SCAN: chan_list empty\n");
583 goto out; 388 goto out;
584 } 389 }
585 390
586 /* We want to scan all channels */ 391 /* We want to scan all channels */
587 chan_count = lbs_scan_create_channel_list(priv, chan_list, 392 chan_count = lbs_scan_create_channel_list(priv, chan_list);
588 filteredscan);
589 393
590 netif_stop_queue(priv->dev); 394 netif_stop_queue(priv->dev);
591 netif_carrier_off(priv->dev); 395 netif_carrier_off(priv->dev);
@@ -595,13 +399,13 @@ int lbs_scan_networks(struct lbs_private *priv,
595 } 399 }
596 400
597 /* Prepare to continue an interrupted scan */ 401 /* Prepare to continue an interrupted scan */
598 lbs_deb_scan("chan_count %d, last_scanned_channel %d\n", 402 lbs_deb_scan("chan_count %d, scan_channel %d\n",
599 chan_count, priv->last_scanned_channel); 403 chan_count, priv->scan_channel);
600 curr_chans = chan_list; 404 curr_chans = chan_list;
601 /* advance channel list by already-scanned-channels */ 405 /* advance channel list by already-scanned-channels */
602 if (priv->last_scanned_channel > 0) { 406 if (priv->scan_channel > 0) {
603 curr_chans += priv->last_scanned_channel; 407 curr_chans += priv->scan_channel;
604 chan_count -= priv->last_scanned_channel; 408 chan_count -= priv->scan_channel;
605 } 409 }
606 410
607 /* Send scan command(s) 411 /* Send scan command(s)
@@ -612,9 +416,9 @@ int lbs_scan_networks(struct lbs_private *priv,
612 while (chan_count) { 416 while (chan_count) {
613 int to_scan = min(numchannels, chan_count); 417 int to_scan = min(numchannels, chan_count);
614 lbs_deb_scan("scanning %d of %d channels\n", 418 lbs_deb_scan("scanning %d of %d channels\n",
615 to_scan, chan_count); 419 to_scan, chan_count);
616 ret = lbs_do_scan(priv, bsstype, curr_chans, 420 ret = lbs_do_scan(priv, bsstype, curr_chans,
617 to_scan, user_cfg); 421 to_scan);
618 if (ret) { 422 if (ret) {
619 lbs_pr_err("SCAN_CMD failed\n"); 423 lbs_pr_err("SCAN_CMD failed\n");
620 goto out2; 424 goto out2;
@@ -623,17 +427,16 @@ int lbs_scan_networks(struct lbs_private *priv,
623 chan_count -= to_scan; 427 chan_count -= to_scan;
624 428
625 /* somehow schedule the next part of the scan */ 429 /* somehow schedule the next part of the scan */
626 if (chan_count && 430 if (chan_count && !full_scan &&
627 !full_scan &&
628 !priv->surpriseremoved) { 431 !priv->surpriseremoved) {
629 /* -1 marks just that we're currently scanning */ 432 /* -1 marks just that we're currently scanning */
630 if (priv->last_scanned_channel < 0) 433 if (priv->scan_channel < 0)
631 priv->last_scanned_channel = to_scan; 434 priv->scan_channel = to_scan;
632 else 435 else
633 priv->last_scanned_channel += to_scan; 436 priv->scan_channel += to_scan;
634 cancel_delayed_work(&priv->scan_work); 437 cancel_delayed_work(&priv->scan_work);
635 queue_delayed_work(priv->work_thread, &priv->scan_work, 438 queue_delayed_work(priv->work_thread, &priv->scan_work,
636 msecs_to_jiffies(300)); 439 msecs_to_jiffies(300));
637 /* skip over GIWSCAN event */ 440 /* skip over GIWSCAN event */
638 goto out; 441 goto out;
639 } 442 }
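Note: priv->scan_channel replaces last_scanned_channel as the resume cursor: -1 only marks that a scan is queued, and afterwards it accumulates how many channels earlier commands already covered, so the next chunk of the channel list can skip past them. The bookkeeping in isolation:

/* Sketch of the resume bookkeeping shown above. */
#include <stdio.h>

static int scan_channel;	/* mirrors priv->scan_channel */

static void note_progress(int scanned_now)
{
	if (scan_channel < 0)		/* -1 was only the "scan running" marker */
		scan_channel = scanned_now;
	else
		scan_channel += scanned_now;
}

int main(void)
{
	scan_channel = -1;		/* set when the scan was queued */
	note_progress(4);		/* first command covered 4 channels */
	note_progress(4);		/* second command, next 4 channels */
	printf("resume at channel index %d\n", scan_channel);
	return 0;
}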
@@ -648,13 +451,13 @@ int lbs_scan_networks(struct lbs_private *priv,
648 lbs_deb_scan("scan table:\n"); 451 lbs_deb_scan("scan table:\n");
649 list_for_each_entry(iter, &priv->network_list, list) 452 list_for_each_entry(iter, &priv->network_list, list)
650 lbs_deb_scan("%02d: BSSID %s, RSSI %d, SSID '%s'\n", 453 lbs_deb_scan("%02d: BSSID %s, RSSI %d, SSID '%s'\n",
651 i++, print_mac(mac, iter->bssid), (s32) iter->rssi, 454 i++, print_mac(mac, iter->bssid), iter->rssi,
652 escape_essid(iter->ssid, iter->ssid_len)); 455 escape_essid(iter->ssid, iter->ssid_len));
653 mutex_unlock(&priv->lock); 456 mutex_unlock(&priv->lock);
654#endif 457#endif
655 458
656out2: 459out2:
657 priv->last_scanned_channel = 0; 460 priv->scan_channel = 0;
658 461
659out: 462out:
660 if (priv->connect_status == LBS_CONNECTED) { 463 if (priv->connect_status == LBS_CONNECTED) {
@@ -673,7 +476,15 @@ out:
673 return ret; 476 return ret;
674} 477}
675 478
479void lbs_scan_worker(struct work_struct *work)
480{
481 struct lbs_private *priv =
482 container_of(work, struct lbs_private, scan_work.work);
676 483
484 lbs_deb_enter(LBS_DEB_SCAN);
485 lbs_scan_networks(priv, 0);
486 lbs_deb_leave(LBS_DEB_SCAN);
487}
677 488
678 489
679/*********************************************************************/ 490/*********************************************************************/
@@ -694,7 +505,7 @@ out:
694 * @return 0 or -1 505 * @return 0 or -1
695 */ 506 */
696static int lbs_process_bss(struct bss_descriptor *bss, 507static int lbs_process_bss(struct bss_descriptor *bss,
697 u8 ** pbeaconinfo, int *bytesleft) 508 uint8_t **pbeaconinfo, int *bytesleft)
698{ 509{
699 struct ieeetypes_fhparamset *pFH; 510 struct ieeetypes_fhparamset *pFH;
700 struct ieeetypes_dsparamset *pDS; 511 struct ieeetypes_dsparamset *pDS;
@@ -702,9 +513,9 @@ static int lbs_process_bss(struct bss_descriptor *bss,
702 struct ieeetypes_ibssparamset *pibss; 513 struct ieeetypes_ibssparamset *pibss;
703 DECLARE_MAC_BUF(mac); 514 DECLARE_MAC_BUF(mac);
704 struct ieeetypes_countryinfoset *pcountryinfo; 515 struct ieeetypes_countryinfoset *pcountryinfo;
705 u8 *pos, *end, *p; 516 uint8_t *pos, *end, *p;
706 u8 n_ex_rates = 0, got_basic_rates = 0, n_basic_rates = 0; 517 uint8_t n_ex_rates = 0, got_basic_rates = 0, n_basic_rates = 0;
707 u16 beaconsize = 0; 518 uint16_t beaconsize = 0;
708 int ret; 519 int ret;
709 520
710 lbs_deb_enter(LBS_DEB_SCAN); 521 lbs_deb_enter(LBS_DEB_SCAN);
@@ -776,12 +587,11 @@ static int lbs_process_bss(struct bss_descriptor *bss,
776 587
777 /* process variable IE */ 588 /* process variable IE */
778 while (pos <= end - 2) { 589 while (pos <= end - 2) {
779 struct ieee80211_info_element * elem = 590 struct ieee80211_info_element * elem = (void *)pos;
780 (struct ieee80211_info_element *) pos;
781 591
782 if (pos + elem->len > end) { 592 if (pos + elem->len > end) {
783 lbs_deb_scan("process_bss: error in processing IE, " 593 lbs_deb_scan("process_bss: error in processing IE, "
784 "bytes left < IE length\n"); 594 "bytes left < IE length\n");
785 break; 595 break;
786 } 596 }
787 597
@@ -795,7 +605,7 @@ static int lbs_process_bss(struct bss_descriptor *bss,
795 break; 605 break;
796 606
797 case MFIE_TYPE_RATES: 607 case MFIE_TYPE_RATES:
798 n_basic_rates = min_t(u8, MAX_RATES, elem->len); 608 n_basic_rates = min_t(uint8_t, MAX_RATES, elem->len);
799 memcpy(bss->rates, elem->data, n_basic_rates); 609 memcpy(bss->rates, elem->data, n_basic_rates);
800 got_basic_rates = 1; 610 got_basic_rates = 1;
801 lbs_deb_scan("got RATES IE\n"); 611 lbs_deb_scan("got RATES IE\n");
@@ -836,19 +646,16 @@ static int lbs_process_bss(struct bss_descriptor *bss,
836 lbs_deb_scan("got COUNTRY IE\n"); 646 lbs_deb_scan("got COUNTRY IE\n");
837 if (pcountryinfo->len < sizeof(pcountryinfo->countrycode) 647 if (pcountryinfo->len < sizeof(pcountryinfo->countrycode)
838 || pcountryinfo->len > 254) { 648 || pcountryinfo->len > 254) {
839 lbs_deb_scan("process_bss: 11D- Err " 649 lbs_deb_scan("process_bss: 11D- Err CountryInfo len %d, min %zd, max 254\n",
840 "CountryInfo len %d, min %zd, max 254\n", 650 pcountryinfo->len, sizeof(pcountryinfo->countrycode));
841 pcountryinfo->len,
842 sizeof(pcountryinfo->countrycode));
843 ret = -1; 651 ret = -1;
844 goto done; 652 goto done;
845 } 653 }
846 654
847 memcpy(&bss->countryinfo, 655 memcpy(&bss->countryinfo, pcountryinfo, pcountryinfo->len + 2);
848 pcountryinfo, pcountryinfo->len + 2);
849 lbs_deb_hex(LBS_DEB_SCAN, "process_bss: 11d countryinfo", 656 lbs_deb_hex(LBS_DEB_SCAN, "process_bss: 11d countryinfo",
850 (u8 *) pcountryinfo, 657 (uint8_t *) pcountryinfo,
851 (u32) (pcountryinfo->len + 2)); 658 (int) (pcountryinfo->len + 2));
852 break; 659 break;
853 660
854 case MFIE_TYPE_RATES_EX: 661 case MFIE_TYPE_RATES_EX:
@@ -872,26 +679,19 @@ static int lbs_process_bss(struct bss_descriptor *bss,
872 679
873 case MFIE_TYPE_GENERIC: 680 case MFIE_TYPE_GENERIC:
874 if (elem->len >= 4 && 681 if (elem->len >= 4 &&
875 elem->data[0] == 0x00 && 682 elem->data[0] == 0x00 && elem->data[1] == 0x50 &&
876 elem->data[1] == 0x50 && 683 elem->data[2] == 0xf2 && elem->data[3] == 0x01) {
877 elem->data[2] == 0xf2 && 684 bss->wpa_ie_len = min(elem->len + 2, MAX_WPA_IE_LEN);
878 elem->data[3] == 0x01) {
879 bss->wpa_ie_len = min(elem->len + 2,
880 MAX_WPA_IE_LEN);
881 memcpy(bss->wpa_ie, elem, bss->wpa_ie_len); 685 memcpy(bss->wpa_ie, elem, bss->wpa_ie_len);
882 lbs_deb_scan("got WPA IE\n"); 686 lbs_deb_scan("got WPA IE\n");
883 lbs_deb_hex(LBS_DEB_SCAN, "WPA IE", bss->wpa_ie, 687 lbs_deb_hex(LBS_DEB_SCAN, "WPA IE", bss->wpa_ie, elem->len);
884 elem->len);
885 } else if (elem->len >= MARVELL_MESH_IE_LENGTH && 688 } else if (elem->len >= MARVELL_MESH_IE_LENGTH &&
886 elem->data[0] == 0x00 && 689 elem->data[0] == 0x00 && elem->data[1] == 0x50 &&
887 elem->data[1] == 0x50 && 690 elem->data[2] == 0x43 && elem->data[3] == 0x04) {
888 elem->data[2] == 0x43 &&
889 elem->data[3] == 0x04) {
890 lbs_deb_scan("got mesh IE\n"); 691 lbs_deb_scan("got mesh IE\n");
891 bss->mesh = 1; 692 bss->mesh = 1;
892 } else { 693 } else {
893 lbs_deb_scan("got generiec IE: " 694 lbs_deb_scan("got generic IE: %02x:%02x:%02x:%02x, len %d\n",
894 "%02x:%02x:%02x:%02x, len %d\n",
895 elem->data[0], elem->data[1], 695 elem->data[0], elem->data[1],
896 elem->data[2], elem->data[3], 696 elem->data[2], elem->data[3],
897 elem->len); 697 elem->len);
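Note: the compressed MFIE_TYPE_GENERIC branch still keys off the leading OUI-plus-type bytes: 00:50:f2:01 for a WPA IE and 00:50:43:04 for the Marvell mesh IE. A standalone classifier doing the same test (the mesh length check is simplified to four bytes here):

/* Sketch of the vendor-IE classification shown above. */
#include <stdint.h>
#include <stdio.h>

enum ie_kind { IE_OTHER, IE_WPA, IE_MESH };

static enum ie_kind classify_generic_ie(const uint8_t *data, uint8_t len)
{
	if (len >= 4 && data[0] == 0x00 && data[1] == 0x50 &&
	    data[2] == 0xf2 && data[3] == 0x01)
		return IE_WPA;
	if (len >= 4 && data[0] == 0x00 && data[1] == 0x50 &&
	    data[2] == 0x43 && data[3] == 0x04)
		return IE_MESH;
	return IE_OTHER;
}

int main(void)
{
	const uint8_t wpa[] = { 0x00, 0x50, 0xf2, 0x01, 0x01, 0x00 };

	printf("kind: %d (1 = WPA)\n", classify_generic_ie(wpa, sizeof(wpa)));
	return 0;
}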
@@ -903,12 +703,12 @@ static int lbs_process_bss(struct bss_descriptor *bss,
903 bss->rsn_ie_len = min(elem->len + 2, MAX_WPA_IE_LEN); 703 bss->rsn_ie_len = min(elem->len + 2, MAX_WPA_IE_LEN);
904 memcpy(bss->rsn_ie, elem, bss->rsn_ie_len); 704 memcpy(bss->rsn_ie, elem, bss->rsn_ie_len);
905 lbs_deb_hex(LBS_DEB_SCAN, "process_bss: RSN_IE", 705 lbs_deb_hex(LBS_DEB_SCAN, "process_bss: RSN_IE",
906 bss->rsn_ie, elem->len); 706 bss->rsn_ie, elem->len);
907 break; 707 break;
908 708
909 default: 709 default:
910 lbs_deb_scan("got IE 0x%04x, len %d\n", 710 lbs_deb_scan("got IE 0x%04x, len %d\n",
911 elem->id, elem->len); 711 elem->id, elem->len);
912 break; 712 break;
913 } 713 }
914 714
@@ -927,213 +727,6 @@ done:
927} 727}
928 728
929/** 729/**
930 * @brief This function finds a specific compatible BSSID in the scan list
931 *
932 * Used in association code
933 *
934 * @param priv A pointer to struct lbs_private
935 * @param bssid BSSID to find in the scan list
936 * @param mode Network mode: Infrastructure or IBSS
937 *
938 * @return index in BSSID list, or error return code (< 0)
939 */
940struct bss_descriptor *lbs_find_bssid_in_list(struct lbs_private *priv,
941 u8 * bssid, u8 mode)
942{
943 struct bss_descriptor * iter_bss;
944 struct bss_descriptor * found_bss = NULL;
945
946 lbs_deb_enter(LBS_DEB_SCAN);
947
948 if (!bssid)
949 goto out;
950
951 lbs_deb_hex(LBS_DEB_SCAN, "looking for",
952 bssid, ETH_ALEN);
953
954 /* Look through the scan table for a compatible match. The loop will
955 * continue past a matched bssid that is not compatible in case there
956 * is an AP with multiple SSIDs assigned to the same BSSID
957 */
958 mutex_lock(&priv->lock);
959 list_for_each_entry (iter_bss, &priv->network_list, list) {
960 if (compare_ether_addr(iter_bss->bssid, bssid))
961 continue; /* bssid doesn't match */
962 switch (mode) {
963 case IW_MODE_INFRA:
964 case IW_MODE_ADHOC:
965 if (!is_network_compatible(priv, iter_bss, mode))
966 break;
967 found_bss = iter_bss;
968 break;
969 default:
970 found_bss = iter_bss;
971 break;
972 }
973 }
974 mutex_unlock(&priv->lock);
975
976out:
977 lbs_deb_leave_args(LBS_DEB_SCAN, "found_bss %p", found_bss);
978 return found_bss;
979}
980
981/**
982 * @brief This function finds ssid in ssid list.
983 *
984 * Used in association code
985 *
986 * @param priv A pointer to struct lbs_private
987 * @param ssid SSID to find in the list
988 * @param bssid BSSID to qualify the SSID selection (if provided)
989 * @param mode Network mode: Infrastructure or IBSS
990 *
991 * @return index in BSSID list
992 */
993struct bss_descriptor *lbs_find_ssid_in_list(struct lbs_private *priv,
994 u8 *ssid, u8 ssid_len, u8 * bssid, u8 mode,
995 int channel)
996{
997 u8 bestrssi = 0;
998 struct bss_descriptor * iter_bss = NULL;
999 struct bss_descriptor * found_bss = NULL;
1000 struct bss_descriptor * tmp_oldest = NULL;
1001
1002 lbs_deb_enter(LBS_DEB_SCAN);
1003
1004 mutex_lock(&priv->lock);
1005
1006 list_for_each_entry (iter_bss, &priv->network_list, list) {
1007 if ( !tmp_oldest
1008 || (iter_bss->last_scanned < tmp_oldest->last_scanned))
1009 tmp_oldest = iter_bss;
1010
1011 if (lbs_ssid_cmp(iter_bss->ssid, iter_bss->ssid_len,
1012 ssid, ssid_len) != 0)
1013 continue; /* ssid doesn't match */
1014 if (bssid && compare_ether_addr(iter_bss->bssid, bssid) != 0)
1015 continue; /* bssid doesn't match */
1016 if ((channel > 0) && (iter_bss->channel != channel))
1017 continue; /* channel doesn't match */
1018
1019 switch (mode) {
1020 case IW_MODE_INFRA:
1021 case IW_MODE_ADHOC:
1022 if (!is_network_compatible(priv, iter_bss, mode))
1023 break;
1024
1025 if (bssid) {
1026 /* Found requested BSSID */
1027 found_bss = iter_bss;
1028 goto out;
1029 }
1030
1031 if (SCAN_RSSI(iter_bss->rssi) > bestrssi) {
1032 bestrssi = SCAN_RSSI(iter_bss->rssi);
1033 found_bss = iter_bss;
1034 }
1035 break;
1036 case IW_MODE_AUTO:
1037 default:
1038 if (SCAN_RSSI(iter_bss->rssi) > bestrssi) {
1039 bestrssi = SCAN_RSSI(iter_bss->rssi);
1040 found_bss = iter_bss;
1041 }
1042 break;
1043 }
1044 }
1045
1046out:
1047 mutex_unlock(&priv->lock);
1048 lbs_deb_leave_args(LBS_DEB_SCAN, "found_bss %p", found_bss);
1049 return found_bss;
1050}
1051
1052/**
1053 * @brief This function finds the best SSID in the Scan List
1054 *
1055 * Search the scan table for the best SSID that also matches the current
1056 * adapter network preference (infrastructure or adhoc)
1057 *
1058 * @param priv A pointer to struct lbs_private
1059 *
1060 * @return index in BSSID list
1061 */
1062static struct bss_descriptor *lbs_find_best_ssid_in_list(
1063 struct lbs_private *priv,
1064 u8 mode)
1065{
1066 u8 bestrssi = 0;
1067 struct bss_descriptor * iter_bss;
1068 struct bss_descriptor * best_bss = NULL;
1069
1070 lbs_deb_enter(LBS_DEB_SCAN);
1071
1072 mutex_lock(&priv->lock);
1073
1074 list_for_each_entry (iter_bss, &priv->network_list, list) {
1075 switch (mode) {
1076 case IW_MODE_INFRA:
1077 case IW_MODE_ADHOC:
1078 if (!is_network_compatible(priv, iter_bss, mode))
1079 break;
1080 if (SCAN_RSSI(iter_bss->rssi) <= bestrssi)
1081 break;
1082 bestrssi = SCAN_RSSI(iter_bss->rssi);
1083 best_bss = iter_bss;
1084 break;
1085 case IW_MODE_AUTO:
1086 default:
1087 if (SCAN_RSSI(iter_bss->rssi) <= bestrssi)
1088 break;
1089 bestrssi = SCAN_RSSI(iter_bss->rssi);
1090 best_bss = iter_bss;
1091 break;
1092 }
1093 }
1094
1095 mutex_unlock(&priv->lock);
1096 lbs_deb_leave_args(LBS_DEB_SCAN, "best_bss %p", best_bss);
1097 return best_bss;
1098}
1099
1100/**
1101 * @brief Find the AP with specific ssid in the scan list
1102 *
1103 * Used from association worker.
1104 *
1105 * @param priv A pointer to struct lbs_private structure
1106 * @param pSSID A pointer to AP's ssid
1107 *
1108 * @return 0--success, otherwise--fail
1109 */
1110int lbs_find_best_network_ssid(struct lbs_private *priv,
1111 u8 *out_ssid, u8 *out_ssid_len, u8 preferred_mode, u8 *out_mode)
1112{
1113 int ret = -1;
1114 struct bss_descriptor * found;
1115
1116 lbs_deb_enter(LBS_DEB_SCAN);
1117
1118 lbs_scan_networks(priv, NULL, 1);
1119 if (priv->surpriseremoved)
1120 goto out;
1121
1122 found = lbs_find_best_ssid_in_list(priv, preferred_mode);
1123 if (found && (found->ssid_len > 0)) {
1124 memcpy(out_ssid, &found->ssid, IW_ESSID_MAX_SIZE);
1125 *out_ssid_len = found->ssid_len;
1126 *out_mode = found->mode;
1127 ret = 0;
1128 }
1129
1130out:
1131 lbs_deb_leave_args(LBS_DEB_SCAN, "ret %d", ret);
1132 return ret;
1133}
1134
1135
1136/**
1137 * @brief Send a scan command for all available channels filtered on a spec 730 * @brief Send a scan command for all available channels filtered on a spec
1138 * 731 *
1139 * Used in association code and from debugfs 732 * Used in association code and from debugfs
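Note: the lbs_find_*_in_list() helpers removed above walked the scan table and preferred the entry with the best RSSI among networks matching the wanted mode. A plain-array sketch of that selection, without the driver's list handling and locking:

/* Sketch of the best-RSSI selection logic removed above. */
#include <stdint.h>
#include <stdio.h>

struct bss_entry { int mode; uint8_t rssi; const char *ssid; };

static const struct bss_entry *find_best(const struct bss_entry *tbl,
					 size_t n, int mode)
{
	const struct bss_entry *best = NULL;
	uint8_t best_rssi = 0;

	for (size_t i = 0; i < n; i++) {
		if (tbl[i].mode != mode || tbl[i].rssi <= best_rssi)
			continue;	/* wrong mode or not better */
		best_rssi = tbl[i].rssi;
		best = &tbl[i];
	}
	return best;
}

int main(void)
{
	struct bss_entry tbl[] = {
		{ 1, 30, "office" }, { 1, 45, "lab" }, { 2, 60, "adhoc" },
	};
	const struct bss_entry *b = find_best(tbl, 3, 1);

	printf("best: %s\n", b ? b->ssid : "none");
	return 0;
}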
@@ -1141,29 +734,24 @@ out:
1141 * @param priv A pointer to struct lbs_private structure 734 * @param priv A pointer to struct lbs_private structure
1142 * @param ssid A pointer to the SSID to scan for 735 * @param ssid A pointer to the SSID to scan for
1143 * @param ssid_len Length of the SSID 736 * @param ssid_len Length of the SSID
1144 * @param clear_ssid Should existing scan results with this SSID
1145 * be cleared?
1146 * 737 *
1147 * @return 0-success, otherwise fail 738 * @return 0-success, otherwise fail
1148 */ 739 */
1149int lbs_send_specific_ssid_scan(struct lbs_private *priv, 740int lbs_send_specific_ssid_scan(struct lbs_private *priv, uint8_t *ssid,
1150 u8 *ssid, u8 ssid_len, u8 clear_ssid) 741 uint8_t ssid_len)
1151{ 742{
1152 struct lbs_ioctl_user_scan_cfg scancfg;
1153 int ret = 0; 743 int ret = 0;
1154 744
1155 lbs_deb_enter_args(LBS_DEB_SCAN, "SSID '%s', clear %d", 745 lbs_deb_enter_args(LBS_DEB_SCAN, "SSID '%s'\n",
1156 escape_essid(ssid, ssid_len), clear_ssid); 746 escape_essid(ssid, ssid_len));
1157 747
1158 if (!ssid_len) 748 if (!ssid_len)
1159 goto out; 749 goto out;
1160 750
1161 memset(&scancfg, 0x00, sizeof(scancfg)); 751 memcpy(priv->scan_ssid, ssid, ssid_len);
1162 memcpy(scancfg.ssid, ssid, ssid_len); 752 priv->scan_ssid_len = ssid_len;
1163 scancfg.ssid_len = ssid_len;
1164 scancfg.clear_ssid = clear_ssid;
1165 753
1166 lbs_scan_networks(priv, &scancfg, 1); 754 lbs_scan_networks(priv, 1);
1167 if (priv->surpriseremoved) { 755 if (priv->surpriseremoved) {
1168 ret = -1; 756 ret = -1;
1169 goto out; 757 goto out;
@@ -1187,17 +775,17 @@ out:
1187#define MAX_CUSTOM_LEN 64 775#define MAX_CUSTOM_LEN 64
1188 776
1189static inline char *lbs_translate_scan(struct lbs_private *priv, 777static inline char *lbs_translate_scan(struct lbs_private *priv,
1190 char *start, char *stop, 778 char *start, char *stop,
1191 struct bss_descriptor *bss) 779 struct bss_descriptor *bss)
1192{ 780{
1193 struct chan_freq_power *cfp; 781 struct chan_freq_power *cfp;
1194 char *current_val; /* For rates */ 782 char *current_val; /* For rates */
1195 struct iw_event iwe; /* Temporary buffer */ 783 struct iw_event iwe; /* Temporary buffer */
1196 int j; 784 int j;
1197#define PERFECT_RSSI ((u8)50) 785#define PERFECT_RSSI ((uint8_t)50)
1198#define WORST_RSSI ((u8)0) 786#define WORST_RSSI ((uint8_t)0)
1199#define RSSI_DIFF ((u8)(PERFECT_RSSI - WORST_RSSI)) 787#define RSSI_DIFF ((uint8_t)(PERFECT_RSSI - WORST_RSSI))
1200 u8 rssi; 788 uint8_t rssi;
1201 789
1202 lbs_deb_enter(LBS_DEB_SCAN); 790 lbs_deb_enter(LBS_DEB_SCAN);
1203 791
@@ -1217,7 +805,7 @@ static inline char *lbs_translate_scan(struct lbs_private *priv,
1217 /* SSID */ 805 /* SSID */
1218 iwe.cmd = SIOCGIWESSID; 806 iwe.cmd = SIOCGIWESSID;
1219 iwe.u.data.flags = 1; 807 iwe.u.data.flags = 1;
1220 iwe.u.data.length = min((u32) bss->ssid_len, (u32) IW_ESSID_MAX_SIZE); 808 iwe.u.data.length = min((uint32_t) bss->ssid_len, (uint32_t) IW_ESSID_MAX_SIZE);
1221 start = iwe_stream_add_point(start, stop, &iwe, bss->ssid); 809 start = iwe_stream_add_point(start, stop, &iwe, bss->ssid);
1222 810
1223 /* Mode */ 811 /* Mode */
@@ -1238,28 +826,26 @@ static inline char *lbs_translate_scan(struct lbs_private *priv,
1238 826
1239 rssi = iwe.u.qual.level - MRVDRV_NF_DEFAULT_SCAN_VALUE; 827 rssi = iwe.u.qual.level - MRVDRV_NF_DEFAULT_SCAN_VALUE;
1240 iwe.u.qual.qual = 828 iwe.u.qual.qual =
1241 (100 * RSSI_DIFF * RSSI_DIFF - (PERFECT_RSSI - rssi) * 829 (100 * RSSI_DIFF * RSSI_DIFF - (PERFECT_RSSI - rssi) *
1242 (15 * (RSSI_DIFF) + 62 * (PERFECT_RSSI - rssi))) / 830 (15 * (RSSI_DIFF) + 62 * (PERFECT_RSSI - rssi))) /
1243 (RSSI_DIFF * RSSI_DIFF); 831 (RSSI_DIFF * RSSI_DIFF);
1244 if (iwe.u.qual.qual > 100) 832 if (iwe.u.qual.qual > 100)
1245 iwe.u.qual.qual = 100; 833 iwe.u.qual.qual = 100;
1246 834
1247 if (priv->NF[TYPE_BEACON][TYPE_NOAVG] == 0) { 835 if (priv->NF[TYPE_BEACON][TYPE_NOAVG] == 0) {
1248 iwe.u.qual.noise = MRVDRV_NF_DEFAULT_SCAN_VALUE; 836 iwe.u.qual.noise = MRVDRV_NF_DEFAULT_SCAN_VALUE;
1249 } else { 837 } else {
1250 iwe.u.qual.noise = 838 iwe.u.qual.noise = CAL_NF(priv->NF[TYPE_BEACON][TYPE_NOAVG]);
1251 CAL_NF(priv->NF[TYPE_BEACON][TYPE_NOAVG]);
1252 } 839 }
1253 840
1254 /* Locally created ad-hoc BSSs won't have beacons if this is the 841 /* Locally created ad-hoc BSSs won't have beacons if this is the
1255 * only station in the adhoc network; so get signal strength 842 * only station in the adhoc network; so get signal strength
1256 * from receive statistics. 843 * from receive statistics.
1257 */ 844 */
1258 if ((priv->mode == IW_MODE_ADHOC) 845 if ((priv->mode == IW_MODE_ADHOC) && priv->adhoccreate
1259 && priv->adhoccreate
1260 && !lbs_ssid_cmp(priv->curbssparams.ssid, 846 && !lbs_ssid_cmp(priv->curbssparams.ssid,
1261 priv->curbssparams.ssid_len, 847 priv->curbssparams.ssid_len,
1262 bss->ssid, bss->ssid_len)) { 848 bss->ssid, bss->ssid_len)) {
1263 int snr, nf; 849 int snr, nf;
1264 snr = priv->SNR[TYPE_RXPD][TYPE_AVG] / AVG_SCALE; 850 snr = priv->SNR[TYPE_RXPD][TYPE_AVG] / AVG_SCALE;
1265 nf = priv->NF[TYPE_RXPD][TYPE_AVG] / AVG_SCALE; 851 nf = priv->NF[TYPE_RXPD][TYPE_AVG] / AVG_SCALE;
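Note: the signal-quality mapping above scales RSSI into a 0..100 percentage using PERFECT_RSSI = 50 and RSSI_DIFF = 50, then clamps at 100. A worked example of the arithmetic:

/* Worked example of the quality formula shown in the hunk. */
#include <stdio.h>

#define PERFECT_RSSI 50
#define WORST_RSSI   0
#define RSSI_DIFF    (PERFECT_RSSI - WORST_RSSI)

static int quality_from_rssi(int rssi)
{
	int qual = (100 * RSSI_DIFF * RSSI_DIFF -
		    (PERFECT_RSSI - rssi) *
		    (15 * RSSI_DIFF + 62 * (PERFECT_RSSI - rssi))) /
		   (RSSI_DIFF * RSSI_DIFF);
	return qual > 100 ? 100 : qual;
}

int main(void)
{
	printf("rssi 50 -> %d%%\n", quality_from_rssi(50));	/* 100 */
	printf("rssi 25 -> %d%%\n", quality_from_rssi(25));	/* 77 */
	printf("rssi  0 -> %d%%\n", quality_from_rssi(0));	/* 23 */
	return 0;
}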
@@ -1290,14 +876,13 @@ static inline char *lbs_translate_scan(struct lbs_private *priv,
1290 current_val = iwe_stream_add_value(start, current_val, 876 current_val = iwe_stream_add_value(start, current_val,
1291 stop, &iwe, IW_EV_PARAM_LEN); 877 stop, &iwe, IW_EV_PARAM_LEN);
1292 } 878 }
1293 if ((bss->mode == IW_MODE_ADHOC) 879 if ((bss->mode == IW_MODE_ADHOC) && priv->adhoccreate
1294 && !lbs_ssid_cmp(priv->curbssparams.ssid, 880 && !lbs_ssid_cmp(priv->curbssparams.ssid,
1295 priv->curbssparams.ssid_len, 881 priv->curbssparams.ssid_len,
1296 bss->ssid, bss->ssid_len) 882 bss->ssid, bss->ssid_len)) {
1297 && priv->adhoccreate) {
1298 iwe.u.bitrate.value = 22 * 500000; 883 iwe.u.bitrate.value = 22 * 500000;
1299 current_val = iwe_stream_add_value(start, current_val, 884 current_val = iwe_stream_add_value(start, current_val,
1300 stop, &iwe, IW_EV_PARAM_LEN); 885 stop, &iwe, IW_EV_PARAM_LEN);
1301 } 886 }
1302 /* Check if we added any event */ 887 /* Check if we added any event */
1303 if((current_val - start) > IW_EV_LCP_LEN) 888 if((current_val - start) > IW_EV_LCP_LEN)
@@ -1326,8 +911,7 @@ static inline char *lbs_translate_scan(struct lbs_private *priv,
1326 char *p = custom; 911 char *p = custom;
1327 912
1328 iwe.cmd = IWEVCUSTOM; 913 iwe.cmd = IWEVCUSTOM;
1329 p += snprintf(p, MAX_CUSTOM_LEN - (p - custom), 914 p += snprintf(p, MAX_CUSTOM_LEN, "mesh-type: olpc");
1330 "mesh-type: olpc");
1331 iwe.u.data.length = p - custom; 915 iwe.u.data.length = p - custom;
1332 if (iwe.u.data.length) 916 if (iwe.u.data.length)
1333 start = iwe_stream_add_point(start, stop, &iwe, custom); 917 start = iwe_stream_add_point(start, stop, &iwe, custom);
@@ -1350,39 +934,49 @@ out:
1350 * @return 0 --success, otherwise fail 934 * @return 0 --success, otherwise fail
1351 */ 935 */
1352int lbs_set_scan(struct net_device *dev, struct iw_request_info *info, 936int lbs_set_scan(struct net_device *dev, struct iw_request_info *info,
1353 struct iw_param *wrqu, char *extra) 937 union iwreq_data *wrqu, char *extra)
1354{ 938{
1355 struct lbs_private *priv = dev->priv; 939 struct lbs_private *priv = dev->priv;
940 int ret = 0;
1356 941
1357 lbs_deb_enter(LBS_DEB_SCAN); 942 lbs_deb_enter(LBS_DEB_WEXT);
1358 943
1359 if (!netif_running(dev)) 944 if (!netif_running(dev)) {
1360 return -ENETDOWN; 945 ret = -ENETDOWN;
946 goto out;
947 }
1361 948
1362 /* mac80211 does this: 949 /* mac80211 does this:
1363 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); 950 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1364 if (sdata->type != IEEE80211_IF_TYPE_xxx) 951 if (sdata->type != IEEE80211_IF_TYPE_xxx) {
1365 return -EOPNOTSUPP; 952 ret = -EOPNOTSUPP;
953 goto out;
954 }
955 */
1366 956
1367 if (wrqu->data.length == sizeof(struct iw_scan_req) && 957 if (wrqu->data.length == sizeof(struct iw_scan_req) &&
1368 wrqu->data.flags & IW_SCAN_THIS_ESSID) { 958 wrqu->data.flags & IW_SCAN_THIS_ESSID) {
1369 req = (struct iw_scan_req *)extra; 959 struct iw_scan_req *req = (struct iw_scan_req *)extra;
1370 ssid = req->essid; 960 priv->scan_ssid_len = req->essid_len;
1371 ssid_len = req->essid_len; 961 memcpy(priv->scan_ssid, req->essid, priv->scan_ssid_len);
962 lbs_deb_wext("set_scan, essid '%s'\n",
963 escape_essid(priv->scan_ssid, priv->scan_ssid_len));
964 } else {
965 priv->scan_ssid_len = 0;
1372 } 966 }
1373 */
1374 967
1375 if (!delayed_work_pending(&priv->scan_work)) 968 if (!delayed_work_pending(&priv->scan_work))
1376 queue_delayed_work(priv->work_thread, &priv->scan_work, 969 queue_delayed_work(priv->work_thread, &priv->scan_work,
1377 msecs_to_jiffies(50)); 970 msecs_to_jiffies(50));
1378 /* set marker that currently a scan is taking place */ 971 /* set marker that currently a scan is taking place */
1379 priv->last_scanned_channel = -1; 972 priv->scan_channel = -1;
1380 973
1381 if (priv->surpriseremoved) 974 if (priv->surpriseremoved)
1382 return -EIO; 975 ret = -EIO;
1383 976
1384 lbs_deb_leave(LBS_DEB_SCAN); 977out:
1385 return 0; 978 lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret);
979 return ret;
1386} 980}
1387 981
1388 982
@@ -1397,31 +991,30 @@ int lbs_set_scan(struct net_device *dev, struct iw_request_info *info,
1397 * @return 0 --success, otherwise fail 991 * @return 0 --success, otherwise fail
1398 */ 992 */
1399int lbs_get_scan(struct net_device *dev, struct iw_request_info *info, 993int lbs_get_scan(struct net_device *dev, struct iw_request_info *info,
1400 struct iw_point *dwrq, char *extra) 994 struct iw_point *dwrq, char *extra)
1401{ 995{
1402#define SCAN_ITEM_SIZE 128 996#define SCAN_ITEM_SIZE 128
1403 struct lbs_private *priv = dev->priv; 997 struct lbs_private *priv = dev->priv;
1404 int err = 0; 998 int err = 0;
1405 char *ev = extra; 999 char *ev = extra;
1406 char *stop = ev + dwrq->length; 1000 char *stop = ev + dwrq->length;
1407 struct bss_descriptor * iter_bss; 1001 struct bss_descriptor *iter_bss;
1408 struct bss_descriptor * safe; 1002 struct bss_descriptor *safe;
1409 1003
1410 lbs_deb_enter(LBS_DEB_SCAN); 1004 lbs_deb_enter(LBS_DEB_WEXT);
1411 1005
1412 /* iwlist should wait until the current scan is finished */ 1006 /* iwlist should wait until the current scan is finished */
1413 if (priv->last_scanned_channel) 1007 if (priv->scan_channel)
1414 return -EAGAIN; 1008 return -EAGAIN;
1415 1009
1416 /* Update RSSI if current BSS is a locally created ad-hoc BSS */ 1010 /* Update RSSI if current BSS is a locally created ad-hoc BSS */
1417 if ((priv->mode == IW_MODE_ADHOC) && priv->adhoccreate) { 1011 if ((priv->mode == IW_MODE_ADHOC) && priv->adhoccreate)
1418 lbs_prepare_and_send_command(priv, CMD_802_11_RSSI, 0, 1012 lbs_prepare_and_send_command(priv, CMD_802_11_RSSI, 0,
1419 CMD_OPTION_WAITFORRSP, 0, NULL); 1013 CMD_OPTION_WAITFORRSP, 0, NULL);
1420 }
1421 1014
1422 mutex_lock(&priv->lock); 1015 mutex_lock(&priv->lock);
1423 list_for_each_entry_safe (iter_bss, safe, &priv->network_list, list) { 1016 list_for_each_entry_safe (iter_bss, safe, &priv->network_list, list) {
1424 char * next_ev; 1017 char *next_ev;
1425 unsigned long stale_time; 1018 unsigned long stale_time;
1426 1019
1427 if (stop - ev < SCAN_ITEM_SIZE) { 1020 if (stop - ev < SCAN_ITEM_SIZE) {
@@ -1436,8 +1029,7 @@ int lbs_get_scan(struct net_device *dev, struct iw_request_info *info,
1436 /* Prune old an old scan result */ 1029 /* Prune old an old scan result */
1437 stale_time = iter_bss->last_scanned + DEFAULT_MAX_SCAN_AGE; 1030 stale_time = iter_bss->last_scanned + DEFAULT_MAX_SCAN_AGE;
1438 if (time_after(jiffies, stale_time)) { 1031 if (time_after(jiffies, stale_time)) {
1439 list_move_tail (&iter_bss->list, 1032 list_move_tail(&iter_bss->list, &priv->network_free_list);
1440 &priv->network_free_list);
1441 clear_bss_descriptor(iter_bss); 1033 clear_bss_descriptor(iter_bss);
1442 continue; 1034 continue;
1443 } 1035 }
@@ -1453,7 +1045,7 @@ int lbs_get_scan(struct net_device *dev, struct iw_request_info *info,
1453 dwrq->length = (ev - extra); 1045 dwrq->length = (ev - extra);
1454 dwrq->flags = 0; 1046 dwrq->flags = 0;
1455 1047
1456 lbs_deb_leave_args(LBS_DEB_SCAN, "ret %d", err); 1048 lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", err);
1457 return err; 1049 return err;
1458} 1050}
1459 1051
@@ -1468,44 +1060,6 @@ int lbs_get_scan(struct net_device *dev, struct iw_request_info *info,
1468 1060
1469 1061
1470/** 1062/**
1471 * @brief Prepare a scan command to be sent to the firmware
1472 *
1473 * Called via lbs_prepare_and_send_command(priv, CMD_802_11_SCAN, ...)
1474 * from cmd.c
1475 *
1476 * Sends a fixed length data part (specifying the BSS type and BSSID filters)
1477 * as well as a variable number/length of TLVs to the firmware.
1478 *
1479 * @param priv A pointer to struct lbs_private structure
1480 * @param cmd A pointer to cmd_ds_command structure to be sent to
1481 * firmware with the cmd_DS_801_11_SCAN structure
1482 * @param pdata_buf Void pointer cast of a lbs_scan_cmd_config struct used
1483 * to set the fields/TLVs for the command sent to firmware
1484 *
1485 * @return 0 or -1
1486 */
1487int lbs_cmd_80211_scan(struct lbs_private *priv,
1488 struct cmd_ds_command *cmd, void *pdata_buf)
1489{
1490 struct cmd_ds_802_11_scan *pscan = &cmd->params.scan;
1491 struct lbs_scan_cmd_config *pscancfg = pdata_buf;
1492
1493 lbs_deb_enter(LBS_DEB_SCAN);
1494
1495 /* Set fixed field variables in scan command */
1496 pscan->bsstype = pscancfg->bsstype;
1497 memcpy(pscan->bssid, pscancfg->bssid, ETH_ALEN);
1498 memcpy(pscan->tlvbuffer, pscancfg->tlvbuffer, pscancfg->tlvbufferlen);
1499
1500 /* size is equal to the sizeof(fixed portions) + the TLV len + header */
1501 cmd->size = cpu_to_le16(sizeof(pscan->bsstype) + ETH_ALEN
1502 + pscancfg->tlvbufferlen + S_DS_GEN);
1503
1504 lbs_deb_leave(LBS_DEB_SCAN);
1505 return 0;
1506}
1507
1508/**
1509 * @brief This function handles the command response of scan 1063 * @brief This function handles the command response of scan
1510 * 1064 *
1511 * Called from handle_cmd_response() in cmdrespc. 1065 * Called from handle_cmd_response() in cmdrespc.
@@ -1531,13 +1085,14 @@ int lbs_cmd_80211_scan(struct lbs_private *priv,
1531 * 1085 *
1532 * @return 0 or -1 1086 * @return 0 or -1
1533 */ 1087 */
1534int lbs_ret_80211_scan(struct lbs_private *priv, struct cmd_ds_command *resp) 1088static int lbs_ret_80211_scan(struct lbs_private *priv, unsigned long dummy,
1089 struct cmd_header *resp)
1535{ 1090{
1536 struct cmd_ds_802_11_scan_rsp *pscan; 1091 struct cmd_ds_802_11_scan_rsp *scanresp = (void *)resp;
1537 struct bss_descriptor * iter_bss; 1092 struct bss_descriptor *iter_bss;
1538 struct bss_descriptor * safe; 1093 struct bss_descriptor *safe;
1539 u8 *pbssinfo; 1094 uint8_t *bssinfo;
1540 u16 scanrespsize; 1095 uint16_t scanrespsize;
1541 int bytesleft; 1096 int bytesleft;
1542 int idx; 1097 int idx;
1543 int tlvbufsize; 1098 int tlvbufsize;
@@ -1554,48 +1109,45 @@ int lbs_ret_80211_scan(struct lbs_private *priv, struct cmd_ds_command *resp)
1554 clear_bss_descriptor(iter_bss); 1109 clear_bss_descriptor(iter_bss);
1555 } 1110 }
1556 1111
1557 pscan = &resp->params.scanresp; 1112 if (scanresp->nr_sets > MAX_NETWORK_COUNT) {
1558 1113 lbs_deb_scan("SCAN_RESP: too many scan results (%d, max %d)\n",
1559 if (pscan->nr_sets > MAX_NETWORK_COUNT) { 1114 scanresp->nr_sets, MAX_NETWORK_COUNT);
1560 lbs_deb_scan(
1561 "SCAN_RESP: too many scan results (%d, max %d)!!\n",
1562 pscan->nr_sets, MAX_NETWORK_COUNT);
1563 ret = -1; 1115 ret = -1;
1564 goto done; 1116 goto done;
1565 } 1117 }
1566 1118
1567 bytesleft = le16_to_cpu(pscan->bssdescriptsize); 1119 bytesleft = le16_to_cpu(scanresp->bssdescriptsize);
1568 lbs_deb_scan("SCAN_RESP: bssdescriptsize %d\n", bytesleft); 1120 lbs_deb_scan("SCAN_RESP: bssdescriptsize %d\n", bytesleft);
1569 1121
1570 scanrespsize = le16_to_cpu(resp->size); 1122 scanrespsize = le16_to_cpu(resp->size);
1571 lbs_deb_scan("SCAN_RESP: scan results %d\n", pscan->nr_sets); 1123 lbs_deb_scan("SCAN_RESP: scan results %d\n", scanresp->nr_sets);
1572 1124
1573 pbssinfo = pscan->bssdesc_and_tlvbuffer; 1125 bssinfo = scanresp->bssdesc_and_tlvbuffer;
1574 1126
1575 /* The size of the TLV buffer is equal to the entire command response 1127 /* The size of the TLV buffer is equal to the entire command response
1576 * size (scanrespsize) minus the fixed fields (sizeof()'s), the 1128 * size (scanrespsize) minus the fixed fields (sizeof()'s), the
1577 * BSS Descriptions (bssdescriptsize as bytesLef) and the command 1129 * BSS Descriptions (bssdescriptsize as bytesLef) and the command
1578 * response header (S_DS_GEN) 1130 * response header (S_DS_GEN)
1579 */ 1131 */
1580 tlvbufsize = scanrespsize - (bytesleft + sizeof(pscan->bssdescriptsize) 1132 tlvbufsize = scanrespsize - (bytesleft + sizeof(scanresp->bssdescriptsize)
1581 + sizeof(pscan->nr_sets) 1133 + sizeof(scanresp->nr_sets)
1582 + S_DS_GEN); 1134 + S_DS_GEN);
1583 1135
1584 /* 1136 /*
1585 * Process each scan response returned (pscan->nr_sets). Save 1137 * Process each scan response returned (scanresp->nr_sets). Save
1586 * the information in the newbssentry and then insert into the 1138 * the information in the newbssentry and then insert into the
1587 * driver scan table either as an update to an existing entry 1139 * driver scan table either as an update to an existing entry
1588 * or as an addition at the end of the table 1140 * or as an addition at the end of the table
1589 */ 1141 */
1590 for (idx = 0; idx < pscan->nr_sets && bytesleft; idx++) { 1142 for (idx = 0; idx < scanresp->nr_sets && bytesleft; idx++) {
1591 struct bss_descriptor new; 1143 struct bss_descriptor new;
1592 struct bss_descriptor * found = NULL; 1144 struct bss_descriptor *found = NULL;
1593 struct bss_descriptor * oldest = NULL; 1145 struct bss_descriptor *oldest = NULL;
1594 DECLARE_MAC_BUF(mac); 1146 DECLARE_MAC_BUF(mac);
1595 1147
1596 /* Process the data fields and IEs returned for this BSS */ 1148 /* Process the data fields and IEs returned for this BSS */
1597 memset(&new, 0, sizeof (struct bss_descriptor)); 1149 memset(&new, 0, sizeof (struct bss_descriptor));
1598 if (lbs_process_bss(&new, &pbssinfo, &bytesleft) != 0) { 1150 if (lbs_process_bss(&new, &bssinfo, &bytesleft) != 0) {
1599 /* error parsing the scan response, skipped */ 1151 /* error parsing the scan response, skipped */
1600 lbs_deb_scan("SCAN_RESP: process_bss returned ERROR\n"); 1152 lbs_deb_scan("SCAN_RESP: process_bss returned ERROR\n");
1601 continue; 1153 continue;
@@ -1630,8 +1182,7 @@ int lbs_ret_80211_scan(struct lbs_private *priv, struct cmd_ds_command *resp)
1630 continue; 1182 continue;
1631 } 1183 }
1632 1184
1633 lbs_deb_scan("SCAN_RESP: BSSID %s\n", 1185 lbs_deb_scan("SCAN_RESP: BSSID %s\n", print_mac(mac, new.bssid));
1634 print_mac(mac, new.bssid));
1635 1186
1636 /* Copy the locally created newbssentry to the scan table */ 1187 /* Copy the locally created newbssentry to the scan table */
1637 memcpy(found, &new, offsetof(struct bss_descriptor, list)); 1188 memcpy(found, &new, offsetof(struct bss_descriptor, list));
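The hunk above relies on two conventions that are easy to lose in the side-by-side view: the trailing TLV area is sized purely by subtraction (total response size minus the fixed fields, the descriptor block and the command header), and the per-BSS loop advances a cursor through the descriptor block while decrementing a remaining-byte counter. A minimal, self-contained sketch of that cursor-plus-remaining-bytes pattern (illustrative only, not the libertas code; every name below is made up):

#include <stdint.h>
#include <stddef.h>

struct rec {
	uint8_t len;		/* 'len' bytes of payload follow this header */
};

/*
 * Walk a packed buffer of variable-length records, the same pattern the
 * scan-response parser uses for its BSS descriptors.  Returns the number
 * of complete records seen, or -1 if a record runs past the buffer end.
 */
static int walk_records(const uint8_t *buf, int bytesleft)
{
	int count = 0;

	while (bytesleft >= (int)sizeof(struct rec)) {
		const struct rec *r = (const struct rec *)buf;
		int total = (int)sizeof(*r) + r->len;

		if (total > bytesleft)
			return -1;
		/* a real parser would consume r's payload here */
		buf += total;
		bytesleft -= total;
		count++;
	}
	return count;
}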
diff --git a/drivers/net/wireless/libertas/scan.h b/drivers/net/wireless/libertas/scan.h
index 319f70dde350..9e07b0464a8e 100644
--- a/drivers/net/wireless/libertas/scan.h
+++ b/drivers/net/wireless/libertas/scan.h
@@ -7,198 +7,22 @@
7#ifndef _LBS_SCAN_H 7#ifndef _LBS_SCAN_H
8#define _LBS_SCAN_H 8#define _LBS_SCAN_H
9 9
10#include <net/ieee80211.h>
11#include "hostcmd.h"
12
13/** 10/**
14 * @brief Maximum number of channels that can be sent in a setuserscan ioctl 11 * @brief Maximum number of channels that can be sent in a setuserscan ioctl
15 *
16 * @sa lbs_ioctl_user_scan_cfg
17 */ 12 */
18#define LBS_IOCTL_USER_SCAN_CHAN_MAX 50 13#define LBS_IOCTL_USER_SCAN_CHAN_MAX 50
19 14
20//! Infrastructure BSS scan type in lbs_scan_cmd_config
21#define LBS_SCAN_BSS_TYPE_BSS 1
22
23//! Adhoc BSS scan type in lbs_scan_cmd_config
24#define LBS_SCAN_BSS_TYPE_IBSS 2
25
26//! Adhoc or Infrastructure BSS scan type in lbs_scan_cmd_config, no filter
27#define LBS_SCAN_BSS_TYPE_ANY 3
28
29/**
30 * @brief Structure used internally in the wlan driver to configure a scan.
31 *
32 * Sent to the command processing module to configure the firmware
33 * scan command prepared by lbs_cmd_80211_scan.
34 *
35 * @sa lbs_scan_networks
36 *
37 */
38struct lbs_scan_cmd_config {
39 /**
40 * @brief BSS type to be sent in the firmware command
41 *
42 * Field can be used to restrict the types of networks returned in the
43 * scan. valid settings are:
44 *
45 * - LBS_SCAN_BSS_TYPE_BSS (infrastructure)
46 * - LBS_SCAN_BSS_TYPE_IBSS (adhoc)
47 * - LBS_SCAN_BSS_TYPE_ANY (unrestricted, adhoc and infrastructure)
48 */
49 u8 bsstype;
50
51 /**
52 * @brief Specific BSSID used to filter scan results in the firmware
53 */
54 u8 bssid[ETH_ALEN];
55
56 /**
57 * @brief length of TLVs sent in command starting at tlvBuffer
58 */
59 int tlvbufferlen;
60
61 /**
62 * @brief SSID TLV(s) and ChanList TLVs to be sent in the firmware command
63 *
64 * @sa TLV_TYPE_CHANLIST, mrvlietypes_chanlistparamset_t
65 * @sa TLV_TYPE_SSID, mrvlietypes_ssidparamset_t
66 */
67 u8 tlvbuffer[1]; //!< SSID TLV(s) and ChanList TLVs are stored here
68};
69
70/**
71 * @brief IOCTL channel sub-structure sent in lbs_ioctl_user_scan_cfg
72 *
73 * Multiple instances of this structure are included in the IOCTL command
74 * to configure a instance of a scan on the specific channel.
75 */
76struct lbs_ioctl_user_scan_chan {
77 u8 channumber; //!< channel Number to scan
78 u8 radiotype; //!< Radio type: 'B/G' band = 0, 'A' band = 1
79 u8 scantype; //!< Scan type: Active = 0, Passive = 1
80 u16 scantime; //!< Scan duration in milliseconds; if 0 default used
81};
82
83/**
84 * @brief IOCTL input structure to configure an immediate scan cmd to firmware
85 *
86 * Used in the setuserscan (LBS_SET_USER_SCAN) private ioctl. Specifies
87 * a number of parameters to be used in general for the scan as well
88 * as a channel list (lbs_ioctl_user_scan_chan) for each scan period
89 * desired.
90 *
91 * @sa lbs_set_user_scan_ioctl
92 */
93struct lbs_ioctl_user_scan_cfg {
94 /**
95 * @brief BSS type to be sent in the firmware command
96 *
97 * Field can be used to restrict the types of networks returned in the
98 * scan. valid settings are:
99 *
100 * - LBS_SCAN_BSS_TYPE_BSS (infrastructure)
101 * - LBS_SCAN_BSS_TYPE_IBSS (adhoc)
102 * - LBS_SCAN_BSS_TYPE_ANY (unrestricted, adhoc and infrastructure)
103 */
104 u8 bsstype;
105
106 /**
107 * @brief BSSID filter sent in the firmware command to limit the results
108 */
109 u8 bssid[ETH_ALEN];
110
111 /* Clear existing scan results matching this BSSID */
112 u8 clear_bssid;
113
114 /**
115 * @brief SSID filter sent in the firmware command to limit the results
116 */
117 char ssid[IW_ESSID_MAX_SIZE];
118 u8 ssid_len;
119
120 /* Clear existing scan results matching this SSID */
121 u8 clear_ssid;
122};
123
124/**
125 * @brief Structure used to store information for each beacon/probe response
126 */
127struct bss_descriptor {
128 u8 bssid[ETH_ALEN];
129
130 u8 ssid[IW_ESSID_MAX_SIZE + 1];
131 u8 ssid_len;
132
133 u16 capability;
134
135 /* receive signal strength in dBm */
136 long rssi;
137
138 u32 channel;
139
140 u16 beaconperiod;
141
142 u32 atimwindow;
143
144 /* IW_MODE_AUTO, IW_MODE_ADHOC, IW_MODE_INFRA */
145 u8 mode;
146
147 /* zero-terminated array of supported data rates */
148 u8 rates[MAX_RATES + 1];
149
150 unsigned long last_scanned;
151
152 union ieeetypes_phyparamset phyparamset;
153 union IEEEtypes_ssparamset ssparamset;
154
155 struct ieeetypes_countryinfofullset countryinfo;
156
157 u8 wpa_ie[MAX_WPA_IE_LEN];
158 size_t wpa_ie_len;
159 u8 rsn_ie[MAX_WPA_IE_LEN];
160 size_t rsn_ie_len;
161
162 u8 mesh;
163
164 struct list_head list;
165};
166
167int lbs_ssid_cmp(u8 *ssid1, u8 ssid1_len, u8 *ssid2, u8 ssid2_len); 15int lbs_ssid_cmp(u8 *ssid1, u8 ssid1_len, u8 *ssid2, u8 ssid2_len);
168 16
169struct bss_descriptor *lbs_find_ssid_in_list(struct lbs_private *priv,
170 u8 *ssid, u8 ssid_len, u8 *bssid, u8 mode,
171 int channel);
172
173struct bss_descriptor *lbs_find_bssid_in_list(struct lbs_private *priv,
174 u8 *bssid, u8 mode);
175
176int lbs_find_best_network_ssid(struct lbs_private *priv, u8 *out_ssid,
177 u8 *out_ssid_len, u8 preferred_mode, u8 *out_mode);
178
179int lbs_send_specific_ssid_scan(struct lbs_private *priv, u8 *ssid, 17int lbs_send_specific_ssid_scan(struct lbs_private *priv, u8 *ssid,
180 u8 ssid_len, u8 clear_ssid); 18 u8 ssid_len);
181 19
182int lbs_cmd_80211_scan(struct lbs_private *priv,
183 struct cmd_ds_command *cmd,
184 void *pdata_buf);
185
186int lbs_ret_80211_scan(struct lbs_private *priv,
187 struct cmd_ds_command *resp);
188
189int lbs_scan_networks(struct lbs_private *priv,
190 const struct lbs_ioctl_user_scan_cfg *puserscanin,
191 int full_scan);
192
193struct ifreq;
194
195struct iw_point;
196struct iw_param;
197struct iw_request_info;
198int lbs_get_scan(struct net_device *dev, struct iw_request_info *info, 20int lbs_get_scan(struct net_device *dev, struct iw_request_info *info,
199 struct iw_point *dwrq, char *extra); 21 struct iw_point *dwrq, char *extra);
200int lbs_set_scan(struct net_device *dev, struct iw_request_info *info, 22int lbs_set_scan(struct net_device *dev, struct iw_request_info *info,
201 struct iw_param *vwrq, char *extra); 23 union iwreq_data *wrqu, char *extra);
24
25int lbs_scan_networks(struct lbs_private *priv, int full_scan);
202 26
203void lbs_scan_worker(struct work_struct *work); 27void lbs_scan_worker(struct work_struct *work);
204 28
diff --git a/drivers/net/wireless/libertas/tx.c b/drivers/net/wireless/libertas/tx.c
index 00d95f75bd89..a4972fed2941 100644
--- a/drivers/net/wireless/libertas/tx.c
+++ b/drivers/net/wireless/libertas/tx.c
@@ -151,7 +151,7 @@ int lbs_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
151 151
152 dev->trans_start = jiffies; 152 dev->trans_start = jiffies;
153 153
154 if (priv->monitormode != LBS_MONITOR_OFF) { 154 if (priv->monitormode) {
155 /* Keep the skb to echo it back once Tx feedback is 155 /* Keep the skb to echo it back once Tx feedback is
156 received from FW */ 156 received from FW */
157 skb_orphan(skb); 157 skb_orphan(skb);
@@ -179,32 +179,17 @@ int lbs_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
179 * 179 *
180 * @returns void 180 * @returns void
181 */ 181 */
182void lbs_send_tx_feedback(struct lbs_private *priv) 182void lbs_send_tx_feedback(struct lbs_private *priv, u32 try_count)
183{ 183{
184 struct tx_radiotap_hdr *radiotap_hdr; 184 struct tx_radiotap_hdr *radiotap_hdr;
185 u32 status = priv->eventcause;
186 int txfail;
187 int try_count;
188 185
189 if (priv->monitormode == LBS_MONITOR_OFF || 186 if (!priv->monitormode || priv->currenttxskb == NULL)
190 priv->currenttxskb == NULL)
191 return; 187 return;
192 188
193 radiotap_hdr = (struct tx_radiotap_hdr *)priv->currenttxskb->data; 189 radiotap_hdr = (struct tx_radiotap_hdr *)priv->currenttxskb->data;
194 190
195 txfail = (status >> 24); 191 radiotap_hdr->data_retries = try_count ?
196 192 (1 + priv->txretrycount - try_count) : 0;
197#if 0
198 /* The version of roofnet that we've tested does not use this yet
199 * But it may be used in the future.
200 */
201 if (txfail)
202 radiotap_hdr->flags &= IEEE80211_RADIOTAP_F_TX_FAIL;
203#endif
204 try_count = (status >> 16) & 0xff;
205 radiotap_hdr->data_retries = (try_count) ?
206 (1 + priv->txretrycount - try_count) : 0;
207
208 193
209 priv->currenttxskb->protocol = eth_type_trans(priv->currenttxskb, 194 priv->currenttxskb->protocol = eth_type_trans(priv->currenttxskb,
210 priv->rtap_net_dev); 195 priv->rtap_net_dev);
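With the new calling convention, lbs_send_tx_feedback() receives the try count directly instead of extracting it from priv->eventcause. A tiny standalone sketch of the data_retries arithmetic, using purely hypothetical numbers:

#include <stdint.h>

/*
 * data_retries as computed above: try_count ? 1 + txretrycount - try_count : 0
 * e.g. txretrycount = 7, try_count = 5  ->  data_retries = 3
 *      try_count = 0                    ->  data_retries = 0
 */
static uint8_t example_data_retries(uint32_t txretrycount, uint32_t try_count)
{
	return try_count ? (uint8_t)(1 + txretrycount - try_count) : 0;
}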
diff --git a/drivers/net/wireless/libertas/types.h b/drivers/net/wireless/libertas/types.h
index f0d57958b34b..4031be420862 100644
--- a/drivers/net/wireless/libertas/types.h
+++ b/drivers/net/wireless/libertas/types.h
@@ -239,4 +239,17 @@ struct mrvlietypes_ledgpio {
239 struct led_pin ledpin[1]; 239 struct led_pin ledpin[1];
240} __attribute__ ((packed)); 240} __attribute__ ((packed));
241 241
242struct led_bhv {
243 uint8_t firmwarestate;
244 uint8_t led;
245 uint8_t ledstate;
246 uint8_t ledarg;
247} __attribute__ ((packed));
248
249
250struct mrvlietypes_ledbhv {
251 struct mrvlietypesheader header;
252 struct led_bhv ledbhv[1];
253} __attribute__ ((packed));
254
242#endif 255#endif
diff --git a/drivers/net/wireless/libertas/wext.c b/drivers/net/wireless/libertas/wext.c
index e8bfc26b10a4..0973d015a520 100644
--- a/drivers/net/wireless/libertas/wext.c
+++ b/drivers/net/wireless/libertas/wext.c
@@ -16,8 +16,8 @@
16#include "decl.h" 16#include "decl.h"
17#include "defs.h" 17#include "defs.h"
18#include "dev.h" 18#include "dev.h"
19#include "join.h"
20#include "wext.h" 19#include "wext.h"
20#include "scan.h"
21#include "assoc.h" 21#include "assoc.h"
22#include "cmd.h" 22#include "cmd.h"
23 23
@@ -579,6 +579,9 @@ static int lbs_get_range(struct net_device *dev, struct iw_request_info *info,
579 range->num_bitrates); 579 range->num_bitrates);
580 580
581 range->num_frequency = 0; 581 range->num_frequency = 0;
582
583 range->scan_capa = IW_SCAN_CAPA_ESSID;
584
582 if (priv->enable11d && 585 if (priv->enable11d &&
583 (priv->connect_status == LBS_CONNECTED || 586 (priv->connect_status == LBS_CONNECTED ||
584 priv->mesh_connect_status == LBS_CONNECTED)) { 587 priv->mesh_connect_status == LBS_CONNECTED)) {
@@ -602,7 +605,7 @@ static int lbs_get_range(struct net_device *dev, struct iw_request_info *info,
602 lbs_deb_wext("chan_no %d\n", chan_no); 605 lbs_deb_wext("chan_no %d\n", chan_no);
603 range->freq[range->num_frequency].i = (long)chan_no; 606 range->freq[range->num_frequency].i = (long)chan_no;
604 range->freq[range->num_frequency].m = 607 range->freq[range->num_frequency].m =
605 (long)lbs_chan_2_freq(chan_no, band) * 100000; 608 (long)lbs_chan_2_freq(chan_no) * 100000;
606 range->freq[range->num_frequency].e = 1; 609 range->freq[range->num_frequency].e = 1;
607 range->num_frequency++; 610 range->num_frequency++;
608 } 611 }
@@ -653,13 +656,10 @@ static int lbs_get_range(struct net_device *dev, struct iw_request_info *info,
653 range->num_encoding_sizes = 2; 656 range->num_encoding_sizes = 2;
654 range->max_encoding_tokens = 4; 657 range->max_encoding_tokens = 4;
655 658
656 range->min_pmp = 1000000; 659 /*
657 range->max_pmp = 120000000; 660 * Right now we support only "iwconfig ethX power on|off"
658 range->min_pmt = 1000; 661 */
659 range->max_pmt = 1000000; 662 range->pm_capa = IW_POWER_ON;
660 range->pmp_flags = IW_POWER_PERIOD;
661 range->pmt_flags = IW_POWER_TIMEOUT;
662 range->pm_capa = IW_POWER_PERIOD | IW_POWER_TIMEOUT | IW_POWER_ALL_R;
663 663
664 /* 664 /*
665 * Minimum version we recommend 665 * Minimum version we recommend
@@ -781,21 +781,14 @@ static int lbs_get_power(struct net_device *dev, struct iw_request_info *info,
781 struct iw_param *vwrq, char *extra) 781 struct iw_param *vwrq, char *extra)
782{ 782{
783 struct lbs_private *priv = dev->priv; 783 struct lbs_private *priv = dev->priv;
784 int mode;
785 784
786 lbs_deb_enter(LBS_DEB_WEXT); 785 lbs_deb_enter(LBS_DEB_WEXT);
787 786
788 mode = priv->psmode;
789
790 if ((vwrq->disabled = (mode == LBS802_11POWERMODECAM))
791 || priv->connect_status == LBS_DISCONNECTED)
792 {
793 goto out;
794 }
795
796 vwrq->value = 0; 787 vwrq->value = 0;
788 vwrq->flags = 0;
789 vwrq->disabled = priv->psmode == LBS802_11POWERMODECAM
790 || priv->connect_status == LBS_DISCONNECTED;
797 791
798out:
799 lbs_deb_leave(LBS_DEB_WEXT); 792 lbs_deb_leave(LBS_DEB_WEXT);
800 return 0; 793 return 0;
801} 794}
@@ -817,6 +810,7 @@ static struct iw_statistics *lbs_get_wireless_stats(struct net_device *dev)
817 int stats_valid = 0; 810 int stats_valid = 0;
818 u8 rssi; 811 u8 rssi;
819 u32 tx_retries; 812 u32 tx_retries;
813 struct cmd_ds_802_11_get_log log;
820 814
821 lbs_deb_enter(LBS_DEB_WEXT); 815 lbs_deb_enter(LBS_DEB_WEXT);
822 816
@@ -860,7 +854,11 @@ static struct iw_statistics *lbs_get_wireless_stats(struct net_device *dev)
860 /* Quality by TX errors */ 854 /* Quality by TX errors */
861 priv->wstats.discard.retries = priv->stats.tx_errors; 855 priv->wstats.discard.retries = priv->stats.tx_errors;
862 856
863 tx_retries = le32_to_cpu(priv->logmsg.retry); 857 memset(&log, 0, sizeof(log));
858 log.hdr.size = cpu_to_le16(sizeof(log));
859 lbs_cmd_with_response(priv, CMD_802_11_GET_LOG, &log);
860
861 tx_retries = le32_to_cpu(log.retry);
864 862
865 if (tx_retries > 75) 863 if (tx_retries > 75)
866 tx_qual = (90 - tx_retries) * POOR / 15; 864 tx_qual = (90 - tx_retries) * POOR / 15;
@@ -876,10 +874,9 @@ static struct iw_statistics *lbs_get_wireless_stats(struct net_device *dev)
876 (PERFECT - VERY_GOOD) / 50 + VERY_GOOD; 874 (PERFECT - VERY_GOOD) / 50 + VERY_GOOD;
877 quality = min(quality, tx_qual); 875 quality = min(quality, tx_qual);
878 876
879 priv->wstats.discard.code = le32_to_cpu(priv->logmsg.wepundecryptable); 877 priv->wstats.discard.code = le32_to_cpu(log.wepundecryptable);
880 priv->wstats.discard.fragment = le32_to_cpu(priv->logmsg.rxfrag);
881 priv->wstats.discard.retries = tx_retries; 878 priv->wstats.discard.retries = tx_retries;
882 priv->wstats.discard.misc = le32_to_cpu(priv->logmsg.ackfailure); 879 priv->wstats.discard.misc = le32_to_cpu(log.ackfailure);
883 880
884 /* Calculate quality */ 881 /* Calculate quality */
885 priv->wstats.qual.qual = min_t(u8, quality, 100); 882 priv->wstats.qual.qual = min_t(u8, quality, 100);
@@ -889,8 +886,6 @@ static struct iw_statistics *lbs_get_wireless_stats(struct net_device *dev)
889 /* update stats asynchronously for future calls */ 886 /* update stats asynchronously for future calls */
890 lbs_prepare_and_send_command(priv, CMD_802_11_RSSI, 0, 887 lbs_prepare_and_send_command(priv, CMD_802_11_RSSI, 0,
891 0, 0, NULL); 888 0, 0, NULL);
892 lbs_prepare_and_send_command(priv, CMD_802_11_GET_LOG, 0,
893 0, 0, NULL);
894out: 889out:
895 if (!stats_valid) { 890 if (!stats_valid) {
896 priv->wstats.miss.beacon = 0; 891 priv->wstats.miss.beacon = 0;
@@ -2065,23 +2060,6 @@ static int lbs_set_wap(struct net_device *dev, struct iw_request_info *info,
2065 return ret; 2060 return ret;
2066} 2061}
2067 2062
2068void lbs_get_fwversion(struct lbs_private *priv, char *fwversion, int maxlen)
2069{
2070 char fwver[32];
2071
2072 mutex_lock(&priv->lock);
2073
2074 sprintf(fwver, "%u.%u.%u.p%u",
2075 priv->fwrelease >> 24 & 0xff,
2076 priv->fwrelease >> 16 & 0xff,
2077 priv->fwrelease >> 8 & 0xff,
2078 priv->fwrelease & 0xff);
2079
2080 mutex_unlock(&priv->lock);
2081 snprintf(fwversion, maxlen, fwver);
2082}
2083
2084
2085/* 2063/*
2086 * iwconfig settable callbacks 2064 * iwconfig settable callbacks
2087 */ 2065 */
diff --git a/drivers/net/wireless/libertas/wext.h b/drivers/net/wireless/libertas/wext.h
index a563d9a231b6..4c08db497606 100644
--- a/drivers/net/wireless/libertas/wext.h
+++ b/drivers/net/wireless/libertas/wext.h
@@ -4,19 +4,6 @@
4#ifndef _LBS_WEXT_H_ 4#ifndef _LBS_WEXT_H_
5#define _LBS_WEXT_H_ 5#define _LBS_WEXT_H_
6 6
7/** lbs_ioctl_regrdwr */
8struct lbs_ioctl_regrdwr {
9 /** Which register to access */
10 u16 whichreg;
11 /** Read or Write */
12 u16 action;
13 u32 offset;
14 u16 NOB;
15 u32 value;
16};
17
18#define LBS_MONITOR_OFF 0
19
20extern struct iw_handler_def lbs_handler_def; 7extern struct iw_handler_def lbs_handler_def;
21extern struct iw_handler_def mesh_handler_def; 8extern struct iw_handler_def mesh_handler_def;
22 9
diff --git a/drivers/net/wireless/net2280.h b/drivers/net/wireless/net2280.h
deleted file mode 100644
index 120eb831b287..000000000000
--- a/drivers/net/wireless/net2280.h
+++ /dev/null
@@ -1,452 +0,0 @@
1#ifndef NET2280_H
2#define NET2280_H
3/*
4 * NetChip 2280 high/full speed USB device controller.
5 * Unlike many such controllers, this one talks PCI.
6 */
7
8/*
9 * Copyright (C) 2002 NetChip Technology, Inc. (http://www.netchip.com)
10 * Copyright (C) 2003 David Brownell
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25 */
26
27/*-------------------------------------------------------------------------*/
28
29/* NET2280 MEMORY MAPPED REGISTERS
30 *
31 * The register layout came from the chip documentation, and the bit
32 * number definitions were extracted from chip specification.
33 *
34 * Use the shift operator ('<<') to build bit masks, with readl/writel
35 * to access the registers through PCI.
36 */
37
38/* main registers, BAR0 + 0x0000 */
39struct net2280_regs {
40 // offset 0x0000
41 __le32 devinit;
42#define LOCAL_CLOCK_FREQUENCY 8
43#define FORCE_PCI_RESET 7
44#define PCI_ID 6
45#define PCI_ENABLE 5
46#define FIFO_SOFT_RESET 4
47#define CFG_SOFT_RESET 3
48#define PCI_SOFT_RESET 2
49#define USB_SOFT_RESET 1
50#define M8051_RESET 0
51 __le32 eectl;
52#define EEPROM_ADDRESS_WIDTH 23
53#define EEPROM_CHIP_SELECT_ACTIVE 22
54#define EEPROM_PRESENT 21
55#define EEPROM_VALID 20
56#define EEPROM_BUSY 19
57#define EEPROM_CHIP_SELECT_ENABLE 18
58#define EEPROM_BYTE_READ_START 17
59#define EEPROM_BYTE_WRITE_START 16
60#define EEPROM_READ_DATA 8
61#define EEPROM_WRITE_DATA 0
62 __le32 eeclkfreq;
63 u32 _unused0;
64 // offset 0x0010
65
66 __le32 pciirqenb0; /* interrupt PCI master ... */
67#define SETUP_PACKET_INTERRUPT_ENABLE 7
68#define ENDPOINT_F_INTERRUPT_ENABLE 6
69#define ENDPOINT_E_INTERRUPT_ENABLE 5
70#define ENDPOINT_D_INTERRUPT_ENABLE 4
71#define ENDPOINT_C_INTERRUPT_ENABLE 3
72#define ENDPOINT_B_INTERRUPT_ENABLE 2
73#define ENDPOINT_A_INTERRUPT_ENABLE 1
74#define ENDPOINT_0_INTERRUPT_ENABLE 0
75 __le32 pciirqenb1;
76#define PCI_INTERRUPT_ENABLE 31
77#define POWER_STATE_CHANGE_INTERRUPT_ENABLE 27
78#define PCI_ARBITER_TIMEOUT_INTERRUPT_ENABLE 26
79#define PCI_PARITY_ERROR_INTERRUPT_ENABLE 25
80#define PCI_MASTER_ABORT_RECEIVED_INTERRUPT_ENABLE 20
81#define PCI_TARGET_ABORT_RECEIVED_INTERRUPT_ENABLE 19
82#define PCI_TARGET_ABORT_ASSERTED_INTERRUPT_ENABLE 18
83#define PCI_RETRY_ABORT_INTERRUPT_ENABLE 17
84#define PCI_MASTER_CYCLE_DONE_INTERRUPT_ENABLE 16
85#define GPIO_INTERRUPT_ENABLE 13
86#define DMA_D_INTERRUPT_ENABLE 12
87#define DMA_C_INTERRUPT_ENABLE 11
88#define DMA_B_INTERRUPT_ENABLE 10
89#define DMA_A_INTERRUPT_ENABLE 9
90#define EEPROM_DONE_INTERRUPT_ENABLE 8
91#define VBUS_INTERRUPT_ENABLE 7
92#define CONTROL_STATUS_INTERRUPT_ENABLE 6
93#define ROOT_PORT_RESET_INTERRUPT_ENABLE 4
94#define SUSPEND_REQUEST_INTERRUPT_ENABLE 3
95#define SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE 2
96#define RESUME_INTERRUPT_ENABLE 1
97#define SOF_INTERRUPT_ENABLE 0
98 __le32 cpu_irqenb0; /* ... or onboard 8051 */
99#define SETUP_PACKET_INTERRUPT_ENABLE 7
100#define ENDPOINT_F_INTERRUPT_ENABLE 6
101#define ENDPOINT_E_INTERRUPT_ENABLE 5
102#define ENDPOINT_D_INTERRUPT_ENABLE 4
103#define ENDPOINT_C_INTERRUPT_ENABLE 3
104#define ENDPOINT_B_INTERRUPT_ENABLE 2
105#define ENDPOINT_A_INTERRUPT_ENABLE 1
106#define ENDPOINT_0_INTERRUPT_ENABLE 0
107 __le32 cpu_irqenb1;
108#define CPU_INTERRUPT_ENABLE 31
109#define POWER_STATE_CHANGE_INTERRUPT_ENABLE 27
110#define PCI_ARBITER_TIMEOUT_INTERRUPT_ENABLE 26
111#define PCI_PARITY_ERROR_INTERRUPT_ENABLE 25
112#define PCI_INTA_INTERRUPT_ENABLE 24
113#define PCI_PME_INTERRUPT_ENABLE 23
114#define PCI_SERR_INTERRUPT_ENABLE 22
115#define PCI_PERR_INTERRUPT_ENABLE 21
116#define PCI_MASTER_ABORT_RECEIVED_INTERRUPT_ENABLE 20
117#define PCI_TARGET_ABORT_RECEIVED_INTERRUPT_ENABLE 19
118#define PCI_RETRY_ABORT_INTERRUPT_ENABLE 17
119#define PCI_MASTER_CYCLE_DONE_INTERRUPT_ENABLE 16
120#define GPIO_INTERRUPT_ENABLE 13
121#define DMA_D_INTERRUPT_ENABLE 12
122#define DMA_C_INTERRUPT_ENABLE 11
123#define DMA_B_INTERRUPT_ENABLE 10
124#define DMA_A_INTERRUPT_ENABLE 9
125#define EEPROM_DONE_INTERRUPT_ENABLE 8
126#define VBUS_INTERRUPT_ENABLE 7
127#define CONTROL_STATUS_INTERRUPT_ENABLE 6
128#define ROOT_PORT_RESET_INTERRUPT_ENABLE 4
129#define SUSPEND_REQUEST_INTERRUPT_ENABLE 3
130#define SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE 2
131#define RESUME_INTERRUPT_ENABLE 1
132#define SOF_INTERRUPT_ENABLE 0
133
134 // offset 0x0020
135 u32 _unused1;
136 __le32 usbirqenb1;
137#define USB_INTERRUPT_ENABLE 31
138#define POWER_STATE_CHANGE_INTERRUPT_ENABLE 27
139#define PCI_ARBITER_TIMEOUT_INTERRUPT_ENABLE 26
140#define PCI_PARITY_ERROR_INTERRUPT_ENABLE 25
141#define PCI_INTA_INTERRUPT_ENABLE 24
142#define PCI_PME_INTERRUPT_ENABLE 23
143#define PCI_SERR_INTERRUPT_ENABLE 22
144#define PCI_PERR_INTERRUPT_ENABLE 21
145#define PCI_MASTER_ABORT_RECEIVED_INTERRUPT_ENABLE 20
146#define PCI_TARGET_ABORT_RECEIVED_INTERRUPT_ENABLE 19
147#define PCI_RETRY_ABORT_INTERRUPT_ENABLE 17
148#define PCI_MASTER_CYCLE_DONE_INTERRUPT_ENABLE 16
149#define GPIO_INTERRUPT_ENABLE 13
150#define DMA_D_INTERRUPT_ENABLE 12
151#define DMA_C_INTERRUPT_ENABLE 11
152#define DMA_B_INTERRUPT_ENABLE 10
153#define DMA_A_INTERRUPT_ENABLE 9
154#define EEPROM_DONE_INTERRUPT_ENABLE 8
155#define VBUS_INTERRUPT_ENABLE 7
156#define CONTROL_STATUS_INTERRUPT_ENABLE 6
157#define ROOT_PORT_RESET_INTERRUPT_ENABLE 4
158#define SUSPEND_REQUEST_INTERRUPT_ENABLE 3
159#define SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE 2
160#define RESUME_INTERRUPT_ENABLE 1
161#define SOF_INTERRUPT_ENABLE 0
162 __le32 irqstat0;
163#define INTA_ASSERTED 12
164#define SETUP_PACKET_INTERRUPT 7
165#define ENDPOINT_F_INTERRUPT 6
166#define ENDPOINT_E_INTERRUPT 5
167#define ENDPOINT_D_INTERRUPT 4
168#define ENDPOINT_C_INTERRUPT 3
169#define ENDPOINT_B_INTERRUPT 2
170#define ENDPOINT_A_INTERRUPT 1
171#define ENDPOINT_0_INTERRUPT 0
172 __le32 irqstat1;
173#define POWER_STATE_CHANGE_INTERRUPT 27
174#define PCI_ARBITER_TIMEOUT_INTERRUPT 26
175#define PCI_PARITY_ERROR_INTERRUPT 25
176#define PCI_INTA_INTERRUPT 24
177#define PCI_PME_INTERRUPT 23
178#define PCI_SERR_INTERRUPT 22
179#define PCI_PERR_INTERRUPT 21
180#define PCI_MASTER_ABORT_RECEIVED_INTERRUPT 20
181#define PCI_TARGET_ABORT_RECEIVED_INTERRUPT 19
182#define PCI_RETRY_ABORT_INTERRUPT 17
183#define PCI_MASTER_CYCLE_DONE_INTERRUPT 16
184#define GPIO_INTERRUPT 13
185#define DMA_D_INTERRUPT 12
186#define DMA_C_INTERRUPT 11
187#define DMA_B_INTERRUPT 10
188#define DMA_A_INTERRUPT 9
189#define EEPROM_DONE_INTERRUPT 8
190#define VBUS_INTERRUPT 7
191#define CONTROL_STATUS_INTERRUPT 6
192#define ROOT_PORT_RESET_INTERRUPT 4
193#define SUSPEND_REQUEST_INTERRUPT 3
194#define SUSPEND_REQUEST_CHANGE_INTERRUPT 2
195#define RESUME_INTERRUPT 1
196#define SOF_INTERRUPT 0
197 // offset 0x0030
198 __le32 idxaddr;
199 __le32 idxdata;
200 __le32 fifoctl;
201#define PCI_BASE2_RANGE 16
202#define IGNORE_FIFO_AVAILABILITY 3
203#define PCI_BASE2_SELECT 2
204#define FIFO_CONFIGURATION_SELECT 0
205 u32 _unused2;
206 // offset 0x0040
207 __le32 memaddr;
208#define START 28
209#define DIRECTION 27
210#define FIFO_DIAGNOSTIC_SELECT 24
211#define MEMORY_ADDRESS 0
212 __le32 memdata0;
213 __le32 memdata1;
214 u32 _unused3;
215 // offset 0x0050
216 __le32 gpioctl;
217#define GPIO3_LED_SELECT 12
218#define GPIO3_INTERRUPT_ENABLE 11
219#define GPIO2_INTERRUPT_ENABLE 10
220#define GPIO1_INTERRUPT_ENABLE 9
221#define GPIO0_INTERRUPT_ENABLE 8
222#define GPIO3_OUTPUT_ENABLE 7
223#define GPIO2_OUTPUT_ENABLE 6
224#define GPIO1_OUTPUT_ENABLE 5
225#define GPIO0_OUTPUT_ENABLE 4
226#define GPIO3_DATA 3
227#define GPIO2_DATA 2
228#define GPIO1_DATA 1
229#define GPIO0_DATA 0
230 __le32 gpiostat;
231#define GPIO3_INTERRUPT 3
232#define GPIO2_INTERRUPT 2
233#define GPIO1_INTERRUPT 1
234#define GPIO0_INTERRUPT 0
235} __attribute__ ((packed));
236
237/* usb control, BAR0 + 0x0080 */
238struct net2280_usb_regs {
239 // offset 0x0080
240 __le32 stdrsp;
241#define STALL_UNSUPPORTED_REQUESTS 31
242#define SET_TEST_MODE 16
243#define GET_OTHER_SPEED_CONFIGURATION 15
244#define GET_DEVICE_QUALIFIER 14
245#define SET_ADDRESS 13
246#define ENDPOINT_SET_CLEAR_HALT 12
247#define DEVICE_SET_CLEAR_DEVICE_REMOTE_WAKEUP 11
248#define GET_STRING_DESCRIPTOR_2 10
249#define GET_STRING_DESCRIPTOR_1 9
250#define GET_STRING_DESCRIPTOR_0 8
251#define GET_SET_INTERFACE 6
252#define GET_SET_CONFIGURATION 5
253#define GET_CONFIGURATION_DESCRIPTOR 4
254#define GET_DEVICE_DESCRIPTOR 3
255#define GET_ENDPOINT_STATUS 2
256#define GET_INTERFACE_STATUS 1
257#define GET_DEVICE_STATUS 0
258 __le32 prodvendid;
259#define PRODUCT_ID 16
260#define VENDOR_ID 0
261 __le32 relnum;
262 __le32 usbctl;
263#define SERIAL_NUMBER_INDEX 16
264#define PRODUCT_ID_STRING_ENABLE 13
265#define VENDOR_ID_STRING_ENABLE 12
266#define USB_ROOT_PORT_WAKEUP_ENABLE 11
267#define VBUS_PIN 10
268#define TIMED_DISCONNECT 9
269#define SUSPEND_IMMEDIATELY 7
270#define SELF_POWERED_USB_DEVICE 6
271#define REMOTE_WAKEUP_SUPPORT 5
272#define PME_POLARITY 4
273#define USB_DETECT_ENABLE 3
274#define PME_WAKEUP_ENABLE 2
275#define DEVICE_REMOTE_WAKEUP_ENABLE 1
276#define SELF_POWERED_STATUS 0
277 // offset 0x0090
278 __le32 usbstat;
279#define HIGH_SPEED 7
280#define FULL_SPEED 6
281#define GENERATE_RESUME 5
282#define GENERATE_DEVICE_REMOTE_WAKEUP 4
283 __le32 xcvrdiag;
284#define FORCE_HIGH_SPEED_MODE 31
285#define FORCE_FULL_SPEED_MODE 30
286#define USB_TEST_MODE 24
287#define LINE_STATE 16
288#define TRANSCEIVER_OPERATION_MODE 2
289#define TRANSCEIVER_SELECT 1
290#define TERMINATION_SELECT 0
291 __le32 setup0123;
292 __le32 setup4567;
293 // offset 0x0090
294 u32 _unused0;
295 __le32 ouraddr;
296#define FORCE_IMMEDIATE 7
297#define OUR_USB_ADDRESS 0
298 __le32 ourconfig;
299} __attribute__ ((packed));
300
301/* pci control, BAR0 + 0x0100 */
302struct net2280_pci_regs {
303 // offset 0x0100
304 __le32 pcimstctl;
305#define PCI_ARBITER_PARK_SELECT 13
306#define PCI_MULTI LEVEL_ARBITER 12
307#define PCI_RETRY_ABORT_ENABLE 11
308#define DMA_MEMORY_WRITE_AND_INVALIDATE_ENABLE 10
309#define DMA_READ_MULTIPLE_ENABLE 9
310#define DMA_READ_LINE_ENABLE 8
311#define PCI_MASTER_COMMAND_SELECT 6
312#define MEM_READ_OR_WRITE 0
313#define IO_READ_OR_WRITE 1
314#define CFG_READ_OR_WRITE 2
315#define PCI_MASTER_START 5
316#define PCI_MASTER_READ_WRITE 4
317#define PCI_MASTER_WRITE 0
318#define PCI_MASTER_READ 1
319#define PCI_MASTER_BYTE_WRITE_ENABLES 0
320 __le32 pcimstaddr;
321 __le32 pcimstdata;
322 __le32 pcimststat;
323#define PCI_ARBITER_CLEAR 2
324#define PCI_EXTERNAL_ARBITER 1
325#define PCI_HOST_MODE 0
326} __attribute__ ((packed));
327
328/* dma control, BAR0 + 0x0180 ... array of four structs like this,
329 * for channels 0..3. see also struct net2280_dma: descriptor
330 * that can be loaded into some of these registers.
331 */
332struct net2280_dma_regs { /* [11.7] */
333 // offset 0x0180, 0x01a0, 0x01c0, 0x01e0,
334 __le32 dmactl;
335#define DMA_SCATTER_GATHER_DONE_INTERRUPT_ENABLE 25
336#define DMA_CLEAR_COUNT_ENABLE 21
337#define DESCRIPTOR_POLLING_RATE 19
338#define POLL_CONTINUOUS 0
339#define POLL_1_USEC 1
340#define POLL_100_USEC 2
341#define POLL_1_MSEC 3
342#define DMA_VALID_BIT_POLLING_ENABLE 18
343#define DMA_VALID_BIT_ENABLE 17
344#define DMA_SCATTER_GATHER_ENABLE 16
345#define DMA_OUT_AUTO_START_ENABLE 4
346#define DMA_PREEMPT_ENABLE 3
347#define DMA_FIFO_VALIDATE 2
348#define DMA_ENABLE 1
349#define DMA_ADDRESS_HOLD 0
350 __le32 dmastat;
351#define DMA_SCATTER_GATHER_DONE_INTERRUPT 25
352#define DMA_TRANSACTION_DONE_INTERRUPT 24
353#define DMA_ABORT 1
354#define DMA_START 0
355 u32 _unused0[2];
356 // offset 0x0190, 0x01b0, 0x01d0, 0x01f0,
357 __le32 dmacount;
358#define VALID_BIT 31
359#define DMA_DIRECTION 30
360#define DMA_DONE_INTERRUPT_ENABLE 29
361#define END_OF_CHAIN 28
362#define DMA_BYTE_COUNT_MASK ((1<<24)-1)
363#define DMA_BYTE_COUNT 0
364 __le32 dmaaddr;
365 __le32 dmadesc;
366 u32 _unused1;
367} __attribute__ ((packed));
368
369/* dedicated endpoint registers, BAR0 + 0x0200 */
370
371struct net2280_dep_regs { /* [11.8] */
372 // offset 0x0200, 0x0210, 0x220, 0x230, 0x240
373 __le32 dep_cfg;
374 // offset 0x0204, 0x0214, 0x224, 0x234, 0x244
375 __le32 dep_rsp;
376 u32 _unused[2];
377} __attribute__ ((packed));
378
379/* configurable endpoint registers, BAR0 + 0x0300 ... array of seven structs
380 * like this, for ep0 then the configurable endpoints A..F
381 * ep0 reserved for control; E and F have only 64 bytes of fifo
382 */
383struct net2280_ep_regs { /* [11.9] */
384 // offset 0x0300, 0x0320, 0x0340, 0x0360, 0x0380, 0x03a0, 0x03c0
385 __le32 ep_cfg;
386#define ENDPOINT_BYTE_COUNT 16
387#define ENDPOINT_ENABLE 10
388#define ENDPOINT_TYPE 8
389#define ENDPOINT_DIRECTION 7
390#define ENDPOINT_NUMBER 0
391 __le32 ep_rsp;
392#define SET_NAK_OUT_PACKETS 15
393#define SET_EP_HIDE_STATUS_PHASE 14
394#define SET_EP_FORCE_CRC_ERROR 13
395#define SET_INTERRUPT_MODE 12
396#define SET_CONTROL_STATUS_PHASE_HANDSHAKE 11
397#define SET_NAK_OUT_PACKETS_MODE 10
398#define SET_ENDPOINT_TOGGLE 9
399#define SET_ENDPOINT_HALT 8
400#define CLEAR_NAK_OUT_PACKETS 7
401#define CLEAR_EP_HIDE_STATUS_PHASE 6
402#define CLEAR_EP_FORCE_CRC_ERROR 5
403#define CLEAR_INTERRUPT_MODE 4
404#define CLEAR_CONTROL_STATUS_PHASE_HANDSHAKE 3
405#define CLEAR_NAK_OUT_PACKETS_MODE 2
406#define CLEAR_ENDPOINT_TOGGLE 1
407#define CLEAR_ENDPOINT_HALT 0
408 __le32 ep_irqenb;
409#define SHORT_PACKET_OUT_DONE_INTERRUPT_ENABLE 6
410#define SHORT_PACKET_TRANSFERRED_INTERRUPT_ENABLE 5
411#define DATA_PACKET_RECEIVED_INTERRUPT_ENABLE 3
412#define DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE 2
413#define DATA_OUT_PING_TOKEN_INTERRUPT_ENABLE 1
414#define DATA_IN_TOKEN_INTERRUPT_ENABLE 0
415 __le32 ep_stat;
416#define FIFO_VALID_COUNT 24
417#define HIGH_BANDWIDTH_OUT_TRANSACTION_PID 22
418#define TIMEOUT 21
419#define USB_STALL_SENT 20
420#define USB_IN_NAK_SENT 19
421#define USB_IN_ACK_RCVD 18
422#define USB_OUT_PING_NAK_SENT 17
423#define USB_OUT_ACK_SENT 16
424#define FIFO_OVERFLOW 13
425#define FIFO_UNDERFLOW 12
426#define FIFO_FULL 11
427#define FIFO_EMPTY 10
428#define FIFO_FLUSH 9
429#define SHORT_PACKET_OUT_DONE_INTERRUPT 6
430#define SHORT_PACKET_TRANSFERRED_INTERRUPT 5
431#define NAK_OUT_PACKETS 4
432#define DATA_PACKET_RECEIVED_INTERRUPT 3
433#define DATA_PACKET_TRANSMITTED_INTERRUPT 2
434#define DATA_OUT_PING_TOKEN_INTERRUPT 1
435#define DATA_IN_TOKEN_INTERRUPT 0
436 // offset 0x0310, 0x0330, 0x0350, 0x0370, 0x0390, 0x03b0, 0x03d0
437 __le32 ep_avail;
438 __le32 ep_data;
439 u32 _unused0[2];
440} __attribute__ ((packed));
441
442struct net2280_reg_write {
443 __le16 port;
444 __le32 addr;
445 __le32 val;
446} __attribute__ ((packed));
447
448struct net2280_reg_read {
449 __le16 port;
450 __le32 addr;
451} __attribute__ ((packed));
452#endif /* NET2280_H */
diff --git a/drivers/net/wireless/p54/Kconfig b/drivers/net/wireless/p54/Kconfig
new file mode 100644
index 000000000000..d3469d08f966
--- /dev/null
+++ b/drivers/net/wireless/p54/Kconfig
@@ -0,0 +1,63 @@
1config P54_COMMON
2 tristate "Softmac Prism54 support"
3 depends on MAC80211 && WLAN_80211 && FW_LOADER && EXPERIMENTAL
4 ---help---
5 This is common code for isl38xx based cards.
6 This module does nothing by itself - the USB/PCI frontends
7 also need to be enabled in order to support any devices.
8
9 These devices require softmac firmware which can be found at
10 http://prism54.org/
11
12 If you choose to build a module, it'll be called p54common.
13
14config P54_USB
15 tristate "Prism54 USB support"
16 depends on P54_COMMON && USB
17 select CRC32
18 ---help---
19 This driver is for USB isl38xx based wireless cards.
20 These are USB based adapters found in devices such as:
21
22 3COM 3CRWE254G72
23 SMC 2862W-G
24 Accton 802.11g WN4501 USB
25 Siemens Gigaset USB
26 Netgear WG121
27 Netgear WG111
28 Medion 40900, Roper Europe
29 Shuttle PN15, Airvast WM168g, IOGear GWU513
30 Linksys WUSB54G
31 Linksys WUSB54G Portable
32 DLink DWL-G120 Spinnaker
33 DLink DWL-G122
34 Belkin F5D7050 ver 1000
35 Cohiba Proto board
36 SMC 2862W-G version 2
37 U.S. Robotics U5 802.11g Adapter
38 FUJITSU E-5400 USB D1700
39 Sagem XG703A
40 DLink DWL-G120 Cohiba
41 Spinnaker Proto board
42 Linksys WUSB54AG
43 Inventel UR054G
44 Spinnaker DUT
45
46 These devices require softmac firmware which can be found at
47 http://prism54.org/
48
49 If you choose to build a module, it'll be called p54usb.
50
51config P54_PCI
52 tristate "Prism54 PCI support"
53 depends on P54_COMMON && PCI
54 ---help---
55 This driver is for PCI isl38xx based wireless cards.
56 This driver supports most devices that are supported by the
57 fullmac prism54 driver plus many devices which are not
58 supported by the fullmac driver/firmware.
59
60 This driver requires softmac firmware which can be found at
61 http://prism54.org/
62
63 If you choose to build a module, it'll be called p54pci.
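As the help texts above explain, p54common is only the shared isl38xx core and does nothing by itself. An illustrative .config fragment (not part of this patch) that builds the core and the USB front-end as modules while leaving the PCI front-end out:

CONFIG_P54_COMMON=m
CONFIG_P54_USB=m
# CONFIG_P54_PCI is not set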
diff --git a/drivers/net/wireless/p54/Makefile b/drivers/net/wireless/p54/Makefile
new file mode 100644
index 000000000000..4fa9ce717360
--- /dev/null
+++ b/drivers/net/wireless/p54/Makefile
@@ -0,0 +1,3 @@
1obj-$(CONFIG_P54_COMMON) += p54common.o
2obj-$(CONFIG_P54_USB) += p54usb.o
3obj-$(CONFIG_P54_PCI) += p54pci.o
diff --git a/drivers/net/wireless/p54/net2280.h b/drivers/net/wireless/p54/net2280.h
new file mode 100644
index 000000000000..4915d9d54203
--- /dev/null
+++ b/drivers/net/wireless/p54/net2280.h
@@ -0,0 +1,452 @@
1#ifndef NET2280_H
2#define NET2280_H
3/*
4 * NetChip 2280 high/full speed USB device controller.
5 * Unlike many such controllers, this one talks PCI.
6 */
7
8/*
9 * Copyright (C) 2002 NetChip Technology, Inc. (http://www.netchip.com)
10 * Copyright (C) 2003 David Brownell
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25 */
26
27/*-------------------------------------------------------------------------*/
28
29/* NET2280 MEMORY MAPPED REGISTERS
30 *
31 * The register layout came from the chip documentation, and the bit
32 * number definitions were extracted from chip specification.
33 *
34 * Use the shift operator ('<<') to build bit masks, with readl/writel
35 * to access the registers through PCI.
36 */
37
38/* main registers, BAR0 + 0x0000 */
39struct net2280_regs {
40 /* offset 0x0000 */
41 __le32 devinit;
42#define LOCAL_CLOCK_FREQUENCY 8
43#define FORCE_PCI_RESET 7
44#define PCI_ID 6
45#define PCI_ENABLE 5
46#define FIFO_SOFT_RESET 4
47#define CFG_SOFT_RESET 3
48#define PCI_SOFT_RESET 2
49#define USB_SOFT_RESET 1
50#define M8051_RESET 0
51 __le32 eectl;
52#define EEPROM_ADDRESS_WIDTH 23
53#define EEPROM_CHIP_SELECT_ACTIVE 22
54#define EEPROM_PRESENT 21
55#define EEPROM_VALID 20
56#define EEPROM_BUSY 19
57#define EEPROM_CHIP_SELECT_ENABLE 18
58#define EEPROM_BYTE_READ_START 17
59#define EEPROM_BYTE_WRITE_START 16
60#define EEPROM_READ_DATA 8
61#define EEPROM_WRITE_DATA 0
62 __le32 eeclkfreq;
63 u32 _unused0;
64 /* offset 0x0010 */
65
66 __le32 pciirqenb0; /* interrupt PCI master ... */
67#define SETUP_PACKET_INTERRUPT_ENABLE 7
68#define ENDPOINT_F_INTERRUPT_ENABLE 6
69#define ENDPOINT_E_INTERRUPT_ENABLE 5
70#define ENDPOINT_D_INTERRUPT_ENABLE 4
71#define ENDPOINT_C_INTERRUPT_ENABLE 3
72#define ENDPOINT_B_INTERRUPT_ENABLE 2
73#define ENDPOINT_A_INTERRUPT_ENABLE 1
74#define ENDPOINT_0_INTERRUPT_ENABLE 0
75 __le32 pciirqenb1;
76#define PCI_INTERRUPT_ENABLE 31
77#define POWER_STATE_CHANGE_INTERRUPT_ENABLE 27
78#define PCI_ARBITER_TIMEOUT_INTERRUPT_ENABLE 26
79#define PCI_PARITY_ERROR_INTERRUPT_ENABLE 25
80#define PCI_MASTER_ABORT_RECEIVED_INTERRUPT_ENABLE 20
81#define PCI_TARGET_ABORT_RECEIVED_INTERRUPT_ENABLE 19
82#define PCI_TARGET_ABORT_ASSERTED_INTERRUPT_ENABLE 18
83#define PCI_RETRY_ABORT_INTERRUPT_ENABLE 17
84#define PCI_MASTER_CYCLE_DONE_INTERRUPT_ENABLE 16
85#define GPIO_INTERRUPT_ENABLE 13
86#define DMA_D_INTERRUPT_ENABLE 12
87#define DMA_C_INTERRUPT_ENABLE 11
88#define DMA_B_INTERRUPT_ENABLE 10
89#define DMA_A_INTERRUPT_ENABLE 9
90#define EEPROM_DONE_INTERRUPT_ENABLE 8
91#define VBUS_INTERRUPT_ENABLE 7
92#define CONTROL_STATUS_INTERRUPT_ENABLE 6
93#define ROOT_PORT_RESET_INTERRUPT_ENABLE 4
94#define SUSPEND_REQUEST_INTERRUPT_ENABLE 3
95#define SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE 2
96#define RESUME_INTERRUPT_ENABLE 1
97#define SOF_INTERRUPT_ENABLE 0
98 __le32 cpu_irqenb0; /* ... or onboard 8051 */
99#define SETUP_PACKET_INTERRUPT_ENABLE 7
100#define ENDPOINT_F_INTERRUPT_ENABLE 6
101#define ENDPOINT_E_INTERRUPT_ENABLE 5
102#define ENDPOINT_D_INTERRUPT_ENABLE 4
103#define ENDPOINT_C_INTERRUPT_ENABLE 3
104#define ENDPOINT_B_INTERRUPT_ENABLE 2
105#define ENDPOINT_A_INTERRUPT_ENABLE 1
106#define ENDPOINT_0_INTERRUPT_ENABLE 0
107 __le32 cpu_irqenb1;
108#define CPU_INTERRUPT_ENABLE 31
109#define POWER_STATE_CHANGE_INTERRUPT_ENABLE 27
110#define PCI_ARBITER_TIMEOUT_INTERRUPT_ENABLE 26
111#define PCI_PARITY_ERROR_INTERRUPT_ENABLE 25
112#define PCI_INTA_INTERRUPT_ENABLE 24
113#define PCI_PME_INTERRUPT_ENABLE 23
114#define PCI_SERR_INTERRUPT_ENABLE 22
115#define PCI_PERR_INTERRUPT_ENABLE 21
116#define PCI_MASTER_ABORT_RECEIVED_INTERRUPT_ENABLE 20
117#define PCI_TARGET_ABORT_RECEIVED_INTERRUPT_ENABLE 19
118#define PCI_RETRY_ABORT_INTERRUPT_ENABLE 17
119#define PCI_MASTER_CYCLE_DONE_INTERRUPT_ENABLE 16
120#define GPIO_INTERRUPT_ENABLE 13
121#define DMA_D_INTERRUPT_ENABLE 12
122#define DMA_C_INTERRUPT_ENABLE 11
123#define DMA_B_INTERRUPT_ENABLE 10
124#define DMA_A_INTERRUPT_ENABLE 9
125#define EEPROM_DONE_INTERRUPT_ENABLE 8
126#define VBUS_INTERRUPT_ENABLE 7
127#define CONTROL_STATUS_INTERRUPT_ENABLE 6
128#define ROOT_PORT_RESET_INTERRUPT_ENABLE 4
129#define SUSPEND_REQUEST_INTERRUPT_ENABLE 3
130#define SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE 2
131#define RESUME_INTERRUPT_ENABLE 1
132#define SOF_INTERRUPT_ENABLE 0
133
134 /* offset 0x0020 */
135 u32 _unused1;
136 __le32 usbirqenb1;
137#define USB_INTERRUPT_ENABLE 31
138#define POWER_STATE_CHANGE_INTERRUPT_ENABLE 27
139#define PCI_ARBITER_TIMEOUT_INTERRUPT_ENABLE 26
140#define PCI_PARITY_ERROR_INTERRUPT_ENABLE 25
141#define PCI_INTA_INTERRUPT_ENABLE 24
142#define PCI_PME_INTERRUPT_ENABLE 23
143#define PCI_SERR_INTERRUPT_ENABLE 22
144#define PCI_PERR_INTERRUPT_ENABLE 21
145#define PCI_MASTER_ABORT_RECEIVED_INTERRUPT_ENABLE 20
146#define PCI_TARGET_ABORT_RECEIVED_INTERRUPT_ENABLE 19
147#define PCI_RETRY_ABORT_INTERRUPT_ENABLE 17
148#define PCI_MASTER_CYCLE_DONE_INTERRUPT_ENABLE 16
149#define GPIO_INTERRUPT_ENABLE 13
150#define DMA_D_INTERRUPT_ENABLE 12
151#define DMA_C_INTERRUPT_ENABLE 11
152#define DMA_B_INTERRUPT_ENABLE 10
153#define DMA_A_INTERRUPT_ENABLE 9
154#define EEPROM_DONE_INTERRUPT_ENABLE 8
155#define VBUS_INTERRUPT_ENABLE 7
156#define CONTROL_STATUS_INTERRUPT_ENABLE 6
157#define ROOT_PORT_RESET_INTERRUPT_ENABLE 4
158#define SUSPEND_REQUEST_INTERRUPT_ENABLE 3
159#define SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE 2
160#define RESUME_INTERRUPT_ENABLE 1
161#define SOF_INTERRUPT_ENABLE 0
162 __le32 irqstat0;
163#define INTA_ASSERTED 12
164#define SETUP_PACKET_INTERRUPT 7
165#define ENDPOINT_F_INTERRUPT 6
166#define ENDPOINT_E_INTERRUPT 5
167#define ENDPOINT_D_INTERRUPT 4
168#define ENDPOINT_C_INTERRUPT 3
169#define ENDPOINT_B_INTERRUPT 2
170#define ENDPOINT_A_INTERRUPT 1
171#define ENDPOINT_0_INTERRUPT 0
172 __le32 irqstat1;
173#define POWER_STATE_CHANGE_INTERRUPT 27
174#define PCI_ARBITER_TIMEOUT_INTERRUPT 26
175#define PCI_PARITY_ERROR_INTERRUPT 25
176#define PCI_INTA_INTERRUPT 24
177#define PCI_PME_INTERRUPT 23
178#define PCI_SERR_INTERRUPT 22
179#define PCI_PERR_INTERRUPT 21
180#define PCI_MASTER_ABORT_RECEIVED_INTERRUPT 20
181#define PCI_TARGET_ABORT_RECEIVED_INTERRUPT 19
182#define PCI_RETRY_ABORT_INTERRUPT 17
183#define PCI_MASTER_CYCLE_DONE_INTERRUPT 16
184#define GPIO_INTERRUPT 13
185#define DMA_D_INTERRUPT 12
186#define DMA_C_INTERRUPT 11
187#define DMA_B_INTERRUPT 10
188#define DMA_A_INTERRUPT 9
189#define EEPROM_DONE_INTERRUPT 8
190#define VBUS_INTERRUPT 7
191#define CONTROL_STATUS_INTERRUPT 6
192#define ROOT_PORT_RESET_INTERRUPT 4
193#define SUSPEND_REQUEST_INTERRUPT 3
194#define SUSPEND_REQUEST_CHANGE_INTERRUPT 2
195#define RESUME_INTERRUPT 1
196#define SOF_INTERRUPT 0
197 /* offset 0x0030 */
198 __le32 idxaddr;
199 __le32 idxdata;
200 __le32 fifoctl;
201#define PCI_BASE2_RANGE 16
202#define IGNORE_FIFO_AVAILABILITY 3
203#define PCI_BASE2_SELECT 2
204#define FIFO_CONFIGURATION_SELECT 0
205 u32 _unused2;
206 /* offset 0x0040 */
207 __le32 memaddr;
208#define START 28
209#define DIRECTION 27
210#define FIFO_DIAGNOSTIC_SELECT 24
211#define MEMORY_ADDRESS 0
212 __le32 memdata0;
213 __le32 memdata1;
214 u32 _unused3;
215 /* offset 0x0050 */
216 __le32 gpioctl;
217#define GPIO3_LED_SELECT 12
218#define GPIO3_INTERRUPT_ENABLE 11
219#define GPIO2_INTERRUPT_ENABLE 10
220#define GPIO1_INTERRUPT_ENABLE 9
221#define GPIO0_INTERRUPT_ENABLE 8
222#define GPIO3_OUTPUT_ENABLE 7
223#define GPIO2_OUTPUT_ENABLE 6
224#define GPIO1_OUTPUT_ENABLE 5
225#define GPIO0_OUTPUT_ENABLE 4
226#define GPIO3_DATA 3
227#define GPIO2_DATA 2
228#define GPIO1_DATA 1
229#define GPIO0_DATA 0
230 __le32 gpiostat;
231#define GPIO3_INTERRUPT 3
232#define GPIO2_INTERRUPT 2
233#define GPIO1_INTERRUPT 1
234#define GPIO0_INTERRUPT 0
235} __attribute__ ((packed));
236
237/* usb control, BAR0 + 0x0080 */
238struct net2280_usb_regs {
239 /* offset 0x0080 */
240 __le32 stdrsp;
241#define STALL_UNSUPPORTED_REQUESTS 31
242#define SET_TEST_MODE 16
243#define GET_OTHER_SPEED_CONFIGURATION 15
244#define GET_DEVICE_QUALIFIER 14
245#define SET_ADDRESS 13
246#define ENDPOINT_SET_CLEAR_HALT 12
247#define DEVICE_SET_CLEAR_DEVICE_REMOTE_WAKEUP 11
248#define GET_STRING_DESCRIPTOR_2 10
249#define GET_STRING_DESCRIPTOR_1 9
250#define GET_STRING_DESCRIPTOR_0 8
251#define GET_SET_INTERFACE 6
252#define GET_SET_CONFIGURATION 5
253#define GET_CONFIGURATION_DESCRIPTOR 4
254#define GET_DEVICE_DESCRIPTOR 3
255#define GET_ENDPOINT_STATUS 2
256#define GET_INTERFACE_STATUS 1
257#define GET_DEVICE_STATUS 0
258 __le32 prodvendid;
259#define PRODUCT_ID 16
260#define VENDOR_ID 0
261 __le32 relnum;
262 __le32 usbctl;
263#define SERIAL_NUMBER_INDEX 16
264#define PRODUCT_ID_STRING_ENABLE 13
265#define VENDOR_ID_STRING_ENABLE 12
266#define USB_ROOT_PORT_WAKEUP_ENABLE 11
267#define VBUS_PIN 10
268#define TIMED_DISCONNECT 9
269#define SUSPEND_IMMEDIATELY 7
270#define SELF_POWERED_USB_DEVICE 6
271#define REMOTE_WAKEUP_SUPPORT 5
272#define PME_POLARITY 4
273#define USB_DETECT_ENABLE 3
274#define PME_WAKEUP_ENABLE 2
275#define DEVICE_REMOTE_WAKEUP_ENABLE 1
276#define SELF_POWERED_STATUS 0
277 /* offset 0x0090 */
278 __le32 usbstat;
279#define HIGH_SPEED 7
280#define FULL_SPEED 6
281#define GENERATE_RESUME 5
282#define GENERATE_DEVICE_REMOTE_WAKEUP 4
283 __le32 xcvrdiag;
284#define FORCE_HIGH_SPEED_MODE 31
285#define FORCE_FULL_SPEED_MODE 30
286#define USB_TEST_MODE 24
287#define LINE_STATE 16
288#define TRANSCEIVER_OPERATION_MODE 2
289#define TRANSCEIVER_SELECT 1
290#define TERMINATION_SELECT 0
291 __le32 setup0123;
292 __le32 setup4567;
293 /* offset 0x0090 */
294 u32 _unused0;
295 __le32 ouraddr;
296#define FORCE_IMMEDIATE 7
297#define OUR_USB_ADDRESS 0
298 __le32 ourconfig;
299} __attribute__ ((packed));
300
301/* pci control, BAR0 + 0x0100 */
302struct net2280_pci_regs {
303 /* offset 0x0100 */
304 __le32 pcimstctl;
305#define PCI_ARBITER_PARK_SELECT 13
306#define PCI_MULTI LEVEL_ARBITER 12
307#define PCI_RETRY_ABORT_ENABLE 11
308#define DMA_MEMORY_WRITE_AND_INVALIDATE_ENABLE 10
309#define DMA_READ_MULTIPLE_ENABLE 9
310#define DMA_READ_LINE_ENABLE 8
311#define PCI_MASTER_COMMAND_SELECT 6
312#define MEM_READ_OR_WRITE 0
313#define IO_READ_OR_WRITE 1
314#define CFG_READ_OR_WRITE 2
315#define PCI_MASTER_START 5
316#define PCI_MASTER_READ_WRITE 4
317#define PCI_MASTER_WRITE 0
318#define PCI_MASTER_READ 1
319#define PCI_MASTER_BYTE_WRITE_ENABLES 0
320 __le32 pcimstaddr;
321 __le32 pcimstdata;
322 __le32 pcimststat;
323#define PCI_ARBITER_CLEAR 2
324#define PCI_EXTERNAL_ARBITER 1
325#define PCI_HOST_MODE 0
326} __attribute__ ((packed));
327
328/* dma control, BAR0 + 0x0180 ... array of four structs like this,
329 * for channels 0..3. see also struct net2280_dma: descriptor
330 * that can be loaded into some of these registers.
331 */
332struct net2280_dma_regs { /* [11.7] */
333 /* offset 0x0180, 0x01a0, 0x01c0, 0x01e0, */
334 __le32 dmactl;
335#define DMA_SCATTER_GATHER_DONE_INTERRUPT_ENABLE 25
336#define DMA_CLEAR_COUNT_ENABLE 21
337#define DESCRIPTOR_POLLING_RATE 19
338#define POLL_CONTINUOUS 0
339#define POLL_1_USEC 1
340#define POLL_100_USEC 2
341#define POLL_1_MSEC 3
342#define DMA_VALID_BIT_POLLING_ENABLE 18
343#define DMA_VALID_BIT_ENABLE 17
344#define DMA_SCATTER_GATHER_ENABLE 16
345#define DMA_OUT_AUTO_START_ENABLE 4
346#define DMA_PREEMPT_ENABLE 3
347#define DMA_FIFO_VALIDATE 2
348#define DMA_ENABLE 1
349#define DMA_ADDRESS_HOLD 0
350 __le32 dmastat;
351#define DMA_SCATTER_GATHER_DONE_INTERRUPT 25
352#define DMA_TRANSACTION_DONE_INTERRUPT 24
353#define DMA_ABORT 1
354#define DMA_START 0
355 u32 _unused0[2];
356 /* offset 0x0190, 0x01b0, 0x01d0, 0x01f0, */
357 __le32 dmacount;
358#define VALID_BIT 31
359#define DMA_DIRECTION 30
360#define DMA_DONE_INTERRUPT_ENABLE 29
361#define END_OF_CHAIN 28
362#define DMA_BYTE_COUNT_MASK ((1<<24)-1)
363#define DMA_BYTE_COUNT 0
364 __le32 dmaaddr;
365 __le32 dmadesc;
366 u32 _unused1;
367} __attribute__ ((packed));
368
369/* dedicated endpoint registers, BAR0 + 0x0200 */
370
371struct net2280_dep_regs { /* [11.8] */
372 /* offset 0x0200, 0x0210, 0x220, 0x230, 0x240 */
373 __le32 dep_cfg;
374 /* offset 0x0204, 0x0214, 0x224, 0x234, 0x244 */
375 __le32 dep_rsp;
376 u32 _unused[2];
377} __attribute__ ((packed));
378
379/* configurable endpoint registers, BAR0 + 0x0300 ... array of seven structs
380 * like this, for ep0 then the configurable endpoints A..F
381 * ep0 reserved for control; E and F have only 64 bytes of fifo
382 */
383struct net2280_ep_regs { /* [11.9] */
384 /* offset 0x0300, 0x0320, 0x0340, 0x0360, 0x0380, 0x03a0, 0x03c0 */
385 __le32 ep_cfg;
386#define ENDPOINT_BYTE_COUNT 16
387#define ENDPOINT_ENABLE 10
388#define ENDPOINT_TYPE 8
389#define ENDPOINT_DIRECTION 7
390#define ENDPOINT_NUMBER 0
391 __le32 ep_rsp;
392#define SET_NAK_OUT_PACKETS 15
393#define SET_EP_HIDE_STATUS_PHASE 14
394#define SET_EP_FORCE_CRC_ERROR 13
395#define SET_INTERRUPT_MODE 12
396#define SET_CONTROL_STATUS_PHASE_HANDSHAKE 11
397#define SET_NAK_OUT_PACKETS_MODE 10
398#define SET_ENDPOINT_TOGGLE 9
399#define SET_ENDPOINT_HALT 8
400#define CLEAR_NAK_OUT_PACKETS 7
401#define CLEAR_EP_HIDE_STATUS_PHASE 6
402#define CLEAR_EP_FORCE_CRC_ERROR 5
403#define CLEAR_INTERRUPT_MODE 4
404#define CLEAR_CONTROL_STATUS_PHASE_HANDSHAKE 3
405#define CLEAR_NAK_OUT_PACKETS_MODE 2
406#define CLEAR_ENDPOINT_TOGGLE 1
407#define CLEAR_ENDPOINT_HALT 0
408 __le32 ep_irqenb;
409#define SHORT_PACKET_OUT_DONE_INTERRUPT_ENABLE 6
410#define SHORT_PACKET_TRANSFERRED_INTERRUPT_ENABLE 5
411#define DATA_PACKET_RECEIVED_INTERRUPT_ENABLE 3
412#define DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE 2
413#define DATA_OUT_PING_TOKEN_INTERRUPT_ENABLE 1
414#define DATA_IN_TOKEN_INTERRUPT_ENABLE 0
415 __le32 ep_stat;
416#define FIFO_VALID_COUNT 24
417#define HIGH_BANDWIDTH_OUT_TRANSACTION_PID 22
418#define TIMEOUT 21
419#define USB_STALL_SENT 20
420#define USB_IN_NAK_SENT 19
421#define USB_IN_ACK_RCVD 18
422#define USB_OUT_PING_NAK_SENT 17
423#define USB_OUT_ACK_SENT 16
424#define FIFO_OVERFLOW 13
425#define FIFO_UNDERFLOW 12
426#define FIFO_FULL 11
427#define FIFO_EMPTY 10
428#define FIFO_FLUSH 9
429#define SHORT_PACKET_OUT_DONE_INTERRUPT 6
430#define SHORT_PACKET_TRANSFERRED_INTERRUPT 5
431#define NAK_OUT_PACKETS 4
432#define DATA_PACKET_RECEIVED_INTERRUPT 3
433#define DATA_PACKET_TRANSMITTED_INTERRUPT 2
434#define DATA_OUT_PING_TOKEN_INTERRUPT 1
435#define DATA_IN_TOKEN_INTERRUPT 0
436 /* offset 0x0310, 0x0330, 0x0350, 0x0370, 0x0390, 0x03b0, 0x03d0 */
437 __le32 ep_avail;
438 __le32 ep_data;
439 u32 _unused0[2];
440} __attribute__ ((packed));
441
442struct net2280_reg_write {
443 __le16 port;
444 __le32 addr;
445 __le32 val;
446} __attribute__ ((packed));
447
448struct net2280_reg_read {
449 __le16 port;
450 __le32 addr;
451} __attribute__ ((packed));
452#endif /* NET2280_H */
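The header's own comment says the single-bit defines are meant to be turned into masks with '<<' and accessed through readl()/writel() on the mapped BAR0. A hypothetical snippet showing that usage (not taken from the driver; it assumes the header above plus the usual <linux/io.h> accessors and an already ioremap()ed register block):

#include <linux/io.h>
#include "net2280.h"

/* Drive GPIO0 high: set its output-enable and data bits in gpioctl. */
static void example_gpio0_high(struct net2280_regs __iomem *regs)
{
	u32 val = readl(&regs->gpioctl);

	val |= (1 << GPIO0_OUTPUT_ENABLE) | (1 << GPIO0_DATA);
	writel(val, &regs->gpioctl);
}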
diff --git a/drivers/net/wireless/p54.h b/drivers/net/wireless/p54/p54.h
index 744c866066c5..06d2c67f4c81 100644
--- a/drivers/net/wireless/p54.h
+++ b/drivers/net/wireless/p54/p54.h
@@ -64,10 +64,6 @@ struct p54_common {
64 unsigned int tx_hdr_len; 64 unsigned int tx_hdr_len;
65 void *cached_vdcf; 65 void *cached_vdcf;
66 unsigned int fw_var; 66 unsigned int fw_var;
67 /* FIXME: this channels/modes/rates stuff sucks */
68 struct ieee80211_channel channels[14];
69 struct ieee80211_rate rates[12];
70 struct ieee80211_hw_mode modes[2];
71 struct ieee80211_tx_queue_stats tx_stats; 67 struct ieee80211_tx_queue_stats tx_stats;
72}; 68};
73 69
diff --git a/drivers/net/wireless/p54common.c b/drivers/net/wireless/p54/p54common.c
index d191e055a788..63f9badf3f52 100644
--- a/drivers/net/wireless/p54common.c
+++ b/drivers/net/wireless/p54/p54common.c
@@ -27,6 +27,46 @@ MODULE_DESCRIPTION("Softmac Prism54 common code");
27MODULE_LICENSE("GPL"); 27MODULE_LICENSE("GPL");
28MODULE_ALIAS("prism54common"); 28MODULE_ALIAS("prism54common");
29 29
30static struct ieee80211_rate p54_rates[] = {
31 { .bitrate = 10, .hw_value = 0, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
32 { .bitrate = 20, .hw_value = 1, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
33 { .bitrate = 55, .hw_value = 2, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
34 { .bitrate = 110, .hw_value = 3, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
35 { .bitrate = 60, .hw_value = 4, },
36 { .bitrate = 90, .hw_value = 5, },
37 { .bitrate = 120, .hw_value = 6, },
38 { .bitrate = 180, .hw_value = 7, },
39 { .bitrate = 240, .hw_value = 8, },
40 { .bitrate = 360, .hw_value = 9, },
41 { .bitrate = 480, .hw_value = 10, },
42 { .bitrate = 540, .hw_value = 11, },
43};
44
45static struct ieee80211_channel p54_channels[] = {
46 { .center_freq = 2412, .hw_value = 1, },
47 { .center_freq = 2417, .hw_value = 2, },
48 { .center_freq = 2422, .hw_value = 3, },
49 { .center_freq = 2427, .hw_value = 4, },
50 { .center_freq = 2432, .hw_value = 5, },
51 { .center_freq = 2437, .hw_value = 6, },
52 { .center_freq = 2442, .hw_value = 7, },
53 { .center_freq = 2447, .hw_value = 8, },
54 { .center_freq = 2452, .hw_value = 9, },
55 { .center_freq = 2457, .hw_value = 10, },
56 { .center_freq = 2462, .hw_value = 11, },
57 { .center_freq = 2467, .hw_value = 12, },
58 { .center_freq = 2472, .hw_value = 13, },
59 { .center_freq = 2484, .hw_value = 14, },
60};
61
62static struct ieee80211_supported_band band_2GHz = {
63 .channels = p54_channels,
64 .n_channels = ARRAY_SIZE(p54_channels),
65 .bitrates = p54_rates,
66 .n_bitrates = ARRAY_SIZE(p54_rates),
67};
68
69
30void p54_parse_firmware(struct ieee80211_hw *dev, const struct firmware *fw) 70void p54_parse_firmware(struct ieee80211_hw *dev, const struct firmware *fw)
31{ 71{
32 struct p54_common *priv = dev->priv; 72 struct p54_common *priv = dev->priv;
@@ -257,6 +297,10 @@ int p54_parse_eeprom(struct ieee80211_hw *dev, void *eeprom, int len)
257 /* make it overrun */ 297 /* make it overrun */
258 entry_len = len; 298 entry_len = len;
259 break; 299 break;
300 default:
301 printk(KERN_INFO "p54: unknown eeprom code : 0x%x\n",
302 le16_to_cpu(entry->code));
303 break;
260 } 304 }
261 305
262 entry = (void *)entry + (entry_len + 1)*2; 306 entry = (void *)entry + (entry_len + 1)*2;
@@ -312,10 +356,10 @@ static void p54_rx_data(struct ieee80211_hw *dev, struct sk_buff *skb)
312 u16 freq = le16_to_cpu(hdr->freq); 356 u16 freq = le16_to_cpu(hdr->freq);
313 357
314 rx_status.ssi = hdr->rssi; 358 rx_status.ssi = hdr->rssi;
315 rx_status.rate = hdr->rate & 0x1f; /* report short preambles & CCK too */ 359 /* XX correct? */
316 rx_status.channel = freq == 2484 ? 14 : (freq - 2407)/5; 360 rx_status.rate_idx = hdr->rate & 0xf;
317 rx_status.freq = freq; 361 rx_status.freq = freq;
318 rx_status.phymode = MODE_IEEE80211G; 362 rx_status.band = IEEE80211_BAND_2GHZ;
319 rx_status.antenna = hdr->antenna; 363 rx_status.antenna = hdr->antenna;
320 rx_status.mactime = le64_to_cpu(hdr->timestamp); 364 rx_status.mactime = le64_to_cpu(hdr->timestamp);
321 rx_status.flag |= RX_FLAG_TSFT; 365 rx_status.flag |= RX_FLAG_TSFT;
@@ -353,7 +397,7 @@ static void p54_rx_frame_sent(struct ieee80211_hw *dev, struct sk_buff *skb)
353 while (entry != (struct sk_buff *)&priv->tx_queue) { 397 while (entry != (struct sk_buff *)&priv->tx_queue) {
354 range = (struct memrecord *)&entry->cb; 398 range = (struct memrecord *)&entry->cb;
355 if (range->start_addr == addr) { 399 if (range->start_addr == addr) {
356 struct ieee80211_tx_status status = {{0}}; 400 struct ieee80211_tx_status status;
357 struct p54_control_hdr *entry_hdr; 401 struct p54_control_hdr *entry_hdr;
358 struct p54_tx_control_allocdata *entry_data; 402 struct p54_tx_control_allocdata *entry_data;
359 int pad = 0; 403 int pad = 0;
@@ -369,6 +413,7 @@ static void p54_rx_frame_sent(struct ieee80211_hw *dev, struct sk_buff *skb)
369 kfree_skb(entry); 413 kfree_skb(entry);
370 break; 414 break;
371 } 415 }
416 memset(&status, 0, sizeof(status));
372 memcpy(&status.control, range->control, 417 memcpy(&status.control, range->control,
373 sizeof(status.control)); 418 sizeof(status.control));
374 kfree(range->control); 419 kfree(range->control);
@@ -551,7 +596,9 @@ static int p54_tx(struct ieee80211_hw *dev, struct sk_buff *skb,
551 txhdr->padding2 = 0; 596 txhdr->padding2 = 0;
552 597
553 /* TODO: add support for alternate retry TX rates */ 598 /* TODO: add support for alternate retry TX rates */
554 rate = control->tx_rate; 599 rate = control->tx_rate->hw_value;
600 if (control->flags & IEEE80211_TXCTL_SHORT_PREAMBLE)
601 rate |= 0x10;
555 if (control->flags & IEEE80211_TXCTL_USE_RTS_CTS) 602 if (control->flags & IEEE80211_TXCTL_USE_RTS_CTS)
556 rate |= 0x40; 603 rate |= 0x40;
557 else if (control->flags & IEEE80211_TXCTL_USE_CTS_PROTECT) 604 else if (control->flags & IEEE80211_TXCTL_USE_CTS_PROTECT)
@@ -721,13 +768,12 @@ static int p54_set_leds(struct ieee80211_hw *dev, int mode, int link, int act)
721 return 0; 768 return 0;
722} 769}
723 770
724#define P54_SET_QUEUE(queue, ai_fs, cw_min, cw_max, burst) \ 771#define P54_SET_QUEUE(queue, ai_fs, cw_min, cw_max, _txop) \
725do { \ 772do { \
726 queue.aifs = cpu_to_le16(ai_fs); \ 773 queue.aifs = cpu_to_le16(ai_fs); \
727 queue.cwmin = cpu_to_le16(cw_min); \ 774 queue.cwmin = cpu_to_le16(cw_min); \
728 queue.cwmax = cpu_to_le16(cw_max); \ 775 queue.cwmax = cpu_to_le16(cw_max); \
729 queue.txop = (burst == 0) ? \ 776 queue.txop = cpu_to_le16(_txop); \
730 0 : cpu_to_le16((burst * 100) / 32 + 1); \
731} while(0) 777} while(0)
732 778
733static void p54_init_vdcf(struct ieee80211_hw *dev) 779static void p54_init_vdcf(struct ieee80211_hw *dev)
@@ -745,10 +791,10 @@ static void p54_init_vdcf(struct ieee80211_hw *dev)
745 791
746 vdcf = (struct p54_tx_control_vdcf *) hdr->data; 792 vdcf = (struct p54_tx_control_vdcf *) hdr->data;
747 793
748 P54_SET_QUEUE(vdcf->queue[0], 0x0002, 0x0003, 0x0007, 0x000f); 794 P54_SET_QUEUE(vdcf->queue[0], 0x0002, 0x0003, 0x0007, 47);
749 P54_SET_QUEUE(vdcf->queue[1], 0x0002, 0x0007, 0x000f, 0x001e); 795 P54_SET_QUEUE(vdcf->queue[1], 0x0002, 0x0007, 0x000f, 94);
750 P54_SET_QUEUE(vdcf->queue[2], 0x0002, 0x000f, 0x03ff, 0x0014); 796 P54_SET_QUEUE(vdcf->queue[2], 0x0003, 0x000f, 0x03ff, 0);
751 P54_SET_QUEUE(vdcf->queue[3], 0x0007, 0x000f, 0x03ff, 0x0000); 797 P54_SET_QUEUE(vdcf->queue[3], 0x0007, 0x000f, 0x03ff, 0);
752} 798}
753 799
754static void p54_set_vdcf(struct ieee80211_hw *dev) 800static void p54_set_vdcf(struct ieee80211_hw *dev)
@@ -853,7 +899,7 @@ static int p54_config(struct ieee80211_hw *dev, struct ieee80211_conf *conf)
853{ 899{
854 int ret; 900 int ret;
855 901
856 ret = p54_set_freq(dev, cpu_to_le16(conf->freq)); 902 ret = p54_set_freq(dev, cpu_to_le16(conf->channel->center_freq));
857 p54_set_vdcf(dev); 903 p54_set_vdcf(dev);
858 return ret; 904 return ret;
859} 905}
@@ -901,7 +947,7 @@ static int p54_conf_tx(struct ieee80211_hw *dev, int queue,
901 947
902 if ((params) && !((queue < 0) || (queue > 4))) { 948 if ((params) && !((queue < 0) || (queue > 4))) {
903 P54_SET_QUEUE(vdcf->queue[queue], params->aifs, 949 P54_SET_QUEUE(vdcf->queue[queue], params->aifs,
904 params->cw_min, params->cw_max, params->burst_time); 950 params->cw_min, params->cw_max, params->txop);
905 } else 951 } else
906 return -EINVAL; 952 return -EINVAL;
907 953
@@ -948,7 +994,6 @@ struct ieee80211_hw *p54_init_common(size_t priv_data_len)
948{ 994{
949 struct ieee80211_hw *dev; 995 struct ieee80211_hw *dev;
950 struct p54_common *priv; 996 struct p54_common *priv;
951 int i;
952 997
953 dev = ieee80211_alloc_hw(priv_data_len, &p54_ops); 998 dev = ieee80211_alloc_hw(priv_data_len, &p54_ops);
954 if (!dev) 999 if (!dev)
@@ -957,18 +1002,7 @@ struct ieee80211_hw *p54_init_common(size_t priv_data_len)
957 priv = dev->priv; 1002 priv = dev->priv;
958 priv->mode = IEEE80211_IF_TYPE_INVALID; 1003 priv->mode = IEEE80211_IF_TYPE_INVALID;
959 skb_queue_head_init(&priv->tx_queue); 1004 skb_queue_head_init(&priv->tx_queue);
960 memcpy(priv->channels, p54_channels, sizeof(p54_channels)); 1005 dev->wiphy->bands[IEEE80211_BAND_2GHZ] = &band_2GHz;
961 memcpy(priv->rates, p54_rates, sizeof(p54_rates));
962 priv->modes[1].mode = MODE_IEEE80211B;
963 priv->modes[1].num_rates = 4;
964 priv->modes[1].rates = priv->rates;
965 priv->modes[1].num_channels = ARRAY_SIZE(p54_channels);
966 priv->modes[1].channels = priv->channels;
967 priv->modes[0].mode = MODE_IEEE80211G;
968 priv->modes[0].num_rates = ARRAY_SIZE(p54_rates);
969 priv->modes[0].rates = priv->rates;
970 priv->modes[0].num_channels = ARRAY_SIZE(p54_channels);
971 priv->modes[0].channels = priv->channels;
972 dev->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING | /* not sure */ 1006 dev->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING | /* not sure */
973 IEEE80211_HW_RX_INCLUDES_FCS; 1007 IEEE80211_HW_RX_INCLUDES_FCS;
974 dev->channel_change_time = 1000; /* TODO: find actual value */ 1008 dev->channel_change_time = 1000; /* TODO: find actual value */
@@ -990,14 +1024,6 @@ struct ieee80211_hw *p54_init_common(size_t priv_data_len)
990 1024
991 p54_init_vdcf(dev); 1025 p54_init_vdcf(dev);
992 1026
993 for (i = 0; i < 2; i++) {
994 if (ieee80211_register_hwmode(dev, &priv->modes[i])) {
995 kfree(priv->cached_vdcf);
996 ieee80211_free_hw(dev);
997 return NULL;
998 }
999 }
1000
1001 return dev; 1027 return dev;
1002} 1028}
1003EXPORT_SYMBOL_GPL(p54_init_common); 1029EXPORT_SYMBOL_GPL(p54_init_common);
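Note: the p54_rx_data hunk above reflects the reworked mac80211 RX status API in this merge, where the old phymode/rate fields give way to band and rate_idx (an index into the registered band's bitrate table). Below is a minimal sketch of that fill-in pattern only; struct example_rx_hdr and the example_* names are hypothetical stand-ins for the driver's own RX descriptor, not code from this commit.

    #include <net/mac80211.h>

    /* Hypothetical RX descriptor layout, for illustration only. */
    struct example_rx_hdr {
            __le16 freq;
            u8 rssi;
            u8 rate;
            __le64 timestamp;
    } __attribute__ ((packed));

    static void example_fill_rx_status(struct ieee80211_hw *hw,
                                       struct sk_buff *skb,
                                       struct example_rx_hdr *hdr)
    {
            struct ieee80211_rx_status rx_status;
            u16 freq = le16_to_cpu(hdr->freq);

            memset(&rx_status, 0, sizeof(rx_status));
            rx_status.ssi = hdr->rssi;
            rx_status.rate_idx = hdr->rate & 0xf;  /* index into band->bitrates */
            rx_status.freq = freq;
            rx_status.band = IEEE80211_BAND_2GHZ;  /* replaces the old phymode field */
            rx_status.mactime = le64_to_cpu(hdr->timestamp);
            rx_status.flag |= RX_FLAG_TSFT;

            ieee80211_rx_irqsafe(hw, skb, &rx_status);
    }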
diff --git a/drivers/net/wireless/p54common.h b/drivers/net/wireless/p54/p54common.h
index b67ff34e26fe..c15b56e1d75e 100644
--- a/drivers/net/wireless/p54common.h
+++ b/drivers/net/wireless/p54/p54common.h
@@ -251,79 +251,4 @@ struct p54_tx_control_vdcf {
251 __le16 frameburst; 251 __le16 frameburst;
252} __attribute__ ((packed)); 252} __attribute__ ((packed));
253 253
254static const struct ieee80211_rate p54_rates[] = {
255 { .rate = 10,
256 .val = 0,
257 .val2 = 0x10,
258 .flags = IEEE80211_RATE_CCK_2 },
259 { .rate = 20,
260 .val = 1,
261 .val2 = 0x11,
262 .flags = IEEE80211_RATE_CCK_2 },
263 { .rate = 55,
264 .val = 2,
265 .val2 = 0x12,
266 .flags = IEEE80211_RATE_CCK_2 },
267 { .rate = 110,
268 .val = 3,
269 .val2 = 0x13,
270 .flags = IEEE80211_RATE_CCK_2 },
271 { .rate = 60,
272 .val = 4,
273 .flags = IEEE80211_RATE_OFDM },
274 { .rate = 90,
275 .val = 5,
276 .flags = IEEE80211_RATE_OFDM },
277 { .rate = 120,
278 .val = 6,
279 .flags = IEEE80211_RATE_OFDM },
280 { .rate = 180,
281 .val = 7,
282 .flags = IEEE80211_RATE_OFDM },
283 { .rate = 240,
284 .val = 8,
285 .flags = IEEE80211_RATE_OFDM },
286 { .rate = 360,
287 .val = 9,
288 .flags = IEEE80211_RATE_OFDM },
289 { .rate = 480,
290 .val = 10,
291 .flags = IEEE80211_RATE_OFDM },
292 { .rate = 540,
293 .val = 11,
294 .flags = IEEE80211_RATE_OFDM },
295};
296
297// TODO: just generate this..
298static const struct ieee80211_channel p54_channels[] = {
299 { .chan = 1,
300 .freq = 2412},
301 { .chan = 2,
302 .freq = 2417},
303 { .chan = 3,
304 .freq = 2422},
305 { .chan = 4,
306 .freq = 2427},
307 { .chan = 5,
308 .freq = 2432},
309 { .chan = 6,
310 .freq = 2437},
311 { .chan = 7,
312 .freq = 2442},
313 { .chan = 8,
314 .freq = 2447},
315 { .chan = 9,
316 .freq = 2452},
317 { .chan = 10,
318 .freq = 2457},
319 { .chan = 11,
320 .freq = 2462},
321 { .chan = 12,
322 .freq = 2467},
323 { .chan = 13,
324 .freq = 2472},
325 { .chan = 14,
326 .freq = 2484}
327};
328
329#endif /* PRISM54COMMON_H */ 254#endif /* PRISM54COMMON_H */
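Note: the p54_rates/p54_channels tables deleted above were keyed to the old per-mode registration API; under the band API used elsewhere in this merge, a driver instead registers a single struct ieee80211_supported_band through dev->wiphy->bands[], with hw_value carrying the firmware rate code that the TX path now reads via control->tx_rate->hw_value. The sketch below is illustrative only, assuming example_* names; the commit's actual band_2GHz definition lives in p54common.c and is not reproduced here.

    #include <linux/kernel.h>
    #include <net/mac80211.h>

    static struct ieee80211_rate example_bitrates[] = {
            { .bitrate = 10,  .hw_value = 0, .hw_value_short = 0x10,
              .flags = IEEE80211_RATE_SHORT_PREAMBLE },
            { .bitrate = 20,  .hw_value = 1, .hw_value_short = 0x11,
              .flags = IEEE80211_RATE_SHORT_PREAMBLE },
            { .bitrate = 60,  .hw_value = 4 },
            { .bitrate = 540, .hw_value = 11 },
    };

    static struct ieee80211_channel example_channels[] = {
            { .center_freq = 2412 },   /* channel 1 */
            { .center_freq = 2437 },   /* channel 6 */
            { .center_freq = 2484 },   /* channel 14 */
    };

    static struct ieee80211_supported_band example_band_2GHz = {
            .band = IEEE80211_BAND_2GHZ,
            .channels = example_channels,
            .n_channels = ARRAY_SIZE(example_channels),
            .bitrates = example_bitrates,
            .n_bitrates = ARRAY_SIZE(example_bitrates),
    };

    /* Registered from the init path, mirroring the hunk above:
     * dev->wiphy->bands[IEEE80211_BAND_2GHZ] = &example_band_2GHz; */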
diff --git a/drivers/net/wireless/p54pci.c b/drivers/net/wireless/p54/p54pci.c
index fa527723fbe0..fa527723fbe0 100644
--- a/drivers/net/wireless/p54pci.c
+++ b/drivers/net/wireless/p54/p54pci.c
diff --git a/drivers/net/wireless/p54pci.h b/drivers/net/wireless/p54/p54pci.h
index 5bedd7af385d..5bedd7af385d 100644
--- a/drivers/net/wireless/p54pci.h
+++ b/drivers/net/wireless/p54/p54pci.h
diff --git a/drivers/net/wireless/p54usb.c b/drivers/net/wireless/p54/p54usb.c
index 98ddbb3b3273..98ddbb3b3273 100644
--- a/drivers/net/wireless/p54usb.c
+++ b/drivers/net/wireless/p54/p54usb.c
diff --git a/drivers/net/wireless/p54usb.h b/drivers/net/wireless/p54/p54usb.h
index d1896b396c1c..d1896b396c1c 100644
--- a/drivers/net/wireless/p54usb.h
+++ b/drivers/net/wireless/p54/p54usb.h
diff --git a/drivers/net/wireless/prism54/isl_ioctl.c b/drivers/net/wireless/prism54/isl_ioctl.c
index 1b595a6525f4..e5b3c282009c 100644
--- a/drivers/net/wireless/prism54/isl_ioctl.c
+++ b/drivers/net/wireless/prism54/isl_ioctl.c
@@ -165,7 +165,7 @@ prism54_update_stats(struct work_struct *work)
165 struct obj_bss bss, *bss2; 165 struct obj_bss bss, *bss2;
166 union oid_res_t r; 166 union oid_res_t r;
167 167
168 down(&priv->stats_sem); 168 mutex_lock(&priv->stats_lock);
169 169
170/* Noise floor. 170/* Noise floor.
171 * I'm not sure if the unit is dBm. 171 * I'm not sure if the unit is dBm.
@@ -207,7 +207,7 @@ prism54_update_stats(struct work_struct *work)
207 mgt_get_request(priv, DOT11_OID_MPDUTXFAILED, 0, NULL, &r); 207 mgt_get_request(priv, DOT11_OID_MPDUTXFAILED, 0, NULL, &r);
208 priv->local_iwstatistics.discard.retries = r.u; 208 priv->local_iwstatistics.discard.retries = r.u;
209 209
210 up(&priv->stats_sem); 210 mutex_unlock(&priv->stats_lock);
211 211
212 return; 212 return;
213} 213}
@@ -218,12 +218,12 @@ prism54_get_wireless_stats(struct net_device *ndev)
218 islpci_private *priv = netdev_priv(ndev); 218 islpci_private *priv = netdev_priv(ndev);
219 219
220 /* If the stats are being updated return old data */ 220 /* If the stats are being updated return old data */
221 if (down_trylock(&priv->stats_sem) == 0) { 221 if (mutex_trylock(&priv->stats_lock)) {
222 memcpy(&priv->iwstatistics, &priv->local_iwstatistics, 222 memcpy(&priv->iwstatistics, &priv->local_iwstatistics,
223 sizeof (struct iw_statistics)); 223 sizeof (struct iw_statistics));
224 /* They won't be marked updated for the next time */ 224 /* They won't be marked updated for the next time */
225 priv->local_iwstatistics.qual.updated = 0; 225 priv->local_iwstatistics.qual.updated = 0;
226 up(&priv->stats_sem); 226 mutex_unlock(&priv->stats_lock);
227 } else 227 } else
228 priv->iwstatistics.qual.updated = 0; 228 priv->iwstatistics.qual.updated = 0;
229 229
@@ -1780,7 +1780,7 @@ prism54_set_raw(struct net_device *ndev, struct iw_request_info *info,
1780void 1780void
1781prism54_acl_init(struct islpci_acl *acl) 1781prism54_acl_init(struct islpci_acl *acl)
1782{ 1782{
1783 sema_init(&acl->sem, 1); 1783 mutex_init(&acl->lock);
1784 INIT_LIST_HEAD(&acl->mac_list); 1784 INIT_LIST_HEAD(&acl->mac_list);
1785 acl->size = 0; 1785 acl->size = 0;
1786 acl->policy = MAC_POLICY_OPEN; 1786 acl->policy = MAC_POLICY_OPEN;
@@ -1792,10 +1792,10 @@ prism54_clear_mac(struct islpci_acl *acl)
1792 struct list_head *ptr, *next; 1792 struct list_head *ptr, *next;
1793 struct mac_entry *entry; 1793 struct mac_entry *entry;
1794 1794
1795 down(&acl->sem); 1795 mutex_lock(&acl->lock);
1796 1796
1797 if (acl->size == 0) { 1797 if (acl->size == 0) {
1798 up(&acl->sem); 1798 mutex_unlock(&acl->lock);
1799 return; 1799 return;
1800 } 1800 }
1801 1801
@@ -1806,7 +1806,7 @@ prism54_clear_mac(struct islpci_acl *acl)
1806 kfree(entry); 1806 kfree(entry);
1807 } 1807 }
1808 acl->size = 0; 1808 acl->size = 0;
1809 up(&acl->sem); 1809 mutex_unlock(&acl->lock);
1810} 1810}
1811 1811
1812void 1812void
@@ -1833,13 +1833,13 @@ prism54_add_mac(struct net_device *ndev, struct iw_request_info *info,
1833 1833
1834 memcpy(entry->addr, addr->sa_data, ETH_ALEN); 1834 memcpy(entry->addr, addr->sa_data, ETH_ALEN);
1835 1835
1836 if (down_interruptible(&acl->sem)) { 1836 if (mutex_lock_interruptible(&acl->lock)) {
1837 kfree(entry); 1837 kfree(entry);
1838 return -ERESTARTSYS; 1838 return -ERESTARTSYS;
1839 } 1839 }
1840 list_add_tail(&entry->_list, &acl->mac_list); 1840 list_add_tail(&entry->_list, &acl->mac_list);
1841 acl->size++; 1841 acl->size++;
1842 up(&acl->sem); 1842 mutex_unlock(&acl->lock);
1843 1843
1844 return 0; 1844 return 0;
1845} 1845}
@@ -1856,18 +1856,18 @@ prism54_del_mac(struct net_device *ndev, struct iw_request_info *info,
1856 if (addr->sa_family != ARPHRD_ETHER) 1856 if (addr->sa_family != ARPHRD_ETHER)
1857 return -EOPNOTSUPP; 1857 return -EOPNOTSUPP;
1858 1858
1859 if (down_interruptible(&acl->sem)) 1859 if (mutex_lock_interruptible(&acl->lock))
1860 return -ERESTARTSYS; 1860 return -ERESTARTSYS;
1861 list_for_each_entry(entry, &acl->mac_list, _list) { 1861 list_for_each_entry(entry, &acl->mac_list, _list) {
1862 if (memcmp(entry->addr, addr->sa_data, ETH_ALEN) == 0) { 1862 if (memcmp(entry->addr, addr->sa_data, ETH_ALEN) == 0) {
1863 list_del(&entry->_list); 1863 list_del(&entry->_list);
1864 acl->size--; 1864 acl->size--;
1865 kfree(entry); 1865 kfree(entry);
1866 up(&acl->sem); 1866 mutex_unlock(&acl->lock);
1867 return 0; 1867 return 0;
1868 } 1868 }
1869 } 1869 }
1870 up(&acl->sem); 1870 mutex_unlock(&acl->lock);
1871 return -EINVAL; 1871 return -EINVAL;
1872} 1872}
1873 1873
@@ -1882,7 +1882,7 @@ prism54_get_mac(struct net_device *ndev, struct iw_request_info *info,
1882 1882
1883 dwrq->length = 0; 1883 dwrq->length = 0;
1884 1884
1885 if (down_interruptible(&acl->sem)) 1885 if (mutex_lock_interruptible(&acl->lock))
1886 return -ERESTARTSYS; 1886 return -ERESTARTSYS;
1887 1887
1888 list_for_each_entry(entry, &acl->mac_list, _list) { 1888 list_for_each_entry(entry, &acl->mac_list, _list) {
@@ -1891,7 +1891,7 @@ prism54_get_mac(struct net_device *ndev, struct iw_request_info *info,
1891 dwrq->length++; 1891 dwrq->length++;
1892 dst++; 1892 dst++;
1893 } 1893 }
1894 up(&acl->sem); 1894 mutex_unlock(&acl->lock);
1895 return 0; 1895 return 0;
1896} 1896}
1897 1897
@@ -1955,11 +1955,11 @@ prism54_mac_accept(struct islpci_acl *acl, char *mac)
1955 struct mac_entry *entry; 1955 struct mac_entry *entry;
1956 int res = 0; 1956 int res = 0;
1957 1957
1958 if (down_interruptible(&acl->sem)) 1958 if (mutex_lock_interruptible(&acl->lock))
1959 return -ERESTARTSYS; 1959 return -ERESTARTSYS;
1960 1960
1961 if (acl->policy == MAC_POLICY_OPEN) { 1961 if (acl->policy == MAC_POLICY_OPEN) {
1962 up(&acl->sem); 1962 mutex_unlock(&acl->lock);
1963 return 1; 1963 return 1;
1964 } 1964 }
1965 1965
@@ -1970,7 +1970,7 @@ prism54_mac_accept(struct islpci_acl *acl, char *mac)
1970 } 1970 }
1971 } 1971 }
1972 res = (acl->policy == MAC_POLICY_ACCEPT) ? !res : res; 1972 res = (acl->policy == MAC_POLICY_ACCEPT) ? !res : res;
1973 up(&acl->sem); 1973 mutex_unlock(&acl->lock);
1974 1974
1975 return res; 1975 return res;
1976} 1976}
@@ -2081,6 +2081,7 @@ link_changed(struct net_device *ndev, u32 bitrate)
2081 islpci_private *priv = netdev_priv(ndev); 2081 islpci_private *priv = netdev_priv(ndev);
2082 2082
2083 if (bitrate) { 2083 if (bitrate) {
2084 netif_carrier_on(ndev);
2084 if (priv->iw_mode == IW_MODE_INFRA) { 2085 if (priv->iw_mode == IW_MODE_INFRA) {
2085 union iwreq_data uwrq; 2086 union iwreq_data uwrq;
2086 prism54_get_wap(ndev, NULL, (struct sockaddr *) &uwrq, 2087 prism54_get_wap(ndev, NULL, (struct sockaddr *) &uwrq,
@@ -2089,8 +2090,10 @@ link_changed(struct net_device *ndev, u32 bitrate)
2089 } else 2090 } else
2090 send_simple_event(netdev_priv(ndev), 2091 send_simple_event(netdev_priv(ndev),
2091 "Link established"); 2092 "Link established");
2092 } else 2093 } else {
2094 netif_carrier_off(ndev);
2093 send_simple_event(netdev_priv(ndev), "Link lost"); 2095 send_simple_event(netdev_priv(ndev), "Link lost");
2096 }
2094} 2097}
2095 2098
2096/* Beacon/ProbeResp payload header */ 2099/* Beacon/ProbeResp payload header */
@@ -2114,7 +2117,7 @@ prism54_wpa_bss_ie_add(islpci_private *priv, u8 *bssid,
2114 if (wpa_ie_len > MAX_WPA_IE_LEN) 2117 if (wpa_ie_len > MAX_WPA_IE_LEN)
2115 wpa_ie_len = MAX_WPA_IE_LEN; 2118 wpa_ie_len = MAX_WPA_IE_LEN;
2116 2119
2117 down(&priv->wpa_sem); 2120 mutex_lock(&priv->wpa_lock);
2118 2121
2119 /* try to use existing entry */ 2122 /* try to use existing entry */
2120 list_for_each(ptr, &priv->bss_wpa_list) { 2123 list_for_each(ptr, &priv->bss_wpa_list) {
@@ -2165,7 +2168,7 @@ prism54_wpa_bss_ie_add(islpci_private *priv, u8 *bssid,
2165 kfree(bss); 2168 kfree(bss);
2166 } 2169 }
2167 2170
2168 up(&priv->wpa_sem); 2171 mutex_unlock(&priv->wpa_lock);
2169} 2172}
2170 2173
2171static size_t 2174static size_t
@@ -2175,7 +2178,7 @@ prism54_wpa_bss_ie_get(islpci_private *priv, u8 *bssid, u8 *wpa_ie)
2175 struct islpci_bss_wpa_ie *bss = NULL; 2178 struct islpci_bss_wpa_ie *bss = NULL;
2176 size_t len = 0; 2179 size_t len = 0;
2177 2180
2178 down(&priv->wpa_sem); 2181 mutex_lock(&priv->wpa_lock);
2179 2182
2180 list_for_each(ptr, &priv->bss_wpa_list) { 2183 list_for_each(ptr, &priv->bss_wpa_list) {
2181 bss = list_entry(ptr, struct islpci_bss_wpa_ie, list); 2184 bss = list_entry(ptr, struct islpci_bss_wpa_ie, list);
@@ -2187,7 +2190,7 @@ prism54_wpa_bss_ie_get(islpci_private *priv, u8 *bssid, u8 *wpa_ie)
2187 len = bss->wpa_ie_len; 2190 len = bss->wpa_ie_len;
2188 memcpy(wpa_ie, bss->wpa_ie, len); 2191 memcpy(wpa_ie, bss->wpa_ie, len);
2189 } 2192 }
2190 up(&priv->wpa_sem); 2193 mutex_unlock(&priv->wpa_lock);
2191 2194
2192 return len; 2195 return len;
2193} 2196}
@@ -2196,7 +2199,7 @@ void
2196prism54_wpa_bss_ie_init(islpci_private *priv) 2199prism54_wpa_bss_ie_init(islpci_private *priv)
2197{ 2200{
2198 INIT_LIST_HEAD(&priv->bss_wpa_list); 2201 INIT_LIST_HEAD(&priv->bss_wpa_list);
2199 sema_init(&priv->wpa_sem, 1); 2202 mutex_init(&priv->wpa_lock);
2200} 2203}
2201 2204
2202void 2205void
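Note: the isl_ioctl.c changes above convert the prism54 stats, ACL and WPA semaphores to mutexes. A minimal sketch of that conversion pattern, under assumed example_* names, follows; the one subtle point, visible in prism54_get_wireless_stats, is the inverted return convention: down_trylock() returns 0 on success while mutex_trylock() returns 1.

    #include <linux/errno.h>
    #include <linux/mutex.h>

    struct example_state {
            struct mutex lock;              /* was: struct semaphore sem */
            int value;
    };

    static void example_init(struct example_state *s)
    {
            mutex_init(&s->lock);           /* was: sema_init(&s->sem, 1) */
    }

    static int example_update(struct example_state *s, int v)
    {
            if (mutex_lock_interruptible(&s->lock))  /* was: down_interruptible() */
                    return -ERESTARTSYS;
            s->value = v;
            mutex_unlock(&s->lock);         /* was: up() */
            return 0;
    }

    static int example_try_read(struct example_state *s)
    {
            int v = -1;

            if (mutex_trylock(&s->lock)) {  /* was: down_trylock(...) == 0 */
                    v = s->value;
                    mutex_unlock(&s->lock);
            }
            return v;
    }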
diff --git a/drivers/net/wireless/prism54/islpci_dev.c b/drivers/net/wireless/prism54/islpci_dev.c
index dbb538ccb4ec..04c2638d75ad 100644
--- a/drivers/net/wireless/prism54/islpci_dev.c
+++ b/drivers/net/wireless/prism54/islpci_dev.c
@@ -387,7 +387,9 @@ islpci_open(struct net_device *ndev)
387 } 387 }
388 388
389 netif_start_queue(ndev); 389 netif_start_queue(ndev);
390/* netif_mark_up( ndev ); */ 390
391 /* Turn off carrier unless we know we have associated */
392 netif_carrier_off(ndev);
391 393
392 return 0; 394 return 0;
393} 395}
@@ -864,7 +866,7 @@ islpci_setup(struct pci_dev *pdev)
864 mutex_init(&priv->mgmt_lock); 866 mutex_init(&priv->mgmt_lock);
865 priv->mgmt_received = NULL; 867 priv->mgmt_received = NULL;
866 init_waitqueue_head(&priv->mgmt_wqueue); 868 init_waitqueue_head(&priv->mgmt_wqueue);
867 sema_init(&priv->stats_sem, 1); 869 mutex_init(&priv->stats_lock);
868 spin_lock_init(&priv->slock); 870 spin_lock_init(&priv->slock);
869 871
870 /* init state machine with off#1 state */ 872 /* init state machine with off#1 state */
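Note: together with the netif_carrier_on()/netif_carrier_off() calls added to link_changed() above, the islpci_open hunk makes the carrier state track association instead of being assumed up. A small sketch of that pattern, with hypothetical example_* names, is shown below.

    #include <linux/netdevice.h>

    static int example_open(struct net_device *ndev)
    {
            netif_start_queue(ndev);
            netif_carrier_off(ndev);   /* carrier off until we know we associated */
            return 0;
    }

    static void example_link_changed(struct net_device *ndev, u32 bitrate)
    {
            if (bitrate)
                    netif_carrier_on(ndev);
            else
                    netif_carrier_off(ndev);
    }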
diff --git a/drivers/net/wireless/prism54/islpci_dev.h b/drivers/net/wireless/prism54/islpci_dev.h
index 4e0182ce835b..8e55a5fcffae 100644
--- a/drivers/net/wireless/prism54/islpci_dev.h
+++ b/drivers/net/wireless/prism54/islpci_dev.h
@@ -55,7 +55,7 @@ struct islpci_acl {
55 enum { MAC_POLICY_OPEN=0, MAC_POLICY_ACCEPT=1, MAC_POLICY_REJECT=2 } policy; 55 enum { MAC_POLICY_OPEN=0, MAC_POLICY_ACCEPT=1, MAC_POLICY_REJECT=2 } policy;
56 struct list_head mac_list; /* a list of mac_entry */ 56 struct list_head mac_list; /* a list of mac_entry */
57 int size; /* size of queue */ 57 int size; /* size of queue */
58 struct semaphore sem; /* accessed in ioctls and trap_work */ 58 struct mutex lock; /* accessed in ioctls and trap_work */
59}; 59};
60 60
61struct islpci_membuf { 61struct islpci_membuf {
@@ -88,7 +88,7 @@ typedef struct {
88 88
89 /* Take care of the wireless stats */ 89 /* Take care of the wireless stats */
90 struct work_struct stats_work; 90 struct work_struct stats_work;
91 struct semaphore stats_sem; 91 struct mutex stats_lock;
92 /* remember when we last updated the stats */ 92 /* remember when we last updated the stats */
93 unsigned long stats_timestamp; 93 unsigned long stats_timestamp;
94 /* The first is accessed under semaphore locking. 94 /* The first is accessed under semaphore locking.
@@ -178,7 +178,7 @@ typedef struct {
178 int wpa; /* WPA mode enabled */ 178 int wpa; /* WPA mode enabled */
179 struct list_head bss_wpa_list; 179 struct list_head bss_wpa_list;
180 int num_bss_wpa; 180 int num_bss_wpa;
181 struct semaphore wpa_sem; 181 struct mutex wpa_lock;
182 u8 wpa_ie[MAX_WPA_IE_LEN]; 182 u8 wpa_ie[MAX_WPA_IE_LEN];
183 size_t wpa_ie_len; 183 size_t wpa_ie_len;
184 184
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index f3858ee36f32..963960dc30f2 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -34,6 +34,7 @@
34#include <linux/kernel.h> 34#include <linux/kernel.h>
35#include <linux/proc_fs.h> 35#include <linux/proc_fs.h>
36#include <linux/ptrace.h> 36#include <linux/ptrace.h>
37#include <linux/seq_file.h>
37#include <linux/slab.h> 38#include <linux/slab.h>
38#include <linux/string.h> 39#include <linux/string.h>
39#include <linux/timer.h> 40#include <linux/timer.h>
@@ -2582,7 +2583,7 @@ static char *nettype[] = {"Adhoc", "Infra "};
2582static char *framing[] = {"Encapsulation", "Translation"} 2583static char *framing[] = {"Encapsulation", "Translation"}
2583; 2584;
2584/*===========================================================================*/ 2585/*===========================================================================*/
2585static int ray_cs_proc_read(char *buf, char **start, off_t offset, int len) 2586static int ray_cs_proc_show(struct seq_file *m, void *v)
2586{ 2587{
2587/* Print current values which are not available via other means 2588/* Print current values which are not available via other means
2588 * eg ifconfig 2589 * eg ifconfig
@@ -2606,83 +2607,93 @@ static int ray_cs_proc_read(char *buf, char **start, off_t offset, int len)
2606 if (!local) 2607 if (!local)
2607 return 0; 2608 return 0;
2608 2609
2609 len = 0; 2610 seq_puts(m, "Raylink Wireless LAN driver status\n");
2610 2611 seq_printf(m, "%s\n", rcsid);
2611 len += sprintf(buf + len, "Raylink Wireless LAN driver status\n");
2612 len += sprintf(buf + len, "%s\n", rcsid);
2613 /* build 4 does not report version, and field is 0x55 after memtest */ 2612 /* build 4 does not report version, and field is 0x55 after memtest */
2614 len += sprintf(buf + len, "Firmware version = "); 2613 seq_puts(m, "Firmware version = ");
2615 if (local->fw_ver == 0x55) 2614 if (local->fw_ver == 0x55)
2616 len += sprintf(buf + len, "4 - Use dump_cis for more details\n"); 2615 seq_puts(m, "4 - Use dump_cis for more details\n");
2617 else 2616 else
2618 len += sprintf(buf + len, "%2d.%02d.%02d\n", 2617 seq_printf(m, "%2d.%02d.%02d\n",
2619 local->fw_ver, local->fw_bld, local->fw_var); 2618 local->fw_ver, local->fw_bld, local->fw_var);
2620 2619
2621 for (i=0; i<32; i++) c[i] = local->sparm.b5.a_current_ess_id[i]; 2620 for (i=0; i<32; i++) c[i] = local->sparm.b5.a_current_ess_id[i];
2622 c[32] = 0; 2621 c[32] = 0;
2623 len += sprintf(buf + len, "%s network ESSID = \"%s\"\n", 2622 seq_printf(m, "%s network ESSID = \"%s\"\n",
2624 nettype[local->sparm.b5.a_network_type], c); 2623 nettype[local->sparm.b5.a_network_type], c);
2625 2624
2626 p = local->bss_id; 2625 p = local->bss_id;
2627 len += sprintf(buf + len, "BSSID = %s\n", 2626 seq_printf(m, "BSSID = %s\n",
2628 print_mac(mac, p)); 2627 print_mac(mac, p));
2629 2628
2630 len += sprintf(buf + len, "Country code = %d\n", 2629 seq_printf(m, "Country code = %d\n",
2631 local->sparm.b5.a_curr_country_code); 2630 local->sparm.b5.a_curr_country_code);
2632 2631
2633 i = local->card_status; 2632 i = local->card_status;
2634 if (i < 0) i = 10; 2633 if (i < 0) i = 10;
2635 if (i > 16) i = 10; 2634 if (i > 16) i = 10;
2636 len += sprintf(buf + len, "Card status = %s\n", card_status[i]); 2635 seq_printf(m, "Card status = %s\n", card_status[i]);
2637 2636
2638 len += sprintf(buf + len, "Framing mode = %s\n",framing[translate]); 2637 seq_printf(m, "Framing mode = %s\n",framing[translate]);
2639 2638
2640 len += sprintf(buf + len, "Last pkt signal lvl = %d\n", local->last_rsl); 2639 seq_printf(m, "Last pkt signal lvl = %d\n", local->last_rsl);
2641 2640
2642 if (local->beacon_rxed) { 2641 if (local->beacon_rxed) {
2643 /* Pull some fields out of last beacon received */ 2642 /* Pull some fields out of last beacon received */
2644 len += sprintf(buf + len, "Beacon Interval = %d Kus\n", 2643 seq_printf(m, "Beacon Interval = %d Kus\n",
2645 local->last_bcn.beacon_intvl[0] 2644 local->last_bcn.beacon_intvl[0]
2646 + 256 * local->last_bcn.beacon_intvl[1]); 2645 + 256 * local->last_bcn.beacon_intvl[1]);
2647 2646
2648 p = local->last_bcn.elements; 2647 p = local->last_bcn.elements;
2649 if (p[0] == C_ESSID_ELEMENT_ID) p += p[1] + 2; 2648 if (p[0] == C_ESSID_ELEMENT_ID) p += p[1] + 2;
2650 else { 2649 else {
2651 len += sprintf(buf + len, "Parse beacon failed at essid element id = %d\n",p[0]); 2650 seq_printf(m, "Parse beacon failed at essid element id = %d\n",p[0]);
2652 return len; 2651 return 0;
2653 } 2652 }
2654 2653
2655 if (p[0] == C_SUPPORTED_RATES_ELEMENT_ID) { 2654 if (p[0] == C_SUPPORTED_RATES_ELEMENT_ID) {
2656 len += sprintf(buf + len, "Supported rate codes = "); 2655 seq_puts(m, "Supported rate codes = ");
2657 for (i=2; i<p[1] + 2; i++) 2656 for (i=2; i<p[1] + 2; i++)
2658 len += sprintf(buf + len, "0x%02x ", p[i]); 2657 seq_printf(m, "0x%02x ", p[i]);
2659 len += sprintf(buf + len, "\n"); 2658 seq_putc(m, '\n');
2660 p += p[1] + 2; 2659 p += p[1] + 2;
2661 } 2660 }
2662 else { 2661 else {
2663 len += sprintf(buf + len, "Parse beacon failed at rates element\n"); 2662 seq_puts(m, "Parse beacon failed at rates element\n");
2664 return len; 2663 return 0;
2665 } 2664 }
2666 2665
2667 if (p[0] == C_FH_PARAM_SET_ELEMENT_ID) { 2666 if (p[0] == C_FH_PARAM_SET_ELEMENT_ID) {
2668 pfh = (struct freq_hop_element *)p; 2667 pfh = (struct freq_hop_element *)p;
2669 len += sprintf(buf + len, "Hop dwell = %d Kus\n", 2668 seq_printf(m, "Hop dwell = %d Kus\n",
2670 pfh->dwell_time[0] + 256 * pfh->dwell_time[1]); 2669 pfh->dwell_time[0] + 256 * pfh->dwell_time[1]);
2671 len += sprintf(buf + len, "Hop set = %d \n", pfh->hop_set); 2670 seq_printf(m, "Hop set = %d \n", pfh->hop_set);
2672 len += sprintf(buf + len, "Hop pattern = %d \n", pfh->hop_pattern); 2671 seq_printf(m, "Hop pattern = %d \n", pfh->hop_pattern);
2673 len += sprintf(buf + len, "Hop index = %d \n", pfh->hop_index); 2672 seq_printf(m, "Hop index = %d \n", pfh->hop_index);
2674 p += p[1] + 2; 2673 p += p[1] + 2;
2675 } 2674 }
2676 else { 2675 else {
2677 len += sprintf(buf + len, "Parse beacon failed at FH param element\n"); 2676 seq_puts(m, "Parse beacon failed at FH param element\n");
2678 return len; 2677 return 0;
2679 } 2678 }
2680 } else { 2679 } else {
2681 len += sprintf(buf + len, "No beacons received\n"); 2680 seq_puts(m, "No beacons received\n");
2682 } 2681 }
2683 return len; 2682 return 0;
2684} 2683}
2685 2684
2685static int ray_cs_proc_open(struct inode *inode, struct file *file)
2686{
2687 return single_open(file, ray_cs_proc_show, NULL);
2688}
2689
2690static const struct file_operations ray_cs_proc_fops = {
2691 .owner = THIS_MODULE,
2692 .open = ray_cs_proc_open,
2693 .read = seq_read,
2694 .llseek = seq_lseek,
2695 .release = single_release,
2696};
2686#endif 2697#endif
2687/*===========================================================================*/ 2698/*===========================================================================*/
2688static int build_auth_frame(ray_dev_t *local, UCHAR *dest, int auth_type) 2699static int build_auth_frame(ray_dev_t *local, UCHAR *dest, int auth_type)
@@ -2815,7 +2826,7 @@ static int __init init_ray_cs(void)
2815#ifdef CONFIG_PROC_FS 2826#ifdef CONFIG_PROC_FS
2816 proc_mkdir("driver/ray_cs", NULL); 2827 proc_mkdir("driver/ray_cs", NULL);
2817 2828
2818 create_proc_info_entry("driver/ray_cs/ray_cs", 0, NULL, &ray_cs_proc_read); 2829 proc_create("driver/ray_cs/ray_cs", 0, NULL, &ray_cs_proc_fops);
2819 raycs_write("driver/ray_cs/essid", write_essid, NULL); 2830 raycs_write("driver/ray_cs/essid", write_essid, NULL);
2820 raycs_write("driver/ray_cs/net_type", write_int, &net_type); 2831 raycs_write("driver/ray_cs/net_type", write_int, &net_type);
2821 raycs_write("driver/ray_cs/translate", write_int, &translate); 2832 raycs_write("driver/ray_cs/translate", write_int, &translate);
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index 10b776c1adc5..977751f372ff 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -154,128 +154,121 @@ MODULE_PARM_DESC(workaround_interval,
154#define NDIS_802_11_LENGTH_RATES 8 154#define NDIS_802_11_LENGTH_RATES 8
155#define NDIS_802_11_LENGTH_RATES_EX 16 155#define NDIS_802_11_LENGTH_RATES_EX 16
156 156
157struct NDIS_802_11_SSID { 157enum ndis_80211_net_type {
158 __le32 SsidLength; 158 ndis_80211_type_freq_hop,
159 u8 Ssid[NDIS_802_11_LENGTH_SSID]; 159 ndis_80211_type_direct_seq,
160} __attribute__((packed)); 160 ndis_80211_type_ofdm_a,
161 161 ndis_80211_type_ofdm_g
162enum NDIS_802_11_NETWORK_TYPE {
163 Ndis802_11FH,
164 Ndis802_11DS,
165 Ndis802_11OFDM5,
166 Ndis802_11OFDM24,
167 Ndis802_11NetworkTypeMax
168}; 162};
169 163
170struct NDIS_802_11_CONFIGURATION_FH { 164enum ndis_80211_net_infra {
171 __le32 Length; 165 ndis_80211_infra_adhoc,
172 __le32 HopPattern; 166 ndis_80211_infra_infra,
173 __le32 HopSet; 167 ndis_80211_infra_auto_unknown
174 __le32 DwellTime;
175} __attribute__((packed));
176
177struct NDIS_802_11_CONFIGURATION {
178 __le32 Length;
179 __le32 BeaconPeriod;
180 __le32 ATIMWindow;
181 __le32 DSConfig;
182 struct NDIS_802_11_CONFIGURATION_FH FHConfig;
183} __attribute__((packed));
184
185enum NDIS_802_11_NETWORK_INFRASTRUCTURE {
186 Ndis802_11IBSS,
187 Ndis802_11Infrastructure,
188 Ndis802_11AutoUnknown,
189 Ndis802_11InfrastructureMax
190}; 168};
191 169
192enum NDIS_802_11_AUTHENTICATION_MODE { 170enum ndis_80211_auth_mode {
193 Ndis802_11AuthModeOpen, 171 ndis_80211_auth_open,
194 Ndis802_11AuthModeShared, 172 ndis_80211_auth_shared,
195 Ndis802_11AuthModeAutoSwitch, 173 ndis_80211_auth_auto_switch,
196 Ndis802_11AuthModeWPA, 174 ndis_80211_auth_wpa,
197 Ndis802_11AuthModeWPAPSK, 175 ndis_80211_auth_wpa_psk,
198 Ndis802_11AuthModeWPANone, 176 ndis_80211_auth_wpa_none,
199 Ndis802_11AuthModeWPA2, 177 ndis_80211_auth_wpa2,
200 Ndis802_11AuthModeWPA2PSK, 178 ndis_80211_auth_wpa2_psk
201 Ndis802_11AuthModeMax
202}; 179};
203 180
204enum NDIS_802_11_ENCRYPTION_STATUS { 181enum ndis_80211_encr_status {
205 Ndis802_11WEPEnabled, 182 ndis_80211_encr_wep_enabled,
206 Ndis802_11Encryption1Enabled = Ndis802_11WEPEnabled, 183 ndis_80211_encr_disabled,
207 Ndis802_11WEPDisabled, 184 ndis_80211_encr_wep_key_absent,
208 Ndis802_11EncryptionDisabled = Ndis802_11WEPDisabled, 185 ndis_80211_encr_not_supported,
209 Ndis802_11WEPKeyAbsent, 186 ndis_80211_encr_tkip_enabled,
210 Ndis802_11Encryption1KeyAbsent = Ndis802_11WEPKeyAbsent, 187 ndis_80211_encr_tkip_key_absent,
211 Ndis802_11WEPNotSupported, 188 ndis_80211_encr_ccmp_enabled,
212 Ndis802_11EncryptionNotSupported = Ndis802_11WEPNotSupported, 189 ndis_80211_encr_ccmp_key_absent
213 Ndis802_11Encryption2Enabled,
214 Ndis802_11Encryption2KeyAbsent,
215 Ndis802_11Encryption3Enabled,
216 Ndis802_11Encryption3KeyAbsent
217}; 190};
218 191
219enum NDIS_802_11_PRIVACY_FILTER { 192enum ndis_80211_priv_filter {
220 Ndis802_11PrivFilterAcceptAll, 193 ndis_80211_priv_accept_all,
221 Ndis802_11PrivFilter8021xWEP 194 ndis_80211_priv_8021x_wep
222}; 195};
223 196
224struct NDIS_WLAN_BSSID_EX { 197struct ndis_80211_ssid {
225 __le32 Length; 198 __le32 length;
226 u8 MacAddress[6]; 199 u8 essid[NDIS_802_11_LENGTH_SSID];
227 u8 Padding[2]; 200} __attribute__((packed));
228 struct NDIS_802_11_SSID Ssid; 201
229 __le32 Privacy; 202struct ndis_80211_conf_freq_hop {
230 __le32 Rssi; 203 __le32 length;
231 __le32 NetworkTypeInUse; 204 __le32 hop_pattern;
232 struct NDIS_802_11_CONFIGURATION Configuration; 205 __le32 hop_set;
233 __le32 InfrastructureMode; 206 __le32 dwell_time;
234 u8 SupportedRates[NDIS_802_11_LENGTH_RATES_EX]; 207} __attribute__((packed));
235 __le32 IELength; 208
236 u8 IEs[0]; 209struct ndis_80211_conf {
210 __le32 length;
211 __le32 beacon_period;
212 __le32 atim_window;
213 __le32 ds_config;
214 struct ndis_80211_conf_freq_hop fh_config;
215} __attribute__((packed));
216
217struct ndis_80211_bssid_ex {
218 __le32 length;
219 u8 mac[6];
220 u8 padding[2];
221 struct ndis_80211_ssid ssid;
222 __le32 privacy;
223 __le32 rssi;
224 __le32 net_type;
225 struct ndis_80211_conf config;
226 __le32 net_infra;
227 u8 rates[NDIS_802_11_LENGTH_RATES_EX];
228 __le32 ie_length;
229 u8 ies[0];
237} __attribute__((packed)); 230} __attribute__((packed));
238 231
239struct NDIS_802_11_BSSID_LIST_EX { 232struct ndis_80211_bssid_list_ex {
240 __le32 NumberOfItems; 233 __le32 num_items;
241 struct NDIS_WLAN_BSSID_EX Bssid[0]; 234 struct ndis_80211_bssid_ex bssid[0];
242} __attribute__((packed)); 235} __attribute__((packed));
243 236
244struct NDIS_802_11_FIXED_IEs { 237struct ndis_80211_fixed_ies {
245 u8 Timestamp[8]; 238 u8 timestamp[8];
246 __le16 BeaconInterval; 239 __le16 beacon_interval;
247 __le16 Capabilities; 240 __le16 capabilities;
248} __attribute__((packed)); 241} __attribute__((packed));
249 242
250struct NDIS_802_11_WEP { 243struct ndis_80211_wep_key {
251 __le32 Length; 244 __le32 size;
252 __le32 KeyIndex; 245 __le32 index;
253 __le32 KeyLength; 246 __le32 length;
254 u8 KeyMaterial[32]; 247 u8 material[32];
255} __attribute__((packed)); 248} __attribute__((packed));
256 249
257struct NDIS_802_11_KEY { 250struct ndis_80211_key {
258 __le32 Length; 251 __le32 size;
259 __le32 KeyIndex; 252 __le32 index;
260 __le32 KeyLength; 253 __le32 length;
261 u8 Bssid[6]; 254 u8 bssid[6];
262 u8 Padding[6]; 255 u8 padding[6];
263 u8 KeyRSC[8]; 256 u8 rsc[8];
264 u8 KeyMaterial[32]; 257 u8 material[32];
265} __attribute__((packed)); 258} __attribute__((packed));
266 259
267struct NDIS_802_11_REMOVE_KEY { 260struct ndis_80211_remove_key {
268 __le32 Length; 261 __le32 size;
269 __le32 KeyIndex; 262 __le32 index;
270 u8 Bssid[6]; 263 u8 bssid[6];
271} __attribute__((packed)); 264} __attribute__((packed));
272 265
273struct RNDIS_CONFIG_PARAMETER_INFOBUFFER { 266struct ndis_config_param {
274 __le32 ParameterNameOffset; 267 __le32 name_offs;
275 __le32 ParameterNameLength; 268 __le32 name_length;
276 __le32 ParameterType; 269 __le32 type;
277 __le32 ParameterValueOffset; 270 __le32 value_offs;
278 __le32 ParameterValueLength; 271 __le32 value_length;
279} __attribute__((packed)); 272} __attribute__((packed));
280 273
281/* these have to match what is in wpa_supplicant */ 274/* these have to match what is in wpa_supplicant */
@@ -334,7 +327,7 @@ struct rndis_wext_private {
334 /* hardware state */ 327 /* hardware state */
335 int radio_on; 328 int radio_on;
336 int infra_mode; 329 int infra_mode;
337 struct NDIS_802_11_SSID essid; 330 struct ndis_80211_ssid essid;
338 331
339 /* encryption stuff */ 332 /* encryption stuff */
340 int encr_tx_key_index; 333 int encr_tx_key_index;
@@ -484,7 +477,7 @@ static int rndis_set_oid(struct usbnet *dev, __le32 oid, void *data, int len)
484static int rndis_set_config_parameter(struct usbnet *dev, char *param, 477static int rndis_set_config_parameter(struct usbnet *dev, char *param,
485 int value_type, void *value) 478 int value_type, void *value)
486{ 479{
487 struct RNDIS_CONFIG_PARAMETER_INFOBUFFER *infobuf; 480 struct ndis_config_param *infobuf;
488 int value_len, info_len, param_len, ret, i; 481 int value_len, info_len, param_len, ret, i;
489 __le16 *unibuf; 482 __le16 *unibuf;
490 __le32 *dst_value; 483 __le32 *dst_value;
@@ -519,12 +512,11 @@ static int rndis_set_config_parameter(struct usbnet *dev, char *param,
519 devdbg(dev, "setting config parameter: %s, value: %d", 512 devdbg(dev, "setting config parameter: %s, value: %d",
520 param, *(u32 *)value); 513 param, *(u32 *)value);
521 514
522 infobuf->ParameterNameOffset = cpu_to_le32(sizeof(*infobuf)); 515 infobuf->name_offs = cpu_to_le32(sizeof(*infobuf));
523 infobuf->ParameterNameLength = cpu_to_le32(param_len); 516 infobuf->name_length = cpu_to_le32(param_len);
524 infobuf->ParameterType = cpu_to_le32(value_type); 517 infobuf->type = cpu_to_le32(value_type);
525 infobuf->ParameterValueOffset = cpu_to_le32(sizeof(*infobuf) + 518 infobuf->value_offs = cpu_to_le32(sizeof(*infobuf) + param_len);
526 param_len); 519 infobuf->value_length = cpu_to_le32(value_len);
527 infobuf->ParameterValueLength = cpu_to_le32(value_len);
528 520
529 /* simple string to unicode string conversion */ 521 /* simple string to unicode string conversion */
530 unibuf = (void *)infobuf + sizeof(*infobuf); 522 unibuf = (void *)infobuf + sizeof(*infobuf);
@@ -630,7 +622,7 @@ static int freq_to_dsconfig(struct iw_freq *freq, unsigned int *dsconfig)
630static int 622static int
631add_wep_key(struct usbnet *usbdev, char *key, int key_len, int index); 623add_wep_key(struct usbnet *usbdev, char *key, int key_len, int index);
632 624
633static int get_essid(struct usbnet *usbdev, struct NDIS_802_11_SSID *ssid) 625static int get_essid(struct usbnet *usbdev, struct ndis_80211_ssid *ssid)
634{ 626{
635 int ret, len; 627 int ret, len;
636 628
@@ -638,14 +630,14 @@ static int get_essid(struct usbnet *usbdev, struct NDIS_802_11_SSID *ssid)
638 ret = rndis_query_oid(usbdev, OID_802_11_SSID, ssid, &len); 630 ret = rndis_query_oid(usbdev, OID_802_11_SSID, ssid, &len);
639 631
640 if (ret != 0) 632 if (ret != 0)
641 ssid->SsidLength = 0; 633 ssid->length = 0;
642 634
643#ifdef DEBUG 635#ifdef DEBUG
644 { 636 {
645 unsigned char tmp[NDIS_802_11_LENGTH_SSID + 1]; 637 unsigned char tmp[NDIS_802_11_LENGTH_SSID + 1];
646 638
647 memcpy(tmp, ssid->Ssid, le32_to_cpu(ssid->SsidLength)); 639 memcpy(tmp, ssid->essid, le32_to_cpu(ssid->length));
648 tmp[le32_to_cpu(ssid->SsidLength)] = 0; 640 tmp[le32_to_cpu(ssid->length)] = 0;
649 devdbg(usbdev, "get_essid: '%s', ret: %d", tmp, ret); 641 devdbg(usbdev, "get_essid: '%s', ret: %d", tmp, ret);
650 } 642 }
651#endif 643#endif
@@ -653,7 +645,7 @@ static int get_essid(struct usbnet *usbdev, struct NDIS_802_11_SSID *ssid)
653} 645}
654 646
655 647
656static int set_essid(struct usbnet *usbdev, struct NDIS_802_11_SSID *ssid) 648static int set_essid(struct usbnet *usbdev, struct ndis_80211_ssid *ssid)
657{ 649{
658 struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev); 650 struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev);
659 int ret; 651 int ret;
@@ -697,7 +689,7 @@ static int is_associated(struct usbnet *usbdev)
697static int disassociate(struct usbnet *usbdev, int reset_ssid) 689static int disassociate(struct usbnet *usbdev, int reset_ssid)
698{ 690{
699 struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev); 691 struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev);
700 struct NDIS_802_11_SSID ssid; 692 struct ndis_80211_ssid ssid;
701 int i, ret = 0; 693 int i, ret = 0;
702 694
703 if (priv->radio_on) { 695 if (priv->radio_on) {
@@ -714,12 +706,12 @@ static int disassociate(struct usbnet *usbdev, int reset_ssid)
714 /* disassociate causes radio to be turned off; if reset_ssid 706 /* disassociate causes radio to be turned off; if reset_ssid
715 * is given, set random ssid to enable radio */ 707 * is given, set random ssid to enable radio */
716 if (reset_ssid) { 708 if (reset_ssid) {
717 ssid.SsidLength = cpu_to_le32(sizeof(ssid.Ssid)); 709 ssid.length = cpu_to_le32(sizeof(ssid.essid));
718 get_random_bytes(&ssid.Ssid[2], sizeof(ssid.Ssid)-2); 710 get_random_bytes(&ssid.essid[2], sizeof(ssid.essid)-2);
719 ssid.Ssid[0] = 0x1; 711 ssid.essid[0] = 0x1;
720 ssid.Ssid[1] = 0xff; 712 ssid.essid[1] = 0xff;
721 for (i = 2; i < sizeof(ssid.Ssid); i++) 713 for (i = 2; i < sizeof(ssid.essid); i++)
722 ssid.Ssid[i] = 0x1 + (ssid.Ssid[i] * 0xfe / 0xff); 714 ssid.essid[i] = 0x1 + (ssid.essid[i] * 0xfe / 0xff);
723 ret = set_essid(usbdev, &ssid); 715 ret = set_essid(usbdev, &ssid);
724 } 716 }
725 return ret; 717 return ret;
@@ -737,23 +729,23 @@ static int set_auth_mode(struct usbnet *usbdev, int wpa_version, int authalg)
737 729
738 if (wpa_version & IW_AUTH_WPA_VERSION_WPA2) { 730 if (wpa_version & IW_AUTH_WPA_VERSION_WPA2) {
739 if (priv->wpa_keymgmt & IW_AUTH_KEY_MGMT_802_1X) 731 if (priv->wpa_keymgmt & IW_AUTH_KEY_MGMT_802_1X)
740 auth_mode = Ndis802_11AuthModeWPA2; 732 auth_mode = ndis_80211_auth_wpa2;
741 else 733 else
742 auth_mode = Ndis802_11AuthModeWPA2PSK; 734 auth_mode = ndis_80211_auth_wpa2_psk;
743 } else if (wpa_version & IW_AUTH_WPA_VERSION_WPA) { 735 } else if (wpa_version & IW_AUTH_WPA_VERSION_WPA) {
744 if (priv->wpa_keymgmt & IW_AUTH_KEY_MGMT_802_1X) 736 if (priv->wpa_keymgmt & IW_AUTH_KEY_MGMT_802_1X)
745 auth_mode = Ndis802_11AuthModeWPA; 737 auth_mode = ndis_80211_auth_wpa;
746 else if (priv->wpa_keymgmt & IW_AUTH_KEY_MGMT_PSK) 738 else if (priv->wpa_keymgmt & IW_AUTH_KEY_MGMT_PSK)
747 auth_mode = Ndis802_11AuthModeWPAPSK; 739 auth_mode = ndis_80211_auth_wpa_psk;
748 else 740 else
749 auth_mode = Ndis802_11AuthModeWPANone; 741 auth_mode = ndis_80211_auth_wpa_none;
750 } else if (authalg & IW_AUTH_ALG_SHARED_KEY) { 742 } else if (authalg & IW_AUTH_ALG_SHARED_KEY) {
751 if (authalg & IW_AUTH_ALG_OPEN_SYSTEM) 743 if (authalg & IW_AUTH_ALG_OPEN_SYSTEM)
752 auth_mode = Ndis802_11AuthModeAutoSwitch; 744 auth_mode = ndis_80211_auth_auto_switch;
753 else 745 else
754 auth_mode = Ndis802_11AuthModeShared; 746 auth_mode = ndis_80211_auth_shared;
755 } else 747 } else
756 auth_mode = Ndis802_11AuthModeOpen; 748 auth_mode = ndis_80211_auth_open;
757 749
758 tmp = cpu_to_le32(auth_mode); 750 tmp = cpu_to_le32(auth_mode);
759 ret = rndis_set_oid(usbdev, OID_802_11_AUTHENTICATION_MODE, &tmp, 751 ret = rndis_set_oid(usbdev, OID_802_11_AUTHENTICATION_MODE, &tmp,
@@ -778,9 +770,9 @@ static int set_priv_filter(struct usbnet *usbdev)
778 770
779 if (priv->wpa_version & IW_AUTH_WPA_VERSION_WPA2 || 771 if (priv->wpa_version & IW_AUTH_WPA_VERSION_WPA2 ||
780 priv->wpa_version & IW_AUTH_WPA_VERSION_WPA) 772 priv->wpa_version & IW_AUTH_WPA_VERSION_WPA)
781 tmp = cpu_to_le32(Ndis802_11PrivFilter8021xWEP); 773 tmp = cpu_to_le32(ndis_80211_priv_8021x_wep);
782 else 774 else
783 tmp = cpu_to_le32(Ndis802_11PrivFilterAcceptAll); 775 tmp = cpu_to_le32(ndis_80211_priv_accept_all);
784 776
785 return rndis_set_oid(usbdev, OID_802_11_PRIVACY_FILTER, &tmp, 777 return rndis_set_oid(usbdev, OID_802_11_PRIVACY_FILTER, &tmp,
786 sizeof(tmp)); 778 sizeof(tmp));
@@ -798,18 +790,18 @@ static int set_encr_mode(struct usbnet *usbdev, int pairwise, int groupwise)
798 groupwise); 790 groupwise);
799 791
800 if (pairwise & IW_AUTH_CIPHER_CCMP) 792 if (pairwise & IW_AUTH_CIPHER_CCMP)
801 encr_mode = Ndis802_11Encryption3Enabled; 793 encr_mode = ndis_80211_encr_ccmp_enabled;
802 else if (pairwise & IW_AUTH_CIPHER_TKIP) 794 else if (pairwise & IW_AUTH_CIPHER_TKIP)
803 encr_mode = Ndis802_11Encryption2Enabled; 795 encr_mode = ndis_80211_encr_tkip_enabled;
804 else if (pairwise & 796 else if (pairwise &
805 (IW_AUTH_CIPHER_WEP40 | IW_AUTH_CIPHER_WEP104)) 797 (IW_AUTH_CIPHER_WEP40 | IW_AUTH_CIPHER_WEP104))
806 encr_mode = Ndis802_11Encryption1Enabled; 798 encr_mode = ndis_80211_encr_wep_enabled;
807 else if (groupwise & IW_AUTH_CIPHER_CCMP) 799 else if (groupwise & IW_AUTH_CIPHER_CCMP)
808 encr_mode = Ndis802_11Encryption3Enabled; 800 encr_mode = ndis_80211_encr_ccmp_enabled;
809 else if (groupwise & IW_AUTH_CIPHER_TKIP) 801 else if (groupwise & IW_AUTH_CIPHER_TKIP)
810 encr_mode = Ndis802_11Encryption2Enabled; 802 encr_mode = ndis_80211_encr_tkip_enabled;
811 else 803 else
812 encr_mode = Ndis802_11EncryptionDisabled; 804 encr_mode = ndis_80211_encr_disabled;
813 805
814 tmp = cpu_to_le32(encr_mode); 806 tmp = cpu_to_le32(encr_mode);
815 ret = rndis_set_oid(usbdev, OID_802_11_ENCRYPTION_STATUS, &tmp, 807 ret = rndis_set_oid(usbdev, OID_802_11_ENCRYPTION_STATUS, &tmp,
@@ -877,7 +869,7 @@ static void set_default_iw_params(struct usbnet *usbdev)
877 priv->wpa_keymgmt = 0; 869 priv->wpa_keymgmt = 0;
878 priv->wpa_version = 0; 870 priv->wpa_version = 0;
879 871
880 set_infra_mode(usbdev, Ndis802_11Infrastructure); 872 set_infra_mode(usbdev, ndis_80211_infra_infra);
881 set_auth_mode(usbdev, IW_AUTH_WPA_VERSION_DISABLED, 873 set_auth_mode(usbdev, IW_AUTH_WPA_VERSION_DISABLED,
882 IW_AUTH_ALG_OPEN_SYSTEM); 874 IW_AUTH_ALG_OPEN_SYSTEM);
883 set_priv_filter(usbdev); 875 set_priv_filter(usbdev);
@@ -899,7 +891,7 @@ static int deauthenticate(struct usbnet *usbdev)
899static int add_wep_key(struct usbnet *usbdev, char *key, int key_len, int index) 891static int add_wep_key(struct usbnet *usbdev, char *key, int key_len, int index)
900{ 892{
901 struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev); 893 struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev);
902 struct NDIS_802_11_WEP ndis_key; 894 struct ndis_80211_wep_key ndis_key;
903 int ret; 895 int ret;
904 896
905 if (key_len <= 0 || key_len > 32 || index < 0 || index >= 4) 897 if (key_len <= 0 || key_len > 32 || index < 0 || index >= 4)
@@ -907,13 +899,13 @@ static int add_wep_key(struct usbnet *usbdev, char *key, int key_len, int index)
907 899
908 memset(&ndis_key, 0, sizeof(ndis_key)); 900 memset(&ndis_key, 0, sizeof(ndis_key));
909 901
910 ndis_key.Length = cpu_to_le32(sizeof(ndis_key)); 902 ndis_key.size = cpu_to_le32(sizeof(ndis_key));
911 ndis_key.KeyLength = cpu_to_le32(key_len); 903 ndis_key.length = cpu_to_le32(key_len);
912 ndis_key.KeyIndex = cpu_to_le32(index); 904 ndis_key.index = cpu_to_le32(index);
913 memcpy(&ndis_key.KeyMaterial, key, key_len); 905 memcpy(&ndis_key.material, key, key_len);
914 906
915 if (index == priv->encr_tx_key_index) { 907 if (index == priv->encr_tx_key_index) {
916 ndis_key.KeyIndex |= cpu_to_le32(1 << 31); 908 ndis_key.index |= cpu_to_le32(1 << 31);
917 ret = set_encr_mode(usbdev, IW_AUTH_CIPHER_WEP104, 909 ret = set_encr_mode(usbdev, IW_AUTH_CIPHER_WEP104,
918 IW_AUTH_CIPHER_NONE); 910 IW_AUTH_CIPHER_NONE);
919 if (ret) 911 if (ret)
@@ -940,7 +932,7 @@ static int add_wep_key(struct usbnet *usbdev, char *key, int key_len, int index)
940static int remove_key(struct usbnet *usbdev, int index, u8 bssid[ETH_ALEN]) 932static int remove_key(struct usbnet *usbdev, int index, u8 bssid[ETH_ALEN])
941{ 933{
942 struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev); 934 struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev);
943 struct NDIS_802_11_REMOVE_KEY remove_key; 935 struct ndis_80211_remove_key remove_key;
944 __le32 keyindex; 936 __le32 keyindex;
945 int ret; 937 int ret;
946 938
@@ -954,17 +946,17 @@ static int remove_key(struct usbnet *usbdev, int index, u8 bssid[ETH_ALEN])
954 priv->wpa_cipher_pair == IW_AUTH_CIPHER_CCMP || 946 priv->wpa_cipher_pair == IW_AUTH_CIPHER_CCMP ||
955 priv->wpa_cipher_group == IW_AUTH_CIPHER_TKIP || 947 priv->wpa_cipher_group == IW_AUTH_CIPHER_TKIP ||
956 priv->wpa_cipher_group == IW_AUTH_CIPHER_CCMP) { 948 priv->wpa_cipher_group == IW_AUTH_CIPHER_CCMP) {
957 remove_key.Length = cpu_to_le32(sizeof(remove_key)); 949 remove_key.size = cpu_to_le32(sizeof(remove_key));
958 remove_key.KeyIndex = cpu_to_le32(index); 950 remove_key.index = cpu_to_le32(index);
959 if (bssid) { 951 if (bssid) {
960 /* pairwise key */ 952 /* pairwise key */
961 if (memcmp(bssid, ffff_bssid, ETH_ALEN) != 0) 953 if (memcmp(bssid, ffff_bssid, ETH_ALEN) != 0)
962 remove_key.KeyIndex |= cpu_to_le32(1 << 30); 954 remove_key.index |= cpu_to_le32(1 << 30);
963 memcpy(remove_key.Bssid, bssid, 955 memcpy(remove_key.bssid, bssid,
964 sizeof(remove_key.Bssid)); 956 sizeof(remove_key.bssid));
965 } else 957 } else
966 memset(remove_key.Bssid, 0xff, 958 memset(remove_key.bssid, 0xff,
967 sizeof(remove_key.Bssid)); 959 sizeof(remove_key.bssid));
968 960
969 ret = rndis_set_oid(usbdev, OID_802_11_REMOVE_KEY, &remove_key, 961 ret = rndis_set_oid(usbdev, OID_802_11_REMOVE_KEY, &remove_key,
970 sizeof(remove_key)); 962 sizeof(remove_key));
@@ -1184,7 +1176,7 @@ static int rndis_iw_get_name(struct net_device *dev,
1184static int rndis_iw_set_essid(struct net_device *dev, 1176static int rndis_iw_set_essid(struct net_device *dev,
1185 struct iw_request_info *info, union iwreq_data *wrqu, char *essid) 1177 struct iw_request_info *info, union iwreq_data *wrqu, char *essid)
1186{ 1178{
1187 struct NDIS_802_11_SSID ssid; 1179 struct ndis_80211_ssid ssid;
1188 int length = wrqu->essid.length; 1180 int length = wrqu->essid.length;
1189 struct usbnet *usbdev = dev->priv; 1181 struct usbnet *usbdev = dev->priv;
1190 1182
@@ -1194,11 +1186,11 @@ static int rndis_iw_set_essid(struct net_device *dev,
1194 if (length > NDIS_802_11_LENGTH_SSID) 1186 if (length > NDIS_802_11_LENGTH_SSID)
1195 length = NDIS_802_11_LENGTH_SSID; 1187 length = NDIS_802_11_LENGTH_SSID;
1196 1188
1197 ssid.SsidLength = cpu_to_le32(length); 1189 ssid.length = cpu_to_le32(length);
1198 if (length > 0) 1190 if (length > 0)
1199 memcpy(ssid.Ssid, essid, length); 1191 memcpy(ssid.essid, essid, length);
1200 else 1192 else
1201 memset(ssid.Ssid, 0, NDIS_802_11_LENGTH_SSID); 1193 memset(ssid.essid, 0, NDIS_802_11_LENGTH_SSID);
1202 1194
1203 set_assoc_params(usbdev); 1195 set_assoc_params(usbdev);
1204 1196
@@ -1212,16 +1204,16 @@ static int rndis_iw_set_essid(struct net_device *dev,
1212static int rndis_iw_get_essid(struct net_device *dev, 1204static int rndis_iw_get_essid(struct net_device *dev,
1213 struct iw_request_info *info, union iwreq_data *wrqu, char *essid) 1205 struct iw_request_info *info, union iwreq_data *wrqu, char *essid)
1214{ 1206{
1215 struct NDIS_802_11_SSID ssid; 1207 struct ndis_80211_ssid ssid;
1216 struct usbnet *usbdev = dev->priv; 1208 struct usbnet *usbdev = dev->priv;
1217 int ret; 1209 int ret;
1218 1210
1219 ret = get_essid(usbdev, &ssid); 1211 ret = get_essid(usbdev, &ssid);
1220 1212
1221 if (ret == 0 && le32_to_cpu(ssid.SsidLength) > 0) { 1213 if (ret == 0 && le32_to_cpu(ssid.length) > 0) {
1222 wrqu->essid.flags = 1; 1214 wrqu->essid.flags = 1;
1223 wrqu->essid.length = le32_to_cpu(ssid.SsidLength); 1215 wrqu->essid.length = le32_to_cpu(ssid.length);
1224 memcpy(essid, ssid.Ssid, wrqu->essid.length); 1216 memcpy(essid, ssid.essid, wrqu->essid.length);
1225 essid[wrqu->essid.length] = 0; 1217 essid[wrqu->essid.length] = 0;
1226 } else { 1218 } else {
1227 memset(essid, 0, sizeof(NDIS_802_11_LENGTH_SSID)); 1219 memset(essid, 0, sizeof(NDIS_802_11_LENGTH_SSID));
@@ -1398,13 +1390,13 @@ static int rndis_iw_get_mode(struct net_device *dev,
1398 struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev); 1390 struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev);
1399 1391
1400 switch (priv->infra_mode) { 1392 switch (priv->infra_mode) {
1401 case Ndis802_11IBSS: 1393 case ndis_80211_infra_adhoc:
1402 wrqu->mode = IW_MODE_ADHOC; 1394 wrqu->mode = IW_MODE_ADHOC;
1403 break; 1395 break;
1404 case Ndis802_11Infrastructure: 1396 case ndis_80211_infra_infra:
1405 wrqu->mode = IW_MODE_INFRA; 1397 wrqu->mode = IW_MODE_INFRA;
1406 break; 1398 break;
1407 /*case Ndis802_11AutoUnknown:*/ 1399 /*case ndis_80211_infra_auto_unknown:*/
1408 default: 1400 default:
1409 wrqu->mode = IW_MODE_AUTO; 1401 wrqu->mode = IW_MODE_AUTO;
1410 break; 1402 break;
@@ -1424,14 +1416,14 @@ static int rndis_iw_set_mode(struct net_device *dev,
1424 1416
1425 switch (wrqu->mode) { 1417 switch (wrqu->mode) {
1426 case IW_MODE_ADHOC: 1418 case IW_MODE_ADHOC:
1427 mode = Ndis802_11IBSS; 1419 mode = ndis_80211_infra_adhoc;
1428 break; 1420 break;
1429 case IW_MODE_INFRA: 1421 case IW_MODE_INFRA:
1430 mode = Ndis802_11Infrastructure; 1422 mode = ndis_80211_infra_infra;
1431 break; 1423 break;
1432 /*case IW_MODE_AUTO:*/ 1424 /*case IW_MODE_AUTO:*/
1433 default: 1425 default:
1434 mode = Ndis802_11AutoUnknown; 1426 mode = ndis_80211_infra_auto_unknown;
1435 break; 1427 break;
1436 } 1428 }
1437 1429
@@ -1507,7 +1499,7 @@ static int rndis_iw_set_encode_ext(struct net_device *dev,
1507 struct iw_encode_ext *ext = (struct iw_encode_ext *)extra; 1499 struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
1508 struct usbnet *usbdev = dev->priv; 1500 struct usbnet *usbdev = dev->priv;
1509 struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev); 1501 struct rndis_wext_private *priv = get_rndis_wext_priv(usbdev);
1510 struct NDIS_802_11_KEY ndis_key; 1502 struct ndis_80211_key ndis_key;
1511 int keyidx, ret; 1503 int keyidx, ret;
1512 u8 *addr; 1504 u8 *addr;
1513 1505
@@ -1532,54 +1524,54 @@ static int rndis_iw_set_encode_ext(struct net_device *dev,
1532 ext->alg == IW_ENCODE_ALG_NONE || ext->key_len == 0) 1524 ext->alg == IW_ENCODE_ALG_NONE || ext->key_len == 0)
1533 return remove_key(usbdev, keyidx, NULL); 1525 return remove_key(usbdev, keyidx, NULL);
1534 1526
1535 if (ext->key_len > sizeof(ndis_key.KeyMaterial)) 1527 if (ext->key_len > sizeof(ndis_key.material))
1536 return -1; 1528 return -1;
1537 1529
1538 memset(&ndis_key, 0, sizeof(ndis_key)); 1530 memset(&ndis_key, 0, sizeof(ndis_key));
1539 1531
1540 ndis_key.Length = cpu_to_le32(sizeof(ndis_key) - 1532 ndis_key.size = cpu_to_le32(sizeof(ndis_key) -
1541 sizeof(ndis_key.KeyMaterial) + ext->key_len); 1533 sizeof(ndis_key.material) + ext->key_len);
1542 ndis_key.KeyLength = cpu_to_le32(ext->key_len); 1534 ndis_key.length = cpu_to_le32(ext->key_len);
1543 ndis_key.KeyIndex = cpu_to_le32(keyidx); 1535 ndis_key.index = cpu_to_le32(keyidx);
1544 1536
1545 if (ext->ext_flags & IW_ENCODE_EXT_RX_SEQ_VALID) { 1537 if (ext->ext_flags & IW_ENCODE_EXT_RX_SEQ_VALID) {
1546 memcpy(ndis_key.KeyRSC, ext->rx_seq, 6); 1538 memcpy(ndis_key.rsc, ext->rx_seq, 6);
1547 ndis_key.KeyIndex |= cpu_to_le32(1 << 29); 1539 ndis_key.index |= cpu_to_le32(1 << 29);
1548 } 1540 }
1549 1541
1550 addr = ext->addr.sa_data; 1542 addr = ext->addr.sa_data;
1551 if (ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY) { 1543 if (ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY) {
1552 /* group key */ 1544 /* group key */
1553 if (priv->infra_mode == Ndis802_11IBSS) 1545 if (priv->infra_mode == ndis_80211_infra_adhoc)
1554 memset(ndis_key.Bssid, 0xff, ETH_ALEN); 1546 memset(ndis_key.bssid, 0xff, ETH_ALEN);
1555 else 1547 else
1556 get_bssid(usbdev, ndis_key.Bssid); 1548 get_bssid(usbdev, ndis_key.bssid);
1557 } else { 1549 } else {
1558 /* pairwise key */ 1550 /* pairwise key */
1559 ndis_key.KeyIndex |= cpu_to_le32(1 << 30); 1551 ndis_key.index |= cpu_to_le32(1 << 30);
1560 memcpy(ndis_key.Bssid, addr, ETH_ALEN); 1552 memcpy(ndis_key.bssid, addr, ETH_ALEN);
1561 } 1553 }
1562 1554
1563 if (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) 1555 if (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY)
1564 ndis_key.KeyIndex |= cpu_to_le32(1 << 31); 1556 ndis_key.index |= cpu_to_le32(1 << 31);
1565 1557
1566 if (ext->alg == IW_ENCODE_ALG_TKIP && ext->key_len == 32) { 1558 if (ext->alg == IW_ENCODE_ALG_TKIP && ext->key_len == 32) {
1567 /* wpa_supplicant gives us the Michael MIC RX/TX keys in 1559 /* wpa_supplicant gives us the Michael MIC RX/TX keys in
1568 * different order than NDIS spec, so swap the order here. */ 1560 * different order than NDIS spec, so swap the order here. */
1569 memcpy(ndis_key.KeyMaterial, ext->key, 16); 1561 memcpy(ndis_key.material, ext->key, 16);
1570 memcpy(ndis_key.KeyMaterial + 16, ext->key + 24, 8); 1562 memcpy(ndis_key.material + 16, ext->key + 24, 8);
1571 memcpy(ndis_key.KeyMaterial + 24, ext->key + 16, 8); 1563 memcpy(ndis_key.material + 24, ext->key + 16, 8);
1572 } else 1564 } else
1573 memcpy(ndis_key.KeyMaterial, ext->key, ext->key_len); 1565 memcpy(ndis_key.material, ext->key, ext->key_len);
1574 1566
1575 ret = rndis_set_oid(usbdev, OID_802_11_ADD_KEY, &ndis_key, 1567 ret = rndis_set_oid(usbdev, OID_802_11_ADD_KEY, &ndis_key,
1576 le32_to_cpu(ndis_key.Length)); 1568 le32_to_cpu(ndis_key.size));
1577 devdbg(usbdev, "SIOCSIWENCODEEXT: OID_802_11_ADD_KEY -> %08X", ret); 1569 devdbg(usbdev, "SIOCSIWENCODEEXT: OID_802_11_ADD_KEY -> %08X", ret);
1578 if (ret != 0) 1570 if (ret != 0)
1579 return ret; 1571 return ret;
1580 1572
1581 priv->encr_key_len[keyidx] = ext->key_len; 1573 priv->encr_key_len[keyidx] = ext->key_len;
1582 memcpy(&priv->encr_keys[keyidx], ndis_key.KeyMaterial, ext->key_len); 1574 memcpy(&priv->encr_keys[keyidx], ndis_key.material, ext->key_len);
1583 if (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) 1575 if (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY)
1584 priv->encr_tx_key_index = keyidx; 1576 priv->encr_tx_key_index = keyidx;
1585 1577
@@ -1611,7 +1603,7 @@ static int rndis_iw_set_scan(struct net_device *dev,
1611 1603
1612 1604
1613static char *rndis_translate_scan(struct net_device *dev, 1605static char *rndis_translate_scan(struct net_device *dev,
1614 char *cev, char *end_buf, struct NDIS_WLAN_BSSID_EX *bssid) 1606 char *cev, char *end_buf, struct ndis_80211_bssid_ex *bssid)
1615{ 1607{
1616#ifdef DEBUG 1608#ifdef DEBUG
1617 struct usbnet *usbdev = dev->priv; 1609 struct usbnet *usbdev = dev->priv;
@@ -1624,60 +1616,55 @@ static char *rndis_translate_scan(struct net_device *dev,
1624 unsigned char sbuf[32]; 1616 unsigned char sbuf[32];
1625 DECLARE_MAC_BUF(mac); 1617 DECLARE_MAC_BUF(mac);
1626 1618
1627 bssid_len = le32_to_cpu(bssid->Length); 1619 bssid_len = le32_to_cpu(bssid->length);
1628 1620
1629 devdbg(usbdev, "BSSID %s", print_mac(mac, bssid->MacAddress)); 1621 devdbg(usbdev, "BSSID %s", print_mac(mac, bssid->mac));
1630 iwe.cmd = SIOCGIWAP; 1622 iwe.cmd = SIOCGIWAP;
1631 iwe.u.ap_addr.sa_family = ARPHRD_ETHER; 1623 iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
1632 memcpy(iwe.u.ap_addr.sa_data, bssid->MacAddress, ETH_ALEN); 1624 memcpy(iwe.u.ap_addr.sa_data, bssid->mac, ETH_ALEN);
1633 cev = iwe_stream_add_event(cev, end_buf, &iwe, IW_EV_ADDR_LEN); 1625 cev = iwe_stream_add_event(cev, end_buf, &iwe, IW_EV_ADDR_LEN);
1634 1626
1635 devdbg(usbdev, "SSID(%d) %s", 1627 devdbg(usbdev, "SSID(%d) %s", le32_to_cpu(bssid->ssid.length),
1636 le32_to_cpu(bssid->Ssid.SsidLength), 1628 bssid->ssid.essid);
1637 bssid->Ssid.Ssid);
1638 iwe.cmd = SIOCGIWESSID; 1629 iwe.cmd = SIOCGIWESSID;
1639 iwe.u.essid.length = le32_to_cpu(bssid->Ssid.SsidLength); 1630 iwe.u.essid.length = le32_to_cpu(bssid->ssid.length);
1640 iwe.u.essid.flags = 1; 1631 iwe.u.essid.flags = 1;
1641 cev = iwe_stream_add_point(cev, end_buf, &iwe, 1632 cev = iwe_stream_add_point(cev, end_buf, &iwe, bssid->ssid.essid);
1642 bssid->Ssid.Ssid);
1643 1633
1644 devdbg(usbdev, "MODE %d", 1634 devdbg(usbdev, "MODE %d", le32_to_cpu(bssid->net_infra));
1645 le32_to_cpu(bssid->InfrastructureMode));
1646 iwe.cmd = SIOCGIWMODE; 1635 iwe.cmd = SIOCGIWMODE;
1647 switch (le32_to_cpu(bssid->InfrastructureMode)) { 1636 switch (le32_to_cpu(bssid->net_infra)) {
1648 case Ndis802_11IBSS: 1637 case ndis_80211_infra_adhoc:
1649 iwe.u.mode = IW_MODE_ADHOC; 1638 iwe.u.mode = IW_MODE_ADHOC;
1650 break; 1639 break;
1651 case Ndis802_11Infrastructure: 1640 case ndis_80211_infra_infra:
1652 iwe.u.mode = IW_MODE_INFRA; 1641 iwe.u.mode = IW_MODE_INFRA;
1653 break; 1642 break;
1654 /*case Ndis802_11AutoUnknown:*/ 1643 /*case ndis_80211_infra_auto_unknown:*/
1655 default: 1644 default:
1656 iwe.u.mode = IW_MODE_AUTO; 1645 iwe.u.mode = IW_MODE_AUTO;
1657 break; 1646 break;
1658 } 1647 }
1659 cev = iwe_stream_add_event(cev, end_buf, &iwe, IW_EV_UINT_LEN); 1648 cev = iwe_stream_add_event(cev, end_buf, &iwe, IW_EV_UINT_LEN);
1660 1649
1661 devdbg(usbdev, "FREQ %d kHz", 1650 devdbg(usbdev, "FREQ %d kHz", le32_to_cpu(bssid->config.ds_config));
1662 le32_to_cpu(bssid->Configuration.DSConfig));
1663 iwe.cmd = SIOCGIWFREQ; 1651 iwe.cmd = SIOCGIWFREQ;
1664 dsconfig_to_freq(le32_to_cpu(bssid->Configuration.DSConfig), 1652 dsconfig_to_freq(le32_to_cpu(bssid->config.ds_config), &iwe.u.freq);
1665 &iwe.u.freq);
1666 cev = iwe_stream_add_event(cev, end_buf, &iwe, IW_EV_FREQ_LEN); 1653 cev = iwe_stream_add_event(cev, end_buf, &iwe, IW_EV_FREQ_LEN);
1667 1654
1668 devdbg(usbdev, "QUAL %d", le32_to_cpu(bssid->Rssi)); 1655 devdbg(usbdev, "QUAL %d", le32_to_cpu(bssid->rssi));
1669 iwe.cmd = IWEVQUAL; 1656 iwe.cmd = IWEVQUAL;
1670 iwe.u.qual.qual = level_to_qual(le32_to_cpu(bssid->Rssi)); 1657 iwe.u.qual.qual = level_to_qual(le32_to_cpu(bssid->rssi));
1671 iwe.u.qual.level = le32_to_cpu(bssid->Rssi); 1658 iwe.u.qual.level = le32_to_cpu(bssid->rssi);
1672 iwe.u.qual.updated = IW_QUAL_QUAL_UPDATED 1659 iwe.u.qual.updated = IW_QUAL_QUAL_UPDATED
1673 | IW_QUAL_LEVEL_UPDATED 1660 | IW_QUAL_LEVEL_UPDATED
1674 | IW_QUAL_NOISE_INVALID; 1661 | IW_QUAL_NOISE_INVALID;
1675 cev = iwe_stream_add_event(cev, end_buf, &iwe, IW_EV_QUAL_LEN); 1662 cev = iwe_stream_add_event(cev, end_buf, &iwe, IW_EV_QUAL_LEN);
1676 1663
1677 devdbg(usbdev, "ENCODE %d", le32_to_cpu(bssid->Privacy)); 1664 devdbg(usbdev, "ENCODE %d", le32_to_cpu(bssid->privacy));
1678 iwe.cmd = SIOCGIWENCODE; 1665 iwe.cmd = SIOCGIWENCODE;
1679 iwe.u.data.length = 0; 1666 iwe.u.data.length = 0;
1680 if (le32_to_cpu(bssid->Privacy) == Ndis802_11PrivFilterAcceptAll) 1667 if (le32_to_cpu(bssid->privacy) == ndis_80211_priv_accept_all)
1681 iwe.u.data.flags = IW_ENCODE_DISABLED; 1668 iwe.u.data.flags = IW_ENCODE_DISABLED;
1682 else 1669 else
1683 iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY; 1670 iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
@@ -1687,10 +1674,10 @@ static char *rndis_translate_scan(struct net_device *dev,
1687 devdbg(usbdev, "RATES:"); 1674 devdbg(usbdev, "RATES:");
1688 current_val = cev + IW_EV_LCP_LEN; 1675 current_val = cev + IW_EV_LCP_LEN;
1689 iwe.cmd = SIOCGIWRATE; 1676 iwe.cmd = SIOCGIWRATE;
1690 for (i = 0; i < sizeof(bssid->SupportedRates); i++) { 1677 for (i = 0; i < sizeof(bssid->rates); i++) {
1691 if (bssid->SupportedRates[i] & 0x7f) { 1678 if (bssid->rates[i] & 0x7f) {
1692 iwe.u.bitrate.value = 1679 iwe.u.bitrate.value =
1693 ((bssid->SupportedRates[i] & 0x7f) * 1680 ((bssid->rates[i] & 0x7f) *
1694 500000); 1681 500000);
1695 devdbg(usbdev, " %d", iwe.u.bitrate.value); 1682 devdbg(usbdev, " %d", iwe.u.bitrate.value);
1696 current_val = iwe_stream_add_value(cev, 1683 current_val = iwe_stream_add_value(cev,
@@ -1702,24 +1689,24 @@ static char *rndis_translate_scan(struct net_device *dev,
1702 if ((current_val - cev) > IW_EV_LCP_LEN) 1689 if ((current_val - cev) > IW_EV_LCP_LEN)
1703 cev = current_val; 1690 cev = current_val;
1704 1691
1705 beacon = le32_to_cpu(bssid->Configuration.BeaconPeriod); 1692 beacon = le32_to_cpu(bssid->config.beacon_period);
1706 devdbg(usbdev, "BCN_INT %d", beacon); 1693 devdbg(usbdev, "BCN_INT %d", beacon);
1707 iwe.cmd = IWEVCUSTOM; 1694 iwe.cmd = IWEVCUSTOM;
1708 snprintf(sbuf, sizeof(sbuf), "bcn_int=%d", beacon); 1695 snprintf(sbuf, sizeof(sbuf), "bcn_int=%d", beacon);
1709 iwe.u.data.length = strlen(sbuf); 1696 iwe.u.data.length = strlen(sbuf);
1710 cev = iwe_stream_add_point(cev, end_buf, &iwe, sbuf); 1697 cev = iwe_stream_add_point(cev, end_buf, &iwe, sbuf);
1711 1698
1712 atim = le32_to_cpu(bssid->Configuration.ATIMWindow); 1699 atim = le32_to_cpu(bssid->config.atim_window);
1713 devdbg(usbdev, "ATIM %d", atim); 1700 devdbg(usbdev, "ATIM %d", atim);
1714 iwe.cmd = IWEVCUSTOM; 1701 iwe.cmd = IWEVCUSTOM;
1715 snprintf(sbuf, sizeof(sbuf), "atim=%u", atim); 1702 snprintf(sbuf, sizeof(sbuf), "atim=%u", atim);
1716 iwe.u.data.length = strlen(sbuf); 1703 iwe.u.data.length = strlen(sbuf);
1717 cev = iwe_stream_add_point(cev, end_buf, &iwe, sbuf); 1704 cev = iwe_stream_add_point(cev, end_buf, &iwe, sbuf);
1718 1705
1719 ie = (void *)(bssid->IEs + sizeof(struct NDIS_802_11_FIXED_IEs)); 1706 ie = (void *)(bssid->ies + sizeof(struct ndis_80211_fixed_ies));
1720 ie_len = min(bssid_len - (int)sizeof(*bssid), 1707 ie_len = min(bssid_len - (int)sizeof(*bssid),
1721 (int)le32_to_cpu(bssid->IELength)); 1708 (int)le32_to_cpu(bssid->ie_length));
1722 ie_len -= sizeof(struct NDIS_802_11_FIXED_IEs); 1709 ie_len -= sizeof(struct ndis_80211_fixed_ies);
1723 while (ie_len >= sizeof(*ie) && sizeof(*ie) + ie->len <= ie_len) { 1710 while (ie_len >= sizeof(*ie) && sizeof(*ie) + ie->len <= ie_len) {
1724 if ((ie->id == MFIE_TYPE_GENERIC && ie->len >= 4 && 1711 if ((ie->id == MFIE_TYPE_GENERIC && ie->len >= 4 &&
1725 memcmp(ie->data, "\x00\x50\xf2\x01", 4) == 0) || 1712 memcmp(ie->data, "\x00\x50\xf2\x01", 4) == 0) ||
@@ -1746,8 +1733,8 @@ static int rndis_iw_get_scan(struct net_device *dev,
1746 struct usbnet *usbdev = dev->priv; 1733 struct usbnet *usbdev = dev->priv;
1747 void *buf = NULL; 1734 void *buf = NULL;
1748 char *cev = extra; 1735 char *cev = extra;
1749 struct NDIS_802_11_BSSID_LIST_EX *bssid_list; 1736 struct ndis_80211_bssid_list_ex *bssid_list;
1750 struct NDIS_WLAN_BSSID_EX *bssid; 1737 struct ndis_80211_bssid_ex *bssid;
1751 int ret = -EINVAL, len, count, bssid_len; 1738 int ret = -EINVAL, len, count, bssid_len;
1752 1739
1753 devdbg(usbdev, "SIOCGIWSCAN"); 1740 devdbg(usbdev, "SIOCGIWSCAN");
@@ -1765,16 +1752,16 @@ static int rndis_iw_get_scan(struct net_device *dev,
1765 goto out; 1752 goto out;
1766 1753
1767 bssid_list = buf; 1754 bssid_list = buf;
1768 bssid = bssid_list->Bssid; 1755 bssid = bssid_list->bssid;
1769 bssid_len = le32_to_cpu(bssid->Length); 1756 bssid_len = le32_to_cpu(bssid->length);
1770 count = le32_to_cpu(bssid_list->NumberOfItems); 1757 count = le32_to_cpu(bssid_list->num_items);
1771 devdbg(usbdev, "SIOCGIWSCAN: %d BSSIDs found", count); 1758 devdbg(usbdev, "SIOCGIWSCAN: %d BSSIDs found", count);
1772 1759
1773 while (count && ((void *)bssid + bssid_len) <= (buf + len)) { 1760 while (count && ((void *)bssid + bssid_len) <= (buf + len)) {
1774 cev = rndis_translate_scan(dev, cev, extra + IW_SCAN_MAX_DATA, 1761 cev = rndis_translate_scan(dev, cev, extra + IW_SCAN_MAX_DATA,
1775 bssid); 1762 bssid);
1776 bssid = (void *)bssid + bssid_len; 1763 bssid = (void *)bssid + bssid_len;
1777 bssid_len = le32_to_cpu(bssid->Length); 1764 bssid_len = le32_to_cpu(bssid->length);
1778 count--; 1765 count--;
1779 } 1766 }
1780 1767
@@ -1948,7 +1935,7 @@ static int rndis_iw_set_freq(struct net_device *dev,
1948 struct iw_request_info *info, union iwreq_data *wrqu, char *extra) 1935 struct iw_request_info *info, union iwreq_data *wrqu, char *extra)
1949{ 1936{
1950 struct usbnet *usbdev = dev->priv; 1937 struct usbnet *usbdev = dev->priv;
1951 struct NDIS_802_11_CONFIGURATION config; 1938 struct ndis_80211_conf config;
1952 unsigned int dsconfig; 1939 unsigned int dsconfig;
1953 int len, ret; 1940 int len, ret;
1954 1941
@@ -1967,7 +1954,7 @@ static int rndis_iw_set_freq(struct net_device *dev,
1967 return 0; 1954 return 0;
1968 } 1955 }
1969 1956
1970 config.DSConfig = cpu_to_le32(dsconfig); 1957 config.ds_config = cpu_to_le32(dsconfig);
1971 1958
1972 devdbg(usbdev, "SIOCSIWFREQ: %d * 10^%d", wrqu->freq.m, wrqu->freq.e); 1959 devdbg(usbdev, "SIOCSIWFREQ: %d * 10^%d", wrqu->freq.m, wrqu->freq.e);
1973 return rndis_set_oid(usbdev, OID_802_11_CONFIGURATION, &config, 1960 return rndis_set_oid(usbdev, OID_802_11_CONFIGURATION, &config,
@@ -1979,13 +1966,13 @@ static int rndis_iw_get_freq(struct net_device *dev,
1979 struct iw_request_info *info, union iwreq_data *wrqu, char *extra) 1966 struct iw_request_info *info, union iwreq_data *wrqu, char *extra)
1980{ 1967{
1981 struct usbnet *usbdev = dev->priv; 1968 struct usbnet *usbdev = dev->priv;
1982 struct NDIS_802_11_CONFIGURATION config; 1969 struct ndis_80211_conf config;
1983 int len, ret; 1970 int len, ret;
1984 1971
1985 len = sizeof(config); 1972 len = sizeof(config);
1986 ret = rndis_query_oid(usbdev, OID_802_11_CONFIGURATION, &config, &len); 1973 ret = rndis_query_oid(usbdev, OID_802_11_CONFIGURATION, &config, &len);
1987 if (ret == 0) 1974 if (ret == 0)
1988 dsconfig_to_freq(le32_to_cpu(config.DSConfig), &wrqu->freq); 1975 dsconfig_to_freq(le32_to_cpu(config.ds_config), &wrqu->freq);
1989 1976
1990 devdbg(usbdev, "SIOCGIWFREQ: %d", wrqu->freq.m); 1977 devdbg(usbdev, "SIOCGIWFREQ: %d", wrqu->freq.m);
1991 return ret; 1978 return ret;
@@ -2266,14 +2253,14 @@ static int rndis_wext_get_caps(struct usbnet *dev)
2266 n = 8; 2253 n = 8;
2267 for (i = 0; i < n; i++) { 2254 for (i = 0; i < n; i++) {
2268 switch (le32_to_cpu(networks_supported.items[i])) { 2255 switch (le32_to_cpu(networks_supported.items[i])) {
2269 case Ndis802_11FH: 2256 case ndis_80211_type_freq_hop:
2270 case Ndis802_11DS: 2257 case ndis_80211_type_direct_seq:
2271 priv->caps |= CAP_MODE_80211B; 2258 priv->caps |= CAP_MODE_80211B;
2272 break; 2259 break;
2273 case Ndis802_11OFDM5: 2260 case ndis_80211_type_ofdm_a:
2274 priv->caps |= CAP_MODE_80211A; 2261 priv->caps |= CAP_MODE_80211A;
2275 break; 2262 break;
2276 case Ndis802_11OFDM24: 2263 case ndis_80211_type_ofdm_g:
2277 priv->caps |= CAP_MODE_80211G; 2264 priv->caps |= CAP_MODE_80211G;
2278 break; 2265 break;
2279 } 2266 }
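The rndis_wlan hunk above reorders the 32-byte TKIP key passed down from wpa_supplicant, because the two 8-byte Michael MIC halves arrive in the opposite order from what the NDIS key blob expects. Below is a minimal stand-alone sketch of that copy pattern; the helper name and the generic labels for the MIC halves are illustrative only, not the driver's own code.

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/*
 * Sketch only: copy a 32-byte TKIP key (16-byte temporal key followed
 * by two 8-byte Michael MIC halves) while swapping the two MIC halves,
 * mirroring the three memcpy() calls in the hunk above.
 */
static void tkip_key_to_ndis_order(uint8_t dst[32], const uint8_t src[32])
{
	memcpy(dst, src, 16);          /* temporal key, copied unchanged   */
	memcpy(dst + 16, src + 24, 8); /* second MIC half moves forward    */
	memcpy(dst + 24, src + 16, 8); /* first MIC half moves to the end  */
}

int main(void)
{
	uint8_t in[32], out[32];

	for (int i = 0; i < 32; i++)
		in[i] = (uint8_t)i;

	tkip_key_to_ndis_order(out, in);

	for (int i = 0; i < 32; i++)
		printf("%02x%c", (unsigned)out[i], (i % 8 == 7) ? '\n' : ' ');
	return 0;
}
```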
diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/rt2x00/Kconfig
index da05b1faf60d..a1e3938cba9b 100644
--- a/drivers/net/wireless/rt2x00/Kconfig
+++ b/drivers/net/wireless/rt2x00/Kconfig
@@ -5,30 +5,28 @@ config RT2X00
5 This will enable the experimental support for the Ralink drivers, 5 This will enable the experimental support for the Ralink drivers,
6 developed in the rt2x00 project <http://rt2x00.serialmonkey.com>. 6 developed in the rt2x00 project <http://rt2x00.serialmonkey.com>.
7 7
8 These drivers will make use of the Devicescape ieee80211 stack. 8 These drivers will make use of the mac80211 stack.
9 9
10 When building one of the individual drivers, the rt2x00 library 10 When building one of the individual drivers, the rt2x00 library
11 will also be created. That library (when the driver is built as 11 will also be created. That library (when the driver is built as
12 a module) will be called "rt2x00lib.ko". 12 a module) will be called "rt2x00lib.ko".
13 13
14if RT2X00
15
14config RT2X00_LIB 16config RT2X00_LIB
15 tristate 17 tristate
16 depends on RT2X00
17 18
18config RT2X00_LIB_PCI 19config RT2X00_LIB_PCI
19 tristate 20 tristate
20 depends on RT2X00
21 select RT2X00_LIB 21 select RT2X00_LIB
22 22
23config RT2X00_LIB_USB 23config RT2X00_LIB_USB
24 tristate 24 tristate
25 depends on RT2X00
26 select RT2X00_LIB 25 select RT2X00_LIB
27 26
28config RT2X00_LIB_FIRMWARE 27config RT2X00_LIB_FIRMWARE
29 boolean 28 boolean
30 depends on RT2X00_LIB 29 depends on RT2X00_LIB
31 select CRC_ITU_T
32 select FW_LOADER 30 select FW_LOADER
33 31
34config RT2X00_LIB_RFKILL 32config RT2X00_LIB_RFKILL
@@ -37,9 +35,13 @@ config RT2X00_LIB_RFKILL
37 select RFKILL 35 select RFKILL
38 select INPUT_POLLDEV 36 select INPUT_POLLDEV
39 37
38config RT2X00_LIB_LEDS
39 boolean
40 depends on RT2X00_LIB
41
40config RT2400PCI 42config RT2400PCI
41 tristate "Ralink rt2400 pci/pcmcia support" 43 tristate "Ralink rt2400 pci/pcmcia support"
42 depends on RT2X00 && PCI 44 depends on PCI
43 select RT2X00_LIB_PCI 45 select RT2X00_LIB_PCI
44 select EEPROM_93CX6 46 select EEPROM_93CX6
45 ---help--- 47 ---help---
@@ -56,9 +58,16 @@ config RT2400PCI_RFKILL
56 hardware button to control the radio state. 58 hardware button to control the radio state.
57 This feature depends on the RF switch subsystem rfkill. 59 This feature depends on the RF switch subsystem rfkill.
58 60
61config RT2400PCI_LEDS
62 bool "RT2400 leds support"
63 depends on RT2400PCI && LEDS_CLASS
64 select RT2X00_LIB_LEDS
65 ---help---
66 This adds support for led triggers provided by mac80211.
67
59config RT2500PCI 68config RT2500PCI
60 tristate "Ralink rt2500 pci/pcmcia support" 69 tristate "Ralink rt2500 pci/pcmcia support"
61 depends on RT2X00 && PCI 70 depends on PCI
62 select RT2X00_LIB_PCI 71 select RT2X00_LIB_PCI
63 select EEPROM_93CX6 72 select EEPROM_93CX6
64 ---help--- 73 ---help---
@@ -75,11 +84,19 @@ config RT2500PCI_RFKILL
75 hardware button to control the radio state. 84 hardware button to control the radio state.
76 This feature depends on the RF switch subsystem rfkill. 85 This feature depends on the RF switch subsystem rfkill.
77 86
87config RT2500PCI_LEDS
88 bool "RT2500 leds support"
89 depends on RT2500PCI && LEDS_CLASS
90 select RT2X00_LIB_LEDS
91 ---help---
92 This adds support for led triggers provided by mac80211.
93
78config RT61PCI 94config RT61PCI
79 tristate "Ralink rt61 pci/pcmcia support" 95 tristate "Ralink rt61 pci/pcmcia support"
80 depends on RT2X00 && PCI 96 depends on PCI
81 select RT2X00_LIB_PCI 97 select RT2X00_LIB_PCI
82 select RT2X00_LIB_FIRMWARE 98 select RT2X00_LIB_FIRMWARE
99 select CRC_ITU_T
83 select EEPROM_93CX6 100 select EEPROM_93CX6
84 ---help--- 101 ---help---
85 This is an experimental driver for the Ralink rt61 wireless chip. 102 This is an experimental driver for the Ralink rt61 wireless chip.
@@ -95,25 +112,47 @@ config RT61PCI_RFKILL
95 hardware button to control the radio state. 112 hardware button to control the radio state.
96 This feature depends on the RF switch subsystem rfkill. 113 This feature depends on the RF switch subsystem rfkill.
97 114
115config RT61PCI_LEDS
116 bool "RT61 leds support"
117 depends on RT61PCI && LEDS_CLASS
118 select RT2X00_LIB_LEDS
119 ---help---
120 This adds support for led triggers provided by mac80211.
121
98config RT2500USB 122config RT2500USB
99 tristate "Ralink rt2500 usb support" 123 tristate "Ralink rt2500 usb support"
100 depends on RT2X00 && USB 124 depends on USB
101 select RT2X00_LIB_USB 125 select RT2X00_LIB_USB
102 ---help--- 126 ---help---
103 This is an experimental driver for the Ralink rt2500 wireless chip. 127 This is an experimental driver for the Ralink rt2500 wireless chip.
104 128
105 When compiled as a module, this driver will be called "rt2500usb.ko". 129 When compiled as a module, this driver will be called "rt2500usb.ko".
106 130
131config RT2500USB_LEDS
132 bool "RT2500 leds support"
133 depends on RT2500USB && LEDS_CLASS
134 select RT2X00_LIB_LEDS
135 ---help---
136 This adds support for led triggers provided by mac80211.
137
107config RT73USB 138config RT73USB
108 tristate "Ralink rt73 usb support" 139 tristate "Ralink rt73 usb support"
109 depends on RT2X00 && USB 140 depends on USB
110 select RT2X00_LIB_USB 141 select RT2X00_LIB_USB
111 select RT2X00_LIB_FIRMWARE 142 select RT2X00_LIB_FIRMWARE
143 select CRC_ITU_T
112 ---help--- 144 ---help---
113 This is an experimental driver for the Ralink rt73 wireless chip. 145 This is an experimental driver for the Ralink rt73 wireless chip.
114 146
115 When compiled as a module, this driver will be called "rt73usb.ko". 147 When compiled as a module, this driver will be called "rt73usb.ko".
116 148
149config RT73USB_LEDS
150 bool "RT73 leds support"
151 depends on RT73USB && LEDS_CLASS
152 select RT2X00_LIB_LEDS
153 ---help---
154 This adds support for led triggers provided by mac80211.
155
117config RT2X00_LIB_DEBUGFS 156config RT2X00_LIB_DEBUGFS
118 bool "Ralink debugfs support" 157 bool "Ralink debugfs support"
119 depends on RT2X00_LIB && MAC80211_DEBUGFS 158 depends on RT2X00_LIB && MAC80211_DEBUGFS
@@ -128,3 +167,4 @@ config RT2X00_DEBUG
128 ---help--- 167 ---help---
129 Enable debugging output for all rt2x00 modules 168 Enable debugging output for all rt2x00 modules
130 169
170endif
diff --git a/drivers/net/wireless/rt2x00/Makefile b/drivers/net/wireless/rt2x00/Makefile
index 30d654a42eea..1087dbcf1a04 100644
--- a/drivers/net/wireless/rt2x00/Makefile
+++ b/drivers/net/wireless/rt2x00/Makefile
@@ -1,22 +1,17 @@
1rt2x00lib-objs := rt2x00dev.o rt2x00mac.o rt2x00config.o 1rt2x00lib-y += rt2x00dev.o
2rt2x00lib-y += rt2x00mac.o
3rt2x00lib-y += rt2x00config.o
4rt2x00lib-y += rt2x00queue.o
5rt2x00lib-$(CONFIG_RT2X00_LIB_DEBUGFS) += rt2x00debug.o
6rt2x00lib-$(CONFIG_RT2X00_LIB_RFKILL) += rt2x00rfkill.o
7rt2x00lib-$(CONFIG_RT2X00_LIB_FIRMWARE) += rt2x00firmware.o
8rt2x00lib-$(CONFIG_RT2X00_LIB_LEDS) += rt2x00leds.o
2 9
3ifeq ($(CONFIG_RT2X00_LIB_DEBUGFS),y) 10obj-$(CONFIG_RT2X00_LIB) += rt2x00lib.o
4 rt2x00lib-objs += rt2x00debug.o 11obj-$(CONFIG_RT2X00_LIB_PCI) += rt2x00pci.o
5endif 12obj-$(CONFIG_RT2X00_LIB_USB) += rt2x00usb.o
6 13obj-$(CONFIG_RT2400PCI) += rt2400pci.o
7ifeq ($(CONFIG_RT2X00_LIB_RFKILL),y) 14obj-$(CONFIG_RT2500PCI) += rt2500pci.o
8 rt2x00lib-objs += rt2x00rfkill.o 15obj-$(CONFIG_RT61PCI) += rt61pci.o
9endif 16obj-$(CONFIG_RT2500USB) += rt2500usb.o
10 17obj-$(CONFIG_RT73USB) += rt73usb.o
11ifeq ($(CONFIG_RT2X00_LIB_FIRMWARE),y)
12 rt2x00lib-objs += rt2x00firmware.o
13endif
14
15obj-$(CONFIG_RT2X00_LIB) += rt2x00lib.o
16obj-$(CONFIG_RT2X00_LIB_PCI) += rt2x00pci.o
17obj-$(CONFIG_RT2X00_LIB_USB) += rt2x00usb.o
18obj-$(CONFIG_RT2400PCI) += rt2400pci.o
19obj-$(CONFIG_RT2500PCI) += rt2500pci.o
20obj-$(CONFIG_RT61PCI) += rt61pci.o
21obj-$(CONFIG_RT2500USB) += rt2500usb.o
22obj-$(CONFIG_RT73USB) += rt73usb.o
diff --git a/drivers/net/wireless/rt2x00/rt2400pci.c b/drivers/net/wireless/rt2x00/rt2400pci.c
index c69f85ed7669..b41187af1306 100644
--- a/drivers/net/wireless/rt2x00/rt2400pci.c
+++ b/drivers/net/wireless/rt2x00/rt2400pci.c
@@ -1,5 +1,5 @@
1/* 1/*
2 Copyright (C) 2004 - 2007 rt2x00 SourceForge Project 2 Copyright (C) 2004 - 2008 rt2x00 SourceForge Project
3 <http://rt2x00.serialmonkey.com> 3 <http://rt2x00.serialmonkey.com>
4 4
5 This program is free software; you can redistribute it and/or modify 5 This program is free software; you can redistribute it and/or modify
@@ -243,53 +243,109 @@ static int rt2400pci_rfkill_poll(struct rt2x00_dev *rt2x00dev)
243#define rt2400pci_rfkill_poll NULL 243#define rt2400pci_rfkill_poll NULL
244#endif /* CONFIG_RT2400PCI_RFKILL */ 244#endif /* CONFIG_RT2400PCI_RFKILL */
245 245
246/* 246#ifdef CONFIG_RT2400PCI_LEDS
247 * Configuration handlers. 247static void rt2400pci_brightness_set(struct led_classdev *led_cdev,
248 */ 248 enum led_brightness brightness)
249static void rt2400pci_config_mac_addr(struct rt2x00_dev *rt2x00dev,
250 __le32 *mac)
251{ 249{
252 rt2x00pci_register_multiwrite(rt2x00dev, CSR3, mac, 250 struct rt2x00_led *led =
253 (2 * sizeof(__le32))); 251 container_of(led_cdev, struct rt2x00_led, led_dev);
252 unsigned int enabled = brightness != LED_OFF;
253 u32 reg;
254
255 rt2x00pci_register_read(led->rt2x00dev, LEDCSR, &reg);
256
257 if (led->type == LED_TYPE_RADIO || led->type == LED_TYPE_ASSOC)
258 rt2x00_set_field32(&reg, LEDCSR_LINK, enabled);
259 else if (led->type == LED_TYPE_ACTIVITY)
260 rt2x00_set_field32(&reg, LEDCSR_ACTIVITY, enabled);
261
262 rt2x00pci_register_write(led->rt2x00dev, LEDCSR, reg);
254} 263}
255 264
256static void rt2400pci_config_bssid(struct rt2x00_dev *rt2x00dev, 265static int rt2400pci_blink_set(struct led_classdev *led_cdev,
257 __le32 *bssid) 266 unsigned long *delay_on,
267 unsigned long *delay_off)
258{ 268{
259 rt2x00pci_register_multiwrite(rt2x00dev, CSR5, bssid, 269 struct rt2x00_led *led =
260 (2 * sizeof(__le32))); 270 container_of(led_cdev, struct rt2x00_led, led_dev);
271 u32 reg;
272
273 rt2x00pci_register_read(led->rt2x00dev, LEDCSR, &reg);
274 rt2x00_set_field32(&reg, LEDCSR_ON_PERIOD, *delay_on);
275 rt2x00_set_field32(&reg, LEDCSR_OFF_PERIOD, *delay_off);
276 rt2x00pci_register_write(led->rt2x00dev, LEDCSR, reg);
277
278 return 0;
261} 279}
280#endif /* CONFIG_RT2400PCI_LEDS */
262 281
263static void rt2400pci_config_type(struct rt2x00_dev *rt2x00dev, const int type, 282/*
264 const int tsf_sync) 283 * Configuration handlers.
284 */
285static void rt2400pci_config_filter(struct rt2x00_dev *rt2x00dev,
286 const unsigned int filter_flags)
265{ 287{
266 u32 reg; 288 u32 reg;
267 289
268 rt2x00pci_register_write(rt2x00dev, CSR14, 0);
269
270 /* 290 /*
271 * Enable beacon config 291 * Start configuration steps.
292 * Note that the version error will always be dropped
293 * since there is no filter for it at this time.
272 */ 294 */
273 rt2x00pci_register_read(rt2x00dev, BCNCSR1, &reg); 295 rt2x00pci_register_read(rt2x00dev, RXCSR0, &reg);
274 rt2x00_set_field32(&reg, BCNCSR1_PRELOAD, 296 rt2x00_set_field32(&reg, RXCSR0_DROP_CRC,
275 PREAMBLE + get_duration(IEEE80211_HEADER, 20)); 297 !(filter_flags & FIF_FCSFAIL));
276 rt2x00pci_register_write(rt2x00dev, BCNCSR1, reg); 298 rt2x00_set_field32(&reg, RXCSR0_DROP_PHYSICAL,
299 !(filter_flags & FIF_PLCPFAIL));
300 rt2x00_set_field32(&reg, RXCSR0_DROP_CONTROL,
301 !(filter_flags & FIF_CONTROL));
302 rt2x00_set_field32(&reg, RXCSR0_DROP_NOT_TO_ME,
303 !(filter_flags & FIF_PROMISC_IN_BSS));
304 rt2x00_set_field32(&reg, RXCSR0_DROP_TODS,
305 !(filter_flags & FIF_PROMISC_IN_BSS) &&
306 !rt2x00dev->intf_ap_count);
307 rt2x00_set_field32(&reg, RXCSR0_DROP_VERSION_ERROR, 1);
308 rt2x00pci_register_write(rt2x00dev, RXCSR0, reg);
309}
277 310
278 /* 311static void rt2400pci_config_intf(struct rt2x00_dev *rt2x00dev,
279 * Enable synchronisation. 312 struct rt2x00_intf *intf,
280 */ 313 struct rt2x00intf_conf *conf,
281 rt2x00pci_register_read(rt2x00dev, CSR14, &reg); 314 const unsigned int flags)
282 rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 1); 315{
283 rt2x00_set_field32(&reg, CSR14_TBCN, (tsf_sync == TSF_SYNC_BEACON)); 316 unsigned int bcn_preload;
284 rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 0); 317 u32 reg;
285 rt2x00_set_field32(&reg, CSR14_TSF_SYNC, tsf_sync); 318
286 rt2x00pci_register_write(rt2x00dev, CSR14, reg); 319 if (flags & CONFIG_UPDATE_TYPE) {
320 /*
321 * Enable beacon config
322 */
323 bcn_preload = PREAMBLE + get_duration(IEEE80211_HEADER, 20);
324 rt2x00pci_register_read(rt2x00dev, BCNCSR1, &reg);
325 rt2x00_set_field32(&reg, BCNCSR1_PRELOAD, bcn_preload);
326 rt2x00pci_register_write(rt2x00dev, BCNCSR1, reg);
327
328 /*
329 * Enable synchronisation.
330 */
331 rt2x00pci_register_read(rt2x00dev, CSR14, &reg);
332 rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 1);
333 rt2x00_set_field32(&reg, CSR14_TSF_SYNC, conf->sync);
334 rt2x00_set_field32(&reg, CSR14_TBCN, 1);
335 rt2x00pci_register_write(rt2x00dev, CSR14, reg);
336 }
337
338 if (flags & CONFIG_UPDATE_MAC)
339 rt2x00pci_register_multiwrite(rt2x00dev, CSR3,
340 conf->mac, sizeof(conf->mac));
341
342 if (flags & CONFIG_UPDATE_BSSID)
343 rt2x00pci_register_multiwrite(rt2x00dev, CSR5,
344 conf->bssid, sizeof(conf->bssid));
287} 345}
288 346
289static void rt2400pci_config_preamble(struct rt2x00_dev *rt2x00dev, 347static void rt2400pci_config_erp(struct rt2x00_dev *rt2x00dev,
290 const int short_preamble, 348 struct rt2x00lib_erp *erp)
291 const int ack_timeout,
292 const int ack_consume_time)
293{ 349{
294 int preamble_mask; 350 int preamble_mask;
295 u32 reg; 351 u32 reg;
@@ -297,11 +353,13 @@ static void rt2400pci_config_preamble(struct rt2x00_dev *rt2x00dev,
297 /* 353 /*
298 * When short preamble is enabled, we should set bit 0x08 354 * When short preamble is enabled, we should set bit 0x08
299 */ 355 */
300 preamble_mask = short_preamble << 3; 356 preamble_mask = erp->short_preamble << 3;
301 357
302 rt2x00pci_register_read(rt2x00dev, TXCSR1, &reg); 358 rt2x00pci_register_read(rt2x00dev, TXCSR1, &reg);
303 rt2x00_set_field32(&reg, TXCSR1_ACK_TIMEOUT, ack_timeout); 359 rt2x00_set_field32(&reg, TXCSR1_ACK_TIMEOUT,
304 rt2x00_set_field32(&reg, TXCSR1_ACK_CONSUME_TIME, ack_consume_time); 360 erp->ack_timeout);
361 rt2x00_set_field32(&reg, TXCSR1_ACK_CONSUME_TIME,
362 erp->ack_consume_time);
305 rt2x00pci_register_write(rt2x00dev, TXCSR1, reg); 363 rt2x00pci_register_write(rt2x00dev, TXCSR1, reg);
306 364
307 rt2x00pci_register_read(rt2x00dev, ARCSR2, &reg); 365 rt2x00pci_register_read(rt2x00dev, ARCSR2, &reg);
@@ -397,6 +455,13 @@ static void rt2400pci_config_antenna(struct rt2x00_dev *rt2x00dev,
397 u8 r1; 455 u8 r1;
398 u8 r4; 456 u8 r4;
399 457
458 /*
459 * We should never come here because rt2x00lib is supposed
460 * to catch this and send us the correct antenna explicitly.
461 */
462 BUG_ON(ant->rx == ANTENNA_SW_DIVERSITY ||
463 ant->tx == ANTENNA_SW_DIVERSITY);
464
400 rt2400pci_bbp_read(rt2x00dev, 4, &r4); 465 rt2400pci_bbp_read(rt2x00dev, 4, &r4);
401 rt2400pci_bbp_read(rt2x00dev, 1, &r1); 466 rt2400pci_bbp_read(rt2x00dev, 1, &r1);
402 467
@@ -410,14 +475,8 @@ static void rt2400pci_config_antenna(struct rt2x00_dev *rt2x00dev,
410 case ANTENNA_A: 475 case ANTENNA_A:
411 rt2x00_set_field8(&r1, BBP_R1_TX_ANTENNA, 0); 476 rt2x00_set_field8(&r1, BBP_R1_TX_ANTENNA, 0);
412 break; 477 break;
413 case ANTENNA_SW_DIVERSITY:
414 /*
415 * NOTE: We should never come here because rt2x00lib is
416 * supposed to catch this and send us the correct antenna
417 * explicitely. However we are nog going to bug about this.
418 * Instead, just default to antenna B.
419 */
420 case ANTENNA_B: 478 case ANTENNA_B:
479 default:
421 rt2x00_set_field8(&r1, BBP_R1_TX_ANTENNA, 2); 480 rt2x00_set_field8(&r1, BBP_R1_TX_ANTENNA, 2);
422 break; 481 break;
423 } 482 }
@@ -432,14 +491,8 @@ static void rt2400pci_config_antenna(struct rt2x00_dev *rt2x00dev,
432 case ANTENNA_A: 491 case ANTENNA_A:
433 rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA, 0); 492 rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA, 0);
434 break; 493 break;
435 case ANTENNA_SW_DIVERSITY:
436 /*
437 * NOTE: We should never come here because rt2x00lib is
438 * supposed to catch this and send us the correct antenna
439 * explicitely. However we are nog going to bug about this.
440 * Instead, just default to antenna B.
441 */
442 case ANTENNA_B: 494 case ANTENNA_B:
495 default:
443 rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA, 2); 496 rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA, 2);
444 break; 497 break;
445 } 498 }
@@ -481,8 +534,8 @@ static void rt2400pci_config_duration(struct rt2x00_dev *rt2x00dev,
481} 534}
482 535
483static void rt2400pci_config(struct rt2x00_dev *rt2x00dev, 536static void rt2400pci_config(struct rt2x00_dev *rt2x00dev,
484 const unsigned int flags, 537 struct rt2x00lib_conf *libconf,
485 struct rt2x00lib_conf *libconf) 538 const unsigned int flags)
486{ 539{
487 if (flags & CONFIG_UPDATE_PHYMODE) 540 if (flags & CONFIG_UPDATE_PHYMODE)
488 rt2400pci_config_phymode(rt2x00dev, libconf->basic_rates); 541 rt2400pci_config_phymode(rt2x00dev, libconf->basic_rates);
@@ -498,45 +551,17 @@ static void rt2400pci_config(struct rt2x00_dev *rt2x00dev,
498} 551}
499 552
500static void rt2400pci_config_cw(struct rt2x00_dev *rt2x00dev, 553static void rt2400pci_config_cw(struct rt2x00_dev *rt2x00dev,
501 struct ieee80211_tx_queue_params *params) 554 const int cw_min, const int cw_max)
502{ 555{
503 u32 reg; 556 u32 reg;
504 557
505 rt2x00pci_register_read(rt2x00dev, CSR11, &reg); 558 rt2x00pci_register_read(rt2x00dev, CSR11, &reg);
506 rt2x00_set_field32(&reg, CSR11_CWMIN, params->cw_min); 559 rt2x00_set_field32(&reg, CSR11_CWMIN, cw_min);
507 rt2x00_set_field32(&reg, CSR11_CWMAX, params->cw_max); 560 rt2x00_set_field32(&reg, CSR11_CWMAX, cw_max);
508 rt2x00pci_register_write(rt2x00dev, CSR11, reg); 561 rt2x00pci_register_write(rt2x00dev, CSR11, reg);
509} 562}
510 563
511/* 564/*
512 * LED functions.
513 */
514static void rt2400pci_enable_led(struct rt2x00_dev *rt2x00dev)
515{
516 u32 reg;
517
518 rt2x00pci_register_read(rt2x00dev, LEDCSR, &reg);
519
520 rt2x00_set_field32(&reg, LEDCSR_ON_PERIOD, 70);
521 rt2x00_set_field32(&reg, LEDCSR_OFF_PERIOD, 30);
522 rt2x00_set_field32(&reg, LEDCSR_LINK,
523 (rt2x00dev->led_mode != LED_MODE_ASUS));
524 rt2x00_set_field32(&reg, LEDCSR_ACTIVITY,
525 (rt2x00dev->led_mode != LED_MODE_TXRX_ACTIVITY));
526 rt2x00pci_register_write(rt2x00dev, LEDCSR, reg);
527}
528
529static void rt2400pci_disable_led(struct rt2x00_dev *rt2x00dev)
530{
531 u32 reg;
532
533 rt2x00pci_register_read(rt2x00dev, LEDCSR, &reg);
534 rt2x00_set_field32(&reg, LEDCSR_LINK, 0);
535 rt2x00_set_field32(&reg, LEDCSR_ACTIVITY, 0);
536 rt2x00pci_register_write(rt2x00dev, LEDCSR, reg);
537}
538
539/*
540 * Link tuning 565 * Link tuning
541 */ 566 */
542static void rt2400pci_link_stats(struct rt2x00_dev *rt2x00dev, 567static void rt2400pci_link_stats(struct rt2x00_dev *rt2x00dev,
@@ -593,90 +618,94 @@ static void rt2400pci_link_tuner(struct rt2x00_dev *rt2x00dev)
593 * Initialization functions. 618 * Initialization functions.
594 */ 619 */
595static void rt2400pci_init_rxentry(struct rt2x00_dev *rt2x00dev, 620static void rt2400pci_init_rxentry(struct rt2x00_dev *rt2x00dev,
596 struct data_entry *entry) 621 struct queue_entry *entry)
597{ 622{
598 __le32 *rxd = entry->priv; 623 struct queue_entry_priv_pci_rx *priv_rx = entry->priv_data;
599 u32 word; 624 u32 word;
600 625
601 rt2x00_desc_read(rxd, 2, &word); 626 rt2x00_desc_read(priv_rx->desc, 2, &word);
602 rt2x00_set_field32(&word, RXD_W2_BUFFER_LENGTH, entry->ring->data_size); 627 rt2x00_set_field32(&word, RXD_W2_BUFFER_LENGTH,
603 rt2x00_desc_write(rxd, 2, word); 628 entry->queue->data_size);
629 rt2x00_desc_write(priv_rx->desc, 2, word);
604 630
605 rt2x00_desc_read(rxd, 1, &word); 631 rt2x00_desc_read(priv_rx->desc, 1, &word);
606 rt2x00_set_field32(&word, RXD_W1_BUFFER_ADDRESS, entry->data_dma); 632 rt2x00_set_field32(&word, RXD_W1_BUFFER_ADDRESS, priv_rx->data_dma);
607 rt2x00_desc_write(rxd, 1, word); 633 rt2x00_desc_write(priv_rx->desc, 1, word);
608 634
609 rt2x00_desc_read(rxd, 0, &word); 635 rt2x00_desc_read(priv_rx->desc, 0, &word);
610 rt2x00_set_field32(&word, RXD_W0_OWNER_NIC, 1); 636 rt2x00_set_field32(&word, RXD_W0_OWNER_NIC, 1);
611 rt2x00_desc_write(rxd, 0, word); 637 rt2x00_desc_write(priv_rx->desc, 0, word);
612} 638}
613 639
614static void rt2400pci_init_txentry(struct rt2x00_dev *rt2x00dev, 640static void rt2400pci_init_txentry(struct rt2x00_dev *rt2x00dev,
615 struct data_entry *entry) 641 struct queue_entry *entry)
616{ 642{
617 __le32 *txd = entry->priv; 643 struct queue_entry_priv_pci_tx *priv_tx = entry->priv_data;
618 u32 word; 644 u32 word;
619 645
620 rt2x00_desc_read(txd, 1, &word); 646 rt2x00_desc_read(priv_tx->desc, 1, &word);
621 rt2x00_set_field32(&word, TXD_W1_BUFFER_ADDRESS, entry->data_dma); 647 rt2x00_set_field32(&word, TXD_W1_BUFFER_ADDRESS, priv_tx->data_dma);
622 rt2x00_desc_write(txd, 1, word); 648 rt2x00_desc_write(priv_tx->desc, 1, word);
623 649
624 rt2x00_desc_read(txd, 2, &word); 650 rt2x00_desc_read(priv_tx->desc, 2, &word);
625 rt2x00_set_field32(&word, TXD_W2_BUFFER_LENGTH, entry->ring->data_size); 651 rt2x00_set_field32(&word, TXD_W2_BUFFER_LENGTH,
626 rt2x00_desc_write(txd, 2, word); 652 entry->queue->data_size);
653 rt2x00_desc_write(priv_tx->desc, 2, word);
627 654
628 rt2x00_desc_read(txd, 0, &word); 655 rt2x00_desc_read(priv_tx->desc, 0, &word);
629 rt2x00_set_field32(&word, TXD_W0_VALID, 0); 656 rt2x00_set_field32(&word, TXD_W0_VALID, 0);
630 rt2x00_set_field32(&word, TXD_W0_OWNER_NIC, 0); 657 rt2x00_set_field32(&word, TXD_W0_OWNER_NIC, 0);
631 rt2x00_desc_write(txd, 0, word); 658 rt2x00_desc_write(priv_tx->desc, 0, word);
632} 659}
633 660
634static int rt2400pci_init_rings(struct rt2x00_dev *rt2x00dev) 661static int rt2400pci_init_queues(struct rt2x00_dev *rt2x00dev)
635{ 662{
663 struct queue_entry_priv_pci_rx *priv_rx;
664 struct queue_entry_priv_pci_tx *priv_tx;
636 u32 reg; 665 u32 reg;
637 666
638 /* 667 /*
639 * Initialize registers. 668 * Initialize registers.
640 */ 669 */
641 rt2x00pci_register_read(rt2x00dev, TXCSR2, &reg); 670 rt2x00pci_register_read(rt2x00dev, TXCSR2, &reg);
642 rt2x00_set_field32(&reg, TXCSR2_TXD_SIZE, 671 rt2x00_set_field32(&reg, TXCSR2_TXD_SIZE, rt2x00dev->tx[0].desc_size);
643 rt2x00dev->tx[IEEE80211_TX_QUEUE_DATA0].desc_size); 672 rt2x00_set_field32(&reg, TXCSR2_NUM_TXD, rt2x00dev->tx[1].limit);
644 rt2x00_set_field32(&reg, TXCSR2_NUM_TXD, 673 rt2x00_set_field32(&reg, TXCSR2_NUM_ATIM, rt2x00dev->bcn[1].limit);
645 rt2x00dev->tx[IEEE80211_TX_QUEUE_DATA1].stats.limit); 674 rt2x00_set_field32(&reg, TXCSR2_NUM_PRIO, rt2x00dev->tx[0].limit);
646 rt2x00_set_field32(&reg, TXCSR2_NUM_ATIM,
647 rt2x00dev->bcn[1].stats.limit);
648 rt2x00_set_field32(&reg, TXCSR2_NUM_PRIO,
649 rt2x00dev->tx[IEEE80211_TX_QUEUE_DATA0].stats.limit);
650 rt2x00pci_register_write(rt2x00dev, TXCSR2, reg); 675 rt2x00pci_register_write(rt2x00dev, TXCSR2, reg);
651 676
677 priv_tx = rt2x00dev->tx[1].entries[0].priv_data;
652 rt2x00pci_register_read(rt2x00dev, TXCSR3, &reg); 678 rt2x00pci_register_read(rt2x00dev, TXCSR3, &reg);
653 rt2x00_set_field32(&reg, TXCSR3_TX_RING_REGISTER, 679 rt2x00_set_field32(&reg, TXCSR3_TX_RING_REGISTER,
654 rt2x00dev->tx[IEEE80211_TX_QUEUE_DATA1].data_dma); 680 priv_tx->desc_dma);
655 rt2x00pci_register_write(rt2x00dev, TXCSR3, reg); 681 rt2x00pci_register_write(rt2x00dev, TXCSR3, reg);
656 682
683 priv_tx = rt2x00dev->tx[0].entries[0].priv_data;
657 rt2x00pci_register_read(rt2x00dev, TXCSR5, &reg); 684 rt2x00pci_register_read(rt2x00dev, TXCSR5, &reg);
658 rt2x00_set_field32(&reg, TXCSR5_PRIO_RING_REGISTER, 685 rt2x00_set_field32(&reg, TXCSR5_PRIO_RING_REGISTER,
659 rt2x00dev->tx[IEEE80211_TX_QUEUE_DATA0].data_dma); 686 priv_tx->desc_dma);
660 rt2x00pci_register_write(rt2x00dev, TXCSR5, reg); 687 rt2x00pci_register_write(rt2x00dev, TXCSR5, reg);
661 688
689 priv_tx = rt2x00dev->bcn[1].entries[0].priv_data;
662 rt2x00pci_register_read(rt2x00dev, TXCSR4, &reg); 690 rt2x00pci_register_read(rt2x00dev, TXCSR4, &reg);
663 rt2x00_set_field32(&reg, TXCSR4_ATIM_RING_REGISTER, 691 rt2x00_set_field32(&reg, TXCSR4_ATIM_RING_REGISTER,
664 rt2x00dev->bcn[1].data_dma); 692 priv_tx->desc_dma);
665 rt2x00pci_register_write(rt2x00dev, TXCSR4, reg); 693 rt2x00pci_register_write(rt2x00dev, TXCSR4, reg);
666 694
695 priv_tx = rt2x00dev->bcn[0].entries[0].priv_data;
667 rt2x00pci_register_read(rt2x00dev, TXCSR6, &reg); 696 rt2x00pci_register_read(rt2x00dev, TXCSR6, &reg);
668 rt2x00_set_field32(&reg, TXCSR6_BEACON_RING_REGISTER, 697 rt2x00_set_field32(&reg, TXCSR6_BEACON_RING_REGISTER,
669 rt2x00dev->bcn[0].data_dma); 698 priv_tx->desc_dma);
670 rt2x00pci_register_write(rt2x00dev, TXCSR6, reg); 699 rt2x00pci_register_write(rt2x00dev, TXCSR6, reg);
671 700
672 rt2x00pci_register_read(rt2x00dev, RXCSR1, &reg); 701 rt2x00pci_register_read(rt2x00dev, RXCSR1, &reg);
673 rt2x00_set_field32(&reg, RXCSR1_RXD_SIZE, rt2x00dev->rx->desc_size); 702 rt2x00_set_field32(&reg, RXCSR1_RXD_SIZE, rt2x00dev->rx->desc_size);
674 rt2x00_set_field32(&reg, RXCSR1_NUM_RXD, rt2x00dev->rx->stats.limit); 703 rt2x00_set_field32(&reg, RXCSR1_NUM_RXD, rt2x00dev->rx->limit);
675 rt2x00pci_register_write(rt2x00dev, RXCSR1, reg); 704 rt2x00pci_register_write(rt2x00dev, RXCSR1, reg);
676 705
706 priv_rx = rt2x00dev->rx->entries[0].priv_data;
677 rt2x00pci_register_read(rt2x00dev, RXCSR2, &reg); 707 rt2x00pci_register_read(rt2x00dev, RXCSR2, &reg);
678 rt2x00_set_field32(&reg, RXCSR2_RX_RING_REGISTER, 708 rt2x00_set_field32(&reg, RXCSR2_RX_RING_REGISTER, priv_rx->desc_dma);
679 rt2x00dev->rx->data_dma);
680 rt2x00pci_register_write(rt2x00dev, RXCSR2, reg); 709 rt2x00pci_register_write(rt2x00dev, RXCSR2, reg);
681 710
682 return 0; 711 return 0;
@@ -795,19 +824,15 @@ continue_csr_init:
795 rt2400pci_bbp_write(rt2x00dev, 30, 0x21); 824 rt2400pci_bbp_write(rt2x00dev, 30, 0x21);
796 rt2400pci_bbp_write(rt2x00dev, 31, 0x00); 825 rt2400pci_bbp_write(rt2x00dev, 31, 0x00);
797 826
798 DEBUG(rt2x00dev, "Start initialization from EEPROM...\n");
799 for (i = 0; i < EEPROM_BBP_SIZE; i++) { 827 for (i = 0; i < EEPROM_BBP_SIZE; i++) {
800 rt2x00_eeprom_read(rt2x00dev, EEPROM_BBP_START + i, &eeprom); 828 rt2x00_eeprom_read(rt2x00dev, EEPROM_BBP_START + i, &eeprom);
801 829
802 if (eeprom != 0xffff && eeprom != 0x0000) { 830 if (eeprom != 0xffff && eeprom != 0x0000) {
803 reg_id = rt2x00_get_field16(eeprom, EEPROM_BBP_REG_ID); 831 reg_id = rt2x00_get_field16(eeprom, EEPROM_BBP_REG_ID);
804 value = rt2x00_get_field16(eeprom, EEPROM_BBP_VALUE); 832 value = rt2x00_get_field16(eeprom, EEPROM_BBP_VALUE);
805 DEBUG(rt2x00dev, "BBP: 0x%02x, value: 0x%02x.\n",
806 reg_id, value);
807 rt2400pci_bbp_write(rt2x00dev, reg_id, value); 833 rt2400pci_bbp_write(rt2x00dev, reg_id, value);
808 } 834 }
809 } 835 }
810 DEBUG(rt2x00dev, "...End initialization from EEPROM.\n");
811 836
812 return 0; 837 return 0;
813} 838}
@@ -859,7 +884,7 @@ static int rt2400pci_enable_radio(struct rt2x00_dev *rt2x00dev)
859 /* 884 /*
860 * Initialize all registers. 885 * Initialize all registers.
861 */ 886 */
862 if (rt2400pci_init_rings(rt2x00dev) || 887 if (rt2400pci_init_queues(rt2x00dev) ||
863 rt2400pci_init_registers(rt2x00dev) || 888 rt2400pci_init_registers(rt2x00dev) ||
864 rt2400pci_init_bbp(rt2x00dev)) { 889 rt2400pci_init_bbp(rt2x00dev)) {
865 ERROR(rt2x00dev, "Register initialization failed.\n"); 890 ERROR(rt2x00dev, "Register initialization failed.\n");
@@ -871,11 +896,6 @@ static int rt2400pci_enable_radio(struct rt2x00_dev *rt2x00dev)
871 */ 896 */
872 rt2400pci_toggle_irq(rt2x00dev, STATE_RADIO_IRQ_ON); 897 rt2400pci_toggle_irq(rt2x00dev, STATE_RADIO_IRQ_ON);
873 898
874 /*
875 * Enable LED
876 */
877 rt2400pci_enable_led(rt2x00dev);
878
879 return 0; 899 return 0;
880} 900}
881 901
@@ -883,11 +903,6 @@ static void rt2400pci_disable_radio(struct rt2x00_dev *rt2x00dev)
883{ 903{
884 u32 reg; 904 u32 reg;
885 905
886 /*
887 * Disable LED
888 */
889 rt2400pci_disable_led(rt2x00dev);
890
891 rt2x00pci_register_write(rt2x00dev, PWRCSR0, 0); 906 rt2x00pci_register_write(rt2x00dev, PWRCSR0, 0);
892 907
893 /* 908 /*
@@ -986,10 +1001,10 @@ static int rt2400pci_set_device_state(struct rt2x00_dev *rt2x00dev,
986 */ 1001 */
987static void rt2400pci_write_tx_desc(struct rt2x00_dev *rt2x00dev, 1002static void rt2400pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
988 struct sk_buff *skb, 1003 struct sk_buff *skb,
989 struct txdata_entry_desc *desc, 1004 struct txentry_desc *txdesc,
990 struct ieee80211_tx_control *control) 1005 struct ieee80211_tx_control *control)
991{ 1006{
992 struct skb_desc *skbdesc = get_skb_desc(skb); 1007 struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
993 __le32 *txd = skbdesc->desc; 1008 __le32 *txd = skbdesc->desc;
994 u32 word; 1009 u32 word;
995 1010
@@ -1001,19 +1016,19 @@ static void rt2400pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1001 rt2x00_desc_write(txd, 2, word); 1016 rt2x00_desc_write(txd, 2, word);
1002 1017
1003 rt2x00_desc_read(txd, 3, &word); 1018 rt2x00_desc_read(txd, 3, &word);
1004 rt2x00_set_field32(&word, TXD_W3_PLCP_SIGNAL, desc->signal); 1019 rt2x00_set_field32(&word, TXD_W3_PLCP_SIGNAL, txdesc->signal);
1005 rt2x00_set_field32(&word, TXD_W3_PLCP_SIGNAL_REGNUM, 5); 1020 rt2x00_set_field32(&word, TXD_W3_PLCP_SIGNAL_REGNUM, 5);
1006 rt2x00_set_field32(&word, TXD_W3_PLCP_SIGNAL_BUSY, 1); 1021 rt2x00_set_field32(&word, TXD_W3_PLCP_SIGNAL_BUSY, 1);
1007 rt2x00_set_field32(&word, TXD_W3_PLCP_SERVICE, desc->service); 1022 rt2x00_set_field32(&word, TXD_W3_PLCP_SERVICE, txdesc->service);
1008 rt2x00_set_field32(&word, TXD_W3_PLCP_SERVICE_REGNUM, 6); 1023 rt2x00_set_field32(&word, TXD_W3_PLCP_SERVICE_REGNUM, 6);
1009 rt2x00_set_field32(&word, TXD_W3_PLCP_SERVICE_BUSY, 1); 1024 rt2x00_set_field32(&word, TXD_W3_PLCP_SERVICE_BUSY, 1);
1010 rt2x00_desc_write(txd, 3, word); 1025 rt2x00_desc_write(txd, 3, word);
1011 1026
1012 rt2x00_desc_read(txd, 4, &word); 1027 rt2x00_desc_read(txd, 4, &word);
1013 rt2x00_set_field32(&word, TXD_W4_PLCP_LENGTH_LOW, desc->length_low); 1028 rt2x00_set_field32(&word, TXD_W4_PLCP_LENGTH_LOW, txdesc->length_low);
1014 rt2x00_set_field32(&word, TXD_W3_PLCP_LENGTH_LOW_REGNUM, 8); 1029 rt2x00_set_field32(&word, TXD_W3_PLCP_LENGTH_LOW_REGNUM, 8);
1015 rt2x00_set_field32(&word, TXD_W3_PLCP_LENGTH_LOW_BUSY, 1); 1030 rt2x00_set_field32(&word, TXD_W3_PLCP_LENGTH_LOW_BUSY, 1);
1016 rt2x00_set_field32(&word, TXD_W4_PLCP_LENGTH_HIGH, desc->length_high); 1031 rt2x00_set_field32(&word, TXD_W4_PLCP_LENGTH_HIGH, txdesc->length_high);
1017 rt2x00_set_field32(&word, TXD_W3_PLCP_LENGTH_HIGH_REGNUM, 7); 1032 rt2x00_set_field32(&word, TXD_W3_PLCP_LENGTH_HIGH_REGNUM, 7);
1018 rt2x00_set_field32(&word, TXD_W3_PLCP_LENGTH_HIGH_BUSY, 1); 1033 rt2x00_set_field32(&word, TXD_W3_PLCP_LENGTH_HIGH_BUSY, 1);
1019 rt2x00_desc_write(txd, 4, word); 1034 rt2x00_desc_write(txd, 4, word);
@@ -1022,14 +1037,14 @@ static void rt2400pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1022 rt2x00_set_field32(&word, TXD_W0_OWNER_NIC, 1); 1037 rt2x00_set_field32(&word, TXD_W0_OWNER_NIC, 1);
1023 rt2x00_set_field32(&word, TXD_W0_VALID, 1); 1038 rt2x00_set_field32(&word, TXD_W0_VALID, 1);
1024 rt2x00_set_field32(&word, TXD_W0_MORE_FRAG, 1039 rt2x00_set_field32(&word, TXD_W0_MORE_FRAG,
1025 test_bit(ENTRY_TXD_MORE_FRAG, &desc->flags)); 1040 test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags));
1026 rt2x00_set_field32(&word, TXD_W0_ACK, 1041 rt2x00_set_field32(&word, TXD_W0_ACK,
1027 test_bit(ENTRY_TXD_ACK, &desc->flags)); 1042 test_bit(ENTRY_TXD_ACK, &txdesc->flags));
1028 rt2x00_set_field32(&word, TXD_W0_TIMESTAMP, 1043 rt2x00_set_field32(&word, TXD_W0_TIMESTAMP,
1029 test_bit(ENTRY_TXD_REQ_TIMESTAMP, &desc->flags)); 1044 test_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags));
1030 rt2x00_set_field32(&word, TXD_W0_RTS, 1045 rt2x00_set_field32(&word, TXD_W0_RTS,
1031 test_bit(ENTRY_TXD_RTS_FRAME, &desc->flags)); 1046 test_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags));
1032 rt2x00_set_field32(&word, TXD_W0_IFS, desc->ifs); 1047 rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs);
1033 rt2x00_set_field32(&word, TXD_W0_RETRY_MODE, 1048 rt2x00_set_field32(&word, TXD_W0_RETRY_MODE,
1034 !!(control->flags & 1049 !!(control->flags &
1035 IEEE80211_TXCTL_LONG_RETRY_LIMIT)); 1050 IEEE80211_TXCTL_LONG_RETRY_LIMIT));
@@ -1040,13 +1055,15 @@ static void rt2400pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1040 * TX data initialization 1055 * TX data initialization
1041 */ 1056 */
1042static void rt2400pci_kick_tx_queue(struct rt2x00_dev *rt2x00dev, 1057static void rt2400pci_kick_tx_queue(struct rt2x00_dev *rt2x00dev,
1043 unsigned int queue) 1058 const unsigned int queue)
1044{ 1059{
1045 u32 reg; 1060 u32 reg;
1046 1061
1047 if (queue == IEEE80211_TX_QUEUE_BEACON) { 1062 if (queue == RT2X00_BCN_QUEUE_BEACON) {
1048 rt2x00pci_register_read(rt2x00dev, CSR14, &reg); 1063 rt2x00pci_register_read(rt2x00dev, CSR14, &reg);
1049 if (!rt2x00_get_field32(reg, CSR14_BEACON_GEN)) { 1064 if (!rt2x00_get_field32(reg, CSR14_BEACON_GEN)) {
1065 rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 1);
1066 rt2x00_set_field32(&reg, CSR14_TBCN, 1);
1050 rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 1); 1067 rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 1);
1051 rt2x00pci_register_write(rt2x00dev, CSR14, reg); 1068 rt2x00pci_register_write(rt2x00dev, CSR14, reg);
1052 } 1069 }
@@ -1059,56 +1076,62 @@ static void rt2400pci_kick_tx_queue(struct rt2x00_dev *rt2x00dev,
1059 rt2x00_set_field32(&reg, TXCSR0_KICK_TX, 1076 rt2x00_set_field32(&reg, TXCSR0_KICK_TX,
1060 (queue == IEEE80211_TX_QUEUE_DATA1)); 1077 (queue == IEEE80211_TX_QUEUE_DATA1));
1061 rt2x00_set_field32(&reg, TXCSR0_KICK_ATIM, 1078 rt2x00_set_field32(&reg, TXCSR0_KICK_ATIM,
1062 (queue == IEEE80211_TX_QUEUE_AFTER_BEACON)); 1079 (queue == RT2X00_BCN_QUEUE_ATIM));
1063 rt2x00pci_register_write(rt2x00dev, TXCSR0, reg); 1080 rt2x00pci_register_write(rt2x00dev, TXCSR0, reg);
1064} 1081}
1065 1082
1066/* 1083/*
1067 * RX control handlers 1084 * RX control handlers
1068 */ 1085 */
1069static void rt2400pci_fill_rxdone(struct data_entry *entry, 1086static void rt2400pci_fill_rxdone(struct queue_entry *entry,
1070 struct rxdata_entry_desc *desc) 1087 struct rxdone_entry_desc *rxdesc)
1071{ 1088{
1072 __le32 *rxd = entry->priv; 1089 struct queue_entry_priv_pci_rx *priv_rx = entry->priv_data;
1073 u32 word0; 1090 u32 word0;
1074 u32 word2; 1091 u32 word2;
1092 u32 word3;
1075 1093
1076 rt2x00_desc_read(rxd, 0, &word0); 1094 rt2x00_desc_read(priv_rx->desc, 0, &word0);
1077 rt2x00_desc_read(rxd, 2, &word2); 1095 rt2x00_desc_read(priv_rx->desc, 2, &word2);
1096 rt2x00_desc_read(priv_rx->desc, 3, &word3);
1078 1097
1079 desc->flags = 0; 1098 rxdesc->flags = 0;
1080 if (rt2x00_get_field32(word0, RXD_W0_CRC_ERROR)) 1099 if (rt2x00_get_field32(word0, RXD_W0_CRC_ERROR))
1081 desc->flags |= RX_FLAG_FAILED_FCS_CRC; 1100 rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC;
1082 if (rt2x00_get_field32(word0, RXD_W0_PHYSICAL_ERROR)) 1101 if (rt2x00_get_field32(word0, RXD_W0_PHYSICAL_ERROR))
1083 desc->flags |= RX_FLAG_FAILED_PLCP_CRC; 1102 rxdesc->flags |= RX_FLAG_FAILED_PLCP_CRC;
1084 1103
1085 /* 1104 /*
1086 * Obtain the status about this packet. 1105 * Obtain the status about this packet.
1106 * The signal is the PLCP value, and needs to be stripped
1107 * of the preamble bit (0x08).
1087 */ 1108 */
1088 desc->signal = rt2x00_get_field32(word2, RXD_W2_SIGNAL); 1109 rxdesc->signal = rt2x00_get_field32(word2, RXD_W2_SIGNAL) & ~0x08;
1089 desc->rssi = rt2x00_get_field32(word2, RXD_W2_RSSI) - 1110 rxdesc->rssi = rt2x00_get_field32(word2, RXD_W3_RSSI) -
1090 entry->ring->rt2x00dev->rssi_offset; 1111 entry->queue->rt2x00dev->rssi_offset;
1091 desc->ofdm = 0; 1112 rxdesc->size = rt2x00_get_field32(word0, RXD_W0_DATABYTE_COUNT);
1092 desc->size = rt2x00_get_field32(word0, RXD_W0_DATABYTE_COUNT); 1113
1093 desc->my_bss = !!rt2x00_get_field32(word0, RXD_W0_MY_BSS); 1114 rxdesc->dev_flags = RXDONE_SIGNAL_PLCP;
1115 if (rt2x00_get_field32(word0, RXD_W0_MY_BSS))
1116 rxdesc->dev_flags |= RXDONE_MY_BSS;
1094} 1117}
1095 1118
1096/* 1119/*
1097 * Interrupt functions. 1120 * Interrupt functions.
1098 */ 1121 */
1099static void rt2400pci_txdone(struct rt2x00_dev *rt2x00dev, const int queue) 1122static void rt2400pci_txdone(struct rt2x00_dev *rt2x00dev,
1123 const enum ieee80211_tx_queue queue_idx)
1100{ 1124{
1101 struct data_ring *ring = rt2x00lib_get_ring(rt2x00dev, queue); 1125 struct data_queue *queue = rt2x00queue_get_queue(rt2x00dev, queue_idx);
1102 struct data_entry *entry; 1126 struct queue_entry_priv_pci_tx *priv_tx;
1103 __le32 *txd; 1127 struct queue_entry *entry;
1128 struct txdone_entry_desc txdesc;
1104 u32 word; 1129 u32 word;
1105 int tx_status;
1106 int retry;
1107 1130
1108 while (!rt2x00_ring_empty(ring)) { 1131 while (!rt2x00queue_empty(queue)) {
1109 entry = rt2x00_get_data_entry_done(ring); 1132 entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
1110 txd = entry->priv; 1133 priv_tx = entry->priv_data;
1111 rt2x00_desc_read(txd, 0, &word); 1134 rt2x00_desc_read(priv_tx->desc, 0, &word);
1112 1135
1113 if (rt2x00_get_field32(word, TXD_W0_OWNER_NIC) || 1136 if (rt2x00_get_field32(word, TXD_W0_OWNER_NIC) ||
1114 !rt2x00_get_field32(word, TXD_W0_VALID)) 1137 !rt2x00_get_field32(word, TXD_W0_VALID))
@@ -1117,10 +1140,10 @@ static void rt2400pci_txdone(struct rt2x00_dev *rt2x00dev, const int queue)
1117 /* 1140 /*
1118 * Obtain the status about this packet. 1141 * Obtain the status about this packet.
1119 */ 1142 */
1120 tx_status = rt2x00_get_field32(word, TXD_W0_RESULT); 1143 txdesc.status = rt2x00_get_field32(word, TXD_W0_RESULT);
1121 retry = rt2x00_get_field32(word, TXD_W0_RETRY_COUNT); 1144 txdesc.retry = rt2x00_get_field32(word, TXD_W0_RETRY_COUNT);
1122 1145
1123 rt2x00pci_txdone(rt2x00dev, entry, tx_status, retry); 1146 rt2x00pci_txdone(rt2x00dev, entry, &txdesc);
1124 } 1147 }
1125} 1148}
1126 1149
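The fill_rxdone change above notes that the reported signal is the raw PLCP value and that the preamble bit (0x08) must be stripped before the value is handed up the stack. A stand-alone sketch of that mask-and-shift pattern follows; the field offset, width, and sample value are invented for illustration and do not describe the real RXD_W2 layout.

```c
#include <stdint.h>
#include <stdio.h>

/* Hypothetical field layout: SIGNAL in bits 8..15 of an RX descriptor word. */
#define SIGNAL_SHIFT   8
#define SIGNAL_MASK    (0xffu << SIGNAL_SHIFT)
#define PLCP_SHORT_PRE 0x08u   /* short-preamble flag inside the PLCP value */

static unsigned int rx_signal(uint32_t word)
{
	unsigned int signal = (word & SIGNAL_MASK) >> SIGNAL_SHIFT;

	/* Strip the preamble bit so rate lookup sees the plain PLCP value. */
	return signal & ~PLCP_SHORT_PRE;
}

int main(void)
{
	/* Example only: raw SIGNAL 0x09, i.e. 0x01 with the 0x08 bit set. */
	uint32_t word = 0x09u << SIGNAL_SHIFT;

	printf("plcp signal: 0x%02x\n", rx_signal(word));
	return 0;
}
```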
@@ -1164,7 +1187,7 @@ static irqreturn_t rt2400pci_interrupt(int irq, void *dev_instance)
1164 * 3 - Atim ring transmit done interrupt. 1187 * 3 - Atim ring transmit done interrupt.
1165 */ 1188 */
1166 if (rt2x00_get_field32(reg, CSR7_TXDONE_ATIMRING)) 1189 if (rt2x00_get_field32(reg, CSR7_TXDONE_ATIMRING))
1167 rt2400pci_txdone(rt2x00dev, IEEE80211_TX_QUEUE_AFTER_BEACON); 1190 rt2400pci_txdone(rt2x00dev, RT2X00_BCN_QUEUE_ATIM);
1168 1191
1169 /* 1192 /*
1170 * 4 - Priority ring transmit done interrupt. 1193 * 4 - Priority ring transmit done interrupt.
@@ -1272,8 +1295,27 @@ static int rt2400pci_init_eeprom(struct rt2x00_dev *rt2x00dev)
1272 /* 1295 /*
1273 * Store led mode, for correct led behaviour. 1296 * Store led mode, for correct led behaviour.
1274 */ 1297 */
1275 rt2x00dev->led_mode = 1298#ifdef CONFIG_RT2400PCI_LEDS
1276 rt2x00_get_field16(eeprom, EEPROM_ANTENNA_LED_MODE); 1299 value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_LED_MODE);
1300
1301 rt2x00dev->led_radio.rt2x00dev = rt2x00dev;
1302 rt2x00dev->led_radio.type = LED_TYPE_RADIO;
1303 rt2x00dev->led_radio.led_dev.brightness_set =
1304 rt2400pci_brightness_set;
1305 rt2x00dev->led_radio.led_dev.blink_set =
1306 rt2400pci_blink_set;
1307 rt2x00dev->led_radio.flags = LED_INITIALIZED;
1308
1309 if (value == LED_MODE_TXRX_ACTIVITY) {
1310 rt2x00dev->led_qual.rt2x00dev = rt2x00dev;
1311 rt2x00dev->led_radio.type = LED_TYPE_ACTIVITY;
1312 rt2x00dev->led_qual.led_dev.brightness_set =
1313 rt2400pci_brightness_set;
1314 rt2x00dev->led_qual.led_dev.blink_set =
1315 rt2400pci_blink_set;
1316 rt2x00dev->led_qual.flags = LED_INITIALIZED;
1317 }
1318#endif /* CONFIG_RT2400PCI_LEDS */
1277 1319
1278 /* 1320 /*
1279 * Detect if this device has a hardware controlled radio. 1321 * Detect if this device has a hardware controlled radio.
@@ -1343,8 +1385,8 @@ static void rt2400pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
1343 /* 1385 /*
1344 * Initialize hw_mode information. 1386 * Initialize hw_mode information.
1345 */ 1387 */
1346 spec->num_modes = 1; 1388 spec->supported_bands = SUPPORT_BAND_2GHZ;
1347 spec->num_rates = 4; 1389 spec->supported_rates = SUPPORT_RATE_CCK;
1348 spec->tx_power_a = NULL; 1390 spec->tx_power_a = NULL;
1349 spec->tx_power_bg = txpower; 1391 spec->tx_power_bg = txpower;
1350 spec->tx_power_default = DEFAULT_TXPOWER; 1392 spec->tx_power_default = DEFAULT_TXPOWER;
@@ -1374,9 +1416,9 @@ static int rt2400pci_probe_hw(struct rt2x00_dev *rt2x00dev)
1374 rt2400pci_probe_hw_mode(rt2x00dev); 1416 rt2400pci_probe_hw_mode(rt2x00dev);
1375 1417
1376 /* 1418 /*
1377 * This device requires the beacon ring 1419 * This device requires the atim queue
1378 */ 1420 */
1379 __set_bit(DRIVER_REQUIRE_BEACON_RING, &rt2x00dev->flags); 1421 __set_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags);
1380 1422
1381 /* 1423 /*
1382 * Set the rssi offset. 1424 * Set the rssi offset.
@@ -1389,64 +1431,6 @@ static int rt2400pci_probe_hw(struct rt2x00_dev *rt2x00dev)
1389/* 1431/*
1390 * IEEE80211 stack callback functions. 1432 * IEEE80211 stack callback functions.
1391 */ 1433 */
1392static void rt2400pci_configure_filter(struct ieee80211_hw *hw,
1393 unsigned int changed_flags,
1394 unsigned int *total_flags,
1395 int mc_count,
1396 struct dev_addr_list *mc_list)
1397{
1398 struct rt2x00_dev *rt2x00dev = hw->priv;
1399 u32 reg;
1400
1401 /*
1402 * Mask off any flags we are going to ignore from
1403 * the total_flags field.
1404 */
1405 *total_flags &=
1406 FIF_ALLMULTI |
1407 FIF_FCSFAIL |
1408 FIF_PLCPFAIL |
1409 FIF_CONTROL |
1410 FIF_OTHER_BSS |
1411 FIF_PROMISC_IN_BSS;
1412
1413 /*
1414 * Apply some rules to the filters:
1415 * - Some filters imply different filters to be set.
1416 * - Some things we can't filter out at all.
1417 */
1418 *total_flags |= FIF_ALLMULTI;
1419 if (*total_flags & FIF_OTHER_BSS ||
1420 *total_flags & FIF_PROMISC_IN_BSS)
1421 *total_flags |= FIF_PROMISC_IN_BSS | FIF_OTHER_BSS;
1422
1423 /*
1424 * Check if there is any work left for us.
1425 */
1426 if (rt2x00dev->packet_filter == *total_flags)
1427 return;
1428 rt2x00dev->packet_filter = *total_flags;
1429
1430 /*
1431 * Start configuration steps.
1432 * Note that the version error will always be dropped
1433 * since there is no filter for it at this time.
1434 */
1435 rt2x00pci_register_read(rt2x00dev, RXCSR0, &reg);
1436 rt2x00_set_field32(&reg, RXCSR0_DROP_CRC,
1437 !(*total_flags & FIF_FCSFAIL));
1438 rt2x00_set_field32(&reg, RXCSR0_DROP_PHYSICAL,
1439 !(*total_flags & FIF_PLCPFAIL));
1440 rt2x00_set_field32(&reg, RXCSR0_DROP_CONTROL,
1441 !(*total_flags & FIF_CONTROL));
1442 rt2x00_set_field32(&reg, RXCSR0_DROP_NOT_TO_ME,
1443 !(*total_flags & FIF_PROMISC_IN_BSS));
1444 rt2x00_set_field32(&reg, RXCSR0_DROP_TODS,
1445 !(*total_flags & FIF_PROMISC_IN_BSS));
1446 rt2x00_set_field32(&reg, RXCSR0_DROP_VERSION_ERROR, 1);
1447 rt2x00pci_register_write(rt2x00dev, RXCSR0, reg);
1448}
1449
1450static int rt2400pci_set_retry_limit(struct ieee80211_hw *hw, 1434static int rt2400pci_set_retry_limit(struct ieee80211_hw *hw,
1451 u32 short_retry, u32 long_retry) 1435 u32 short_retry, u32 long_retry)
1452{ 1436{
@@ -1481,7 +1465,8 @@ static int rt2400pci_conf_tx(struct ieee80211_hw *hw,
1481 /* 1465 /*
1482 * Write configuration to register. 1466 * Write configuration to register.
1483 */ 1467 */
1484 rt2400pci_config_cw(rt2x00dev, &rt2x00dev->tx->tx_params); 1468 rt2400pci_config_cw(rt2x00dev,
1469 rt2x00dev->tx->cw_min, rt2x00dev->tx->cw_max);
1485 1470
1486 return 0; 1471 return 0;
1487} 1472}
@@ -1500,12 +1485,58 @@ static u64 rt2400pci_get_tsf(struct ieee80211_hw *hw)
1500 return tsf; 1485 return tsf;
1501} 1486}
1502 1487
1503static void rt2400pci_reset_tsf(struct ieee80211_hw *hw) 1488static int rt2400pci_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb,
1489 struct ieee80211_tx_control *control)
1504{ 1490{
1505 struct rt2x00_dev *rt2x00dev = hw->priv; 1491 struct rt2x00_dev *rt2x00dev = hw->priv;
1492 struct rt2x00_intf *intf = vif_to_intf(control->vif);
1493 struct queue_entry_priv_pci_tx *priv_tx;
1494 struct skb_frame_desc *skbdesc;
1495 u32 reg;
1496
1497 if (unlikely(!intf->beacon))
1498 return -ENOBUFS;
1499 priv_tx = intf->beacon->priv_data;
1506 1500
1507 rt2x00pci_register_write(rt2x00dev, CSR16, 0); 1501 /*
1508 rt2x00pci_register_write(rt2x00dev, CSR17, 0); 1502 * Fill in skb descriptor
1503 */
1504 skbdesc = get_skb_frame_desc(skb);
1505 memset(skbdesc, 0, sizeof(*skbdesc));
1506 skbdesc->flags |= FRAME_DESC_DRIVER_GENERATED;
1507 skbdesc->data = skb->data;
1508 skbdesc->data_len = skb->len;
1509 skbdesc->desc = priv_tx->desc;
1510 skbdesc->desc_len = intf->beacon->queue->desc_size;
1511 skbdesc->entry = intf->beacon;
1512
1513 /*
1514 * Disable beaconing while we are reloading the beacon data,
1515 * otherwise we might be sending out invalid data.
1516 */
1517 rt2x00pci_register_read(rt2x00dev, CSR14, &reg);
1518 rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 0);
1519 rt2x00_set_field32(&reg, CSR14_TBCN, 0);
1520 rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 0);
1521 rt2x00pci_register_write(rt2x00dev, CSR14, reg);
1522
1523 /*
1524 * mac80211 doesn't provide the control->queue variable
1525 * for beacons. Set our own queue identification so
1526 * it can be used during descriptor initialization.
1527 */
1528 control->queue = RT2X00_BCN_QUEUE_BEACON;
1529 rt2x00lib_write_tx_desc(rt2x00dev, skb, control);
1530
1531 /*
1532 * Enable beacon generation.
1533 * Write entire beacon with descriptor to register,
1534 * and kick the beacon generator.
1535 */
1536 memcpy(priv_tx->data, skb->data, skb->len);
1537 rt2x00dev->ops->lib->kick_tx_queue(rt2x00dev, control->queue);
1538
1539 return 0;
1509} 1540}
1510 1541
1511static int rt2400pci_tx_last_beacon(struct ieee80211_hw *hw) 1542static int rt2400pci_tx_last_beacon(struct ieee80211_hw *hw)
@@ -1525,15 +1556,14 @@ static const struct ieee80211_ops rt2400pci_mac80211_ops = {
1525 .remove_interface = rt2x00mac_remove_interface, 1556 .remove_interface = rt2x00mac_remove_interface,
1526 .config = rt2x00mac_config, 1557 .config = rt2x00mac_config,
1527 .config_interface = rt2x00mac_config_interface, 1558 .config_interface = rt2x00mac_config_interface,
1528 .configure_filter = rt2400pci_configure_filter, 1559 .configure_filter = rt2x00mac_configure_filter,
1529 .get_stats = rt2x00mac_get_stats, 1560 .get_stats = rt2x00mac_get_stats,
1530 .set_retry_limit = rt2400pci_set_retry_limit, 1561 .set_retry_limit = rt2400pci_set_retry_limit,
1531 .bss_info_changed = rt2x00mac_bss_info_changed, 1562 .bss_info_changed = rt2x00mac_bss_info_changed,
1532 .conf_tx = rt2400pci_conf_tx, 1563 .conf_tx = rt2400pci_conf_tx,
1533 .get_tx_stats = rt2x00mac_get_tx_stats, 1564 .get_tx_stats = rt2x00mac_get_tx_stats,
1534 .get_tsf = rt2400pci_get_tsf, 1565 .get_tsf = rt2400pci_get_tsf,
1535 .reset_tsf = rt2400pci_reset_tsf, 1566 .beacon_update = rt2400pci_beacon_update,
1536 .beacon_update = rt2x00pci_beacon_update,
1537 .tx_last_beacon = rt2400pci_tx_last_beacon, 1567 .tx_last_beacon = rt2400pci_tx_last_beacon,
1538}; 1568};
1539 1569
@@ -1553,19 +1583,50 @@ static const struct rt2x00lib_ops rt2400pci_rt2x00_ops = {
1553 .write_tx_data = rt2x00pci_write_tx_data, 1583 .write_tx_data = rt2x00pci_write_tx_data,
1554 .kick_tx_queue = rt2400pci_kick_tx_queue, 1584 .kick_tx_queue = rt2400pci_kick_tx_queue,
1555 .fill_rxdone = rt2400pci_fill_rxdone, 1585 .fill_rxdone = rt2400pci_fill_rxdone,
1556 .config_mac_addr = rt2400pci_config_mac_addr, 1586 .config_filter = rt2400pci_config_filter,
1557 .config_bssid = rt2400pci_config_bssid, 1587 .config_intf = rt2400pci_config_intf,
1558 .config_type = rt2400pci_config_type, 1588 .config_erp = rt2400pci_config_erp,
1559 .config_preamble = rt2400pci_config_preamble,
1560 .config = rt2400pci_config, 1589 .config = rt2400pci_config,
1561}; 1590};
1562 1591
1592static const struct data_queue_desc rt2400pci_queue_rx = {
1593 .entry_num = RX_ENTRIES,
1594 .data_size = DATA_FRAME_SIZE,
1595 .desc_size = RXD_DESC_SIZE,
1596 .priv_size = sizeof(struct queue_entry_priv_pci_rx),
1597};
1598
1599static const struct data_queue_desc rt2400pci_queue_tx = {
1600 .entry_num = TX_ENTRIES,
1601 .data_size = DATA_FRAME_SIZE,
1602 .desc_size = TXD_DESC_SIZE,
1603 .priv_size = sizeof(struct queue_entry_priv_pci_tx),
1604};
1605
1606static const struct data_queue_desc rt2400pci_queue_bcn = {
1607 .entry_num = BEACON_ENTRIES,
1608 .data_size = MGMT_FRAME_SIZE,
1609 .desc_size = TXD_DESC_SIZE,
1610 .priv_size = sizeof(struct queue_entry_priv_pci_tx),
1611};
1612
1613static const struct data_queue_desc rt2400pci_queue_atim = {
1614 .entry_num = ATIM_ENTRIES,
1615 .data_size = DATA_FRAME_SIZE,
1616 .desc_size = TXD_DESC_SIZE,
1617 .priv_size = sizeof(struct queue_entry_priv_pci_tx),
1618};
1619
1563static const struct rt2x00_ops rt2400pci_ops = { 1620static const struct rt2x00_ops rt2400pci_ops = {
1564 .name = KBUILD_MODNAME, 1621 .name = KBUILD_MODNAME,
1565 .rxd_size = RXD_DESC_SIZE, 1622 .max_sta_intf = 1,
1566 .txd_size = TXD_DESC_SIZE, 1623 .max_ap_intf = 1,
1567 .eeprom_size = EEPROM_SIZE, 1624 .eeprom_size = EEPROM_SIZE,
1568 .rf_size = RF_SIZE, 1625 .rf_size = RF_SIZE,
1626 .rx = &rt2400pci_queue_rx,
1627 .tx = &rt2400pci_queue_tx,
1628 .bcn = &rt2400pci_queue_bcn,
1629 .atim = &rt2400pci_queue_atim,
1569 .lib = &rt2400pci_rt2x00_ops, 1630 .lib = &rt2400pci_rt2x00_ops,
1570 .hw = &rt2400pci_mac80211_ops, 1631 .hw = &rt2400pci_mac80211_ops,
1571#ifdef CONFIG_RT2X00_LIB_DEBUGFS 1632#ifdef CONFIG_RT2X00_LIB_DEBUGFS
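
The rt2400pci_beacon_update() handler added above follows a disable/rewrite/kick sequence: beacon generation is switched off in CSR14, the new frame and its descriptor are copied into the beacon queue entry, and the generator is kicked again so no half-written beacon goes on the air. Below is a minimal, self-contained user-space sketch of that sequence; the register helpers, bit positions and beacon_ram buffer are invented stand-ins, not the rt2x00 API.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CSR14_TSF_COUNT  (1u << 0)
#define CSR14_TBCN       (1u << 1)
#define CSR14_BEACON_GEN (1u << 2)

static uint32_t csr14;            /* fake device register */
static uint8_t beacon_ram[256];   /* fake on-device beacon buffer */

static void reg_write(uint32_t *reg, uint32_t val) { *reg = val; }
static uint32_t reg_read(const uint32_t *reg) { return *reg; }

/* Update the beacon frame without letting the MAC transmit a half-written one. */
static void beacon_update(const uint8_t *frame, size_t len)
{
	uint32_t reg = reg_read(&csr14);

	/* 1. Stop TSF/beacon generation while the buffer is being rewritten. */
	reg &= ~(CSR14_TSF_COUNT | CSR14_TBCN | CSR14_BEACON_GEN);
	reg_write(&csr14, reg);

	/* 2. Copy the new beacon (descriptor plus payload in the real driver). */
	memcpy(beacon_ram, frame, len < sizeof(beacon_ram) ? len : sizeof(beacon_ram));

	/* 3. Re-enable the generator; the kick_tx_queue() hook does this in rt2x00. */
	reg |= CSR14_TSF_COUNT | CSR14_TBCN | CSR14_BEACON_GEN;
	reg_write(&csr14, reg);
}

int main(void)
{
	uint8_t beacon[] = { 0x80, 0x00, 0x00, 0x00 };  /* start of an 802.11 beacon header */

	beacon_update(beacon, sizeof(beacon));
	printf("CSR14 = 0x%08x\n", csr14);
	return 0;
}
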
diff --git a/drivers/net/wireless/rt2x00/rt2400pci.h b/drivers/net/wireless/rt2x00/rt2400pci.h
index 369aac6d0336..a5210f9a3360 100644
--- a/drivers/net/wireless/rt2x00/rt2400pci.h
+++ b/drivers/net/wireless/rt2x00/rt2400pci.h
@@ -1,5 +1,5 @@
1/* 1/*
2 Copyright (C) 2004 - 2007 rt2x00 SourceForge Project 2 Copyright (C) 2004 - 2008 rt2x00 SourceForge Project
3 <http://rt2x00.serialmonkey.com> 3 <http://rt2x00.serialmonkey.com>
4 4
5 This program is free software; you can redistribute it and/or modify 5 This program is free software; you can redistribute it and/or modify
@@ -899,13 +899,13 @@
899 * Word2 899 * Word2
900 */ 900 */
901#define RXD_W2_BUFFER_LENGTH FIELD32(0x0000ffff) 901#define RXD_W2_BUFFER_LENGTH FIELD32(0x0000ffff)
902#define RXD_W2_SIGNAL FIELD32(0x00ff0000) 902#define RXD_W2_BBR0 FIELD32(0x00ff0000)
903#define RXD_W2_RSSI FIELD32(0xff000000) 903#define RXD_W2_SIGNAL FIELD32(0xff000000)
904 904
905/* 905/*
906 * Word3 906 * Word3
907 */ 907 */
908#define RXD_W3_BBR2 FIELD32(0x000000ff) 908#define RXD_W3_RSSI FIELD32(0x000000ff)
909#define RXD_W3_BBR3 FIELD32(0x0000ff00) 909#define RXD_W3_BBR3 FIELD32(0x0000ff00)
910#define RXD_W3_BBR4 FIELD32(0x00ff0000) 910#define RXD_W3_BBR4 FIELD32(0x00ff0000)
911#define RXD_W3_BBR5 FIELD32(0xff000000) 911#define RXD_W3_BBR5 FIELD32(0xff000000)
@@ -923,13 +923,13 @@
923#define RXD_W7_RESERVED FIELD32(0xffffffff) 923#define RXD_W7_RESERVED FIELD32(0xffffffff)
924 924
925/* 925/*
926 * Macros for converting txpower from EEPROM to dscape value 926 * Macros for converting txpower from EEPROM to mac80211 value
927 * and from dscape value to register value. 927 * and from mac80211 value to register value.
928 * NOTE: Logic in rt2400pci for txpower is reversed 928 * NOTE: Logic in rt2400pci for txpower is reversed
929 * compared to the other rt2x00 drivers. A higher txpower 929 * compared to the other rt2x00 drivers. A higher txpower
930 * value means that the txpower must be lowered. This is 930 * value means that the txpower must be lowered. This is
931 * important when converting the value coming from the 931 * important when converting the value coming from the
932 * dscape stack to the rt2400 acceptable value. 932 * mac80211 stack to the rt2400 acceptable value.
933 */ 933 */
934#define MIN_TXPOWER 31 934#define MIN_TXPOWER 31
935#define MAX_TXPOWER 62 935#define MAX_TXPOWER 62
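
The note above explains that rt2400 txpower handling is reversed: a larger register value means less output power, bounded by MIN_TXPOWER (31) and MAX_TXPOWER (62). The following is a hypothetical conversion helper that illustrates the mapping; the function name and clamping behaviour are assumptions for the example, not the driver's actual macros.

#include <stdio.h>

#define RT2400_MIN_TXPOWER 31   /* register value giving the HIGHEST output power */
#define RT2400_MAX_TXPOWER 62   /* register value giving the LOWEST output power */

/* Convert a 0..31 mac80211-style txpower index to the reversed register scale. */
static int txpower_to_dev(int txpower)
{
	if (txpower < 0)
		txpower = 0;
	if (txpower > RT2400_MAX_TXPOWER - RT2400_MIN_TXPOWER)
		txpower = RT2400_MAX_TXPOWER - RT2400_MIN_TXPOWER;

	/* Higher requested power means a lower register value. */
	return RT2400_MAX_TXPOWER - txpower;
}

int main(void)
{
	printf("index 0  -> reg %d\n", txpower_to_dev(0));   /* 62: lowest power  */
	printf("index 31 -> reg %d\n", txpower_to_dev(31));  /* 31: highest power */
	return 0;
}
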
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c
index 91e87b53374f..5ade097ed45e 100644
--- a/drivers/net/wireless/rt2x00/rt2500pci.c
+++ b/drivers/net/wireless/rt2x00/rt2500pci.c
@@ -1,5 +1,5 @@
1/* 1/*
2 Copyright (C) 2004 - 2007 rt2x00 SourceForge Project 2 Copyright (C) 2004 - 2008 rt2x00 SourceForge Project
3 <http://rt2x00.serialmonkey.com> 3 <http://rt2x00.serialmonkey.com>
4 4
5 This program is free software; you can redistribute it and/or modify 5 This program is free software; you can redistribute it and/or modify
@@ -243,57 +243,116 @@ static int rt2500pci_rfkill_poll(struct rt2x00_dev *rt2x00dev)
243#define rt2500pci_rfkill_poll NULL 243#define rt2500pci_rfkill_poll NULL
244#endif /* CONFIG_RT2500PCI_RFKILL */ 244#endif /* CONFIG_RT2500PCI_RFKILL */
245 245
246/* 246#ifdef CONFIG_RT2500PCI_LEDS
247 * Configuration handlers. 247static void rt2500pci_brightness_set(struct led_classdev *led_cdev,
248 */ 248 enum led_brightness brightness)
249static void rt2500pci_config_mac_addr(struct rt2x00_dev *rt2x00dev,
250 __le32 *mac)
251{ 249{
252 rt2x00pci_register_multiwrite(rt2x00dev, CSR3, mac, 250 struct rt2x00_led *led =
253 (2 * sizeof(__le32))); 251 container_of(led_cdev, struct rt2x00_led, led_dev);
252 unsigned int enabled = brightness != LED_OFF;
253 u32 reg;
254
255 rt2x00pci_register_read(led->rt2x00dev, LEDCSR, &reg);
256
257 if (led->type == LED_TYPE_RADIO || led->type == LED_TYPE_ASSOC)
258 rt2x00_set_field32(&reg, LEDCSR_LINK, enabled);
259 else if (led->type == LED_TYPE_ACTIVITY)
260 rt2x00_set_field32(&reg, LEDCSR_ACTIVITY, enabled);
261
262 rt2x00pci_register_write(led->rt2x00dev, LEDCSR, reg);
254} 263}
255 264
256static void rt2500pci_config_bssid(struct rt2x00_dev *rt2x00dev, 265static int rt2500pci_blink_set(struct led_classdev *led_cdev,
257 __le32 *bssid) 266 unsigned long *delay_on,
267 unsigned long *delay_off)
258{ 268{
259 rt2x00pci_register_multiwrite(rt2x00dev, CSR5, bssid, 269 struct rt2x00_led *led =
260 (2 * sizeof(__le32))); 270 container_of(led_cdev, struct rt2x00_led, led_dev);
271 u32 reg;
272
273 rt2x00pci_register_read(led->rt2x00dev, LEDCSR, &reg);
274 rt2x00_set_field32(&reg, LEDCSR_ON_PERIOD, *delay_on);
275 rt2x00_set_field32(&reg, LEDCSR_OFF_PERIOD, *delay_off);
276 rt2x00pci_register_write(led->rt2x00dev, LEDCSR, reg);
277
278 return 0;
261} 279}
280#endif /* CONFIG_RT2500PCI_LEDS */
262 281
263static void rt2500pci_config_type(struct rt2x00_dev *rt2x00dev, const int type, 282/*
264 const int tsf_sync) 283 * Configuration handlers.
284 */
285static void rt2500pci_config_filter(struct rt2x00_dev *rt2x00dev,
286 const unsigned int filter_flags)
265{ 287{
266 u32 reg; 288 u32 reg;
267 289
268 rt2x00pci_register_write(rt2x00dev, CSR14, 0);
269
270 /* 290 /*
271 * Enable beacon config 291 * Start configuration steps.
292 * Note that the version error will always be dropped
293 * and broadcast frames will always be accepted since
294 * there is no filter for it at this time.
272 */ 295 */
273 rt2x00pci_register_read(rt2x00dev, BCNCSR1, &reg); 296 rt2x00pci_register_read(rt2x00dev, RXCSR0, &reg);
274 rt2x00_set_field32(&reg, BCNCSR1_PRELOAD, 297 rt2x00_set_field32(&reg, RXCSR0_DROP_CRC,
275 PREAMBLE + get_duration(IEEE80211_HEADER, 20)); 298 !(filter_flags & FIF_FCSFAIL));
276 rt2x00_set_field32(&reg, BCNCSR1_BEACON_CWMIN, 299 rt2x00_set_field32(&reg, RXCSR0_DROP_PHYSICAL,
277 rt2x00lib_get_ring(rt2x00dev, 300 !(filter_flags & FIF_PLCPFAIL));
278 IEEE80211_TX_QUEUE_BEACON) 301 rt2x00_set_field32(&reg, RXCSR0_DROP_CONTROL,
279 ->tx_params.cw_min); 302 !(filter_flags & FIF_CONTROL));
280 rt2x00pci_register_write(rt2x00dev, BCNCSR1, reg); 303 rt2x00_set_field32(&reg, RXCSR0_DROP_NOT_TO_ME,
304 !(filter_flags & FIF_PROMISC_IN_BSS));
305 rt2x00_set_field32(&reg, RXCSR0_DROP_TODS,
306 !(filter_flags & FIF_PROMISC_IN_BSS) &&
307 !rt2x00dev->intf_ap_count);
308 rt2x00_set_field32(&reg, RXCSR0_DROP_VERSION_ERROR, 1);
309 rt2x00_set_field32(&reg, RXCSR0_DROP_MCAST,
310 !(filter_flags & FIF_ALLMULTI));
311 rt2x00_set_field32(&reg, RXCSR0_DROP_BCAST, 0);
312 rt2x00pci_register_write(rt2x00dev, RXCSR0, reg);
313}
281 314
282 /* 315static void rt2500pci_config_intf(struct rt2x00_dev *rt2x00dev,
283 * Enable synchronisation. 316 struct rt2x00_intf *intf,
284 */ 317 struct rt2x00intf_conf *conf,
285 rt2x00pci_register_read(rt2x00dev, CSR14, &reg); 318 const unsigned int flags)
286 rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 1); 319{
287 rt2x00_set_field32(&reg, CSR14_TBCN, (tsf_sync == TSF_SYNC_BEACON)); 320 struct data_queue *queue =
288 rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 0); 321 rt2x00queue_get_queue(rt2x00dev, RT2X00_BCN_QUEUE_BEACON);
289 rt2x00_set_field32(&reg, CSR14_TSF_SYNC, tsf_sync); 322 unsigned int bcn_preload;
290 rt2x00pci_register_write(rt2x00dev, CSR14, reg); 323 u32 reg;
324
325 if (flags & CONFIG_UPDATE_TYPE) {
326 /*
327 * Enable beacon config
328 */
329 bcn_preload = PREAMBLE + get_duration(IEEE80211_HEADER, 20);
330 rt2x00pci_register_read(rt2x00dev, BCNCSR1, &reg);
331 rt2x00_set_field32(&reg, BCNCSR1_PRELOAD, bcn_preload);
332 rt2x00_set_field32(&reg, BCNCSR1_BEACON_CWMIN, queue->cw_min);
333 rt2x00pci_register_write(rt2x00dev, BCNCSR1, reg);
334
335 /*
336 * Enable synchronisation.
337 */
338 rt2x00pci_register_read(rt2x00dev, CSR14, &reg);
339 rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 1);
340 rt2x00_set_field32(&reg, CSR14_TSF_SYNC, conf->sync);
341 rt2x00_set_field32(&reg, CSR14_TBCN, 1);
342 rt2x00pci_register_write(rt2x00dev, CSR14, reg);
343 }
344
345 if (flags & CONFIG_UPDATE_MAC)
346 rt2x00pci_register_multiwrite(rt2x00dev, CSR3,
347 conf->mac, sizeof(conf->mac));
348
349 if (flags & CONFIG_UPDATE_BSSID)
350 rt2x00pci_register_multiwrite(rt2x00dev, CSR5,
351 conf->bssid, sizeof(conf->bssid));
291} 352}
292 353
293static void rt2500pci_config_preamble(struct rt2x00_dev *rt2x00dev, 354static void rt2500pci_config_erp(struct rt2x00_dev *rt2x00dev,
294 const int short_preamble, 355 struct rt2x00lib_erp *erp)
295 const int ack_timeout,
296 const int ack_consume_time)
297{ 356{
298 int preamble_mask; 357 int preamble_mask;
299 u32 reg; 358 u32 reg;
@@ -301,11 +360,13 @@ static void rt2500pci_config_preamble(struct rt2x00_dev *rt2x00dev,
301 /* 360 /*
302 * When short preamble is enabled, we should set bit 0x08 361 * When short preamble is enabled, we should set bit 0x08
303 */ 362 */
304 preamble_mask = short_preamble << 3; 363 preamble_mask = erp->short_preamble << 3;
305 364
306 rt2x00pci_register_read(rt2x00dev, TXCSR1, &reg); 365 rt2x00pci_register_read(rt2x00dev, TXCSR1, &reg);
307 rt2x00_set_field32(&reg, TXCSR1_ACK_TIMEOUT, ack_timeout); 366 rt2x00_set_field32(&reg, TXCSR1_ACK_TIMEOUT,
308 rt2x00_set_field32(&reg, TXCSR1_ACK_CONSUME_TIME, ack_consume_time); 367 erp->ack_timeout);
368 rt2x00_set_field32(&reg, TXCSR1_ACK_CONSUME_TIME,
369 erp->ack_consume_time);
309 rt2x00pci_register_write(rt2x00dev, TXCSR1, reg); 370 rt2x00pci_register_write(rt2x00dev, TXCSR1, reg);
310 371
311 rt2x00pci_register_read(rt2x00dev, ARCSR2, &reg); 372 rt2x00pci_register_read(rt2x00dev, ARCSR2, &reg);
@@ -425,6 +486,13 @@ static void rt2500pci_config_antenna(struct rt2x00_dev *rt2x00dev,
425 u8 r14; 486 u8 r14;
426 u8 r2; 487 u8 r2;
427 488
489 /*
490 * We should never come here because rt2x00lib is supposed
491 * to catch this and send us the correct antenna explicitly.
492 */
493 BUG_ON(ant->rx == ANTENNA_SW_DIVERSITY ||
494 ant->tx == ANTENNA_SW_DIVERSITY);
495
428 rt2x00pci_register_read(rt2x00dev, BBPCSR1, &reg); 496 rt2x00pci_register_read(rt2x00dev, BBPCSR1, &reg);
429 rt2500pci_bbp_read(rt2x00dev, 14, &r14); 497 rt2500pci_bbp_read(rt2x00dev, 14, &r14);
430 rt2500pci_bbp_read(rt2x00dev, 2, &r2); 498 rt2500pci_bbp_read(rt2x00dev, 2, &r2);
@@ -438,15 +506,8 @@ static void rt2500pci_config_antenna(struct rt2x00_dev *rt2x00dev,
438 rt2x00_set_field32(&reg, BBPCSR1_CCK, 0); 506 rt2x00_set_field32(&reg, BBPCSR1_CCK, 0);
439 rt2x00_set_field32(&reg, BBPCSR1_OFDM, 0); 507 rt2x00_set_field32(&reg, BBPCSR1_OFDM, 0);
440 break; 508 break;
441 case ANTENNA_HW_DIVERSITY:
442 case ANTENNA_SW_DIVERSITY:
443 /*
444 * NOTE: We should never come here because rt2x00lib is
445 * supposed to catch this and send us the correct antenna
446 * explicitely. However we are nog going to bug about this.
447 * Instead, just default to antenna B.
448 */
449 case ANTENNA_B: 509 case ANTENNA_B:
510 default:
450 rt2x00_set_field8(&r2, BBP_R2_TX_ANTENNA, 2); 511 rt2x00_set_field8(&r2, BBP_R2_TX_ANTENNA, 2);
451 rt2x00_set_field32(&reg, BBPCSR1_CCK, 2); 512 rt2x00_set_field32(&reg, BBPCSR1_CCK, 2);
452 rt2x00_set_field32(&reg, BBPCSR1_OFDM, 2); 513 rt2x00_set_field32(&reg, BBPCSR1_OFDM, 2);
@@ -460,15 +521,8 @@ static void rt2500pci_config_antenna(struct rt2x00_dev *rt2x00dev,
460 case ANTENNA_A: 521 case ANTENNA_A:
461 rt2x00_set_field8(&r14, BBP_R14_RX_ANTENNA, 0); 522 rt2x00_set_field8(&r14, BBP_R14_RX_ANTENNA, 0);
462 break; 523 break;
463 case ANTENNA_HW_DIVERSITY:
464 case ANTENNA_SW_DIVERSITY:
465 /*
466 * NOTE: We should never come here because rt2x00lib is
467 * supposed to catch this and send us the correct antenna
468 * explicitely. However we are nog going to bug about this.
469 * Instead, just default to antenna B.
470 */
471 case ANTENNA_B: 524 case ANTENNA_B:
525 default:
472 rt2x00_set_field8(&r14, BBP_R14_RX_ANTENNA, 2); 526 rt2x00_set_field8(&r14, BBP_R14_RX_ANTENNA, 2);
473 break; 527 break;
474 } 528 }
@@ -530,8 +584,8 @@ static void rt2500pci_config_duration(struct rt2x00_dev *rt2x00dev,
530} 584}
531 585
532static void rt2500pci_config(struct rt2x00_dev *rt2x00dev, 586static void rt2500pci_config(struct rt2x00_dev *rt2x00dev,
533 const unsigned int flags, 587 struct rt2x00lib_conf *libconf,
534 struct rt2x00lib_conf *libconf) 588 const unsigned int flags)
535{ 589{
536 if (flags & CONFIG_UPDATE_PHYMODE) 590 if (flags & CONFIG_UPDATE_PHYMODE)
537 rt2500pci_config_phymode(rt2x00dev, libconf->basic_rates); 591 rt2500pci_config_phymode(rt2x00dev, libconf->basic_rates);
@@ -548,34 +602,6 @@ static void rt2500pci_config(struct rt2x00_dev *rt2x00dev,
548} 602}
549 603
550/* 604/*
551 * LED functions.
552 */
553static void rt2500pci_enable_led(struct rt2x00_dev *rt2x00dev)
554{
555 u32 reg;
556
557 rt2x00pci_register_read(rt2x00dev, LEDCSR, &reg);
558
559 rt2x00_set_field32(&reg, LEDCSR_ON_PERIOD, 70);
560 rt2x00_set_field32(&reg, LEDCSR_OFF_PERIOD, 30);
561 rt2x00_set_field32(&reg, LEDCSR_LINK,
562 (rt2x00dev->led_mode != LED_MODE_ASUS));
563 rt2x00_set_field32(&reg, LEDCSR_ACTIVITY,
564 (rt2x00dev->led_mode != LED_MODE_TXRX_ACTIVITY));
565 rt2x00pci_register_write(rt2x00dev, LEDCSR, reg);
566}
567
568static void rt2500pci_disable_led(struct rt2x00_dev *rt2x00dev)
569{
570 u32 reg;
571
572 rt2x00pci_register_read(rt2x00dev, LEDCSR, &reg);
573 rt2x00_set_field32(&reg, LEDCSR_LINK, 0);
574 rt2x00_set_field32(&reg, LEDCSR_ACTIVITY, 0);
575 rt2x00pci_register_write(rt2x00dev, LEDCSR, reg);
576}
577
578/*
579 * Link tuning 605 * Link tuning
580 */ 606 */
581static void rt2500pci_link_stats(struct rt2x00_dev *rt2x00dev, 607static void rt2500pci_link_stats(struct rt2x00_dev *rt2x00dev,
@@ -610,9 +636,10 @@ static void rt2500pci_link_tuner(struct rt2x00_dev *rt2x00dev)
610 /* 636 /*
611 * To prevent collisions with MAC ASIC on chipsets 637 * To prevent collisions with MAC ASIC on chipsets
612 * up to version C the link tuning should halt after 20 638 * up to version C the link tuning should halt after 20
613 * seconds. 639 * seconds while being associated.
614 */ 640 */
615 if (rt2x00_rev(&rt2x00dev->chip) < RT2560_VERSION_D && 641 if (rt2x00_rev(&rt2x00dev->chip) < RT2560_VERSION_D &&
642 rt2x00dev->intf_associated &&
616 rt2x00dev->link.count > 20) 643 rt2x00dev->link.count > 20)
617 return; 644 return;
618 645
@@ -620,9 +647,12 @@ static void rt2500pci_link_tuner(struct rt2x00_dev *rt2x00dev)
620 647
621 /* 648 /*
622 * Chipset versions C and lower should directly continue 649 * to the dynamic CCA tuning. Chipset versions D and higher
623 * to the dynamic CCA tuning. 650 * to the dynamic CCA tuning. Chipset version D and higher
651 * should go straight to dynamic CCA tuning when they
652 * are not associated.
624 */ 653 */
625 if (rt2x00_rev(&rt2x00dev->chip) < RT2560_VERSION_D) 654 if (rt2x00_rev(&rt2x00dev->chip) < RT2560_VERSION_D ||
655 !rt2x00dev->intf_associated)
626 goto dynamic_cca_tune; 656 goto dynamic_cca_tune;
627 657
628 /* 658 /*
@@ -684,82 +714,84 @@ dynamic_cca_tune:
684 * Initialization functions. 714 * Initialization functions.
685 */ 715 */
686static void rt2500pci_init_rxentry(struct rt2x00_dev *rt2x00dev, 716static void rt2500pci_init_rxentry(struct rt2x00_dev *rt2x00dev,
687 struct data_entry *entry) 717 struct queue_entry *entry)
688{ 718{
689 __le32 *rxd = entry->priv; 719 struct queue_entry_priv_pci_rx *priv_rx = entry->priv_data;
690 u32 word; 720 u32 word;
691 721
692 rt2x00_desc_read(rxd, 1, &word); 722 rt2x00_desc_read(priv_rx->desc, 1, &word);
693 rt2x00_set_field32(&word, RXD_W1_BUFFER_ADDRESS, entry->data_dma); 723 rt2x00_set_field32(&word, RXD_W1_BUFFER_ADDRESS, priv_rx->data_dma);
694 rt2x00_desc_write(rxd, 1, word); 724 rt2x00_desc_write(priv_rx->desc, 1, word);
695 725
696 rt2x00_desc_read(rxd, 0, &word); 726 rt2x00_desc_read(priv_rx->desc, 0, &word);
697 rt2x00_set_field32(&word, RXD_W0_OWNER_NIC, 1); 727 rt2x00_set_field32(&word, RXD_W0_OWNER_NIC, 1);
698 rt2x00_desc_write(rxd, 0, word); 728 rt2x00_desc_write(priv_rx->desc, 0, word);
699} 729}
700 730
701static void rt2500pci_init_txentry(struct rt2x00_dev *rt2x00dev, 731static void rt2500pci_init_txentry(struct rt2x00_dev *rt2x00dev,
702 struct data_entry *entry) 732 struct queue_entry *entry)
703{ 733{
704 __le32 *txd = entry->priv; 734 struct queue_entry_priv_pci_tx *priv_tx = entry->priv_data;
705 u32 word; 735 u32 word;
706 736
707 rt2x00_desc_read(txd, 1, &word); 737 rt2x00_desc_read(priv_tx->desc, 1, &word);
708 rt2x00_set_field32(&word, TXD_W1_BUFFER_ADDRESS, entry->data_dma); 738 rt2x00_set_field32(&word, TXD_W1_BUFFER_ADDRESS, priv_tx->data_dma);
709 rt2x00_desc_write(txd, 1, word); 739 rt2x00_desc_write(priv_tx->desc, 1, word);
710 740
711 rt2x00_desc_read(txd, 0, &word); 741 rt2x00_desc_read(priv_tx->desc, 0, &word);
712 rt2x00_set_field32(&word, TXD_W0_VALID, 0); 742 rt2x00_set_field32(&word, TXD_W0_VALID, 0);
713 rt2x00_set_field32(&word, TXD_W0_OWNER_NIC, 0); 743 rt2x00_set_field32(&word, TXD_W0_OWNER_NIC, 0);
714 rt2x00_desc_write(txd, 0, word); 744 rt2x00_desc_write(priv_tx->desc, 0, word);
715} 745}
716 746
717static int rt2500pci_init_rings(struct rt2x00_dev *rt2x00dev) 747static int rt2500pci_init_queues(struct rt2x00_dev *rt2x00dev)
718{ 748{
749 struct queue_entry_priv_pci_rx *priv_rx;
750 struct queue_entry_priv_pci_tx *priv_tx;
719 u32 reg; 751 u32 reg;
720 752
721 /* 753 /*
722 * Initialize registers. 754 * Initialize registers.
723 */ 755 */
724 rt2x00pci_register_read(rt2x00dev, TXCSR2, &reg); 756 rt2x00pci_register_read(rt2x00dev, TXCSR2, &reg);
725 rt2x00_set_field32(&reg, TXCSR2_TXD_SIZE, 757 rt2x00_set_field32(&reg, TXCSR2_TXD_SIZE, rt2x00dev->tx[0].desc_size);
726 rt2x00dev->tx[IEEE80211_TX_QUEUE_DATA0].desc_size); 758 rt2x00_set_field32(&reg, TXCSR2_NUM_TXD, rt2x00dev->tx[1].limit);
727 rt2x00_set_field32(&reg, TXCSR2_NUM_TXD, 759 rt2x00_set_field32(&reg, TXCSR2_NUM_ATIM, rt2x00dev->bcn[1].limit);
728 rt2x00dev->tx[IEEE80211_TX_QUEUE_DATA1].stats.limit); 760 rt2x00_set_field32(&reg, TXCSR2_NUM_PRIO, rt2x00dev->tx[0].limit);
729 rt2x00_set_field32(&reg, TXCSR2_NUM_ATIM,
730 rt2x00dev->bcn[1].stats.limit);
731 rt2x00_set_field32(&reg, TXCSR2_NUM_PRIO,
732 rt2x00dev->tx[IEEE80211_TX_QUEUE_DATA0].stats.limit);
733 rt2x00pci_register_write(rt2x00dev, TXCSR2, reg); 761 rt2x00pci_register_write(rt2x00dev, TXCSR2, reg);
734 762
763 priv_tx = rt2x00dev->tx[1].entries[0].priv_data;
735 rt2x00pci_register_read(rt2x00dev, TXCSR3, &reg); 764 rt2x00pci_register_read(rt2x00dev, TXCSR3, &reg);
736 rt2x00_set_field32(&reg, TXCSR3_TX_RING_REGISTER, 765 rt2x00_set_field32(&reg, TXCSR3_TX_RING_REGISTER,
737 rt2x00dev->tx[IEEE80211_TX_QUEUE_DATA1].data_dma); 766 priv_tx->desc_dma);
738 rt2x00pci_register_write(rt2x00dev, TXCSR3, reg); 767 rt2x00pci_register_write(rt2x00dev, TXCSR3, reg);
739 768
769 priv_tx = rt2x00dev->tx[0].entries[0].priv_data;
740 rt2x00pci_register_read(rt2x00dev, TXCSR5, &reg); 770 rt2x00pci_register_read(rt2x00dev, TXCSR5, &reg);
741 rt2x00_set_field32(&reg, TXCSR5_PRIO_RING_REGISTER, 771 rt2x00_set_field32(&reg, TXCSR5_PRIO_RING_REGISTER,
742 rt2x00dev->tx[IEEE80211_TX_QUEUE_DATA0].data_dma); 772 priv_tx->desc_dma);
743 rt2x00pci_register_write(rt2x00dev, TXCSR5, reg); 773 rt2x00pci_register_write(rt2x00dev, TXCSR5, reg);
744 774
775 priv_tx = rt2x00dev->bcn[1].entries[0].priv_data;
745 rt2x00pci_register_read(rt2x00dev, TXCSR4, &reg); 776 rt2x00pci_register_read(rt2x00dev, TXCSR4, &reg);
746 rt2x00_set_field32(&reg, TXCSR4_ATIM_RING_REGISTER, 777 rt2x00_set_field32(&reg, TXCSR4_ATIM_RING_REGISTER,
747 rt2x00dev->bcn[1].data_dma); 778 priv_tx->desc_dma);
748 rt2x00pci_register_write(rt2x00dev, TXCSR4, reg); 779 rt2x00pci_register_write(rt2x00dev, TXCSR4, reg);
749 780
781 priv_tx = rt2x00dev->bcn[0].entries[0].priv_data;
750 rt2x00pci_register_read(rt2x00dev, TXCSR6, &reg); 782 rt2x00pci_register_read(rt2x00dev, TXCSR6, &reg);
751 rt2x00_set_field32(&reg, TXCSR6_BEACON_RING_REGISTER, 783 rt2x00_set_field32(&reg, TXCSR6_BEACON_RING_REGISTER,
752 rt2x00dev->bcn[0].data_dma); 784 priv_tx->desc_dma);
753 rt2x00pci_register_write(rt2x00dev, TXCSR6, reg); 785 rt2x00pci_register_write(rt2x00dev, TXCSR6, reg);
754 786
755 rt2x00pci_register_read(rt2x00dev, RXCSR1, &reg); 787 rt2x00pci_register_read(rt2x00dev, RXCSR1, &reg);
756 rt2x00_set_field32(&reg, RXCSR1_RXD_SIZE, rt2x00dev->rx->desc_size); 788 rt2x00_set_field32(&reg, RXCSR1_RXD_SIZE, rt2x00dev->rx->desc_size);
757 rt2x00_set_field32(&reg, RXCSR1_NUM_RXD, rt2x00dev->rx->stats.limit); 789 rt2x00_set_field32(&reg, RXCSR1_NUM_RXD, rt2x00dev->rx->limit);
758 rt2x00pci_register_write(rt2x00dev, RXCSR1, reg); 790 rt2x00pci_register_write(rt2x00dev, RXCSR1, reg);
759 791
792 priv_rx = rt2x00dev->rx->entries[0].priv_data;
760 rt2x00pci_register_read(rt2x00dev, RXCSR2, &reg); 793 rt2x00pci_register_read(rt2x00dev, RXCSR2, &reg);
761 rt2x00_set_field32(&reg, RXCSR2_RX_RING_REGISTER, 794 rt2x00_set_field32(&reg, RXCSR2_RX_RING_REGISTER, priv_rx->desc_dma);
762 rt2x00dev->rx->data_dma);
763 rt2x00pci_register_write(rt2x00dev, RXCSR2, reg); 795 rt2x00pci_register_write(rt2x00dev, RXCSR2, reg);
764 796
765 return 0; 797 return 0;
@@ -947,19 +979,15 @@ continue_csr_init:
947 rt2500pci_bbp_write(rt2x00dev, 61, 0x6d); 979 rt2500pci_bbp_write(rt2x00dev, 61, 0x6d);
948 rt2500pci_bbp_write(rt2x00dev, 62, 0x10); 980 rt2500pci_bbp_write(rt2x00dev, 62, 0x10);
949 981
950 DEBUG(rt2x00dev, "Start initialization from EEPROM...\n");
951 for (i = 0; i < EEPROM_BBP_SIZE; i++) { 982 for (i = 0; i < EEPROM_BBP_SIZE; i++) {
952 rt2x00_eeprom_read(rt2x00dev, EEPROM_BBP_START + i, &eeprom); 983 rt2x00_eeprom_read(rt2x00dev, EEPROM_BBP_START + i, &eeprom);
953 984
954 if (eeprom != 0xffff && eeprom != 0x0000) { 985 if (eeprom != 0xffff && eeprom != 0x0000) {
955 reg_id = rt2x00_get_field16(eeprom, EEPROM_BBP_REG_ID); 986 reg_id = rt2x00_get_field16(eeprom, EEPROM_BBP_REG_ID);
956 value = rt2x00_get_field16(eeprom, EEPROM_BBP_VALUE); 987 value = rt2x00_get_field16(eeprom, EEPROM_BBP_VALUE);
957 DEBUG(rt2x00dev, "BBP: 0x%02x, value: 0x%02x.\n",
958 reg_id, value);
959 rt2500pci_bbp_write(rt2x00dev, reg_id, value); 988 rt2500pci_bbp_write(rt2x00dev, reg_id, value);
960 } 989 }
961 } 990 }
962 DEBUG(rt2x00dev, "...End initialization from EEPROM.\n");
963 991
964 return 0; 992 return 0;
965} 993}
@@ -1011,7 +1039,7 @@ static int rt2500pci_enable_radio(struct rt2x00_dev *rt2x00dev)
1011 /* 1039 /*
1012 * Initialize all registers. 1040 * Initialize all registers.
1013 */ 1041 */
1014 if (rt2500pci_init_rings(rt2x00dev) || 1042 if (rt2500pci_init_queues(rt2x00dev) ||
1015 rt2500pci_init_registers(rt2x00dev) || 1043 rt2500pci_init_registers(rt2x00dev) ||
1016 rt2500pci_init_bbp(rt2x00dev)) { 1044 rt2500pci_init_bbp(rt2x00dev)) {
1017 ERROR(rt2x00dev, "Register initialization failed.\n"); 1045 ERROR(rt2x00dev, "Register initialization failed.\n");
@@ -1023,11 +1051,6 @@ static int rt2500pci_enable_radio(struct rt2x00_dev *rt2x00dev)
1023 */ 1051 */
1024 rt2500pci_toggle_irq(rt2x00dev, STATE_RADIO_IRQ_ON); 1052 rt2500pci_toggle_irq(rt2x00dev, STATE_RADIO_IRQ_ON);
1025 1053
1026 /*
1027 * Enable LED
1028 */
1029 rt2500pci_enable_led(rt2x00dev);
1030
1031 return 0; 1054 return 0;
1032} 1055}
1033 1056
@@ -1035,11 +1058,6 @@ static void rt2500pci_disable_radio(struct rt2x00_dev *rt2x00dev)
1035{ 1058{
1036 u32 reg; 1059 u32 reg;
1037 1060
1038 /*
1039 * Disable LED
1040 */
1041 rt2500pci_disable_led(rt2x00dev);
1042
1043 rt2x00pci_register_write(rt2x00dev, PWRCSR0, 0); 1061 rt2x00pci_register_write(rt2x00dev, PWRCSR0, 0);
1044 1062
1045 /* 1063 /*
@@ -1138,10 +1156,10 @@ static int rt2500pci_set_device_state(struct rt2x00_dev *rt2x00dev,
1138 */ 1156 */
1139static void rt2500pci_write_tx_desc(struct rt2x00_dev *rt2x00dev, 1157static void rt2500pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1140 struct sk_buff *skb, 1158 struct sk_buff *skb,
1141 struct txdata_entry_desc *desc, 1159 struct txentry_desc *txdesc,
1142 struct ieee80211_tx_control *control) 1160 struct ieee80211_tx_control *control)
1143{ 1161{
1144 struct skb_desc *skbdesc = get_skb_desc(skb); 1162 struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
1145 __le32 *txd = skbdesc->desc; 1163 __le32 *txd = skbdesc->desc;
1146 u32 word; 1164 u32 word;
1147 1165
@@ -1150,36 +1168,36 @@ static void rt2500pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1150 */ 1168 */
1151 rt2x00_desc_read(txd, 2, &word); 1169 rt2x00_desc_read(txd, 2, &word);
1152 rt2x00_set_field32(&word, TXD_W2_IV_OFFSET, IEEE80211_HEADER); 1170 rt2x00_set_field32(&word, TXD_W2_IV_OFFSET, IEEE80211_HEADER);
1153 rt2x00_set_field32(&word, TXD_W2_AIFS, desc->aifs); 1171 rt2x00_set_field32(&word, TXD_W2_AIFS, txdesc->aifs);
1154 rt2x00_set_field32(&word, TXD_W2_CWMIN, desc->cw_min); 1172 rt2x00_set_field32(&word, TXD_W2_CWMIN, txdesc->cw_min);
1155 rt2x00_set_field32(&word, TXD_W2_CWMAX, desc->cw_max); 1173 rt2x00_set_field32(&word, TXD_W2_CWMAX, txdesc->cw_max);
1156 rt2x00_desc_write(txd, 2, word); 1174 rt2x00_desc_write(txd, 2, word);
1157 1175
1158 rt2x00_desc_read(txd, 3, &word); 1176 rt2x00_desc_read(txd, 3, &word);
1159 rt2x00_set_field32(&word, TXD_W3_PLCP_SIGNAL, desc->signal); 1177 rt2x00_set_field32(&word, TXD_W3_PLCP_SIGNAL, txdesc->signal);
1160 rt2x00_set_field32(&word, TXD_W3_PLCP_SERVICE, desc->service); 1178 rt2x00_set_field32(&word, TXD_W3_PLCP_SERVICE, txdesc->service);
1161 rt2x00_set_field32(&word, TXD_W3_PLCP_LENGTH_LOW, desc->length_low); 1179 rt2x00_set_field32(&word, TXD_W3_PLCP_LENGTH_LOW, txdesc->length_low);
1162 rt2x00_set_field32(&word, TXD_W3_PLCP_LENGTH_HIGH, desc->length_high); 1180 rt2x00_set_field32(&word, TXD_W3_PLCP_LENGTH_HIGH, txdesc->length_high);
1163 rt2x00_desc_write(txd, 3, word); 1181 rt2x00_desc_write(txd, 3, word);
1164 1182
1165 rt2x00_desc_read(txd, 10, &word); 1183 rt2x00_desc_read(txd, 10, &word);
1166 rt2x00_set_field32(&word, TXD_W10_RTS, 1184 rt2x00_set_field32(&word, TXD_W10_RTS,
1167 test_bit(ENTRY_TXD_RTS_FRAME, &desc->flags)); 1185 test_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags));
1168 rt2x00_desc_write(txd, 10, word); 1186 rt2x00_desc_write(txd, 10, word);
1169 1187
1170 rt2x00_desc_read(txd, 0, &word); 1188 rt2x00_desc_read(txd, 0, &word);
1171 rt2x00_set_field32(&word, TXD_W0_OWNER_NIC, 1); 1189 rt2x00_set_field32(&word, TXD_W0_OWNER_NIC, 1);
1172 rt2x00_set_field32(&word, TXD_W0_VALID, 1); 1190 rt2x00_set_field32(&word, TXD_W0_VALID, 1);
1173 rt2x00_set_field32(&word, TXD_W0_MORE_FRAG, 1191 rt2x00_set_field32(&word, TXD_W0_MORE_FRAG,
1174 test_bit(ENTRY_TXD_MORE_FRAG, &desc->flags)); 1192 test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags));
1175 rt2x00_set_field32(&word, TXD_W0_ACK, 1193 rt2x00_set_field32(&word, TXD_W0_ACK,
1176 test_bit(ENTRY_TXD_ACK, &desc->flags)); 1194 test_bit(ENTRY_TXD_ACK, &txdesc->flags));
1177 rt2x00_set_field32(&word, TXD_W0_TIMESTAMP, 1195 rt2x00_set_field32(&word, TXD_W0_TIMESTAMP,
1178 test_bit(ENTRY_TXD_REQ_TIMESTAMP, &desc->flags)); 1196 test_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags));
1179 rt2x00_set_field32(&word, TXD_W0_OFDM, 1197 rt2x00_set_field32(&word, TXD_W0_OFDM,
1180 test_bit(ENTRY_TXD_OFDM_RATE, &desc->flags)); 1198 test_bit(ENTRY_TXD_OFDM_RATE, &txdesc->flags));
1181 rt2x00_set_field32(&word, TXD_W0_CIPHER_OWNER, 1); 1199 rt2x00_set_field32(&word, TXD_W0_CIPHER_OWNER, 1);
1182 rt2x00_set_field32(&word, TXD_W0_IFS, desc->ifs); 1200 rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs);
1183 rt2x00_set_field32(&word, TXD_W0_RETRY_MODE, 1201 rt2x00_set_field32(&word, TXD_W0_RETRY_MODE,
1184 !!(control->flags & 1202 !!(control->flags &
1185 IEEE80211_TXCTL_LONG_RETRY_LIMIT)); 1203 IEEE80211_TXCTL_LONG_RETRY_LIMIT));
@@ -1192,13 +1210,15 @@ static void rt2500pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1192 * TX data initialization 1210 * TX data initialization
1193 */ 1211 */
1194static void rt2500pci_kick_tx_queue(struct rt2x00_dev *rt2x00dev, 1212static void rt2500pci_kick_tx_queue(struct rt2x00_dev *rt2x00dev,
1195 unsigned int queue) 1213 const unsigned int queue)
1196{ 1214{
1197 u32 reg; 1215 u32 reg;
1198 1216
1199 if (queue == IEEE80211_TX_QUEUE_BEACON) { 1217 if (queue == RT2X00_BCN_QUEUE_BEACON) {
1200 rt2x00pci_register_read(rt2x00dev, CSR14, &reg); 1218 rt2x00pci_register_read(rt2x00dev, CSR14, &reg);
1201 if (!rt2x00_get_field32(reg, CSR14_BEACON_GEN)) { 1219 if (!rt2x00_get_field32(reg, CSR14_BEACON_GEN)) {
1220 rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 1);
1221 rt2x00_set_field32(&reg, CSR14_TBCN, 1);
1202 rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 1); 1222 rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 1);
1203 rt2x00pci_register_write(rt2x00dev, CSR14, reg); 1223 rt2x00pci_register_write(rt2x00dev, CSR14, reg);
1204 } 1224 }
@@ -1211,53 +1231,63 @@ static void rt2500pci_kick_tx_queue(struct rt2x00_dev *rt2x00dev,
1211 rt2x00_set_field32(&reg, TXCSR0_KICK_TX, 1231 rt2x00_set_field32(&reg, TXCSR0_KICK_TX,
1212 (queue == IEEE80211_TX_QUEUE_DATA1)); 1232 (queue == IEEE80211_TX_QUEUE_DATA1));
1213 rt2x00_set_field32(&reg, TXCSR0_KICK_ATIM, 1233 rt2x00_set_field32(&reg, TXCSR0_KICK_ATIM,
1214 (queue == IEEE80211_TX_QUEUE_AFTER_BEACON)); 1234 (queue == RT2X00_BCN_QUEUE_ATIM));
1215 rt2x00pci_register_write(rt2x00dev, TXCSR0, reg); 1235 rt2x00pci_register_write(rt2x00dev, TXCSR0, reg);
1216} 1236}
1217 1237
1218/* 1238/*
1219 * RX control handlers 1239 * RX control handlers
1220 */ 1240 */
1221static void rt2500pci_fill_rxdone(struct data_entry *entry, 1241static void rt2500pci_fill_rxdone(struct queue_entry *entry,
1222 struct rxdata_entry_desc *desc) 1242 struct rxdone_entry_desc *rxdesc)
1223{ 1243{
1224 __le32 *rxd = entry->priv; 1244 struct queue_entry_priv_pci_rx *priv_rx = entry->priv_data;
1225 u32 word0; 1245 u32 word0;
1226 u32 word2; 1246 u32 word2;
1227 1247
1228 rt2x00_desc_read(rxd, 0, &word0); 1248 rt2x00_desc_read(priv_rx->desc, 0, &word0);
1229 rt2x00_desc_read(rxd, 2, &word2); 1249 rt2x00_desc_read(priv_rx->desc, 2, &word2);
1230 1250
1231 desc->flags = 0; 1251 rxdesc->flags = 0;
1232 if (rt2x00_get_field32(word0, RXD_W0_CRC_ERROR)) 1252 if (rt2x00_get_field32(word0, RXD_W0_CRC_ERROR))
1233 desc->flags |= RX_FLAG_FAILED_FCS_CRC; 1253 rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC;
1234 if (rt2x00_get_field32(word0, RXD_W0_PHYSICAL_ERROR)) 1254 if (rt2x00_get_field32(word0, RXD_W0_PHYSICAL_ERROR))
1235 desc->flags |= RX_FLAG_FAILED_PLCP_CRC; 1255 rxdesc->flags |= RX_FLAG_FAILED_PLCP_CRC;
1236 1256
1237 desc->signal = rt2x00_get_field32(word2, RXD_W2_SIGNAL); 1257 /*
1238 desc->rssi = rt2x00_get_field32(word2, RXD_W2_RSSI) - 1258 * Obtain the status about this packet.
1239 entry->ring->rt2x00dev->rssi_offset; 1259 * When frame was received with an OFDM bitrate,
1240 desc->ofdm = rt2x00_get_field32(word0, RXD_W0_OFDM); 1260 * the signal is the PLCP value. If it was received with
1241 desc->size = rt2x00_get_field32(word0, RXD_W0_DATABYTE_COUNT); 1261 * a CCK bitrate the signal is the rate in 100kbit/s.
1242 desc->my_bss = !!rt2x00_get_field32(word0, RXD_W0_MY_BSS); 1262 */
1263 rxdesc->signal = rt2x00_get_field32(word2, RXD_W2_SIGNAL);
1264 rxdesc->rssi = rt2x00_get_field32(word2, RXD_W2_RSSI) -
1265 entry->queue->rt2x00dev->rssi_offset;
1266 rxdesc->size = rt2x00_get_field32(word0, RXD_W0_DATABYTE_COUNT);
1267
1268 rxdesc->dev_flags = 0;
1269 if (rt2x00_get_field32(word0, RXD_W0_OFDM))
1270 rxdesc->dev_flags |= RXDONE_SIGNAL_PLCP;
1271 if (rt2x00_get_field32(word0, RXD_W0_MY_BSS))
1272 rxdesc->dev_flags |= RXDONE_MY_BSS;
1243} 1273}
1244 1274
1245/* 1275/*
1246 * Interrupt functions. 1276 * Interrupt functions.
1247 */ 1277 */
1248static void rt2500pci_txdone(struct rt2x00_dev *rt2x00dev, const int queue) 1278static void rt2500pci_txdone(struct rt2x00_dev *rt2x00dev,
1279 const enum ieee80211_tx_queue queue_idx)
1249{ 1280{
1250 struct data_ring *ring = rt2x00lib_get_ring(rt2x00dev, queue); 1281 struct data_queue *queue = rt2x00queue_get_queue(rt2x00dev, queue_idx);
1251 struct data_entry *entry; 1282 struct queue_entry_priv_pci_tx *priv_tx;
1252 __le32 *txd; 1283 struct queue_entry *entry;
1284 struct txdone_entry_desc txdesc;
1253 u32 word; 1285 u32 word;
1254 int tx_status;
1255 int retry;
1256 1286
1257 while (!rt2x00_ring_empty(ring)) { 1287 while (!rt2x00queue_empty(queue)) {
1258 entry = rt2x00_get_data_entry_done(ring); 1288 entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
1259 txd = entry->priv; 1289 priv_tx = entry->priv_data;
1260 rt2x00_desc_read(txd, 0, &word); 1290 rt2x00_desc_read(priv_tx->desc, 0, &word);
1261 1291
1262 if (rt2x00_get_field32(word, TXD_W0_OWNER_NIC) || 1292 if (rt2x00_get_field32(word, TXD_W0_OWNER_NIC) ||
1263 !rt2x00_get_field32(word, TXD_W0_VALID)) 1293 !rt2x00_get_field32(word, TXD_W0_VALID))
@@ -1266,10 +1296,10 @@ static void rt2500pci_txdone(struct rt2x00_dev *rt2x00dev, const int queue)
1266 /* 1296 /*
1267 * Obtain the status about this packet. 1297 * Obtain the status about this packet.
1268 */ 1298 */
1269 tx_status = rt2x00_get_field32(word, TXD_W0_RESULT); 1299 txdesc.status = rt2x00_get_field32(word, TXD_W0_RESULT);
1270 retry = rt2x00_get_field32(word, TXD_W0_RETRY_COUNT); 1300 txdesc.retry = rt2x00_get_field32(word, TXD_W0_RETRY_COUNT);
1271 1301
1272 rt2x00pci_txdone(rt2x00dev, entry, tx_status, retry); 1302 rt2x00pci_txdone(rt2x00dev, entry, &txdesc);
1273 } 1303 }
1274} 1304}
1275 1305
@@ -1313,7 +1343,7 @@ static irqreturn_t rt2500pci_interrupt(int irq, void *dev_instance)
1313 * 3 - Atim ring transmit done interrupt. 1343 * 3 - Atim ring transmit done interrupt.
1314 */ 1344 */
1315 if (rt2x00_get_field32(reg, CSR7_TXDONE_ATIMRING)) 1345 if (rt2x00_get_field32(reg, CSR7_TXDONE_ATIMRING))
1316 rt2500pci_txdone(rt2x00dev, IEEE80211_TX_QUEUE_AFTER_BEACON); 1346 rt2500pci_txdone(rt2x00dev, RT2X00_BCN_QUEUE_ATIM);
1317 1347
1318 /* 1348 /*
1319 * 4 - Priority ring transmit done interrupt. 1349 * 4 - Priority ring transmit done interrupt.
@@ -1442,8 +1472,27 @@ static int rt2500pci_init_eeprom(struct rt2x00_dev *rt2x00dev)
1442 /* 1472 /*
1443 * Store led mode, for correct led behaviour. 1473 * Store led mode, for correct led behaviour.
1444 */ 1474 */
1445 rt2x00dev->led_mode = 1475#ifdef CONFIG_RT2500PCI_LEDS
1446 rt2x00_get_field16(eeprom, EEPROM_ANTENNA_LED_MODE); 1476 value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_LED_MODE);
1477
1478 rt2x00dev->led_radio.rt2x00dev = rt2x00dev;
1479 rt2x00dev->led_radio.type = LED_TYPE_RADIO;
1480 rt2x00dev->led_radio.led_dev.brightness_set =
1481 rt2500pci_brightness_set;
1482 rt2x00dev->led_radio.led_dev.blink_set =
1483 rt2500pci_blink_set;
1484 rt2x00dev->led_radio.flags = LED_INITIALIZED;
1485
1486 if (value == LED_MODE_TXRX_ACTIVITY) {
1487 rt2x00dev->led_qual.rt2x00dev = rt2x00dev;
1488 rt2x00dev->led_radio.type = LED_TYPE_ACTIVITY;
1489 rt2x00dev->led_qual.led_dev.brightness_set =
1490 rt2500pci_brightness_set;
1491 rt2x00dev->led_qual.led_dev.blink_set =
1492 rt2500pci_blink_set;
1493 rt2x00dev->led_qual.flags = LED_INITIALIZED;
1494 }
1495#endif /* CONFIG_RT2500PCI_LEDS */
1447 1496
1448 /* 1497 /*
1449 * Detect if this device has an hardware controlled radio. 1498 * Detect if this device has an hardware controlled radio.
@@ -1656,8 +1705,8 @@ static void rt2500pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
1656 /* 1705 /*
1657 * Initialize hw_mode information. 1706 * Initialize hw_mode information.
1658 */ 1707 */
1659 spec->num_modes = 2; 1708 spec->supported_bands = SUPPORT_BAND_2GHZ;
1660 spec->num_rates = 12; 1709 spec->supported_rates = SUPPORT_RATE_CCK | SUPPORT_RATE_OFDM;
1661 spec->tx_power_a = NULL; 1710 spec->tx_power_a = NULL;
1662 spec->tx_power_bg = txpower; 1711 spec->tx_power_bg = txpower;
1663 spec->tx_power_default = DEFAULT_TXPOWER; 1712 spec->tx_power_default = DEFAULT_TXPOWER;
@@ -1678,9 +1727,9 @@ static void rt2500pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
1678 spec->num_channels = ARRAY_SIZE(rf_vals_bg_2525e); 1727 spec->num_channels = ARRAY_SIZE(rf_vals_bg_2525e);
1679 spec->channels = rf_vals_bg_2525e; 1728 spec->channels = rf_vals_bg_2525e;
1680 } else if (rt2x00_rf(&rt2x00dev->chip, RF5222)) { 1729 } else if (rt2x00_rf(&rt2x00dev->chip, RF5222)) {
1730 spec->supported_bands |= SUPPORT_BAND_5GHZ;
1681 spec->num_channels = ARRAY_SIZE(rf_vals_5222); 1731 spec->num_channels = ARRAY_SIZE(rf_vals_5222);
1682 spec->channels = rf_vals_5222; 1732 spec->channels = rf_vals_5222;
1683 spec->num_modes = 3;
1684 } 1733 }
1685} 1734}
1686 1735
@@ -1705,9 +1754,9 @@ static int rt2500pci_probe_hw(struct rt2x00_dev *rt2x00dev)
1705 rt2500pci_probe_hw_mode(rt2x00dev); 1754 rt2500pci_probe_hw_mode(rt2x00dev);
1706 1755
1707 /* 1756 /*
1708 * This device requires the beacon ring 1757 * This device requires the atim queue
1709 */ 1758 */
1710 __set_bit(DRIVER_REQUIRE_BEACON_RING, &rt2x00dev->flags); 1759 __set_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags);
1711 1760
1712 /* 1761 /*
1713 * Set the rssi offset. 1762 * Set the rssi offset.
@@ -1720,69 +1769,6 @@ static int rt2500pci_probe_hw(struct rt2x00_dev *rt2x00dev)
1720/* 1769/*
1721 * IEEE80211 stack callback functions. 1770 * IEEE80211 stack callback functions.
1722 */ 1771 */
1723static void rt2500pci_configure_filter(struct ieee80211_hw *hw,
1724 unsigned int changed_flags,
1725 unsigned int *total_flags,
1726 int mc_count,
1727 struct dev_addr_list *mc_list)
1728{
1729 struct rt2x00_dev *rt2x00dev = hw->priv;
1730 u32 reg;
1731
1732 /*
1733 * Mask off any flags we are going to ignore from
1734 * the total_flags field.
1735 */
1736 *total_flags &=
1737 FIF_ALLMULTI |
1738 FIF_FCSFAIL |
1739 FIF_PLCPFAIL |
1740 FIF_CONTROL |
1741 FIF_OTHER_BSS |
1742 FIF_PROMISC_IN_BSS;
1743
1744 /*
1745 * Apply some rules to the filters:
1746 * - Some filters imply different filters to be set.
1747 * - Some things we can't filter out at all.
1748 */
1749 if (mc_count)
1750 *total_flags |= FIF_ALLMULTI;
1751 if (*total_flags & FIF_OTHER_BSS ||
1752 *total_flags & FIF_PROMISC_IN_BSS)
1753 *total_flags |= FIF_PROMISC_IN_BSS | FIF_OTHER_BSS;
1754
1755 /*
1756 * Check if there is any work left for us.
1757 */
1758 if (rt2x00dev->packet_filter == *total_flags)
1759 return;
1760 rt2x00dev->packet_filter = *total_flags;
1761
1762 /*
1763 * Start configuration steps.
1764 * Note that the version error will always be dropped
1765 * and broadcast frames will always be accepted since
1766 * there is no filter for it at this time.
1767 */
1768 rt2x00pci_register_read(rt2x00dev, RXCSR0, &reg);
1769 rt2x00_set_field32(&reg, RXCSR0_DROP_CRC,
1770 !(*total_flags & FIF_FCSFAIL));
1771 rt2x00_set_field32(&reg, RXCSR0_DROP_PHYSICAL,
1772 !(*total_flags & FIF_PLCPFAIL));
1773 rt2x00_set_field32(&reg, RXCSR0_DROP_CONTROL,
1774 !(*total_flags & FIF_CONTROL));
1775 rt2x00_set_field32(&reg, RXCSR0_DROP_NOT_TO_ME,
1776 !(*total_flags & FIF_PROMISC_IN_BSS));
1777 rt2x00_set_field32(&reg, RXCSR0_DROP_TODS,
1778 !(*total_flags & FIF_PROMISC_IN_BSS));
1779 rt2x00_set_field32(&reg, RXCSR0_DROP_VERSION_ERROR, 1);
1780 rt2x00_set_field32(&reg, RXCSR0_DROP_MCAST,
1781 !(*total_flags & FIF_ALLMULTI));
1782 rt2x00_set_field32(&reg, RXCSR0_DROP_BCAST, 0);
1783 rt2x00pci_register_write(rt2x00dev, RXCSR0, reg);
1784}
1785
1786static int rt2500pci_set_retry_limit(struct ieee80211_hw *hw, 1772static int rt2500pci_set_retry_limit(struct ieee80211_hw *hw,
1787 u32 short_retry, u32 long_retry) 1773 u32 short_retry, u32 long_retry)
1788{ 1774{
@@ -1811,12 +1797,59 @@ static u64 rt2500pci_get_tsf(struct ieee80211_hw *hw)
1811 return tsf; 1797 return tsf;
1812} 1798}
1813 1799
1814static void rt2500pci_reset_tsf(struct ieee80211_hw *hw) 1800static int rt2500pci_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb,
1801 struct ieee80211_tx_control *control)
1815{ 1802{
1816 struct rt2x00_dev *rt2x00dev = hw->priv; 1803 struct rt2x00_dev *rt2x00dev = hw->priv;
1804 struct rt2x00_intf *intf = vif_to_intf(control->vif);
1805 struct queue_entry_priv_pci_tx *priv_tx;
1806 struct skb_frame_desc *skbdesc;
1807 u32 reg;
1817 1808
1818 rt2x00pci_register_write(rt2x00dev, CSR16, 0); 1809 if (unlikely(!intf->beacon))
1819 rt2x00pci_register_write(rt2x00dev, CSR17, 0); 1810 return -ENOBUFS;
1811
1812 priv_tx = intf->beacon->priv_data;
1813
1814 /*
1815 * Fill in skb descriptor
1816 */
1817 skbdesc = get_skb_frame_desc(skb);
1818 memset(skbdesc, 0, sizeof(*skbdesc));
1819 skbdesc->flags |= FRAME_DESC_DRIVER_GENERATED;
1820 skbdesc->data = skb->data;
1821 skbdesc->data_len = skb->len;
1822 skbdesc->desc = priv_tx->desc;
1823 skbdesc->desc_len = intf->beacon->queue->desc_size;
1824 skbdesc->entry = intf->beacon;
1825
1826 /*
1827 * Disable beaconing while we are reloading the beacon data,
1828 * otherwise we might be sending out invalid data.
1829 */
1830 rt2x00pci_register_read(rt2x00dev, CSR14, &reg);
1831 rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 0);
1832 rt2x00_set_field32(&reg, CSR14_TBCN, 0);
1833 rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 0);
1834 rt2x00pci_register_write(rt2x00dev, CSR14, reg);
1835
1836 /*
1837 * mac80211 doesn't provide the control->queue variable
1838 * for beacons. Set our own queue identification so
1839 * it can be used during descriptor initialization.
1840 */
1841 control->queue = RT2X00_BCN_QUEUE_BEACON;
1842 rt2x00lib_write_tx_desc(rt2x00dev, skb, control);
1843
1844 /*
1845 * Enable beacon generation.
1846 * Write entire beacon with descriptor to register,
1847 * and kick the beacon generator.
1848 */
1849 memcpy(priv_tx->data, skb->data, skb->len);
1850 rt2x00dev->ops->lib->kick_tx_queue(rt2x00dev, control->queue);
1851
1852 return 0;
1820} 1853}
1821 1854
1822static int rt2500pci_tx_last_beacon(struct ieee80211_hw *hw) 1855static int rt2500pci_tx_last_beacon(struct ieee80211_hw *hw)
@@ -1836,15 +1869,14 @@ static const struct ieee80211_ops rt2500pci_mac80211_ops = {
1836 .remove_interface = rt2x00mac_remove_interface, 1869 .remove_interface = rt2x00mac_remove_interface,
1837 .config = rt2x00mac_config, 1870 .config = rt2x00mac_config,
1838 .config_interface = rt2x00mac_config_interface, 1871 .config_interface = rt2x00mac_config_interface,
1839 .configure_filter = rt2500pci_configure_filter, 1872 .configure_filter = rt2x00mac_configure_filter,
1840 .get_stats = rt2x00mac_get_stats, 1873 .get_stats = rt2x00mac_get_stats,
1841 .set_retry_limit = rt2500pci_set_retry_limit, 1874 .set_retry_limit = rt2500pci_set_retry_limit,
1842 .bss_info_changed = rt2x00mac_bss_info_changed, 1875 .bss_info_changed = rt2x00mac_bss_info_changed,
1843 .conf_tx = rt2x00mac_conf_tx, 1876 .conf_tx = rt2x00mac_conf_tx,
1844 .get_tx_stats = rt2x00mac_get_tx_stats, 1877 .get_tx_stats = rt2x00mac_get_tx_stats,
1845 .get_tsf = rt2500pci_get_tsf, 1878 .get_tsf = rt2500pci_get_tsf,
1846 .reset_tsf = rt2500pci_reset_tsf, 1879 .beacon_update = rt2500pci_beacon_update,
1847 .beacon_update = rt2x00pci_beacon_update,
1848 .tx_last_beacon = rt2500pci_tx_last_beacon, 1880 .tx_last_beacon = rt2500pci_tx_last_beacon,
1849}; 1881};
1850 1882
@@ -1864,19 +1896,50 @@ static const struct rt2x00lib_ops rt2500pci_rt2x00_ops = {
1864 .write_tx_data = rt2x00pci_write_tx_data, 1896 .write_tx_data = rt2x00pci_write_tx_data,
1865 .kick_tx_queue = rt2500pci_kick_tx_queue, 1897 .kick_tx_queue = rt2500pci_kick_tx_queue,
1866 .fill_rxdone = rt2500pci_fill_rxdone, 1898 .fill_rxdone = rt2500pci_fill_rxdone,
1867 .config_mac_addr = rt2500pci_config_mac_addr, 1899 .config_filter = rt2500pci_config_filter,
1868 .config_bssid = rt2500pci_config_bssid, 1900 .config_intf = rt2500pci_config_intf,
1869 .config_type = rt2500pci_config_type, 1901 .config_erp = rt2500pci_config_erp,
1870 .config_preamble = rt2500pci_config_preamble,
1871 .config = rt2500pci_config, 1902 .config = rt2500pci_config,
1872}; 1903};
1873 1904
1905static const struct data_queue_desc rt2500pci_queue_rx = {
1906 .entry_num = RX_ENTRIES,
1907 .data_size = DATA_FRAME_SIZE,
1908 .desc_size = RXD_DESC_SIZE,
1909 .priv_size = sizeof(struct queue_entry_priv_pci_rx),
1910};
1911
1912static const struct data_queue_desc rt2500pci_queue_tx = {
1913 .entry_num = TX_ENTRIES,
1914 .data_size = DATA_FRAME_SIZE,
1915 .desc_size = TXD_DESC_SIZE,
1916 .priv_size = sizeof(struct queue_entry_priv_pci_tx),
1917};
1918
1919static const struct data_queue_desc rt2500pci_queue_bcn = {
1920 .entry_num = BEACON_ENTRIES,
1921 .data_size = MGMT_FRAME_SIZE,
1922 .desc_size = TXD_DESC_SIZE,
1923 .priv_size = sizeof(struct queue_entry_priv_pci_tx),
1924};
1925
1926static const struct data_queue_desc rt2500pci_queue_atim = {
1927 .entry_num = ATIM_ENTRIES,
1928 .data_size = DATA_FRAME_SIZE,
1929 .desc_size = TXD_DESC_SIZE,
1930 .priv_size = sizeof(struct queue_entry_priv_pci_tx),
1931};
1932
1874static const struct rt2x00_ops rt2500pci_ops = { 1933static const struct rt2x00_ops rt2500pci_ops = {
1875 .name = KBUILD_MODNAME, 1934 .name = KBUILD_MODNAME,
1876 .rxd_size = RXD_DESC_SIZE, 1935 .max_sta_intf = 1,
1877 .txd_size = TXD_DESC_SIZE, 1936 .max_ap_intf = 1,
1878 .eeprom_size = EEPROM_SIZE, 1937 .eeprom_size = EEPROM_SIZE,
1879 .rf_size = RF_SIZE, 1938 .rf_size = RF_SIZE,
1939 .rx = &rt2500pci_queue_rx,
1940 .tx = &rt2500pci_queue_tx,
1941 .bcn = &rt2500pci_queue_bcn,
1942 .atim = &rt2500pci_queue_atim,
1880 .lib = &rt2500pci_rt2x00_ops, 1943 .lib = &rt2500pci_rt2x00_ops,
1881 .hw = &rt2500pci_mac80211_ops, 1944 .hw = &rt2500pci_mac80211_ops,
1882#ifdef CONFIG_RT2X00_LIB_DEBUGFS 1945#ifdef CONFIG_RT2X00_LIB_DEBUGFS
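
rt2500pci_config_filter() above translates mac80211 FIF_* filter flags into RXCSR0 "drop" bits: each drop bit is set when the corresponding pass-through flag is absent, version errors are always dropped and broadcast frames are always accepted. Here is a stand-alone sketch of that inversion; all bit positions and the FIF_* values are made up for illustration and do not match the real registers or headers.

#include <stdint.h>
#include <stdio.h>

#define FIF_FCSFAIL          (1u << 0)
#define FIF_PLCPFAIL         (1u << 1)
#define FIF_CONTROL          (1u << 2)
#define FIF_PROMISC_IN_BSS   (1u << 3)
#define FIF_ALLMULTI         (1u << 4)

#define DROP_CRC             (1u << 0)
#define DROP_PHYSICAL        (1u << 1)
#define DROP_CONTROL         (1u << 2)
#define DROP_NOT_TO_ME       (1u << 3)
#define DROP_MCAST           (1u << 4)
#define DROP_VERSION_ERROR   (1u << 5)

static uint32_t build_rx_filter(unsigned int filter_flags, int ap_count)
{
	uint32_t reg = 0;

	/* A drop bit is set whenever the matching "accept" flag is missing. */
	if (!(filter_flags & FIF_FCSFAIL))
		reg |= DROP_CRC;
	if (!(filter_flags & FIF_PLCPFAIL))
		reg |= DROP_PHYSICAL;
	if (!(filter_flags & FIF_CONTROL))
		reg |= DROP_CONTROL;
	if (!(filter_flags & FIF_PROMISC_IN_BSS))
		reg |= DROP_NOT_TO_ME;
	if (!(filter_flags & FIF_ALLMULTI))
		reg |= DROP_MCAST;

	/* Version errors are always dropped; broadcast is always accepted. */
	reg |= DROP_VERSION_ERROR;
	(void)ap_count;  /* the real handler also keeps ToDS frames when AP interfaces exist */

	return reg;
}

int main(void)
{
	printf("monitor-ish filter: 0x%08x\n",
	       build_rx_filter(FIF_FCSFAIL | FIF_PLCPFAIL | FIF_CONTROL, 0));
	return 0;
}
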
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.h b/drivers/net/wireless/rt2x00/rt2500pci.h
index 92ba0902d107..13899550465a 100644
--- a/drivers/net/wireless/rt2x00/rt2500pci.h
+++ b/drivers/net/wireless/rt2x00/rt2500pci.h
@@ -1,5 +1,5 @@
1/* 1/*
2 Copyright (C) 2004 - 2007 rt2x00 SourceForge Project 2 Copyright (C) 2004 - 2008 rt2x00 SourceForge Project
3 <http://rt2x00.serialmonkey.com> 3 <http://rt2x00.serialmonkey.com>
4 4
5 This program is free software; you can redistribute it and/or modify 5 This program is free software; you can redistribute it and/or modify
@@ -1213,8 +1213,8 @@
1213#define RXD_W10_DROP FIELD32(0x00000001) 1213#define RXD_W10_DROP FIELD32(0x00000001)
1214 1214
1215/* 1215/*
1216 * Macros for converting txpower from EEPROM to dscape value 1216 * Macros for converting txpower from EEPROM to mac80211 value
1217 * and from dscape value to register value. 1217 * and from mac80211 value to register value.
1218 */ 1218 */
1219#define MIN_TXPOWER 0 1219#define MIN_TXPOWER 0
1220#define MAX_TXPOWER 31 1220#define MAX_TXPOWER 31
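
The RXD_W*/TXD_W* definitions in these headers describe each descriptor word as a set of FIELD32() masks that the driver reads and writes through helpers such as rt2x00_get_field32() and rt2x00_set_field32(). The following self-contained sketch shows one plausible way such mask-based field helpers can work; it is a simplification for illustration, not the kernel's exact implementation.

#include <stdint.h>
#include <stdio.h>

#define FIELD32(mask) (mask)

#define RXD_W2_BUFFER_LENGTH FIELD32(0x0000ffff)
#define RXD_W2_BBR0          FIELD32(0x00ff0000)
#define RXD_W2_SIGNAL        FIELD32(0xff000000)

/* Position of the lowest set bit of the mask. */
static unsigned int field_shift(uint32_t mask)
{
	unsigned int shift = 0;

	while (!(mask & 1)) {
		mask >>= 1;
		shift++;
	}
	return shift;
}

static uint32_t get_field32(uint32_t word, uint32_t mask)
{
	return (word & mask) >> field_shift(mask);
}

static void set_field32(uint32_t *word, uint32_t mask, uint32_t value)
{
	*word = (*word & ~mask) | ((value << field_shift(mask)) & mask);
}

int main(void)
{
	uint32_t word2 = 0;

	set_field32(&word2, RXD_W2_BUFFER_LENGTH, 1500);
	set_field32(&word2, RXD_W2_SIGNAL, 0x42);

	printf("word2 = 0x%08x, signal = 0x%02x\n",
	       word2, get_field32(word2, RXD_W2_SIGNAL));
	return 0;
}
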
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c
index 638c3d243108..6bb07b339325 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.c
+++ b/drivers/net/wireless/rt2x00/rt2500usb.c
@@ -1,5 +1,5 @@
1/* 1/*
2 Copyright (C) 2004 - 2007 rt2x00 SourceForge Project 2 Copyright (C) 2004 - 2008 rt2x00 SourceForge Project
3 <http://rt2x00.serialmonkey.com> 3 <http://rt2x00.serialmonkey.com>
4 4
5 This program is free software; you can redistribute it and/or modify 5 This program is free software; you can redistribute it and/or modify
@@ -282,97 +282,136 @@ static const struct rt2x00debug rt2500usb_rt2x00debug = {
282}; 282};
283#endif /* CONFIG_RT2X00_LIB_DEBUGFS */ 283#endif /* CONFIG_RT2X00_LIB_DEBUGFS */
284 284
285/* 285#ifdef CONFIG_RT2500USB_LEDS
286 * Configuration handlers. 286static void rt2500usb_brightness_set(struct led_classdev *led_cdev,
287 */ 287 enum led_brightness brightness)
288static void rt2500usb_config_mac_addr(struct rt2x00_dev *rt2x00dev,
289 __le32 *mac)
290{ 288{
291 rt2500usb_register_multiwrite(rt2x00dev, MAC_CSR2, mac, 289 struct rt2x00_led *led =
292 (3 * sizeof(__le16))); 290 container_of(led_cdev, struct rt2x00_led, led_dev);
293} 291 unsigned int enabled = brightness != LED_OFF;
292 u16 reg;
294 293
295static void rt2500usb_config_bssid(struct rt2x00_dev *rt2x00dev, 294 rt2500usb_register_read(led->rt2x00dev, MAC_CSR20, &reg);
296 __le32 *bssid) 295
297{ 296 if (led->type == LED_TYPE_RADIO || led->type == LED_TYPE_ASSOC)
298 rt2500usb_register_multiwrite(rt2x00dev, MAC_CSR5, bssid, 297 rt2x00_set_field16(&reg, MAC_CSR20_LINK, enabled);
299 (3 * sizeof(__le16))); 298 else if (led->type == LED_TYPE_ACTIVITY)
299 rt2x00_set_field16(&reg, MAC_CSR20_ACTIVITY, enabled);
300
301 rt2500usb_register_write(led->rt2x00dev, MAC_CSR20, reg);
300} 302}
301 303
302static void rt2500usb_config_type(struct rt2x00_dev *rt2x00dev, const int type, 304static int rt2500usb_blink_set(struct led_classdev *led_cdev,
303 const int tsf_sync) 305 unsigned long *delay_on,
306 unsigned long *delay_off)
304{ 307{
308 struct rt2x00_led *led =
309 container_of(led_cdev, struct rt2x00_led, led_dev);
305 u16 reg; 310 u16 reg;
306 311
307 rt2500usb_register_write(rt2x00dev, TXRX_CSR19, 0); 312 rt2500usb_register_read(led->rt2x00dev, MAC_CSR21, &reg);
313 rt2x00_set_field16(&reg, MAC_CSR21_ON_PERIOD, *delay_on);
314 rt2x00_set_field16(&reg, MAC_CSR21_OFF_PERIOD, *delay_off);
315 rt2500usb_register_write(led->rt2x00dev, MAC_CSR21, reg);
308 316
309 /* 317 return 0;
310 * Enable beacon config 318}
311 */ 319#endif /* CONFIG_RT2500USB_LEDS */
312 rt2500usb_register_read(rt2x00dev, TXRX_CSR20, &reg); 320
313 rt2x00_set_field16(&reg, TXRX_CSR20_OFFSET, 321/*
314 (PREAMBLE + get_duration(IEEE80211_HEADER, 20)) >> 6); 322 * Configuration handlers.
315 if (type == IEEE80211_IF_TYPE_STA) 323 */
316 rt2x00_set_field16(&reg, TXRX_CSR20_BCN_EXPECT_WINDOW, 0); 324static void rt2500usb_config_filter(struct rt2x00_dev *rt2x00dev,
317 else 325 const unsigned int filter_flags)
318 rt2x00_set_field16(&reg, TXRX_CSR20_BCN_EXPECT_WINDOW, 2); 326{
319 rt2500usb_register_write(rt2x00dev, TXRX_CSR20, reg); 327 u16 reg;
320 328
321 /* 329 /*
322 * Enable synchronisation. 330 * Start configuration steps.
331 * Note that the version error will always be dropped
332 * and broadcast frames will always be accepted since
333 * there is no filter for it at this time.
323 */ 334 */
324 rt2500usb_register_read(rt2x00dev, TXRX_CSR18, &reg); 335 rt2500usb_register_read(rt2x00dev, TXRX_CSR2, &reg);
325 rt2x00_set_field16(&reg, TXRX_CSR18_OFFSET, 0); 336 rt2x00_set_field16(&reg, TXRX_CSR2_DROP_CRC,
326 rt2500usb_register_write(rt2x00dev, TXRX_CSR18, reg); 337 !(filter_flags & FIF_FCSFAIL));
327 338 rt2x00_set_field16(&reg, TXRX_CSR2_DROP_PHYSICAL,
328 rt2500usb_register_read(rt2x00dev, TXRX_CSR19, &reg); 339 !(filter_flags & FIF_PLCPFAIL));
329 rt2x00_set_field16(&reg, TXRX_CSR19_TSF_COUNT, 1); 340 rt2x00_set_field16(&reg, TXRX_CSR2_DROP_CONTROL,
330 rt2x00_set_field16(&reg, TXRX_CSR19_TBCN, 341 !(filter_flags & FIF_CONTROL));
331 (tsf_sync == TSF_SYNC_BEACON)); 342 rt2x00_set_field16(&reg, TXRX_CSR2_DROP_NOT_TO_ME,
332 rt2x00_set_field16(&reg, TXRX_CSR19_BEACON_GEN, 0); 343 !(filter_flags & FIF_PROMISC_IN_BSS));
333 rt2x00_set_field16(&reg, TXRX_CSR19_TSF_SYNC, tsf_sync); 344 rt2x00_set_field16(&reg, TXRX_CSR2_DROP_TODS,
334 rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg); 345 !(filter_flags & FIF_PROMISC_IN_BSS) &&
346 !rt2x00dev->intf_ap_count);
347 rt2x00_set_field16(&reg, TXRX_CSR2_DROP_VERSION_ERROR, 1);
348 rt2x00_set_field16(&reg, TXRX_CSR2_DROP_MULTICAST,
349 !(filter_flags & FIF_ALLMULTI));
350 rt2x00_set_field16(&reg, TXRX_CSR2_DROP_BROADCAST, 0);
351 rt2500usb_register_write(rt2x00dev, TXRX_CSR2, reg);
335} 352}
336 353
337static void rt2500usb_config_preamble(struct rt2x00_dev *rt2x00dev, 354static void rt2500usb_config_intf(struct rt2x00_dev *rt2x00dev,
338 const int short_preamble, 355 struct rt2x00_intf *intf,
339 const int ack_timeout, 356 struct rt2x00intf_conf *conf,
340 const int ack_consume_time) 357 const unsigned int flags)
341{ 358{
359 unsigned int bcn_preload;
342 u16 reg; 360 u16 reg;
343 361
344 /* 362 if (flags & CONFIG_UPDATE_TYPE) {
345 * When in atomic context, reschedule and let rt2x00lib 363 /*
346 * call this function again. 364 * Enable beacon config
347 */ 365 */
348 if (in_atomic()) { 366 bcn_preload = PREAMBLE + get_duration(IEEE80211_HEADER, 20);
349 queue_work(rt2x00dev->hw->workqueue, &rt2x00dev->config_work); 367 rt2500usb_register_read(rt2x00dev, TXRX_CSR20, &reg);
350 return; 368 rt2x00_set_field16(&reg, TXRX_CSR20_OFFSET, bcn_preload >> 6);
369 rt2x00_set_field16(&reg, TXRX_CSR20_BCN_EXPECT_WINDOW,
370 2 * (conf->type != IEEE80211_IF_TYPE_STA));
371 rt2500usb_register_write(rt2x00dev, TXRX_CSR20, reg);
372
373 /*
374 * Enable synchronisation.
375 */
376 rt2500usb_register_read(rt2x00dev, TXRX_CSR18, &reg);
377 rt2x00_set_field16(&reg, TXRX_CSR18_OFFSET, 0);
378 rt2500usb_register_write(rt2x00dev, TXRX_CSR18, reg);
379
380 rt2500usb_register_read(rt2x00dev, TXRX_CSR19, &reg);
381 rt2x00_set_field16(&reg, TXRX_CSR19_TSF_COUNT, 1);
382 rt2x00_set_field16(&reg, TXRX_CSR19_TSF_SYNC, conf->sync);
383 rt2x00_set_field16(&reg, TXRX_CSR19_TBCN, 1);
384 rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg);
351 } 385 }
352 386
387 if (flags & CONFIG_UPDATE_MAC)
388 rt2500usb_register_multiwrite(rt2x00dev, MAC_CSR2, conf->mac,
389 (3 * sizeof(__le16)));
390
391 if (flags & CONFIG_UPDATE_BSSID)
392 rt2500usb_register_multiwrite(rt2x00dev, MAC_CSR5, conf->bssid,
393 (3 * sizeof(__le16)));
394}
395
396static void rt2500usb_config_erp(struct rt2x00_dev *rt2x00dev,
397 struct rt2x00lib_erp *erp)
398{
399 u16 reg;
400
353 rt2500usb_register_read(rt2x00dev, TXRX_CSR1, &reg); 401 rt2500usb_register_read(rt2x00dev, TXRX_CSR1, &reg);
354 rt2x00_set_field16(&reg, TXRX_CSR1_ACK_TIMEOUT, ack_timeout); 402 rt2x00_set_field16(&reg, TXRX_CSR1_ACK_TIMEOUT, erp->ack_timeout);
355 rt2500usb_register_write(rt2x00dev, TXRX_CSR1, reg); 403 rt2500usb_register_write(rt2x00dev, TXRX_CSR1, reg);
356 404
357 rt2500usb_register_read(rt2x00dev, TXRX_CSR10, &reg); 405 rt2500usb_register_read(rt2x00dev, TXRX_CSR10, &reg);
358 rt2x00_set_field16(&reg, TXRX_CSR10_AUTORESPOND_PREAMBLE, 406 rt2x00_set_field16(&reg, TXRX_CSR10_AUTORESPOND_PREAMBLE,
359 !!short_preamble); 407 !!erp->short_preamble);
360 rt2500usb_register_write(rt2x00dev, TXRX_CSR10, reg); 408 rt2500usb_register_write(rt2x00dev, TXRX_CSR10, reg);
361} 409}
362 410
363static void rt2500usb_config_phymode(struct rt2x00_dev *rt2x00dev, 411static void rt2500usb_config_phymode(struct rt2x00_dev *rt2x00dev,
364 const int phymode,
365 const int basic_rate_mask) 412 const int basic_rate_mask)
366{ 413{
367 rt2500usb_register_write(rt2x00dev, TXRX_CSR11, basic_rate_mask); 414 rt2500usb_register_write(rt2x00dev, TXRX_CSR11, basic_rate_mask);
368
369 if (phymode == HWMODE_B) {
370 rt2500usb_register_write(rt2x00dev, MAC_CSR11, 0x000b);
371 rt2500usb_register_write(rt2x00dev, MAC_CSR12, 0x0040);
372 } else {
373 rt2500usb_register_write(rt2x00dev, MAC_CSR11, 0x0005);
374 rt2500usb_register_write(rt2x00dev, MAC_CSR12, 0x016c);
375 }
376} 415}
377 416
378static void rt2500usb_config_channel(struct rt2x00_dev *rt2x00dev, 417static void rt2500usb_config_channel(struct rt2x00_dev *rt2x00dev,
@@ -424,6 +463,13 @@ static void rt2500usb_config_antenna(struct rt2x00_dev *rt2x00dev,
424 u16 csr5; 463 u16 csr5;
425 u16 csr6; 464 u16 csr6;
426 465
466 /*
467 * We should never come here because rt2x00lib is supposed
468 * to catch this and send us the correct antenna explicitly.
469 */
470 BUG_ON(ant->rx == ANTENNA_SW_DIVERSITY ||
471 ant->tx == ANTENNA_SW_DIVERSITY);
472
427 rt2500usb_bbp_read(rt2x00dev, 2, &r2); 473 rt2500usb_bbp_read(rt2x00dev, 2, &r2);
428 rt2500usb_bbp_read(rt2x00dev, 14, &r14); 474 rt2500usb_bbp_read(rt2x00dev, 14, &r14);
429 rt2500usb_register_read(rt2x00dev, PHY_CSR5, &csr5); 475 rt2500usb_register_read(rt2x00dev, PHY_CSR5, &csr5);
@@ -443,14 +489,8 @@ static void rt2500usb_config_antenna(struct rt2x00_dev *rt2x00dev,
443 rt2x00_set_field16(&csr5, PHY_CSR5_CCK, 0); 489 rt2x00_set_field16(&csr5, PHY_CSR5_CCK, 0);
444 rt2x00_set_field16(&csr6, PHY_CSR6_OFDM, 0); 490 rt2x00_set_field16(&csr6, PHY_CSR6_OFDM, 0);
445 break; 491 break;
446 case ANTENNA_SW_DIVERSITY:
447 /*
448 * NOTE: We should never come here because rt2x00lib is
449 * supposed to catch this and send us the correct antenna
450 * explicitely. However we are nog going to bug about this.
451 * Instead, just default to antenna B.
452 */
453 case ANTENNA_B: 492 case ANTENNA_B:
493 default:
454 rt2x00_set_field8(&r2, BBP_R2_TX_ANTENNA, 2); 494 rt2x00_set_field8(&r2, BBP_R2_TX_ANTENNA, 2);
455 rt2x00_set_field16(&csr5, PHY_CSR5_CCK, 2); 495 rt2x00_set_field16(&csr5, PHY_CSR5_CCK, 2);
456 rt2x00_set_field16(&csr6, PHY_CSR6_OFDM, 2); 496 rt2x00_set_field16(&csr6, PHY_CSR6_OFDM, 2);
@@ -467,14 +507,8 @@ static void rt2500usb_config_antenna(struct rt2x00_dev *rt2x00dev,
467 case ANTENNA_A: 507 case ANTENNA_A:
468 rt2x00_set_field8(&r14, BBP_R14_RX_ANTENNA, 0); 508 rt2x00_set_field8(&r14, BBP_R14_RX_ANTENNA, 0);
469 break; 509 break;
470 case ANTENNA_SW_DIVERSITY:
471 /*
472 * NOTE: We should never come here because rt2x00lib is
473 * supposed to catch this and send us the correct antenna
474 * explicitely. However we are nog going to bug about this.
475 * Instead, just default to antenna B.
476 */
477 case ANTENNA_B: 510 case ANTENNA_B:
511 default:
478 rt2x00_set_field8(&r14, BBP_R14_RX_ANTENNA, 2); 512 rt2x00_set_field8(&r14, BBP_R14_RX_ANTENNA, 2);
479 break; 513 break;
480 } 514 }
@@ -510,6 +544,8 @@ static void rt2500usb_config_duration(struct rt2x00_dev *rt2x00dev,
510 u16 reg; 544 u16 reg;
511 545
512 rt2500usb_register_write(rt2x00dev, MAC_CSR10, libconf->slot_time); 546 rt2500usb_register_write(rt2x00dev, MAC_CSR10, libconf->slot_time);
547 rt2500usb_register_write(rt2x00dev, MAC_CSR11, libconf->sifs);
548 rt2500usb_register_write(rt2x00dev, MAC_CSR12, libconf->eifs);
513 549
514 rt2500usb_register_read(rt2x00dev, TXRX_CSR18, &reg); 550 rt2500usb_register_read(rt2x00dev, TXRX_CSR18, &reg);
515 rt2x00_set_field16(&reg, TXRX_CSR18_INTERVAL, 551 rt2x00_set_field16(&reg, TXRX_CSR18_INTERVAL,
@@ -518,12 +554,11 @@ static void rt2500usb_config_duration(struct rt2x00_dev *rt2x00dev,
518} 554}
519 555
520static void rt2500usb_config(struct rt2x00_dev *rt2x00dev, 556static void rt2500usb_config(struct rt2x00_dev *rt2x00dev,
521 const unsigned int flags, 557 struct rt2x00lib_conf *libconf,
522 struct rt2x00lib_conf *libconf) 558 const unsigned int flags)
523{ 559{
524 if (flags & CONFIG_UPDATE_PHYMODE) 560 if (flags & CONFIG_UPDATE_PHYMODE)
525 rt2500usb_config_phymode(rt2x00dev, libconf->phymode, 561 rt2500usb_config_phymode(rt2x00dev, libconf->basic_rates);
526 libconf->basic_rates);
527 if (flags & CONFIG_UPDATE_CHANNEL) 562 if (flags & CONFIG_UPDATE_CHANNEL)
528 rt2500usb_config_channel(rt2x00dev, &libconf->rf, 563 rt2500usb_config_channel(rt2x00dev, &libconf->rf,
529 libconf->conf->power_level); 564 libconf->conf->power_level);
@@ -537,36 +572,6 @@ static void rt2500usb_config(struct rt2x00_dev *rt2x00dev,
537} 572}
538 573
539/* 574/*
540 * LED functions.
541 */
542static void rt2500usb_enable_led(struct rt2x00_dev *rt2x00dev)
543{
544 u16 reg;
545
546 rt2500usb_register_read(rt2x00dev, MAC_CSR21, &reg);
547 rt2x00_set_field16(&reg, MAC_CSR21_ON_PERIOD, 70);
548 rt2x00_set_field16(&reg, MAC_CSR21_OFF_PERIOD, 30);
549 rt2500usb_register_write(rt2x00dev, MAC_CSR21, reg);
550
551 rt2500usb_register_read(rt2x00dev, MAC_CSR20, &reg);
552 rt2x00_set_field16(&reg, MAC_CSR20_LINK,
553 (rt2x00dev->led_mode != LED_MODE_ASUS));
554 rt2x00_set_field16(&reg, MAC_CSR20_ACTIVITY,
555 (rt2x00dev->led_mode != LED_MODE_TXRX_ACTIVITY));
556 rt2500usb_register_write(rt2x00dev, MAC_CSR20, reg);
557}
558
559static void rt2500usb_disable_led(struct rt2x00_dev *rt2x00dev)
560{
561 u16 reg;
562
563 rt2500usb_register_read(rt2x00dev, MAC_CSR20, &reg);
564 rt2x00_set_field16(&reg, MAC_CSR20_LINK, 0);
565 rt2x00_set_field16(&reg, MAC_CSR20_ACTIVITY, 0);
566 rt2500usb_register_write(rt2x00dev, MAC_CSR20, reg);
567}
568
569/*
570 * Link tuning 575 * Link tuning
571 */ 576 */
572static void rt2500usb_link_stats(struct rt2x00_dev *rt2x00dev, 577static void rt2500usb_link_stats(struct rt2x00_dev *rt2x00dev,
@@ -626,6 +631,24 @@ static void rt2500usb_link_tuner(struct rt2x00_dev *rt2x00dev)
626 u8 low_bound; 631 u8 low_bound;
627 632
628 /* 633 /*
634 * Read current r17 value, as well as the sensitivity values
635 * for the r17 register.
636 */
637 rt2500usb_bbp_read(rt2x00dev, 17, &r17);
638 rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_R17, &r17_sens);
639
640 rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_VGC, &vgc_bound);
641 up_bound = rt2x00_get_field16(vgc_bound, EEPROM_BBPTUNE_VGCUPPER);
642 low_bound = rt2x00_get_field16(vgc_bound, EEPROM_BBPTUNE_VGCLOWER);
643
644 /*
645 * If we are not associated, we should go straight to the
646 * dynamic CCA tuning.
647 */
648 if (!rt2x00dev->intf_associated)
649 goto dynamic_cca_tune;
650
651 /*
629 * Determine the BBP tuning threshold and correctly 652 * Determine the BBP tuning threshold and correctly
630 * set BBP 24, 25 and 61. 653 * set BBP 24, 25 and 61.
631 */ 654 */
@@ -651,13 +674,6 @@ static void rt2500usb_link_tuner(struct rt2x00_dev *rt2x00dev)
651 rt2500usb_bbp_write(rt2x00dev, 61, r61); 674 rt2500usb_bbp_write(rt2x00dev, 61, r61);
652 675
653 /* 676 /*
654 * Read current r17 value, as well as the sensitivity values
655 * for the r17 register.
656 */
657 rt2500usb_bbp_read(rt2x00dev, 17, &r17);
658 rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_R17, &r17_sens);
659
660 /*
661 * Too low an RSSI will cause too much false CCA, which will 677
662 * then corrupt the R17 tuning. To remedy this the tuning should 678
663 * be stopped (while making sure the R17 value will not exceed limits) 679
@@ -692,14 +708,9 @@ static void rt2500usb_link_tuner(struct rt2x00_dev *rt2x00dev)
692 * Leave short or middle distance condition, restore r17 708 * Leave short or middle distance condition, restore r17
693 * to the dynamic tuning range. 709 * to the dynamic tuning range.
694 */ 710 */
695 rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_VGC, &vgc_bound);
696 vgc_bound = rt2x00_get_field16(vgc_bound, EEPROM_BBPTUNE_VGCUPPER);
697
698 low_bound = 0x32; 711 low_bound = 0x32;
699 if (rssi >= -77) 712 if (rssi < -77)
700 up_bound = vgc_bound; 713 up_bound -= (-77 - rssi);
701 else
702 up_bound = vgc_bound - (-77 - rssi);
703 714
704 if (up_bound < low_bound) 715 if (up_bound < low_bound)
705 up_bound = low_bound; 716 up_bound = low_bound;
@@ -707,7 +718,16 @@ static void rt2500usb_link_tuner(struct rt2x00_dev *rt2x00dev)
707 if (r17 > up_bound) { 718 if (r17 > up_bound) {
708 rt2500usb_bbp_write(rt2x00dev, 17, up_bound); 719 rt2500usb_bbp_write(rt2x00dev, 17, up_bound);
709 rt2x00dev->link.vgc_level = up_bound; 720 rt2x00dev->link.vgc_level = up_bound;
710 } else if (rt2x00dev->link.qual.false_cca > 512 && r17 < up_bound) { 721 return;
722 }
723
724dynamic_cca_tune:
725
726 /*
727 * R17 is inside the dynamic tuning range,
728 * start tuning the link based on the false cca counter.
729 */
730 if (rt2x00dev->link.qual.false_cca > 512 && r17 < up_bound) {
711 rt2500usb_bbp_write(rt2x00dev, 17, ++r17); 731 rt2500usb_bbp_write(rt2x00dev, 17, ++r17);
712 rt2x00dev->link.vgc_level = r17; 732 rt2x00dev->link.vgc_level = r17;
713 } else if (rt2x00dev->link.qual.false_cca < 100 && r17 > low_bound) { 733 } else if (rt2x00dev->link.qual.false_cca < 100 && r17 > low_bound) {
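
After the reshuffle above, the tuner ends in a plain hysteresis loop: once R17 sits inside the dynamic range it is nudged up while the false-CCA counter stays high (above 512) and back down when the counter is low (below 100), clamped between low_bound and up_bound. A standalone sketch of just that loop; the thresholds are taken from the code above, the bounds and sample counters are invented:

    #include <stdio.h>

    /* One tuning step: returns the new VGC (BBP register 17) value. */
    static unsigned int tune_vgc(unsigned int r17, unsigned int false_cca,
                                 unsigned int low_bound, unsigned int up_bound)
    {
            if (false_cca > 512 && r17 < up_bound)
                    return r17 + 1; /* too many false carrier detects: desensitize */
            if (false_cca < 100 && r17 > low_bound)
                    return r17 - 1; /* quiet channel: regain sensitivity */
            return r17;
    }

    int main(void)
    {
            unsigned int r17 = 0x30;
            const unsigned int samples[] = { 600, 700, 550, 80, 50, 300 };

            for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
                    r17 = tune_vgc(r17, samples[i], 0x20, 0x40);
                    printf("false_cca=%u -> r17=0x%02x\n", samples[i], r17);
            }
            return 0;
    }
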
@@ -878,19 +898,15 @@ continue_csr_init:
878 rt2500usb_bbp_write(rt2x00dev, 62, 0x10); 898 rt2500usb_bbp_write(rt2x00dev, 62, 0x10);
879 rt2500usb_bbp_write(rt2x00dev, 75, 0xff); 899 rt2500usb_bbp_write(rt2x00dev, 75, 0xff);
880 900
881 DEBUG(rt2x00dev, "Start initialization from EEPROM...\n");
882 for (i = 0; i < EEPROM_BBP_SIZE; i++) { 901 for (i = 0; i < EEPROM_BBP_SIZE; i++) {
883 rt2x00_eeprom_read(rt2x00dev, EEPROM_BBP_START + i, &eeprom); 902 rt2x00_eeprom_read(rt2x00dev, EEPROM_BBP_START + i, &eeprom);
884 903
885 if (eeprom != 0xffff && eeprom != 0x0000) { 904 if (eeprom != 0xffff && eeprom != 0x0000) {
886 reg_id = rt2x00_get_field16(eeprom, EEPROM_BBP_REG_ID); 905 reg_id = rt2x00_get_field16(eeprom, EEPROM_BBP_REG_ID);
887 value = rt2x00_get_field16(eeprom, EEPROM_BBP_VALUE); 906 value = rt2x00_get_field16(eeprom, EEPROM_BBP_VALUE);
888 DEBUG(rt2x00dev, "BBP: 0x%02x, value: 0x%02x.\n",
889 reg_id, value);
890 rt2500usb_bbp_write(rt2x00dev, reg_id, value); 907 rt2500usb_bbp_write(rt2x00dev, reg_id, value);
891 } 908 }
892 } 909 }
893 DEBUG(rt2x00dev, "...End initialization from EEPROM.\n");
894 910
895 return 0; 911 return 0;
896} 912}
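
The loop kept above replays BBP register writes stored in the EEPROM: each 16-bit word carries a register id and a value, and words reading 0x0000 or 0xffff are treated as unprogrammed and skipped (the DEBUG chatter around the loop is what the patch removes). A small sketch of the unpacking, assuming the value sits in the low byte and the register id in the high byte, as the FIELD16 definitions in rt2500usb.h suggest:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* Hypothetical EEPROM words: two valid entries and two blank slots. */
            const uint16_t eeprom[] = { 0x1d42, 0xffff, 0x3e07, 0x0000 };

            for (unsigned int i = 0; i < sizeof(eeprom) / sizeof(eeprom[0]); i++) {
                    uint16_t word = eeprom[i];

                    if (word == 0x0000 || word == 0xffff)
                            continue;                      /* unprogrammed slot */

                    unsigned int reg_id = word >> 8;       /* EEPROM_BBP_REG_ID (high byte) */
                    unsigned int value  = word & 0x00ff;   /* EEPROM_BBP_VALUE  (low byte)  */
                    printf("BBP write: reg 0x%02x <- 0x%02x\n", reg_id, value);
            }
            return 0;
    }
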
@@ -920,21 +936,11 @@ static int rt2500usb_enable_radio(struct rt2x00_dev *rt2x00dev)
920 return -EIO; 936 return -EIO;
921 } 937 }
922 938
923 /*
924 * Enable LED
925 */
926 rt2500usb_enable_led(rt2x00dev);
927
928 return 0; 939 return 0;
929} 940}
930 941
931static void rt2500usb_disable_radio(struct rt2x00_dev *rt2x00dev) 942static void rt2500usb_disable_radio(struct rt2x00_dev *rt2x00dev)
932{ 943{
933 /*
934 * Disable LED
935 */
936 rt2500usb_disable_led(rt2x00dev);
937
938 rt2500usb_register_write(rt2x00dev, MAC_CSR13, 0x2121); 944 rt2500usb_register_write(rt2x00dev, MAC_CSR13, 0x2121);
939 rt2500usb_register_write(rt2x00dev, MAC_CSR14, 0x2121); 945 rt2500usb_register_write(rt2x00dev, MAC_CSR14, 0x2121);
940 946
@@ -1027,10 +1033,10 @@ static int rt2500usb_set_device_state(struct rt2x00_dev *rt2x00dev,
1027 */ 1033 */
1028static void rt2500usb_write_tx_desc(struct rt2x00_dev *rt2x00dev, 1034static void rt2500usb_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1029 struct sk_buff *skb, 1035 struct sk_buff *skb,
1030 struct txdata_entry_desc *desc, 1036 struct txentry_desc *txdesc,
1031 struct ieee80211_tx_control *control) 1037 struct ieee80211_tx_control *control)
1032{ 1038{
1033 struct skb_desc *skbdesc = get_skb_desc(skb); 1039 struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
1034 __le32 *txd = skbdesc->desc; 1040 __le32 *txd = skbdesc->desc;
1035 u32 word; 1041 u32 word;
1036 1042
@@ -1039,31 +1045,31 @@ static void rt2500usb_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1039 */ 1045 */
1040 rt2x00_desc_read(txd, 1, &word); 1046 rt2x00_desc_read(txd, 1, &word);
1041 rt2x00_set_field32(&word, TXD_W1_IV_OFFSET, IEEE80211_HEADER); 1047 rt2x00_set_field32(&word, TXD_W1_IV_OFFSET, IEEE80211_HEADER);
1042 rt2x00_set_field32(&word, TXD_W1_AIFS, desc->aifs); 1048 rt2x00_set_field32(&word, TXD_W1_AIFS, txdesc->aifs);
1043 rt2x00_set_field32(&word, TXD_W1_CWMIN, desc->cw_min); 1049 rt2x00_set_field32(&word, TXD_W1_CWMIN, txdesc->cw_min);
1044 rt2x00_set_field32(&word, TXD_W1_CWMAX, desc->cw_max); 1050 rt2x00_set_field32(&word, TXD_W1_CWMAX, txdesc->cw_max);
1045 rt2x00_desc_write(txd, 1, word); 1051 rt2x00_desc_write(txd, 1, word);
1046 1052
1047 rt2x00_desc_read(txd, 2, &word); 1053 rt2x00_desc_read(txd, 2, &word);
1048 rt2x00_set_field32(&word, TXD_W2_PLCP_SIGNAL, desc->signal); 1054 rt2x00_set_field32(&word, TXD_W2_PLCP_SIGNAL, txdesc->signal);
1049 rt2x00_set_field32(&word, TXD_W2_PLCP_SERVICE, desc->service); 1055 rt2x00_set_field32(&word, TXD_W2_PLCP_SERVICE, txdesc->service);
1050 rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_LOW, desc->length_low); 1056 rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_LOW, txdesc->length_low);
1051 rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_HIGH, desc->length_high); 1057 rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_HIGH, txdesc->length_high);
1052 rt2x00_desc_write(txd, 2, word); 1058 rt2x00_desc_write(txd, 2, word);
1053 1059
1054 rt2x00_desc_read(txd, 0, &word); 1060 rt2x00_desc_read(txd, 0, &word);
1055 rt2x00_set_field32(&word, TXD_W0_RETRY_LIMIT, control->retry_limit); 1061 rt2x00_set_field32(&word, TXD_W0_RETRY_LIMIT, control->retry_limit);
1056 rt2x00_set_field32(&word, TXD_W0_MORE_FRAG, 1062 rt2x00_set_field32(&word, TXD_W0_MORE_FRAG,
1057 test_bit(ENTRY_TXD_MORE_FRAG, &desc->flags)); 1063 test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags));
1058 rt2x00_set_field32(&word, TXD_W0_ACK, 1064 rt2x00_set_field32(&word, TXD_W0_ACK,
1059 test_bit(ENTRY_TXD_ACK, &desc->flags)); 1065 test_bit(ENTRY_TXD_ACK, &txdesc->flags));
1060 rt2x00_set_field32(&word, TXD_W0_TIMESTAMP, 1066 rt2x00_set_field32(&word, TXD_W0_TIMESTAMP,
1061 test_bit(ENTRY_TXD_REQ_TIMESTAMP, &desc->flags)); 1067 test_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags));
1062 rt2x00_set_field32(&word, TXD_W0_OFDM, 1068 rt2x00_set_field32(&word, TXD_W0_OFDM,
1063 test_bit(ENTRY_TXD_OFDM_RATE, &desc->flags)); 1069 test_bit(ENTRY_TXD_OFDM_RATE, &txdesc->flags));
1064 rt2x00_set_field32(&word, TXD_W0_NEW_SEQ, 1070 rt2x00_set_field32(&word, TXD_W0_NEW_SEQ,
1065 !!(control->flags & IEEE80211_TXCTL_FIRST_FRAGMENT)); 1071 !!(control->flags & IEEE80211_TXCTL_FIRST_FRAGMENT));
1066 rt2x00_set_field32(&word, TXD_W0_IFS, desc->ifs); 1072 rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs);
1067 rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, skbdesc->data_len); 1073 rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, skbdesc->data_len);
1068 rt2x00_set_field32(&word, TXD_W0_CIPHER, CIPHER_NONE); 1074 rt2x00_set_field32(&word, TXD_W0_CIPHER, CIPHER_NONE);
1069 rt2x00_desc_write(txd, 0, word); 1075 rt2x00_desc_write(txd, 0, word);
@@ -1088,15 +1094,17 @@ static int rt2500usb_get_tx_data_len(struct rt2x00_dev *rt2x00dev,
1088 * TX data initialization 1094 * TX data initialization
1089 */ 1095 */
1090static void rt2500usb_kick_tx_queue(struct rt2x00_dev *rt2x00dev, 1096static void rt2500usb_kick_tx_queue(struct rt2x00_dev *rt2x00dev,
1091 unsigned int queue) 1097 const unsigned int queue)
1092{ 1098{
1093 u16 reg; 1099 u16 reg;
1094 1100
1095 if (queue != IEEE80211_TX_QUEUE_BEACON) 1101 if (queue != RT2X00_BCN_QUEUE_BEACON)
1096 return; 1102 return;
1097 1103
1098 rt2500usb_register_read(rt2x00dev, TXRX_CSR19, &reg); 1104 rt2500usb_register_read(rt2x00dev, TXRX_CSR19, &reg);
1099 if (!rt2x00_get_field16(reg, TXRX_CSR19_BEACON_GEN)) { 1105 if (!rt2x00_get_field16(reg, TXRX_CSR19_BEACON_GEN)) {
1106 rt2x00_set_field16(&reg, TXRX_CSR19_TSF_COUNT, 1);
1107 rt2x00_set_field16(&reg, TXRX_CSR19_TBCN, 1);
1100 rt2x00_set_field16(&reg, TXRX_CSR19_BEACON_GEN, 1); 1108 rt2x00_set_field16(&reg, TXRX_CSR19_BEACON_GEN, 1);
1101 /* 1109 /*
1102 * Beacon generation will fail initially. 1110 * Beacon generation will fail initially.
@@ -1114,42 +1122,68 @@ static void rt2500usb_kick_tx_queue(struct rt2x00_dev *rt2x00dev,
1114/* 1122/*
1115 * RX control handlers 1123 * RX control handlers
1116 */ 1124 */
1117static void rt2500usb_fill_rxdone(struct data_entry *entry, 1125static void rt2500usb_fill_rxdone(struct queue_entry *entry,
1118 struct rxdata_entry_desc *desc) 1126 struct rxdone_entry_desc *rxdesc)
1119{ 1127{
1120 struct skb_desc *skbdesc = get_skb_desc(entry->skb); 1128 struct queue_entry_priv_usb_rx *priv_rx = entry->priv_data;
1121 struct urb *urb = entry->priv; 1129 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
1122 __le32 *rxd = (__le32 *)(entry->skb->data + 1130 __le32 *rxd =
1123 (urb->actual_length - entry->ring->desc_size)); 1131 (__le32 *)(entry->skb->data +
1132 (priv_rx->urb->actual_length - entry->queue->desc_size));
1133 unsigned int offset = entry->queue->desc_size + 2;
1124 u32 word0; 1134 u32 word0;
1125 u32 word1; 1135 u32 word1;
1126 1136
1137 /*
1138 * Copy descriptor to the available headroom inside the skbuffer.
1139 */
1140 skb_push(entry->skb, offset);
1141 memcpy(entry->skb->data, rxd, entry->queue->desc_size);
1142 rxd = (__le32 *)entry->skb->data;
1143
1144 /*
1145 * The descriptor is now aligned to 4 bytes and thus it is
1146 * now safe to read it on all architectures.
1147 */
1127 rt2x00_desc_read(rxd, 0, &word0); 1148 rt2x00_desc_read(rxd, 0, &word0);
1128 rt2x00_desc_read(rxd, 1, &word1); 1149 rt2x00_desc_read(rxd, 1, &word1);
1129 1150
1130 desc->flags = 0; 1151 rxdesc->flags = 0;
1131 if (rt2x00_get_field32(word0, RXD_W0_CRC_ERROR)) 1152 if (rt2x00_get_field32(word0, RXD_W0_CRC_ERROR))
1132 desc->flags |= RX_FLAG_FAILED_FCS_CRC; 1153 rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC;
1133 if (rt2x00_get_field32(word0, RXD_W0_PHYSICAL_ERROR)) 1154 if (rt2x00_get_field32(word0, RXD_W0_PHYSICAL_ERROR))
1134 desc->flags |= RX_FLAG_FAILED_PLCP_CRC; 1155 rxdesc->flags |= RX_FLAG_FAILED_PLCP_CRC;
1135 1156
1136 /* 1157 /*
1137 * Obtain the status about this packet. 1158 * Obtain the status about this packet.
1159 * When the frame was received with an OFDM bitrate,
1160 * the signal is the PLCP value. If it was received with
1161 * a CCK bitrate, the signal is the rate in 100kbit/s.
1162 */
1163 rxdesc->signal = rt2x00_get_field32(word1, RXD_W1_SIGNAL);
1164 rxdesc->rssi = rt2x00_get_field32(word1, RXD_W1_RSSI) -
1165 entry->queue->rt2x00dev->rssi_offset;
1166 rxdesc->size = rt2x00_get_field32(word0, RXD_W0_DATABYTE_COUNT);
1167
1168 rxdesc->dev_flags = 0;
1169 if (rt2x00_get_field32(word0, RXD_W0_OFDM))
1170 rxdesc->dev_flags |= RXDONE_SIGNAL_PLCP;
1171 if (rt2x00_get_field32(word0, RXD_W0_MY_BSS))
1172 rxdesc->dev_flags |= RXDONE_MY_BSS;
1173
1174 /*
1175 * Adjust the skb memory window to the frame boundaries.
1138 */ 1176 */
1139 desc->signal = rt2x00_get_field32(word1, RXD_W1_SIGNAL); 1177 skb_pull(entry->skb, offset);
1140 desc->rssi = rt2x00_get_field32(word1, RXD_W1_RSSI) - 1178 skb_trim(entry->skb, rxdesc->size);
1141 entry->ring->rt2x00dev->rssi_offset;
1142 desc->ofdm = rt2x00_get_field32(word0, RXD_W0_OFDM);
1143 desc->size = rt2x00_get_field32(word0, RXD_W0_DATABYTE_COUNT);
1144 desc->my_bss = !!rt2x00_get_field32(word0, RXD_W0_MY_BSS);
1145 1179
1146 /* 1180 /*
1147 * Set descriptor and data pointer. 1181 * Set descriptor and data pointer.
1148 */ 1182 */
1149 skbdesc->desc = entry->skb->data + desc->size;
1150 skbdesc->desc_len = entry->ring->desc_size;
1151 skbdesc->data = entry->skb->data; 1183 skbdesc->data = entry->skb->data;
1152 skbdesc->data_len = desc->size; 1184 skbdesc->data_len = rxdesc->size;
1185 skbdesc->desc = rxd;
1186 skbdesc->desc_len = entry->queue->desc_size;
1153} 1187}
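
The rewritten fill_rxdone() above no longer reads the RX descriptor in place: on USB the descriptor trails the frame in the URB buffer and is not guaranteed to be 32-bit aligned, so it is first copied into the skb headroom (offset desc_size + 2, the extra two bytes presumably keeping the payload aligned as well) and only then read, after which skb_pull()/skb_trim() restore the frame boundaries. The same idea with an ordinary buffer; the sizes and layout here are illustrative only:

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    #define DESC_SIZE 16

    int main(void)
    {
            /* Pretend URB buffer: 5 bytes of frame data followed by the descriptor,
             * so the descriptor starts at an odd, unaligned offset. */
            uint8_t urb_buf[5 + DESC_SIZE];
            memset(urb_buf, 0xaa, sizeof(urb_buf));

            const uint8_t *unaligned_desc = urb_buf + 5;

            /* Copy into a properly aligned scratch area before reading 32-bit words. */
            uint32_t desc[DESC_SIZE / 4];
            memcpy(desc, unaligned_desc, DESC_SIZE);

            printf("word0 = 0x%08x (safe to read on any architecture)\n",
                   (unsigned)desc[0]);
            return 0;
    }
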
1154 1188
1155/* 1189/*
@@ -1157,10 +1191,10 @@ static void rt2500usb_fill_rxdone(struct data_entry *entry,
1157 */ 1191 */
1158static void rt2500usb_beacondone(struct urb *urb) 1192static void rt2500usb_beacondone(struct urb *urb)
1159{ 1193{
1160 struct data_entry *entry = (struct data_entry *)urb->context; 1194 struct queue_entry *entry = (struct queue_entry *)urb->context;
1161 struct data_ring *ring = entry->ring; 1195 struct queue_entry_priv_usb_bcn *priv_bcn = entry->priv_data;
1162 1196
1163 if (!test_bit(DEVICE_ENABLED_RADIO, &ring->rt2x00dev->flags)) 1197 if (!test_bit(DEVICE_ENABLED_RADIO, &entry->queue->rt2x00dev->flags))
1164 return; 1198 return;
1165 1199
1166 /* 1200 /*
@@ -1169,18 +1203,11 @@ static void rt2500usb_beacondone(struct urb *urb)
1169 * Otherwise we should free the sk_buffer, the device 1203 * Otherwise we should free the sk_buffer, the device
1170 * should be doing the rest of the work now. 1204 * should be doing the rest of the work now.
1171 */ 1205 */
1172 if (ring->index == 1) { 1206 if (priv_bcn->guardian_urb == urb) {
1173 rt2x00_ring_index_done_inc(ring); 1207 usb_submit_urb(priv_bcn->urb, GFP_ATOMIC);
1174 entry = rt2x00_get_data_entry(ring); 1208 } else if (priv_bcn->urb == urb) {
1175 usb_submit_urb(entry->priv, GFP_ATOMIC); 1209 dev_kfree_skb(entry->skb);
1176 rt2x00_ring_index_inc(ring); 1210 entry->skb = NULL;
1177 } else if (ring->index_done == 1) {
1178 entry = rt2x00_get_data_entry_done(ring);
1179 if (entry->skb) {
1180 dev_kfree_skb(entry->skb);
1181 entry->skb = NULL;
1182 }
1183 rt2x00_ring_index_done_inc(ring);
1184 } 1211 }
1185} 1212}
1186 1213
@@ -1191,6 +1218,7 @@ static int rt2500usb_validate_eeprom(struct rt2x00_dev *rt2x00dev)
1191{ 1218{
1192 u16 word; 1219 u16 word;
1193 u8 *mac; 1220 u8 *mac;
1221 u8 bbp;
1194 1222
1195 rt2x00usb_eeprom_read(rt2x00dev, rt2x00dev->eeprom, EEPROM_SIZE); 1223 rt2x00usb_eeprom_read(rt2x00dev, rt2x00dev->eeprom, EEPROM_SIZE);
1196 1224
@@ -1245,9 +1273,17 @@ static int rt2500usb_validate_eeprom(struct rt2x00_dev *rt2x00dev)
1245 EEPROM(rt2x00dev, "BBPtune: 0x%04x\n", word); 1273 EEPROM(rt2x00dev, "BBPtune: 0x%04x\n", word);
1246 } 1274 }
1247 1275
1276 /*
1277 * Switch lower vgc bound to current BBP R17 value,
1278 * lower the value a bit for better quality.
1279 */
1280 rt2500usb_bbp_read(rt2x00dev, 17, &bbp);
1281 bbp -= 6;
1282
1248 rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_VGC, &word); 1283 rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_VGC, &word);
1249 if (word == 0xffff) { 1284 if (word == 0xffff) {
1250 rt2x00_set_field16(&word, EEPROM_BBPTUNE_VGCUPPER, 0x40); 1285 rt2x00_set_field16(&word, EEPROM_BBPTUNE_VGCUPPER, 0x40);
1286 rt2x00_set_field16(&word, EEPROM_BBPTUNE_VGCLOWER, bbp);
1251 rt2x00_eeprom_write(rt2x00dev, EEPROM_BBPTUNE_VGC, word); 1287 rt2x00_eeprom_write(rt2x00dev, EEPROM_BBPTUNE_VGC, word);
1252 EEPROM(rt2x00dev, "BBPtune vgc: 0x%04x\n", word); 1288 EEPROM(rt2x00dev, "BBPtune vgc: 0x%04x\n", word);
1253 } 1289 }
@@ -1258,6 +1294,9 @@ static int rt2500usb_validate_eeprom(struct rt2x00_dev *rt2x00dev)
1258 rt2x00_set_field16(&word, EEPROM_BBPTUNE_R17_HIGH, 0x41); 1294 rt2x00_set_field16(&word, EEPROM_BBPTUNE_R17_HIGH, 0x41);
1259 rt2x00_eeprom_write(rt2x00dev, EEPROM_BBPTUNE_R17, word); 1295 rt2x00_eeprom_write(rt2x00dev, EEPROM_BBPTUNE_R17, word);
1260 EEPROM(rt2x00dev, "BBPtune r17: 0x%04x\n", word); 1296 EEPROM(rt2x00dev, "BBPtune r17: 0x%04x\n", word);
1297 } else {
1298 rt2x00_set_field16(&word, EEPROM_BBPTUNE_VGCLOWER, bbp);
1299 rt2x00_eeprom_write(rt2x00dev, EEPROM_BBPTUNE_VGC, word);
1261 } 1300 }
1262 1301
1263 rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_R24, &word); 1302 rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_R24, &word);
@@ -1342,8 +1381,27 @@ static int rt2500usb_init_eeprom(struct rt2x00_dev *rt2x00dev)
1342 /* 1381 /*
1343 * Store led mode, for correct led behaviour. 1382 * Store led mode, for correct led behaviour.
1344 */ 1383 */
1345 rt2x00dev->led_mode = 1384#ifdef CONFIG_RT2500USB_LEDS
1346 rt2x00_get_field16(eeprom, EEPROM_ANTENNA_LED_MODE); 1385 value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_LED_MODE);
1386
1387 rt2x00dev->led_radio.rt2x00dev = rt2x00dev;
1388 rt2x00dev->led_radio.type = LED_TYPE_RADIO;
1389 rt2x00dev->led_radio.led_dev.brightness_set =
1390 rt2500usb_brightness_set;
1391 rt2x00dev->led_radio.led_dev.blink_set =
1392 rt2500usb_blink_set;
1393 rt2x00dev->led_radio.flags = LED_INITIALIZED;
1394
1395 if (value == LED_MODE_TXRX_ACTIVITY) {
1396 rt2x00dev->led_qual.rt2x00dev = rt2x00dev;
1397 rt2x00dev->led_radio.type = LED_TYPE_ACTIVITY;
1398 rt2x00dev->led_qual.led_dev.brightness_set =
1399 rt2500usb_brightness_set;
1400 rt2x00dev->led_qual.led_dev.blink_set =
1401 rt2500usb_blink_set;
1402 rt2x00dev->led_qual.flags = LED_INITIALIZED;
1403 }
1404#endif /* CONFIG_RT2500USB_LEDS */
1347 1405
1348 /* 1406 /*
1349 * Check if the BBP tuning should be disabled. 1407 * Check if the BBP tuning should be disabled.
@@ -1550,8 +1608,8 @@ static void rt2500usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
1550 /* 1608 /*
1551 * Initialize hw_mode information. 1609 * Initialize hw_mode information.
1552 */ 1610 */
1553 spec->num_modes = 2; 1611 spec->supported_bands = SUPPORT_BAND_2GHZ;
1554 spec->num_rates = 12; 1612 spec->supported_rates = SUPPORT_RATE_CCK | SUPPORT_RATE_OFDM;
1555 spec->tx_power_a = NULL; 1613 spec->tx_power_a = NULL;
1556 spec->tx_power_bg = txpower; 1614 spec->tx_power_bg = txpower;
1557 spec->tx_power_default = DEFAULT_TXPOWER; 1615 spec->tx_power_default = DEFAULT_TXPOWER;
@@ -1572,9 +1630,9 @@ static void rt2500usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
1572 spec->num_channels = ARRAY_SIZE(rf_vals_bg_2525e); 1630 spec->num_channels = ARRAY_SIZE(rf_vals_bg_2525e);
1573 spec->channels = rf_vals_bg_2525e; 1631 spec->channels = rf_vals_bg_2525e;
1574 } else if (rt2x00_rf(&rt2x00dev->chip, RF5222)) { 1632 } else if (rt2x00_rf(&rt2x00dev->chip, RF5222)) {
1633 spec->supported_bands |= SUPPORT_BAND_5GHZ;
1575 spec->num_channels = ARRAY_SIZE(rf_vals_5222); 1634 spec->num_channels = ARRAY_SIZE(rf_vals_5222);
1576 spec->channels = rf_vals_5222; 1635 spec->channels = rf_vals_5222;
1577 spec->num_modes = 3;
1578 } 1636 }
1579} 1637}
1580 1638
@@ -1599,9 +1657,11 @@ static int rt2500usb_probe_hw(struct rt2x00_dev *rt2x00dev)
1599 rt2500usb_probe_hw_mode(rt2x00dev); 1657 rt2500usb_probe_hw_mode(rt2x00dev);
1600 1658
1601 /* 1659 /*
1602 * This device requires the beacon ring 1660 * This device requires the atim queue
1603 */ 1661 */
1604 __set_bit(DRIVER_REQUIRE_BEACON_RING, &rt2x00dev->flags); 1662 __set_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags);
1663 __set_bit(DRIVER_REQUIRE_BEACON_GUARD, &rt2x00dev->flags);
1664 __set_bit(DRIVER_REQUIRE_SCHEDULED, &rt2x00dev->flags);
1605 1665
1606 /* 1666 /*
1607 * Set the rssi offset. 1667 * Set the rssi offset.
@@ -1614,125 +1674,58 @@ static int rt2500usb_probe_hw(struct rt2x00_dev *rt2x00dev)
1614/* 1674/*
1615 * IEEE80211 stack callback functions. 1675 * IEEE80211 stack callback functions.
1616 */ 1676 */
1617static void rt2500usb_configure_filter(struct ieee80211_hw *hw,
1618 unsigned int changed_flags,
1619 unsigned int *total_flags,
1620 int mc_count,
1621 struct dev_addr_list *mc_list)
1622{
1623 struct rt2x00_dev *rt2x00dev = hw->priv;
1624 u16 reg;
1625
1626 /*
1627 * Mask off any flags we are going to ignore from
1628 * the total_flags field.
1629 */
1630 *total_flags &=
1631 FIF_ALLMULTI |
1632 FIF_FCSFAIL |
1633 FIF_PLCPFAIL |
1634 FIF_CONTROL |
1635 FIF_OTHER_BSS |
1636 FIF_PROMISC_IN_BSS;
1637
1638 /*
1639 * Apply some rules to the filters:
1640 * - Some filters imply different filters to be set.
1641 * - Some things we can't filter out at all.
1642 */
1643 if (mc_count)
1644 *total_flags |= FIF_ALLMULTI;
1645 if (*total_flags & FIF_OTHER_BSS ||
1646 *total_flags & FIF_PROMISC_IN_BSS)
1647 *total_flags |= FIF_PROMISC_IN_BSS | FIF_OTHER_BSS;
1648
1649 /*
1650 * Check if there is any work left for us.
1651 */
1652 if (rt2x00dev->packet_filter == *total_flags)
1653 return;
1654 rt2x00dev->packet_filter = *total_flags;
1655
1656 /*
1657 * When in atomic context, reschedule and let rt2x00lib
1658 * call this function again.
1659 */
1660 if (in_atomic()) {
1661 queue_work(rt2x00dev->hw->workqueue, &rt2x00dev->filter_work);
1662 return;
1663 }
1664
1665 /*
1666 * Start configuration steps.
1667 * Note that the version error will always be dropped
1668 * and broadcast frames will always be accepted since
1669 * there is no filter for it at this time.
1670 */
1671 rt2500usb_register_read(rt2x00dev, TXRX_CSR2, &reg);
1672 rt2x00_set_field16(&reg, TXRX_CSR2_DROP_CRC,
1673 !(*total_flags & FIF_FCSFAIL));
1674 rt2x00_set_field16(&reg, TXRX_CSR2_DROP_PHYSICAL,
1675 !(*total_flags & FIF_PLCPFAIL));
1676 rt2x00_set_field16(&reg, TXRX_CSR2_DROP_CONTROL,
1677 !(*total_flags & FIF_CONTROL));
1678 rt2x00_set_field16(&reg, TXRX_CSR2_DROP_NOT_TO_ME,
1679 !(*total_flags & FIF_PROMISC_IN_BSS));
1680 rt2x00_set_field16(&reg, TXRX_CSR2_DROP_TODS,
1681 !(*total_flags & FIF_PROMISC_IN_BSS));
1682 rt2x00_set_field16(&reg, TXRX_CSR2_DROP_VERSION_ERROR, 1);
1683 rt2x00_set_field16(&reg, TXRX_CSR2_DROP_MULTICAST,
1684 !(*total_flags & FIF_ALLMULTI));
1685 rt2x00_set_field16(&reg, TXRX_CSR2_DROP_BROADCAST, 0);
1686 rt2500usb_register_write(rt2x00dev, TXRX_CSR2, reg);
1687}
1688
1689static int rt2500usb_beacon_update(struct ieee80211_hw *hw, 1677static int rt2500usb_beacon_update(struct ieee80211_hw *hw,
1690 struct sk_buff *skb, 1678 struct sk_buff *skb,
1691 struct ieee80211_tx_control *control) 1679 struct ieee80211_tx_control *control)
1692{ 1680{
1693 struct rt2x00_dev *rt2x00dev = hw->priv; 1681 struct rt2x00_dev *rt2x00dev = hw->priv;
1694 struct usb_device *usb_dev = 1682 struct usb_device *usb_dev = rt2x00dev_usb_dev(rt2x00dev);
1695 interface_to_usbdev(rt2x00dev_usb(rt2x00dev)); 1683 struct rt2x00_intf *intf = vif_to_intf(control->vif);
1696 struct skb_desc *desc; 1684 struct queue_entry_priv_usb_bcn *priv_bcn;
1697 struct data_ring *ring; 1685 struct skb_frame_desc *skbdesc;
1698 struct data_entry *beacon;
1699 struct data_entry *guardian;
1700 int pipe = usb_sndbulkpipe(usb_dev, 1); 1686 int pipe = usb_sndbulkpipe(usb_dev, 1);
1701 int length; 1687 int length;
1688 u16 reg;
1689
1690 if (unlikely(!intf->beacon))
1691 return -ENOBUFS;
1692
1693 priv_bcn = intf->beacon->priv_data;
1702 1694
1703 /* 1695 /*
1704 * Just in case the ieee80211 doesn't set this, 1696 * Add the descriptor in front of the skb.
1705 * but we need this queue set for the descriptor
1706 * initialization.
1707 */ 1697 */
1708 control->queue = IEEE80211_TX_QUEUE_BEACON; 1698 skb_push(skb, intf->beacon->queue->desc_size);
1709 ring = rt2x00lib_get_ring(rt2x00dev, control->queue); 1699 memset(skb->data, 0, intf->beacon->queue->desc_size);
1710 1700
1711 /* 1701 /*
1712 * Obtain 2 entries, one for the guardian byte, 1702 * Fill in skb descriptor
1713 * the second for the actual beacon.
1714 */ 1703 */
1715 guardian = rt2x00_get_data_entry(ring); 1704 skbdesc = get_skb_frame_desc(skb);
1716 rt2x00_ring_index_inc(ring); 1705 memset(skbdesc, 0, sizeof(*skbdesc));
1717 beacon = rt2x00_get_data_entry(ring); 1706 skbdesc->flags |= FRAME_DESC_DRIVER_GENERATED;
1707 skbdesc->data = skb->data + intf->beacon->queue->desc_size;
1708 skbdesc->data_len = skb->len - intf->beacon->queue->desc_size;
1709 skbdesc->desc = skb->data;
1710 skbdesc->desc_len = intf->beacon->queue->desc_size;
1711 skbdesc->entry = intf->beacon;
1718 1712
1719 /* 1713 /*
1720 * Add the descriptor in front of the skb. 1714 * Disable beaconing while we are reloading the beacon data,
1715 * otherwise we might be sending out invalid data.
1721 */ 1716 */
1722 skb_push(skb, ring->desc_size); 1717 rt2500usb_register_read(rt2x00dev, TXRX_CSR19, &reg);
1723 memset(skb->data, 0, ring->desc_size); 1718 rt2x00_set_field16(&reg, TXRX_CSR19_TSF_COUNT, 0);
1719 rt2x00_set_field16(&reg, TXRX_CSR19_TBCN, 0);
1720 rt2x00_set_field16(&reg, TXRX_CSR19_BEACON_GEN, 0);
1721 rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg);
1724 1722
1725 /* 1723 /*
1726 * Fill in skb descriptor 1724 * mac80211 doesn't provide the control->queue variable
1725 * for beacons. Set our own queue identification so
1726 * it can be used during descriptor initialization.
1727 */ 1727 */
1728 desc = get_skb_desc(skb); 1728 control->queue = RT2X00_BCN_QUEUE_BEACON;
1729 desc->desc_len = ring->desc_size;
1730 desc->data_len = skb->len - ring->desc_size;
1731 desc->desc = skb->data;
1732 desc->data = skb->data + ring->desc_size;
1733 desc->ring = ring;
1734 desc->entry = beacon;
1735
1736 rt2x00lib_write_tx_desc(rt2x00dev, skb, control); 1729 rt2x00lib_write_tx_desc(rt2x00dev, skb, control);
1737 1730
1738 /* 1731 /*
@@ -1742,27 +1735,29 @@ static int rt2500usb_beacon_update(struct ieee80211_hw *hw,
1742 */ 1735 */
1743 length = rt2500usb_get_tx_data_len(rt2x00dev, skb); 1736 length = rt2500usb_get_tx_data_len(rt2x00dev, skb);
1744 1737
1745 usb_fill_bulk_urb(beacon->priv, usb_dev, pipe, 1738 usb_fill_bulk_urb(priv_bcn->urb, usb_dev, pipe,
1746 skb->data, length, rt2500usb_beacondone, beacon); 1739 skb->data, length, rt2500usb_beacondone,
1740 intf->beacon);
1747 1741
1748 /* 1742 /*
1749 * Second we need to create the guardian byte. 1743 * Second we need to create the guardian byte.
1750 * We only need a single byte, so lets recycle 1744 * We only need a single byte, so lets recycle
1751 * the 'flags' field we are not using for beacons. 1745 * the 'flags' field we are not using for beacons.
1752 */ 1746 */
1753 guardian->flags = 0; 1747 priv_bcn->guardian_data = 0;
1754 usb_fill_bulk_urb(guardian->priv, usb_dev, pipe, 1748 usb_fill_bulk_urb(priv_bcn->guardian_urb, usb_dev, pipe,
1755 &guardian->flags, 1, rt2500usb_beacondone, guardian); 1749 &priv_bcn->guardian_data, 1, rt2500usb_beacondone,
1750 intf->beacon);
1756 1751
1757 /* 1752 /*
1758 * Send out the guardian byte. 1753 * Send out the guardian byte.
1759 */ 1754 */
1760 usb_submit_urb(guardian->priv, GFP_ATOMIC); 1755 usb_submit_urb(priv_bcn->guardian_urb, GFP_ATOMIC);
1761 1756
1762 /* 1757 /*
1763 * Enable beacon generation. 1758 * Enable beacon generation.
1764 */ 1759 */
1765 rt2500usb_kick_tx_queue(rt2x00dev, IEEE80211_TX_QUEUE_BEACON); 1760 rt2500usb_kick_tx_queue(rt2x00dev, control->queue);
1766 1761
1767 return 0; 1762 return 0;
1768} 1763}
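
Two details in the reworked beacon_update() above are easy to miss: TSF_COUNT, TBCN and BEACON_GEN in TXRX_CSR19 are cleared before the new beacon is uploaded, so the device never beacons from half-written data, and the single guardian byte is still sent ahead of the real beacon URB, with rt2500usb_beacondone() submitting the beacon URB once the guardian completes. A toy model of the disable/upload/re-enable sequence with a fake 16-bit register; the bit positions are made up:

    #include <stdint.h>
    #include <stdio.h>

    #define TSF_COUNT   (1u << 0)
    #define TBCN        (1u << 1)
    #define BEACON_GEN  (1u << 2)

    static uint16_t txrx_csr19;   /* stand-in for the device register */

    static void upload_beacon(const char *payload)
    {
            printf("uploading beacon \"%s\" while CSR19=0x%04x\n",
                   payload, (unsigned)txrx_csr19);
    }

    int main(void)
    {
            txrx_csr19 = TSF_COUNT | TBCN | BEACON_GEN;      /* beaconing active */

            txrx_csr19 &= ~(TSF_COUNT | TBCN | BEACON_GEN);  /* quiesce first */
            upload_beacon("new beacon frame");
            txrx_csr19 |= TSF_COUNT | TBCN | BEACON_GEN;     /* kick the queue again */

            printf("CSR19 after kick: 0x%04x\n", (unsigned)txrx_csr19);
            return 0;
    }
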
@@ -1775,7 +1770,7 @@ static const struct ieee80211_ops rt2500usb_mac80211_ops = {
1775 .remove_interface = rt2x00mac_remove_interface, 1770 .remove_interface = rt2x00mac_remove_interface,
1776 .config = rt2x00mac_config, 1771 .config = rt2x00mac_config,
1777 .config_interface = rt2x00mac_config_interface, 1772 .config_interface = rt2x00mac_config_interface,
1778 .configure_filter = rt2500usb_configure_filter, 1773 .configure_filter = rt2x00mac_configure_filter,
1779 .get_stats = rt2x00mac_get_stats, 1774 .get_stats = rt2x00mac_get_stats,
1780 .bss_info_changed = rt2x00mac_bss_info_changed, 1775 .bss_info_changed = rt2x00mac_bss_info_changed,
1781 .conf_tx = rt2x00mac_conf_tx, 1776 .conf_tx = rt2x00mac_conf_tx,
@@ -1798,19 +1793,50 @@ static const struct rt2x00lib_ops rt2500usb_rt2x00_ops = {
1798 .get_tx_data_len = rt2500usb_get_tx_data_len, 1793 .get_tx_data_len = rt2500usb_get_tx_data_len,
1799 .kick_tx_queue = rt2500usb_kick_tx_queue, 1794 .kick_tx_queue = rt2500usb_kick_tx_queue,
1800 .fill_rxdone = rt2500usb_fill_rxdone, 1795 .fill_rxdone = rt2500usb_fill_rxdone,
1801 .config_mac_addr = rt2500usb_config_mac_addr, 1796 .config_filter = rt2500usb_config_filter,
1802 .config_bssid = rt2500usb_config_bssid, 1797 .config_intf = rt2500usb_config_intf,
1803 .config_type = rt2500usb_config_type, 1798 .config_erp = rt2500usb_config_erp,
1804 .config_preamble = rt2500usb_config_preamble,
1805 .config = rt2500usb_config, 1799 .config = rt2500usb_config,
1806}; 1800};
1807 1801
1802static const struct data_queue_desc rt2500usb_queue_rx = {
1803 .entry_num = RX_ENTRIES,
1804 .data_size = DATA_FRAME_SIZE,
1805 .desc_size = RXD_DESC_SIZE,
1806 .priv_size = sizeof(struct queue_entry_priv_usb_rx),
1807};
1808
1809static const struct data_queue_desc rt2500usb_queue_tx = {
1810 .entry_num = TX_ENTRIES,
1811 .data_size = DATA_FRAME_SIZE,
1812 .desc_size = TXD_DESC_SIZE,
1813 .priv_size = sizeof(struct queue_entry_priv_usb_tx),
1814};
1815
1816static const struct data_queue_desc rt2500usb_queue_bcn = {
1817 .entry_num = BEACON_ENTRIES,
1818 .data_size = MGMT_FRAME_SIZE,
1819 .desc_size = TXD_DESC_SIZE,
1820 .priv_size = sizeof(struct queue_entry_priv_usb_bcn),
1821};
1822
1823static const struct data_queue_desc rt2500usb_queue_atim = {
1824 .entry_num = ATIM_ENTRIES,
1825 .data_size = DATA_FRAME_SIZE,
1826 .desc_size = TXD_DESC_SIZE,
1827 .priv_size = sizeof(struct queue_entry_priv_usb_tx),
1828};
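
The four data_queue_desc tables above replace the old rxd_size/txd_size fields in rt2x00_ops: each queue now advertises its own entry count, frame size, descriptor size and per-entry private size, which rt2x00lib presumably uses to size its allocations. A rough illustration of consuming such a descriptor; the struct mirrors the fields in the diff, but the footprint formula and the 16/64/96-byte sizes are invented for the example (the 12/2/2432/256 values are the RX_ENTRIES, BEACON_ENTRIES, DATA_FRAME_SIZE and MGMT_FRAME_SIZE defines removed from rt2x00.h further down):

    #include <stdio.h>
    #include <stddef.h>

    struct data_queue_desc {
            unsigned int entry_num;
            unsigned int data_size;
            unsigned int desc_size;
            size_t priv_size;
    };

    static size_t queue_footprint(const struct data_queue_desc *d)
    {
            /* Illustrative only: per-entry frame buffer + descriptor + driver data. */
            return (size_t)d->entry_num * (d->data_size + d->desc_size + d->priv_size);
    }

    int main(void)
    {
            const struct data_queue_desc rx  = { 12, 2432, 16, 64 };
            const struct data_queue_desc bcn = { 2, 256, 16, 96 };

            printf("rx  queue needs roughly %zu bytes\n", queue_footprint(&rx));
            printf("bcn queue needs roughly %zu bytes\n", queue_footprint(&bcn));
            return 0;
    }
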
1829
1808static const struct rt2x00_ops rt2500usb_ops = { 1830static const struct rt2x00_ops rt2500usb_ops = {
1809 .name = KBUILD_MODNAME, 1831 .name = KBUILD_MODNAME,
1810 .rxd_size = RXD_DESC_SIZE, 1832 .max_sta_intf = 1,
1811 .txd_size = TXD_DESC_SIZE, 1833 .max_ap_intf = 1,
1812 .eeprom_size = EEPROM_SIZE, 1834 .eeprom_size = EEPROM_SIZE,
1813 .rf_size = RF_SIZE, 1835 .rf_size = RF_SIZE,
1836 .rx = &rt2500usb_queue_rx,
1837 .tx = &rt2500usb_queue_tx,
1838 .bcn = &rt2500usb_queue_bcn,
1839 .atim = &rt2500usb_queue_atim,
1814 .lib = &rt2500usb_rt2x00_ops, 1840 .lib = &rt2500usb_rt2x00_ops,
1815 .hw = &rt2500usb_mac80211_ops, 1841 .hw = &rt2500usb_mac80211_ops,
1816#ifdef CONFIG_RT2X00_LIB_DEBUGFS 1842#ifdef CONFIG_RT2X00_LIB_DEBUGFS
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.h b/drivers/net/wireless/rt2x00/rt2500usb.h
index 9e0433722e3d..a37a068d0c71 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.h
+++ b/drivers/net/wireless/rt2x00/rt2500usb.h
@@ -1,5 +1,5 @@
1/* 1/*
2 Copyright (C) 2004 - 2007 rt2x00 SourceForge Project 2 Copyright (C) 2004 - 2008 rt2x00 SourceForge Project
3 <http://rt2x00.serialmonkey.com> 3 <http://rt2x00.serialmonkey.com>
4 4
5 This program is free software; you can redistribute it and/or modify 5 This program is free software; you can redistribute it and/or modify
@@ -135,7 +135,7 @@
135 * Misc MAC_CSR registers. 135 * Misc MAC_CSR registers.
136 * MAC_CSR9: Timer control. 136 * MAC_CSR9: Timer control.
137 * MAC_CSR10: Slot time. 137 * MAC_CSR10: Slot time.
138 * MAC_CSR11: IFS. 138 * MAC_CSR11: SIFS.
139 * MAC_CSR12: EIFS. 139 * MAC_CSR12: EIFS.
140 * MAC_CSR13: Power mode0. 140 * MAC_CSR13: Power mode0.
141 * MAC_CSR14: Power mode1. 141 * MAC_CSR14: Power mode1.
@@ -686,6 +686,7 @@
686 */ 686 */
687#define EEPROM_BBPTUNE_VGC 0x0034 687#define EEPROM_BBPTUNE_VGC 0x0034
688#define EEPROM_BBPTUNE_VGCUPPER FIELD16(0x00ff) 688#define EEPROM_BBPTUNE_VGCUPPER FIELD16(0x00ff)
689#define EEPROM_BBPTUNE_VGCLOWER FIELD16(0xff00)
689 690
690/* 691/*
691 * EEPROM BBP R17 Tuning. 692 * EEPROM BBP R17 Tuning.
@@ -786,8 +787,8 @@
786#define RXD_W3_EIV FIELD32(0xffffffff) 787#define RXD_W3_EIV FIELD32(0xffffffff)
787 788
788/* 789/*
789 * Macros for converting txpower from EEPROM to dscape value 790 * Macros for converting txpower from EEPROM to mac80211 value
790 * and from dscape value to register value. 791 * and from mac80211 value to register value.
791 */ 792 */
792#define MIN_TXPOWER 0 793#define MIN_TXPOWER 0
793#define MAX_TXPOWER 31 794#define MAX_TXPOWER 31
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index 6c725422af5a..57bdc153952f 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -1,5 +1,5 @@
1/* 1/*
2 Copyright (C) 2004 - 2007 rt2x00 SourceForge Project 2 Copyright (C) 2004 - 2008 rt2x00 SourceForge Project
3 <http://rt2x00.serialmonkey.com> 3 <http://rt2x00.serialmonkey.com>
4 4
5 This program is free software; you can redistribute it and/or modify 5 This program is free software; you can redistribute it and/or modify
@@ -27,23 +27,24 @@
27#define RT2X00_H 27#define RT2X00_H
28 28
29#include <linux/bitops.h> 29#include <linux/bitops.h>
30#include <linux/prefetch.h>
31#include <linux/skbuff.h> 30#include <linux/skbuff.h>
32#include <linux/workqueue.h> 31#include <linux/workqueue.h>
33#include <linux/firmware.h> 32#include <linux/firmware.h>
33#include <linux/leds.h>
34#include <linux/mutex.h> 34#include <linux/mutex.h>
35#include <linux/etherdevice.h> 35#include <linux/etherdevice.h>
36 36
37#include <net/mac80211.h> 37#include <net/mac80211.h>
38 38
39#include "rt2x00debug.h" 39#include "rt2x00debug.h"
40#include "rt2x00leds.h"
40#include "rt2x00reg.h" 41#include "rt2x00reg.h"
41#include "rt2x00ring.h" 42#include "rt2x00queue.h"
42 43
43/* 44/*
44 * Module information. 45 * Module information.
45 */ 46 */
46#define DRV_VERSION "2.0.14" 47#define DRV_VERSION "2.1.4"
47#define DRV_PROJECT "http://rt2x00.serialmonkey.com" 48#define DRV_PROJECT "http://rt2x00.serialmonkey.com"
48 49
49/* 50/*
@@ -91,26 +92,6 @@
91 DEBUG_PRINTK(__dev, KERN_DEBUG, "EEPROM recovery", __msg, ##__args) 92 DEBUG_PRINTK(__dev, KERN_DEBUG, "EEPROM recovery", __msg, ##__args)
92 93
93/* 94/*
94 * Ring sizes.
95 * Ralink PCI devices demand the Frame size to be a multiple of 128 bytes.
96 * DATA_FRAME_SIZE is used for TX, RX, ATIM and PRIO rings.
97 * MGMT_FRAME_SIZE is used for the BEACON ring.
98 */
99#define DATA_FRAME_SIZE 2432
100#define MGMT_FRAME_SIZE 256
101
102/*
103 * Number of entries in a packet ring.
104 * PCI devices only need 1 Beacon entry,
105 * but USB devices require a second because they
106 * have to send a Guardian byte first.
107 */
108#define RX_ENTRIES 12
109#define TX_ENTRIES 12
110#define ATIM_ENTRIES 1
111#define BEACON_ENTRIES 2
112
113/*
114 * Standard timing and size defines. 95 * Standard timing and size defines.
115 * These values should follow the ieee80211 specifications. 96 * These values should follow the ieee80211 specifications.
116 */ 97 */
@@ -364,20 +345,22 @@ static inline int rt2x00_update_ant_rssi(struct link *link, int rssi)
364 345
365/* 346/*
366 * Interface structure 347 * Interface structure
367 * Configuration details about the current interface. 348 * Per interface configuration details, this structure
349 * is allocated as the private data for ieee80211_vif.
368 */ 350 */
369struct interface { 351struct rt2x00_intf {
370 /* 352 /*
371 * Interface identification. The value is assigned 353 * All fields within the rt2x00_intf structure
372 * to us by the 80211 stack, and is used to request 354 * must be protected with a spinlock.
373 * new beacons.
374 */ 355 */
375 struct ieee80211_vif *id; 356 spinlock_t lock;
376 357
377 /* 358 /*
378 * Current working type (IEEE80211_IF_TYPE_*). 359 * BSS configuration. Copied from the structure
360 * passed to us through the bss_info_changed()
361 * callback function.
379 */ 362 */
380 int type; 363 struct ieee80211_bss_conf conf;
381 364
382 /* 365 /*
383 * MAC of the device. 366 * MAC of the device.
@@ -388,42 +371,60 @@ struct interface {
388 * BBSID of the AP to associate with. 371 * BBSID of the AP to associate with.
389 */ 372 */
390 u8 bssid[ETH_ALEN]; 373 u8 bssid[ETH_ALEN];
391};
392 374
393static inline int is_interface_present(struct interface *intf) 375 /*
394{ 376 * Entry in the beacon queue which belongs to
395 return !!intf->id; 377 * this interface. Each interface has its own
396} 378 * dedicated beacon entry.
379 */
380 struct queue_entry *beacon;
381
382 /*
383 * Actions that needed rescheduling.
384 */
385 unsigned int delayed_flags;
386#define DELAYED_UPDATE_BEACON 0x00000001
387#define DELAYED_CONFIG_ERP 0x00000002
388#define DELAYED_LED_ASSOC 0x00000004
389};
397 390
398static inline int is_interface_type(struct interface *intf, int type) 391static inline struct rt2x00_intf* vif_to_intf(struct ieee80211_vif *vif)
399{ 392{
400 return intf->type == type; 393 return (struct rt2x00_intf *)vif->drv_priv;
401} 394}
402 395
403/* 396/**
397 * struct hw_mode_spec: Hardware specifications structure
398 *
404 * Details about the supported modes, rates and channels 399 * Details about the supported modes, rates and channels
405 * of a particular chipset. This is used by rt2x00lib 400 * of a particular chipset. This is used by rt2x00lib
406 * to build the ieee80211_hw_mode array for mac80211. 401 * to build the ieee80211_hw_mode array for mac80211.
402 *
403 * @supported_bands: Bitmask containing the supported bands (2.4GHz, 5.2GHz).
404 * @supported_rates: Rate types which are supported (CCK, OFDM).
405 * @num_channels: Number of supported channels. This is used as array size
406 * for @tx_power_a, @tx_power_bg and @channels.
407 * @channels: Device/chipset specific channel values (see &struct rf_channel).
408 * @tx_power_a: TX power values for all 5.2GHz channels (may be NULL).
409 * @tx_power_bg: TX power values for all 2.4GHz channels (may be NULL).
410 * @tx_power_default: Default TX power value to use when either
411 * @tx_power_a or @tx_power_bg is missing.
407 */ 412 */
408struct hw_mode_spec { 413struct hw_mode_spec {
409 /* 414 unsigned int supported_bands;
410 * Number of modes, rates and channels. 415#define SUPPORT_BAND_2GHZ 0x00000001
411 */ 416#define SUPPORT_BAND_5GHZ 0x00000002
412 int num_modes; 417
413 int num_rates; 418 unsigned int supported_rates;
414 int num_channels; 419#define SUPPORT_RATE_CCK 0x00000001
420#define SUPPORT_RATE_OFDM 0x00000002
421
422 unsigned int num_channels;
423 const struct rf_channel *channels;
415 424
416 /*
417 * txpower values.
418 */
419 const u8 *tx_power_a; 425 const u8 *tx_power_a;
420 const u8 *tx_power_bg; 426 const u8 *tx_power_bg;
421 u8 tx_power_default; 427 u8 tx_power_default;
422
423 /*
424 * Device/chipset specific value.
425 */
426 const struct rf_channel *channels;
427}; 428};
428 429
429/* 430/*
@@ -439,10 +440,10 @@ struct rt2x00lib_conf {
439 440
440 struct antenna_setup ant; 441 struct antenna_setup ant;
441 442
442 int phymode; 443 enum ieee80211_band band;
443 444
444 int basic_rates; 445 u32 basic_rates;
445 int slot_time; 446 u32 slot_time;
446 447
447 short sifs; 448 short sifs;
448 short pifs; 449 short pifs;
@@ -451,6 +452,47 @@ struct rt2x00lib_conf {
451}; 452};
452 453
453/* 454/*
455 * Configuration structure for erp settings.
456 */
457struct rt2x00lib_erp {
458 int short_preamble;
459
460 int ack_timeout;
461 int ack_consume_time;
462};
463
464/*
465 * Configuration structure wrapper around the
466 * rt2x00 interface configuration handler.
467 */
468struct rt2x00intf_conf {
469 /*
470 * Interface type
471 */
472 enum ieee80211_if_types type;
473
474 /*
475 * TSF sync value; this is dependent on the operation type.
476 */
477 enum tsf_sync sync;
478
479 /*
480 * The MAC and BSSID addresses are simple arrays of bytes;
481 * these arrays are little endian, so when sending the addresses
482 * to the drivers, copy them into little-endian typed variables.
483 *
484 * Note that all devices (except rt2500usb) have 32 bits
485 * register word sizes. This means that whatever variable we
486 * pass _must_ be a multiple of 32 bits. Otherwise the device
487 * might not accept what we are sending to it.
488 * This will also make it easier for the driver to write
489 * the data to the device.
490 */
491 __le32 mac[2];
492 __le32 bssid[2];
493};
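
The comment block above is why rt2x00intf_conf carries the MAC and BSSID as __le32 arrays rather than plain byte arrays: most chips take 32-bit register writes, so the six address bytes are padded to eight and handed over as two little-endian words, while rt2500usb with its 16-bit registers simply treats the same buffer as three __le16 words. A user-space approximation of that packing (the kernel would memcpy into a __le32 buffer; the manual shifting here just makes the byte order explicit):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Pack a 6-byte address into two little-endian 32-bit words (2 bytes padding). */
    static void pack_addr_le32(const uint8_t mac[6], uint32_t out[2])
    {
            uint8_t buf[8] = { 0 };

            memcpy(buf, mac, 6);
            for (int w = 0; w < 2; w++)
                    out[w] = (uint32_t)buf[4 * w] |
                             (uint32_t)buf[4 * w + 1] << 8 |
                             (uint32_t)buf[4 * w + 2] << 16 |
                             (uint32_t)buf[4 * w + 3] << 24;
    }

    int main(void)
    {
            const uint8_t mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
            uint32_t words[2];

            pack_addr_le32(mac, words);
            printf("MAC register words: 0x%08x 0x%08x\n",
                   (unsigned)words[0], (unsigned)words[1]);
            return 0;
    }
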
494
495/*
454 * rt2x00lib callback functions. 496 * rt2x00lib callback functions.
455 */ 497 */
456struct rt2x00lib_ops { 498struct rt2x00lib_ops {
@@ -464,6 +506,7 @@ struct rt2x00lib_ops {
464 */ 506 */
465 int (*probe_hw) (struct rt2x00_dev *rt2x00dev); 507 int (*probe_hw) (struct rt2x00_dev *rt2x00dev);
466 char *(*get_firmware_name) (struct rt2x00_dev *rt2x00dev); 508 char *(*get_firmware_name) (struct rt2x00_dev *rt2x00dev);
509 u16 (*get_firmware_crc) (void *data, const size_t len);
467 int (*load_firmware) (struct rt2x00_dev *rt2x00dev, void *data, 510 int (*load_firmware) (struct rt2x00_dev *rt2x00dev, void *data,
468 const size_t len); 511 const size_t len);
469 512
@@ -474,12 +517,12 @@ struct rt2x00lib_ops {
474 void (*uninitialize) (struct rt2x00_dev *rt2x00dev); 517 void (*uninitialize) (struct rt2x00_dev *rt2x00dev);
475 518
476 /* 519 /*
477 * Ring initialization handlers 520 * queue initialization handlers
478 */ 521 */
479 void (*init_rxentry) (struct rt2x00_dev *rt2x00dev, 522 void (*init_rxentry) (struct rt2x00_dev *rt2x00dev,
480 struct data_entry *entry); 523 struct queue_entry *entry);
481 void (*init_txentry) (struct rt2x00_dev *rt2x00dev, 524 void (*init_txentry) (struct rt2x00_dev *rt2x00dev,
482 struct data_entry *entry); 525 struct queue_entry *entry);
483 526
484 /* 527 /*
485 * Radio control handlers. 528 * Radio control handlers.
@@ -497,35 +540,40 @@ struct rt2x00lib_ops {
497 */ 540 */
498 void (*write_tx_desc) (struct rt2x00_dev *rt2x00dev, 541 void (*write_tx_desc) (struct rt2x00_dev *rt2x00dev,
499 struct sk_buff *skb, 542 struct sk_buff *skb,
500 struct txdata_entry_desc *desc, 543 struct txentry_desc *txdesc,
501 struct ieee80211_tx_control *control); 544 struct ieee80211_tx_control *control);
502 int (*write_tx_data) (struct rt2x00_dev *rt2x00dev, 545 int (*write_tx_data) (struct rt2x00_dev *rt2x00dev,
503 struct data_ring *ring, struct sk_buff *skb, 546 struct data_queue *queue, struct sk_buff *skb,
504 struct ieee80211_tx_control *control); 547 struct ieee80211_tx_control *control);
505 int (*get_tx_data_len) (struct rt2x00_dev *rt2x00dev, 548 int (*get_tx_data_len) (struct rt2x00_dev *rt2x00dev,
506 struct sk_buff *skb); 549 struct sk_buff *skb);
507 void (*kick_tx_queue) (struct rt2x00_dev *rt2x00dev, 550 void (*kick_tx_queue) (struct rt2x00_dev *rt2x00dev,
508 unsigned int queue); 551 const unsigned int queue);
509 552
510 /* 553 /*
511 * RX control handlers 554 * RX control handlers
512 */ 555 */
513 void (*fill_rxdone) (struct data_entry *entry, 556 void (*fill_rxdone) (struct queue_entry *entry,
514 struct rxdata_entry_desc *desc); 557 struct rxdone_entry_desc *rxdesc);
515 558
516 /* 559 /*
517 * Configuration handlers. 560 * Configuration handlers.
518 */ 561 */
519 void (*config_mac_addr) (struct rt2x00_dev *rt2x00dev, __le32 *mac); 562 void (*config_filter) (struct rt2x00_dev *rt2x00dev,
520 void (*config_bssid) (struct rt2x00_dev *rt2x00dev, __le32 *bssid); 563 const unsigned int filter_flags);
521 void (*config_type) (struct rt2x00_dev *rt2x00dev, const int type, 564 void (*config_intf) (struct rt2x00_dev *rt2x00dev,
522 const int tsf_sync); 565 struct rt2x00_intf *intf,
523 void (*config_preamble) (struct rt2x00_dev *rt2x00dev, 566 struct rt2x00intf_conf *conf,
524 const int short_preamble, 567 const unsigned int flags);
525 const int ack_timeout, 568#define CONFIG_UPDATE_TYPE ( 1 << 1 )
526 const int ack_consume_time); 569#define CONFIG_UPDATE_MAC ( 1 << 2 )
527 void (*config) (struct rt2x00_dev *rt2x00dev, const unsigned int flags, 570#define CONFIG_UPDATE_BSSID ( 1 << 3 )
528 struct rt2x00lib_conf *libconf); 571
572 void (*config_erp) (struct rt2x00_dev *rt2x00dev,
573 struct rt2x00lib_erp *erp);
574 void (*config) (struct rt2x00_dev *rt2x00dev,
575 struct rt2x00lib_conf *libconf,
576 const unsigned int flags);
529#define CONFIG_UPDATE_PHYMODE ( 1 << 1 ) 577#define CONFIG_UPDATE_PHYMODE ( 1 << 1 )
530#define CONFIG_UPDATE_CHANNEL ( 1 << 2 ) 578#define CONFIG_UPDATE_CHANNEL ( 1 << 2 )
531#define CONFIG_UPDATE_TXPOWER ( 1 << 3 ) 579#define CONFIG_UPDATE_TXPOWER ( 1 << 3 )
@@ -540,10 +588,14 @@ struct rt2x00lib_ops {
540 */ 588 */
541struct rt2x00_ops { 589struct rt2x00_ops {
542 const char *name; 590 const char *name;
543 const unsigned int rxd_size; 591 const unsigned int max_sta_intf;
544 const unsigned int txd_size; 592 const unsigned int max_ap_intf;
545 const unsigned int eeprom_size; 593 const unsigned int eeprom_size;
546 const unsigned int rf_size; 594 const unsigned int rf_size;
595 const struct data_queue_desc *rx;
596 const struct data_queue_desc *tx;
597 const struct data_queue_desc *bcn;
598 const struct data_queue_desc *atim;
547 const struct rt2x00lib_ops *lib; 599 const struct rt2x00lib_ops *lib;
548 const struct ieee80211_ops *hw; 600 const struct ieee80211_ops *hw;
549#ifdef CONFIG_RT2X00_LIB_DEBUGFS 601#ifdef CONFIG_RT2X00_LIB_DEBUGFS
@@ -569,8 +621,11 @@ enum rt2x00_flags {
569 /* 621 /*
570 * Driver features 622 * Driver features
571 */ 623 */
624 DRIVER_SUPPORT_MIXED_INTERFACES,
572 DRIVER_REQUIRE_FIRMWARE, 625 DRIVER_REQUIRE_FIRMWARE,
573 DRIVER_REQUIRE_BEACON_RING, 626 DRIVER_REQUIRE_BEACON_GUARD,
627 DRIVER_REQUIRE_ATIM_QUEUE,
628 DRIVER_REQUIRE_SCHEDULED,
574 629
575 /* 630 /*
576 * Driver configuration 631 * Driver configuration
@@ -582,7 +637,6 @@ enum rt2x00_flags {
582 CONFIG_EXTERNAL_LNA_BG, 637 CONFIG_EXTERNAL_LNA_BG,
583 CONFIG_DOUBLE_ANTENNA, 638 CONFIG_DOUBLE_ANTENNA,
584 CONFIG_DISABLE_LINK_TUNING, 639 CONFIG_DISABLE_LINK_TUNING,
585 CONFIG_SHORT_PREAMBLE,
586}; 640};
587 641
588/* 642/*
@@ -597,8 +651,10 @@ struct rt2x00_dev {
597 * macro's should be used for correct typecasting. 651 * macro's should be used for correct typecasting.
598 */ 652 */
599 void *dev; 653 void *dev;
600#define rt2x00dev_pci(__dev) ( (struct pci_dev*)(__dev)->dev ) 654#define rt2x00dev_pci(__dev) ( (struct pci_dev *)(__dev)->dev )
601#define rt2x00dev_usb(__dev) ( (struct usb_interface*)(__dev)->dev ) 655#define rt2x00dev_usb(__dev) ( (struct usb_interface *)(__dev)->dev )
656#define rt2x00dev_usb_dev(__dev)\
657 ( (struct usb_device *)interface_to_usbdev(rt2x00dev_usb(__dev)) )
602 658
603 /* 659 /*
604 * Callback functions. 660 * Callback functions.
@@ -609,18 +665,15 @@ struct rt2x00_dev {
609 * IEEE80211 control structure. 665 * IEEE80211 control structure.
610 */ 666 */
611 struct ieee80211_hw *hw; 667 struct ieee80211_hw *hw;
612 struct ieee80211_hw_mode *hwmodes; 668 struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
613 unsigned int curr_hwmode; 669 enum ieee80211_band curr_band;
614#define HWMODE_B 0
615#define HWMODE_G 1
616#define HWMODE_A 2
617 670
618 /* 671 /*
619 * rfkill structure for RF state switching support. 672 * rfkill structure for RF state switching support.
620 * This will only be compiled in when required. 673 * This will only be compiled in when required.
621 */ 674 */
622#ifdef CONFIG_RT2X00_LIB_RFKILL 675#ifdef CONFIG_RT2X00_LIB_RFKILL
623unsigned long rfkill_state; 676 unsigned long rfkill_state;
624#define RFKILL_STATE_ALLOCATED 1 677#define RFKILL_STATE_ALLOCATED 1
625#define RFKILL_STATE_REGISTERED 2 678#define RFKILL_STATE_REGISTERED 2
626 struct rfkill *rfkill; 679 struct rfkill *rfkill;
@@ -636,6 +689,17 @@ unsigned long rfkill_state;
636#endif /* CONFIG_RT2X00_LIB_DEBUGFS */ 689#endif /* CONFIG_RT2X00_LIB_DEBUGFS */
637 690
638 /* 691 /*
692 * LED structure for changing the LED status
693 * by mac80211 or the kernel.
694 */
695#ifdef CONFIG_RT2X00_LIB_LEDS
696 struct rt2x00_led led_radio;
697 struct rt2x00_led led_assoc;
698 struct rt2x00_led led_qual;
699 u16 led_mcu_reg;
700#endif /* CONFIG_RT2X00_LIB_LEDS */
701
702 /*
639 * Device flags. 703 * Device flags.
640 * In these flags the current status and some 704 * In these flags the current status and some
641 * of the device capabilities are stored. 705 * of the device capabilities are stored.
@@ -661,11 +725,13 @@ unsigned long rfkill_state;
661 725
662 /* 726 /*
663 * Register pointers 727 * Register pointers
664 * csr_addr: Base register address. (PCI) 728 * csr.base: CSR base register address. (PCI)
665 * csr_cache: CSR cache for usb_control_msg. (USB) 729 * csr.cache: CSR cache for usb_control_msg. (USB)
666 */ 730 */
667 void __iomem *csr_addr; 731 union csr {
668 void *csr_cache; 732 void __iomem *base;
733 void *cache;
734 } csr;
669 735
670 /* 736 /*
671 * Mutex to protect register accesses on USB devices. 737 * Mutex to protect register accesses on USB devices.
@@ -687,9 +753,14 @@ unsigned long rfkill_state;
687 unsigned int packet_filter; 753 unsigned int packet_filter;
688 754
689 /* 755 /*
690 * Interface configuration. 756 * Interface details:
757 * - Open ap interface count.
758 * - Open sta interface count.
759 * - Association count.
691 */ 760 */
692 struct interface interface; 761 unsigned int intf_ap_count;
762 unsigned int intf_sta_count;
763 unsigned int intf_associated;
693 764
694 /* 765 /*
695 * Link quality 766 * Link quality
@@ -722,16 +793,6 @@ unsigned long rfkill_state;
722 u16 tx_power; 793 u16 tx_power;
723 794
724 /* 795 /*
725 * LED register (for rt61pci & rt73usb).
726 */
727 u16 led_reg;
728
729 /*
730 * Led mode (LED_MODE_*)
731 */
732 u8 led_mode;
733
734 /*
735 * Rssi <-> Dbm offset 796 * Rssi <-> Dbm offset
736 */ 797 */
737 u8 rssi_offset; 798 u8 rssi_offset;
@@ -755,19 +816,18 @@ unsigned long rfkill_state;
755 /* 816 /*
756 * Scheduled work. 817 * Scheduled work.
757 */ 818 */
758 struct work_struct beacon_work; 819 struct work_struct intf_work;
759 struct work_struct filter_work; 820 struct work_struct filter_work;
760 struct work_struct config_work;
761 821
762 /* 822 /*
763 * Data ring arrays for RX, TX and Beacon. 823 * Data queue arrays for RX, TX and Beacon.
764 * The Beacon array also contains the Atim ring 824 * The Beacon array also contains the Atim queue
765 * if that is supported by the device. 825 * if that is supported by the device.
766 */ 826 */
767 int data_rings; 827 int data_queues;
768 struct data_ring *rx; 828 struct data_queue *rx;
769 struct data_ring *tx; 829 struct data_queue *tx;
770 struct data_ring *bcn; 830 struct data_queue *bcn;
771 831
772 /* 832 /*
773 * Firmware image. 833 * Firmware image.
@@ -776,37 +836,6 @@ unsigned long rfkill_state;
776}; 836};
777 837
778/* 838/*
779 * For-each loop for the ring array.
780 * All rings have been allocated as a single array,
 781 * this means we can create a very simple loop macro
 782 * that is capable of looping through all rings.
 783 * ring_end(), txring_end() and ring_loop() are helper macros which
784 * should not be used directly. Instead the following should be used:
785 * ring_for_each() - Loops through all rings (RX, TX, Beacon & Atim)
786 * txring_for_each() - Loops through TX data rings (TX only)
787 * txringall_for_each() - Loops through all TX rings (TX, Beacon & Atim)
788 */
789#define ring_end(__dev) \
790 &(__dev)->rx[(__dev)->data_rings]
791
792#define txring_end(__dev) \
793 &(__dev)->tx[(__dev)->hw->queues]
794
795#define ring_loop(__entry, __start, __end) \
796 for ((__entry) = (__start); \
797 prefetch(&(__entry)[1]), (__entry) != (__end); \
798 (__entry) = &(__entry)[1])
799
800#define ring_for_each(__dev, __entry) \
801 ring_loop(__entry, (__dev)->rx, ring_end(__dev))
802
803#define txring_for_each(__dev, __entry) \
804 ring_loop(__entry, (__dev)->tx, txring_end(__dev))
805
806#define txringall_for_each(__dev, __entry) \
807 ring_loop(__entry, (__dev)->tx, ring_end(__dev))
808
809/*
810 * Generic RF access. 839 * Generic RF access.
811 * The RF is being accessed by word index. 840 * The RF is being accessed by word index.
812 */ 841 */
@@ -898,20 +927,43 @@ static inline u16 get_duration_res(const unsigned int size, const u8 rate)
898 return ((size * 8 * 10) % rate); 927 return ((size * 8 * 10) % rate);
899} 928}
900 929
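
get_duration_res() above is the remainder half of the airtime calculation; its companion get_duration() (defined alongside it) takes the quotient, with the rate argument expressed in units of 100 kbit/s. A short worked example, following directly from those formulas:

/*
 * A 14-byte ACK frame at 1 Mbit/s (rate argument 10):
 *
 *   get_duration(14, 10)     = (14 * 8 * 10) / 10 = 112 us
 *   get_duration_res(14, 10) = (14 * 8 * 10) % 10 = 0
 *
 * The same frame at 5.5 Mbit/s (rate 55) gives 1120 / 55 = 20 us with a
 * remainder of 20, which is why the TX descriptor code later in this patch
 * rounds the duration up whenever get_duration_res() is non-zero.
 */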
901/* 930/**
902 * Library functions. 931 * rt2x00queue_get_queue - Convert mac80211 queue index to rt2x00 queue
932 * @rt2x00dev: Pointer to &struct rt2x00_dev.
933 * @queue: mac80211/rt2x00 queue index
934 * (see &enum ieee80211_tx_queue and &enum rt2x00_bcn_queue).
935 */
936struct data_queue *rt2x00queue_get_queue(struct rt2x00_dev *rt2x00dev,
937 const unsigned int queue);
938
939/**
940 * rt2x00queue_get_entry - Get queue entry where the given index points to.
941 * @rt2x00dev: Pointer to &struct rt2x00_dev.
942 * @index: Index identifier for obtaining the correct index.
943 */
944struct queue_entry *rt2x00queue_get_entry(struct data_queue *queue,
945 enum queue_index index);
946
947/**
948 * rt2x00queue_index_inc - Index incrementation function
949 * @queue: Queue (&struct data_queue) to perform the action on.
950 * @action: Index type (&enum queue_index) to perform the action on.
951 *
952 * This function will increase the requested index on the queue,
953 * it will grab the appropriate locks and handle queue overflow events by
954 * resetting the index to the start of the queue.
903 */ 955 */
904struct data_ring *rt2x00lib_get_ring(struct rt2x00_dev *rt2x00dev, 956void rt2x00queue_index_inc(struct data_queue *queue, enum queue_index index);
905 const unsigned int queue); 957
906 958
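
The kernel-doc above describes the index handling in terms of locking and wrap-around. A minimal sketch of that behaviour, assuming only the struct data_queue fields visible in the debugfs code later in this patch (lock, index[], limit); the authoritative implementation lives in rt2x00queue.c:

/* Sketch only: advance one index of a data_queue and wrap on overflow. */
static void sketch_queue_index_inc(struct data_queue *queue,
				   enum queue_index index)
{
	unsigned long irqflags;

	spin_lock_irqsave(&queue->lock, irqflags);

	queue->index[index]++;
	if (queue->index[index] >= queue->limit)
		queue->index[index] = 0;

	spin_unlock_irqrestore(&queue->lock, irqflags);
}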
907/* 959/*
908 * Interrupt context handlers. 960 * Interrupt context handlers.
909 */ 961 */
910void rt2x00lib_beacondone(struct rt2x00_dev *rt2x00dev); 962void rt2x00lib_beacondone(struct rt2x00_dev *rt2x00dev);
911void rt2x00lib_txdone(struct data_entry *entry, 963void rt2x00lib_txdone(struct queue_entry *entry,
912 const int status, const int retry); 964 struct txdone_entry_desc *txdesc);
913void rt2x00lib_rxdone(struct data_entry *entry, struct sk_buff *skb, 965void rt2x00lib_rxdone(struct queue_entry *entry,
914 struct rxdata_entry_desc *desc); 966 struct rxdone_entry_desc *rxdesc);
915 967
916/* 968/*
917 * TX descriptor initializer 969 * TX descriptor initializer
@@ -935,6 +987,10 @@ int rt2x00mac_config(struct ieee80211_hw *hw, struct ieee80211_conf *conf);
935int rt2x00mac_config_interface(struct ieee80211_hw *hw, 987int rt2x00mac_config_interface(struct ieee80211_hw *hw,
936 struct ieee80211_vif *vif, 988 struct ieee80211_vif *vif,
937 struct ieee80211_if_conf *conf); 989 struct ieee80211_if_conf *conf);
990void rt2x00mac_configure_filter(struct ieee80211_hw *hw,
991 unsigned int changed_flags,
992 unsigned int *total_flags,
993 int mc_count, struct dev_addr_list *mc_list);
938int rt2x00mac_get_stats(struct ieee80211_hw *hw, 994int rt2x00mac_get_stats(struct ieee80211_hw *hw,
939 struct ieee80211_low_level_stats *stats); 995 struct ieee80211_low_level_stats *stats);
940int rt2x00mac_get_tx_stats(struct ieee80211_hw *hw, 996int rt2x00mac_get_tx_stats(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/rt2x00/rt2x00config.c b/drivers/net/wireless/rt2x00/rt2x00config.c
index 07adc576db49..a9930a03f450 100644
--- a/drivers/net/wireless/rt2x00/rt2x00config.c
+++ b/drivers/net/wireless/rt2x00/rt2x00config.c
@@ -1,5 +1,5 @@
1/* 1/*
2 Copyright (C) 2004 - 2007 rt2x00 SourceForge Project 2 Copyright (C) 2004 - 2008 rt2x00 SourceForge Project
3 <http://rt2x00.serialmonkey.com> 3 <http://rt2x00.serialmonkey.com>
4 4
5 This program is free software; you can redistribute it and/or modify 5 This program is free software; you can redistribute it and/or modify
@@ -29,64 +29,78 @@
29#include "rt2x00.h" 29#include "rt2x00.h"
30#include "rt2x00lib.h" 30#include "rt2x00lib.h"
31 31
32 32void rt2x00lib_config_intf(struct rt2x00_dev *rt2x00dev,
33/* 33 struct rt2x00_intf *intf,
 34 * The MAC and BSSID addresses are simple arrays of bytes,          34 enum ieee80211_if_types type,
 35 * these arrays are little endian, so when sending the addresses    35 u8 *mac, u8 *bssid)
 36 * to the drivers, copy them into an endian-safe variable.
37 *
38 * Note that all devices (except rt2500usb) have 32 bits
39 * register word sizes. This means that whatever variable we
40 * pass _must_ be a multiple of 32 bits. Otherwise the device
41 * might not accept what we are sending to it.
42 * This will also make it easier for the driver to write
43 * the data to the device.
44 *
 45 * Also note that when NULL is passed as the address
 46 * we will send 00:00:00:00:00:00 to the device to clear the address.
 47 * This will prevent the device being confused when it wants
 48 * to ACK frames or considers itself associated.
49 */
50void rt2x00lib_config_mac_addr(struct rt2x00_dev *rt2x00dev, u8 *mac)
51{
52 __le32 reg[2];
53
54 memset(&reg, 0, sizeof(reg));
55 if (mac)
56 memcpy(&reg, mac, ETH_ALEN);
57
58 rt2x00dev->ops->lib->config_mac_addr(rt2x00dev, &reg[0]);
59}
60
61void rt2x00lib_config_bssid(struct rt2x00_dev *rt2x00dev, u8 *bssid)
62{ 36{
63 __le32 reg[2]; 37 struct rt2x00intf_conf conf;
38 unsigned int flags = 0;
64 39
65 memset(&reg, 0, sizeof(reg)); 40 conf.type = type;
66 if (bssid)
67 memcpy(&reg, bssid, ETH_ALEN);
68
69 rt2x00dev->ops->lib->config_bssid(rt2x00dev, &reg[0]);
70}
71
72void rt2x00lib_config_type(struct rt2x00_dev *rt2x00dev, const int type)
73{
74 int tsf_sync;
75 41
76 switch (type) { 42 switch (type) {
77 case IEEE80211_IF_TYPE_IBSS: 43 case IEEE80211_IF_TYPE_IBSS:
78 case IEEE80211_IF_TYPE_AP: 44 case IEEE80211_IF_TYPE_AP:
79 tsf_sync = TSF_SYNC_BEACON; 45 conf.sync = TSF_SYNC_BEACON;
80 break; 46 break;
81 case IEEE80211_IF_TYPE_STA: 47 case IEEE80211_IF_TYPE_STA:
82 tsf_sync = TSF_SYNC_INFRA; 48 conf.sync = TSF_SYNC_INFRA;
83 break; 49 break;
84 default: 50 default:
85 tsf_sync = TSF_SYNC_NONE; 51 conf.sync = TSF_SYNC_NONE;
86 break; 52 break;
87 } 53 }
88 54
89 rt2x00dev->ops->lib->config_type(rt2x00dev, type, tsf_sync); 55 /*
56 * Note that when NULL is passed as address we will send
57 * 00:00:00:00:00 to the device to clear the address.
58 * This will prevent the device being confused when it wants
59 * to ACK frames or consideres itself associated.
60 */
61 memset(&conf.mac, 0, sizeof(conf.mac));
62 if (mac)
63 memcpy(&conf.mac, mac, ETH_ALEN);
64
65 memset(&conf.bssid, 0, sizeof(conf.bssid));
66 if (bssid)
67 memcpy(&conf.bssid, bssid, ETH_ALEN);
68
69 flags |= CONFIG_UPDATE_TYPE;
70 if (mac || (!rt2x00dev->intf_ap_count && !rt2x00dev->intf_sta_count))
71 flags |= CONFIG_UPDATE_MAC;
72 if (bssid || (!rt2x00dev->intf_ap_count && !rt2x00dev->intf_sta_count))
73 flags |= CONFIG_UPDATE_BSSID;
74
75 rt2x00dev->ops->lib->config_intf(rt2x00dev, intf, &conf, flags);
76}
77
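
The flag logic above ensures the hardware never keeps a stale address: a newly supplied MAC or BSSID forces an update, and so does the very first interface (both counters still zero), since that write is what clears whatever the device held before. A self-contained sketch of just that rule; the bit values are illustrative, the real CONFIG_UPDATE_* definitions live in the rt2x00 library headers:

#define CONFIG_UPDATE_TYPE	(1 << 1)	/* illustrative values only */
#define CONFIG_UPDATE_MAC	(1 << 2)
#define CONFIG_UPDATE_BSSID	(1 << 3)

static unsigned int sketch_intf_update_flags(const u8 *mac, const u8 *bssid,
					     unsigned int intf_ap_count,
					     unsigned int intf_sta_count)
{
	unsigned int flags = CONFIG_UPDATE_TYPE;
	int first_intf = !intf_ap_count && !intf_sta_count;

	if (mac || first_intf)
		flags |= CONFIG_UPDATE_MAC;
	if (bssid || first_intf)
		flags |= CONFIG_UPDATE_BSSID;

	return flags;
}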
78void rt2x00lib_config_erp(struct rt2x00_dev *rt2x00dev,
79 struct rt2x00_intf *intf,
80 struct ieee80211_bss_conf *bss_conf)
81{
82 struct rt2x00lib_erp erp;
83
84 memset(&erp, 0, sizeof(erp));
85
86 erp.short_preamble = bss_conf->use_short_preamble;
87 erp.ack_timeout = PLCP + get_duration(ACK_SIZE, 10);
88 erp.ack_consume_time = SIFS + PLCP + get_duration(ACK_SIZE, 10);
89
90 if (rt2x00dev->hw->conf.flags & IEEE80211_CONF_SHORT_SLOT_TIME)
91 erp.ack_timeout += SHORT_DIFS;
92 else
93 erp.ack_timeout += DIFS;
94
95 if (bss_conf->use_short_preamble) {
96 erp.ack_timeout += SHORT_PREAMBLE;
97 erp.ack_consume_time += SHORT_PREAMBLE;
98 } else {
99 erp.ack_timeout += PREAMBLE;
100 erp.ack_consume_time += PREAMBLE;
101 }
102
103 rt2x00dev->ops->lib->config_erp(rt2x00dev, &erp);
90} 104}
91 105
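
For reference, the timing rule above works out as follows for a long-slot, long-preamble BSS, assuming the usual 14-byte ACK frame for ACK_SIZE (the IFS and preamble constants themselves are defined in rt2x00.h):

/*
 *   ack_timeout      = PLCP + get_duration(14, 10) + DIFS + PREAMBLE
 *                    = PLCP + 112 us + DIFS + PREAMBLE
 *   ack_consume_time = SIFS + PLCP + 112 us + PREAMBLE
 *
 * Short slot time swaps DIFS for SHORT_DIFS, short preamble swaps PREAMBLE
 * for SHORT_PREAMBLE, exactly as the branches above show.
 */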
92void rt2x00lib_config_antenna(struct rt2x00_dev *rt2x00dev, 106void rt2x00lib_config_antenna(struct rt2x00_dev *rt2x00dev,
@@ -113,7 +127,7 @@ void rt2x00lib_config_antenna(struct rt2x00_dev *rt2x00dev,
113 * The latter is required since we need to recalibrate the 127 * The latter is required since we need to recalibrate the
114 * noise-sensitivity ratio for the new setup. 128 * noise-sensitivity ratio for the new setup.
115 */ 129 */
116 rt2x00dev->ops->lib->config(rt2x00dev, CONFIG_UPDATE_ANTENNA, &libconf); 130 rt2x00dev->ops->lib->config(rt2x00dev, &libconf, CONFIG_UPDATE_ANTENNA);
117 rt2x00lib_reset_link_tuner(rt2x00dev); 131 rt2x00lib_reset_link_tuner(rt2x00dev);
118 132
119 rt2x00dev->link.ant.active.rx = libconf.ant.rx; 133 rt2x00dev->link.ant.active.rx = libconf.ant.rx;
@@ -123,12 +137,26 @@ void rt2x00lib_config_antenna(struct rt2x00_dev *rt2x00dev,
123 rt2x00lib_toggle_rx(rt2x00dev, STATE_RADIO_RX_ON_LINK); 137 rt2x00lib_toggle_rx(rt2x00dev, STATE_RADIO_RX_ON_LINK);
124} 138}
125 139
140static u32 rt2x00lib_get_basic_rates(struct ieee80211_supported_band *band)
141{
142 const struct rt2x00_rate *rate;
143 unsigned int i;
144 u32 mask = 0;
145
146 for (i = 0; i < band->n_bitrates; i++) {
147 rate = rt2x00_get_rate(band->bitrates[i].hw_value);
148 if (rate->flags & DEV_RATE_BASIC)
149 mask |= rate->ratemask;
150 }
151
152 return mask;
153}
154
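
With the rt2x00_supported_rates table added later in this patch, the entries flagged DEV_RATE_BASIC are 1, 2, 5.5, 11, 6, 12 and 24 Mbit/s, so for a band exposing all twelve rates the helper returns:

/*
 *   BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(4) | BIT(6) | BIT(8) == 0x15f
 */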
126void rt2x00lib_config(struct rt2x00_dev *rt2x00dev, 155void rt2x00lib_config(struct rt2x00_dev *rt2x00dev,
127 struct ieee80211_conf *conf, const int force_config) 156 struct ieee80211_conf *conf, const int force_config)
128{ 157{
129 struct rt2x00lib_conf libconf; 158 struct rt2x00lib_conf libconf;
130 struct ieee80211_hw_mode *mode; 159 struct ieee80211_supported_band *band;
131 struct ieee80211_rate *rate;
132 struct antenna_setup *default_ant = &rt2x00dev->default_ant; 160 struct antenna_setup *default_ant = &rt2x00dev->default_ant;
133 struct antenna_setup *active_ant = &rt2x00dev->link.ant.active; 161 struct antenna_setup *active_ant = &rt2x00dev->link.ant.active;
134 int flags = 0; 162 int flags = 0;
@@ -147,9 +175,9 @@ void rt2x00lib_config(struct rt2x00_dev *rt2x00dev,
147 * Check which configuration options have been 175 * Check which configuration options have been
148 * updated and should be send to the device. 176 * updated and should be send to the device.
149 */ 177 */
150 if (rt2x00dev->rx_status.phymode != conf->phymode) 178 if (rt2x00dev->rx_status.band != conf->channel->band)
151 flags |= CONFIG_UPDATE_PHYMODE; 179 flags |= CONFIG_UPDATE_PHYMODE;
152 if (rt2x00dev->rx_status.channel != conf->channel) 180 if (rt2x00dev->rx_status.freq != conf->channel->center_freq)
153 flags |= CONFIG_UPDATE_CHANNEL; 181 flags |= CONFIG_UPDATE_CHANNEL;
154 if (rt2x00dev->tx_power != conf->power_level) 182 if (rt2x00dev->tx_power != conf->power_level)
155 flags |= CONFIG_UPDATE_TXPOWER; 183 flags |= CONFIG_UPDATE_TXPOWER;
@@ -204,33 +232,15 @@ config:
204 memset(&libconf, 0, sizeof(libconf)); 232 memset(&libconf, 0, sizeof(libconf));
205 233
206 if (flags & CONFIG_UPDATE_PHYMODE) { 234 if (flags & CONFIG_UPDATE_PHYMODE) {
207 switch (conf->phymode) { 235 band = &rt2x00dev->bands[conf->channel->band];
208 case MODE_IEEE80211A: 236
209 libconf.phymode = HWMODE_A; 237 libconf.band = conf->channel->band;
210 break; 238 libconf.basic_rates = rt2x00lib_get_basic_rates(band);
211 case MODE_IEEE80211B:
212 libconf.phymode = HWMODE_B;
213 break;
214 case MODE_IEEE80211G:
215 libconf.phymode = HWMODE_G;
216 break;
217 default:
218 ERROR(rt2x00dev,
219 "Attempt to configure unsupported mode (%d)"
220 "Defaulting to 802.11b", conf->phymode);
221 libconf.phymode = HWMODE_B;
222 }
223
224 mode = &rt2x00dev->hwmodes[libconf.phymode];
225 rate = &mode->rates[mode->num_rates - 1];
226
227 libconf.basic_rates =
228 DEVICE_GET_RATE_FIELD(rate->val, RATEMASK) & DEV_BASIC_RATEMASK;
229 } 239 }
230 240
231 if (flags & CONFIG_UPDATE_CHANNEL) { 241 if (flags & CONFIG_UPDATE_CHANNEL) {
232 memcpy(&libconf.rf, 242 memcpy(&libconf.rf,
233 &rt2x00dev->spec.channels[conf->channel_val], 243 &rt2x00dev->spec.channels[conf->channel->hw_value],
234 sizeof(libconf.rf)); 244 sizeof(libconf.rf));
235 } 245 }
236 246
@@ -266,7 +276,7 @@ config:
266 /* 276 /*
267 * Start configuration. 277 * Start configuration.
268 */ 278 */
269 rt2x00dev->ops->lib->config(rt2x00dev, flags, &libconf); 279 rt2x00dev->ops->lib->config(rt2x00dev, &libconf, flags);
270 280
271 /* 281 /*
272 * Some configuration changes affect the link quality 282 * Some configuration changes affect the link quality
@@ -276,12 +286,11 @@ config:
276 rt2x00lib_reset_link_tuner(rt2x00dev); 286 rt2x00lib_reset_link_tuner(rt2x00dev);
277 287
278 if (flags & CONFIG_UPDATE_PHYMODE) { 288 if (flags & CONFIG_UPDATE_PHYMODE) {
279 rt2x00dev->curr_hwmode = libconf.phymode; 289 rt2x00dev->curr_band = conf->channel->band;
280 rt2x00dev->rx_status.phymode = conf->phymode; 290 rt2x00dev->rx_status.band = conf->channel->band;
281 } 291 }
282 292
283 rt2x00dev->rx_status.freq = conf->freq; 293 rt2x00dev->rx_status.freq = conf->channel->center_freq;
284 rt2x00dev->rx_status.channel = conf->channel;
285 rt2x00dev->tx_power = conf->power_level; 294 rt2x00dev->tx_power = conf->power_level;
286 295
287 if (flags & CONFIG_UPDATE_ANTENNA) { 296 if (flags & CONFIG_UPDATE_ANTENNA) {
diff --git a/drivers/net/wireless/rt2x00/rt2x00debug.c b/drivers/net/wireless/rt2x00/rt2x00debug.c
index b44a9f4b9b7f..bfab3b8780d6 100644
--- a/drivers/net/wireless/rt2x00/rt2x00debug.c
+++ b/drivers/net/wireless/rt2x00/rt2x00debug.c
@@ -1,5 +1,5 @@
1/* 1/*
2 Copyright (C) 2004 - 2007 rt2x00 SourceForge Project 2 Copyright (C) 2004 - 2008 rt2x00 SourceForge Project
3 <http://rt2x00.serialmonkey.com> 3 <http://rt2x00.serialmonkey.com>
4 4
5 This program is free software; you can redistribute it and/or modify 5 This program is free software; you can redistribute it and/or modify
@@ -33,7 +33,7 @@
33#include "rt2x00lib.h" 33#include "rt2x00lib.h"
34#include "rt2x00dump.h" 34#include "rt2x00dump.h"
35 35
36#define PRINT_LINE_LEN_MAX 32 36#define MAX_LINE_LENGTH 64
37 37
38struct rt2x00debug_intf { 38struct rt2x00debug_intf {
39 /* 39 /*
@@ -60,8 +60,9 @@ struct rt2x00debug_intf {
60 * - eeprom offset/value files 60 * - eeprom offset/value files
61 * - bbp offset/value files 61 * - bbp offset/value files
62 * - rf offset/value files 62 * - rf offset/value files
63 * - frame dump folder 63 * - queue folder
64 * - frame dump file 64 * - frame dump file
65 * - queue stats file
65 */ 66 */
66 struct dentry *driver_folder; 67 struct dentry *driver_folder;
67 struct dentry *driver_entry; 68 struct dentry *driver_entry;
@@ -76,8 +77,9 @@ struct rt2x00debug_intf {
76 struct dentry *bbp_val_entry; 77 struct dentry *bbp_val_entry;
77 struct dentry *rf_off_entry; 78 struct dentry *rf_off_entry;
78 struct dentry *rf_val_entry; 79 struct dentry *rf_val_entry;
79 struct dentry *frame_folder; 80 struct dentry *queue_folder;
80 struct dentry *frame_dump_entry; 81 struct dentry *queue_frame_dump_entry;
82 struct dentry *queue_stats_entry;
81 83
82 /* 84 /*
83 * The frame dump file only allows a single reader, 85 * The frame dump file only allows a single reader,
@@ -116,7 +118,7 @@ void rt2x00debug_dump_frame(struct rt2x00_dev *rt2x00dev,
116 struct sk_buff *skb) 118 struct sk_buff *skb)
117{ 119{
118 struct rt2x00debug_intf *intf = rt2x00dev->debugfs_intf; 120 struct rt2x00debug_intf *intf = rt2x00dev->debugfs_intf;
119 struct skb_desc *desc = get_skb_desc(skb); 121 struct skb_frame_desc *desc = get_skb_frame_desc(skb);
120 struct sk_buff *skbcopy; 122 struct sk_buff *skbcopy;
121 struct rt2x00dump_hdr *dump_hdr; 123 struct rt2x00dump_hdr *dump_hdr;
122 struct timeval timestamp; 124 struct timeval timestamp;
@@ -147,7 +149,7 @@ void rt2x00debug_dump_frame(struct rt2x00_dev *rt2x00dev,
147 dump_hdr->chip_rf = cpu_to_le16(rt2x00dev->chip.rf); 149 dump_hdr->chip_rf = cpu_to_le16(rt2x00dev->chip.rf);
148 dump_hdr->chip_rev = cpu_to_le32(rt2x00dev->chip.rev); 150 dump_hdr->chip_rev = cpu_to_le32(rt2x00dev->chip.rev);
149 dump_hdr->type = cpu_to_le16(desc->frame_type); 151 dump_hdr->type = cpu_to_le16(desc->frame_type);
150 dump_hdr->ring_index = desc->ring->queue_idx; 152 dump_hdr->queue_index = desc->entry->queue->qid;
151 dump_hdr->entry_index = desc->entry->entry_idx; 153 dump_hdr->entry_index = desc->entry->entry_idx;
152 dump_hdr->timestamp_sec = cpu_to_le32(timestamp.tv_sec); 154 dump_hdr->timestamp_sec = cpu_to_le32(timestamp.tv_sec);
153 dump_hdr->timestamp_usec = cpu_to_le32(timestamp.tv_usec); 155 dump_hdr->timestamp_usec = cpu_to_le32(timestamp.tv_usec);
@@ -186,7 +188,7 @@ static int rt2x00debug_file_release(struct inode *inode, struct file *file)
186 return 0; 188 return 0;
187} 189}
188 190
189static int rt2x00debug_open_ring_dump(struct inode *inode, struct file *file) 191static int rt2x00debug_open_queue_dump(struct inode *inode, struct file *file)
190{ 192{
191 struct rt2x00debug_intf *intf = inode->i_private; 193 struct rt2x00debug_intf *intf = inode->i_private;
192 int retval; 194 int retval;
@@ -203,7 +205,7 @@ static int rt2x00debug_open_ring_dump(struct inode *inode, struct file *file)
203 return 0; 205 return 0;
204} 206}
205 207
206static int rt2x00debug_release_ring_dump(struct inode *inode, struct file *file) 208static int rt2x00debug_release_queue_dump(struct inode *inode, struct file *file)
207{ 209{
208 struct rt2x00debug_intf *intf = inode->i_private; 210 struct rt2x00debug_intf *intf = inode->i_private;
209 211
@@ -214,10 +216,10 @@ static int rt2x00debug_release_ring_dump(struct inode *inode, struct file *file)
214 return rt2x00debug_file_release(inode, file); 216 return rt2x00debug_file_release(inode, file);
215} 217}
216 218
217static ssize_t rt2x00debug_read_ring_dump(struct file *file, 219static ssize_t rt2x00debug_read_queue_dump(struct file *file,
218 char __user *buf, 220 char __user *buf,
219 size_t length, 221 size_t length,
220 loff_t *offset) 222 loff_t *offset)
221{ 223{
222 struct rt2x00debug_intf *intf = file->private_data; 224 struct rt2x00debug_intf *intf = file->private_data;
223 struct sk_buff *skb; 225 struct sk_buff *skb;
@@ -248,8 +250,8 @@ exit:
248 return status; 250 return status;
249} 251}
250 252
251static unsigned int rt2x00debug_poll_ring_dump(struct file *file, 253static unsigned int rt2x00debug_poll_queue_dump(struct file *file,
252 poll_table *wait) 254 poll_table *wait)
253{ 255{
254 struct rt2x00debug_intf *intf = file->private_data; 256 struct rt2x00debug_intf *intf = file->private_data;
255 257
@@ -261,12 +263,68 @@ static unsigned int rt2x00debug_poll_ring_dump(struct file *file,
261 return 0; 263 return 0;
262} 264}
263 265
264static const struct file_operations rt2x00debug_fop_ring_dump = { 266static const struct file_operations rt2x00debug_fop_queue_dump = {
265 .owner = THIS_MODULE, 267 .owner = THIS_MODULE,
266 .read = rt2x00debug_read_ring_dump, 268 .read = rt2x00debug_read_queue_dump,
267 .poll = rt2x00debug_poll_ring_dump, 269 .poll = rt2x00debug_poll_queue_dump,
268 .open = rt2x00debug_open_ring_dump, 270 .open = rt2x00debug_open_queue_dump,
269 .release = rt2x00debug_release_ring_dump, 271 .release = rt2x00debug_release_queue_dump,
272};
273
274static ssize_t rt2x00debug_read_queue_stats(struct file *file,
275 char __user *buf,
276 size_t length,
277 loff_t *offset)
278{
279 struct rt2x00debug_intf *intf = file->private_data;
280 struct data_queue *queue;
281 unsigned long irqflags;
282 unsigned int lines = 1 + intf->rt2x00dev->data_queues;
283 size_t size;
284 char *data;
285 char *temp;
286
287 if (*offset)
288 return 0;
289
290 data = kzalloc(lines * MAX_LINE_LENGTH, GFP_KERNEL);
291 if (!data)
292 return -ENOMEM;
293
294 temp = data +
295 sprintf(data, "qid\tcount\tlimit\tlength\tindex\tdone\tcrypto\n");
296
297 queue_for_each(intf->rt2x00dev, queue) {
298 spin_lock_irqsave(&queue->lock, irqflags);
299
300 temp += sprintf(temp, "%d\t%d\t%d\t%d\t%d\t%d\t%d\n", queue->qid,
301 queue->count, queue->limit, queue->length,
302 queue->index[Q_INDEX],
303 queue->index[Q_INDEX_DONE],
304 queue->index[Q_INDEX_CRYPTO]);
305
306 spin_unlock_irqrestore(&queue->lock, irqflags);
307 }
308
309 size = strlen(data);
310 size = min(size, length);
311
312 if (copy_to_user(buf, data, size)) {
313 kfree(data);
314 return -EFAULT;
315 }
316
317 kfree(data);
318
319 *offset += size;
320 return size;
321}
322
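
The resulting debugfs file prints one header line plus one line per queue; the values shown below are purely illustrative:

/*
 * Example output (illustrative values):
 *
 *   qid  count  limit  length  index  done  crypto
 *   0    24     24     24      3      1     0
 *   1    24     24     24      0      0     0
 *   2    24     24     24      0      0     0
 */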
323static const struct file_operations rt2x00debug_fop_queue_stats = {
324 .owner = THIS_MODULE,
325 .read = rt2x00debug_read_queue_stats,
326 .open = rt2x00debug_file_open,
327 .release = rt2x00debug_file_release,
270}; 328};
271 329
272#define RT2X00DEBUGFS_OPS_READ(__name, __format, __type) \ 330#define RT2X00DEBUGFS_OPS_READ(__name, __format, __type) \
@@ -386,7 +444,7 @@ static struct dentry *rt2x00debug_create_file_driver(const char *name,
386{ 444{
387 char *data; 445 char *data;
388 446
389 data = kzalloc(3 * PRINT_LINE_LEN_MAX, GFP_KERNEL); 447 data = kzalloc(3 * MAX_LINE_LENGTH, GFP_KERNEL);
390 if (!data) 448 if (!data)
391 return NULL; 449 return NULL;
392 450
@@ -409,7 +467,7 @@ static struct dentry *rt2x00debug_create_file_chipset(const char *name,
409 const struct rt2x00debug *debug = intf->debug; 467 const struct rt2x00debug *debug = intf->debug;
410 char *data; 468 char *data;
411 469
412 data = kzalloc(8 * PRINT_LINE_LEN_MAX, GFP_KERNEL); 470 data = kzalloc(8 * MAX_LINE_LENGTH, GFP_KERNEL);
413 if (!data) 471 if (!data)
414 return NULL; 472 return NULL;
415 473
@@ -496,20 +554,24 @@ void rt2x00debug_register(struct rt2x00_dev *rt2x00dev)
496 554
497#undef RT2X00DEBUGFS_CREATE_REGISTER_ENTRY 555#undef RT2X00DEBUGFS_CREATE_REGISTER_ENTRY
498 556
499 intf->frame_folder = 557 intf->queue_folder =
500 debugfs_create_dir("frame", intf->driver_folder); 558 debugfs_create_dir("queue", intf->driver_folder);
501 if (IS_ERR(intf->frame_folder)) 559 if (IS_ERR(intf->queue_folder))
502 goto exit; 560 goto exit;
503 561
504 intf->frame_dump_entry = 562 intf->queue_frame_dump_entry =
505 debugfs_create_file("dump", S_IRUGO, intf->frame_folder, 563 debugfs_create_file("dump", S_IRUGO, intf->queue_folder,
506 intf, &rt2x00debug_fop_ring_dump); 564 intf, &rt2x00debug_fop_queue_dump);
507 if (IS_ERR(intf->frame_dump_entry)) 565 if (IS_ERR(intf->queue_frame_dump_entry))
508 goto exit; 566 goto exit;
509 567
510 skb_queue_head_init(&intf->frame_dump_skbqueue); 568 skb_queue_head_init(&intf->frame_dump_skbqueue);
511 init_waitqueue_head(&intf->frame_dump_waitqueue); 569 init_waitqueue_head(&intf->frame_dump_waitqueue);
512 570
571 intf->queue_stats_entry =
572 debugfs_create_file("queue", S_IRUGO, intf->queue_folder,
573 intf, &rt2x00debug_fop_queue_stats);
574
513 return; 575 return;
514 576
515exit: 577exit:
@@ -528,8 +590,9 @@ void rt2x00debug_deregister(struct rt2x00_dev *rt2x00dev)
528 590
529 skb_queue_purge(&intf->frame_dump_skbqueue); 591 skb_queue_purge(&intf->frame_dump_skbqueue);
530 592
531 debugfs_remove(intf->frame_dump_entry); 593 debugfs_remove(intf->queue_stats_entry);
532 debugfs_remove(intf->frame_folder); 594 debugfs_remove(intf->queue_frame_dump_entry);
595 debugfs_remove(intf->queue_folder);
533 debugfs_remove(intf->rf_val_entry); 596 debugfs_remove(intf->rf_val_entry);
534 debugfs_remove(intf->rf_off_entry); 597 debugfs_remove(intf->rf_off_entry);
535 debugfs_remove(intf->bbp_val_entry); 598 debugfs_remove(intf->bbp_val_entry);
diff --git a/drivers/net/wireless/rt2x00/rt2x00debug.h b/drivers/net/wireless/rt2x00/rt2x00debug.h
index d37efbd09c41..c4ce895aa1c7 100644
--- a/drivers/net/wireless/rt2x00/rt2x00debug.h
+++ b/drivers/net/wireless/rt2x00/rt2x00debug.h
@@ -1,5 +1,5 @@
1/* 1/*
2 Copyright (C) 2004 - 2007 rt2x00 SourceForge Project 2 Copyright (C) 2004 - 2008 rt2x00 SourceForge Project
3 <http://rt2x00.serialmonkey.com> 3 <http://rt2x00.serialmonkey.com>
4 4
5 This program is free software; you can redistribute it and/or modify 5 This program is free software; you can redistribute it and/or modify
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index e873a39fcce3..f8fe7a139a8a 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -1,5 +1,5 @@
1/* 1/*
2 Copyright (C) 2004 - 2007 rt2x00 SourceForge Project 2 Copyright (C) 2004 - 2008 rt2x00 SourceForge Project
3 <http://rt2x00.serialmonkey.com> 3 <http://rt2x00.serialmonkey.com>
4 4
5 This program is free software; you can redistribute it and/or modify 5 This program is free software; you can redistribute it and/or modify
@@ -31,34 +31,6 @@
31#include "rt2x00dump.h" 31#include "rt2x00dump.h"
32 32
33/* 33/*
34 * Ring handler.
35 */
36struct data_ring *rt2x00lib_get_ring(struct rt2x00_dev *rt2x00dev,
37 const unsigned int queue)
38{
39 int beacon = test_bit(DRIVER_REQUIRE_BEACON_RING, &rt2x00dev->flags);
40
41 /*
 42 * Check if we are requesting a regular TX ring,
43 * or if we are requesting a Beacon or Atim ring.
44 * For Atim rings, we should check if it is supported.
45 */
46 if (queue < rt2x00dev->hw->queues && rt2x00dev->tx)
47 return &rt2x00dev->tx[queue];
48
49 if (!rt2x00dev->bcn || !beacon)
50 return NULL;
51
52 if (queue == IEEE80211_TX_QUEUE_BEACON)
53 return &rt2x00dev->bcn[0];
54 else if (queue == IEEE80211_TX_QUEUE_AFTER_BEACON)
55 return &rt2x00dev->bcn[1];
56
57 return NULL;
58}
59EXPORT_SYMBOL_GPL(rt2x00lib_get_ring);
60
61/*
62 * Link tuning handlers 34 * Link tuning handlers
63 */ 35 */
64void rt2x00lib_reset_link_tuner(struct rt2x00_dev *rt2x00dev) 36void rt2x00lib_reset_link_tuner(struct rt2x00_dev *rt2x00dev)
@@ -113,46 +85,6 @@ static void rt2x00lib_stop_link_tuner(struct rt2x00_dev *rt2x00dev)
113} 85}
114 86
115/* 87/*
116 * Ring initialization
117 */
118static void rt2x00lib_init_rxrings(struct rt2x00_dev *rt2x00dev)
119{
120 struct data_ring *ring = rt2x00dev->rx;
121 unsigned int i;
122
123 if (!rt2x00dev->ops->lib->init_rxentry)
124 return;
125
126 if (ring->data_addr)
127 memset(ring->data_addr, 0, rt2x00_get_ring_size(ring));
128
129 for (i = 0; i < ring->stats.limit; i++)
130 rt2x00dev->ops->lib->init_rxentry(rt2x00dev, &ring->entry[i]);
131
132 rt2x00_ring_index_clear(ring);
133}
134
135static void rt2x00lib_init_txrings(struct rt2x00_dev *rt2x00dev)
136{
137 struct data_ring *ring;
138 unsigned int i;
139
140 if (!rt2x00dev->ops->lib->init_txentry)
141 return;
142
143 txringall_for_each(rt2x00dev, ring) {
144 if (ring->data_addr)
145 memset(ring->data_addr, 0, rt2x00_get_ring_size(ring));
146
147 for (i = 0; i < ring->stats.limit; i++)
148 rt2x00dev->ops->lib->init_txentry(rt2x00dev,
149 &ring->entry[i]);
150
151 rt2x00_ring_index_clear(ring);
152 }
153}
154
155/*
156 * Radio control handlers. 88 * Radio control handlers.
157 */ 89 */
158int rt2x00lib_enable_radio(struct rt2x00_dev *rt2x00dev) 90int rt2x00lib_enable_radio(struct rt2x00_dev *rt2x00dev)
@@ -168,19 +100,21 @@ int rt2x00lib_enable_radio(struct rt2x00_dev *rt2x00dev)
168 return 0; 100 return 0;
169 101
170 /* 102 /*
171 * Initialize all data rings. 103 * Initialize all data queues.
172 */ 104 */
173 rt2x00lib_init_rxrings(rt2x00dev); 105 rt2x00queue_init_rx(rt2x00dev);
174 rt2x00lib_init_txrings(rt2x00dev); 106 rt2x00queue_init_tx(rt2x00dev);
175 107
176 /* 108 /*
177 * Enable radio. 109 * Enable radio.
178 */ 110 */
179 status = rt2x00dev->ops->lib->set_device_state(rt2x00dev, 111 status =
180 STATE_RADIO_ON); 112 rt2x00dev->ops->lib->set_device_state(rt2x00dev, STATE_RADIO_ON);
181 if (status) 113 if (status)
182 return status; 114 return status;
183 115
116 rt2x00leds_led_radio(rt2x00dev, true);
117
184 __set_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags); 118 __set_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags);
185 119
186 /* 120 /*
@@ -204,12 +138,10 @@ void rt2x00lib_disable_radio(struct rt2x00_dev *rt2x00dev)
204 /* 138 /*
205 * Stop all scheduled work. 139 * Stop all scheduled work.
206 */ 140 */
207 if (work_pending(&rt2x00dev->beacon_work)) 141 if (work_pending(&rt2x00dev->intf_work))
208 cancel_work_sync(&rt2x00dev->beacon_work); 142 cancel_work_sync(&rt2x00dev->intf_work);
209 if (work_pending(&rt2x00dev->filter_work)) 143 if (work_pending(&rt2x00dev->filter_work))
210 cancel_work_sync(&rt2x00dev->filter_work); 144 cancel_work_sync(&rt2x00dev->filter_work);
211 if (work_pending(&rt2x00dev->config_work))
212 cancel_work_sync(&rt2x00dev->config_work);
213 145
214 /* 146 /*
215 * Stop the TX queues. 147 * Stop the TX queues.
@@ -225,6 +157,7 @@ void rt2x00lib_disable_radio(struct rt2x00_dev *rt2x00dev)
225 * Disable radio. 157 * Disable radio.
226 */ 158 */
227 rt2x00dev->ops->lib->set_device_state(rt2x00dev, STATE_RADIO_OFF); 159 rt2x00dev->ops->lib->set_device_state(rt2x00dev, STATE_RADIO_OFF);
160 rt2x00leds_led_radio(rt2x00dev, false);
228} 161}
229 162
230void rt2x00lib_toggle_rx(struct rt2x00_dev *rt2x00dev, enum dev_state state) 163void rt2x00lib_toggle_rx(struct rt2x00_dev *rt2x00dev, enum dev_state state)
@@ -241,7 +174,7 @@ void rt2x00lib_toggle_rx(struct rt2x00_dev *rt2x00dev, enum dev_state state)
241 * When we are enabling the RX, we should also start the link tuner. 174 * When we are enabling the RX, we should also start the link tuner.
242 */ 175 */
243 if (state == STATE_RADIO_RX_ON && 176 if (state == STATE_RADIO_RX_ON &&
244 is_interface_present(&rt2x00dev->interface)) 177 (rt2x00dev->intf_ap_count || rt2x00dev->intf_sta_count))
245 rt2x00lib_start_link_tuner(rt2x00dev); 178 rt2x00lib_start_link_tuner(rt2x00dev);
246} 179}
247 180
@@ -449,6 +382,11 @@ static void rt2x00lib_link_tuner(struct work_struct *work)
449 rt2x00lib_precalculate_link_signal(&rt2x00dev->link.qual); 382 rt2x00lib_precalculate_link_signal(&rt2x00dev->link.qual);
450 383
451 /* 384 /*
385 * Send a signal to the led to update the led signal strength.
386 */
387 rt2x00leds_led_quality(rt2x00dev, rt2x00dev->link.qual.avg_rssi);
388
389 /*
452 * Evaluate antenna setup, make this the last step since this could 390 * Evaluate antenna setup, make this the last step since this could
453 * possibly reset some statistics. 391 * possibly reset some statistics.
454 */ 392 */
@@ -466,59 +404,76 @@ static void rt2x00lib_packetfilter_scheduled(struct work_struct *work)
466{ 404{
467 struct rt2x00_dev *rt2x00dev = 405 struct rt2x00_dev *rt2x00dev =
468 container_of(work, struct rt2x00_dev, filter_work); 406 container_of(work, struct rt2x00_dev, filter_work);
469 unsigned int filter = rt2x00dev->packet_filter; 407
408 rt2x00dev->ops->lib->config_filter(rt2x00dev, rt2x00dev->packet_filter);
409}
410
411static void rt2x00lib_intf_scheduled_iter(void *data, u8 *mac,
412 struct ieee80211_vif *vif)
413{
414 struct rt2x00_dev *rt2x00dev = data;
415 struct rt2x00_intf *intf = vif_to_intf(vif);
416 struct sk_buff *skb;
417 struct ieee80211_tx_control control;
418 struct ieee80211_bss_conf conf;
419 int delayed_flags;
470 420
471 /* 421 /*
472 * Since we had stored the filter inside interface.filter, 422 * Copy all data we need during this action under the protection
473 * we should now clear that field. Otherwise the driver will 423 * of a spinlock. Otherwise race conditions might occur which results
474 * assume nothing has changed (*total_flags will be compared 424 * into an invalid configuration.
475 * to interface.filter to determine if any action is required).
476 */ 425 */
477 rt2x00dev->packet_filter = 0; 426 spin_lock(&intf->lock);
427
428 memcpy(&conf, &intf->conf, sizeof(conf));
429 delayed_flags = intf->delayed_flags;
430 intf->delayed_flags = 0;
478 431
479 rt2x00dev->ops->hw->configure_filter(rt2x00dev->hw, 432 spin_unlock(&intf->lock);
480 filter, &filter, 0, NULL); 433
434 if (delayed_flags & DELAYED_UPDATE_BEACON) {
435 skb = ieee80211_beacon_get(rt2x00dev->hw, vif, &control);
436 if (skb && rt2x00dev->ops->hw->beacon_update(rt2x00dev->hw,
437 skb, &control))
438 dev_kfree_skb(skb);
439 }
440
441 if (delayed_flags & DELAYED_CONFIG_ERP)
442 rt2x00lib_config_erp(rt2x00dev, intf, &intf->conf);
443
444 if (delayed_flags & DELAYED_LED_ASSOC)
445 rt2x00leds_led_assoc(rt2x00dev, !!rt2x00dev->intf_associated);
481} 446}
482 447
483static void rt2x00lib_configuration_scheduled(struct work_struct *work) 448static void rt2x00lib_intf_scheduled(struct work_struct *work)
484{ 449{
485 struct rt2x00_dev *rt2x00dev = 450 struct rt2x00_dev *rt2x00dev =
486 container_of(work, struct rt2x00_dev, config_work); 451 container_of(work, struct rt2x00_dev, intf_work);
487 struct ieee80211_bss_conf bss_conf;
488
489 bss_conf.use_short_preamble =
490 test_bit(CONFIG_SHORT_PREAMBLE, &rt2x00dev->flags);
491 452
492 /* 453 /*
493 * FIXME: shouldn't invoke it this way because all other contents 454 * Iterate over each interface and perform the
494 * of bss_conf is invalid. 455 * requested configurations.
495 */ 456 */
496 rt2x00mac_bss_info_changed(rt2x00dev->hw, rt2x00dev->interface.id, 457 ieee80211_iterate_active_interfaces(rt2x00dev->hw,
497 &bss_conf, BSS_CHANGED_ERP_PREAMBLE); 458 rt2x00lib_intf_scheduled_iter,
459 rt2x00dev);
498} 460}
499 461
500/* 462/*
501 * Interrupt context handlers. 463 * Interrupt context handlers.
502 */ 464 */
503static void rt2x00lib_beacondone_scheduled(struct work_struct *work) 465static void rt2x00lib_beacondone_iter(void *data, u8 *mac,
466 struct ieee80211_vif *vif)
504{ 467{
505 struct rt2x00_dev *rt2x00dev = 468 struct rt2x00_intf *intf = vif_to_intf(vif);
506 container_of(work, struct rt2x00_dev, beacon_work);
507 struct data_ring *ring =
508 rt2x00lib_get_ring(rt2x00dev, IEEE80211_TX_QUEUE_BEACON);
509 struct data_entry *entry = rt2x00_get_data_entry(ring);
510 struct sk_buff *skb;
511 469
512 skb = ieee80211_beacon_get(rt2x00dev->hw, 470 if (vif->type != IEEE80211_IF_TYPE_AP &&
513 rt2x00dev->interface.id, 471 vif->type != IEEE80211_IF_TYPE_IBSS)
514 &entry->tx_status.control);
515 if (!skb)
516 return; 472 return;
517 473
518 rt2x00dev->ops->hw->beacon_update(rt2x00dev->hw, skb, 474 spin_lock(&intf->lock);
519 &entry->tx_status.control); 475 intf->delayed_flags |= DELAYED_UPDATE_BEACON;
520 476 spin_unlock(&intf->lock);
521 dev_kfree_skb(skb);
522} 477}
523 478
524void rt2x00lib_beacondone(struct rt2x00_dev *rt2x00dev) 479void rt2x00lib_beacondone(struct rt2x00_dev *rt2x00dev)
@@ -526,116 +481,140 @@ void rt2x00lib_beacondone(struct rt2x00_dev *rt2x00dev)
526 if (!test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags)) 481 if (!test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags))
527 return; 482 return;
528 483
529 queue_work(rt2x00dev->hw->workqueue, &rt2x00dev->beacon_work); 484 ieee80211_iterate_active_interfaces(rt2x00dev->hw,
485 rt2x00lib_beacondone_iter,
486 rt2x00dev);
487
488 queue_work(rt2x00dev->hw->workqueue, &rt2x00dev->intf_work);
530} 489}
531EXPORT_SYMBOL_GPL(rt2x00lib_beacondone); 490EXPORT_SYMBOL_GPL(rt2x00lib_beacondone);
532 491
533void rt2x00lib_txdone(struct data_entry *entry, 492void rt2x00lib_txdone(struct queue_entry *entry,
534 const int status, const int retry) 493 struct txdone_entry_desc *txdesc)
535{ 494{
536 struct rt2x00_dev *rt2x00dev = entry->ring->rt2x00dev; 495 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
537 struct ieee80211_tx_status *tx_status = &entry->tx_status; 496 struct skb_frame_desc *skbdesc;
538 struct ieee80211_low_level_stats *stats = &rt2x00dev->low_level_stats; 497 struct ieee80211_tx_status tx_status;
539 int success = !!(status == TX_SUCCESS || status == TX_SUCCESS_RETRY); 498 int success = !!(txdesc->status == TX_SUCCESS ||
540 int fail = !!(status == TX_FAIL_RETRY || status == TX_FAIL_INVALID || 499 txdesc->status == TX_SUCCESS_RETRY);
541 status == TX_FAIL_OTHER); 500 int fail = !!(txdesc->status == TX_FAIL_RETRY ||
501 txdesc->status == TX_FAIL_INVALID ||
502 txdesc->status == TX_FAIL_OTHER);
542 503
543 /* 504 /*
544 * Update TX statistics. 505 * Update TX statistics.
545 */ 506 */
546 tx_status->flags = 0;
547 tx_status->ack_signal = 0;
548 tx_status->excessive_retries = (status == TX_FAIL_RETRY);
549 tx_status->retry_count = retry;
550 rt2x00dev->link.qual.tx_success += success; 507 rt2x00dev->link.qual.tx_success += success;
551 rt2x00dev->link.qual.tx_failed += retry + fail; 508 rt2x00dev->link.qual.tx_failed += txdesc->retry + fail;
509
510 /*
511 * Initialize TX status
512 */
513 tx_status.flags = 0;
514 tx_status.ack_signal = 0;
515 tx_status.excessive_retries = (txdesc->status == TX_FAIL_RETRY);
516 tx_status.retry_count = txdesc->retry;
517 memcpy(&tx_status.control, txdesc->control, sizeof(*txdesc->control));
552 518
553 if (!(tx_status->control.flags & IEEE80211_TXCTL_NO_ACK)) { 519 if (!(tx_status.control.flags & IEEE80211_TXCTL_NO_ACK)) {
554 if (success) 520 if (success)
555 tx_status->flags |= IEEE80211_TX_STATUS_ACK; 521 tx_status.flags |= IEEE80211_TX_STATUS_ACK;
556 else 522 else
557 stats->dot11ACKFailureCount++; 523 rt2x00dev->low_level_stats.dot11ACKFailureCount++;
558 } 524 }
559 525
560 tx_status->queue_length = entry->ring->stats.limit; 526 tx_status.queue_length = entry->queue->limit;
561 tx_status->queue_number = tx_status->control.queue; 527 tx_status.queue_number = tx_status.control.queue;
562 528
563 if (tx_status->control.flags & IEEE80211_TXCTL_USE_RTS_CTS) { 529 if (tx_status.control.flags & IEEE80211_TXCTL_USE_RTS_CTS) {
564 if (success) 530 if (success)
565 stats->dot11RTSSuccessCount++; 531 rt2x00dev->low_level_stats.dot11RTSSuccessCount++;
566 else 532 else
567 stats->dot11RTSFailureCount++; 533 rt2x00dev->low_level_stats.dot11RTSFailureCount++;
568 } 534 }
569 535
570 /* 536 /*
571 * Send the tx_status to mac80211 & debugfs. 537 * Send the tx_status to debugfs. Only send the status report
572 * mac80211 will clean up the skb structure. 538 * to mac80211 when the frame originated from there. If this was
 539 * an extra frame coming through a mac80211 library call (RTS/CTS)
 540 * then we should not send the status report back.
 541 * If sent to mac80211, mac80211 will clean up the skb structure,
 542 * otherwise we have to do it ourselves.
573 */ 543 */
574 get_skb_desc(entry->skb)->frame_type = DUMP_FRAME_TXDONE; 544 skbdesc = get_skb_frame_desc(entry->skb);
545 skbdesc->frame_type = DUMP_FRAME_TXDONE;
546
575 rt2x00debug_dump_frame(rt2x00dev, entry->skb); 547 rt2x00debug_dump_frame(rt2x00dev, entry->skb);
576 ieee80211_tx_status_irqsafe(rt2x00dev->hw, entry->skb, tx_status); 548
549 if (!(skbdesc->flags & FRAME_DESC_DRIVER_GENERATED))
550 ieee80211_tx_status_irqsafe(rt2x00dev->hw,
551 entry->skb, &tx_status);
552 else
553 dev_kfree_skb(entry->skb);
577 entry->skb = NULL; 554 entry->skb = NULL;
578} 555}
579EXPORT_SYMBOL_GPL(rt2x00lib_txdone); 556EXPORT_SYMBOL_GPL(rt2x00lib_txdone);
580 557
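
The FRAME_DESC_DRIVER_GENERATED check above keeps frames that mac80211 never submitted (for example an RTS frame the driver constructed itself) out of the mac80211 status path; such frames are simply freed. A sketch of how a driver would tag one, assuming only the skb_frame_desc fields visible in this patch and a hypothetical rts_skb it just built:

static void sketch_mark_driver_generated(struct sk_buff *rts_skb)
{
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(rts_skb);

	/* Freed by rt2x00lib_txdone() instead of being reported to mac80211. */
	skbdesc->flags |= FRAME_DESC_DRIVER_GENERATED;
}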
581void rt2x00lib_rxdone(struct data_entry *entry, struct sk_buff *skb, 558void rt2x00lib_rxdone(struct queue_entry *entry,
582 struct rxdata_entry_desc *desc) 559 struct rxdone_entry_desc *rxdesc)
583{ 560{
584 struct rt2x00_dev *rt2x00dev = entry->ring->rt2x00dev; 561 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
585 struct ieee80211_rx_status *rx_status = &rt2x00dev->rx_status; 562 struct ieee80211_rx_status *rx_status = &rt2x00dev->rx_status;
586 struct ieee80211_hw_mode *mode; 563 struct ieee80211_supported_band *sband;
587 struct ieee80211_rate *rate;
588 struct ieee80211_hdr *hdr; 564 struct ieee80211_hdr *hdr;
565 const struct rt2x00_rate *rate;
589 unsigned int i; 566 unsigned int i;
590 int val = 0; 567 int idx = -1;
591 u16 fc; 568 u16 fc;
592 569
593 /* 570 /*
594 * Update RX statistics. 571 * Update RX statistics.
595 */ 572 */
596 mode = &rt2x00dev->hwmodes[rt2x00dev->curr_hwmode]; 573 sband = &rt2x00dev->bands[rt2x00dev->curr_band];
597 for (i = 0; i < mode->num_rates; i++) { 574 for (i = 0; i < sband->n_bitrates; i++) {
598 rate = &mode->rates[i]; 575 rate = rt2x00_get_rate(sband->bitrates[i].hw_value);
599 576
600 /* 577 if (((rxdesc->dev_flags & RXDONE_SIGNAL_PLCP) &&
601 * When frame was received with an OFDM bitrate, 578 (rate->plcp == rxdesc->signal)) ||
602 * the signal is the PLCP value. If it was received with 579 (!(rxdesc->dev_flags & RXDONE_SIGNAL_PLCP) &&
603 * a CCK bitrate the signal is the rate in 0.5kbit/s. 580 (rate->bitrate == rxdesc->signal))) {
604 */ 581 idx = i;
605 if (!desc->ofdm)
606 val = DEVICE_GET_RATE_FIELD(rate->val, RATE);
607 else
608 val = DEVICE_GET_RATE_FIELD(rate->val, PLCP);
609
610 if (val == desc->signal) {
611 val = rate->val;
612 break; 582 break;
613 } 583 }
614 } 584 }
615 585
586 if (idx < 0) {
 587 WARNING(rt2x00dev, "Frame received with unrecognized signal, "
588 "signal=0x%.2x, plcp=%d.\n", rxdesc->signal,
589 !!(rxdesc->dev_flags & RXDONE_SIGNAL_PLCP));
590 idx = 0;
591 }
592
616 /* 593 /*
617 * Only update link status if this is a beacon frame carrying our bssid. 594 * Only update link status if this is a beacon frame carrying our bssid.
618 */ 595 */
619 hdr = (struct ieee80211_hdr*)skb->data; 596 hdr = (struct ieee80211_hdr *)entry->skb->data;
620 fc = le16_to_cpu(hdr->frame_control); 597 fc = le16_to_cpu(hdr->frame_control);
621 if (is_beacon(fc) && desc->my_bss) 598 if (is_beacon(fc) && (rxdesc->dev_flags & RXDONE_MY_BSS))
622 rt2x00lib_update_link_stats(&rt2x00dev->link, desc->rssi); 599 rt2x00lib_update_link_stats(&rt2x00dev->link, rxdesc->rssi);
623 600
624 rt2x00dev->link.qual.rx_success++; 601 rt2x00dev->link.qual.rx_success++;
625 602
626 rx_status->rate = val; 603 rx_status->rate_idx = idx;
627 rx_status->signal = 604 rx_status->signal =
628 rt2x00lib_calculate_link_signal(rt2x00dev, desc->rssi); 605 rt2x00lib_calculate_link_signal(rt2x00dev, rxdesc->rssi);
629 rx_status->ssi = desc->rssi; 606 rx_status->ssi = rxdesc->rssi;
630 rx_status->flag = desc->flags; 607 rx_status->flag = rxdesc->flags;
631 rx_status->antenna = rt2x00dev->link.ant.active.rx; 608 rx_status->antenna = rt2x00dev->link.ant.active.rx;
632 609
633 /* 610 /*
634 * Send frame to mac80211 & debugfs 611 * Send frame to mac80211 & debugfs.
612 * mac80211 will clean up the skb structure.
635 */ 613 */
636 get_skb_desc(skb)->frame_type = DUMP_FRAME_RXDONE; 614 get_skb_frame_desc(entry->skb)->frame_type = DUMP_FRAME_RXDONE;
637 rt2x00debug_dump_frame(rt2x00dev, skb); 615 rt2x00debug_dump_frame(rt2x00dev, entry->skb);
638 ieee80211_rx_irqsafe(rt2x00dev->hw, skb, rx_status); 616 ieee80211_rx_irqsafe(rt2x00dev->hw, entry->skb, rx_status);
617 entry->skb = NULL;
639} 618}
640EXPORT_SYMBOL_GPL(rt2x00lib_rxdone); 619EXPORT_SYMBOL_GPL(rt2x00lib_rxdone);
641 620
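
A concrete case of the signal-to-rate lookup above, taken from the rt2x00_supported_rates table added later in this patch:

/*
 * An 11 Mbit/s CCK frame: a device reporting PLCP values sets
 * RXDONE_SIGNAL_PLCP and delivers signal == 0x03, while a device reporting
 * plain bitrates delivers signal == 110 (units of 100 kbit/s).  Either way
 * the loop resolves to rate_idx 3 of the 2.4 GHz bitrate table.
 */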
@@ -646,83 +625,69 @@ void rt2x00lib_write_tx_desc(struct rt2x00_dev *rt2x00dev,
646 struct sk_buff *skb, 625 struct sk_buff *skb,
647 struct ieee80211_tx_control *control) 626 struct ieee80211_tx_control *control)
648{ 627{
649 struct txdata_entry_desc desc; 628 struct txentry_desc txdesc;
650 struct skb_desc *skbdesc = get_skb_desc(skb); 629 struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
651 struct ieee80211_hdr *ieee80211hdr = skbdesc->data; 630 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skbdesc->data;
631 const struct rt2x00_rate *rate;
652 int tx_rate; 632 int tx_rate;
653 int bitrate;
654 int length; 633 int length;
655 int duration; 634 int duration;
656 int residual; 635 int residual;
657 u16 frame_control; 636 u16 frame_control;
658 u16 seq_ctrl; 637 u16 seq_ctrl;
659 638
660 memset(&desc, 0, sizeof(desc)); 639 memset(&txdesc, 0, sizeof(txdesc));
661 640
662 desc.cw_min = skbdesc->ring->tx_params.cw_min; 641 txdesc.queue = skbdesc->entry->queue->qid;
663 desc.cw_max = skbdesc->ring->tx_params.cw_max; 642 txdesc.cw_min = skbdesc->entry->queue->cw_min;
664 desc.aifs = skbdesc->ring->tx_params.aifs; 643 txdesc.cw_max = skbdesc->entry->queue->cw_max;
665 644 txdesc.aifs = skbdesc->entry->queue->aifs;
666 /*
667 * Identify queue
668 */
669 if (control->queue < rt2x00dev->hw->queues)
670 desc.queue = control->queue;
671 else if (control->queue == IEEE80211_TX_QUEUE_BEACON ||
672 control->queue == IEEE80211_TX_QUEUE_AFTER_BEACON)
673 desc.queue = QUEUE_MGMT;
674 else
675 desc.queue = QUEUE_OTHER;
676 645
677 /* 646 /*
678 * Read required fields from ieee80211 header. 647 * Read required fields from ieee80211 header.
679 */ 648 */
680 frame_control = le16_to_cpu(ieee80211hdr->frame_control); 649 frame_control = le16_to_cpu(hdr->frame_control);
681 seq_ctrl = le16_to_cpu(ieee80211hdr->seq_ctrl); 650 seq_ctrl = le16_to_cpu(hdr->seq_ctrl);
682 651
683 tx_rate = control->tx_rate; 652 tx_rate = control->tx_rate->hw_value;
684 653
685 /* 654 /*
686 * Check whether this frame is to be acked 655 * Check whether this frame is to be acked
687 */ 656 */
688 if (!(control->flags & IEEE80211_TXCTL_NO_ACK)) 657 if (!(control->flags & IEEE80211_TXCTL_NO_ACK))
689 __set_bit(ENTRY_TXD_ACK, &desc.flags); 658 __set_bit(ENTRY_TXD_ACK, &txdesc.flags);
690 659
691 /* 660 /*
692 * Check if this is a RTS/CTS frame 661 * Check if this is a RTS/CTS frame
693 */ 662 */
694 if (is_rts_frame(frame_control) || is_cts_frame(frame_control)) { 663 if (is_rts_frame(frame_control) || is_cts_frame(frame_control)) {
695 __set_bit(ENTRY_TXD_BURST, &desc.flags); 664 __set_bit(ENTRY_TXD_BURST, &txdesc.flags);
696 if (is_rts_frame(frame_control)) { 665 if (is_rts_frame(frame_control)) {
697 __set_bit(ENTRY_TXD_RTS_FRAME, &desc.flags); 666 __set_bit(ENTRY_TXD_RTS_FRAME, &txdesc.flags);
698 __set_bit(ENTRY_TXD_ACK, &desc.flags); 667 __set_bit(ENTRY_TXD_ACK, &txdesc.flags);
699 } else 668 } else
700 __clear_bit(ENTRY_TXD_ACK, &desc.flags); 669 __clear_bit(ENTRY_TXD_ACK, &txdesc.flags);
701 if (control->rts_cts_rate) 670 if (control->rts_cts_rate)
702 tx_rate = control->rts_cts_rate; 671 tx_rate = control->rts_cts_rate->hw_value;
703 } 672 }
704 673
705 /* 674 rate = rt2x00_get_rate(tx_rate);
706 * Check for OFDM
707 */
708 if (DEVICE_GET_RATE_FIELD(tx_rate, RATEMASK) & DEV_OFDM_RATEMASK)
709 __set_bit(ENTRY_TXD_OFDM_RATE, &desc.flags);
710 675
711 /* 676 /*
712 * Check if more fragments are pending 677 * Check if more fragments are pending
713 */ 678 */
714 if (ieee80211_get_morefrag(ieee80211hdr)) { 679 if (ieee80211_get_morefrag(hdr)) {
715 __set_bit(ENTRY_TXD_BURST, &desc.flags); 680 __set_bit(ENTRY_TXD_BURST, &txdesc.flags);
716 __set_bit(ENTRY_TXD_MORE_FRAG, &desc.flags); 681 __set_bit(ENTRY_TXD_MORE_FRAG, &txdesc.flags);
717 } 682 }
718 683
719 /* 684 /*
720 * Beacons and probe responses require the tsf timestamp 685 * Beacons and probe responses require the tsf timestamp
721 * to be inserted into the frame. 686 * to be inserted into the frame.
722 */ 687 */
723 if (control->queue == IEEE80211_TX_QUEUE_BEACON || 688 if (control->queue == RT2X00_BCN_QUEUE_BEACON ||
724 is_probe_resp(frame_control)) 689 is_probe_resp(frame_control))
725 __set_bit(ENTRY_TXD_REQ_TIMESTAMP, &desc.flags); 690 __set_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc.flags);
726 691
727 /* 692 /*
 728 * Determine with what IFS priority this frame should be sent. 693 * Determine with what IFS priority this frame should be sent.
@@ -730,30 +695,30 @@ void rt2x00lib_write_tx_desc(struct rt2x00_dev *rt2x00dev,
730 * or this fragment came after RTS/CTS. 695 * or this fragment came after RTS/CTS.
731 */ 696 */
732 if ((seq_ctrl & IEEE80211_SCTL_FRAG) > 0 || 697 if ((seq_ctrl & IEEE80211_SCTL_FRAG) > 0 ||
733 test_bit(ENTRY_TXD_RTS_FRAME, &desc.flags)) 698 test_bit(ENTRY_TXD_RTS_FRAME, &txdesc.flags))
734 desc.ifs = IFS_SIFS; 699 txdesc.ifs = IFS_SIFS;
735 else 700 else
736 desc.ifs = IFS_BACKOFF; 701 txdesc.ifs = IFS_BACKOFF;
737 702
738 /* 703 /*
739 * PLCP setup 704 * PLCP setup
740 * Length calculation depends on OFDM/CCK rate. 705 * Length calculation depends on OFDM/CCK rate.
741 */ 706 */
742 desc.signal = DEVICE_GET_RATE_FIELD(tx_rate, PLCP); 707 txdesc.signal = rate->plcp;
743 desc.service = 0x04; 708 txdesc.service = 0x04;
744 709
745 length = skbdesc->data_len + FCS_LEN; 710 length = skbdesc->data_len + FCS_LEN;
746 if (test_bit(ENTRY_TXD_OFDM_RATE, &desc.flags)) { 711 if (rate->flags & DEV_RATE_OFDM) {
747 desc.length_high = (length >> 6) & 0x3f; 712 __set_bit(ENTRY_TXD_OFDM_RATE, &txdesc.flags);
748 desc.length_low = length & 0x3f;
749 } else {
750 bitrate = DEVICE_GET_RATE_FIELD(tx_rate, RATE);
751 713
714 txdesc.length_high = (length >> 6) & 0x3f;
715 txdesc.length_low = length & 0x3f;
716 } else {
752 /* 717 /*
753 * Convert length to microseconds. 718 * Convert length to microseconds.
754 */ 719 */
755 residual = get_duration_res(length, bitrate); 720 residual = get_duration_res(length, rate->bitrate);
756 duration = get_duration(length, bitrate); 721 duration = get_duration(length, rate->bitrate);
757 722
758 if (residual != 0) { 723 if (residual != 0) {
759 duration++; 724 duration++;
@@ -761,28 +726,27 @@ void rt2x00lib_write_tx_desc(struct rt2x00_dev *rt2x00dev,
761 /* 726 /*
762 * Check if we need to set the Length Extension 727 * Check if we need to set the Length Extension
763 */ 728 */
764 if (bitrate == 110 && residual <= 30) 729 if (rate->bitrate == 110 && residual <= 30)
765 desc.service |= 0x80; 730 txdesc.service |= 0x80;
766 } 731 }
767 732
768 desc.length_high = (duration >> 8) & 0xff; 733 txdesc.length_high = (duration >> 8) & 0xff;
769 desc.length_low = duration & 0xff; 734 txdesc.length_low = duration & 0xff;
770 735
771 /* 736 /*
772 * When preamble is enabled we should set the 737 * When preamble is enabled we should set the
773 * preamble bit for the signal. 738 * preamble bit for the signal.
774 */ 739 */
775 if (DEVICE_GET_RATE_FIELD(tx_rate, PREAMBLE)) 740 if (rt2x00_get_rate_preamble(tx_rate))
776 desc.signal |= 0x08; 741 txdesc.signal |= 0x08;
777 } 742 }
778 743
779 rt2x00dev->ops->lib->write_tx_desc(rt2x00dev, skb, &desc, control); 744 rt2x00dev->ops->lib->write_tx_desc(rt2x00dev, skb, &txdesc, control);
780 745
781 /* 746 /*
782 * Update ring entry. 747 * Update queue entry.
783 */ 748 */
784 skbdesc->entry->skb = skb; 749 skbdesc->entry->skb = skb;
785 memcpy(&skbdesc->entry->tx_status.control, control, sizeof(*control));
786 750
787 /* 751 /*
788 * The frame has been completely initialized and ready 752 * The frame has been completely initialized and ready
@@ -798,133 +762,167 @@ EXPORT_SYMBOL_GPL(rt2x00lib_write_tx_desc);
798/* 762/*
799 * Driver initialization handlers. 763 * Driver initialization handlers.
800 */ 764 */
765const struct rt2x00_rate rt2x00_supported_rates[12] = {
766 {
767 .flags = DEV_RATE_CCK | DEV_RATE_BASIC,
768 .bitrate = 10,
769 .ratemask = BIT(0),
770 .plcp = 0x00,
771 },
772 {
773 .flags = DEV_RATE_CCK | DEV_RATE_SHORT_PREAMBLE | DEV_RATE_BASIC,
774 .bitrate = 20,
775 .ratemask = BIT(1),
776 .plcp = 0x01,
777 },
778 {
779 .flags = DEV_RATE_CCK | DEV_RATE_SHORT_PREAMBLE | DEV_RATE_BASIC,
780 .bitrate = 55,
781 .ratemask = BIT(2),
782 .plcp = 0x02,
783 },
784 {
785 .flags = DEV_RATE_CCK | DEV_RATE_SHORT_PREAMBLE | DEV_RATE_BASIC,
786 .bitrate = 110,
787 .ratemask = BIT(3),
788 .plcp = 0x03,
789 },
790 {
791 .flags = DEV_RATE_OFDM | DEV_RATE_BASIC,
792 .bitrate = 60,
793 .ratemask = BIT(4),
794 .plcp = 0x0b,
795 },
796 {
797 .flags = DEV_RATE_OFDM,
798 .bitrate = 90,
799 .ratemask = BIT(5),
800 .plcp = 0x0f,
801 },
802 {
803 .flags = DEV_RATE_OFDM | DEV_RATE_BASIC,
804 .bitrate = 120,
805 .ratemask = BIT(6),
806 .plcp = 0x0a,
807 },
808 {
809 .flags = DEV_RATE_OFDM,
810 .bitrate = 180,
811 .ratemask = BIT(7),
812 .plcp = 0x0e,
813 },
814 {
815 .flags = DEV_RATE_OFDM | DEV_RATE_BASIC,
816 .bitrate = 240,
817 .ratemask = BIT(8),
818 .plcp = 0x09,
819 },
820 {
821 .flags = DEV_RATE_OFDM,
822 .bitrate = 360,
823 .ratemask = BIT(9),
824 .plcp = 0x0d,
825 },
826 {
827 .flags = DEV_RATE_OFDM,
828 .bitrate = 480,
829 .ratemask = BIT(10),
830 .plcp = 0x08,
831 },
832 {
833 .flags = DEV_RATE_OFDM,
834 .bitrate = 540,
835 .ratemask = BIT(11),
836 .plcp = 0x0c,
837 },
838};
839
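
The table is keyed through the hw_value that rt2x00lib_rate() below stores in each ieee80211_rate. The helpers that encode and decode it are defined elsewhere in the rt2x00 library and are not part of this hunk; a plausible sketch of the encoding they imply (low byte indexes the table, a high-byte bit marks short preamble) is:

/* Sketch only; the authoritative rt2x00_create_rate_hw_value(),
 * rt2x00_get_rate() and rt2x00_get_rate_preamble() live in the library
 * headers and may differ in detail. */
static inline u16 sketch_create_rate_hw_value(const u16 index,
					      const u16 short_preamble)
{
	return (short_preamble << 8) | (index & 0xff);
}

static inline const struct rt2x00_rate *sketch_get_rate(const u16 hw_value)
{
	return &rt2x00_supported_rates[hw_value & 0xff];
}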
801static void rt2x00lib_channel(struct ieee80211_channel *entry, 840static void rt2x00lib_channel(struct ieee80211_channel *entry,
802 const int channel, const int tx_power, 841 const int channel, const int tx_power,
803 const int value) 842 const int value)
804{ 843{
805 entry->chan = channel; 844 entry->center_freq = ieee80211_channel_to_frequency(channel);
806 if (channel <= 14) 845 entry->hw_value = value;
807 entry->freq = 2407 + (5 * channel); 846 entry->max_power = tx_power;
808 else 847 entry->max_antenna_gain = 0xff;
809 entry->freq = 5000 + (5 * channel);
810 entry->val = value;
811 entry->flag =
812 IEEE80211_CHAN_W_IBSS |
813 IEEE80211_CHAN_W_ACTIVE_SCAN |
814 IEEE80211_CHAN_W_SCAN;
815 entry->power_level = tx_power;
816 entry->antenna_max = 0xff;
817} 848}
818 849
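
ieee80211_channel_to_frequency() gives the same mapping the removed lines computed by hand:

/*
 *   2.4 GHz: channel  6 -> 2407 + 5 *  6 = 2437 MHz
 *   5 GHz:   channel 36 -> 5000 + 5 * 36 = 5180 MHz
 */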
819static void rt2x00lib_rate(struct ieee80211_rate *entry, 850static void rt2x00lib_rate(struct ieee80211_rate *entry,
820 const int rate, const int mask, 851 const u16 index, const struct rt2x00_rate *rate)
821 const int plcp, const int flags)
822{ 852{
823 entry->rate = rate; 853 entry->flags = 0;
824 entry->val = 854 entry->bitrate = rate->bitrate;
825 DEVICE_SET_RATE_FIELD(rate, RATE) | 855 entry->hw_value = rt2x00_create_rate_hw_value(index, 0);
826 DEVICE_SET_RATE_FIELD(mask, RATEMASK) | 856 entry->hw_value_short = entry->hw_value;
827 DEVICE_SET_RATE_FIELD(plcp, PLCP); 857
828 entry->flags = flags; 858 if (rate->flags & DEV_RATE_SHORT_PREAMBLE) {
829 entry->val2 = entry->val; 859 entry->flags |= IEEE80211_RATE_SHORT_PREAMBLE;
830 if (entry->flags & IEEE80211_RATE_PREAMBLE2) 860 entry->hw_value_short |= rt2x00_create_rate_hw_value(index, 1);
831 entry->val2 |= DEVICE_SET_RATE_FIELD(1, PREAMBLE); 861 }
832 entry->min_rssi_ack = 0;
833 entry->min_rssi_ack_delta = 0;
834} 862}
835 863
836static int rt2x00lib_probe_hw_modes(struct rt2x00_dev *rt2x00dev, 864static int rt2x00lib_probe_hw_modes(struct rt2x00_dev *rt2x00dev,
837 struct hw_mode_spec *spec) 865 struct hw_mode_spec *spec)
838{ 866{
839 struct ieee80211_hw *hw = rt2x00dev->hw; 867 struct ieee80211_hw *hw = rt2x00dev->hw;
840 struct ieee80211_hw_mode *hwmodes;
841 struct ieee80211_channel *channels; 868 struct ieee80211_channel *channels;
842 struct ieee80211_rate *rates; 869 struct ieee80211_rate *rates;
870 unsigned int num_rates;
843 unsigned int i; 871 unsigned int i;
844 unsigned char tx_power; 872 unsigned char tx_power;
845 873
846 hwmodes = kzalloc(sizeof(*hwmodes) * spec->num_modes, GFP_KERNEL); 874 num_rates = 0;
847 if (!hwmodes) 875 if (spec->supported_rates & SUPPORT_RATE_CCK)
848 goto exit; 876 num_rates += 4;
877 if (spec->supported_rates & SUPPORT_RATE_OFDM)
878 num_rates += 8;
849 879
850 channels = kzalloc(sizeof(*channels) * spec->num_channels, GFP_KERNEL); 880 channels = kzalloc(sizeof(*channels) * spec->num_channels, GFP_KERNEL);
851 if (!channels) 881 if (!channels)
852 goto exit_free_modes; 882 return -ENOMEM;
853 883
854 rates = kzalloc(sizeof(*rates) * spec->num_rates, GFP_KERNEL); 884 rates = kzalloc(sizeof(*rates) * num_rates, GFP_KERNEL);
855 if (!rates) 885 if (!rates)
856 goto exit_free_channels; 886 goto exit_free_channels;
857 887
858 /* 888 /*
859 * Initialize Rate list. 889 * Initialize Rate list.
860 */ 890 */
861 rt2x00lib_rate(&rates[0], 10, DEV_RATEMASK_1MB, 891 for (i = 0; i < num_rates; i++)
862 0x00, IEEE80211_RATE_CCK); 892 rt2x00lib_rate(&rates[i], i, rt2x00_get_rate(i));
863 rt2x00lib_rate(&rates[1], 20, DEV_RATEMASK_2MB,
864 0x01, IEEE80211_RATE_CCK_2);
865 rt2x00lib_rate(&rates[2], 55, DEV_RATEMASK_5_5MB,
866 0x02, IEEE80211_RATE_CCK_2);
867 rt2x00lib_rate(&rates[3], 110, DEV_RATEMASK_11MB,
868 0x03, IEEE80211_RATE_CCK_2);
869
870 if (spec->num_rates > 4) {
871 rt2x00lib_rate(&rates[4], 60, DEV_RATEMASK_6MB,
872 0x0b, IEEE80211_RATE_OFDM);
873 rt2x00lib_rate(&rates[5], 90, DEV_RATEMASK_9MB,
874 0x0f, IEEE80211_RATE_OFDM);
875 rt2x00lib_rate(&rates[6], 120, DEV_RATEMASK_12MB,
876 0x0a, IEEE80211_RATE_OFDM);
877 rt2x00lib_rate(&rates[7], 180, DEV_RATEMASK_18MB,
878 0x0e, IEEE80211_RATE_OFDM);
879 rt2x00lib_rate(&rates[8], 240, DEV_RATEMASK_24MB,
880 0x09, IEEE80211_RATE_OFDM);
881 rt2x00lib_rate(&rates[9], 360, DEV_RATEMASK_36MB,
882 0x0d, IEEE80211_RATE_OFDM);
883 rt2x00lib_rate(&rates[10], 480, DEV_RATEMASK_48MB,
884 0x08, IEEE80211_RATE_OFDM);
885 rt2x00lib_rate(&rates[11], 540, DEV_RATEMASK_54MB,
886 0x0c, IEEE80211_RATE_OFDM);
887 }
888 893
889 /* 894 /*
890 * Initialize Channel list. 895 * Initialize Channel list.
891 */ 896 */
892 for (i = 0; i < spec->num_channels; i++) { 897 for (i = 0; i < spec->num_channels; i++) {
893 if (spec->channels[i].channel <= 14) 898 if (spec->channels[i].channel <= 14) {
894 tx_power = spec->tx_power_bg[i]; 899 if (spec->tx_power_bg)
895 else if (spec->tx_power_a) 900 tx_power = spec->tx_power_bg[i];
896 tx_power = spec->tx_power_a[i]; 901 else
897 else 902 tx_power = spec->tx_power_default;
898 tx_power = spec->tx_power_default; 903 } else {
904 if (spec->tx_power_a)
905 tx_power = spec->tx_power_a[i];
906 else
907 tx_power = spec->tx_power_default;
908 }
899 909
900 rt2x00lib_channel(&channels[i], 910 rt2x00lib_channel(&channels[i],
901 spec->channels[i].channel, tx_power, i); 911 spec->channels[i].channel, tx_power, i);
902 } 912 }
903 913
904 /* 914 /*
905 * Initialize 802.11b 915 * Initialize 802.11b, 802.11g
906 * Rates: CCK.
907 * Channels: OFDM.
908 */
909 if (spec->num_modes > HWMODE_B) {
910 hwmodes[HWMODE_B].mode = MODE_IEEE80211B;
911 hwmodes[HWMODE_B].num_channels = 14;
912 hwmodes[HWMODE_B].num_rates = 4;
913 hwmodes[HWMODE_B].channels = channels;
914 hwmodes[HWMODE_B].rates = rates;
915 }
916
917 /*
918 * Intitialize 802.11g
919 * Rates: CCK, OFDM. 916 * Rates: CCK, OFDM.
920 * Channels: OFDM. 917 * Channels: 2.4 GHz
921 */ 918 */
922 if (spec->num_modes > HWMODE_G) { 919 if (spec->supported_bands & SUPPORT_BAND_2GHZ) {
923 hwmodes[HWMODE_G].mode = MODE_IEEE80211G; 920 rt2x00dev->bands[IEEE80211_BAND_2GHZ].n_channels = 14;
924 hwmodes[HWMODE_G].num_channels = 14; 921 rt2x00dev->bands[IEEE80211_BAND_2GHZ].n_bitrates = num_rates;
925 hwmodes[HWMODE_G].num_rates = spec->num_rates; 922 rt2x00dev->bands[IEEE80211_BAND_2GHZ].channels = channels;
926 hwmodes[HWMODE_G].channels = channels; 923 rt2x00dev->bands[IEEE80211_BAND_2GHZ].bitrates = rates;
927 hwmodes[HWMODE_G].rates = rates; 924 hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
925 &rt2x00dev->bands[IEEE80211_BAND_2GHZ];
928 } 926 }
929 927
930 /* 928 /*
@@ -932,40 +930,21 @@ static int rt2x00lib_probe_hw_modes(struct rt2x00_dev *rt2x00dev,
932 * Rates: OFDM. 930 * Rates: OFDM.
933 * Channels: OFDM, UNII, HiperLAN2. 931 * Channels: OFDM, UNII, HiperLAN2.
934 */ 932 */
935 if (spec->num_modes > HWMODE_A) { 933 if (spec->supported_bands & SUPPORT_BAND_5GHZ) {
936 hwmodes[HWMODE_A].mode = MODE_IEEE80211A; 934 rt2x00dev->bands[IEEE80211_BAND_5GHZ].n_channels =
937 hwmodes[HWMODE_A].num_channels = spec->num_channels - 14; 935 spec->num_channels - 14;
938 hwmodes[HWMODE_A].num_rates = spec->num_rates - 4; 936 rt2x00dev->bands[IEEE80211_BAND_5GHZ].n_bitrates =
939 hwmodes[HWMODE_A].channels = &channels[14]; 937 num_rates - 4;
940 hwmodes[HWMODE_A].rates = &rates[4]; 938 rt2x00dev->bands[IEEE80211_BAND_5GHZ].channels = &channels[14];
939 rt2x00dev->bands[IEEE80211_BAND_5GHZ].bitrates = &rates[4];
940 hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
941 &rt2x00dev->bands[IEEE80211_BAND_5GHZ];
941 } 942 }
942 943
943 if (spec->num_modes > HWMODE_G &&
944 ieee80211_register_hwmode(hw, &hwmodes[HWMODE_G]))
945 goto exit_free_rates;
946
947 if (spec->num_modes > HWMODE_B &&
948 ieee80211_register_hwmode(hw, &hwmodes[HWMODE_B]))
949 goto exit_free_rates;
950
951 if (spec->num_modes > HWMODE_A &&
952 ieee80211_register_hwmode(hw, &hwmodes[HWMODE_A]))
953 goto exit_free_rates;
954
955 rt2x00dev->hwmodes = hwmodes;
956
957 return 0; 944 return 0;
958 945
959exit_free_rates: 946 exit_free_channels:
960 kfree(rates);
961
962exit_free_channels:
963 kfree(channels); 947 kfree(channels);
964
965exit_free_modes:
966 kfree(hwmodes);
967
968exit:
969 ERROR(rt2x00dev, "Allocation ieee80211 modes failed.\n"); 948 ERROR(rt2x00dev, "Allocation ieee80211 modes failed.\n");
970 return -ENOMEM; 949 return -ENOMEM;
971} 950}
@@ -975,11 +954,11 @@ static void rt2x00lib_remove_hw(struct rt2x00_dev *rt2x00dev)
975 if (test_bit(DEVICE_REGISTERED_HW, &rt2x00dev->flags)) 954 if (test_bit(DEVICE_REGISTERED_HW, &rt2x00dev->flags))
976 ieee80211_unregister_hw(rt2x00dev->hw); 955 ieee80211_unregister_hw(rt2x00dev->hw);
977 956
978 if (likely(rt2x00dev->hwmodes)) { 957 if (likely(rt2x00dev->hw->wiphy->bands[IEEE80211_BAND_2GHZ])) {
979 kfree(rt2x00dev->hwmodes->channels); 958 kfree(rt2x00dev->hw->wiphy->bands[IEEE80211_BAND_2GHZ]->channels);
980 kfree(rt2x00dev->hwmodes->rates); 959 kfree(rt2x00dev->hw->wiphy->bands[IEEE80211_BAND_2GHZ]->bitrates);
981 kfree(rt2x00dev->hwmodes); 960 rt2x00dev->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = NULL;
982 rt2x00dev->hwmodes = NULL; 961 rt2x00dev->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = NULL;
983 } 962 }
984} 963}
985 964
@@ -1012,86 +991,6 @@ static int rt2x00lib_probe_hw(struct rt2x00_dev *rt2x00dev)
1012/* 991/*
1013 * Initialization/uninitialization handlers. 992 * Initialization/uninitialization handlers.
1014 */ 993 */
1015static int rt2x00lib_alloc_entries(struct data_ring *ring,
1016 const u16 max_entries, const u16 data_size,
1017 const u16 desc_size)
1018{
1019 struct data_entry *entry;
1020 unsigned int i;
1021
1022 ring->stats.limit = max_entries;
1023 ring->data_size = data_size;
1024 ring->desc_size = desc_size;
1025
1026 /*
1027 * Allocate all ring entries.
1028 */
1029 entry = kzalloc(ring->stats.limit * sizeof(*entry), GFP_KERNEL);
1030 if (!entry)
1031 return -ENOMEM;
1032
1033 for (i = 0; i < ring->stats.limit; i++) {
1034 entry[i].flags = 0;
1035 entry[i].ring = ring;
1036 entry[i].skb = NULL;
1037 entry[i].entry_idx = i;
1038 }
1039
1040 ring->entry = entry;
1041
1042 return 0;
1043}
1044
1045static int rt2x00lib_alloc_ring_entries(struct rt2x00_dev *rt2x00dev)
1046{
1047 struct data_ring *ring;
1048
1049 /*
1050 * Allocate the RX ring.
1051 */
1052 if (rt2x00lib_alloc_entries(rt2x00dev->rx, RX_ENTRIES, DATA_FRAME_SIZE,
1053 rt2x00dev->ops->rxd_size))
1054 return -ENOMEM;
1055
1056 /*
1057 * First allocate the TX rings.
1058 */
1059 txring_for_each(rt2x00dev, ring) {
1060 if (rt2x00lib_alloc_entries(ring, TX_ENTRIES, DATA_FRAME_SIZE,
1061 rt2x00dev->ops->txd_size))
1062 return -ENOMEM;
1063 }
1064
1065 if (!test_bit(DRIVER_REQUIRE_BEACON_RING, &rt2x00dev->flags))
1066 return 0;
1067
1068 /*
1069 * Allocate the BEACON ring.
1070 */
1071 if (rt2x00lib_alloc_entries(&rt2x00dev->bcn[0], BEACON_ENTRIES,
1072 MGMT_FRAME_SIZE, rt2x00dev->ops->txd_size))
1073 return -ENOMEM;
1074
1075 /*
1076 * Allocate the Atim ring.
1077 */
1078 if (rt2x00lib_alloc_entries(&rt2x00dev->bcn[1], ATIM_ENTRIES,
1079 DATA_FRAME_SIZE, rt2x00dev->ops->txd_size))
1080 return -ENOMEM;
1081
1082 return 0;
1083}
1084
1085static void rt2x00lib_free_ring_entries(struct rt2x00_dev *rt2x00dev)
1086{
1087 struct data_ring *ring;
1088
1089 ring_for_each(rt2x00dev, ring) {
1090 kfree(ring->entry);
1091 ring->entry = NULL;
1092 }
1093}
1094
1095static void rt2x00lib_uninitialize(struct rt2x00_dev *rt2x00dev) 994static void rt2x00lib_uninitialize(struct rt2x00_dev *rt2x00dev)
1096{ 995{
1097 if (!__test_and_clear_bit(DEVICE_INITIALIZED, &rt2x00dev->flags)) 996 if (!__test_and_clear_bit(DEVICE_INITIALIZED, &rt2x00dev->flags))
@@ -1108,9 +1007,9 @@ static void rt2x00lib_uninitialize(struct rt2x00_dev *rt2x00dev)
1108 rt2x00dev->ops->lib->uninitialize(rt2x00dev); 1007 rt2x00dev->ops->lib->uninitialize(rt2x00dev);
1109 1008
1110 /* 1009 /*
1111 * Free allocated ring entries. 1010 * Free allocated queue entries.
1112 */ 1011 */
1113 rt2x00lib_free_ring_entries(rt2x00dev); 1012 rt2x00queue_uninitialize(rt2x00dev);
1114} 1013}
1115 1014
1116static int rt2x00lib_initialize(struct rt2x00_dev *rt2x00dev) 1015static int rt2x00lib_initialize(struct rt2x00_dev *rt2x00dev)
@@ -1121,13 +1020,11 @@ static int rt2x00lib_initialize(struct rt2x00_dev *rt2x00dev)
1121 return 0; 1020 return 0;
1122 1021
1123 /* 1022 /*
1124 * Allocate all ring entries. 1023 * Allocate all queue entries.
1125 */ 1024 */
1126 status = rt2x00lib_alloc_ring_entries(rt2x00dev); 1025 status = rt2x00queue_initialize(rt2x00dev);
1127 if (status) { 1026 if (status)
1128 ERROR(rt2x00dev, "Ring entries allocation failed.\n");
1129 return status; 1027 return status;
1130 }
1131 1028
1132 /* 1029 /*
1133 * Initialize the device. 1030 * Initialize the device.
@@ -1146,7 +1043,7 @@ static int rt2x00lib_initialize(struct rt2x00_dev *rt2x00dev)
1146 return 0; 1043 return 0;
1147 1044
1148exit: 1045exit:
1149 rt2x00lib_free_ring_entries(rt2x00dev); 1046 rt2x00lib_uninitialize(rt2x00dev);
1150 1047
1151 return status; 1048 return status;
1152} 1049}
@@ -1162,11 +1059,9 @@ int rt2x00lib_start(struct rt2x00_dev *rt2x00dev)
1162 * If this is the first interface which is added, 1059 * If this is the first interface which is added,
1163 * we should load the firmware now. 1060 * we should load the firmware now.
1164 */ 1061 */
1165 if (test_bit(DRIVER_REQUIRE_FIRMWARE, &rt2x00dev->flags)) { 1062 retval = rt2x00lib_load_firmware(rt2x00dev);
1166 retval = rt2x00lib_load_firmware(rt2x00dev); 1063 if (retval)
1167 if (retval) 1064 return retval;
1168 return retval;
1169 }
1170 1065
1171 /* 1066 /*
1172 * Initialize the device. 1067 * Initialize the device.
@@ -1184,6 +1079,10 @@ int rt2x00lib_start(struct rt2x00_dev *rt2x00dev)
1184 return retval; 1079 return retval;
1185 } 1080 }
1186 1081
1082 rt2x00dev->intf_ap_count = 0;
1083 rt2x00dev->intf_sta_count = 0;
1084 rt2x00dev->intf_associated = 0;
1085
1187 __set_bit(DEVICE_STARTED, &rt2x00dev->flags); 1086 __set_bit(DEVICE_STARTED, &rt2x00dev->flags);
1188 1087
1189 return 0; 1088 return 0;
@@ -1200,74 +1099,25 @@ void rt2x00lib_stop(struct rt2x00_dev *rt2x00dev)
1200 */ 1099 */
1201 rt2x00lib_disable_radio(rt2x00dev); 1100 rt2x00lib_disable_radio(rt2x00dev);
1202 1101
1102 rt2x00dev->intf_ap_count = 0;
1103 rt2x00dev->intf_sta_count = 0;
1104 rt2x00dev->intf_associated = 0;
1105
1203 __clear_bit(DEVICE_STARTED, &rt2x00dev->flags); 1106 __clear_bit(DEVICE_STARTED, &rt2x00dev->flags);
1204} 1107}
1205 1108
1206/* 1109/*
1207 * driver allocation handlers. 1110 * driver allocation handlers.
1208 */ 1111 */
1209static int rt2x00lib_alloc_rings(struct rt2x00_dev *rt2x00dev) 1112int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev)
1210{ 1113{
1211 struct data_ring *ring; 1114 int retval = -ENOMEM;
1212 unsigned int index;
1213
1214 /*
1215 * We need the following rings:
1216 * RX: 1
1217 * TX: hw->queues
1218 * Beacon: 1 (if required)
1219 * Atim: 1 (if required)
1220 */
1221 rt2x00dev->data_rings = 1 + rt2x00dev->hw->queues +
1222 (2 * test_bit(DRIVER_REQUIRE_BEACON_RING, &rt2x00dev->flags));
1223
1224 ring = kzalloc(rt2x00dev->data_rings * sizeof(*ring), GFP_KERNEL);
1225 if (!ring) {
1226 ERROR(rt2x00dev, "Ring allocation failed.\n");
1227 return -ENOMEM;
1228 }
1229
1230 /*
1231 * Initialize pointers
1232 */
1233 rt2x00dev->rx = ring;
1234 rt2x00dev->tx = &rt2x00dev->rx[1];
1235 if (test_bit(DRIVER_REQUIRE_BEACON_RING, &rt2x00dev->flags))
1236 rt2x00dev->bcn = &rt2x00dev->tx[rt2x00dev->hw->queues];
1237 1115
1238 /* 1116 /*
1239 * Initialize ring parameters. 1117 * Make room for rt2x00_intf inside the per-interface
1240 * RX: queue_idx = 0 1118 * structure ieee80211_vif.
1241 * TX: queue_idx = IEEE80211_TX_QUEUE_DATA0 + index
1242 * TX: cw_min: 2^5 = 32.
1243 * TX: cw_max: 2^10 = 1024.
1244 */ 1119 */
1245 rt2x00dev->rx->rt2x00dev = rt2x00dev; 1120 rt2x00dev->hw->vif_data_size = sizeof(struct rt2x00_intf);
1246 rt2x00dev->rx->queue_idx = 0;
1247
1248 index = IEEE80211_TX_QUEUE_DATA0;
1249 txring_for_each(rt2x00dev, ring) {
1250 ring->rt2x00dev = rt2x00dev;
1251 ring->queue_idx = index++;
1252 ring->tx_params.aifs = 2;
1253 ring->tx_params.cw_min = 5;
1254 ring->tx_params.cw_max = 10;
1255 }
1256
1257 return 0;
1258}
1259
1260static void rt2x00lib_free_rings(struct rt2x00_dev *rt2x00dev)
1261{
1262 kfree(rt2x00dev->rx);
1263 rt2x00dev->rx = NULL;
1264 rt2x00dev->tx = NULL;
1265 rt2x00dev->bcn = NULL;
1266}
1267
1268int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev)
1269{
1270 int retval = -ENOMEM;
1271 1121
1272 /* 1122 /*
1273 * Let the driver probe the device to detect the capabilities. 1123 * Let the driver probe the device to detect the capabilities.
@@ -1281,20 +1131,14 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev)
1281 /* 1131 /*
1282 * Initialize configuration work. 1132 * Initialize configuration work.
1283 */ 1133 */
1284 INIT_WORK(&rt2x00dev->beacon_work, rt2x00lib_beacondone_scheduled); 1134 INIT_WORK(&rt2x00dev->intf_work, rt2x00lib_intf_scheduled);
1285 INIT_WORK(&rt2x00dev->filter_work, rt2x00lib_packetfilter_scheduled); 1135 INIT_WORK(&rt2x00dev->filter_work, rt2x00lib_packetfilter_scheduled);
1286 INIT_WORK(&rt2x00dev->config_work, rt2x00lib_configuration_scheduled);
1287 INIT_DELAYED_WORK(&rt2x00dev->link.work, rt2x00lib_link_tuner); 1136 INIT_DELAYED_WORK(&rt2x00dev->link.work, rt2x00lib_link_tuner);
1288 1137
1289 /* 1138 /*
1290 * Reset current working type. 1139 * Allocate queue array.
1291 */
1292 rt2x00dev->interface.type = IEEE80211_IF_TYPE_INVALID;
1293
1294 /*
1295 * Allocate ring array.
1296 */ 1140 */
1297 retval = rt2x00lib_alloc_rings(rt2x00dev); 1141 retval = rt2x00queue_allocate(rt2x00dev);
1298 if (retval) 1142 if (retval)
1299 goto exit; 1143 goto exit;
1300 1144
@@ -1310,6 +1154,7 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev)
1310 /* 1154 /*
1311 * Register extra components. 1155 * Register extra components.
1312 */ 1156 */
1157 rt2x00leds_register(rt2x00dev);
1313 rt2x00rfkill_allocate(rt2x00dev); 1158 rt2x00rfkill_allocate(rt2x00dev);
1314 rt2x00debug_register(rt2x00dev); 1159 rt2x00debug_register(rt2x00dev);
1315 1160
@@ -1343,6 +1188,7 @@ void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev)
1343 */ 1188 */
1344 rt2x00debug_deregister(rt2x00dev); 1189 rt2x00debug_deregister(rt2x00dev);
1345 rt2x00rfkill_free(rt2x00dev); 1190 rt2x00rfkill_free(rt2x00dev);
1191 rt2x00leds_unregister(rt2x00dev);
1346 1192
1347 /* 1193 /*
1348 * Free ieee80211_hw memory. 1194 * Free ieee80211_hw memory.
@@ -1355,9 +1201,9 @@ void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev)
1355 rt2x00lib_free_firmware(rt2x00dev); 1201 rt2x00lib_free_firmware(rt2x00dev);
1356 1202
1357 /* 1203 /*
1358 * Free ring structures. 1204 * Free queue structures.
1359 */ 1205 */
1360 rt2x00lib_free_rings(rt2x00dev); 1206 rt2x00queue_free(rt2x00dev);
1361} 1207}
1362EXPORT_SYMBOL_GPL(rt2x00lib_remove_dev); 1208EXPORT_SYMBOL_GPL(rt2x00lib_remove_dev);
1363 1209
@@ -1388,6 +1234,7 @@ int rt2x00lib_suspend(struct rt2x00_dev *rt2x00dev, pm_message_t state)
1388 /* 1234 /*
1389 * Suspend/disable extra components. 1235 * Suspend/disable extra components.
1390 */ 1236 */
1237 rt2x00leds_suspend(rt2x00dev);
1391 rt2x00rfkill_suspend(rt2x00dev); 1238 rt2x00rfkill_suspend(rt2x00dev);
1392 rt2x00debug_deregister(rt2x00dev); 1239 rt2x00debug_deregister(rt2x00dev);
1393 1240
@@ -1412,9 +1259,30 @@ exit:
1412} 1259}
1413EXPORT_SYMBOL_GPL(rt2x00lib_suspend); 1260EXPORT_SYMBOL_GPL(rt2x00lib_suspend);
1414 1261
1262static void rt2x00lib_resume_intf(void *data, u8 *mac,
1263 struct ieee80211_vif *vif)
1264{
1265 struct rt2x00_dev *rt2x00dev = data;
1266 struct rt2x00_intf *intf = vif_to_intf(vif);
1267
1268 spin_lock(&intf->lock);
1269
1270 rt2x00lib_config_intf(rt2x00dev, intf,
1271 vif->type, intf->mac, intf->bssid);
1272
1273
1274 /*
1275 * Master and Ad-hoc modes require a new beacon update.
1276 */
1277 if (vif->type == IEEE80211_IF_TYPE_AP ||
1278 vif->type == IEEE80211_IF_TYPE_IBSS)
1279 intf->delayed_flags |= DELAYED_UPDATE_BEACON;
1280
1281 spin_unlock(&intf->lock);
1282}
1283
1415int rt2x00lib_resume(struct rt2x00_dev *rt2x00dev) 1284int rt2x00lib_resume(struct rt2x00_dev *rt2x00dev)
1416{ 1285{
1417 struct interface *intf = &rt2x00dev->interface;
1418 int retval; 1286 int retval;
1419 1287
1420 NOTICE(rt2x00dev, "Waking up.\n"); 1288 NOTICE(rt2x00dev, "Waking up.\n");
@@ -1424,6 +1292,7 @@ int rt2x00lib_resume(struct rt2x00_dev *rt2x00dev)
1424 */ 1292 */
1425 rt2x00debug_register(rt2x00dev); 1293 rt2x00debug_register(rt2x00dev);
1426 rt2x00rfkill_resume(rt2x00dev); 1294 rt2x00rfkill_resume(rt2x00dev);
1295 rt2x00leds_resume(rt2x00dev);
1427 1296
1428 /* 1297 /*
1429 * Only continue if mac80211 had open interfaces. 1298 * Only continue if mac80211 had open interfaces.
@@ -1445,9 +1314,12 @@ int rt2x00lib_resume(struct rt2x00_dev *rt2x00dev)
1445 if (!rt2x00dev->hw->conf.radio_enabled) 1314 if (!rt2x00dev->hw->conf.radio_enabled)
1446 rt2x00lib_disable_radio(rt2x00dev); 1315 rt2x00lib_disable_radio(rt2x00dev);
1447 1316
1448 rt2x00lib_config_mac_addr(rt2x00dev, intf->mac); 1317 /*
1449 rt2x00lib_config_bssid(rt2x00dev, intf->bssid); 1318 * Iterate over each active interface to
1450 rt2x00lib_config_type(rt2x00dev, intf->type); 1319 * reconfigure the hardware.
1320 */
1321 ieee80211_iterate_active_interfaces(rt2x00dev->hw,
1322 rt2x00lib_resume_intf, rt2x00dev);
1451 1323
1452 /* 1324 /*
1453 * We are ready again to receive requests from mac80211. 1325 * We are ready again to receive requests from mac80211.
@@ -1463,12 +1335,11 @@ int rt2x00lib_resume(struct rt2x00_dev *rt2x00dev)
1463 ieee80211_start_queues(rt2x00dev->hw); 1335 ieee80211_start_queues(rt2x00dev->hw);
1464 1336
1465 /* 1337 /*
1466 * When in Master or Ad-hoc mode, 1338 * During interface iteration we might have changed the
1467 * restart Beacon transmitting by faking a beacondone event. 1339 * delayed_flags, time to handle the event by calling
1340 * the work handler directly.
1468 */ 1341 */
1469 if (intf->type == IEEE80211_IF_TYPE_AP || 1342 rt2x00lib_intf_scheduled(&rt2x00dev->intf_work);
1470 intf->type == IEEE80211_IF_TYPE_IBSS)
1471 rt2x00lib_beacondone(rt2x00dev);
1472 1343
1473 return 0; 1344 return 0;
1474 1345
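The band setup above replaces the old ieee80211_register_hwmode() path: the driver now owns the channel and bitrate arrays and simply points hw->wiphy->bands[] at them. A distilled restatement of that pattern for the 2.4 GHz case (kernel-style C; the helper name is illustrative, the field assignments mirror the hunk above):

/*
 * Sketch of the wiphy band hookup: struct ieee80211_supported_band only
 * references the driver-owned arrays, and mac80211 finds the band via
 * hw->wiphy->bands[IEEE80211_BAND_2GHZ].
 */
static void example_register_2ghz_band(struct rt2x00_dev *rt2x00dev,
				       struct ieee80211_channel *channels,
				       struct ieee80211_rate *rates,
				       unsigned int num_rates)
{
	struct ieee80211_supported_band *band =
	    &rt2x00dev->bands[IEEE80211_BAND_2GHZ];

	band->n_channels = 14;		/* 2.4 GHz channels 1-14 */
	band->n_bitrates = num_rates;	/* 4 CCK rates, plus 8 OFDM for 11g */
	band->channels = channels;
	band->bitrates = rates;

	rt2x00dev->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = band;
}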
diff --git a/drivers/net/wireless/rt2x00/rt2x00dump.h b/drivers/net/wireless/rt2x00/rt2x00dump.h
index 99f3f367adce..7169c222a486 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dump.h
+++ b/drivers/net/wireless/rt2x00/rt2x00dump.h
@@ -1,5 +1,5 @@
1/* 1/*
2 Copyright (C) 2004 - 2007 rt2x00 SourceForge Project 2 Copyright (C) 2004 - 2008 rt2x00 SourceForge Project
3 <http://rt2x00.serialmonkey.com> 3 <http://rt2x00.serialmonkey.com>
4 4
5 This program is free software; you can redistribute it and/or modify 5 This program is free software; you can redistribute it and/or modify
@@ -93,8 +93,8 @@ enum rt2x00_dump_type {
93 * @chip_rf: RF chipset 93 * @chip_rf: RF chipset
94 * @chip_rev: Chipset revision 94 * @chip_rev: Chipset revision
95 * @type: The frame type (&rt2x00_dump_type) 95 * @type: The frame type (&rt2x00_dump_type)
96 * @ring_index: The index number of the data ring. 96 * @queue_index: The index number of the data queue.
97 * @entry_index: The index number of the entry inside the data ring. 97 * @entry_index: The index number of the entry inside the data queue.
98 * @timestamp_sec: Timestamp - seconds 98 * @timestamp_sec: Timestamp - seconds
99 * @timestamp_usec: Timestamp - microseconds 99 * @timestamp_usec: Timestamp - microseconds
100 */ 100 */
@@ -111,7 +111,7 @@ struct rt2x00dump_hdr {
111 __le32 chip_rev; 111 __le32 chip_rev;
112 112
113 __le16 type; 113 __le16 type;
114 __u8 ring_index; 114 __u8 queue_index;
115 __u8 entry_index; 115 __u8 entry_index;
116 116
117 __le32 timestamp_sec; 117 __le32 timestamp_sec;
diff --git a/drivers/net/wireless/rt2x00/rt2x00firmware.c b/drivers/net/wireless/rt2x00/rt2x00firmware.c
index 0a475e4e2442..b971bc6e7ee2 100644
--- a/drivers/net/wireless/rt2x00/rt2x00firmware.c
+++ b/drivers/net/wireless/rt2x00/rt2x00firmware.c
@@ -1,5 +1,5 @@
1/* 1/*
2 Copyright (C) 2004 - 2007 rt2x00 SourceForge Project 2 Copyright (C) 2004 - 2008 rt2x00 SourceForge Project
3 <http://rt2x00.serialmonkey.com> 3 <http://rt2x00.serialmonkey.com>
4 4
5 This program is free software; you can redistribute it and/or modify 5 This program is free software; you can redistribute it and/or modify
@@ -23,7 +23,6 @@
23 Abstract: rt2x00 firmware loading routines. 23 Abstract: rt2x00 firmware loading routines.
24 */ 24 */
25 25
26#include <linux/crc-itu-t.h>
27#include <linux/kernel.h> 26#include <linux/kernel.h>
28#include <linux/module.h> 27#include <linux/module.h>
29 28
@@ -37,7 +36,6 @@ static int rt2x00lib_request_firmware(struct rt2x00_dev *rt2x00dev)
37 char *fw_name; 36 char *fw_name;
38 int retval; 37 int retval;
39 u16 crc; 38 u16 crc;
40 u16 tmp;
41 39
42 /* 40 /*
43 * Read correct firmware from harddisk. 41 * Read correct firmware from harddisk.
@@ -63,18 +61,9 @@ static int rt2x00lib_request_firmware(struct rt2x00_dev *rt2x00dev)
63 return -ENOENT; 61 return -ENOENT;
64 } 62 }
65 63
66 /* 64 crc = rt2x00dev->ops->lib->get_firmware_crc(fw->data, fw->size);
67 * Validate the firmware using 16 bit CRC.
68 * The last 2 bytes of the firmware are the CRC
69 * so substract those 2 bytes from the CRC checksum,
70 * and set those 2 bytes to 0 when calculating CRC.
71 */
72 tmp = 0;
73 crc = crc_itu_t(0, fw->data, fw->size - 2);
74 crc = crc_itu_t(crc, (u8 *)&tmp, 2);
75
76 if (crc != (fw->data[fw->size - 2] << 8 | fw->data[fw->size - 1])) { 65 if (crc != (fw->data[fw->size - 2] << 8 | fw->data[fw->size - 1])) {
77 ERROR(rt2x00dev, "Firmware CRC error.\n"); 66 ERROR(rt2x00dev, "Firmware checksum error.\n");
78 retval = -ENOENT; 67 retval = -ENOENT;
79 goto exit; 68 goto exit;
80 } 69 }
@@ -96,6 +85,9 @@ int rt2x00lib_load_firmware(struct rt2x00_dev *rt2x00dev)
96{ 85{
97 int retval; 86 int retval;
98 87
88 if (!test_bit(DRIVER_REQUIRE_FIRMWARE, &rt2x00dev->flags))
89 return 0;
90
99 if (!rt2x00dev->fw) { 91 if (!rt2x00dev->fw) {
100 retval = rt2x00lib_request_firmware(rt2x00dev); 92 retval = rt2x00lib_request_firmware(rt2x00dev);
101 if (retval) 93 if (retval)
@@ -116,4 +108,3 @@ void rt2x00lib_free_firmware(struct rt2x00_dev *rt2x00dev)
116 release_firmware(rt2x00dev->fw); 108 release_firmware(rt2x00dev->fw);
117 rt2x00dev->fw = NULL; 109 rt2x00dev->fw = NULL;
118} 110}
119
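The CRC check above is now delegated to a per-driver get_firmware_crc() callback instead of an inline crc_itu_t() computation. As a hedged sketch only (the prototype is inferred from the call site, not quoted from the ops structure), a driver that keeps the old behaviour could implement the callback like this, zeroing the trailing two checksum bytes exactly as the removed code did:

#include <linux/crc-itu-t.h>

/*
 * Illustrative get_firmware_crc(): the last two bytes of the image hold
 * the checksum, so compute the ITU-T CRC over everything before them and
 * feed two zero bytes in their place.
 */
static u16 example_get_firmware_crc(const void *data, const size_t len)
{
	u16 crc;
	u16 zero = 0;

	crc = crc_itu_t(0, data, len - 2);
	crc = crc_itu_t(crc, (u8 *)&zero, 2);

	return crc;
}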
diff --git a/drivers/net/wireless/rt2x00/rt2x00leds.c b/drivers/net/wireless/rt2x00/rt2x00leds.c
new file mode 100644
index 000000000000..40c1f5c1b805
--- /dev/null
+++ b/drivers/net/wireless/rt2x00/rt2x00leds.c
@@ -0,0 +1,219 @@
1/*
2 Copyright (C) 2004 - 2008 rt2x00 SourceForge Project
3 <http://rt2x00.serialmonkey.com>
4
5 This program is free software; you can redistribute it and/or modify
6 it under the terms of the GNU General Public License as published by
7 the Free Software Foundation; either version 2 of the License, or
8 (at your option) any later version.
9
10 This program is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 GNU General Public License for more details.
14
15 You should have received a copy of the GNU General Public License
16 along with this program; if not, write to the
17 Free Software Foundation, Inc.,
18 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 */
20
21/*
22 Module: rt2x00lib
23 Abstract: rt2x00 led specific routines.
24 */
25
26#include <linux/kernel.h>
27#include <linux/module.h>
28
29#include "rt2x00.h"
30#include "rt2x00lib.h"
31
32void rt2x00leds_led_quality(struct rt2x00_dev *rt2x00dev, int rssi)
33{
34 struct rt2x00_led *led = &rt2x00dev->led_qual;
35 unsigned int brightness;
36
37 if ((led->type != LED_TYPE_QUALITY) || !(led->flags & LED_REGISTERED))
38 return;
39
40 /*
41 * Led handling requires a positive value for the rssi,
42 * to do that correctly we need to add the correction.
43 */
44 rssi += rt2x00dev->rssi_offset;
45
46 /*
47 * Get the rssi level, this is used to convert the rssi
48 * to a LED value inside the range LED_OFF - LED_FULL.
49 */
50 if (rssi <= 30)
51 rssi = 0;
52 else if (rssi <= 39)
53 rssi = 1;
54 else if (rssi <= 49)
55 rssi = 2;
56 else if (rssi <= 53)
57 rssi = 3;
58 else if (rssi <= 63)
59 rssi = 4;
60 else
61 rssi = 5;
62
63 /*
64 * Note that we must _not_ send LED_OFF since the driver
65 * is going to calculate the value and might use it in a
66 * division.
67 */
68 brightness = ((LED_FULL / 6) * rssi) + 1;
69 if (brightness != led->led_dev.brightness) {
70 led->led_dev.brightness_set(&led->led_dev, brightness);
71 led->led_dev.brightness = brightness;
72 }
73}
74
75void rt2x00leds_led_assoc(struct rt2x00_dev *rt2x00dev, bool enabled)
76{
77 struct rt2x00_led *led = &rt2x00dev->led_assoc;
78 unsigned int brightness;
79
80 if ((led->type != LED_TYPE_ASSOC) || !(led->flags & LED_REGISTERED))
81 return;
82
83 brightness = enabled ? LED_FULL : LED_OFF;
84 if (brightness != led->led_dev.brightness) {
85 led->led_dev.brightness_set(&led->led_dev, brightness);
86 led->led_dev.brightness = brightness;
87 }
88}
89
90void rt2x00leds_led_radio(struct rt2x00_dev *rt2x00dev, bool enabled)
91{
92 struct rt2x00_led *led = &rt2x00dev->led_radio;
93 unsigned int brightness;
94
95 if ((led->type != LED_TYPE_RADIO) || !(led->flags & LED_REGISTERED))
96 return;
97
98 brightness = enabled ? LED_FULL : LED_OFF;
99 if (brightness != led->led_dev.brightness) {
100 led->led_dev.brightness_set(&led->led_dev, brightness);
101 led->led_dev.brightness = brightness;
102 }
103}
104
105static int rt2x00leds_register_led(struct rt2x00_dev *rt2x00dev,
106 struct rt2x00_led *led,
107 const char *name)
108{
109 struct device *device = wiphy_dev(rt2x00dev->hw->wiphy);
110 int retval;
111
112 led->led_dev.name = name;
113
114 retval = led_classdev_register(device, &led->led_dev);
115 if (retval) {
116 ERROR(rt2x00dev, "Failed to register led handler.\n");
117 return retval;
118 }
119
120 led->flags |= LED_REGISTERED;
121
122 return 0;
123}
124
125void rt2x00leds_register(struct rt2x00_dev *rt2x00dev)
126{
127 char dev_name[16];
128 char name[32];
129 int retval;
130 unsigned long on_period;
131 unsigned long off_period;
132
133 snprintf(dev_name, sizeof(dev_name), "%s-%s",
134 rt2x00dev->ops->name, wiphy_name(rt2x00dev->hw->wiphy));
135
136 if (rt2x00dev->led_radio.flags & LED_INITIALIZED) {
137 snprintf(name, sizeof(name), "%s:radio", dev_name);
138
139 retval = rt2x00leds_register_led(rt2x00dev,
140 &rt2x00dev->led_radio,
141 name);
142 if (retval)
143 goto exit_fail;
144 }
145
146 if (rt2x00dev->led_assoc.flags & LED_INITIALIZED) {
147 snprintf(name, sizeof(name), "%s:assoc", dev_name);
148
149 retval = rt2x00leds_register_led(rt2x00dev,
150 &rt2x00dev->led_assoc,
151 name);
152 if (retval)
153 goto exit_fail;
154 }
155
156 if (rt2x00dev->led_qual.flags & LED_INITIALIZED) {
157 snprintf(name, sizeof(name), "%s:quality", dev_name);
158
159 retval = rt2x00leds_register_led(rt2x00dev,
160 &rt2x00dev->led_qual,
161 name);
162 if (retval)
163 goto exit_fail;
164 }
165
166 /*
167 * Initialize blink time to default value:
168 * On period: 70ms
169 * Off period: 30ms
170 */
171 if (rt2x00dev->led_radio.led_dev.blink_set) {
172 on_period = 70;
173 off_period = 30;
174 rt2x00dev->led_radio.led_dev.blink_set(
175 &rt2x00dev->led_radio.led_dev, &on_period, &off_period);
176 }
177
178 return;
179
180exit_fail:
181 rt2x00leds_unregister(rt2x00dev);
182}
183
184static void rt2x00leds_unregister_led(struct rt2x00_led *led)
185{
186 led_classdev_unregister(&led->led_dev);
187 led->led_dev.brightness_set(&led->led_dev, LED_OFF);
188 led->flags &= ~LED_REGISTERED;
189}
190
191void rt2x00leds_unregister(struct rt2x00_dev *rt2x00dev)
192{
193 if (rt2x00dev->led_qual.flags & LED_REGISTERED)
194 rt2x00leds_unregister_led(&rt2x00dev->led_qual);
195 if (rt2x00dev->led_assoc.flags & LED_REGISTERED)
196 rt2x00leds_unregister_led(&rt2x00dev->led_assoc);
197 if (rt2x00dev->led_radio.flags & LED_REGISTERED)
198 rt2x00leds_unregister_led(&rt2x00dev->led_radio);
199}
200
201void rt2x00leds_suspend(struct rt2x00_dev *rt2x00dev)
202{
203 if (rt2x00dev->led_qual.flags & LED_REGISTERED)
204 led_classdev_suspend(&rt2x00dev->led_qual.led_dev);
205 if (rt2x00dev->led_assoc.flags & LED_REGISTERED)
206 led_classdev_suspend(&rt2x00dev->led_assoc.led_dev);
207 if (rt2x00dev->led_radio.flags & LED_REGISTERED)
208 led_classdev_suspend(&rt2x00dev->led_radio.led_dev);
209}
210
211void rt2x00leds_resume(struct rt2x00_dev *rt2x00dev)
212{
213 if (rt2x00dev->led_radio.flags & LED_REGISTERED)
214 led_classdev_resume(&rt2x00dev->led_radio.led_dev);
215 if (rt2x00dev->led_assoc.flags & LED_REGISTERED)
216 led_classdev_resume(&rt2x00dev->led_assoc.led_dev);
217 if (rt2x00dev->led_qual.flags & LED_REGISTERED)
218 led_classdev_resume(&rt2x00dev->led_qual.led_dev);
219}
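rt2x00leds_led_quality() above buckets the offset-corrected RSSI into six levels and intentionally never reports LED_OFF. A stand-alone restatement with one worked number (the +120 rssi_offset is an assumed example value; LED_FULL is 255 in the LED class):

/* Same bucketing as rt2x00leds_led_quality(), written out independently. */
static unsigned int quality_to_brightness(int rssi, int rssi_offset)
{
	int level;

	rssi += rssi_offset;	/* e.g. -79 dBm + 120 = 41 */

	if (rssi <= 30)
		level = 0;
	else if (rssi <= 39)
		level = 1;
	else if (rssi <= 49)
		level = 2;	/* 41 lands in this bucket */
	else if (rssi <= 53)
		level = 3;
	else if (rssi <= 63)
		level = 4;
	else
		level = 5;

	/* (255 / 6) * 2 + 1 = 85; level 0 still yields 1, never LED_OFF */
	return ((255 / 6) * level) + 1;
}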
diff --git a/drivers/net/wireless/rt2x00/rt2x00leds.h b/drivers/net/wireless/rt2x00/rt2x00leds.h
new file mode 100644
index 000000000000..9df4a49bdcad
--- /dev/null
+++ b/drivers/net/wireless/rt2x00/rt2x00leds.h
@@ -0,0 +1,50 @@
1/*
2 Copyright (C) 2004 - 2008 rt2x00 SourceForge Project
3 <http://rt2x00.serialmonkey.com>
4
5 This program is free software; you can redistribute it and/or modify
6 it under the terms of the GNU General Public License as published by
7 the Free Software Foundation; either version 2 of the License, or
8 (at your option) any later version.
9
10 This program is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 GNU General Public License for more details.
14
15 You should have received a copy of the GNU General Public License
16 along with this program; if not, write to the
17 Free Software Foundation, Inc.,
18 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 */
20
21/*
22 Module: rt2x00lib
23 Abstract: rt2x00 led datastructures and routines
24 */
25
26#ifndef RT2X00LEDS_H
27#define RT2X00LEDS_H
28
29enum led_type {
30 LED_TYPE_RADIO,
31 LED_TYPE_ASSOC,
32 LED_TYPE_ACTIVITY,
33 LED_TYPE_QUALITY,
34};
35
36#ifdef CONFIG_RT2X00_LIB_LEDS
37
38struct rt2x00_led {
39 struct rt2x00_dev *rt2x00dev;
40 struct led_classdev led_dev;
41
42 enum led_type type;
43 unsigned int flags;
44#define LED_INITIALIZED ( 1 << 0 )
45#define LED_REGISTERED ( 1 << 1 )
46};
47
48#endif /* CONFIG_RT2X00_LIB_LEDS */
49
50#endif /* RT2X00LEDS_H */
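The two flag bits above split responsibility: a driver marks a LED as LED_INITIALIZED once the classdev callbacks are filled in, and rt2x00leds_register() registers only those LEDs and sets LED_REGISTERED itself. A hypothetical driver-side setup helper (name and wiring are illustrative, not taken from a real rt2x00 driver):

/*
 * Hypothetical probe-time LED setup: only LEDs flagged LED_INITIALIZED
 * are picked up by rt2x00leds_register(); LED_REGISTERED is managed by
 * rt2x00lib itself.
 */
static void example_init_led(struct rt2x00_dev *rt2x00dev,
			     struct rt2x00_led *led, enum led_type type,
			     void (*brightness_set)(struct led_classdev *,
						    enum led_brightness))
{
	led->rt2x00dev = rt2x00dev;
	led->type = type;
	led->led_dev.brightness_set = brightness_set;
	led->flags = LED_INITIALIZED;
}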
diff --git a/drivers/net/wireless/rt2x00/rt2x00lib.h b/drivers/net/wireless/rt2x00/rt2x00lib.h
index ce58c654ade1..5be32fffc74c 100644
--- a/drivers/net/wireless/rt2x00/rt2x00lib.h
+++ b/drivers/net/wireless/rt2x00/rt2x00lib.h
@@ -1,5 +1,5 @@
1/* 1/*
2 Copyright (C) 2004 - 2007 rt2x00 SourceForge Project 2 Copyright (C) 2004 - 2008 rt2x00 SourceForge Project
3 <http://rt2x00.serialmonkey.com> 3 <http://rt2x00.serialmonkey.com>
4 4
5 This program is free software; you can redistribute it and/or modify 5 This program is free software; you can redistribute it and/or modify
@@ -34,6 +34,40 @@
34#define RFKILL_POLL_INTERVAL ( 1000 ) 34#define RFKILL_POLL_INTERVAL ( 1000 )
35 35
36/* 36/*
37 * rt2x00_rate: Per rate device information
38 */
39struct rt2x00_rate {
40 unsigned short flags;
41#define DEV_RATE_CCK 0x0001
42#define DEV_RATE_OFDM 0x0002
43#define DEV_RATE_SHORT_PREAMBLE 0x0004
44#define DEV_RATE_BASIC 0x0008
45
46 unsigned short bitrate; /* In 100kbit/s */
47 unsigned short ratemask;
48
49 unsigned short plcp;
50};
51
52extern const struct rt2x00_rate rt2x00_supported_rates[12];
53
54static inline u16 rt2x00_create_rate_hw_value(const u16 index,
55 const u16 short_preamble)
56{
57 return (short_preamble << 8) | (index & 0xff);
58}
59
60static inline const struct rt2x00_rate *rt2x00_get_rate(const u16 hw_value)
61{
62 return &rt2x00_supported_rates[hw_value & 0xff];
63}
64
65static inline int rt2x00_get_rate_preamble(const u16 hw_value)
66{
67 return (hw_value & 0xff00);
68}
69
70/*
37 * Radio control handlers. 71 * Radio control handlers.
38 */ 72 */
39int rt2x00lib_enable_radio(struct rt2x00_dev *rt2x00dev); 73int rt2x00lib_enable_radio(struct rt2x00_dev *rt2x00dev);
@@ -50,15 +84,29 @@ void rt2x00lib_stop(struct rt2x00_dev *rt2x00dev);
50/* 84/*
51 * Configuration handlers. 85 * Configuration handlers.
52 */ 86 */
53void rt2x00lib_config_mac_addr(struct rt2x00_dev *rt2x00dev, u8 *mac); 87void rt2x00lib_config_intf(struct rt2x00_dev *rt2x00dev,
54void rt2x00lib_config_bssid(struct rt2x00_dev *rt2x00dev, u8 *bssid); 88 struct rt2x00_intf *intf,
55void rt2x00lib_config_type(struct rt2x00_dev *rt2x00dev, const int type); 89 enum ieee80211_if_types type,
90 u8 *mac, u8 *bssid);
91void rt2x00lib_config_erp(struct rt2x00_dev *rt2x00dev,
92 struct rt2x00_intf *intf,
93 struct ieee80211_bss_conf *conf);
56void rt2x00lib_config_antenna(struct rt2x00_dev *rt2x00dev, 94void rt2x00lib_config_antenna(struct rt2x00_dev *rt2x00dev,
57 enum antenna rx, enum antenna tx); 95 enum antenna rx, enum antenna tx);
58void rt2x00lib_config(struct rt2x00_dev *rt2x00dev, 96void rt2x00lib_config(struct rt2x00_dev *rt2x00dev,
59 struct ieee80211_conf *conf, const int force_config); 97 struct ieee80211_conf *conf, const int force_config);
60 98
61/* 99/*
100 * Queue handlers.
101 */
102void rt2x00queue_init_rx(struct rt2x00_dev *rt2x00dev);
103void rt2x00queue_init_tx(struct rt2x00_dev *rt2x00dev);
104int rt2x00queue_initialize(struct rt2x00_dev *rt2x00dev);
105void rt2x00queue_uninitialize(struct rt2x00_dev *rt2x00dev);
106int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev);
107void rt2x00queue_free(struct rt2x00_dev *rt2x00dev);
108
109/*
62 * Firmware handlers. 110 * Firmware handlers.
63 */ 111 */
64#ifdef CONFIG_RT2X00_LIB_FIRMWARE 112#ifdef CONFIG_RT2X00_LIB_FIRMWARE
@@ -132,4 +180,48 @@ static inline void rt2x00rfkill_resume(struct rt2x00_dev *rt2x00dev)
132} 180}
133#endif /* CONFIG_RT2X00_LIB_RFKILL */ 181#endif /* CONFIG_RT2X00_LIB_RFKILL */
134 182
183/*
184 * LED handlers
185 */
186#ifdef CONFIG_RT2X00_LIB_LEDS
187void rt2x00leds_led_quality(struct rt2x00_dev *rt2x00dev, int rssi);
188void rt2x00leds_led_assoc(struct rt2x00_dev *rt2x00dev, bool enabled);
189void rt2x00leds_led_radio(struct rt2x00_dev *rt2x00dev, bool enabled);
190void rt2x00leds_register(struct rt2x00_dev *rt2x00dev);
191void rt2x00leds_unregister(struct rt2x00_dev *rt2x00dev);
192void rt2x00leds_suspend(struct rt2x00_dev *rt2x00dev);
193void rt2x00leds_resume(struct rt2x00_dev *rt2x00dev);
194#else
195static inline void rt2x00leds_led_quality(struct rt2x00_dev *rt2x00dev,
196 int rssi)
197{
198}
199
200static inline void rt2x00leds_led_assoc(struct rt2x00_dev *rt2x00dev,
201 bool enabled)
202{
203}
204
205static inline void rt2x00leds_led_radio(struct rt2x00_dev *rt2x00dev,
206 bool enabled)
207{
208}
209
210static inline void rt2x00leds_register(struct rt2x00_dev *rt2x00dev)
211{
212}
213
214static inline void rt2x00leds_unregister(struct rt2x00_dev *rt2x00dev)
215{
216}
217
218static inline void rt2x00leds_suspend(struct rt2x00_dev *rt2x00dev)
219{
220}
221
222static inline void rt2x00leds_resume(struct rt2x00_dev *rt2x00dev)
223{
224}
225#endif /* CONFIG_RT2X00_LIB_LEDS */
226
135#endif /* RT2X00LIB_H */ 227#endif /* RT2X00LIB_H */
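The rate helpers above pack the table index into the low byte of hw_value and the short-preamble flag into bit 8, which is exactly what rt2x00lib_rate() stores in hw_value and hw_value_short. A hedged usage sketch (the function and variable names are invented for illustration):

/*
 * Decode a hw_value coming back through the TX path:
 * rt2x00_create_rate_hw_value(3, 1) == 0x0103, so the low byte recovers
 * the rt2x00_supported_rates[] entry and bit 8 flags short preamble.
 */
static void example_decode_rate(u16 hw_value)
{
	const struct rt2x00_rate *hwrate = rt2x00_get_rate(hw_value);

	if (rt2x00_get_rate_preamble(hw_value)) {
		/* short-preamble variant requested for this rate */
	}

	/* hwrate->plcp and hwrate->ratemask feed the TX descriptor */
	(void)hwrate;
}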
diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
index e3f15e518c76..c206b5092070 100644
--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
@@ -1,5 +1,5 @@
1/* 1/*
2 Copyright (C) 2004 - 2007 rt2x00 SourceForge Project 2 Copyright (C) 2004 - 2008 rt2x00 SourceForge Project
3 <http://rt2x00.serialmonkey.com> 3 <http://rt2x00.serialmonkey.com>
4 4
5 This program is free software; you can redistribute it and/or modify 5 This program is free software; you can redistribute it and/or modify
@@ -30,10 +30,11 @@
30#include "rt2x00lib.h" 30#include "rt2x00lib.h"
31 31
32static int rt2x00mac_tx_rts_cts(struct rt2x00_dev *rt2x00dev, 32static int rt2x00mac_tx_rts_cts(struct rt2x00_dev *rt2x00dev,
33 struct data_ring *ring, 33 struct data_queue *queue,
34 struct sk_buff *frag_skb, 34 struct sk_buff *frag_skb,
35 struct ieee80211_tx_control *control) 35 struct ieee80211_tx_control *control)
36{ 36{
37 struct skb_frame_desc *skbdesc;
37 struct sk_buff *skb; 38 struct sk_buff *skb;
38 int size; 39 int size;
39 40
@@ -52,15 +53,22 @@ static int rt2x00mac_tx_rts_cts(struct rt2x00_dev *rt2x00dev,
52 skb_put(skb, size); 53 skb_put(skb, size);
53 54
54 if (control->flags & IEEE80211_TXCTL_USE_CTS_PROTECT) 55 if (control->flags & IEEE80211_TXCTL_USE_CTS_PROTECT)
55 ieee80211_ctstoself_get(rt2x00dev->hw, rt2x00dev->interface.id, 56 ieee80211_ctstoself_get(rt2x00dev->hw, control->vif,
56 frag_skb->data, frag_skb->len, control, 57 frag_skb->data, frag_skb->len, control,
57 (struct ieee80211_cts *)(skb->data)); 58 (struct ieee80211_cts *)(skb->data));
58 else 59 else
59 ieee80211_rts_get(rt2x00dev->hw, rt2x00dev->interface.id, 60 ieee80211_rts_get(rt2x00dev->hw, control->vif,
60 frag_skb->data, frag_skb->len, control, 61 frag_skb->data, frag_skb->len, control,
61 (struct ieee80211_rts *)(skb->data)); 62 (struct ieee80211_rts *)(skb->data));
62 63
63 if (rt2x00dev->ops->lib->write_tx_data(rt2x00dev, ring, skb, control)) { 64 /*
65 * Initialize skb descriptor
66 */
67 skbdesc = get_skb_frame_desc(skb);
68 memset(skbdesc, 0, sizeof(*skbdesc));
69 skbdesc->flags |= FRAME_DESC_DRIVER_GENERATED;
70
71 if (rt2x00dev->ops->lib->write_tx_data(rt2x00dev, queue, skb, control)) {
64 WARNING(rt2x00dev, "Failed to send RTS/CTS frame.\n"); 72 WARNING(rt2x00dev, "Failed to send RTS/CTS frame.\n");
65 return NETDEV_TX_BUSY; 73 return NETDEV_TX_BUSY;
66 } 74 }
@@ -73,7 +81,8 @@ int rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb,
73{ 81{
74 struct rt2x00_dev *rt2x00dev = hw->priv; 82 struct rt2x00_dev *rt2x00dev = hw->priv;
75 struct ieee80211_hdr *ieee80211hdr = (struct ieee80211_hdr *)skb->data; 83 struct ieee80211_hdr *ieee80211hdr = (struct ieee80211_hdr *)skb->data;
76 struct data_ring *ring; 84 struct data_queue *queue;
85 struct skb_frame_desc *skbdesc;
77 u16 frame_control; 86 u16 frame_control;
78 87
79 /* 88 /*
@@ -88,10 +97,14 @@ int rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb,
88 } 97 }
89 98
90 /* 99 /*
91 * Determine which ring to put packet on. 100 * Determine which queue to put packet on.
92 */ 101 */
93 ring = rt2x00lib_get_ring(rt2x00dev, control->queue); 102 if (control->flags & IEEE80211_TXCTL_SEND_AFTER_DTIM &&
94 if (unlikely(!ring)) { 103 test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags))
104 queue = rt2x00queue_get_queue(rt2x00dev, RT2X00_BCN_QUEUE_ATIM);
105 else
106 queue = rt2x00queue_get_queue(rt2x00dev, control->queue);
107 if (unlikely(!queue)) {
95 ERROR(rt2x00dev, 108 ERROR(rt2x00dev,
96 "Attempt to send packet over invalid queue %d.\n" 109 "Attempt to send packet over invalid queue %d.\n"
97 "Please file bug report to %s.\n", 110 "Please file bug report to %s.\n",
@@ -110,23 +123,29 @@ int rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb,
110 if (!is_rts_frame(frame_control) && !is_cts_frame(frame_control) && 123 if (!is_rts_frame(frame_control) && !is_cts_frame(frame_control) &&
111 (control->flags & (IEEE80211_TXCTL_USE_RTS_CTS | 124 (control->flags & (IEEE80211_TXCTL_USE_RTS_CTS |
112 IEEE80211_TXCTL_USE_CTS_PROTECT))) { 125 IEEE80211_TXCTL_USE_CTS_PROTECT))) {
113 if (rt2x00_ring_free(ring) <= 1) { 126 if (rt2x00queue_available(queue) <= 1) {
114 ieee80211_stop_queue(rt2x00dev->hw, control->queue); 127 ieee80211_stop_queue(rt2x00dev->hw, control->queue);
115 return NETDEV_TX_BUSY; 128 return NETDEV_TX_BUSY;
116 } 129 }
117 130
118 if (rt2x00mac_tx_rts_cts(rt2x00dev, ring, skb, control)) { 131 if (rt2x00mac_tx_rts_cts(rt2x00dev, queue, skb, control)) {
119 ieee80211_stop_queue(rt2x00dev->hw, control->queue); 132 ieee80211_stop_queue(rt2x00dev->hw, control->queue);
120 return NETDEV_TX_BUSY; 133 return NETDEV_TX_BUSY;
121 } 134 }
122 } 135 }
123 136
124 if (rt2x00dev->ops->lib->write_tx_data(rt2x00dev, ring, skb, control)) { 137 /*
138 * Initialize skb descriptor
139 */
140 skbdesc = get_skb_frame_desc(skb);
141 memset(skbdesc, 0, sizeof(*skbdesc));
142
143 if (rt2x00dev->ops->lib->write_tx_data(rt2x00dev, queue, skb, control)) {
125 ieee80211_stop_queue(rt2x00dev->hw, control->queue); 144 ieee80211_stop_queue(rt2x00dev->hw, control->queue);
126 return NETDEV_TX_BUSY; 145 return NETDEV_TX_BUSY;
127 } 146 }
128 147
129 if (rt2x00_ring_full(ring)) 148 if (rt2x00queue_full(queue))
130 ieee80211_stop_queue(rt2x00dev->hw, control->queue); 149 ieee80211_stop_queue(rt2x00dev->hw, control->queue);
131 150
132 if (rt2x00dev->ops->lib->kick_tx_queue) 151 if (rt2x00dev->ops->lib->kick_tx_queue)
@@ -162,27 +181,67 @@ int rt2x00mac_add_interface(struct ieee80211_hw *hw,
162 struct ieee80211_if_init_conf *conf) 181 struct ieee80211_if_init_conf *conf)
163{ 182{
164 struct rt2x00_dev *rt2x00dev = hw->priv; 183 struct rt2x00_dev *rt2x00dev = hw->priv;
165 struct interface *intf = &rt2x00dev->interface; 184 struct rt2x00_intf *intf = vif_to_intf(conf->vif);
166 185 struct data_queue *queue =
167 /* FIXME: Beaconing is broken in rt2x00. */ 186 rt2x00queue_get_queue(rt2x00dev, RT2X00_BCN_QUEUE_BEACON);
168 if (conf->type == IEEE80211_IF_TYPE_IBSS || 187 struct queue_entry *entry = NULL;
169 conf->type == IEEE80211_IF_TYPE_AP) { 188 unsigned int i;
170 ERROR(rt2x00dev,
171 "rt2x00 does not support Adhoc or Master mode");
172 return -EOPNOTSUPP;
173 }
174 189
175 /* 190 /*
176 * Don't allow interfaces to be added while 191 * Don't allow interfaces to be added
177 * either the device has disappeared or when 192 * the device has disappeared.
178 * another interface is already present.
179 */ 193 */
180 if (!test_bit(DEVICE_PRESENT, &rt2x00dev->flags) || 194 if (!test_bit(DEVICE_PRESENT, &rt2x00dev->flags) ||
181 is_interface_present(intf)) 195 !test_bit(DEVICE_STARTED, &rt2x00dev->flags))
196 return -ENODEV;
197
198 /*
199 * When we don't support mixed interfaces (a combination
200 * of sta and ap virtual interfaces) then we can only
201 * add this interface when the rival interface count is 0.
202 */
203 if (!test_bit(DRIVER_SUPPORT_MIXED_INTERFACES, &rt2x00dev->flags) &&
204 ((conf->type == IEEE80211_IF_TYPE_AP && rt2x00dev->intf_sta_count) ||
205 (conf->type != IEEE80211_IF_TYPE_AP && rt2x00dev->intf_ap_count)))
206 return -ENOBUFS;
207
208 /*
209 * Check if we exceeded the maximum amount of supported interfaces.
210 */
211 if ((conf->type == IEEE80211_IF_TYPE_AP &&
212 rt2x00dev->intf_ap_count >= rt2x00dev->ops->max_ap_intf) ||
213 (conf->type != IEEE80211_IF_TYPE_AP &&
214 rt2x00dev->intf_sta_count >= rt2x00dev->ops->max_sta_intf))
182 return -ENOBUFS; 215 return -ENOBUFS;
183 216
184 intf->id = conf->vif; 217 /*
185 intf->type = conf->type; 218 * Loop through all beacon queues to find a free
219 * entry. Since there are as many beacon entries
220 * as the maximum number of interfaces, this search shouldn't
221 * fail.
222 */
223 for (i = 0; i < queue->limit; i++) {
224 entry = &queue->entries[i];
225 if (!__test_and_set_bit(ENTRY_BCN_ASSIGNED, &entry->flags))
226 break;
227 }
228
229 if (unlikely(i == queue->limit))
230 return -ENOBUFS;
231
232 /*
233 * We are now absolutely sure the interface can be created,
234 * increase interface count and start initialization.
235 */
236
237 if (conf->type == IEEE80211_IF_TYPE_AP)
238 rt2x00dev->intf_ap_count++;
239 else
240 rt2x00dev->intf_sta_count++;
241
242 spin_lock_init(&intf->lock);
243 intf->beacon = entry;
244
186 if (conf->type == IEEE80211_IF_TYPE_AP) 245 if (conf->type == IEEE80211_IF_TYPE_AP)
187 memcpy(&intf->bssid, conf->mac_addr, ETH_ALEN); 246 memcpy(&intf->bssid, conf->mac_addr, ETH_ALEN);
188 memcpy(&intf->mac, conf->mac_addr, ETH_ALEN); 247 memcpy(&intf->mac, conf->mac_addr, ETH_ALEN);
@@ -192,8 +251,14 @@ int rt2x00mac_add_interface(struct ieee80211_hw *hw,
192 * has been initialized. Otherwise the device can reset 251 * has been initialized. Otherwise the device can reset
193 * the MAC registers. 252 * the MAC registers.
194 */ 253 */
195 rt2x00lib_config_mac_addr(rt2x00dev, intf->mac); 254 rt2x00lib_config_intf(rt2x00dev, intf, conf->type, intf->mac, NULL);
196 rt2x00lib_config_type(rt2x00dev, conf->type); 255
256 /*
257 * Some filters depend on the current working mode. We can force
258 * an update during the next configure_filter() run by mac80211 by
259 * resetting the current packet_filter state.
260 */
261 rt2x00dev->packet_filter = 0;
197 262
198 return 0; 263 return 0;
199} 264}
@@ -203,7 +268,7 @@ void rt2x00mac_remove_interface(struct ieee80211_hw *hw,
203 struct ieee80211_if_init_conf *conf) 268 struct ieee80211_if_init_conf *conf)
204{ 269{
205 struct rt2x00_dev *rt2x00dev = hw->priv; 270 struct rt2x00_dev *rt2x00dev = hw->priv;
206 struct interface *intf = &rt2x00dev->interface; 271 struct rt2x00_intf *intf = vif_to_intf(conf->vif);
207 272
208 /* 273 /*
209 * Don't allow interfaces to be remove while 274 * Don't allow interfaces to be remove while
@@ -211,21 +276,27 @@ void rt2x00mac_remove_interface(struct ieee80211_hw *hw,
211 * no interface is present. 276 * no interface is present.
212 */ 277 */
213 if (!test_bit(DEVICE_PRESENT, &rt2x00dev->flags) || 278 if (!test_bit(DEVICE_PRESENT, &rt2x00dev->flags) ||
214 !is_interface_present(intf)) 279 (conf->type == IEEE80211_IF_TYPE_AP && !rt2x00dev->intf_ap_count) ||
280 (conf->type != IEEE80211_IF_TYPE_AP && !rt2x00dev->intf_sta_count))
215 return; 281 return;
216 282
217 intf->id = 0; 283 if (conf->type == IEEE80211_IF_TYPE_AP)
218 intf->type = IEEE80211_IF_TYPE_INVALID; 284 rt2x00dev->intf_ap_count--;
219 memset(&intf->bssid, 0x00, ETH_ALEN); 285 else
220 memset(&intf->mac, 0x00, ETH_ALEN); 286 rt2x00dev->intf_sta_count--;
287
288 /*
289 * Release beacon entry so it is available for
290 * new interfaces again.
291 */
292 __clear_bit(ENTRY_BCN_ASSIGNED, &intf->beacon->flags);
221 293
222 /* 294 /*
223 * Make sure the bssid and mac address registers 295 * Make sure the bssid and mac address registers
224 * are cleared to prevent false ACKing of frames. 296 * are cleared to prevent false ACKing of frames.
225 */ 297 */
226 rt2x00lib_config_mac_addr(rt2x00dev, intf->mac); 298 rt2x00lib_config_intf(rt2x00dev, intf,
227 rt2x00lib_config_bssid(rt2x00dev, intf->bssid); 299 IEEE80211_IF_TYPE_INVALID, NULL, NULL);
228 rt2x00lib_config_type(rt2x00dev, intf->type);
229} 300}
230EXPORT_SYMBOL_GPL(rt2x00mac_remove_interface); 301EXPORT_SYMBOL_GPL(rt2x00mac_remove_interface);
231 302
@@ -270,7 +341,7 @@ int rt2x00mac_config_interface(struct ieee80211_hw *hw,
270 struct ieee80211_if_conf *conf) 341 struct ieee80211_if_conf *conf)
271{ 342{
272 struct rt2x00_dev *rt2x00dev = hw->priv; 343 struct rt2x00_dev *rt2x00dev = hw->priv;
273 struct interface *intf = &rt2x00dev->interface; 344 struct rt2x00_intf *intf = vif_to_intf(vif);
274 int status; 345 int status;
275 346
276 /* 347 /*
@@ -280,12 +351,7 @@ int rt2x00mac_config_interface(struct ieee80211_hw *hw,
280 if (!test_bit(DEVICE_PRESENT, &rt2x00dev->flags)) 351 if (!test_bit(DEVICE_PRESENT, &rt2x00dev->flags))
281 return 0; 352 return 0;
282 353
283 /* 354 spin_lock(&intf->lock);
284 * If the given type does not match the configured type,
285 * there has been a problem.
286 */
287 if (conf->type != intf->type)
288 return -EINVAL;
289 355
290 /* 356 /*
291 * If the interface does not work in master mode, 357 * If the interface does not work in master mode,
@@ -294,7 +360,16 @@ int rt2x00mac_config_interface(struct ieee80211_hw *hw,
294 */ 360 */
295 if (conf->type != IEEE80211_IF_TYPE_AP) 361 if (conf->type != IEEE80211_IF_TYPE_AP)
296 memcpy(&intf->bssid, conf->bssid, ETH_ALEN); 362 memcpy(&intf->bssid, conf->bssid, ETH_ALEN);
297 rt2x00lib_config_bssid(rt2x00dev, intf->bssid); 363
364 spin_unlock(&intf->lock);
365
366 /*
367 * Call rt2x00_config_intf() outside of the spinlock context since
368 * the call will sleep for USB drivers. By using the ieee80211_if_conf
369 * values as arguments we make keep access to rt2x00_intf thread safe
370 * even without the lock.
371 */
372 rt2x00lib_config_intf(rt2x00dev, intf, conf->type, NULL, conf->bssid);
298 373
299 /* 374 /*
300 * We only need to initialize the beacon when master mode is enabled. 375 * We only need to initialize the beacon when master mode is enabled.
@@ -312,6 +387,50 @@ int rt2x00mac_config_interface(struct ieee80211_hw *hw,
312} 387}
313EXPORT_SYMBOL_GPL(rt2x00mac_config_interface); 388EXPORT_SYMBOL_GPL(rt2x00mac_config_interface);
314 389
390void rt2x00mac_configure_filter(struct ieee80211_hw *hw,
391 unsigned int changed_flags,
392 unsigned int *total_flags,
393 int mc_count, struct dev_addr_list *mc_list)
394{
395 struct rt2x00_dev *rt2x00dev = hw->priv;
396
397 /*
398 * Mask off any flags we are going to ignore
399 * from the total_flags field.
400 */
401 *total_flags &=
402 FIF_ALLMULTI |
403 FIF_FCSFAIL |
404 FIF_PLCPFAIL |
405 FIF_CONTROL |
406 FIF_OTHER_BSS |
407 FIF_PROMISC_IN_BSS;
408
409 /*
410 * Apply some rules to the filters:
411 * - Some filters imply different filters to be set.
412 * - Some things we can't filter out at all.
413 * - Multicast filter seems to kill broadcast traffic so never use it.
414 */
415 *total_flags |= FIF_ALLMULTI;
416 if (*total_flags & FIF_OTHER_BSS ||
417 *total_flags & FIF_PROMISC_IN_BSS)
418 *total_flags |= FIF_PROMISC_IN_BSS | FIF_OTHER_BSS;
419
420 /*
421 * Check if there is any work left for us.
422 */
423 if (rt2x00dev->packet_filter == *total_flags)
424 return;
425 rt2x00dev->packet_filter = *total_flags;
426
427 if (!test_bit(DRIVER_REQUIRE_SCHEDULED, &rt2x00dev->flags))
428 rt2x00dev->ops->lib->config_filter(rt2x00dev, *total_flags);
429 else
430 queue_work(rt2x00dev->hw->workqueue, &rt2x00dev->filter_work);
431}
432EXPORT_SYMBOL_GPL(rt2x00mac_configure_filter);
433
315int rt2x00mac_get_stats(struct ieee80211_hw *hw, 434int rt2x00mac_get_stats(struct ieee80211_hw *hw,
316 struct ieee80211_low_level_stats *stats) 435 struct ieee80211_low_level_stats *stats)
317{ 436{
@@ -334,9 +453,11 @@ int rt2x00mac_get_tx_stats(struct ieee80211_hw *hw,
334 struct rt2x00_dev *rt2x00dev = hw->priv; 453 struct rt2x00_dev *rt2x00dev = hw->priv;
335 unsigned int i; 454 unsigned int i;
336 455
337 for (i = 0; i < hw->queues; i++) 456 for (i = 0; i < hw->queues; i++) {
338 memcpy(&stats->data[i], &rt2x00dev->tx[i].stats, 457 stats->data[i].len = rt2x00dev->tx[i].length;
339 sizeof(rt2x00dev->tx[i].stats)); 458 stats->data[i].limit = rt2x00dev->tx[i].limit;
459 stats->data[i].count = rt2x00dev->tx[i].count;
460 }
340 461
341 return 0; 462 return 0;
342} 463}
@@ -348,71 +469,83 @@ void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw,
348 u32 changes) 469 u32 changes)
349{ 470{
350 struct rt2x00_dev *rt2x00dev = hw->priv; 471 struct rt2x00_dev *rt2x00dev = hw->priv;
351 int short_preamble; 472 struct rt2x00_intf *intf = vif_to_intf(vif);
352 int ack_timeout; 473 unsigned int delayed = 0;
353 int ack_consume_time;
354 int difs;
355 int preamble;
356 474
357 /* 475 /*
358 * We only support changing preamble mode. 476 * When the association status has changed we must reset the link
477 * tuner counter. This is because some drivers determine if they
478 * should perform link tuning based on the number of seconds
479 * spent associated or not associated.
359 */ 480 */
360 if (!(changes & BSS_CHANGED_ERP_PREAMBLE)) 481 if (changes & BSS_CHANGED_ASSOC) {
361 return; 482 rt2x00dev->link.count = 0;
362
363 short_preamble = bss_conf->use_short_preamble;
364 preamble = bss_conf->use_short_preamble ?
365 SHORT_PREAMBLE : PREAMBLE;
366 483
367 difs = (hw->conf.flags & IEEE80211_CONF_SHORT_SLOT_TIME) ? 484 if (bss_conf->assoc)
368 SHORT_DIFS : DIFS; 485 rt2x00dev->intf_associated++;
369 ack_timeout = difs + PLCP + preamble + get_duration(ACK_SIZE, 10); 486 else
487 rt2x00dev->intf_associated--;
370 488
371 ack_consume_time = SIFS + PLCP + preamble + get_duration(ACK_SIZE, 10); 489 if (!test_bit(DRIVER_REQUIRE_SCHEDULED, &rt2x00dev->flags))
490 rt2x00leds_led_assoc(rt2x00dev,
491 !!rt2x00dev->intf_associated);
492 else
493 delayed |= DELAYED_LED_ASSOC;
494 }
372 495
373 if (short_preamble) 496 /*
374 __set_bit(CONFIG_SHORT_PREAMBLE, &rt2x00dev->flags); 497 * When the erp information has changed, we should perform
375 else 498 * additional configuration steps. For all other changes we are done.
376 __clear_bit(CONFIG_SHORT_PREAMBLE, &rt2x00dev->flags); 499 */
500 if (changes & BSS_CHANGED_ERP_PREAMBLE) {
501 if (!test_bit(DRIVER_REQUIRE_SCHEDULED, &rt2x00dev->flags))
502 rt2x00lib_config_erp(rt2x00dev, intf, bss_conf);
503 else
504 delayed |= DELAYED_CONFIG_ERP;
505 }
377 506
378 rt2x00dev->ops->lib->config_preamble(rt2x00dev, short_preamble, 507 spin_lock(&intf->lock);
379 ack_timeout, ack_consume_time); 508 memcpy(&intf->conf, bss_conf, sizeof(*bss_conf));
509 if (delayed) {
510 intf->delayed_flags |= delayed;
511 queue_work(rt2x00dev->hw->workqueue, &rt2x00dev->intf_work);
512 }
513 spin_unlock(&intf->lock);
380} 514}
381EXPORT_SYMBOL_GPL(rt2x00mac_bss_info_changed); 515EXPORT_SYMBOL_GPL(rt2x00mac_bss_info_changed);
382 516
383int rt2x00mac_conf_tx(struct ieee80211_hw *hw, int queue, 517int rt2x00mac_conf_tx(struct ieee80211_hw *hw, int queue_idx,
384 const struct ieee80211_tx_queue_params *params) 518 const struct ieee80211_tx_queue_params *params)
385{ 519{
386 struct rt2x00_dev *rt2x00dev = hw->priv; 520 struct rt2x00_dev *rt2x00dev = hw->priv;
387 struct data_ring *ring; 521 struct data_queue *queue;
388 522
389 ring = rt2x00lib_get_ring(rt2x00dev, queue); 523 queue = rt2x00queue_get_queue(rt2x00dev, queue_idx);
390 if (unlikely(!ring)) 524 if (unlikely(!queue))
391 return -EINVAL; 525 return -EINVAL;
392 526
393 /* 527 /*
394 * The passed variables are stored as real value ((2^n)-1). 528 * The passed variables are stored as real value ((2^n)-1).
395 * Ralink registers require to know the bit number 'n'. 529 * Ralink registers require to know the bit number 'n'.
396 */ 530 */
397 if (params->cw_min) 531 if (params->cw_min > 0)
398 ring->tx_params.cw_min = fls(params->cw_min); 532 queue->cw_min = fls(params->cw_min);
399 else 533 else
400 ring->tx_params.cw_min = 5; /* cw_min: 2^5 = 32. */ 534 queue->cw_min = 5; /* cw_min: 2^5 = 32. */
401 535
402 if (params->cw_max) 536 if (params->cw_max > 0)
403 ring->tx_params.cw_max = fls(params->cw_max); 537 queue->cw_max = fls(params->cw_max);
404 else 538 else
405 ring->tx_params.cw_max = 10; /* cw_max: 2^10 = 1024. */ 539 queue->cw_max = 10; /* cw_max: 2^10 = 1024. */
406 540
407 if (params->aifs) 541 if (params->aifs >= 0)
408 ring->tx_params.aifs = params->aifs; 542 queue->aifs = params->aifs;
409 else 543 else
410 ring->tx_params.aifs = 2; 544 queue->aifs = 2;
411 545
412 INFO(rt2x00dev, 546 INFO(rt2x00dev,
413 "Configured TX ring %d - CWmin: %d, CWmax: %d, Aifs: %d.\n", 547 "Configured TX queue %d - CWmin: %d, CWmax: %d, Aifs: %d.\n",
414 queue, ring->tx_params.cw_min, ring->tx_params.cw_max, 548 queue_idx, queue->cw_min, queue->cw_max, queue->aifs);
415 ring->tx_params.aifs);
416 549
417 return 0; 550 return 0;
418} 551}
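rt2x00mac_conf_tx() above converts contention windows because mac80211 passes them as real values of the form (2^n)-1 while the Ralink registers expect the exponent n. A short worked illustration of the fls() conversion (kernel-style; values are examples only):

#include <linux/bitops.h>

/* Convert a (2^n)-1 contention window into the exponent n, as done above. */
static u8 example_cw_to_exponent(u16 cw, u8 fallback)
{
	/* fls(15) == 4, fls(31) == 5, fls(1023) == 10 */
	return cw > 0 ? fls(cw) : fallback;
}

Usage: a cw_min of 31 becomes 5 (2^5 = 32 slots), a cw_max of 1023 becomes 10, and a zero from mac80211 falls back to the driver default (5 or 10 respectively).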
diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.c b/drivers/net/wireless/rt2x00/rt2x00pci.c
index 804a9980055d..7867ec64bd2c 100644
--- a/drivers/net/wireless/rt2x00/rt2x00pci.c
+++ b/drivers/net/wireless/rt2x00/rt2x00pci.c
@@ -1,5 +1,5 @@
1/* 1/*
2 Copyright (C) 2004 - 2007 rt2x00 SourceForge Project 2 Copyright (C) 2004 - 2008 rt2x00 SourceForge Project
3 <http://rt2x00.serialmonkey.com> 3 <http://rt2x00.serialmonkey.com>
4 4
5 This program is free software; you can redistribute it and/or modify 5 This program is free software; you can redistribute it and/or modify
@@ -32,64 +32,21 @@
32#include "rt2x00pci.h" 32#include "rt2x00pci.h"
33 33
34/* 34/*
35 * Beacon handlers.
36 */
37int rt2x00pci_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb,
38 struct ieee80211_tx_control *control)
39{
40 struct rt2x00_dev *rt2x00dev = hw->priv;
41 struct skb_desc *desc;
42 struct data_ring *ring;
43 struct data_entry *entry;
44
45 /*
46 * Just in case mac80211 doesn't set this correctly,
47 * but we need this queue set for the descriptor
48 * initialization.
49 */
50 control->queue = IEEE80211_TX_QUEUE_BEACON;
51 ring = rt2x00lib_get_ring(rt2x00dev, control->queue);
52 entry = rt2x00_get_data_entry(ring);
53
54 /*
55 * Fill in skb descriptor
56 */
57 desc = get_skb_desc(skb);
58 desc->desc_len = ring->desc_size;
59 desc->data_len = skb->len;
60 desc->desc = entry->priv;
61 desc->data = skb->data;
62 desc->ring = ring;
63 desc->entry = entry;
64
65 memcpy(entry->data_addr, skb->data, skb->len);
66 rt2x00lib_write_tx_desc(rt2x00dev, skb, control);
67
68 /*
69 * Enable beacon generation.
70 */
71 rt2x00dev->ops->lib->kick_tx_queue(rt2x00dev, control->queue);
72
73 return 0;
74}
75EXPORT_SYMBOL_GPL(rt2x00pci_beacon_update);
76
77/*
78 * TX data handlers. 35 * TX data handlers.
79 */ 36 */
80int rt2x00pci_write_tx_data(struct rt2x00_dev *rt2x00dev, 37int rt2x00pci_write_tx_data(struct rt2x00_dev *rt2x00dev,
81 struct data_ring *ring, struct sk_buff *skb, 38 struct data_queue *queue, struct sk_buff *skb,
82 struct ieee80211_tx_control *control) 39 struct ieee80211_tx_control *control)
83{ 40{
84 struct data_entry *entry = rt2x00_get_data_entry(ring); 41 struct queue_entry *entry = rt2x00queue_get_entry(queue, Q_INDEX);
85 __le32 *txd = entry->priv; 42 struct queue_entry_priv_pci_tx *priv_tx = entry->priv_data;
86 struct skb_desc *desc; 43 struct skb_frame_desc *skbdesc;
87 u32 word; 44 u32 word;
88 45
89 if (rt2x00_ring_full(ring)) 46 if (rt2x00queue_full(queue))
90 return -EINVAL; 47 return -EINVAL;
91 48
92 rt2x00_desc_read(txd, 0, &word); 49 rt2x00_desc_read(priv_tx->desc, 0, &word);
93 50
94 if (rt2x00_get_field32(word, TXD_ENTRY_OWNER_NIC) || 51 if (rt2x00_get_field32(word, TXD_ENTRY_OWNER_NIC) ||
95 rt2x00_get_field32(word, TXD_ENTRY_VALID)) { 52 rt2x00_get_field32(word, TXD_ENTRY_VALID)) {
@@ -103,18 +60,18 @@ int rt2x00pci_write_tx_data(struct rt2x00_dev *rt2x00dev,
103 /* 60 /*
104 * Fill in skb descriptor 61 * Fill in skb descriptor
105 */ 62 */
106 desc = get_skb_desc(skb); 63 skbdesc = get_skb_frame_desc(skb);
107 desc->desc_len = ring->desc_size; 64 skbdesc->data = skb->data;
108 desc->data_len = skb->len; 65 skbdesc->data_len = skb->len;
109 desc->desc = entry->priv; 66 skbdesc->desc = priv_tx->desc;
110 desc->data = skb->data; 67 skbdesc->desc_len = queue->desc_size;
111 desc->ring = ring; 68 skbdesc->entry = entry;
112 desc->entry = entry; 69
113 70 memcpy(&priv_tx->control, control, sizeof(priv_tx->control));
114 memcpy(entry->data_addr, skb->data, skb->len); 71 memcpy(priv_tx->data, skb->data, skb->len);
115 rt2x00lib_write_tx_desc(rt2x00dev, skb, control); 72 rt2x00lib_write_tx_desc(rt2x00dev, skb, control);
116 73
117 rt2x00_ring_index_inc(ring); 74 rt2x00queue_index_inc(queue, Q_INDEX);
118 75
119 return 0; 76 return 0;
120} 77}
@@ -125,29 +82,28 @@ EXPORT_SYMBOL_GPL(rt2x00pci_write_tx_data);
125 */ 82 */
126void rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev) 83void rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev)
127{ 84{
128 struct data_ring *ring = rt2x00dev->rx; 85 struct data_queue *queue = rt2x00dev->rx;
129 struct data_entry *entry; 86 struct queue_entry *entry;
130 struct sk_buff *skb; 87 struct queue_entry_priv_pci_rx *priv_rx;
131 struct ieee80211_hdr *hdr; 88 struct ieee80211_hdr *hdr;
132 struct skb_desc *skbdesc; 89 struct skb_frame_desc *skbdesc;
133 struct rxdata_entry_desc desc; 90 struct rxdone_entry_desc rxdesc;
134 int header_size; 91 int header_size;
135 __le32 *rxd;
136 int align; 92 int align;
137 u32 word; 93 u32 word;
138 94
139 while (1) { 95 while (1) {
140 entry = rt2x00_get_data_entry(ring); 96 entry = rt2x00queue_get_entry(queue, Q_INDEX);
141 rxd = entry->priv; 97 priv_rx = entry->priv_data;
142 rt2x00_desc_read(rxd, 0, &word); 98 rt2x00_desc_read(priv_rx->desc, 0, &word);
143 99
144 if (rt2x00_get_field32(word, RXD_ENTRY_OWNER_NIC)) 100 if (rt2x00_get_field32(word, RXD_ENTRY_OWNER_NIC))
145 break; 101 break;
146 102
147 memset(&desc, 0, sizeof(desc)); 103 memset(&rxdesc, 0, sizeof(rxdesc));
148 rt2x00dev->ops->lib->fill_rxdone(entry, &desc); 104 rt2x00dev->ops->lib->fill_rxdone(entry, &rxdesc);
149 105
150 hdr = (struct ieee80211_hdr *)entry->data_addr; 106 hdr = (struct ieee80211_hdr *)priv_rx->data;
151 header_size = 107 header_size =
152 ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_control)); 108 ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_control));
153 109
@@ -161,66 +117,68 @@ void rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev)
161 * Allocate the sk_buffer, initialize it and copy 117 * Allocate the sk_buffer, initialize it and copy
162 * all data into it. 118 * all data into it.
163 */ 119 */
164 skb = dev_alloc_skb(desc.size + align); 120 entry->skb = dev_alloc_skb(rxdesc.size + align);
165 if (!skb) 121 if (!entry->skb)
166 return; 122 return;
167 123
168 skb_reserve(skb, align); 124 skb_reserve(entry->skb, align);
169 memcpy(skb_put(skb, desc.size), entry->data_addr, desc.size); 125 memcpy(skb_put(entry->skb, rxdesc.size),
126 priv_rx->data, rxdesc.size);
170 127
171 /* 128 /*
172 * Fill in skb descriptor 129 * Fill in skb descriptor
173 */ 130 */
174 skbdesc = get_skb_desc(skb); 131 skbdesc = get_skb_frame_desc(entry->skb);
175 skbdesc->desc_len = entry->ring->desc_size; 132 memset(skbdesc, 0, sizeof(*skbdesc));
176 skbdesc->data_len = skb->len; 133 skbdesc->data = entry->skb->data;
177 skbdesc->desc = entry->priv; 134 skbdesc->data_len = entry->skb->len;
178 skbdesc->data = skb->data; 135 skbdesc->desc = priv_rx->desc;
179 skbdesc->ring = ring; 136 skbdesc->desc_len = queue->desc_size;
180 skbdesc->entry = entry; 137 skbdesc->entry = entry;
181 138
182 /* 139 /*
183 * Send the frame to rt2x00lib for further processing. 140 * Send the frame to rt2x00lib for further processing.
184 */ 141 */
185 rt2x00lib_rxdone(entry, skb, &desc); 142 rt2x00lib_rxdone(entry, &rxdesc);
186 143
187 if (test_bit(DEVICE_ENABLED_RADIO, &ring->rt2x00dev->flags)) { 144 if (test_bit(DEVICE_ENABLED_RADIO, &queue->rt2x00dev->flags)) {
188 rt2x00_set_field32(&word, RXD_ENTRY_OWNER_NIC, 1); 145 rt2x00_set_field32(&word, RXD_ENTRY_OWNER_NIC, 1);
189 rt2x00_desc_write(rxd, 0, word); 146 rt2x00_desc_write(priv_rx->desc, 0, word);
190 } 147 }
191 148
192 rt2x00_ring_index_inc(ring); 149 rt2x00queue_index_inc(queue, Q_INDEX);
193 } 150 }
194} 151}
195EXPORT_SYMBOL_GPL(rt2x00pci_rxdone); 152EXPORT_SYMBOL_GPL(rt2x00pci_rxdone);
196 153
197void rt2x00pci_txdone(struct rt2x00_dev *rt2x00dev, struct data_entry *entry, 154void rt2x00pci_txdone(struct rt2x00_dev *rt2x00dev, struct queue_entry *entry,
198 const int tx_status, const int retry) 155 struct txdone_entry_desc *txdesc)
199{ 156{
157 struct queue_entry_priv_pci_tx *priv_tx = entry->priv_data;
200 u32 word; 158 u32 word;
201 159
202 rt2x00lib_txdone(entry, tx_status, retry); 160 txdesc->control = &priv_tx->control;
161 rt2x00lib_txdone(entry, txdesc);
203 162
204 /* 163 /*
205 * Make this entry available for reuse. 164 * Make this entry available for reuse.
206 */ 165 */
207 entry->flags = 0; 166 entry->flags = 0;
208 167
209 rt2x00_desc_read(entry->priv, 0, &word); 168 rt2x00_desc_read(priv_tx->desc, 0, &word);
210 rt2x00_set_field32(&word, TXD_ENTRY_OWNER_NIC, 0); 169 rt2x00_set_field32(&word, TXD_ENTRY_OWNER_NIC, 0);
211 rt2x00_set_field32(&word, TXD_ENTRY_VALID, 0); 170 rt2x00_set_field32(&word, TXD_ENTRY_VALID, 0);
212 rt2x00_desc_write(entry->priv, 0, word); 171 rt2x00_desc_write(priv_tx->desc, 0, word);
213 172
214 rt2x00_ring_index_done_inc(entry->ring); 173 rt2x00queue_index_inc(entry->queue, Q_INDEX_DONE);
215 174
216 /* 175 /*
217 * If the data ring was full before the txdone handler 176 * If the data queue was full before the txdone handler
218 * we must make sure the packet queue in the mac80211 stack 177 * we must make sure the packet queue in the mac80211 stack
219 * is reenabled when the txdone handler has finished. 178 * is reenabled when the txdone handler has finished.
220 */ 179 */
221 if (!rt2x00_ring_full(entry->ring)) 180 if (!rt2x00queue_full(entry->queue))
222 ieee80211_wake_queue(rt2x00dev->hw, 181 ieee80211_wake_queue(rt2x00dev->hw, priv_tx->control.queue);
223 entry->tx_status.control.queue);
224 182
225} 183}
226EXPORT_SYMBOL_GPL(rt2x00pci_txdone); 184EXPORT_SYMBOL_GPL(rt2x00pci_txdone);
@@ -228,73 +186,122 @@ EXPORT_SYMBOL_GPL(rt2x00pci_txdone);
228/* 186/*
229 * Device initialization handlers. 187 * Device initialization handlers.
230 */ 188 */
231#define priv_offset(__ring, __i) \ 189#define desc_size(__queue) \
232({ \ 190({ \
233 ring->data_addr + (i * ring->desc_size); \ 191 ((__queue)->limit * (__queue)->desc_size);\
192})
193
194#define data_size(__queue) \
195({ \
196 ((__queue)->limit * (__queue)->data_size);\
234}) 197})
235 198
236#define data_addr_offset(__ring, __i) \ 199#define dma_size(__queue) \
237({ \ 200({ \
238 (__ring)->data_addr + \ 201 data_size(__queue) + desc_size(__queue);\
239 ((__ring)->stats.limit * (__ring)->desc_size) + \
240 ((__i) * (__ring)->data_size); \
241}) 202})
242 203
243#define data_dma_offset(__ring, __i) \ 204#define desc_offset(__queue, __base, __i) \
244({ \ 205({ \
245 (__ring)->data_dma + \ 206 (__base) + data_size(__queue) + \
246 ((__ring)->stats.limit * (__ring)->desc_size) + \ 207 ((__i) * (__queue)->desc_size); \
247 ((__i) * (__ring)->data_size); \
248}) 208})
249 209
250static int rt2x00pci_alloc_dma(struct rt2x00_dev *rt2x00dev, 210#define data_offset(__queue, __base, __i) \
251 struct data_ring *ring) 211({ \
212 (__base) + \
213 ((__i) * (__queue)->data_size); \
214})
215
216static int rt2x00pci_alloc_queue_dma(struct rt2x00_dev *rt2x00dev,
217 struct data_queue *queue)
252{ 218{
219 struct pci_dev *pci_dev = rt2x00dev_pci(rt2x00dev);
220 struct queue_entry_priv_pci_rx *priv_rx;
221 struct queue_entry_priv_pci_tx *priv_tx;
222 void *addr;
223 dma_addr_t dma;
224 void *desc_addr;
225 dma_addr_t desc_dma;
226 void *data_addr;
227 dma_addr_t data_dma;
253 unsigned int i; 228 unsigned int i;
254 229
255 /* 230 /*
256 * Allocate DMA memory for descriptor and buffer. 231 * Allocate DMA memory for descriptor and buffer.
257 */ 232 */
258 ring->data_addr = pci_alloc_consistent(rt2x00dev_pci(rt2x00dev), 233 addr = pci_alloc_consistent(pci_dev, dma_size(queue), &dma);
259 rt2x00_get_ring_size(ring), 234 if (!addr)
260 &ring->data_dma);
261 if (!ring->data_addr)
262 return -ENOMEM; 235 return -ENOMEM;
263 236
237 memset(addr, 0, dma_size(queue));
238
264 /* 239 /*
265 * Initialize all ring entries to contain valid 240 * Initialize all queue entries to contain valid addresses.
266 * addresses.
267 */ 241 */
268 for (i = 0; i < ring->stats.limit; i++) { 242 for (i = 0; i < queue->limit; i++) {
269 ring->entry[i].priv = priv_offset(ring, i); 243 desc_addr = desc_offset(queue, addr, i);
270 ring->entry[i].data_addr = data_addr_offset(ring, i); 244 desc_dma = desc_offset(queue, dma, i);
271 ring->entry[i].data_dma = data_dma_offset(ring, i); 245 data_addr = data_offset(queue, addr, i);
246 data_dma = data_offset(queue, dma, i);
247
248 if (queue->qid == QID_RX) {
249 priv_rx = queue->entries[i].priv_data;
250 priv_rx->desc = desc_addr;
251 priv_rx->desc_dma = desc_dma;
252 priv_rx->data = data_addr;
253 priv_rx->data_dma = data_dma;
254 } else {
255 priv_tx = queue->entries[i].priv_data;
256 priv_tx->desc = desc_addr;
257 priv_tx->desc_dma = desc_dma;
258 priv_tx->data = data_addr;
259 priv_tx->data_dma = data_dma;
260 }
272 } 261 }
273 262
274 return 0; 263 return 0;
275} 264}
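
The new allocation scheme replaces the per-ring base pointers with one coherent block per queue: all data buffers come first, all descriptors follow, and desc_offset()/data_offset() index into that block. A hedged restatement of the arithmetic as plain helpers, not driver code (the 32-byte descriptor size below is assumed; the entry count and frame size match the RX defaults in rt2x00queue.h further down):

/* Illustrative only: the layout produced by the macros above for a queue of
 * N entries is [ data 0 .. data N-1 | desc 0 .. desc N-1 ] inside a single
 * coherent allocation of dma_size() bytes. */
static unsigned int example_data_offset(unsigned int i, unsigned int data_size)
{
	return i * data_size;
}

static unsigned int example_desc_offset(unsigned int i, unsigned int limit,
					unsigned int data_size,
					unsigned int desc_size)
{
	return (limit * data_size) + (i * desc_size);
}

/* With the RX defaults from rt2x00queue.h (12 entries of 2432 bytes) and an
 * assumed 32 byte descriptor, entry 3 gets its data at byte 7296 and its
 * descriptor at byte 29184 + 96 = 29280 of the 29568 byte block. */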
276 265
277static void rt2x00pci_free_dma(struct rt2x00_dev *rt2x00dev, 266static void rt2x00pci_free_queue_dma(struct rt2x00_dev *rt2x00dev,
278 struct data_ring *ring) 267 struct data_queue *queue)
279{ 268{
280 if (ring->data_addr) 269 struct pci_dev *pci_dev = rt2x00dev_pci(rt2x00dev);
281 pci_free_consistent(rt2x00dev_pci(rt2x00dev), 270 struct queue_entry_priv_pci_rx *priv_rx;
282 rt2x00_get_ring_size(ring), 271 struct queue_entry_priv_pci_tx *priv_tx;
283 ring->data_addr, ring->data_dma); 272 void *data_addr;
284 ring->data_addr = NULL; 273 dma_addr_t data_dma;
274
275 if (queue->qid == QID_RX) {
276 priv_rx = queue->entries[0].priv_data;
277 data_addr = priv_rx->data;
278 data_dma = priv_rx->data_dma;
279
280 priv_rx->data = NULL;
281 } else {
282 priv_tx = queue->entries[0].priv_data;
283 data_addr = priv_tx->data;
284 data_dma = priv_tx->data_dma;
285
286 priv_tx->data = NULL;
287 }
288
289 if (data_addr)
290 pci_free_consistent(pci_dev, dma_size(queue),
291 data_addr, data_dma);
285} 292}
286 293
287int rt2x00pci_initialize(struct rt2x00_dev *rt2x00dev) 294int rt2x00pci_initialize(struct rt2x00_dev *rt2x00dev)
288{ 295{
289 struct pci_dev *pci_dev = rt2x00dev_pci(rt2x00dev); 296 struct pci_dev *pci_dev = rt2x00dev_pci(rt2x00dev);
290 struct data_ring *ring; 297 struct data_queue *queue;
291 int status; 298 int status;
292 299
293 /* 300 /*
294 * Allocate DMA 301 * Allocate DMA
295 */ 302 */
296 ring_for_each(rt2x00dev, ring) { 303 queue_for_each(rt2x00dev, queue) {
297 status = rt2x00pci_alloc_dma(rt2x00dev, ring); 304 status = rt2x00pci_alloc_queue_dma(rt2x00dev, queue);
298 if (status) 305 if (status)
299 goto exit; 306 goto exit;
300 } 307 }
@@ -321,7 +328,7 @@ EXPORT_SYMBOL_GPL(rt2x00pci_initialize);
321 328
322void rt2x00pci_uninitialize(struct rt2x00_dev *rt2x00dev) 329void rt2x00pci_uninitialize(struct rt2x00_dev *rt2x00dev)
323{ 330{
324 struct data_ring *ring; 331 struct data_queue *queue;
325 332
326 /* 333 /*
327 * Free irq line. 334 * Free irq line.
@@ -331,8 +338,8 @@ void rt2x00pci_uninitialize(struct rt2x00_dev *rt2x00dev)
331 /* 338 /*
332 * Free DMA 339 * Free DMA
333 */ 340 */
334 ring_for_each(rt2x00dev, ring) 341 queue_for_each(rt2x00dev, queue)
335 rt2x00pci_free_dma(rt2x00dev, ring); 342 rt2x00pci_free_queue_dma(rt2x00dev, queue);
336} 343}
337EXPORT_SYMBOL_GPL(rt2x00pci_uninitialize); 344EXPORT_SYMBOL_GPL(rt2x00pci_uninitialize);
338 345
@@ -347,9 +354,9 @@ static void rt2x00pci_free_reg(struct rt2x00_dev *rt2x00dev)
347 kfree(rt2x00dev->eeprom); 354 kfree(rt2x00dev->eeprom);
348 rt2x00dev->eeprom = NULL; 355 rt2x00dev->eeprom = NULL;
349 356
350 if (rt2x00dev->csr_addr) { 357 if (rt2x00dev->csr.base) {
351 iounmap(rt2x00dev->csr_addr); 358 iounmap(rt2x00dev->csr.base);
352 rt2x00dev->csr_addr = NULL; 359 rt2x00dev->csr.base = NULL;
353 } 360 }
354} 361}
355 362
@@ -357,9 +364,9 @@ static int rt2x00pci_alloc_reg(struct rt2x00_dev *rt2x00dev)
357{ 364{
358 struct pci_dev *pci_dev = rt2x00dev_pci(rt2x00dev); 365 struct pci_dev *pci_dev = rt2x00dev_pci(rt2x00dev);
359 366
360 rt2x00dev->csr_addr = ioremap(pci_resource_start(pci_dev, 0), 367 rt2x00dev->csr.base = ioremap(pci_resource_start(pci_dev, 0),
361 pci_resource_len(pci_dev, 0)); 368 pci_resource_len(pci_dev, 0));
362 if (!rt2x00dev->csr_addr) 369 if (!rt2x00dev->csr.base)
363 goto exit; 370 goto exit;
364 371
365 rt2x00dev->eeprom = kzalloc(rt2x00dev->ops->eeprom_size, GFP_KERNEL); 372 rt2x00dev->eeprom = kzalloc(rt2x00dev->ops->eeprom_size, GFP_KERNEL);
@@ -530,5 +537,5 @@ EXPORT_SYMBOL_GPL(rt2x00pci_resume);
530 */ 537 */
531MODULE_AUTHOR(DRV_PROJECT); 538MODULE_AUTHOR(DRV_PROJECT);
532MODULE_VERSION(DRV_VERSION); 539MODULE_VERSION(DRV_VERSION);
533MODULE_DESCRIPTION("rt2x00 library"); 540MODULE_DESCRIPTION("rt2x00 pci library");
534MODULE_LICENSE("GPL"); 541MODULE_LICENSE("GPL");
diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.h b/drivers/net/wireless/rt2x00/rt2x00pci.h
index 2d1eb8144da4..9d1cdb99431c 100644
--- a/drivers/net/wireless/rt2x00/rt2x00pci.h
+++ b/drivers/net/wireless/rt2x00/rt2x00pci.h
@@ -1,5 +1,5 @@
1/* 1/*
2 Copyright (C) 2004 - 2007 rt2x00 SourceForge Project 2 Copyright (C) 2004 - 2008 rt2x00 SourceForge Project
3 <http://rt2x00.serialmonkey.com> 3 <http://rt2x00.serialmonkey.com>
4 4
5 This program is free software; you can redistribute it and/or modify 5 This program is free software; you can redistribute it and/or modify
@@ -61,7 +61,7 @@ static inline void rt2x00pci_register_read(struct rt2x00_dev *rt2x00dev,
61 const unsigned long offset, 61 const unsigned long offset,
62 u32 *value) 62 u32 *value)
63{ 63{
64 *value = readl(rt2x00dev->csr_addr + offset); 64 *value = readl(rt2x00dev->csr.base + offset);
65} 65}
66 66
67static inline void 67static inline void
@@ -69,14 +69,14 @@ rt2x00pci_register_multiread(struct rt2x00_dev *rt2x00dev,
69 const unsigned long offset, 69 const unsigned long offset,
70 void *value, const u16 length) 70 void *value, const u16 length)
71{ 71{
72 memcpy_fromio(value, rt2x00dev->csr_addr + offset, length); 72 memcpy_fromio(value, rt2x00dev->csr.base + offset, length);
73} 73}
74 74
75static inline void rt2x00pci_register_write(struct rt2x00_dev *rt2x00dev, 75static inline void rt2x00pci_register_write(struct rt2x00_dev *rt2x00dev,
76 const unsigned long offset, 76 const unsigned long offset,
77 u32 value) 77 u32 value)
78{ 78{
79 writel(value, rt2x00dev->csr_addr + offset); 79 writel(value, rt2x00dev->csr.base + offset);
80} 80}
81 81
82static inline void 82static inline void
@@ -84,28 +84,63 @@ rt2x00pci_register_multiwrite(struct rt2x00_dev *rt2x00dev,
84 const unsigned long offset, 84 const unsigned long offset,
85 void *value, const u16 length) 85 void *value, const u16 length)
86{ 86{
87 memcpy_toio(rt2x00dev->csr_addr + offset, value, length); 87 memcpy_toio(rt2x00dev->csr.base + offset, value, length);
88} 88}
89 89
90/* 90/*
91 * Beacon handlers.
92 */
93int rt2x00pci_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb,
94 struct ieee80211_tx_control *control);
95
96/*
97 * TX data handlers. 91 * TX data handlers.
98 */ 92 */
99int rt2x00pci_write_tx_data(struct rt2x00_dev *rt2x00dev, 93int rt2x00pci_write_tx_data(struct rt2x00_dev *rt2x00dev,
100 struct data_ring *ring, struct sk_buff *skb, 94 struct data_queue *queue, struct sk_buff *skb,
101 struct ieee80211_tx_control *control); 95 struct ieee80211_tx_control *control);
102 96
103/* 97/**
104 * RX/TX data handlers. 98 * struct queue_entry_priv_pci_rx: Per RX entry PCI specific information
99 *
100 * @desc: Pointer to device descriptor.
101 * @data: Pointer to device's entry memory.
 102 * @data_dma: DMA pointer to &data.
103 */
104struct queue_entry_priv_pci_rx {
105 __le32 *desc;
106 dma_addr_t desc_dma;
107
108 void *data;
109 dma_addr_t data_dma;
110};
111
112/**
113 * struct queue_entry_priv_pci_tx: Per TX entry PCI specific information
114 *
115 * @desc: Pointer to device descriptor
116 * @data: Pointer to device's entry memory.
 117 * @data_dma: DMA pointer to &data.
118 * @control: mac80211 control structure used to transmit data.
119 */
120struct queue_entry_priv_pci_tx {
121 __le32 *desc;
122 dma_addr_t desc_dma;
123
124 void *data;
125 dma_addr_t data_dma;
126
127 struct ieee80211_tx_control control;
128};
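
These per-entry structures are what rt2x00lib carves out through the priv_size field of struct data_queue_desc (defined in rt2x00queue.h later in this patch); a chipset driver points priv_size at the PCI variant and recovers it from entry->priv_data. A sketch of that linkage, not taken from any specific chipset driver (the desc_size value is assumed):

/* Sketch only: describe a TX queue so rt2x00lib reserves one
 * queue_entry_priv_pci_tx per entry, and recover it from an entry. */
static const struct data_queue_desc example_queue_tx = {
	.entry_num	= TX_ENTRIES,
	.data_size	= DATA_FRAME_SIZE,
	.desc_size	= 48,			/* chipset specific, assumed */
	.priv_size	= sizeof(struct queue_entry_priv_pci_tx),
};

static inline __le32 *example_entry_txd(struct queue_entry *entry)
{
	struct queue_entry_priv_pci_tx *priv_tx = entry->priv_data;

	return priv_tx->desc;
}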
129
130/**
131 * rt2x00pci_rxdone - Handle RX done events
132 * @rt2x00dev: Device pointer, see &struct rt2x00_dev.
105 */ 133 */
106void rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev); 134void rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev);
107void rt2x00pci_txdone(struct rt2x00_dev *rt2x00dev, struct data_entry *entry, 135
108 const int tx_status, const int retry); 136/**
137 * rt2x00pci_txdone - Handle TX done events
138 * @rt2x00dev: Device pointer, see &struct rt2x00_dev.
139 * @entry: Entry which has completed the transmission of a frame.
140 * @desc: TX done descriptor
141 */
142void rt2x00pci_txdone(struct rt2x00_dev *rt2x00dev, struct queue_entry *entry,
143 struct txdone_entry_desc *desc);
109 144
110/* 145/*
111 * Device initialization handlers. 146 * Device initialization handlers.
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
new file mode 100644
index 000000000000..659e9f44c40c
--- /dev/null
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -0,0 +1,304 @@
1/*
2 Copyright (C) 2004 - 2008 rt2x00 SourceForge Project
3 <http://rt2x00.serialmonkey.com>
4
5 This program is free software; you can redistribute it and/or modify
6 it under the terms of the GNU General Public License as published by
7 the Free Software Foundation; either version 2 of the License, or
8 (at your option) any later version.
9
10 This program is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 GNU General Public License for more details.
14
15 You should have received a copy of the GNU General Public License
16 along with this program; if not, write to the
17 Free Software Foundation, Inc.,
18 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 */
20
21/*
22 Module: rt2x00lib
23 Abstract: rt2x00 queue specific routines.
24 */
25
26#include <linux/kernel.h>
27#include <linux/module.h>
28
29#include "rt2x00.h"
30#include "rt2x00lib.h"
31
32struct data_queue *rt2x00queue_get_queue(struct rt2x00_dev *rt2x00dev,
33 const unsigned int queue)
34{
35 int atim = test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags);
36
37 if (queue < rt2x00dev->hw->queues && rt2x00dev->tx)
38 return &rt2x00dev->tx[queue];
39
40 if (!rt2x00dev->bcn)
41 return NULL;
42
43 if (queue == RT2X00_BCN_QUEUE_BEACON)
44 return &rt2x00dev->bcn[0];
45 else if (queue == RT2X00_BCN_QUEUE_ATIM && atim)
46 return &rt2x00dev->bcn[1];
47
48 return NULL;
49}
50EXPORT_SYMBOL_GPL(rt2x00queue_get_queue);
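
rt2x00queue_get_queue() maps both the mac80211 TX queue indices and the driver-private beacon/ATIM identifiers onto the flat queue array built in rt2x00queue_allocate() below. A hedged usage sketch (the surrounding function and the -ENOBUFS choice are illustrative, not part of the patch):

/* Sketch: resolving a queue before handing a frame to the bus layer. */
static int example_resolve_queue(struct rt2x00_dev *rt2x00dev,
				 unsigned int queue_idx)
{
	struct data_queue *queue;

	queue = rt2x00queue_get_queue(rt2x00dev, queue_idx);
	if (!queue)		/* unknown index, or ATIM queue not supported */
		return -EINVAL;

	if (rt2x00queue_full(queue))
		return -ENOBUFS;	/* assumed error code, for illustration */

	/* ... pass queue to rt2x00pci_write_tx_data() or the USB variant ... */
	return 0;
}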
51
52struct queue_entry *rt2x00queue_get_entry(struct data_queue *queue,
53 enum queue_index index)
54{
55 struct queue_entry *entry;
56 unsigned long irqflags;
57
58 if (unlikely(index >= Q_INDEX_MAX)) {
59 ERROR(queue->rt2x00dev,
60 "Entry requested from invalid index type (%d)\n", index);
61 return NULL;
62 }
63
64 spin_lock_irqsave(&queue->lock, irqflags);
65
66 entry = &queue->entries[queue->index[index]];
67
68 spin_unlock_irqrestore(&queue->lock, irqflags);
69
70 return entry;
71}
72EXPORT_SYMBOL_GPL(rt2x00queue_get_entry);
73
74void rt2x00queue_index_inc(struct data_queue *queue, enum queue_index index)
75{
76 unsigned long irqflags;
77
78 if (unlikely(index >= Q_INDEX_MAX)) {
79 ERROR(queue->rt2x00dev,
80 "Index change on invalid index type (%d)\n", index);
81 return;
82 }
83
84 spin_lock_irqsave(&queue->lock, irqflags);
85
86 queue->index[index]++;
87 if (queue->index[index] >= queue->limit)
88 queue->index[index] = 0;
89
90 if (index == Q_INDEX) {
91 queue->length++;
92 } else if (index == Q_INDEX_DONE) {
93 queue->length--;
 94 queue->count++;
95 }
96
97 spin_unlock_irqrestore(&queue->lock, irqflags);
98}
99EXPORT_SYMBOL_GPL(rt2x00queue_index_inc);
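
The two index pointers implement a classic ring: Q_INDEX advances when software hands an entry to the hardware (length grows), Q_INDEX_DONE advances when the completion handler reclaims it (length shrinks, count grows). A condensed sketch of one submit/complete cycle; the function and the placeholder work are illustrative, the API calls are the ones above:

/* Sketch of the ring discipline enforced by rt2x00queue_index_inc(). */
static void example_tx_cycle(struct data_queue *queue)
{
	struct queue_entry *entry;

	/* Submission path: claim the entry at Q_INDEX, hand it to the
	 * device (descriptor write, DMA kick), then advance the index. */
	entry = rt2x00queue_get_entry(queue, Q_INDEX);
	entry->flags = 0;				/* placeholder work */
	rt2x00queue_index_inc(queue, Q_INDEX);		/* length++ */

	/* Completion path, normally run from the txdone handler: reclaim
	 * the oldest outstanding entry and advance Q_INDEX_DONE. */
	entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
	entry->flags = 0;				/* placeholder work */
	rt2x00queue_index_inc(queue, Q_INDEX_DONE);	/* length--, count++ */
}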
100
101static void rt2x00queue_reset(struct data_queue *queue)
102{
103 unsigned long irqflags;
104
105 spin_lock_irqsave(&queue->lock, irqflags);
106
107 queue->count = 0;
108 queue->length = 0;
109 memset(queue->index, 0, sizeof(queue->index));
110
111 spin_unlock_irqrestore(&queue->lock, irqflags);
112}
113
114void rt2x00queue_init_rx(struct rt2x00_dev *rt2x00dev)
115{
116 struct data_queue *queue = rt2x00dev->rx;
117 unsigned int i;
118
119 rt2x00queue_reset(queue);
120
121 if (!rt2x00dev->ops->lib->init_rxentry)
122 return;
123
124 for (i = 0; i < queue->limit; i++)
125 rt2x00dev->ops->lib->init_rxentry(rt2x00dev,
126 &queue->entries[i]);
127}
128
129void rt2x00queue_init_tx(struct rt2x00_dev *rt2x00dev)
130{
131 struct data_queue *queue;
132 unsigned int i;
133
134 txall_queue_for_each(rt2x00dev, queue) {
135 rt2x00queue_reset(queue);
136
137 if (!rt2x00dev->ops->lib->init_txentry)
138 continue;
139
140 for (i = 0; i < queue->limit; i++)
141 rt2x00dev->ops->lib->init_txentry(rt2x00dev,
142 &queue->entries[i]);
143 }
144}
145
146static int rt2x00queue_alloc_entries(struct data_queue *queue,
147 const struct data_queue_desc *qdesc)
148{
149 struct queue_entry *entries;
150 unsigned int entry_size;
151 unsigned int i;
152
153 rt2x00queue_reset(queue);
154
155 queue->limit = qdesc->entry_num;
156 queue->data_size = qdesc->data_size;
157 queue->desc_size = qdesc->desc_size;
158
159 /*
160 * Allocate all queue entries.
161 */
162 entry_size = sizeof(*entries) + qdesc->priv_size;
163 entries = kzalloc(queue->limit * entry_size, GFP_KERNEL);
164 if (!entries)
165 return -ENOMEM;
166
167#define QUEUE_ENTRY_PRIV_OFFSET(__base, __index, __limit, __esize, __psize) \
168 ( ((char *)(__base)) + ((__limit) * (__esize)) + \
169 ((__index) * (__psize)) )
170
171 for (i = 0; i < queue->limit; i++) {
172 entries[i].flags = 0;
173 entries[i].queue = queue;
174 entries[i].skb = NULL;
175 entries[i].entry_idx = i;
176 entries[i].priv_data =
177 QUEUE_ENTRY_PRIV_OFFSET(entries, i, queue->limit,
178 sizeof(*entries), qdesc->priv_size);
179 }
180
181#undef QUEUE_ENTRY_PRIV_OFFSET
182
183 queue->entries = entries;
184
185 return 0;
186}
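
The single kzalloc() above holds the queue_entry array followed by the per-entry private areas; QUEUE_ENTRY_PRIV_OFFSET skips past the whole entry array and then indexes into the private region by priv_size. The same arithmetic written out as a plain helper, for illustration only:

/* Memory picture for limit = N entries:
 *
 *   [ queue_entry 0 ... queue_entry N-1 | priv 0 ... priv N-1 ]
 *
 * Equivalent of QUEUE_ENTRY_PRIV_OFFSET() expressed as a function. */
static void *example_priv_offset(void *base, unsigned int index,
				 unsigned int limit, size_t entry_size,
				 size_t priv_size)
{
	return (char *)base + (limit * entry_size) + (index * priv_size);
}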
187
188int rt2x00queue_initialize(struct rt2x00_dev *rt2x00dev)
189{
190 struct data_queue *queue;
191 int status;
192
193
194 status = rt2x00queue_alloc_entries(rt2x00dev->rx, rt2x00dev->ops->rx);
195 if (status)
196 goto exit;
197
198 tx_queue_for_each(rt2x00dev, queue) {
199 status = rt2x00queue_alloc_entries(queue, rt2x00dev->ops->tx);
200 if (status)
201 goto exit;
202 }
203
204 status = rt2x00queue_alloc_entries(rt2x00dev->bcn, rt2x00dev->ops->bcn);
205 if (status)
206 goto exit;
207
208 if (!test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags))
209 return 0;
210
211 status = rt2x00queue_alloc_entries(&rt2x00dev->bcn[1],
212 rt2x00dev->ops->atim);
213 if (status)
214 goto exit;
215
216 return 0;
217
218exit:
219 ERROR(rt2x00dev, "Queue entries allocation failed.\n");
220
221 rt2x00queue_uninitialize(rt2x00dev);
222
223 return status;
224}
225
226void rt2x00queue_uninitialize(struct rt2x00_dev *rt2x00dev)
227{
228 struct data_queue *queue;
229
230 queue_for_each(rt2x00dev, queue) {
231 kfree(queue->entries);
232 queue->entries = NULL;
233 }
234}
235
236static void rt2x00queue_init(struct rt2x00_dev *rt2x00dev,
237 struct data_queue *queue, enum data_queue_qid qid)
238{
239 spin_lock_init(&queue->lock);
240
241 queue->rt2x00dev = rt2x00dev;
242 queue->qid = qid;
243 queue->aifs = 2;
244 queue->cw_min = 5;
245 queue->cw_max = 10;
246}
247
248int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev)
249{
250 struct data_queue *queue;
251 enum data_queue_qid qid;
252 unsigned int req_atim =
253 !!test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags);
254
255 /*
256 * We need the following queues:
257 * RX: 1
258 * TX: hw->queues
259 * Beacon: 1
260 * Atim: 1 (if required)
261 */
262 rt2x00dev->data_queues = 2 + rt2x00dev->hw->queues + req_atim;
263
264 queue = kzalloc(rt2x00dev->data_queues * sizeof(*queue), GFP_KERNEL);
265 if (!queue) {
266 ERROR(rt2x00dev, "Queue allocation failed.\n");
267 return -ENOMEM;
268 }
269
270 /*
271 * Initialize pointers
272 */
273 rt2x00dev->rx = queue;
274 rt2x00dev->tx = &queue[1];
275 rt2x00dev->bcn = &queue[1 + rt2x00dev->hw->queues];
276
277 /*
278 * Initialize queue parameters.
279 * RX: qid = QID_RX
280 * TX: qid = QID_AC_BE + index
281 * TX: cw_min: 2^5 = 32.
282 * TX: cw_max: 2^10 = 1024.
283 * BCN & Atim: qid = QID_MGMT
284 */
285 rt2x00queue_init(rt2x00dev, rt2x00dev->rx, QID_RX);
286
287 qid = QID_AC_BE;
288 tx_queue_for_each(rt2x00dev, queue)
289 rt2x00queue_init(rt2x00dev, queue, qid++);
290
291 rt2x00queue_init(rt2x00dev, &rt2x00dev->bcn[0], QID_MGMT);
292 if (req_atim)
293 rt2x00queue_init(rt2x00dev, &rt2x00dev->bcn[1], QID_MGMT);
294
295 return 0;
296}
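
Because every queue lives in the one array allocated here, rx, tx and bcn are simply offsets into it, and the queue_end()/tx_queue_end() helpers in rt2x00queue.h rely on that contiguity. For a device with hw->queues == 4 and an ATIM queue (an assumed configuration) the layout works out as sketched below:

/* Assumed example: hw->queues = 4, ATIM required, so data_queues = 7.
 *
 *   queue[0]            rt2x00dev->rx          QID_RX
 *   queue[1]..queue[4]  rt2x00dev->tx[0..3]    QID_AC_BE .. QID_AC_VO
 *   queue[5]            rt2x00dev->bcn[0]      QID_MGMT (beacon)
 *   queue[6]            rt2x00dev->bcn[1]      QID_MGMT (atim)
 *
 * One past the last queue, equivalent to the queue_end() helper: */
static inline struct data_queue *example_queue_end(struct rt2x00_dev *rt2x00dev)
{
	return &rt2x00dev->rx[rt2x00dev->data_queues];
}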
297
298void rt2x00queue_free(struct rt2x00_dev *rt2x00dev)
299{
300 kfree(rt2x00dev->rx);
301 rt2x00dev->rx = NULL;
302 rt2x00dev->tx = NULL;
303 rt2x00dev->bcn = NULL;
304}
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.h b/drivers/net/wireless/rt2x00/rt2x00queue.h
new file mode 100644
index 000000000000..7027c9f47d3f
--- /dev/null
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.h
@@ -0,0 +1,468 @@
1/*
2 Copyright (C) 2004 - 2008 rt2x00 SourceForge Project
3 <http://rt2x00.serialmonkey.com>
4
5 This program is free software; you can redistribute it and/or modify
6 it under the terms of the GNU General Public License as published by
7 the Free Software Foundation; either version 2 of the License, or
8 (at your option) any later version.
9
10 This program is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 GNU General Public License for more details.
14
15 You should have received a copy of the GNU General Public License
16 along with this program; if not, write to the
17 Free Software Foundation, Inc.,
18 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 */
20
21/*
22 Module: rt2x00
23 Abstract: rt2x00 queue datastructures and routines
24 */
25
26#ifndef RT2X00QUEUE_H
27#define RT2X00QUEUE_H
28
29#include <linux/prefetch.h>
30
31/**
 32 * DOC: Entry frame size
33 *
 34 * Ralink PCI devices require the frame size to be a multiple of 128 bytes.
 35 * For USB devices this restriction does not apply, but the value of
 36 * 2432 still makes sense since it is big enough to contain the maximum
 37 * fragment size according to the IEEE 802.11 specs.
38 */
39#define DATA_FRAME_SIZE 2432
40#define MGMT_FRAME_SIZE 256
41
42/**
43 * DOC: Number of entries per queue
44 *
 45 * After research it was concluded that 12 entries in an RX or TX
46 * queue would be sufficient. Although this is almost one third of
47 * the amount the legacy driver allocated, the queues aren't getting
48 * filled to the maximum even when working with the maximum rate.
49 */
50#define RX_ENTRIES 12
51#define TX_ENTRIES 12
52#define BEACON_ENTRIES 1
53#define ATIM_ENTRIES 1
54
55/**
56 * enum data_queue_qid: Queue identification
57 */
58enum data_queue_qid {
59 QID_AC_BE = 0,
60 QID_AC_BK = 1,
61 QID_AC_VI = 2,
62 QID_AC_VO = 3,
63 QID_HCCA = 4,
64 QID_MGMT = 13,
65 QID_RX = 14,
66 QID_OTHER = 15,
67};
68
69/**
70 * enum rt2x00_bcn_queue: Beacon queue index
71 *
 72 * Start counting with a high offset, because this enumeration
73 * supplements &enum ieee80211_tx_queue and we should prevent value
74 * conflicts.
75 *
76 * @RT2X00_BCN_QUEUE_BEACON: Beacon queue
77 * @RT2X00_BCN_QUEUE_ATIM: Atim queue (sends frame after beacon)
78 */
79enum rt2x00_bcn_queue {
80 RT2X00_BCN_QUEUE_BEACON = 100,
81 RT2X00_BCN_QUEUE_ATIM = 101,
82};
83
84/**
85 * enum skb_frame_desc_flags: Flags for &struct skb_frame_desc
86 *
87 * @FRAME_DESC_DRIVER_GENERATED: Frame was generated inside driver
88 * and should not be reported back to mac80211 during txdone.
89 */
90enum skb_frame_desc_flags {
91 FRAME_DESC_DRIVER_GENERATED = 1 << 0,
92};
93
94/**
95 * struct skb_frame_desc: Descriptor information for the skb buffer
96 *
 97 * This structure is placed over the skb->cb array, which means that
 98 * it should not exceed the size of that array (48 bytes).
99 *
100 * @flags: Frame flags, see &enum skb_frame_desc_flags.
101 * @frame_type: Frame type, see &enum rt2x00_dump_type.
102 * @data: Pointer to data part of frame (Start of ieee80211 header).
103 * @desc: Pointer to descriptor part of the frame.
104 * Note that this pointer could point to something outside
105 * of the scope of the skb->data pointer.
106 * @data_len: Length of the frame data.
107 * @desc_len: Length of the frame descriptor.
 108 *
109 * @entry: The entry to which this sk buffer belongs.
110 */
111struct skb_frame_desc {
112 unsigned int flags;
113
114 unsigned int frame_type;
115
116 void *data;
117 void *desc;
118
119 unsigned int data_len;
120 unsigned int desc_len;
121
122 struct queue_entry *entry;
123};
124
125static inline struct skb_frame_desc* get_skb_frame_desc(struct sk_buff *skb)
126{
127 BUILD_BUG_ON(sizeof(struct skb_frame_desc) > sizeof(skb->cb));
128 return (struct skb_frame_desc *)&skb->cb[0];
129}
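
Overlaying struct skb_frame_desc on skb->cb avoids a separate per-frame allocation, and the BUILD_BUG_ON() enforces the 48 byte cb limit at compile time. A sketch of how a driver fills the descriptor right after building a frame, following the pattern used in rt2x00pci_write_tx_data() earlier in this patch (the helper name is illustrative):

/* Sketch: tag an skb with its descriptor information. */
static void example_fill_skbdesc(struct sk_buff *skb, struct queue_entry *entry,
				 __le32 *desc, unsigned int desc_len)
{
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);

	memset(skbdesc, 0, sizeof(*skbdesc));
	skbdesc->data = skb->data;
	skbdesc->data_len = skb->len;
	skbdesc->desc = desc;
	skbdesc->desc_len = desc_len;
	skbdesc->entry = entry;
}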
130
131/**
132 * enum rxdone_entry_desc_flags: Flags for &struct rxdone_entry_desc
133 *
134 * @RXDONE_SIGNAL_PLCP: Does the signal field contain the plcp value,
135 * or does it contain the bitrate itself.
136 * @RXDONE_MY_BSS: Does this frame originate from device's BSS.
137 */
138enum rxdone_entry_desc_flags {
139 RXDONE_SIGNAL_PLCP = 1 << 0,
140 RXDONE_MY_BSS = 1 << 1,
141};
142
143/**
144 * struct rxdone_entry_desc: RX Entry descriptor
145 *
146 * Summary of information that has been read from the RX frame descriptor.
147 *
148 * @signal: Signal of the received frame.
149 * @rssi: RSSI of the received frame.
150 * @size: Data size of the received frame.
151 * @flags: MAC80211 receive flags (See &enum mac80211_rx_flags).
152 * @dev_flags: Ralink receive flags (See &enum rxdone_entry_desc_flags).
 153 *
154 */
155struct rxdone_entry_desc {
156 int signal;
157 int rssi;
158 int size;
159 int flags;
160 int dev_flags;
161};
162
163/**
164 * struct txdone_entry_desc: TX done entry descriptor
165 *
166 * Summary of information that has been read from the TX frame descriptor
167 * after the device is done with transmission.
168 *
169 * @control: Control structure which was used to transmit the frame.
170 * @status: TX status (See &enum tx_status).
171 * @retry: Retry count.
172 */
173struct txdone_entry_desc {
174 struct ieee80211_tx_control *control;
175 int status;
176 int retry;
177};
178
179/**
180 * enum txentry_desc_flags: Status flags for TX entry descriptor
181 *
 182 * @ENTRY_TXD_RTS_FRAME: This frame is an RTS frame.
 183 * @ENTRY_TXD_OFDM_RATE: This frame is sent out with an OFDM rate.
184 * @ENTRY_TXD_MORE_FRAG: This frame is followed by another fragment.
185 * @ENTRY_TXD_REQ_TIMESTAMP: Require timestamp to be inserted.
186 * @ENTRY_TXD_BURST: This frame belongs to the same burst event.
187 * @ENTRY_TXD_ACK: An ACK is required for this frame.
188 */
189enum txentry_desc_flags {
190 ENTRY_TXD_RTS_FRAME,
191 ENTRY_TXD_OFDM_RATE,
192 ENTRY_TXD_MORE_FRAG,
193 ENTRY_TXD_REQ_TIMESTAMP,
194 ENTRY_TXD_BURST,
195 ENTRY_TXD_ACK,
196};
197
198/**
199 * struct txentry_desc: TX Entry descriptor
200 *
201 * Summary of information for the frame descriptor before sending a TX frame.
202 *
 203 * @flags: Descriptor flags (See &enum txentry_desc_flags).
204 * @queue: Queue identification (See &enum data_queue_qid).
205 * @length_high: PLCP length high word.
206 * @length_low: PLCP length low word.
207 * @signal: PLCP signal.
208 * @service: PLCP service.
209 * @aifs: AIFS value.
210 * @ifs: IFS value.
211 * @cw_min: cwmin value.
212 * @cw_max: cwmax value.
213 */
214struct txentry_desc {
215 unsigned long flags;
216
217 enum data_queue_qid queue;
218
219 u16 length_high;
220 u16 length_low;
221 u16 signal;
222 u16 service;
223
224 int aifs;
225 int ifs;
226 int cw_min;
227 int cw_max;
228};
229
230/**
231 * enum queue_entry_flags: Status flags for queue entry
232 *
233 * @ENTRY_BCN_ASSIGNED: This entry has been assigned to an interface.
234 * As long as this bit is set, this entry may only be touched
235 * through the interface structure.
236 * @ENTRY_OWNER_DEVICE_DATA: This entry is owned by the device for data
237 * transfer (either TX or RX depending on the queue). The entry should
238 * only be touched after the device has signaled it is done with it.
239 * @ENTRY_OWNER_DEVICE_CRYPTO: This entry is owned by the device for data
240 * encryption or decryption. The entry should only be touched after
241 * the device has signaled it is done with it.
242 */
243
244enum queue_entry_flags {
245 ENTRY_BCN_ASSIGNED,
246 ENTRY_OWNER_DEVICE_DATA,
247 ENTRY_OWNER_DEVICE_CRYPTO,
248};
249
250/**
251 * struct queue_entry: Entry inside the &struct data_queue
252 *
253 * @flags: Entry flags, see &enum queue_entry_flags.
254 * @queue: The data queue (&struct data_queue) to which this entry belongs.
255 * @skb: The buffer which is currently being transmitted (for TX queue),
 256 * or used to receive data directly into (for the RX queue).
257 * @entry_idx: The entry index number.
258 * @priv_data: Private data belonging to this queue entry. The pointer
259 * points to data specific to a particular driver and queue type.
260 */
261struct queue_entry {
262 unsigned long flags;
263
264 struct data_queue *queue;
265
266 struct sk_buff *skb;
267
268 unsigned int entry_idx;
269
270 void *priv_data;
271};
272
273/**
274 * enum queue_index: Queue index type
275 *
276 * @Q_INDEX: Index pointer to the current entry in the queue, if this entry is
277 * owned by the hardware then the queue is considered to be full.
278 * @Q_INDEX_DONE: Index pointer to the next entry which will be completed by
279 * the hardware and for which we need to run the txdone handler. If this
280 * entry is not owned by the hardware the queue is considered to be empty.
 281 * @Q_INDEX_CRYPTO: Index pointer to the next entry for which encryption or
 282 * decryption will be completed by the hardware.
283 * @Q_INDEX_MAX: Keep last, used in &struct data_queue to determine the size
284 * of the index array.
285 */
286enum queue_index {
287 Q_INDEX,
288 Q_INDEX_DONE,
289 Q_INDEX_CRYPTO,
290 Q_INDEX_MAX,
291};
292
293/**
294 * struct data_queue: Data queue
295 *
296 * @rt2x00dev: Pointer to main &struct rt2x00dev where this queue belongs to.
297 * @entries: Base address of the &struct queue_entry which are
298 * part of this queue.
299 * @qid: The queue identification, see &enum data_queue_qid.
300 * @lock: Spinlock to protect index handling. Whenever @index, @index_done or
301 * @index_crypt needs to be changed this lock should be grabbed to prevent
302 * index corruption due to concurrency.
303 * @count: Number of frames handled in the queue.
304 * @limit: Maximum number of entries in the queue.
305 * @length: Number of frames in queue.
306 * @index: Index pointers to entry positions in the queue,
307 * use &enum queue_index to get a specific index field.
308 * @aifs: The aifs value for outgoing frames (field ignored in RX queue).
309 * @cw_min: The cw min value for outgoing frames (field ignored in RX queue).
310 * @cw_max: The cw max value for outgoing frames (field ignored in RX queue).
311 * @data_size: Maximum data size for the frames in this queue.
312 * @desc_size: Hardware descriptor size for the data in this queue.
313 */
314struct data_queue {
315 struct rt2x00_dev *rt2x00dev;
316 struct queue_entry *entries;
317
318 enum data_queue_qid qid;
319
320 spinlock_t lock;
321 unsigned int count;
322 unsigned short limit;
323 unsigned short length;
324 unsigned short index[Q_INDEX_MAX];
325
326 unsigned short aifs;
327 unsigned short cw_min;
328 unsigned short cw_max;
329
330 unsigned short data_size;
331 unsigned short desc_size;
332};
333
334/**
335 * struct data_queue_desc: Data queue description
336 *
337 * The information in this structure is used by drivers
338 * to inform rt2x00lib about the creation of the data queue.
339 *
340 * @entry_num: Maximum number of entries for a queue.
341 * @data_size: Maximum data size for the frames in this queue.
342 * @desc_size: Hardware descriptor size for the data in this queue.
343 * @priv_size: Size of per-queue_entry private data.
344 */
345struct data_queue_desc {
346 unsigned short entry_num;
347 unsigned short data_size;
348 unsigned short desc_size;
349 unsigned short priv_size;
350};
351
352/**
353 * queue_end - Return pointer to the last queue (HELPER MACRO).
354 * @__dev: Pointer to &struct rt2x00_dev
355 *
356 * Using the base rx pointer and the maximum number of available queues,
357 * this macro will return the address of 1 position beyond the end of the
358 * queues array.
359 */
360#define queue_end(__dev) \
361 &(__dev)->rx[(__dev)->data_queues]
362
363/**
364 * tx_queue_end - Return pointer to the last TX queue (HELPER MACRO).
365 * @__dev: Pointer to &struct rt2x00_dev
366 *
367 * Using the base tx pointer and the maximum number of available TX
368 * queues, this macro will return the address of 1 position beyond
369 * the end of the TX queue array.
370 */
371#define tx_queue_end(__dev) \
372 &(__dev)->tx[(__dev)->hw->queues]
373
374/**
375 * queue_loop - Loop through the queues within a specific range (HELPER MACRO).
376 * @__entry: Pointer where the current queue entry will be stored in.
377 * @__start: Start queue pointer.
378 * @__end: End queue pointer.
379 *
380 * This macro will loop through all queues between &__start and &__end.
381 */
382#define queue_loop(__entry, __start, __end) \
383 for ((__entry) = (__start); \
384 prefetch(&(__entry)[1]), (__entry) != (__end); \
385 (__entry) = &(__entry)[1])
386
387/**
388 * queue_for_each - Loop through all queues
389 * @__dev: Pointer to &struct rt2x00_dev
390 * @__entry: Pointer where the current queue entry will be stored in.
391 *
392 * This macro will loop through all available queues.
393 */
394#define queue_for_each(__dev, __entry) \
395 queue_loop(__entry, (__dev)->rx, queue_end(__dev))
396
397/**
398 * tx_queue_for_each - Loop through the TX queues
399 * @__dev: Pointer to &struct rt2x00_dev
400 * @__entry: Pointer where the current queue entry will be stored in.
401 *
402 * This macro will loop through all TX related queues excluding
403 * the Beacon and Atim queues.
404 */
405#define tx_queue_for_each(__dev, __entry) \
406 queue_loop(__entry, (__dev)->tx, tx_queue_end(__dev))
407
408/**
409 * txall_queue_for_each - Loop through all TX related queues
410 * @__dev: Pointer to &struct rt2x00_dev
411 * @__entry: Pointer where the current queue entry will be stored in.
412 *
413 * This macro will loop through all TX related queues including
414 * the Beacon and Atim queues.
415 */
416#define txall_queue_for_each(__dev, __entry) \
417 queue_loop(__entry, (__dev)->tx, queue_end(__dev))
418
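
The three iterators differ only in their end pointer: queue_for_each() covers everything from the RX queue onwards, tx_queue_for_each() stops before the beacon/ATIM queues, and txall_queue_for_each() includes them. A usage sketch (the statistic being gathered is hypothetical, the macros and fields are the ones defined in this header):

/* Sketch: walk the data TX queues and count frames still pending. */
static unsigned int example_pending_frames(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	unsigned int pending = 0;

	tx_queue_for_each(rt2x00dev, queue)
		pending += queue->length;

	return pending;
}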
419/**
420 * rt2x00queue_empty - Check if the queue is empty.
421 * @queue: Queue to check if empty.
422 */
423static inline int rt2x00queue_empty(struct data_queue *queue)
424{
425 return queue->length == 0;
426}
427
428/**
429 * rt2x00queue_full - Check if the queue is full.
430 * @queue: Queue to check if full.
431 */
432static inline int rt2x00queue_full(struct data_queue *queue)
433{
434 return queue->length == queue->limit;
435}
436
437/**
 438 * rt2x00queue_available - Check the number of available entries in the queue.
439 * @queue: Queue to check.
440 */
441static inline int rt2x00queue_available(struct data_queue *queue)
442{
443 return queue->limit - queue->length;
444}
445
446/**
447 * rt2x00_desc_read - Read a word from the hardware descriptor.
448 * @desc: Base descriptor address
449 * @word: Word index from where the descriptor should be read.
450 * @value: Address where the descriptor value should be written into.
451 */
452static inline void rt2x00_desc_read(__le32 *desc, const u8 word, u32 *value)
453{
454 *value = le32_to_cpu(desc[word]);
455}
456
457/**
 458 * rt2x00_desc_write - Write a word to the hardware descriptor.
 459 * @desc: Base descriptor address
 460 * @word: Word index to which the value should be written.
461 * @value: Value that should be written into the descriptor.
462 */
463static inline void rt2x00_desc_write(__le32 *desc, const u8 word, u32 value)
464{
465 desc[word] = cpu_to_le32(value);
466}
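
Descriptor words live in DMA memory in little endian byte order, so every access goes through these helpers, usually combined with the rt2x00_set_field32()/rt2x00_get_field32() accessors seen throughout the patch. A sketch of the usual read-modify-write cycle, mirroring the ownership handling in rt2x00pci_txdone() above (TXD_ENTRY_OWNER_NIC and TXD_ENTRY_VALID are the field names used in the rt2x00pci hunks; the helper itself is illustrative):

/* Sketch: hand descriptor word 0 back to software after TX completion. */
static void example_release_txd(__le32 *desc)
{
	u32 word;

	rt2x00_desc_read(desc, 0, &word);
	rt2x00_set_field32(&word, TXD_ENTRY_OWNER_NIC, 0);
	rt2x00_set_field32(&word, TXD_ENTRY_VALID, 0);
	rt2x00_desc_write(desc, 0, word);
}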
467
468#endif /* RT2X00QUEUE_H */
diff --git a/drivers/net/wireless/rt2x00/rt2x00reg.h b/drivers/net/wireless/rt2x00/rt2x00reg.h
index b1915dc7dda1..0325bed2fbf5 100644
--- a/drivers/net/wireless/rt2x00/rt2x00reg.h
+++ b/drivers/net/wireless/rt2x00/rt2x00reg.h
@@ -1,5 +1,5 @@
1/* 1/*
2 Copyright (C) 2004 - 2007 rt2x00 SourceForge Project 2 Copyright (C) 2004 - 2008 rt2x00 SourceForge Project
3 <http://rt2x00.serialmonkey.com> 3 <http://rt2x00.serialmonkey.com>
4 4
5 This program is free software; you can redistribute it and/or modify 5 This program is free software; you can redistribute it and/or modify
@@ -29,7 +29,7 @@
29/* 29/*
30 * TX result flags. 30 * TX result flags.
31 */ 31 */
32enum TX_STATUS { 32enum tx_status {
33 TX_SUCCESS = 0, 33 TX_SUCCESS = 0,
34 TX_SUCCESS_RETRY = 1, 34 TX_SUCCESS_RETRY = 1,
35 TX_FAIL_RETRY = 2, 35 TX_FAIL_RETRY = 2,
@@ -220,75 +220,4 @@ static inline u8 rt2x00_get_field8(const u8 reg,
220 return (reg & field.bit_mask) >> field.bit_offset; 220 return (reg & field.bit_mask) >> field.bit_offset;
221} 221}
222 222
223/*
224 * Device specific rate value.
225 * We will have to create the device specific rate value
226 * passed to the ieee80211 kernel. We need to make it a consist of
227 * multiple fields because we want to store more then 1 device specific
228 * values inside the value.
229 * 1 - rate, stored as 100 kbit/s.
230 * 2 - preamble, short_preamble enabled flag.
231 * 3 - MASK_RATE, which rates are enabled in this mode, this mask
232 * corresponds with the TX register format for the current device.
233 * 4 - plcp, 802.11b rates are device specific,
234 * 802.11g rates are set according to the ieee802.11a-1999 p.14.
235 * The bit to enable preamble is set in a seperate define.
236 */
237#define DEV_RATE FIELD32(0x000007ff)
238#define DEV_PREAMBLE FIELD32(0x00000800)
239#define DEV_RATEMASK FIELD32(0x00fff000)
240#define DEV_PLCP FIELD32(0xff000000)
241
242/*
243 * Bitfields
244 */
245#define DEV_RATEBIT_1MB ( 1 << 0 )
246#define DEV_RATEBIT_2MB ( 1 << 1 )
247#define DEV_RATEBIT_5_5MB ( 1 << 2 )
248#define DEV_RATEBIT_11MB ( 1 << 3 )
249#define DEV_RATEBIT_6MB ( 1 << 4 )
250#define DEV_RATEBIT_9MB ( 1 << 5 )
251#define DEV_RATEBIT_12MB ( 1 << 6 )
252#define DEV_RATEBIT_18MB ( 1 << 7 )
253#define DEV_RATEBIT_24MB ( 1 << 8 )
254#define DEV_RATEBIT_36MB ( 1 << 9 )
255#define DEV_RATEBIT_48MB ( 1 << 10 )
256#define DEV_RATEBIT_54MB ( 1 << 11 )
257
258/*
259 * Bitmasks for DEV_RATEMASK
260 */
261#define DEV_RATEMASK_1MB ( (DEV_RATEBIT_1MB << 1) -1 )
262#define DEV_RATEMASK_2MB ( (DEV_RATEBIT_2MB << 1) -1 )
263#define DEV_RATEMASK_5_5MB ( (DEV_RATEBIT_5_5MB << 1) -1 )
264#define DEV_RATEMASK_11MB ( (DEV_RATEBIT_11MB << 1) -1 )
265#define DEV_RATEMASK_6MB ( (DEV_RATEBIT_6MB << 1) -1 )
266#define DEV_RATEMASK_9MB ( (DEV_RATEBIT_9MB << 1) -1 )
267#define DEV_RATEMASK_12MB ( (DEV_RATEBIT_12MB << 1) -1 )
268#define DEV_RATEMASK_18MB ( (DEV_RATEBIT_18MB << 1) -1 )
269#define DEV_RATEMASK_24MB ( (DEV_RATEBIT_24MB << 1) -1 )
270#define DEV_RATEMASK_36MB ( (DEV_RATEBIT_36MB << 1) -1 )
271#define DEV_RATEMASK_48MB ( (DEV_RATEBIT_48MB << 1) -1 )
272#define DEV_RATEMASK_54MB ( (DEV_RATEBIT_54MB << 1) -1 )
273
274/*
275 * Bitmask groups of bitrates
276 */
277#define DEV_BASIC_RATEMASK \
278 ( DEV_RATEMASK_11MB | \
279 DEV_RATEBIT_6MB | DEV_RATEBIT_12MB | DEV_RATEBIT_24MB )
280
281#define DEV_CCK_RATEMASK ( DEV_RATEMASK_11MB )
282#define DEV_OFDM_RATEMASK ( DEV_RATEMASK_54MB & ~DEV_CCK_RATEMASK )
283
284/*
285 * Macro's to set and get specific fields from the device specific val and val2
286 * fields inside the ieee80211_rate entry.
287 */
288#define DEVICE_SET_RATE_FIELD(__value, __mask) \
289 (int)( ((__value) << DEV_##__mask.bit_offset) & DEV_##__mask.bit_mask )
290
291#define DEVICE_GET_RATE_FIELD(__value, __mask) \
292 (int)( ((__value) & DEV_##__mask.bit_mask) >> DEV_##__mask.bit_offset )
293
294#endif /* RT2X00REG_H */ 223#endif /* RT2X00REG_H */
diff --git a/drivers/net/wireless/rt2x00/rt2x00rfkill.c b/drivers/net/wireless/rt2x00/rt2x00rfkill.c
index f95577596206..fcef9885ab5e 100644
--- a/drivers/net/wireless/rt2x00/rt2x00rfkill.c
+++ b/drivers/net/wireless/rt2x00/rt2x00rfkill.c
@@ -1,5 +1,5 @@
1/* 1/*
2 Copyright (C) 2004 - 2007 rt2x00 SourceForge Project 2 Copyright (C) 2004 - 2008 rt2x00 SourceForge Project
3 <http://rt2x00.serialmonkey.com> 3 <http://rt2x00.serialmonkey.com>
4 4
5 This program is free software; you can redistribute it and/or modify 5 This program is free software; you can redistribute it and/or modify
diff --git a/drivers/net/wireless/rt2x00/rt2x00ring.h b/drivers/net/wireless/rt2x00/rt2x00ring.h
deleted file mode 100644
index 1caa6d688c40..000000000000
--- a/drivers/net/wireless/rt2x00/rt2x00ring.h
+++ /dev/null
@@ -1,290 +0,0 @@
1/*
2 Copyright (C) 2004 - 2007 rt2x00 SourceForge Project
3 <http://rt2x00.serialmonkey.com>
4
5 This program is free software; you can redistribute it and/or modify
6 it under the terms of the GNU General Public License as published by
7 the Free Software Foundation; either version 2 of the License, or
8 (at your option) any later version.
9
10 This program is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 GNU General Public License for more details.
14
15 You should have received a copy of the GNU General Public License
16 along with this program; if not, write to the
17 Free Software Foundation, Inc.,
18 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 */
20
21/*
22 Module: rt2x00
23 Abstract: rt2x00 ring datastructures and routines
24 */
25
26#ifndef RT2X00RING_H
27#define RT2X00RING_H
28
29/*
30 * skb_desc
31 * Descriptor information for the skb buffer
32 */
33struct skb_desc {
34 unsigned int frame_type;
35
36 unsigned int desc_len;
37 unsigned int data_len;
38
39 void *desc;
40 void *data;
41
42 struct data_ring *ring;
43 struct data_entry *entry;
44};
45
46static inline struct skb_desc* get_skb_desc(struct sk_buff *skb)
47{
48 return (struct skb_desc*)&skb->cb[0];
49}
50
51/*
52 * rxdata_entry_desc
53 * Summary of information that has been read from the
54 * RX frame descriptor.
55 */
56struct rxdata_entry_desc {
57 int signal;
58 int rssi;
59 int ofdm;
60 int size;
61 int flags;
62 int my_bss;
63};
64
65/*
66 * txdata_entry_desc
67 * Summary of information that should be written into the
68 * descriptor for sending a TX frame.
69 */
70struct txdata_entry_desc {
71 unsigned long flags;
72#define ENTRY_TXDONE 1
73#define ENTRY_TXD_RTS_FRAME 2
74#define ENTRY_TXD_OFDM_RATE 3
75#define ENTRY_TXD_MORE_FRAG 4
76#define ENTRY_TXD_REQ_TIMESTAMP 5
77#define ENTRY_TXD_BURST 6
78#define ENTRY_TXD_ACK 7
79
80/*
81 * Queue ID. ID's 0-4 are data TX rings
82 */
83 int queue;
84#define QUEUE_MGMT 13
85#define QUEUE_RX 14
86#define QUEUE_OTHER 15
87
88 /*
89 * PLCP values.
90 */
91 u16 length_high;
92 u16 length_low;
93 u16 signal;
94 u16 service;
95
96 /*
97 * Timing information
98 */
99 int aifs;
100 int ifs;
101 int cw_min;
102 int cw_max;
103};
104
105/*
106 * data_entry
107 * The data ring is a list of data entries.
108 * Each entry holds a reference to the descriptor
109 * and the data buffer. For TX rings the reference to the
110 * sk_buff of the packet being transmitted is also stored here.
111 */
112struct data_entry {
113 /*
114 * Status flags
115 */
116 unsigned long flags;
117#define ENTRY_OWNER_NIC 1
118
119 /*
120 * Ring we belong to.
121 */
122 struct data_ring *ring;
123
124 /*
125 * sk_buff for the packet which is being transmitted
126 * in this entry (Only used with TX related rings).
127 */
128 struct sk_buff *skb;
129
130 /*
131 * Store a ieee80211_tx_status structure in each
132 * ring entry, this will optimize the txdone
133 * handler.
134 */
135 struct ieee80211_tx_status tx_status;
136
137 /*
138 * private pointer specific to driver.
139 */
140 void *priv;
141
142 /*
143 * Data address for this entry.
144 */
145 void *data_addr;
146 dma_addr_t data_dma;
147
148 /*
149 * Entry identification number (index).
150 */
151 unsigned int entry_idx;
152};
153
154/*
155 * data_ring
156 * Data rings are used by the device to send and receive packets.
157 * The data_addr is the base address of the data memory.
158 * To determine at which point in the ring we are,
159 * have to use the rt2x00_ring_index_*() functions.
160 */
161struct data_ring {
162 /*
163 * Pointer to main rt2x00dev structure where this
164 * ring belongs to.
165 */
166 struct rt2x00_dev *rt2x00dev;
167
168 /*
169 * Base address for the device specific data entries.
170 */
171 struct data_entry *entry;
172
173 /*
174 * TX queue statistic info.
175 */
176 struct ieee80211_tx_queue_stats_data stats;
177
178 /*
179 * TX Queue parameters.
180 */
181 struct ieee80211_tx_queue_params tx_params;
182
183 /*
184 * Base address for data ring.
185 */
186 dma_addr_t data_dma;
187 void *data_addr;
188
189 /*
190 * Queue identification number:
191 * RX: 0
192 * TX: IEEE80211_TX_*
193 */
194 unsigned int queue_idx;
195
196 /*
197 * Index variables.
198 */
199 u16 index;
200 u16 index_done;
201
202 /*
203 * Size of packet and descriptor in bytes.
204 */
205 u16 data_size;
206 u16 desc_size;
207};
208
209/*
210 * Handlers to determine the address of the current device specific
211 * data entry, where either index or index_done points to.
212 */
213static inline struct data_entry *rt2x00_get_data_entry(struct data_ring *ring)
214{
215 return &ring->entry[ring->index];
216}
217
218static inline struct data_entry *rt2x00_get_data_entry_done(struct data_ring
219 *ring)
220{
221 return &ring->entry[ring->index_done];
222}
223
224/*
225 * Total ring memory
226 */
227static inline int rt2x00_get_ring_size(struct data_ring *ring)
228{
229 return ring->stats.limit * (ring->desc_size + ring->data_size);
230}
231
232/*
233 * Ring index manipulation functions.
234 */
235static inline void rt2x00_ring_index_inc(struct data_ring *ring)
236{
237 ring->index++;
238 if (ring->index >= ring->stats.limit)
239 ring->index = 0;
240 ring->stats.len++;
241}
242
243static inline void rt2x00_ring_index_done_inc(struct data_ring *ring)
244{
245 ring->index_done++;
246 if (ring->index_done >= ring->stats.limit)
247 ring->index_done = 0;
248 ring->stats.len--;
249 ring->stats.count++;
250}
251
252static inline void rt2x00_ring_index_clear(struct data_ring *ring)
253{
254 ring->index = 0;
255 ring->index_done = 0;
256 ring->stats.len = 0;
257 ring->stats.count = 0;
258}
259
260static inline int rt2x00_ring_empty(struct data_ring *ring)
261{
262 return ring->stats.len == 0;
263}
264
265static inline int rt2x00_ring_full(struct data_ring *ring)
266{
267 return ring->stats.len == ring->stats.limit;
268}
269
270static inline int rt2x00_ring_free(struct data_ring *ring)
271{
272 return ring->stats.limit - ring->stats.len;
273}
274
275/*
276 * TX/RX Descriptor access functions.
277 */
278static inline void rt2x00_desc_read(__le32 *desc,
279 const u8 word, u32 *value)
280{
281 *value = le32_to_cpu(desc[word]);
282}
283
284static inline void rt2x00_desc_write(__le32 *desc,
285 const u8 word, const u32 value)
286{
287 desc[word] = cpu_to_le32(value);
288}
289
290#endif /* RT2X00RING_H */
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c
index 84e9bdb73910..5a331674dcb2 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.c
@@ -1,5 +1,5 @@
1/* 1/*
2 Copyright (C) 2004 - 2007 rt2x00 SourceForge Project 2 Copyright (C) 2004 - 2008 rt2x00 SourceForge Project
3 <http://rt2x00.serialmonkey.com> 3 <http://rt2x00.serialmonkey.com>
4 4
5 This program is free software; you can redistribute it and/or modify 5 This program is free software; you can redistribute it and/or modify
@@ -40,8 +40,7 @@ int rt2x00usb_vendor_request(struct rt2x00_dev *rt2x00dev,
40 void *buffer, const u16 buffer_length, 40 void *buffer, const u16 buffer_length,
41 const int timeout) 41 const int timeout)
42{ 42{
43 struct usb_device *usb_dev = 43 struct usb_device *usb_dev = rt2x00dev_usb_dev(rt2x00dev);
44 interface_to_usbdev(rt2x00dev_usb(rt2x00dev));
45 int status; 44 int status;
46 unsigned int i; 45 unsigned int i;
47 unsigned int pipe = 46 unsigned int pipe =
@@ -85,20 +84,20 @@ int rt2x00usb_vendor_req_buff_lock(struct rt2x00_dev *rt2x00dev,
85 /* 84 /*
86 * Check for Cache availability. 85 * Check for Cache availability.
87 */ 86 */
88 if (unlikely(!rt2x00dev->csr_cache || buffer_length > CSR_CACHE_SIZE)) { 87 if (unlikely(!rt2x00dev->csr.cache || buffer_length > CSR_CACHE_SIZE)) {
89 ERROR(rt2x00dev, "CSR cache not available.\n"); 88 ERROR(rt2x00dev, "CSR cache not available.\n");
90 return -ENOMEM; 89 return -ENOMEM;
91 } 90 }
92 91
93 if (requesttype == USB_VENDOR_REQUEST_OUT) 92 if (requesttype == USB_VENDOR_REQUEST_OUT)
94 memcpy(rt2x00dev->csr_cache, buffer, buffer_length); 93 memcpy(rt2x00dev->csr.cache, buffer, buffer_length);
95 94
96 status = rt2x00usb_vendor_request(rt2x00dev, request, requesttype, 95 status = rt2x00usb_vendor_request(rt2x00dev, request, requesttype,
97 offset, 0, rt2x00dev->csr_cache, 96 offset, 0, rt2x00dev->csr.cache,
98 buffer_length, timeout); 97 buffer_length, timeout);
99 98
100 if (!status && requesttype == USB_VENDOR_REQUEST_IN) 99 if (!status && requesttype == USB_VENDOR_REQUEST_IN)
101 memcpy(buffer, rt2x00dev->csr_cache, buffer_length); 100 memcpy(buffer, rt2x00dev->csr.cache, buffer_length);
102 101
103 return status; 102 return status;
104} 103}
@@ -128,15 +127,15 @@ EXPORT_SYMBOL_GPL(rt2x00usb_vendor_request_buff);
128 */ 127 */
129static void rt2x00usb_interrupt_txdone(struct urb *urb) 128static void rt2x00usb_interrupt_txdone(struct urb *urb)
130{ 129{
131 struct data_entry *entry = (struct data_entry *)urb->context; 130 struct queue_entry *entry = (struct queue_entry *)urb->context;
132 struct data_ring *ring = entry->ring; 131 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
133 struct rt2x00_dev *rt2x00dev = ring->rt2x00dev; 132 struct queue_entry_priv_usb_tx *priv_tx = entry->priv_data;
133 struct txdone_entry_desc txdesc;
134 __le32 *txd = (__le32 *)entry->skb->data; 134 __le32 *txd = (__le32 *)entry->skb->data;
135 u32 word; 135 u32 word;
136 int tx_status;
137 136
138 if (!test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags) || 137 if (!test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags) ||
139 !__test_and_clear_bit(ENTRY_OWNER_NIC, &entry->flags)) 138 !__test_and_clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
140 return; 139 return;
141 140
142 rt2x00_desc_read(txd, 0, &word); 141 rt2x00_desc_read(txd, 0, &word);
@@ -144,45 +143,46 @@ static void rt2x00usb_interrupt_txdone(struct urb *urb)
144 /* 143 /*
145 * Remove the descriptor data from the buffer. 144 * Remove the descriptor data from the buffer.
146 */ 145 */
147 skb_pull(entry->skb, ring->desc_size); 146 skb_pull(entry->skb, entry->queue->desc_size);
148 147
149 /* 148 /*
150 * Obtain the status about this packet. 149 * Obtain the status about this packet.
151 */ 150 */
152 tx_status = !urb->status ? TX_SUCCESS : TX_FAIL_RETRY; 151 txdesc.status = !urb->status ? TX_SUCCESS : TX_FAIL_RETRY;
152 txdesc.retry = 0;
153 txdesc.control = &priv_tx->control;
153 154
154 rt2x00lib_txdone(entry, tx_status, 0); 155 rt2x00lib_txdone(entry, &txdesc);
155 156
156 /* 157 /*
157 * Make this entry available for reuse. 158 * Make this entry available for reuse.
158 */ 159 */
159 entry->flags = 0; 160 entry->flags = 0;
160 rt2x00_ring_index_done_inc(entry->ring); 161 rt2x00queue_index_inc(entry->queue, Q_INDEX_DONE);
161 162
162 /* 163 /*
163 * If the data ring was full before the txdone handler 164 * If the data queue was full before the txdone handler
164 * we must make sure the packet queue in the mac80211 stack 165 * we must make sure the packet queue in the mac80211 stack
165 * is reenabled when the txdone handler has finished. 166 * is reenabled when the txdone handler has finished.
166 */ 167 */
167 if (!rt2x00_ring_full(ring)) 168 if (!rt2x00queue_full(entry->queue))
168 ieee80211_wake_queue(rt2x00dev->hw, 169 ieee80211_wake_queue(rt2x00dev->hw, priv_tx->control.queue);
169 entry->tx_status.control.queue);
170} 170}
171 171
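The txdone handler above hands the completion to rt2x00lib, marks the entry reusable, and only wakes the matching mac80211 queue once the hardware queue is no longer full. A minimal user-space sketch of that backpressure pattern, using a toy ring instead of the real data_queue/queue_entry structures (all names below are illustrative, not from the driver):

	#include <stdbool.h>
	#include <stdio.h>

	#define RING_SIZE 4

	struct toy_ring {
		unsigned int index;       /* next entry handed to "hardware" */
		unsigned int index_done;  /* next entry expected to complete */
		bool stopped;             /* mirrors a stopped mac80211 queue */
	};

	static bool ring_full(const struct toy_ring *q)
	{
		return q->index - q->index_done >= RING_SIZE;
	}

	static void submit(struct toy_ring *q)
	{
		if (ring_full(q)) {
			q->stopped = true;        /* would be ieee80211_stop_queue() */
			printf("queue stopped\n");
			return;
		}
		q->index++;
	}

	static void txdone(struct toy_ring *q)
	{
		q->index_done++;                  /* entry available for reuse */
		if (q->stopped && !ring_full(q)) {
			q->stopped = false;       /* would be ieee80211_wake_queue() */
			printf("queue woken\n");
		}
	}

	int main(void)
	{
		struct toy_ring q = { 0 };
		int i;

		for (i = 0; i < RING_SIZE + 1; i++)
			submit(&q);               /* fifth submit hits the stop path */
		txdone(&q);                       /* completion re-opens the queue */
		return 0;
	}

The write path below mirrors the producer side: rt2x00usb_write_tx_data() rejects new frames once rt2x00queue_full() reports the queue as full.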
172int rt2x00usb_write_tx_data(struct rt2x00_dev *rt2x00dev, 172int rt2x00usb_write_tx_data(struct rt2x00_dev *rt2x00dev,
173 struct data_ring *ring, struct sk_buff *skb, 173 struct data_queue *queue, struct sk_buff *skb,
174 struct ieee80211_tx_control *control) 174 struct ieee80211_tx_control *control)
175{ 175{
176 struct usb_device *usb_dev = 176 struct usb_device *usb_dev = rt2x00dev_usb_dev(rt2x00dev);
177 interface_to_usbdev(rt2x00dev_usb(rt2x00dev)); 177 struct queue_entry *entry = rt2x00queue_get_entry(queue, Q_INDEX);
178 struct data_entry *entry = rt2x00_get_data_entry(ring); 178 struct queue_entry_priv_usb_tx *priv_tx = entry->priv_data;
179 struct skb_desc *desc; 179 struct skb_frame_desc *skbdesc;
180 u32 length; 180 u32 length;
181 181
182 if (rt2x00_ring_full(ring)) 182 if (rt2x00queue_full(queue))
183 return -EINVAL; 183 return -EINVAL;
184 184
185 if (test_bit(ENTRY_OWNER_NIC, &entry->flags)) { 185 if (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags)) {
186 ERROR(rt2x00dev, 186 ERROR(rt2x00dev,
187 "Arrived at non-free entry in the non-full queue %d.\n" 187 "Arrived at non-free entry in the non-full queue %d.\n"
188 "Please file bug report to %s.\n", 188 "Please file bug report to %s.\n",
@@ -193,20 +193,20 @@ int rt2x00usb_write_tx_data(struct rt2x00_dev *rt2x00dev,
193 /* 193 /*
194 * Add the descriptor in front of the skb. 194 * Add the descriptor in front of the skb.
195 */ 195 */
196 skb_push(skb, ring->desc_size); 196 skb_push(skb, queue->desc_size);
197 memset(skb->data, 0, ring->desc_size); 197 memset(skb->data, 0, queue->desc_size);
198 198
199 /* 199 /*
200 * Fill in skb descriptor 200 * Fill in skb descriptor
201 */ 201 */
202 desc = get_skb_desc(skb); 202 skbdesc = get_skb_frame_desc(skb);
203 desc->desc_len = ring->desc_size; 203 skbdesc->data = skb->data + queue->desc_size;
204 desc->data_len = skb->len - ring->desc_size; 204 skbdesc->data_len = skb->len - queue->desc_size;
205 desc->desc = skb->data; 205 skbdesc->desc = skb->data;
206 desc->data = skb->data + ring->desc_size; 206 skbdesc->desc_len = queue->desc_size;
207 desc->ring = ring; 207 skbdesc->entry = entry;
208 desc->entry = entry;
209 208
209 memcpy(&priv_tx->control, control, sizeof(priv_tx->control));
210 rt2x00lib_write_tx_desc(rt2x00dev, skb, control); 210 rt2x00lib_write_tx_desc(rt2x00dev, skb, control);
211 211
212 /* 212 /*
@@ -219,12 +219,12 @@ int rt2x00usb_write_tx_data(struct rt2x00_dev *rt2x00dev,
219 /* 219 /*
220 * Initialize URB and send the frame to the device. 220 * Initialize URB and send the frame to the device.
221 */ 221 */
222 __set_bit(ENTRY_OWNER_NIC, &entry->flags); 222 __set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
223 usb_fill_bulk_urb(entry->priv, usb_dev, usb_sndbulkpipe(usb_dev, 1), 223 usb_fill_bulk_urb(priv_tx->urb, usb_dev, usb_sndbulkpipe(usb_dev, 1),
224 skb->data, length, rt2x00usb_interrupt_txdone, entry); 224 skb->data, length, rt2x00usb_interrupt_txdone, entry);
225 usb_submit_urb(entry->priv, GFP_ATOMIC); 225 usb_submit_urb(priv_tx->urb, GFP_ATOMIC);
226 226
227 rt2x00_ring_index_inc(ring); 227 rt2x00queue_index_inc(queue, Q_INDEX);
228 228
229 return 0; 229 return 0;
230} 230}
@@ -233,20 +233,42 @@ EXPORT_SYMBOL_GPL(rt2x00usb_write_tx_data);
233/* 233/*
234 * RX data handlers. 234 * RX data handlers.
235 */ 235 */
236static struct sk_buff* rt2x00usb_alloc_rxskb(struct data_queue *queue)
237{
238 struct sk_buff *skb;
239 unsigned int frame_size;
240
241 /*
242 * As alignment we use 2 and not NET_IP_ALIGN because we need
243 * to be sure we have 2 bytes room in the head. (NET_IP_ALIGN
244 * can be 0 on some hardware). We use these 2 bytes for frame
 245 * alignment later; we assume that the chance that
 246 * header_size % 4 == 2 is bigger than header_size % 4 == 0
247 * and thus optimize alignment by reserving the 2 bytes in
248 * advance.
249 */
250 frame_size = queue->data_size + queue->desc_size;
251 skb = dev_alloc_skb(queue->desc_size + frame_size + 2);
252 if (!skb)
253 return NULL;
254
255 skb_reserve(skb, queue->desc_size + 2);
256 skb_put(skb, frame_size);
257
258 return skb;
259}
260
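The 2-byte headroom reserved above exists so that the payload behind the 802.11 header lands on a 4-byte boundary in both common header-length cases; the rxdone handler below then only has to shuffle the frame when header_size % 4 == 0. A small stand-alone illustration of that arithmetic (toy code, not driver code):

	#include <stdio.h>

	/* Offset of the payload within the buffer, modulo 4. */
	static unsigned int payload_misalign(unsigned int start, unsigned int hdrlen)
	{
		return (start + hdrlen) % 4;
	}

	int main(void)
	{
		unsigned int hdrlen;

		/* 802.11 header lengths are even, so hdrlen % 4 is either 0 or 2. */
		for (hdrlen = 24; hdrlen <= 32; hdrlen += 2) {
			/* Frame is received at offset 2 thanks to skb_reserve(skb, 2). */
			unsigned int start = 2;

			/* When the header length is a multiple of 4 the driver does
			 * skb_push(skb, 2) + memmove(), shifting the frame to offset 0. */
			if (hdrlen % 4 == 0)
				start = 0;

			printf("hdrlen %2u: frame at %u, payload %% 4 == %u\n",
			       hdrlen, start, payload_misalign(start, hdrlen));
		}
		return 0;
	}

With a 24-, 28- or 32-byte header the frame is shifted back by two bytes; with a 26- or 30-byte header the reserved two bytes already give the right payload alignment.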
236static void rt2x00usb_interrupt_rxdone(struct urb *urb) 261static void rt2x00usb_interrupt_rxdone(struct urb *urb)
237{ 262{
238 struct data_entry *entry = (struct data_entry *)urb->context; 263 struct queue_entry *entry = (struct queue_entry *)urb->context;
239 struct data_ring *ring = entry->ring; 264 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
240 struct rt2x00_dev *rt2x00dev = ring->rt2x00dev;
241 struct sk_buff *skb; 265 struct sk_buff *skb;
242 struct ieee80211_hdr *hdr; 266 struct skb_frame_desc *skbdesc;
243 struct skb_desc *skbdesc; 267 struct rxdone_entry_desc rxdesc;
244 struct rxdata_entry_desc desc;
245 int header_size; 268 int header_size;
246 int frame_size;
247 269
248 if (!test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags) || 270 if (!test_bit(DEVICE_ENABLED_RADIO, &rt2x00dev->flags) ||
249 !test_and_clear_bit(ENTRY_OWNER_NIC, &entry->flags)) 271 !test_and_clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
250 return; 272 return;
251 273
252 /* 274 /*
@@ -254,67 +276,45 @@ static void rt2x00usb_interrupt_rxdone(struct urb *urb)
254 * to be actually valid, or if the urb is signaling 276 * to be actually valid, or if the urb is signaling
255 * a problem. 277 * a problem.
256 */ 278 */
257 if (urb->actual_length < entry->ring->desc_size || urb->status) 279 if (urb->actual_length < entry->queue->desc_size || urb->status)
258 goto skip_entry; 280 goto skip_entry;
259 281
260 /* 282 /*
261 * Fill in skb descriptor 283 * Fill in skb descriptor
262 */ 284 */
263 skbdesc = get_skb_desc(entry->skb); 285 skbdesc = get_skb_frame_desc(entry->skb);
264 skbdesc->ring = ring; 286 memset(skbdesc, 0, sizeof(*skbdesc));
265 skbdesc->entry = entry; 287 skbdesc->entry = entry;
266 288
267 memset(&desc, 0, sizeof(desc)); 289 memset(&rxdesc, 0, sizeof(rxdesc));
268 rt2x00dev->ops->lib->fill_rxdone(entry, &desc); 290 rt2x00dev->ops->lib->fill_rxdone(entry, &rxdesc);
269
270 /*
271 * Allocate a new sk buffer to replace the current one.
272 * If allocation fails, we should drop the current frame
273 * so we can recycle the existing sk buffer for the new frame.
274 * As alignment we use 2 and not NET_IP_ALIGN because we need
275 * to be sure we have 2 bytes room in the head. (NET_IP_ALIGN
276 * can be 0 on some hardware). We use these 2 bytes for frame
277 * alignment later, we assume that the chance that
278 * header_size % 4 == 2 is bigger then header_size % 2 == 0
279 * and thus optimize alignment by reserving the 2 bytes in
280 * advance.
281 */
282 frame_size = entry->ring->data_size + entry->ring->desc_size;
283 skb = dev_alloc_skb(frame_size + 2);
284 if (!skb)
285 goto skip_entry;
286
287 skb_reserve(skb, 2);
288 skb_put(skb, frame_size);
289 291
290 /* 292 /*
291 * The data behind the ieee80211 header must be 293 * The data behind the ieee80211 header must be
292 * aligned on a 4 byte boundary. 294 * aligned on a 4 byte boundary.
293 */ 295 */
294 hdr = (struct ieee80211_hdr *)entry->skb->data; 296 header_size = ieee80211_get_hdrlen_from_skb(entry->skb);
295 header_size =
296 ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_control));
297
298 if (header_size % 4 == 0) { 297 if (header_size % 4 == 0) {
299 skb_push(entry->skb, 2); 298 skb_push(entry->skb, 2);
300 memmove(entry->skb->data, entry->skb->data + 2, skb->len - 2); 299 memmove(entry->skb->data, entry->skb->data + 2,
300 entry->skb->len - 2);
301 skbdesc->data = entry->skb->data;
 302 skb_trim(entry->skb, entry->skb->len - 2);
301 } 303 }
302 304
303 /* 305 /*
304 * Trim the entire buffer down to only contain the valid frame data 306 * Allocate a new sk buffer to replace the current one.
305 * excluding the device descriptor. The position of the descriptor 307 * If allocation fails, we should drop the current frame
306 * varies. This means that we should check where the descriptor is 308 * so we can recycle the existing sk buffer for the new frame.
307 * and decide if we need to pull the data pointer to exclude the
308 * device descriptor.
309 */ 309 */
310 if (skbdesc->data > skbdesc->desc) 310 skb = rt2x00usb_alloc_rxskb(entry->queue);
311 skb_pull(entry->skb, skbdesc->desc_len); 311 if (!skb)
312 skb_trim(entry->skb, desc.size); 312 goto skip_entry;
313 313
314 /* 314 /*
315 * Send the frame to rt2x00lib for further processing. 315 * Send the frame to rt2x00lib for further processing.
316 */ 316 */
317 rt2x00lib_rxdone(entry, entry->skb, &desc); 317 rt2x00lib_rxdone(entry, &rxdesc);
318 318
319 /* 319 /*
320 * Replace current entry's skb with the newly allocated one, 320 * Replace current entry's skb with the newly allocated one,
@@ -325,12 +325,12 @@ static void rt2x00usb_interrupt_rxdone(struct urb *urb)
325 urb->transfer_buffer_length = entry->skb->len; 325 urb->transfer_buffer_length = entry->skb->len;
326 326
327skip_entry: 327skip_entry:
328 if (test_bit(DEVICE_ENABLED_RADIO, &ring->rt2x00dev->flags)) { 328 if (test_bit(DEVICE_ENABLED_RADIO, &entry->queue->rt2x00dev->flags)) {
329 __set_bit(ENTRY_OWNER_NIC, &entry->flags); 329 __set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
330 usb_submit_urb(urb, GFP_ATOMIC); 330 usb_submit_urb(urb, GFP_ATOMIC);
331 } 331 }
332 332
333 rt2x00_ring_index_inc(ring); 333 rt2x00queue_index_inc(entry->queue, Q_INDEX);
334} 334}
335 335
336/* 336/*
@@ -338,18 +338,44 @@ skip_entry:
338 */ 338 */
339void rt2x00usb_disable_radio(struct rt2x00_dev *rt2x00dev) 339void rt2x00usb_disable_radio(struct rt2x00_dev *rt2x00dev)
340{ 340{
341 struct data_ring *ring; 341 struct queue_entry_priv_usb_rx *priv_rx;
342 struct queue_entry_priv_usb_tx *priv_tx;
343 struct queue_entry_priv_usb_bcn *priv_bcn;
344 struct data_queue *queue;
342 unsigned int i; 345 unsigned int i;
343 346
344 rt2x00usb_vendor_request_sw(rt2x00dev, USB_RX_CONTROL, 0x0000, 0x0000, 347 rt2x00usb_vendor_request_sw(rt2x00dev, USB_RX_CONTROL, 0x0000, 0x0000,
345 REGISTER_TIMEOUT); 348 REGISTER_TIMEOUT);
346 349
347 /* 350 /*
348 * Cancel all rings. 351 * Cancel all queues.
349 */ 352 */
350 ring_for_each(rt2x00dev, ring) { 353 for (i = 0; i < rt2x00dev->rx->limit; i++) {
351 for (i = 0; i < ring->stats.limit; i++) 354 priv_rx = rt2x00dev->rx->entries[i].priv_data;
352 usb_kill_urb(ring->entry[i].priv); 355 usb_kill_urb(priv_rx->urb);
356 }
357
358 tx_queue_for_each(rt2x00dev, queue) {
359 for (i = 0; i < queue->limit; i++) {
360 priv_tx = queue->entries[i].priv_data;
361 usb_kill_urb(priv_tx->urb);
362 }
363 }
364
365 for (i = 0; i < rt2x00dev->bcn->limit; i++) {
366 priv_bcn = rt2x00dev->bcn->entries[i].priv_data;
367 usb_kill_urb(priv_bcn->urb);
368
369 if (priv_bcn->guardian_urb)
370 usb_kill_urb(priv_bcn->guardian_urb);
371 }
372
373 if (!test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags))
374 return;
375
376 for (i = 0; i < rt2x00dev->bcn[1].limit; i++) {
377 priv_tx = rt2x00dev->bcn[1].entries[i].priv_data;
378 usb_kill_urb(priv_tx->urb);
353 } 379 }
354} 380}
355EXPORT_SYMBOL_GPL(rt2x00usb_disable_radio); 381EXPORT_SYMBOL_GPL(rt2x00usb_disable_radio);
@@ -358,64 +384,108 @@ EXPORT_SYMBOL_GPL(rt2x00usb_disable_radio);
358 * Device initialization handlers. 384 * Device initialization handlers.
359 */ 385 */
360void rt2x00usb_init_rxentry(struct rt2x00_dev *rt2x00dev, 386void rt2x00usb_init_rxentry(struct rt2x00_dev *rt2x00dev,
361 struct data_entry *entry) 387 struct queue_entry *entry)
362{ 388{
363 struct usb_device *usb_dev = 389 struct usb_device *usb_dev = rt2x00dev_usb_dev(rt2x00dev);
364 interface_to_usbdev(rt2x00dev_usb(rt2x00dev)); 390 struct queue_entry_priv_usb_rx *priv_rx = entry->priv_data;
365 391
366 usb_fill_bulk_urb(entry->priv, usb_dev, 392 usb_fill_bulk_urb(priv_rx->urb, usb_dev,
367 usb_rcvbulkpipe(usb_dev, 1), 393 usb_rcvbulkpipe(usb_dev, 1),
368 entry->skb->data, entry->skb->len, 394 entry->skb->data, entry->skb->len,
369 rt2x00usb_interrupt_rxdone, entry); 395 rt2x00usb_interrupt_rxdone, entry);
370 396
371 __set_bit(ENTRY_OWNER_NIC, &entry->flags); 397 __set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
372 usb_submit_urb(entry->priv, GFP_ATOMIC); 398 usb_submit_urb(priv_rx->urb, GFP_ATOMIC);
373} 399}
374EXPORT_SYMBOL_GPL(rt2x00usb_init_rxentry); 400EXPORT_SYMBOL_GPL(rt2x00usb_init_rxentry);
375 401
376void rt2x00usb_init_txentry(struct rt2x00_dev *rt2x00dev, 402void rt2x00usb_init_txentry(struct rt2x00_dev *rt2x00dev,
377 struct data_entry *entry) 403 struct queue_entry *entry)
378{ 404{
379 entry->flags = 0; 405 entry->flags = 0;
380} 406}
381EXPORT_SYMBOL_GPL(rt2x00usb_init_txentry); 407EXPORT_SYMBOL_GPL(rt2x00usb_init_txentry);
382 408
383static int rt2x00usb_alloc_urb(struct rt2x00_dev *rt2x00dev, 409static int rt2x00usb_alloc_urb(struct rt2x00_dev *rt2x00dev,
384 struct data_ring *ring) 410 struct data_queue *queue)
385{ 411{
412 struct queue_entry_priv_usb_rx *priv_rx;
413 struct queue_entry_priv_usb_tx *priv_tx;
414 struct queue_entry_priv_usb_bcn *priv_bcn;
415 struct urb *urb;
416 unsigned int guardian =
417 test_bit(DRIVER_REQUIRE_BEACON_GUARD, &rt2x00dev->flags);
386 unsigned int i; 418 unsigned int i;
387 419
388 /* 420 /*
389 * Allocate the URB's 421 * Allocate the URB's
390 */ 422 */
391 for (i = 0; i < ring->stats.limit; i++) { 423 for (i = 0; i < queue->limit; i++) {
392 ring->entry[i].priv = usb_alloc_urb(0, GFP_KERNEL); 424 urb = usb_alloc_urb(0, GFP_KERNEL);
393 if (!ring->entry[i].priv) 425 if (!urb)
394 return -ENOMEM; 426 return -ENOMEM;
427
428 if (queue->qid == QID_RX) {
429 priv_rx = queue->entries[i].priv_data;
430 priv_rx->urb = urb;
431 } else if (queue->qid == QID_MGMT && guardian) {
432 priv_bcn = queue->entries[i].priv_data;
433 priv_bcn->urb = urb;
434
435 urb = usb_alloc_urb(0, GFP_KERNEL);
436 if (!urb)
437 return -ENOMEM;
438
439 priv_bcn->guardian_urb = urb;
440 } else {
441 priv_tx = queue->entries[i].priv_data;
442 priv_tx->urb = urb;
443 }
395 } 444 }
396 445
397 return 0; 446 return 0;
398} 447}
399 448
400static void rt2x00usb_free_urb(struct rt2x00_dev *rt2x00dev, 449static void rt2x00usb_free_urb(struct rt2x00_dev *rt2x00dev,
401 struct data_ring *ring) 450 struct data_queue *queue)
402{ 451{
452 struct queue_entry_priv_usb_rx *priv_rx;
453 struct queue_entry_priv_usb_tx *priv_tx;
454 struct queue_entry_priv_usb_bcn *priv_bcn;
455 struct urb *urb;
456 unsigned int guardian =
457 test_bit(DRIVER_REQUIRE_BEACON_GUARD, &rt2x00dev->flags);
403 unsigned int i; 458 unsigned int i;
404 459
405 if (!ring->entry) 460 if (!queue->entries)
406 return; 461 return;
407 462
408 for (i = 0; i < ring->stats.limit; i++) { 463 for (i = 0; i < queue->limit; i++) {
409 usb_kill_urb(ring->entry[i].priv); 464 if (queue->qid == QID_RX) {
410 usb_free_urb(ring->entry[i].priv); 465 priv_rx = queue->entries[i].priv_data;
411 if (ring->entry[i].skb) 466 urb = priv_rx->urb;
412 kfree_skb(ring->entry[i].skb); 467 } else if (queue->qid == QID_MGMT && guardian) {
468 priv_bcn = queue->entries[i].priv_data;
469
470 usb_kill_urb(priv_bcn->guardian_urb);
471 usb_free_urb(priv_bcn->guardian_urb);
472
473 urb = priv_bcn->urb;
474 } else {
475 priv_tx = queue->entries[i].priv_data;
476 urb = priv_tx->urb;
477 }
478
479 usb_kill_urb(urb);
480 usb_free_urb(urb);
481 if (queue->entries[i].skb)
482 kfree_skb(queue->entries[i].skb);
413 } 483 }
414} 484}
415 485
416int rt2x00usb_initialize(struct rt2x00_dev *rt2x00dev) 486int rt2x00usb_initialize(struct rt2x00_dev *rt2x00dev)
417{ 487{
418 struct data_ring *ring; 488 struct data_queue *queue;
419 struct sk_buff *skb; 489 struct sk_buff *skb;
420 unsigned int entry_size; 490 unsigned int entry_size;
421 unsigned int i; 491 unsigned int i;
@@ -424,25 +494,22 @@ int rt2x00usb_initialize(struct rt2x00_dev *rt2x00dev)
424 /* 494 /*
425 * Allocate DMA 495 * Allocate DMA
426 */ 496 */
427 ring_for_each(rt2x00dev, ring) { 497 queue_for_each(rt2x00dev, queue) {
428 status = rt2x00usb_alloc_urb(rt2x00dev, ring); 498 status = rt2x00usb_alloc_urb(rt2x00dev, queue);
429 if (status) 499 if (status)
430 goto exit; 500 goto exit;
431 } 501 }
432 502
433 /* 503 /*
434 * For the RX ring, skb's should be allocated. 504 * For the RX queue, skb's should be allocated.
435 */ 505 */
436 entry_size = rt2x00dev->rx->data_size + rt2x00dev->rx->desc_size; 506 entry_size = rt2x00dev->rx->data_size + rt2x00dev->rx->desc_size;
437 for (i = 0; i < rt2x00dev->rx->stats.limit; i++) { 507 for (i = 0; i < rt2x00dev->rx->limit; i++) {
438 skb = dev_alloc_skb(NET_IP_ALIGN + entry_size); 508 skb = rt2x00usb_alloc_rxskb(rt2x00dev->rx);
439 if (!skb) 509 if (!skb)
440 goto exit; 510 goto exit;
441 511
442 skb_reserve(skb, NET_IP_ALIGN); 512 rt2x00dev->rx->entries[i].skb = skb;
443 skb_put(skb, entry_size);
444
445 rt2x00dev->rx->entry[i].skb = skb;
446 } 513 }
447 514
448 return 0; 515 return 0;
@@ -456,10 +523,10 @@ EXPORT_SYMBOL_GPL(rt2x00usb_initialize);
456 523
457void rt2x00usb_uninitialize(struct rt2x00_dev *rt2x00dev) 524void rt2x00usb_uninitialize(struct rt2x00_dev *rt2x00dev)
458{ 525{
459 struct data_ring *ring; 526 struct data_queue *queue;
460 527
461 ring_for_each(rt2x00dev, ring) 528 queue_for_each(rt2x00dev, queue)
462 rt2x00usb_free_urb(rt2x00dev, ring); 529 rt2x00usb_free_urb(rt2x00dev, queue);
463} 530}
464EXPORT_SYMBOL_GPL(rt2x00usb_uninitialize); 531EXPORT_SYMBOL_GPL(rt2x00usb_uninitialize);
465 532
@@ -474,14 +541,14 @@ static void rt2x00usb_free_reg(struct rt2x00_dev *rt2x00dev)
474 kfree(rt2x00dev->eeprom); 541 kfree(rt2x00dev->eeprom);
475 rt2x00dev->eeprom = NULL; 542 rt2x00dev->eeprom = NULL;
476 543
477 kfree(rt2x00dev->csr_cache); 544 kfree(rt2x00dev->csr.cache);
478 rt2x00dev->csr_cache = NULL; 545 rt2x00dev->csr.cache = NULL;
479} 546}
480 547
481static int rt2x00usb_alloc_reg(struct rt2x00_dev *rt2x00dev) 548static int rt2x00usb_alloc_reg(struct rt2x00_dev *rt2x00dev)
482{ 549{
483 rt2x00dev->csr_cache = kzalloc(CSR_CACHE_SIZE, GFP_KERNEL); 550 rt2x00dev->csr.cache = kzalloc(CSR_CACHE_SIZE, GFP_KERNEL);
484 if (!rt2x00dev->csr_cache) 551 if (!rt2x00dev->csr.cache)
485 goto exit; 552 goto exit;
486 553
487 rt2x00dev->eeprom = kzalloc(rt2x00dev->ops->eeprom_size, GFP_KERNEL); 554 rt2x00dev->eeprom = kzalloc(rt2x00dev->ops->eeprom_size, GFP_KERNEL);
@@ -627,9 +694,9 @@ EXPORT_SYMBOL_GPL(rt2x00usb_resume);
627#endif /* CONFIG_PM */ 694#endif /* CONFIG_PM */
628 695
629/* 696/*
630 * rt2x00pci module information. 697 * rt2x00usb module information.
631 */ 698 */
632MODULE_AUTHOR(DRV_PROJECT); 699MODULE_AUTHOR(DRV_PROJECT);
633MODULE_VERSION(DRV_VERSION); 700MODULE_VERSION(DRV_VERSION);
634MODULE_DESCRIPTION("rt2x00 library"); 701MODULE_DESCRIPTION("rt2x00 usb library");
635MODULE_LICENSE("GPL"); 702MODULE_LICENSE("GPL");
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.h b/drivers/net/wireless/rt2x00/rt2x00usb.h
index e40df4050cd0..11e55180cbaf 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.h
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.h
@@ -1,5 +1,5 @@
1/* 1/*
2 Copyright (C) 2004 - 2007 rt2x00 SourceForge Project 2 Copyright (C) 2004 - 2008 rt2x00 SourceForge Project
3 <http://rt2x00.serialmonkey.com> 3 <http://rt2x00.serialmonkey.com>
4 4
5 This program is free software; you can redistribute it and/or modify 5 This program is free software; you can redistribute it and/or modify
@@ -60,34 +60,47 @@
60#define USB_VENDOR_REQUEST_IN ( USB_DIR_IN | USB_VENDOR_REQUEST ) 60#define USB_VENDOR_REQUEST_IN ( USB_DIR_IN | USB_VENDOR_REQUEST )
61#define USB_VENDOR_REQUEST_OUT ( USB_DIR_OUT | USB_VENDOR_REQUEST ) 61#define USB_VENDOR_REQUEST_OUT ( USB_DIR_OUT | USB_VENDOR_REQUEST )
62 62
63/* 63/**
64 * USB vendor commands. 64 * enum rt2x00usb_vendor_request: USB vendor commands.
65 */ 65 */
66#define USB_DEVICE_MODE 0x01 66enum rt2x00usb_vendor_request {
67#define USB_SINGLE_WRITE 0x02 67 USB_DEVICE_MODE = 1,
68#define USB_SINGLE_READ 0x03 68 USB_SINGLE_WRITE = 2,
69#define USB_MULTI_WRITE 0x06 69 USB_SINGLE_READ = 3,
70#define USB_MULTI_READ 0x07 70 USB_MULTI_WRITE = 6,
71#define USB_EEPROM_WRITE 0x08 71 USB_MULTI_READ = 7,
72#define USB_EEPROM_READ 0x09 72 USB_EEPROM_WRITE = 8,
73#define USB_LED_CONTROL 0x0a /* RT73USB */ 73 USB_EEPROM_READ = 9,
74#define USB_RX_CONTROL 0x0c 74 USB_LED_CONTROL = 10, /* RT73USB */
75 USB_RX_CONTROL = 12,
76};
75 77
76/* 78/**
77 * Device modes offset 79 * enum rt2x00usb_mode_offset: Device modes offset.
78 */ 80 */
79#define USB_MODE_RESET 0x01 81enum rt2x00usb_mode_offset {
80#define USB_MODE_UNPLUG 0x02 82 USB_MODE_RESET = 1,
81#define USB_MODE_FUNCTION 0x03 83 USB_MODE_UNPLUG = 2,
82#define USB_MODE_TEST 0x04 84 USB_MODE_FUNCTION = 3,
83#define USB_MODE_SLEEP 0x07 /* RT73USB */ 85 USB_MODE_TEST = 4,
84#define USB_MODE_FIRMWARE 0x08 /* RT73USB */ 86 USB_MODE_SLEEP = 7, /* RT73USB */
85#define USB_MODE_WAKEUP 0x09 /* RT73USB */ 87 USB_MODE_FIRMWARE = 8, /* RT73USB */
88 USB_MODE_WAKEUP = 9, /* RT73USB */
89};
86 90
87/* 91/**
88 * Used to read/write from/to the device. 92 * rt2x00usb_vendor_request - Send register command to device
93 * @rt2x00dev: Pointer to &struct rt2x00_dev
94 * @request: USB vendor command (See &enum rt2x00usb_vendor_request)
95 * @requesttype: Request type &USB_VENDOR_REQUEST_*
96 * @offset: Register offset to perform action on
97 * @value: Value to write to device
98 * @buffer: Buffer where information will be read/written to by device
99 * @buffer_length: Size of &buffer
100 * @timeout: Operation timeout
101 *
89 * This is the main function to communicate with the device, 102 * This is the main function to communicate with the device,
90 * the buffer argument _must_ either be NULL or point to 103 * the &buffer argument _must_ either be NULL or point to
91 * a buffer allocated by kmalloc. Failure to do so can lead 104 * a buffer allocated by kmalloc. Failure to do so can lead
92 * to unexpected behavior depending on the architecture. 105 * to unexpected behavior depending on the architecture.
93 */ 106 */
@@ -97,13 +110,21 @@ int rt2x00usb_vendor_request(struct rt2x00_dev *rt2x00dev,
97 void *buffer, const u16 buffer_length, 110 void *buffer, const u16 buffer_length,
98 const int timeout); 111 const int timeout);
99 112
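As the kernel-doc above stresses, the buffer handed to rt2x00usb_vendor_request() must come from kmalloc (the USB core may DMA straight into it on some architectures), never from the stack. A hypothetical caller sketch under that constraint, assuming the declarations from rt2x00usb.h are in scope; example_read_register() and its use of USB_MULTI_READ are illustrative, not taken from the patch:

	static int example_read_register(struct rt2x00_dev *rt2x00dev,
					 const u16 offset, u32 *value)
	{
		__le32 *reg;
		int status;

		reg = kmalloc(sizeof(*reg), GFP_KERNEL);	/* DMA-safe, not on stack */
		if (!reg)
			return -ENOMEM;

		status = rt2x00usb_vendor_request(rt2x00dev, USB_MULTI_READ,
						  USB_VENDOR_REQUEST_IN, offset, 0,
						  reg, sizeof(*reg), REGISTER_TIMEOUT);
		if (!status)
			*value = le32_to_cpu(*reg);

		kfree(reg);
		return status;
	}

Inside the driver, register access normally goes through the buffered helpers below, which copy through the preallocated csr.cache for exactly this reason.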
100/* 113/**
101 * Used to read/write from/to the device. 114 * rt2x00usb_vendor_request_buff - Send register command to device (buffered)
115 * @rt2x00dev: Pointer to &struct rt2x00_dev
116 * @request: USB vendor command (See &enum rt2x00usb_vendor_request)
117 * @requesttype: Request type &USB_VENDOR_REQUEST_*
118 * @offset: Register offset to perform action on
119 * @buffer: Buffer where information will be read/written to by device
120 * @buffer_length: Size of &buffer
121 * @timeout: Operation timeout
122 *
102 * This function will use a previously with kmalloc allocated cache 123 * This function will use a previously with kmalloc allocated cache
103 * to communicate with the device. The contents of the buffer pointer 124 * to communicate with the device. The contents of the buffer pointer
104 * will be copied to this cache when writing, or read from the cache 125 * will be copied to this cache when writing, or read from the cache
105 * when reading. 126 * when reading.
106 * Buffers send to rt2x00usb_vendor_request _must_ be allocated with 127 * Buffers sent to &rt2x00usb_vendor_request _must_ be allocated with
107 * kmalloc. Hence the reason for using a previously allocated cache 128 * kmalloc. Hence the reason for using a previously allocated cache
108 * which has been allocated properly. 129 * which has been allocated properly.
109 */ 130 */
@@ -112,15 +133,32 @@ int rt2x00usb_vendor_request_buff(struct rt2x00_dev *rt2x00dev,
112 const u16 offset, void *buffer, 133 const u16 offset, void *buffer,
113 const u16 buffer_length, const int timeout); 134 const u16 buffer_length, const int timeout);
114 135
115/* 136/**
116 * A version of rt2x00usb_vendor_request_buff which must be called 137 * rt2x00usb_vendor_req_buff_lock - Send register command to device (buffered)
117 * if the usb_cache_mutex is already held. */ 138 * @rt2x00dev: Pointer to &struct rt2x00_dev
139 * @request: USB vendor command (See &enum rt2x00usb_vendor_request)
140 * @requesttype: Request type &USB_VENDOR_REQUEST_*
141 * @offset: Register offset to perform action on
142 * @buffer: Buffer where information will be read/written to by device
143 * @buffer_length: Size of &buffer
144 * @timeout: Operation timeout
145 *
146 * A version of &rt2x00usb_vendor_request_buff which must be called
147 * if the usb_cache_mutex is already held.
148 */
118int rt2x00usb_vendor_req_buff_lock(struct rt2x00_dev *rt2x00dev, 149int rt2x00usb_vendor_req_buff_lock(struct rt2x00_dev *rt2x00dev,
119 const u8 request, const u8 requesttype, 150 const u8 request, const u8 requesttype,
120 const u16 offset, void *buffer, 151 const u16 offset, void *buffer,
121 const u16 buffer_length, const int timeout); 152 const u16 buffer_length, const int timeout);
122 153
123/* 154/**
155 * rt2x00usb_vendor_request_sw - Send single register command to device
156 * @rt2x00dev: Pointer to &struct rt2x00_dev
157 * @request: USB vendor command (See &enum rt2x00usb_vendor_request)
158 * @offset: Register offset to perform action on
159 * @value: Value to write to device
160 * @timeout: Operation timeout
161 *
124 * Simple wrapper around rt2x00usb_vendor_request to write a single 162 * Simple wrapper around rt2x00usb_vendor_request to write a single
125 * command to the device. Since we don't use the buffer argument we 163 * command to the device. Since we don't use the buffer argument we
126 * don't have to worry about kmalloc here. 164 * don't have to worry about kmalloc here.
@@ -136,7 +174,12 @@ static inline int rt2x00usb_vendor_request_sw(struct rt2x00_dev *rt2x00dev,
136 value, NULL, 0, timeout); 174 value, NULL, 0, timeout);
137} 175}
138 176
139/* 177/**
178 * rt2x00usb_eeprom_read - Read eeprom from device
179 * @rt2x00dev: Pointer to &struct rt2x00_dev
180 * @eeprom: Pointer to eeprom array to store the information in
181 * @length: Number of bytes to read from the eeprom
182 *
140 * Simple wrapper around rt2x00usb_vendor_request to read the eeprom 183 * Simple wrapper around rt2x00usb_vendor_request to read the eeprom
141 * from the device. Note that the eeprom argument _must_ be allocated using 184 * from the device. Note that the eeprom argument _must_ be allocated using
142 * kmalloc for correct handling inside the kernel USB layer. 185 * kmalloc for correct handling inside the kernel USB layer.
@@ -147,8 +190,8 @@ static inline int rt2x00usb_eeprom_read(struct rt2x00_dev *rt2x00dev,
147 int timeout = REGISTER_TIMEOUT * (lenght / sizeof(u16)); 190 int timeout = REGISTER_TIMEOUT * (lenght / sizeof(u16));
148 191
149 return rt2x00usb_vendor_request(rt2x00dev, USB_EEPROM_READ, 192 return rt2x00usb_vendor_request(rt2x00dev, USB_EEPROM_READ,
150 USB_VENDOR_REQUEST_IN, 0x0000, 193 USB_VENDOR_REQUEST_IN, 0, 0,
151 0x0000, eeprom, lenght, timeout); 194 eeprom, lenght, timeout);
152} 195}
153 196
154/* 197/*
@@ -160,16 +203,58 @@ void rt2x00usb_disable_radio(struct rt2x00_dev *rt2x00dev);
160 * TX data handlers. 203 * TX data handlers.
161 */ 204 */
162int rt2x00usb_write_tx_data(struct rt2x00_dev *rt2x00dev, 205int rt2x00usb_write_tx_data(struct rt2x00_dev *rt2x00dev,
163 struct data_ring *ring, struct sk_buff *skb, 206 struct data_queue *queue, struct sk_buff *skb,
164 struct ieee80211_tx_control *control); 207 struct ieee80211_tx_control *control);
165 208
209/**
210 * struct queue_entry_priv_usb_rx: Per RX entry USB specific information
211 *
212 * @urb: Urb structure used for device communication.
213 */
214struct queue_entry_priv_usb_rx {
215 struct urb *urb;
216};
217
218/**
219 * struct queue_entry_priv_usb_tx: Per TX entry USB specific information
220 *
221 * @urb: Urb structure used for device communication.
222 * @control: mac80211 control structure used to transmit data.
223 */
224struct queue_entry_priv_usb_tx {
225 struct urb *urb;
226
227 struct ieee80211_tx_control control;
228};
229
230/**
231 * struct queue_entry_priv_usb_bcn: Per TX entry USB specific information
232 *
233 * The first section should match &struct queue_entry_priv_usb_tx exactly.
234 * rt2500usb can use this structure to send a guardian byte when working
235 * with beacons.
236 *
237 * @urb: Urb structure used for device communication.
238 * @control: mac80211 control structure used to transmit data.
239 * @guardian_data: Set to 0, used for sending the guardian data.
240 * @guardian_urb: Urb structure used to send the guardian data.
241 */
242struct queue_entry_priv_usb_bcn {
243 struct urb *urb;
244
245 struct ieee80211_tx_control control;
246
247 unsigned int guardian_data;
248 struct urb *guardian_urb;
249};
250
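Because generic TX code can treat a beacon entry's private data as a struct queue_entry_priv_usb_tx, the shared leading members must stay at identical offsets. A compile-time check along these lines could express that invariant (illustrative only, not part of the patch; assumes linux/kernel.h for BUILD_BUG_ON):

	static inline void rt2x00usb_check_bcn_layout(void)
	{
		BUILD_BUG_ON(offsetof(struct queue_entry_priv_usb_bcn, urb) !=
			     offsetof(struct queue_entry_priv_usb_tx, urb));
		BUILD_BUG_ON(offsetof(struct queue_entry_priv_usb_bcn, control) !=
			     offsetof(struct queue_entry_priv_usb_tx, control));
	}

The ATIM queue handling in rt2x00usb_disable_radio() above already leans on this, treating bcn[1] entries as plain TX private data.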
166/* 251/*
167 * Device initialization handlers. 252 * Device initialization handlers.
168 */ 253 */
169void rt2x00usb_init_rxentry(struct rt2x00_dev *rt2x00dev, 254void rt2x00usb_init_rxentry(struct rt2x00_dev *rt2x00dev,
170 struct data_entry *entry); 255 struct queue_entry *entry);
171void rt2x00usb_init_txentry(struct rt2x00_dev *rt2x00dev, 256void rt2x00usb_init_txentry(struct rt2x00_dev *rt2x00dev,
172 struct data_entry *entry); 257 struct queue_entry *entry);
173int rt2x00usb_initialize(struct rt2x00_dev *rt2x00dev); 258int rt2x00usb_initialize(struct rt2x00_dev *rt2x00dev);
174void rt2x00usb_uninitialize(struct rt2x00_dev *rt2x00dev); 259void rt2x00usb_uninitialize(struct rt2x00_dev *rt2x00dev);
175 260
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
index ad2e7d53b3da..468a31c8c113 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/rt2x00/rt61pci.c
@@ -1,5 +1,5 @@
1/* 1/*
2 Copyright (C) 2004 - 2007 rt2x00 SourceForge Project 2 Copyright (C) 2004 - 2008 rt2x00 SourceForge Project
3 <http://rt2x00.serialmonkey.com> 3 <http://rt2x00.serialmonkey.com>
4 4
5 This program is free software; you can redistribute it and/or modify 5 This program is free software; you can redistribute it and/or modify
@@ -24,6 +24,7 @@
24 Supported chipsets: RT2561, RT2561s, RT2661. 24 Supported chipsets: RT2561, RT2561s, RT2661.
25 */ 25 */
26 26
27#include <linux/crc-itu-t.h>
27#include <linux/delay.h> 28#include <linux/delay.h>
28#include <linux/etherdevice.h> 29#include <linux/etherdevice.h>
29#include <linux/init.h> 30#include <linux/init.h>
@@ -155,6 +156,12 @@ rf_write:
155 rt2x00_rf_write(rt2x00dev, word, value); 156 rt2x00_rf_write(rt2x00dev, word, value);
156} 157}
157 158
159#ifdef CONFIG_RT61PCI_LEDS
160/*
 161 * This function is only called from rt61pci_led_brightness();
162 * make gcc happy by placing this function inside the
163 * same ifdef statement as the caller.
164 */
158static void rt61pci_mcu_request(struct rt2x00_dev *rt2x00dev, 165static void rt61pci_mcu_request(struct rt2x00_dev *rt2x00dev,
159 const u8 command, const u8 token, 166 const u8 command, const u8 token,
160 const u8 arg0, const u8 arg1) 167 const u8 arg0, const u8 arg1)
@@ -181,6 +188,7 @@ static void rt61pci_mcu_request(struct rt2x00_dev *rt2x00dev,
181 rt2x00_set_field32(&reg, HOST_CMD_CSR_INTERRUPT_MCU, 1); 188 rt2x00_set_field32(&reg, HOST_CMD_CSR_INTERRUPT_MCU, 1);
182 rt2x00pci_register_write(rt2x00dev, HOST_CMD_CSR, reg); 189 rt2x00pci_register_write(rt2x00dev, HOST_CMD_CSR, reg);
183} 190}
191#endif /* CONFIG_RT61PCI_LEDS */
184 192
185static void rt61pci_eepromregister_read(struct eeprom_93cx6 *eeprom) 193static void rt61pci_eepromregister_read(struct eeprom_93cx6 *eeprom)
186{ 194{
@@ -262,82 +270,162 @@ static int rt61pci_rfkill_poll(struct rt2x00_dev *rt2x00dev)
262 u32 reg; 270 u32 reg;
263 271
264 rt2x00pci_register_read(rt2x00dev, MAC_CSR13, &reg); 272 rt2x00pci_register_read(rt2x00dev, MAC_CSR13, &reg);
265 return rt2x00_get_field32(reg, MAC_CSR13_BIT5);; 273 return rt2x00_get_field32(reg, MAC_CSR13_BIT5);
266} 274}
267#else 275#else
268#define rt61pci_rfkill_poll NULL 276#define rt61pci_rfkill_poll NULL
269#endif /* CONFIG_RT61PCI_RFKILL */ 277#endif /* CONFIG_RT61PCI_RFKILL */
270 278
271/* 279#ifdef CONFIG_RT61PCI_LEDS
272 * Configuration handlers. 280static void rt61pci_brightness_set(struct led_classdev *led_cdev,
273 */ 281 enum led_brightness brightness)
274static void rt61pci_config_mac_addr(struct rt2x00_dev *rt2x00dev, __le32 *mac)
275{ 282{
276 u32 tmp; 283 struct rt2x00_led *led =
277 284 container_of(led_cdev, struct rt2x00_led, led_dev);
278 tmp = le32_to_cpu(mac[1]); 285 unsigned int enabled = brightness != LED_OFF;
279 rt2x00_set_field32(&tmp, MAC_CSR3_UNICAST_TO_ME_MASK, 0xff); 286 unsigned int a_mode =
280 mac[1] = cpu_to_le32(tmp); 287 (enabled && led->rt2x00dev->curr_band == IEEE80211_BAND_5GHZ);
281 288 unsigned int bg_mode =
282 rt2x00pci_register_multiwrite(rt2x00dev, MAC_CSR2, mac, 289 (enabled && led->rt2x00dev->curr_band == IEEE80211_BAND_2GHZ);
283 (2 * sizeof(__le32))); 290
291 if (led->type == LED_TYPE_RADIO) {
292 rt2x00_set_field16(&led->rt2x00dev->led_mcu_reg,
293 MCU_LEDCS_RADIO_STATUS, enabled);
294
295 rt61pci_mcu_request(led->rt2x00dev, MCU_LED, 0xff,
296 (led->rt2x00dev->led_mcu_reg & 0xff),
297 ((led->rt2x00dev->led_mcu_reg >> 8)));
298 } else if (led->type == LED_TYPE_ASSOC) {
299 rt2x00_set_field16(&led->rt2x00dev->led_mcu_reg,
300 MCU_LEDCS_LINK_BG_STATUS, bg_mode);
301 rt2x00_set_field16(&led->rt2x00dev->led_mcu_reg,
302 MCU_LEDCS_LINK_A_STATUS, a_mode);
303
304 rt61pci_mcu_request(led->rt2x00dev, MCU_LED, 0xff,
305 (led->rt2x00dev->led_mcu_reg & 0xff),
306 ((led->rt2x00dev->led_mcu_reg >> 8)));
307 } else if (led->type == LED_TYPE_QUALITY) {
308 /*
309 * The brightness is divided into 6 levels (0 - 5),
 310 * which means we need to convert the brightness
311 * argument into the matching level within that range.
312 */
313 rt61pci_mcu_request(led->rt2x00dev, MCU_LED_STRENGTH, 0xff,
314 brightness / (LED_FULL / 6), 0);
315 }
284} 316}
285 317
286static void rt61pci_config_bssid(struct rt2x00_dev *rt2x00dev, __le32 *bssid) 318static int rt61pci_blink_set(struct led_classdev *led_cdev,
319 unsigned long *delay_on,
320 unsigned long *delay_off)
287{ 321{
288 u32 tmp; 322 struct rt2x00_led *led =
323 container_of(led_cdev, struct rt2x00_led, led_dev);
324 u32 reg;
289 325
290 tmp = le32_to_cpu(bssid[1]); 326 rt2x00pci_register_read(led->rt2x00dev, MAC_CSR14, &reg);
291 rt2x00_set_field32(&tmp, MAC_CSR5_BSS_ID_MASK, 3); 327 rt2x00_set_field32(&reg, MAC_CSR14_ON_PERIOD, *delay_on);
292 bssid[1] = cpu_to_le32(tmp); 328 rt2x00_set_field32(&reg, MAC_CSR14_OFF_PERIOD, *delay_off);
329 rt2x00pci_register_write(led->rt2x00dev, MAC_CSR14, reg);
293 330
294 rt2x00pci_register_multiwrite(rt2x00dev, MAC_CSR4, bssid, 331 return 0;
295 (2 * sizeof(__le32)));
296} 332}
333#endif /* CONFIG_RT61PCI_LEDS */
297 334
298static void rt61pci_config_type(struct rt2x00_dev *rt2x00dev, const int type, 335/*
299 const int tsf_sync) 336 * Configuration handlers.
337 */
338static void rt61pci_config_filter(struct rt2x00_dev *rt2x00dev,
339 const unsigned int filter_flags)
300{ 340{
301 u32 reg; 341 u32 reg;
302 342
303 /* 343 /*
304 * Clear current synchronisation setup. 344 * Start configuration steps.
305 * For the Beacon base registers we only need to clear 345 * Note that the version error will always be dropped
306 * the first byte since that byte contains the VALID and OWNER 346 * and broadcast frames will always be accepted since
307 * bits which (when set to 0) will invalidate the entire beacon. 347 * there is no filter for it at this time.
308 */ 348 */
309 rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, 0); 349 rt2x00pci_register_read(rt2x00dev, TXRX_CSR0, &reg);
310 rt2x00pci_register_write(rt2x00dev, HW_BEACON_BASE0, 0); 350 rt2x00_set_field32(&reg, TXRX_CSR0_DROP_CRC,
311 rt2x00pci_register_write(rt2x00dev, HW_BEACON_BASE1, 0); 351 !(filter_flags & FIF_FCSFAIL));
312 rt2x00pci_register_write(rt2x00dev, HW_BEACON_BASE2, 0); 352 rt2x00_set_field32(&reg, TXRX_CSR0_DROP_PHYSICAL,
313 rt2x00pci_register_write(rt2x00dev, HW_BEACON_BASE3, 0); 353 !(filter_flags & FIF_PLCPFAIL));
354 rt2x00_set_field32(&reg, TXRX_CSR0_DROP_CONTROL,
355 !(filter_flags & FIF_CONTROL));
356 rt2x00_set_field32(&reg, TXRX_CSR0_DROP_NOT_TO_ME,
357 !(filter_flags & FIF_PROMISC_IN_BSS));
358 rt2x00_set_field32(&reg, TXRX_CSR0_DROP_TO_DS,
359 !(filter_flags & FIF_PROMISC_IN_BSS) &&
360 !rt2x00dev->intf_ap_count);
361 rt2x00_set_field32(&reg, TXRX_CSR0_DROP_VERSION_ERROR, 1);
362 rt2x00_set_field32(&reg, TXRX_CSR0_DROP_MULTICAST,
363 !(filter_flags & FIF_ALLMULTI));
364 rt2x00_set_field32(&reg, TXRX_CSR0_DROP_BROADCAST, 0);
365 rt2x00_set_field32(&reg, TXRX_CSR0_DROP_ACK_CTS,
366 !(filter_flags & FIF_CONTROL));
367 rt2x00pci_register_write(rt2x00dev, TXRX_CSR0, reg);
368}
314 369
315 /* 370static void rt61pci_config_intf(struct rt2x00_dev *rt2x00dev,
316 * Enable synchronisation. 371 struct rt2x00_intf *intf,
317 */ 372 struct rt2x00intf_conf *conf,
318 rt2x00pci_register_read(rt2x00dev, TXRX_CSR9, &reg); 373 const unsigned int flags)
319 rt2x00_set_field32(&reg, TXRX_CSR9_TSF_TICKING, 1); 374{
320 rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 375 unsigned int beacon_base;
321 (tsf_sync == TSF_SYNC_BEACON)); 376 u32 reg;
322 rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 0); 377
323 rt2x00_set_field32(&reg, TXRX_CSR9_TSF_SYNC, tsf_sync); 378 if (flags & CONFIG_UPDATE_TYPE) {
324 rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, reg); 379 /*
380 * Clear current synchronisation setup.
381 * For the Beacon base registers we only need to clear
382 * the first byte since that byte contains the VALID and OWNER
383 * bits which (when set to 0) will invalidate the entire beacon.
384 */
385 beacon_base = HW_BEACON_OFFSET(intf->beacon->entry_idx);
386 rt2x00pci_register_write(rt2x00dev, beacon_base, 0);
387
388 /*
389 * Enable synchronisation.
390 */
391 rt2x00pci_register_read(rt2x00dev, TXRX_CSR9, &reg);
392 rt2x00_set_field32(&reg, TXRX_CSR9_TSF_TICKING, 1);
393 rt2x00_set_field32(&reg, TXRX_CSR9_TSF_SYNC, conf->sync);
394 rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 1);
395 rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, reg);
396 }
397
398 if (flags & CONFIG_UPDATE_MAC) {
399 reg = le32_to_cpu(conf->mac[1]);
400 rt2x00_set_field32(&reg, MAC_CSR3_UNICAST_TO_ME_MASK, 0xff);
401 conf->mac[1] = cpu_to_le32(reg);
402
403 rt2x00pci_register_multiwrite(rt2x00dev, MAC_CSR2,
404 conf->mac, sizeof(conf->mac));
405 }
406
407 if (flags & CONFIG_UPDATE_BSSID) {
408 reg = le32_to_cpu(conf->bssid[1]);
409 rt2x00_set_field32(&reg, MAC_CSR5_BSS_ID_MASK, 3);
410 conf->bssid[1] = cpu_to_le32(reg);
411
412 rt2x00pci_register_multiwrite(rt2x00dev, MAC_CSR4,
413 conf->bssid, sizeof(conf->bssid));
414 }
325} 415}
326 416
327static void rt61pci_config_preamble(struct rt2x00_dev *rt2x00dev, 417static void rt61pci_config_erp(struct rt2x00_dev *rt2x00dev,
328 const int short_preamble, 418 struct rt2x00lib_erp *erp)
329 const int ack_timeout,
330 const int ack_consume_time)
331{ 419{
332 u32 reg; 420 u32 reg;
333 421
334 rt2x00pci_register_read(rt2x00dev, TXRX_CSR0, &reg); 422 rt2x00pci_register_read(rt2x00dev, TXRX_CSR0, &reg);
335 rt2x00_set_field32(&reg, TXRX_CSR0_RX_ACK_TIMEOUT, ack_timeout); 423 rt2x00_set_field32(&reg, TXRX_CSR0_RX_ACK_TIMEOUT, erp->ack_timeout);
336 rt2x00pci_register_write(rt2x00dev, TXRX_CSR0, reg); 424 rt2x00pci_register_write(rt2x00dev, TXRX_CSR0, reg);
337 425
338 rt2x00pci_register_read(rt2x00dev, TXRX_CSR4, &reg); 426 rt2x00pci_register_read(rt2x00dev, TXRX_CSR4, &reg);
339 rt2x00_set_field32(&reg, TXRX_CSR4_AUTORESPOND_PREAMBLE, 427 rt2x00_set_field32(&reg, TXRX_CSR4_AUTORESPOND_PREAMBLE,
340 !!short_preamble); 428 !!erp->short_preamble);
341 rt2x00pci_register_write(rt2x00dev, TXRX_CSR4, reg); 429 rt2x00pci_register_write(rt2x00dev, TXRX_CSR4, reg);
342} 430}
343 431
@@ -427,27 +515,21 @@ static void rt61pci_config_antenna_5x(struct rt2x00_dev *rt2x00dev,
427 case ANTENNA_HW_DIVERSITY: 515 case ANTENNA_HW_DIVERSITY:
428 rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 2); 516 rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 2);
429 rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END, 517 rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END,
430 (rt2x00dev->curr_hwmode != HWMODE_A)); 518 (rt2x00dev->curr_band != IEEE80211_BAND_5GHZ));
431 break; 519 break;
432 case ANTENNA_A: 520 case ANTENNA_A:
433 rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 1); 521 rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 1);
434 rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END, 0); 522 rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END, 0);
435 if (rt2x00dev->curr_hwmode == HWMODE_A) 523 if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ)
436 rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 0); 524 rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 0);
437 else 525 else
438 rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 3); 526 rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 3);
439 break; 527 break;
440 case ANTENNA_SW_DIVERSITY:
441 /*
442 * NOTE: We should never come here because rt2x00lib is
443 * supposed to catch this and send us the correct antenna
444 * explicitely. However we are nog going to bug about this.
445 * Instead, just default to antenna B.
446 */
447 case ANTENNA_B: 528 case ANTENNA_B:
529 default:
448 rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 1); 530 rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 1);
449 rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END, 0); 531 rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END, 0);
450 if (rt2x00dev->curr_hwmode == HWMODE_A) 532 if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ)
451 rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 3); 533 rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 3);
452 else 534 else
453 rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 0); 535 rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 0);
@@ -486,14 +568,8 @@ static void rt61pci_config_antenna_2x(struct rt2x00_dev *rt2x00dev,
486 rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 1); 568 rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 1);
487 rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 3); 569 rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 3);
488 break; 570 break;
489 case ANTENNA_SW_DIVERSITY:
490 /*
491 * NOTE: We should never come here because rt2x00lib is
492 * supposed to catch this and send us the correct antenna
493 * explicitely. However we are nog going to bug about this.
494 * Instead, just default to antenna B.
495 */
496 case ANTENNA_B: 571 case ANTENNA_B:
572 default:
497 rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 1); 573 rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 1);
498 rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 0); 574 rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 0);
499 break; 575 break;
@@ -531,10 +607,6 @@ static void rt61pci_config_antenna_2529(struct rt2x00_dev *rt2x00dev,
531 rt61pci_bbp_read(rt2x00dev, 4, &r4); 607 rt61pci_bbp_read(rt2x00dev, 4, &r4);
532 rt61pci_bbp_read(rt2x00dev, 77, &r77); 608 rt61pci_bbp_read(rt2x00dev, 77, &r77);
533 609
534 /* FIXME: Antenna selection for the rf 2529 is very confusing in the
535 * legacy driver. The code below should be ok for non-diversity setups.
536 */
537
538 /* 610 /*
539 * Configure the RX antenna. 611 * Configure the RX antenna.
540 */ 612 */
@@ -544,15 +616,14 @@ static void rt61pci_config_antenna_2529(struct rt2x00_dev *rt2x00dev,
544 rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 0); 616 rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 0);
545 rt61pci_config_antenna_2529_rx(rt2x00dev, 0, 0); 617 rt61pci_config_antenna_2529_rx(rt2x00dev, 0, 0);
546 break; 618 break;
547 case ANTENNA_SW_DIVERSITY:
548 case ANTENNA_HW_DIVERSITY: 619 case ANTENNA_HW_DIVERSITY:
549 /* 620 /*
550 * NOTE: We should never come here because rt2x00lib is 621 * FIXME: Antenna selection for the rf 2529 is very confusing
551 * supposed to catch this and send us the correct antenna 622 * in the legacy driver. Just default to antenna B until the
552 * explicitely. However we are nog going to bug about this. 623 * legacy code can be properly translated into rt2x00 code.
553 * Instead, just default to antenna B.
554 */ 624 */
555 case ANTENNA_B: 625 case ANTENNA_B:
626 default:
556 rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 1); 627 rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 1);
557 rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 3); 628 rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 3);
558 rt61pci_config_antenna_2529_rx(rt2x00dev, 1, 1); 629 rt61pci_config_antenna_2529_rx(rt2x00dev, 1, 1);
@@ -603,7 +674,14 @@ static void rt61pci_config_antenna(struct rt2x00_dev *rt2x00dev,
603 unsigned int i; 674 unsigned int i;
604 u32 reg; 675 u32 reg;
605 676
606 if (rt2x00dev->curr_hwmode == HWMODE_A) { 677 /*
678 * We should never come here because rt2x00lib is supposed
 679 * to catch this and send us the correct antenna explicitly.
680 */
681 BUG_ON(ant->rx == ANTENNA_SW_DIVERSITY ||
682 ant->tx == ANTENNA_SW_DIVERSITY);
683
684 if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ) {
607 sel = antenna_sel_a; 685 sel = antenna_sel_a;
608 lna = test_bit(CONFIG_EXTERNAL_LNA_A, &rt2x00dev->flags); 686 lna = test_bit(CONFIG_EXTERNAL_LNA_A, &rt2x00dev->flags);
609 } else { 687 } else {
@@ -617,10 +695,9 @@ static void rt61pci_config_antenna(struct rt2x00_dev *rt2x00dev,
617 rt2x00pci_register_read(rt2x00dev, PHY_CSR0, &reg); 695 rt2x00pci_register_read(rt2x00dev, PHY_CSR0, &reg);
618 696
619 rt2x00_set_field32(&reg, PHY_CSR0_PA_PE_BG, 697 rt2x00_set_field32(&reg, PHY_CSR0_PA_PE_BG,
620 (rt2x00dev->curr_hwmode == HWMODE_B || 698 rt2x00dev->curr_band == IEEE80211_BAND_2GHZ);
621 rt2x00dev->curr_hwmode == HWMODE_G));
622 rt2x00_set_field32(&reg, PHY_CSR0_PA_PE_A, 699 rt2x00_set_field32(&reg, PHY_CSR0_PA_PE_A,
623 (rt2x00dev->curr_hwmode == HWMODE_A)); 700 rt2x00dev->curr_band == IEEE80211_BAND_5GHZ);
624 701
625 rt2x00pci_register_write(rt2x00dev, PHY_CSR0, reg); 702 rt2x00pci_register_write(rt2x00dev, PHY_CSR0, reg);
626 703
@@ -667,8 +744,8 @@ static void rt61pci_config_duration(struct rt2x00_dev *rt2x00dev,
667} 744}
668 745
669static void rt61pci_config(struct rt2x00_dev *rt2x00dev, 746static void rt61pci_config(struct rt2x00_dev *rt2x00dev,
670 const unsigned int flags, 747 struct rt2x00lib_conf *libconf,
671 struct rt2x00lib_conf *libconf) 748 const unsigned int flags)
672{ 749{
673 if (flags & CONFIG_UPDATE_PHYMODE) 750 if (flags & CONFIG_UPDATE_PHYMODE)
674 rt61pci_config_phymode(rt2x00dev, libconf->basic_rates); 751 rt61pci_config_phymode(rt2x00dev, libconf->basic_rates);
@@ -684,78 +761,6 @@ static void rt61pci_config(struct rt2x00_dev *rt2x00dev,
684} 761}
685 762
686/* 763/*
687 * LED functions.
688 */
689static void rt61pci_enable_led(struct rt2x00_dev *rt2x00dev)
690{
691 u32 reg;
692 u8 arg0;
693 u8 arg1;
694
695 rt2x00pci_register_read(rt2x00dev, MAC_CSR14, &reg);
696 rt2x00_set_field32(&reg, MAC_CSR14_ON_PERIOD, 70);
697 rt2x00_set_field32(&reg, MAC_CSR14_OFF_PERIOD, 30);
698 rt2x00pci_register_write(rt2x00dev, MAC_CSR14, reg);
699
700 rt2x00_set_field16(&rt2x00dev->led_reg, MCU_LEDCS_RADIO_STATUS, 1);
701 rt2x00_set_field16(&rt2x00dev->led_reg, MCU_LEDCS_LINK_A_STATUS,
702 (rt2x00dev->rx_status.phymode == MODE_IEEE80211A));
703 rt2x00_set_field16(&rt2x00dev->led_reg, MCU_LEDCS_LINK_BG_STATUS,
704 (rt2x00dev->rx_status.phymode != MODE_IEEE80211A));
705
706 arg0 = rt2x00dev->led_reg & 0xff;
707 arg1 = (rt2x00dev->led_reg >> 8) & 0xff;
708
709 rt61pci_mcu_request(rt2x00dev, MCU_LED, 0xff, arg0, arg1);
710}
711
712static void rt61pci_disable_led(struct rt2x00_dev *rt2x00dev)
713{
714 u16 led_reg;
715 u8 arg0;
716 u8 arg1;
717
718 led_reg = rt2x00dev->led_reg;
719 rt2x00_set_field16(&led_reg, MCU_LEDCS_RADIO_STATUS, 0);
720 rt2x00_set_field16(&led_reg, MCU_LEDCS_LINK_BG_STATUS, 0);
721 rt2x00_set_field16(&led_reg, MCU_LEDCS_LINK_A_STATUS, 0);
722
723 arg0 = led_reg & 0xff;
724 arg1 = (led_reg >> 8) & 0xff;
725
726 rt61pci_mcu_request(rt2x00dev, MCU_LED, 0xff, arg0, arg1);
727}
728
729static void rt61pci_activity_led(struct rt2x00_dev *rt2x00dev, int rssi)
730{
731 u8 led;
732
733 if (rt2x00dev->led_mode != LED_MODE_SIGNAL_STRENGTH)
734 return;
735
736 /*
737 * Led handling requires a positive value for the rssi,
738 * to do that correctly we need to add the correction.
739 */
740 rssi += rt2x00dev->rssi_offset;
741
742 if (rssi <= 30)
743 led = 0;
744 else if (rssi <= 39)
745 led = 1;
746 else if (rssi <= 49)
747 led = 2;
748 else if (rssi <= 53)
749 led = 3;
750 else if (rssi <= 63)
751 led = 4;
752 else
753 led = 5;
754
755 rt61pci_mcu_request(rt2x00dev, MCU_LED_STRENGTH, 0xff, led, 0);
756}
757
758/*
759 * Link tuning 764 * Link tuning
760 */ 765 */
761static void rt61pci_link_stats(struct rt2x00_dev *rt2x00dev, 766static void rt61pci_link_stats(struct rt2x00_dev *rt2x00dev,
@@ -789,17 +794,12 @@ static void rt61pci_link_tuner(struct rt2x00_dev *rt2x00dev)
789 u8 up_bound; 794 u8 up_bound;
790 u8 low_bound; 795 u8 low_bound;
791 796
792 /*
793 * Update Led strength
794 */
795 rt61pci_activity_led(rt2x00dev, rssi);
796
797 rt61pci_bbp_read(rt2x00dev, 17, &r17); 797 rt61pci_bbp_read(rt2x00dev, 17, &r17);
798 798
799 /* 799 /*
800 * Determine r17 bounds. 800 * Determine r17 bounds.
801 */ 801 */
802 if (rt2x00dev->rx_status.phymode == MODE_IEEE80211A) { 802 if (rt2x00dev->rx_status.band == IEEE80211_BAND_5GHZ) {
803 low_bound = 0x28; 803 low_bound = 0x28;
804 up_bound = 0x48; 804 up_bound = 0x48;
805 if (test_bit(CONFIG_EXTERNAL_LNA_A, &rt2x00dev->flags)) { 805 if (test_bit(CONFIG_EXTERNAL_LNA_A, &rt2x00dev->flags)) {
@@ -816,6 +816,13 @@ static void rt61pci_link_tuner(struct rt2x00_dev *rt2x00dev)
816 } 816 }
817 817
818 /* 818 /*
819 * If we are not associated, we should go straight to the
820 * dynamic CCA tuning.
821 */
822 if (!rt2x00dev->intf_associated)
823 goto dynamic_cca_tune;
824
825 /*
819 * Special big-R17 for very short distance 826 * Special big-R17 for very short distance
820 */ 827 */
821 if (rssi >= -35) { 828 if (rssi >= -35) {
@@ -866,6 +873,8 @@ static void rt61pci_link_tuner(struct rt2x00_dev *rt2x00dev)
866 return; 873 return;
867 } 874 }
868 875
876dynamic_cca_tune:
877
869 /* 878 /*
870 * r17 does not yet exceed upper limit, continue and base 879 * r17 does not yet exceed upper limit, continue and base
871 * the r17 tuning on the false CCA count. 880 * the r17 tuning on the false CCA count.
@@ -882,7 +891,7 @@ static void rt61pci_link_tuner(struct rt2x00_dev *rt2x00dev)
882} 891}
883 892
884/* 893/*
885 * Firmware name function. 894 * Firmware functions
886 */ 895 */
887static char *rt61pci_get_firmware_name(struct rt2x00_dev *rt2x00dev) 896static char *rt61pci_get_firmware_name(struct rt2x00_dev *rt2x00dev)
888{ 897{
@@ -906,9 +915,23 @@ static char *rt61pci_get_firmware_name(struct rt2x00_dev *rt2x00dev)
906 return fw_name; 915 return fw_name;
907} 916}
908 917
909/* 918static u16 rt61pci_get_firmware_crc(void *data, const size_t len)
910 * Initialization functions. 919{
911 */ 920 u16 crc;
921
922 /*
923 * Use the crc itu-t algorithm.
924 * The last 2 bytes in the firmware array are the crc checksum itself,
925 * this means that we should never pass those 2 bytes to the crc
926 * algorithm.
927 */
928 crc = crc_itu_t(0, data, len - 2);
929 crc = crc_itu_t_byte(crc, 0);
930 crc = crc_itu_t_byte(crc, 0);
931
932 return crc;
933}
934
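rt61pci_get_firmware_crc() above checksums everything except the trailing two bytes (which hold the expected CRC) and then clocks in two zero bytes, the usual augmentation for this MSB-first CRC. A user-space sketch of the same computation, with a bitwise routine assumed to be equivalent to the kernel's table-driven crc_itu_t()/crc_itu_t_byte() (polynomial 0x1021, initial value 0); the toy image contents are made up:

	#include <stdint.h>
	#include <stdio.h>

	/* Bitwise CRC-ITU-T step: poly 0x1021, MSB first, no reflection. */
	static uint16_t crc_step(uint16_t crc, uint8_t byte)
	{
		int i;

		crc ^= (uint16_t)byte << 8;
		for (i = 0; i < 8; i++)
			crc = (crc & 0x8000) ? (uint16_t)((crc << 1) ^ 0x1021)
					     : (uint16_t)(crc << 1);
		return crc;
	}

	int main(void)
	{
		/* Toy "firmware": payload followed by a 2-byte checksum placeholder. */
		uint8_t image[16] = "rt61 payload";
		uint16_t crc = 0;
		size_t i;

		for (i = 0; i < sizeof(image) - 2; i++)	/* skip the stored CRC */
			crc = crc_step(crc, image[i]);
		crc = crc_step(crc, 0);			/* augment with two zero bytes, */
		crc = crc_step(crc, 0);			/* as the driver does */

		printf("expected firmware checksum: 0x%04x\n", crc);
		return 0;
	}

The result can then be checked against the checksum the firmware image carries in those final two bytes.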
912static int rt61pci_load_firmware(struct rt2x00_dev *rt2x00dev, void *data, 935static int rt61pci_load_firmware(struct rt2x00_dev *rt2x00dev, void *data,
913 const size_t len) 936 const size_t len)
914{ 937{
@@ -989,50 +1012,55 @@ static int rt61pci_load_firmware(struct rt2x00_dev *rt2x00dev, void *data,
989 return 0; 1012 return 0;
990} 1013}
991 1014
1015/*
1016 * Initialization functions.
1017 */
992static void rt61pci_init_rxentry(struct rt2x00_dev *rt2x00dev, 1018static void rt61pci_init_rxentry(struct rt2x00_dev *rt2x00dev,
993 struct data_entry *entry) 1019 struct queue_entry *entry)
994{ 1020{
995 __le32 *rxd = entry->priv; 1021 struct queue_entry_priv_pci_rx *priv_rx = entry->priv_data;
996 u32 word; 1022 u32 word;
997 1023
998 rt2x00_desc_read(rxd, 5, &word); 1024 rt2x00_desc_read(priv_rx->desc, 5, &word);
999 rt2x00_set_field32(&word, RXD_W5_BUFFER_PHYSICAL_ADDRESS, 1025 rt2x00_set_field32(&word, RXD_W5_BUFFER_PHYSICAL_ADDRESS,
1000 entry->data_dma); 1026 priv_rx->data_dma);
1001 rt2x00_desc_write(rxd, 5, word); 1027 rt2x00_desc_write(priv_rx->desc, 5, word);
1002 1028
1003 rt2x00_desc_read(rxd, 0, &word); 1029 rt2x00_desc_read(priv_rx->desc, 0, &word);
1004 rt2x00_set_field32(&word, RXD_W0_OWNER_NIC, 1); 1030 rt2x00_set_field32(&word, RXD_W0_OWNER_NIC, 1);
1005 rt2x00_desc_write(rxd, 0, word); 1031 rt2x00_desc_write(priv_rx->desc, 0, word);
1006} 1032}
1007 1033
1008static void rt61pci_init_txentry(struct rt2x00_dev *rt2x00dev, 1034static void rt61pci_init_txentry(struct rt2x00_dev *rt2x00dev,
1009 struct data_entry *entry) 1035 struct queue_entry *entry)
1010{ 1036{
1011 __le32 *txd = entry->priv; 1037 struct queue_entry_priv_pci_tx *priv_tx = entry->priv_data;
1012 u32 word; 1038 u32 word;
1013 1039
1014 rt2x00_desc_read(txd, 1, &word); 1040 rt2x00_desc_read(priv_tx->desc, 1, &word);
1015 rt2x00_set_field32(&word, TXD_W1_BUFFER_COUNT, 1); 1041 rt2x00_set_field32(&word, TXD_W1_BUFFER_COUNT, 1);
1016 rt2x00_desc_write(txd, 1, word); 1042 rt2x00_desc_write(priv_tx->desc, 1, word);
1017 1043
1018 rt2x00_desc_read(txd, 5, &word); 1044 rt2x00_desc_read(priv_tx->desc, 5, &word);
1019 rt2x00_set_field32(&word, TXD_W5_PID_TYPE, entry->ring->queue_idx); 1045 rt2x00_set_field32(&word, TXD_W5_PID_TYPE, entry->queue->qid);
1020 rt2x00_set_field32(&word, TXD_W5_PID_SUBTYPE, entry->entry_idx); 1046 rt2x00_set_field32(&word, TXD_W5_PID_SUBTYPE, entry->entry_idx);
1021 rt2x00_desc_write(txd, 5, word); 1047 rt2x00_desc_write(priv_tx->desc, 5, word);
1022 1048
1023 rt2x00_desc_read(txd, 6, &word); 1049 rt2x00_desc_read(priv_tx->desc, 6, &word);
1024 rt2x00_set_field32(&word, TXD_W6_BUFFER_PHYSICAL_ADDRESS, 1050 rt2x00_set_field32(&word, TXD_W6_BUFFER_PHYSICAL_ADDRESS,
1025 entry->data_dma); 1051 priv_tx->data_dma);
1026 rt2x00_desc_write(txd, 6, word); 1052 rt2x00_desc_write(priv_tx->desc, 6, word);
1027 1053
1028 rt2x00_desc_read(txd, 0, &word); 1054 rt2x00_desc_read(priv_tx->desc, 0, &word);
1029 rt2x00_set_field32(&word, TXD_W0_VALID, 0); 1055 rt2x00_set_field32(&word, TXD_W0_VALID, 0);
1030 rt2x00_set_field32(&word, TXD_W0_OWNER_NIC, 0); 1056 rt2x00_set_field32(&word, TXD_W0_OWNER_NIC, 0);
1031 rt2x00_desc_write(txd, 0, word); 1057 rt2x00_desc_write(priv_tx->desc, 0, word);
1032} 1058}
1033 1059
1034static int rt61pci_init_rings(struct rt2x00_dev *rt2x00dev) 1060static int rt61pci_init_queues(struct rt2x00_dev *rt2x00dev)
1035{ 1061{
1062 struct queue_entry_priv_pci_rx *priv_rx;
1063 struct queue_entry_priv_pci_tx *priv_tx;
1036 u32 reg; 1064 u32 reg;
1037 1065
1038 /* 1066 /*
@@ -1040,59 +1068,55 @@ static int rt61pci_init_rings(struct rt2x00_dev *rt2x00dev)
1040 */ 1068 */
1041 rt2x00pci_register_read(rt2x00dev, TX_RING_CSR0, &reg); 1069 rt2x00pci_register_read(rt2x00dev, TX_RING_CSR0, &reg);
1042 rt2x00_set_field32(&reg, TX_RING_CSR0_AC0_RING_SIZE, 1070 rt2x00_set_field32(&reg, TX_RING_CSR0_AC0_RING_SIZE,
1043 rt2x00dev->tx[IEEE80211_TX_QUEUE_DATA0].stats.limit); 1071 rt2x00dev->tx[0].limit);
1044 rt2x00_set_field32(&reg, TX_RING_CSR0_AC1_RING_SIZE, 1072 rt2x00_set_field32(&reg, TX_RING_CSR0_AC1_RING_SIZE,
1045 rt2x00dev->tx[IEEE80211_TX_QUEUE_DATA1].stats.limit); 1073 rt2x00dev->tx[1].limit);
1046 rt2x00_set_field32(&reg, TX_RING_CSR0_AC2_RING_SIZE, 1074 rt2x00_set_field32(&reg, TX_RING_CSR0_AC2_RING_SIZE,
1047 rt2x00dev->tx[IEEE80211_TX_QUEUE_DATA2].stats.limit); 1075 rt2x00dev->tx[2].limit);
1048 rt2x00_set_field32(&reg, TX_RING_CSR0_AC3_RING_SIZE, 1076 rt2x00_set_field32(&reg, TX_RING_CSR0_AC3_RING_SIZE,
1049 rt2x00dev->tx[IEEE80211_TX_QUEUE_DATA3].stats.limit); 1077 rt2x00dev->tx[3].limit);
1050 rt2x00pci_register_write(rt2x00dev, TX_RING_CSR0, reg); 1078 rt2x00pci_register_write(rt2x00dev, TX_RING_CSR0, reg);
1051 1079
1052 rt2x00pci_register_read(rt2x00dev, TX_RING_CSR1, &reg); 1080 rt2x00pci_register_read(rt2x00dev, TX_RING_CSR1, &reg);
1053 rt2x00_set_field32(&reg, TX_RING_CSR1_MGMT_RING_SIZE,
1054 rt2x00dev->tx[IEEE80211_TX_QUEUE_DATA4].stats.limit);
1055 rt2x00_set_field32(&reg, TX_RING_CSR1_TXD_SIZE, 1081 rt2x00_set_field32(&reg, TX_RING_CSR1_TXD_SIZE,
1056 rt2x00dev->tx[IEEE80211_TX_QUEUE_DATA0].desc_size / 1082 rt2x00dev->tx[0].desc_size / 4);
1057 4);
1058 rt2x00pci_register_write(rt2x00dev, TX_RING_CSR1, reg); 1083 rt2x00pci_register_write(rt2x00dev, TX_RING_CSR1, reg);
1059 1084
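
The ring-size and descriptor-size programming above works in 32-bit words: TX_RING_CSR1_TXD_SIZE and RX_RING_CSR_RXD_SIZE both take desc_size / 4, and with the 16-word descriptors defined later in this patch that value is 16. A standalone sanity check of that arithmetic (plain C, not driver code; the sizes are restated here as assumptions mirroring the header defines):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define TXD_DESC_SIZE (16 * sizeof(uint32_t))  /* mirrors 16 * sizeof(__le32) */
#define RXD_DESC_SIZE (16 * sizeof(uint32_t))

int main(void)
{
        /* Both the TXD_SIZE and RXD_SIZE register fields take the size in words. */
        assert(TXD_DESC_SIZE / 4 == 16);
        assert(RXD_DESC_SIZE / 4 == 16);
        printf("TXD: %zu bytes -> %zu words\n", TXD_DESC_SIZE, TXD_DESC_SIZE / 4);
        return 0;
}
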
1085 priv_tx = rt2x00dev->tx[0].entries[0].priv_data;
1060 rt2x00pci_register_read(rt2x00dev, AC0_BASE_CSR, &reg); 1086 rt2x00pci_register_read(rt2x00dev, AC0_BASE_CSR, &reg);
1061 rt2x00_set_field32(&reg, AC0_BASE_CSR_RING_REGISTER, 1087 rt2x00_set_field32(&reg, AC0_BASE_CSR_RING_REGISTER,
1062 rt2x00dev->tx[IEEE80211_TX_QUEUE_DATA0].data_dma); 1088 priv_tx->desc_dma);
1063 rt2x00pci_register_write(rt2x00dev, AC0_BASE_CSR, reg); 1089 rt2x00pci_register_write(rt2x00dev, AC0_BASE_CSR, reg);
1064 1090
1091 priv_tx = rt2x00dev->tx[1].entries[0].priv_data;
1065 rt2x00pci_register_read(rt2x00dev, AC1_BASE_CSR, &reg); 1092 rt2x00pci_register_read(rt2x00dev, AC1_BASE_CSR, &reg);
1066 rt2x00_set_field32(&reg, AC1_BASE_CSR_RING_REGISTER, 1093 rt2x00_set_field32(&reg, AC1_BASE_CSR_RING_REGISTER,
1067 rt2x00dev->tx[IEEE80211_TX_QUEUE_DATA1].data_dma); 1094 priv_tx->desc_dma);
1068 rt2x00pci_register_write(rt2x00dev, AC1_BASE_CSR, reg); 1095 rt2x00pci_register_write(rt2x00dev, AC1_BASE_CSR, reg);
1069 1096
1097 priv_tx = rt2x00dev->tx[2].entries[0].priv_data;
1070 rt2x00pci_register_read(rt2x00dev, AC2_BASE_CSR, &reg); 1098 rt2x00pci_register_read(rt2x00dev, AC2_BASE_CSR, &reg);
1071 rt2x00_set_field32(&reg, AC2_BASE_CSR_RING_REGISTER, 1099 rt2x00_set_field32(&reg, AC2_BASE_CSR_RING_REGISTER,
1072 rt2x00dev->tx[IEEE80211_TX_QUEUE_DATA2].data_dma); 1100 priv_tx->desc_dma);
1073 rt2x00pci_register_write(rt2x00dev, AC2_BASE_CSR, reg); 1101 rt2x00pci_register_write(rt2x00dev, AC2_BASE_CSR, reg);
1074 1102
1103 priv_tx = rt2x00dev->tx[3].entries[0].priv_data;
1075 rt2x00pci_register_read(rt2x00dev, AC3_BASE_CSR, &reg); 1104 rt2x00pci_register_read(rt2x00dev, AC3_BASE_CSR, &reg);
1076 rt2x00_set_field32(&reg, AC3_BASE_CSR_RING_REGISTER, 1105 rt2x00_set_field32(&reg, AC3_BASE_CSR_RING_REGISTER,
1077 rt2x00dev->tx[IEEE80211_TX_QUEUE_DATA3].data_dma); 1106 priv_tx->desc_dma);
1078 rt2x00pci_register_write(rt2x00dev, AC3_BASE_CSR, reg); 1107 rt2x00pci_register_write(rt2x00dev, AC3_BASE_CSR, reg);
1079 1108
1080 rt2x00pci_register_read(rt2x00dev, MGMT_BASE_CSR, &reg);
1081 rt2x00_set_field32(&reg, MGMT_BASE_CSR_RING_REGISTER,
1082 rt2x00dev->tx[IEEE80211_TX_QUEUE_DATA4].data_dma);
1083 rt2x00pci_register_write(rt2x00dev, MGMT_BASE_CSR, reg);
1084
1085 rt2x00pci_register_read(rt2x00dev, RX_RING_CSR, &reg); 1109 rt2x00pci_register_read(rt2x00dev, RX_RING_CSR, &reg);
1086 rt2x00_set_field32(&reg, RX_RING_CSR_RING_SIZE, 1110 rt2x00_set_field32(&reg, RX_RING_CSR_RING_SIZE, rt2x00dev->rx->limit);
1087 rt2x00dev->rx->stats.limit);
1088 rt2x00_set_field32(&reg, RX_RING_CSR_RXD_SIZE, 1111 rt2x00_set_field32(&reg, RX_RING_CSR_RXD_SIZE,
1089 rt2x00dev->rx->desc_size / 4); 1112 rt2x00dev->rx->desc_size / 4);
1090 rt2x00_set_field32(&reg, RX_RING_CSR_RXD_WRITEBACK_SIZE, 4); 1113 rt2x00_set_field32(&reg, RX_RING_CSR_RXD_WRITEBACK_SIZE, 4);
1091 rt2x00pci_register_write(rt2x00dev, RX_RING_CSR, reg); 1114 rt2x00pci_register_write(rt2x00dev, RX_RING_CSR, reg);
1092 1115
1116 priv_rx = rt2x00dev->rx->entries[0].priv_data;
1093 rt2x00pci_register_read(rt2x00dev, RX_BASE_CSR, &reg); 1117 rt2x00pci_register_read(rt2x00dev, RX_BASE_CSR, &reg);
1094 rt2x00_set_field32(&reg, RX_BASE_CSR_RING_REGISTER, 1118 rt2x00_set_field32(&reg, RX_BASE_CSR_RING_REGISTER,
1095 rt2x00dev->rx->data_dma); 1119 priv_rx->desc_dma);
1096 rt2x00pci_register_write(rt2x00dev, RX_BASE_CSR, reg); 1120 rt2x00pci_register_write(rt2x00dev, RX_BASE_CSR, reg);
1097 1121
1098 rt2x00pci_register_read(rt2x00dev, TX_DMA_DST_CSR, &reg); 1122 rt2x00pci_register_read(rt2x00dev, TX_DMA_DST_CSR, &reg);
@@ -1100,7 +1124,6 @@ static int rt61pci_init_rings(struct rt2x00_dev *rt2x00dev)
1100 rt2x00_set_field32(&reg, TX_DMA_DST_CSR_DEST_AC1, 2); 1124 rt2x00_set_field32(&reg, TX_DMA_DST_CSR_DEST_AC1, 2);
1101 rt2x00_set_field32(&reg, TX_DMA_DST_CSR_DEST_AC2, 2); 1125 rt2x00_set_field32(&reg, TX_DMA_DST_CSR_DEST_AC2, 2);
1102 rt2x00_set_field32(&reg, TX_DMA_DST_CSR_DEST_AC3, 2); 1126 rt2x00_set_field32(&reg, TX_DMA_DST_CSR_DEST_AC3, 2);
1103 rt2x00_set_field32(&reg, TX_DMA_DST_CSR_DEST_MGMT, 0);
1104 rt2x00pci_register_write(rt2x00dev, TX_DMA_DST_CSR, reg); 1127 rt2x00pci_register_write(rt2x00dev, TX_DMA_DST_CSR, reg);
1105 1128
1106 rt2x00pci_register_read(rt2x00dev, LOAD_TX_RING_CSR, &reg); 1129 rt2x00pci_register_read(rt2x00dev, LOAD_TX_RING_CSR, &reg);
@@ -1108,7 +1131,6 @@ static int rt61pci_init_rings(struct rt2x00_dev *rt2x00dev)
1108 rt2x00_set_field32(&reg, LOAD_TX_RING_CSR_LOAD_TXD_AC1, 1); 1131 rt2x00_set_field32(&reg, LOAD_TX_RING_CSR_LOAD_TXD_AC1, 1);
1109 rt2x00_set_field32(&reg, LOAD_TX_RING_CSR_LOAD_TXD_AC2, 1); 1132 rt2x00_set_field32(&reg, LOAD_TX_RING_CSR_LOAD_TXD_AC2, 1);
1110 rt2x00_set_field32(&reg, LOAD_TX_RING_CSR_LOAD_TXD_AC3, 1); 1133 rt2x00_set_field32(&reg, LOAD_TX_RING_CSR_LOAD_TXD_AC3, 1);
1111 rt2x00_set_field32(&reg, LOAD_TX_RING_CSR_LOAD_TXD_MGMT, 1);
1112 rt2x00pci_register_write(rt2x00dev, LOAD_TX_RING_CSR, reg); 1134 rt2x00pci_register_write(rt2x00dev, LOAD_TX_RING_CSR, reg);
1113 1135
1114 rt2x00pci_register_read(rt2x00dev, RX_CNTL_CSR, &reg); 1136 rt2x00pci_register_read(rt2x00dev, RX_CNTL_CSR, &reg);
@@ -1224,6 +1246,17 @@ static int rt61pci_init_registers(struct rt2x00_dev *rt2x00dev)
1224 rt2x00pci_register_write(rt2x00dev, AC_TXOP_CSR1, reg); 1246 rt2x00pci_register_write(rt2x00dev, AC_TXOP_CSR1, reg);
1225 1247
1226 /* 1248 /*
1249 * Clear all beacons
1250 * For the Beacon base registers we only need to clear
1251 * the first byte since that byte contains the VALID and OWNER
1252 * bits which (when set to 0) will invalidate the entire beacon.
1253 */
1254 rt2x00pci_register_write(rt2x00dev, HW_BEACON_BASE0, 0);
1255 rt2x00pci_register_write(rt2x00dev, HW_BEACON_BASE1, 0);
1256 rt2x00pci_register_write(rt2x00dev, HW_BEACON_BASE2, 0);
1257 rt2x00pci_register_write(rt2x00dev, HW_BEACON_BASE3, 0);
1258
1259 /*
1227 * We must clear the error counters. 1260 * We must clear the error counters.
1228 * These registers are cleared on read, 1261 * These registers are cleared on read,
1229 * so we may pass a useless variable to store the value. 1262 * so we may pass a useless variable to store the value.
@@ -1296,19 +1329,15 @@ continue_csr_init:
1296 rt61pci_bbp_write(rt2x00dev, 102, 0x16); 1329 rt61pci_bbp_write(rt2x00dev, 102, 0x16);
1297 rt61pci_bbp_write(rt2x00dev, 107, 0x04); 1330 rt61pci_bbp_write(rt2x00dev, 107, 0x04);
1298 1331
1299 DEBUG(rt2x00dev, "Start initialization from EEPROM...\n");
1300 for (i = 0; i < EEPROM_BBP_SIZE; i++) { 1332 for (i = 0; i < EEPROM_BBP_SIZE; i++) {
1301 rt2x00_eeprom_read(rt2x00dev, EEPROM_BBP_START + i, &eeprom); 1333 rt2x00_eeprom_read(rt2x00dev, EEPROM_BBP_START + i, &eeprom);
1302 1334
1303 if (eeprom != 0xffff && eeprom != 0x0000) { 1335 if (eeprom != 0xffff && eeprom != 0x0000) {
1304 reg_id = rt2x00_get_field16(eeprom, EEPROM_BBP_REG_ID); 1336 reg_id = rt2x00_get_field16(eeprom, EEPROM_BBP_REG_ID);
1305 value = rt2x00_get_field16(eeprom, EEPROM_BBP_VALUE); 1337 value = rt2x00_get_field16(eeprom, EEPROM_BBP_VALUE);
1306 DEBUG(rt2x00dev, "BBP: 0x%02x, value: 0x%02x.\n",
1307 reg_id, value);
1308 rt61pci_bbp_write(rt2x00dev, reg_id, value); 1338 rt61pci_bbp_write(rt2x00dev, reg_id, value);
1309 } 1339 }
1310 } 1340 }
1311 DEBUG(rt2x00dev, "...End initialization from EEPROM.\n");
1312 1341
1313 return 0; 1342 return 0;
1314} 1343}
@@ -1375,7 +1404,7 @@ static int rt61pci_enable_radio(struct rt2x00_dev *rt2x00dev)
1375 /* 1404 /*
1376 * Initialize all registers. 1405 * Initialize all registers.
1377 */ 1406 */
1378 if (rt61pci_init_rings(rt2x00dev) || 1407 if (rt61pci_init_queues(rt2x00dev) ||
1379 rt61pci_init_registers(rt2x00dev) || 1408 rt61pci_init_registers(rt2x00dev) ||
1380 rt61pci_init_bbp(rt2x00dev)) { 1409 rt61pci_init_bbp(rt2x00dev)) {
1381 ERROR(rt2x00dev, "Register initialization failed.\n"); 1410 ERROR(rt2x00dev, "Register initialization failed.\n");
@@ -1394,11 +1423,6 @@ static int rt61pci_enable_radio(struct rt2x00_dev *rt2x00dev)
1394 rt2x00_set_field32(&reg, RX_CNTL_CSR_ENABLE_RX_DMA, 1); 1423 rt2x00_set_field32(&reg, RX_CNTL_CSR_ENABLE_RX_DMA, 1);
1395 rt2x00pci_register_write(rt2x00dev, RX_CNTL_CSR, reg); 1424 rt2x00pci_register_write(rt2x00dev, RX_CNTL_CSR, reg);
1396 1425
1397 /*
1398 * Enable LED
1399 */
1400 rt61pci_enable_led(rt2x00dev);
1401
1402 return 0; 1426 return 0;
1403} 1427}
1404 1428
@@ -1406,11 +1430,6 @@ static void rt61pci_disable_radio(struct rt2x00_dev *rt2x00dev)
1406{ 1430{
1407 u32 reg; 1431 u32 reg;
1408 1432
1409 /*
1410 * Disable LED
1411 */
1412 rt61pci_disable_led(rt2x00dev);
1413
1414 rt2x00pci_register_write(rt2x00dev, MAC_CSR10, 0x00001818); 1433 rt2x00pci_register_write(rt2x00dev, MAC_CSR10, 0x00001818);
1415 1434
1416 /* 1435 /*
@@ -1426,7 +1445,6 @@ static void rt61pci_disable_radio(struct rt2x00_dev *rt2x00dev)
1426 rt2x00_set_field32(&reg, TX_CNTL_CSR_ABORT_TX_AC1, 1); 1445 rt2x00_set_field32(&reg, TX_CNTL_CSR_ABORT_TX_AC1, 1);
1427 rt2x00_set_field32(&reg, TX_CNTL_CSR_ABORT_TX_AC2, 1); 1446 rt2x00_set_field32(&reg, TX_CNTL_CSR_ABORT_TX_AC2, 1);
1428 rt2x00_set_field32(&reg, TX_CNTL_CSR_ABORT_TX_AC3, 1); 1447 rt2x00_set_field32(&reg, TX_CNTL_CSR_ABORT_TX_AC3, 1);
1429 rt2x00_set_field32(&reg, TX_CNTL_CSR_ABORT_TX_MGMT, 1);
1430 rt2x00pci_register_write(rt2x00dev, TX_CNTL_CSR, reg); 1448 rt2x00pci_register_write(rt2x00dev, TX_CNTL_CSR, reg);
1431 1449
1432 /* 1450 /*
@@ -1508,10 +1526,10 @@ static int rt61pci_set_device_state(struct rt2x00_dev *rt2x00dev,
1508 */ 1526 */
1509static void rt61pci_write_tx_desc(struct rt2x00_dev *rt2x00dev, 1527static void rt61pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1510 struct sk_buff *skb, 1528 struct sk_buff *skb,
1511 struct txdata_entry_desc *desc, 1529 struct txentry_desc *txdesc,
1512 struct ieee80211_tx_control *control) 1530 struct ieee80211_tx_control *control)
1513{ 1531{
1514 struct skb_desc *skbdesc = get_skb_desc(skb); 1532 struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
1515 __le32 *txd = skbdesc->desc; 1533 __le32 *txd = skbdesc->desc;
1516 u32 word; 1534 u32 word;
1517 1535
@@ -1519,50 +1537,52 @@ static void rt61pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1519 * Start writing the descriptor words. 1537 * Start writing the descriptor words.
1520 */ 1538 */
1521 rt2x00_desc_read(txd, 1, &word); 1539 rt2x00_desc_read(txd, 1, &word);
1522 rt2x00_set_field32(&word, TXD_W1_HOST_Q_ID, desc->queue); 1540 rt2x00_set_field32(&word, TXD_W1_HOST_Q_ID, txdesc->queue);
1523 rt2x00_set_field32(&word, TXD_W1_AIFSN, desc->aifs); 1541 rt2x00_set_field32(&word, TXD_W1_AIFSN, txdesc->aifs);
1524 rt2x00_set_field32(&word, TXD_W1_CWMIN, desc->cw_min); 1542 rt2x00_set_field32(&word, TXD_W1_CWMIN, txdesc->cw_min);
1525 rt2x00_set_field32(&word, TXD_W1_CWMAX, desc->cw_max); 1543 rt2x00_set_field32(&word, TXD_W1_CWMAX, txdesc->cw_max);
1526 rt2x00_set_field32(&word, TXD_W1_IV_OFFSET, IEEE80211_HEADER); 1544 rt2x00_set_field32(&word, TXD_W1_IV_OFFSET, IEEE80211_HEADER);
1527 rt2x00_set_field32(&word, TXD_W1_HW_SEQUENCE, 1); 1545 rt2x00_set_field32(&word, TXD_W1_HW_SEQUENCE, 1);
1528 rt2x00_desc_write(txd, 1, word); 1546 rt2x00_desc_write(txd, 1, word);
1529 1547
1530 rt2x00_desc_read(txd, 2, &word); 1548 rt2x00_desc_read(txd, 2, &word);
1531 rt2x00_set_field32(&word, TXD_W2_PLCP_SIGNAL, desc->signal); 1549 rt2x00_set_field32(&word, TXD_W2_PLCP_SIGNAL, txdesc->signal);
1532 rt2x00_set_field32(&word, TXD_W2_PLCP_SERVICE, desc->service); 1550 rt2x00_set_field32(&word, TXD_W2_PLCP_SERVICE, txdesc->service);
1533 rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_LOW, desc->length_low); 1551 rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_LOW, txdesc->length_low);
1534 rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_HIGH, desc->length_high); 1552 rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_HIGH, txdesc->length_high);
1535 rt2x00_desc_write(txd, 2, word); 1553 rt2x00_desc_write(txd, 2, word);
1536 1554
1537 rt2x00_desc_read(txd, 5, &word); 1555 rt2x00_desc_read(txd, 5, &word);
1538 rt2x00_set_field32(&word, TXD_W5_TX_POWER, 1556 rt2x00_set_field32(&word, TXD_W5_TX_POWER,
1539 TXPOWER_TO_DEV(control->power_level)); 1557 TXPOWER_TO_DEV(rt2x00dev->tx_power));
1540 rt2x00_set_field32(&word, TXD_W5_WAITING_DMA_DONE_INT, 1); 1558 rt2x00_set_field32(&word, TXD_W5_WAITING_DMA_DONE_INT, 1);
1541 rt2x00_desc_write(txd, 5, word); 1559 rt2x00_desc_write(txd, 5, word);
1542 1560
1543 rt2x00_desc_read(txd, 11, &word); 1561 if (skbdesc->desc_len > TXINFO_SIZE) {
1544 rt2x00_set_field32(&word, TXD_W11_BUFFER_LENGTH0, skbdesc->data_len); 1562 rt2x00_desc_read(txd, 11, &word);
1545 rt2x00_desc_write(txd, 11, word); 1563 rt2x00_set_field32(&word, TXD_W11_BUFFER_LENGTH0, skbdesc->data_len);
1564 rt2x00_desc_write(txd, 11, word);
1565 }
1546 1566
1547 rt2x00_desc_read(txd, 0, &word); 1567 rt2x00_desc_read(txd, 0, &word);
1548 rt2x00_set_field32(&word, TXD_W0_OWNER_NIC, 1); 1568 rt2x00_set_field32(&word, TXD_W0_OWNER_NIC, 1);
1549 rt2x00_set_field32(&word, TXD_W0_VALID, 1); 1569 rt2x00_set_field32(&word, TXD_W0_VALID, 1);
1550 rt2x00_set_field32(&word, TXD_W0_MORE_FRAG, 1570 rt2x00_set_field32(&word, TXD_W0_MORE_FRAG,
1551 test_bit(ENTRY_TXD_MORE_FRAG, &desc->flags)); 1571 test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags));
1552 rt2x00_set_field32(&word, TXD_W0_ACK, 1572 rt2x00_set_field32(&word, TXD_W0_ACK,
1553 test_bit(ENTRY_TXD_ACK, &desc->flags)); 1573 test_bit(ENTRY_TXD_ACK, &txdesc->flags));
1554 rt2x00_set_field32(&word, TXD_W0_TIMESTAMP, 1574 rt2x00_set_field32(&word, TXD_W0_TIMESTAMP,
1555 test_bit(ENTRY_TXD_REQ_TIMESTAMP, &desc->flags)); 1575 test_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags));
1556 rt2x00_set_field32(&word, TXD_W0_OFDM, 1576 rt2x00_set_field32(&word, TXD_W0_OFDM,
1557 test_bit(ENTRY_TXD_OFDM_RATE, &desc->flags)); 1577 test_bit(ENTRY_TXD_OFDM_RATE, &txdesc->flags));
1558 rt2x00_set_field32(&word, TXD_W0_IFS, desc->ifs); 1578 rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs);
1559 rt2x00_set_field32(&word, TXD_W0_RETRY_MODE, 1579 rt2x00_set_field32(&word, TXD_W0_RETRY_MODE,
1560 !!(control->flags & 1580 !!(control->flags &
1561 IEEE80211_TXCTL_LONG_RETRY_LIMIT)); 1581 IEEE80211_TXCTL_LONG_RETRY_LIMIT));
1562 rt2x00_set_field32(&word, TXD_W0_TKIP_MIC, 0); 1582 rt2x00_set_field32(&word, TXD_W0_TKIP_MIC, 0);
1563 rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, skbdesc->data_len); 1583 rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, skbdesc->data_len);
1564 rt2x00_set_field32(&word, TXD_W0_BURST, 1584 rt2x00_set_field32(&word, TXD_W0_BURST,
1565 test_bit(ENTRY_TXD_BURST, &desc->flags)); 1585 test_bit(ENTRY_TXD_BURST, &txdesc->flags));
1566 rt2x00_set_field32(&word, TXD_W0_CIPHER_ALG, CIPHER_NONE); 1586 rt2x00_set_field32(&word, TXD_W0_CIPHER_ALG, CIPHER_NONE);
1567 rt2x00_desc_write(txd, 0, word); 1587 rt2x00_desc_write(txd, 0, word);
1568} 1588}
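
The guard around descriptor word 11 above exists because two descriptor layouts share this path: data frames carry the full 16-word TXD, while beacons use the short 6-word TXINFO (see the beacon queue definition and the TXINFO_SIZE define added later in this patch), and TXD_W11_BUFFER_LENGTH0 only exists in the long form. A minimal standalone sketch of that check, with the two sizes restated in plain C:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TXD_DESC_SIZE (16 * sizeof(uint32_t))   /* full TX descriptor  */
#define TXINFO_SIZE   (6 * sizeof(uint32_t))    /* short beacon TXINFO */

/* Word 11 is only present when the descriptor is longer than a TXINFO. */
static bool has_word11(size_t desc_len)
{
        return desc_len > TXINFO_SIZE;
}

int main(void)
{
        printf("data frame (%zu bytes): word 11 present = %d\n",
               TXD_DESC_SIZE, has_word11(TXD_DESC_SIZE));
        printf("beacon     (%zu bytes): word 11 present = %d\n",
               TXINFO_SIZE, has_word11(TXINFO_SIZE));
        return 0;
}
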
@@ -1571,11 +1591,11 @@ static void rt61pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1571 * TX data initialization 1591 * TX data initialization
1572 */ 1592 */
1573static void rt61pci_kick_tx_queue(struct rt2x00_dev *rt2x00dev, 1593static void rt61pci_kick_tx_queue(struct rt2x00_dev *rt2x00dev,
1574 unsigned int queue) 1594 const unsigned int queue)
1575{ 1595{
1576 u32 reg; 1596 u32 reg;
1577 1597
1578 if (queue == IEEE80211_TX_QUEUE_BEACON) { 1598 if (queue == RT2X00_BCN_QUEUE_BEACON) {
1579 /* 1599 /*
1580 * For Wi-Fi fairly generated beacons between participating 1600
1581 * stations. Set TBTT phase adaptive adjustment step to 8us. 1601 * stations. Set TBTT phase adaptive adjustment step to 8us.
@@ -1584,6 +1604,8 @@ static void rt61pci_kick_tx_queue(struct rt2x00_dev *rt2x00dev,
1584 1604
1585 rt2x00pci_register_read(rt2x00dev, TXRX_CSR9, &reg); 1605 rt2x00pci_register_read(rt2x00dev, TXRX_CSR9, &reg);
1586 if (!rt2x00_get_field32(reg, TXRX_CSR9_BEACON_GEN)) { 1606 if (!rt2x00_get_field32(reg, TXRX_CSR9_BEACON_GEN)) {
1607 rt2x00_set_field32(&reg, TXRX_CSR9_TSF_TICKING, 1);
1608 rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 1);
1587 rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 1); 1609 rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 1);
1588 rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, reg); 1610 rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, reg);
1589 } 1611 }
@@ -1599,8 +1621,6 @@ static void rt61pci_kick_tx_queue(struct rt2x00_dev *rt2x00dev,
1599 (queue == IEEE80211_TX_QUEUE_DATA2)); 1621 (queue == IEEE80211_TX_QUEUE_DATA2));
1600 rt2x00_set_field32(&reg, TX_CNTL_CSR_KICK_TX_AC3, 1622 rt2x00_set_field32(&reg, TX_CNTL_CSR_KICK_TX_AC3,
1601 (queue == IEEE80211_TX_QUEUE_DATA3)); 1623 (queue == IEEE80211_TX_QUEUE_DATA3));
1602 rt2x00_set_field32(&reg, TX_CNTL_CSR_KICK_TX_MGMT,
1603 (queue == IEEE80211_TX_QUEUE_DATA4));
1604 rt2x00pci_register_write(rt2x00dev, TX_CNTL_CSR, reg); 1624 rt2x00pci_register_write(rt2x00dev, TX_CNTL_CSR, reg);
1605} 1625}
1606 1626
@@ -1628,7 +1648,7 @@ static int rt61pci_agc_to_rssi(struct rt2x00_dev *rt2x00dev, int rxd_w1)
1628 return 0; 1648 return 0;
1629 } 1649 }
1630 1650
1631 if (rt2x00dev->rx_status.phymode == MODE_IEEE80211A) { 1651 if (rt2x00dev->rx_status.band == IEEE80211_BAND_5GHZ) {
1632 if (test_bit(CONFIG_EXTERNAL_LNA_A, &rt2x00dev->flags)) 1652 if (test_bit(CONFIG_EXTERNAL_LNA_A, &rt2x00dev->flags))
1633 offset += 14; 1653 offset += 14;
1634 1654
@@ -1648,28 +1668,35 @@ static int rt61pci_agc_to_rssi(struct rt2x00_dev *rt2x00dev, int rxd_w1)
1648 return rt2x00_get_field32(rxd_w1, RXD_W1_RSSI_AGC) * 2 - offset; 1668 return rt2x00_get_field32(rxd_w1, RXD_W1_RSSI_AGC) * 2 - offset;
1649} 1669}
1650 1670
1651static void rt61pci_fill_rxdone(struct data_entry *entry, 1671static void rt61pci_fill_rxdone(struct queue_entry *entry,
1652 struct rxdata_entry_desc *desc) 1672 struct rxdone_entry_desc *rxdesc)
1653{ 1673{
1654 __le32 *rxd = entry->priv; 1674 struct queue_entry_priv_pci_rx *priv_rx = entry->priv_data;
1655 u32 word0; 1675 u32 word0;
1656 u32 word1; 1676 u32 word1;
1657 1677
1658 rt2x00_desc_read(rxd, 0, &word0); 1678 rt2x00_desc_read(priv_rx->desc, 0, &word0);
1659 rt2x00_desc_read(rxd, 1, &word1); 1679 rt2x00_desc_read(priv_rx->desc, 1, &word1);
1660 1680
1661 desc->flags = 0; 1681 rxdesc->flags = 0;
1662 if (rt2x00_get_field32(word0, RXD_W0_CRC_ERROR)) 1682 if (rt2x00_get_field32(word0, RXD_W0_CRC_ERROR))
1663 desc->flags |= RX_FLAG_FAILED_FCS_CRC; 1683 rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC;
1664 1684
1665 /* 1685 /*
1666 * Obtain the status about this packet. 1686 * Obtain the status about this packet.
1667 */ 1687 * When the frame was received with an OFDM bitrate,
1668 desc->signal = rt2x00_get_field32(word1, RXD_W1_SIGNAL); 1688 * the signal is the PLCP value. If it was received with
1669 desc->rssi = rt61pci_agc_to_rssi(entry->ring->rt2x00dev, word1); 1689 * a CCK bitrate, the signal is the rate in 100kbit/s.
1670 desc->ofdm = rt2x00_get_field32(word0, RXD_W0_OFDM); 1690 */
1671 desc->size = rt2x00_get_field32(word0, RXD_W0_DATABYTE_COUNT); 1691 rxdesc->signal = rt2x00_get_field32(word1, RXD_W1_SIGNAL);
1672 desc->my_bss = !!rt2x00_get_field32(word0, RXD_W0_MY_BSS); 1692 rxdesc->rssi = rt61pci_agc_to_rssi(entry->queue->rt2x00dev, word1);
1693 rxdesc->size = rt2x00_get_field32(word0, RXD_W0_DATABYTE_COUNT);
1694
1695 rxdesc->dev_flags = 0;
1696 if (rt2x00_get_field32(word0, RXD_W0_OFDM))
1697 rxdesc->dev_flags |= RXDONE_SIGNAL_PLCP;
1698 if (rt2x00_get_field32(word0, RXD_W0_MY_BSS))
1699 rxdesc->dev_flags |= RXDONE_MY_BSS;
1673} 1700}
1674 1701
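
Per the comment added to rt61pci_fill_rxdone(), rxdesc->signal is ambiguous on its own: it is a PLCP code for OFDM frames and the bitrate in units of 100 kbit/s for CCK frames, which is why RXDONE_SIGNAL_PLCP is set alongside it. A minimal sketch of how a consumer might interpret the pair, assuming only the semantics stated in that comment (the flag value below is illustrative, not the real define):

#include <stdio.h>

#define RXDONE_SIGNAL_PLCP 0x01 /* illustrative flag value, not the real define */

static void print_signal(unsigned int signal, unsigned int dev_flags)
{
        if (dev_flags & RXDONE_SIGNAL_PLCP)
                printf("OFDM frame: PLCP signal code 0x%02x\n", signal);
        else
                printf("CCK frame: %u.%u Mbit/s\n", signal / 10, signal % 10);
}

int main(void)
{
        print_signal(110, 0);                   /* CCK, 11.0 Mbit/s    */
        print_signal(0x0b, RXDONE_SIGNAL_PLCP); /* OFDM, raw PLCP code */
        return 0;
}
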
1675/* 1702/*
@@ -1677,17 +1704,16 @@ static void rt61pci_fill_rxdone(struct data_entry *entry,
1677 */ 1704 */
1678static void rt61pci_txdone(struct rt2x00_dev *rt2x00dev) 1705static void rt61pci_txdone(struct rt2x00_dev *rt2x00dev)
1679{ 1706{
1680 struct data_ring *ring; 1707 struct data_queue *queue;
1681 struct data_entry *entry; 1708 struct queue_entry *entry;
1682 struct data_entry *entry_done; 1709 struct queue_entry *entry_done;
1683 __le32 *txd; 1710 struct queue_entry_priv_pci_tx *priv_tx;
1711 struct txdone_entry_desc txdesc;
1684 u32 word; 1712 u32 word;
1685 u32 reg; 1713 u32 reg;
1686 u32 old_reg; 1714 u32 old_reg;
1687 int type; 1715 int type;
1688 int index; 1716 int index;
1689 int tx_status;
1690 int retry;
1691 1717
1692 /* 1718 /*
1693 * During each loop we will compare the freshly read 1719 * During each loop we will compare the freshly read
@@ -1710,11 +1736,11 @@ static void rt61pci_txdone(struct rt2x00_dev *rt2x00dev)
1710 1736
1711 /* 1737 /*
1712 * Skip this entry when it contains an invalid 1738 * Skip this entry when it contains an invalid
1713 * ring identification number. 1739 * queue identification number.
1714 */ 1740 */
1715 type = rt2x00_get_field32(reg, STA_CSR4_PID_TYPE); 1741 type = rt2x00_get_field32(reg, STA_CSR4_PID_TYPE);
1716 ring = rt2x00lib_get_ring(rt2x00dev, type); 1742 queue = rt2x00queue_get_queue(rt2x00dev, type);
1717 if (unlikely(!ring)) 1743 if (unlikely(!queue))
1718 continue; 1744 continue;
1719 1745
1720 /* 1746 /*
@@ -1722,36 +1748,40 @@ static void rt61pci_txdone(struct rt2x00_dev *rt2x00dev)
1722 * index number. 1748 * index number.
1723 */ 1749 */
1724 index = rt2x00_get_field32(reg, STA_CSR4_PID_SUBTYPE); 1750 index = rt2x00_get_field32(reg, STA_CSR4_PID_SUBTYPE);
1725 if (unlikely(index >= ring->stats.limit)) 1751 if (unlikely(index >= queue->limit))
1726 continue; 1752 continue;
1727 1753
1728 entry = &ring->entry[index]; 1754 entry = &queue->entries[index];
1729 txd = entry->priv; 1755 priv_tx = entry->priv_data;
1730 rt2x00_desc_read(txd, 0, &word); 1756 rt2x00_desc_read(priv_tx->desc, 0, &word);
1731 1757
1732 if (rt2x00_get_field32(word, TXD_W0_OWNER_NIC) || 1758 if (rt2x00_get_field32(word, TXD_W0_OWNER_NIC) ||
1733 !rt2x00_get_field32(word, TXD_W0_VALID)) 1759 !rt2x00_get_field32(word, TXD_W0_VALID))
1734 return; 1760 return;
1735 1761
1736 entry_done = rt2x00_get_data_entry_done(ring); 1762 entry_done = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
1737 while (entry != entry_done) { 1763 while (entry != entry_done) {
1738 /* Catch up. Just report any entries we missed as 1764 /* Catch up.
1739 * failed. */ 1765 * Just report any entries we missed as failed.
1766 */
1740 WARNING(rt2x00dev, 1767 WARNING(rt2x00dev,
1741 "TX status report missed for entry %p\n", 1768 "TX status report missed for entry %d\n",
1742 entry_done); 1769 entry_done->entry_idx);
1743 rt2x00pci_txdone(rt2x00dev, entry_done, TX_FAIL_OTHER, 1770
1744 0); 1771 txdesc.status = TX_FAIL_OTHER;
1745 entry_done = rt2x00_get_data_entry_done(ring); 1772 txdesc.retry = 0;
1773
1774 rt2x00pci_txdone(rt2x00dev, entry_done, &txdesc);
1775 entry_done = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
1746 } 1776 }
1747 1777
1748 /* 1778 /*
1749 * Obtain the status about this packet. 1779 * Obtain the status about this packet.
1750 */ 1780 */
1751 tx_status = rt2x00_get_field32(reg, STA_CSR4_TX_RESULT); 1781 txdesc.status = rt2x00_get_field32(reg, STA_CSR4_TX_RESULT);
1752 retry = rt2x00_get_field32(reg, STA_CSR4_RETRY_COUNT); 1782 txdesc.retry = rt2x00_get_field32(reg, STA_CSR4_RETRY_COUNT);
1753 1783
1754 rt2x00pci_txdone(rt2x00dev, entry, tx_status, retry); 1784 rt2x00pci_txdone(rt2x00dev, entry, &txdesc);
1755 } 1785 }
1756} 1786}
1757 1787
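
rt61pci_txdone() can locate the completed entry because rt61pci_init_txentry() tagged every descriptor up front: the queue id goes into TXD_W5_PID_TYPE and the entry index into TXD_W5_PID_SUBTYPE, and the hardware echoes both in STA_CSR4. A conceptual sketch of that encode/decode round-trip, using hypothetical masks and simplified stand-ins for the set/get_field helpers (not the driver's own definitions):

#include <assert.h>
#include <stdint.h>

#define PID_TYPE_MASK    0x000000e0u  /* hypothetical field mask */
#define PID_SUBTYPE_MASK 0x0000001fu  /* hypothetical field mask */

/* Simplified mask-based field helpers (GCC/Clang builtin used for the shift). */
static uint32_t set_field32(uint32_t reg, uint32_t mask, uint32_t value)
{
        return (reg & ~mask) | ((value << __builtin_ctz(mask)) & mask);
}

static uint32_t get_field32(uint32_t reg, uint32_t mask)
{
        return (reg & mask) >> __builtin_ctz(mask);
}

int main(void)
{
        uint32_t word = 0;

        /* init_txentry side: tag the descriptor with queue 2, entry 13. */
        word = set_field32(word, PID_TYPE_MASK, 2);
        word = set_field32(word, PID_SUBTYPE_MASK, 13);

        /* txdone side: recover queue and entry from the echoed fields. */
        assert(get_field32(word, PID_TYPE_MASK) == 2);
        assert(get_field32(word, PID_SUBTYPE_MASK) == 13);
        return 0;
}
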
@@ -1906,7 +1936,7 @@ static int rt61pci_validate_eeprom(struct rt2x00_dev *rt2x00dev)
1906 rt2x00_set_field16(&word, EEPROM_RSSI_OFFSET_A_1, 0); 1936 rt2x00_set_field16(&word, EEPROM_RSSI_OFFSET_A_1, 0);
1907 rt2x00_set_field16(&word, EEPROM_RSSI_OFFSET_A_2, 0); 1937 rt2x00_set_field16(&word, EEPROM_RSSI_OFFSET_A_2, 0);
1908 rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_OFFSET_A, word); 1938 rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_OFFSET_A, word);
1909 EEPROM(rt2x00dev, "RSSI OFFSET BG: 0x%04x\n", word); 1939 EEPROM(rt2x00dev, "RSSI OFFSET A: 0x%04x\n", word);
1910 } else { 1940 } else {
1911 value = rt2x00_get_field16(word, EEPROM_RSSI_OFFSET_A_1); 1941 value = rt2x00_get_field16(word, EEPROM_RSSI_OFFSET_A_1);
1912 if (value < -10 || value > 10) 1942 if (value < -10 || value > 10)
@@ -2035,35 +2065,61 @@ static int rt61pci_init_eeprom(struct rt2x00_dev *rt2x00dev)
2035 * If the eeprom value is invalid, 2065 * If the eeprom value is invalid,
2036 * switch to default led mode. 2066 * switch to default led mode.
2037 */ 2067 */
2068#ifdef CONFIG_RT61PCI_LEDS
2038 rt2x00_eeprom_read(rt2x00dev, EEPROM_LED, &eeprom); 2069 rt2x00_eeprom_read(rt2x00dev, EEPROM_LED, &eeprom);
2070 value = rt2x00_get_field16(eeprom, EEPROM_LED_LED_MODE);
2071
2072 rt2x00dev->led_radio.rt2x00dev = rt2x00dev;
2073 rt2x00dev->led_radio.type = LED_TYPE_RADIO;
2074 rt2x00dev->led_radio.led_dev.brightness_set =
2075 rt61pci_brightness_set;
2076 rt2x00dev->led_radio.led_dev.blink_set =
2077 rt61pci_blink_set;
2078 rt2x00dev->led_radio.flags = LED_INITIALIZED;
2079
2080 rt2x00dev->led_assoc.rt2x00dev = rt2x00dev;
2081 rt2x00dev->led_assoc.type = LED_TYPE_ASSOC;
2082 rt2x00dev->led_assoc.led_dev.brightness_set =
2083 rt61pci_brightness_set;
2084 rt2x00dev->led_assoc.led_dev.blink_set =
2085 rt61pci_blink_set;
2086 rt2x00dev->led_assoc.flags = LED_INITIALIZED;
2087
2088 if (value == LED_MODE_SIGNAL_STRENGTH) {
2089 rt2x00dev->led_qual.rt2x00dev = rt2x00dev;
2090 rt2x00dev->led_qual.type = LED_TYPE_QUALITY;
2091 rt2x00dev->led_qual.led_dev.brightness_set =
2092 rt61pci_brightness_set;
2093 rt2x00dev->led_qual.led_dev.blink_set =
2094 rt61pci_blink_set;
2095 rt2x00dev->led_qual.flags = LED_INITIALIZED;
2096 }
2039 2097
2040 rt2x00dev->led_mode = rt2x00_get_field16(eeprom, EEPROM_LED_LED_MODE); 2098 rt2x00_set_field16(&rt2x00dev->led_mcu_reg, MCU_LEDCS_LED_MODE, value);
2041 2099 rt2x00_set_field16(&rt2x00dev->led_mcu_reg, MCU_LEDCS_POLARITY_GPIO_0,
2042 rt2x00_set_field16(&rt2x00dev->led_reg, MCU_LEDCS_LED_MODE,
2043 rt2x00dev->led_mode);
2044 rt2x00_set_field16(&rt2x00dev->led_reg, MCU_LEDCS_POLARITY_GPIO_0,
2045 rt2x00_get_field16(eeprom, 2100 rt2x00_get_field16(eeprom,
2046 EEPROM_LED_POLARITY_GPIO_0)); 2101 EEPROM_LED_POLARITY_GPIO_0));
2047 rt2x00_set_field16(&rt2x00dev->led_reg, MCU_LEDCS_POLARITY_GPIO_1, 2102 rt2x00_set_field16(&rt2x00dev->led_mcu_reg, MCU_LEDCS_POLARITY_GPIO_1,
2048 rt2x00_get_field16(eeprom, 2103 rt2x00_get_field16(eeprom,
2049 EEPROM_LED_POLARITY_GPIO_1)); 2104 EEPROM_LED_POLARITY_GPIO_1));
2050 rt2x00_set_field16(&rt2x00dev->led_reg, MCU_LEDCS_POLARITY_GPIO_2, 2105 rt2x00_set_field16(&rt2x00dev->led_mcu_reg, MCU_LEDCS_POLARITY_GPIO_2,
2051 rt2x00_get_field16(eeprom, 2106 rt2x00_get_field16(eeprom,
2052 EEPROM_LED_POLARITY_GPIO_2)); 2107 EEPROM_LED_POLARITY_GPIO_2));
2053 rt2x00_set_field16(&rt2x00dev->led_reg, MCU_LEDCS_POLARITY_GPIO_3, 2108 rt2x00_set_field16(&rt2x00dev->led_mcu_reg, MCU_LEDCS_POLARITY_GPIO_3,
2054 rt2x00_get_field16(eeprom, 2109 rt2x00_get_field16(eeprom,
2055 EEPROM_LED_POLARITY_GPIO_3)); 2110 EEPROM_LED_POLARITY_GPIO_3));
2056 rt2x00_set_field16(&rt2x00dev->led_reg, MCU_LEDCS_POLARITY_GPIO_4, 2111 rt2x00_set_field16(&rt2x00dev->led_mcu_reg, MCU_LEDCS_POLARITY_GPIO_4,
2057 rt2x00_get_field16(eeprom, 2112 rt2x00_get_field16(eeprom,
2058 EEPROM_LED_POLARITY_GPIO_4)); 2113 EEPROM_LED_POLARITY_GPIO_4));
2059 rt2x00_set_field16(&rt2x00dev->led_reg, MCU_LEDCS_POLARITY_ACT, 2114 rt2x00_set_field16(&rt2x00dev->led_mcu_reg, MCU_LEDCS_POLARITY_ACT,
2060 rt2x00_get_field16(eeprom, EEPROM_LED_POLARITY_ACT)); 2115 rt2x00_get_field16(eeprom, EEPROM_LED_POLARITY_ACT));
2061 rt2x00_set_field16(&rt2x00dev->led_reg, MCU_LEDCS_POLARITY_READY_BG, 2116 rt2x00_set_field16(&rt2x00dev->led_mcu_reg, MCU_LEDCS_POLARITY_READY_BG,
2062 rt2x00_get_field16(eeprom, 2117 rt2x00_get_field16(eeprom,
2063 EEPROM_LED_POLARITY_RDY_G)); 2118 EEPROM_LED_POLARITY_RDY_G));
2064 rt2x00_set_field16(&rt2x00dev->led_reg, MCU_LEDCS_POLARITY_READY_A, 2119 rt2x00_set_field16(&rt2x00dev->led_mcu_reg, MCU_LEDCS_POLARITY_READY_A,
2065 rt2x00_get_field16(eeprom, 2120 rt2x00_get_field16(eeprom,
2066 EEPROM_LED_POLARITY_RDY_A)); 2121 EEPROM_LED_POLARITY_RDY_A));
2122#endif /* CONFIG_RT61PCI_LEDS */
2067 2123
2068 return 0; 2124 return 0;
2069} 2125}
@@ -2197,7 +2253,7 @@ static void rt61pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
2197 rt2x00dev->hw->extra_tx_headroom = 0; 2253 rt2x00dev->hw->extra_tx_headroom = 0;
2198 rt2x00dev->hw->max_signal = MAX_SIGNAL; 2254 rt2x00dev->hw->max_signal = MAX_SIGNAL;
2199 rt2x00dev->hw->max_rssi = MAX_RX_SSI; 2255 rt2x00dev->hw->max_rssi = MAX_RX_SSI;
2200 rt2x00dev->hw->queues = 5; 2256 rt2x00dev->hw->queues = 4;
2201 2257
2202 SET_IEEE80211_DEV(rt2x00dev->hw, &rt2x00dev_pci(rt2x00dev)->dev); 2258 SET_IEEE80211_DEV(rt2x00dev->hw, &rt2x00dev_pci(rt2x00dev)->dev);
2203 SET_IEEE80211_PERM_ADDR(rt2x00dev->hw, 2259 SET_IEEE80211_PERM_ADDR(rt2x00dev->hw,
@@ -2214,8 +2270,8 @@ static void rt61pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
2214 /* 2270 /*
2215 * Initialize hw_mode information. 2271 * Initialize hw_mode information.
2216 */ 2272 */
2217 spec->num_modes = 2; 2273 spec->supported_bands = SUPPORT_BAND_2GHZ;
2218 spec->num_rates = 12; 2274 spec->supported_rates = SUPPORT_RATE_CCK | SUPPORT_RATE_OFDM;
2219 spec->tx_power_a = NULL; 2275 spec->tx_power_a = NULL;
2220 spec->tx_power_bg = txpower; 2276 spec->tx_power_bg = txpower;
2221 spec->tx_power_default = DEFAULT_TXPOWER; 2277 spec->tx_power_default = DEFAULT_TXPOWER;
@@ -2230,7 +2286,7 @@ static void rt61pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
2230 2286
2231 if (rt2x00_rf(&rt2x00dev->chip, RF5225) || 2287 if (rt2x00_rf(&rt2x00dev->chip, RF5225) ||
2232 rt2x00_rf(&rt2x00dev->chip, RF5325)) { 2288 rt2x00_rf(&rt2x00dev->chip, RF5325)) {
2233 spec->num_modes = 3; 2289 spec->supported_bands |= SUPPORT_BAND_5GHZ;
2234 spec->num_channels = ARRAY_SIZE(rf_vals_seq); 2290 spec->num_channels = ARRAY_SIZE(rf_vals_seq);
2235 2291
2236 txpower = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A_START); 2292 txpower = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A_START);
@@ -2262,7 +2318,7 @@ static int rt61pci_probe_hw(struct rt2x00_dev *rt2x00dev)
2262 rt61pci_probe_hw_mode(rt2x00dev); 2318 rt61pci_probe_hw_mode(rt2x00dev);
2263 2319
2264 /* 2320 /*
2265 * This device requires firmware 2321 * This device requires firmware.
2266 */ 2322 */
2267 __set_bit(DRIVER_REQUIRE_FIRMWARE, &rt2x00dev->flags); 2323 __set_bit(DRIVER_REQUIRE_FIRMWARE, &rt2x00dev->flags);
2268 2324
@@ -2277,70 +2333,6 @@ static int rt61pci_probe_hw(struct rt2x00_dev *rt2x00dev)
2277/* 2333/*
2278 * IEEE80211 stack callback functions. 2334 * IEEE80211 stack callback functions.
2279 */ 2335 */
2280static void rt61pci_configure_filter(struct ieee80211_hw *hw,
2281 unsigned int changed_flags,
2282 unsigned int *total_flags,
2283 int mc_count,
2284 struct dev_addr_list *mc_list)
2285{
2286 struct rt2x00_dev *rt2x00dev = hw->priv;
2287 u32 reg;
2288
2289 /*
2290 * Mask off any flags we are going to ignore from
2291 * the total_flags field.
2292 */
2293 *total_flags &=
2294 FIF_ALLMULTI |
2295 FIF_FCSFAIL |
2296 FIF_PLCPFAIL |
2297 FIF_CONTROL |
2298 FIF_OTHER_BSS |
2299 FIF_PROMISC_IN_BSS;
2300
2301 /*
2302 * Apply some rules to the filters:
2303 * - Some filters imply different filters to be set.
2304 * - Some things we can't filter out at all.
2305 * - Multicast filter seems to kill broadcast traffic so never use it.
2306 */
2307 *total_flags |= FIF_ALLMULTI;
2308 if (*total_flags & FIF_OTHER_BSS ||
2309 *total_flags & FIF_PROMISC_IN_BSS)
2310 *total_flags |= FIF_PROMISC_IN_BSS | FIF_OTHER_BSS;
2311
2312 /*
2313 * Check if there is any work left for us.
2314 */
2315 if (rt2x00dev->packet_filter == *total_flags)
2316 return;
2317 rt2x00dev->packet_filter = *total_flags;
2318
2319 /*
2320 * Start configuration steps.
2321 * Note that the version error will always be dropped
2322 * and broadcast frames will always be accepted since
2323 * there is no filter for it at this time.
2324 */
2325 rt2x00pci_register_read(rt2x00dev, TXRX_CSR0, &reg);
2326 rt2x00_set_field32(&reg, TXRX_CSR0_DROP_CRC,
2327 !(*total_flags & FIF_FCSFAIL));
2328 rt2x00_set_field32(&reg, TXRX_CSR0_DROP_PHYSICAL,
2329 !(*total_flags & FIF_PLCPFAIL));
2330 rt2x00_set_field32(&reg, TXRX_CSR0_DROP_CONTROL,
2331 !(*total_flags & FIF_CONTROL));
2332 rt2x00_set_field32(&reg, TXRX_CSR0_DROP_NOT_TO_ME,
2333 !(*total_flags & FIF_PROMISC_IN_BSS));
2334 rt2x00_set_field32(&reg, TXRX_CSR0_DROP_TO_DS,
2335 !(*total_flags & FIF_PROMISC_IN_BSS));
2336 rt2x00_set_field32(&reg, TXRX_CSR0_DROP_VERSION_ERROR, 1);
2337 rt2x00_set_field32(&reg, TXRX_CSR0_DROP_MULTICAST,
2338 !(*total_flags & FIF_ALLMULTI));
2339 rt2x00_set_field32(&reg, TXRX_CSR0_DROP_BORADCAST, 0);
2340 rt2x00_set_field32(&reg, TXRX_CSR0_DROP_ACK_CTS, 1);
2341 rt2x00pci_register_write(rt2x00dev, TXRX_CSR0, reg);
2342}
2343
2344static int rt61pci_set_retry_limit(struct ieee80211_hw *hw, 2336static int rt61pci_set_retry_limit(struct ieee80211_hw *hw,
2345 u32 short_retry, u32 long_retry) 2337 u32 short_retry, u32 long_retry)
2346{ 2338{
@@ -2369,66 +2361,72 @@ static u64 rt61pci_get_tsf(struct ieee80211_hw *hw)
2369 return tsf; 2361 return tsf;
2370} 2362}
2371 2363
2372static void rt61pci_reset_tsf(struct ieee80211_hw *hw)
2373{
2374 struct rt2x00_dev *rt2x00dev = hw->priv;
2375
2376 rt2x00pci_register_write(rt2x00dev, TXRX_CSR12, 0);
2377 rt2x00pci_register_write(rt2x00dev, TXRX_CSR13, 0);
2378}
2379
2380static int rt61pci_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb, 2364static int rt61pci_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb,
2381 struct ieee80211_tx_control *control) 2365 struct ieee80211_tx_control *control)
2382{ 2366{
2383 struct rt2x00_dev *rt2x00dev = hw->priv; 2367 struct rt2x00_dev *rt2x00dev = hw->priv;
2384 struct skb_desc *desc; 2368 struct rt2x00_intf *intf = vif_to_intf(control->vif);
2385 struct data_ring *ring; 2369 struct skb_frame_desc *skbdesc;
2386 struct data_entry *entry; 2370 unsigned int beacon_base;
2371 u32 reg;
2387 2372
2388 /* 2373 if (unlikely(!intf->beacon))
2389 * Just in case the ieee80211 doesn't set this, 2374 return -ENOBUFS;
2390 * but we need this queue set for the descriptor
2391 * initialization.
2392 */
2393 control->queue = IEEE80211_TX_QUEUE_BEACON;
2394 ring = rt2x00lib_get_ring(rt2x00dev, control->queue);
2395 entry = rt2x00_get_data_entry(ring);
2396 2375
2397 /* 2376 /*
2398 * We need to append the descriptor in front of the 2377 * We need to append the descriptor in front of the
2399 * beacon frame. 2378 * beacon frame.
2400 */ 2379 */
2401 if (skb_headroom(skb) < TXD_DESC_SIZE) { 2380 if (skb_headroom(skb) < intf->beacon->queue->desc_size) {
2402 if (pskb_expand_head(skb, TXD_DESC_SIZE, 0, GFP_ATOMIC)) 2381 if (pskb_expand_head(skb, intf->beacon->queue->desc_size,
2382 0, GFP_ATOMIC))
2403 return -ENOMEM; 2383 return -ENOMEM;
2404 } 2384 }
2405 2385
2406 /* 2386 /*
2407 * Add the descriptor in front of the skb. 2387 * Add the descriptor in front of the skb.
2408 */ 2388 */
2409 skb_push(skb, ring->desc_size); 2389 skb_push(skb, intf->beacon->queue->desc_size);
2410 memset(skb->data, 0, ring->desc_size); 2390 memset(skb->data, 0, intf->beacon->queue->desc_size);
2411 2391
2412 /* 2392 /*
2413 * Fill in skb descriptor 2393 * Fill in skb descriptor
2414 */ 2394 */
2415 desc = get_skb_desc(skb); 2395 skbdesc = get_skb_frame_desc(skb);
2416 desc->desc_len = ring->desc_size; 2396 memset(skbdesc, 0, sizeof(*skbdesc));
2417 desc->data_len = skb->len - ring->desc_size; 2397 skbdesc->flags |= FRAME_DESC_DRIVER_GENERATED;
2418 desc->desc = skb->data; 2398 skbdesc->data = skb->data + intf->beacon->queue->desc_size;
2419 desc->data = skb->data + ring->desc_size; 2399 skbdesc->data_len = skb->len - intf->beacon->queue->desc_size;
2420 desc->ring = ring; 2400 skbdesc->desc = skb->data;
2421 desc->entry = entry; 2401 skbdesc->desc_len = intf->beacon->queue->desc_size;
2402 skbdesc->entry = intf->beacon;
2403
2404 /*
2405 * Disable beaconing while we are reloading the beacon data,
2406 * otherwise we might be sending out invalid data.
2407 */
2408 rt2x00pci_register_read(rt2x00dev, TXRX_CSR9, &reg);
2409 rt2x00_set_field32(&reg, TXRX_CSR9_TSF_TICKING, 0);
2410 rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 0);
2411 rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 0);
2412 rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, reg);
2422 2413
2414 /*
2415 * mac80211 doesn't provide the control->queue variable
2416 * for beacons. Set our own queue identification so
2417 * it can be used during descriptor initialization.
2418 */
2419 control->queue = RT2X00_BCN_QUEUE_BEACON;
2423 rt2x00lib_write_tx_desc(rt2x00dev, skb, control); 2420 rt2x00lib_write_tx_desc(rt2x00dev, skb, control);
2424 2421
2425 /* 2422 /*
2426 * Write entire beacon with descriptor to register, 2423 * Write entire beacon with descriptor to register,
2427 * and kick the beacon generator. 2424 * and kick the beacon generator.
2428 */ 2425 */
2429 rt2x00pci_register_multiwrite(rt2x00dev, HW_BEACON_BASE0, 2426 beacon_base = HW_BEACON_OFFSET(intf->beacon->entry_idx);
2427 rt2x00pci_register_multiwrite(rt2x00dev, beacon_base,
2430 skb->data, skb->len); 2428 skb->data, skb->len);
2431 rt61pci_kick_tx_queue(rt2x00dev, IEEE80211_TX_QUEUE_BEACON); 2429 rt61pci_kick_tx_queue(rt2x00dev, control->queue);
2432 2430
2433 return 0; 2431 return 0;
2434} 2432}
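
rt61pci_beacon_update() above builds one contiguous blob: the zeroed-then-filled descriptor sits at the front of the buffer, the 802.11 frame follows at desc_size bytes, and the whole thing goes to the per-interface beacon base in a single multiwrite. A standalone illustration of that layout, assuming the 6-word beacon descriptor size from this patch (plain C, not driver code):

#include <stdio.h>
#include <string.h>

#define DESC_SIZE 24    /* 6 * sizeof(__le32): the beacon queue desc_size */

int main(void)
{
        char frame[64] = "802.11 beacon frame payload";
        unsigned char buf[DESC_SIZE + sizeof(frame)];

        memset(buf, 0, DESC_SIZE);                     /* zeroed descriptor */
        memcpy(buf + DESC_SIZE, frame, sizeof(frame)); /* frame follows it  */

        printf("desc at offset 0, data at offset %d, total %zu bytes\n",
               DESC_SIZE, sizeof(buf));
        return 0;
}
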
@@ -2441,14 +2439,13 @@ static const struct ieee80211_ops rt61pci_mac80211_ops = {
2441 .remove_interface = rt2x00mac_remove_interface, 2439 .remove_interface = rt2x00mac_remove_interface,
2442 .config = rt2x00mac_config, 2440 .config = rt2x00mac_config,
2443 .config_interface = rt2x00mac_config_interface, 2441 .config_interface = rt2x00mac_config_interface,
2444 .configure_filter = rt61pci_configure_filter, 2442 .configure_filter = rt2x00mac_configure_filter,
2445 .get_stats = rt2x00mac_get_stats, 2443 .get_stats = rt2x00mac_get_stats,
2446 .set_retry_limit = rt61pci_set_retry_limit, 2444 .set_retry_limit = rt61pci_set_retry_limit,
2447 .bss_info_changed = rt2x00mac_bss_info_changed, 2445 .bss_info_changed = rt2x00mac_bss_info_changed,
2448 .conf_tx = rt2x00mac_conf_tx, 2446 .conf_tx = rt2x00mac_conf_tx,
2449 .get_tx_stats = rt2x00mac_get_tx_stats, 2447 .get_tx_stats = rt2x00mac_get_tx_stats,
2450 .get_tsf = rt61pci_get_tsf, 2448 .get_tsf = rt61pci_get_tsf,
2451 .reset_tsf = rt61pci_reset_tsf,
2452 .beacon_update = rt61pci_beacon_update, 2449 .beacon_update = rt61pci_beacon_update,
2453}; 2450};
2454 2451
@@ -2456,6 +2453,7 @@ static const struct rt2x00lib_ops rt61pci_rt2x00_ops = {
2456 .irq_handler = rt61pci_interrupt, 2453 .irq_handler = rt61pci_interrupt,
2457 .probe_hw = rt61pci_probe_hw, 2454 .probe_hw = rt61pci_probe_hw,
2458 .get_firmware_name = rt61pci_get_firmware_name, 2455 .get_firmware_name = rt61pci_get_firmware_name,
2456 .get_firmware_crc = rt61pci_get_firmware_crc,
2459 .load_firmware = rt61pci_load_firmware, 2457 .load_firmware = rt61pci_load_firmware,
2460 .initialize = rt2x00pci_initialize, 2458 .initialize = rt2x00pci_initialize,
2461 .uninitialize = rt2x00pci_uninitialize, 2459 .uninitialize = rt2x00pci_uninitialize,
@@ -2470,19 +2468,42 @@ static const struct rt2x00lib_ops rt61pci_rt2x00_ops = {
2470 .write_tx_data = rt2x00pci_write_tx_data, 2468 .write_tx_data = rt2x00pci_write_tx_data,
2471 .kick_tx_queue = rt61pci_kick_tx_queue, 2469 .kick_tx_queue = rt61pci_kick_tx_queue,
2472 .fill_rxdone = rt61pci_fill_rxdone, 2470 .fill_rxdone = rt61pci_fill_rxdone,
2473 .config_mac_addr = rt61pci_config_mac_addr, 2471 .config_filter = rt61pci_config_filter,
2474 .config_bssid = rt61pci_config_bssid, 2472 .config_intf = rt61pci_config_intf,
2475 .config_type = rt61pci_config_type, 2473 .config_erp = rt61pci_config_erp,
2476 .config_preamble = rt61pci_config_preamble,
2477 .config = rt61pci_config, 2474 .config = rt61pci_config,
2478}; 2475};
2479 2476
2477static const struct data_queue_desc rt61pci_queue_rx = {
2478 .entry_num = RX_ENTRIES,
2479 .data_size = DATA_FRAME_SIZE,
2480 .desc_size = RXD_DESC_SIZE,
2481 .priv_size = sizeof(struct queue_entry_priv_pci_rx),
2482};
2483
2484static const struct data_queue_desc rt61pci_queue_tx = {
2485 .entry_num = TX_ENTRIES,
2486 .data_size = DATA_FRAME_SIZE,
2487 .desc_size = TXD_DESC_SIZE,
2488 .priv_size = sizeof(struct queue_entry_priv_pci_tx),
2489};
2490
2491static const struct data_queue_desc rt61pci_queue_bcn = {
2492 .entry_num = 4 * BEACON_ENTRIES,
2493 .data_size = MGMT_FRAME_SIZE,
2494 .desc_size = TXINFO_SIZE,
2495 .priv_size = sizeof(struct queue_entry_priv_pci_tx),
2496};
2497
2480static const struct rt2x00_ops rt61pci_ops = { 2498static const struct rt2x00_ops rt61pci_ops = {
2481 .name = KBUILD_MODNAME, 2499 .name = KBUILD_MODNAME,
2482 .rxd_size = RXD_DESC_SIZE, 2500 .max_sta_intf = 1,
2483 .txd_size = TXD_DESC_SIZE, 2501 .max_ap_intf = 4,
2484 .eeprom_size = EEPROM_SIZE, 2502 .eeprom_size = EEPROM_SIZE,
2485 .rf_size = RF_SIZE, 2503 .rf_size = RF_SIZE,
2504 .rx = &rt61pci_queue_rx,
2505 .tx = &rt61pci_queue_tx,
2506 .bcn = &rt61pci_queue_bcn,
2486 .lib = &rt61pci_rt2x00_ops, 2507 .lib = &rt61pci_rt2x00_ops,
2487 .hw = &rt61pci_mac80211_ops, 2508 .hw = &rt61pci_mac80211_ops,
2488#ifdef CONFIG_RT2X00_LIB_DEBUGFS 2509#ifdef CONFIG_RT2X00_LIB_DEBUGFS
diff --git a/drivers/net/wireless/rt2x00/rt61pci.h b/drivers/net/wireless/rt2x00/rt61pci.h
index 4c6524eedad0..3511bba7ff65 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.h
+++ b/drivers/net/wireless/rt2x00/rt61pci.h
@@ -1,5 +1,5 @@
1/* 1/*
2 Copyright (C) 2004 - 2007 rt2x00 SourceForge Project 2 Copyright (C) 2004 - 2008 rt2x00 SourceForge Project
3 <http://rt2x00.serialmonkey.com> 3 <http://rt2x00.serialmonkey.com>
4 4
5 This program is free software; you can redistribute it and/or modify 5 This program is free software; you can redistribute it and/or modify
@@ -161,7 +161,9 @@ struct hw_pairwise_ta_entry {
161#define HW_BEACON_BASE1 0x2d00 161#define HW_BEACON_BASE1 0x2d00
162#define HW_BEACON_BASE2 0x2e00 162#define HW_BEACON_BASE2 0x2e00
163#define HW_BEACON_BASE3 0x2f00 163#define HW_BEACON_BASE3 0x2f00
164#define HW_BEACON_OFFSET 0x0100 164
165#define HW_BEACON_OFFSET(__index) \
166 ( HW_BEACON_BASE0 + (__index * 0x0100) )
165 167
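
A quick standalone check of the new HW_BEACON_OFFSET() helper: with the 0x0100 stride it walks the four beacon base registers listed above. HW_BEACON_BASE0 is assumed here to be 0x2c00, one stride below HW_BEACON_BASE1 at 0x2d00:

#include <assert.h>

#define HW_BEACON_BASE0           0x2c00  /* assumed, see lead-in above */
#define HW_BEACON_OFFSET(__index) ( HW_BEACON_BASE0 + (__index * 0x0100) )

int main(void)
{
        assert(HW_BEACON_OFFSET(0) == 0x2c00);
        assert(HW_BEACON_OFFSET(1) == 0x2d00);  /* HW_BEACON_BASE1 */
        assert(HW_BEACON_OFFSET(2) == 0x2e00);  /* HW_BEACON_BASE2 */
        assert(HW_BEACON_OFFSET(3) == 0x2f00);  /* HW_BEACON_BASE3 */
        return 0;
}
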
166/* 168/*
167 * HOST-MCU shared memory. 169 * HOST-MCU shared memory.
@@ -234,6 +236,11 @@ struct hw_pairwise_ta_entry {
234 236
235/* 237/*
236 * MAC_CSR3: STA MAC register 1. 238 * MAC_CSR3: STA MAC register 1.
239 * UNICAST_TO_ME_MASK:
240 * Used to mask off bits from byte 5 of the MAC address
241 * to determine the UNICAST_TO_ME bit for RX frames.
242 * The full mask is complemented by BSS_ID_MASK:
243 * MASK = BSS_ID_MASK & UNICAST_TO_ME_MASK
237 */ 244 */
238#define MAC_CSR3 0x300c 245#define MAC_CSR3 0x300c
239#define MAC_CSR3_BYTE4 FIELD32(0x000000ff) 246#define MAC_CSR3_BYTE4 FIELD32(0x000000ff)
@@ -251,7 +258,14 @@ struct hw_pairwise_ta_entry {
251 258
252/* 259/*
253 * MAC_CSR5: BSSID register 1. 260 * MAC_CSR5: BSSID register 1.
254 * BSS_ID_MASK: 3: one BSSID, 0: 4 BSSID, 2 or 1: 2 BSSID. 261 * BSS_ID_MASK:
262 * This mask is used to mask off bits 0 and 1 of byte 5 of the
263 * BSSID. This will make sure that those bits will be ignored
264 * when determining the MY_BSS of RX frames.
265 * 0: 1-BSSID mode (BSS index = 0)
266 * 1: 2-BSSID mode (BSS index: Byte5, bit 0)
267 * 2: 2-BSSID mode (BSS index: byte5, bit 1)
268 * 3: 4-BSSID mode (BSS index: byte5, bit 0 - 1)
255 */ 269 */
256#define MAC_CSR5 0x3014 270#define MAC_CSR5 0x3014
257#define MAC_CSR5_BYTE4 FIELD32(0x000000ff) 271#define MAC_CSR5_BYTE4 FIELD32(0x000000ff)
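
Following the updated BSS_ID_MASK comment above, the mask value selects how many low bits of BSSID byte 5 are ignored for MY_BSS matching, which is what lets one device answer for several BSSIDs that differ only in those bits (mode 3 covers four). An illustrative sketch of that comparison, based only on the comment's description and not on driver code:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool my_bss(uint8_t rx_byte5, uint8_t own_byte5, unsigned int bss_id_mask)
{
        /* mode 0 ignores nothing, 1 ignores bit 0, 2 ignores bit 1, 3 ignores bits 0-1 */
        static const uint8_t ignore[] = { 0x00, 0x01, 0x02, 0x03 };
        uint8_t keep = (uint8_t)~ignore[bss_id_mask & 3];

        return (rx_byte5 & keep) == (own_byte5 & keep);
}

int main(void)
{
        /* In 4-BSSID mode, byte-5 values 0x40..0x43 all count as "my BSS". */
        printf("mode 3, 0x42 vs 0x40: %d\n", my_bss(0x42, 0x40, 3));
        /* In 1-BSSID mode the same frame is rejected. */
        printf("mode 0, 0x42 vs 0x40: %d\n", my_bss(0x42, 0x40, 0));
        return 0;
}
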
@@ -391,7 +405,7 @@ struct hw_pairwise_ta_entry {
391#define TXRX_CSR0_DROP_TO_DS FIELD32(0x00200000) 405#define TXRX_CSR0_DROP_TO_DS FIELD32(0x00200000)
392#define TXRX_CSR0_DROP_VERSION_ERROR FIELD32(0x00400000) 406#define TXRX_CSR0_DROP_VERSION_ERROR FIELD32(0x00400000)
393#define TXRX_CSR0_DROP_MULTICAST FIELD32(0x00800000) 407#define TXRX_CSR0_DROP_MULTICAST FIELD32(0x00800000)
394#define TXRX_CSR0_DROP_BORADCAST FIELD32(0x01000000) 408#define TXRX_CSR0_DROP_BROADCAST FIELD32(0x01000000)
395#define TXRX_CSR0_DROP_ACK_CTS FIELD32(0x02000000) 409#define TXRX_CSR0_DROP_ACK_CTS FIELD32(0x02000000)
396#define TXRX_CSR0_TX_WITHOUT_WAITING FIELD32(0x04000000) 410#define TXRX_CSR0_TX_WITHOUT_WAITING FIELD32(0x04000000)
397 411
@@ -866,7 +880,7 @@ struct hw_pairwise_ta_entry {
866#define TX_CNTL_CSR_ABORT_TX_MGMT FIELD32(0x00100000) 880#define TX_CNTL_CSR_ABORT_TX_MGMT FIELD32(0x00100000)
867 881
868/* 882/*
869 * LOAD_TX_RING_CSR: Load RX de 883 * LOAD_TX_RING_CSR: Load TX descriptor
870 */ 884 */
871#define LOAD_TX_RING_CSR 0x3434 885#define LOAD_TX_RING_CSR 0x3434
872#define LOAD_TX_RING_CSR_LOAD_TXD_AC0 FIELD32(0x00000001) 886#define LOAD_TX_RING_CSR_LOAD_TXD_AC0 FIELD32(0x00000001)
@@ -1116,10 +1130,10 @@ struct hw_pairwise_ta_entry {
1116#define EEPROM_MAC_ADDR_0 0x0002 1130#define EEPROM_MAC_ADDR_0 0x0002
1117#define EEPROM_MAC_ADDR_BYTE0 FIELD16(0x00ff) 1131#define EEPROM_MAC_ADDR_BYTE0 FIELD16(0x00ff)
1118#define EEPROM_MAC_ADDR_BYTE1 FIELD16(0xff00) 1132#define EEPROM_MAC_ADDR_BYTE1 FIELD16(0xff00)
1119#define EEPROM_MAC_ADDR1 0x0004 1133#define EEPROM_MAC_ADDR1 0x0003
1120#define EEPROM_MAC_ADDR_BYTE2 FIELD16(0x00ff) 1134#define EEPROM_MAC_ADDR_BYTE2 FIELD16(0x00ff)
1121#define EEPROM_MAC_ADDR_BYTE3 FIELD16(0xff00) 1135#define EEPROM_MAC_ADDR_BYTE3 FIELD16(0xff00)
1122#define EEPROM_MAC_ADDR_2 0x0006 1136#define EEPROM_MAC_ADDR_2 0x0004
1123#define EEPROM_MAC_ADDR_BYTE4 FIELD16(0x00ff) 1137#define EEPROM_MAC_ADDR_BYTE4 FIELD16(0x00ff)
1124#define EEPROM_MAC_ADDR_BYTE5 FIELD16(0xff00) 1138#define EEPROM_MAC_ADDR_BYTE5 FIELD16(0xff00)
1125 1139
@@ -1247,6 +1261,7 @@ struct hw_pairwise_ta_entry {
1247 * DMA descriptor defines. 1261 * DMA descriptor defines.
1248 */ 1262 */
1249#define TXD_DESC_SIZE ( 16 * sizeof(__le32) ) 1263#define TXD_DESC_SIZE ( 16 * sizeof(__le32) )
1264#define TXINFO_SIZE ( 6 * sizeof(__le32) )
1250#define RXD_DESC_SIZE ( 16 * sizeof(__le32) ) 1265#define RXD_DESC_SIZE ( 16 * sizeof(__le32) )
1251 1266
1252/* 1267/*
@@ -1440,8 +1455,8 @@ struct hw_pairwise_ta_entry {
1440#define RXD_W15_RESERVED FIELD32(0xffffffff) 1455#define RXD_W15_RESERVED FIELD32(0xffffffff)
1441 1456
1442/* 1457/*
1443 * Macro's for converting txpower from EEPROM to dscape value 1458 * Macros for converting txpower from EEPROM to mac80211 value
1444 * and from dscape value to register value. 1459 * and from mac80211 value to register value.
1445 */ 1460 */
1446#define MIN_TXPOWER 0 1461#define MIN_TXPOWER 0
1447#define MAX_TXPOWER 31 1462#define MAX_TXPOWER 31
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index 3909cf42f472..a9efe25f1ea7 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -1,5 +1,5 @@
1/* 1/*
2 Copyright (C) 2004 - 2007 rt2x00 SourceForge Project 2 Copyright (C) 2004 - 2008 rt2x00 SourceForge Project
3 <http://rt2x00.serialmonkey.com> 3 <http://rt2x00.serialmonkey.com>
4 4
5 This program is free software; you can redistribute it and/or modify 5 This program is free software; you can redistribute it and/or modify
@@ -24,6 +24,7 @@
24 Supported chipsets: rt2571W & rt2671. 24 Supported chipsets: rt2571W & rt2671.
25 */ 25 */
26 26
27#include <linux/crc-itu-t.h>
27#include <linux/delay.h> 28#include <linux/delay.h>
28#include <linux/etherdevice.h> 29#include <linux/etherdevice.h>
29#include <linux/init.h> 30#include <linux/init.h>
@@ -278,85 +279,158 @@ static const struct rt2x00debug rt73usb_rt2x00debug = {
278}; 279};
279#endif /* CONFIG_RT2X00_LIB_DEBUGFS */ 280#endif /* CONFIG_RT2X00_LIB_DEBUGFS */
280 281
281/* 282#ifdef CONFIG_RT73USB_LEDS
282 * Configuration handlers. 283static void rt73usb_brightness_set(struct led_classdev *led_cdev,
283 */ 284 enum led_brightness brightness)
284static void rt73usb_config_mac_addr(struct rt2x00_dev *rt2x00dev, __le32 *mac)
285{ 285{
286 u32 tmp; 286 struct rt2x00_led *led =
287 287 container_of(led_cdev, struct rt2x00_led, led_dev);
288 tmp = le32_to_cpu(mac[1]); 288 unsigned int enabled = brightness != LED_OFF;
289 rt2x00_set_field32(&tmp, MAC_CSR3_UNICAST_TO_ME_MASK, 0xff); 289 unsigned int a_mode =
290 mac[1] = cpu_to_le32(tmp); 290 (enabled && led->rt2x00dev->curr_band == IEEE80211_BAND_5GHZ);
291 291 unsigned int bg_mode =
292 rt73usb_register_multiwrite(rt2x00dev, MAC_CSR2, mac, 292 (enabled && led->rt2x00dev->curr_band == IEEE80211_BAND_2GHZ);
293 (2 * sizeof(__le32))); 293
294 if (led->type == LED_TYPE_RADIO) {
295 rt2x00_set_field16(&led->rt2x00dev->led_mcu_reg,
296 MCU_LEDCS_RADIO_STATUS, enabled);
297
298 rt2x00usb_vendor_request_sw(led->rt2x00dev, USB_LED_CONTROL,
299 0, led->rt2x00dev->led_mcu_reg,
300 REGISTER_TIMEOUT);
301 } else if (led->type == LED_TYPE_ASSOC) {
302 rt2x00_set_field16(&led->rt2x00dev->led_mcu_reg,
303 MCU_LEDCS_LINK_BG_STATUS, bg_mode);
304 rt2x00_set_field16(&led->rt2x00dev->led_mcu_reg,
305 MCU_LEDCS_LINK_A_STATUS, a_mode);
306
307 rt2x00usb_vendor_request_sw(led->rt2x00dev, USB_LED_CONTROL,
308 0, led->rt2x00dev->led_mcu_reg,
309 REGISTER_TIMEOUT);
310 } else if (led->type == LED_TYPE_QUALITY) {
311 /*
312 * The brightness is divided into 6 levels (0 - 5),
313 * which means we need to convert the brightness
314 * argument into the matching level within that range.
315 */
316 rt2x00usb_vendor_request_sw(led->rt2x00dev, USB_LED_CONTROL,
317 brightness / (LED_FULL / 6),
318 led->rt2x00dev->led_mcu_reg,
319 REGISTER_TIMEOUT);
320 }
294} 321}
295 322
296static void rt73usb_config_bssid(struct rt2x00_dev *rt2x00dev, __le32 *bssid) 323static int rt73usb_blink_set(struct led_classdev *led_cdev,
324 unsigned long *delay_on,
325 unsigned long *delay_off)
297{ 326{
298 u32 tmp; 327 struct rt2x00_led *led =
328 container_of(led_cdev, struct rt2x00_led, led_dev);
329 u32 reg;
299 330
300 tmp = le32_to_cpu(bssid[1]); 331 rt73usb_register_read(led->rt2x00dev, MAC_CSR14, &reg);
301 rt2x00_set_field32(&tmp, MAC_CSR5_BSS_ID_MASK, 3); 332 rt2x00_set_field32(&reg, MAC_CSR14_ON_PERIOD, *delay_on);
302 bssid[1] = cpu_to_le32(tmp); 333 rt2x00_set_field32(&reg, MAC_CSR14_OFF_PERIOD, *delay_off);
334 rt73usb_register_write(led->rt2x00dev, MAC_CSR14, reg);
303 335
304 rt73usb_register_multiwrite(rt2x00dev, MAC_CSR4, bssid, 336 return 0;
305 (2 * sizeof(__le32)));
306} 337}
338#endif /* CONFIG_RT73USB_LEDS */
307 339
308static void rt73usb_config_type(struct rt2x00_dev *rt2x00dev, const int type, 340/*
309 const int tsf_sync) 341 * Configuration handlers.
342 */
343static void rt73usb_config_filter(struct rt2x00_dev *rt2x00dev,
344 const unsigned int filter_flags)
310{ 345{
311 u32 reg; 346 u32 reg;
312 347
313 /* 348 /*
314 * Clear current synchronisation setup. 349 * Start configuration steps.
315 * For the Beacon base registers we only need to clear 350 * Note that the version error will always be dropped
316 * the first byte since that byte contains the VALID and OWNER 351 * and broadcast frames will always be accepted since
317 * bits which (when set to 0) will invalidate the entire beacon. 352 * there is no filter for it at this time.
318 */
319 rt73usb_register_write(rt2x00dev, TXRX_CSR9, 0);
320 rt73usb_register_write(rt2x00dev, HW_BEACON_BASE0, 0);
321 rt73usb_register_write(rt2x00dev, HW_BEACON_BASE1, 0);
322 rt73usb_register_write(rt2x00dev, HW_BEACON_BASE2, 0);
323 rt73usb_register_write(rt2x00dev, HW_BEACON_BASE3, 0);
324
325 /*
326 * Enable synchronisation.
327 */ 353 */
328 rt73usb_register_read(rt2x00dev, TXRX_CSR9, &reg); 354 rt73usb_register_read(rt2x00dev, TXRX_CSR0, &reg);
329 rt2x00_set_field32(&reg, TXRX_CSR9_TSF_TICKING, 1); 355 rt2x00_set_field32(&reg, TXRX_CSR0_DROP_CRC,
330 rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 356 !(filter_flags & FIF_FCSFAIL));
331 (tsf_sync == TSF_SYNC_BEACON)); 357 rt2x00_set_field32(&reg, TXRX_CSR0_DROP_PHYSICAL,
332 rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 0); 358 !(filter_flags & FIF_PLCPFAIL));
333 rt2x00_set_field32(&reg, TXRX_CSR9_TSF_SYNC, tsf_sync); 359 rt2x00_set_field32(&reg, TXRX_CSR0_DROP_CONTROL,
334 rt73usb_register_write(rt2x00dev, TXRX_CSR9, reg); 360 !(filter_flags & FIF_CONTROL));
361 rt2x00_set_field32(&reg, TXRX_CSR0_DROP_NOT_TO_ME,
362 !(filter_flags & FIF_PROMISC_IN_BSS));
363 rt2x00_set_field32(&reg, TXRX_CSR0_DROP_TO_DS,
364 !(filter_flags & FIF_PROMISC_IN_BSS) &&
365 !rt2x00dev->intf_ap_count);
366 rt2x00_set_field32(&reg, TXRX_CSR0_DROP_VERSION_ERROR, 1);
367 rt2x00_set_field32(&reg, TXRX_CSR0_DROP_MULTICAST,
368 !(filter_flags & FIF_ALLMULTI));
369 rt2x00_set_field32(&reg, TXRX_CSR0_DROP_BROADCAST, 0);
370 rt2x00_set_field32(&reg, TXRX_CSR0_DROP_ACK_CTS,
371 !(filter_flags & FIF_CONTROL));
372 rt73usb_register_write(rt2x00dev, TXRX_CSR0, reg);
335} 373}
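
rt73usb_config_filter() above maps mac80211's FIF_* "pass this traffic" flags onto the hardware's TXRX_CSR0 DROP_* bits, so each register field is the logical inverse of the corresponding flag test. A minimal sketch of that inversion with illustrative flag values (not the real mac80211 constants, and only a subset of the bits):

#include <stdbool.h>
#include <stdio.h>

#define FIF_FCSFAIL  0x01  /* illustrative values only */
#define FIF_PLCPFAIL 0x02
#define FIF_CONTROL  0x04

struct drop_bits {
        bool drop_crc;      /* TXRX_CSR0_DROP_CRC      */
        bool drop_physical; /* TXRX_CSR0_DROP_PHYSICAL */
        bool drop_control;  /* TXRX_CSR0_DROP_CONTROL  */
};

static struct drop_bits build_filter(unsigned int filter_flags)
{
        struct drop_bits d = {
                .drop_crc      = !(filter_flags & FIF_FCSFAIL),
                .drop_physical = !(filter_flags & FIF_PLCPFAIL),
                .drop_control  = !(filter_flags & FIF_CONTROL),
        };
        return d;
}

int main(void)
{
        /* A monitor-style configuration that wants FCS failures delivered. */
        struct drop_bits d = build_filter(FIF_FCSFAIL);

        printf("drop_crc=%d drop_physical=%d drop_control=%d\n",
               d.drop_crc, d.drop_physical, d.drop_control);
        return 0;
}
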
336 374
337static void rt73usb_config_preamble(struct rt2x00_dev *rt2x00dev, 375static void rt73usb_config_intf(struct rt2x00_dev *rt2x00dev,
338 const int short_preamble, 376 struct rt2x00_intf *intf,
339 const int ack_timeout, 377 struct rt2x00intf_conf *conf,
340 const int ack_consume_time) 378 const unsigned int flags)
341{ 379{
380 unsigned int beacon_base;
342 u32 reg; 381 u32 reg;
343 382
344 /* 383 if (flags & CONFIG_UPDATE_TYPE) {
345 * When in atomic context, reschedule and let rt2x00lib 384 /*
346 * call this function again. 385 * Clear current synchronisation setup.
347 */ 386 * For the Beacon base registers we only need to clear
348 if (in_atomic()) { 387 * the first byte since that byte contains the VALID and OWNER
349 queue_work(rt2x00dev->hw->workqueue, &rt2x00dev->config_work); 388 * bits which (when set to 0) will invalidate the entire beacon.
350 return; 389 */
390 beacon_base = HW_BEACON_OFFSET(intf->beacon->entry_idx);
391 rt73usb_register_write(rt2x00dev, beacon_base, 0);
392
393 /*
394 * Enable synchronisation.
395 */
396 rt73usb_register_read(rt2x00dev, TXRX_CSR9, &reg);
397 rt2x00_set_field32(&reg, TXRX_CSR9_TSF_TICKING, 1);
398 rt2x00_set_field32(&reg, TXRX_CSR9_TSF_SYNC, conf->sync);
399 rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 1);
400 rt73usb_register_write(rt2x00dev, TXRX_CSR9, reg);
351 } 401 }
352 402
403 if (flags & CONFIG_UPDATE_MAC) {
404 reg = le32_to_cpu(conf->mac[1]);
405 rt2x00_set_field32(&reg, MAC_CSR3_UNICAST_TO_ME_MASK, 0xff);
406 conf->mac[1] = cpu_to_le32(reg);
407
408 rt73usb_register_multiwrite(rt2x00dev, MAC_CSR2,
409 conf->mac, sizeof(conf->mac));
410 }
411
412 if (flags & CONFIG_UPDATE_BSSID) {
413 reg = le32_to_cpu(conf->bssid[1]);
414 rt2x00_set_field32(&reg, MAC_CSR5_BSS_ID_MASK, 3);
415 conf->bssid[1] = cpu_to_le32(reg);
416
417 rt73usb_register_multiwrite(rt2x00dev, MAC_CSR4,
418 conf->bssid, sizeof(conf->bssid));
419 }
420}
421
422static void rt73usb_config_erp(struct rt2x00_dev *rt2x00dev,
423 struct rt2x00lib_erp *erp)
424{
425 u32 reg;
426
353 rt73usb_register_read(rt2x00dev, TXRX_CSR0, &reg); 427 rt73usb_register_read(rt2x00dev, TXRX_CSR0, &reg);
354 rt2x00_set_field32(&reg, TXRX_CSR0_RX_ACK_TIMEOUT, ack_timeout); 428 rt2x00_set_field32(&reg, TXRX_CSR0_RX_ACK_TIMEOUT, erp->ack_timeout);
355 rt73usb_register_write(rt2x00dev, TXRX_CSR0, reg); 429 rt73usb_register_write(rt2x00dev, TXRX_CSR0, reg);
356 430
357 rt73usb_register_read(rt2x00dev, TXRX_CSR4, &reg); 431 rt73usb_register_read(rt2x00dev, TXRX_CSR4, &reg);
358 rt2x00_set_field32(&reg, TXRX_CSR4_AUTORESPOND_PREAMBLE, 432 rt2x00_set_field32(&reg, TXRX_CSR4_AUTORESPOND_PREAMBLE,
359 !!short_preamble); 433 !!erp->short_preamble);
360 rt73usb_register_write(rt2x00dev, TXRX_CSR4, reg); 434 rt73usb_register_write(rt2x00dev, TXRX_CSR4, reg);
361} 435}
362 436
@@ -442,28 +516,22 @@ static void rt73usb_config_antenna_5x(struct rt2x00_dev *rt2x00dev,
442 case ANTENNA_HW_DIVERSITY: 516 case ANTENNA_HW_DIVERSITY:
443 rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 2); 517 rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 2);
444 temp = !test_bit(CONFIG_FRAME_TYPE, &rt2x00dev->flags) 518 temp = !test_bit(CONFIG_FRAME_TYPE, &rt2x00dev->flags)
445 && (rt2x00dev->curr_hwmode != HWMODE_A); 519 && (rt2x00dev->curr_band != IEEE80211_BAND_5GHZ);
446 rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END, temp); 520 rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END, temp);
447 break; 521 break;
448 case ANTENNA_A: 522 case ANTENNA_A:
449 rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 1); 523 rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 1);
450 rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END, 0); 524 rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END, 0);
451 if (rt2x00dev->curr_hwmode == HWMODE_A) 525 if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ)
452 rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 0); 526 rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 0);
453 else 527 else
454 rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 3); 528 rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 3);
455 break; 529 break;
456 case ANTENNA_SW_DIVERSITY:
457 /*
458 * NOTE: We should never come here because rt2x00lib is
459 * supposed to catch this and send us the correct antenna
460 * explicitely. However we are nog going to bug about this.
461 * Instead, just default to antenna B.
462 */
463 case ANTENNA_B: 530 case ANTENNA_B:
531 default:
464 rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 1); 532 rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 1);
465 rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END, 0); 533 rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END, 0);
466 if (rt2x00dev->curr_hwmode == HWMODE_A) 534 if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ)
467 rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 3); 535 rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 3);
468 else 536 else
469 rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 0); 537 rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 0);
@@ -501,14 +569,8 @@ static void rt73usb_config_antenna_2x(struct rt2x00_dev *rt2x00dev,
501 rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 3); 569 rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 3);
502 rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 1); 570 rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 1);
503 break; 571 break;
504 case ANTENNA_SW_DIVERSITY:
505 /*
506 * NOTE: We should never come here because rt2x00lib is
507 * supposed to catch this and send us the correct antenna
508 * explicitely. However we are nog going to bug about this.
509 * Instead, just default to antenna B.
510 */
511 case ANTENNA_B: 572 case ANTENNA_B:
573 default:
512 rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 0); 574 rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 0);
513 rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 1); 575 rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 1);
514 break; 576 break;
@@ -558,7 +620,14 @@ static void rt73usb_config_antenna(struct rt2x00_dev *rt2x00dev,
558 unsigned int i; 620 unsigned int i;
559 u32 reg; 621 u32 reg;
560 622
561 if (rt2x00dev->curr_hwmode == HWMODE_A) { 623 /*
624 * We should never come here because rt2x00lib is supposed
 625 * to catch this and send us the correct antenna explicitly.
626 */
627 BUG_ON(ant->rx == ANTENNA_SW_DIVERSITY ||
628 ant->tx == ANTENNA_SW_DIVERSITY);
629
630 if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ) {
562 sel = antenna_sel_a; 631 sel = antenna_sel_a;
563 lna = test_bit(CONFIG_EXTERNAL_LNA_A, &rt2x00dev->flags); 632 lna = test_bit(CONFIG_EXTERNAL_LNA_A, &rt2x00dev->flags);
564 } else { 633 } else {
@@ -572,10 +641,9 @@ static void rt73usb_config_antenna(struct rt2x00_dev *rt2x00dev,
572 rt73usb_register_read(rt2x00dev, PHY_CSR0, &reg); 641 rt73usb_register_read(rt2x00dev, PHY_CSR0, &reg);
573 642
574 rt2x00_set_field32(&reg, PHY_CSR0_PA_PE_BG, 643 rt2x00_set_field32(&reg, PHY_CSR0_PA_PE_BG,
575 (rt2x00dev->curr_hwmode == HWMODE_B || 644 (rt2x00dev->curr_band == IEEE80211_BAND_2GHZ));
576 rt2x00dev->curr_hwmode == HWMODE_G));
577 rt2x00_set_field32(&reg, PHY_CSR0_PA_PE_A, 645 rt2x00_set_field32(&reg, PHY_CSR0_PA_PE_A,
578 (rt2x00dev->curr_hwmode == HWMODE_A)); 646 (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ));
579 647
580 rt73usb_register_write(rt2x00dev, PHY_CSR0, reg); 648 rt73usb_register_write(rt2x00dev, PHY_CSR0, reg);
581 649
@@ -617,8 +685,8 @@ static void rt73usb_config_duration(struct rt2x00_dev *rt2x00dev,
617} 685}
618 686
619static void rt73usb_config(struct rt2x00_dev *rt2x00dev, 687static void rt73usb_config(struct rt2x00_dev *rt2x00dev,
620 const unsigned int flags, 688 struct rt2x00lib_conf *libconf,
621 struct rt2x00lib_conf *libconf) 689 const unsigned int flags)
622{ 690{
623 if (flags & CONFIG_UPDATE_PHYMODE) 691 if (flags & CONFIG_UPDATE_PHYMODE)
624 rt73usb_config_phymode(rt2x00dev, libconf->basic_rates); 692 rt73usb_config_phymode(rt2x00dev, libconf->basic_rates);
@@ -634,68 +702,6 @@ static void rt73usb_config(struct rt2x00_dev *rt2x00dev,
634} 702}
635 703
636/* 704/*
637 * LED functions.
638 */
639static void rt73usb_enable_led(struct rt2x00_dev *rt2x00dev)
640{
641 u32 reg;
642
643 rt73usb_register_read(rt2x00dev, MAC_CSR14, &reg);
644 rt2x00_set_field32(&reg, MAC_CSR14_ON_PERIOD, 70);
645 rt2x00_set_field32(&reg, MAC_CSR14_OFF_PERIOD, 30);
646 rt73usb_register_write(rt2x00dev, MAC_CSR14, reg);
647
648 rt2x00_set_field16(&rt2x00dev->led_reg, MCU_LEDCS_RADIO_STATUS, 1);
649 rt2x00_set_field16(&rt2x00dev->led_reg, MCU_LEDCS_LINK_A_STATUS,
650 (rt2x00dev->rx_status.phymode == MODE_IEEE80211A));
651 rt2x00_set_field16(&rt2x00dev->led_reg, MCU_LEDCS_LINK_BG_STATUS,
652 (rt2x00dev->rx_status.phymode != MODE_IEEE80211A));
653
654 rt2x00usb_vendor_request_sw(rt2x00dev, USB_LED_CONTROL, 0x0000,
655 rt2x00dev->led_reg, REGISTER_TIMEOUT);
656}
657
658static void rt73usb_disable_led(struct rt2x00_dev *rt2x00dev)
659{
660 rt2x00_set_field16(&rt2x00dev->led_reg, MCU_LEDCS_RADIO_STATUS, 0);
661 rt2x00_set_field16(&rt2x00dev->led_reg, MCU_LEDCS_LINK_BG_STATUS, 0);
662 rt2x00_set_field16(&rt2x00dev->led_reg, MCU_LEDCS_LINK_A_STATUS, 0);
663
664 rt2x00usb_vendor_request_sw(rt2x00dev, USB_LED_CONTROL, 0x0000,
665 rt2x00dev->led_reg, REGISTER_TIMEOUT);
666}
667
668static void rt73usb_activity_led(struct rt2x00_dev *rt2x00dev, int rssi)
669{
670 u32 led;
671
672 if (rt2x00dev->led_mode != LED_MODE_SIGNAL_STRENGTH)
673 return;
674
675 /*
676 * Led handling requires a positive value for the rssi,
677 * to do that correctly we need to add the correction.
678 */
679 rssi += rt2x00dev->rssi_offset;
680
681 if (rssi <= 30)
682 led = 0;
683 else if (rssi <= 39)
684 led = 1;
685 else if (rssi <= 49)
686 led = 2;
687 else if (rssi <= 53)
688 led = 3;
689 else if (rssi <= 63)
690 led = 4;
691 else
692 led = 5;
693
694 rt2x00usb_vendor_request_sw(rt2x00dev, USB_LED_CONTROL, led,
695 rt2x00dev->led_reg, REGISTER_TIMEOUT);
696}
697
698/*
699 * Link tuning 705 * Link tuning
700 */ 706 */
701static void rt73usb_link_stats(struct rt2x00_dev *rt2x00dev, 707static void rt73usb_link_stats(struct rt2x00_dev *rt2x00dev,
@@ -729,17 +735,12 @@ static void rt73usb_link_tuner(struct rt2x00_dev *rt2x00dev)
729 u8 up_bound; 735 u8 up_bound;
730 u8 low_bound; 736 u8 low_bound;
731 737
732 /*
733 * Update Led strength
734 */
735 rt73usb_activity_led(rt2x00dev, rssi);
736
737 rt73usb_bbp_read(rt2x00dev, 17, &r17); 738 rt73usb_bbp_read(rt2x00dev, 17, &r17);
738 739
739 /* 740 /*
740 * Determine r17 bounds. 741 * Determine r17 bounds.
741 */ 742 */
742 if (rt2x00dev->rx_status.phymode == MODE_IEEE80211A) { 743 if (rt2x00dev->rx_status.band == IEEE80211_BAND_5GHZ) {
743 low_bound = 0x28; 744 low_bound = 0x28;
744 up_bound = 0x48; 745 up_bound = 0x48;
745 746
@@ -766,6 +767,13 @@ static void rt73usb_link_tuner(struct rt2x00_dev *rt2x00dev)
766 } 767 }
767 768
768 /* 769 /*
770 * If we are not associated, we should go straight to the
771 * dynamic CCA tuning.
772 */
773 if (!rt2x00dev->intf_associated)
774 goto dynamic_cca_tune;
775
776 /*
769 * Special big-R17 for very short distance 777 * Special big-R17 for very short distance
770 */ 778 */
771 if (rssi > -35) { 779 if (rssi > -35) {
@@ -815,6 +823,8 @@ static void rt73usb_link_tuner(struct rt2x00_dev *rt2x00dev)
815 return; 823 return;
816 } 824 }
817 825
826dynamic_cca_tune:
827
818 /* 828 /*
819 * r17 does not yet exceed upper limit, continue and base 829 * r17 does not yet exceed upper limit, continue and base
820 * the r17 tuning on the false CCA count. 830 * the r17 tuning on the false CCA count.
@@ -833,16 +843,30 @@ static void rt73usb_link_tuner(struct rt2x00_dev *rt2x00dev)
833} 843}
834 844
835/* 845/*
836 * Firmware name function. 846 * Firmware functions
837 */ 847 */
838static char *rt73usb_get_firmware_name(struct rt2x00_dev *rt2x00dev) 848static char *rt73usb_get_firmware_name(struct rt2x00_dev *rt2x00dev)
839{ 849{
840 return FIRMWARE_RT2571; 850 return FIRMWARE_RT2571;
841} 851}
842 852
843/* 853static u16 rt73usb_get_firmware_crc(void *data, const size_t len)
844 * Initialization functions. 854{
845 */ 855 u16 crc;
856
857 /*
858 * Use the crc itu-t algorithm.
859 * The last 2 bytes in the firmware array are the crc checksum itself,
860 * this means that we should never pass those 2 bytes to the crc
861 * algorithm.
862 */
863 crc = crc_itu_t(0, data, len - 2);
864 crc = crc_itu_t_byte(crc, 0);
865 crc = crc_itu_t_byte(crc, 0);
866
867 return crc;
868}
869
846static int rt73usb_load_firmware(struct rt2x00_dev *rt2x00dev, void *data, 870static int rt73usb_load_firmware(struct rt2x00_dev *rt2x00dev, void *data,
847 const size_t len) 871 const size_t len)
848{ 872{
@@ -889,7 +913,7 @@ static int rt73usb_load_firmware(struct rt2x00_dev *rt2x00dev, void *data,
889 913
890 rt2x00usb_vendor_request(rt2x00dev, USB_MULTI_WRITE, 914 rt2x00usb_vendor_request(rt2x00dev, USB_MULTI_WRITE,
891 USB_VENDOR_REQUEST_OUT, 915 USB_VENDOR_REQUEST_OUT,
892 FIRMWARE_IMAGE_BASE + i, 0x0000, 916 FIRMWARE_IMAGE_BASE + i, 0,
893 cache, buflen, timeout); 917 cache, buflen, timeout);
894 918
895 ptr += buflen; 919 ptr += buflen;
@@ -902,18 +926,19 @@ static int rt73usb_load_firmware(struct rt2x00_dev *rt2x00dev, void *data,
902 * we need to specify a long timeout time. 926 * we need to specify a long timeout time.
903 */ 927 */
904 status = rt2x00usb_vendor_request_sw(rt2x00dev, USB_DEVICE_MODE, 928 status = rt2x00usb_vendor_request_sw(rt2x00dev, USB_DEVICE_MODE,
905 0x0000, USB_MODE_FIRMWARE, 929 0, USB_MODE_FIRMWARE,
906 REGISTER_TIMEOUT_FIRMWARE); 930 REGISTER_TIMEOUT_FIRMWARE);
907 if (status < 0) { 931 if (status < 0) {
908 ERROR(rt2x00dev, "Failed to write Firmware to device.\n"); 932 ERROR(rt2x00dev, "Failed to write Firmware to device.\n");
909 return status; 933 return status;
910 } 934 }
911 935
912 rt73usb_disable_led(rt2x00dev);
913
914 return 0; 936 return 0;
915} 937}
916 938
939/*
940 * Initialization functions.
941 */
917static int rt73usb_init_registers(struct rt2x00_dev *rt2x00dev) 942static int rt73usb_init_registers(struct rt2x00_dev *rt2x00dev)
918{ 943{
919 u32 reg; 944 u32 reg;
@@ -1021,6 +1046,17 @@ static int rt73usb_init_registers(struct rt2x00_dev *rt2x00dev)
1021 rt73usb_register_write(rt2x00dev, MAC_CSR9, reg); 1046 rt73usb_register_write(rt2x00dev, MAC_CSR9, reg);
1022 1047
1023 /* 1048 /*
1049 * Clear all beacons
1050 * For the Beacon base registers we only need to clear
1051 * the first byte since that byte contains the VALID and OWNER
1052 * bits which (when set to 0) will invalidate the entire beacon.
1053 */
1054 rt73usb_register_write(rt2x00dev, HW_BEACON_BASE0, 0);
1055 rt73usb_register_write(rt2x00dev, HW_BEACON_BASE1, 0);
1056 rt73usb_register_write(rt2x00dev, HW_BEACON_BASE2, 0);
1057 rt73usb_register_write(rt2x00dev, HW_BEACON_BASE3, 0);
1058
1059 /*
1024 * We must clear the error counters. 1060 * We must clear the error counters.
1025 * These registers are cleared on read, 1061 * These registers are cleared on read,
1026 * so we may pass a useless variable to store the value. 1062 * so we may pass a useless variable to store the value.
@@ -1094,19 +1130,15 @@ continue_csr_init:
1094 rt73usb_bbp_write(rt2x00dev, 102, 0x16); 1130 rt73usb_bbp_write(rt2x00dev, 102, 0x16);
1095 rt73usb_bbp_write(rt2x00dev, 107, 0x04); 1131 rt73usb_bbp_write(rt2x00dev, 107, 0x04);
1096 1132
1097 DEBUG(rt2x00dev, "Start initialization from EEPROM...\n");
1098 for (i = 0; i < EEPROM_BBP_SIZE; i++) { 1133 for (i = 0; i < EEPROM_BBP_SIZE; i++) {
1099 rt2x00_eeprom_read(rt2x00dev, EEPROM_BBP_START + i, &eeprom); 1134 rt2x00_eeprom_read(rt2x00dev, EEPROM_BBP_START + i, &eeprom);
1100 1135
1101 if (eeprom != 0xffff && eeprom != 0x0000) { 1136 if (eeprom != 0xffff && eeprom != 0x0000) {
1102 reg_id = rt2x00_get_field16(eeprom, EEPROM_BBP_REG_ID); 1137 reg_id = rt2x00_get_field16(eeprom, EEPROM_BBP_REG_ID);
1103 value = rt2x00_get_field16(eeprom, EEPROM_BBP_VALUE); 1138 value = rt2x00_get_field16(eeprom, EEPROM_BBP_VALUE);
1104 DEBUG(rt2x00dev, "BBP: 0x%02x, value: 0x%02x.\n",
1105 reg_id, value);
1106 rt73usb_bbp_write(rt2x00dev, reg_id, value); 1139 rt73usb_bbp_write(rt2x00dev, reg_id, value);
1107 } 1140 }
1108 } 1141 }
1109 DEBUG(rt2x00dev, "...End initialization from EEPROM.\n");
1110 1142
1111 return 0; 1143 return 0;
1112} 1144}
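
The loop retained above programs BBP registers straight from EEPROM: each 16-bit word carries a register id and a value, and words reading 0x0000 or 0xffff are unused slots that get skipped. A hedged sketch of that decode; the id-in-high-byte split is an assumption for illustration, the authoritative layout being the EEPROM_BBP_REG_ID/EEPROM_BBP_VALUE FIELD16() definitions in rt73usb.h.

#include <stdint.h>
#include <stdio.h>

/* Sketch of the EEPROM-driven BBP initialisation: one byte of each
 * word is the BBP register id, the other the value; 0x0000 and 0xffff
 * mark unused slots.  The id-in-high-byte split is assumed. */
static void bbp_write_sketch(int reg_id, int value)
{
        printf("BBP[%d] <- 0x%02x\n", reg_id, value);
}

static void bbp_init_from_eeprom(const uint16_t *words, unsigned int count)
{
        unsigned int i;

        for (i = 0; i < count; i++) {
                if (words[i] == 0x0000 || words[i] == 0xffff)
                        continue;
                bbp_write_sketch(words[i] >> 8, words[i] & 0xff);
        }
}

int main(void)
{
        const uint16_t eeprom_bbp[] = { 0xffff, 0x4513, 0x0000, 0x6a08 };

        bbp_init_from_eeprom(eeprom_bbp, 4);
        return 0;
}
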
@@ -1136,21 +1168,11 @@ static int rt73usb_enable_radio(struct rt2x00_dev *rt2x00dev)
1136 return -EIO; 1168 return -EIO;
1137 } 1169 }
1138 1170
1139 /*
1140 * Enable LED
1141 */
1142 rt73usb_enable_led(rt2x00dev);
1143
1144 return 0; 1171 return 0;
1145} 1172}
1146 1173
1147static void rt73usb_disable_radio(struct rt2x00_dev *rt2x00dev) 1174static void rt73usb_disable_radio(struct rt2x00_dev *rt2x00dev)
1148{ 1175{
1149 /*
1150 * Disable LED
1151 */
1152 rt73usb_disable_led(rt2x00dev);
1153
1154 rt73usb_register_write(rt2x00dev, MAC_CSR10, 0x00001818); 1176 rt73usb_register_write(rt2x00dev, MAC_CSR10, 0x00001818);
1155 1177
1156 /* 1178 /*
@@ -1234,10 +1256,10 @@ static int rt73usb_set_device_state(struct rt2x00_dev *rt2x00dev,
1234 */ 1256 */
1235static void rt73usb_write_tx_desc(struct rt2x00_dev *rt2x00dev, 1257static void rt73usb_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1236 struct sk_buff *skb, 1258 struct sk_buff *skb,
1237 struct txdata_entry_desc *desc, 1259 struct txentry_desc *txdesc,
1238 struct ieee80211_tx_control *control) 1260 struct ieee80211_tx_control *control)
1239{ 1261{
1240 struct skb_desc *skbdesc = get_skb_desc(skb); 1262 struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
1241 __le32 *txd = skbdesc->desc; 1263 __le32 *txd = skbdesc->desc;
1242 u32 word; 1264 u32 word;
1243 1265
@@ -1245,47 +1267,47 @@ static void rt73usb_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1245 * Start writing the descriptor words. 1267 * Start writing the descriptor words.
1246 */ 1268 */
1247 rt2x00_desc_read(txd, 1, &word); 1269 rt2x00_desc_read(txd, 1, &word);
1248 rt2x00_set_field32(&word, TXD_W1_HOST_Q_ID, desc->queue); 1270 rt2x00_set_field32(&word, TXD_W1_HOST_Q_ID, txdesc->queue);
1249 rt2x00_set_field32(&word, TXD_W1_AIFSN, desc->aifs); 1271 rt2x00_set_field32(&word, TXD_W1_AIFSN, txdesc->aifs);
1250 rt2x00_set_field32(&word, TXD_W1_CWMIN, desc->cw_min); 1272 rt2x00_set_field32(&word, TXD_W1_CWMIN, txdesc->cw_min);
1251 rt2x00_set_field32(&word, TXD_W1_CWMAX, desc->cw_max); 1273 rt2x00_set_field32(&word, TXD_W1_CWMAX, txdesc->cw_max);
1252 rt2x00_set_field32(&word, TXD_W1_IV_OFFSET, IEEE80211_HEADER); 1274 rt2x00_set_field32(&word, TXD_W1_IV_OFFSET, IEEE80211_HEADER);
1253 rt2x00_set_field32(&word, TXD_W1_HW_SEQUENCE, 1); 1275 rt2x00_set_field32(&word, TXD_W1_HW_SEQUENCE, 1);
1254 rt2x00_desc_write(txd, 1, word); 1276 rt2x00_desc_write(txd, 1, word);
1255 1277
1256 rt2x00_desc_read(txd, 2, &word); 1278 rt2x00_desc_read(txd, 2, &word);
1257 rt2x00_set_field32(&word, TXD_W2_PLCP_SIGNAL, desc->signal); 1279 rt2x00_set_field32(&word, TXD_W2_PLCP_SIGNAL, txdesc->signal);
1258 rt2x00_set_field32(&word, TXD_W2_PLCP_SERVICE, desc->service); 1280 rt2x00_set_field32(&word, TXD_W2_PLCP_SERVICE, txdesc->service);
1259 rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_LOW, desc->length_low); 1281 rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_LOW, txdesc->length_low);
1260 rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_HIGH, desc->length_high); 1282 rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_HIGH, txdesc->length_high);
1261 rt2x00_desc_write(txd, 2, word); 1283 rt2x00_desc_write(txd, 2, word);
1262 1284
1263 rt2x00_desc_read(txd, 5, &word); 1285 rt2x00_desc_read(txd, 5, &word);
1264 rt2x00_set_field32(&word, TXD_W5_TX_POWER, 1286 rt2x00_set_field32(&word, TXD_W5_TX_POWER,
1265 TXPOWER_TO_DEV(control->power_level)); 1287 TXPOWER_TO_DEV(rt2x00dev->tx_power));
1266 rt2x00_set_field32(&word, TXD_W5_WAITING_DMA_DONE_INT, 1); 1288 rt2x00_set_field32(&word, TXD_W5_WAITING_DMA_DONE_INT, 1);
1267 rt2x00_desc_write(txd, 5, word); 1289 rt2x00_desc_write(txd, 5, word);
1268 1290
1269 rt2x00_desc_read(txd, 0, &word); 1291 rt2x00_desc_read(txd, 0, &word);
1270 rt2x00_set_field32(&word, TXD_W0_BURST, 1292 rt2x00_set_field32(&word, TXD_W0_BURST,
1271 test_bit(ENTRY_TXD_BURST, &desc->flags)); 1293 test_bit(ENTRY_TXD_BURST, &txdesc->flags));
1272 rt2x00_set_field32(&word, TXD_W0_VALID, 1); 1294 rt2x00_set_field32(&word, TXD_W0_VALID, 1);
1273 rt2x00_set_field32(&word, TXD_W0_MORE_FRAG, 1295 rt2x00_set_field32(&word, TXD_W0_MORE_FRAG,
1274 test_bit(ENTRY_TXD_MORE_FRAG, &desc->flags)); 1296 test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags));
1275 rt2x00_set_field32(&word, TXD_W0_ACK, 1297 rt2x00_set_field32(&word, TXD_W0_ACK,
1276 test_bit(ENTRY_TXD_ACK, &desc->flags)); 1298 test_bit(ENTRY_TXD_ACK, &txdesc->flags));
1277 rt2x00_set_field32(&word, TXD_W0_TIMESTAMP, 1299 rt2x00_set_field32(&word, TXD_W0_TIMESTAMP,
1278 test_bit(ENTRY_TXD_REQ_TIMESTAMP, &desc->flags)); 1300 test_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags));
1279 rt2x00_set_field32(&word, TXD_W0_OFDM, 1301 rt2x00_set_field32(&word, TXD_W0_OFDM,
1280 test_bit(ENTRY_TXD_OFDM_RATE, &desc->flags)); 1302 test_bit(ENTRY_TXD_OFDM_RATE, &txdesc->flags));
1281 rt2x00_set_field32(&word, TXD_W0_IFS, desc->ifs); 1303 rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs);
1282 rt2x00_set_field32(&word, TXD_W0_RETRY_MODE, 1304 rt2x00_set_field32(&word, TXD_W0_RETRY_MODE,
1283 !!(control->flags & 1305 !!(control->flags &
1284 IEEE80211_TXCTL_LONG_RETRY_LIMIT)); 1306 IEEE80211_TXCTL_LONG_RETRY_LIMIT));
1285 rt2x00_set_field32(&word, TXD_W0_TKIP_MIC, 0); 1307 rt2x00_set_field32(&word, TXD_W0_TKIP_MIC, 0);
1286 rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, skbdesc->data_len); 1308 rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, skbdesc->data_len);
1287 rt2x00_set_field32(&word, TXD_W0_BURST2, 1309 rt2x00_set_field32(&word, TXD_W0_BURST2,
1288 test_bit(ENTRY_TXD_BURST, &desc->flags)); 1310 test_bit(ENTRY_TXD_BURST, &txdesc->flags));
1289 rt2x00_set_field32(&word, TXD_W0_CIPHER_ALG, CIPHER_NONE); 1311 rt2x00_set_field32(&word, TXD_W0_CIPHER_ALG, CIPHER_NONE);
1290 rt2x00_desc_write(txd, 0, word); 1312 rt2x00_desc_write(txd, 0, word);
1291} 1313}
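
The descriptor words touched in rt73usb_write_tx_desc() above live in the skb as little-endian 32-bit values; rt2x00_desc_read()/rt2x00_desc_write() convert to and from host order, and the updates in between use the same field helpers sketched earlier after the config_erp hunk. Below is a minimal stand-in for the read/write pair, also mirroring the detail that word 0 (the one carrying TXD_W0_VALID) is filled last.

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for rt2x00_desc_read()/rt2x00_desc_write(): descriptor
 * words are stored little-endian regardless of host byte order. */
static uint32_t desc_read(const uint8_t *desc, unsigned int word)
{
        const uint8_t *p = desc + word * 4;

        return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
               (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

static void desc_write(uint8_t *desc, unsigned int word, uint32_t value)
{
        uint8_t *p = desc + word * 4;

        p[0] = value & 0xff;
        p[1] = (value >> 8) & 0xff;
        p[2] = (value >> 16) & 0xff;
        p[3] = (value >> 24) & 0xff;
}

int main(void)
{
        uint8_t txd[6 * 4] = { 0 };     /* TXD_DESC_SIZE bytes */
        uint32_t word;

        /* Words 1, 2 and 5 are filled first in the code above; word 0,
         * which carries the VALID bit, is written last. */
        word = desc_read(txd, 0);
        word |= 1u << 1;                /* made-up stand-in for TXD_W0_VALID */
        desc_write(txd, 0, word);

        printf("first descriptor byte: 0x%02x\n", txd[0]);
        return 0;
}
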
@@ -1309,11 +1331,11 @@ static int rt73usb_get_tx_data_len(struct rt2x00_dev *rt2x00dev,
1309 * TX data initialization 1331 * TX data initialization
1310 */ 1332 */
1311static void rt73usb_kick_tx_queue(struct rt2x00_dev *rt2x00dev, 1333static void rt73usb_kick_tx_queue(struct rt2x00_dev *rt2x00dev,
1312 unsigned int queue) 1334 const unsigned int queue)
1313{ 1335{
1314 u32 reg; 1336 u32 reg;
1315 1337
1316 if (queue != IEEE80211_TX_QUEUE_BEACON) 1338 if (queue != RT2X00_BCN_QUEUE_BEACON)
1317 return; 1339 return;
1318 1340
1319 /* 1341 /*
@@ -1324,6 +1346,8 @@ static void rt73usb_kick_tx_queue(struct rt2x00_dev *rt2x00dev,
1324 1346
1325 rt73usb_register_read(rt2x00dev, TXRX_CSR9, &reg); 1347 rt73usb_register_read(rt2x00dev, TXRX_CSR9, &reg);
1326 if (!rt2x00_get_field32(reg, TXRX_CSR9_BEACON_GEN)) { 1348 if (!rt2x00_get_field32(reg, TXRX_CSR9_BEACON_GEN)) {
1349 rt2x00_set_field32(&reg, TXRX_CSR9_TSF_TICKING, 1);
1350 rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 1);
1327 rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 1); 1351 rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 1);
1328 rt73usb_register_write(rt2x00dev, TXRX_CSR9, reg); 1352 rt73usb_register_write(rt2x00dev, TXRX_CSR9, reg);
1329 } 1353 }
@@ -1353,7 +1377,7 @@ static int rt73usb_agc_to_rssi(struct rt2x00_dev *rt2x00dev, int rxd_w1)
1353 return 0; 1377 return 0;
1354 } 1378 }
1355 1379
1356 if (rt2x00dev->rx_status.phymode == MODE_IEEE80211A) { 1380 if (rt2x00dev->rx_status.band == IEEE80211_BAND_5GHZ) {
1357 if (test_bit(CONFIG_EXTERNAL_LNA_A, &rt2x00dev->flags)) { 1381 if (test_bit(CONFIG_EXTERNAL_LNA_A, &rt2x00dev->flags)) {
1358 if (lna == 3 || lna == 2) 1382 if (lna == 3 || lna == 2)
1359 offset += 10; 1383 offset += 10;
@@ -1377,37 +1401,62 @@ static int rt73usb_agc_to_rssi(struct rt2x00_dev *rt2x00dev, int rxd_w1)
1377 return rt2x00_get_field32(rxd_w1, RXD_W1_RSSI_AGC) * 2 - offset; 1401 return rt2x00_get_field32(rxd_w1, RXD_W1_RSSI_AGC) * 2 - offset;
1378} 1402}
1379 1403
1380static void rt73usb_fill_rxdone(struct data_entry *entry, 1404static void rt73usb_fill_rxdone(struct queue_entry *entry,
1381 struct rxdata_entry_desc *desc) 1405 struct rxdone_entry_desc *rxdesc)
1382{ 1406{
1383 struct skb_desc *skbdesc = get_skb_desc(entry->skb); 1407 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
1384 __le32 *rxd = (__le32 *)entry->skb->data; 1408 __le32 *rxd = (__le32 *)entry->skb->data;
1409 unsigned int offset = entry->queue->desc_size + 2;
1385 u32 word0; 1410 u32 word0;
1386 u32 word1; 1411 u32 word1;
1387 1412
1413 /*
1414 * Copy descriptor to the available headroom inside the skbuffer.
1415 */
1416 skb_push(entry->skb, offset);
1417 memcpy(entry->skb->data, rxd, entry->queue->desc_size);
1418 rxd = (__le32 *)entry->skb->data;
1419
1420 /*
1421 * The descriptor is now aligned to 4 bytes and thus it is
1422 * now safe to read it on all architectures.
1423 */
1388 rt2x00_desc_read(rxd, 0, &word0); 1424 rt2x00_desc_read(rxd, 0, &word0);
1389 rt2x00_desc_read(rxd, 1, &word1); 1425 rt2x00_desc_read(rxd, 1, &word1);
1390 1426
1391 desc->flags = 0; 1427 rxdesc->flags = 0;
1392 if (rt2x00_get_field32(word0, RXD_W0_CRC_ERROR)) 1428 if (rt2x00_get_field32(word0, RXD_W0_CRC_ERROR))
1393 desc->flags |= RX_FLAG_FAILED_FCS_CRC; 1429 rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC;
1394 1430
1395 /* 1431 /*
1396 * Obtain the status about this packet. 1432 * Obtain the status about this packet.
1433 * When frame was received with an OFDM bitrate,
1434 * the signal is the PLCP value. If it was received with
1435 * a CCK bitrate the signal is the rate in 100kbit/s.
1397 */ 1436 */
1398 desc->signal = rt2x00_get_field32(word1, RXD_W1_SIGNAL); 1437 rxdesc->signal = rt2x00_get_field32(word1, RXD_W1_SIGNAL);
1399 desc->rssi = rt73usb_agc_to_rssi(entry->ring->rt2x00dev, word1); 1438 rxdesc->rssi = rt73usb_agc_to_rssi(entry->queue->rt2x00dev, word1);
1400 desc->ofdm = rt2x00_get_field32(word0, RXD_W0_OFDM); 1439 rxdesc->size = rt2x00_get_field32(word0, RXD_W0_DATABYTE_COUNT);
1401 desc->size = rt2x00_get_field32(word0, RXD_W0_DATABYTE_COUNT); 1440
1402 desc->my_bss = !!rt2x00_get_field32(word0, RXD_W0_MY_BSS); 1441 rxdesc->dev_flags = 0;
1442 if (rt2x00_get_field32(word0, RXD_W0_OFDM))
1443 rxdesc->dev_flags |= RXDONE_SIGNAL_PLCP;
1444 if (rt2x00_get_field32(word0, RXD_W0_MY_BSS))
1445 rxdesc->dev_flags |= RXDONE_MY_BSS;
1446
1447 /*
1448 * Adjust the skb memory window to the frame boundaries.
1449 */
1450 skb_pull(entry->skb, offset + entry->queue->desc_size);
1451 skb_trim(entry->skb, rxdesc->size);
1403 1452
1404 /* 1453 /*
1405 * Set descriptor and data pointer. 1454 * Set descriptor and data pointer.
1406 */ 1455 */
1407 skbdesc->desc = entry->skb->data; 1456 skbdesc->data = entry->skb->data;
1408 skbdesc->desc_len = entry->ring->desc_size; 1457 skbdesc->data_len = rxdesc->size;
1409 skbdesc->data = entry->skb->data + entry->ring->desc_size; 1458 skbdesc->desc = rxd;
1410 skbdesc->data_len = desc->size; 1459 skbdesc->desc_len = entry->queue->desc_size;
1411} 1460}
1412 1461
1413/* 1462/*
@@ -1499,7 +1548,7 @@ static int rt73usb_validate_eeprom(struct rt2x00_dev *rt2x00dev)
1499 rt2x00_set_field16(&word, EEPROM_RSSI_OFFSET_A_1, 0); 1548 rt2x00_set_field16(&word, EEPROM_RSSI_OFFSET_A_1, 0);
1500 rt2x00_set_field16(&word, EEPROM_RSSI_OFFSET_A_2, 0); 1549 rt2x00_set_field16(&word, EEPROM_RSSI_OFFSET_A_2, 0);
1501 rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_OFFSET_A, word); 1550 rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_OFFSET_A, word);
1502 EEPROM(rt2x00dev, "RSSI OFFSET BG: 0x%04x\n", word); 1551 EEPROM(rt2x00dev, "RSSI OFFSET A: 0x%04x\n", word);
1503 } else { 1552 } else {
1504 value = rt2x00_get_field16(word, EEPROM_RSSI_OFFSET_A_1); 1553 value = rt2x00_get_field16(word, EEPROM_RSSI_OFFSET_A_1);
1505 if (value < -10 || value > 10) 1554 if (value < -10 || value > 10)
@@ -1577,33 +1626,60 @@ static int rt73usb_init_eeprom(struct rt2x00_dev *rt2x00dev)
1577 /* 1626 /*
1578 * Store led settings, for correct led behaviour. 1627 * Store led settings, for correct led behaviour.
1579 */ 1628 */
1629#ifdef CONFIG_RT73USB_LEDS
1580 rt2x00_eeprom_read(rt2x00dev, EEPROM_LED, &eeprom); 1630 rt2x00_eeprom_read(rt2x00dev, EEPROM_LED, &eeprom);
1581 1631
1582 rt2x00_set_field16(&rt2x00dev->led_reg, MCU_LEDCS_LED_MODE, 1632 rt2x00dev->led_radio.rt2x00dev = rt2x00dev;
1583 rt2x00dev->led_mode); 1633 rt2x00dev->led_radio.type = LED_TYPE_RADIO;
1584 rt2x00_set_field16(&rt2x00dev->led_reg, MCU_LEDCS_POLARITY_GPIO_0, 1634 rt2x00dev->led_radio.led_dev.brightness_set =
1635 rt73usb_brightness_set;
1636 rt2x00dev->led_radio.led_dev.blink_set =
1637 rt73usb_blink_set;
1638 rt2x00dev->led_radio.flags = LED_INITIALIZED;
1639
1640 rt2x00dev->led_assoc.rt2x00dev = rt2x00dev;
1641 rt2x00dev->led_assoc.type = LED_TYPE_ASSOC;
1642 rt2x00dev->led_assoc.led_dev.brightness_set =
1643 rt73usb_brightness_set;
1644 rt2x00dev->led_assoc.led_dev.blink_set =
1645 rt73usb_blink_set;
1646 rt2x00dev->led_assoc.flags = LED_INITIALIZED;
1647
1648 if (value == LED_MODE_SIGNAL_STRENGTH) {
1649 rt2x00dev->led_qual.rt2x00dev = rt2x00dev;
1650 rt2x00dev->led_radio.type = LED_TYPE_QUALITY;
1651 rt2x00dev->led_qual.led_dev.brightness_set =
1652 rt73usb_brightness_set;
1653 rt2x00dev->led_qual.led_dev.blink_set =
1654 rt73usb_blink_set;
1655 rt2x00dev->led_qual.flags = LED_INITIALIZED;
1656 }
1657
1658 rt2x00_set_field16(&rt2x00dev->led_mcu_reg, MCU_LEDCS_LED_MODE, value);
1659 rt2x00_set_field16(&rt2x00dev->led_mcu_reg, MCU_LEDCS_POLARITY_GPIO_0,
1585 rt2x00_get_field16(eeprom, 1660 rt2x00_get_field16(eeprom,
1586 EEPROM_LED_POLARITY_GPIO_0)); 1661 EEPROM_LED_POLARITY_GPIO_0));
1587 rt2x00_set_field16(&rt2x00dev->led_reg, MCU_LEDCS_POLARITY_GPIO_1, 1662 rt2x00_set_field16(&rt2x00dev->led_mcu_reg, MCU_LEDCS_POLARITY_GPIO_1,
1588 rt2x00_get_field16(eeprom, 1663 rt2x00_get_field16(eeprom,
1589 EEPROM_LED_POLARITY_GPIO_1)); 1664 EEPROM_LED_POLARITY_GPIO_1));
1590 rt2x00_set_field16(&rt2x00dev->led_reg, MCU_LEDCS_POLARITY_GPIO_2, 1665 rt2x00_set_field16(&rt2x00dev->led_mcu_reg, MCU_LEDCS_POLARITY_GPIO_2,
1591 rt2x00_get_field16(eeprom, 1666 rt2x00_get_field16(eeprom,
1592 EEPROM_LED_POLARITY_GPIO_2)); 1667 EEPROM_LED_POLARITY_GPIO_2));
1593 rt2x00_set_field16(&rt2x00dev->led_reg, MCU_LEDCS_POLARITY_GPIO_3, 1668 rt2x00_set_field16(&rt2x00dev->led_mcu_reg, MCU_LEDCS_POLARITY_GPIO_3,
1594 rt2x00_get_field16(eeprom, 1669 rt2x00_get_field16(eeprom,
1595 EEPROM_LED_POLARITY_GPIO_3)); 1670 EEPROM_LED_POLARITY_GPIO_3));
1596 rt2x00_set_field16(&rt2x00dev->led_reg, MCU_LEDCS_POLARITY_GPIO_4, 1671 rt2x00_set_field16(&rt2x00dev->led_mcu_reg, MCU_LEDCS_POLARITY_GPIO_4,
1597 rt2x00_get_field16(eeprom, 1672 rt2x00_get_field16(eeprom,
1598 EEPROM_LED_POLARITY_GPIO_4)); 1673 EEPROM_LED_POLARITY_GPIO_4));
1599 rt2x00_set_field16(&rt2x00dev->led_reg, MCU_LEDCS_POLARITY_ACT, 1674 rt2x00_set_field16(&rt2x00dev->led_mcu_reg, MCU_LEDCS_POLARITY_ACT,
1600 rt2x00_get_field16(eeprom, EEPROM_LED_POLARITY_ACT)); 1675 rt2x00_get_field16(eeprom, EEPROM_LED_POLARITY_ACT));
1601 rt2x00_set_field16(&rt2x00dev->led_reg, MCU_LEDCS_POLARITY_READY_BG, 1676 rt2x00_set_field16(&rt2x00dev->led_mcu_reg, MCU_LEDCS_POLARITY_READY_BG,
1602 rt2x00_get_field16(eeprom, 1677 rt2x00_get_field16(eeprom,
1603 EEPROM_LED_POLARITY_RDY_G)); 1678 EEPROM_LED_POLARITY_RDY_G));
1604 rt2x00_set_field16(&rt2x00dev->led_reg, MCU_LEDCS_POLARITY_READY_A, 1679 rt2x00_set_field16(&rt2x00dev->led_mcu_reg, MCU_LEDCS_POLARITY_READY_A,
1605 rt2x00_get_field16(eeprom, 1680 rt2x00_get_field16(eeprom,
1606 EEPROM_LED_POLARITY_RDY_A)); 1681 EEPROM_LED_POLARITY_RDY_A));
1682#endif /* CONFIG_RT73USB_LEDS */
1607 1683
1608 return 0; 1684 return 0;
1609} 1685}
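
The new CONFIG_RT73USB_LEDS block above registers radio and assoc LEDs unconditionally and a quality LED only when the EEPROM LED mode asks for signal-strength blinking; all three share the rt73usb_brightness_set()/rt73usb_blink_set() callbacks. (One line in the quality branch sets rt2x00dev->led_radio.type rather than led_qual.type, which looks like a copy-and-paste slip.) Below is a plain-C sketch of the conditional setup with stand-in types; the structure layout and the LED_MODE_SIGNAL_STRENGTH value are illustrative, not the driver's definitions.

#include <stdio.h>

/* Stand-ins for the rt2x00 LED bookkeeping: each LED records its type
 * and the callback shared by all the class devices. */
enum led_type_sketch { LED_RADIO, LED_ASSOC, LED_QUALITY };

struct led_sketch {
        enum led_type_sketch type;
        void (*brightness_set)(int level);
        int initialized;
};

#define LED_MODE_SIGNAL_STRENGTH 3      /* illustrative EEPROM value */

static void brightness_set_sketch(int level)
{
        printf("brightness -> %d\n", level);
}

static void setup_leds(struct led_sketch *radio, struct led_sketch *assoc,
                       struct led_sketch *qual, int eeprom_led_mode)
{
        radio->type = LED_RADIO;
        radio->brightness_set = brightness_set_sketch;
        radio->initialized = 1;

        assoc->type = LED_ASSOC;
        assoc->brightness_set = brightness_set_sketch;
        assoc->initialized = 1;

        /* The quality LED only exists when the EEPROM asks for
         * signal-strength blinking, as in the hunk above. */
        if (eeprom_led_mode == LED_MODE_SIGNAL_STRENGTH) {
                qual->type = LED_QUALITY;
                qual->brightness_set = brightness_set_sketch;
                qual->initialized = 1;
        }
}

int main(void)
{
        struct led_sketch radio = { 0 }, assoc = { 0 }, qual = { 0 };

        setup_leds(&radio, &assoc, &qual, LED_MODE_SIGNAL_STRENGTH);
        printf("quality LED initialized: %d\n", qual.initialized);
        return 0;
}
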
@@ -1759,7 +1835,7 @@ static void rt73usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
1759 rt2x00dev->hw->extra_tx_headroom = TXD_DESC_SIZE; 1835 rt2x00dev->hw->extra_tx_headroom = TXD_DESC_SIZE;
1760 rt2x00dev->hw->max_signal = MAX_SIGNAL; 1836 rt2x00dev->hw->max_signal = MAX_SIGNAL;
1761 rt2x00dev->hw->max_rssi = MAX_RX_SSI; 1837 rt2x00dev->hw->max_rssi = MAX_RX_SSI;
1762 rt2x00dev->hw->queues = 5; 1838 rt2x00dev->hw->queues = 4;
1763 1839
1764 SET_IEEE80211_DEV(rt2x00dev->hw, &rt2x00dev_usb(rt2x00dev)->dev); 1840 SET_IEEE80211_DEV(rt2x00dev->hw, &rt2x00dev_usb(rt2x00dev)->dev);
1765 SET_IEEE80211_PERM_ADDR(rt2x00dev->hw, 1841 SET_IEEE80211_PERM_ADDR(rt2x00dev->hw,
@@ -1776,8 +1852,8 @@ static void rt73usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
1776 /* 1852 /*
1777 * Initialize hw_mode information. 1853 * Initialize hw_mode information.
1778 */ 1854 */
1779 spec->num_modes = 2; 1855 spec->supported_bands = SUPPORT_BAND_2GHZ;
1780 spec->num_rates = 12; 1856 spec->supported_rates = SUPPORT_RATE_CCK | SUPPORT_RATE_OFDM;
1781 spec->tx_power_a = NULL; 1857 spec->tx_power_a = NULL;
1782 spec->tx_power_bg = txpower; 1858 spec->tx_power_bg = txpower;
1783 spec->tx_power_default = DEFAULT_TXPOWER; 1859 spec->tx_power_default = DEFAULT_TXPOWER;
@@ -1786,20 +1862,20 @@ static void rt73usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
1786 spec->num_channels = ARRAY_SIZE(rf_vals_bg_2528); 1862 spec->num_channels = ARRAY_SIZE(rf_vals_bg_2528);
1787 spec->channels = rf_vals_bg_2528; 1863 spec->channels = rf_vals_bg_2528;
1788 } else if (rt2x00_rf(&rt2x00dev->chip, RF5226)) { 1864 } else if (rt2x00_rf(&rt2x00dev->chip, RF5226)) {
1865 spec->supported_bands |= SUPPORT_BAND_5GHZ;
1789 spec->num_channels = ARRAY_SIZE(rf_vals_5226); 1866 spec->num_channels = ARRAY_SIZE(rf_vals_5226);
1790 spec->channels = rf_vals_5226; 1867 spec->channels = rf_vals_5226;
1791 } else if (rt2x00_rf(&rt2x00dev->chip, RF2527)) { 1868 } else if (rt2x00_rf(&rt2x00dev->chip, RF2527)) {
1792 spec->num_channels = 14; 1869 spec->num_channels = 14;
1793 spec->channels = rf_vals_5225_2527; 1870 spec->channels = rf_vals_5225_2527;
1794 } else if (rt2x00_rf(&rt2x00dev->chip, RF5225)) { 1871 } else if (rt2x00_rf(&rt2x00dev->chip, RF5225)) {
1872 spec->supported_bands |= SUPPORT_BAND_5GHZ;
1795 spec->num_channels = ARRAY_SIZE(rf_vals_5225_2527); 1873 spec->num_channels = ARRAY_SIZE(rf_vals_5225_2527);
1796 spec->channels = rf_vals_5225_2527; 1874 spec->channels = rf_vals_5225_2527;
1797 } 1875 }
1798 1876
1799 if (rt2x00_rf(&rt2x00dev->chip, RF5225) || 1877 if (rt2x00_rf(&rt2x00dev->chip, RF5225) ||
1800 rt2x00_rf(&rt2x00dev->chip, RF5226)) { 1878 rt2x00_rf(&rt2x00dev->chip, RF5226)) {
1801 spec->num_modes = 3;
1802
1803 txpower = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A_START); 1879 txpower = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A_START);
1804 for (i = 0; i < 14; i++) 1880 for (i = 0; i < 14; i++)
1805 txpower[i] = TXPOWER_FROM_DEV(txpower[i]); 1881 txpower[i] = TXPOWER_FROM_DEV(txpower[i]);
@@ -1829,9 +1905,10 @@ static int rt73usb_probe_hw(struct rt2x00_dev *rt2x00dev)
1829 rt73usb_probe_hw_mode(rt2x00dev); 1905 rt73usb_probe_hw_mode(rt2x00dev);
1830 1906
1831 /* 1907 /*
1832 * This device requires firmware 1908 * This device requires firmware.
1833 */ 1909 */
1834 __set_bit(DRIVER_REQUIRE_FIRMWARE, &rt2x00dev->flags); 1910 __set_bit(DRIVER_REQUIRE_FIRMWARE, &rt2x00dev->flags);
1911 __set_bit(DRIVER_REQUIRE_SCHEDULED, &rt2x00dev->flags);
1835 1912
1836 /* 1913 /*
1837 * Set the rssi offset. 1914 * Set the rssi offset.
@@ -1844,79 +1921,6 @@ static int rt73usb_probe_hw(struct rt2x00_dev *rt2x00dev)
1844/* 1921/*
1845 * IEEE80211 stack callback functions. 1922 * IEEE80211 stack callback functions.
1846 */ 1923 */
1847static void rt73usb_configure_filter(struct ieee80211_hw *hw,
1848 unsigned int changed_flags,
1849 unsigned int *total_flags,
1850 int mc_count,
1851 struct dev_addr_list *mc_list)
1852{
1853 struct rt2x00_dev *rt2x00dev = hw->priv;
1854 u32 reg;
1855
1856 /*
1857 * Mask off any flags we are going to ignore from
1858 * the total_flags field.
1859 */
1860 *total_flags &=
1861 FIF_ALLMULTI |
1862 FIF_FCSFAIL |
1863 FIF_PLCPFAIL |
1864 FIF_CONTROL |
1865 FIF_OTHER_BSS |
1866 FIF_PROMISC_IN_BSS;
1867
1868 /*
1869 * Apply some rules to the filters:
1870 * - Some filters imply different filters to be set.
1871 * - Some things we can't filter out at all.
1872 * - Multicast filter seems to kill broadcast traffic so never use it.
1873 */
1874 *total_flags |= FIF_ALLMULTI;
1875 if (*total_flags & FIF_OTHER_BSS ||
1876 *total_flags & FIF_PROMISC_IN_BSS)
1877 *total_flags |= FIF_PROMISC_IN_BSS | FIF_OTHER_BSS;
1878
1879 /*
1880 * Check if there is any work left for us.
1881 */
1882 if (rt2x00dev->packet_filter == *total_flags)
1883 return;
1884 rt2x00dev->packet_filter = *total_flags;
1885
1886 /*
1887 * When in atomic context, reschedule and let rt2x00lib
1888 * call this function again.
1889 */
1890 if (in_atomic()) {
1891 queue_work(rt2x00dev->hw->workqueue, &rt2x00dev->filter_work);
1892 return;
1893 }
1894
1895 /*
1896 * Start configuration steps.
1897 * Note that the version error will always be dropped
1898 * and broadcast frames will always be accepted since
1899 * there is no filter for it at this time.
1900 */
1901 rt73usb_register_read(rt2x00dev, TXRX_CSR0, &reg);
1902 rt2x00_set_field32(&reg, TXRX_CSR0_DROP_CRC,
1903 !(*total_flags & FIF_FCSFAIL));
1904 rt2x00_set_field32(&reg, TXRX_CSR0_DROP_PHYSICAL,
1905 !(*total_flags & FIF_PLCPFAIL));
1906 rt2x00_set_field32(&reg, TXRX_CSR0_DROP_CONTROL,
1907 !(*total_flags & FIF_CONTROL));
1908 rt2x00_set_field32(&reg, TXRX_CSR0_DROP_NOT_TO_ME,
1909 !(*total_flags & FIF_PROMISC_IN_BSS));
1910 rt2x00_set_field32(&reg, TXRX_CSR0_DROP_TO_DS,
1911 !(*total_flags & FIF_PROMISC_IN_BSS));
1912 rt2x00_set_field32(&reg, TXRX_CSR0_DROP_VERSION_ERROR, 1);
1913 rt2x00_set_field32(&reg, TXRX_CSR0_DROP_MULTICAST,
1914 !(*total_flags & FIF_ALLMULTI));
1915 rt2x00_set_field32(&reg, TXRX_CSR0_DROP_BROADCAST, 0);
1916 rt2x00_set_field32(&reg, TXRX_CSR0_DROP_ACK_CTS, 1);
1917 rt73usb_register_write(rt2x00dev, TXRX_CSR0, reg);
1918}
1919
1920static int rt73usb_set_retry_limit(struct ieee80211_hw *hw, 1924static int rt73usb_set_retry_limit(struct ieee80211_hw *hw,
1921 u32 short_retry, u32 long_retry) 1925 u32 short_retry, u32 long_retry)
1922{ 1926{
@@ -1955,61 +1959,65 @@ static u64 rt73usb_get_tsf(struct ieee80211_hw *hw)
1955#define rt73usb_get_tsf NULL 1959#define rt73usb_get_tsf NULL
1956#endif 1960#endif
1957 1961
1958static void rt73usb_reset_tsf(struct ieee80211_hw *hw)
1959{
1960 struct rt2x00_dev *rt2x00dev = hw->priv;
1961
1962 rt73usb_register_write(rt2x00dev, TXRX_CSR12, 0);
1963 rt73usb_register_write(rt2x00dev, TXRX_CSR13, 0);
1964}
1965
1966static int rt73usb_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb, 1962static int rt73usb_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb,
1967 struct ieee80211_tx_control *control) 1963 struct ieee80211_tx_control *control)
1968{ 1964{
1969 struct rt2x00_dev *rt2x00dev = hw->priv; 1965 struct rt2x00_dev *rt2x00dev = hw->priv;
1970 struct skb_desc *desc; 1966 struct rt2x00_intf *intf = vif_to_intf(control->vif);
1971 struct data_ring *ring; 1967 struct skb_frame_desc *skbdesc;
1972 struct data_entry *entry; 1968 unsigned int beacon_base;
1973 int timeout; 1969 unsigned int timeout;
1970 u32 reg;
1974 1971
1975 /* 1972 if (unlikely(!intf->beacon))
1976 * Just in case the ieee80211 doesn't set this, 1973 return -ENOBUFS;
1977 * but we need this queue set for the descriptor
1978 * initialization.
1979 */
1980 control->queue = IEEE80211_TX_QUEUE_BEACON;
1981 ring = rt2x00lib_get_ring(rt2x00dev, control->queue);
1982 entry = rt2x00_get_data_entry(ring);
1983 1974
1984 /* 1975 /*
1985 * Add the descriptor in front of the skb. 1976 * Add the descriptor in front of the skb.
1986 */ 1977 */
1987 skb_push(skb, ring->desc_size); 1978 skb_push(skb, intf->beacon->queue->desc_size);
1988 memset(skb->data, 0, ring->desc_size); 1979 memset(skb->data, 0, intf->beacon->queue->desc_size);
1989 1980
1990 /* 1981 /*
1991 * Fill in skb descriptor 1982 * Fill in skb descriptor
1992 */ 1983 */
1993 desc = get_skb_desc(skb); 1984 skbdesc = get_skb_frame_desc(skb);
1994 desc->desc_len = ring->desc_size; 1985 memset(skbdesc, 0, sizeof(*skbdesc));
1995 desc->data_len = skb->len - ring->desc_size; 1986 skbdesc->flags |= FRAME_DESC_DRIVER_GENERATED;
1996 desc->desc = skb->data; 1987 skbdesc->data = skb->data + intf->beacon->queue->desc_size;
1997 desc->data = skb->data + ring->desc_size; 1988 skbdesc->data_len = skb->len - intf->beacon->queue->desc_size;
1998 desc->ring = ring; 1989 skbdesc->desc = skb->data;
1999 desc->entry = entry; 1990 skbdesc->desc_len = intf->beacon->queue->desc_size;
1991 skbdesc->entry = intf->beacon;
2000 1992
1993 /*
1994 * Disable beaconing while we are reloading the beacon data,
1995 * otherwise we might be sending out invalid data.
1996 */
1997 rt73usb_register_read(rt2x00dev, TXRX_CSR9, &reg);
1998 rt2x00_set_field32(&reg, TXRX_CSR9_TSF_TICKING, 0);
1999 rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 0);
2000 rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 0);
2001 rt73usb_register_write(rt2x00dev, TXRX_CSR9, reg);
2002
2003 /*
2004 * mac80211 doesn't provide the control->queue variable
2005 * for beacons. Set our own queue identification so
2006 * it can be used during descriptor initialization.
2007 */
2008 control->queue = RT2X00_BCN_QUEUE_BEACON;
2001 rt2x00lib_write_tx_desc(rt2x00dev, skb, control); 2009 rt2x00lib_write_tx_desc(rt2x00dev, skb, control);
2002 2010
2003 /* 2011 /*
2004 * Write entire beacon with descriptor to register, 2012 * Write entire beacon with descriptor to register,
2005 * and kick the beacon generator. 2013 * and kick the beacon generator.
2006 */ 2014 */
2015 beacon_base = HW_BEACON_OFFSET(intf->beacon->entry_idx);
2007 timeout = REGISTER_TIMEOUT * (skb->len / sizeof(u32)); 2016 timeout = REGISTER_TIMEOUT * (skb->len / sizeof(u32));
2008 rt2x00usb_vendor_request(rt2x00dev, USB_MULTI_WRITE, 2017 rt2x00usb_vendor_request(rt2x00dev, USB_MULTI_WRITE,
2009 USB_VENDOR_REQUEST_OUT, 2018 USB_VENDOR_REQUEST_OUT, beacon_base, 0,
2010 HW_BEACON_BASE0, 0x0000,
2011 skb->data, skb->len, timeout); 2019 skb->data, skb->len, timeout);
2012 rt73usb_kick_tx_queue(rt2x00dev, IEEE80211_TX_QUEUE_BEACON); 2020 rt73usb_kick_tx_queue(rt2x00dev, control->queue);
2013 2021
2014 return 0; 2022 return 0;
2015} 2023}
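
The beacon upload above writes the descriptor plus frame into the per-interface beacon slot and scales the USB request timeout with the number of 32-bit words transferred. A small sketch of that arithmetic; HW_BEACON_OFFSET() matches the macro added to rt73usb.h further down in this diff (0x100 bytes per slot starting at HW_BEACON_BASE0), while the REGISTER_TIMEOUT value used here is only a placeholder.

#include <stdio.h>

#define HW_BEACON_BASE0         0x2400
#define HW_BEACON_OFFSET(idx)   (HW_BEACON_BASE0 + ((idx) * 0x0100))
#define REGISTER_TIMEOUT        500     /* placeholder, not the driver's value */

int main(void)
{
        unsigned int entry_idx = 2;             /* third beacon slot */
        unsigned int beacon_len = 24 + 110;     /* descriptor + frame, in bytes */

        unsigned int beacon_base = HW_BEACON_OFFSET(entry_idx);
        unsigned int timeout = REGISTER_TIMEOUT * (beacon_len / 4);    /* len / sizeof(u32) */

        printf("beacon base 0x%04x, timeout %u\n", beacon_base, timeout);
        return 0;
}
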
@@ -2022,20 +2030,20 @@ static const struct ieee80211_ops rt73usb_mac80211_ops = {
2022 .remove_interface = rt2x00mac_remove_interface, 2030 .remove_interface = rt2x00mac_remove_interface,
2023 .config = rt2x00mac_config, 2031 .config = rt2x00mac_config,
2024 .config_interface = rt2x00mac_config_interface, 2032 .config_interface = rt2x00mac_config_interface,
2025 .configure_filter = rt73usb_configure_filter, 2033 .configure_filter = rt2x00mac_configure_filter,
2026 .get_stats = rt2x00mac_get_stats, 2034 .get_stats = rt2x00mac_get_stats,
2027 .set_retry_limit = rt73usb_set_retry_limit, 2035 .set_retry_limit = rt73usb_set_retry_limit,
2028 .bss_info_changed = rt2x00mac_bss_info_changed, 2036 .bss_info_changed = rt2x00mac_bss_info_changed,
2029 .conf_tx = rt2x00mac_conf_tx, 2037 .conf_tx = rt2x00mac_conf_tx,
2030 .get_tx_stats = rt2x00mac_get_tx_stats, 2038 .get_tx_stats = rt2x00mac_get_tx_stats,
2031 .get_tsf = rt73usb_get_tsf, 2039 .get_tsf = rt73usb_get_tsf,
2032 .reset_tsf = rt73usb_reset_tsf,
2033 .beacon_update = rt73usb_beacon_update, 2040 .beacon_update = rt73usb_beacon_update,
2034}; 2041};
2035 2042
2036static const struct rt2x00lib_ops rt73usb_rt2x00_ops = { 2043static const struct rt2x00lib_ops rt73usb_rt2x00_ops = {
2037 .probe_hw = rt73usb_probe_hw, 2044 .probe_hw = rt73usb_probe_hw,
2038 .get_firmware_name = rt73usb_get_firmware_name, 2045 .get_firmware_name = rt73usb_get_firmware_name,
2046 .get_firmware_crc = rt73usb_get_firmware_crc,
2039 .load_firmware = rt73usb_load_firmware, 2047 .load_firmware = rt73usb_load_firmware,
2040 .initialize = rt2x00usb_initialize, 2048 .initialize = rt2x00usb_initialize,
2041 .uninitialize = rt2x00usb_uninitialize, 2049 .uninitialize = rt2x00usb_uninitialize,
@@ -2050,19 +2058,42 @@ static const struct rt2x00lib_ops rt73usb_rt2x00_ops = {
2050 .get_tx_data_len = rt73usb_get_tx_data_len, 2058 .get_tx_data_len = rt73usb_get_tx_data_len,
2051 .kick_tx_queue = rt73usb_kick_tx_queue, 2059 .kick_tx_queue = rt73usb_kick_tx_queue,
2052 .fill_rxdone = rt73usb_fill_rxdone, 2060 .fill_rxdone = rt73usb_fill_rxdone,
2053 .config_mac_addr = rt73usb_config_mac_addr, 2061 .config_filter = rt73usb_config_filter,
2054 .config_bssid = rt73usb_config_bssid, 2062 .config_intf = rt73usb_config_intf,
2055 .config_type = rt73usb_config_type, 2063 .config_erp = rt73usb_config_erp,
2056 .config_preamble = rt73usb_config_preamble,
2057 .config = rt73usb_config, 2064 .config = rt73usb_config,
2058}; 2065};
2059 2066
2067static const struct data_queue_desc rt73usb_queue_rx = {
2068 .entry_num = RX_ENTRIES,
2069 .data_size = DATA_FRAME_SIZE,
2070 .desc_size = RXD_DESC_SIZE,
2071 .priv_size = sizeof(struct queue_entry_priv_usb_rx),
2072};
2073
2074static const struct data_queue_desc rt73usb_queue_tx = {
2075 .entry_num = TX_ENTRIES,
2076 .data_size = DATA_FRAME_SIZE,
2077 .desc_size = TXD_DESC_SIZE,
2078 .priv_size = sizeof(struct queue_entry_priv_usb_tx),
2079};
2080
2081static const struct data_queue_desc rt73usb_queue_bcn = {
2082 .entry_num = 4 * BEACON_ENTRIES,
2083 .data_size = MGMT_FRAME_SIZE,
2084 .desc_size = TXINFO_SIZE,
2085 .priv_size = sizeof(struct queue_entry_priv_usb_tx),
2086};
2087
2060static const struct rt2x00_ops rt73usb_ops = { 2088static const struct rt2x00_ops rt73usb_ops = {
2061 .name = KBUILD_MODNAME, 2089 .name = KBUILD_MODNAME,
2062 .rxd_size = RXD_DESC_SIZE, 2090 .max_sta_intf = 1,
2063 .txd_size = TXD_DESC_SIZE, 2091 .max_ap_intf = 4,
2064 .eeprom_size = EEPROM_SIZE, 2092 .eeprom_size = EEPROM_SIZE,
2065 .rf_size = RF_SIZE, 2093 .rf_size = RF_SIZE,
2094 .rx = &rt73usb_queue_rx,
2095 .tx = &rt73usb_queue_tx,
2096 .bcn = &rt73usb_queue_bcn,
2066 .lib = &rt73usb_rt2x00_ops, 2097 .lib = &rt73usb_rt2x00_ops,
2067 .hw = &rt73usb_mac80211_ops, 2098 .hw = &rt73usb_mac80211_ops,
2068#ifdef CONFIG_RT2X00_LIB_DEBUGFS 2099#ifdef CONFIG_RT2X00_LIB_DEBUGFS
diff --git a/drivers/net/wireless/rt2x00/rt73usb.h b/drivers/net/wireless/rt2x00/rt73usb.h
index d49dcaacecee..06d687425fef 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.h
+++ b/drivers/net/wireless/rt2x00/rt73usb.h
@@ -1,5 +1,5 @@
1/* 1/*
2 Copyright (C) 2004 - 2007 rt2x00 SourceForge Project 2 Copyright (C) 2004 - 2008 rt2x00 SourceForge Project
3 <http://rt2x00.serialmonkey.com> 3 <http://rt2x00.serialmonkey.com>
4 4
5 This program is free software; you can redistribute it and/or modify 5 This program is free software; you can redistribute it and/or modify
@@ -114,6 +114,9 @@ struct hw_pairwise_ta_entry {
114#define HW_BEACON_BASE2 0x2600 114#define HW_BEACON_BASE2 0x2600
115#define HW_BEACON_BASE3 0x2700 115#define HW_BEACON_BASE3 0x2700
116 116
117#define HW_BEACON_OFFSET(__index) \
118 ( HW_BEACON_BASE0 + (__index * 0x0100) )
119
117/* 120/*
118 * MAC Control/Status Registers(CSR). 121 * MAC Control/Status Registers(CSR).
119 * Some values are set in TU, whereas 1 TU == 1024 us. 122 * Some values are set in TU, whereas 1 TU == 1024 us.
@@ -146,6 +149,11 @@ struct hw_pairwise_ta_entry {
146 149
147/* 150/*
148 * MAC_CSR3: STA MAC register 1. 151 * MAC_CSR3: STA MAC register 1.
152 * UNICAST_TO_ME_MASK:
153 * Used to mask off bits from byte 5 of the MAC address
154 * to determine the UNICAST_TO_ME bit for RX frames.
155 * The full mask is complemented by BSS_ID_MASK:
156 * MASK = BSS_ID_MASK & UNICAST_TO_ME_MASK
149 */ 157 */
150#define MAC_CSR3 0x300c 158#define MAC_CSR3 0x300c
151#define MAC_CSR3_BYTE4 FIELD32(0x000000ff) 159#define MAC_CSR3_BYTE4 FIELD32(0x000000ff)
@@ -163,7 +171,14 @@ struct hw_pairwise_ta_entry {
163 171
164/* 172/*
165 * MAC_CSR5: BSSID register 1. 173 * MAC_CSR5: BSSID register 1.
166 * BSS_ID_MASK: 3: one BSSID, 0: 4 BSSID, 2 or 1: 2 BSSID. 174 * BSS_ID_MASK:
175 * This mask is used to mask off bits 0 and 1 of byte 5 of the
176 * BSSID. This will make sure that those bits will be ignored
177 * when determining the MY_BSS of RX frames.
178 * 0: 1-BSSID mode (BSS index = 0)
179 * 1: 2-BSSID mode (BSS index: Byte5, bit 0)
180 * 2: 2-BSSID mode (BSS index: byte5, bit 1)
181 * 3: 4-BSSID mode (BSS index: byte5, bit 0 - 1)
167 */ 182 */
168#define MAC_CSR5 0x3014 183#define MAC_CSR5 0x3014
169#define MAC_CSR5_BYTE4 FIELD32(0x000000ff) 184#define MAC_CSR5_BYTE4 FIELD32(0x000000ff)
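
The expanded BSS_ID_MASK comment above says the programmed mode decides how many low bits of byte 5 of the BSSID the hardware ignores when deciding MY_BSS (0 = 1-BSSID mode up to 3 = 4-BSSID mode). The sketch below spells that comparison out in software purely as an illustration of the comment; the hardware does this itself, and the exact semantics are those of the register documentation, not of this code.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Software spelling of the MY_BSS rule described in the comment above:
 * depending on the programmed mode, the low bit(s) of byte 5 of the
 * BSSID are ignored in the comparison. */
static bool my_bss_sketch(const uint8_t rx_bssid[6], const uint8_t own_bssid[6],
                          unsigned int bss_id_mask)
{
        uint8_t ignore;

        switch (bss_id_mask) {
        case 0:  ignore = 0x00; break;  /* 1-BSSID mode */
        case 1:  ignore = 0x01; break;  /* 2-BSSID mode, byte5 bit 0 */
        case 2:  ignore = 0x02; break;  /* 2-BSSID mode, byte5 bit 1 */
        default: ignore = 0x03; break;  /* 4-BSSID mode, byte5 bits 0-1 */
        }

        if (memcmp(rx_bssid, own_bssid, 5))
                return false;
        return ((rx_bssid[5] ^ own_bssid[5]) & ~ignore & 0xff) == 0;
}

int main(void)
{
        const uint8_t own[6] = { 0x00, 0x0e, 0x2e, 0x11, 0x22, 0x30 };
        const uint8_t rx[6]  = { 0x00, 0x0e, 0x2e, 0x11, 0x22, 0x32 };

        printf("mask 0: %d, mask 3: %d\n",
               my_bss_sketch(rx, own, 0), my_bss_sketch(rx, own, 3));
        return 0;
}
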
@@ -867,6 +882,7 @@ struct hw_pairwise_ta_entry {
867 * DMA descriptor defines. 882 * DMA descriptor defines.
868 */ 883 */
869#define TXD_DESC_SIZE ( 6 * sizeof(__le32) ) 884#define TXD_DESC_SIZE ( 6 * sizeof(__le32) )
885#define TXINFO_SIZE ( 6 * sizeof(__le32) )
870#define RXD_DESC_SIZE ( 6 * sizeof(__le32) ) 886#define RXD_DESC_SIZE ( 6 * sizeof(__le32) )
871 887
872/* 888/*
@@ -1007,8 +1023,8 @@ struct hw_pairwise_ta_entry {
1007#define RXD_W5_RESERVED FIELD32(0xffffffff) 1023#define RXD_W5_RESERVED FIELD32(0xffffffff)
1008 1024
1009/* 1025/*
1010 * Macro's for converting txpower from EEPROM to dscape value 1026 * Macro's for converting txpower from EEPROM to mac80211 value
1011 * and from dscape value to register value. 1027 * and from mac80211 value to register value.
1012 */ 1028 */
1013#define MIN_TXPOWER 0 1029#define MIN_TXPOWER 0
1014#define MAX_TXPOWER 31 1030#define MAX_TXPOWER 31
diff --git a/drivers/net/wireless/rtl8180.h b/drivers/net/wireless/rtl8180.h
index 2cbfe3c8081f..082a11f93beb 100644
--- a/drivers/net/wireless/rtl8180.h
+++ b/drivers/net/wireless/rtl8180.h
@@ -102,7 +102,7 @@ struct rtl8180_priv {
102 struct rtl8180_tx_ring tx_ring[4]; 102 struct rtl8180_tx_ring tx_ring[4];
103 struct ieee80211_channel channels[14]; 103 struct ieee80211_channel channels[14];
104 struct ieee80211_rate rates[12]; 104 struct ieee80211_rate rates[12];
105 struct ieee80211_hw_mode modes[2]; 105 struct ieee80211_supported_band band;
106 struct pci_dev *pdev; 106 struct pci_dev *pdev;
107 u32 rx_conf; 107 u32 rx_conf;
108 108
diff --git a/drivers/net/wireless/rtl8180_dev.c b/drivers/net/wireless/rtl8180_dev.c
index 5e9a8ace0d81..c181f23e930d 100644
--- a/drivers/net/wireless/rtl8180_dev.c
+++ b/drivers/net/wireless/rtl8180_dev.c
@@ -49,6 +49,41 @@ static struct pci_device_id rtl8180_table[] __devinitdata = {
49 49
50MODULE_DEVICE_TABLE(pci, rtl8180_table); 50MODULE_DEVICE_TABLE(pci, rtl8180_table);
51 51
52static const struct ieee80211_rate rtl818x_rates[] = {
53 { .bitrate = 10, .hw_value = 0, },
54 { .bitrate = 20, .hw_value = 1, },
55 { .bitrate = 55, .hw_value = 2, },
56 { .bitrate = 110, .hw_value = 3, },
57 { .bitrate = 60, .hw_value = 4, },
58 { .bitrate = 90, .hw_value = 5, },
59 { .bitrate = 120, .hw_value = 6, },
60 { .bitrate = 180, .hw_value = 7, },
61 { .bitrate = 240, .hw_value = 8, },
62 { .bitrate = 360, .hw_value = 9, },
63 { .bitrate = 480, .hw_value = 10, },
64 { .bitrate = 540, .hw_value = 11, },
65};
66
67static const struct ieee80211_channel rtl818x_channels[] = {
68 { .center_freq = 2412 },
69 { .center_freq = 2417 },
70 { .center_freq = 2422 },
71 { .center_freq = 2427 },
72 { .center_freq = 2432 },
73 { .center_freq = 2437 },
74 { .center_freq = 2442 },
75 { .center_freq = 2447 },
76 { .center_freq = 2452 },
77 { .center_freq = 2457 },
78 { .center_freq = 2462 },
79 { .center_freq = 2467 },
80 { .center_freq = 2472 },
81 { .center_freq = 2484 },
82};
83
84
85
86
52void rtl8180_write_phy(struct ieee80211_hw *dev, u8 addr, u32 data) 87void rtl8180_write_phy(struct ieee80211_hw *dev, u8 addr, u32 data)
53{ 88{
54 struct rtl8180_priv *priv = dev->priv; 89 struct rtl8180_priv *priv = dev->priv;
@@ -99,10 +134,10 @@ static void rtl8180_handle_rx(struct ieee80211_hw *dev)
99 /* TODO: improve signal/rssi reporting */ 134 /* TODO: improve signal/rssi reporting */
100 rx_status.signal = flags2 & 0xFF; 135 rx_status.signal = flags2 & 0xFF;
101 rx_status.ssi = (flags2 >> 8) & 0x7F; 136 rx_status.ssi = (flags2 >> 8) & 0x7F;
102 rx_status.rate = (flags >> 20) & 0xF; 137 /* XXX: is this correct? */
103 rx_status.freq = dev->conf.freq; 138 rx_status.rate_idx = (flags >> 20) & 0xF;
104 rx_status.channel = dev->conf.channel; 139 rx_status.freq = dev->conf.channel->center_freq;
105 rx_status.phymode = dev->conf.phymode; 140 rx_status.band = dev->conf.channel->band;
106 rx_status.mactime = le64_to_cpu(entry->tsft); 141 rx_status.mactime = le64_to_cpu(entry->tsft);
107 rx_status.flag |= RX_FLAG_TSFT; 142 rx_status.flag |= RX_FLAG_TSFT;
108 if (flags & RTL8180_RX_DESC_FLAG_CRC32_ERR) 143 if (flags & RTL8180_RX_DESC_FLAG_CRC32_ERR)
@@ -222,18 +257,25 @@ static int rtl8180_tx(struct ieee80211_hw *dev, struct sk_buff *skb,
222 mapping = pci_map_single(priv->pdev, skb->data, 257 mapping = pci_map_single(priv->pdev, skb->data,
223 skb->len, PCI_DMA_TODEVICE); 258 skb->len, PCI_DMA_TODEVICE);
224 259
260 BUG_ON(!control->tx_rate);
261
225 tx_flags = RTL8180_TX_DESC_FLAG_OWN | RTL8180_TX_DESC_FLAG_FS | 262 tx_flags = RTL8180_TX_DESC_FLAG_OWN | RTL8180_TX_DESC_FLAG_FS |
226 RTL8180_TX_DESC_FLAG_LS | (control->tx_rate << 24) | 263 RTL8180_TX_DESC_FLAG_LS |
227 (control->rts_cts_rate << 19) | skb->len; 264 (control->tx_rate->hw_value << 24) | skb->len;
228 265
229 if (priv->r8185) 266 if (priv->r8185)
230 tx_flags |= RTL8180_TX_DESC_FLAG_DMA | 267 tx_flags |= RTL8180_TX_DESC_FLAG_DMA |
231 RTL8180_TX_DESC_FLAG_NO_ENC; 268 RTL8180_TX_DESC_FLAG_NO_ENC;
232 269
233 if (control->flags & IEEE80211_TXCTL_USE_RTS_CTS) 270 if (control->flags & IEEE80211_TXCTL_USE_RTS_CTS) {
271 BUG_ON(!control->rts_cts_rate);
234 tx_flags |= RTL8180_TX_DESC_FLAG_RTS; 272 tx_flags |= RTL8180_TX_DESC_FLAG_RTS;
235 else if (control->flags & IEEE80211_TXCTL_USE_CTS_PROTECT) 273 tx_flags |= control->rts_cts_rate->hw_value << 19;
274 } else if (control->flags & IEEE80211_TXCTL_USE_CTS_PROTECT) {
275 BUG_ON(!control->rts_cts_rate);
236 tx_flags |= RTL8180_TX_DESC_FLAG_CTS; 276 tx_flags |= RTL8180_TX_DESC_FLAG_CTS;
277 tx_flags |= control->rts_cts_rate->hw_value << 19;
278 }
237 279
238 *((struct ieee80211_tx_control **) skb->cb) = 280 *((struct ieee80211_tx_control **) skb->cb) =
239 kmemdup(control, sizeof(*control), GFP_ATOMIC); 281 kmemdup(control, sizeof(*control), GFP_ATOMIC);
@@ -246,9 +288,9 @@ static int rtl8180_tx(struct ieee80211_hw *dev, struct sk_buff *skb,
246 unsigned int remainder; 288 unsigned int remainder;
247 289
248 plcp_len = DIV_ROUND_UP(16 * (skb->len + 4), 290 plcp_len = DIV_ROUND_UP(16 * (skb->len + 4),
249 (control->rate->rate * 2) / 10); 291 (control->tx_rate->bitrate * 2) / 10);
250 remainder = (16 * (skb->len + 4)) % 292 remainder = (16 * (skb->len + 4)) %
251 ((control->rate->rate * 2) / 10); 293 ((control->tx_rate->bitrate * 2) / 10);
252 if (remainder > 0 && remainder <= 6) 294 if (remainder > 0 && remainder <= 6)
253 plcp_len |= 1 << 15; 295 plcp_len |= 1 << 15;
254 } 296 }
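
The DIV_ROUND_UP() expression in the hunk above is the CCK PLCP LENGTH field: the PSDU airtime in microseconds, kept in integer arithmetic by doubling both the bit count and the rate (mac80211 bitrates are in units of 100 kbit/s, so bitrate * 2 / 10 is twice the bits-per-microsecond figure). The remainder test sets the 802.11b length-extension bit (bit 15) where rounding at 11 Mbit/s would otherwise be ambiguous. A self-contained sketch:

#include <stdio.h>

/* CCK PLCP LENGTH sketch: airtime of the PSDU (frame + 4-byte FCS) in
 * microseconds, with the 802.11b length-extension bit in bit 15.
 * 'bitrate' is in units of 100 kbit/s, as in mac80211. */
static unsigned int cck_plcp_len(unsigned int frame_len, unsigned int bitrate)
{
        unsigned int bits2 = 16 * (frame_len + 4);      /* 2 x bits, incl. FCS */
        unsigned int rate2 = (bitrate * 2) / 10;        /* 2 x bits per microsecond */
        unsigned int plcp_len = (bits2 + rate2 - 1) / rate2;
        unsigned int remainder = bits2 % rate2;

        if (remainder > 0 && remainder <= 6)
                plcp_len |= 1 << 15;

        return plcp_len;
}

int main(void)
{
        /* A 1500-byte frame at 11 Mbit/s (bitrate = 110). */
        printf("plcp_len = 0x%04x\n", cck_plcp_len(1500, 110));
        return 0;
}
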
@@ -261,8 +303,8 @@ static int rtl8180_tx(struct ieee80211_hw *dev, struct sk_buff *skb,
261 entry->plcp_len = cpu_to_le16(plcp_len); 303 entry->plcp_len = cpu_to_le16(plcp_len);
262 entry->tx_buf = cpu_to_le32(mapping); 304 entry->tx_buf = cpu_to_le32(mapping);
263 entry->frame_len = cpu_to_le32(skb->len); 305 entry->frame_len = cpu_to_le32(skb->len);
264 entry->flags2 = control->alt_retry_rate != -1 ? 306 entry->flags2 = control->alt_retry_rate != NULL ?
265 control->alt_retry_rate << 4 : 0; 307 control->alt_retry_rate->bitrate << 4 : 0;
266 entry->retry_limit = control->retry_limit; 308 entry->retry_limit = control->retry_limit;
267 entry->flags = cpu_to_le32(tx_flags); 309 entry->flags = cpu_to_le32(tx_flags);
268 __skb_queue_tail(&ring->queue, skb); 310 __skb_queue_tail(&ring->queue, skb);
@@ -646,9 +688,9 @@ static int rtl8180_add_interface(struct ieee80211_hw *dev,
646 688
647 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG); 689 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG);
648 rtl818x_iowrite32(priv, (__le32 __iomem *)&priv->map->MAC[0], 690 rtl818x_iowrite32(priv, (__le32 __iomem *)&priv->map->MAC[0],
649 cpu_to_le32(*(u32 *)conf->mac_addr)); 691 le32_to_cpu(*(__le32 *)conf->mac_addr));
650 rtl818x_iowrite16(priv, (__le16 __iomem *)&priv->map->MAC[4], 692 rtl818x_iowrite16(priv, (__le16 __iomem *)&priv->map->MAC[4],
651 cpu_to_le16(*(u16 *)(conf->mac_addr + 4))); 693 le16_to_cpu(*(__le16 *)(conf->mac_addr + 4)));
652 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL); 694 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL);
653 695
654 return 0; 696 return 0;
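
The change above reads the MAC octets as a little-endian byte stream (le32_to_cpu/le16_to_cpu) rather than converting the host value to little-endian, because the rtl818x_iowrite helpers already store their argument little-endian on the bus; the old combination came out byte-reversed on big-endian machines. A userspace sketch of the intended byte handling; le32_decode() and iowrite32_sketch() are stand-ins, not kernel APIs.

#include <stdint.h>
#include <stdio.h>

/* The octets of the MAC address form a byte stream; decoding them *as*
 * little-endian gives the host value whose little-endian store puts
 * the octets back on the bus in the right order. */
static uint32_t le32_decode(const uint8_t *p)   /* ~ le32_to_cpu(*(__le32 *)p) */
{
        return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
               (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

static void iowrite32_sketch(uint32_t value)    /* ~ rtl818x_iowrite32() */
{
        /* A real iowrite32() stores the value little-endian; print the
         * byte order the device would observe. */
        printf("bus bytes: %02x %02x %02x %02x\n",
               value & 0xff, (value >> 8) & 0xff,
               (value >> 16) & 0xff, (value >> 24) & 0xff);
}

int main(void)
{
        const uint8_t mac[6] = { 0x00, 0xe0, 0x4c, 0x12, 0x34, 0x56 };

        iowrite32_sketch(le32_decode(mac));     /* prints 00 e0 4c 12 */
        return 0;
}
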
@@ -838,19 +880,19 @@ static int __devinit rtl8180_probe(struct pci_dev *pdev,
838 goto err_free_dev; 880 goto err_free_dev;
839 } 881 }
840 882
883 BUILD_BUG_ON(sizeof(priv->channels) != sizeof(rtl818x_channels));
884 BUILD_BUG_ON(sizeof(priv->rates) != sizeof(rtl818x_rates));
885
841 memcpy(priv->channels, rtl818x_channels, sizeof(rtl818x_channels)); 886 memcpy(priv->channels, rtl818x_channels, sizeof(rtl818x_channels));
842 memcpy(priv->rates, rtl818x_rates, sizeof(rtl818x_rates)); 887 memcpy(priv->rates, rtl818x_rates, sizeof(rtl818x_rates));
843 priv->modes[0].mode = MODE_IEEE80211G; 888
844 priv->modes[0].num_rates = ARRAY_SIZE(rtl818x_rates); 889 priv->band.band = IEEE80211_BAND_2GHZ;
845 priv->modes[0].rates = priv->rates; 890 priv->band.channels = priv->channels;
846 priv->modes[0].num_channels = ARRAY_SIZE(rtl818x_channels); 891 priv->band.n_channels = ARRAY_SIZE(rtl818x_channels);
847 priv->modes[0].channels = priv->channels; 892 priv->band.bitrates = priv->rates;
848 priv->modes[1].mode = MODE_IEEE80211B; 893 priv->band.n_bitrates = 4;
849 priv->modes[1].num_rates = 4; 894 dev->wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band;
850 priv->modes[1].rates = priv->rates; 895
851 priv->modes[1].num_channels = ARRAY_SIZE(rtl818x_channels);
852 priv->modes[1].channels = priv->channels;
853 priv->mode = IEEE80211_IF_TYPE_INVALID;
854 dev->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING | 896 dev->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
855 IEEE80211_HW_RX_INCLUDES_FCS; 897 IEEE80211_HW_RX_INCLUDES_FCS;
856 dev->queues = 1; 898 dev->queues = 1;
@@ -879,15 +921,10 @@ static int __devinit rtl8180_probe(struct pci_dev *pdev,
879 921
880 priv->r8185 = reg & RTL818X_TX_CONF_R8185_ABC; 922 priv->r8185 = reg & RTL818X_TX_CONF_R8185_ABC;
881 if (priv->r8185) { 923 if (priv->r8185) {
882 if ((err = ieee80211_register_hwmode(dev, &priv->modes[0]))) 924 priv->band.n_bitrates = ARRAY_SIZE(rtl818x_rates);
883 goto err_iounmap;
884
885 pci_try_set_mwi(pdev); 925 pci_try_set_mwi(pdev);
886 } 926 }
887 927
888 if ((err = ieee80211_register_hwmode(dev, &priv->modes[1])))
889 goto err_iounmap;
890
891 eeprom.data = dev; 928 eeprom.data = dev;
892 eeprom.register_read = rtl8180_eeprom_register_read; 929 eeprom.register_read = rtl8180_eeprom_register_read;
893 eeprom.register_write = rtl8180_eeprom_register_write; 930 eeprom.register_write = rtl8180_eeprom_register_write;
@@ -950,8 +987,8 @@ static int __devinit rtl8180_probe(struct pci_dev *pdev,
950 for (i = 0; i < 14; i += 2) { 987 for (i = 0; i < 14; i += 2) {
951 u16 txpwr; 988 u16 txpwr;
952 eeprom_93cx6_read(&eeprom, 0x10 + (i >> 1), &txpwr); 989 eeprom_93cx6_read(&eeprom, 0x10 + (i >> 1), &txpwr);
953 priv->channels[i].val = txpwr & 0xFF; 990 priv->channels[i].hw_value = txpwr & 0xFF;
954 priv->channels[i + 1].val = txpwr >> 8; 991 priv->channels[i + 1].hw_value = txpwr >> 8;
955 } 992 }
956 993
957 /* OFDM TX power */ 994 /* OFDM TX power */
@@ -959,8 +996,8 @@ static int __devinit rtl8180_probe(struct pci_dev *pdev,
959 for (i = 0; i < 14; i += 2) { 996 for (i = 0; i < 14; i += 2) {
960 u16 txpwr; 997 u16 txpwr;
961 eeprom_93cx6_read(&eeprom, 0x20 + (i >> 1), &txpwr); 998 eeprom_93cx6_read(&eeprom, 0x20 + (i >> 1), &txpwr);
962 priv->channels[i].val |= (txpwr & 0xFF) << 8; 999 priv->channels[i].hw_value |= (txpwr & 0xFF) << 8;
963 priv->channels[i + 1].val |= txpwr & 0xFF00; 1000 priv->channels[i + 1].hw_value |= txpwr & 0xFF00;
964 } 1001 }
965 } 1002 }
966 1003
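
In the probe hunks above each 16-bit EEPROM word supplies the CCK power for a pair of adjacent channels (low byte for the even-index channel, high byte for the next one), and a second table folds the OFDM power into the high byte of every channel's hw_value; rtl8225_rf_set_tx_power() further down unpacks exactly that layout. A sketch of the packing with made-up EEPROM contents:

#include <stdint.h>
#include <stdio.h>

/* Sketch of the per-channel tx-power packing from rtl8180_probe():
 * EEPROM word 0x10+i holds CCK power for channels 2i and 2i+1, word
 * 0x20+i the OFDM power for the same pair, and both land in each
 * channel's hw_value as (ofdm << 8) | cck. */
static void pack_txpower(uint16_t hw_value[14],
                         const uint16_t cck_words[7],
                         const uint16_t ofdm_words[7])
{
        int i;

        for (i = 0; i < 14; i += 2) {
                hw_value[i]      = cck_words[i >> 1] & 0xFF;
                hw_value[i + 1]  = cck_words[i >> 1] >> 8;
                hw_value[i]     |= (ofdm_words[i >> 1] & 0xFF) << 8;
                hw_value[i + 1] |= ofdm_words[i >> 1] & 0xFF00;
        }
}

int main(void)
{
        /* Made-up EEPROM contents, two channels per word. */
        const uint16_t cck[7]  = { 0x0c0b, 0x0d0c, 0x0e0d, 0x0f0e,
                                   0x100f, 0x1110, 0x1211 };
        const uint16_t ofdm[7] = { 0x1a19, 0x1b1a, 0x1c1b, 0x1d1c,
                                   0x1e1d, 0x1f1e, 0x201f };
        uint16_t hw_value[14];
        int i;

        pack_txpower(hw_value, cck, ofdm);
        for (i = 0; i < 14; i++)
                printf("channel %2d: cck %2d, ofdm %2d\n",
                       i + 1, hw_value[i] & 0xFF, hw_value[i] >> 8);
        return 0;
}
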
diff --git a/drivers/net/wireless/rtl8180_grf5101.c b/drivers/net/wireless/rtl8180_grf5101.c
index 8293e19c4c59..5d47935dbac3 100644
--- a/drivers/net/wireless/rtl8180_grf5101.c
+++ b/drivers/net/wireless/rtl8180_grf5101.c
@@ -73,8 +73,9 @@ static void grf5101_rf_set_channel(struct ieee80211_hw *dev,
73 struct ieee80211_conf *conf) 73 struct ieee80211_conf *conf)
74{ 74{
75 struct rtl8180_priv *priv = dev->priv; 75 struct rtl8180_priv *priv = dev->priv;
76 u32 txpw = priv->channels[conf->channel - 1].val & 0xFF; 76 int channel = ieee80211_frequency_to_channel(conf->channel->center_freq);
77 u32 chan = conf->channel - 1; 77 u32 txpw = priv->channels[channel - 1].hw_value & 0xFF;
78 u32 chan = channel - 1;
78 79
79 /* set TX power */ 80 /* set TX power */
80 write_grf5101(dev, 0x15, 0x0); 81 write_grf5101(dev, 0x15, 0x0);
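
With struct ieee80211_channel replacing the old channel numbers, the RF set_channel hooks in this and the following files recover the channel index from the configured centre frequency through ieee80211_frequency_to_channel(). A 2.4 GHz-only sketch of that mapping (the real mac80211 helper also covers 5 GHz):

#include <stdio.h>

/* 2.4 GHz-only sketch of ieee80211_frequency_to_channel(): channels
 * 1-13 sit 5 MHz apart starting at 2412 MHz, channel 14 is special. */
static int freq_to_channel_2ghz(int center_freq)
{
        if (center_freq == 2484)
                return 14;
        if (center_freq >= 2412 && center_freq <= 2472)
                return (center_freq - 2407) / 5;
        return 0;       /* not a 2.4 GHz channel */
}

int main(void)
{
        printf("2412 MHz -> channel %d\n", freq_to_channel_2ghz(2412));
        printf("2462 MHz -> channel %d\n", freq_to_channel_2ghz(2462));
        printf("2484 MHz -> channel %d\n", freq_to_channel_2ghz(2484));
        return 0;
}
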
diff --git a/drivers/net/wireless/rtl8180_max2820.c b/drivers/net/wireless/rtl8180_max2820.c
index 98fe9fd64968..a34dfd382b6d 100644
--- a/drivers/net/wireless/rtl8180_max2820.c
+++ b/drivers/net/wireless/rtl8180_max2820.c
@@ -78,8 +78,9 @@ static void max2820_rf_set_channel(struct ieee80211_hw *dev,
78 struct ieee80211_conf *conf) 78 struct ieee80211_conf *conf)
79{ 79{
80 struct rtl8180_priv *priv = dev->priv; 80 struct rtl8180_priv *priv = dev->priv;
81 	unsigned int chan_idx = conf ? conf->channel - 1 : 0;
82 	u32 txpw = priv->channels[chan_idx].val & 0xFF;
81 	int channel = ieee80211_frequency_to_channel(conf->channel->center_freq);
82 	unsigned int chan_idx = channel - 1;
83 	u32 txpw = priv->channels[chan_idx].hw_value & 0xFF;
83 u32 chan = max2820_chan[chan_idx]; 84 u32 chan = max2820_chan[chan_idx];
84 85
85 /* While philips SA2400 drive the PA bias from 86 /* While philips SA2400 drive the PA bias from
diff --git a/drivers/net/wireless/rtl8180_rtl8225.c b/drivers/net/wireless/rtl8180_rtl8225.c
index ef3832bee85c..cd22781728a9 100644
--- a/drivers/net/wireless/rtl8180_rtl8225.c
+++ b/drivers/net/wireless/rtl8180_rtl8225.c
@@ -261,8 +261,8 @@ static void rtl8225_rf_set_tx_power(struct ieee80211_hw *dev, int channel)
261 u32 reg; 261 u32 reg;
262 int i; 262 int i;
263 263
264 cck_power = priv->channels[channel - 1].val & 0xFF; 264 cck_power = priv->channels[channel - 1].hw_value & 0xFF;
265 ofdm_power = priv->channels[channel - 1].val >> 8; 265 ofdm_power = priv->channels[channel - 1].hw_value >> 8;
266 266
267 cck_power = min(cck_power, (u8)35); 267 cck_power = min(cck_power, (u8)35);
268 ofdm_power = min(ofdm_power, (u8)35); 268 ofdm_power = min(ofdm_power, (u8)35);
@@ -476,8 +476,8 @@ static void rtl8225z2_rf_set_tx_power(struct ieee80211_hw *dev, int channel)
476 const u8 *tmp; 476 const u8 *tmp;
477 int i; 477 int i;
478 478
479 cck_power = priv->channels[channel - 1].val & 0xFF; 479 cck_power = priv->channels[channel - 1].hw_value & 0xFF;
480 ofdm_power = priv->channels[channel - 1].val >> 8; 480 ofdm_power = priv->channels[channel - 1].hw_value >> 8;
481 481
482 if (channel == 14) 482 if (channel == 14)
483 tmp = rtl8225z2_tx_power_cck_ch14; 483 tmp = rtl8225z2_tx_power_cck_ch14;
@@ -716,13 +716,14 @@ static void rtl8225_rf_set_channel(struct ieee80211_hw *dev,
716 struct ieee80211_conf *conf) 716 struct ieee80211_conf *conf)
717{ 717{
718 struct rtl8180_priv *priv = dev->priv; 718 struct rtl8180_priv *priv = dev->priv;
719 int chan = ieee80211_frequency_to_channel(conf->channel->center_freq);
719 720
720 if (priv->rf->init == rtl8225_rf_init) 721 if (priv->rf->init == rtl8225_rf_init)
721 rtl8225_rf_set_tx_power(dev, conf->channel); 722 rtl8225_rf_set_tx_power(dev, chan);
722 else 723 else
723 rtl8225z2_rf_set_tx_power(dev, conf->channel); 724 rtl8225z2_rf_set_tx_power(dev, chan);
724 725
725 rtl8225_write(dev, 0x7, rtl8225_chan[conf->channel - 1]); 726 rtl8225_write(dev, 0x7, rtl8225_chan[chan - 1]);
726 msleep(10); 727 msleep(10);
727 728
728 if (conf->flags & IEEE80211_CONF_SHORT_SLOT_TIME) { 729 if (conf->flags & IEEE80211_CONF_SHORT_SLOT_TIME) {
diff --git a/drivers/net/wireless/rtl8180_sa2400.c b/drivers/net/wireless/rtl8180_sa2400.c
index e08ace7b1cb7..0311b4ea124c 100644
--- a/drivers/net/wireless/rtl8180_sa2400.c
+++ b/drivers/net/wireless/rtl8180_sa2400.c
@@ -80,8 +80,9 @@ static void sa2400_rf_set_channel(struct ieee80211_hw *dev,
80 struct ieee80211_conf *conf) 80 struct ieee80211_conf *conf)
81{ 81{
82 struct rtl8180_priv *priv = dev->priv; 82 struct rtl8180_priv *priv = dev->priv;
83 	u32 txpw = priv->channels[conf->channel - 1].val & 0xFF;
84 	u32 chan = sa2400_chan[conf->channel - 1];
83 	int channel = ieee80211_frequency_to_channel(conf->channel->center_freq);
84 	u32 txpw = priv->channels[channel - 1].hw_value & 0xFF;
85 	u32 chan = sa2400_chan[channel - 1];
85 86
86 write_sa2400(dev, 7, txpw); 87 write_sa2400(dev, 7, txpw);
87 88
diff --git a/drivers/net/wireless/rtl8187.h b/drivers/net/wireless/rtl8187.h
index 8680a0b6433c..076d88b6db0e 100644
--- a/drivers/net/wireless/rtl8187.h
+++ b/drivers/net/wireless/rtl8187.h
@@ -71,7 +71,7 @@ struct rtl8187_priv {
71 /* rtl8187 specific */ 71 /* rtl8187 specific */
72 struct ieee80211_channel channels[14]; 72 struct ieee80211_channel channels[14];
73 struct ieee80211_rate rates[12]; 73 struct ieee80211_rate rates[12];
74 struct ieee80211_hw_mode modes[2]; 74 struct ieee80211_supported_band band;
75 struct usb_device *udev; 75 struct usb_device *udev;
76 u32 rx_conf; 76 u32 rx_conf;
77 u16 txpwr_base; 77 u16 txpwr_base;
diff --git a/drivers/net/wireless/rtl8187_dev.c b/drivers/net/wireless/rtl8187_dev.c
index 133b3f39eeb6..d5787b37e1fb 100644
--- a/drivers/net/wireless/rtl8187_dev.c
+++ b/drivers/net/wireless/rtl8187_dev.c
@@ -45,6 +45,38 @@ static struct usb_device_id rtl8187_table[] __devinitdata = {
45 45
46MODULE_DEVICE_TABLE(usb, rtl8187_table); 46MODULE_DEVICE_TABLE(usb, rtl8187_table);
47 47
48static const struct ieee80211_rate rtl818x_rates[] = {
49 { .bitrate = 10, .hw_value = 0, },
50 { .bitrate = 20, .hw_value = 1, },
51 { .bitrate = 55, .hw_value = 2, },
52 { .bitrate = 110, .hw_value = 3, },
53 { .bitrate = 60, .hw_value = 4, },
54 { .bitrate = 90, .hw_value = 5, },
55 { .bitrate = 120, .hw_value = 6, },
56 { .bitrate = 180, .hw_value = 7, },
57 { .bitrate = 240, .hw_value = 8, },
58 { .bitrate = 360, .hw_value = 9, },
59 { .bitrate = 480, .hw_value = 10, },
60 { .bitrate = 540, .hw_value = 11, },
61};
62
63static const struct ieee80211_channel rtl818x_channels[] = {
64 { .center_freq = 2412 },
65 { .center_freq = 2417 },
66 { .center_freq = 2422 },
67 { .center_freq = 2427 },
68 { .center_freq = 2432 },
69 { .center_freq = 2437 },
70 { .center_freq = 2442 },
71 { .center_freq = 2447 },
72 { .center_freq = 2452 },
73 { .center_freq = 2457 },
74 { .center_freq = 2462 },
75 { .center_freq = 2467 },
76 { .center_freq = 2472 },
77 { .center_freq = 2484 },
78};
79
48static void rtl8187_iowrite_async_cb(struct urb *urb) 80static void rtl8187_iowrite_async_cb(struct urb *urb)
49{ 81{
50 kfree(urb->context); 82 kfree(urb->context);
@@ -146,17 +178,23 @@ static int rtl8187_tx(struct ieee80211_hw *dev, struct sk_buff *skb,
146 178
147 flags = skb->len; 179 flags = skb->len;
148 flags |= RTL8187_TX_FLAG_NO_ENCRYPT; 180 flags |= RTL8187_TX_FLAG_NO_ENCRYPT;
149 flags |= control->rts_cts_rate << 19; 181
150 flags |= control->tx_rate << 24; 182 BUG_ON(!control->tx_rate);
183
184 flags |= control->tx_rate->hw_value << 24;
151 if (ieee80211_get_morefrag((struct ieee80211_hdr *)skb->data)) 185 if (ieee80211_get_morefrag((struct ieee80211_hdr *)skb->data))
152 flags |= RTL8187_TX_FLAG_MORE_FRAG; 186 flags |= RTL8187_TX_FLAG_MORE_FRAG;
153 if (control->flags & IEEE80211_TXCTL_USE_RTS_CTS) { 187 if (control->flags & IEEE80211_TXCTL_USE_RTS_CTS) {
188 BUG_ON(!control->rts_cts_rate);
154 flags |= RTL8187_TX_FLAG_RTS; 189 flags |= RTL8187_TX_FLAG_RTS;
190 flags |= control->rts_cts_rate->hw_value << 19;
155 rts_dur = ieee80211_rts_duration(dev, priv->vif, 191 rts_dur = ieee80211_rts_duration(dev, priv->vif,
156 skb->len, control); 192 skb->len, control);
157 } 193 } else if (control->flags & IEEE80211_TXCTL_USE_CTS_PROTECT) {
158 if (control->flags & IEEE80211_TXCTL_USE_CTS_PROTECT) 194 BUG_ON(!control->rts_cts_rate);
159 flags |= RTL8187_TX_FLAG_CTS; 195 flags |= RTL8187_TX_FLAG_CTS;
196 flags |= control->rts_cts_rate->hw_value << 19;
197 }
160 198
161 hdr = (struct rtl8187_tx_hdr *)skb_push(skb, sizeof(*hdr)); 199 hdr = (struct rtl8187_tx_hdr *)skb_push(skb, sizeof(*hdr));
162 hdr->flags = cpu_to_le32(flags); 200 hdr->flags = cpu_to_le32(flags);
@@ -225,10 +263,9 @@ static void rtl8187_rx_cb(struct urb *urb)
225 rx_status.antenna = (hdr->signal >> 7) & 1; 263 rx_status.antenna = (hdr->signal >> 7) & 1;
226 rx_status.signal = 64 - min(hdr->noise, (u8)64); 264 rx_status.signal = 64 - min(hdr->noise, (u8)64);
227 rx_status.ssi = signal; 265 rx_status.ssi = signal;
228 rx_status.rate = rate; 266 rx_status.rate_idx = rate;
229 rx_status.freq = dev->conf.freq; 267 rx_status.freq = dev->conf.channel->center_freq;
230 rx_status.channel = dev->conf.channel; 268 rx_status.band = dev->conf.channel->band;
231 rx_status.phymode = dev->conf.phymode;
232 rx_status.mactime = le64_to_cpu(hdr->mac_time); 269 rx_status.mactime = le64_to_cpu(hdr->mac_time);
233 rx_status.flag |= RX_FLAG_TSFT; 270 rx_status.flag |= RX_FLAG_TSFT;
234 if (flags & (1 << 13)) 271 if (flags & (1 << 13))
@@ -685,19 +722,22 @@ static int __devinit rtl8187_probe(struct usb_interface *intf,
685 usb_get_dev(udev); 722 usb_get_dev(udev);
686 723
687 skb_queue_head_init(&priv->rx_queue); 724 skb_queue_head_init(&priv->rx_queue);
725
726 BUILD_BUG_ON(sizeof(priv->channels) != sizeof(rtl818x_channels));
727 BUILD_BUG_ON(sizeof(priv->rates) != sizeof(rtl818x_rates));
728
688 memcpy(priv->channels, rtl818x_channels, sizeof(rtl818x_channels)); 729 memcpy(priv->channels, rtl818x_channels, sizeof(rtl818x_channels));
689 memcpy(priv->rates, rtl818x_rates, sizeof(rtl818x_rates)); 730 memcpy(priv->rates, rtl818x_rates, sizeof(rtl818x_rates));
690 priv->map = (struct rtl818x_csr *)0xFF00; 731 priv->map = (struct rtl818x_csr *)0xFF00;
691 	priv->modes[0].mode = MODE_IEEE80211G;
692 	priv->modes[0].num_rates = ARRAY_SIZE(rtl818x_rates);
693 	priv->modes[0].rates = priv->rates;
694 	priv->modes[0].num_channels = ARRAY_SIZE(rtl818x_channels);
695 	priv->modes[0].channels = priv->channels;
696 	priv->modes[1].mode = MODE_IEEE80211B;
697 	priv->modes[1].num_rates = 4;
698 	priv->modes[1].rates = priv->rates;
699 	priv->modes[1].num_channels = ARRAY_SIZE(rtl818x_channels);
700 	priv->modes[1].channels = priv->channels;
732
733 	priv->band.band = IEEE80211_BAND_2GHZ;
734 	priv->band.channels = priv->channels;
735 	priv->band.n_channels = ARRAY_SIZE(rtl818x_channels);
736 	priv->band.bitrates = priv->rates;
737 	priv->band.n_bitrates = ARRAY_SIZE(rtl818x_rates);
738 	dev->wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band;
739
740
701 priv->mode = IEEE80211_IF_TYPE_MNTR; 741 priv->mode = IEEE80211_IF_TYPE_MNTR;
702 dev->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING | 742 dev->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
703 IEEE80211_HW_RX_INCLUDES_FCS; 743 IEEE80211_HW_RX_INCLUDES_FCS;
@@ -706,10 +746,6 @@ static int __devinit rtl8187_probe(struct usb_interface *intf,
706 dev->max_rssi = 65; 746 dev->max_rssi = 65;
707 dev->max_signal = 64; 747 dev->max_signal = 64;
708 748
709 for (i = 0; i < 2; i++)
710 if ((err = ieee80211_register_hwmode(dev, &priv->modes[i])))
711 goto err_free_dev;
712
713 eeprom.data = dev; 749 eeprom.data = dev;
714 eeprom.register_read = rtl8187_eeprom_register_read; 750 eeprom.register_read = rtl8187_eeprom_register_read;
715 eeprom.register_write = rtl8187_eeprom_register_write; 751 eeprom.register_write = rtl8187_eeprom_register_write;
@@ -733,20 +769,20 @@ static int __devinit rtl8187_probe(struct usb_interface *intf,
733 for (i = 0; i < 3; i++) { 769 for (i = 0; i < 3; i++) {
734 eeprom_93cx6_read(&eeprom, RTL8187_EEPROM_TXPWR_CHAN_1 + i, 770 eeprom_93cx6_read(&eeprom, RTL8187_EEPROM_TXPWR_CHAN_1 + i,
735 &txpwr); 771 &txpwr);
736 (*channel++).val = txpwr & 0xFF; 772 (*channel++).hw_value = txpwr & 0xFF;
737 (*channel++).val = txpwr >> 8; 773 (*channel++).hw_value = txpwr >> 8;
738 } 774 }
739 for (i = 0; i < 2; i++) { 775 for (i = 0; i < 2; i++) {
740 eeprom_93cx6_read(&eeprom, RTL8187_EEPROM_TXPWR_CHAN_4 + i, 776 eeprom_93cx6_read(&eeprom, RTL8187_EEPROM_TXPWR_CHAN_4 + i,
741 &txpwr); 777 &txpwr);
742 (*channel++).val = txpwr & 0xFF; 778 (*channel++).hw_value = txpwr & 0xFF;
743 (*channel++).val = txpwr >> 8; 779 (*channel++).hw_value = txpwr >> 8;
744 } 780 }
745 for (i = 0; i < 2; i++) { 781 for (i = 0; i < 2; i++) {
746 eeprom_93cx6_read(&eeprom, RTL8187_EEPROM_TXPWR_CHAN_6 + i, 782 eeprom_93cx6_read(&eeprom, RTL8187_EEPROM_TXPWR_CHAN_6 + i,
747 &txpwr); 783 &txpwr);
748 (*channel++).val = txpwr & 0xFF; 784 (*channel++).hw_value = txpwr & 0xFF;
749 (*channel++).val = txpwr >> 8; 785 (*channel++).hw_value = txpwr >> 8;
750 } 786 }
751 787
752 eeprom_93cx6_read(&eeprom, RTL8187_EEPROM_TXPWR_BASE, 788 eeprom_93cx6_read(&eeprom, RTL8187_EEPROM_TXPWR_BASE,
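In the TX path above, the descriptor flags now take the selected rate's hw_value shifted to bit 24 and, when RTS or CTS protection is requested, the protection rate's hw_value shifted to bit 19. A standalone sketch of that packing, assuming only the shift positions shown in the hunk; the real driver additionally ORs in RTL8187_TX_FLAG_* bits whose values are not shown here.

/* Sketch only: rate fields packed into a 32-bit TX flags word. */
#include <stdint.h>
#include <stdio.h>

static uint32_t build_tx_flags(uint16_t len, uint8_t rate_hw_value,
			       int use_rts, uint8_t rts_rate_hw_value)
{
	uint32_t flags = len;				/* frame length in the low bits */

	flags |= (uint32_t)rate_hw_value << 24;		/* data rate */
	if (use_rts)
		flags |= (uint32_t)rts_rate_hw_value << 19;	/* protection rate */
	return flags;
}

int main(void)
{
	/* 100-byte frame at 54 Mbit/s (hw_value 11), RTS at 11 Mbit/s (hw_value 3) */
	printf("flags = 0x%08x\n", build_tx_flags(100, 11, 1, 3));
	return 0;
}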
diff --git a/drivers/net/wireless/rtl8187_rtl8225.c b/drivers/net/wireless/rtl8187_rtl8225.c
index b713de17ba0a..9146387b4c5e 100644
--- a/drivers/net/wireless/rtl8187_rtl8225.c
+++ b/drivers/net/wireless/rtl8187_rtl8225.c
@@ -283,8 +283,8 @@ static void rtl8225_rf_set_tx_power(struct ieee80211_hw *dev, int channel)
283 u32 reg; 283 u32 reg;
284 int i; 284 int i;
285 285
286 cck_power = priv->channels[channel - 1].val & 0xF; 286 cck_power = priv->channels[channel - 1].hw_value & 0xF;
287 ofdm_power = priv->channels[channel - 1].val >> 4; 287 ofdm_power = priv->channels[channel - 1].hw_value >> 4;
288 288
289 cck_power = min(cck_power, (u8)11); 289 cck_power = min(cck_power, (u8)11);
290 ofdm_power = min(ofdm_power, (u8)35); 290 ofdm_power = min(ofdm_power, (u8)35);
@@ -500,8 +500,8 @@ static void rtl8225z2_rf_set_tx_power(struct ieee80211_hw *dev, int channel)
500 u32 reg; 500 u32 reg;
501 int i; 501 int i;
502 502
503 cck_power = priv->channels[channel - 1].val & 0xF; 503 cck_power = priv->channels[channel - 1].hw_value & 0xF;
504 ofdm_power = priv->channels[channel - 1].val >> 4; 504 ofdm_power = priv->channels[channel - 1].hw_value >> 4;
505 505
506 cck_power = min(cck_power, (u8)15); 506 cck_power = min(cck_power, (u8)15);
507 cck_power += priv->txpwr_base & 0xF; 507 cck_power += priv->txpwr_base & 0xF;
@@ -735,13 +735,14 @@ static void rtl8225_rf_set_channel(struct ieee80211_hw *dev,
735 struct ieee80211_conf *conf) 735 struct ieee80211_conf *conf)
736{ 736{
737 struct rtl8187_priv *priv = dev->priv; 737 struct rtl8187_priv *priv = dev->priv;
738 int chan = ieee80211_frequency_to_channel(conf->channel->center_freq);
738 739
739 if (priv->rf->init == rtl8225_rf_init) 740 if (priv->rf->init == rtl8225_rf_init)
740 rtl8225_rf_set_tx_power(dev, conf->channel); 741 rtl8225_rf_set_tx_power(dev, chan);
741 else 742 else
742 rtl8225z2_rf_set_tx_power(dev, conf->channel); 743 rtl8225z2_rf_set_tx_power(dev, chan);
743 744
744 rtl8225_write(dev, 0x7, rtl8225_chan[conf->channel - 1]); 745 rtl8225_write(dev, 0x7, rtl8225_chan[chan - 1]);
745 msleep(10); 746 msleep(10);
746} 747}
747 748
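Unlike the rtl8180 code, the rtl8187 RF hunks above split hw_value into nibbles: the low nibble holds the CCK power index and the remaining bits the OFDM index, both clamped with min() before use. A small standalone illustration (the sample value is invented):

/* Sketch only: nibble split and clamping as in the rtl8187 RF code. */
#include <stdint.h>
#include <stdio.h>

static uint8_t min_u8(uint8_t a, uint8_t b)
{
	return a < b ? a : b;
}

int main(void)
{
	uint16_t hw_value = 0x9c;			/* example packed value */
	uint8_t cck_power  = hw_value & 0xF;		/* low nibble: CCK index */
	uint8_t ofdm_power = hw_value >> 4;		/* remaining bits: OFDM index */

	cck_power  = min_u8(cck_power, 11);		/* clamps used by rtl8225 */
	ofdm_power = min_u8(ofdm_power, 35);

	printf("cck=%u ofdm=%u\n", cck_power, ofdm_power);
	return 0;
}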
diff --git a/drivers/net/wireless/rtl818x.h b/drivers/net/wireless/rtl818x.h
index 1e7d6f8278d7..4f7d38f506eb 100644
--- a/drivers/net/wireless/rtl818x.h
+++ b/drivers/net/wireless/rtl818x.h
@@ -175,74 +175,4 @@ struct rtl818x_rf_ops {
175 void (*set_chan)(struct ieee80211_hw *, struct ieee80211_conf *); 175 void (*set_chan)(struct ieee80211_hw *, struct ieee80211_conf *);
176}; 176};
177 177
178static const struct ieee80211_rate rtl818x_rates[] = {
179 { .rate = 10,
180 .val = 0,
181 .flags = IEEE80211_RATE_CCK },
182 { .rate = 20,
183 .val = 1,
184 .flags = IEEE80211_RATE_CCK },
185 { .rate = 55,
186 .val = 2,
187 .flags = IEEE80211_RATE_CCK },
188 { .rate = 110,
189 .val = 3,
190 .flags = IEEE80211_RATE_CCK },
191 { .rate = 60,
192 .val = 4,
193 .flags = IEEE80211_RATE_OFDM },
194 { .rate = 90,
195 .val = 5,
196 .flags = IEEE80211_RATE_OFDM },
197 { .rate = 120,
198 .val = 6,
199 .flags = IEEE80211_RATE_OFDM },
200 { .rate = 180,
201 .val = 7,
202 .flags = IEEE80211_RATE_OFDM },
203 { .rate = 240,
204 .val = 8,
205 .flags = IEEE80211_RATE_OFDM },
206 { .rate = 360,
207 .val = 9,
208 .flags = IEEE80211_RATE_OFDM },
209 { .rate = 480,
210 .val = 10,
211 .flags = IEEE80211_RATE_OFDM },
212 { .rate = 540,
213 .val = 11,
214 .flags = IEEE80211_RATE_OFDM },
215};
216
217static const struct ieee80211_channel rtl818x_channels[] = {
218 { .chan = 1,
219 .freq = 2412},
220 { .chan = 2,
221 .freq = 2417},
222 { .chan = 3,
223 .freq = 2422},
224 { .chan = 4,
225 .freq = 2427},
226 { .chan = 5,
227 .freq = 2432},
228 { .chan = 6,
229 .freq = 2437},
230 { .chan = 7,
231 .freq = 2442},
232 { .chan = 8,
233 .freq = 2447},
234 { .chan = 9,
235 .freq = 2452},
236 { .chan = 10,
237 .freq = 2457},
238 { .chan = 11,
239 .freq = 2462},
240 { .chan = 12,
241 .freq = 2467},
242 { .chan = 13,
243 .freq = 2472},
244 { .chan = 14,
245 .freq = 2484}
246};
247
248#endif /* RTL818X_H */ 178#endif /* RTL818X_H */
diff --git a/drivers/net/wireless/strip.c b/drivers/net/wireless/strip.c
index 88efe1bae58f..bced3fe1cf8a 100644
--- a/drivers/net/wireless/strip.c
+++ b/drivers/net/wireless/strip.c
@@ -962,12 +962,12 @@ static char *time_delta(char buffer[], long time)
962/* get Nth element of the linked list */ 962/* get Nth element of the linked list */
963static struct strip *strip_get_idx(loff_t pos) 963static struct strip *strip_get_idx(loff_t pos)
964{ 964{
965 struct list_head *l; 965 struct strip *str;
966 int i = 0; 966 int i = 0;
967 967
968 list_for_each_rcu(l, &strip_list) { 968 list_for_each_entry_rcu(str, &strip_list, list) {
969 if (pos == i) 969 if (pos == i)
970 return list_entry(l, struct strip, list); 970 return str;
971 ++i; 971 ++i;
972 } 972 }
973 return NULL; 973 return NULL;
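The strip.c change swaps an open-coded list_for_each_rcu() plus list_entry() pair for list_for_each_entry_rcu(), which hands back the containing structure directly. A simplified, non-RCU userspace sketch of that idiom follows; the structure and macro here are pared-down stand-ins for the kernel's list helpers, with the entry type passed explicitly instead of using typeof.

/* Sketch only: entry-based list iteration via container_of. */
#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* iterate over the entries, not the raw list_head nodes */
#define list_for_each_entry(pos, head, type, member)			\
	for ((pos) = container_of((head)->next, type, member);		\
	     &(pos)->member != (head);					\
	     (pos) = container_of((pos)->member.next, type, member))

struct strip_entry {
	int idx;
	struct list_head list;
};

int main(void)
{
	struct list_head head = { &head, &head };
	struct strip_entry a = { 0, { NULL, NULL } }, b = { 1, { NULL, NULL } };
	struct strip_entry *pos;

	/* hand-build head -> a -> b -> head */
	head.next = &a.list; a.list.prev = &head;
	a.list.next = &b.list; b.list.prev = &a.list;
	b.list.next = &head; head.prev = &b.list;

	list_for_each_entry(pos, &head, struct strip_entry, list)
		printf("entry %d\n", pos->idx);
	return 0;
}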
diff --git a/drivers/net/wireless/wavelan_cs.c b/drivers/net/wireless/wavelan_cs.c
index 06eea6ab7bf0..baf74015751c 100644
--- a/drivers/net/wireless/wavelan_cs.c
+++ b/drivers/net/wireless/wavelan_cs.c
@@ -102,7 +102,7 @@ hacr_write(u_long base,
102 * Write to card's Host Adapter Command Register. Include a delay for 102 * Write to card's Host Adapter Command Register. Include a delay for
103 * those times when it is needed. 103 * those times when it is needed.
104 */ 104 */
105static inline void 105static void
106hacr_write_slow(u_long base, 106hacr_write_slow(u_long base,
107 u_char hacr) 107 u_char hacr)
108{ 108{
@@ -255,7 +255,7 @@ update_psa_checksum(struct net_device * dev)
255/* 255/*
256 * Write 1 byte to the MMC. 256 * Write 1 byte to the MMC.
257 */ 257 */
258static inline void 258static void
259mmc_out(u_long base, 259mmc_out(u_long base,
260 u_short o, 260 u_short o,
261 u_char d) 261 u_char d)
@@ -275,7 +275,7 @@ mmc_out(u_long base,
275 * Routine to write bytes to the Modem Management Controller. 275 * Routine to write bytes to the Modem Management Controller.
276 * We start by the end because it is the way it should be ! 276 * We start by the end because it is the way it should be !
277 */ 277 */
278static inline void 278static void
279mmc_write(u_long base, 279mmc_write(u_long base,
280 u_char o, 280 u_char o,
281 u_char * b, 281 u_char * b,
@@ -293,7 +293,7 @@ mmc_write(u_long base,
293 * Read 1 byte from the MMC. 293 * Read 1 byte from the MMC.
294 * Optimised version for 1 byte, avoid using memory... 294 * Optimised version for 1 byte, avoid using memory...
295 */ 295 */
296static inline u_char 296static u_char
297mmc_in(u_long base, 297mmc_in(u_long base,
298 u_short o) 298 u_short o)
299{ 299{
@@ -318,7 +318,7 @@ mmc_in(u_long base,
318 * (code has just been moved in the above function) 318 * (code has just been moved in the above function)
319 * We start by the end because it is the way it should be ! 319 * We start by the end because it is the way it should be !
320 */ 320 */
321static inline void 321static void
322mmc_read(u_long base, 322mmc_read(u_long base,
323 u_char o, 323 u_char o,
324 u_char * b, 324 u_char * b,
@@ -350,9 +350,8 @@ mmc_encr(u_long base) /* i/o port of the card */
350/*------------------------------------------------------------------*/ 350/*------------------------------------------------------------------*/
351/* 351/*
352 * Wait for the frequency EEprom to complete a command... 352 * Wait for the frequency EEprom to complete a command...
353 * I hope this one will be optimally inlined...
354 */ 353 */
355static inline void 354static void
356fee_wait(u_long base, /* i/o port of the card */ 355fee_wait(u_long base, /* i/o port of the card */
357 int delay, /* Base delay to wait for */ 356 int delay, /* Base delay to wait for */
358 int number) /* Number of time to wait */ 357 int number) /* Number of time to wait */
@@ -738,9 +737,9 @@ static void wv_roam_handover(wavepoint_history *wavepoint, net_local *lp)
738} 737}
739 738
740/* Called when a WavePoint beacon is received */ 739/* Called when a WavePoint beacon is received */
741static inline void wl_roam_gather(struct net_device * dev, 740static void wl_roam_gather(struct net_device * dev,
742 u_char * hdr, /* Beacon header */ 741 u_char * hdr, /* Beacon header */
743 u_char * stats) /* SNR, Signal quality 742 u_char * stats) /* SNR, Signal quality
744 of packet */ 743 of packet */
745{ 744{
746 wavepoint_beacon *beacon= (wavepoint_beacon *)hdr; /* Rcvd. Beacon */ 745 wavepoint_beacon *beacon= (wavepoint_beacon *)hdr; /* Rcvd. Beacon */
@@ -794,7 +793,7 @@ out:
794static inline int WAVELAN_BEACON(unsigned char *data) 793static inline int WAVELAN_BEACON(unsigned char *data)
795{ 794{
796 wavepoint_beacon *beacon= (wavepoint_beacon *)data; 795 wavepoint_beacon *beacon= (wavepoint_beacon *)data;
797 static wavepoint_beacon beacon_template={0xaa,0xaa,0x03,0x08,0x00,0x0e,0x20,0x03,0x00}; 796 static const wavepoint_beacon beacon_template={0xaa,0xaa,0x03,0x08,0x00,0x0e,0x20,0x03,0x00};
798 797
799 if(memcmp(beacon,&beacon_template,9)==0) 798 if(memcmp(beacon,&beacon_template,9)==0)
800 return 1; 799 return 1;
@@ -980,7 +979,7 @@ read_ringbuf(struct net_device * dev,
980 * wavelan_interrupt is not an option...), so you may experience 979 * wavelan_interrupt is not an option...), so you may experience
981 * some delay sometime... 980 * some delay sometime...
982 */ 981 */
983static inline void 982static void
984wv_82593_reconfig(struct net_device * dev) 983wv_82593_reconfig(struct net_device * dev)
985{ 984{
986 net_local * lp = netdev_priv(dev); 985 net_local * lp = netdev_priv(dev);
@@ -1233,7 +1232,7 @@ wv_local_show(struct net_device * dev)
1233/* 1232/*
1234 * Dump packet header (and content if necessary) on the screen 1233 * Dump packet header (and content if necessary) on the screen
1235 */ 1234 */
1236static inline void 1235static void
1237wv_packet_info(u_char * p, /* Packet to dump */ 1236wv_packet_info(u_char * p, /* Packet to dump */
1238 int length, /* Length of the packet */ 1237 int length, /* Length of the packet */
1239 char * msg1, /* Name of the device */ 1238 char * msg1, /* Name of the device */
@@ -1272,7 +1271,7 @@ wv_packet_info(u_char * p, /* Packet to dump */
1272 * This is the information which is displayed by the driver at startup 1271 * This is the information which is displayed by the driver at startup
1273 * There is a lot of flag to configure it at your will... 1272 * There is a lot of flag to configure it at your will...
1274 */ 1273 */
1275static inline void 1274static void
1276wv_init_info(struct net_device * dev) 1275wv_init_info(struct net_device * dev)
1277{ 1276{
1278 unsigned int base = dev->base_addr; 1277 unsigned int base = dev->base_addr;
@@ -1509,7 +1508,7 @@ wavelan_set_mac_address(struct net_device * dev,
1509 * Frequency setting (for hardware able of it) 1508 * Frequency setting (for hardware able of it)
1510 * It's a bit complicated and you don't really want to look into it... 1509 * It's a bit complicated and you don't really want to look into it...
1511 */ 1510 */
1512static inline int 1511static int
1513wv_set_frequency(u_long base, /* i/o port of the card */ 1512wv_set_frequency(u_long base, /* i/o port of the card */
1514 iw_freq * frequency) 1513 iw_freq * frequency)
1515{ 1514{
@@ -1706,7 +1705,7 @@ wv_set_frequency(u_long base, /* i/o port of the card */
1706/* 1705/*
1707 * Give the list of available frequencies 1706 * Give the list of available frequencies
1708 */ 1707 */
1709static inline int 1708static int
1710wv_frequency_list(u_long base, /* i/o port of the card */ 1709wv_frequency_list(u_long base, /* i/o port of the card */
1711 iw_freq * list, /* List of frequency to fill */ 1710 iw_freq * list, /* List of frequency to fill */
1712 int max) /* Maximum number of frequencies */ 1711 int max) /* Maximum number of frequencies */
@@ -2759,7 +2758,7 @@ wavelan_get_wireless_stats(struct net_device * dev)
2759 * frame pointer and verify that the frame seem correct 2758 * frame pointer and verify that the frame seem correct
2760 * (called by wv_packet_rcv()) 2759 * (called by wv_packet_rcv())
2761 */ 2760 */
2762static inline int 2761static int
2763wv_start_of_frame(struct net_device * dev, 2762wv_start_of_frame(struct net_device * dev,
2764 int rfp, /* end of frame */ 2763 int rfp, /* end of frame */
2765 int wrap) /* start of buffer */ 2764 int wrap) /* start of buffer */
@@ -2821,7 +2820,7 @@ wv_start_of_frame(struct net_device * dev,
2821 * Note: if any errors occur, the packet is "dropped on the floor" 2820 * Note: if any errors occur, the packet is "dropped on the floor"
2822 * (called by wv_packet_rcv()) 2821 * (called by wv_packet_rcv())
2823 */ 2822 */
2824static inline void 2823static void
2825wv_packet_read(struct net_device * dev, 2824wv_packet_read(struct net_device * dev,
2826 int fd_p, 2825 int fd_p,
2827 int sksize) 2826 int sksize)
@@ -2922,7 +2921,7 @@ wv_packet_read(struct net_device * dev,
2922 * (called by wavelan_interrupt()) 2921 * (called by wavelan_interrupt())
2923 * Note : the spinlock is already grabbed for us and irq are disabled. 2922 * Note : the spinlock is already grabbed for us and irq are disabled.
2924 */ 2923 */
2925static inline void 2924static void
2926wv_packet_rcv(struct net_device * dev) 2925wv_packet_rcv(struct net_device * dev)
2927{ 2926{
2928 unsigned int base = dev->base_addr; 2927 unsigned int base = dev->base_addr;
@@ -3056,7 +3055,7 @@ wv_packet_rcv(struct net_device * dev)
3056 * the transmit. 3055 * the transmit.
3057 * (called in wavelan_packet_xmit()) 3056 * (called in wavelan_packet_xmit())
3058 */ 3057 */
3059static inline void 3058static void
3060wv_packet_write(struct net_device * dev, 3059wv_packet_write(struct net_device * dev,
3061 void * buf, 3060 void * buf,
3062 short length) 3061 short length)
@@ -3180,7 +3179,7 @@ wavelan_packet_xmit(struct sk_buff * skb,
3180 * Routine to initialize the Modem Management Controller. 3179 * Routine to initialize the Modem Management Controller.
3181 * (called by wv_hw_config()) 3180 * (called by wv_hw_config())
3182 */ 3181 */
3183static inline int 3182static int
3184wv_mmc_init(struct net_device * dev) 3183wv_mmc_init(struct net_device * dev)
3185{ 3184{
3186 unsigned int base = dev->base_addr; 3185 unsigned int base = dev->base_addr;
@@ -3699,7 +3698,7 @@ wv_82593_config(struct net_device * dev)
3699 * wavelan. 3698 * wavelan.
3700 * (called by wv_config()) 3699 * (called by wv_config())
3701 */ 3700 */
3702static inline int 3701static int
3703wv_pcmcia_reset(struct net_device * dev) 3702wv_pcmcia_reset(struct net_device * dev)
3704{ 3703{
3705 int i; 3704 int i;
@@ -3864,7 +3863,7 @@ wv_hw_config(struct net_device * dev)
3864 * 2. Start the LAN controller's receive unit 3863 * 2. Start the LAN controller's receive unit
3865 * (called by wavelan_event(), wavelan_watchdog() and wavelan_open()) 3864 * (called by wavelan_event(), wavelan_watchdog() and wavelan_open())
3866 */ 3865 */
3867static inline void 3866static void
3868wv_hw_reset(struct net_device * dev) 3867wv_hw_reset(struct net_device * dev)
3869{ 3868{
3870 net_local * lp = netdev_priv(dev); 3869 net_local * lp = netdev_priv(dev);
@@ -3895,7 +3894,7 @@ wv_hw_reset(struct net_device * dev)
3895 * device available to the system. 3894 * device available to the system.
3896 * (called by wavelan_event()) 3895 * (called by wavelan_event())
3897 */ 3896 */
3898static inline int 3897static int
3899wv_pcmcia_config(struct pcmcia_device * link) 3898wv_pcmcia_config(struct pcmcia_device * link)
3900{ 3899{
3901 struct net_device * dev = (struct net_device *) link->priv; 3900 struct net_device * dev = (struct net_device *) link->priv;
diff --git a/drivers/net/wireless/wavelan_cs.p.h b/drivers/net/wireless/wavelan_cs.p.h
index 33dd97094227..628192d7248f 100644
--- a/drivers/net/wireless/wavelan_cs.p.h
+++ b/drivers/net/wireless/wavelan_cs.p.h
@@ -637,7 +637,7 @@ struct net_local
637/* ----------------- MODEM MANAGEMENT SUBROUTINES ----------------- */ 637/* ----------------- MODEM MANAGEMENT SUBROUTINES ----------------- */
638static inline u_char /* data */ 638static inline u_char /* data */
639 hasr_read(u_long); /* Read the host interface : base address */ 639 hasr_read(u_long); /* Read the host interface : base address */
640static inline void 640static void
641 hacr_write(u_long, /* Write to host interface : base address */ 641 hacr_write(u_long, /* Write to host interface : base address */
642 u_char), /* data */ 642 u_char), /* data */
643 hacr_write_slow(u_long, 643 hacr_write_slow(u_long,
@@ -651,7 +651,7 @@ static void
651 int, /* Offset in psa */ 651 int, /* Offset in psa */
652 u_char *, /* Buffer in memory */ 652 u_char *, /* Buffer in memory */
653 int); /* Length of buffer */ 653 int); /* Length of buffer */
654static inline void 654static void
655 mmc_out(u_long, /* Write 1 byte to the Modem Manag Control */ 655 mmc_out(u_long, /* Write 1 byte to the Modem Manag Control */
656 u_short, 656 u_short,
657 u_char), 657 u_char),
@@ -659,10 +659,10 @@ static inline void
659 u_char, 659 u_char,
660 u_char *, 660 u_char *,
661 int); 661 int);
662static inline u_char /* Read 1 byte from the MMC */ 662static u_char /* Read 1 byte from the MMC */
663 mmc_in(u_long, 663 mmc_in(u_long,
664 u_short); 664 u_short);
665static inline void 665static void
666 mmc_read(u_long, /* Read n bytes from the MMC */ 666 mmc_read(u_long, /* Read n bytes from the MMC */
667 u_char, 667 u_char,
668 u_char *, 668 u_char *,
@@ -688,10 +688,10 @@ static int
688 int, 688 int,
689 char *, 689 char *,
690 int); 690 int);
691static inline void 691static void
692 wv_82593_reconfig(struct net_device *); /* Reconfigure the controller */ 692 wv_82593_reconfig(struct net_device *); /* Reconfigure the controller */
693/* ------------------- DEBUG & INFO SUBROUTINES ------------------- */ 693/* ------------------- DEBUG & INFO SUBROUTINES ------------------- */
694static inline void 694static void
695 wv_init_info(struct net_device *); /* display startup info */ 695 wv_init_info(struct net_device *); /* display startup info */
696/* ------------------- IOCTL, STATS & RECONFIG ------------------- */ 696/* ------------------- IOCTL, STATS & RECONFIG ------------------- */
697static en_stats * 697static en_stats *
@@ -699,17 +699,17 @@ static en_stats *
699static iw_stats * 699static iw_stats *
700 wavelan_get_wireless_stats(struct net_device *); 700 wavelan_get_wireless_stats(struct net_device *);
701/* ----------------------- PACKET RECEPTION ----------------------- */ 701/* ----------------------- PACKET RECEPTION ----------------------- */
702static inline int 702static int
703 wv_start_of_frame(struct net_device *, /* Seek beggining of current frame */ 703 wv_start_of_frame(struct net_device *, /* Seek beggining of current frame */
704 int, /* end of frame */ 704 int, /* end of frame */
705 int); /* start of buffer */ 705 int); /* start of buffer */
706static inline void 706static void
707 wv_packet_read(struct net_device *, /* Read a packet from a frame */ 707 wv_packet_read(struct net_device *, /* Read a packet from a frame */
708 int, 708 int,
709 int), 709 int),
710 wv_packet_rcv(struct net_device *); /* Read all packets waiting */ 710 wv_packet_rcv(struct net_device *); /* Read all packets waiting */
711/* --------------------- PACKET TRANSMISSION --------------------- */ 711/* --------------------- PACKET TRANSMISSION --------------------- */
712static inline void 712static void
713 wv_packet_write(struct net_device *, /* Write a packet to the Tx buffer */ 713 wv_packet_write(struct net_device *, /* Write a packet to the Tx buffer */
714 void *, 714 void *,
715 short); 715 short);
@@ -717,20 +717,20 @@ static int
717 wavelan_packet_xmit(struct sk_buff *, /* Send a packet */ 717 wavelan_packet_xmit(struct sk_buff *, /* Send a packet */
718 struct net_device *); 718 struct net_device *);
719/* -------------------- HARDWARE CONFIGURATION -------------------- */ 719/* -------------------- HARDWARE CONFIGURATION -------------------- */
720static inline int 720static int
721 wv_mmc_init(struct net_device *); /* Initialize the modem */ 721 wv_mmc_init(struct net_device *); /* Initialize the modem */
722static int 722static int
723 wv_ru_stop(struct net_device *), /* Stop the i82593 receiver unit */ 723 wv_ru_stop(struct net_device *), /* Stop the i82593 receiver unit */
724 wv_ru_start(struct net_device *); /* Start the i82593 receiver unit */ 724 wv_ru_start(struct net_device *); /* Start the i82593 receiver unit */
725static int 725static int
726 wv_82593_config(struct net_device *); /* Configure the i82593 */ 726 wv_82593_config(struct net_device *); /* Configure the i82593 */
727static inline int 727static int
728 wv_pcmcia_reset(struct net_device *); /* Reset the pcmcia interface */ 728 wv_pcmcia_reset(struct net_device *); /* Reset the pcmcia interface */
729static int 729static int
730 wv_hw_config(struct net_device *); /* Reset & configure the whole hardware */ 730 wv_hw_config(struct net_device *); /* Reset & configure the whole hardware */
731static inline void 731static void
732 wv_hw_reset(struct net_device *); /* Same, + start receiver unit */ 732 wv_hw_reset(struct net_device *); /* Same, + start receiver unit */
733static inline int 733static int
734 wv_pcmcia_config(struct pcmcia_device *); /* Configure the pcmcia interface */ 734 wv_pcmcia_config(struct pcmcia_device *); /* Configure the pcmcia interface */
735static void 735static void
736 wv_pcmcia_release(struct pcmcia_device *);/* Remove a device */ 736 wv_pcmcia_release(struct pcmcia_device *);/* Remove a device */
diff --git a/drivers/net/wireless/zd1211rw/zd_chip.c b/drivers/net/wireless/zd1211rw/zd_chip.c
index 99e5b03b3f51..0acb5c345734 100644
--- a/drivers/net/wireless/zd1211rw/zd_chip.c
+++ b/drivers/net/wireless/zd1211rw/zd_chip.c
@@ -771,10 +771,10 @@ static int zd1211b_hw_init_hmac(struct zd_chip *chip)
771{ 771{
772 static const struct zd_ioreq32 ioreqs[] = { 772 static const struct zd_ioreq32 ioreqs[] = {
773 { CR_ZD1211B_RETRY_MAX, 0x02020202 }, 773 { CR_ZD1211B_RETRY_MAX, 0x02020202 },
774 { CR_ZD1211B_TX_PWR_CTL4, 0x007f003f }, 774 { CR_ZD1211B_CWIN_MAX_MIN_AC0, 0x007f003f },
775 { CR_ZD1211B_TX_PWR_CTL3, 0x007f003f }, 775 { CR_ZD1211B_CWIN_MAX_MIN_AC1, 0x007f003f },
776 { CR_ZD1211B_TX_PWR_CTL2, 0x003f001f }, 776 { CR_ZD1211B_CWIN_MAX_MIN_AC2, 0x003f001f },
777 { CR_ZD1211B_TX_PWR_CTL1, 0x001f000f }, 777 { CR_ZD1211B_CWIN_MAX_MIN_AC3, 0x001f000f },
778 { CR_ZD1211B_AIFS_CTL1, 0x00280028 }, 778 { CR_ZD1211B_AIFS_CTL1, 0x00280028 },
779 { CR_ZD1211B_AIFS_CTL2, 0x008C003C }, 779 { CR_ZD1211B_AIFS_CTL2, 0x008C003C },
780 { CR_ZD1211B_TXOP, 0x01800824 }, 780 { CR_ZD1211B_TXOP, 0x01800824 },
@@ -809,6 +809,7 @@ static int hw_init_hmac(struct zd_chip *chip)
809 { CR_AFTER_PNP, 0x1 }, 809 { CR_AFTER_PNP, 0x1 },
810 { CR_WEP_PROTECT, 0x114 }, 810 { CR_WEP_PROTECT, 0x114 },
811 { CR_IFS_VALUE, IFS_VALUE_DEFAULT }, 811 { CR_IFS_VALUE, IFS_VALUE_DEFAULT },
812 { CR_CAM_MODE, MODE_AP_WDS},
812 }; 813 };
813 814
814 ZD_ASSERT(mutex_is_locked(&chip->mutex)); 815 ZD_ASSERT(mutex_is_locked(&chip->mutex));
@@ -986,7 +987,7 @@ static int print_fw_version(struct zd_chip *chip)
986 return 0; 987 return 0;
987} 988}
988 989
989static int set_mandatory_rates(struct zd_chip *chip, int mode) 990static int set_mandatory_rates(struct zd_chip *chip, int gmode)
990{ 991{
991 u32 rates; 992 u32 rates;
992 ZD_ASSERT(mutex_is_locked(&chip->mutex)); 993 ZD_ASSERT(mutex_is_locked(&chip->mutex));
@@ -994,17 +995,12 @@ static int set_mandatory_rates(struct zd_chip *chip, int mode)
994 * that the device is supporting. Until further notice we should try 995 * that the device is supporting. Until further notice we should try
995 * to support 802.11g also for full speed USB. 996 * to support 802.11g also for full speed USB.
996 */ 997 */
997 	switch (mode) {
998 	case MODE_IEEE80211B:
999 		rates = CR_RATE_1M|CR_RATE_2M|CR_RATE_5_5M|CR_RATE_11M;
1000 		break;
1001 	case MODE_IEEE80211G:
1002 		rates = CR_RATE_1M|CR_RATE_2M|CR_RATE_5_5M|CR_RATE_11M|
1003 			CR_RATE_6M|CR_RATE_12M|CR_RATE_24M;
1004 		break;
1005 	default:
1006 		return -EINVAL;
1007 	}
998 	if (!gmode)
999 		rates = CR_RATE_1M|CR_RATE_2M|CR_RATE_5_5M|CR_RATE_11M;
1000 	else
1001 		rates = CR_RATE_1M|CR_RATE_2M|CR_RATE_5_5M|CR_RATE_11M|
1002 			CR_RATE_6M|CR_RATE_12M|CR_RATE_24M;
1003
1008 return zd_iowrite32_locked(chip, rates, CR_MANDATORY_RATE_TBL); 1004 return zd_iowrite32_locked(chip, rates, CR_MANDATORY_RATE_TBL);
1009} 1005}
1010 1006
@@ -1108,7 +1104,7 @@ int zd_chip_init_hw(struct zd_chip *chip)
1108 * It might be discussed, whether we should suppport pure b mode for 1104 * It might be discussed, whether we should suppport pure b mode for
1109 * full speed USB. 1105 * full speed USB.
1110 */ 1106 */
1111 r = set_mandatory_rates(chip, MODE_IEEE80211G); 1107 r = set_mandatory_rates(chip, 1);
1112 if (r) 1108 if (r)
1113 goto out; 1109 goto out;
1114 /* Disabling interrupts is certainly a smart thing here. 1110 /* Disabling interrupts is certainly a smart thing here.
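set_mandatory_rates() above now takes a plain gmode flag instead of a MODE_IEEE80211* constant and builds the basic-rate mask from it. A standalone sketch of that selection; the EX_RATE_* bit positions are placeholders for the example, only the grouping (the four CCK rates always, the three OFDM basic rates only in g mode) follows the driver.

/* Sketch only: basic-rate mask selection keyed on a gmode flag. */
#include <stdint.h>
#include <stdio.h>

#define EX_RATE_1M	(1u << 0)
#define EX_RATE_2M	(1u << 1)
#define EX_RATE_5_5M	(1u << 2)
#define EX_RATE_11M	(1u << 3)
#define EX_RATE_6M	(1u << 8)
#define EX_RATE_12M	(1u << 10)
#define EX_RATE_24M	(1u << 12)

static uint32_t mandatory_rates(int gmode)
{
	uint32_t rates = EX_RATE_1M | EX_RATE_2M | EX_RATE_5_5M | EX_RATE_11M;

	if (gmode)
		rates |= EX_RATE_6M | EX_RATE_12M | EX_RATE_24M;
	return rates;
}

int main(void)
{
	printf("b-only: 0x%08x\ng-mode: 0x%08x\n",
	       mandatory_rates(0), mandatory_rates(1));
	return 0;
}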
diff --git a/drivers/net/wireless/zd1211rw/zd_chip.h b/drivers/net/wireless/zd1211rw/zd_chip.h
index 009c03777a35..f8c061a9b6ec 100644
--- a/drivers/net/wireless/zd1211rw/zd_chip.h
+++ b/drivers/net/wireless/zd1211rw/zd_chip.h
@@ -489,6 +489,7 @@ enum {
489 489
490#define CR_RX_OFFSET CTL_REG(0x065c) 490#define CR_RX_OFFSET CTL_REG(0x065c)
491 491
492#define CR_BCN_LENGTH CTL_REG(0x0664)
492#define CR_PHY_DELAY CTL_REG(0x066C) 493#define CR_PHY_DELAY CTL_REG(0x066C)
493#define CR_BCN_FIFO CTL_REG(0x0670) 494#define CR_BCN_FIFO CTL_REG(0x0670)
494#define CR_SNIFFER_ON CTL_REG(0x0674) 495#define CR_SNIFFER_ON CTL_REG(0x0674)
@@ -545,6 +546,8 @@ enum {
545#define RX_FILTER_CTRL (RX_FILTER_RTS | RX_FILTER_CTS | \ 546#define RX_FILTER_CTRL (RX_FILTER_RTS | RX_FILTER_CTS | \
546 RX_FILTER_CFEND | RX_FILTER_CFACK) 547 RX_FILTER_CFEND | RX_FILTER_CFACK)
547 548
549#define BCN_MODE_IBSS 0x2000000
550
548/* Monitor mode sets filter to 0xfffff */ 551/* Monitor mode sets filter to 0xfffff */
549 552
550#define CR_ACK_TIMEOUT_EXT CTL_REG(0x0690) 553#define CR_ACK_TIMEOUT_EXT CTL_REG(0x0690)
@@ -578,6 +581,11 @@ enum {
578 581
579/* CAM: Continuous Access Mode (power management) */ 582/* CAM: Continuous Access Mode (power management) */
580#define CR_CAM_MODE CTL_REG(0x0700) 583#define CR_CAM_MODE CTL_REG(0x0700)
584#define MODE_IBSS 0x0
585#define MODE_AP 0x1
586#define MODE_STA 0x2
587#define MODE_AP_WDS 0x3
588
581#define CR_CAM_ROLL_TB_LOW CTL_REG(0x0704) 589#define CR_CAM_ROLL_TB_LOW CTL_REG(0x0704)
582#define CR_CAM_ROLL_TB_HIGH CTL_REG(0x0708) 590#define CR_CAM_ROLL_TB_HIGH CTL_REG(0x0708)
583#define CR_CAM_ADDRESS CTL_REG(0x070C) 591#define CR_CAM_ADDRESS CTL_REG(0x070C)
@@ -625,11 +633,10 @@ enum {
625#define CR_S_MD CTL_REG(0x0830) 633#define CR_S_MD CTL_REG(0x0830)
626 634
627#define CR_USB_DEBUG_PORT CTL_REG(0x0888) 635#define CR_USB_DEBUG_PORT CTL_REG(0x0888)
628 636#define CR_ZD1211B_CWIN_MAX_MIN_AC0 CTL_REG(0x0b00)
629#define CR_ZD1211B_TX_PWR_CTL1 CTL_REG(0x0b00) 637#define CR_ZD1211B_CWIN_MAX_MIN_AC1 CTL_REG(0x0b04)
630#define CR_ZD1211B_TX_PWR_CTL2 CTL_REG(0x0b04) 638#define CR_ZD1211B_CWIN_MAX_MIN_AC2 CTL_REG(0x0b08)
631#define CR_ZD1211B_TX_PWR_CTL3 CTL_REG(0x0b08) 639#define CR_ZD1211B_CWIN_MAX_MIN_AC3 CTL_REG(0x0b0c)
632#define CR_ZD1211B_TX_PWR_CTL4 CTL_REG(0x0b0c)
633#define CR_ZD1211B_AIFS_CTL1 CTL_REG(0x0b10) 640#define CR_ZD1211B_AIFS_CTL1 CTL_REG(0x0b10)
634#define CR_ZD1211B_AIFS_CTL2 CTL_REG(0x0b14) 641#define CR_ZD1211B_AIFS_CTL2 CTL_REG(0x0b14)
635#define CR_ZD1211B_TXOP CTL_REG(0x0b20) 642#define CR_ZD1211B_TXOP CTL_REG(0x0b20)
diff --git a/drivers/net/wireless/zd1211rw/zd_ieee80211.c b/drivers/net/wireless/zd1211rw/zd_ieee80211.c
index 7c277ec43f79..d8dc41ec0e5d 100644
--- a/drivers/net/wireless/zd1211rw/zd_ieee80211.c
+++ b/drivers/net/wireless/zd1211rw/zd_ieee80211.c
@@ -65,16 +65,14 @@ static const struct channel_range *zd_channel_range(u8 regdomain)
65 65
66static void unmask_bg_channels(struct ieee80211_hw *hw, 66static void unmask_bg_channels(struct ieee80211_hw *hw,
67 const struct channel_range *range, 67 const struct channel_range *range,
68 struct ieee80211_hw_mode *mode) 68 struct ieee80211_supported_band *sband)
69{ 69{
70 u8 channel; 70 u8 channel;
71 71
72 for (channel = range->start; channel < range->end; channel++) { 72 for (channel = range->start; channel < range->end; channel++) {
73 struct ieee80211_channel *chan = 73 struct ieee80211_channel *chan =
74 &mode->channels[CHAN_TO_IDX(channel)]; 74 &sband->channels[CHAN_TO_IDX(channel)];
75 chan->flag |= IEEE80211_CHAN_W_SCAN | 75 chan->flags = 0;
76 IEEE80211_CHAN_W_ACTIVE_SCAN |
77 IEEE80211_CHAN_W_IBSS;
78 } 76 }
79} 77}
80 78
@@ -97,7 +95,6 @@ void zd_geo_init(struct ieee80211_hw *hw, u8 regdomain)
97 range = zd_channel_range(ZD_REGDOMAIN_FCC); 95 range = zd_channel_range(ZD_REGDOMAIN_FCC);
98 } 96 }
99 97
100 unmask_bg_channels(hw, range, &mac->modes[0]); 98 unmask_bg_channels(hw, range, &mac->band);
101 unmask_bg_channels(hw, range, &mac->modes[1]);
102} 99}
103 100
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c
index 76ef2d83919d..69c45ca99051 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zd1211rw/zd_mac.c
@@ -34,76 +34,61 @@
34 34
35/* This table contains the hardware specific values for the modulation rates. */ 35/* This table contains the hardware specific values for the modulation rates. */
36static const struct ieee80211_rate zd_rates[] = { 36static const struct ieee80211_rate zd_rates[] = {
37 { .rate = 10, 37 { .bitrate = 10,
38 .val = ZD_CCK_RATE_1M, 38 .hw_value = ZD_CCK_RATE_1M, },
39 .flags = IEEE80211_RATE_CCK }, 39 { .bitrate = 20,
40 { .rate = 20, 40 .hw_value = ZD_CCK_RATE_2M,
41 .val = ZD_CCK_RATE_2M, 41 .hw_value_short = ZD_CCK_RATE_2M | ZD_CCK_PREA_SHORT,
42 .val2 = ZD_CCK_RATE_2M | ZD_CCK_PREA_SHORT, 42 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
43 .flags = IEEE80211_RATE_CCK_2 }, 43 { .bitrate = 55,
44 { .rate = 55, 44 .hw_value = ZD_CCK_RATE_5_5M,
45 .val = ZD_CCK_RATE_5_5M, 45 .hw_value_short = ZD_CCK_RATE_5_5M | ZD_CCK_PREA_SHORT,
46 .val2 = ZD_CCK_RATE_5_5M | ZD_CCK_PREA_SHORT, 46 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
47 .flags = IEEE80211_RATE_CCK_2 }, 47 { .bitrate = 110,
48 { .rate = 110, 48 .hw_value = ZD_CCK_RATE_11M,
49 .val = ZD_CCK_RATE_11M, 49 .hw_value_short = ZD_CCK_RATE_11M | ZD_CCK_PREA_SHORT,
50 .val2 = ZD_CCK_RATE_11M | ZD_CCK_PREA_SHORT, 50 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
51 .flags = IEEE80211_RATE_CCK_2 }, 51 { .bitrate = 60,
52 { .rate = 60, 52 .hw_value = ZD_OFDM_RATE_6M,
53 .val = ZD_OFDM_RATE_6M, 53 .flags = 0 },
54 .flags = IEEE80211_RATE_OFDM }, 54 { .bitrate = 90,
55 { .rate = 90, 55 .hw_value = ZD_OFDM_RATE_9M,
56 .val = ZD_OFDM_RATE_9M, 56 .flags = 0 },
57 .flags = IEEE80211_RATE_OFDM }, 57 { .bitrate = 120,
58 { .rate = 120, 58 .hw_value = ZD_OFDM_RATE_12M,
59 .val = ZD_OFDM_RATE_12M, 59 .flags = 0 },
60 .flags = IEEE80211_RATE_OFDM }, 60 { .bitrate = 180,
61 { .rate = 180, 61 .hw_value = ZD_OFDM_RATE_18M,
62 .val = ZD_OFDM_RATE_18M, 62 .flags = 0 },
63 .flags = IEEE80211_RATE_OFDM }, 63 { .bitrate = 240,
64 { .rate = 240, 64 .hw_value = ZD_OFDM_RATE_24M,
65 .val = ZD_OFDM_RATE_24M, 65 .flags = 0 },
66 .flags = IEEE80211_RATE_OFDM }, 66 { .bitrate = 360,
67 { .rate = 360, 67 .hw_value = ZD_OFDM_RATE_36M,
68 .val = ZD_OFDM_RATE_36M, 68 .flags = 0 },
69 .flags = IEEE80211_RATE_OFDM }, 69 { .bitrate = 480,
70 { .rate = 480, 70 .hw_value = ZD_OFDM_RATE_48M,
71 .val = ZD_OFDM_RATE_48M, 71 .flags = 0 },
72 .flags = IEEE80211_RATE_OFDM }, 72 { .bitrate = 540,
73 { .rate = 540, 73 .hw_value = ZD_OFDM_RATE_54M,
74 .val = ZD_OFDM_RATE_54M, 74 .flags = 0 },
75 .flags = IEEE80211_RATE_OFDM },
76}; 75};
77 76
78static const struct ieee80211_channel zd_channels[] = { 77static const struct ieee80211_channel zd_channels[] = {
79 { .chan = 1, 78 { .center_freq = 2412, .hw_value = 1 },
80 .freq = 2412}, 79 { .center_freq = 2417, .hw_value = 2 },
81 { .chan = 2, 80 { .center_freq = 2422, .hw_value = 3 },
82 .freq = 2417}, 81 { .center_freq = 2427, .hw_value = 4 },
83 { .chan = 3, 82 { .center_freq = 2432, .hw_value = 5 },
84 .freq = 2422}, 83 { .center_freq = 2437, .hw_value = 6 },
85 { .chan = 4, 84 { .center_freq = 2442, .hw_value = 7 },
86 .freq = 2427}, 85 { .center_freq = 2447, .hw_value = 8 },
87 { .chan = 5, 86 { .center_freq = 2452, .hw_value = 9 },
88 .freq = 2432}, 87 { .center_freq = 2457, .hw_value = 10 },
89 { .chan = 6, 88 { .center_freq = 2462, .hw_value = 11 },
90 .freq = 2437}, 89 { .center_freq = 2467, .hw_value = 12 },
91 { .chan = 7, 90 { .center_freq = 2472, .hw_value = 13 },
92 .freq = 2442}, 91 { .center_freq = 2484, .hw_value = 14 },
93 { .chan = 8,
94 .freq = 2447},
95 { .chan = 9,
96 .freq = 2452},
97 { .chan = 10,
98 .freq = 2457},
99 { .chan = 11,
100 .freq = 2462},
101 { .chan = 12,
102 .freq = 2467},
103 { .chan = 13,
104 .freq = 2472},
105 { .chan = 14,
106 .freq = 2484}
107}; 92};
108 93
109static void housekeeping_init(struct zd_mac *mac); 94static void housekeeping_init(struct zd_mac *mac);
@@ -490,6 +475,46 @@ static void cs_set_control(struct zd_mac *mac, struct zd_ctrlset *cs,
490 /* FIXME: Management frame? */ 475 /* FIXME: Management frame? */
491} 476}
492 477
478void zd_mac_config_beacon(struct ieee80211_hw *hw, struct sk_buff *beacon)
479{
480 struct zd_mac *mac = zd_hw_mac(hw);
481 u32 tmp, j = 0;
482 /* 4 more bytes for tail CRC */
483 u32 full_len = beacon->len + 4;
484 zd_iowrite32(&mac->chip, CR_BCN_FIFO_SEMAPHORE, 0);
485 zd_ioread32(&mac->chip, CR_BCN_FIFO_SEMAPHORE, &tmp);
486 while (tmp & 0x2) {
487 zd_ioread32(&mac->chip, CR_BCN_FIFO_SEMAPHORE, &tmp);
488 if ((++j % 100) == 0) {
489 printk(KERN_ERR "CR_BCN_FIFO_SEMAPHORE not ready\n");
490 if (j >= 500) {
491 printk(KERN_ERR "Giving up beacon config.\n");
492 return;
493 }
494 }
495 msleep(1);
496 }
497
498 zd_iowrite32(&mac->chip, CR_BCN_FIFO, full_len - 1);
499 if (zd_chip_is_zd1211b(&mac->chip))
500 zd_iowrite32(&mac->chip, CR_BCN_LENGTH, full_len - 1);
501
502 for (j = 0 ; j < beacon->len; j++)
503 zd_iowrite32(&mac->chip, CR_BCN_FIFO,
504 *((u8 *)(beacon->data + j)));
505
506 for (j = 0; j < 4; j++)
507 zd_iowrite32(&mac->chip, CR_BCN_FIFO, 0x0);
508
509 zd_iowrite32(&mac->chip, CR_BCN_FIFO_SEMAPHORE, 1);
510 /* 802.11b/g 2.4G CCK 1Mb
511 * 802.11a, not yet implemented, uses different values (see GPL vendor
512 * driver)
513 */
514 zd_iowrite32(&mac->chip, CR_BCN_PLCP_CFG, 0x00000400 |
515 (full_len << 19));
516}
517
493static int fill_ctrlset(struct zd_mac *mac, 518static int fill_ctrlset(struct zd_mac *mac,
494 struct sk_buff *skb, 519 struct sk_buff *skb,
495 struct ieee80211_tx_control *control) 520 struct ieee80211_tx_control *control)
@@ -503,7 +528,9 @@ static int fill_ctrlset(struct zd_mac *mac,
503 528
504 ZD_ASSERT(frag_len <= 0xffff); 529 ZD_ASSERT(frag_len <= 0xffff);
505 530
506 cs->modulation = control->tx_rate; 531 cs->modulation = control->tx_rate->hw_value;
532 if (control->flags & IEEE80211_TXCTL_SHORT_PREAMBLE)
533 cs->modulation = control->tx_rate->hw_value_short;
507 534
508 cs->tx_length = cpu_to_le16(frag_len); 535 cs->tx_length = cpu_to_le16(frag_len);
509 536
@@ -631,6 +658,8 @@ int zd_mac_rx(struct ieee80211_hw *hw, const u8 *buffer, unsigned int length)
631 int bad_frame = 0; 658 int bad_frame = 0;
632 u16 fc; 659 u16 fc;
633 bool is_qos, is_4addr, need_padding; 660 bool is_qos, is_4addr, need_padding;
661 int i;
662 u8 rate;
634 663
635 if (length < ZD_PLCP_HEADER_SIZE + 10 /* IEEE80211_1ADDR_LEN */ + 664 if (length < ZD_PLCP_HEADER_SIZE + 10 /* IEEE80211_1ADDR_LEN */ +
636 FCS_LEN + sizeof(struct rx_status)) 665 FCS_LEN + sizeof(struct rx_status))
@@ -660,14 +689,19 @@ int zd_mac_rx(struct ieee80211_hw *hw, const u8 *buffer, unsigned int length)
660 } 689 }
661 } 690 }
662 691
663 stats.channel = _zd_chip_get_channel(&mac->chip); 692 stats.freq = zd_channels[_zd_chip_get_channel(&mac->chip) - 1].center_freq;
664 stats.freq = zd_channels[stats.channel - 1].freq; 693 stats.band = IEEE80211_BAND_2GHZ;
665 stats.phymode = MODE_IEEE80211G;
666 stats.ssi = status->signal_strength; 694 stats.ssi = status->signal_strength;
667 stats.signal = zd_rx_qual_percent(buffer, 695 stats.signal = zd_rx_qual_percent(buffer,
668 length - sizeof(struct rx_status), 696 length - sizeof(struct rx_status),
669 status); 697 status);
670 stats.rate = zd_rx_rate(buffer, status); 698
699 rate = zd_rx_rate(buffer, status);
700
701 /* todo: return index in the big switches in zd_rx_rate instead */
702 for (i = 0; i < mac->band.n_bitrates; i++)
703 if (rate == mac->band.bitrates[i].hw_value)
704 stats.rate_idx = i;
671 705
672 length -= ZD_PLCP_HEADER_SIZE + sizeof(struct rx_status); 706 length -= ZD_PLCP_HEADER_SIZE + sizeof(struct rx_status);
673 buffer += ZD_PLCP_HEADER_SIZE; 707 buffer += ZD_PLCP_HEADER_SIZE;
@@ -715,6 +749,7 @@ static int zd_op_add_interface(struct ieee80211_hw *hw,
715 749
716 switch (conf->type) { 750 switch (conf->type) {
717 case IEEE80211_IF_TYPE_MNTR: 751 case IEEE80211_IF_TYPE_MNTR:
752 case IEEE80211_IF_TYPE_MESH_POINT:
718 case IEEE80211_IF_TYPE_STA: 753 case IEEE80211_IF_TYPE_STA:
719 mac->type = conf->type; 754 mac->type = conf->type;
720 break; 755 break;
@@ -736,7 +771,7 @@ static void zd_op_remove_interface(struct ieee80211_hw *hw,
736static int zd_op_config(struct ieee80211_hw *hw, struct ieee80211_conf *conf) 771static int zd_op_config(struct ieee80211_hw *hw, struct ieee80211_conf *conf)
737{ 772{
738 struct zd_mac *mac = zd_hw_mac(hw); 773 struct zd_mac *mac = zd_hw_mac(hw);
739 return zd_chip_set_channel(&mac->chip, conf->channel); 774 return zd_chip_set_channel(&mac->chip, conf->channel->hw_value);
740} 775}
741 776
742static int zd_op_config_interface(struct ieee80211_hw *hw, 777static int zd_op_config_interface(struct ieee80211_hw *hw,
@@ -744,15 +779,43 @@ static int zd_op_config_interface(struct ieee80211_hw *hw,
744 struct ieee80211_if_conf *conf) 779 struct ieee80211_if_conf *conf)
745{ 780{
746 struct zd_mac *mac = zd_hw_mac(hw); 781 struct zd_mac *mac = zd_hw_mac(hw);
782 int associated;
783
784 if (mac->type == IEEE80211_IF_TYPE_MESH_POINT) {
785 associated = true;
786 if (conf->beacon) {
787 zd_mac_config_beacon(hw, conf->beacon);
788 kfree_skb(conf->beacon);
789 zd_set_beacon_interval(&mac->chip, BCN_MODE_IBSS |
790 hw->conf.beacon_int);
791 }
792 } else
793 associated = is_valid_ether_addr(conf->bssid);
747 794
748 spin_lock_irq(&mac->lock); 795 spin_lock_irq(&mac->lock);
749 mac->associated = is_valid_ether_addr(conf->bssid); 796 mac->associated = associated;
750 spin_unlock_irq(&mac->lock); 797 spin_unlock_irq(&mac->lock);
751 798
752 /* TODO: do hardware bssid filtering */ 799 /* TODO: do hardware bssid filtering */
753 return 0; 800 return 0;
754} 801}
755 802
803void zd_process_intr(struct work_struct *work)
804{
805 u16 int_status;
806 struct zd_mac *mac = container_of(work, struct zd_mac, process_intr);
807
808 int_status = le16_to_cpu(*(u16 *)(mac->intr_buffer+4));
809 if (int_status & INT_CFG_NEXT_BCN) {
810 if (net_ratelimit())
811 dev_dbg_f(zd_mac_dev(mac), "INT_CFG_NEXT_BCN\n");
812 } else
813 dev_dbg_f(zd_mac_dev(mac), "Unsupported interrupt\n");
814
815 zd_chip_enable_hwint(&mac->chip);
816}
817
818
756static void set_multicast_hash_handler(struct work_struct *work) 819static void set_multicast_hash_handler(struct work_struct *work)
757{ 820{
758 struct zd_mac *mac = 821 struct zd_mac *mac =
@@ -780,7 +843,7 @@ static void set_rx_filter_handler(struct work_struct *work)
780 843
781#define SUPPORTED_FIF_FLAGS \ 844#define SUPPORTED_FIF_FLAGS \
782 (FIF_PROMISC_IN_BSS | FIF_ALLMULTI | FIF_FCSFAIL | FIF_CONTROL | \ 845 (FIF_PROMISC_IN_BSS | FIF_ALLMULTI | FIF_FCSFAIL | FIF_CONTROL | \
783 FIF_OTHER_BSS) 846 FIF_OTHER_BSS | FIF_BCN_PRBRESP_PROMISC)
784static void zd_op_configure_filter(struct ieee80211_hw *hw, 847static void zd_op_configure_filter(struct ieee80211_hw *hw,
785 unsigned int changed_flags, 848 unsigned int changed_flags,
786 unsigned int *new_flags, 849 unsigned int *new_flags,
@@ -894,7 +957,6 @@ struct ieee80211_hw *zd_mac_alloc_hw(struct usb_interface *intf)
894{ 957{
895 struct zd_mac *mac; 958 struct zd_mac *mac;
896 struct ieee80211_hw *hw; 959 struct ieee80211_hw *hw;
897 int i;
898 960
899 hw = ieee80211_alloc_hw(sizeof(struct zd_mac), &zd_ops); 961 hw = ieee80211_alloc_hw(sizeof(struct zd_mac), &zd_ops);
900 if (!hw) { 962 if (!hw) {
@@ -912,19 +974,15 @@ struct ieee80211_hw *zd_mac_alloc_hw(struct usb_interface *intf)
912 974
913 memcpy(mac->channels, zd_channels, sizeof(zd_channels)); 975 memcpy(mac->channels, zd_channels, sizeof(zd_channels));
914 memcpy(mac->rates, zd_rates, sizeof(zd_rates)); 976 memcpy(mac->rates, zd_rates, sizeof(zd_rates));
915 	mac->modes[0].mode = MODE_IEEE80211G;
916 	mac->modes[0].num_rates = ARRAY_SIZE(zd_rates);
917 	mac->modes[0].rates = mac->rates;
918 	mac->modes[0].num_channels = ARRAY_SIZE(zd_channels);
919 	mac->modes[0].channels = mac->channels;
920 	mac->modes[1].mode = MODE_IEEE80211B;
921 	mac->modes[1].num_rates = 4;
922 	mac->modes[1].rates = mac->rates;
923 	mac->modes[1].num_channels = ARRAY_SIZE(zd_channels);
924 	mac->modes[1].channels = mac->channels;
977 	mac->band.n_bitrates = ARRAY_SIZE(zd_rates);
978 	mac->band.bitrates = mac->rates;
979 	mac->band.n_channels = ARRAY_SIZE(zd_channels);
980 	mac->band.channels = mac->channels;
981
982 	hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &mac->band;
925 983
926 hw->flags = IEEE80211_HW_RX_INCLUDES_FCS | 984 hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
927 IEEE80211_HW_DEFAULT_REG_DOMAIN_CONFIGURED; 985 IEEE80211_HW_HOST_GEN_BEACON_TEMPLATE;
928 hw->max_rssi = 100; 986 hw->max_rssi = 100;
929 hw->max_signal = 100; 987 hw->max_signal = 100;
930 988
@@ -933,19 +991,12 @@ struct ieee80211_hw *zd_mac_alloc_hw(struct usb_interface *intf)
933 991
934 skb_queue_head_init(&mac->ack_wait_queue); 992 skb_queue_head_init(&mac->ack_wait_queue);
935 993
936 for (i = 0; i < 2; i++) {
937 if (ieee80211_register_hwmode(hw, &mac->modes[i])) {
938 dev_dbg_f(&intf->dev, "cannot register hwmode\n");
939 ieee80211_free_hw(hw);
940 return NULL;
941 }
942 }
943
944 zd_chip_init(&mac->chip, hw, intf); 994 zd_chip_init(&mac->chip, hw, intf);
945 housekeeping_init(mac); 995 housekeeping_init(mac);
946 INIT_WORK(&mac->set_multicast_hash_work, set_multicast_hash_handler); 996 INIT_WORK(&mac->set_multicast_hash_work, set_multicast_hash_handler);
947 INIT_WORK(&mac->set_rts_cts_work, set_rts_cts_work); 997 INIT_WORK(&mac->set_rts_cts_work, set_rts_cts_work);
948 INIT_WORK(&mac->set_rx_filter_work, set_rx_filter_handler); 998 INIT_WORK(&mac->set_rx_filter_work, set_rx_filter_handler);
999 INIT_WORK(&mac->process_intr, zd_process_intr);
949 1000
950 SET_IEEE80211_DEV(hw, &intf->dev); 1001 SET_IEEE80211_DEV(hw, &intf->dev);
951 return hw; 1002 return hw;
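zd_mac_rx() above now reports an index into the registered band's bitrate table instead of a raw hardware rate value, using a linear search over band.bitrates. A standalone sketch of that reverse lookup; the table is a trimmed stand-in for zd_rates and the hw values are illustrative.

/* Sketch only: hardware rate value to band bitrate index. */
#include <stdint.h>
#include <stdio.h>

struct ex_rate {
	uint16_t bitrate;	/* in units of 100 kbit/s */
	uint8_t hw_value;
};

static const struct ex_rate ex_rates[] = {
	{ 10, 0x00 }, { 20, 0x01 }, { 55, 0x02 }, { 110, 0x03 },
	{ 60, 0x1b }, { 120, 0x1a }, { 240, 0x19 }, { 540, 0x1c },
};

static int rate_to_idx(uint8_t hw_rate)
{
	int i;

	for (i = 0; i < (int)(sizeof(ex_rates) / sizeof(ex_rates[0])); i++)
		if (ex_rates[i].hw_value == hw_rate)
			return i;
	return 0;	/* fall back to the lowest rate for this illustration */
}

int main(void)
{
	printf("hw 0x1a -> idx %d\n", rate_to_idx(0x1a));
	return 0;
}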
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.h b/drivers/net/wireless/zd1211rw/zd_mac.h
index 2dde108df767..71170244d2c9 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.h
+++ b/drivers/net/wireless/zd1211rw/zd_mac.h
@@ -172,12 +172,15 @@ struct zd_tx_skb_control_block {
172struct zd_mac { 172struct zd_mac {
173 struct zd_chip chip; 173 struct zd_chip chip;
174 spinlock_t lock; 174 spinlock_t lock;
175 spinlock_t intr_lock;
175 struct ieee80211_hw *hw; 176 struct ieee80211_hw *hw;
176 struct housekeeping housekeeping; 177 struct housekeeping housekeeping;
177 struct work_struct set_multicast_hash_work; 178 struct work_struct set_multicast_hash_work;
178 struct work_struct set_rts_cts_work; 179 struct work_struct set_rts_cts_work;
179 struct work_struct set_rx_filter_work; 180 struct work_struct set_rx_filter_work;
181 struct work_struct process_intr;
180 struct zd_mc_hash multicast_hash; 182 struct zd_mc_hash multicast_hash;
183 u8 intr_buffer[USB_MAX_EP_INT_BUFFER];
181 u8 regdomain; 184 u8 regdomain;
182 u8 default_regdomain; 185 u8 default_regdomain;
183 int type; 186 int type;
@@ -185,7 +188,7 @@ struct zd_mac {
185 struct sk_buff_head ack_wait_queue; 188 struct sk_buff_head ack_wait_queue;
186 struct ieee80211_channel channels[14]; 189 struct ieee80211_channel channels[14];
187 struct ieee80211_rate rates[12]; 190 struct ieee80211_rate rates[12];
188 struct ieee80211_hw_mode modes[2]; 191 struct ieee80211_supported_band band;
189 192
190 /* Short preamble (used for RTS/CTS) */ 193 /* Short preamble (used for RTS/CTS) */
191 unsigned int short_preamble:1; 194 unsigned int short_preamble:1;
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
index 7942b15acfe7..e34675c2f8fc 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
@@ -97,6 +97,7 @@ MODULE_DEVICE_TABLE(usb, usb_ids);
97#define FW_ZD1211B_PREFIX "zd1211/zd1211b_" 97#define FW_ZD1211B_PREFIX "zd1211/zd1211b_"
98 98
99/* USB device initialization */ 99/* USB device initialization */
100static void int_urb_complete(struct urb *urb);
100 101
101static int request_fw_file( 102static int request_fw_file(
102 const struct firmware **fw, const char *name, struct device *device) 103 const struct firmware **fw, const char *name, struct device *device)
@@ -336,11 +337,18 @@ static inline void handle_regs_int(struct urb *urb)
336 struct zd_usb *usb = urb->context; 337 struct zd_usb *usb = urb->context;
337 struct zd_usb_interrupt *intr = &usb->intr; 338 struct zd_usb_interrupt *intr = &usb->intr;
338 int len; 339 int len;
340 u16 int_num;
339 341
340 ZD_ASSERT(in_interrupt()); 342 ZD_ASSERT(in_interrupt());
341 spin_lock(&intr->lock); 343 spin_lock(&intr->lock);
342 344
343 if (intr->read_regs_enabled) { 345 int_num = le16_to_cpu(*(u16 *)(urb->transfer_buffer+2));
346 if (int_num == CR_INTERRUPT) {
347 struct zd_mac *mac = zd_hw_mac(zd_usb_to_hw(urb->context));
348 memcpy(&mac->intr_buffer, urb->transfer_buffer,
349 USB_MAX_EP_INT_BUFFER);
350 schedule_work(&mac->process_intr);
351 } else if (intr->read_regs_enabled) {
344 intr->read_regs.length = len = urb->actual_length; 352 intr->read_regs.length = len = urb->actual_length;
345 353
346 if (len > sizeof(intr->read_regs.buffer)) 354 if (len > sizeof(intr->read_regs.buffer))
@@ -351,7 +359,6 @@ static inline void handle_regs_int(struct urb *urb)
351 goto out; 359 goto out;
352 } 360 }
353 361
354 dev_dbg_f(urb_dev(urb), "regs interrupt ignored\n");
355out: 362out:
356 spin_unlock(&intr->lock); 363 spin_unlock(&intr->lock);
357} 364}
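handle_regs_int() above distinguishes interrupt reports by reading a little-endian 16-bit id at offset 2 of the USB transfer buffer (the kernel code does this with a cast plus le16_to_cpu) and defers CR_INTERRUPT events to the new process_intr work item. A portable standalone sketch of that decode, with made-up buffer contents:

/* Sketch only: reading a little-endian u16 from a byte buffer. */
#include <stdint.h>
#include <stdio.h>

static uint16_t get_le16(const uint8_t *p)
{
	return (uint16_t)p[0] | ((uint16_t)p[1] << 8);
}

int main(void)
{
	/* pretend USB interrupt transfer buffer */
	uint8_t buf[8] = { 0x01, 0x90, 0x01, 0x69, 0x00, 0x20, 0x00, 0x00 };
	uint16_t int_num = get_le16(buf + 2);

	printf("int_num = 0x%04x\n", int_num);
	return 0;
}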
diff --git a/drivers/net/yellowfin.c b/drivers/net/yellowfin.c
index fe6ff3e3d525..24640726f8bb 100644
--- a/drivers/net/yellowfin.c
+++ b/drivers/net/yellowfin.c
@@ -770,14 +770,14 @@ static void yellowfin_init_ring(struct net_device *dev)
770 /* Branch on Tx error. */ 770 /* Branch on Tx error. */
771 yp->tx_ring[j].dbdma_cmd = cpu_to_le32(CMD_STOP); 771 yp->tx_ring[j].dbdma_cmd = cpu_to_le32(CMD_STOP);
772 yp->tx_ring[j].branch_addr = cpu_to_le32(yp->tx_ring_dma + 772 yp->tx_ring[j].branch_addr = cpu_to_le32(yp->tx_ring_dma +
773 (j+1)*sizeof(struct yellowfin_desc); 773 (j+1)*sizeof(struct yellowfin_desc));
774 j++; 774 j++;
775 if (yp->flags & FullTxStatus) { 775 if (yp->flags & FullTxStatus) {
776 yp->tx_ring[j].dbdma_cmd = 776 yp->tx_ring[j].dbdma_cmd =
777 cpu_to_le32(CMD_TXSTATUS | sizeof(*yp->tx_status)); 777 cpu_to_le32(CMD_TXSTATUS | sizeof(*yp->tx_status));
778 yp->tx_ring[j].request_cnt = sizeof(*yp->tx_status); 778 yp->tx_ring[j].request_cnt = sizeof(*yp->tx_status);
779 yp->tx_ring[j].addr = cpu_to_le32(yp->tx_status_dma + 779 yp->tx_ring[j].addr = cpu_to_le32(yp->tx_status_dma +
780 i*sizeof(struct tx_status_words); 780 i*sizeof(struct tx_status_words));
781 } else { 781 } else {
782 /* Symbios chips write only tx_errs word. */ 782 /* Symbios chips write only tx_errs word. */
783 yp->tx_ring[j].dbdma_cmd = 783 yp->tx_ring[j].dbdma_cmd =